From e6ef644db4e88cdb5a8c4362d8037e6abfbb0abc Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Tue, 27 May 2025 05:55:58 +1000 Subject: [PATCH 01/13] Verify `getBlobsV2` response and avoid reprocessing imported data columns (#7493) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit #7461 and partly #6439. Desired behaviour after receiving `engine_getBlobs` response: 1. Gossip verify the blobs and proofs, but don't mark them as observed yet. This is because not all blobs are published immediately (due to staggered publishing). If we mark them as observed and not publish them, we could end up blocking the gossip propagation. 2. Blobs are marked as observed _either_ when: * They are received from gossip and forwarded to the network . * They are published by the node. Current behaviour: - ❗ We only gossip verify `engine_getBlobsV1` responses, but not `engine_getBlobsV2` responses (PeerDAS). - ❗ After importing EL blobs AND before they're published, if the same blobs arrive via gossip, they will get re-processed, which may result in a re-import. 1. Perform gossip verification on data columns computed from EL `getBlobsV2` response. We currently only do this for `getBlobsV1` to prevent importing blobs with invalid proofs into the `DataAvailabilityChecker`, this should be done on V2 responses too. 2. Add additional gossip verification to make sure we don't re-process a ~~blob~~ or data column that was imported via the EL `getBlobs` but not yet "seen" on the gossip network. If an "unobserved" gossip blob is found in the availability cache, then we know it has passed verification so we can immediately propagate the `ACCEPT` result and forward it to the network, but without re-processing it. 
**UPDATE:** I've left blobs out for the second change mentioned above, as the likelihood and impact are very low and we haven't seen it enough, but under PeerDAS this issue is a regular occurrence and we do see the same block getting imported many times. --- beacon_node/beacon_chain/src/beacon_chain.rs | 57 ++++--- .../beacon_chain/src/blob_verification.rs | 26 ++-- .../src/data_availability_checker.rs | 86 ++++------- .../src/data_column_verification.rs | 51 ++++++- .../fetch_blobs/fetch_blobs_beacon_adapter.rs | 13 +- .../beacon_chain/src/fetch_blobs/mod.rs | 97 ++++++++---- .../beacon_chain/src/fetch_blobs/tests.rs | 18 ++- beacon_node/http_api/src/publish_blocks.rs | 8 + .../gossip_methods.rs | 13 ++ .../src/network_beacon_processor/mod.rs | 11 +- .../src/network_beacon_processor/tests.rs | 143 +++++++++++++++--- beacon_node/network/src/sync/tests/lookups.rs | 8 +- 12 files changed, 371 insertions(+), 160 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index c1d30253a3..990f4b6099 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -3146,7 +3146,7 @@ impl BeaconChain { self: &Arc, slot: Slot, block_root: Hash256, - engine_get_blobs_output: EngineGetBlobsOutput, + engine_get_blobs_output: EngineGetBlobsOutput, ) -> Result { // If this block has already been imported to forkchoice it must have been available, so // we don't need to process its blobs again. @@ -3161,7 +3161,7 @@ impl BeaconChain { // process_engine_blobs is called for both pre and post PeerDAS. However, post PeerDAS // consumers don't expect the blobs event to fire erratically. 
if let EngineGetBlobsOutput::Blobs(blobs) = &engine_get_blobs_output { - self.emit_sse_blob_sidecar_events(&block_root, blobs.iter().flatten().map(Arc::as_ref)); + self.emit_sse_blob_sidecar_events(&block_root, blobs.iter().map(|b| b.as_blob())); } let r = self @@ -3545,7 +3545,9 @@ impl BeaconChain { if let Some(slasher) = self.slasher.as_ref() { slasher.accept_block_header(blob.signed_block_header()); } - let availability = self.data_availability_checker.put_gossip_blob(blob)?; + let availability = self + .data_availability_checker + .put_gossip_verified_blobs(blob.block_root(), std::iter::once(blob))?; self.process_availability(slot, availability, || Ok(())) .await @@ -3568,21 +3570,21 @@ impl BeaconChain { let availability = self .data_availability_checker - .put_gossip_data_columns(block_root, data_columns)?; + .put_gossip_verified_data_columns(block_root, data_columns)?; self.process_availability(slot, availability, publish_fn) .await } - fn check_blobs_for_slashability( + fn check_blobs_for_slashability<'a>( self: &Arc, block_root: Hash256, - blobs: &FixedBlobSidecarList, + blobs: impl IntoIterator>, ) -> Result<(), BlockError> { let mut slashable_cache = self.observed_slashable.write(); for header in blobs - .iter() - .filter_map(|b| b.as_ref().map(|b| b.signed_block_header.clone())) + .into_iter() + .map(|b| b.signed_block_header.clone()) .unique() { if verify_header_signature::(self, &header).is_ok() { @@ -3609,7 +3611,7 @@ impl BeaconChain { block_root: Hash256, blobs: FixedBlobSidecarList, ) -> Result { - self.check_blobs_for_slashability(block_root, &blobs)?; + self.check_blobs_for_slashability(block_root, blobs.iter().flatten().map(Arc::as_ref))?; let availability = self .data_availability_checker .put_rpc_blobs(block_root, blobs)?; @@ -3622,18 +3624,21 @@ impl BeaconChain { self: &Arc, slot: Slot, block_root: Hash256, - engine_get_blobs_output: EngineGetBlobsOutput, + engine_get_blobs_output: EngineGetBlobsOutput, ) -> Result { let availability = 
match engine_get_blobs_output { EngineGetBlobsOutput::Blobs(blobs) => { - self.check_blobs_for_slashability(block_root, &blobs)?; + self.check_blobs_for_slashability(block_root, blobs.iter().map(|b| b.as_blob()))?; self.data_availability_checker - .put_engine_blobs(block_root, blobs)? + .put_gossip_verified_blobs(block_root, blobs)? } EngineGetBlobsOutput::CustodyColumns(data_columns) => { - self.check_columns_for_slashability(block_root, &data_columns)?; + self.check_columns_for_slashability( + block_root, + data_columns.iter().map(|c| c.as_data_column()), + )?; self.data_availability_checker - .put_engine_data_columns(block_root, data_columns)? + .put_gossip_verified_data_columns(block_root, data_columns)? } }; @@ -3649,7 +3654,10 @@ impl BeaconChain { block_root: Hash256, custody_columns: DataColumnSidecarList, ) -> Result { - self.check_columns_for_slashability(block_root, &custody_columns)?; + self.check_columns_for_slashability( + block_root, + custody_columns.iter().map(|c| c.as_ref()), + )?; // This slot value is purely informative for the consumers of // `AvailabilityProcessingStatus::MissingComponents` to log an error with a slot. @@ -3661,16 +3669,21 @@ impl BeaconChain { .await } - fn check_columns_for_slashability( + fn check_columns_for_slashability<'a>( self: &Arc, block_root: Hash256, - custody_columns: &DataColumnSidecarList, + custody_columns: impl IntoIterator>, ) -> Result<(), BlockError> { let mut slashable_cache = self.observed_slashable.write(); - // Assumes all items in custody_columns are for the same block_root - if let Some(column) = custody_columns.first() { - let header = &column.signed_block_header; - if verify_header_signature::(self, header).is_ok() { + // Process all unique block headers - previous logic assumed all headers were identical and + // only processed the first one. However, we should not make assumptions about data received + // from RPC. 
+ for header in custody_columns + .into_iter() + .map(|c| c.signed_block_header.clone()) + .unique() + { + if verify_header_signature::(self, &header).is_ok() { slashable_cache .observe_slashable( header.message.slot, @@ -3679,7 +3692,7 @@ impl BeaconChain { ) .map_err(|e| BlockError::BeaconChainError(Box::new(e.into())))?; if let Some(slasher) = self.slasher.as_ref() { - slasher.accept_block_header(header.clone()); + slasher.accept_block_header(header); } } } diff --git a/beacon_node/beacon_chain/src/blob_verification.rs b/beacon_node/beacon_chain/src/blob_verification.rs index 6fe710f41a..d7acb78408 100644 --- a/beacon_node/beacon_chain/src/blob_verification.rs +++ b/beacon_node/beacon_chain/src/blob_verification.rs @@ -166,6 +166,16 @@ pub struct GossipVerifiedBlob, } +impl Clone for GossipVerifiedBlob { + fn clone(&self) -> Self { + Self { + block_root: self.block_root, + blob: self.blob.clone(), + _phantom: PhantomData, + } + } +} + impl GossipVerifiedBlob { pub fn new( blob: Arc>, @@ -335,21 +345,9 @@ impl KzgVerifiedBlobList { } /// Create a `KzgVerifiedBlobList` from `blobs` that are already KZG verified. - /// - /// This should be used with caution, as used incorrectly it could result in KZG verification - /// being skipped and invalid blobs being deemed valid. 
- pub fn from_verified>>>( - blobs: I, - seen_timestamp: Duration, - ) -> Self { + pub fn from_verified>>(blobs: I) -> Self { Self { - verified_blobs: blobs - .into_iter() - .map(|blob| KzgVerifiedBlob { - blob, - seen_timestamp, - }) - .collect(), + verified_blobs: blobs.into_iter().collect(), } } } diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs index 6f292f3551..0fd417389b 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -17,7 +17,7 @@ use task_executor::TaskExecutor; use tracing::{debug, error, info_span, Instrument}; use types::blob_sidecar::{BlobIdentifier, BlobSidecar, FixedBlobSidecarList}; use types::{ - BlobSidecarList, ChainSpec, DataColumnSidecarList, Epoch, EthSpec, Hash256, + BlobSidecarList, ChainSpec, DataColumnSidecar, DataColumnSidecarList, Epoch, EthSpec, Hash256, RuntimeVariableList, SignedBeaconBlock, }; @@ -32,6 +32,7 @@ use crate::data_column_verification::{ use crate::metrics::{ KZG_DATA_COLUMN_RECONSTRUCTION_ATTEMPTS, KZG_DATA_COLUMN_RECONSTRUCTION_FAILURES, }; +use crate::observed_data_sidecars::ObservationStrategy; pub use error::{Error as AvailabilityCheckError, ErrorCategory as AvailabilityCheckErrorCategory}; use types::non_zero_usize::new_non_zero_usize; @@ -155,6 +156,21 @@ impl DataAvailabilityChecker { }) } + /// Check if the exact data column is in the availability cache. + pub fn is_data_column_cached( + &self, + block_root: &Hash256, + data_column: &DataColumnSidecar, + ) -> bool { + self.availability_cache + .peek_pending_components(block_root, |components| { + components.is_some_and(|components| { + let cached_column_opt = components.get_cached_data_column(data_column.index); + cached_column_opt.is_some_and(|cached| *cached == *data_column) + }) + }) + } + /// Get a blob from the availability cache. 
pub fn get_blob( &self, @@ -218,65 +234,21 @@ impl DataAvailabilityChecker { .put_kzg_verified_data_columns(block_root, verified_custody_columns) } - /// Put a list of blobs received from the EL pool into the availability cache. - /// - /// This DOES NOT perform KZG verification because the KZG proofs should have been constructed - /// immediately prior to calling this function so they are assumed to be valid. - pub fn put_engine_blobs( - &self, - block_root: Hash256, - blobs: FixedBlobSidecarList, - ) -> Result, AvailabilityCheckError> { - let seen_timestamp = self - .slot_clock - .now_duration() - .ok_or(AvailabilityCheckError::SlotClockError)?; - self.availability_cache.put_kzg_verified_blobs( - block_root, - KzgVerifiedBlobList::from_verified(blobs.iter().flatten().cloned(), seen_timestamp), - ) - } - - /// Put a list of data columns computed from blobs received from the EL pool into the - /// availability cache. - /// - /// This DOES NOT perform KZG proof and inclusion proof verification because - /// - The KZG proofs should have been verified by the trusted EL. - /// - The KZG commitments inclusion proof should have been constructed immediately prior to - /// calling this function so they are assumed to be valid. - /// - /// This method is used if the EL already has the blobs and returns them via the `getBlobsV2` - /// engine method. - /// More details in [fetch_blobs.rs](https://github.com/sigp/lighthouse/blob/44f8add41ea2252769bb967864af95b3c13af8ca/beacon_node/beacon_chain/src/fetch_blobs.rs). 
- pub fn put_engine_data_columns( - &self, - block_root: Hash256, - data_columns: DataColumnSidecarList, - ) -> Result, AvailabilityCheckError> { - let kzg_verified_custody_columns = data_columns - .into_iter() - .map(|d| { - KzgVerifiedCustodyDataColumn::from_asserted_custody( - KzgVerifiedDataColumn::from_verified(d), - ) - }) - .collect::>(); - - self.availability_cache - .put_kzg_verified_data_columns(block_root, kzg_verified_custody_columns) - } - /// Check if we've cached other blobs for this block. If it completes a set and we also /// have a block cached, return the `Availability` variant triggering block import. /// Otherwise cache the blob sidecar. /// /// This should only accept gossip verified blobs, so we should not have to worry about dupes. - pub fn put_gossip_blob( + pub fn put_gossip_verified_blobs< + I: IntoIterator>, + O: ObservationStrategy, + >( &self, - gossip_blob: GossipVerifiedBlob, + block_root: Hash256, + blobs: I, ) -> Result, AvailabilityCheckError> { self.availability_cache - .put_kzg_verified_blobs(gossip_blob.block_root(), vec![gossip_blob.into_inner()]) + .put_kzg_verified_blobs(block_root, blobs.into_iter().map(|b| b.into_inner())) } /// Check if we've cached other data columns for this block. If it satisfies the custody requirement and we also @@ -284,13 +256,15 @@ impl DataAvailabilityChecker { /// Otherwise cache the data column sidecar. /// /// This should only accept gossip verified data columns, so we should not have to worry about dupes. 
- #[allow(clippy::type_complexity)] - pub fn put_gossip_data_columns( + pub fn put_gossip_verified_data_columns< + O: ObservationStrategy, + I: IntoIterator>, + >( &self, block_root: Hash256, - gossip_data_columns: Vec>, + data_columns: I, ) -> Result, AvailabilityCheckError> { - let custody_columns = gossip_data_columns + let custody_columns = data_columns .into_iter() .map(|c| KzgVerifiedCustodyDataColumn::from_asserted_custody(c.into_inner())) .collect::>(); diff --git a/beacon_node/beacon_chain/src/data_column_verification.rs b/beacon_node/beacon_chain/src/data_column_verification.rs index b43b259cf6..4e847d9f9f 100644 --- a/beacon_node/beacon_chain/src/data_column_verification.rs +++ b/beacon_node/beacon_chain/src/data_column_verification.rs @@ -129,6 +129,10 @@ pub enum GossipDataColumnError { slot: Slot, index: ColumnIndex, }, + /// A column has already been processed from non-gossip source and have not yet been seen on + /// the gossip network. + /// This column should be accepted and forwarded over gossip. + PriorKnownUnpublished, /// Data column index must be between 0 and `NUMBER_OF_COLUMNS` (exclusive). /// /// ## Peer scoring @@ -181,6 +185,16 @@ pub struct GossipVerifiedDataColumn, } +impl Clone for GossipVerifiedDataColumn { + fn clone(&self) -> Self { + Self { + block_root: self.block_root, + data_column: self.data_column.clone(), + _phantom: PhantomData, + } + } +} + impl GossipVerifiedDataColumn { pub fn new( column_sidecar: Arc>, @@ -200,6 +214,16 @@ impl GossipVerifiedDataColumn ) } + /// Create a `GossipVerifiedDataColumn` from `DataColumnSidecar` for testing ONLY. 
+ #[cfg(test)] + pub(crate) fn __new_for_testing(column_sidecar: Arc>) -> Self { + Self { + block_root: column_sidecar.block_root(), + data_column: KzgVerifiedDataColumn::__new_for_testing(column_sidecar), + _phantom: Default::default(), + } + } + pub fn as_data_column(&self) -> &DataColumnSidecar { self.data_column.as_data_column() } @@ -243,11 +267,9 @@ impl KzgVerifiedDataColumn { verify_kzg_for_data_column(data_column, kzg) } - /// Create a `KzgVerifiedDataColumn` from `data_column` that are already KZG verified. - /// - /// This should be used with caution, as used incorrectly it could result in KZG verification - /// being skipped and invalid data_columns being deemed valid. - pub fn from_verified(data_column: Arc>) -> Self { + /// Create a `KzgVerifiedDataColumn` from `DataColumnSidecar` for testing ONLY. + #[cfg(test)] + pub(crate) fn __new_for_testing(data_column: Arc>) -> Self { Self { data: data_column } } @@ -444,6 +466,23 @@ pub fn validate_data_column_sidecar_for_gossip { @@ -74,11 +75,19 @@ impl FetchBlobsBeaconAdapter { GossipVerifiedBlob::::new(blob.clone(), blob.index, &self.chain) } + pub(crate) fn verify_data_column_for_gossip( + &self, + data_column: Arc>, + ) -> Result, GossipDataColumnError> { + let index = data_column.index; + GossipVerifiedDataColumn::::new(data_column, index, &self.chain) + } + pub(crate) async fn process_engine_blobs( &self, slot: Slot, block_root: Hash256, - blobs: EngineGetBlobsOutput, + blobs: EngineGetBlobsOutput, ) -> Result { self.chain .process_engine_blobs(slot, block_root, blobs) diff --git a/beacon_node/beacon_chain/src/fetch_blobs/mod.rs b/beacon_node/beacon_chain/src/fetch_blobs/mod.rs index ba798137b0..927841376f 100644 --- a/beacon_node/beacon_chain/src/fetch_blobs/mod.rs +++ b/beacon_node/beacon_chain/src/fetch_blobs/mod.rs @@ -13,6 +13,7 @@ mod fetch_blobs_beacon_adapter; mod tests; use crate::blob_verification::{GossipBlobError, GossipVerifiedBlob}; +use 
crate::data_column_verification::{GossipDataColumnError, GossipVerifiedDataColumn}; #[cfg_attr(test, double)] use crate::fetch_blobs::fetch_blobs_beacon_adapter::FetchBlobsBeaconAdapter; use crate::kzg_utils::blobs_to_data_column_sidecars; @@ -34,24 +35,17 @@ use tracing::{debug, warn}; use types::blob_sidecar::{BlobSidecarError, FixedBlobSidecarList}; use types::data_column_sidecar::DataColumnSidecarError; use types::{ - BeaconStateError, Blob, BlobSidecar, ChainSpec, ColumnIndex, DataColumnSidecarList, EthSpec, - FullPayload, Hash256, KzgProofs, SignedBeaconBlock, SignedBeaconBlockHeader, VersionedHash, + BeaconStateError, Blob, BlobSidecar, ChainSpec, ColumnIndex, EthSpec, FullPayload, Hash256, + KzgProofs, SignedBeaconBlock, SignedBeaconBlockHeader, VersionedHash, }; -/// Blobs or data column to be published to the gossip network. -pub enum BlobsOrDataColumns { +/// Result from engine get blobs to be passed onto `DataAvailabilityChecker` and published to the +/// gossip network. The blobs / data columns have not been marked as observed yet, as they may not +/// be published immediately. +pub enum EngineGetBlobsOutput { Blobs(Vec>), - DataColumns(DataColumnSidecarList), -} - -/// Result from engine get blobs to be passed onto `DataAvailabilityChecker`. -/// -/// The blobs are retrieved from a trusted EL and columns are computed locally, therefore they are -/// considered valid without requiring extra validation. -pub enum EngineGetBlobsOutput { - Blobs(FixedBlobSidecarList), /// A filtered list of custody data columns to be imported into the `DataAvailabilityChecker`. 
- CustodyColumns(DataColumnSidecarList), + CustodyColumns(Vec>), } #[derive(Debug)] @@ -64,6 +58,7 @@ pub enum FetchEngineBlobError { ExecutionLayerMissing, InternalError(String), GossipBlob(GossipBlobError), + GossipDataColumn(GossipDataColumnError), RequestFailed(ExecutionLayerError), RuntimeShutdown, TokioJoin(tokio::task::JoinError), @@ -76,7 +71,7 @@ pub async fn fetch_and_process_engine_blobs( block_root: Hash256, block: Arc>>, custody_columns: HashSet, - publish_fn: impl Fn(BlobsOrDataColumns) + Send + 'static, + publish_fn: impl Fn(EngineGetBlobsOutput) + Send + 'static, ) -> Result, FetchEngineBlobError> { fetch_and_process_engine_blobs_inner( FetchBlobsBeaconAdapter::new(chain), @@ -95,7 +90,7 @@ async fn fetch_and_process_engine_blobs_inner( block_root: Hash256, block: Arc>>, custody_columns: HashSet, - publish_fn: impl Fn(BlobsOrDataColumns) + Send + 'static, + publish_fn: impl Fn(EngineGetBlobsOutput) + Send + 'static, ) -> Result, FetchEngineBlobError> { let versioned_hashes = if let Some(kzg_commitments) = block .message() @@ -148,7 +143,7 @@ async fn fetch_and_process_blobs_v1( block_root: Hash256, block: Arc>, versioned_hashes: Vec, - publish_fn: impl Fn(BlobsOrDataColumns) + Send + Sized, + publish_fn: impl Fn(EngineGetBlobsOutput) + Send + Sized, ) -> Result, FetchEngineBlobError> { let num_expected_blobs = versioned_hashes.len(); metrics::observe(&metrics::BLOBS_FROM_EL_EXPECTED, num_expected_blobs as f64); @@ -189,7 +184,7 @@ async fn fetch_and_process_blobs_v1( // and be accepted (and propagated) while we are waiting to publish. Just before publishing // we will observe the blobs/columns and only proceed with publishing if they are not yet seen. 
let blobs_to_import_and_publish = fixed_blob_sidecar_list - .iter() + .into_iter() .filter_map(|opt_blob| { let blob = opt_blob.as_ref()?; match chain_adapter.verify_blob_for_gossip(blob) { @@ -203,7 +198,9 @@ async fn fetch_and_process_blobs_v1( .map_err(FetchEngineBlobError::GossipBlob)?; if !blobs_to_import_and_publish.is_empty() { - publish_fn(BlobsOrDataColumns::Blobs(blobs_to_import_and_publish)); + publish_fn(EngineGetBlobsOutput::Blobs( + blobs_to_import_and_publish.clone(), + )); } debug!(num_fetched_blobs, "Processing engine blobs"); @@ -212,7 +209,7 @@ async fn fetch_and_process_blobs_v1( .process_engine_blobs( block.slot(), block_root, - EngineGetBlobsOutput::Blobs(fixed_blob_sidecar_list.clone()), + EngineGetBlobsOutput::Blobs(blobs_to_import_and_publish), ) .await?; @@ -225,7 +222,7 @@ async fn fetch_and_process_blobs_v2( block: Arc>, versioned_hashes: Vec, custody_columns_indices: HashSet, - publish_fn: impl Fn(BlobsOrDataColumns) + Send + 'static, + publish_fn: impl Fn(EngineGetBlobsOutput) + Send + 'static, ) -> Result, FetchEngineBlobError> { let num_expected_blobs = versioned_hashes.len(); @@ -278,6 +275,7 @@ async fn fetch_and_process_blobs_v2( return Ok(None); } + let chain_adapter = Arc::new(chain_adapter); let custody_columns = compute_and_publish_data_columns( &chain_adapter, block.clone(), @@ -303,15 +301,16 @@ async fn fetch_and_process_blobs_v2( /// Offload the data column computation to a blocking task to avoid holding up the async runtime. 
async fn compute_and_publish_data_columns( - chain_adapter: &FetchBlobsBeaconAdapter, + chain_adapter: &Arc>, block: Arc>>, blobs: Vec>, proofs: Vec>, custody_columns_indices: HashSet, - publish_fn: impl Fn(BlobsOrDataColumns) + Send + 'static, -) -> Result, FetchEngineBlobError> { + publish_fn: impl Fn(EngineGetBlobsOutput) + Send + 'static, +) -> Result>, FetchEngineBlobError> { let kzg = chain_adapter.kzg().clone(); let spec = chain_adapter.spec().clone(); + let chain_adapter_cloned = chain_adapter.clone(); chain_adapter .executor() .spawn_blocking_handle( @@ -338,8 +337,54 @@ async fn compute_and_publish_data_columns( }) .map_err(FetchEngineBlobError::DataColumnSidecarError)?; - publish_fn(BlobsOrDataColumns::DataColumns(custody_columns.clone())); - Ok(custody_columns) + // Gossip verify data columns before publishing. This prevents blobs with invalid + // KZG proofs from the EL making it into the data availability checker. We do not + // immediately add these blobs to the observed blobs/columns cache because we want + // to allow blobs/columns to arrive on gossip and be accepted (and propagated) while + // we are waiting to publish. Just before publishing we will observe the blobs/columns + // and only proceed with publishing if they are not yet seen. + // TODO(das): we may want to just perform kzg proof verification here, since the + // `DataColumnSidecar` and inclusion proof is computed just above and is unnecessary + // to verify them. + let columns_to_import_and_publish = custody_columns + .into_iter() + .filter_map(|col| { + match chain_adapter_cloned.verify_data_column_for_gossip(col) { + Ok(verified) => Some(Ok(verified)), + Err(e) => match e { + // Ignore already seen data columns + GossipDataColumnError::PriorKnown { .. 
} + | GossipDataColumnError::PriorKnownUnpublished => None, + GossipDataColumnError::BeaconChainError(_) + | GossipDataColumnError::ProposalSignatureInvalid + | GossipDataColumnError::UnknownValidator(_) + | GossipDataColumnError::IsNotLaterThanParent { .. } + | GossipDataColumnError::InvalidKzgProof(_) + | GossipDataColumnError::InvalidSubnetId { .. } + | GossipDataColumnError::FutureSlot { .. } + | GossipDataColumnError::PastFinalizedSlot { .. } + | GossipDataColumnError::PubkeyCacheTimeout + | GossipDataColumnError::ProposerIndexMismatch { .. } + | GossipDataColumnError::ParentUnknown { .. } + | GossipDataColumnError::NotFinalizedDescendant { .. } + | GossipDataColumnError::InvalidInclusionProof + | GossipDataColumnError::InvalidColumnIndex(_) + | GossipDataColumnError::UnexpectedDataColumn + | GossipDataColumnError::InconsistentCommitmentsLength { .. } + | GossipDataColumnError::InconsistentProofsLength { .. } => { + Some(Err(e)) + } + }, + } + }) + .collect::, _>>() + .map_err(FetchEngineBlobError::GossipDataColumn)?; + + publish_fn(EngineGetBlobsOutput::CustodyColumns( + columns_to_import_and_publish.clone(), + )); + + Ok(columns_to_import_and_publish) }, "compute_and_publish_data_columns", ) diff --git a/beacon_node/beacon_chain/src/fetch_blobs/tests.rs b/beacon_node/beacon_chain/src/fetch_blobs/tests.rs index be3d29e9c9..8eefd4ddf8 100644 --- a/beacon_node/beacon_chain/src/fetch_blobs/tests.rs +++ b/beacon_node/beacon_chain/src/fetch_blobs/tests.rs @@ -1,6 +1,7 @@ +use crate::data_column_verification::GossipVerifiedDataColumn; use crate::fetch_blobs::fetch_blobs_beacon_adapter::MockFetchBlobsBeaconAdapter; use crate::fetch_blobs::{ - fetch_and_process_engine_blobs_inner, BlobsOrDataColumns, FetchEngineBlobError, + fetch_and_process_engine_blobs_inner, EngineGetBlobsOutput, FetchEngineBlobError, }; use crate::test_utils::{get_kzg, EphemeralHarnessType}; use crate::AvailabilityProcessingStatus; @@ -148,6 +149,9 @@ async fn test_fetch_blobs_v2_success() { // 
All blobs returned, fork choice doesn't contain block mock_get_blobs_v2_response(&mut mock_adapter, Some(blobs_and_proofs)); mock_fork_choice_contains_block(&mut mock_adapter, vec![]); + mock_adapter + .expect_verify_data_column_for_gossip() + .returning(|c| Ok(GossipVerifiedDataColumn::__new_for_testing(c))); mock_process_engine_blobs_result( &mut mock_adapter, Ok(AvailabilityProcessingStatus::Imported(block_root)), @@ -174,16 +178,16 @@ async fn test_fetch_blobs_v2_success() { assert!( matches!( published_columns, - BlobsOrDataColumns::DataColumns (columns) if columns.len() == custody_columns.len() + EngineGetBlobsOutput::CustodyColumns(columns) if columns.len() == custody_columns.len() ), "should publish custody columns" ); } -/// Extract the `BlobsOrDataColumns` passed to the `publish_fn`. +/// Extract the `EngineGetBlobsOutput` passed to the `publish_fn`. fn extract_published_blobs( - publish_fn_args: Arc>>>, -) -> BlobsOrDataColumns { + publish_fn_args: Arc>>>, +) -> EngineGetBlobsOutput { let mut calls = publish_fn_args.lock().unwrap(); assert_eq!(calls.len(), 1); calls.pop().unwrap() @@ -250,8 +254,8 @@ fn create_test_block_and_blobs( #[allow(clippy::type_complexity)] fn mock_publish_fn() -> ( - impl Fn(BlobsOrDataColumns) + Send + 'static, - Arc>>>, + impl Fn(EngineGetBlobsOutput) + Send + 'static, + Arc>>>, ) { // Keep track of the arguments captured by `publish_fn`. 
let captured_args = Arc::new(Mutex::new(vec![])); diff --git a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs index 9b1a3f8677..463f585f2c 100644 --- a/beacon_node/http_api/src/publish_blocks.rs +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -424,6 +424,14 @@ fn build_gossip_verified_data_columns( ); Ok(None) } + Err(GossipDataColumnError::PriorKnownUnpublished) => { + debug!( + column_index, + %slot, + "Data column for publication already known via the EL" + ); + Ok(None) + } Err(e) => { error!( column_index, diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index 638f9e4824..3be416165b 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -797,6 +797,19 @@ impl NetworkBeaconProcessor { } Err(err) => { match err { + GossipDataColumnError::PriorKnownUnpublished => { + debug!( + %slot, + %block_root, + %index, + "Gossip data column already processed via the EL. Accepting the column sidecar without re-processing." 
+ ); + self.propagate_validation_result( + message_id, + peer_id, + MessageAcceptance::Accept, + ); + } GossipDataColumnError::ParentUnknown { parent_root } => { debug!( action = "requesting parent", diff --git a/beacon_node/network/src/network_beacon_processor/mod.rs b/beacon_node/network/src/network_beacon_processor/mod.rs index ba681eed14..f9390a2c7b 100644 --- a/beacon_node/network/src/network_beacon_processor/mod.rs +++ b/beacon_node/network/src/network_beacon_processor/mod.rs @@ -5,7 +5,7 @@ use beacon_chain::blob_verification::{GossipBlobError, GossipVerifiedBlob}; use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::data_column_verification::{observe_gossip_data_column, GossipDataColumnError}; use beacon_chain::fetch_blobs::{ - fetch_and_process_engine_blobs, BlobsOrDataColumns, FetchEngineBlobError, + fetch_and_process_engine_blobs, EngineGetBlobsOutput, FetchEngineBlobError, }; use beacon_chain::observed_data_sidecars::DoNotObserve; use beacon_chain::{ @@ -848,11 +848,14 @@ impl NetworkBeaconProcessor { let publish_fn = move |blobs_or_data_column| { if publish_blobs { match blobs_or_data_column { - BlobsOrDataColumns::Blobs(blobs) => { + EngineGetBlobsOutput::Blobs(blobs) => { self_cloned.publish_blobs_gradually(blobs, block_root); } - BlobsOrDataColumns::DataColumns(columns) => { - self_cloned.publish_data_columns_gradually(columns, block_root); + EngineGetBlobsOutput::CustodyColumns(columns) => { + self_cloned.publish_data_columns_gradually( + columns.into_iter().map(|c| c.clone_data_column()).collect(), + block_root, + ); } }; } diff --git a/beacon_node/network/src/network_beacon_processor/tests.rs b/beacon_node/network/src/network_beacon_processor/tests.rs index 292e894870..87a5a77294 100644 --- a/beacon_node/network/src/network_beacon_processor/tests.rs +++ b/beacon_node/network/src/network_beacon_processor/tests.rs @@ -9,13 +9,16 @@ use crate::{ sync::{manager::BlockProcessType, SyncMessage}, }; use 
beacon_chain::block_verification_types::RpcBlock; +use beacon_chain::data_column_verification::validate_data_column_sidecar_for_gossip; use beacon_chain::kzg_utils::blobs_to_data_column_sidecars; +use beacon_chain::observed_data_sidecars::DoNotObserve; use beacon_chain::test_utils::{ get_kzg, test_spec, AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, }; use beacon_chain::{BeaconChain, WhenSlotSkipped}; use beacon_processor::{work_reprocessing_queue::*, *}; +use gossipsub::MessageAcceptance; use itertools::Itertools; use lighthouse_network::rpc::methods::{BlobsByRangeRequest, MetaDataV3}; use lighthouse_network::rpc::InboundRequestId; @@ -25,6 +28,7 @@ use lighthouse_network::{ types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield}, Client, MessageId, NetworkConfig, NetworkGlobals, PeerId, Response, }; +use matches::assert_matches; use slot_clock::SlotClock; use std::iter::Iterator; use std::sync::Arc; @@ -32,9 +36,9 @@ use std::time::Duration; use tokio::sync::mpsc; use types::blob_sidecar::FixedBlobSidecarList; use types::{ - Attestation, AttesterSlashing, BlobSidecar, BlobSidecarList, DataColumnSidecarList, - DataColumnSubnetId, Epoch, Hash256, MainnetEthSpec, ProposerSlashing, SignedAggregateAndProof, - SignedBeaconBlock, SignedVoluntaryExit, Slot, SubnetId, + Attestation, AttesterSlashing, BlobSidecar, BlobSidecarList, ChainSpec, DataColumnSidecarList, + DataColumnSubnetId, Epoch, EthSpec, ForkName, Hash256, MainnetEthSpec, ProposerSlashing, + SignedAggregateAndProof, SignedBeaconBlock, SignedVoluntaryExit, Slot, SubnetId, }; type E = MainnetEthSpec; @@ -64,7 +68,7 @@ struct TestRig { voluntary_exit: SignedVoluntaryExit, beacon_processor_tx: BeaconProcessorSend, work_journal_rx: mpsc::Receiver<&'static str>, - _network_rx: mpsc::UnboundedReceiver>, + network_rx: mpsc::UnboundedReceiver>, _sync_rx: mpsc::UnboundedReceiver>, duplicate_cache: DuplicateCache, network_beacon_processor: Arc>, @@ -83,19 +87,18 @@ impl Drop for TestRig 
{ impl TestRig { pub async fn new(chain_length: u64) -> Self { - Self::new_parametric( - chain_length, - BeaconProcessorConfig::default().enable_backfill_rate_limiting, - ) - .await - } - - pub async fn new_parametric(chain_length: u64, enable_backfill_rate_limiting: bool) -> Self { // This allows for testing voluntary exits without building out a massive chain. let mut spec = test_spec::(); spec.shard_committee_period = 2; - let spec = Arc::new(spec); + Self::new_parametric(chain_length, BeaconProcessorConfig::default(), spec).await + } + pub async fn new_parametric( + chain_length: u64, + beacon_processor_config: BeaconProcessorConfig, + spec: ChainSpec, + ) -> Self { + let spec = Arc::new(spec); let harness = BeaconChainHarness::builder(MainnetEthSpec) .spec(spec.clone()) .deterministic_keypairs(VALIDATOR_COUNT) @@ -183,12 +186,8 @@ impl TestRig { let chain = harness.chain.clone(); - let (network_tx, _network_rx) = mpsc::unbounded_channel(); + let (network_tx, network_rx) = mpsc::unbounded_channel(); - let beacon_processor_config = BeaconProcessorConfig { - enable_backfill_rate_limiting, - ..Default::default() - }; let BeaconProcessorChannels { beacon_processor_tx, beacon_processor_rx, @@ -304,7 +303,7 @@ impl TestRig { voluntary_exit, beacon_processor_tx, work_journal_rx, - _network_rx, + network_rx, _sync_rx, duplicate_cache, network_beacon_processor, @@ -643,6 +642,50 @@ impl TestRig { assert_eq!(events, expected); } + + /// Listen for network messages and collect them for a specified duration or until reaching a count. + /// + /// Returns None if no messages were received, or Some(Vec) containing the received messages. 
+ /// + /// # Arguments + /// + /// * `timeout` - Maximum duration to listen for messages + /// * `count` - Optional maximum number of messages to collect before returning + pub async fn receive_network_messages_with_timeout( + &mut self, + timeout: Duration, + count: Option, + ) -> Option>> { + let mut events = vec![]; + + let timeout_future = tokio::time::sleep(timeout); + tokio::pin!(timeout_future); + + loop { + // Break if we've received the requested count of messages + if let Some(target_count) = count { + if events.len() >= target_count { + break; + } + } + + tokio::select! { + _ = &mut timeout_future => break, + maybe_msg = self.network_rx.recv() => { + match maybe_msg { + Some(msg) => events.push(msg), + None => break, // Channel closed + } + } + } + } + + if events.is_empty() { + None + } else { + Some(events) + } + } } fn junk_peer_id() -> PeerId { @@ -753,6 +796,58 @@ async fn import_gossip_block_unacceptably_early() { ); } +/// Data columns that have already been processed but unobserved should be propagated without re-importing. +#[tokio::test] +async fn accept_processed_gossip_data_columns_without_import() { + let processor_config = BeaconProcessorConfig::default(); + let fulu_genesis_spec = ForkName::Fulu.make_genesis_spec(E::default_spec()); + let mut rig = TestRig::new_parametric(SMALL_CHAIN, processor_config, fulu_genesis_spec).await; + + // GIVEN the data columns have already been processed but unobserved. + // 1. verify data column with `DoNotObserve` to create verified but unobserved data columns. + // 2. put verified but unobserved data columns into the data availability cache. 
+ let verified_data_columns: Vec<_> = rig + .next_data_columns + .clone() + .unwrap() + .into_iter() + .map(|data_column| { + let subnet_id = data_column.index; + validate_data_column_sidecar_for_gossip::<_, DoNotObserve>( + data_column, + subnet_id, + &rig.chain, + ) + .expect("should be valid data column") + }) + .collect(); + + let block_root = rig.next_block.canonical_root(); + rig.chain + .data_availability_checker + .put_gossip_verified_data_columns(block_root, verified_data_columns) + .expect("should put data columns into availability cache"); + + // WHEN an already processed but unobserved data column is received via gossip + rig.enqueue_gossip_data_columns(0); + + // THEN the data column should be propagated without re-importing (not sure if there's an easy way to test this) + let network_message = rig + .receive_network_messages_with_timeout(Duration::from_millis(100), Some(1)) + .await + .and_then(|mut vec| vec.pop()) + .expect("should receive network messages"); + + assert_matches!( + network_message, + NetworkMessage::ValidationResult { + propagation_source: _, + message_id: _, + validation_result: MessageAcceptance::Accept, + } + ); +} + /// Blocks that arrive on-time should be processed normally. #[tokio::test] async fn import_gossip_block_at_current_slot() { @@ -1192,8 +1287,12 @@ async fn test_backfill_sync_processing() { /// Ensure that backfill batches get processed as fast as they can when rate-limiting is disabled. 
#[tokio::test] async fn test_backfill_sync_processing_rate_limiting_disabled() { - let enable_backfill_rate_limiting = false; - let mut rig = TestRig::new_parametric(SMALL_CHAIN, enable_backfill_rate_limiting).await; + let beacon_processor_config = BeaconProcessorConfig { + enable_backfill_rate_limiting: false, + ..Default::default() + }; + let mut rig = + TestRig::new_parametric(SMALL_CHAIN, beacon_processor_config, test_spec::()).await; for _ in 0..3 { rig.enqueue_backfill_batch(); @@ -1236,7 +1335,7 @@ async fn test_blobs_by_range() { .unwrap_or(0); } let mut actual_count = 0; - while let Some(next) = rig._network_rx.recv().await { + while let Some(next) = rig.network_rx.recv().await { if let NetworkMessage::SendResponse { peer_id: _, response: Response::BlobsByRange(blob), diff --git a/beacon_node/network/src/sync/tests/lookups.rs b/beacon_node/network/src/sync/tests/lookups.rs index 38095ec434..84ff1c7e25 100644 --- a/beacon_node/network/src/sync/tests/lookups.rs +++ b/beacon_node/network/src/sync/tests/lookups.rs @@ -14,6 +14,7 @@ use std::time::Duration; use super::*; use crate::sync::block_lookups::common::ResponseType; +use beacon_chain::observed_data_sidecars::Observe; use beacon_chain::{ blob_verification::GossipVerifiedBlob, block_verification_types::{AsBlock, BlockImportData}, @@ -1229,7 +1230,12 @@ impl TestRig { .harness .chain .data_availability_checker - .put_gossip_blob(GossipVerifiedBlob::__assumed_valid(blob.into())) + .put_gossip_verified_blobs( + blob.block_root(), + std::iter::once(GossipVerifiedBlob::<_, Observe>::__assumed_valid( + blob.into(), + )), + ) .unwrap() { Availability::Available(_) => panic!("blob removed from da_checker, available"), From 7c89b970afe2f1ab2687e97eeef26cbd53ad498d Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 27 May 2025 11:55:17 +1000 Subject: [PATCH 02/13] Handle attestation validation errors (#7382) Partly addresses: - https://github.com/sigp/lighthouse/issues/7379 Handle attestation validation 
errors from `get_attesting_indices` to prevent an error log, downscore the peer, and reject the message. --- .../gossip_methods.rs | 20 +++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index 3be416165b..8757ab4383 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -2775,6 +2775,26 @@ impl NetworkBeaconProcessor { MessageAcceptance::Ignore, ); } + BeaconChainError::AttestationValidationError(e) => { + // Failures from `get_attesting_indices` end up here. + debug!( + %peer_id, + block_root = ?beacon_block_root, + attestation_slot = %failed_att.attestation_data().slot, + error = ?e, + "Rejecting attestation that failed validation" + ); + self.propagate_validation_result( + message_id, + peer_id, + MessageAcceptance::Reject, + ); + self.gossip_penalize_peer( + peer_id, + PeerAction::MidToleranceError, + "attn_validation_error", + ); + } _ => { /* * Lighthouse hit an unexpected error whilst processing the attestation. It From 8989ef8fb11e1d455bd68a8c6c0116b8fba577aa Mon Sep 17 00:00:00 2001 From: Akihito Nakano Date: Wed, 28 May 2025 00:43:22 +0900 Subject: [PATCH 03/13] Enable arithmetic lint in rate-limiter (#7025) https://github.com/sigp/lighthouse/issues/6875 - Enabled the linter in rate-limiter and fixed errors. - Changed the type of `Quota::max_tokens` from `u64` to `NonZeroU64` because `max_tokens` cannot be zero. - Added a test to ensure that a large value for `tokens`, which causes an overflow, is handled properly. 
--- .../lighthouse_network/src/rpc/config.rs | 30 ++++++----- .../src/rpc/rate_limiter.rs | 53 +++++++++++++------ .../src/rpc/self_limiter.rs | 3 +- 3 files changed, 58 insertions(+), 28 deletions(-) diff --git a/beacon_node/lighthouse_network/src/rpc/config.rs b/beacon_node/lighthouse_network/src/rpc/config.rs index 75d49e9cb5..7a746a63e1 100644 --- a/beacon_node/lighthouse_network/src/rpc/config.rs +++ b/beacon_node/lighthouse_network/src/rpc/config.rs @@ -1,11 +1,11 @@ +use super::{rate_limiter::Quota, Protocol}; +use std::num::NonZeroU64; use std::{ fmt::{Debug, Display}, str::FromStr, time::Duration, }; -use super::{rate_limiter::Quota, Protocol}; - use serde::{Deserialize, Serialize}; /// Auxiliary struct to aid on configuration parsing. @@ -100,24 +100,30 @@ pub struct RateLimiterConfig { } impl RateLimiterConfig { - pub const DEFAULT_PING_QUOTA: Quota = Quota::n_every(2, 10); - pub const DEFAULT_META_DATA_QUOTA: Quota = Quota::n_every(2, 5); - pub const DEFAULT_STATUS_QUOTA: Quota = Quota::n_every(5, 15); + pub const DEFAULT_PING_QUOTA: Quota = Quota::n_every(NonZeroU64::new(2).unwrap(), 10); + pub const DEFAULT_META_DATA_QUOTA: Quota = Quota::n_every(NonZeroU64::new(2).unwrap(), 5); + pub const DEFAULT_STATUS_QUOTA: Quota = Quota::n_every(NonZeroU64::new(5).unwrap(), 15); pub const DEFAULT_GOODBYE_QUOTA: Quota = Quota::one_every(10); // The number is chosen to balance between upload bandwidth required to serve // blocks and a decent syncing rate for honest nodes. Malicious nodes would need to // spread out their requests over the time window to max out bandwidth on the server. 
- pub const DEFAULT_BLOCKS_BY_RANGE_QUOTA: Quota = Quota::n_every(128, 10); - pub const DEFAULT_BLOCKS_BY_ROOT_QUOTA: Quota = Quota::n_every(128, 10); + pub const DEFAULT_BLOCKS_BY_RANGE_QUOTA: Quota = + Quota::n_every(NonZeroU64::new(128).unwrap(), 10); + pub const DEFAULT_BLOCKS_BY_ROOT_QUOTA: Quota = + Quota::n_every(NonZeroU64::new(128).unwrap(), 10); // `DEFAULT_BLOCKS_BY_RANGE_QUOTA` * (target + 1) to account for high usage - pub const DEFAULT_BLOBS_BY_RANGE_QUOTA: Quota = Quota::n_every(896, 10); - pub const DEFAULT_BLOBS_BY_ROOT_QUOTA: Quota = Quota::n_every(896, 10); + pub const DEFAULT_BLOBS_BY_RANGE_QUOTA: Quota = + Quota::n_every(NonZeroU64::new(896).unwrap(), 10); + pub const DEFAULT_BLOBS_BY_ROOT_QUOTA: Quota = + Quota::n_every(NonZeroU64::new(896).unwrap(), 10); // 320 blocks worth of columns for regular node, or 40 blocks for supernode. // Range sync load balances when requesting blocks, and each batch is 32 blocks. - pub const DEFAULT_DATA_COLUMNS_BY_RANGE_QUOTA: Quota = Quota::n_every(5120, 10); + pub const DEFAULT_DATA_COLUMNS_BY_RANGE_QUOTA: Quota = + Quota::n_every(NonZeroU64::new(5120).unwrap(), 10); // 512 columns per request from spec. This should be plenty as peers are unlikely to send all // sampling requests to a single peer. 
- pub const DEFAULT_DATA_COLUMNS_BY_ROOT_QUOTA: Quota = Quota::n_every(512, 10); + pub const DEFAULT_DATA_COLUMNS_BY_ROOT_QUOTA: Quota = + Quota::n_every(NonZeroU64::new(512).unwrap(), 10); pub const DEFAULT_LIGHT_CLIENT_BOOTSTRAP_QUOTA: Quota = Quota::one_every(10); pub const DEFAULT_LIGHT_CLIENT_OPTIMISTIC_UPDATE_QUOTA: Quota = Quota::one_every(10); pub const DEFAULT_LIGHT_CLIENT_FINALITY_UPDATE_QUOTA: Quota = Quota::one_every(10); @@ -275,7 +281,7 @@ mod tests { protocol: Protocol::Goodbye, quota: Quota { replenish_all_every: Duration::from_secs(10), - max_tokens: 8, + max_tokens: NonZeroU64::new(8).unwrap(), }, }; assert_eq!(quota.to_string().parse(), Ok(quota)) diff --git a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs index f666c30d52..6e66999612 100644 --- a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs +++ b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs @@ -1,3 +1,5 @@ +#![deny(clippy::arithmetic_side_effects)] + use super::config::RateLimiterConfig; use crate::rpc::Protocol; use fnv::FnvHashMap; @@ -5,6 +7,7 @@ use libp2p::PeerId; use serde::{Deserialize, Serialize}; use std::future::Future; use std::hash::Hash; +use std::num::NonZeroU64; use std::pin::Pin; use std::sync::Arc; use std::task::{Context, Poll}; @@ -55,7 +58,7 @@ pub struct Quota { pub(super) replenish_all_every: Duration, /// Token limit. This translates on how large can an instantaneous batch of /// tokens be. - pub(super) max_tokens: u64, + pub(super) max_tokens: NonZeroU64, } impl Quota { @@ -63,12 +66,12 @@ impl Quota { pub const fn one_every(seconds: u64) -> Self { Quota { replenish_all_every: Duration::from_secs(seconds), - max_tokens: 1, + max_tokens: NonZeroU64::new(1).unwrap(), } } /// Allow `n` tokens to be use used every `seconds`. 
- pub const fn n_every(n: u64, seconds: u64) -> Self { + pub const fn n_every(n: NonZeroU64, seconds: u64) -> Self { Quota { replenish_all_every: Duration::from_secs(seconds), max_tokens: n, @@ -236,7 +239,9 @@ impl RPCRateLimiterBuilder { // check for peers to prune every 30 seconds, starting in 30 seconds let prune_every = tokio::time::Duration::from_secs(30); - let prune_start = tokio::time::Instant::now() + prune_every; + let prune_start = tokio::time::Instant::now() + .checked_add(prune_every) + .ok_or("prune time overflow")?; let prune_interval = tokio::time::interval_at(prune_start, prune_every); Ok(RPCRateLimiter { prune_interval, @@ -412,14 +417,13 @@ pub struct Limiter { impl Limiter { pub fn from_quota(quota: Quota) -> Result { - if quota.max_tokens == 0 { - return Err("Max number of tokens should be positive"); - } let tau = quota.replenish_all_every.as_nanos(); if tau == 0 { return Err("Replenish time must be positive"); } - let t = (tau / quota.max_tokens as u128) + let t = tau + .checked_div(quota.max_tokens.get() as u128) + .expect("Division by zero never occurs, since Quota::max_token is of type NonZeroU64.") .try_into() .map_err(|_| "total replenish time is too long")?; let tau = tau @@ -442,7 +446,7 @@ impl Limiter { let tau = self.tau; let t = self.t; // how long does it take to replenish these tokens - let additional_time = t * tokens; + let additional_time = t.saturating_mul(tokens); if additional_time > tau { // the time required to process this amount of tokens is longer than the time that // makes the bucket full. 
So, this batch can _never_ be processed @@ -455,16 +459,16 @@ impl Limiter { .entry(key.clone()) .or_insert(time_since_start); // check how soon could the request be made - let earliest_time = (*tat + additional_time).saturating_sub(tau); + let earliest_time = (*tat).saturating_add(additional_time).saturating_sub(tau); // earliest_time is in the future if time_since_start < earliest_time { Err(RateLimitedErr::TooSoon(Duration::from_nanos( /* time they need to wait, i.e. how soon were they */ - earliest_time - time_since_start, + earliest_time.saturating_sub(time_since_start), ))) } else { // calculate the new TAT - *tat = time_since_start.max(*tat) + additional_time; + *tat = time_since_start.max(*tat).saturating_add(additional_time); Ok(()) } } @@ -479,14 +483,15 @@ impl Limiter { #[cfg(test)] mod tests { - use crate::rpc::rate_limiter::{Limiter, Quota}; + use crate::rpc::rate_limiter::{Limiter, Quota, RateLimitedErr}; + use std::num::NonZeroU64; use std::time::Duration; #[test] fn it_works_a() { let mut limiter = Limiter::from_quota(Quota { replenish_all_every: Duration::from_secs(2), - max_tokens: 4, + max_tokens: NonZeroU64::new(4).unwrap(), }) .unwrap(); let key = 10; @@ -523,7 +528,7 @@ mod tests { fn it_works_b() { let mut limiter = Limiter::from_quota(Quota { replenish_all_every: Duration::from_secs(2), - max_tokens: 4, + max_tokens: NonZeroU64::new(4).unwrap(), }) .unwrap(); let key = 10; @@ -547,4 +552,22 @@ mod tests { .allows(Duration::from_secs_f32(0.4), &key, 1) .is_err()); } + + #[test] + fn large_tokens() { + // These have been adjusted so that an overflow occurs when calculating `additional_time` in + // `Limiter::allows`. If we don't handle overflow properly, `Limiter::allows` returns `Ok` + // in this case. 
+ let replenish_all_every = 2; + let tokens = u64::MAX / 2 + 1; + + let mut limiter = Limiter::from_quota(Quota { + replenish_all_every: Duration::from_nanos(replenish_all_every), + max_tokens: NonZeroU64::new(1).unwrap(), + }) + .unwrap(); + + let result = limiter.allows(Duration::from_secs_f32(0.0), &10, tokens); + assert!(matches!(result, Err(RateLimitedErr::TooLarge))); + } } diff --git a/beacon_node/lighthouse_network/src/rpc/self_limiter.rs b/beacon_node/lighthouse_network/src/rpc/self_limiter.rs index e5b685676f..f26dc4c7a8 100644 --- a/beacon_node/lighthouse_network/src/rpc/self_limiter.rs +++ b/beacon_node/lighthouse_network/src/rpc/self_limiter.rs @@ -316,6 +316,7 @@ mod tests { use crate::service::api_types::{AppRequestId, SingleLookupReqId, SyncRequestId}; use libp2p::PeerId; use logging::create_test_tracing_subscriber; + use std::num::NonZeroU64; use std::time::Duration; use types::{EthSpec, ForkContext, Hash256, MainnetEthSpec, Slot}; @@ -324,7 +325,7 @@ mod tests { async fn test_next_peer_request_ready() { create_test_tracing_subscriber(); let config = OutboundRateLimiterConfig(RateLimiterConfig { - ping_quota: Quota::n_every(1, 2), + ping_quota: Quota::n_every(NonZeroU64::new(1).unwrap(), 2), ..Default::default() }); let fork_context = std::sync::Arc::new(ForkContext::new::( From 0ddf9a99d64a4d927726088a74ef16f4473e7e30 Mon Sep 17 00:00:00 2001 From: Mac L Date: Wed, 28 May 2025 23:47:21 +1000 Subject: [PATCH 04/13] Remove support for database migrations prior to schema version v22 (#7332) Remove deprecated database migrations prior to v22 along with v22 migration specific code. 
--- beacon_node/beacon_chain/src/schema_change.rs | 36 +--- .../src/schema_change/migration_schema_v20.rs | 111 ---------- .../src/schema_change/migration_schema_v21.rs | 74 ------- .../src/schema_change/migration_schema_v22.rs | 196 ------------------ beacon_node/beacon_chain/tests/store_tests.rs | 28 +-- beacon_node/client/src/builder.rs | 20 +- beacon_node/store/src/hot_cold_store.rs | 37 +--- book/src/advanced_database_migrations.md | 2 + database_manager/src/lib.rs | 12 +- wordlist.txt | 1 + 10 files changed, 19 insertions(+), 498 deletions(-) delete mode 100644 beacon_node/beacon_chain/src/schema_change/migration_schema_v20.rs delete mode 100644 beacon_node/beacon_chain/src/schema_change/migration_schema_v21.rs delete mode 100644 beacon_node/beacon_chain/src/schema_change/migration_schema_v22.rs diff --git a/beacon_node/beacon_chain/src/schema_change.rs b/beacon_node/beacon_chain/src/schema_change.rs index 49aa116f6c..d159aabda0 100644 --- a/beacon_node/beacon_chain/src/schema_change.rs +++ b/beacon_node/beacon_chain/src/schema_change.rs @@ -1,7 +1,4 @@ //! Utilities for managing database schema changes. -mod migration_schema_v20; -mod migration_schema_v21; -mod migration_schema_v22; mod migration_schema_v23; use crate::beacon_chain::BeaconChainTypes; @@ -9,12 +6,10 @@ use std::sync::Arc; use store::hot_cold_store::{HotColdDB, HotColdDBError}; use store::metadata::{SchemaVersion, CURRENT_SCHEMA_VERSION}; use store::Error as StoreError; -use types::Hash256; /// Migrate the database from one schema version to another, applying all requisite mutations. pub fn migrate_schema( db: Arc>, - genesis_state_root: Option, from: SchemaVersion, to: SchemaVersion, ) -> Result<(), StoreError> { @@ -24,40 +19,19 @@ pub fn migrate_schema( // Upgrade across multiple versions by recursively migrating one step at a time. 
(_, _) if from.as_u64() + 1 < to.as_u64() => { let next = SchemaVersion(from.as_u64() + 1); - migrate_schema::(db.clone(), genesis_state_root, from, next)?; - migrate_schema::(db, genesis_state_root, next, to) + migrate_schema::(db.clone(), from, next)?; + migrate_schema::(db, next, to) } // Downgrade across multiple versions by recursively migrating one step at a time. (_, _) if to.as_u64() + 1 < from.as_u64() => { let next = SchemaVersion(from.as_u64() - 1); - migrate_schema::(db.clone(), genesis_state_root, from, next)?; - migrate_schema::(db, genesis_state_root, next, to) + migrate_schema::(db.clone(), from, next)?; + migrate_schema::(db, next, to) } // - // Migrations from before SchemaVersion(19) are deprecated. + // Migrations from before SchemaVersion(22) are deprecated. // - (SchemaVersion(19), SchemaVersion(20)) => { - let ops = migration_schema_v20::upgrade_to_v20::(db.clone())?; - db.store_schema_version_atomically(to, ops) - } - (SchemaVersion(20), SchemaVersion(19)) => { - let ops = migration_schema_v20::downgrade_from_v20::(db.clone())?; - db.store_schema_version_atomically(to, ops) - } - (SchemaVersion(20), SchemaVersion(21)) => { - let ops = migration_schema_v21::upgrade_to_v21::(db.clone())?; - db.store_schema_version_atomically(to, ops) - } - (SchemaVersion(21), SchemaVersion(20)) => { - let ops = migration_schema_v21::downgrade_from_v21::(db.clone())?; - db.store_schema_version_atomically(to, ops) - } - (SchemaVersion(21), SchemaVersion(22)) => { - // This migration needs to sync data between hot and cold DBs. 
The schema version is - // bumped inside the upgrade_to_v22 fn - migration_schema_v22::upgrade_to_v22::(db.clone(), genesis_state_root) - } (SchemaVersion(22), SchemaVersion(23)) => { let ops = migration_schema_v23::upgrade_to_v23::(db.clone())?; db.store_schema_version_atomically(to, ops) diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v20.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v20.rs deleted file mode 100644 index 13fde349f5..0000000000 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v20.rs +++ /dev/null @@ -1,111 +0,0 @@ -use crate::beacon_chain::{BeaconChainTypes, OP_POOL_DB_KEY}; -use operation_pool::{ - PersistedOperationPool, PersistedOperationPoolV15, PersistedOperationPoolV20, -}; -use std::sync::Arc; -use store::{Error, HotColdDB, KeyValueStoreOp, StoreItem}; -use tracing::{debug, info}; -use types::Attestation; - -pub fn upgrade_to_v20( - db: Arc>, -) -> Result, Error> { - info!("Upgrading from v19 to v20"); - - // Load a V15 op pool and transform it to V20. - let Some(PersistedOperationPoolV15:: { - attestations_v15, - sync_contributions, - attester_slashings_v15, - proposer_slashings, - voluntary_exits, - bls_to_execution_changes, - capella_bls_change_broadcast_indices, - }) = db.get_item(&OP_POOL_DB_KEY)? 
- else { - debug!("Nothing to do, no operation pool stored"); - return Ok(vec![]); - }; - - let attestations = attestations_v15 - .into_iter() - .map(|(attestation, indices)| (Attestation::Base(attestation).into(), indices)) - .collect(); - - let attester_slashings = attester_slashings_v15 - .into_iter() - .map(|slashing| slashing.into()) - .collect(); - - let v20 = PersistedOperationPool::V20(PersistedOperationPoolV20 { - attestations, - sync_contributions, - attester_slashings, - proposer_slashings, - voluntary_exits, - bls_to_execution_changes, - capella_bls_change_broadcast_indices, - }); - Ok(vec![v20.as_kv_store_op(OP_POOL_DB_KEY)]) -} - -pub fn downgrade_from_v20( - db: Arc>, -) -> Result, Error> { - info!("Downgrading from v20 to v19"); - - // Load a V20 op pool and transform it to V15. - let Some(PersistedOperationPoolV20:: { - attestations, - sync_contributions, - attester_slashings, - proposer_slashings, - voluntary_exits, - bls_to_execution_changes, - capella_bls_change_broadcast_indices, - }) = db.get_item(&OP_POOL_DB_KEY)? 
- else { - debug!("Nothing to do, no operation pool stored"); - return Ok(vec![]); - }; - - let attestations_v15 = attestations - .into_iter() - .filter_map(|(attestation, indices)| { - if let Attestation::Base(attestation) = attestation.into() { - Some((attestation, indices)) - } else { - info!( - reason = "not a base attestation", - "Dropping attestation during downgrade" - ); - None - } - }) - .collect(); - - let attester_slashings_v15 = attester_slashings - .into_iter() - .filter_map(|slashing| match slashing.try_into() { - Ok(slashing) => Some(slashing), - Err(_) => { - info!( - reason = "not a base attester slashing", - "Dropping attester slashing during downgrade" - ); - None - } - }) - .collect(); - - let v15 = PersistedOperationPool::V15(PersistedOperationPoolV15 { - attestations_v15, - sync_contributions, - attester_slashings_v15, - proposer_slashings, - voluntary_exits, - bls_to_execution_changes, - capella_bls_change_broadcast_indices, - }); - Ok(vec![v15.as_kv_store_op(OP_POOL_DB_KEY)]) -} diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v21.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v21.rs deleted file mode 100644 index d73660cf3c..0000000000 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v21.rs +++ /dev/null @@ -1,74 +0,0 @@ -use crate::beacon_chain::BeaconChainTypes; -use crate::validator_pubkey_cache::DatabasePubkey; -use ssz::{Decode, Encode}; -use std::sync::Arc; -use store::{DBColumn, Error, HotColdDB, KeyValueStore, KeyValueStoreOp, StoreItem}; -use tracing::info; -use types::{Hash256, PublicKey}; - -const LOG_EVERY: usize = 200_000; - -pub fn upgrade_to_v21( - db: Arc>, -) -> Result, Error> { - info!("Upgrading from v20 to v21"); - - let mut ops = vec![]; - - // Iterate through all pubkeys and decompress them. 
- for (i, res) in db - .hot_db - .iter_column::(DBColumn::PubkeyCache) - .enumerate() - { - let (key, value) = res?; - let pubkey = PublicKey::from_ssz_bytes(&value)?; - let decompressed = DatabasePubkey::from_pubkey(&pubkey); - ops.push(decompressed.as_kv_store_op(key)); - - if i > 0 && i % LOG_EVERY == 0 { - info!( - keys_decompressed = i, - "Public key decompression in progress" - ); - } - } - info!("Public key decompression complete"); - - Ok(ops) -} - -pub fn downgrade_from_v21( - db: Arc>, -) -> Result, Error> { - info!("Downgrading from v21 to v20"); - - let mut ops = vec![]; - - // Iterate through all pubkeys and recompress them. - for (i, res) in db - .hot_db - .iter_column::(DBColumn::PubkeyCache) - .enumerate() - { - let (key, value) = res?; - let decompressed = DatabasePubkey::from_ssz_bytes(&value)?; - let (_, pubkey_bytes) = decompressed.as_pubkey().map_err(|e| Error::DBError { - message: format!("{e:?}"), - })?; - - ops.push(KeyValueStoreOp::PutKeyValue( - DBColumn::PubkeyCache, - key.as_slice().to_vec(), - pubkey_bytes.as_ssz_bytes(), - )); - - if i > 0 && i % LOG_EVERY == 0 { - info!(keys_compressed = i, "Public key compression in progress"); - } - } - - info!("Public key compression complete"); - - Ok(ops) -} diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v22.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v22.rs deleted file mode 100644 index a995f9d6b4..0000000000 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v22.rs +++ /dev/null @@ -1,196 +0,0 @@ -use crate::beacon_chain::BeaconChainTypes; -use std::sync::Arc; -use store::chunked_iter::ChunkedVectorIter; -use store::{ - chunked_vector::BlockRootsChunked, - metadata::{ - SchemaVersion, ANCHOR_FOR_ARCHIVE_NODE, ANCHOR_UNINITIALIZED, STATE_UPPER_LIMIT_NO_RETAIN, - }, - partial_beacon_state::PartialBeaconState, - AnchorInfo, DBColumn, Error, HotColdDB, KeyValueStore, KeyValueStoreOp, -}; -use tracing::info; -use types::{BeaconState, 
Hash256, Slot}; - -const LOG_EVERY: usize = 200_000; - -fn load_old_schema_frozen_state( - db: &HotColdDB, - state_root: Hash256, -) -> Result>, Error> { - let Some(partial_state_bytes) = db - .cold_db - .get_bytes(DBColumn::BeaconState, state_root.as_slice())? - else { - return Ok(None); - }; - let mut partial_state: PartialBeaconState = - PartialBeaconState::from_ssz_bytes(&partial_state_bytes, db.get_chain_spec())?; - - // Fill in the fields of the partial state. - partial_state.load_block_roots(&db.cold_db, db.get_chain_spec())?; - partial_state.load_state_roots(&db.cold_db, db.get_chain_spec())?; - partial_state.load_historical_roots(&db.cold_db, db.get_chain_spec())?; - partial_state.load_randao_mixes(&db.cold_db, db.get_chain_spec())?; - partial_state.load_historical_summaries(&db.cold_db, db.get_chain_spec())?; - - partial_state.try_into().map(Some) -} - -pub fn upgrade_to_v22( - db: Arc>, - genesis_state_root: Option, -) -> Result<(), Error> { - info!("Upgrading DB schema from v21 to v22"); - - let old_anchor = db.get_anchor_info(); - - // If the anchor was uninitialized in the old schema (`None`), this represents a full archive - // node. - let effective_anchor = if old_anchor == ANCHOR_UNINITIALIZED { - ANCHOR_FOR_ARCHIVE_NODE - } else { - old_anchor.clone() - }; - - let split_slot = db.get_split_slot(); - let genesis_state_root = genesis_state_root.ok_or(Error::GenesisStateUnknown)?; - - let mut cold_ops = vec![]; - - // Load the genesis state in the previous chunked format, BEFORE we go deleting or rewriting - // anything. - let mut genesis_state = load_old_schema_frozen_state::(&db, genesis_state_root)? - .ok_or(Error::MissingGenesisState)?; - let genesis_state_root = genesis_state.update_tree_hash_cache()?; - let genesis_block_root = genesis_state.get_latest_block_root(genesis_state_root); - - // Store the genesis state in the new format, prior to updating the schema version on disk. 
- // In case of a crash no data is lost because we will re-load it in the old format and re-do - // this write. - if split_slot > 0 { - info!( - state_root = ?genesis_state_root, - "Re-storing genesis state" - ); - db.store_cold_state(&genesis_state_root, &genesis_state, &mut cold_ops)?; - } - - // Write the block roots in the new format in a new column. Similar to above, we do this - // separately from deleting the old format block roots so that this is crash safe. - let oldest_block_slot = effective_anchor.oldest_block_slot; - write_new_schema_block_roots::( - &db, - genesis_block_root, - oldest_block_slot, - split_slot, - &mut cold_ops, - )?; - - // Commit this first batch of non-destructive cold database ops. - db.cold_db.do_atomically(cold_ops)?; - - // Now we update the anchor and the schema version atomically in the hot database. - // - // If we crash after commiting this change, then there will be some leftover cruft left in the - // freezer database, but no corruption because all the new-format data has already been written - // above. - let new_anchor = AnchorInfo { - state_upper_limit: STATE_UPPER_LIMIT_NO_RETAIN, - state_lower_limit: Slot::new(0), - ..effective_anchor.clone() - }; - let hot_ops = vec![db.compare_and_set_anchor_info(old_anchor, new_anchor)?]; - db.store_schema_version_atomically(SchemaVersion(22), hot_ops)?; - - // Finally, clean up the old-format data from the freezer database. - delete_old_schema_freezer_data::(&db)?; - - Ok(()) -} - -pub fn delete_old_schema_freezer_data( - db: &Arc>, -) -> Result<(), Error> { - let mut cold_ops = vec![]; - - let columns = [ - DBColumn::BeaconState, - // Cold state summaries indexed by state root were stored in this column. - DBColumn::BeaconStateSummary, - // Mapping from restore point number to state root was stored in this column. - DBColumn::BeaconRestorePoint, - // Chunked vector values were stored in these columns. 
- DBColumn::BeaconHistoricalRoots, - DBColumn::BeaconRandaoMixes, - DBColumn::BeaconHistoricalSummaries, - DBColumn::BeaconBlockRootsChunked, - DBColumn::BeaconStateRootsChunked, - ]; - - for column in columns { - for res in db.cold_db.iter_column_keys::>(column) { - let key = res?; - cold_ops.push(KeyValueStoreOp::DeleteKey(column, key)); - } - } - let delete_ops = cold_ops.len(); - - info!(delete_ops, "Deleting historic states"); - db.cold_db.do_atomically(cold_ops)?; - - // In order to reclaim space, we need to compact the freezer DB as well. - db.compact_freezer()?; - - Ok(()) -} - -pub fn write_new_schema_block_roots( - db: &HotColdDB, - genesis_block_root: Hash256, - oldest_block_slot: Slot, - split_slot: Slot, - cold_ops: &mut Vec, -) -> Result<(), Error> { - info!( - %oldest_block_slot, - ?genesis_block_root, - "Starting beacon block root migration" - ); - - // Store the genesis block root if it would otherwise not be stored. - if oldest_block_slot != 0 { - cold_ops.push(KeyValueStoreOp::PutKeyValue( - DBColumn::BeaconBlockRoots, - 0u64.to_be_bytes().to_vec(), - genesis_block_root.as_slice().to_vec(), - )); - } - - // Block roots are available from the `oldest_block_slot` to the `split_slot`. - let start_vindex = oldest_block_slot.as_usize(); - let block_root_iter = ChunkedVectorIter::::new( - db, - start_vindex, - split_slot, - db.get_chain_spec(), - ); - - // OK to hold these in memory (10M slots * 43 bytes per KV ~= 430 MB). 
- for (i, (slot, block_root)) in block_root_iter.enumerate() { - cold_ops.push(KeyValueStoreOp::PutKeyValue( - DBColumn::BeaconBlockRoots, - slot.to_be_bytes().to_vec(), - block_root.as_slice().to_vec(), - )); - - if i > 0 && i % LOG_EVERY == 0 { - info!( - roots_migrated = i, - "Beacon block root migration in progress" - ); - } - } - - Ok(()) -} diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 3343dc101b..51c7f0c289 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -3023,7 +3023,6 @@ async fn schema_downgrade_to_min_version() { .await; let min_version = SchemaVersion(22); - let genesis_state_root = Some(harness.chain.genesis_state_root); // Save the slot clock so that the new harness doesn't revert in time. let slot_clock = harness.chain.slot_clock.clone(); @@ -3036,22 +3035,12 @@ async fn schema_downgrade_to_min_version() { let store = get_store(&db_path); // Downgrade. - migrate_schema::>( - store.clone(), - genesis_state_root, - CURRENT_SCHEMA_VERSION, - min_version, - ) - .expect("schema downgrade to minimum version should work"); + migrate_schema::>(store.clone(), CURRENT_SCHEMA_VERSION, min_version) + .expect("schema downgrade to minimum version should work"); // Upgrade back. - migrate_schema::>( - store.clone(), - genesis_state_root, - min_version, - CURRENT_SCHEMA_VERSION, - ) - .expect("schema upgrade from minimum version should work"); + migrate_schema::>(store.clone(), min_version, CURRENT_SCHEMA_VERSION) + .expect("schema upgrade from minimum version should work"); // Recreate the harness. let harness = BeaconChainHarness::builder(MinimalEthSpec) @@ -3069,13 +3058,8 @@ async fn schema_downgrade_to_min_version() { // Check that downgrading beyond the minimum version fails (bound is *tight*). 
let min_version_sub_1 = SchemaVersion(min_version.as_u64().checked_sub(1).unwrap()); - migrate_schema::>( - store.clone(), - genesis_state_root, - CURRENT_SCHEMA_VERSION, - min_version_sub_1, - ) - .expect_err("should not downgrade below minimum version"); + migrate_schema::>(store.clone(), CURRENT_SCHEMA_VERSION, min_version_sub_1) + .expect_err("should not downgrade below minimum version"); } /// Check that blob pruning prunes blobs older than the data availability boundary. diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index a581d5c128..94e6961455 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -1002,11 +1002,6 @@ where blobs_path: &Path, config: StoreConfig, ) -> Result { - let context = self - .runtime_context - .as_ref() - .ok_or("disk_store requires a log")? - .service_context("freezer_db".into()); let spec = self .chain_spec .clone() @@ -1015,21 +1010,8 @@ where self.db_path = Some(hot_path.into()); self.freezer_db_path = Some(cold_path.into()); - // Optionally grab the genesis state root. - // This will only be required if a DB upgrade to V22 is needed. - let genesis_state_root = context - .eth2_network_config - .as_ref() - .and_then(|config| config.genesis_state_root::().transpose()) - .transpose()?; - let schema_upgrade = |db, from, to| { - migrate_schema::>( - db, - genesis_state_root, - from, - to, - ) + migrate_schema::>(db, from, to) }; let store = HotColdDB::open( diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index d4b68357b2..1663ec7b4d 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -2501,27 +2501,13 @@ impl, Cold: ItemStore> HotColdDB /// Run a compaction pass on the freezer DB to free up space used by deleted states. 
pub fn compact_freezer(&self) -> Result<(), Error> { - let current_schema_columns = vec![ + let columns = vec![ DBColumn::BeaconColdStateSummary, DBColumn::BeaconStateSnapshot, DBColumn::BeaconStateDiff, DBColumn::BeaconStateRoots, ]; - // We can remove this once schema V21 has been gone for a while. - let previous_schema_columns = vec![ - DBColumn::BeaconState, - DBColumn::BeaconStateSummary, - DBColumn::BeaconBlockRootsChunked, - DBColumn::BeaconStateRootsChunked, - DBColumn::BeaconRestorePoint, - DBColumn::BeaconHistoricalRoots, - DBColumn::BeaconRandaoMixes, - DBColumn::BeaconHistoricalSummaries, - ]; - let mut columns = current_schema_columns; - columns.extend(previous_schema_columns); - for column in columns { info!(?column, "Starting compaction"); self.cold_db.compact_column(column)?; @@ -2871,32 +2857,13 @@ impl, Cold: ItemStore> HotColdDB // migrating to the tree-states schema (delete everything in the freezer then start afresh). let mut cold_ops = vec![]; - let current_schema_columns = vec![ + let columns = vec![ DBColumn::BeaconColdStateSummary, DBColumn::BeaconStateSnapshot, DBColumn::BeaconStateDiff, DBColumn::BeaconStateRoots, ]; - // This function is intended to be able to clean up leftover V21 freezer database stuff in - // the case where the V22 schema upgrade failed *after* commiting the version increment but - // *before* cleaning up the freezer DB. - // - // We can remove this once schema V21 has been gone for a while. 
- let previous_schema_columns = vec![ - DBColumn::BeaconState, - DBColumn::BeaconStateSummary, - DBColumn::BeaconBlockRootsChunked, - DBColumn::BeaconStateRootsChunked, - DBColumn::BeaconRestorePoint, - DBColumn::BeaconHistoricalRoots, - DBColumn::BeaconRandaoMixes, - DBColumn::BeaconHistoricalSummaries, - ]; - - let mut columns = current_schema_columns; - columns.extend(previous_schema_columns); - for column in columns { for res in self.cold_db.iter_column_keys::>(column) { let key = res?; diff --git a/book/src/advanced_database_migrations.md b/book/src/advanced_database_migrations.md index e9954e2ad9..f92ae7846b 100644 --- a/book/src/advanced_database_migrations.md +++ b/book/src/advanced_database_migrations.md @@ -17,6 +17,7 @@ validator client or the slasher**. | Lighthouse version | Release date | Schema version | Downgrade available? | |--------------------|--------------|----------------|----------------------| +| v7.1.0 | TBD 2025 | v23 | yes | | v7.0.0 | Apr 2025 | v22 | no | | v6.0.0 | Nov 2024 | v22 | no | @@ -206,6 +207,7 @@ Here are the steps to prune historic states: | Lighthouse version | Release date | Schema version | Downgrade available? 
| |--------------------|--------------|----------------|-------------------------------------| +| v7.1.0 | TBD 2025 | v23 | yes | | v7.0.0 | Apr 2025 | v22 | no | | v6.0.0 | Nov 2024 | v22 | no | | v5.3.0 | Aug 2024 | v21 | yes before Electra using <= v7.0.0 | diff --git a/database_manager/src/lib.rs b/database_manager/src/lib.rs index f38c28d8b0..d15a8419df 100644 --- a/database_manager/src/lib.rs +++ b/database_manager/src/lib.rs @@ -301,7 +301,6 @@ fn parse_migrate_config(migrate_config: &Migrate) -> Result( migrate_config: MigrateConfig, client_config: ClientConfig, - mut genesis_state: BeaconState, runtime_context: &RuntimeContext, ) -> Result<(), Error> { let spec = runtime_context.eth2_config.spec.clone(); @@ -329,13 +328,7 @@ pub fn migrate_db( "Migrating database schema" ); - let genesis_state_root = genesis_state.canonical_root()?; - migrate_schema::, _, _, _>>( - db, - Some(genesis_state_root), - from, - to, - ) + migrate_schema::, _, _, _>>(db, from, to) } pub fn prune_payloads( @@ -487,8 +480,7 @@ pub fn run( match &db_manager_config.subcommand { cli::DatabaseManagerSubcommand::Migrate(migrate_config) => { let migrate_config = parse_migrate_config(migrate_config)?; - let genesis_state = get_genesis_state()?; - migrate_db(migrate_config, client_config, genesis_state, &context).map_err(format_err) + migrate_db(migrate_config, client_config, &context).map_err(format_err) } cli::DatabaseManagerSubcommand::Inspect(inspect_config) => { let inspect_config = parse_inspect_config(inspect_config)?; diff --git a/wordlist.txt b/wordlist.txt index 682fae0261..e0d1afdf7c 100644 --- a/wordlist.txt +++ b/wordlist.txt @@ -89,6 +89,7 @@ SSD SSL SSZ Styleguide +TBD TCP Teku TLS From 5cda6a6f9e4b57005f57e08e7bb3362a50988d36 Mon Sep 17 00:00:00 2001 From: Akihito Nakano Date: Thu, 29 May 2025 10:37:04 +0900 Subject: [PATCH 05/13] Mitigate flakiness in test_delayed_rpc_response (#7522) https://github.com/sigp/lighthouse/issues/7466 Expanded the margin from 100ms to 500ms. 
--- beacon_node/lighthouse_network/tests/rpc_tests.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/beacon_node/lighthouse_network/tests/rpc_tests.rs b/beacon_node/lighthouse_network/tests/rpc_tests.rs index 72d7aa0074..5e54b595f2 100644 --- a/beacon_node/lighthouse_network/tests/rpc_tests.rs +++ b/beacon_node/lighthouse_network/tests/rpc_tests.rs @@ -1246,10 +1246,12 @@ fn test_delayed_rpc_response() { // The second and subsequent responses are delayed due to the response rate-limiter on the receiver side. // Adding a slight margin to the elapsed time check to account for potential timing issues caused by system // scheduling or execution delays during testing. + // https://github.com/sigp/lighthouse/issues/7466 + let margin = 500; assert!( request_sent_at.elapsed() > (Duration::from_secs(QUOTA_SEC) - - Duration::from_millis(100)) + - Duration::from_millis(margin)) ); if request_id == 5 { // End the test From 4d21846aba6b77879c368b5dc2b435c64d29b018 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Thu, 29 May 2025 12:54:34 +1000 Subject: [PATCH 06/13] Prevent `AvailabilityCheckError` when there's no new custody columns to import (#7533) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Addresses a regression recently introduced when we started gossip verifying data columns from EL blobs ``` failures: network_beacon_processor::tests::accept_processed_gossip_data_columns_without_import test result: FAILED. 
0 passed; 1 failed; 0 ignored; 0 measured; 90 filtered out; finished in 16.60s stderr ─── thread 'network_beacon_processor::tests::accept_processed_gossip_data_columns_without_import' panicked at beacon_node/network/src/network_beacon_processor/tests.rs:829:10: should put data columns into availability cache: Unexpected("empty columns") note: run with `RUST_BACKTRACE=1` environment variable to display a backtrace ``` https://github.com/sigp/lighthouse/actions/runs/15309278812/job/43082341868?pr=7521 If an empty `Vec` is passed to the DA checker, it causes an unexpected error. This PR addresses it by not passing an empty `Vec` for processing, and not spawning a task to publish. --- .../beacon_chain/src/fetch_blobs/mod.rs | 27 ++++++----- .../beacon_chain/src/fetch_blobs/tests.rs | 48 ++++++++++++++++++- 2 files changed, 63 insertions(+), 12 deletions(-) diff --git a/beacon_node/beacon_chain/src/fetch_blobs/mod.rs b/beacon_node/beacon_chain/src/fetch_blobs/mod.rs index 927841376f..9d986f8fb6 100644 --- a/beacon_node/beacon_chain/src/fetch_blobs/mod.rs +++ b/beacon_node/beacon_chain/src/fetch_blobs/mod.rs @@ -264,6 +264,7 @@ async fn fetch_and_process_blobs_v2( return Ok(None); } + debug!(num_fetched_blobs, "All expected blobs received from the EL"); inc_counter(&metrics::BLOBS_FROM_EL_HIT_TOTAL); if chain_adapter.fork_choice_contains_block(&block_root) { @@ -276,23 +277,32 @@ async fn fetch_and_process_blobs_v2( } let chain_adapter = Arc::new(chain_adapter); - let custody_columns = compute_and_publish_data_columns( + let custody_columns_to_import = compute_custody_columns_to_import( &chain_adapter, block.clone(), blobs, proofs, custody_columns_indices, - publish_fn, ) .await?; - debug!(num_fetched_blobs, "Processing engine blobs"); + if custody_columns_to_import.is_empty() { + debug!( + info = "No new data columns to import", + "Ignoring EL blobs response" + ); + return Ok(None); + } + + publish_fn(EngineGetBlobsOutput::CustodyColumns( + 
custody_columns_to_import.clone(), + )); let availability_processing_status = chain_adapter .process_engine_blobs( block.slot(), block_root, - EngineGetBlobsOutput::CustodyColumns(custody_columns), + EngineGetBlobsOutput::CustodyColumns(custody_columns_to_import), ) .await?; @@ -300,13 +310,12 @@ async fn fetch_and_process_blobs_v2( } /// Offload the data column computation to a blocking task to avoid holding up the async runtime. -async fn compute_and_publish_data_columns( +async fn compute_custody_columns_to_import( chain_adapter: &Arc>, block: Arc>>, blobs: Vec>, proofs: Vec>, custody_columns_indices: HashSet, - publish_fn: impl Fn(EngineGetBlobsOutput) + Send + 'static, ) -> Result>, FetchEngineBlobError> { let kzg = chain_adapter.kzg().clone(); let spec = chain_adapter.spec().clone(); @@ -380,13 +389,9 @@ async fn compute_and_publish_data_columns( .collect::, _>>() .map_err(FetchEngineBlobError::GossipDataColumn)?; - publish_fn(EngineGetBlobsOutput::CustodyColumns( - columns_to_import_and_publish.clone(), - )); - Ok(columns_to_import_and_publish) }, - "compute_and_publish_data_columns", + "compute_custody_columns_to_import", ) .ok_or(FetchEngineBlobError::RuntimeShutdown)? 
.await diff --git a/beacon_node/beacon_chain/src/fetch_blobs/tests.rs b/beacon_node/beacon_chain/src/fetch_blobs/tests.rs index 8eefd4ddf8..afee5adcb2 100644 --- a/beacon_node/beacon_chain/src/fetch_blobs/tests.rs +++ b/beacon_node/beacon_chain/src/fetch_blobs/tests.rs @@ -1,4 +1,4 @@ -use crate::data_column_verification::GossipVerifiedDataColumn; +use crate::data_column_verification::{GossipDataColumnError, GossipVerifiedDataColumn}; use crate::fetch_blobs::fetch_blobs_beacon_adapter::MockFetchBlobsBeaconAdapter; use crate::fetch_blobs::{ fetch_and_process_engine_blobs_inner, EngineGetBlobsOutput, FetchEngineBlobError, @@ -139,6 +139,52 @@ async fn test_fetch_blobs_v2_block_imported_after_el_response() { ); } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_fetch_blobs_v2_no_new_columns_to_import() { + let mut mock_adapter = mock_beacon_adapter(); + let (publish_fn, publish_fn_args) = mock_publish_fn(); + let (block, blobs_and_proofs) = create_test_block_and_blobs(&mock_adapter); + let block_root = block.canonical_root(); + + // **GIVEN**: + // All blobs returned + mock_get_blobs_v2_response(&mut mock_adapter, Some(blobs_and_proofs)); + // block not yet imported into fork choice + mock_fork_choice_contains_block(&mut mock_adapter, vec![]); + // All data columns already seen on gossip + mock_adapter + .expect_verify_data_column_for_gossip() + .returning(|c| { + Err(GossipDataColumnError::PriorKnown { + proposer: c.block_proposer_index(), + slot: c.slot(), + index: c.index, + }) + }); + // No blobs should be processed + mock_adapter.expect_process_engine_blobs().times(0); + + // **WHEN**: Trigger `fetch_blobs` on the block + let custody_columns = hashset![0, 1, 2]; + let processing_status = fetch_and_process_engine_blobs_inner( + mock_adapter, + block_root, + block, + custody_columns.clone(), + publish_fn, + ) + .await + .expect("fetch blobs should succeed"); + + // **THEN**: Should NOT be processed and no columns should be published. 
+ assert_eq!(processing_status, None); + assert_eq!( + publish_fn_args.lock().unwrap().len(), + 0, + "no columns should be published" + ); +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn test_fetch_blobs_v2_success() { let mut mock_adapter = mock_beacon_adapter(); From 39744df93f0ba67880f95c0ac75ac4e7a5a5ae62 Mon Sep 17 00:00:00 2001 From: Akihito Nakano Date: Fri, 30 May 2025 12:05:37 +0900 Subject: [PATCH 07/13] simulator: Fix `Failed to initialize dependency logging` (#7393) The simulator works but always emits the following message: ``` $ cargo run --release --bin simulator basic-sim ... ... Failed to initialize dependency logging: attempted to set a logger after the logging system was already initialized ... ... ``` This PR removes the initialization with `env_logger`. (Update) With https://github.com/sigp/lighthouse/pull/7433 merged, the libp2p/discv5 logs are saved in separate files and respect the `RUST_LOG` env var for log level configuration. --- Cargo.lock | 2 -- testing/simulator/Cargo.toml | 2 -- testing/simulator/src/main.rs | 4 ---- 3 files changed, 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1b506f6212..327908027c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8469,9 +8469,7 @@ name = "simulator" version = "0.2.0" dependencies = [ "clap", - "env_logger 0.9.3", "environment", - "eth2_network_config", "execution_layer", "futures", "kzg", diff --git a/testing/simulator/Cargo.toml b/testing/simulator/Cargo.toml index cf0d03c24f..cd23138a1c 100644 --- a/testing/simulator/Cargo.toml +++ b/testing/simulator/Cargo.toml @@ -7,9 +7,7 @@ edition = { workspace = true } [dependencies] clap = { workspace = true } -env_logger = { workspace = true } environment = { workspace = true } -eth2_network_config = { workspace = true } execution_layer = { workspace = true } futures = { workspace = true } kzg = { workspace = true } diff --git a/testing/simulator/src/main.rs b/testing/simulator/src/main.rs index 1cc4a1779b..7bd6e546f7 
100644 --- a/testing/simulator/src/main.rs +++ b/testing/simulator/src/main.rs @@ -18,16 +18,12 @@ mod local_network; mod retry; use cli::cli_app; -use env_logger::{Builder, Env}; use local_network::LocalNetwork; use types::MinimalEthSpec; pub type E = MinimalEthSpec; fn main() { - // Debugging output for libp2p and external crates. - Builder::from_env(Env::default()).init(); - let matches = cli_app().get_matches(); match matches.subcommand_name() { Some("basic-sim") => match basic_sim::run_basic_sim(&matches) { From 38a5f338fad77e9e76762897490d7f67b8982b7d Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Sat, 31 May 2025 00:15:54 +1000 Subject: [PATCH 08/13] Add `console-subscriber` feature for debugging (#7529) Add `console-subscriber` feature for debugging tokio async tasks. Supersedes #7420 to work with `unstable`. Usage: - Build Lighthouse with `RUSTFLAGS=--cfg tokio_unstable` and `--features console-subscriber`, e.g.: ``` RUSTFLAGS=-"-cfg=tokio_unstable --remap-path-prefix=$(pwd)=." FEATURES=console-subscriber make ``` - Run the Lighthouse binary. - Install `tokio-console` and run it in a terminal. 
--- Cargo.lock | 252 ++++++++++++++++++++++++++++++++++++++++- Cargo.toml | 1 + lighthouse/Cargo.toml | 3 + lighthouse/src/main.rs | 6 + 4 files changed, 261 insertions(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index 327908027c..48a39cf304 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -612,6 +612,28 @@ dependencies = [ "syn 2.0.101", ] +[[package]] +name = "async-stream" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476" +dependencies = [ + "async-stream-impl", + "futures-core", + "pin-project-lite", +] + +[[package]] +name = "async-stream-impl" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.101", +] + [[package]] name = "async-trait" version = "0.1.88" @@ -704,6 +726,53 @@ version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" +[[package]] +name = "axum" +version = "0.7.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edca88bc138befd0323b20752846e6587272d3b03b0343c8ea28a6f819e6e71f" +dependencies = [ + "async-trait", + "axum-core", + "bytes", + "futures-util", + "http 1.3.1", + "http-body 1.0.1", + "http-body-util", + "itoa", + "matchit", + "memchr", + "mime", + "percent-encoding", + "pin-project-lite", + "rustversion", + "serde", + "sync_wrapper 1.0.2", + "tower 0.5.2", + "tower-layer", + "tower-service", +] + +[[package]] +name = "axum-core" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09f2bd6146b97ae3359fa0cc6d6b376d9539582c7b4220f041a33ec24c226199" +dependencies = [ + "async-trait", + "bytes", + "futures-util", + "http 1.3.1", + "http-body 1.0.1", + "http-body-util", + "mime", + 
"pin-project-lite", + "rustversion", + "sync_wrapper 1.0.2", + "tower-layer", + "tower-service", +] + [[package]] name = "backtrace" version = "0.3.75" @@ -1622,6 +1691,45 @@ dependencies = [ "crossbeam-utils", ] +[[package]] +name = "console-api" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8030735ecb0d128428b64cd379809817e620a40e5001c54465b99ec5feec2857" +dependencies = [ + "futures-core", + "prost", + "prost-types", + "tonic", + "tracing-core", +] + +[[package]] +name = "console-subscriber" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6539aa9c6a4cd31f4b1c040f860a1eac9aa80e7df6b05d506a6e7179936d6a01" +dependencies = [ + "console-api", + "crossbeam-channel", + "crossbeam-utils", + "futures-task", + "hdrhistogram", + "humantime", + "hyper-util", + "prost", + "prost-types", + "serde", + "serde_json", + "thread_local", + "tokio", + "tokio-stream", + "tonic", + "tracing", + "tracing-core", + "tracing-subscriber", +] + [[package]] name = "const-hex" version = "1.14.0" @@ -3964,6 +4072,19 @@ dependencies = [ "hashbrown 0.14.5", ] +[[package]] +name = "hdrhistogram" +version = "7.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "765c9198f173dd59ce26ff9f95ef0aafd0a0fe01fb9d72841bc5066a4c06511d" +dependencies = [ + "base64 0.21.7", + "byteorder", + "flate2", + "nom", + "num-traits", +] + [[package]] name = "headers" version = "0.3.9" @@ -4339,6 +4460,19 @@ dependencies = [ "tokio-rustls 0.24.1", ] +[[package]] +name = "hyper-timeout" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b90d566bffbce6a75bd8b09a05aa8c2cb1fabb6cb348f8840c9e4c90a0d83b0" +dependencies = [ + "hyper 1.6.0", + "hyper-util", + "pin-project-lite", + "tokio", + "tower-service", +] + [[package]] name = "hyper-tls" version = "0.5.0" @@ -5496,6 +5630,7 @@ dependencies = [ "boot_node", "clap", "clap_utils", + 
"console-subscriber", "database_manager", "directory", "environment", @@ -5801,6 +5936,12 @@ version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2532096657941c2fea9c289d370a250971c689d4f143798ff67113ec042024a5" +[[package]] +name = "matchit" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" + [[package]] name = "mdbx-sys" version = "0.11.6-4" @@ -7165,6 +7306,38 @@ dependencies = [ "syn 2.0.101", ] +[[package]] +name = "prost" +version = "0.13.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2796faa41db3ec313a31f7624d9286acf277b52de526150b7e69f3debf891ee5" +dependencies = [ + "bytes", + "prost-derive", +] + +[[package]] +name = "prost-derive" +version = "0.13.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a56d757972c98b346a9b766e3f02746cde6dd1cd1d1d563472929fdd74bec4d" +dependencies = [ + "anyhow", + "itertools 0.13.0", + "proc-macro2", + "quote", + "syn 2.0.101", +] + +[[package]] +name = "prost-types" +version = "0.13.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52c2c1bf36ddb1a1c396b3601a3cec27c2462e45f07c386894ec3ccf5332bd16" +dependencies = [ + "prost", +] + [[package]] name = "proto_array" version = "0.2.0" @@ -7569,7 +7742,7 @@ dependencies = [ "serde", "serde_json", "serde_urlencoded", - "sync_wrapper", + "sync_wrapper 0.1.2", "system-configuration 0.5.1", "tokio", "tokio-native-tls", @@ -8831,6 +9004,12 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" +[[package]] +name = "sync_wrapper" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263" + [[package]] name = "synstructure" 
version = "0.13.2" @@ -9202,6 +9381,7 @@ dependencies = [ "signal-hook-registry", "socket2", "tokio-macros", + "tracing", "windows-sys 0.52.0", ] @@ -9321,6 +9501,76 @@ dependencies = [ "winnow 0.7.10", ] +[[package]] +name = "tonic" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "877c5b330756d856ffcc4553ab34a5684481ade925ecc54bcd1bf02b1d0d4d52" +dependencies = [ + "async-stream", + "async-trait", + "axum", + "base64 0.22.1", + "bytes", + "h2 0.4.10", + "http 1.3.1", + "http-body 1.0.1", + "http-body-util", + "hyper 1.6.0", + "hyper-timeout", + "hyper-util", + "percent-encoding", + "pin-project", + "prost", + "socket2", + "tokio", + "tokio-stream", + "tower 0.4.13", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" +dependencies = [ + "futures-core", + "futures-util", + "indexmap 1.9.3", + "pin-project", + "pin-project-lite", + "rand 0.8.5", + "slab", + "tokio", + "tokio-util", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" +dependencies = [ + "futures-core", + "futures-util", + "pin-project-lite", + "sync_wrapper 1.0.2", + "tower-layer", + "tower-service", +] + +[[package]] +name = "tower-layer" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" + [[package]] name = "tower-service" version = "0.3.3" diff --git a/Cargo.toml b/Cargo.toml index 952b43a66b..4850b2f56c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -124,6 +124,7 @@ clap = { version = "4.5.4", features = ["derive", "cargo", "wrap_help"] } clap_utils = { 
path = "common/clap_utils" } compare_fields = { path = "common/compare_fields" } compare_fields_derive = { path = "common/compare_fields_derive" } +console-subscriber = "0.4" context_deserialize = { path = "consensus/context_deserialize" } context_deserialize_derive = { path = "consensus/context_deserialize_derive" } criterion = "0.5" diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index cc17f638fd..3ca93aedf7 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -33,6 +33,8 @@ slasher-redb = ["slasher/redb"] beacon-node-leveldb = ["store/leveldb"] # Supports beacon node redb backend. beacon-node-redb = ["store/redb"] +# Supports console subscriber for debugging +console-subscriber = ["console-subscriber/default"] # Deprecated. This is now enabled by default on non windows targets. jemalloc = [] @@ -45,6 +47,7 @@ bls = { workspace = true } boot_node = { path = "../boot_node" } clap = { workspace = true } clap_utils = { workspace = true } +console-subscriber = { workspace = true, optional = true } database_manager = { path = "../database_manager" } directory = { workspace = true } environment = { workspace = true } diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index 7ddf04db01..bbd8f764e7 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -663,6 +663,12 @@ fn run( logging_layers.push(MetricsLayer.boxed()); + #[cfg(feature = "console-subscriber")] + { + let console_layer = console_subscriber::spawn(); + logging_layers.push(console_layer.boxed()); + } + let logging_result = tracing_subscriber::registry() .with(logging_layers) .try_init(); From 886ceb7e25e06cf4a4809508a83ee342b75f1ba4 Mon Sep 17 00:00:00 2001 From: Akihito Nakano Date: Mon, 2 Jun 2025 09:47:06 +0900 Subject: [PATCH 09/13] Run Assertoor tests in CI (#6882) Added Assertoor tests to the local-testnet CI. - The assertoor logs are included in the `logs-local-testnet` that is uploaded to GitHub Artifacts. 
- Use `start_local_testnet.sh` so that we can also easily run the test locally. --- .github/workflows/local-testnet.yml | 43 +++++++++++++++----- scripts/local_testnet/start_local_testnet.sh | 14 ++++++- 2 files changed, 44 insertions(+), 13 deletions(-) diff --git a/.github/workflows/local-testnet.yml b/.github/workflows/local-testnet.yml index 1cd2f24548..455931aa1e 100644 --- a/.github/workflows/local-testnet.yml +++ b/.github/workflows/local-testnet.yml @@ -52,20 +52,18 @@ jobs: - name: Load Docker image run: docker load -i lighthouse-docker.tar - - name: Start local testnet - run: ./start_local_testnet.sh -e local -c -b false && sleep 60 + - name: Start local testnet with Assertoor + run: ./start_local_testnet.sh -e local-assertoor -c -a -b false && sleep 60 working-directory: scripts/local_testnet + - name: Await Assertoor test result + id: assertoor_test_result + uses: ethpandaops/assertoor-github-action@v1 + with: + kurtosis_enclave_name: local-assertoor + - name: Stop local testnet and dump logs - run: ./stop_local_testnet.sh local - working-directory: scripts/local_testnet - - - name: Start local testnet with blinded block production - run: ./start_local_testnet.sh -e local-blinded -c -p -b false && sleep 60 - working-directory: scripts/local_testnet - - - name: Stop local testnet and dump logs - run: ./stop_local_testnet.sh local-blinded + run: ./stop_local_testnet.sh local-assertoor working-directory: scripts/local_testnet - name: Upload logs artifact @@ -76,6 +74,29 @@ jobs: scripts/local_testnet/logs retention-days: 3 + - name: Return Assertoor test result + shell: bash + run: | + test_result="${{ steps.assertoor_test_result.outputs.result }}" + test_status=$( + cat <<"EOF" + ${{ steps.assertoor_test_result.outputs.test_overview }} + EOF + ) + failed_test_status=$( + cat <<"EOF" + ${{ steps.assertoor_test_result.outputs.failed_test_details }} + EOF + ) + + echo "Test Result: $test_result" + echo "$test_status" + if ! 
[ "$test_result" == "success" ]; then + echo "Failed Test Task Status:" + echo "$failed_test_status" + exit 1 + fi + doppelganger-protection-success-test: needs: dockerfile-ubuntu runs-on: ubuntu-22.04 diff --git a/scripts/local_testnet/start_local_testnet.sh b/scripts/local_testnet/start_local_testnet.sh index 1f15688693..8e8859ca0e 100755 --- a/scripts/local_testnet/start_local_testnet.sh +++ b/scripts/local_testnet/start_local_testnet.sh @@ -13,10 +13,12 @@ BUILD_IMAGE=true BUILDER_PROPOSALS=false CI=false KEEP_ENCLAVE=false +RUN_ASSERTOOR_TESTS=false # Get options -while getopts "e:b:n:phck" flag; do +while getopts "e:b:n:phcak" flag; do case "${flag}" in + a) RUN_ASSERTOOR_TESTS=true;; e) ENCLAVE_NAME=${OPTARG};; b) BUILD_IMAGE=${OPTARG};; n) NETWORK_PARAMS_FILE=${OPTARG};; @@ -34,6 +36,7 @@ while getopts "e:b:n:phck" flag; do echo " -n: kurtosis network params file path default: $NETWORK_PARAMS_FILE" echo " -p: enable builder proposals" echo " -c: CI mode, run without other additional services like Grafana and Dora explorer" + echo " -a: run Assertoor tests" echo " -k: keeping enclave to allow starting the testnet without destroying the existing one" echo " -h: this help" exit @@ -63,11 +66,18 @@ if [ "$BUILDER_PROPOSALS" = true ]; then fi if [ "$CI" = true ]; then - # TODO: run assertoor tests yq eval '.additional_services = []' -i $NETWORK_PARAMS_FILE echo "Running without additional services (CI mode)." fi +if [ "$RUN_ASSERTOOR_TESTS" = true ]; then + yq eval '.additional_services += ["assertoor"] | .additional_services |= unique' -i $NETWORK_PARAMS_FILE + # The available tests can be found in the `assertoor_params` section: + # https://github.com/ethpandaops/ethereum-package?tab=readme-ov-file#configuration + yq eval '.assertoor_params = {"run_stability_check": true, "run_block_proposal_check": true, "run_transaction_test": true, "run_blob_transaction_test": true}' -i $NETWORK_PARAMS_FILE + echo "Assertoor has been added to $NETWORK_PARAMS_FILE." 
+fi + if [ "$BUILD_IMAGE" = true ]; then echo "Building Lighthouse Docker image." ROOT_DIR="$SCRIPT_DIR/../.." From 94a1446ac9557c0382cdb96d5d5d01f249d1ddda Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Mon, 2 Jun 2025 11:51:09 +1000 Subject: [PATCH 10/13] Fix unexpected blob error and duplicate import in fetch blobs (#7541) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Getting this error on a non-PeerDAS network: ``` May 29 13:30:13.484 ERROR Error fetching or processing blobs from EL error: BlobProcessingError(AvailabilityCheck(Unexpected("empty blobs"))), block_root: 0x98aa3927056d453614fefbc79eb1f9865666d1f119d0e8aa9e6f4d02aa9395d9 ``` It appears we're passing an empty `Vec` to DA checker, because all blobs were already seen on gossip and filtered out, this causes a `AvailabilityCheckError::Unexpected("empty blobs")`. I've added equivalent unit tests for `getBlobsV1` to cover all the scenarios we test in `getBlobsV2`. This would have caught the bug if I had added it earlier. It also caught another bug which could trigger duplicate block import. Thanks Santito for reporting this! 🙏 --- .../beacon_chain/src/fetch_blobs/mod.rs | 24 +- .../beacon_chain/src/fetch_blobs/tests.rs | 706 ++++++++++++------ .../src/observed_data_sidecars.rs | 1 + 3 files changed, 515 insertions(+), 216 deletions(-) diff --git a/beacon_node/beacon_chain/src/fetch_blobs/mod.rs b/beacon_node/beacon_chain/src/fetch_blobs/mod.rs index 9d986f8fb6..74dc680a9a 100644 --- a/beacon_node/beacon_chain/src/fetch_blobs/mod.rs +++ b/beacon_node/beacon_chain/src/fetch_blobs/mod.rs @@ -42,6 +42,7 @@ use types::{ /// Result from engine get blobs to be passed onto `DataAvailabilityChecker` and published to the /// gossip network. The blobs / data columns have not been marked as observed yet, as they may not /// be published immediately. 
+#[derive(Debug)] pub enum EngineGetBlobsOutput { Blobs(Vec>), /// A filtered list of custody data columns to be imported into the `DataAvailabilityChecker`. @@ -163,9 +164,22 @@ async fn fetch_and_process_blobs_v1( inc_counter(&metrics::BLOBS_FROM_EL_MISS_TOTAL); return Ok(None); } else { + debug!( + num_expected_blobs, + num_fetched_blobs, "Received blobs from the EL" + ); inc_counter(&metrics::BLOBS_FROM_EL_HIT_TOTAL); } + if chain_adapter.fork_choice_contains_block(&block_root) { + // Avoid computing sidecars if the block has already been imported. + debug!( + info = "block has already been imported", + "Ignoring EL blobs response" + ); + return Ok(None); + } + let (signed_block_header, kzg_commitments_proof) = block .signed_block_header_and_kzg_commitments_proof() .map_err(FetchEngineBlobError::BeaconStateError)?; @@ -197,13 +211,13 @@ async fn fetch_and_process_blobs_v1( .collect::, _>>() .map_err(FetchEngineBlobError::GossipBlob)?; - if !blobs_to_import_and_publish.is_empty() { - publish_fn(EngineGetBlobsOutput::Blobs( - blobs_to_import_and_publish.clone(), - )); + if blobs_to_import_and_publish.is_empty() { + return Ok(None); } - debug!(num_fetched_blobs, "Processing engine blobs"); + publish_fn(EngineGetBlobsOutput::Blobs( + blobs_to_import_and_publish.clone(), + )); let availability_processing_status = chain_adapter .process_engine_blobs( diff --git a/beacon_node/beacon_chain/src/fetch_blobs/tests.rs b/beacon_node/beacon_chain/src/fetch_blobs/tests.rs index afee5adcb2..4556948ffc 100644 --- a/beacon_node/beacon_chain/src/fetch_blobs/tests.rs +++ b/beacon_node/beacon_chain/src/fetch_blobs/tests.rs @@ -7,227 +7,509 @@ use crate::test_utils::{get_kzg, EphemeralHarnessType}; use crate::AvailabilityProcessingStatus; use bls::Signature; use eth2::types::BlobsBundle; -use execution_layer::json_structures::BlobAndProofV2; +use execution_layer::json_structures::{BlobAndProof, BlobAndProofV1, BlobAndProofV2}; use execution_layer::test_utils::generate_blobs; use 
maplit::hashset; use std::sync::{Arc, Mutex}; use task_executor::test_utils::TestRuntime; use types::{ - BeaconBlockFulu, EmptyBlock, EthSpec, ForkName, Hash256, MainnetEthSpec, SignedBeaconBlock, - SignedBeaconBlockFulu, + BeaconBlock, BeaconBlockFulu, EmptyBlock, EthSpec, ForkName, Hash256, MainnetEthSpec, + SignedBeaconBlock, SignedBeaconBlockFulu, }; type E = MainnetEthSpec; type T = EphemeralHarnessType; -#[tokio::test(flavor = "multi_thread", worker_threads = 2)] -async fn test_fetch_blobs_v2_no_blobs_in_block() { - let mut mock_adapter = mock_beacon_adapter(); - let (publish_fn, _s) = mock_publish_fn(); - let block = SignedBeaconBlock::::Fulu(SignedBeaconBlockFulu { - message: BeaconBlockFulu::empty(mock_adapter.spec()), - signature: Signature::empty(), - }); - let block_root = block.canonical_root(); +mod get_blobs_v2 { + use super::*; - // Expectations: engine fetch blobs should not be triggered - mock_adapter.expect_get_blobs_v2().times(0); - mock_adapter.expect_process_engine_blobs().times(0); + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] + async fn test_fetch_blobs_v2_no_blobs_in_block() { + let mut mock_adapter = mock_beacon_adapter(ForkName::Fulu); + let (publish_fn, _s) = mock_publish_fn(); + let block = SignedBeaconBlock::::Fulu(SignedBeaconBlockFulu { + message: BeaconBlockFulu::empty(mock_adapter.spec()), + signature: Signature::empty(), + }); + let block_root = block.canonical_root(); - let custody_columns = hashset![0, 1, 2]; - let processing_status = fetch_and_process_engine_blobs_inner( - mock_adapter, - block_root, - Arc::new(block), - custody_columns.clone(), - publish_fn, - ) - .await - .expect("fetch blobs should succeed"); + // Expectations: engine fetch blobs should not be triggered + mock_adapter.expect_get_blobs_v2().times(0); + mock_adapter.expect_process_engine_blobs().times(0); - assert_eq!(processing_status, None); + let custody_columns = hashset![0, 1, 2]; + let processing_status = 
fetch_and_process_engine_blobs_inner( + mock_adapter, + block_root, + Arc::new(block), + custody_columns.clone(), + publish_fn, + ) + .await + .expect("fetch blobs should succeed"); + + assert_eq!(processing_status, None); + } + + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] + async fn test_fetch_blobs_v2_no_blobs_returned() { + let mut mock_adapter = mock_beacon_adapter(ForkName::Fulu); + let (publish_fn, _) = mock_publish_fn(); + let (block, _blobs_and_proofs) = create_test_block_and_blobs(&mock_adapter, 2); + let block_root = block.canonical_root(); + + // No blobs in EL response + mock_get_blobs_v2_response(&mut mock_adapter, None); + + // Trigger fetch blobs on the block + let custody_columns = hashset![0, 1, 2]; + let processing_status = fetch_and_process_engine_blobs_inner( + mock_adapter, + block_root, + block, + custody_columns.clone(), + publish_fn, + ) + .await + .expect("fetch blobs should succeed"); + + assert_eq!(processing_status, None); + } + + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] + async fn test_fetch_blobs_v2_partial_blobs_returned() { + let mut mock_adapter = mock_beacon_adapter(ForkName::Fulu); + let (publish_fn, publish_fn_args) = mock_publish_fn(); + let (block, mut blobs_and_proofs) = create_test_block_and_blobs(&mock_adapter, 2); + let block_root = block.canonical_root(); + + // Missing blob in EL response + blobs_and_proofs.pop(); + mock_get_blobs_v2_response(&mut mock_adapter, Some(blobs_and_proofs)); + // No blobs should be processed + mock_adapter.expect_process_engine_blobs().times(0); + + // Trigger fetch blobs on the block + let custody_columns = hashset![0, 1, 2]; + let processing_status = fetch_and_process_engine_blobs_inner( + mock_adapter, + block_root, + block, + custody_columns.clone(), + publish_fn, + ) + .await + .expect("fetch blobs should succeed"); + + assert_eq!(processing_status, None); + assert_eq!( + publish_fn_args.lock().unwrap().len(), + 0, + "no columns should be published" + ); 
+ } + + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] + async fn test_fetch_blobs_v2_block_imported_after_el_response() { + let mut mock_adapter = mock_beacon_adapter(ForkName::Fulu); + let (publish_fn, publish_fn_args) = mock_publish_fn(); + let (block, blobs_and_proofs) = create_test_block_and_blobs(&mock_adapter, 2); + let block_root = block.canonical_root(); + + // All blobs returned, but fork choice already imported the block + mock_get_blobs_v2_response(&mut mock_adapter, Some(blobs_and_proofs)); + mock_fork_choice_contains_block(&mut mock_adapter, vec![block.canonical_root()]); + // No blobs should be processed + mock_adapter.expect_process_engine_blobs().times(0); + + // Trigger fetch blobs on the block + let custody_columns = hashset![0, 1, 2]; + let processing_status = fetch_and_process_engine_blobs_inner( + mock_adapter, + block_root, + block, + custody_columns.clone(), + publish_fn, + ) + .await + .expect("fetch blobs should succeed"); + + assert_eq!(processing_status, None); + assert_eq!( + publish_fn_args.lock().unwrap().len(), + 0, + "no columns should be published" + ); + } + + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] + async fn test_fetch_blobs_v2_no_new_columns_to_import() { + let mut mock_adapter = mock_beacon_adapter(ForkName::Fulu); + let (publish_fn, publish_fn_args) = mock_publish_fn(); + let (block, blobs_and_proofs) = create_test_block_and_blobs(&mock_adapter, 2); + let block_root = block.canonical_root(); + + // **GIVEN**: + // All blobs returned + mock_get_blobs_v2_response(&mut mock_adapter, Some(blobs_and_proofs)); + // block not yet imported into fork choice + mock_fork_choice_contains_block(&mut mock_adapter, vec![]); + // All data columns already seen on gossip + mock_adapter + .expect_verify_data_column_for_gossip() + .returning(|c| { + Err(GossipDataColumnError::PriorKnown { + proposer: c.block_proposer_index(), + slot: c.slot(), + index: c.index, + }) + }); + // No blobs should be processed + 
mock_adapter.expect_process_engine_blobs().times(0); + + // **WHEN**: Trigger `fetch_blobs` on the block + let custody_columns = hashset![0, 1, 2]; + let processing_status = fetch_and_process_engine_blobs_inner( + mock_adapter, + block_root, + block, + custody_columns.clone(), + publish_fn, + ) + .await + .expect("fetch blobs should succeed"); + + // **THEN**: Should NOT be processed and no columns should be published. + assert_eq!(processing_status, None); + assert_eq!( + publish_fn_args.lock().unwrap().len(), + 0, + "no columns should be published" + ); + } + + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] + async fn test_fetch_blobs_v2_success() { + let mut mock_adapter = mock_beacon_adapter(ForkName::Fulu); + let (publish_fn, publish_fn_args) = mock_publish_fn(); + let (block, blobs_and_proofs) = create_test_block_and_blobs(&mock_adapter, 2); + let block_root = block.canonical_root(); + + // All blobs returned, fork choice doesn't contain block + mock_get_blobs_v2_response(&mut mock_adapter, Some(blobs_and_proofs)); + mock_fork_choice_contains_block(&mut mock_adapter, vec![]); + mock_adapter + .expect_verify_data_column_for_gossip() + .returning(|c| Ok(GossipVerifiedDataColumn::__new_for_testing(c))); + mock_process_engine_blobs_result( + &mut mock_adapter, + Ok(AvailabilityProcessingStatus::Imported(block_root)), + ); + + // Trigger fetch blobs on the block + let custody_columns = hashset![0, 1, 2]; + let processing_status = fetch_and_process_engine_blobs_inner( + mock_adapter, + block_root, + block, + custody_columns.clone(), + publish_fn, + ) + .await + .expect("fetch blobs should succeed"); + + assert_eq!( + processing_status, + Some(AvailabilityProcessingStatus::Imported(block_root)) + ); + + let published_columns = extract_published_blobs(publish_fn_args); + assert!( + matches!( + published_columns, + EngineGetBlobsOutput::CustodyColumns(columns) if columns.len() == custody_columns.len() + ), + "should publish custody columns" + ); + } + + 
fn mock_get_blobs_v2_response( + mock_adapter: &mut MockFetchBlobsBeaconAdapter, + blobs_and_proofs_opt: Option>>, + ) { + let blobs_and_proofs_v2_opt = blobs_and_proofs_opt.map(|blobs_and_proofs| { + blobs_and_proofs + .into_iter() + .map(|blob_and_proof| match blob_and_proof { + BlobAndProof::V2(inner) => inner, + _ => panic!("BlobAndProofV2 not expected"), + }) + .collect() + }); + mock_adapter + .expect_get_blobs_v2() + .return_once(move |_| Ok(blobs_and_proofs_v2_opt)); + } } -#[tokio::test(flavor = "multi_thread", worker_threads = 2)] -async fn test_fetch_blobs_v2_no_blobs_returned() { - let mut mock_adapter = mock_beacon_adapter(); - let (publish_fn, _) = mock_publish_fn(); - let (block, _blobs_and_proofs) = create_test_block_and_blobs(&mock_adapter); - let block_root = block.canonical_root(); +mod get_blobs_v1 { + use super::*; + use crate::blob_verification::{GossipBlobError, GossipVerifiedBlob}; + use crate::block_verification_types::AsBlock; - // No blobs in EL response - mock_get_blobs_v2_response(&mut mock_adapter, None); + const ELECTRA_FORK: ForkName = ForkName::Electra; - // Trigger fetch blobs on the block - let custody_columns = hashset![0, 1, 2]; - let processing_status = fetch_and_process_engine_blobs_inner( - mock_adapter, - block_root, - block, - custody_columns.clone(), - publish_fn, - ) - .await - .expect("fetch blobs should succeed"); + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] + async fn test_fetch_blobs_v1_no_blobs_in_block() { + let mut mock_adapter = mock_beacon_adapter(ELECTRA_FORK); + let spec = mock_adapter.spec(); + let (publish_fn, _s) = mock_publish_fn(); + let block_no_blobs = + SignedBeaconBlock::from_block(BeaconBlock::empty(spec), Signature::empty()); + let block_root = block_no_blobs.canonical_root(); - assert_eq!(processing_status, None); -} + // Expectations: engine fetch blobs should not be triggered + mock_adapter.expect_get_blobs_v1().times(0); -#[tokio::test(flavor = "multi_thread", worker_threads = 
2)] -async fn test_fetch_blobs_v2_partial_blobs_returned() { - let mut mock_adapter = mock_beacon_adapter(); - let (publish_fn, publish_fn_args) = mock_publish_fn(); - let (block, mut blobs_and_proofs) = create_test_block_and_blobs(&mock_adapter); - let block_root = block.canonical_root(); + // WHEN: Trigger fetch blobs on the block + let custody_columns = hashset![0, 1, 2]; + let processing_status = fetch_and_process_engine_blobs_inner( + mock_adapter, + block_root, + Arc::new(block_no_blobs), + custody_columns, + publish_fn, + ) + .await + .expect("fetch blobs should succeed"); - // Missing blob in EL response - blobs_and_proofs.pop(); - mock_get_blobs_v2_response(&mut mock_adapter, Some(blobs_and_proofs)); - // No blobs should be processed - mock_adapter.expect_process_engine_blobs().times(0); + // THEN: No blob is processed + assert_eq!(processing_status, None); + } - // Trigger fetch blobs on the block - let custody_columns = hashset![0, 1, 2]; - let processing_status = fetch_and_process_engine_blobs_inner( - mock_adapter, - block_root, - block, - custody_columns.clone(), - publish_fn, - ) - .await - .expect("fetch blobs should succeed"); + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] + async fn test_fetch_blobs_v1_no_blobs_returned() { + let mut mock_adapter = mock_beacon_adapter(ELECTRA_FORK); + let (publish_fn, _) = mock_publish_fn(); + let (block, _blobs_and_proofs) = create_test_block_and_blobs(&mock_adapter, 2); + let block_root = block.canonical_root(); - assert_eq!(processing_status, None); - assert_eq!( - publish_fn_args.lock().unwrap().len(), - 0, - "no columns should be published" - ); -} + // GIVEN: No blobs in EL response + let expected_blob_count = block.message().body().blob_kzg_commitments().unwrap().len(); + mock_get_blobs_v1_response(&mut mock_adapter, vec![None; expected_blob_count]); -#[tokio::test(flavor = "multi_thread", worker_threads = 2)] -async fn test_fetch_blobs_v2_block_imported_after_el_response() { - let mut 
mock_adapter = mock_beacon_adapter(); - let (publish_fn, publish_fn_args) = mock_publish_fn(); - let (block, blobs_and_proofs) = create_test_block_and_blobs(&mock_adapter); - let block_root = block.canonical_root(); + // WHEN: Trigger fetch blobs on the block + let custody_columns = hashset![0, 1, 2]; + let processing_status = fetch_and_process_engine_blobs_inner( + mock_adapter, + block_root, + block, + custody_columns, + publish_fn, + ) + .await + .expect("fetch blobs should succeed"); - // All blobs returned, but fork choice already imported the block - mock_get_blobs_v2_response(&mut mock_adapter, Some(blobs_and_proofs)); - mock_fork_choice_contains_block(&mut mock_adapter, vec![block.canonical_root()]); - // No blobs should be processed - mock_adapter.expect_process_engine_blobs().times(0); + // THEN: No blob is processed + assert_eq!(processing_status, None); + } - // Trigger fetch blobs on the block - let custody_columns = hashset![0, 1, 2]; - let processing_status = fetch_and_process_engine_blobs_inner( - mock_adapter, - block_root, - block, - custody_columns.clone(), - publish_fn, - ) - .await - .expect("fetch blobs should succeed"); + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] + async fn test_fetch_blobs_v1_partial_blobs_returned() { + let mut mock_adapter = mock_beacon_adapter(ELECTRA_FORK); + let (publish_fn, publish_fn_args) = mock_publish_fn(); + let blob_count = 2; + let (block, blobs_and_proofs) = create_test_block_and_blobs(&mock_adapter, blob_count); + let block_slot = block.slot(); + let block_root = block.canonical_root(); - assert_eq!(processing_status, None); - assert_eq!( - publish_fn_args.lock().unwrap().len(), - 0, - "no columns should be published" - ); -} + // GIVEN: Missing a blob in EL response (remove 1 blob from response) + let mut blob_and_proof_opts = blobs_and_proofs.into_iter().map(Some).collect::>(); + blob_and_proof_opts.first_mut().unwrap().take(); + mock_get_blobs_v1_response(&mut mock_adapter, 
blob_and_proof_opts); + // AND block is not imported into fork choice + mock_fork_choice_contains_block(&mut mock_adapter, vec![]); + // AND all blobs returned are valid + mock_adapter + .expect_verify_blob_for_gossip() + .returning(|b| Ok(GossipVerifiedBlob::__assumed_valid(b.clone()))); + // Returned blobs should be processed + mock_process_engine_blobs_result( + &mut mock_adapter, + Ok(AvailabilityProcessingStatus::MissingComponents( + block_slot, block_root, + )), + ); -#[tokio::test(flavor = "multi_thread", worker_threads = 2)] -async fn test_fetch_blobs_v2_no_new_columns_to_import() { - let mut mock_adapter = mock_beacon_adapter(); - let (publish_fn, publish_fn_args) = mock_publish_fn(); - let (block, blobs_and_proofs) = create_test_block_and_blobs(&mock_adapter); - let block_root = block.canonical_root(); + // WHEN: Trigger fetch blobs on the block + let custody_columns = hashset![0, 1, 2]; + let processing_status = fetch_and_process_engine_blobs_inner( + mock_adapter, + block_root, + block, + custody_columns, + publish_fn, + ) + .await + .expect("fetch blobs should succeed"); - // **GIVEN**: - // All blobs returned - mock_get_blobs_v2_response(&mut mock_adapter, Some(blobs_and_proofs)); - // block not yet imported into fork choice - mock_fork_choice_contains_block(&mut mock_adapter, vec![]); - // All data columns already seen on gossip - mock_adapter - .expect_verify_data_column_for_gossip() - .returning(|c| { - Err(GossipDataColumnError::PriorKnown { - proposer: c.block_proposer_index(), - slot: c.slot(), - index: c.index, + // THEN: Returned blobs are processed and published + assert_eq!( + processing_status, + Some(AvailabilityProcessingStatus::MissingComponents( + block_slot, block_root, + )) + ); + assert!( + matches!( + extract_published_blobs(publish_fn_args), + EngineGetBlobsOutput::Blobs(blobs) if blobs.len() == blob_count - 1 + ), + "partial blob results should still be published" + ); + } + + #[tokio::test(flavor = "multi_thread", worker_threads 
= 2)] + async fn test_fetch_blobs_v1_block_imported_after_el_response() { + let mut mock_adapter = mock_beacon_adapter(ELECTRA_FORK); + let (publish_fn, publish_fn_args) = mock_publish_fn(); + let (block, blobs_and_proofs) = create_test_block_and_blobs(&mock_adapter, 2); + let block_root = block.canonical_root(); + + // GIVEN: All blobs returned, but fork choice already imported the block + let blob_and_proof_opts = blobs_and_proofs.into_iter().map(Some).collect::>(); + mock_get_blobs_v1_response(&mut mock_adapter, blob_and_proof_opts); + mock_fork_choice_contains_block(&mut mock_adapter, vec![block.canonical_root()]); + + // WHEN: Trigger fetch blobs on the block + let custody_columns = hashset![0, 1, 2]; + let processing_status = fetch_and_process_engine_blobs_inner( + mock_adapter, + block_root, + block, + custody_columns, + publish_fn, + ) + .await + .expect("fetch blobs should succeed"); + + // THEN: Returned blobs should NOT be processed or published. + assert_eq!(processing_status, None); + assert_eq!( + publish_fn_args.lock().unwrap().len(), + 0, + "no blobs should be published" + ); + } + + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] + async fn test_fetch_blobs_v1_no_new_blobs_to_import() { + let mut mock_adapter = mock_beacon_adapter(ELECTRA_FORK); + let (publish_fn, publish_fn_args) = mock_publish_fn(); + let (block, blobs_and_proofs) = create_test_block_and_blobs(&mock_adapter, 2); + let block_root = block.canonical_root(); + + // **GIVEN**: + // All blobs returned + let blob_and_proof_opts = blobs_and_proofs.into_iter().map(Some).collect::>(); + mock_get_blobs_v1_response(&mut mock_adapter, blob_and_proof_opts); + // block not yet imported into fork choice + mock_fork_choice_contains_block(&mut mock_adapter, vec![]); + // All blobs already seen on gossip + mock_adapter.expect_verify_blob_for_gossip().returning(|b| { + Err(GossipBlobError::RepeatBlob { + proposer: b.block_proposer_index(), + slot: b.slot(), + index: b.index, }) }); - // 
No blobs should be processed - mock_adapter.expect_process_engine_blobs().times(0); - // **WHEN**: Trigger `fetch_blobs` on the block - let custody_columns = hashset![0, 1, 2]; - let processing_status = fetch_and_process_engine_blobs_inner( - mock_adapter, - block_root, - block, - custody_columns.clone(), - publish_fn, - ) - .await - .expect("fetch blobs should succeed"); + // **WHEN**: Trigger `fetch_blobs` on the block + let custody_columns = hashset![0, 1, 2]; + let processing_status = fetch_and_process_engine_blobs_inner( + mock_adapter, + block_root, + block, + custody_columns, + publish_fn, + ) + .await + .expect("fetch blobs should succeed"); - // **THEN**: Should NOT be processed and no columns should be published. - assert_eq!(processing_status, None); - assert_eq!( - publish_fn_args.lock().unwrap().len(), - 0, - "no columns should be published" - ); -} + // **THEN**: Should NOT be processed and no blobs should be published. + assert_eq!(processing_status, None); + assert_eq!( + publish_fn_args.lock().unwrap().len(), + 0, + "no blobs should be published" + ); + } -#[tokio::test(flavor = "multi_thread", worker_threads = 2)] -async fn test_fetch_blobs_v2_success() { - let mut mock_adapter = mock_beacon_adapter(); - let (publish_fn, publish_fn_args) = mock_publish_fn(); - let (block, blobs_and_proofs) = create_test_block_and_blobs(&mock_adapter); - let block_root = block.canonical_root(); + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] + async fn test_fetch_blobs_v1_success() { + let mut mock_adapter = mock_beacon_adapter(ELECTRA_FORK); + let (publish_fn, publish_fn_args) = mock_publish_fn(); + let blob_count = 2; + let (block, blobs_and_proofs) = create_test_block_and_blobs(&mock_adapter, blob_count); + let block_root = block.canonical_root(); - // All blobs returned, fork choice doesn't contain block - mock_get_blobs_v2_response(&mut mock_adapter, Some(blobs_and_proofs)); - mock_fork_choice_contains_block(&mut mock_adapter, vec![]); - 
mock_adapter - .expect_verify_data_column_for_gossip() - .returning(|c| Ok(GossipVerifiedDataColumn::__new_for_testing(c))); - mock_process_engine_blobs_result( - &mut mock_adapter, - Ok(AvailabilityProcessingStatus::Imported(block_root)), - ); + // All blobs returned, fork choice doesn't contain block + let blob_and_proof_opts = blobs_and_proofs.into_iter().map(Some).collect::>(); + mock_get_blobs_v1_response(&mut mock_adapter, blob_and_proof_opts); + mock_fork_choice_contains_block(&mut mock_adapter, vec![]); + mock_adapter + .expect_verify_blob_for_gossip() + .returning(|b| Ok(GossipVerifiedBlob::__assumed_valid(b.clone()))); + mock_process_engine_blobs_result( + &mut mock_adapter, + Ok(AvailabilityProcessingStatus::Imported(block_root)), + ); - // Trigger fetch blobs on the block - let custody_columns = hashset![0, 1, 2]; - let processing_status = fetch_and_process_engine_blobs_inner( - mock_adapter, - block_root, - block, - custody_columns.clone(), - publish_fn, - ) - .await - .expect("fetch blobs should succeed"); + // Trigger fetch blobs on the block + let custody_columns = hashset![0, 1, 2]; + let processing_status = fetch_and_process_engine_blobs_inner( + mock_adapter, + block_root, + block, + custody_columns, + publish_fn, + ) + .await + .expect("fetch blobs should succeed"); - assert_eq!( - processing_status, - Some(AvailabilityProcessingStatus::Imported(block_root)) - ); + // THEN all fetched blobs are processed and published + assert_eq!( + processing_status, + Some(AvailabilityProcessingStatus::Imported(block_root)) + ); - let published_columns = extract_published_blobs(publish_fn_args); - assert!( - matches!( - published_columns, - EngineGetBlobsOutput::CustodyColumns(columns) if columns.len() == custody_columns.len() - ), - "should publish custody columns" - ); + let published_blobs = extract_published_blobs(publish_fn_args); + assert!( + matches!( + published_blobs, + EngineGetBlobsOutput::Blobs(blobs) if blobs.len() == blob_count + ), + "should 
publish fetched blobs" + ); + } + + fn mock_get_blobs_v1_response( + mock_adapter: &mut MockFetchBlobsBeaconAdapter, + blobs_and_proofs_opt: Vec>>, + ) { + let blobs_and_proofs_v1 = blobs_and_proofs_opt + .into_iter() + .map(|blob_and_proof_opt| { + blob_and_proof_opt.map(|blob_and_proof| match blob_and_proof { + BlobAndProof::V1(inner) => inner, + _ => panic!("BlobAndProofV1 not expected"), + }) + }) + .collect(); + mock_adapter + .expect_get_blobs_v1() + .return_once(move |_| Ok(blobs_and_proofs_v1)); + } } /// Extract the `EngineGetBlobsOutput` passed to the `publish_fn`. @@ -257,23 +539,14 @@ fn mock_fork_choice_contains_block( .returning(move |block_root| block_roots.contains(block_root)); } -fn mock_get_blobs_v2_response( - mock_adapter: &mut MockFetchBlobsBeaconAdapter, - blobs_and_proofs_opt: Option>>, -) { - mock_adapter - .expect_get_blobs_v2() - .return_once(move |_| Ok(blobs_and_proofs_opt)); -} - fn create_test_block_and_blobs( mock_adapter: &MockFetchBlobsBeaconAdapter, -) -> (Arc>, Vec>) { - let mut block = SignedBeaconBlock::Fulu(SignedBeaconBlockFulu { - message: BeaconBlockFulu::empty(mock_adapter.spec()), - signature: Signature::empty(), - }); - let (blobs_bundle, _tx) = generate_blobs::(2, block.fork_name_unchecked()).unwrap(); + blob_count: usize, +) -> (Arc>, Vec>) { + let mut block = + SignedBeaconBlock::from_block(BeaconBlock::empty(mock_adapter.spec()), Signature::empty()); + let fork = block.fork_name_unchecked(); + let (blobs_bundle, _tx) = generate_blobs::(blob_count, fork).unwrap(); let BlobsBundle { commitments, proofs, @@ -286,16 +559,27 @@ fn create_test_block_and_blobs( .blob_kzg_commitments_mut() .unwrap() = commitments; - let proofs_len = proofs.len() / blobs.len(); - let blob_and_proofs: Vec> = blobs - .into_iter() - .zip(proofs.chunks(proofs_len)) - .map(|(blob, proofs)| BlobAndProofV2 { - blob, - proofs: proofs.to_vec().into(), - }) - .collect(); - (Arc::new(block), blob_and_proofs) + let blobs_and_proofs = if 
fork.fulu_enabled() { + let proofs_len = proofs.len() / blobs.len(); + blobs + .into_iter() + .zip(proofs.chunks(proofs_len)) + .map(|(blob, proofs)| { + BlobAndProof::V2(BlobAndProofV2 { + blob, + proofs: proofs.to_vec().into(), + }) + }) + .collect() + } else { + blobs + .into_iter() + .zip(proofs) + .map(|(blob, proof)| BlobAndProof::V1(BlobAndProofV1 { blob, proof })) + .collect() + }; + + (Arc::new(block), blobs_and_proofs) } #[allow(clippy::type_complexity)] @@ -313,9 +597,9 @@ fn mock_publish_fn() -> ( (publish_fn, captured_args) } -fn mock_beacon_adapter() -> MockFetchBlobsBeaconAdapter { +fn mock_beacon_adapter(fork_name: ForkName) -> MockFetchBlobsBeaconAdapter { let test_runtime = TestRuntime::default(); - let spec = Arc::new(ForkName::Fulu.make_genesis_spec(E::default_spec())); + let spec = Arc::new(fork_name.make_genesis_spec(E::default_spec())); let kzg = get_kzg(&spec); let mut mock_adapter = MockFetchBlobsBeaconAdapter::default(); diff --git a/beacon_node/beacon_chain/src/observed_data_sidecars.rs b/beacon_node/beacon_chain/src/observed_data_sidecars.rs index 1ca6c03f00..d3bda09712 100644 --- a/beacon_node/beacon_chain/src/observed_data_sidecars.rs +++ b/beacon_node/beacon_chain/src/observed_data_sidecars.rs @@ -161,6 +161,7 @@ pub trait ObservationStrategy { /// Type for messages that are observed immediately. pub struct Observe; /// Type for messages that have not been observed. 
+#[derive(Debug)] pub struct DoNotObserve; impl ObservationStrategy for Observe { From ae30480926b68e8f2ea7f7beca415625b9378699 Mon Sep 17 00:00:00 2001 From: ethDreamer <37123614+ethDreamer@users.noreply.github.com> Date: Mon, 2 Jun 2025 01:54:42 -0500 Subject: [PATCH 11/13] Implement EIP-7892 BPO hardforks (#7521) [EIP-7892: Blob Parameter Only Hardforks](https://eips.ethereum.org/EIPS/eip-7892) #7467 --- beacon_node/beacon_chain/src/test_utils.rs | 6 +- .../test_utils/execution_block_generator.rs | 12 +- beacon_node/http_api/tests/tests.rs | 2 +- .../lighthouse_network/src/rpc/handler.rs | 7 +- .../lighthouse_network/src/rpc/methods.rs | 7 +- .../lighthouse_network/src/rpc/protocol.rs | 12 +- .../src/network_beacon_processor/tests.rs | 20 +- common/eth2/src/lib.rs | 4 +- consensus/types/src/chain_spec.rs | 264 ++++++++++++++++-- 9 files changed, 286 insertions(+), 48 deletions(-) diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index d3689f7068..3ee8c7ae3f 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -516,11 +516,7 @@ where self } - pub fn mock_execution_layer(self) -> Self { - self.mock_execution_layer_with_config() - } - - pub fn mock_execution_layer_with_config(mut self) -> Self { + pub fn mock_execution_layer(mut self) -> Self { let mock = mock_execution_layer_from_parts::( self.spec.clone().expect("cannot build without spec"), self.runtime.task_executor.clone(), diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index b057abe887..e01b8de9e3 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -142,6 +142,7 @@ pub struct ExecutionBlockGenerator { pub pending_payloads: HashMap>, pub next_payload_id: u64, pub payload_ids: 
HashMap>, + min_blobs_count: usize, /* * Post-merge fork triggers */ @@ -188,6 +189,7 @@ impl ExecutionBlockGenerator { pending_payloads: <_>::default(), next_payload_id: 0, payload_ids: <_>::default(), + min_blobs_count: 0, shanghai_time, cancun_time, prague_time, @@ -318,6 +320,10 @@ impl ExecutionBlockGenerator { Ok(()) } + pub fn set_min_blob_count(&mut self, count: usize) { + self.min_blobs_count = count; + } + pub fn insert_pow_block(&mut self, block_number: u64) -> Result<(), String> { if let Some(finalized_block_hash) = self.finalized_block_hash { return Err(format!( @@ -702,8 +708,10 @@ impl ExecutionBlockGenerator { if fork_name.deneb_enabled() { // get random number between 0 and Max Blobs let mut rng = self.rng.lock(); - let max_blobs = self.spec.max_blobs_per_block_by_fork(fork_name) as usize; - let num_blobs = rng.gen::() % (max_blobs + 1); + // TODO(EIP-7892): see FIXME below + // FIXME: this will break with BPO forks. This function needs to calculate the epoch based on block timestamp.. 
+ let max_blobs = self.spec.max_blobs_per_block_within_fork(fork_name) as usize; + let num_blobs = rng.gen_range(self.min_blobs_count..=max_blobs); let (bundle, transactions) = generate_blobs(num_blobs, fork_name)?; for tx in Vec::from(transactions) { execution_payload diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index a5a21fd985..4ad70c3467 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -138,7 +138,7 @@ impl ApiTester { .deterministic_keypairs(VALIDATOR_COUNT) .deterministic_withdrawal_keypairs(VALIDATOR_COUNT) .fresh_ephemeral_store() - .mock_execution_layer_with_config() + .mock_execution_layer() .build(); harness diff --git a/beacon_node/lighthouse_network/src/rpc/handler.rs b/beacon_node/lighthouse_network/src/rpc/handler.rs index 33c5521c3b..8c35bf7145 100644 --- a/beacon_node/lighthouse_network/src/rpc/handler.rs +++ b/beacon_node/lighthouse_network/src/rpc/handler.rs @@ -28,7 +28,7 @@ use std::{ use tokio::time::{sleep, Sleep}; use tokio_util::time::{delay_queue, DelayQueue}; use tracing::{debug, trace}; -use types::{EthSpec, ForkContext}; +use types::{EthSpec, ForkContext, Slot}; /// The number of times to retry an outbound upgrade in the case of IO errors. 
const IO_ERROR_RETRIES: u8 = 3; @@ -932,9 +932,8 @@ where } } RequestType::BlobsByRange(request) => { - let max_requested_blobs = request - .count - .saturating_mul(spec.max_blobs_per_block_by_fork(current_fork)); + let epoch = Slot::new(request.start_slot).epoch(E::slots_per_epoch()); + let max_requested_blobs = request.max_blobs_requested(epoch, spec); let max_allowed = spec.max_request_blob_sidecars(current_fork) as u64; if max_requested_blobs > max_allowed { self.events_out.push(HandlerEvent::Err(HandlerErr::Inbound { diff --git a/beacon_node/lighthouse_network/src/rpc/methods.rs b/beacon_node/lighthouse_network/src/rpc/methods.rs index 9fe2fef9e8..8a11a6f29d 100644 --- a/beacon_node/lighthouse_network/src/rpc/methods.rs +++ b/beacon_node/lighthouse_network/src/rpc/methods.rs @@ -16,11 +16,10 @@ use types::blob_sidecar::BlobIdentifier; use types::light_client_update::MAX_REQUEST_LIGHT_CLIENT_UPDATES; use types::{ blob_sidecar::BlobSidecar, ChainSpec, ColumnIndex, DataColumnSidecar, - DataColumnsByRootIdentifier, Epoch, EthSpec, Hash256, LightClientBootstrap, + DataColumnsByRootIdentifier, Epoch, EthSpec, ForkContext, Hash256, LightClientBootstrap, LightClientFinalityUpdate, LightClientOptimisticUpdate, LightClientUpdate, RuntimeVariableList, SignedBeaconBlock, Slot, }; -use types::{ForkContext, ForkName}; /// Maximum length of error message. 
pub type MaxErrorLen = U256; @@ -328,8 +327,8 @@ pub struct BlobsByRangeRequest { } impl BlobsByRangeRequest { - pub fn max_blobs_requested(&self, current_fork: ForkName, spec: &ChainSpec) -> u64 { - let max_blobs_per_block = spec.max_blobs_per_block_by_fork(current_fork); + pub fn max_blobs_requested(&self, epoch: Epoch, spec: &ChainSpec) -> u64 { + let max_blobs_per_block = spec.max_blobs_per_block(epoch); self.count.saturating_mul(max_blobs_per_block) } } diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index 820f50ac93..bfe64f58db 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -21,7 +21,7 @@ use types::{ EmptyBlock, EthSpec, EthSpecId, ForkContext, ForkName, LightClientBootstrap, LightClientBootstrapAltair, LightClientFinalityUpdate, LightClientFinalityUpdateAltair, LightClientOptimisticUpdate, LightClientOptimisticUpdateAltair, LightClientUpdate, - MainnetEthSpec, MinimalEthSpec, Signature, SignedBeaconBlock, + MainnetEthSpec, MinimalEthSpec, Signature, SignedBeaconBlock, Slot, }; // Note: Hardcoding the `EthSpec` type for `SignedBeaconBlock` as min/max values is @@ -633,7 +633,8 @@ pub fn rpc_blob_limits() -> RpcLimits { pub fn rpc_data_column_limits(fork_name: ForkName, spec: &ChainSpec) -> RpcLimits { RpcLimits::new( DataColumnSidecar::::min_size(), - DataColumnSidecar::::max_size(spec.max_blobs_per_block_by_fork(fork_name) as usize), + // TODO(EIP-7892): fix this once we change fork-version on BPO forks + DataColumnSidecar::::max_size(spec.max_blobs_per_block_within_fork(fork_name) as usize), ) } @@ -732,13 +733,16 @@ impl RequestType { /* These functions are used in the handler for stream management */ /// Maximum number of responses expected for this request. 
- pub fn max_responses(&self, current_fork: ForkName, spec: &ChainSpec) -> u64 { + /// TODO(EIP-7892): refactor this to remove `_current_fork` + pub fn max_responses(&self, _current_fork: ForkName, spec: &ChainSpec) -> u64 { match self { RequestType::Status(_) => 1, RequestType::Goodbye(_) => 0, RequestType::BlocksByRange(req) => *req.count(), RequestType::BlocksByRoot(req) => req.block_roots().len() as u64, - RequestType::BlobsByRange(req) => req.max_blobs_requested(current_fork, spec), + RequestType::BlobsByRange(req) => { + req.max_blobs_requested(Slot::new(req.start_slot).epoch(E::slots_per_epoch()), spec) + } RequestType::BlobsByRoot(req) => req.blob_ids.len() as u64, RequestType::DataColumnsByRoot(req) => req.max_requested() as u64, RequestType::DataColumnsByRange(req) => req.max_requested::(), diff --git a/beacon_node/network/src/network_beacon_processor/tests.rs b/beacon_node/network/src/network_beacon_processor/tests.rs index 87a5a77294..cb9c976404 100644 --- a/beacon_node/network/src/network_beacon_processor/tests.rs +++ b/beacon_node/network/src/network_beacon_processor/tests.rs @@ -37,8 +37,8 @@ use tokio::sync::mpsc; use types::blob_sidecar::FixedBlobSidecarList; use types::{ Attestation, AttesterSlashing, BlobSidecar, BlobSidecarList, ChainSpec, DataColumnSidecarList, - DataColumnSubnetId, Epoch, EthSpec, ForkName, Hash256, MainnetEthSpec, ProposerSlashing, - SignedAggregateAndProof, SignedBeaconBlock, SignedVoluntaryExit, Slot, SubnetId, + DataColumnSubnetId, Epoch, Hash256, MainnetEthSpec, ProposerSlashing, SignedAggregateAndProof, + SignedBeaconBlock, SignedVoluntaryExit, Slot, SubnetId, }; type E = MainnetEthSpec; @@ -129,6 +129,14 @@ impl TestRig { "precondition: current slot is one after head" ); + // Ensure there is a blob in the next block. Required for some tests. 
+ harness + .mock_execution_layer + .as_ref() + .unwrap() + .server + .execution_block_generator() + .set_min_blob_count(1); let (next_block_tuple, next_state) = harness .make_block(head.beacon_state.clone(), harness.chain.slot().unwrap()) .await; @@ -799,9 +807,11 @@ async fn import_gossip_block_unacceptably_early() { /// Data columns that have already been processed but unobserved should be propagated without re-importing. #[tokio::test] async fn accept_processed_gossip_data_columns_without_import() { - let processor_config = BeaconProcessorConfig::default(); - let fulu_genesis_spec = ForkName::Fulu.make_genesis_spec(E::default_spec()); - let mut rig = TestRig::new_parametric(SMALL_CHAIN, processor_config, fulu_genesis_spec).await; + if test_spec::().fulu_fork_epoch.is_none() { + return; + }; + + let mut rig = TestRig::new(SMALL_CHAIN).await; // GIVEN the data columns have already been processed but unobserved. // 1. verify data column with `DoNotObserve` to create verified but unobserved data columns. 
diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index fa3fa04783..52cc91ba29 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -1296,7 +1296,9 @@ impl BeaconNodeHttpClient { } self.get_fork_contextual(path, |fork| { - (fork, spec.max_blobs_per_block_by_fork(fork) as usize) + // TODO(EIP-7892): this will overestimate the max number of blobs + // It would be better if we could get an epoch passed into this function + (fork, spec.max_blobs_per_block_within_fork(fork) as usize) }) .await .map(|opt| opt.map(BeaconResponse::ForkVersioned)) diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index 7b9950db91..59472e2edc 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -243,7 +243,7 @@ pub struct ChainSpec { /* * Networking Fulu */ - max_blobs_per_block_fulu: u64, + blob_schedule: BlobSchedule, /* * Networking Derived @@ -653,19 +653,40 @@ impl ChainSpec { } } - /// Return the value of `MAX_BLOBS_PER_BLOCK` appropriate for the fork at `epoch`. + /// Return the value of `MAX_BLOBS_PER_BLOCK` for the given `epoch`. + /// NOTE: this function is *technically* not spec compliant, but + /// I'm told this is what the other clients are doing for `devnet-0`.. pub fn max_blobs_per_block(&self, epoch: Epoch) -> u64 { - self.max_blobs_per_block_by_fork(self.fork_name_at_epoch(epoch)) + match self.fulu_fork_epoch { + Some(fulu_epoch) if epoch >= fulu_epoch => self + .blob_schedule + .max_blobs_for_epoch(epoch) + .unwrap_or(self.max_blobs_per_block_electra), + _ => match self.electra_fork_epoch { + Some(electra_epoch) if epoch >= electra_epoch => self.max_blobs_per_block_electra, + _ => self.max_blobs_per_block, + }, + } } - /// Return the value of `MAX_BLOBS_PER_BLOCK` appropriate for `fork`. 
- pub fn max_blobs_per_block_by_fork(&self, fork_name: ForkName) -> u64 { - if fork_name.fulu_enabled() { - self.max_blobs_per_block_fulu - } else if fork_name.electra_enabled() { - self.max_blobs_per_block_electra + // TODO(EIP-7892): remove this once we have fork-version changes on BPO forks + pub fn max_blobs_per_block_within_fork(&self, fork_name: ForkName) -> u64 { + if !fork_name.fulu_enabled() { + if fork_name.electra_enabled() { + self.max_blobs_per_block_electra + } else { + self.max_blobs_per_block + } } else { - self.max_blobs_per_block + // Find the max blobs per block in the fork schedule + // This logic will need to be more complex once there are forks beyond Fulu + let mut max_blobs_per_block = self.max_blobs_per_block_electra; + for entry in &self.blob_schedule { + if entry.max_blobs_per_block > max_blobs_per_block { + max_blobs_per_block = entry.max_blobs_per_block; + } + } + max_blobs_per_block } } @@ -1002,7 +1023,7 @@ impl ChainSpec { /* * Networking Fulu specific */ - max_blobs_per_block_fulu: default_max_blobs_per_block_fulu(), + blob_schedule: BlobSchedule::default(), /* * Application specific @@ -1336,7 +1357,7 @@ impl ChainSpec { /* * Networking Fulu specific */ - max_blobs_per_block_fulu: default_max_blobs_per_block_fulu(), + blob_schedule: BlobSchedule::default(), /* * Application specific @@ -1357,6 +1378,75 @@ impl Default for ChainSpec { } } +#[derive(arbitrary::Arbitrary, Serialize, Deserialize, Debug, PartialEq, Clone)] +#[serde(rename_all = "UPPERCASE")] +pub struct BPOFork { + epoch: Epoch, + #[serde(with = "serde_utils::quoted_u64")] + max_blobs_per_block: u64, +} + +// A wrapper around a vector of BPOFork to ensure that the vector is reverse +// sorted by epoch. 
+#[derive(arbitrary::Arbitrary, Serialize, Debug, PartialEq, Clone)] +pub struct BlobSchedule(Vec); + +impl<'de> Deserialize<'de> for BlobSchedule { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let vec = Vec::::deserialize(deserializer)?; + Ok(BlobSchedule::new(vec)) + } +} + +impl BlobSchedule { + pub fn new(mut vec: Vec) -> Self { + // reverse sort by epoch + vec.sort_by(|a, b| b.epoch.cmp(&a.epoch)); + Self(vec) + } + + pub fn is_empty(&self) -> bool { + self.0.is_empty() + } + + pub fn max_blobs_for_epoch(&self, epoch: Epoch) -> Option { + self.0 + .iter() + .find(|entry| epoch >= entry.epoch) + .map(|entry| entry.max_blobs_per_block) + } + + pub const fn default() -> Self { + // TODO(EIP-7892): think about what the default should be + Self(vec![]) + } + + pub fn as_vec(&self) -> &Vec { + &self.0 + } +} + +impl<'a> IntoIterator for &'a BlobSchedule { + type Item = &'a BPOFork; + type IntoIter = std::slice::Iter<'a, BPOFork>; + + fn into_iter(self) -> Self::IntoIter { + self.0.iter() + } +} + +impl IntoIterator for BlobSchedule { + type Item = BPOFork; + type IntoIter = std::vec::IntoIter; + + fn into_iter(self) -> Self::IntoIter { + self.0.into_iter() + } +} + /// Exact implementation of the *config* object from the Ethereum spec (YAML/JSON). 
/// /// Fields relevant to hard forks after Altair should be optional so that we can continue @@ -1557,9 +1647,9 @@ pub struct Config { #[serde(default = "default_custody_requirement")] #[serde(with = "serde_utils::quoted_u64")] custody_requirement: u64, - #[serde(default = "default_max_blobs_per_block_fulu")] - #[serde(with = "serde_utils::quoted_u64")] - max_blobs_per_block_fulu: u64, + #[serde(default = "BlobSchedule::default")] + #[serde(skip_serializing_if = "BlobSchedule::is_empty")] + blob_schedule: BlobSchedule, } fn default_bellatrix_fork_version() -> [u8; 4] { @@ -1697,10 +1787,6 @@ const fn default_max_blobs_per_block_electra() -> u64 { 9 } -const fn default_max_blobs_per_block_fulu() -> u64 { - 12 -} - const fn default_attestation_propagation_slot_range() -> u64 { 32 } @@ -1937,7 +2023,7 @@ impl Config { data_column_sidecar_subnet_count: spec.data_column_sidecar_subnet_count, samples_per_slot: spec.samples_per_slot, custody_requirement: spec.custody_requirement, - max_blobs_per_block_fulu: spec.max_blobs_per_block_fulu, + blob_schedule: spec.blob_schedule.clone(), } } @@ -2016,7 +2102,7 @@ impl Config { data_column_sidecar_subnet_count, samples_per_slot, custody_requirement, - max_blobs_per_block_fulu, + ref blob_schedule, } = self; if preset_base != E::spec_name().to_string().as_str() { @@ -2100,7 +2186,7 @@ impl Config { data_column_sidecar_subnet_count, samples_per_slot, custody_requirement, - max_blobs_per_block_fulu, + blob_schedule: blob_schedule.clone(), ..chain_spec.clone() }) @@ -2287,6 +2373,140 @@ mod yaml_tests { assert_eq!(from, yamlconfig); } + #[test] + fn blob_schedule_max_blobs_per_block() { + let spec_contents = r#" + PRESET_BASE: 'mainnet' + MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 384 + MIN_GENESIS_TIME: 1748264340 + GENESIS_FORK_VERSION: 0x10355025 + GENESIS_DELAY: 60 + SECONDS_PER_SLOT: 12 + SECONDS_PER_ETH1_BLOCK: 12 + MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256 + SHARD_COMMITTEE_PERIOD: 256 + ETH1_FOLLOW_DISTANCE: 2048 + 
INACTIVITY_SCORE_BIAS: 4 + INACTIVITY_SCORE_RECOVERY_RATE: 16 + EJECTION_BALANCE: 16000000000 + MIN_PER_EPOCH_CHURN_LIMIT: 4 + CHURN_LIMIT_QUOTIENT: 65536 + MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT: 8 + PROPOSER_SCORE_BOOST: 40 + REORG_HEAD_WEIGHT_THRESHOLD: 20 + REORG_PARENT_WEIGHT_THRESHOLD: 160 + REORG_MAX_EPOCHS_SINCE_FINALIZATION: 2 + DEPOSIT_CHAIN_ID: 7042643276 + DEPOSIT_NETWORK_ID: 7042643276 + DEPOSIT_CONTRACT_ADDRESS: 0x00000000219ab540356cBB839Cbe05303d7705Fa + + ALTAIR_FORK_VERSION: 0x20355025 + ALTAIR_FORK_EPOCH: 0 + BELLATRIX_FORK_VERSION: 0x30355025 + BELLATRIX_FORK_EPOCH: 0 + CAPELLA_FORK_VERSION: 0x40355025 + CAPELLA_FORK_EPOCH: 0 + DENEB_FORK_VERSION: 0x50355025 + DENEB_FORK_EPOCH: 64 + ELECTRA_FORK_VERSION: 0x60355025 + ELECTRA_FORK_EPOCH: 128 + FULU_FORK_VERSION: 0x70355025 + FULU_FORK_EPOCH: 256 + BLOB_SCHEDULE: + - EPOCH: 512 + MAX_BLOBS_PER_BLOCK: 12 + - EPOCH: 768 + MAX_BLOBS_PER_BLOCK: 15 + - EPOCH: 1024 + MAX_BLOBS_PER_BLOCK: 18 + - EPOCH: 1280 + MAX_BLOBS_PER_BLOCK: 9 + - EPOCH: 1584 + MAX_BLOBS_PER_BLOCK: 20 + "#; + let config: Config = + serde_yaml::from_str(spec_contents).expect("error while deserializing"); + let spec = + ChainSpec::from_config::(&config).expect("error while creating spec"); + + // test out max_blobs_per_block(epoch) + assert_eq!( + spec.max_blobs_per_block(Epoch::new(64)), + default_max_blobs_per_block() + ); + assert_eq!( + spec.max_blobs_per_block(Epoch::new(127)), + default_max_blobs_per_block() + ); + assert_eq!( + spec.max_blobs_per_block(Epoch::new(128)), + default_max_blobs_per_block_electra() + ); + assert_eq!( + spec.max_blobs_per_block(Epoch::new(255)), + default_max_blobs_per_block_electra() + ); + assert_eq!( + spec.max_blobs_per_block(Epoch::new(256)), + default_max_blobs_per_block_electra() + ); + assert_eq!( + spec.max_blobs_per_block(Epoch::new(511)), + default_max_blobs_per_block_electra() + ); + assert_eq!(spec.max_blobs_per_block(Epoch::new(512)), 12); + 
assert_eq!(spec.max_blobs_per_block(Epoch::new(767)), 12); + assert_eq!(spec.max_blobs_per_block(Epoch::new(768)), 15); + assert_eq!(spec.max_blobs_per_block(Epoch::new(1023)), 15); + assert_eq!(spec.max_blobs_per_block(Epoch::new(1024)), 18); + assert_eq!(spec.max_blobs_per_block(Epoch::new(1279)), 18); + assert_eq!(spec.max_blobs_per_block(Epoch::new(1280)), 9); + assert_eq!(spec.max_blobs_per_block(Epoch::new(1583)), 9); + assert_eq!(spec.max_blobs_per_block(Epoch::new(1584)), 20); + assert_eq!( + spec.max_blobs_per_block(Epoch::new(18446744073709551615)), + 20 + ); + + // blob schedule is reverse sorted by epoch + assert_eq!( + config.blob_schedule.as_vec(), + &vec![ + BPOFork { + epoch: Epoch::new(1584), + max_blobs_per_block: 20 + }, + BPOFork { + epoch: Epoch::new(1280), + max_blobs_per_block: 9 + }, + BPOFork { + epoch: Epoch::new(1024), + max_blobs_per_block: 18 + }, + BPOFork { + epoch: Epoch::new(768), + max_blobs_per_block: 15 + }, + BPOFork { + epoch: Epoch::new(512), + max_blobs_per_block: 12 + }, + ] + ); + + // test max_blobs_per_block_within_fork + assert_eq!( + spec.max_blobs_per_block_within_fork(ForkName::Deneb), + default_max_blobs_per_block() + ); + assert_eq!( + spec.max_blobs_per_block_within_fork(ForkName::Electra), + default_max_blobs_per_block_electra() + ); + assert_eq!(spec.max_blobs_per_block_within_fork(ForkName::Fulu), 20); + } + #[test] fn apply_to_spec() { let mut spec = ChainSpec::minimal(); From f67068e1ec539e37599acac1ef6fd52fae1f0957 Mon Sep 17 00:00:00 2001 From: chonghe <44791194+chong-he@users.noreply.github.com> Date: Tue, 3 Jun 2025 12:13:57 +0800 Subject: [PATCH 12/13] Update `staking-deposit-cli` to `ethstaker-deposit-cli` (#7518) --- account_manager/src/validator/import.rs | 2 +- book/src/archived_key_management.md | 2 +- book/src/faq.md | 2 +- book/src/help_vm.md | 2 +- book/src/help_vm_create.md | 2 +- book/src/help_vm_import.md | 3 +-- book/src/installation_homebrew.md | 3 +++ book/src/mainnet_validator.md | 14 
+++++++------- book/src/validator_manager.md | 2 +- book/src/validator_manager_api.md | 4 ++-- book/src/validator_manager_create.md | 4 ++-- book/src/validator_slashing_protection.md | 2 +- book/src/validator_voluntary_exit.md | 2 +- common/account_utils/src/validator_definitions.rs | 4 ++-- validator_manager/src/common.rs | 8 ++++---- validator_manager/src/create_validators.rs | 4 ++-- validator_manager/src/import_validators.rs | 2 +- validator_manager/test_vectors/generate.py | 8 ++++---- wordlist.txt | 2 +- 19 files changed, 37 insertions(+), 35 deletions(-) diff --git a/account_manager/src/validator/import.rs b/account_manager/src/validator/import.rs index 4d2353b553..b985484d11 100644 --- a/account_manager/src/validator/import.rs +++ b/account_manager/src/validator/import.rs @@ -32,7 +32,7 @@ pub fn cli_app() -> Command { .about( "Imports one or more EIP-2335 passwords into a Lighthouse VC directory, \ requesting passwords interactively. The directory flag provides a convenient \ - method for importing a directory of keys generated by the eth2-deposit-cli \ + method for importing a directory of keys generated by the ethstaker-deposit-cli \ Python utility.", ) .arg( diff --git a/book/src/archived_key_management.md b/book/src/archived_key_management.md index d8b00e8352..ad285ac4ec 100644 --- a/book/src/archived_key_management.md +++ b/book/src/archived_key_management.md @@ -21,7 +21,7 @@ using Lighthouse. Rather than continuing to read this page, we recommend users visit either: - The [Staking Launchpad][launchpad] for detailed, beginner-friendly instructions. -- The [staking-deposit-cli](https://github.com/ethereum/staking-deposit-cli) for a CLI tool used by the [Staking Launchpad][launchpad]. +- The [ethstaker-deposit-cli](https://github.com/eth-educators/ethstaker-deposit-cli/releases) for a CLI tool used by the [Staking Launchpad][launchpad]. 
- The [validator-manager documentation](./validator_manager.md) for a Lighthouse-specific tool for streamlined validator management tools. ## The `lighthouse account-manager` diff --git a/book/src/faq.md b/book/src/faq.md index b97a82fcca..27726e59a5 100644 --- a/book/src/faq.md +++ b/book/src/faq.md @@ -209,7 +209,7 @@ The first thing is to ensure both consensus and execution clients are synced wit - the internet is working well - you have sufficient peers -You can see more information on the [Ethstaker KB](https://ethstaker.gitbook.io/ethstaker-knowledge-base/help/missed-attestations). +You can see more information on the [EthStaker KB](https://ethstaker.gitbook.io/ethstaker-knowledge-base/help/missed-attestations). Another cause for missing attestations is the block arriving late, or there are delays during block processing. diff --git a/book/src/help_vm.md b/book/src/help_vm.md index 85e1a1168f..8ff54122ef 100644 --- a/book/src/help_vm.md +++ b/book/src/help_vm.md @@ -12,7 +12,7 @@ Commands: data. This file can then be imported to a validator client using the "import-validators" command. Another, optional JSON file is created which contains a list of validator deposits in the same format as the - "ethereum/staking-deposit-cli" tool. + "ethstaker-deposit-cli" tool. import Uploads validators to a validator client using the HTTP API. The validators are defined in a JSON file which can be generated using the diff --git a/book/src/help_vm_create.md b/book/src/help_vm_create.md index 3b88206397..96ae261252 100644 --- a/book/src/help_vm_create.md +++ b/book/src/help_vm_create.md @@ -5,7 +5,7 @@ Creates new validators from BIP-39 mnemonic. A JSON file will be created which contains all the validator keystores and other validator data. This file can then be imported to a validator client using the "import-validators" command. Another, optional JSON file is created which contains a list of validator -deposits in the same format as the "ethereum/staking-deposit-cli" tool. 
+deposits in the same format as the "ethstaker-deposit-cli" tool. Usage: lighthouse validator_manager create [OPTIONS] --output-path diff --git a/book/src/help_vm_import.md b/book/src/help_vm_import.md index 63cca91ee5..ca635be5f1 100644 --- a/book/src/help_vm_import.md +++ b/book/src/help_vm_import.md @@ -39,8 +39,7 @@ Options: [default: 300] --keystore-file The path to a keystore JSON file to be imported to the validator - client. This file is usually created using staking-deposit-cli or - ethstaker-deposit-cli + client. This file is usually created using ethstaker-deposit-cli --log-format Specifies the log format used when emitting logs to the terminal. [possible values: JSON] diff --git a/book/src/installation_homebrew.md b/book/src/installation_homebrew.md index f94764889e..9d33bfb3eb 100644 --- a/book/src/installation_homebrew.md +++ b/book/src/installation_homebrew.md @@ -5,6 +5,9 @@ Lighthouse is available on Linux and macOS via the [Homebrew package manager](ht Please note that this installation method is maintained by the Homebrew community. It is not officially supported by the Lighthouse team. +> Note: There is a [compilation error](https://github.com/Homebrew/homebrew-core/pull/220922) for Lighthouse v7.0.0 and above that remains unresolved. Users are recommended to download the binary from [the release +page](https://github.com/sigp/lighthouse/releases) or build from source. + ## Installation Install the latest version of the [`lighthouse`][formula] formula with: diff --git a/book/src/mainnet_validator.md b/book/src/mainnet_validator.md index 8da8b98f89..106461aa9b 100644 --- a/book/src/mainnet_validator.md +++ b/book/src/mainnet_validator.md @@ -42,7 +42,7 @@ hardware. 32 ETH is a significant outlay and joining a testnet is a great way to ### Step 1. Create validator keys -The Ethereum Foundation provides the [staking-deposit-cli](https://github.com/ethereum/staking-deposit-cli/releases) for creating validator keys. 
Download and run the `staking-deposit-cli` with the command: +EthStaker provides the [ethstaker-deposit-cli](https://github.com/eth-educators/ethstaker-deposit-cli/releases) for creating validator keys. Download and run the `ethstaker-deposit-cli` with the command: ```bash ./deposit new-mnemonic @@ -52,7 +52,7 @@ and follow the instructions to generate the keys. When prompted for a network, s > **Important note:** A mnemonic (or seed phrase) is a 24-word string randomly generated in the process. It is highly recommended to write down the mnemonic and keep it safe offline. It is important to ensure that the mnemonic is never stored in any digital form (computers, mobile phones, etc) connected to the internet. Please also make one or more backups of the mnemonic to ensure your ETH is not lost in the case of data loss. It is very important to keep your mnemonic private as it represents the ultimate control of your ETH. -Upon completing this step, the files `deposit_data-*.json` and `keystore-m_*.json` will be created. The keys that are generated from staking-deposit-cli can be easily loaded into a Lighthouse validator client (`lighthouse vc`) in [Step 3](#step-3-import-validator-keys-to-lighthouse). In fact, both of these programs are designed to work with each other. +Upon completing this step, the files `deposit_data-*.json` and `keystore-m_*.json` will be created. The keys that are generated from `ethstaker-deposit-cli` can be easily loaded into a Lighthouse validator client (`lighthouse vc`) in [Step 3](#step-3-import-validator-keys-to-lighthouse). In fact, both of these programs are designed to work with each other. > Lighthouse also supports creating validator keys, see [Validator Manager Create](./validator_manager_create.md) for more info. @@ -62,19 +62,19 @@ Start an execution client and Lighthouse beacon node according to the [Run a Nod ### Step 3. 
Import validator keys to Lighthouse -In [Step 1](#step-1-create-validator-keys), the staking-deposit-cli will generate the validator keys into a `validator_keys` directory. Let's assume that -this directory is `$HOME/staking-deposit-cli/validator_keys`. Using the default `validators` directory in Lighthouse (`~/.lighthouse/mainnet/validators`), run the following command to import validator keys: +In [Step 1](#step-1-create-validator-keys), the `ethstaker-deposit-cli` will generate the validator keys into a `validator_keys` directory. Let's assume that +this directory is `$HOME/ethstaker-deposit-cli/validator_keys`. Using the default `validators` directory in Lighthouse (`~/.lighthouse/mainnet/validators`), run the following command to import validator keys: Mainnet: ```bash -lighthouse --network mainnet account validator import --directory $HOME/staking-deposit-cli/validator_keys +lighthouse --network mainnet account validator import --directory $HOME/ethstaker-deposit-cli/validator_keys ``` Hoodi testnet: ```bash -lighthouse --network hoodi account validator import --directory $HOME/staking-deposit-cli/validator_keys +lighthouse --network hoodi account validator import --directory $HOME/ethstaker-deposit-cli/validator_keys ``` > Note: The user must specify the consensus client network that they are importing the keys by using the `--network` flag. 
@@ -88,7 +88,7 @@ lighthouse --network hoodi account validator import --directory $HOME/staking-de The user will be prompted for a password for each keystore discovered: ``` -Keystore found at "/home/{username}/staking-deposit-cli/validator_keys/keystore-m_12381_3600_0_0_0-1595406747.json": +Keystore found at "/home/{username}/ethstaker-deposit-cli/validator_keys/keystore-m_12381_3600_0_0_0-1595406747.json": - Public key: 0xa5e8702533f6d66422e042a0bf3471ab9b302ce115633fa6fdc5643f804b6b4f1c33baf95f125ec21969a3b1e0dd9e56 - UUID: 8ea4cf99-8719-43c5-9eda-e97b8a4e074f diff --git a/book/src/validator_manager.md b/book/src/validator_manager.md index c610340b39..b0190c1812 100644 --- a/book/src/validator_manager.md +++ b/book/src/validator_manager.md @@ -15,7 +15,7 @@ except the latter creates files that will be read by the VC next time it starts whilst the former makes instant changes to a live VC. The `account-manager` is ideal for importing keys created with the -[staking-deposit-cli](https://github.com/ethereum/staking-deposit-cli). On the +[ethstaker-deposit-cli](https://github.com/eth-educators/ethstaker-deposit-cli). On the other hand, the `validator-manager` is ideal for moving existing validators between two VCs or for advanced users to create validators at scale with less downtime. diff --git a/book/src/validator_manager_api.md b/book/src/validator_manager_api.md index a5fc69fd5a..7bc5be8557 100644 --- a/book/src/validator_manager_api.md +++ b/book/src/validator_manager_api.md @@ -1,6 +1,6 @@ # Managing Validators -The `lighthouse validator-manager` uses the [Keymanager API](https://ethereum.github.io/keymanager-APIs/#/) to list, import and delete keystores via the HTTP API. This requires the validator client running with the flag `--http`. +The `lighthouse validator-manager` uses the [Keymanager API](https://ethereum.github.io/keymanager-APIs/#/) to list, import and delete keystores via the HTTP API. 
This requires the validator client running with the flag `--http`. By default, the validator client HTTP address is `http://localhost:5062`. If a different IP address or port is used, add the flag `--vc-url http://IP:port_number` to the command below. ## Delete @@ -18,7 +18,7 @@ lighthouse vm delete --vc-token ~/.lighthouse/mainnet/validators/api-token.txt - ## Import -The `import` command imports validator keystores generated by the staking-deposit-cli/ethstaker-deposit-cli. To import a validator keystore: +The `import` command imports validator keystores generated by the `ethstaker-deposit-cli`. To import a validator keystore: ```bash lighthouse vm import --vc-token --keystore-file /path/to/json --password keystore_password diff --git a/book/src/validator_manager_create.md b/book/src/validator_manager_create.md index 458907bc65..ae40910d5c 100644 --- a/book/src/validator_manager_create.md +++ b/book/src/validator_manager_create.md @@ -8,7 +8,7 @@ mnemonic and produces two files: - `validators.json`: the keystores and passwords for the newly generated validators, in JSON format. - `deposits.json`: a JSON file of the same format as - [staking-deposit-cli](https://github.com/ethereum/staking-deposit-cli) which can + [ethstaker-deposit-cli](https://github.com/eth-educators/ethstaker-deposit-cli) which can be used for deposit submission via the [Ethereum Staking Launchpad][]. @@ -69,7 +69,7 @@ lighthouse \ > Be sure to remove `./validators.json` after the import is successful since it > contains unencrypted validator keystores. -> Note: To import validators with validator-manager using keystore files created using the staking deposit CLI, refer to [Managing Validators](./validator_manager_api.md#import). +> Note: To import validators with validator-manager using keystore files created using the `ethstaker-deposit-cli`, refer to [Managing Validators](./validator_manager_api.md#import). 
## Detailed Guide diff --git a/book/src/validator_slashing_protection.md b/book/src/validator_slashing_protection.md index 3e0fe184e5..03e54e5827 100644 --- a/book/src/validator_slashing_protection.md +++ b/book/src/validator_slashing_protection.md @@ -21,7 +21,7 @@ and carefully to keep your validators safe. See the [Troubleshooting](#troublesh The database will be automatically created, and your validators registered with it when: -* Importing keys from another source (e.g. [staking-deposit-cli](https://github.com/ethereum/staking-deposit-cli/releases), Lodestar, Nimbus, Prysm, Teku, [ethdo](https://github.com/wealdtech/ethdo)). +* Importing keys from another source (e.g. [ethstaker-deposit-cli](https://github.com/eth-educators/ethstaker-deposit-cli), Lodestar, Nimbus, Prysm, Teku, [ethdo](https://github.com/wealdtech/ethdo)). See [import validator keys](./mainnet_validator.md#step-3-import-validator-keys-to-lighthouse). * Creating keys using Lighthouse itself (`lighthouse account validator create`) * Creating keys via the [validator client API](./api_vc.md). diff --git a/book/src/validator_voluntary_exit.md b/book/src/validator_voluntary_exit.md index d5d1722d59..2a45852f32 100644 --- a/book/src/validator_voluntary_exit.md +++ b/book/src/validator_voluntary_exit.md @@ -94,7 +94,7 @@ After the [Capella](https://ethereum.org/en/history/#capella) upgrade on 12 There are two types of withdrawal credentials, `0x00` and `0x01`. To check which type your validator has, go to [Staking launchpad](https://launchpad.ethereum.org/en/withdrawals), enter your validator index and click `verify on mainnet`: - `withdrawals enabled` means your validator is of type `0x01`, and you will automatically receive the full withdrawal to the withdrawal address that you set. 
-- `withdrawals not enabled` means your validator is of type `0x00`, and will need to update your withdrawal credentials from `0x00` type to `0x01` type (also known as BLS-to-execution-change, or BTEC) to receive the staked funds. The common way to do this is using `Staking deposit CLI` or `ethdo`, with the instructions available [here](https://launchpad.ethereum.org/en/withdrawals#update-your-keys). +- `withdrawals not enabled` means your validator is of type `0x00`, and you will need to update your withdrawal credentials from `0x00` type to `0x01` type (also known as BLS-to-execution-change, or BTEC) to receive the staked funds. The common way to do this is using `ethstaker-deposit-cli` or `ethdo`, with the instructions available [here](https://launchpad.ethereum.org/en/withdrawals#update-your-keys). ### 2. What if my validator is of type `0x00` and I do not update my withdrawal credentials after I initiated a voluntary exit? diff --git a/common/account_utils/src/validator_definitions.rs b/common/account_utils/src/validator_definitions.rs index 4c253283fe..5f32645c92 100644 --- a/common/account_utils/src/validator_definitions.rs +++ b/common/account_utils/src/validator_definitions.rs @@ -450,11 +450,11 @@ pub fn is_voting_keystore(file_name: &str) -> bool { return true; } - // The format exported by the `eth2.0-deposit-cli` library. + // The format exported by the `ethstaker-deposit-cli` library. // // Reference to function that generates keystores: // - // https://github.com/ethereum/eth2.0-deposit-cli/blob/7cebff15eac299b3b1b090c896dd3410c8463450/eth2deposit/credentials.py#L58-L62 + // https://github.com/eth-educators/ethstaker-deposit-cli/blob/80d536374de838ccae142974ed0e747b46beb030/ethstaker_deposit/credentials.py#L186-L190 // // Since we include the key derivation path of `m/12381/3600/x/0/0` this should only ever match // with a voting keystore and never a withdrawal keystore.
diff --git a/validator_manager/src/common.rs b/validator_manager/src/common.rs index cc4157990f..715f1068f0 100644 --- a/validator_manager/src/common.rs +++ b/validator_manager/src/common.rs @@ -19,9 +19,9 @@ use zeroize::Zeroizing; pub const IGNORE_DUPLICATES_FLAG: &str = "ignore-duplicates"; pub const COUNT_FLAG: &str = "count"; -/// When the `ethereum/staking-deposit-cli` tool generates deposit data JSON, it adds a +/// When the `ethstaker-deposit-cli` tool generates deposit data JSON, it adds a /// `deposit_cli_version` to protect the web-based "Launchpad" tool against a breaking change that -/// was introduced in `ethereum/staking-deposit-cli`. Lighthouse don't really have a version that it +/// was introduced in `ethstaker-deposit-cli`. Lighthouse doesn't really have a version that it /// can use here, so we choose a static string that is: /// /// 1. High enough that it's accepted by Launchpad. @@ -163,12 +163,12 @@ pub struct CreateSpec { pub validators: Vec, } -/// The structure generated by the `staking-deposit-cli` which has become a quasi-standard for +/// The structure generated by the `ethstaker-deposit-cli` which has become a quasi-standard for /// browser-based deposit submission tools (e.g., the Ethereum Launchpad and Lido).
/// /// We assume this code as the canonical definition: /// -/// https://github.com/ethereum/staking-deposit-cli/blob/76ed78224fdfe3daca788d12442b3d1a37978296/staking_deposit/credentials.py#L131-L144 +/// https://github.com/eth-educators/ethstaker-deposit-cli/blob/80d536374de838ccae142974ed0e747b46beb030/ethstaker_deposit/credentials.py#L164-L177 #[derive(Debug, PartialEq, Serialize, Deserialize)] pub struct StandardDepositDataJson { #[serde(with = "public_key_bytes_without_0x_prefix")] diff --git a/validator_manager/src/create_validators.rs b/validator_manager/src/create_validators.rs index 07578033cd..c21ebeabf8 100644 --- a/validator_manager/src/create_validators.rs +++ b/validator_manager/src/create_validators.rs @@ -43,7 +43,7 @@ pub fn cli_app() -> Command { contains all the validator keystores and other validator data. This file can then \ be imported to a validator client using the \"import-validators\" command. \ Another, optional JSON file is created which contains a list of validator \ - deposits in the same format as the \"ethereum/staking-deposit-cli\" tool.", + deposits in the same format as the \"ethstaker-deposit-cli\" tool.", ) .arg( Arg::new(OUTPUT_PATH_FLAG) @@ -487,7 +487,7 @@ impl ValidatorsAndDeposits { }; // Create a JSON structure equivalent to the one generated by - // `ethereum/staking-deposit-cli`. + // `ethstaker-deposit-cli`. let json_deposit = StandardDepositDataJson::new( &voting_keypair, withdrawal_credentials.into(), diff --git a/validator_manager/src/import_validators.rs b/validator_manager/src/import_validators.rs index 63c7ca4596..6cfbf7b54e 100644 --- a/validator_manager/src/import_validators.rs +++ b/validator_manager/src/import_validators.rs @@ -55,7 +55,7 @@ pub fn cli_app() -> Command { .help( "The path to a keystore JSON file to be \ imported to the validator client. 
This file is usually created \ - using staking-deposit-cli or ethstaker-deposit-cli", + using ethstaker-deposit-cli", ) .action(ArgAction::Set) .display_order(0) diff --git a/validator_manager/test_vectors/generate.py b/validator_manager/test_vectors/generate.py index 8bf7f5f52d..45bb408eb1 100644 --- a/validator_manager/test_vectors/generate.py +++ b/validator_manager/test_vectors/generate.py @@ -1,4 +1,4 @@ -# This script uses the `ethereum/staking-deposit-cli` tool to generate +# This script uses the `ethstaker-deposit-cli` tool to generate # deposit data files which are then used for testing by Lighthouse. # # To generate vectors, run this Python script: @@ -6,7 +6,7 @@ # `python generate.py` # # This script was last run on Linux using Python v3.10.4. Python v3.11.0 was not working at time -# of writing due to dependency issues in `staking-deposit-cli`. You should probably use `pyenv` and +# of writing due to dependency issues in `ethstaker-deposit-cli`. You should probably use `pyenv` and # `virtualenv`. 
import os import sys @@ -23,7 +23,7 @@ WALLET_NAME="test_wallet" tmp_dir = os.path.join(".", "tmp") mnemonic_path = os.path.join(tmp_dir, "mnemonic.txt") sdc_dir = os.path.join(tmp_dir, "sdc") -sdc_git_dir = os.path.join(sdc_dir, "staking-deposit-cli") +sdc_git_dir = os.path.join(sdc_dir, "ethstaker-deposit-cli") vectors_dir = os.path.join(".", "vectors") @@ -59,7 +59,7 @@ def setup_sdc(): "git", "clone", "--single-branch", - "https://github.com/ethereum/staking-deposit-cli.git", + "https://github.com/eth-educators/ethstaker-deposit-cli.git", str(sdc_git_dir) ]) assert(result.returncode == 0) diff --git a/wordlist.txt b/wordlist.txt index e0d1afdf7c..3c7070c642 100644 --- a/wordlist.txt +++ b/wordlist.txt @@ -34,7 +34,7 @@ Esat's ETH EthDocker Ethereum -Ethstaker +EthStaker Exercism Extractable FFG From cd83d8d95dddd7d965999b8a7fd940ed17bbfc18 Mon Sep 17 00:00:00 2001 From: Akihito Nakano Date: Tue, 3 Jun 2025 14:08:03 +0900 Subject: [PATCH 13/13] Add a name to the Tokio task (#7544) The `console-subscriber` feature was added in https://github.com/sigp/lighthouse/pull/7529. However, the names of the running tasks are blank: image Set the task name using `tokio::task::Builder`, which is available when `tokio_unstable` is enabled.
image --- common/task_executor/Cargo.toml | 3 +++ common/task_executor/src/lib.rs | 30 ++++++++++++++++++++++++++---- 2 files changed, 29 insertions(+), 4 deletions(-) diff --git a/common/task_executor/Cargo.toml b/common/task_executor/Cargo.toml index 4224f00acc..d4faf1e4b8 100644 --- a/common/task_executor/Cargo.toml +++ b/common/task_executor/Cargo.toml @@ -10,3 +10,6 @@ futures = { workspace = true } metrics = { workspace = true } tokio = { workspace = true, features = ["rt-multi-thread", "macros"] } tracing = { workspace = true } + +[lints.rust] +unexpected_cfgs = { level = "warn", check-cfg = ["cfg(tokio_unstable)"] } diff --git a/common/task_executor/src/lib.rs b/common/task_executor/src/lib.rs index dbdac600f3..b47e30cb68 100644 --- a/common/task_executor/src/lib.rs +++ b/common/task_executor/src/lib.rs @@ -144,7 +144,7 @@ impl TaskExecutor { ) { let mut shutdown_sender = self.shutdown_sender(); if let Some(handle) = self.handle() { - handle.spawn(async move { + let fut = async move { let timer = metrics::start_timer_vec(&metrics::TASKS_HISTOGRAM, &[name]); if let Err(join_error) = task_handle.await { if let Ok(_panic) = join_error.try_into_panic() { @@ -153,7 +153,14 @@ impl TaskExecutor { } } drop(timer); - }); + }; + #[cfg(tokio_unstable)] + tokio::task::Builder::new() + .name(&format!("{name}-monitor")) + .spawn_on(fut, &handle) + .expect("Failed to spawn monitor task"); + #[cfg(not(tokio_unstable))] + handle.spawn(fut); } else { debug!("Couldn't spawn monitor task. Runtime shutting down") } @@ -199,6 +206,12 @@ impl TaskExecutor { int_gauge.inc(); if let Some(handle) = self.handle() { + #[cfg(tokio_unstable)] + tokio::task::Builder::new() + .name(name) + .spawn_on(future, &handle) + .expect("Failed to spawn task"); + #[cfg(not(tokio_unstable))] handle.spawn(future); } else { debug!("Couldn't spawn task. 
Runtime shutting down"); @@ -234,7 +247,7 @@ impl TaskExecutor { let int_gauge_1 = int_gauge.clone(); int_gauge.inc(); if let Some(handle) = self.handle() { - Some(handle.spawn(async move { + let fut = async move { futures::pin_mut!(exit); let result = match future::select(Box::pin(task), exit).await { future::Either::Left((value, _)) => Some(value), @@ -245,7 +258,16 @@ impl TaskExecutor { }; int_gauge_1.dec(); result - })) + }; + #[cfg(tokio_unstable)] + return Some( + tokio::task::Builder::new() + .name(name) + .spawn_on(fut, &handle) + .expect("Failed to spawn task"), + ); + #[cfg(not(tokio_unstable))] + Some(handle.spawn(fut)) } else { debug!("Couldn't spawn task. Runtime shutting down"); None