From 62a39af19ef2e530d2c6f2928899679c5653b992 Mon Sep 17 00:00:00 2001 From: Akihito Nakano Date: Thu, 25 Jul 2024 21:08:59 +0900 Subject: [PATCH 01/43] Fix unexpected `Marking peer disconnected in DHT` (#6140) * Don't disconnect peer in DHT if there's an active connection * Merge branch 'unstable' into dont-disconnect-if-active-connection --- beacon_node/lighthouse_network/src/discovery/mod.rs | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index 017db26049..865d707495 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -1166,8 +1166,19 @@ impl Discovery { fn on_dial_failure(&mut self, peer_id: Option, error: &DialError) { if let Some(peer_id) = peer_id { match error { + DialError::Denied { .. } => { + if self.network_globals.peers.read().is_connected(&peer_id) { + // There's an active connection, so we don’t disconnect the peer. + // Lighthouse dials to a peer twice using TCP and QUIC (if QUIC is not + // disabled). Usually, one establishes a connection, and the other fails + // because the peer allows only one connection per peer. + return; + } + // set peer as disconnected in discovery DHT + debug!(self.log, "Marking peer disconnected in DHT"; "peer_id" => %peer_id, "error" => %ClearDialError(error)); + self.disconnect_peer(&peer_id); + } DialError::LocalPeerId { .. } - | DialError::Denied { .. } | DialError::NoAddresses | DialError::Transport(_) | DialError::WrongPeerId { .. 
} => { From cc55e610b9ce9c54b5d699e08f05a700d4af88ad Mon Sep 17 00:00:00 2001 From: Mac L Date: Thu, 25 Jul 2024 19:56:22 +0400 Subject: [PATCH 02/43] Rust 1.80.0 lints (#6183) * Fix lints --- beacon_node/beacon_chain/src/beacon_chain.rs | 2 +- .../beacon_chain/src/block_verification.rs | 2 ++ beacon_node/beacon_chain/src/test_utils.rs | 4 ++-- beacon_node/eth1/src/block_cache.rs | 2 +- beacon_node/genesis/src/eth1_genesis_service.rs | 2 +- .../lighthouse_network/gossipsub/Cargo.toml | 1 + .../lighthouse_network/gossipsub/src/lib.rs | 16 ++++++++-------- .../lighthouse_network/src/peer_manager/mod.rs | 6 +++--- .../network/src/sync/block_lookups/mod.rs | 1 + beacon_node/network/src/sync/manager.rs | 8 ++++---- beacon_node/network/src/sync/range_sync/range.rs | 2 +- beacon_node/operation_pool/src/max_cover.rs | 2 +- common/lighthouse_metrics/src/lib.rs | 4 ++-- common/logging/src/async_record.rs | 2 +- common/validator_dir/src/lib.rs | 2 +- consensus/proto_array/src/proto_array.rs | 2 +- .../proto_array/src/proto_array_fork_choice.rs | 2 +- .../block_signature_verifier.rs | 2 +- consensus/swap_or_not_shuffle/src/lib.rs | 4 ++-- consensus/types/src/shuffling_id.rs | 2 +- lighthouse/tests/beacon_node.rs | 2 +- slasher/src/database.rs | 16 ++++++++-------- validator_client/src/http_api/api_secret.rs | 4 ++-- validator_client/src/validator_store.rs | 2 +- watch/src/updater/handler.rs | 8 ++++---- 25 files changed, 52 insertions(+), 48 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 1fa77a2043..f795128b71 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -1450,7 +1450,7 @@ impl BeaconChain { /// Returns the `BeaconState` the current slot (viz., `self.slot()`). /// /// - A reference to the head state (note: this keeps a read lock on the head, try to use - /// sparingly). + /// sparingly). 
/// - The head state, but with skipped slots (for states later than the head). /// /// Returns `None` when there is an error skipping to a future state or the slot clock cannot diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 5ae98cefbe..c71a2bcab3 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -300,7 +300,9 @@ pub enum BlockError { /// 1. The block proposer is faulty /// 2. We received the blob over rpc and it is invalid (inconsistent w.r.t the block). /// 3. It is an internal error + /// /// For all these cases, we cannot penalize the peer that gave us the block. + /// /// TODO: We may need to penalize the peer that gave us a potentially invalid rpc blob. /// https://github.com/sigp/lighthouse/issues/4546 AvailabilityCheck(AvailabilityCheckError), diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 6b85d7aadf..4e33f1661b 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -2511,9 +2511,9 @@ where /// Creates two forks: /// /// - The "honest" fork: created by the `honest_validators` who have built `honest_fork_blocks` - /// on the head + /// on the head /// - The "faulty" fork: created by the `faulty_validators` who skipped a slot and - /// then built `faulty_fork_blocks`. + /// then built `faulty_fork_blocks`. /// /// Returns `(honest_head, faulty_head)`, the roots of the blocks at the top of each chain. pub async fn generate_two_forks_by_skipping_a_block( diff --git a/beacon_node/eth1/src/block_cache.rs b/beacon_node/eth1/src/block_cache.rs index 399634a9fa..0ccdb4fc0e 100644 --- a/beacon_node/eth1/src/block_cache.rs +++ b/beacon_node/eth1/src/block_cache.rs @@ -135,7 +135,7 @@ impl BlockCache { /// /// - If the cache is not empty and `item.block.block_number - 1` is not already in `self`. 
/// - If `item.block.block_number` is in `self`, but is not identical to the supplied - /// `Eth1Snapshot`. + /// `Eth1Snapshot`. /// - If `item.block.timestamp` is prior to the parent. pub fn insert_root_or_child(&mut self, block: Eth1Block) -> Result<(), Error> { let expected_block_number = self diff --git a/beacon_node/genesis/src/eth1_genesis_service.rs b/beacon_node/genesis/src/eth1_genesis_service.rs index 7015705027..9a4f85c064 100644 --- a/beacon_node/genesis/src/eth1_genesis_service.rs +++ b/beacon_node/genesis/src/eth1_genesis_service.rs @@ -352,7 +352,7 @@ impl Eth1GenesisService { /// /// - `Ok(genesis_state)`: if all went well. /// - `Err(e)`: if the given `eth1_block` was not a viable block to trigger genesis or there was - /// an internal error. + /// an internal error. fn genesis_from_eth1_block( &self, eth1_block: Eth1Block, diff --git a/beacon_node/lighthouse_network/gossipsub/Cargo.toml b/beacon_node/lighthouse_network/gossipsub/Cargo.toml index 56c42d2992..ef8fb1b12b 100644 --- a/beacon_node/lighthouse_network/gossipsub/Cargo.toml +++ b/beacon_node/lighthouse_network/gossipsub/Cargo.toml @@ -11,6 +11,7 @@ categories = ["network-programming", "asynchronous"] [features] wasm-bindgen = ["getrandom/js"] +rsa = [] [dependencies] async-channel = { workspace = true } diff --git a/beacon_node/lighthouse_network/gossipsub/src/lib.rs b/beacon_node/lighthouse_network/gossipsub/src/lib.rs index e825024cc7..1d29aaa759 100644 --- a/beacon_node/lighthouse_network/gossipsub/src/lib.rs +++ b/beacon_node/lighthouse_network/gossipsub/src/lib.rs @@ -43,16 +43,16 @@ //! implementations, due to undefined elements in the current specification. //! //! - **Topics** - In gossipsub, topics configurable by the `hash_topics` configuration parameter. -//! Topics are of type [`TopicHash`]. The current go implementation uses raw utf-8 strings, and this -//! is default configuration in rust-libp2p. Topics can be hashed (SHA256 hashed then base64 -//! 
encoded) by setting the `hash_topics` configuration parameter to true. +//! Topics are of type [`TopicHash`]. The current go implementation uses raw utf-8 strings, and this +//! is default configuration in rust-libp2p. Topics can be hashed (SHA256 hashed then base64 +//! encoded) by setting the `hash_topics` configuration parameter to true. //! //! - **Sequence Numbers** - A message on the gossipsub network is identified by the source -//! [`PeerId`](libp2p_identity::PeerId) and a nonce (sequence number) of the message. The sequence numbers in -//! this implementation are sent as raw bytes across the wire. They are 64-bit big-endian unsigned -//! integers. When messages are signed, they are monotonically increasing integers starting from a -//! random value and wrapping around u64::MAX. When messages are unsigned, they are chosen at random. -//! NOTE: These numbers are sequential in the current go implementation. +//! [`PeerId`](libp2p_identity::PeerId) and a nonce (sequence number) of the message. The sequence numbers in +//! this implementation are sent as raw bytes across the wire. They are 64-bit big-endian unsigned +//! integers. When messages are signed, they are monotonically increasing integers starting from a +//! random value and wrapping around u64::MAX. When messages are unsigned, they are chosen at random. +//! NOTE: These numbers are sequential in the current go implementation. //! //! # Peer Discovery //! diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index c86c2098d6..ea3b51092e 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -918,9 +918,9 @@ impl PeerManager { /// number should be set low as an absolute lower bound to maintain peers on the sync /// committees. /// - Do not prune trusted peers. 
NOTE: This means if a user has more trusted peers than the - /// excess peer limit, all of the following logic is subverted as we will not prune any peers. - /// Also, the more trusted peers a user has, the less room Lighthouse has to efficiently manage - /// its peers across the subnets. + /// excess peer limit, all of the following logic is subverted as we will not prune any peers. + /// Also, the more trusted peers a user has, the less room Lighthouse has to efficiently manage + /// its peers across the subnets. /// /// Prune peers in the following order: /// 1. Remove worst scoring peers diff --git a/beacon_node/network/src/sync/block_lookups/mod.rs b/beacon_node/network/src/sync/block_lookups/mod.rs index 0a44cf2fdf..3b93b8072c 100644 --- a/beacon_node/network/src/sync/block_lookups/mod.rs +++ b/beacon_node/network/src/sync/block_lookups/mod.rs @@ -214,6 +214,7 @@ impl BlockLookups { /// Check if this new lookup extends a bad chain: /// - Extending `child_block_root_trigger` would exceed the max depth /// - `block_root_to_search` is a failed chain + /// /// Returns true if the lookup is created or already exists pub fn search_parent_of_child( &mut self, diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index c9894c8b24..7149395839 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -448,12 +448,12 @@ impl SyncManager { /// /// The logic for which sync should be running is as follows: /// - If there is a range-sync running (or required) pause any backfill and let range-sync - /// complete. + /// complete. /// - If there is no current range sync, check for any requirement to backfill and either - /// start/resume a backfill sync if required. The global state will be BackFillSync if a - /// backfill sync is running. + /// start/resume a backfill sync if required. The global state will be BackFillSync if a + /// backfill sync is running. 
/// - If there is no range sync and no required backfill and we have synced up to the currently - /// known peers, we consider ourselves synced. + /// known peers, we consider ourselves synced. fn update_sync_state(&mut self) { let new_state: SyncState = match self.range_sync.state() { Err(e) => { diff --git a/beacon_node/network/src/sync/range_sync/range.rs b/beacon_node/network/src/sync/range_sync/range.rs index 4213771d48..334c58090e 100644 --- a/beacon_node/network/src/sync/range_sync/range.rs +++ b/beacon_node/network/src/sync/range_sync/range.rs @@ -22,7 +22,7 @@ //! - Only one finalized chain can sync at a time //! - The finalized chain with the largest peer pool takes priority. //! - As one finalized chain completes, others are checked to see if we they can be continued, -//! otherwise they are removed. +//! otherwise they are removed. //! //! ## Head Chain Sync //! diff --git a/beacon_node/operation_pool/src/max_cover.rs b/beacon_node/operation_pool/src/max_cover.rs index b4a95b1de0..5dbbe32838 100644 --- a/beacon_node/operation_pool/src/max_cover.rs +++ b/beacon_node/operation_pool/src/max_cover.rs @@ -7,7 +7,7 @@ use itertools::Itertools; /// * `item`: something that implements this trait /// * `element`: something contained in a set, and covered by the covering set of an item /// * `object`: something extracted from an item in order to comprise a solution -/// See: https://en.wikipedia.org/wiki/Maximum_coverage_problem +/// See: https://en.wikipedia.org/wiki/Maximum_coverage_problem pub trait MaxCover: Clone { /// The result type, of which we would eventually like a collection of maximal quality. type Object: Clone; diff --git a/common/lighthouse_metrics/src/lib.rs b/common/lighthouse_metrics/src/lib.rs index 4a76184b8a..f2424ccabe 100644 --- a/common/lighthouse_metrics/src/lib.rs +++ b/common/lighthouse_metrics/src/lib.rs @@ -4,9 +4,9 @@ //! [Prometheus docs](https://prometheus.io/docs/concepts/metric_types/)): //! //! 
- `Histogram`: used with `start_timer(..)` and `stop_timer(..)` to record durations (e.g., -//! block processing time). +//! block processing time). //! - `IncCounter`: used to represent an ideally ever-growing, never-shrinking integer (e.g., -//! number of block processing requests). +//! number of block processing requests). //! - `IntGauge`: used to represent an varying integer (e.g., number of attestations per block). //! //! ## Important diff --git a/common/logging/src/async_record.rs b/common/logging/src/async_record.rs index 81037b11a4..7a97fa1a75 100644 --- a/common/logging/src/async_record.rs +++ b/common/logging/src/async_record.rs @@ -175,7 +175,7 @@ impl Serialize for AsyncRecord { // Convoluted pattern to avoid binding `format_args!` to a temporary. // See: https://stackoverflow.com/questions/56304313/cannot-use-format-args-due-to-temporary-value-is-freed-at-the-end-of-this-state let mut f = |msg: std::fmt::Arguments| { - map_serializer.serialize_entry("msg", &msg.to_string())?; + map_serializer.serialize_entry("msg", msg.to_string())?; let record = Record::new(&rs, &msg, BorrowedKV(&(*kv))); self.logger_values diff --git a/common/validator_dir/src/lib.rs b/common/validator_dir/src/lib.rs index 4aa0d590a1..c21b0b44cf 100644 --- a/common/validator_dir/src/lib.rs +++ b/common/validator_dir/src/lib.rs @@ -1,7 +1,7 @@ //! Provides: //! //! - `ValidatorDir`: manages a directory containing validator keypairs, deposit info and other -//! things. +//! things. //! //! This crate is intended to be used by the account manager to create validators and the validator //! client to load those validators. diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index efe154a27e..74f3a986c9 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -149,7 +149,7 @@ impl ProtoArray { /// - Update the node's weight with the corresponding delta. 
/// - Back-propagate each node's delta to its parents delta. /// - Compare the current node with the parents best-child, updating it if the current node - /// should become the best child. + /// should become the best child. /// - If required, update the parents best-descendant with the current node or its best-descendant. #[allow(clippy::too_many_arguments)] pub fn apply_score_changes( diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index 4b7050df7d..606269aee0 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -896,7 +896,7 @@ impl ProtoArrayForkChoice { /// /// - If a value in `indices` is greater to or equal to `indices.len()`. /// - If some `Hash256` in `votes` is not a key in `indices` (except for `Hash256::zero()`, this is -/// always valid). +/// always valid). fn compute_deltas( indices: &HashMap, votes: &mut ElasticList, diff --git a/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs b/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs index 28ca8935e4..223d7a4b89 100644 --- a/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs +++ b/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs @@ -121,7 +121,7 @@ where /// are valid. /// /// * : _Does not verify any signatures in `block.body.deposits`. A block is still valid if it - /// contains invalid signatures on deposits._ + /// contains invalid signatures on deposits._ /// /// See `Self::verify` for more detail. pub fn verify_entire_block>( diff --git a/consensus/swap_or_not_shuffle/src/lib.rs b/consensus/swap_or_not_shuffle/src/lib.rs index ede15b3196..e9a131ab05 100644 --- a/consensus/swap_or_not_shuffle/src/lib.rs +++ b/consensus/swap_or_not_shuffle/src/lib.rs @@ -7,9 +7,9 @@ //! There are two functions exported by this crate: //! //! 
- `compute_shuffled_index`: given a single index, computes the index resulting from a shuffle. -//! Runs in less time than it takes to run `shuffle_list`. +//! Runs in less time than it takes to run `shuffle_list`. //! - `shuffle_list`: shuffles an entire list in-place. Runs in less time than it takes to run -//! `compute_shuffled_index` on each index. +//! `compute_shuffled_index` on each index. //! //! In general, use `compute_shuffled_index` to calculate the shuffling of a small subset of a much //! larger list (~250x larger is a good guide, but solid figures yet to be calculated). diff --git a/consensus/types/src/shuffling_id.rs b/consensus/types/src/shuffling_id.rs index a5bdc86673..df16f605ed 100644 --- a/consensus/types/src/shuffling_id.rs +++ b/consensus/types/src/shuffling_id.rs @@ -11,7 +11,7 @@ use std::hash::Hash; /// /// - The epoch for which the shuffling should be effective. /// - A block root, where this is the root at the *last* slot of the penultimate epoch. I.e., the -/// final block which contributed a randao reveal to the seed for the shuffling. +/// final block which contributed a randao reveal to the seed for the shuffling. /// /// The struct stores exactly that 2-tuple. 
#[derive(Debug, PartialEq, Eq, Clone, Hash, Serialize, Deserialize, Encode, Decode)] diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 2101a48f14..fa87fbdd81 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -2253,7 +2253,7 @@ fn slasher_broadcast_flag_false() { }); } -#[cfg(all(feature = "lmdb"))] +#[cfg(all(feature = "slasher-lmdb"))] #[test] fn slasher_backend_override_to_default() { // Hard to test this flag because all but one backend is disabled by default and the backend diff --git a/slasher/src/database.rs b/slasher/src/database.rs index 4f4729a123..5c22c60982 100644 --- a/slasher/src/database.rs +++ b/slasher/src/database.rs @@ -409,7 +409,7 @@ impl SlasherDB { for target_epoch in (start_epoch..max_target.as_u64()).map(Epoch::new) { txn.put( &self.databases.attesters_db, - &AttesterKey::new(validator_index, target_epoch, &self.config), + AttesterKey::new(validator_index, target_epoch, &self.config), CompactAttesterRecord::null().as_bytes(), )?; } @@ -417,8 +417,8 @@ impl SlasherDB { txn.put( &self.databases.attesters_max_targets_db, - &CurrentEpochKey::new(validator_index), - &max_target.as_ssz_bytes(), + CurrentEpochKey::new(validator_index), + max_target.as_ssz_bytes(), )?; Ok(()) } @@ -444,8 +444,8 @@ impl SlasherDB { ) -> Result<(), Error> { txn.put( &self.databases.current_epochs_db, - &CurrentEpochKey::new(validator_index), - ¤t_epoch.as_ssz_bytes(), + CurrentEpochKey::new(validator_index), + current_epoch.as_ssz_bytes(), )?; Ok(()) } @@ -621,7 +621,7 @@ impl SlasherDB { txn.put( &self.databases.attesters_db, - &AttesterKey::new(validator_index, target_epoch, &self.config), + AttesterKey::new(validator_index, target_epoch, &self.config), indexed_attestation_id, )?; @@ -699,8 +699,8 @@ impl SlasherDB { } else { txn.put( &self.databases.proposers_db, - &ProposerKey::new(proposer_index, slot), - &block_header.as_ssz_bytes(), + ProposerKey::new(proposer_index, slot), + 
block_header.as_ssz_bytes(), )?; Ok(ProposerSlashingStatus::NotSlashable) } diff --git a/validator_client/src/http_api/api_secret.rs b/validator_client/src/http_api/api_secret.rs index 32035caf47..afcac477ec 100644 --- a/validator_client/src/http_api/api_secret.rs +++ b/validator_client/src/http_api/api_secret.rs @@ -15,12 +15,12 @@ pub const PK_LEN: usize = 33; /// Provides convenience functions to ultimately provide: /// /// - Verification of proof-of-knowledge of the public key in `self` for incoming HTTP requests, -/// via the `Authorization` header. +/// via the `Authorization` header. /// /// The aforementioned scheme was first defined here: /// /// https://github.com/sigp/lighthouse/issues/1269#issuecomment-649879855 -/// +/// /// This scheme has since been tweaked to remove VC response signing and secp256k1 key generation. /// https://github.com/sigp/lighthouse/issues/5423 pub struct ApiSecret { diff --git a/validator_client/src/validator_store.rs b/validator_client/src/validator_store.rs index e6e19b6e06..574ae7449d 100644 --- a/validator_client/src/validator_store.rs +++ b/validator_client/src/validator_store.rs @@ -502,7 +502,7 @@ impl ValidatorStore { /// Translate the per validator `builder_proposals`, `builder_boost_factor` and /// `prefer_builder_proposals` to a boost factor, if available. /// - If `prefer_builder_proposals` is true, set boost factor to `u64::MAX` to indicate a - /// preference for builder payloads. + /// preference for builder payloads. /// - If `builder_boost_factor` is a value other than None, return its value as the boost factor. /// - If `builder_proposals` is set to false, set boost factor to 0 to indicate a preference for /// local payloads. diff --git a/watch/src/updater/handler.rs b/watch/src/updater/handler.rs index 3ee32560ad..8f5e3f8e4a 100644 --- a/watch/src/updater/handler.rs +++ b/watch/src/updater/handler.rs @@ -112,14 +112,14 @@ impl UpdateHandler { /// Performs a head update with the following steps: /// 1. 
Pull the latest header from the beacon node and the latest canonical slot from the - /// database. + /// database. /// 2. Loop back through the beacon node and database to find the first matching slot -> root - /// pair. + /// pair. /// 3. Go back `MAX_EXPECTED_REORG_LENGTH` slots through the database ensuring it is - /// consistent with the beacon node. If a re-org occurs beyond this range, we cannot recover. + /// consistent with the beacon node. If a re-org occurs beyond this range, we cannot recover. /// 4. Remove any invalid slots from the database. /// 5. Sync all blocks between the first valid block of the database and the head of the beacon - /// chain. + /// chain. /// /// In the event there are no slots present in the database, it will sync from the head block /// block back to the first slot of the epoch. From 24169b2faa2a0a2b08d6b5d6808051028d16f45b Mon Sep 17 00:00:00 2001 From: Lion - dapplion <35266934+dapplion@users.noreply.github.com> Date: Thu, 25 Jul 2024 21:01:16 +0200 Subject: [PATCH 03/43] Iterate expired components from the da_checker (#5895) * Iterate expired components from the da_checker * Fix rebase --- .../overflow_lru_cache.rs | 36 +++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index dfe369cc47..e7bb2034fc 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -269,6 +269,26 @@ impl PendingComponents { AvailableExecutedBlock::new(available_block, import_data, payload_verification_outcome), ))) } + + /// Returns the epoch of the block if it is cached, otherwise returns the epoch of the first blob. 
+ pub fn epoch(&self) -> Option { + self.executed_block + .as_ref() + .map(|pending_block| pending_block.as_block().epoch()) + .or_else(|| { + for maybe_blob in self.verified_blobs.iter() { + if maybe_blob.is_some() { + return maybe_blob.as_ref().map(|kzg_verified_blob| { + kzg_verified_blob + .as_blob() + .slot() + .epoch(E::slots_per_epoch()) + }); + } + } + None + }) + } } /// This is the main struct for this module. Outside methods should @@ -475,6 +495,22 @@ impl DataAvailabilityCheckerInner { pub fn do_maintenance(&self, cutoff_epoch: Epoch) -> Result<(), AvailabilityCheckError> { // clean up any lingering states in the state cache self.state_cache.do_maintenance(cutoff_epoch); + + // Collect keys of pending blocks from a previous epoch to cutoff + let mut write_lock = self.critical.write(); + let mut keys_to_remove = vec![]; + for (key, value) in write_lock.iter() { + if let Some(epoch) = value.epoch() { + if epoch < cutoff_epoch { + keys_to_remove.push(*key); + } + } + } + // Now remove keys + for key in keys_to_remove { + write_lock.pop(&key); + } + Ok(()) } From 8d175f67790ba02965b70b0534db7d4c66e21dfe Mon Sep 17 00:00:00 2001 From: chonghe <44791194+chong-he@users.noreply.github.com> Date: Fri, 26 Jul 2024 09:21:05 +0800 Subject: [PATCH 04/43] Fix `lighthouse account validator` subcommands help text (#6091) * create * import, exit and list * validator the rest * FIx * wallet subcommands * Fix white spaces * Revert "create" This reverts commit 2eac633a5815f5c19afb208b0daa6f33adbc83cf. 
* Add global * Merge remote-tracking branch 'origin/unstable' into cli-help --- account_manager/src/validator/mod.rs | 3 ++- account_manager/src/wallet/mod.rs | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/account_manager/src/validator/mod.rs b/account_manager/src/validator/mod.rs index 6616bb0c45..61584cbfbb 100644 --- a/account_manager/src/validator/mod.rs +++ b/account_manager/src/validator/mod.rs @@ -27,7 +27,8 @@ pub fn cli_app() -> Command { .help("Prints help information") .action(ArgAction::HelpLong) .display_order(0) - .help_heading(FLAG_HEADER), + .help_heading(FLAG_HEADER) + .global(true), ) .arg( Arg::new(VALIDATOR_DIR_FLAG) diff --git a/account_manager/src/wallet/mod.rs b/account_manager/src/wallet/mod.rs index 59f5f36252..020858db77 100644 --- a/account_manager/src/wallet/mod.rs +++ b/account_manager/src/wallet/mod.rs @@ -22,6 +22,7 @@ pub fn cli_app() -> Command { .action(ArgAction::HelpLong) .display_order(0) .help_heading(FLAG_HEADER) + .global(true) ) .arg( Arg::new(WALLETS_DIR_FLAG) From 28e3b86bee4669e028d97a28fe0272fc1386ee56 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Fri, 26 Jul 2024 02:21:07 +0100 Subject: [PATCH 05/43] limit dial concurrency on Swarm (#6184) * limit dial concurrency on Swarm * Merge branch 'unstable' into set-dial-concurrency --- .../lighthouse_network/src/service/mod.rs | 22 +++++++------------ 1 file changed, 8 insertions(+), 14 deletions(-) diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index aaf9dda523..c2a2a03fe8 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -33,6 +33,7 @@ use libp2p::swarm::behaviour::toggle::Toggle; use libp2p::swarm::{Swarm, SwarmEvent}; use libp2p::{identify, PeerId, SwarmBuilder}; use slog::{crit, debug, info, o, trace, warn}; +use std::num::{NonZeroU8, NonZeroUsize}; use std::path::PathBuf; use std::pin::Pin; 
use std::{ @@ -415,6 +416,11 @@ impl Network { // sets up the libp2p swarm. let swarm = { + let config = libp2p::swarm::Config::with_executor(Executor(executor)) + .with_notify_handler_buffer_size(NonZeroUsize::new(7).expect("Not zero")) + .with_per_connection_event_buffer_size(4) + .with_dial_concurrency_factor(NonZeroU8::new(1).unwrap()); + let builder = SwarmBuilder::with_existing_identity(local_keypair) .with_tokio() .with_other_transport(|_key| transport) @@ -426,25 +432,13 @@ impl Network { .with_bandwidth_metrics(libp2p_registry) .with_behaviour(|_| behaviour) .expect("infalible") - .with_swarm_config(|_| { - libp2p::swarm::Config::with_executor(Executor(executor)) - .with_notify_handler_buffer_size( - std::num::NonZeroUsize::new(7).expect("Not zero"), - ) - .with_per_connection_event_buffer_size(4) - }) + .with_swarm_config(|_| config) .build() } else { builder .with_behaviour(|_| behaviour) .expect("infalible") - .with_swarm_config(|_| { - libp2p::swarm::Config::with_executor(Executor(executor)) - .with_notify_handler_buffer_size( - std::num::NonZeroUsize::new(7).expect("Not zero"), - ) - .with_per_connection_event_buffer_size(4) - }) + .with_swarm_config(|_| config) .build() } }; From b949db0a8b79b12d4cdd1395558b8e47ef25e696 Mon Sep 17 00:00:00 2001 From: Lion - dapplion <35266934+dapplion@users.noreply.github.com> Date: Fri, 26 Jul 2024 04:01:40 +0200 Subject: [PATCH 06/43] Remove timeout locks (#6048) * Remove locks with timeouts * Readd test * Update docs * Merge remote-tracking branch 'origin/unstable' into pk-cache-timeout --- .../src/attestation_verification.rs | 11 +-- .../src/attestation_verification/batch.rs | 15 +-- beacon_node/beacon_chain/src/beacon_chain.rs | 94 ++++++------------- .../beacon_chain/src/block_verification.rs | 7 +- beacon_node/beacon_chain/src/builder.rs | 7 +- .../beacon_chain/src/canonical_head.rs | 20 +--- beacon_node/beacon_chain/src/chain_config.rs | 3 - beacon_node/beacon_chain/src/lib.rs | 2 - 
beacon_node/beacon_chain/src/metrics.rs | 4 + .../beacon_chain/src/state_advance_timer.rs | 6 +- .../src/sync_committee_verification.rs | 13 +-- .../beacon_chain/src/timeout_rw_lock.rs | 48 ---------- beacon_node/src/cli.rs | 4 +- beacon_node/src/config.rs | 6 +- beacon_node/src/lib.rs | 6 -- book/src/faq.md | 14 +-- book/src/help_bn.md | 4 +- lighthouse/tests/beacon_node.rs | 10 +- 18 files changed, 60 insertions(+), 214 deletions(-) delete mode 100644 beacon_node/beacon_chain/src/timeout_rw_lock.rs diff --git a/beacon_node/beacon_chain/src/attestation_verification.rs b/beacon_node/beacon_chain/src/attestation_verification.rs index 06fba937d8..5a730719bf 100644 --- a/beacon_node/beacon_chain/src/attestation_verification.rs +++ b/beacon_node/beacon_chain/src/attestation_verification.rs @@ -35,7 +35,6 @@ mod batch; use crate::{ - beacon_chain::VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT, metrics, observed_aggregates::{ObserveOutcome, ObservedAttestationKey}, observed_attesters::Error as ObservedAttestersError, @@ -1174,10 +1173,7 @@ pub fn verify_attestation_signature( let signature_setup_timer = metrics::start_timer(&metrics::ATTESTATION_PROCESSING_SIGNATURE_SETUP_TIMES); - let pubkey_cache = chain - .validator_pubkey_cache - .try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT) - .ok_or(BeaconChainError::ValidatorPubkeyCacheLockTimeout)?; + let pubkey_cache = chain.validator_pubkey_cache.read(); let fork = chain .spec @@ -1272,10 +1268,7 @@ pub fn verify_signed_aggregate_signatures( signed_aggregate: &SignedAggregateAndProof, indexed_attestation: &IndexedAttestation, ) -> Result { - let pubkey_cache = chain - .validator_pubkey_cache - .try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT) - .ok_or(BeaconChainError::ValidatorPubkeyCacheLockTimeout)?; + let pubkey_cache = chain.validator_pubkey_cache.read(); let aggregator_index = signed_aggregate.message().aggregator_index(); if aggregator_index >= pubkey_cache.len() as u64 { diff --git 
a/beacon_node/beacon_chain/src/attestation_verification/batch.rs b/beacon_node/beacon_chain/src/attestation_verification/batch.rs index 07fad1bd4a..5f856140ba 100644 --- a/beacon_node/beacon_chain/src/attestation_verification/batch.rs +++ b/beacon_node/beacon_chain/src/attestation_verification/batch.rs @@ -13,10 +13,7 @@ use super::{ CheckAttestationSignature, Error, IndexedAggregatedAttestation, IndexedUnaggregatedAttestation, VerifiedAggregatedAttestation, VerifiedUnaggregatedAttestation, }; -use crate::{ - beacon_chain::VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT, metrics, BeaconChain, BeaconChainError, - BeaconChainTypes, -}; +use crate::{metrics, BeaconChain, BeaconChainError, BeaconChainTypes}; use bls::verify_signature_sets; use state_processing::signature_sets::{ indexed_attestation_signature_set_from_pubkeys, signed_aggregate_selection_proof_signature_set, @@ -60,10 +57,7 @@ where let signature_setup_timer = metrics::start_timer(&metrics::ATTESTATION_PROCESSING_BATCH_AGG_SIGNATURE_SETUP_TIMES); - let pubkey_cache = chain - .validator_pubkey_cache - .try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT) - .ok_or(BeaconChainError::ValidatorPubkeyCacheLockTimeout)?; + let pubkey_cache = chain.validator_pubkey_cache.read(); let mut signature_sets = Vec::with_capacity(num_indexed * 3); // Iterate, flattening to get only the `Ok` values. 
@@ -169,10 +163,7 @@ where &metrics::ATTESTATION_PROCESSING_BATCH_UNAGG_SIGNATURE_SETUP_TIMES, ); - let pubkey_cache = chain - .validator_pubkey_cache - .try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT) - .ok_or(BeaconChainError::ValidatorPubkeyCacheLockTimeout)?; + let pubkey_cache = chain.validator_pubkey_cache.read(); let mut signature_sets = Vec::with_capacity(num_partially_verified); diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index f795128b71..6d2973021a 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -63,7 +63,6 @@ use crate::shuffling_cache::{BlockShufflingIds, ShufflingCache}; use crate::sync_committee_verification::{ Error as SyncCommitteeError, VerifiedSyncCommitteeMessage, VerifiedSyncContribution, }; -use crate::timeout_rw_lock::TimeoutRwLock; use crate::validator_monitor::{ get_slot_delay_ms, timestamp_now, ValidatorMonitor, HISTORIC_EPOCHS as VALIDATOR_MONITOR_HISTORIC_EPOCHS, @@ -132,17 +131,6 @@ pub type ForkChoiceError = fork_choice::Error; /// Alias to appease clippy. type HashBlockTuple = (Hash256, RpcBlock); -/// The time-out before failure during an operation to take a read/write RwLock on the -/// attestation cache. -pub const ATTESTATION_CACHE_LOCK_TIMEOUT: Duration = Duration::from_secs(1); - -/// The time-out before failure during an operation to take a read/write RwLock on the -/// validator pubkey cache. -pub const VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT: Duration = Duration::from_secs(1); - -/// The timeout for the eth1 finalization cache -pub const ETH1_FINALIZATION_CACHE_LOCK_TIMEOUT: Duration = Duration::from_millis(200); - // These keys are all zero because they get stored in different columns, see `DBColumn` type. 
pub const BEACON_CHAIN_DB_KEY: Hash256 = Hash256::zero(); pub const OP_POOL_DB_KEY: Hash256 = Hash256::zero(); @@ -465,13 +453,13 @@ pub struct BeaconChain { /// Used to track the heads of the beacon chain. pub(crate) head_tracker: Arc, /// Caches the attester shuffling for a given epoch and shuffling key root. - pub shuffling_cache: TimeoutRwLock, + pub shuffling_cache: RwLock, /// A cache of eth1 deposit data at epoch boundaries for deposit finalization - pub eth1_finalization_cache: TimeoutRwLock, + pub eth1_finalization_cache: RwLock, /// Caches the beacon block proposer shuffling for a given epoch and shuffling key root. pub beacon_proposer_cache: Arc>, /// Caches a map of `validator_index -> validator_pubkey`. - pub(crate) validator_pubkey_cache: TimeoutRwLock>, + pub(crate) validator_pubkey_cache: RwLock>, /// A cache used when producing attestations. pub(crate) attester_cache: Arc, /// A cache used when producing attestations whilst the head block is still being imported. @@ -1472,10 +1460,7 @@ impl BeaconChain { /// /// May return an error if acquiring a read-lock on the `validator_pubkey_cache` times out. pub fn validator_index(&self, pubkey: &PublicKeyBytes) -> Result, Error> { - let pubkey_cache = self - .validator_pubkey_cache - .try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT) - .ok_or(Error::ValidatorPubkeyCacheLockTimeout)?; + let pubkey_cache = self.validator_pubkey_cache.read(); Ok(pubkey_cache.get_index(pubkey)) } @@ -1488,10 +1473,7 @@ impl BeaconChain { &self, validator_pubkeys: impl Iterator, ) -> Result, Error> { - let pubkey_cache = self - .validator_pubkey_cache - .try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT) - .ok_or(Error::ValidatorPubkeyCacheLockTimeout)?; + let pubkey_cache = self.validator_pubkey_cache.read(); validator_pubkeys .map(|pubkey| { @@ -1516,10 +1498,7 @@ impl BeaconChain { /// /// May return an error if acquiring a read-lock on the `validator_pubkey_cache` times out. 
pub fn validator_pubkey(&self, validator_index: usize) -> Result, Error> { - let pubkey_cache = self - .validator_pubkey_cache - .try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT) - .ok_or(Error::ValidatorPubkeyCacheLockTimeout)?; + let pubkey_cache = self.validator_pubkey_cache.read(); Ok(pubkey_cache.get(validator_index).cloned()) } @@ -1529,10 +1508,7 @@ impl BeaconChain { &self, validator_index: usize, ) -> Result, Error> { - let pubkey_cache = self - .validator_pubkey_cache - .try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT) - .ok_or(Error::ValidatorPubkeyCacheLockTimeout)?; + let pubkey_cache = self.validator_pubkey_cache.read(); Ok(pubkey_cache.get_pubkey_bytes(validator_index).copied()) } @@ -1546,10 +1522,7 @@ impl BeaconChain { &self, validator_indices: &[usize], ) -> Result, Error> { - let pubkey_cache = self - .validator_pubkey_cache - .try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT) - .ok_or(Error::ValidatorPubkeyCacheLockTimeout)?; + let pubkey_cache = self.validator_pubkey_cache.read(); let mut map = HashMap::with_capacity(validator_indices.len()); for &validator_index in validator_indices { @@ -3506,11 +3479,12 @@ impl BeaconChain { // is so we don't have to think about lock ordering with respect to the fork choice lock. // There are a bunch of places where we lock both fork choice and the pubkey cache and it // would be difficult to check that they all lock fork choice first. - let mut ops = self - .validator_pubkey_cache - .try_write_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT) - .ok_or(Error::ValidatorPubkeyCacheLockTimeout)? - .import_new_pubkeys(&state)?; + let mut ops = { + let _timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_PUBKEY_CACHE_LOCK); + self.validator_pubkey_cache + .write() + .import_new_pubkeys(&state)? + }; // Apply the state to the attester cache, only if it is from the previous epoch or later. 
// @@ -4116,18 +4090,13 @@ impl BeaconChain { for relative_epoch in [RelativeEpoch::Current, RelativeEpoch::Next] { let shuffling_id = AttestationShufflingId::new(block_root, state, relative_epoch)?; - let shuffling_is_cached = self - .shuffling_cache - .try_read_for(ATTESTATION_CACHE_LOCK_TIMEOUT) - .ok_or(Error::AttestationCacheLockTimeout)? - .contains(&shuffling_id); + let shuffling_is_cached = self.shuffling_cache.read().contains(&shuffling_id); if !shuffling_is_cached { state.build_committee_cache(relative_epoch, &self.spec)?; let committee_cache = state.committee_cache(relative_epoch)?; self.shuffling_cache - .try_write_for(ATTESTATION_CACHE_LOCK_TIMEOUT) - .ok_or(Error::AttestationCacheLockTimeout)? + .write() .insert_committee_cache(shuffling_id, committee_cache); } } @@ -4174,14 +4143,12 @@ impl BeaconChain { ) }; - if let Some(finalized_eth1_data) = self - .eth1_finalization_cache - .try_write_for(ETH1_FINALIZATION_CACHE_LOCK_TIMEOUT) - .and_then(|mut cache| { - cache.insert(checkpoint, eth1_finalization_data); - cache.finalize(¤t_finalized_checkpoint) - }) - { + let finalized_eth1_data = { + let mut cache = self.eth1_finalization_cache.write(); + cache.insert(checkpoint, eth1_finalization_data); + cache.finalize(¤t_finalized_checkpoint) + }; + if let Some(finalized_eth1_data) = finalized_eth1_data { if let Some(eth1_chain) = self.eth1_chain.as_ref() { let finalized_deposit_count = finalized_eth1_data.deposit_count; eth1_chain.finalize_eth1_data(finalized_eth1_data); @@ -6365,15 +6332,11 @@ impl BeaconChain { })?; // Obtain the shuffling cache, timing how long we wait. 
- let cache_wait_timer = - metrics::start_timer(&metrics::ATTESTATION_PROCESSING_SHUFFLING_CACHE_WAIT_TIMES); - - let mut shuffling_cache = self - .shuffling_cache - .try_write_for(ATTESTATION_CACHE_LOCK_TIMEOUT) - .ok_or(Error::AttestationCacheLockTimeout)?; - - metrics::stop_timer(cache_wait_timer); + let mut shuffling_cache = { + let _ = + metrics::start_timer(&metrics::ATTESTATION_PROCESSING_SHUFFLING_CACHE_WAIT_TIMES); + self.shuffling_cache.write() + }; if let Some(cache_item) = shuffling_cache.get(&shuffling_id) { // The shuffling cache is no longer required, drop the write-lock to allow concurrent @@ -6481,8 +6444,7 @@ impl BeaconChain { let shuffling_decision_block = shuffling_id.shuffling_decision_block; self.shuffling_cache - .try_write_for(ATTESTATION_CACHE_LOCK_TIMEOUT) - .ok_or(Error::AttestationCacheLockTimeout)? + .write() .insert_committee_cache(shuffling_id, &committee_cache); metrics::stop_timer(committee_building_timer); diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index c71a2bcab3..832eaccc80 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -64,7 +64,7 @@ use crate::observed_block_producers::SeenBlock; use crate::validator_monitor::HISTORIC_EPOCHS as VALIDATOR_MONITOR_HISTORIC_EPOCHS; use crate::validator_pubkey_cache::ValidatorPubkeyCache; use crate::{ - beacon_chain::{BeaconForkChoice, ForkChoiceError, VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT}, + beacon_chain::{BeaconForkChoice, ForkChoiceError}, metrics, BeaconChain, BeaconChainError, BeaconChainTypes, }; use derivative::Derivative; @@ -2096,10 +2096,7 @@ pub fn cheap_state_advance_to_obtain_committees<'a, E: EthSpec, Err: BlockBlobEr pub fn get_validator_pubkey_cache( chain: &BeaconChain, ) -> Result>, BeaconChainError> { - chain - .validator_pubkey_cache - .try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT) - 
.ok_or(BeaconChainError::ValidatorPubkeyCacheLockTimeout) + Ok(chain.validator_pubkey_cache.read()) } /// Produces an _empty_ `BlockSignatureVerifier`. diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 7217f2c640..14e61e1265 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -13,7 +13,6 @@ use crate::light_client_server_cache::LightClientServerCache; use crate::migrate::{BackgroundMigrator, MigratorConfig}; use crate::persisted_beacon_chain::PersistedBeaconChain; use crate::shuffling_cache::{BlockShufflingIds, ShufflingCache}; -use crate::timeout_rw_lock::TimeoutRwLock; use crate::validator_monitor::{ValidatorMonitor, ValidatorMonitorConfig}; use crate::validator_pubkey_cache::ValidatorPubkeyCache; use crate::ChainConfig; @@ -935,16 +934,16 @@ where fork_choice_signal_rx, event_handler: self.event_handler, head_tracker, - shuffling_cache: TimeoutRwLock::new(ShufflingCache::new( + shuffling_cache: RwLock::new(ShufflingCache::new( shuffling_cache_size, head_shuffling_ids, log.clone(), )), - eth1_finalization_cache: TimeoutRwLock::new(Eth1FinalizationCache::new(log.clone())), + eth1_finalization_cache: RwLock::new(Eth1FinalizationCache::new(log.clone())), beacon_proposer_cache, block_times_cache: <_>::default(), pre_finalization_block_cache: <_>::default(), - validator_pubkey_cache: TimeoutRwLock::new(validator_pubkey_cache), + validator_pubkey_cache: RwLock::new(validator_pubkey_cache), attester_cache: <_>::default(), early_attester_cache: <_>::default(), reqresp_pre_import_cache: <_>::default(), diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs index 84e1544451..a5d85d5603 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -31,7 +31,6 @@ //! the head block root. This is unacceptable for fast-responding functions like the networking //! stack. 
-use crate::beacon_chain::ATTESTATION_CACHE_LOCK_TIMEOUT; use crate::persisted_fork_choice::PersistedForkChoice; use crate::shuffling_cache::BlockShufflingIds; use crate::{ @@ -817,21 +816,10 @@ impl BeaconChain { new_snapshot.beacon_block_root, &new_snapshot.beacon_state, ) { - Ok(head_shuffling_ids) => { - self.shuffling_cache - .try_write_for(ATTESTATION_CACHE_LOCK_TIMEOUT) - .map(|mut shuffling_cache| { - shuffling_cache.update_head_shuffling_ids(head_shuffling_ids) - }) - .unwrap_or_else(|| { - error!( - self.log, - "Failed to obtain cache write lock"; - "lock" => "shuffling_cache", - "task" => "update head shuffling decision root" - ); - }); - } + Ok(head_shuffling_ids) => self + .shuffling_cache + .write() + .update_head_shuffling_ids(head_shuffling_ids), Err(e) => { error!( self.log, diff --git a/beacon_node/beacon_chain/src/chain_config.rs b/beacon_node/beacon_chain/src/chain_config.rs index 255b8f0049..c908efa07c 100644 --- a/beacon_node/beacon_chain/src/chain_config.rs +++ b/beacon_node/beacon_chain/src/chain_config.rs @@ -28,8 +28,6 @@ pub struct ChainConfig { pub weak_subjectivity_checkpoint: Option, /// Determine whether to reconstruct historic states, usually after a checkpoint sync. pub reconstruct_historic_states: bool, - /// Whether timeouts on `TimeoutRwLock`s are enabled or not. - pub enable_lock_timeouts: bool, /// The max size of a message that can be sent over the network. pub max_network_size: usize, /// Maximum percentage of the head committee weight at which to attempt re-orging the canonical head. 
@@ -94,7 +92,6 @@ impl Default for ChainConfig { import_max_skip_slots: None, weak_subjectivity_checkpoint: None, reconstruct_historic_states: false, - enable_lock_timeouts: true, max_network_size: 10 * 1_048_576, // 10M re_org_head_threshold: Some(DEFAULT_RE_ORG_HEAD_THRESHOLD), re_org_parent_threshold: Some(DEFAULT_RE_ORG_PARENT_THRESHOLD), diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index e1d0f61c58..5f3ccac4e4 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -57,7 +57,6 @@ pub mod state_advance_timer; pub mod sync_committee_rewards; pub mod sync_committee_verification; pub mod test_utils; -mod timeout_rw_lock; pub mod validator_monitor; pub mod validator_pubkey_cache; @@ -98,5 +97,4 @@ pub use state_processing::per_block_processing::errors::{ ExitValidationError, ProposerSlashingValidationError, }; pub use store; -pub use timeout_rw_lock::TimeoutRwLock; pub use types; diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index ab547cb600..4ca511370d 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -93,6 +93,10 @@ lazy_static! 
{ "Time spent running fork choice's `get_head` during block import", exponential_buckets(1e-3, 2.0, 8) ); + pub static ref BLOCK_PROCESSING_PUBKEY_CACHE_LOCK: Result = try_create_histogram( + "beacon_block_processing_pubkey_cache_lock_seconds", + "Time spent waiting or holding the pubkey cache write lock", + ); pub static ref BLOCK_SYNC_AGGREGATE_SET_BITS: Result = try_create_int_gauge( "block_sync_aggregate_set_bits", "The number of true bits in the last sync aggregate in a block" diff --git a/beacon_node/beacon_chain/src/state_advance_timer.rs b/beacon_node/beacon_chain/src/state_advance_timer.rs index 1f928a16e4..1d8bfff216 100644 --- a/beacon_node/beacon_chain/src/state_advance_timer.rs +++ b/beacon_node/beacon_chain/src/state_advance_timer.rs @@ -15,8 +15,7 @@ //! 2. There's a possibility that the head block is never built upon, causing wasted CPU cycles. use crate::validator_monitor::HISTORIC_EPOCHS as VALIDATOR_MONITOR_HISTORIC_EPOCHS; use crate::{ - beacon_chain::ATTESTATION_CACHE_LOCK_TIMEOUT, chain_config::FORK_CHOICE_LOOKAHEAD_FACTOR, - BeaconChain, BeaconChainError, BeaconChainTypes, + chain_config::FORK_CHOICE_LOOKAHEAD_FACTOR, BeaconChain, BeaconChainError, BeaconChainTypes, }; use slog::{debug, error, warn, Logger}; use slot_clock::SlotClock; @@ -417,8 +416,7 @@ fn advance_head( .map_err(BeaconChainError::from)?; beacon_chain .shuffling_cache - .try_write_for(ATTESTATION_CACHE_LOCK_TIMEOUT) - .ok_or(BeaconChainError::AttestationCacheLockTimeout)? 
+ .write() .insert_committee_cache(shuffling_id.clone(), committee_cache); debug!( diff --git a/beacon_node/beacon_chain/src/sync_committee_verification.rs b/beacon_node/beacon_chain/src/sync_committee_verification.rs index 5c6710bfd6..e1a5de56d1 100644 --- a/beacon_node/beacon_chain/src/sync_committee_verification.rs +++ b/beacon_node/beacon_chain/src/sync_committee_verification.rs @@ -28,8 +28,7 @@ use crate::observed_attesters::SlotSubcommitteeIndex; use crate::{ - beacon_chain::VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT, metrics, - observed_aggregates::ObserveOutcome, BeaconChain, BeaconChainError, BeaconChainTypes, + metrics, observed_aggregates::ObserveOutcome, BeaconChain, BeaconChainError, BeaconChainTypes, }; use bls::{verify_signature_sets, PublicKeyBytes}; use derivative::Derivative; @@ -619,10 +618,7 @@ pub fn verify_signed_aggregate_signatures( signed_aggregate: &SignedContributionAndProof, participant_pubkeys: &[PublicKeyBytes], ) -> Result { - let pubkey_cache = chain - .validator_pubkey_cache - .try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT) - .ok_or(BeaconChainError::ValidatorPubkeyCacheLockTimeout)?; + let pubkey_cache = chain.validator_pubkey_cache.read(); let aggregator_index = signed_aggregate.message.aggregator_index; if aggregator_index >= pubkey_cache.len() as u64 { @@ -683,10 +679,7 @@ pub fn verify_sync_committee_message( let signature_setup_timer = metrics::start_timer(&metrics::SYNC_MESSAGE_PROCESSING_SIGNATURE_SETUP_TIMES); - let pubkey_cache = chain - .validator_pubkey_cache - .try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT) - .ok_or(BeaconChainError::ValidatorPubkeyCacheLockTimeout)?; + let pubkey_cache = chain.validator_pubkey_cache.read(); let pubkey = pubkey_cache .get_pubkey_from_pubkey_bytes(pubkey_bytes) diff --git a/beacon_node/beacon_chain/src/timeout_rw_lock.rs b/beacon_node/beacon_chain/src/timeout_rw_lock.rs deleted file mode 100644 index b2eea76265..0000000000 --- a/beacon_node/beacon_chain/src/timeout_rw_lock.rs +++ 
/dev/null @@ -1,48 +0,0 @@ -use parking_lot::{RwLock, RwLockReadGuard, RwLockWriteGuard}; -use std::sync::atomic::{AtomicBool, Ordering}; -use std::time::Duration; - -/// A simple wrapper around `parking_lot::RwLock` that only permits read/write access with a -/// time-out (i.e., no indefinitely-blocking operations). -/// -/// Timeouts can be optionally disabled at runtime for all instances of this type by calling -/// `TimeoutRwLock::disable_timeouts()`. -pub struct TimeoutRwLock(RwLock); - -const TIMEOUT_LOCKS_ENABLED_DEFAULT: bool = true; -static TIMEOUT_LOCKS_ENABLED: AtomicBool = AtomicBool::new(TIMEOUT_LOCKS_ENABLED_DEFAULT); - -impl TimeoutRwLock<()> { - pub fn disable_timeouts() { - // Use the strongest `SeqCst` ordering for the write, as it should only happen once. - TIMEOUT_LOCKS_ENABLED.store(false, Ordering::SeqCst); - } -} - -impl TimeoutRwLock { - pub fn new(inner: T) -> Self { - Self(RwLock::new(inner)) - } - - fn timeouts_enabled() -> bool { - // Use relaxed ordering as it's OK for a few locks to run with timeouts "accidentally", - // and we want the atomic check to be as fast as possible. - TIMEOUT_LOCKS_ENABLED.load(Ordering::Relaxed) - } - - pub fn try_read_for(&self, timeout: Duration) -> Option> { - if Self::timeouts_enabled() { - self.0.try_read_for(timeout) - } else { - Some(self.0.read()) - } - } - - pub fn try_write_for(&self, timeout: Duration) -> Option> { - if Self::timeouts_enabled() { - self.0.try_write_for(timeout) - } else { - Some(self.0.write()) - } - } -} diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 689c3437a3..2e1b1c093c 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -1246,9 +1246,7 @@ pub fn cli_app() -> Command { .arg( Arg::new("disable-lock-timeouts") .long("disable-lock-timeouts") - .help("Disable the timeouts applied to some internal locks by default. 
This can \ - lead to less spurious failures on slow hardware but is considered \ - experimental as it may obscure performance issues.") + .help("This flag is deprecated and has no effect.") .action(ArgAction::SetTrue) .help_heading(FLAG_HEADER) .display_order(0) diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 6bde0411af..b4fa38da7d 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -753,7 +753,11 @@ pub fn get_config( } if cli_args.get_flag("disable-lock-timeouts") { - client_config.chain.enable_lock_timeouts = false; + warn!( + log, + "Ignoring --disable-lock-timeouts"; + "info" => "this flag is deprecated and will be removed" + ); } if cli_args.get_flag("disable-proposer-reorgs") { diff --git a/beacon_node/src/lib.rs b/beacon_node/src/lib.rs index 40b667a744..945bd787dd 100644 --- a/beacon_node/src/lib.rs +++ b/beacon_node/src/lib.rs @@ -5,7 +5,6 @@ pub use beacon_chain; use beacon_chain::store::LevelDB; use beacon_chain::{ builder::Witness, eth1_chain::CachingEth1Backend, slot_clock::SystemTimeSlotClock, - TimeoutRwLock, }; use clap::ArgMatches; pub use cli::cli_app; @@ -73,11 +72,6 @@ impl ProductionBeaconNode { ) } - if !client_config.chain.enable_lock_timeouts { - info!(log, "Disabling lock timeouts globally"); - TimeoutRwLock::disable_timeouts() - } - if let Err(misaligned_forks) = validator_fork_epochs(&spec) { warn!( log, diff --git a/book/src/faq.md b/book/src/faq.md index 2de7841343..4c58c2e16d 100644 --- a/book/src/faq.md +++ b/book/src/faq.md @@ -8,7 +8,6 @@ - [My beacon node is stuck at downloading historical block using checkpoint sync. What should I do?](#bn-download-historical) - [I proposed a block but the beacon node shows `could not publish message` with error `duplicate` as below, should I be worried?](#bn-duplicate) - [I see beacon node logs `Head is optimistic` and I am missing attestations. 
What should I do?](#bn-optimistic) -- [My beacon node logs `CRIT Beacon block processing error error: ValidatorPubkeyCacheLockTimeout`, what should I do?](#bn-timeout) - [My beacon node logs `WARN BlockProcessingFailure outcome: MissingBeaconBlock`, what should I do?](#bn-missing-beacon) - [After checkpoint sync, the progress of `downloading historical blocks` is slow. Why?](#bn-download-slow) - [My beacon node logs `WARN Error processing HTTP API request`, what should I do?](#bn-http) @@ -84,7 +83,7 @@ The `WARN Execution engine called failed` log is shown when the beacon node cann `error: HttpClient(url: http://127.0.0.1:8551/, kind: timeout, detail: operation timed out), service: exec` -which says `TimedOut` at the end of the message. This means that the execution engine has not responded in time to the beacon node. One option is to add the flags `--execution-timeout-multiplier 3` and `--disable-lock-timeouts` to the beacon node. However, if the error persists, it is worth digging further to find out the cause. There are a few reasons why this can occur: +which says `TimedOut` at the end of the message. This means that the execution engine has not responded in time to the beacon node. One option is to add the flag `--execution-timeout-multiplier 3` to the beacon node. However, if the error persists, it is worth digging further to find out the cause. There are a few reasons why this can occur: 1. The execution engine is not synced. Check the log of the execution engine to make sure that it is synced. If it is syncing, wait until it is synced and the error will disappear. You will see the beacon node logs `INFO Execution engine online` when it is synced. 1. The computer is overloaded. Check the CPU and RAM usage to see if it has overloaded. You can use `htop` to check for CPU and RAM usage. 
@@ -140,17 +139,6 @@ WARN Head is optimistic execution_block_hash: 0x47e7555f1d4215d1ad409b1ac1 It means the beacon node will follow the chain, but it will not be able to attest or produce blocks. This is because the execution client is not synced, so the beacon chain cannot verify the authenticity of the chain head, hence the word `optimistic`. What you need to do is to make sure that the execution client is up and syncing. Once the execution client is synced, the error will disappear. -### My beacon node logs `CRIT Beacon block processing error error: ValidatorPubkeyCacheLockTimeout, service: beacon`, what should I do? - -An example of the log is shown below: - -```text -CRIT Beacon block processing error error: ValidatorPubkeyCacheLockTimeout, service: beacon -WARN BlockProcessingFailure outcome: ValidatorPubkeyCacheLockTimeout, msg: unexpected condition in processing block. -``` - -A `Timeout` error suggests that the computer may be overloaded at the moment, for example, the execution client is still syncing. You may use the flag `--disable-lock-timeouts` to silence this error, although it will not fix the underlying slowness. Nevertheless, this is a relatively harmless log, and the error should go away once the resources used are back to normal. - ### My beacon node logs `WARN BlockProcessingFailure outcome: MissingBeaconBlock`, what should I do? An example of the full log is shown below: diff --git a/book/src/help_bn.md b/book/src/help_bn.md index d46427970f..f9180b6583 100644 --- a/book/src/help_bn.md +++ b/book/src/help_bn.md @@ -480,9 +480,7 @@ Flags: --disable-inbound-rate-limiter Disables the inbound rate limiter (requests received by this node). --disable-lock-timeouts - Disable the timeouts applied to some internal locks by default. This - can lead to less spurious failures on slow hardware but is considered - experimental as it may obscure performance issues. + This flag is deprecated and has no effect. 
--disable-log-timestamp If present, do not include timestamps in logging output. --disable-malloc-tuning diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index fa87fbdd81..4fdd967c65 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -159,19 +159,11 @@ fn max_skip_slots_flag() { .with_config(|config| assert_eq!(config.chain.import_max_skip_slots, Some(10))); } -#[test] -fn enable_lock_timeouts_default() { - CommandLineTest::new() - .run_with_zero_port() - .with_config(|config| assert!(config.chain.enable_lock_timeouts)); -} - #[test] fn disable_lock_timeouts_flag() { CommandLineTest::new() .flag("disable-lock-timeouts", None) - .run_with_zero_port() - .with_config(|config| assert!(!config.chain.enable_lock_timeouts)); + .run_with_zero_port(); } #[test] From f60503cd403faad78e23247e5618a78513f9e205 Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Thu, 25 Jul 2024 22:53:18 -0700 Subject: [PATCH 07/43] default vc to block v3 endpoint and deprecate block-v3 flag (#5292) * default vc to block v3 endpoint and deprecate block-v3 flag * kick off ci * Merge branch 'unstable' of https://github.com/sigp/lighthouse into default-vc-to-block-v3-endpoint * fix formatting in cli docs * Merge branch 'unstable' of https://github.com/sigp/lighthouse into default-vc-to-block-v3-endpoint * Resolve merge conflicts * resolve conflicts * merge * merge * revert * retry * fix * Merge branch 'unstable' into default-vc-to-block-v3-endpoint * Merge branch 'unstable' of https://github.com/sigp/lighthouse into default-vc-to-block-v3-endpoint * fix issues w/ fallback sim --- book/src/help_vc.md | 5 +- lighthouse/tests/validator_client.rs | 15 +- testing/simulator/src/cli.rs | 1 - validator_client/src/block_service.rs | 282 ++----------------- validator_client/src/cli.rs | 5 +- validator_client/src/config.rs | 15 +- validator_client/src/http_metrics/metrics.rs | 1 - validator_client/src/validator_store.rs | 6 - 8 files changed, 
41 insertions(+), 289 deletions(-) diff --git a/book/src/help_vc.md b/book/src/help_vc.md index 347c818ede..e205f4c345 100644 --- a/book/src/help_vc.md +++ b/book/src/help_vc.md @@ -265,10 +265,7 @@ Flags: If this flag is set, Lighthouse will always prefer blocks constructed by builders, regardless of payload value. --produce-block-v3 - Enable block production via the block v3 endpoint for this validator - client. This should only be enabled when paired with a beacon node - that has this endpoint implemented. This flag will be enabled by - default in future. + This flag is deprecated and is no longer in use. --unencrypted-http-transport This is a safety flag to ensure that the user is aware that the http transport is unencrypted and using a custom HTTP address is unsafe. diff --git a/lighthouse/tests/validator_client.rs b/lighthouse/tests/validator_client.rs index 1fc5d22698..cb16ca4792 100644 --- a/lighthouse/tests/validator_client.rs +++ b/lighthouse/tests/validator_client.rs @@ -423,19 +423,12 @@ fn no_doppelganger_protection_flag() { .run() .with_config(|config| assert!(!config.enable_doppelganger_protection)); } -#[test] -fn produce_block_v3_flag() { - CommandLineTest::new() - .flag("produce-block-v3", None) - .run() - .with_config(|config| assert!(config.produce_block_v3)); -} #[test] -fn no_produce_block_v3_flag() { - CommandLineTest::new() - .run() - .with_config(|config| assert!(!config.produce_block_v3)); +fn produce_block_v3_flag() { + // The flag is DEPRECATED but providing it should not trigger an error. + // We can delete this test when deleting the flag entirely. 
+ CommandLineTest::new().flag("produce-block-v3", None).run(); } #[test] diff --git a/testing/simulator/src/cli.rs b/testing/simulator/src/cli.rs index a82c8b8577..3d61dcde74 100644 --- a/testing/simulator/src/cli.rs +++ b/testing/simulator/src/cli.rs @@ -77,7 +77,6 @@ pub fn cli_app() -> Command { ) .arg( Arg::new("vc-count") - .short('c') .long("vc-count") .action(ArgAction::Set) .default_value("3") diff --git a/validator_client/src/block_service.rs b/validator_client/src/block_service.rs index cb3636135c..af11d82eb5 100644 --- a/validator_client/src/block_service.rs +++ b/validator_client/src/block_service.rs @@ -323,105 +323,32 @@ impl BlockService { ) } - if self.validator_store.produce_block_v3() { - for validator_pubkey in proposers { - let builder_boost_factor = self.get_builder_boost_factor(&validator_pubkey); - let service = self.clone(); - let log = log.clone(); - self.inner.context.executor.spawn( - async move { - let result = service - .publish_block_v3(slot, validator_pubkey, builder_boost_factor) - .await; + for validator_pubkey in proposers { + let builder_boost_factor = self.get_builder_boost_factor(&validator_pubkey); + let service = self.clone(); + let log = log.clone(); + self.inner.context.executor.spawn( + async move { + let result = service + .publish_block(slot, validator_pubkey, builder_boost_factor) + .await; - match result { - Ok(_) => {} - Err(BlockError::Recoverable(e)) | Err(BlockError::Irrecoverable(e)) => { - error!( - log, - "Error whilst producing block"; - "error" => ?e, - "block_slot" => ?slot, - "info" => "block v3 proposal failed, this error may or may not result in a missed block" - ); - } + match result { + Ok(_) => {} + Err(BlockError::Recoverable(e)) | Err(BlockError::Irrecoverable(e)) => { + error!( + log, + "Error whilst producing block"; + "error" => ?e, + "block_slot" => ?slot, + "info" => "block v3 proposal failed, this error may or may not result in a missed block" + ); } - }, - "block service", - ) - } - } else { - 
for validator_pubkey in proposers { - let builder_proposals = self - .validator_store - .get_builder_proposals(&validator_pubkey); - let service = self.clone(); - let log = log.clone(); - self.inner.context.executor.spawn( - async move { - if builder_proposals { - let result = service - .publish_block(slot, validator_pubkey, true) - .await; - - match result { - Err(BlockError::Recoverable(e)) => { - error!( - log, - "Error whilst producing block"; - "error" => ?e, - "block_slot" => ?slot, - "info" => "blinded proposal failed, attempting full block" - ); - if let Err(e) = service - .publish_block(slot, validator_pubkey, false) - .await - { - // Log a `crit` since a full block - // (non-builder) proposal failed. - crit!( - log, - "Error whilst producing block"; - "error" => ?e, - "block_slot" => ?slot, - "info" => "full block attempted after a blinded failure", - ); - } - } - Err(BlockError::Irrecoverable(e)) => { - // Only log an `error` since it's common for - // builders to timeout on their response, only - // to publish the block successfully themselves. - error!( - log, - "Error whilst producing block"; - "error" => ?e, - "block_slot" => ?slot, - "info" => "this error may or may not result in a missed block", - ) - } - Ok(_) => {} - }; - } else if let Err(e) = service - .publish_block(slot, validator_pubkey, false) - .await - { - // Log a `crit` since a full block (non-builder) - // proposal failed. 
- crit!( - log, - "Error whilst producing block"; - "message" => ?e, - "block_slot" => ?slot, - "info" => "proposal did not use a builder", - ); - } - }, - "block service", - ) - } + } + }, + "block service", + ) } - Ok(()) } @@ -513,7 +440,7 @@ impl BlockService { Ok(()) } - async fn publish_block_v3( + async fn publish_block( self, slot: Slot, validator_pubkey: PublicKeyBytes, @@ -584,7 +511,7 @@ impl BlockService { &metrics::BLOCK_SERVICE_TIMES, &[metrics::BEACON_BLOCK_HTTP_GET], ); - let block_response = Self::get_validator_block_v3( + Self::get_validator_block( beacon_node, slot, randao_reveal_ref, @@ -599,103 +526,7 @@ impl BlockService { "Error from beacon node when producing block: {:?}", e )) - }); - - Ok::<_, BlockError>(block_response) - }, - ) - .await??; - - self_ref - .sign_and_publish_block( - proposer_fallback, - slot, - graffiti, - &validator_pubkey, - unsigned_block, - ) - .await?; - - Ok(()) - } - - /// Produce a block at the given slot for validator_pubkey - async fn publish_block( - &self, - slot: Slot, - validator_pubkey: PublicKeyBytes, - builder_proposal: bool, - ) -> Result<(), BlockError> { - let log = self.context.log(); - let _timer = - metrics::start_timer_vec(&metrics::BLOCK_SERVICE_TIMES, &[metrics::BEACON_BLOCK]); - - let randao_reveal = match self - .validator_store - .randao_reveal(validator_pubkey, slot.epoch(E::slots_per_epoch())) - .await - { - Ok(signature) => signature.into(), - Err(ValidatorStoreError::UnknownPubkey(pubkey)) => { - // A pubkey can be missing when a validator was recently removed - // via the API. 
- warn!( - log, - "Missing pubkey for block"; - "info" => "a validator may have recently been removed from this VC", - "pubkey" => ?pubkey, - "slot" => ?slot - ); - return Ok(()); - } - Err(e) => { - return Err(BlockError::Recoverable(format!( - "Unable to sign block: {:?}", - e - ))) - } - }; - - let graffiti = determine_graffiti( - &validator_pubkey, - log, - self.graffiti_file.clone(), - self.validator_store.graffiti(&validator_pubkey), - self.graffiti, - ); - - let randao_reveal_ref = &randao_reveal; - let self_ref = &self; - let proposer_index = self.validator_store.validator_index(&validator_pubkey); - let proposer_fallback = ProposerFallback { - beacon_nodes: self.beacon_nodes.clone(), - proposer_nodes: self.proposer_nodes.clone(), - }; - - info!( - log, - "Requesting unsigned block"; - "slot" => slot.as_u64(), - ); - - // Request block from first responsive beacon node. - // - // Try the proposer nodes last, since it's likely that they don't have a - // great view of attestations on the network. 
- let unsigned_block = proposer_fallback - .request_proposers_last( - RequireSynced::No, - OfflineOnFailure::Yes, - move |beacon_node| { - Self::get_validator_block( - beacon_node, - slot, - randao_reveal_ref, - graffiti, - proposer_index, - builder_proposal, - log, - ) + }) }, ) .await?; @@ -745,7 +576,7 @@ impl BlockService { Ok::<_, BlockError>(()) } - async fn get_validator_block_v3( + async fn get_validator_block( beacon_node: &BeaconNodeHttpClient, slot: Slot, randao_reveal_ref: &SignatureBytes, @@ -788,65 +619,6 @@ impl BlockService { Ok::<_, BlockError>(unsigned_block) } - async fn get_validator_block( - beacon_node: &BeaconNodeHttpClient, - slot: Slot, - randao_reveal_ref: &SignatureBytes, - graffiti: Option, - proposer_index: Option, - builder_proposal: bool, - log: &Logger, - ) -> Result, BlockError> { - let unsigned_block = if !builder_proposal { - let _get_timer = metrics::start_timer_vec( - &metrics::BLOCK_SERVICE_TIMES, - &[metrics::BEACON_BLOCK_HTTP_GET], - ); - UnsignedBlock::Full( - beacon_node - .get_validator_blocks::(slot, randao_reveal_ref, graffiti.as_ref()) - .await - .map_err(|e| { - BlockError::Recoverable(format!( - "Error from beacon node when producing block: {:?}", - e - )) - })? - .data, - ) - } else { - let _get_timer = metrics::start_timer_vec( - &metrics::BLOCK_SERVICE_TIMES, - &[metrics::BLINDED_BEACON_BLOCK_HTTP_GET], - ); - UnsignedBlock::Blinded( - beacon_node - .get_validator_blinded_blocks::(slot, randao_reveal_ref, graffiti.as_ref()) - .await - .map_err(|e| { - BlockError::Recoverable(format!( - "Error from beacon node when producing block: {:?}", - e - )) - })? - .data, - ) - }; - - info!( - log, - "Received unsigned block"; - "slot" => slot.as_u64(), - ); - if proposer_index != Some(unsigned_block.proposer_index()) { - return Err(BlockError::Recoverable( - "Proposer index does not match block proposer. 
Beacon chain re-orged".to_string(), - )); - } - - Ok::<_, BlockError>(unsigned_block) - } - /// Returns the builder boost factor of the given public key. /// The priority order for fetching this value is: /// diff --git a/validator_client/src/cli.rs b/validator_client/src/cli.rs index 24f9f41415..f84260a924 100644 --- a/validator_client/src/cli.rs +++ b/validator_client/src/cli.rs @@ -170,10 +170,7 @@ pub fn cli_app() -> Command { .arg( Arg::new("produce-block-v3") .long("produce-block-v3") - .help("Enable block production via the block v3 endpoint for this validator client. \ - This should only be enabled when paired with a beacon node \ - that has this endpoint implemented. This flag will be enabled by default in \ - future.") + .help("This flag is deprecated and is no longer in use.") .action(ArgAction::SetTrue) .help_heading(FLAG_HEADER) .display_order(0) diff --git a/validator_client/src/config.rs b/validator_client/src/config.rs index 7378d924b7..204c5b8b6c 100644 --- a/validator_client/src/config.rs +++ b/validator_client/src/config.rs @@ -79,8 +79,6 @@ pub struct Config { pub validator_registration_batch_size: usize, /// Enable slashing protection even while using web3signer keys. pub enable_web3signer_slashing_protection: bool, - /// Enables block production via the block v3 endpoint. This configuration option can be removed post deneb. - pub produce_block_v3: bool, /// Specifies the boost factor, a percentage multiplier to apply to the builder's payload value. pub builder_boost_factor: Option, /// If true, Lighthouse will prefer builder proposals, if available. 
@@ -130,7 +128,6 @@ impl Default for Config { enable_latency_measurement_service: true, validator_registration_batch_size: 500, enable_web3signer_slashing_protection: true, - produce_block_v3: false, builder_boost_factor: None, prefer_builder_proposals: false, distributed: false, @@ -380,14 +377,18 @@ impl Config { config.builder_proposals = true; } - if cli_args.get_flag("produce-block-v3") { - config.produce_block_v3 = true; - } - if cli_args.get_flag("prefer-builder-proposals") { config.prefer_builder_proposals = true; } + if cli_args.get_flag("produce-block-v3") { + warn!( + log, + "produce-block-v3 flag"; + "note" => "deprecated flag has no effect and should be removed" + ); + } + config.gas_limit = cli_args .get_one::("gas-limit") .map(|gas_limit| { diff --git a/validator_client/src/http_metrics/metrics.rs b/validator_client/src/http_metrics/metrics.rs index 8284ca3e94..cc5b03bb19 100644 --- a/validator_client/src/http_metrics/metrics.rs +++ b/validator_client/src/http_metrics/metrics.rs @@ -11,7 +11,6 @@ pub const UNREGISTERED: &str = "unregistered"; pub const FULL_UPDATE: &str = "full_update"; pub const BEACON_BLOCK: &str = "beacon_block"; pub const BEACON_BLOCK_HTTP_GET: &str = "beacon_block_http_get"; -pub const BLINDED_BEACON_BLOCK_HTTP_GET: &str = "blinded_beacon_block_http_get"; pub const BEACON_BLOCK_HTTP_POST: &str = "beacon_block_http_post"; pub const BLINDED_BEACON_BLOCK_HTTP_POST: &str = "blinded_beacon_block_http_post"; pub const ATTESTATIONS: &str = "attestations"; diff --git a/validator_client/src/validator_store.rs b/validator_client/src/validator_store.rs index 574ae7449d..8a9e125936 100644 --- a/validator_client/src/validator_store.rs +++ b/validator_client/src/validator_store.rs @@ -71,7 +71,6 @@ pub struct ValidatorStore { gas_limit: Option, builder_proposals: bool, enable_web3signer_slashing_protection: bool, - produce_block_v3: bool, prefer_builder_proposals: bool, builder_boost_factor: Option, task_executor: TaskExecutor, @@ -106,7 
+105,6 @@ impl ValidatorStore { gas_limit: config.gas_limit, builder_proposals: config.builder_proposals, enable_web3signer_slashing_protection: config.enable_web3signer_slashing_protection, - produce_block_v3: config.produce_block_v3, prefer_builder_proposals: config.prefer_builder_proposals, builder_boost_factor: config.builder_boost_factor, task_executor, @@ -321,10 +319,6 @@ impl ValidatorStore { self.spec.fork_at_epoch(epoch) } - pub fn produce_block_v3(&self) -> bool { - self.produce_block_v3 - } - /// Returns a `SigningMethod` for `validator_pubkey` *only if* that validator is considered safe /// by doppelganger protection. fn doppelganger_checked_signing_method( From f4ddc45914d566144f00ff93e086e0bddd0ae974 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Fri, 26 Jul 2024 15:53:21 +1000 Subject: [PATCH 08/43] Use upgradable read lock for pubkey cache (#6190) * Use upgradable read for pubkey cache --- beacon_node/beacon_chain/src/beacon_chain.rs | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 6d2973021a..095429faa5 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -3481,9 +3481,15 @@ impl BeaconChain { // would be difficult to check that they all lock fork choice first. let mut ops = { let _timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_PUBKEY_CACHE_LOCK); - self.validator_pubkey_cache - .write() - .import_new_pubkeys(&state)? + let pubkey_cache = self.validator_pubkey_cache.upgradable_read(); + + // Only take a write lock if there are new keys to import. + if state.validators().len() > pubkey_cache.len() { + parking_lot::RwLockUpgradableReadGuard::upgrade(pubkey_cache) + .import_new_pubkeys(&state)? + } else { + vec![] + } }; // Apply the state to the attester cache, only if it is from the previous epoch or later. 
From a3b1ef31294ffce70278408131d82d69e92fe9e6 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Mon, 29 Jul 2024 15:52:07 +1000 Subject: [PATCH 09/43] Visualisation logging for sync batch states (#6034) * Add visualization for batch states * Replace icons with emojis * Reviewers comments * Change empty emoji and improve docs comments * Fix lints * Move to letters rather than emojis * Replace 'V' with 'v'. Cargo update * Merge latest unstable * Improve docs around visualisation * Merge branch 'unstable' into sync-batch-state --- Cargo.lock | 8 +-- .../network/src/sync/range_sync/batch.rs | 21 +++++++ .../network/src/sync/range_sync/chain.rs | 60 ++++++++++++++++--- 3 files changed, 78 insertions(+), 11 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 605cb4d2a5..818b6adc55 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1021,9 +1021,9 @@ dependencies = [ [[package]] name = "blst" -version = "0.3.12" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62dc83a094a71d43eeadd254b1ec2d24cb6a0bb6cadce00df51f0db594711a32" +checksum = "4378725facc195f1a538864863f6de233b500a8862747e7f165078a419d5e874" dependencies = [ "cc", "glob", @@ -5751,9 +5751,9 @@ dependencies = [ [[package]] name = "object" -version = "0.36.1" +version = "0.36.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "081b846d1d56ddfc18fdf1a922e4f6e07a11768ea1b92dec44e42b72712ccfce" +checksum = "3f203fa8daa7bb185f760ae12bd8e097f63d17041dcdcaf675ac54cdf863170e" dependencies = [ "memchr", ] diff --git a/beacon_node/network/src/sync/range_sync/batch.rs b/beacon_node/network/src/sync/range_sync/batch.rs index 49e3ac3a81..7f9629740b 100644 --- a/beacon_node/network/src/sync/range_sync/batch.rs +++ b/beacon_node/network/src/sync/range_sync/batch.rs @@ -463,6 +463,11 @@ impl BatchInfo { } } } + + // Visualizes the state of this batch using state::visualize() + pub fn visualize(&self) -> char { + self.state.visualize() + } } /// Represents 
a peer's attempt and providing the result for this batch. @@ -539,3 +544,19 @@ impl std::fmt::Debug for BatchState { } } } + +impl BatchState { + /// Creates a character representation/visualization for the batch state to display in logs for quicker and + /// easier recognition + fn visualize(&self) -> char { + match self { + BatchState::Downloading(..) => 'D', + BatchState::Processing(_) => 'P', + BatchState::AwaitingValidation(_) => 'v', + BatchState::AwaitingDownload => 'd', + BatchState::Failed => 'F', + BatchState::AwaitingProcessing(..) => 'p', + BatchState::Poisoned => 'X', + } + } +} diff --git a/beacon_node/network/src/sync/range_sync/chain.rs b/beacon_node/network/src/sync/range_sync/chain.rs index d63b2f95d8..556b4194dd 100644 --- a/beacon_node/network/src/sync/range_sync/chain.rs +++ b/beacon_node/network/src/sync/range_sync/chain.rs @@ -277,7 +277,7 @@ impl SyncingChain { let awaiting_batches = batch_id .saturating_sub(self.optimistic_start.unwrap_or(self.processing_target)) / EPOCHS_PER_BATCH; - debug!(self.log, "Completed batch received"; "epoch" => batch_id, "blocks" => received, "awaiting_batches" => awaiting_batches); + debug!(self.log, "Batch downloaded"; "epoch" => batch_id, "blocks" => received, "batch_state" => self.visualize_batch_state(), "awaiting_batches" => awaiting_batches); // pre-emptively request more blocks from peers whilst we process current blocks, self.request_batches(network)?; @@ -460,6 +460,7 @@ impl SyncingChain { ) -> ProcessingResult { // the first two cases are possible if the chain advances while waiting for a processing // result + let batch_state = self.visualize_batch_state(); let batch = match &self.current_processing_batch { Some(processing_id) if *processing_id != batch_id => { debug!(self.log, "Unexpected batch result"; @@ -492,7 +493,7 @@ impl SyncingChain { // Log the process result and the batch for debugging purposes. 
debug!(self.log, "Batch processing result"; "result" => ?result, &batch, - "batch_epoch" => batch_id, "client" => %network.client_type(&peer)); + "batch_epoch" => batch_id, "client" => %network.client_type(&peer), "batch_state" => batch_state); // We consider three cases. Batch was successfully processed, Batch failed processing due // to a faulty peer, or batch failed processing but the peer can't be deemed faulty. @@ -859,6 +860,7 @@ impl SyncingChain { peer_id: &PeerId, request_id: Id, ) -> ProcessingResult { + let batch_state = self.visualize_batch_state(); if let Some(batch) = self.batches.get_mut(&batch_id) { // A batch could be retried without the peer failing the request (disconnecting/ // sending an error /timeout) if the peer is removed from the chain for other @@ -870,7 +872,8 @@ impl SyncingChain { "batch_epoch" => batch_id, "batch_state" => ?batch.state(), "peer_id" => %peer_id, - "request_id" => %request_id + "request_id" => %request_id, + "batch_state" => batch_state ); return Ok(KeepChain); } @@ -880,7 +883,8 @@ impl SyncingChain { "batch_epoch" => batch_id, "batch_state" => ?batch.state(), "peer_id" => %peer_id, - "request_id" => %request_id + "request_id" => %request_id, + "batch_state" => batch_state ); if let Some(active_requests) = self.peers.get_mut(peer_id) { active_requests.remove(&batch_id); @@ -898,7 +902,8 @@ impl SyncingChain { "Batch not found"; "batch_epoch" => batch_id, "peer_id" => %peer_id, - "request_id" => %request_id + "request_id" => %request_id, + "batch_state" => batch_state ); // this could be an error for an old batch, removed when the chain advances Ok(KeepChain) @@ -948,6 +953,7 @@ impl SyncingChain { batch_id: BatchId, peer: PeerId, ) -> ProcessingResult { + let batch_state = self.visualize_batch_state(); if let Some(batch) = self.batches.get_mut(&batch_id) { let (request, batch_type) = batch.to_blocks_by_range_request(); match network.blocks_and_blobs_by_range_request( @@ -967,9 +973,9 @@ impl SyncingChain { .map(|epoch| 
epoch == batch_id) .unwrap_or(false) { - debug!(self.log, "Requesting optimistic batch"; "epoch" => batch_id, &batch); + debug!(self.log, "Requesting optimistic batch"; "epoch" => batch_id, &batch, "batch_state" => batch_state); } else { - debug!(self.log, "Requesting batch"; "epoch" => batch_id, &batch); + debug!(self.log, "Requesting batch"; "epoch" => batch_id, &batch, "batch_state" => batch_state); } // register the batch for this peer return self @@ -1130,6 +1136,46 @@ impl SyncingChain { } } } + + /// Creates a string visualization of the current state of the chain, to make it easier for debugging and understanding + /// where sync is up to from glancing at the logs. + /// + /// This produces a string of the form: [D,E,E,E,E] + /// to indicate the current buffer state of the chain. The symbols are defined on each of the + /// batch states. See [BatchState::visualize] for symbol definitions. + fn visualize_batch_state(&self) -> String { + let mut visualization_string = String::with_capacity((BATCH_BUFFER_SIZE * 3) as usize); + + // Start of the block + visualization_string.push('['); + + for mut batch_index in 0..BATCH_BUFFER_SIZE { + if let Some(batch) = self + .batches + .get(&(self.processing_target + batch_index as u64 * EPOCHS_PER_BATCH)) + { + visualization_string.push(batch.visualize()); + if batch_index != BATCH_BUFFER_SIZE { + // Add a comma in between elements + visualization_string.push(','); + } + } else { + // No batch exists, it is on our list to be downloaded + // Fill in the rest of the gaps + while batch_index < BATCH_BUFFER_SIZE { + visualization_string.push('E'); + // Add a comma between the empty batches + if batch_index < BATCH_BUFFER_SIZE.saturating_sub(1) { + visualization_string.push(',') + } + batch_index += 1; + } + break; + } + } + visualization_string.push(']'); + visualization_string + } } impl slog::KV for &mut SyncingChain { From 19b3ab39ee2d131abe3226ab58aeaaa73b7f99fb Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Mon, 29 Jul 
2024 15:52:10 +1000 Subject: [PATCH 10/43] Data column gossip validation and error handling (#6181) * Add gossip verification and error handling. * Merge branch 'unstable' into das-gossip-validation * Add inclusion proof verification and some renames for consistency --- beacon_node/beacon_chain/src/beacon_chain.rs | 6 +- beacon_node/beacon_chain/src/builder.rs | 4 +- .../src/data_column_verification.rs | 323 +++++++++++++++++- beacon_node/beacon_chain/src/errors.rs | 6 +- beacon_node/beacon_chain/src/lib.rs | 2 +- ..._sidecars.rs => observed_data_sidecars.rs} | 130 ++++--- .../gossip_methods.rs | 82 ++++- 7 files changed, 494 insertions(+), 59 deletions(-) rename beacon_node/beacon_chain/src/{observed_blob_sidecars.rs => observed_data_sidecars.rs} (77%) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 095429faa5..c6ed979d68 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -52,8 +52,8 @@ use crate::observed_aggregates::{ use crate::observed_attesters::{ ObservedAggregators, ObservedAttesters, ObservedSyncAggregators, ObservedSyncContributors, }; -use crate::observed_blob_sidecars::ObservedBlobSidecars; use crate::observed_block_producers::ObservedBlockProducers; +use crate::observed_data_sidecars::ObservedDataSidecars; use crate::observed_operations::{ObservationOutcome, ObservedOperations}; use crate::observed_slashable::ObservedSlashable; use crate::persisted_beacon_chain::{PersistedBeaconChain, DUMMY_CANONICAL_HEAD_BLOCK_ROOT}; @@ -415,7 +415,9 @@ pub struct BeaconChain { /// Maintains a record of which validators have proposed blocks for each slot. pub observed_block_producers: RwLock>, /// Maintains a record of blob sidecars seen over the gossip network. - pub observed_blob_sidecars: RwLock>, + pub observed_blob_sidecars: RwLock>>, + /// Maintains a record of column sidecars seen over the gossip network. 
+ pub observed_column_sidecars: RwLock>>, /// Maintains a record of slashable message seen over the gossip network or RPC. pub observed_slashable: RwLock>, /// Maintains a record of which validators have submitted voluntary exits. diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 14e61e1265..c86e35980b 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -11,6 +11,7 @@ use crate::graffiti_calculator::{GraffitiCalculator, GraffitiOrigin}; use crate::head_tracker::HeadTracker; use crate::light_client_server_cache::LightClientServerCache; use crate::migrate::{BackgroundMigrator, MigratorConfig}; +use crate::observed_data_sidecars::ObservedDataSidecars; use crate::persisted_beacon_chain::PersistedBeaconChain; use crate::shuffling_cache::{BlockShufflingIds, ShufflingCache}; use crate::validator_monitor::{ValidatorMonitor, ValidatorMonitorConfig}; @@ -917,7 +918,8 @@ where observed_sync_aggregators: <_>::default(), // TODO: allow for persisting and loading the pool from disk. 
observed_block_producers: <_>::default(), - observed_blob_sidecars: <_>::default(), + observed_column_sidecars: RwLock::new(ObservedDataSidecars::new(self.spec.clone())), + observed_blob_sidecars: RwLock::new(ObservedDataSidecars::new(self.spec.clone())), observed_slashable: <_>::default(), observed_voluntary_exits: <_>::default(), observed_proposer_slashings: <_>::default(), diff --git a/beacon_node/beacon_chain/src/data_column_verification.rs b/beacon_node/beacon_chain/src/data_column_verification.rs index 2e88da8f6a..53e83a8061 100644 --- a/beacon_node/beacon_chain/src/data_column_verification.rs +++ b/beacon_node/beacon_chain/src/data_column_verification.rs @@ -1,13 +1,21 @@ -use crate::block_verification::{process_block_slash_info, BlockSlashInfo}; +use crate::block_verification::{ + cheap_state_advance_to_obtain_committees, get_validator_pubkey_cache, process_block_slash_info, + BlockSlashInfo, +}; use crate::{BeaconChain, BeaconChainError, BeaconChainTypes}; use derivative::Derivative; +use fork_choice::ProtoBlock; use kzg::{Error as KzgError, Kzg}; +use proto_array::Block; +use slasher::test_utils::E; +use slog::debug; +use slot_clock::SlotClock; use ssz_derive::{Decode, Encode}; use std::sync::Arc; use types::data_column_sidecar::{ColumnIndex, DataColumnIdentifier}; use types::{ - BeaconStateError, DataColumnSidecar, EthSpec, Hash256, RuntimeVariableList, - SignedBeaconBlockHeader, Slot, + BeaconStateError, ChainSpec, DataColumnSidecar, DataColumnSubnetId, EthSpec, Hash256, + RuntimeVariableList, SignedBeaconBlockHeader, Slot, }; /// An error occurred while validating a gossip data column. @@ -54,6 +62,75 @@ pub enum GossipDataColumnError { /// /// The data column sidecar is invalid and the peer is faulty. InvalidKzgProof(kzg::Error), + /// The column was gossiped over an incorrect subnet. + /// + /// ## Peer scoring + /// + /// The column is invalid or the peer is faulty. 
+ InvalidSubnetId { received: u64, expected: u64 }, + /// The column sidecar is from a slot that is later than the current slot (with respect to the + /// gossip clock disparity). + /// + /// ## Peer scoring + /// + /// Assuming the local clock is correct, the peer has sent an invalid message. + FutureSlot { + message_slot: Slot, + latest_permissible_slot: Slot, + }, + /// The sidecar corresponds to a slot older than the finalized head slot. + /// + /// ## Peer scoring + /// + /// It's unclear if this column is valid, but this column is for a finalized slot and is + /// therefore useless to us. + PastFinalizedSlot { + column_slot: Slot, + finalized_slot: Slot, + }, + /// The pubkey cache timed out. + /// + /// ## Peer scoring + /// + /// The column sidecar may be valid, this is an internal error. + PubkeyCacheTimeout, + /// The proposer index specified in the sidecar does not match the locally computed + /// proposer index. + /// + /// ## Peer scoring + /// + /// The column is invalid and the peer is faulty. + ProposerIndexMismatch { sidecar: usize, local: usize }, + /// The provided columns's parent block is unknown. + /// + /// ## Peer scoring + /// + /// We cannot process the columns without validating its parent, the peer isn't necessarily faulty. + ParentUnknown { parent_root: Hash256 }, + /// The column conflicts with finalization, no need to propagate. + /// + /// ## Peer scoring + /// + /// It's unclear if this column is valid, but it conflicts with finality and shouldn't be + /// imported. + NotFinalizedDescendant { block_parent_root: Hash256 }, + /// Invalid kzg commitment inclusion proof + /// + /// ## Peer scoring + /// + /// The column sidecar is invalid and the peer is faulty + InvalidInclusionProof, + /// A column has already been seen for the given `(sidecar.block_root, sidecar.index)` tuple + /// over gossip or no gossip sources. + /// + /// ## Peer scoring + /// + /// The peer isn't faulty, but we do not forward it over gossip. 
+ PriorKnown { + proposer: u64, + slot: Slot, + index: ColumnIndex, + }, } impl From for GossipDataColumnError { @@ -183,18 +260,254 @@ where pub fn validate_data_column_sidecar_for_gossip( data_column: Arc>, - _subnet: u64, + subnet: u64, chain: &BeaconChain, ) -> Result, GossipDataColumnError> { - // TODO(das): implement gossip verification + let column_slot = data_column.slot(); + + verify_index_matches_subnet(&data_column, subnet, &chain.spec)?; + verify_sidecar_not_from_future_slot(chain, column_slot)?; + verify_slot_greater_than_latest_finalized_slot(chain, column_slot)?; + verify_is_first_sidecar(chain, &data_column)?; + verify_column_inclusion_proof(&data_column)?; + let parent_block = verify_parent_block_and_finalized_descendant(data_column.clone(), chain)?; + verify_slot_higher_than_parent(&parent_block, column_slot)?; + verify_proposer_and_signature(&data_column, &parent_block, chain)?; let kzg = chain .kzg .clone() .ok_or(GossipDataColumnError::KzgNotInitialized)?; let kzg_verified_data_column = verify_kzg_for_data_column(data_column.clone(), &kzg) .map_err(GossipDataColumnError::InvalidKzgProof)?; + + chain + .observed_slashable + .write() + .observe_slashable( + column_slot, + data_column.block_proposer_index(), + data_column.block_root(), + ) + .map_err(|e| GossipDataColumnError::BeaconChainError(e.into()))?; + Ok(GossipVerifiedDataColumn { block_root: data_column.block_root(), data_column: kzg_verified_data_column, }) } + +// Verify that this is the first column sidecar received for the tuple: +// (block_header.slot, block_header.proposer_index, column_sidecar.index) +fn verify_is_first_sidecar( + chain: &BeaconChain, + data_column: &DataColumnSidecar, +) -> Result<(), GossipDataColumnError> { + if chain + .observed_column_sidecars + .read() + .proposer_is_known(data_column) + .map_err(|e| GossipDataColumnError::BeaconChainError(e.into()))? 
+ { + return Err(GossipDataColumnError::PriorKnown { + proposer: data_column.block_proposer_index(), + slot: data_column.slot(), + index: data_column.index, + }); + } + Ok(()) +} + +fn verify_column_inclusion_proof( + data_column: &DataColumnSidecar, +) -> Result<(), GossipDataColumnError> { + if !data_column.verify_inclusion_proof() { + return Err(GossipDataColumnError::InvalidInclusionProof); + } + Ok(()) +} + +fn verify_slot_higher_than_parent( + parent_block: &Block, + data_column_slot: Slot, +) -> Result<(), GossipDataColumnError> { + if parent_block.slot >= data_column_slot { + return Err(GossipDataColumnError::IsNotLaterThanParent { + data_column_slot, + parent_slot: parent_block.slot, + }); + } + Ok(()) +} + +fn verify_parent_block_and_finalized_descendant( + data_column: Arc>, + chain: &BeaconChain, +) -> Result { + let fork_choice = chain.canonical_head.fork_choice_read_lock(); + + // We have already verified that the column is past finalization, so we can + // just check fork choice for the block's parent. + let block_parent_root = data_column.block_parent_root(); + let Some(parent_block) = fork_choice.get_block(&block_parent_root) else { + return Err(GossipDataColumnError::ParentUnknown { + parent_root: block_parent_root, + }); + }; + + // Do not process a column that does not descend from the finalized root. + // We just loaded the parent_block, so we can be sure that it exists in fork choice. 
+ if !fork_choice.is_finalized_checkpoint_or_descendant(block_parent_root) { + return Err(GossipDataColumnError::NotFinalizedDescendant { block_parent_root }); + } + + Ok(parent_block) +} + +fn verify_proposer_and_signature( + data_column: &DataColumnSidecar, + parent_block: &ProtoBlock, + chain: &BeaconChain, +) -> Result<(), GossipDataColumnError> { + let column_slot = data_column.slot(); + let column_epoch = column_slot.epoch(E::slots_per_epoch()); + let column_index = data_column.index; + let block_root = data_column.block_root(); + let block_parent_root = data_column.block_parent_root(); + + let proposer_shuffling_root = + if parent_block.slot.epoch(T::EthSpec::slots_per_epoch()) == column_epoch { + parent_block + .next_epoch_shuffling_id + .shuffling_decision_block + } else { + parent_block.root + }; + + let proposer_opt = chain + .beacon_proposer_cache + .lock() + .get_slot::(proposer_shuffling_root, column_slot); + + let (proposer_index, fork) = if let Some(proposer) = proposer_opt { + (proposer.index, proposer.fork) + } else { + debug!( + chain.log, + "Proposer shuffling cache miss for column verification"; + "block_root" => %block_root, + "index" => %column_index, + ); + let (parent_state_root, mut parent_state) = chain + .store + .get_advanced_hot_state(block_parent_root, column_slot, parent_block.state_root) + .map_err(|e| GossipDataColumnError::BeaconChainError(e.into()))? 
+ .ok_or_else(|| { + BeaconChainError::DBInconsistent(format!( + "Missing state for parent block {block_parent_root:?}", + )) + })?; + + let state = cheap_state_advance_to_obtain_committees::<_, GossipDataColumnError>( + &mut parent_state, + Some(parent_state_root), + column_slot, + &chain.spec, + )?; + + let proposers = state.get_beacon_proposer_indices(&chain.spec)?; + let proposer_index = *proposers + .get(column_slot.as_usize() % T::EthSpec::slots_per_epoch() as usize) + .ok_or_else(|| BeaconChainError::NoProposerForSlot(column_slot))?; + + // Prime the proposer shuffling cache with the newly-learned value. + chain.beacon_proposer_cache.lock().insert( + column_epoch, + proposer_shuffling_root, + proposers, + state.fork(), + )?; + (proposer_index, state.fork()) + }; + + // Signature verify the signed block header. + let signature_is_valid = { + let pubkey_cache = get_validator_pubkey_cache(chain) + .map_err(|_| GossipDataColumnError::PubkeyCacheTimeout)?; + + let pubkey = pubkey_cache + .get(proposer_index) + .ok_or_else(|| GossipDataColumnError::UnknownValidator(proposer_index as u64))?; + let signed_block_header = &data_column.signed_block_header; + signed_block_header.verify_signature::( + pubkey, + &fork, + chain.genesis_validators_root, + &chain.spec, + ) + }; + + if !signature_is_valid { + return Err(GossipDataColumnError::ProposalSignatureInvalid); + } + + let column_proposer_index = data_column.block_proposer_index(); + if proposer_index != column_proposer_index as usize { + return Err(GossipDataColumnError::ProposerIndexMismatch { + sidecar: column_proposer_index as usize, + local: proposer_index, + }); + } + + Ok(()) +} + +fn verify_index_matches_subnet( + data_column: &DataColumnSidecar, + subnet: u64, + spec: &ChainSpec, +) -> Result<(), GossipDataColumnError> { + let expected_subnet: u64 = + DataColumnSubnetId::from_column_index::(data_column.index as usize, spec).into(); + if expected_subnet != subnet { + return 
Err(GossipDataColumnError::InvalidSubnetId { + received: subnet, + expected: expected_subnet, + }); + } + Ok(()) +} + +fn verify_slot_greater_than_latest_finalized_slot( + chain: &BeaconChain, + column_slot: Slot, +) -> Result<(), GossipDataColumnError> { + let latest_finalized_slot = chain + .head() + .finalized_checkpoint() + .epoch + .start_slot(T::EthSpec::slots_per_epoch()); + if column_slot <= latest_finalized_slot { + return Err(GossipDataColumnError::PastFinalizedSlot { + column_slot, + finalized_slot: latest_finalized_slot, + }); + } + Ok(()) +} + +fn verify_sidecar_not_from_future_slot( + chain: &BeaconChain, + column_slot: Slot, +) -> Result<(), GossipDataColumnError> { + let latest_permissible_slot = chain + .slot_clock + .now_with_future_tolerance(chain.spec.maximum_gossip_clock_disparity()) + .ok_or(BeaconChainError::UnableToReadSlot)?; + if column_slot > latest_permissible_slot { + return Err(GossipDataColumnError::FutureSlot { + message_slot: column_slot, + latest_permissible_slot, + }); + } + Ok(()) +} diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 819de1f5c1..1e3d67f9d7 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -9,8 +9,8 @@ use crate::migrate::PruningError; use crate::naive_aggregation_pool::Error as NaiveAggregationError; use crate::observed_aggregates::Error as ObservedAttestationsError; use crate::observed_attesters::Error as ObservedAttestersError; -use crate::observed_blob_sidecars::Error as ObservedBlobSidecarsError; use crate::observed_block_producers::Error as ObservedBlockProducersError; +use crate::observed_data_sidecars::Error as ObservedDataSidecarsError; use execution_layer::PayloadStatus; use fork_choice::ExecutionStatus; use futures::channel::mpsc::TrySendError; @@ -100,7 +100,7 @@ pub enum BeaconChainError { ObservedAttestationsError(ObservedAttestationsError), ObservedAttestersError(ObservedAttestersError), 
ObservedBlockProducersError(ObservedBlockProducersError), - ObservedBlobSidecarsError(ObservedBlobSidecarsError), + ObservedDataSidecarsError(ObservedDataSidecarsError), AttesterCacheError(AttesterCacheError), PruningError(PruningError), ArithError(ArithError), @@ -238,7 +238,7 @@ easy_from_to!(NaiveAggregationError, BeaconChainError); easy_from_to!(ObservedAttestationsError, BeaconChainError); easy_from_to!(ObservedAttestersError, BeaconChainError); easy_from_to!(ObservedBlockProducersError, BeaconChainError); -easy_from_to!(ObservedBlobSidecarsError, BeaconChainError); +easy_from_to!(ObservedDataSidecarsError, BeaconChainError); easy_from_to!(AttesterCacheError, BeaconChainError); easy_from_to!(BlockSignatureVerifierError, BeaconChainError); easy_from_to!(PruningError, BeaconChainError); diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 5f3ccac4e4..7bfb5b08be 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -42,8 +42,8 @@ pub mod migrate; mod naive_aggregation_pool; pub mod observed_aggregates; mod observed_attesters; -mod observed_blob_sidecars; pub mod observed_block_producers; +mod observed_data_sidecars; pub mod observed_operations; mod observed_slashable; pub mod otb_verification_service; diff --git a/beacon_node/beacon_chain/src/observed_blob_sidecars.rs b/beacon_node/beacon_chain/src/observed_data_sidecars.rs similarity index 77% rename from beacon_node/beacon_chain/src/observed_blob_sidecars.rs rename to beacon_node/beacon_chain/src/observed_data_sidecars.rs index 7d7f490ebb..601241dd8a 100644 --- a/beacon_node/beacon_chain/src/observed_blob_sidecars.rs +++ b/beacon_node/beacon_chain/src/observed_data_sidecars.rs @@ -6,20 +6,63 @@ use crate::observed_block_producers::ProposalKey; use std::collections::{HashMap, HashSet}; use std::marker::PhantomData; -use types::{BlobSidecar, EthSpec, Slot}; +use types::{BlobSidecar, ChainSpec, DataColumnSidecar, EthSpec, Slot}; 
#[derive(Debug, PartialEq)] pub enum Error { - /// The slot of the provided `BlobSidecar` is prior to finalization and should not have been provided + /// The slot of the provided `ObservableDataSidecar` is prior to finalization and should not have been provided /// to this function. This is an internal error. - FinalizedBlob { slot: Slot, finalized_slot: Slot }, - /// The blob sidecar contains an invalid blob index, the blob sidecar is invalid. - /// Note: The invalid blob should have been caught and flagged as an error much before reaching + FinalizedDataSidecar { slot: Slot, finalized_slot: Slot }, + /// The data sidecar contains an invalid index, the data sidecar is invalid. + /// Note: The invalid data should have been caught and flagged as an error much before reaching /// here. - InvalidBlobIndex(u64), + InvalidDataIndex(u64), } -/// Maintains a cache of seen `BlobSidecar`s that are received over gossip +pub trait ObservableDataSidecar { + fn slot(&self) -> Slot; + fn block_proposer_index(&self) -> u64; + fn index(&self) -> u64; + fn max_num_of_items(spec: &ChainSpec) -> usize; +} + +impl ObservableDataSidecar for BlobSidecar { + fn slot(&self) -> Slot { + self.slot() + } + + fn block_proposer_index(&self) -> u64 { + self.block_proposer_index() + } + + fn index(&self) -> u64 { + self.index + } + + fn max_num_of_items(_spec: &ChainSpec) -> usize { + E::max_blobs_per_block() + } +} + +impl ObservableDataSidecar for DataColumnSidecar { + fn slot(&self) -> Slot { + self.slot() + } + + fn block_proposer_index(&self) -> u64 { + self.block_proposer_index() + } + + fn index(&self) -> u64 { + self.index + } + + fn max_num_of_items(spec: &ChainSpec) -> usize { + spec.number_of_columns + } +} + +/// Maintains a cache of seen `ObservableDataSidecar`s that are received over gossip /// and have been gossip verified. /// /// The cache supports pruning based upon the finalized epoch. 
It does not automatically prune, you @@ -27,67 +70,65 @@ pub enum Error { /// /// Note: To prevent DoS attacks, this cache must include only items that have received some DoS resistance /// like checking the proposer signature. -pub struct ObservedBlobSidecars { +pub struct ObservedDataSidecars { finalized_slot: Slot, - /// Stores all received blob indices for a given `(ValidatorIndex, Slot)` tuple. + /// Stores all received data indices for a given `(ValidatorIndex, Slot)` tuple. items: HashMap>, - _phantom: PhantomData, + spec: ChainSpec, + _phantom: PhantomData, } -impl Default for ObservedBlobSidecars { +impl ObservedDataSidecars { /// Instantiates `Self` with `finalized_slot == 0`. - fn default() -> Self { + pub fn new(spec: ChainSpec) -> Self { Self { finalized_slot: Slot::new(0), items: HashMap::new(), + spec, _phantom: PhantomData, } } -} -impl ObservedBlobSidecars { - /// Observe the `blob_sidecar` at (`blob_sidecar.block_proposer_index, blob_sidecar.slot`). - /// This will update `self` so future calls to it indicate that this `blob_sidecar` is known. + /// Observe the `data_sidecar` at (`data_sidecar.block_proposer_index, data_sidecar.slot`). + /// This will update `self` so future calls to it indicate that this `data_sidecar` is known. /// - /// The supplied `blob_sidecar` **MUST** have completed proposer signature verification. - pub fn observe_sidecar(&mut self, blob_sidecar: &BlobSidecar) -> Result { - self.sanitize_blob_sidecar(blob_sidecar)?; + /// The supplied `data_sidecar` **MUST** have completed proposer signature verification. 
+ pub fn observe_sidecar(&mut self, data_sidecar: &T) -> Result { + self.sanitize_data_sidecar(data_sidecar)?; - let blob_indices = self + let data_indices = self .items .entry(ProposalKey { - slot: blob_sidecar.slot(), - proposer: blob_sidecar.block_proposer_index(), + slot: data_sidecar.slot(), + proposer: data_sidecar.block_proposer_index(), }) - .or_insert_with(|| HashSet::with_capacity(E::max_blobs_per_block())); - let did_not_exist = blob_indices.insert(blob_sidecar.index); + .or_insert_with(|| HashSet::with_capacity(T::max_num_of_items(&self.spec))); + let did_not_exist = data_indices.insert(data_sidecar.index()); Ok(!did_not_exist) } - /// Returns `true` if the `blob_sidecar` has already been observed in the cache within the prune window. - pub fn proposer_is_known(&self, blob_sidecar: &BlobSidecar) -> Result { - self.sanitize_blob_sidecar(blob_sidecar)?; + /// Returns `true` if the `data_sidecar` has already been observed in the cache within the prune window. + pub fn proposer_is_known(&self, data_sidecar: &T) -> Result { + self.sanitize_data_sidecar(data_sidecar)?; let is_known = self .items .get(&ProposalKey { - slot: blob_sidecar.slot(), - proposer: blob_sidecar.block_proposer_index(), + slot: data_sidecar.slot(), + proposer: data_sidecar.block_proposer_index(), }) - .map_or(false, |blob_indices| { - blob_indices.contains(&blob_sidecar.index) - }); + .map_or(false, |indices| indices.contains(&data_sidecar.index())); Ok(is_known) } - fn sanitize_blob_sidecar(&self, blob_sidecar: &BlobSidecar) -> Result<(), Error> { - if blob_sidecar.index >= E::max_blobs_per_block() as u64 { - return Err(Error::InvalidBlobIndex(blob_sidecar.index)); + fn sanitize_data_sidecar(&self, data_sidecar: &T) -> Result<(), Error> { + if data_sidecar.index() >= T::max_num_of_items(&self.spec) as u64 { + return Err(Error::InvalidDataIndex(data_sidecar.index())); } let finalized_slot = self.finalized_slot; - if finalized_slot > 0 && blob_sidecar.slot() <= finalized_slot { - return 
Err(Error::FinalizedBlob { - slot: blob_sidecar.slot(), + if finalized_slot > 0 && data_sidecar.slot() <= finalized_slot { + return Err(Error::FinalizedDataSidecar { + slot: data_sidecar.slot(), finalized_slot, }); } @@ -95,7 +136,7 @@ impl ObservedBlobSidecars { Ok(()) } - /// Prune `blob_sidecar` observations for slots less than or equal to the given slot. + /// Prune `data_sidecar` observations for slots less than or equal to the given slot. pub fn prune(&mut self, finalized_slot: Slot) { if finalized_slot == 0 { return; @@ -109,6 +150,7 @@ impl ObservedBlobSidecars { #[cfg(test)] mod tests { use super::*; + use crate::test_utils::test_spec; use bls::Hash256; use std::sync::Arc; use types::MainnetEthSpec; @@ -125,7 +167,8 @@ mod tests { #[test] fn pruning() { - let mut cache = ObservedBlobSidecars::default(); + let spec = test_spec::(); + let mut cache = ObservedDataSidecars::>::new(spec); assert_eq!(cache.finalized_slot, 0, "finalized slot is zero"); assert_eq!(cache.items.len(), 0, "no slots should be present"); @@ -200,7 +243,7 @@ mod tests { assert_eq!( cache.observe_sidecar(&block_b), - Err(Error::FinalizedBlob { + Err(Error::FinalizedDataSidecar { slot: E::slots_per_epoch().into(), finalized_slot: E::slots_per_epoch().into(), }), @@ -263,7 +306,8 @@ mod tests { #[test] fn simple_observations() { - let mut cache = ObservedBlobSidecars::default(); + let spec = test_spec::(); + let mut cache = ObservedDataSidecars::>::new(spec); // Slot 0, index 0 let proposer_index_a = 420; @@ -423,7 +467,7 @@ mod tests { let sidecar_d = get_blob_sidecar(0, proposer_index_a, invalid_index); assert_eq!( cache.observe_sidecar(&sidecar_d), - Err(Error::InvalidBlobIndex(invalid_index)), + Err(Error::InvalidDataIndex(invalid_index)), "cannot add an index > MaxBlobsPerBlock" ); } diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index 781c447f81..4c5c34bfd8 100644 --- 
a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -6,7 +6,7 @@ use crate::{ }; use beacon_chain::blob_verification::{GossipBlobError, GossipVerifiedBlob}; use beacon_chain::block_verification_types::AsBlock; -use beacon_chain::data_column_verification::GossipVerifiedDataColumn; +use beacon_chain::data_column_verification::{GossipDataColumnError, GossipVerifiedDataColumn}; use beacon_chain::store::Error; use beacon_chain::{ attestation_verification::{self, Error as AttnError, VerifiedAttestation}, @@ -621,7 +621,7 @@ impl NetworkBeaconProcessor { ); match self .chain - .verify_data_column_sidecar_for_gossip(column_sidecar, *subnet_id) + .verify_data_column_sidecar_for_gossip(column_sidecar.clone(), *subnet_id) { Ok(gossip_verified_data_column) => { metrics::inc_counter( @@ -656,8 +656,82 @@ impl NetworkBeaconProcessor { ) .await } - Err(_) => { - // TODO(das) implement gossip error handling + Err(err) => { + match err { + GossipDataColumnError::ParentUnknown { parent_root } => { + debug!( + self.log, + "Unknown parent hash for column"; + "action" => "requesting parent", + "block_root" => %block_root, + "parent_root" => %parent_root, + ); + self.send_sync_message(SyncMessage::UnknownParentDataColumn( + peer_id, + column_sidecar, + )); + } + GossipDataColumnError::KzgNotInitialized + | GossipDataColumnError::PubkeyCacheTimeout + | GossipDataColumnError::BeaconChainError(_) => { + crit!( + self.log, + "Internal error when verifying column sidecar"; + "error" => ?err, + ) + } + GossipDataColumnError::ProposalSignatureInvalid + | GossipDataColumnError::UnknownValidator(_) + | GossipDataColumnError::ProposerIndexMismatch { .. } + | GossipDataColumnError::IsNotLaterThanParent { .. } + | GossipDataColumnError::InvalidSubnetId { .. } + | GossipDataColumnError::InvalidInclusionProof { .. } + | GossipDataColumnError::InvalidKzgProof { .. 
} + | GossipDataColumnError::NotFinalizedDescendant { .. } => { + debug!( + self.log, + "Could not verify column sidecar for gossip. Rejecting the column sidecar"; + "error" => ?err, + "slot" => %slot, + "block_root" => %block_root, + "index" => %index, + ); + // Prevent recurring behaviour by penalizing the peer slightly. + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "gossip_data_column_low", + ); + self.propagate_validation_result( + message_id, + peer_id, + MessageAcceptance::Reject, + ); + } + GossipDataColumnError::FutureSlot { .. } + | GossipDataColumnError::PriorKnown { .. } + | GossipDataColumnError::PastFinalizedSlot { .. } => { + debug!( + self.log, + "Could not verify column sidecar for gossip. Ignoring the column sidecar"; + "error" => ?err, + "slot" => %slot, + "block_root" => %block_root, + "index" => %index, + ); + // Prevent recurring behaviour by penalizing the peer slightly. + self.gossip_penalize_peer( + peer_id, + PeerAction::HighToleranceError, + "gossip_data_column_high", + ); + self.propagate_validation_result( + message_id, + peer_id, + MessageAcceptance::Ignore, + ); + } + } } } } From 00038dae81fc9df9a11bb03eee47ae75d780fa7d Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 29 Jul 2024 15:52:13 +1000 Subject: [PATCH 11/43] Update database migration docs for 5.3 (#6195) * Update database migration docs for 5.3 --- book/src/database-migrations.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/book/src/database-migrations.md b/book/src/database-migrations.md index fc16641da0..6d75b90100 100644 --- a/book/src/database-migrations.md +++ b/book/src/database-migrations.md @@ -16,7 +16,7 @@ validator client or the slasher**. | Lighthouse version | Release date | Schema version | Downgrade available? 
| |--------------------|--------------|----------------|----------------------| -| v5.3.0 | Aug 2024 TBD | v22 TBD | no (TBD) | +| v5.3.0 | Aug 2024 | v21 | yes | | v5.2.0 | Jun 2024 | v19 | no | | v5.1.0 | Mar 2024 | v19 | no | | v5.0.0 | Feb 2024 | v19 | no | @@ -208,6 +208,7 @@ Here are the steps to prune historic states: | Lighthouse version | Release date | Schema version | Downgrade available? | |--------------------|--------------|----------------|-------------------------------------| +| v5.3.0 | Aug 2024 | v21 | yes | | v5.2.0 | Jun 2024 | v19 | yes before Deneb using <= v5.2.1 | | v5.1.0 | Mar 2024 | v19 | yes before Deneb using <= v5.2.1 | | v5.0.0 | Feb 2024 | v19 | yes before Deneb using <= v5.2.1 | From 96b00ef66c82eb7c67e76bf6142922e43f71f8b5 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Mon, 29 Jul 2024 21:42:31 +1000 Subject: [PATCH 12/43] Replace `lazy_static!` with `LazyLock` (#6189) * Replace `lazy_static` with `LazyLock`. * Merge branch 'unstable' into remove-lazy-static # Conflicts: # beacon_node/lighthouse_network/src/peer_manager/mod.rs * Lint fixes. * Merge branch 'unstable' into remove-lazy-static # Conflicts: # beacon_node/beacon_chain/src/metrics.rs * Moar lint fixes. * Update rust version to 1.80.0. 
* Merge branch 'unstable' into remove-lazy-static --- Cargo.lock | 27 - Cargo.toml | 1 - Dockerfile | 2 +- beacon_node/beacon_chain/Cargo.toml | 1 - .../beacon_chain/src/beacon_block_streamer.rs | 10 +- .../beacon_chain/src/graffiti_calculator.rs | 9 +- beacon_node/beacon_chain/src/metrics.rs | 2547 +++++++++++------ beacon_node/beacon_chain/src/test_utils.rs | 19 +- .../tests/attestation_production.rs | 10 +- .../tests/attestation_verification.rs | 9 +- .../beacon_chain/tests/block_verification.rs | 10 +- .../beacon_chain/tests/op_verification.rs | 11 +- beacon_node/beacon_chain/tests/rewards.rs | 7 +- beacon_node/beacon_chain/tests/store_tests.rs | 10 +- .../tests/sync_committee_verification.rs | 9 +- beacon_node/beacon_chain/tests/tests.rs | 9 +- .../beacon_chain/tests/validator_monitor.rs | 9 +- beacon_node/beacon_processor/Cargo.toml | 1 - beacon_node/beacon_processor/src/metrics.rs | 380 ++- beacon_node/client/Cargo.toml | 1 - beacon_node/client/src/metrics.rs | 26 +- beacon_node/eth1/Cargo.toml | 1 - beacon_node/eth1/src/metrics.rs | 61 +- beacon_node/execution_layer/Cargo.toml | 1 - .../execution_layer/src/engine_api/http.rs | 17 +- beacon_node/execution_layer/src/metrics.rs | 142 +- .../execution_layer/src/test_utils/mod.rs | 10 +- beacon_node/http_api/Cargo.toml | 1 - beacon_node/http_api/src/metrics.rs | 68 +- beacon_node/http_metrics/src/metrics.rs | 2 +- beacon_node/lighthouse_network/Cargo.toml | 1 - beacon_node/lighthouse_network/src/metrics.rs | 252 +- .../src/peer_manager/peerdb/score.rs | 6 +- .../lighthouse_network/src/rpc/protocol.rs | 158 +- beacon_node/network/Cargo.toml | 1 - beacon_node/network/src/metrics.rs | 708 +++-- .../network/src/subnet_service/tests/mod.rs | 7 +- beacon_node/operation_pool/Cargo.toml | 1 - beacon_node/operation_pool/src/lib.rs | 9 +- beacon_node/operation_pool/src/metrics.rs | 53 +- beacon_node/store/Cargo.toml | 1 - beacon_node/store/src/metrics.rs | 186 +- common/eth2_interop_keypairs/Cargo.toml | 1 - 
common/eth2_interop_keypairs/src/lib.rs | 13 +- common/lighthouse_metrics/src/lib.rs | 27 +- common/logging/Cargo.toml | 1 - common/logging/src/lib.rs | 20 +- common/logging/src/tracing_metrics_layer.rs | 34 +- common/malloc_utils/Cargo.toml | 1 - common/malloc_utils/src/glibc.rs | 74 +- common/malloc_utils/src/jemalloc.rs | 36 +- common/monitoring_api/Cargo.toml | 1 - common/monitoring_api/src/gather.rs | 21 +- common/slot_clock/Cargo.toml | 1 - common/slot_clock/src/metrics.rs | 26 +- common/task_executor/Cargo.toml | 1 - common/task_executor/src/metrics.rs | 52 +- common/unused_port/Cargo.toml | 1 - common/unused_port/src/lib.rs | 8 +- common/warp_utils/Cargo.toml | 1 - common/warp_utils/src/metrics.rs | 169 +- consensus/merkle_proof/Cargo.toml | 1 - consensus/merkle_proof/src/lib.rs | 11 +- consensus/state_processing/Cargo.toml | 1 - consensus/state_processing/src/metrics.rs | 76 +- .../src/per_block_processing/tests.rs | 9 +- consensus/types/Cargo.toml | 1 - .../src/beacon_state/committee_cache/tests.rs | 9 +- consensus/types/src/beacon_state/tests.rs | 9 +- consensus/types/src/subnet_id.rs | 18 +- consensus/types/src/sync_subnet_id.rs | 18 +- lcli/Dockerfile | 2 +- lighthouse/Cargo.toml | 3 +- lighthouse/src/main.rs | 12 +- lighthouse/src/metrics.rs | 20 +- slasher/Cargo.toml | 1 - slasher/src/metrics.rs | 92 +- testing/state_transition_vectors/Cargo.toml | 1 - testing/state_transition_vectors/src/main.rs | 9 +- testing/web3signer_tests/Cargo.toml | 1 - testing/web3signer_tests/src/lib.rs | 16 +- validator_client/Cargo.toml | 1 - .../slashing_protection/Cargo.toml | 1 - .../slashing_protection/tests/interop.rs | 6 +- validator_client/src/http_metrics/metrics.rs | 282 +- 85 files changed, 3512 insertions(+), 2370 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 818b6adc55..9afb3635f1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -796,7 +796,6 @@ dependencies = [ "int_to_bytes", "itertools 0.10.5", "kzg", - "lazy_static", "lighthouse_metrics", 
"lighthouse_version", "logging", @@ -872,7 +871,6 @@ dependencies = [ "fnv", "futures", "itertools 0.10.5", - "lazy_static", "lighthouse_metrics", "lighthouse_network", "logging", @@ -1390,7 +1388,6 @@ dependencies = [ "genesis", "http_api", "http_metrics", - "lazy_static", "lighthouse_metrics", "lighthouse_network", "monitoring_api", @@ -2413,7 +2410,6 @@ dependencies = [ "ethereum_ssz_derive", "execution_layer", "futures", - "lazy_static", "lighthouse_metrics", "merkle_proof", "parking_lot 0.12.3", @@ -2492,7 +2488,6 @@ dependencies = [ "bls", "ethereum_hashing", "hex", - "lazy_static", "num-bigint", "serde", "serde_yaml", @@ -2913,7 +2908,6 @@ dependencies = [ "jsonwebtoken", "keccak-hash", "kzg", - "lazy_static", "lighthouse_metrics", "lighthouse_version", "lru", @@ -3783,7 +3777,6 @@ dependencies = [ "futures", "genesis", "hex", - "lazy_static", "lighthouse_metrics", "lighthouse_network", "lighthouse_version", @@ -4965,7 +4958,6 @@ dependencies = [ "eth2_network_config", "ethereum_hashing", "futures", - "lazy_static", "lighthouse_metrics", "lighthouse_network", "lighthouse_version", @@ -5013,7 +5005,6 @@ dependencies = [ "gossipsub", "hex", "itertools 0.10.5", - "lazy_static", "libp2p", "libp2p-mplex", "lighthouse_metrics", @@ -5126,7 +5117,6 @@ name = "logging" version = "0.2.0" dependencies = [ "chrono", - "lazy_static", "lighthouse_metrics", "parking_lot 0.12.3", "serde", @@ -5184,7 +5174,6 @@ version = "0.1.0" dependencies = [ "jemalloc-ctl", "jemallocator", - "lazy_static", "libc", "lighthouse_metrics", "parking_lot 0.12.3", @@ -5271,7 +5260,6 @@ version = "0.2.0" dependencies = [ "ethereum-types 0.14.1", "ethereum_hashing", - "lazy_static", "quickcheck", "quickcheck_macros", "safe_arith", @@ -5398,7 +5386,6 @@ name = "monitoring_api" version = "0.1.0" dependencies = [ "eth2", - "lazy_static", "lighthouse_metrics", "lighthouse_version", "regex", @@ -5576,7 +5563,6 @@ dependencies = [ "hex", "igd-next", "itertools 0.10.5", - "lazy_static", 
"lighthouse_metrics", "lighthouse_network", "logging", @@ -5881,7 +5867,6 @@ dependencies = [ "ethereum_ssz", "ethereum_ssz_derive", "itertools 0.10.5", - "lazy_static", "lighthouse_metrics", "maplit", "parking_lot 0.12.3", @@ -7698,7 +7683,6 @@ dependencies = [ "ethereum_ssz_derive", "filesystem", "flate2", - "lazy_static", "libmdbx", "lighthouse_metrics", "lmdb-rkv", @@ -7745,7 +7729,6 @@ dependencies = [ "arbitrary", "ethereum_serde_utils", "filesystem", - "lazy_static", "r2d2", "r2d2_sqlite", "rayon", @@ -7862,7 +7845,6 @@ dependencies = [ name = "slot_clock" version = "0.2.0" dependencies = [ - "lazy_static", "lighthouse_metrics", "parking_lot 0.12.3", "types", @@ -7991,7 +7973,6 @@ dependencies = [ "int_to_bytes", "integer-sqrt", "itertools 0.10.5", - "lazy_static", "lighthouse_metrics", "merkle_proof", "rand", @@ -8011,7 +7992,6 @@ version = "0.1.0" dependencies = [ "beacon_chain", "ethereum_ssz", - "lazy_static", "state_processing", "tokio", "types", @@ -8033,7 +8013,6 @@ dependencies = [ "ethereum_ssz", "ethereum_ssz_derive", "itertools 0.10.5", - "lazy_static", "leveldb", "lighthouse_metrics", "lru", @@ -8244,7 +8223,6 @@ version = "0.1.0" dependencies = [ "async-channel", "futures", - "lazy_static", "lighthouse_metrics", "logging", "slog", @@ -8856,7 +8834,6 @@ dependencies = [ "int_to_bytes", "itertools 0.10.5", "kzg", - "lazy_static", "log", "maplit", "merkle_proof", @@ -9022,7 +8999,6 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" name = "unused_port" version = "0.1.0" dependencies = [ - "lazy_static", "lru_cache", "parking_lot 0.12.3", ] @@ -9076,7 +9052,6 @@ dependencies = [ "hex", "hyper 1.4.1", "itertools 0.10.5", - "lazy_static", "libsecp256k1", "lighthouse_metrics", "lighthouse_version", @@ -9246,7 +9221,6 @@ dependencies = [ "bytes", "eth2", "headers", - "lazy_static", "lighthouse_metrics", "safe_arith", "serde", @@ -9428,7 +9402,6 @@ dependencies = [ "eth2_keystore", "eth2_network_config", "futures", - 
"lazy_static", "parking_lot 0.12.3", "reqwest", "serde", diff --git a/Cargo.toml b/Cargo.toml index b2957842d5..cf3fd0ab04 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -132,7 +132,6 @@ hex = "0.4" hashlink = "0.9.0" hyper = "1" itertools = "0.10" -lazy_static = "1" libsecp256k1 = "0.7" log = "0.4" lru = "0.12" diff --git a/Dockerfile b/Dockerfile index ff7f14d534..0f334e2ac8 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM rust:1.78.0-bullseye AS builder +FROM rust:1.80.0-bullseye AS builder RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev COPY . lighthouse ARG FEATURES diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index 32a1056c10..cf1c6be33f 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -37,7 +37,6 @@ hex = { workspace = true } int_to_bytes = { workspace = true } itertools = { workspace = true } kzg = { workspace = true } -lazy_static = { workspace = true } lighthouse_metrics = { workspace = true } lighthouse_version = { workspace = true } logging = { workspace = true } diff --git a/beacon_node/beacon_chain/src/beacon_block_streamer.rs b/beacon_node/beacon_chain/src/beacon_block_streamer.rs index f0a68b6be5..d63a3ee7ea 100644 --- a/beacon_node/beacon_chain/src/beacon_block_streamer.rs +++ b/beacon_node/beacon_chain/src/beacon_block_streamer.rs @@ -712,16 +712,16 @@ mod tests { use crate::test_utils::{test_spec, BeaconChainHarness, EphemeralHarnessType}; use execution_layer::test_utils::{Block, DEFAULT_ENGINE_CAPABILITIES}; use execution_layer::EngineCapabilities; - use lazy_static::lazy_static; + use std::sync::LazyLock; use std::time::Duration; use tokio::sync::mpsc; use types::{ChainSpec, Epoch, EthSpec, Hash256, Keypair, MinimalEthSpec, Slot}; const VALIDATOR_COUNT: usize = 48; - lazy_static! { - /// A cached set of keys. 
- static ref KEYPAIRS: Vec = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); - } + + /// A cached set of keys. + static KEYPAIRS: LazyLock> = + LazyLock::new(|| types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT)); fn get_harness( validator_count: usize, diff --git a/beacon_node/beacon_chain/src/graffiti_calculator.rs b/beacon_node/beacon_chain/src/graffiti_calculator.rs index 599c99dc2d..42a1aa1a0b 100644 --- a/beacon_node/beacon_chain/src/graffiti_calculator.rs +++ b/beacon_node/beacon_chain/src/graffiti_calculator.rs @@ -241,16 +241,15 @@ mod tests { use crate::ChainConfig; use execution_layer::test_utils::{DEFAULT_CLIENT_VERSION, DEFAULT_ENGINE_CAPABILITIES}; use execution_layer::EngineCapabilities; - use lazy_static::lazy_static; use slog::info; + use std::sync::LazyLock; use std::time::Duration; use types::{ChainSpec, Graffiti, Keypair, MinimalEthSpec, GRAFFITI_BYTES_LEN}; const VALIDATOR_COUNT: usize = 48; - lazy_static! { - /// A cached set of keys. - static ref KEYPAIRS: Vec = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); - } + /// A cached set of keys. 
+ static KEYPAIRS: LazyLock> = + LazyLock::new(|| types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT)); fn get_harness( validator_count: usize, diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index 4ca511370d..b8969b31f1 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -1,9 +1,9 @@ use crate::observed_attesters::SlotSubcommitteeIndex; use crate::types::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT; use crate::{BeaconChain, BeaconChainError, BeaconChainTypes}; -use lazy_static::lazy_static; pub use lighthouse_metrics::*; use slot_clock::SlotClock; +use std::sync::LazyLock; use types::{BeaconState, Epoch, EthSpec, Hash256, Slot}; // Attestation simulator metrics @@ -20,1182 +20,1839 @@ pub const VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_SOURCE_ATTESTER_HIT_TOTAL: &st pub const VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_SOURCE_ATTESTER_MISS_TOTAL: &str = "validator_monitor_attestation_simulator_source_attester_miss_total"; -lazy_static! 
{ - /* - * Block Processing - */ - pub static ref BLOCK_PROCESSING_REQUESTS: Result = try_create_int_counter( +/* + * Block Processing + */ +pub static BLOCK_PROCESSING_REQUESTS: LazyLock> = LazyLock::new(|| { + try_create_int_counter( "beacon_block_processing_requests_total", - "Count of blocks submitted for processing" - ); - pub static ref BLOCK_PROCESSING_SUCCESSES: Result = try_create_int_counter( + "Count of blocks submitted for processing", + ) +}); +pub static BLOCK_PROCESSING_SUCCESSES: LazyLock> = LazyLock::new(|| { + try_create_int_counter( "beacon_block_processing_successes_total", - "Count of blocks processed without error" - ); - // Keeping the existing "snapshot_cache" metric name as it would break existing dashboards - pub static ref BLOCK_PROCESSING_SNAPSHOT_CACHE_SIZE: Result = try_create_int_gauge( + "Count of blocks processed without error", + ) +}); +// Keeping the existing "snapshot_cache" metric name as it would break existing dashboards +pub static BLOCK_PROCESSING_SNAPSHOT_CACHE_SIZE: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( "beacon_block_processing_snapshot_cache_size", - "Count snapshots in the snapshot cache" - ); - pub static ref BLOCK_PROCESSING_TIMES: Result = - try_create_histogram("beacon_block_processing_seconds", "Full runtime of block processing"); - pub static ref BLOCK_PROCESSING_BLOCK_ROOT: Result = try_create_histogram( + "Count snapshots in the snapshot cache", + ) +}); +pub static BLOCK_PROCESSING_TIMES: LazyLock> = LazyLock::new(|| { + try_create_histogram( + "beacon_block_processing_seconds", + "Full runtime of block processing", + ) +}); +pub static BLOCK_PROCESSING_BLOCK_ROOT: LazyLock> = LazyLock::new(|| { + try_create_histogram( "beacon_block_processing_block_root_seconds", - "Time spent calculating the block root when processing a block." 
- ); - pub static ref BLOCK_HEADER_PROCESSING_BLOCK_ROOT: Result = try_create_histogram( + "Time spent calculating the block root when processing a block.", + ) +}); +pub static BLOCK_HEADER_PROCESSING_BLOCK_ROOT: LazyLock> = LazyLock::new(|| { + try_create_histogram( "beacon_block_header_processing_block_root_seconds", - "Time spent calculating the block root for a beacon block header." - ); - pub static ref BLOCK_PROCESSING_BLOB_ROOT: Result = try_create_histogram( + "Time spent calculating the block root for a beacon block header.", + ) +}); +pub static BLOCK_PROCESSING_BLOB_ROOT: LazyLock> = LazyLock::new(|| { + try_create_histogram( "beacon_block_processing_blob_root_seconds", - "Time spent calculating the blob root when processing a block." - ); - pub static ref BLOCK_PROCESSING_DB_READ: Result = try_create_histogram( + "Time spent calculating the blob root when processing a block.", + ) +}); +pub static BLOCK_PROCESSING_DB_READ: LazyLock> = LazyLock::new(|| { + try_create_histogram( "beacon_block_processing_db_read_seconds", - "Time spent loading block and state from DB for block processing" - ); - pub static ref BLOCK_PROCESSING_CATCHUP_STATE: Result = try_create_histogram( + "Time spent loading block and state from DB for block processing", + ) +}); +pub static BLOCK_PROCESSING_CATCHUP_STATE: LazyLock> = LazyLock::new(|| { + try_create_histogram( "beacon_block_processing_catch_up_state_seconds", - "Time spent skipping slots on a state before processing a block." - ); - pub static ref BLOCK_PROCESSING_COMMITTEE: Result = try_create_histogram( + "Time spent skipping slots on a state before processing a block.", + ) +}); +pub static BLOCK_PROCESSING_COMMITTEE: LazyLock> = LazyLock::new(|| { + try_create_histogram( "beacon_block_processing_committee_building_seconds", - "Time spent building/obtaining committees for block processing." 
- ); - pub static ref BLOCK_PROCESSING_SIGNATURE: Result = try_create_histogram( + "Time spent building/obtaining committees for block processing.", + ) +}); +pub static BLOCK_PROCESSING_SIGNATURE: LazyLock> = LazyLock::new(|| { + try_create_histogram( "beacon_block_processing_signature_seconds", - "Time spent doing signature verification for a block." - ); - pub static ref BLOCK_PROCESSING_CORE: Result = try_create_histogram( + "Time spent doing signature verification for a block.", + ) +}); +pub static BLOCK_PROCESSING_CORE: LazyLock> = LazyLock::new(|| { + try_create_histogram( "beacon_block_processing_core_seconds", - "Time spent doing the core per_block_processing state processing." - ); - pub static ref BLOCK_PROCESSING_STATE_ROOT: Result = try_create_histogram( + "Time spent doing the core per_block_processing state processing.", + ) +}); +pub static BLOCK_PROCESSING_STATE_ROOT: LazyLock> = LazyLock::new(|| { + try_create_histogram( "beacon_block_processing_state_root_seconds", - "Time spent calculating the state root when processing a block." 
- ); - pub static ref BLOCK_PROCESSING_POST_EXEC_PROCESSING: Result = try_create_histogram_with_buckets( - "beacon_block_processing_post_exec_pre_attestable_seconds", - "Time between finishing execution processing and the block becoming attestable", - linear_buckets(5e-3, 5e-3, 10) - ); - pub static ref BLOCK_PROCESSING_DB_WRITE: Result = try_create_histogram( + "Time spent calculating the state root when processing a block.", + ) +}); +pub static BLOCK_PROCESSING_POST_EXEC_PROCESSING: LazyLock> = + LazyLock::new(|| { + try_create_histogram_with_buckets( + "beacon_block_processing_post_exec_pre_attestable_seconds", + "Time between finishing execution processing and the block becoming attestable", + linear_buckets(5e-3, 5e-3, 10), + ) + }); +pub static BLOCK_PROCESSING_DB_WRITE: LazyLock> = LazyLock::new(|| { + try_create_histogram( "beacon_block_processing_db_write_seconds", - "Time spent writing a newly processed block and state to DB" - ); - pub static ref BLOCK_PROCESSING_ATTESTATION_OBSERVATION: Result = try_create_histogram( - "beacon_block_processing_attestation_observation_seconds", - "Time spent hashing and remembering all the attestations in the block" - ); - pub static ref BLOCK_PROCESSING_FORK_CHOICE: Result = try_create_histogram_with_buckets( + "Time spent writing a newly processed block and state to DB", + ) +}); +pub static BLOCK_PROCESSING_ATTESTATION_OBSERVATION: LazyLock> = + LazyLock::new(|| { + try_create_histogram( + "beacon_block_processing_attestation_observation_seconds", + "Time spent hashing and remembering all the attestations in the block", + ) + }); +pub static BLOCK_PROCESSING_FORK_CHOICE: LazyLock> = LazyLock::new(|| { + try_create_histogram_with_buckets( "beacon_block_processing_fork_choice_seconds", "Time spent running fork choice's `get_head` during block import", - exponential_buckets(1e-3, 2.0, 8) - ); - pub static ref BLOCK_PROCESSING_PUBKEY_CACHE_LOCK: Result = try_create_histogram( + exponential_buckets(1e-3, 2.0, 8), + ) +}); 
+pub static BLOCK_PROCESSING_PUBKEY_CACHE_LOCK: LazyLock> = LazyLock::new(|| { + try_create_histogram( "beacon_block_processing_pubkey_cache_lock_seconds", "Time spent waiting or holding the pubkey cache write lock", - ); - pub static ref BLOCK_SYNC_AGGREGATE_SET_BITS: Result = try_create_int_gauge( + ) +}); +pub static BLOCK_SYNC_AGGREGATE_SET_BITS: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( "block_sync_aggregate_set_bits", - "The number of true bits in the last sync aggregate in a block" - ); + "The number of true bits in the last sync aggregate in a block", + ) +}); - /* - * Block Production - */ - pub static ref BLOCK_PRODUCTION_REQUESTS: Result = try_create_int_counter( +/* + * Block Production + */ +pub static BLOCK_PRODUCTION_REQUESTS: LazyLock> = LazyLock::new(|| { + try_create_int_counter( "beacon_block_production_requests_total", - "Count of all block production requests" - ); - pub static ref BLOCK_PRODUCTION_SUCCESSES: Result = try_create_int_counter( + "Count of all block production requests", + ) +}); +pub static BLOCK_PRODUCTION_SUCCESSES: LazyLock> = LazyLock::new(|| { + try_create_int_counter( "beacon_block_production_successes_total", - "Count of blocks successfully produced." 
- ); - pub static ref BLOCK_PRODUCTION_TIMES: Result = - try_create_histogram("beacon_block_production_seconds", "Full runtime of block production"); - pub static ref BLOCK_PRODUCTION_FORK_CHOICE_TIMES: Result = try_create_histogram( + "Count of blocks successfully produced.", + ) +}); +pub static BLOCK_PRODUCTION_TIMES: LazyLock> = LazyLock::new(|| { + try_create_histogram( + "beacon_block_production_seconds", + "Full runtime of block production", + ) +}); +pub static BLOCK_PRODUCTION_FORK_CHOICE_TIMES: LazyLock> = LazyLock::new(|| { + try_create_histogram( "beacon_block_production_fork_choice_seconds", - "Time taken to run fork choice before block production" - ); - pub static ref BLOCK_PRODUCTION_GET_PROPOSER_HEAD_TIMES: Result = try_create_histogram_with_buckets( - "beacon_block_production_get_proposer_head_times", - "Time taken for fork choice to compute the proposer head before block production", - exponential_buckets(1e-3, 2.0, 8) - ); - pub static ref BLOCK_PRODUCTION_STATE_LOAD_TIMES: Result = try_create_histogram( + "Time taken to run fork choice before block production", + ) +}); +pub static BLOCK_PRODUCTION_GET_PROPOSER_HEAD_TIMES: LazyLock> = + LazyLock::new(|| { + try_create_histogram_with_buckets( + "beacon_block_production_get_proposer_head_times", + "Time taken for fork choice to compute the proposer head before block production", + exponential_buckets(1e-3, 2.0, 8), + ) + }); +pub static BLOCK_PRODUCTION_STATE_LOAD_TIMES: LazyLock> = LazyLock::new(|| { + try_create_histogram( "beacon_block_production_state_load_seconds", - "Time taken to load the base state for block production" - ); - pub static ref BLOCK_PRODUCTION_SLOT_PROCESS_TIMES: Result = try_create_histogram( + "Time taken to load the base state for block production", + ) +}); +pub static BLOCK_PRODUCTION_SLOT_PROCESS_TIMES: LazyLock> = LazyLock::new(|| { + try_create_histogram( "beacon_block_production_slot_process_seconds", - "Time taken to advance the state to the block production slot" 
- ); - pub static ref BLOCK_PRODUCTION_UNAGGREGATED_TIMES: Result = try_create_histogram( + "Time taken to advance the state to the block production slot", + ) +}); +pub static BLOCK_PRODUCTION_UNAGGREGATED_TIMES: LazyLock> = LazyLock::new(|| { + try_create_histogram( "beacon_block_production_unaggregated_seconds", - "Time taken to import the naive aggregation pool for block production" - ); - pub static ref BLOCK_PRODUCTION_ATTESTATION_TIMES: Result = try_create_histogram( + "Time taken to import the naive aggregation pool for block production", + ) +}); +pub static BLOCK_PRODUCTION_ATTESTATION_TIMES: LazyLock> = LazyLock::new(|| { + try_create_histogram( "beacon_block_production_attestation_seconds", - "Time taken to pack attestations into a block" - ); - pub static ref BLOCK_PRODUCTION_PROCESS_TIMES: Result = try_create_histogram( + "Time taken to pack attestations into a block", + ) +}); +pub static BLOCK_PRODUCTION_PROCESS_TIMES: LazyLock> = LazyLock::new(|| { + try_create_histogram( "beacon_block_production_process_seconds", - "Time taken to process the block produced" - ); - pub static ref BLOCK_PRODUCTION_STATE_ROOT_TIMES: Result = try_create_histogram( + "Time taken to process the block produced", + ) +}); +pub static BLOCK_PRODUCTION_STATE_ROOT_TIMES: LazyLock> = LazyLock::new(|| { + try_create_histogram( "beacon_block_production_state_root_seconds", - "Time taken to calculate the block's state root" - ); + "Time taken to calculate the block's state root", + ) +}); - /* - * Block Statistics - */ - pub static ref OPERATIONS_PER_BLOCK_ATTESTATION: Result = try_create_histogram_with_buckets( +/* + * Block Statistics + */ +pub static OPERATIONS_PER_BLOCK_ATTESTATION: LazyLock> = LazyLock::new(|| { + try_create_histogram_with_buckets( "beacon_operations_per_block_attestation_total", "Number of attestations in a block", // Full block is 128. 
- Ok(vec![0_f64, 1_f64, 3_f64, 15_f64, 31_f64, 63_f64, 127_f64, 255_f64]) - ); + Ok(vec![ + 0_f64, 1_f64, 3_f64, 15_f64, 31_f64, 63_f64, 127_f64, 255_f64, + ]), + ) +}); - pub static ref BLOCK_SIZE: Result = try_create_histogram_with_buckets( +pub static BLOCK_SIZE: LazyLock> = LazyLock::new(|| { + try_create_histogram_with_buckets( "beacon_block_total_size", "Size of a signed beacon block", - linear_buckets(5120_f64,5120_f64,10) - ); + linear_buckets(5120_f64, 5120_f64, 10), + ) +}); - /* - * Unaggregated Attestation Verification - */ - pub static ref UNAGGREGATED_ATTESTATION_PROCESSING_REQUESTS: Result = try_create_int_counter( - "beacon_unaggregated_attestation_processing_requests_total", - "Count of all unaggregated attestations submitted for processing" - ); - pub static ref UNAGGREGATED_ATTESTATION_PROCESSING_SUCCESSES: Result = try_create_int_counter( - "beacon_unaggregated_attestation_processing_successes_total", - "Number of unaggregated attestations verified for gossip" - ); - pub static ref UNAGGREGATED_ATTESTATION_GOSSIP_VERIFICATION_TIMES: Result = try_create_histogram( - "beacon_unaggregated_attestation_gossip_verification_seconds", - "Full runtime of aggregated attestation gossip verification" - ); +/* + * Unaggregated Attestation Verification + */ +pub static UNAGGREGATED_ATTESTATION_PROCESSING_REQUESTS: LazyLock> = + LazyLock::new(|| { + try_create_int_counter( + "beacon_unaggregated_attestation_processing_requests_total", + "Count of all unaggregated attestations submitted for processing", + ) + }); +pub static UNAGGREGATED_ATTESTATION_PROCESSING_SUCCESSES: LazyLock> = + LazyLock::new(|| { + try_create_int_counter( + "beacon_unaggregated_attestation_processing_successes_total", + "Number of unaggregated attestations verified for gossip", + ) + }); +pub static UNAGGREGATED_ATTESTATION_GOSSIP_VERIFICATION_TIMES: LazyLock> = + LazyLock::new(|| { + try_create_histogram( + "beacon_unaggregated_attestation_gossip_verification_seconds", + "Full runtime 
of aggregated attestation gossip verification", + ) + }); - /* - * Aggregated Attestation Verification - */ - pub static ref AGGREGATED_ATTESTATION_PROCESSING_REQUESTS: Result = try_create_int_counter( - "beacon_aggregated_attestation_processing_requests_total", - "Count of all aggregated attestations submitted for processing" - ); - pub static ref AGGREGATED_ATTESTATION_PROCESSING_SUCCESSES: Result = try_create_int_counter( - "beacon_aggregated_attestation_processing_successes_total", - "Number of aggregated attestations verified for gossip" - ); - pub static ref AGGREGATED_ATTESTATION_GOSSIP_VERIFICATION_TIMES: Result = try_create_histogram( - "beacon_aggregated_attestation_gossip_verification_seconds", - "Full runtime of aggregated attestation gossip verification" - ); +/* + * Aggregated Attestation Verification + */ +pub static AGGREGATED_ATTESTATION_PROCESSING_REQUESTS: LazyLock> = + LazyLock::new(|| { + try_create_int_counter( + "beacon_aggregated_attestation_processing_requests_total", + "Count of all aggregated attestations submitted for processing", + ) + }); +pub static AGGREGATED_ATTESTATION_PROCESSING_SUCCESSES: LazyLock> = + LazyLock::new(|| { + try_create_int_counter( + "beacon_aggregated_attestation_processing_successes_total", + "Number of aggregated attestations verified for gossip", + ) + }); +pub static AGGREGATED_ATTESTATION_GOSSIP_VERIFICATION_TIMES: LazyLock> = + LazyLock::new(|| { + try_create_histogram( + "beacon_aggregated_attestation_gossip_verification_seconds", + "Full runtime of aggregated attestation gossip verification", + ) + }); - /* - * General Attestation Processing - */ - pub static ref ATTESTATION_PROCESSING_APPLY_TO_AGG_POOL: Result = try_create_histogram( - "beacon_attestation_processing_apply_to_agg_pool", - "Time spent applying an attestation to the naive aggregation pool" - ); - pub static ref ATTESTATION_PROCESSING_AGG_POOL_PRUNE: Result = try_create_histogram( - "beacon_attestation_processing_agg_pool_prune", - "Time 
spent for the agg pool to prune" - ); - pub static ref ATTESTATION_PROCESSING_AGG_POOL_INSERT: Result = try_create_histogram( - "beacon_attestation_processing_agg_pool_insert", - "Time spent for the outer pool.insert() function of agg pool" - ); - pub static ref ATTESTATION_PROCESSING_AGG_POOL_CORE_INSERT: Result = try_create_histogram( - "beacon_attestation_processing_agg_pool_core_insert", - "Time spent for the core map.insert() function of agg pool" - ); - pub static ref ATTESTATION_PROCESSING_AGG_POOL_AGGREGATION: Result = try_create_histogram( - "beacon_attestation_processing_agg_pool_aggregation", - "Time spent doing signature aggregation when adding to the agg poll" - ); - pub static ref ATTESTATION_PROCESSING_AGG_POOL_CREATE_MAP: Result = try_create_histogram( - "beacon_attestation_processing_agg_pool_create_map", - "Time spent for creating a map for a new slot" - ); - pub static ref ATTESTATION_PROCESSING_APPLY_TO_OP_POOL: Result = try_create_histogram( - "beacon_attestation_processing_apply_to_op_pool", - "Time spent applying an attestation to the block inclusion pool" - ); +/* + * General Attestation Processing + */ +pub static ATTESTATION_PROCESSING_APPLY_TO_AGG_POOL: LazyLock> = + LazyLock::new(|| { + try_create_histogram( + "beacon_attestation_processing_apply_to_agg_pool", + "Time spent applying an attestation to the naive aggregation pool", + ) + }); +pub static ATTESTATION_PROCESSING_AGG_POOL_PRUNE: LazyLock> = + LazyLock::new(|| { + try_create_histogram( + "beacon_attestation_processing_agg_pool_prune", + "Time spent for the agg pool to prune", + ) + }); +pub static ATTESTATION_PROCESSING_AGG_POOL_INSERT: LazyLock> = + LazyLock::new(|| { + try_create_histogram( + "beacon_attestation_processing_agg_pool_insert", + "Time spent for the outer pool.insert() function of agg pool", + ) + }); +pub static ATTESTATION_PROCESSING_AGG_POOL_CORE_INSERT: LazyLock> = + LazyLock::new(|| { + try_create_histogram( + 
"beacon_attestation_processing_agg_pool_core_insert", + "Time spent for the core map.insert() function of agg pool", + ) + }); +pub static ATTESTATION_PROCESSING_AGG_POOL_AGGREGATION: LazyLock> = + LazyLock::new(|| { + try_create_histogram( + "beacon_attestation_processing_agg_pool_aggregation", + "Time spent doing signature aggregation when adding to the agg poll", + ) + }); +pub static ATTESTATION_PROCESSING_AGG_POOL_CREATE_MAP: LazyLock> = + LazyLock::new(|| { + try_create_histogram( + "beacon_attestation_processing_agg_pool_create_map", + "Time spent for creating a map for a new slot", + ) + }); +pub static ATTESTATION_PROCESSING_APPLY_TO_OP_POOL: LazyLock> = + LazyLock::new(|| { + try_create_histogram( + "beacon_attestation_processing_apply_to_op_pool", + "Time spent applying an attestation to the block inclusion pool", + ) + }); - /* - * Attestation Processing - */ - pub static ref ATTESTATION_PROCESSING_SHUFFLING_CACHE_WAIT_TIMES: Result = try_create_histogram( - "beacon_attestation_processing_shuffling_cache_wait_seconds", - "Time spent on waiting for the shuffling cache lock during attestation processing" - ); - pub static ref ATTESTATION_PROCESSING_COMMITTEE_BUILDING_TIMES: Result = try_create_histogram( - "beacon_attestation_processing_committee_building_seconds", - "Time spent on building committees during attestation processing" - ); - pub static ref ATTESTATION_PROCESSING_STATE_READ_TIMES: Result = try_create_histogram( - "beacon_attestation_processing_state_read_seconds", - "Time spent on reading the state during attestation processing" - ); - pub static ref ATTESTATION_PROCESSING_STATE_SKIP_TIMES: Result = try_create_histogram( - "beacon_attestation_processing_state_skip_seconds", - "Time spent on reading the state during attestation processing" - ); - pub static ref ATTESTATION_PROCESSING_SIGNATURE_SETUP_TIMES: Result = try_create_histogram( - "beacon_attestation_processing_signature_setup_seconds", - "Time spent on setting up for the signature 
verification of attestation processing" - ); - pub static ref ATTESTATION_PROCESSING_SIGNATURE_TIMES: Result = try_create_histogram( - "beacon_attestation_processing_signature_seconds", - "Time spent on the signature verification of attestation processing" - ); +/* + * Attestation Processing + */ +pub static ATTESTATION_PROCESSING_SHUFFLING_CACHE_WAIT_TIMES: LazyLock> = + LazyLock::new(|| { + try_create_histogram( + "beacon_attestation_processing_shuffling_cache_wait_seconds", + "Time spent on waiting for the shuffling cache lock during attestation processing", + ) + }); +pub static ATTESTATION_PROCESSING_COMMITTEE_BUILDING_TIMES: LazyLock> = + LazyLock::new(|| { + try_create_histogram( + "beacon_attestation_processing_committee_building_seconds", + "Time spent on building committees during attestation processing", + ) + }); +pub static ATTESTATION_PROCESSING_STATE_READ_TIMES: LazyLock> = + LazyLock::new(|| { + try_create_histogram( + "beacon_attestation_processing_state_read_seconds", + "Time spent on reading the state during attestation processing", + ) + }); +pub static ATTESTATION_PROCESSING_STATE_SKIP_TIMES: LazyLock> = + LazyLock::new(|| { + try_create_histogram( + "beacon_attestation_processing_state_skip_seconds", + "Time spent on reading the state during attestation processing", + ) + }); +pub static ATTESTATION_PROCESSING_SIGNATURE_SETUP_TIMES: LazyLock> = + LazyLock::new(|| { + try_create_histogram( + "beacon_attestation_processing_signature_setup_seconds", + "Time spent on setting up for the signature verification of attestation processing", + ) + }); +pub static ATTESTATION_PROCESSING_SIGNATURE_TIMES: LazyLock> = + LazyLock::new(|| { + try_create_histogram( + "beacon_attestation_processing_signature_seconds", + "Time spent on the signature verification of attestation processing", + ) + }); - /* - * Batch Attestation Processing - */ - pub static ref ATTESTATION_PROCESSING_BATCH_AGG_SIGNATURE_SETUP_TIMES: Result = try_create_histogram( - 
"beacon_attestation_processing_batch_agg_signature_setup_times", - "Time spent on setting up for the signature verification of batch aggregate processing" - ); - pub static ref ATTESTATION_PROCESSING_BATCH_AGG_SIGNATURE_TIMES: Result = try_create_histogram( - "beacon_attestation_processing_batch_agg_signature_times", - "Time spent on the signature verification of batch aggregate attestation processing" - ); - pub static ref ATTESTATION_PROCESSING_BATCH_UNAGG_SIGNATURE_SETUP_TIMES: Result = try_create_histogram( +/* + * Batch Attestation Processing + */ +pub static ATTESTATION_PROCESSING_BATCH_AGG_SIGNATURE_SETUP_TIMES: LazyLock> = + LazyLock::new(|| { + try_create_histogram( + "beacon_attestation_processing_batch_agg_signature_setup_times", + "Time spent on setting up for the signature verification of batch aggregate processing", + ) + }); +pub static ATTESTATION_PROCESSING_BATCH_AGG_SIGNATURE_TIMES: LazyLock> = + LazyLock::new(|| { + try_create_histogram( + "beacon_attestation_processing_batch_agg_signature_times", + "Time spent on the signature verification of batch aggregate attestation processing", + ) + }); +pub static ATTESTATION_PROCESSING_BATCH_UNAGG_SIGNATURE_SETUP_TIMES: LazyLock> = + LazyLock::new(|| { + try_create_histogram( "beacon_attestation_processing_batch_unagg_signature_setup_times", "Time spent on setting up for the signature verification of batch unaggregate processing" - ); - pub static ref ATTESTATION_PROCESSING_BATCH_UNAGG_SIGNATURE_TIMES: Result = try_create_histogram( - "beacon_attestation_processing_batch_unagg_signature_times", - "Time spent on the signature verification of batch unaggregate attestation processing" - ); + ) + }); +pub static ATTESTATION_PROCESSING_BATCH_UNAGG_SIGNATURE_TIMES: LazyLock> = + LazyLock::new(|| { + try_create_histogram( + "beacon_attestation_processing_batch_unagg_signature_times", + "Time spent on the signature verification of batch unaggregate attestation processing", + ) + }); - /* - * Shuffling cache - */ 
- pub static ref SHUFFLING_CACHE_HITS: Result = - try_create_int_counter("beacon_shuffling_cache_hits_total", "Count of times shuffling cache fulfils request"); - pub static ref SHUFFLING_CACHE_MISSES: Result = - try_create_int_counter("beacon_shuffling_cache_misses_total", "Count of times shuffling cache fulfils request"); - pub static ref SHUFFLING_CACHE_PROMISE_HITS: Result = - try_create_int_counter("beacon_shuffling_cache_promise_hits_total", "Count of times shuffling cache returns a promise to future shuffling"); - pub static ref SHUFFLING_CACHE_PROMISE_FAILS: Result = - try_create_int_counter("beacon_shuffling_cache_promise_fails_total", "Count of times shuffling cache detects a failed promise"); +/* + * Shuffling cache + */ +pub static SHUFFLING_CACHE_HITS: LazyLock> = LazyLock::new(|| { + try_create_int_counter( + "beacon_shuffling_cache_hits_total", + "Count of times shuffling cache fulfils request", + ) +}); +pub static SHUFFLING_CACHE_MISSES: LazyLock> = LazyLock::new(|| { + try_create_int_counter( + "beacon_shuffling_cache_misses_total", + "Count of times shuffling cache fulfils request", + ) +}); +pub static SHUFFLING_CACHE_PROMISE_HITS: LazyLock> = LazyLock::new(|| { + try_create_int_counter( + "beacon_shuffling_cache_promise_hits_total", + "Count of times shuffling cache returns a promise to future shuffling", + ) +}); +pub static SHUFFLING_CACHE_PROMISE_FAILS: LazyLock> = LazyLock::new(|| { + try_create_int_counter( + "beacon_shuffling_cache_promise_fails_total", + "Count of times shuffling cache detects a failed promise", + ) +}); - /* - * Early attester cache - */ - pub static ref BEACON_EARLY_ATTESTER_CACHE_HITS: Result = try_create_int_counter( +/* + * Early attester cache + */ +pub static BEACON_EARLY_ATTESTER_CACHE_HITS: LazyLock> = LazyLock::new(|| { + try_create_int_counter( "beacon_early_attester_cache_hits", - "Count of times the early attester cache returns an attestation" - ); + "Count of times the early attester cache returns an 
attestation", + ) +}); - pub static ref BEACON_REQRESP_PRE_IMPORT_CACHE_SIZE: Result = try_create_int_gauge( +pub static BEACON_REQRESP_PRE_IMPORT_CACHE_SIZE: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( "beacon_reqresp_pre_import_cache_size", - "Current count of items of the reqresp pre import cache" - ); - pub static ref BEACON_REQRESP_PRE_IMPORT_CACHE_HITS: Result = try_create_int_counter( - "beacon_reqresp_pre_import_cache_hits", - "Count of times the reqresp pre import cache returns an item" - ); -} + "Current count of items of the reqresp pre import cache", + ) +}); +pub static BEACON_REQRESP_PRE_IMPORT_CACHE_HITS: LazyLock> = + LazyLock::new(|| { + try_create_int_counter( + "beacon_reqresp_pre_import_cache_hits", + "Count of times the reqresp pre import cache returns an item", + ) + }); -// Second lazy-static block is used to account for macro recursion limit. -lazy_static! { - - /* - * Attestation Production - */ - pub static ref ATTESTATION_PRODUCTION_SECONDS: Result = try_create_histogram( +/* + * Attestation Production + */ +pub static ATTESTATION_PRODUCTION_SECONDS: LazyLock> = LazyLock::new(|| { + try_create_histogram( "beacon_attestation_production_seconds", - "Full runtime of attestation production" - ); - pub static ref ATTESTATION_PRODUCTION_HEAD_SCRAPE_SECONDS: Result = try_create_histogram( - "attestation_production_head_scrape_seconds", - "Time taken to read the head state" - ); - pub static ref ATTESTATION_PRODUCTION_CACHE_INTERACTION_SECONDS: Result = try_create_histogram( - "attestation_production_cache_interaction_seconds", - "Time spent interacting with the attester cache" - ); - pub static ref ATTESTATION_PRODUCTION_CACHE_PRIME_SECONDS: Result = try_create_histogram( - "attestation_production_cache_prime_seconds", - "Time spent loading a new state from the disk due to a cache miss" - ); + "Full runtime of attestation production", + ) +}); +pub static ATTESTATION_PRODUCTION_HEAD_SCRAPE_SECONDS: LazyLock> = + LazyLock::new(|| { + 
try_create_histogram( + "attestation_production_head_scrape_seconds", + "Time taken to read the head state", + ) + }); +pub static ATTESTATION_PRODUCTION_CACHE_INTERACTION_SECONDS: LazyLock> = + LazyLock::new(|| { + try_create_histogram( + "attestation_production_cache_interaction_seconds", + "Time spent interacting with the attester cache", + ) + }); +pub static ATTESTATION_PRODUCTION_CACHE_PRIME_SECONDS: LazyLock> = + LazyLock::new(|| { + try_create_histogram( + "attestation_production_cache_prime_seconds", + "Time spent loading a new state from the disk due to a cache miss", + ) + }); - /* - * Fork Choice - */ - pub static ref FORK_CHOICE_REQUESTS: Result = try_create_int_counter( +/* + * Fork Choice + */ +pub static FORK_CHOICE_REQUESTS: LazyLock> = LazyLock::new(|| { + try_create_int_counter( "beacon_fork_choice_requests_total", - "Count of occasions where fork choice has tried to find a head" - ); - pub static ref FORK_CHOICE_ERRORS: Result = try_create_int_counter( + "Count of occasions where fork choice has tried to find a head", + ) +}); +pub static FORK_CHOICE_ERRORS: LazyLock> = LazyLock::new(|| { + try_create_int_counter( "beacon_fork_choice_errors_total", - "Count of occasions where fork choice has returned an error when trying to find a head" - ); - pub static ref FORK_CHOICE_CHANGED_HEAD: Result = try_create_int_counter( + "Count of occasions where fork choice has returned an error when trying to find a head", + ) +}); +pub static FORK_CHOICE_CHANGED_HEAD: LazyLock> = LazyLock::new(|| { + try_create_int_counter( "beacon_fork_choice_changed_head_total", - "Count of occasions fork choice has found a new head" - ); - pub static ref FORK_CHOICE_REORG_COUNT: Result = try_create_int_counter( + "Count of occasions fork choice has found a new head", + ) +}); +pub static FORK_CHOICE_REORG_COUNT: LazyLock> = LazyLock::new(|| { + try_create_int_counter( "beacon_fork_choice_reorg_total", - "Count of occasions fork choice has switched to a different chain" - ); - 
pub static ref FORK_CHOICE_REORG_DISTANCE: Result = try_create_int_gauge( + "Count of occasions fork choice has switched to a different chain", + ) +}); +pub static FORK_CHOICE_REORG_DISTANCE: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( "beacon_fork_choice_reorg_distance", - "The distance of each re-org of the fork choice algorithm" - ); - pub static ref FORK_CHOICE_REORG_COUNT_INTEROP: Result = try_create_int_counter( + "The distance of each re-org of the fork choice algorithm", + ) +}); +pub static FORK_CHOICE_REORG_COUNT_INTEROP: LazyLock> = LazyLock::new(|| { + try_create_int_counter( "beacon_reorgs_total", - "Count of occasions fork choice has switched to a different chain" - ); - pub static ref FORK_CHOICE_TIMES: Result = try_create_histogram_with_buckets( + "Count of occasions fork choice has switched to a different chain", + ) +}); +pub static FORK_CHOICE_TIMES: LazyLock> = LazyLock::new(|| { + try_create_histogram_with_buckets( "beacon_fork_choice_seconds", "Full runtime of fork choice", - linear_buckets(10e-3, 20e-3, 10) - ); - pub static ref FORK_CHOICE_OVERRIDE_FCU_TIMES: Result = try_create_histogram_with_buckets( + linear_buckets(10e-3, 20e-3, 10), + ) +}); +pub static FORK_CHOICE_OVERRIDE_FCU_TIMES: LazyLock> = LazyLock::new(|| { + try_create_histogram_with_buckets( "beacon_fork_choice_override_fcu_seconds", "Time taken to compute the optional forkchoiceUpdated override", - exponential_buckets(1e-3, 2.0, 8) - ); - pub static ref FORK_CHOICE_AFTER_NEW_HEAD_TIMES: Result = try_create_histogram_with_buckets( + exponential_buckets(1e-3, 2.0, 8), + ) +}); +pub static FORK_CHOICE_AFTER_NEW_HEAD_TIMES: LazyLock> = LazyLock::new(|| { + try_create_histogram_with_buckets( "beacon_fork_choice_after_new_head_seconds", "Time taken to run `after_new_head`", - exponential_buckets(1e-3, 2.0, 10) - ); - pub static ref FORK_CHOICE_AFTER_FINALIZATION_TIMES: Result = try_create_histogram_with_buckets( - "beacon_fork_choice_after_finalization_seconds", - "Time 
taken to run `after_finalization`", - exponential_buckets(1e-3, 2.0, 10) - ); - pub static ref FORK_CHOICE_PROCESS_BLOCK_TIMES: Result = try_create_histogram( + exponential_buckets(1e-3, 2.0, 10), + ) +}); +pub static FORK_CHOICE_AFTER_FINALIZATION_TIMES: LazyLock> = + LazyLock::new(|| { + try_create_histogram_with_buckets( + "beacon_fork_choice_after_finalization_seconds", + "Time taken to run `after_finalization`", + exponential_buckets(1e-3, 2.0, 10), + ) + }); +pub static FORK_CHOICE_PROCESS_BLOCK_TIMES: LazyLock> = LazyLock::new(|| { + try_create_histogram( "beacon_fork_choice_process_block_seconds", - "Time taken to add a block and all attestations to fork choice" - ); - pub static ref FORK_CHOICE_PROCESS_ATTESTATION_TIMES: Result = try_create_histogram( - "beacon_fork_choice_process_attestation_seconds", - "Time taken to add an attestation to fork choice" - ); - pub static ref FORK_CHOICE_SET_HEAD_LAG_TIMES: Result = try_create_histogram( + "Time taken to add a block and all attestations to fork choice", + ) +}); +pub static FORK_CHOICE_PROCESS_ATTESTATION_TIMES: LazyLock> = + LazyLock::new(|| { + try_create_histogram( + "beacon_fork_choice_process_attestation_seconds", + "Time taken to add an attestation to fork choice", + ) + }); +pub static FORK_CHOICE_SET_HEAD_LAG_TIMES: LazyLock> = LazyLock::new(|| { + try_create_histogram( "beacon_fork_choice_set_head_lag_times", - "Time taken between finding the head and setting the canonical head value" - ); - pub static ref BALANCES_CACHE_HITS: Result = - try_create_int_counter("beacon_balances_cache_hits_total", "Count of times balances cache fulfils request"); - pub static ref BALANCES_CACHE_MISSES: Result = - try_create_int_counter("beacon_balances_cache_misses_total", "Count of times balances cache misses request"); + "Time taken between finding the head and setting the canonical head value", + ) +}); +pub static BALANCES_CACHE_HITS: LazyLock> = LazyLock::new(|| { + try_create_int_counter( + 
"beacon_balances_cache_hits_total", + "Count of times balances cache fulfils request", + ) +}); +pub static BALANCES_CACHE_MISSES: LazyLock> = LazyLock::new(|| { + try_create_int_counter( + "beacon_balances_cache_misses_total", + "Count of times balances cache misses request", + ) +}); - /* - * Persisting BeaconChain components to disk - */ - pub static ref PERSIST_HEAD: Result = - try_create_histogram("beacon_persist_head", "Time taken to persist the canonical head"); - pub static ref PERSIST_OP_POOL: Result = - try_create_histogram("beacon_persist_op_pool", "Time taken to persist the operations pool"); - pub static ref PERSIST_ETH1_CACHE: Result = - try_create_histogram("beacon_persist_eth1_cache", "Time taken to persist the eth1 caches"); - pub static ref PERSIST_FORK_CHOICE: Result = - try_create_histogram("beacon_persist_fork_choice", "Time taken to persist the fork choice struct"); +/* + * Persisting BeaconChain components to disk + */ +pub static PERSIST_HEAD: LazyLock> = LazyLock::new(|| { + try_create_histogram( + "beacon_persist_head", + "Time taken to persist the canonical head", + ) +}); +pub static PERSIST_OP_POOL: LazyLock> = LazyLock::new(|| { + try_create_histogram( + "beacon_persist_op_pool", + "Time taken to persist the operations pool", + ) +}); +pub static PERSIST_ETH1_CACHE: LazyLock> = LazyLock::new(|| { + try_create_histogram( + "beacon_persist_eth1_cache", + "Time taken to persist the eth1 caches", + ) +}); +pub static PERSIST_FORK_CHOICE: LazyLock> = LazyLock::new(|| { + try_create_histogram( + "beacon_persist_fork_choice", + "Time taken to persist the fork choice struct", + ) +}); - /* - * Eth1 - */ - pub static ref DEFAULT_ETH1_VOTES: Result = - try_create_int_counter("beacon_eth1_default_votes", "Count of times we have voted default value for eth1 data"); +/* + * Eth1 + */ +pub static DEFAULT_ETH1_VOTES: LazyLock> = LazyLock::new(|| { + try_create_int_counter( + "beacon_eth1_default_votes", + "Count of times we have voted default value 
for eth1 data", + ) +}); - /* - * Chain Head - */ - pub static ref UPDATE_HEAD_TIMES: Result = - try_create_histogram("beacon_update_head_seconds", "Time taken to update the canonical head"); - pub static ref HEAD_STATE_SLOT: Result = - try_create_int_gauge("beacon_head_state_slot", "Slot of the block at the head of the chain"); - pub static ref HEAD_STATE_SLOT_INTEROP: Result = - try_create_int_gauge("beacon_head_slot", "Slot of the block at the head of the chain"); - pub static ref HEAD_STATE_ROOT: Result = - try_create_int_gauge("beacon_head_state_root", "Root of the block at the head of the chain"); - pub static ref HEAD_STATE_LATEST_BLOCK_SLOT: Result = - try_create_int_gauge("beacon_head_state_latest_block_slot", "Latest block slot at the head of the chain"); - pub static ref HEAD_STATE_CURRENT_JUSTIFIED_ROOT: Result = - try_create_int_gauge("beacon_head_state_current_justified_root", "Current justified root at the head of the chain"); - pub static ref HEAD_STATE_CURRENT_JUSTIFIED_EPOCH: Result = - try_create_int_gauge("beacon_head_state_current_justified_epoch", "Current justified epoch at the head of the chain"); - pub static ref HEAD_STATE_CURRENT_JUSTIFIED_EPOCH_INTEROP: Result = - try_create_int_gauge("beacon_current_justified_epoch", "Current justified epoch at the head of the chain"); - pub static ref HEAD_STATE_PREVIOUS_JUSTIFIED_ROOT: Result = - try_create_int_gauge("beacon_head_state_previous_justified_root", "Previous justified root at the head of the chain"); - pub static ref HEAD_STATE_PREVIOUS_JUSTIFIED_EPOCH: Result = - try_create_int_gauge("beacon_head_state_previous_justified_epoch", "Previous justified epoch at the head of the chain"); - pub static ref HEAD_STATE_PREVIOUS_JUSTIFIED_EPOCH_INTEROP: Result = - try_create_int_gauge("beacon_previous_justified_epoch", "Previous justified epoch at the head of the chain"); - pub static ref HEAD_STATE_FINALIZED_ROOT: Result = - try_create_int_gauge("beacon_head_state_finalized_root", "Finalized root 
at the head of the chain"); - pub static ref HEAD_STATE_FINALIZED_EPOCH: Result = - try_create_int_gauge("beacon_head_state_finalized_epoch", "Finalized epoch at the head of the chain"); - pub static ref HEAD_STATE_FINALIZED_EPOCH_INTEROP: Result = - try_create_int_gauge("beacon_finalized_epoch", "Finalized epoch at the head of the chain"); - pub static ref HEAD_STATE_TOTAL_VALIDATORS: Result = - try_create_int_gauge("beacon_head_state_total_validators_total", "Count of validators at the head of the chain"); - pub static ref HEAD_STATE_ACTIVE_VALIDATORS: Result = - try_create_int_gauge("beacon_head_state_active_validators_total", "Count of active validators at the head of the chain"); - pub static ref HEAD_STATE_ACTIVE_VALIDATORS_INTEROP: Result = - try_create_int_gauge("beacon_current_active_validators", "Count of active validators at the head of the chain"); - pub static ref HEAD_STATE_VALIDATOR_BALANCES: Result = - try_create_int_gauge("beacon_head_state_validator_balances_total", "Sum of all validator balances at the head of the chain"); - pub static ref HEAD_STATE_SLASHED_VALIDATORS: Result = - try_create_int_gauge("beacon_head_state_slashed_validators_total", "Count of all slashed validators at the head of the chain"); - pub static ref HEAD_STATE_WITHDRAWN_VALIDATORS: Result = - try_create_int_gauge("beacon_head_state_withdrawn_validators_total", "Sum of all validator balances at the head of the chain"); - pub static ref HEAD_STATE_ETH1_DEPOSIT_INDEX: Result = - try_create_int_gauge("beacon_head_state_eth1_deposit_index", "Eth1 deposit index at the head of the chain"); - pub static ref HEAD_STATE_ETH1_DEPOSITS_INTEROP: Result = - try_create_int_gauge("beacon_processed_deposits_total", "Total Eth1 deposits at the head of the chain"); +/* + * Chain Head + */ +pub static UPDATE_HEAD_TIMES: LazyLock> = LazyLock::new(|| { + try_create_histogram( + "beacon_update_head_seconds", + "Time taken to update the canonical head", + ) +}); +pub static HEAD_STATE_SLOT: 
LazyLock> = LazyLock::new(|| { + try_create_int_gauge( + "beacon_head_state_slot", + "Slot of the block at the head of the chain", + ) +}); +pub static HEAD_STATE_SLOT_INTEROP: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( + "beacon_head_slot", + "Slot of the block at the head of the chain", + ) +}); +pub static HEAD_STATE_ROOT: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( + "beacon_head_state_root", + "Root of the block at the head of the chain", + ) +}); +pub static HEAD_STATE_LATEST_BLOCK_SLOT: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( + "beacon_head_state_latest_block_slot", + "Latest block slot at the head of the chain", + ) +}); +pub static HEAD_STATE_CURRENT_JUSTIFIED_ROOT: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( + "beacon_head_state_current_justified_root", + "Current justified root at the head of the chain", + ) +}); +pub static HEAD_STATE_CURRENT_JUSTIFIED_EPOCH: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( + "beacon_head_state_current_justified_epoch", + "Current justified epoch at the head of the chain", + ) +}); +pub static HEAD_STATE_CURRENT_JUSTIFIED_EPOCH_INTEROP: LazyLock> = + LazyLock::new(|| { + try_create_int_gauge( + "beacon_current_justified_epoch", + "Current justified epoch at the head of the chain", + ) + }); +pub static HEAD_STATE_PREVIOUS_JUSTIFIED_ROOT: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( + "beacon_head_state_previous_justified_root", + "Previous justified root at the head of the chain", + ) +}); +pub static HEAD_STATE_PREVIOUS_JUSTIFIED_EPOCH: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( + "beacon_head_state_previous_justified_epoch", + "Previous justified epoch at the head of the chain", + ) +}); +pub static HEAD_STATE_PREVIOUS_JUSTIFIED_EPOCH_INTEROP: LazyLock> = + LazyLock::new(|| { + try_create_int_gauge( + "beacon_previous_justified_epoch", + "Previous justified epoch at the head of the chain", + ) + }); +pub static HEAD_STATE_FINALIZED_ROOT: 
LazyLock> = LazyLock::new(|| { + try_create_int_gauge( + "beacon_head_state_finalized_root", + "Finalized root at the head of the chain", + ) +}); +pub static HEAD_STATE_FINALIZED_EPOCH: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( + "beacon_head_state_finalized_epoch", + "Finalized epoch at the head of the chain", + ) +}); +pub static HEAD_STATE_FINALIZED_EPOCH_INTEROP: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( + "beacon_finalized_epoch", + "Finalized epoch at the head of the chain", + ) +}); +pub static HEAD_STATE_TOTAL_VALIDATORS: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( + "beacon_head_state_total_validators_total", + "Count of validators at the head of the chain", + ) +}); +pub static HEAD_STATE_ACTIVE_VALIDATORS: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( + "beacon_head_state_active_validators_total", + "Count of active validators at the head of the chain", + ) +}); +pub static HEAD_STATE_ACTIVE_VALIDATORS_INTEROP: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( + "beacon_current_active_validators", + "Count of active validators at the head of the chain", + ) +}); +pub static HEAD_STATE_VALIDATOR_BALANCES: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( + "beacon_head_state_validator_balances_total", + "Sum of all validator balances at the head of the chain", + ) +}); +pub static HEAD_STATE_SLASHED_VALIDATORS: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( + "beacon_head_state_slashed_validators_total", + "Count of all slashed validators at the head of the chain", + ) +}); +pub static HEAD_STATE_WITHDRAWN_VALIDATORS: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( + "beacon_head_state_withdrawn_validators_total", + "Sum of all validator balances at the head of the chain", + ) +}); +pub static HEAD_STATE_ETH1_DEPOSIT_INDEX: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( + "beacon_head_state_eth1_deposit_index", + "Eth1 deposit index at the head of the chain", + ) +}); +pub 
static HEAD_STATE_ETH1_DEPOSITS_INTEROP: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( + "beacon_processed_deposits_total", + "Total Eth1 deposits at the head of the chain", + ) +}); - /* - * Operation Pool - */ - pub static ref OP_POOL_NUM_ATTESTATIONS: Result = - try_create_int_gauge("beacon_op_pool_attestations_total", "Count of attestations in the op pool"); - pub static ref OP_POOL_NUM_ATTESTATION_DATA: Result = - try_create_int_gauge("beacon_op_pool_attestation_data_total", "Count of attestation data in the op pool"); - pub static ref OP_POOL_MAX_AGGREGATES_PER_DATA: Result = - try_create_int_gauge("beacon_op_pool_max_aggregates_per_data", "Max aggregates per AttestationData"); - pub static ref OP_POOL_NUM_ATTESTER_SLASHINGS: Result = - try_create_int_gauge("beacon_op_pool_attester_slashings_total", "Count of attester slashings in the op pool"); - pub static ref OP_POOL_NUM_PROPOSER_SLASHINGS: Result = - try_create_int_gauge("beacon_op_pool_proposer_slashings_total", "Count of proposer slashings in the op pool"); - pub static ref OP_POOL_NUM_VOLUNTARY_EXITS: Result = - try_create_int_gauge("beacon_op_pool_voluntary_exits_total", "Count of voluntary exits in the op pool"); - pub static ref OP_POOL_NUM_SYNC_CONTRIBUTIONS: Result = - try_create_int_gauge("beacon_op_pool_sync_contributions_total", "Count of sync contributions in the op pool"); +/* + * Operation Pool + */ +pub static OP_POOL_NUM_ATTESTATIONS: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( + "beacon_op_pool_attestations_total", + "Count of attestations in the op pool", + ) +}); +pub static OP_POOL_NUM_ATTESTATION_DATA: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( + "beacon_op_pool_attestation_data_total", + "Count of attestation data in the op pool", + ) +}); +pub static OP_POOL_MAX_AGGREGATES_PER_DATA: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( + "beacon_op_pool_max_aggregates_per_data", + "Max aggregates per AttestationData", + ) +}); +pub static 
OP_POOL_NUM_ATTESTER_SLASHINGS: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( + "beacon_op_pool_attester_slashings_total", + "Count of attester slashings in the op pool", + ) +}); +pub static OP_POOL_NUM_PROPOSER_SLASHINGS: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( + "beacon_op_pool_proposer_slashings_total", + "Count of proposer slashings in the op pool", + ) +}); +pub static OP_POOL_NUM_VOLUNTARY_EXITS: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( + "beacon_op_pool_voluntary_exits_total", + "Count of voluntary exits in the op pool", + ) +}); +pub static OP_POOL_NUM_SYNC_CONTRIBUTIONS: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( + "beacon_op_pool_sync_contributions_total", + "Count of sync contributions in the op pool", + ) +}); +/* + * Attestation Observation Metrics + */ +pub static ATTN_OBSERVATION_PREV_EPOCH_ATTESTERS: LazyLock> = + LazyLock::new(|| { + try_create_int_gauge( + "beacon_attn_observation_epoch_attesters", + "Count of attesters that have been seen by the beacon chain in the previous epoch", + ) + }); +pub static ATTN_OBSERVATION_PREV_EPOCH_AGGREGATORS: LazyLock> = + LazyLock::new(|| { + try_create_int_gauge( + "beacon_attn_observation_epoch_aggregators", + "Count of aggregators that have been seen by the beacon chain in the previous epoch", + ) + }); - /* - * Attestation Observation Metrics - */ - pub static ref ATTN_OBSERVATION_PREV_EPOCH_ATTESTERS: Result = try_create_int_gauge( - "beacon_attn_observation_epoch_attesters", - "Count of attesters that have been seen by the beacon chain in the previous epoch" - ); - pub static ref ATTN_OBSERVATION_PREV_EPOCH_AGGREGATORS: Result = try_create_int_gauge( - "beacon_attn_observation_epoch_aggregators", - "Count of aggregators that have been seen by the beacon chain in the previous epoch" - ); - - /* - * Sync Committee Observation Metrics - */ - pub static ref SYNC_COMM_OBSERVATION_PREV_SLOT_SIGNERS: Result = try_create_int_gauge( +/* + * Sync Committee 
Observation Metrics + */ +pub static SYNC_COMM_OBSERVATION_PREV_SLOT_SIGNERS: LazyLock> = LazyLock::new( + || { + try_create_int_gauge( "beacon_sync_comm_observation_slot_signers", "Count of sync committee contributors that have been seen by the beacon chain in the previous slot" - ); - pub static ref SYNC_COMM_OBSERVATION_PREV_SLOT_AGGREGATORS: Result = try_create_int_gauge( + ) + }, +); +pub static SYNC_COMM_OBSERVATION_PREV_SLOT_AGGREGATORS: LazyLock> = LazyLock::new( + || { + try_create_int_gauge( "beacon_sync_comm_observation_slot_aggregators", "Count of sync committee aggregators that have been seen by the beacon chain in the previous slot" - ); -} + ) + }, +); -// Third lazy-static block is used to account for macro recursion limit. -lazy_static! { - /* - * Validator Monitor Metrics (balances, etc) - */ - pub static ref VALIDATOR_MONITOR_BALANCE_GWEI: Result = - try_create_int_gauge_vec( - "validator_monitor_balance_gwei", - "The validator's balance in gwei.", - &["validator"] - ); - pub static ref VALIDATOR_MONITOR_EFFECTIVE_BALANCE_GWEI: Result = +/* + * Validator Monitor Metrics (balances, etc) + */ +pub static VALIDATOR_MONITOR_BALANCE_GWEI: LazyLock> = LazyLock::new(|| { + try_create_int_gauge_vec( + "validator_monitor_balance_gwei", + "The validator's balance in gwei.", + &["validator"], + ) +}); +pub static VALIDATOR_MONITOR_EFFECTIVE_BALANCE_GWEI: LazyLock> = + LazyLock::new(|| { try_create_int_gauge_vec( "validator_monitor_effective_balance_gwei", "The validator's effective balance in gwei.", - &["validator"] - ); - pub static ref VALIDATOR_MONITOR_SLASHED: Result = - try_create_int_gauge_vec( - "validator_monitor_slashed", - "Set to 1 if the validator is slashed.", - &["validator"] - ); - pub static ref VALIDATOR_MONITOR_ACTIVE: Result = - try_create_int_gauge_vec( - "validator_monitor_active", - "Set to 1 if the validator is active.", - &["validator"] - ); - pub static ref VALIDATOR_MONITOR_EXITED: Result = - try_create_int_gauge_vec( - 
"validator_monitor_exited", - "Set to 1 if the validator is exited.", - &["validator"] - ); - pub static ref VALIDATOR_MONITOR_WITHDRAWABLE: Result = - try_create_int_gauge_vec( - "validator_monitor_withdrawable", - "Set to 1 if the validator is withdrawable.", - &["validator"] - ); - pub static ref VALIDATOR_ACTIVATION_ELIGIBILITY_EPOCH: Result = + &["validator"], + ) + }); +pub static VALIDATOR_MONITOR_SLASHED: LazyLock> = LazyLock::new(|| { + try_create_int_gauge_vec( + "validator_monitor_slashed", + "Set to 1 if the validator is slashed.", + &["validator"], + ) +}); +pub static VALIDATOR_MONITOR_ACTIVE: LazyLock> = LazyLock::new(|| { + try_create_int_gauge_vec( + "validator_monitor_active", + "Set to 1 if the validator is active.", + &["validator"], + ) +}); +pub static VALIDATOR_MONITOR_EXITED: LazyLock> = LazyLock::new(|| { + try_create_int_gauge_vec( + "validator_monitor_exited", + "Set to 1 if the validator is exited.", + &["validator"], + ) +}); +pub static VALIDATOR_MONITOR_WITHDRAWABLE: LazyLock> = LazyLock::new(|| { + try_create_int_gauge_vec( + "validator_monitor_withdrawable", + "Set to 1 if the validator is withdrawable.", + &["validator"], + ) +}); +pub static VALIDATOR_ACTIVATION_ELIGIBILITY_EPOCH: LazyLock> = + LazyLock::new(|| { try_create_int_gauge_vec( "validator_activation_eligibility_epoch", "Set to the epoch where the validator will be eligible for activation.", - &["validator"] - ); - pub static ref VALIDATOR_ACTIVATION_EPOCH: Result = - try_create_int_gauge_vec( - "validator_activation_epoch", - "Set to the epoch where the validator will activate.", - &["validator"] - ); - pub static ref VALIDATOR_EXIT_EPOCH: Result = - try_create_int_gauge_vec( - "validator_exit_epoch", - "Set to the epoch where the validator will exit.", - &["validator"] - ); - pub static ref VALIDATOR_WITHDRAWABLE_EPOCH: Result = - try_create_int_gauge_vec( - "validator_withdrawable_epoch", - "Set to the epoch where the validator will be withdrawable.", - &["validator"] 
- ); + &["validator"], + ) + }); +pub static VALIDATOR_ACTIVATION_EPOCH: LazyLock> = LazyLock::new(|| { + try_create_int_gauge_vec( + "validator_activation_epoch", + "Set to the epoch where the validator will activate.", + &["validator"], + ) +}); +pub static VALIDATOR_EXIT_EPOCH: LazyLock> = LazyLock::new(|| { + try_create_int_gauge_vec( + "validator_exit_epoch", + "Set to the epoch where the validator will exit.", + &["validator"], + ) +}); +pub static VALIDATOR_WITHDRAWABLE_EPOCH: LazyLock> = LazyLock::new(|| { + try_create_int_gauge_vec( + "validator_withdrawable_epoch", + "Set to the epoch where the validator will be withdrawable.", + &["validator"], + ) +}); - /* - * Validator Monitor Metrics (per-epoch summaries) - */ - pub static ref VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_ATTESTER_HIT: Result = +/* + * Validator Monitor Metrics (per-epoch summaries) + */ +pub static VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_ATTESTER_HIT: LazyLock> = + LazyLock::new(|| { try_create_int_counter_vec( "validator_monitor_prev_epoch_on_chain_attester_hit", "Incremented if the validator is flagged as a previous epoch attester \ during per epoch processing", - &["validator"] - ); - pub static ref VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_ATTESTER_MISS: Result = + &["validator"], + ) + }); +pub static VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_ATTESTER_MISS: LazyLock> = + LazyLock::new(|| { try_create_int_counter_vec( "validator_monitor_prev_epoch_on_chain_attester_miss", "Incremented if the validator is not flagged as a previous epoch attester \ during per epoch processing", - &["validator"] - ); - pub static ref VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_HEAD_ATTESTER_HIT: Result = - try_create_int_counter_vec( - "validator_monitor_prev_epoch_on_chain_head_attester_hit", - "Incremented if the validator is flagged as a previous epoch head attester \ + &["validator"], + ) + }); +pub static VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_HEAD_ATTESTER_HIT: LazyLock< + Result, +> = LazyLock::new(|| { + 
try_create_int_counter_vec( + "validator_monitor_prev_epoch_on_chain_head_attester_hit", + "Incremented if the validator is flagged as a previous epoch head attester \ during per epoch processing", - &["validator"] - ); - pub static ref VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_HEAD_ATTESTER_MISS: Result = - try_create_int_counter_vec( - "validator_monitor_prev_epoch_on_chain_head_attester_miss", - "Incremented if the validator is not flagged as a previous epoch head attester \ + &["validator"], + ) +}); +pub static VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_HEAD_ATTESTER_MISS: LazyLock< + Result, +> = LazyLock::new(|| { + try_create_int_counter_vec( + "validator_monitor_prev_epoch_on_chain_head_attester_miss", + "Incremented if the validator is not flagged as a previous epoch head attester \ during per epoch processing", - &["validator"] - ); - pub static ref VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_TARGET_ATTESTER_HIT: Result = - try_create_int_counter_vec( - "validator_monitor_prev_epoch_on_chain_target_attester_hit", - "Incremented if the validator is flagged as a previous epoch target attester \ + &["validator"], + ) +}); +pub static VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_TARGET_ATTESTER_HIT: LazyLock< + Result, +> = LazyLock::new(|| { + try_create_int_counter_vec( + "validator_monitor_prev_epoch_on_chain_target_attester_hit", + "Incremented if the validator is flagged as a previous epoch target attester \ during per epoch processing", - &["validator"] - ); - pub static ref VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_TARGET_ATTESTER_MISS: Result = - try_create_int_counter_vec( - "validator_monitor_prev_epoch_on_chain_target_attester_miss", - "Incremented if the validator is not flagged as a previous epoch target attester \ + &["validator"], + ) +}); +pub static VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_TARGET_ATTESTER_MISS: LazyLock< + Result, +> = LazyLock::new(|| { + try_create_int_counter_vec( + "validator_monitor_prev_epoch_on_chain_target_attester_miss", + "Incremented if the 
validator is not flagged as a previous epoch target attester \ during per epoch processing", - &["validator"] - ); - pub static ref VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_INCLUSION_DISTANCE: Result = + &["validator"], + ) +}); +pub static VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_INCLUSION_DISTANCE: LazyLock> = + LazyLock::new(|| { try_create_int_gauge_vec( "validator_monitor_prev_epoch_on_chain_inclusion_distance", "The attestation inclusion distance calculated during per epoch processing", - &["validator"] - ); - pub static ref VALIDATOR_MONITOR_PREV_EPOCH_ATTESTATIONS_TOTAL: Result = + &["validator"], + ) + }); +pub static VALIDATOR_MONITOR_PREV_EPOCH_ATTESTATIONS_TOTAL: LazyLock> = + LazyLock::new(|| { try_create_int_gauge_vec( "validator_monitor_prev_epoch_attestations_total", "The number of unagg. attestations seen in the previous epoch.", - &["validator"] - ); - pub static ref VALIDATOR_MONITOR_PREV_EPOCH_ATTESTATIONS_MIN_DELAY_SECONDS: Result = - try_create_histogram_vec( + &["validator"], + ) + }); +pub static VALIDATOR_MONITOR_PREV_EPOCH_ATTESTATIONS_MIN_DELAY_SECONDS: LazyLock< + Result, +> = LazyLock::new(|| { + try_create_histogram_vec( "validator_monitor_prev_epoch_attestations_min_delay_seconds", "The min delay between when the validator should send the attestation and when it was received.", &["validator"] - ); - pub static ref VALIDATOR_MONITOR_PREV_EPOCH_ATTESTATION_AGGREGATE_INCLUSIONS: Result = - try_create_int_gauge_vec( - "validator_monitor_prev_epoch_attestation_aggregate_inclusions", - "The count of times an attestation was seen inside an aggregate.", - &["validator"] - ); - pub static ref VALIDATOR_MONITOR_PREV_EPOCH_ATTESTATION_BLOCK_INCLUSIONS: Result = - try_create_int_gauge_vec( - "validator_monitor_prev_epoch_attestation_block_inclusions", - "The count of times an attestation was seen inside a block.", - &["validator"] - ); - pub static ref VALIDATOR_MONITOR_PREV_EPOCH_ATTESTATION_BLOCK_MIN_INCLUSION_DISTANCE: Result = - 
try_create_int_gauge_vec( - "validator_monitor_prev_epoch_attestation_block_min_inclusion_distance", - "The minimum inclusion distance observed for the inclusion of an attestation in a block.", - &["validator"] - ); - pub static ref VALIDATOR_MONITOR_PREV_EPOCH_BEACON_BLOCKS_TOTAL: Result = + ) +}); +pub static VALIDATOR_MONITOR_PREV_EPOCH_ATTESTATION_AGGREGATE_INCLUSIONS: LazyLock< + Result, +> = LazyLock::new(|| { + try_create_int_gauge_vec( + "validator_monitor_prev_epoch_attestation_aggregate_inclusions", + "The count of times an attestation was seen inside an aggregate.", + &["validator"], + ) +}); +pub static VALIDATOR_MONITOR_PREV_EPOCH_ATTESTATION_BLOCK_INCLUSIONS: LazyLock< + Result, +> = LazyLock::new(|| { + try_create_int_gauge_vec( + "validator_monitor_prev_epoch_attestation_block_inclusions", + "The count of times an attestation was seen inside a block.", + &["validator"], + ) +}); +pub static VALIDATOR_MONITOR_PREV_EPOCH_ATTESTATION_BLOCK_MIN_INCLUSION_DISTANCE: LazyLock< + Result, +> = LazyLock::new(|| { + try_create_int_gauge_vec( + "validator_monitor_prev_epoch_attestation_block_min_inclusion_distance", + "The minimum inclusion distance observed for the inclusion of an attestation in a block.", + &["validator"], + ) +}); +pub static VALIDATOR_MONITOR_PREV_EPOCH_BEACON_BLOCKS_TOTAL: LazyLock> = + LazyLock::new(|| { try_create_int_gauge_vec( "validator_monitor_prev_epoch_beacon_blocks_total", "The number of beacon_blocks seen in the previous epoch.", - &["validator"] - ); - pub static ref VALIDATOR_MONITOR_PREV_EPOCH_BEACON_BLOCKS_MIN_DELAY_SECONDS: Result = - try_create_histogram_vec( - "validator_monitor_prev_epoch_beacon_blocks_min_delay_seconds", - "The min delay between when the validator should send the block and when it was received.", - &["validator"] - ); - pub static ref VALIDATOR_MONITOR_PREV_EPOCH_AGGREGATES_TOTAL: Result = + &["validator"], + ) + }); +pub static VALIDATOR_MONITOR_PREV_EPOCH_BEACON_BLOCKS_MIN_DELAY_SECONDS: LazyLock< + 
Result, +> = LazyLock::new(|| { + try_create_histogram_vec( + "validator_monitor_prev_epoch_beacon_blocks_min_delay_seconds", + "The min delay between when the validator should send the block and when it was received.", + &["validator"], + ) +}); +pub static VALIDATOR_MONITOR_PREV_EPOCH_AGGREGATES_TOTAL: LazyLock> = + LazyLock::new(|| { try_create_int_gauge_vec( "validator_monitor_prev_epoch_aggregates_total", "The number of aggregates seen in the previous epoch.", - &["validator"] - ); - pub static ref VALIDATOR_MONITOR_PREV_EPOCH_AGGREGATES_MIN_DELAY_SECONDS: Result = - try_create_histogram_vec( + &["validator"], + ) + }); +pub static VALIDATOR_MONITOR_PREV_EPOCH_AGGREGATES_MIN_DELAY_SECONDS: LazyLock< + Result, +> = LazyLock::new(|| { + try_create_histogram_vec( "validator_monitor_prev_epoch_aggregates_min_delay_seconds", "The min delay between when the validator should send the aggregate and when it was received.", &["validator"] - ); - pub static ref VALIDATOR_MONITOR_PREV_EPOCH_EXITS_TOTAL: Result = + ) +}); +pub static VALIDATOR_MONITOR_PREV_EPOCH_EXITS_TOTAL: LazyLock> = + LazyLock::new(|| { try_create_int_gauge_vec( "validator_monitor_prev_epoch_exits_total", "The number of exits seen in the previous epoch.", - &["validator"] - ); - pub static ref VALIDATOR_MONITOR_PREV_EPOCH_PROPOSER_SLASHINGS_TOTAL: Result = + &["validator"], + ) + }); +pub static VALIDATOR_MONITOR_PREV_EPOCH_PROPOSER_SLASHINGS_TOTAL: LazyLock> = + LazyLock::new(|| { try_create_int_gauge_vec( "validator_monitor_prev_epoch_proposer_slashings_total", "The number of proposer slashings seen in the previous epoch.", - &["validator"] - ); - pub static ref VALIDATOR_MONITOR_PREV_EPOCH_ATTESTER_SLASHINGS_TOTAL: Result = + &["validator"], + ) + }); +pub static VALIDATOR_MONITOR_PREV_EPOCH_ATTESTER_SLASHINGS_TOTAL: LazyLock> = + LazyLock::new(|| { try_create_int_gauge_vec( "validator_monitor_prev_epoch_attester_slashings_total", "The number of attester slashings seen in the previous epoch.", - 
&["validator"] - ); - pub static ref VALIDATOR_MONITOR_PREV_EPOCH_SYNC_COMMITTEE_MESSAGES_TOTAL: Result = - try_create_int_gauge_vec( - "validator_monitor_prev_epoch_sync_committee_messages_total", - "The number of sync committee messages seen in the previous epoch.", - &["validator"] - ); - pub static ref VALIDATOR_MONITOR_PREV_EPOCH_SYNC_COMMITTEE_MESSAGES_MIN_DELAY_SECONDS: Result = - try_create_histogram_vec( + &["validator"], + ) + }); +pub static VALIDATOR_MONITOR_PREV_EPOCH_SYNC_COMMITTEE_MESSAGES_TOTAL: LazyLock< + Result, +> = LazyLock::new(|| { + try_create_int_gauge_vec( + "validator_monitor_prev_epoch_sync_committee_messages_total", + "The number of sync committee messages seen in the previous epoch.", + &["validator"], + ) +}); +pub static VALIDATOR_MONITOR_PREV_EPOCH_SYNC_COMMITTEE_MESSAGES_MIN_DELAY_SECONDS: LazyLock< + Result, +> = LazyLock::new(|| { + try_create_histogram_vec( "validator_monitor_prev_epoch_sync_committee_messages_min_delay_seconds", "The min delay between when the validator should send the sync committee message and when it was received.", &["validator"] - ); - pub static ref VALIDATOR_MONITOR_PREV_EPOCH_SYNC_CONTRIBUTION_INCLUSIONS: Result = - try_create_int_gauge_vec( - "validator_monitor_prev_epoch_sync_contribution_inclusions", - "The count of times a sync signature was seen inside a sync contribution.", - &["validator"] - ); - pub static ref VALIDATOR_MONITOR_PREV_EPOCH_SYNC_SIGNATURE_BLOCK_INCLUSIONS: Result = - try_create_int_gauge_vec( - "validator_monitor_prev_epoch_sync_signature_block_inclusions", - "The count of times a sync signature was seen inside a block.", - &["validator"] - ); - pub static ref VALIDATOR_MONITOR_PREV_EPOCH_SYNC_CONTRIBUTIONS_TOTAL: Result = + ) +}); +pub static VALIDATOR_MONITOR_PREV_EPOCH_SYNC_CONTRIBUTION_INCLUSIONS: LazyLock< + Result, +> = LazyLock::new(|| { + try_create_int_gauge_vec( + "validator_monitor_prev_epoch_sync_contribution_inclusions", + "The count of times a sync signature was seen 
inside a sync contribution.", + &["validator"], + ) +}); +pub static VALIDATOR_MONITOR_PREV_EPOCH_SYNC_SIGNATURE_BLOCK_INCLUSIONS: LazyLock< + Result, +> = LazyLock::new(|| { + try_create_int_gauge_vec( + "validator_monitor_prev_epoch_sync_signature_block_inclusions", + "The count of times a sync signature was seen inside a block.", + &["validator"], + ) +}); +pub static VALIDATOR_MONITOR_PREV_EPOCH_SYNC_CONTRIBUTIONS_TOTAL: LazyLock> = + LazyLock::new(|| { try_create_int_gauge_vec( "validator_monitor_prev_epoch_sync_contributions_total", "The number of sync contributions seen in the previous epoch.", - &["validator"] - ); - pub static ref VALIDATOR_MONITOR_PREV_EPOCH_SYNC_CONTRIBUTION_MIN_DELAY_SECONDS: Result = - try_create_histogram_vec( + &["validator"], + ) + }); +pub static VALIDATOR_MONITOR_PREV_EPOCH_SYNC_CONTRIBUTION_MIN_DELAY_SECONDS: LazyLock< + Result, +> = LazyLock::new(|| { + try_create_histogram_vec( "validator_monitor_prev_epoch_sync_contribution_min_delay_seconds", "The min delay between when the validator should send the sync contribution and when it was received.", &["validator"] - ); - pub static ref VALIDATOR_MONITOR_VALIDATOR_IN_CURRENT_SYNC_COMMITTEE: Result = + ) +}); +pub static VALIDATOR_MONITOR_VALIDATOR_IN_CURRENT_SYNC_COMMITTEE: LazyLock> = + LazyLock::new(|| { try_create_int_gauge_vec( "validator_monitor_validator_in_current_sync_committee", "Is the validator in the current sync committee (1 for true and 0 for false)", - &["validator"] - ); + &["validator"], + ) + }); - /* - * Validator Monitor Metrics (real-time) - */ - pub static ref VALIDATOR_MONITOR_VALIDATORS_TOTAL: Result = try_create_int_gauge( +/* + * Validator Monitor Metrics (real-time) + */ +pub static VALIDATOR_MONITOR_VALIDATORS_TOTAL: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( "validator_monitor_validators_total", - "Count of validators that are specifically monitored by this beacon node" - ); - pub static ref VALIDATOR_MONITOR_UNAGGREGATED_ATTESTATION_TOTAL: 
Result = try_create_int_counter_vec( - "validator_monitor_unaggregated_attestation_total", - "Number of unaggregated attestations seen", - &["src", "validator"] - ); - pub static ref VALIDATOR_MONITOR_UNAGGREGATED_ATTESTATION_DELAY_SECONDS: Result = try_create_histogram_vec( + "Count of validators that are specifically monitored by this beacon node", + ) +}); +pub static VALIDATOR_MONITOR_UNAGGREGATED_ATTESTATION_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_counter_vec( + "validator_monitor_unaggregated_attestation_total", + "Number of unaggregated attestations seen", + &["src", "validator"], + ) + }); +pub static VALIDATOR_MONITOR_UNAGGREGATED_ATTESTATION_DELAY_SECONDS: LazyLock< + Result, +> = LazyLock::new(|| { + try_create_histogram_vec( "validator_monitor_unaggregated_attestation_delay_seconds", "The delay between when the validator should send the attestation and when it was received.", &["src", "validator"] - ); - pub static ref VALIDATOR_MONITOR_SYNC_COMMITTEE_MESSAGES_TOTAL: Result = try_create_int_counter_vec( - "validator_monitor_sync_committee_messages_total", - "Number of sync committee messages seen", - &["src", "validator"] - ); - pub static ref VALIDATOR_MONITOR_SYNC_COMMITTEE_MESSAGES_DELAY_SECONDS: Result = try_create_histogram_vec( + ) +}); +pub static VALIDATOR_MONITOR_SYNC_COMMITTEE_MESSAGES_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_counter_vec( + "validator_monitor_sync_committee_messages_total", + "Number of sync committee messages seen", + &["src", "validator"], + ) + }); +pub static VALIDATOR_MONITOR_SYNC_COMMITTEE_MESSAGES_DELAY_SECONDS: LazyLock> = + LazyLock::new(|| { + try_create_histogram_vec( "validator_monitor_sync_committee_messages_delay_seconds", "The delay between when the validator should send the sync committee message and when it was received.", &["src", "validator"] - ); - pub static ref VALIDATOR_MONITOR_SYNC_CONTRIBUTIONS_TOTAL: Result = try_create_int_counter_vec( - 
"validator_monitor_sync_contributions_total", - "Number of sync contributions seen", - &["src", "validator"] - ); - pub static ref VALIDATOR_MONITOR_SYNC_CONTRIBUTIONS_DELAY_SECONDS: Result = try_create_histogram_vec( + ) + }); +pub static VALIDATOR_MONITOR_SYNC_CONTRIBUTIONS_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_counter_vec( + "validator_monitor_sync_contributions_total", + "Number of sync contributions seen", + &["src", "validator"], + ) + }); +pub static VALIDATOR_MONITOR_SYNC_CONTRIBUTIONS_DELAY_SECONDS: LazyLock> = + LazyLock::new(|| { + try_create_histogram_vec( "validator_monitor_sync_contributions_delay_seconds", "The delay between when the aggregator should send the sync contribution and when it was received.", &["src", "validator"] - ); - pub static ref VALIDATOR_MONITOR_AGGREGATED_ATTESTATION_TOTAL: Result = try_create_int_counter_vec( - "validator_monitor_aggregated_attestation_total", - "Number of aggregated attestations seen", - &["src", "validator"] - ); - pub static ref VALIDATOR_MONITOR_AGGREGATED_ATTESTATION_DELAY_SECONDS: Result = try_create_histogram_vec( + ) + }); +pub static VALIDATOR_MONITOR_AGGREGATED_ATTESTATION_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_counter_vec( + "validator_monitor_aggregated_attestation_total", + "Number of aggregated attestations seen", + &["src", "validator"], + ) + }); +pub static VALIDATOR_MONITOR_AGGREGATED_ATTESTATION_DELAY_SECONDS: LazyLock> = + LazyLock::new(|| { + try_create_histogram_vec( "validator_monitor_aggregated_attestation_delay_seconds", "The delay between then the validator should send the aggregate and when it was received.", &["src", "validator"] - ); - pub static ref VALIDATOR_MONITOR_ATTESTATION_IN_AGGREGATE_TOTAL: Result = try_create_int_counter_vec( - "validator_monitor_attestation_in_aggregate_total", - "Number of times an attestation has been seen in an aggregate", - &["src", "validator"] - ); - pub static ref 
VALIDATOR_MONITOR_SYNC_COMMITTEE_MESSAGE_IN_CONTRIBUTION_TOTAL: Result = try_create_int_counter_vec( + ) + }); +pub static VALIDATOR_MONITOR_ATTESTATION_IN_AGGREGATE_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_counter_vec( + "validator_monitor_attestation_in_aggregate_total", + "Number of times an attestation has been seen in an aggregate", + &["src", "validator"], + ) + }); +pub static VALIDATOR_MONITOR_SYNC_COMMITTEE_MESSAGE_IN_CONTRIBUTION_TOTAL: LazyLock< + Result, +> = LazyLock::new(|| { + try_create_int_counter_vec( "validator_monitor_sync_committee_message_in_contribution_total", "Number of times a sync committee message has been seen in a sync contribution", - &["src", "validator"] - ); - pub static ref VALIDATOR_MONITOR_ATTESTATION_IN_AGGREGATE_DELAY_SECONDS: Result = try_create_histogram_vec( + &["src", "validator"], + ) +}); +pub static VALIDATOR_MONITOR_ATTESTATION_IN_AGGREGATE_DELAY_SECONDS: LazyLock< + Result, +> = LazyLock::new(|| { + try_create_histogram_vec( "validator_monitor_attestation_in_aggregate_delay_seconds", "The delay between when the validator should send the aggregate and when it was received.", - &["src", "validator"] - ); - pub static ref VALIDATOR_MONITOR_ATTESTATION_IN_BLOCK_TOTAL: Result = try_create_int_counter_vec( - "validator_monitor_attestation_in_block_total", - "Number of times an attestation has been seen in a block", - &["src", "validator"] - ); - pub static ref VALIDATOR_MONITOR_SYNC_COMMITTEE_MESSAGE_IN_BLOCK_TOTAL: Result = try_create_int_counter_vec( + &["src", "validator"], + ) +}); +pub static VALIDATOR_MONITOR_ATTESTATION_IN_BLOCK_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_counter_vec( + "validator_monitor_attestation_in_block_total", + "Number of times an attestation has been seen in a block", + &["src", "validator"], + ) + }); +pub static VALIDATOR_MONITOR_SYNC_COMMITTEE_MESSAGE_IN_BLOCK_TOTAL: LazyLock< + Result, +> = LazyLock::new(|| { + try_create_int_counter_vec( 
"validator_monitor_sync_committee_message_in_block_total", "Number of times a validator's sync committee message has been seen in a sync aggregate", - &["src", "validator"] - ); - pub static ref VALIDATOR_MONITOR_ATTESTATION_IN_BLOCK_DELAY_SLOTS: Result = try_create_int_gauge_vec( + &["src", "validator"], + ) +}); +pub static VALIDATOR_MONITOR_ATTESTATION_IN_BLOCK_DELAY_SLOTS: LazyLock> = + LazyLock::new(|| { + try_create_int_gauge_vec( "validator_monitor_attestation_in_block_delay_slots", "The excess slots (beyond the minimum delay) between the attestation slot and the block slot.", &["src", "validator"] - ); - pub static ref VALIDATOR_MONITOR_BEACON_BLOCK_TOTAL: Result = try_create_int_counter_vec( - "validator_monitor_beacon_block_total", - "Number of beacon blocks seen", - &["src", "validator"] - ); - pub static ref VALIDATOR_MONITOR_BEACON_BLOCK_DELAY_SECONDS: Result = try_create_histogram_vec( - "validator_monitor_beacon_block_delay_seconds", - "The delay between when the validator should send the block and when it was received.", - &["src", "validator"] - ); - pub static ref VALIDATOR_MONITOR_EXIT_TOTAL: Result = try_create_int_counter_vec( + ) + }); +pub static VALIDATOR_MONITOR_BEACON_BLOCK_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_counter_vec( + "validator_monitor_beacon_block_total", + "Number of beacon blocks seen", + &["src", "validator"], + ) + }); +pub static VALIDATOR_MONITOR_BEACON_BLOCK_DELAY_SECONDS: LazyLock> = + LazyLock::new(|| { + try_create_histogram_vec( + "validator_monitor_beacon_block_delay_seconds", + "The delay between when the validator should send the block and when it was received.", + &["src", "validator"], + ) + }); +pub static VALIDATOR_MONITOR_EXIT_TOTAL: LazyLock> = LazyLock::new(|| { + try_create_int_counter_vec( "validator_monitor_exit_total", "Number of beacon exits seen", - &["src", "validator"] - ); - pub static ref VALIDATOR_MONITOR_PROPOSER_SLASHING_TOTAL: Result = try_create_int_counter_vec( - 
"validator_monitor_proposer_slashing_total", - "Number of proposer slashings seen", - &["src", "validator"] - ); - pub static ref VALIDATOR_MONITOR_ATTESTER_SLASHING_TOTAL: Result = try_create_int_counter_vec( - "validator_monitor_attester_slashing_total", - "Number of attester slashings seen", - &["src", "validator"] - ); -} + &["src", "validator"], + ) +}); +pub static VALIDATOR_MONITOR_PROPOSER_SLASHING_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_counter_vec( + "validator_monitor_proposer_slashing_total", + "Number of proposer slashings seen", + &["src", "validator"], + ) + }); +pub static VALIDATOR_MONITOR_ATTESTER_SLASHING_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_counter_vec( + "validator_monitor_attester_slashing_total", + "Number of attester slashings seen", + &["src", "validator"], + ) + }); // Prevent recursion limit -lazy_static! { - /* - * Block Delay Metrics - */ - pub static ref BEACON_BLOCK_DELAY_TOTAL: Result = try_create_int_gauge( +/* + * Block Delay Metrics + */ +pub static BEACON_BLOCK_DELAY_TOTAL: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( "beacon_block_delay_total", "Duration between the start of the block's slot and the time when it was set as head.", - ); + ) +}); - pub static ref BEACON_BLOCK_DELAY_OBSERVED_SLOT_START: Result = try_create_int_gauge( - "beacon_block_delay_observed_slot_start", - "Duration between the start of the block's slot and the time the block was observed.", - ); +pub static BEACON_BLOCK_DELAY_OBSERVED_SLOT_START: LazyLock> = + LazyLock::new(|| { + try_create_int_gauge( + "beacon_block_delay_observed_slot_start", + "Duration between the start of the block's slot and the time the block was observed.", + ) + }); - pub static ref BEACON_BLOB_DELAY_ALL_OBSERVED_SLOT_START: Result = try_create_int_gauge( - "beacon_blob_delay_all_observed_slot_start", - "Duration between the start of the block's slot and the time the block was observed.", - ); +pub static 
BEACON_BLOB_DELAY_ALL_OBSERVED_SLOT_START: LazyLock> = + LazyLock::new(|| { + try_create_int_gauge( + "beacon_blob_delay_all_observed_slot_start", + "Duration between the start of the block's slot and the time the block was observed.", + ) + }); - pub static ref BEACON_BLOCK_DELAY_CONSENSUS_VERIFICATION_TIME: Result = try_create_int_gauge( - "beacon_block_delay_consensus_verification_time", - "The time taken to verify the block within Lighthouse", - ); +pub static BEACON_BLOCK_DELAY_CONSENSUS_VERIFICATION_TIME: LazyLock> = + LazyLock::new(|| { + try_create_int_gauge( + "beacon_block_delay_consensus_verification_time", + "The time taken to verify the block within Lighthouse", + ) + }); - pub static ref BEACON_BLOCK_DELAY_EXECUTION_TIME: Result = try_create_int_gauge( +pub static BEACON_BLOCK_DELAY_EXECUTION_TIME: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( "beacon_block_delay_execution_time", "The duration in verifying the block with the execution layer.", - ); + ) +}); - pub static ref BEACON_BLOCK_DELAY_AVAILABLE_SLOT_START: Result = try_create_int_gauge( - "beacon_block_delay_available_slot_start", - "Duration between the time that block became available and the start of the slot.", - ); - pub static ref BEACON_BLOCK_DELAY_ATTESTABLE_SLOT_START: Result = try_create_int_gauge( - "beacon_block_delay_attestable_slot_start", - "Duration between the time that block became attestable and the start of the slot.", - ); +pub static BEACON_BLOCK_DELAY_AVAILABLE_SLOT_START: LazyLock> = + LazyLock::new(|| { + try_create_int_gauge( + "beacon_block_delay_available_slot_start", + "Duration between the time that block became available and the start of the slot.", + ) + }); +pub static BEACON_BLOCK_DELAY_ATTESTABLE_SLOT_START: LazyLock> = + LazyLock::new(|| { + try_create_int_gauge( + "beacon_block_delay_attestable_slot_start", + "Duration between the time that block became attestable and the start of the slot.", + ) + }); - pub static ref 
BEACON_BLOCK_DELAY_IMPORTED_TIME: Result = try_create_int_gauge( +pub static BEACON_BLOCK_DELAY_IMPORTED_TIME: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( "beacon_block_delay_imported_time", "Duration between the time the block became available and the time when it was imported.", - ); + ) +}); - pub static ref BEACON_BLOCK_DELAY_HEAD_IMPORTED_TIME: Result = try_create_int_gauge( +pub static BEACON_BLOCK_DELAY_HEAD_IMPORTED_TIME: LazyLock> = + LazyLock::new(|| { + try_create_int_gauge( "beacon_block_delay_head_imported_time", "Duration between the time that block was imported and the time when it was set as head.", - ); - pub static ref BEACON_BLOCK_DELAY_HEAD_SLOT_START_EXCEEDED_TOTAL: Result = try_create_int_counter( + ) + }); +pub static BEACON_BLOCK_DELAY_HEAD_SLOT_START_EXCEEDED_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_counter( "beacon_block_delay_head_slot_start_exceeded_total", "A counter that is triggered when the duration between the start of the block's slot and the current time \ will result in failed attestations.", - ); + ) + }); - /* - * General block metrics - */ - pub static ref GOSSIP_BEACON_BLOCK_SKIPPED_SLOTS: Result = - try_create_int_gauge( - "gossip_beacon_block_skipped_slots", - "For each gossip blocks, the number of skip slots between it and its parent" - ); -} +/* + * General block metrics + */ +pub static GOSSIP_BEACON_BLOCK_SKIPPED_SLOTS: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( + "gossip_beacon_block_skipped_slots", + "For each gossip blocks, the number of skip slots between it and its parent", + ) +}); -// Fourth lazy-static block is used to account for macro recursion limit. -lazy_static! 
{ - /* - * Sync Committee Message Verification - */ - pub static ref SYNC_MESSAGE_PROCESSING_REQUESTS: Result = try_create_int_counter( +/* + * Sync Committee Message Verification + */ +pub static SYNC_MESSAGE_PROCESSING_REQUESTS: LazyLock> = LazyLock::new(|| { + try_create_int_counter( "beacon_sync_committee_message_processing_requests_total", - "Count of all sync messages submitted for processing" - ); - pub static ref SYNC_MESSAGE_PROCESSING_SUCCESSES: Result = try_create_int_counter( + "Count of all sync messages submitted for processing", + ) +}); +pub static SYNC_MESSAGE_PROCESSING_SUCCESSES: LazyLock> = LazyLock::new(|| { + try_create_int_counter( "beacon_sync_committee_message_processing_successes_total", - "Number of sync messages verified for gossip" - ); - pub static ref SYNC_MESSAGE_GOSSIP_VERIFICATION_TIMES: Result = try_create_histogram( - "beacon_sync_committee_message_gossip_verification_seconds", - "Full runtime of sync contribution gossip verification" - ); - pub static ref SYNC_MESSAGE_EQUIVOCATIONS: Result = try_create_int_counter( + "Number of sync messages verified for gossip", + ) +}); +pub static SYNC_MESSAGE_GOSSIP_VERIFICATION_TIMES: LazyLock> = + LazyLock::new(|| { + try_create_histogram( + "beacon_sync_committee_message_gossip_verification_seconds", + "Full runtime of sync contribution gossip verification", + ) + }); +pub static SYNC_MESSAGE_EQUIVOCATIONS: LazyLock> = LazyLock::new(|| { + try_create_int_counter( "sync_message_equivocations_total", - "Number of sync messages with the same validator index for different blocks" - ); - pub static ref SYNC_MESSAGE_EQUIVOCATIONS_TO_HEAD: Result = try_create_int_counter( + "Number of sync messages with the same validator index for different blocks", + ) +}); +pub static SYNC_MESSAGE_EQUIVOCATIONS_TO_HEAD: LazyLock> = LazyLock::new(|| { + try_create_int_counter( "sync_message_equivocations_to_head_total", - "Number of sync message which conflict with a previous message but elect the head" - ); + 
"Number of sync message which conflict with a previous message but elect the head", + ) +}); - /* - * Sync Committee Contribution Verification - */ - pub static ref SYNC_CONTRIBUTION_PROCESSING_REQUESTS: Result = try_create_int_counter( - "beacon_sync_contribution_processing_requests_total", - "Count of all sync contributions submitted for processing" - ); - pub static ref SYNC_CONTRIBUTION_PROCESSING_SUCCESSES: Result = try_create_int_counter( - "beacon_sync_contribution_processing_successes_total", - "Number of sync contributions verified for gossip" - ); - pub static ref SYNC_CONTRIBUTION_GOSSIP_VERIFICATION_TIMES: Result = try_create_histogram( - "beacon_sync_contribution_gossip_verification_seconds", - "Full runtime of sync contribution gossip verification" - ); +/* + * Sync Committee Contribution Verification + */ +pub static SYNC_CONTRIBUTION_PROCESSING_REQUESTS: LazyLock> = + LazyLock::new(|| { + try_create_int_counter( + "beacon_sync_contribution_processing_requests_total", + "Count of all sync contributions submitted for processing", + ) + }); +pub static SYNC_CONTRIBUTION_PROCESSING_SUCCESSES: LazyLock> = + LazyLock::new(|| { + try_create_int_counter( + "beacon_sync_contribution_processing_successes_total", + "Number of sync contributions verified for gossip", + ) + }); +pub static SYNC_CONTRIBUTION_GOSSIP_VERIFICATION_TIMES: LazyLock> = + LazyLock::new(|| { + try_create_histogram( + "beacon_sync_contribution_gossip_verification_seconds", + "Full runtime of sync contribution gossip verification", + ) + }); - /* - * General Sync Committee Contribution Processing - */ - pub static ref SYNC_CONTRIBUTION_PROCESSING_APPLY_TO_AGG_POOL: Result = try_create_histogram( - "beacon_sync_contribution_processing_apply_to_agg_pool", - "Time spent applying a sync contribution to the naive aggregation pool" - ); - pub static ref SYNC_CONTRIBUTION_PROCESSING_AGG_POOL_PRUNE: Result = try_create_histogram( - "beacon_sync_contribution_processing_agg_pool_prune", - "Time 
spent for the agg pool to prune" - ); - pub static ref SYNC_CONTRIBUTION_PROCESSING_AGG_POOL_INSERT: Result = try_create_histogram( - "beacon_sync_contribution_processing_agg_pool_insert", - "Time spent for the outer pool.insert() function of agg pool" - ); - pub static ref SYNC_CONTRIBUTION_PROCESSING_AGG_POOL_CORE_INSERT: Result = try_create_histogram( - "beacon_sync_contribution_processing_agg_pool_core_insert", - "Time spent for the core map.insert() function of agg pool" - ); - pub static ref SYNC_CONTRIBUTION_PROCESSING_AGG_POOL_AGGREGATION: Result = try_create_histogram( - "beacon_sync_contribution_processing_agg_pool_aggregation", - "Time spent doing signature aggregation when adding to the agg poll" - ); - pub static ref SYNC_CONTRIBUTION_PROCESSING_AGG_POOL_CREATE_MAP: Result = try_create_histogram( - "beacon_sync_contribution_processing_agg_pool_create_map", - "Time spent for creating a map for a new slot" - ); - pub static ref SYNC_CONTRIBUTION_PROCESSING_APPLY_TO_OP_POOL: Result = try_create_histogram( - "beacon_sync_contribution_processing_apply_to_op_pool", - "Time spent applying a sync contribution to the block inclusion pool" - ); - pub static ref SYNC_CONTRIBUTION_PROCESSING_SIGNATURE_SETUP_TIMES: Result = try_create_histogram( +/* + * General Sync Committee Contribution Processing + */ +pub static SYNC_CONTRIBUTION_PROCESSING_APPLY_TO_AGG_POOL: LazyLock> = + LazyLock::new(|| { + try_create_histogram( + "beacon_sync_contribution_processing_apply_to_agg_pool", + "Time spent applying a sync contribution to the naive aggregation pool", + ) + }); +pub static SYNC_CONTRIBUTION_PROCESSING_AGG_POOL_PRUNE: LazyLock> = + LazyLock::new(|| { + try_create_histogram( + "beacon_sync_contribution_processing_agg_pool_prune", + "Time spent for the agg pool to prune", + ) + }); +pub static SYNC_CONTRIBUTION_PROCESSING_AGG_POOL_INSERT: LazyLock> = + LazyLock::new(|| { + try_create_histogram( + "beacon_sync_contribution_processing_agg_pool_insert", + "Time spent for 
the outer pool.insert() function of agg pool", + ) + }); +pub static SYNC_CONTRIBUTION_PROCESSING_AGG_POOL_CORE_INSERT: LazyLock> = + LazyLock::new(|| { + try_create_histogram( + "beacon_sync_contribution_processing_agg_pool_core_insert", + "Time spent for the core map.insert() function of agg pool", + ) + }); +pub static SYNC_CONTRIBUTION_PROCESSING_AGG_POOL_AGGREGATION: LazyLock> = + LazyLock::new(|| { + try_create_histogram( + "beacon_sync_contribution_processing_agg_pool_aggregation", + "Time spent doing signature aggregation when adding to the agg poll", + ) + }); +pub static SYNC_CONTRIBUTION_PROCESSING_AGG_POOL_CREATE_MAP: LazyLock> = + LazyLock::new(|| { + try_create_histogram( + "beacon_sync_contribution_processing_agg_pool_create_map", + "Time spent for creating a map for a new slot", + ) + }); +pub static SYNC_CONTRIBUTION_PROCESSING_APPLY_TO_OP_POOL: LazyLock> = + LazyLock::new(|| { + try_create_histogram( + "beacon_sync_contribution_processing_apply_to_op_pool", + "Time spent applying a sync contribution to the block inclusion pool", + ) + }); +pub static SYNC_CONTRIBUTION_PROCESSING_SIGNATURE_SETUP_TIMES: LazyLock> = + LazyLock::new(|| { + try_create_histogram( "beacon_sync_contribution_processing_signature_setup_seconds", "Time spent on setting up for the signature verification of sync contribution processing" - ); - pub static ref SYNC_CONTRIBUTION_PROCESSING_SIGNATURE_TIMES: Result = try_create_histogram( - "beacon_sync_contribution_processing_signature_seconds", - "Time spent on the signature verification of sync contribution processing" - ); + ) + }); +pub static SYNC_CONTRIBUTION_PROCESSING_SIGNATURE_TIMES: LazyLock> = + LazyLock::new(|| { + try_create_histogram( + "beacon_sync_contribution_processing_signature_seconds", + "Time spent on the signature verification of sync contribution processing", + ) + }); - /* - * General Sync Committee Contribution Processing - */ - pub static ref SYNC_MESSAGE_PROCESSING_SIGNATURE_SETUP_TIMES: Result = 
try_create_histogram( - "beacon_sync_committee_message_processing_signature_setup_seconds", - "Time spent on setting up for the signature verification of sync message processing" - ); - pub static ref SYNC_MESSAGE_PROCESSING_SIGNATURE_TIMES: Result = try_create_histogram( - "beacon_sync_committee_message_processing_signature_seconds", - "Time spent on the signature verification of sync message processing" - ); +/* + * General Sync Committee Contribution Processing + */ +pub static SYNC_MESSAGE_PROCESSING_SIGNATURE_SETUP_TIMES: LazyLock> = + LazyLock::new(|| { + try_create_histogram( + "beacon_sync_committee_message_processing_signature_setup_seconds", + "Time spent on setting up for the signature verification of sync message processing", + ) + }); +pub static SYNC_MESSAGE_PROCESSING_SIGNATURE_TIMES: LazyLock> = + LazyLock::new(|| { + try_create_histogram( + "beacon_sync_committee_message_processing_signature_seconds", + "Time spent on the signature verification of sync message processing", + ) + }); - /* - * Checkpoint sync & backfill - */ - pub static ref BACKFILL_SIGNATURE_SETUP_TIMES: Result = try_create_histogram( +/* + * Checkpoint sync & backfill + */ +pub static BACKFILL_SIGNATURE_SETUP_TIMES: LazyLock> = LazyLock::new(|| { + try_create_histogram( "beacon_backfill_signature_setup_seconds", - "Time spent constructing the signature set during backfill sync" - ); - pub static ref BACKFILL_SIGNATURE_VERIFY_TIMES: Result = try_create_histogram( + "Time spent constructing the signature set during backfill sync", + ) +}); +pub static BACKFILL_SIGNATURE_VERIFY_TIMES: LazyLock> = LazyLock::new(|| { + try_create_histogram( "beacon_backfill_signature_verify_seconds", - "Time spent verifying the signature set during backfill sync" - ); - pub static ref BACKFILL_SIGNATURE_TOTAL_TIMES: Result = try_create_histogram( + "Time spent verifying the signature set during backfill sync", + ) +}); +pub static BACKFILL_SIGNATURE_TOTAL_TIMES: LazyLock> = LazyLock::new(|| { + 
try_create_histogram( "beacon_backfill_signature_total_seconds", - "Time spent verifying the signature set during backfill sync, including setup" - ); + "Time spent verifying the signature set during backfill sync, including setup", + ) +}); - /* - * Pre-finalization block cache. - */ - pub static ref PRE_FINALIZATION_BLOCK_CACHE_SIZE: Result = - try_create_int_gauge( - "beacon_pre_finalization_block_cache_size", - "Number of pre-finalization block roots cached for quick rejection" - ); - pub static ref PRE_FINALIZATION_BLOCK_LOOKUP_COUNT: Result = - try_create_int_gauge( - "beacon_pre_finalization_block_lookup_count", - "Number of block roots subject to single block lookups" - ); +/* + * Pre-finalization block cache. + */ +pub static PRE_FINALIZATION_BLOCK_CACHE_SIZE: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( + "beacon_pre_finalization_block_cache_size", + "Number of pre-finalization block roots cached for quick rejection", + ) +}); +pub static PRE_FINALIZATION_BLOCK_LOOKUP_COUNT: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( + "beacon_pre_finalization_block_lookup_count", + "Number of block roots subject to single block lookups", + ) +}); - /* - * Blob sidecar Verification - */ - pub static ref BLOBS_SIDECAR_PROCESSING_REQUESTS: Result = try_create_int_counter( - "beacon_blobs_sidecar_processing_requests_total", - "Count of all blob sidecars submitted for processing" - ); - pub static ref BLOBS_SIDECAR_PROCESSING_SUCCESSES: Result = try_create_int_counter( - "beacon_blobs_sidecar_processing_successes_total", - "Number of blob sidecars verified for gossip" - ); - pub static ref BLOBS_SIDECAR_GOSSIP_VERIFICATION_TIMES: Result = try_create_histogram( - "beacon_blobs_sidecar_gossip_verification_seconds", - "Full runtime of blob sidecars gossip verification" - ); - pub static ref BLOB_SIDECAR_INCLUSION_PROOF_VERIFICATION: Result = try_create_histogram( - "blob_sidecar_inclusion_proof_verification_seconds", - "Time taken to verify blob sidecar 
inclusion proof" - ); - pub static ref BLOB_SIDECAR_INCLUSION_PROOF_COMPUTATION: Result = try_create_histogram( - "blob_sidecar_inclusion_proof_computation_seconds", - "Time taken to compute blob sidecar inclusion proof" - ); - pub static ref DATA_COLUMN_SIDECAR_PROCESSING_REQUESTS: Result = try_create_int_counter( - "beacon_data_column_sidecar_processing_requests_total", - "Count of all data column sidecars submitted for processing" - ); - pub static ref DATA_COLUMN_SIDECAR_PROCESSING_SUCCESSES: Result = try_create_int_counter( - "beacon_data_column_sidecar_processing_successes_total", - "Number of data column sidecars verified for gossip" - ); - pub static ref DATA_COLUMN_SIDECAR_GOSSIP_VERIFICATION_TIMES: Result = try_create_histogram( - "beacon_data_column_sidecar_gossip_verification_seconds", - "Full runtime of data column sidecars gossip verification" - ); -} - -// Fifth lazy-static block is used to account for macro recursion limit. -lazy_static! { - /* - * Light server message verification - */ - pub static ref FINALITY_UPDATE_PROCESSING_SUCCESSES: Result = try_create_int_counter( - "light_client_finality_update_verification_success_total", - "Number of light client finality updates verified for gossip" - ); - /* - * Light server message verification - */ - pub static ref OPTIMISTIC_UPDATE_PROCESSING_SUCCESSES: Result = try_create_int_counter( - "light_client_optimistic_update_verification_success_total", - "Number of light client optimistic updates verified for gossip" - ); - /* - * Aggregate subset metrics - */ - pub static ref SYNC_CONTRIBUTION_SUBSETS: Result = try_create_int_counter( - "beacon_sync_contribution_subsets_total", - "Count of new sync contributions that are subsets of already known aggregates" - ); - pub static ref AGGREGATED_ATTESTATION_SUBSETS: Result = try_create_int_counter( - "beacon_aggregated_attestation_subsets_total", - "Count of new aggregated attestations that are subsets of already known aggregates" - ); - /* - * Attestation 
simulator metrics - */ - pub static ref VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_HEAD_ATTESTER_HIT: Result = +/* + * Blob sidecar Verification + */ +pub static BLOBS_SIDECAR_PROCESSING_REQUESTS: LazyLock> = LazyLock::new(|| { try_create_int_counter( - VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_HEAD_ATTESTER_HIT_TOTAL, - "Incremented if a validator is flagged as a previous slot head attester \ + "beacon_blobs_sidecar_processing_requests_total", + "Count of all blob sidecars submitted for processing", + ) +}); +pub static BLOBS_SIDECAR_PROCESSING_SUCCESSES: LazyLock> = LazyLock::new(|| { + try_create_int_counter( + "beacon_blobs_sidecar_processing_successes_total", + "Number of blob sidecars verified for gossip", + ) +}); +pub static BLOBS_SIDECAR_GOSSIP_VERIFICATION_TIMES: LazyLock> = + LazyLock::new(|| { + try_create_histogram( + "beacon_blobs_sidecar_gossip_verification_seconds", + "Full runtime of blob sidecars gossip verification", + ) + }); +pub static BLOB_SIDECAR_INCLUSION_PROOF_VERIFICATION: LazyLock> = + LazyLock::new(|| { + try_create_histogram( + "blob_sidecar_inclusion_proof_verification_seconds", + "Time taken to verify blob sidecar inclusion proof", + ) + }); +pub static BLOB_SIDECAR_INCLUSION_PROOF_COMPUTATION: LazyLock> = + LazyLock::new(|| { + try_create_histogram( + "blob_sidecar_inclusion_proof_computation_seconds", + "Time taken to compute blob sidecar inclusion proof", + ) + }); +pub static DATA_COLUMN_SIDECAR_PROCESSING_REQUESTS: LazyLock> = + LazyLock::new(|| { + try_create_int_counter( + "beacon_data_column_sidecar_processing_requests_total", + "Count of all data column sidecars submitted for processing", + ) + }); +pub static DATA_COLUMN_SIDECAR_PROCESSING_SUCCESSES: LazyLock> = + LazyLock::new(|| { + try_create_int_counter( + "beacon_data_column_sidecar_processing_successes_total", + "Number of data column sidecars verified for gossip", + ) + }); +pub static DATA_COLUMN_SIDECAR_GOSSIP_VERIFICATION_TIMES: LazyLock> = + LazyLock::new(|| { + 
try_create_histogram( + "beacon_data_column_sidecar_gossip_verification_seconds", + "Full runtime of data column sidecars gossip verification", + ) + }); + +/* + * Light server message verification + */ +pub static FINALITY_UPDATE_PROCESSING_SUCCESSES: LazyLock> = + LazyLock::new(|| { + try_create_int_counter( + "light_client_finality_update_verification_success_total", + "Number of light client finality updates verified for gossip", + ) + }); +/* + * Light server message verification + */ +pub static OPTIMISTIC_UPDATE_PROCESSING_SUCCESSES: LazyLock> = + LazyLock::new(|| { + try_create_int_counter( + "light_client_optimistic_update_verification_success_total", + "Number of light client optimistic updates verified for gossip", + ) + }); +/* + * Aggregate subset metrics + */ +pub static SYNC_CONTRIBUTION_SUBSETS: LazyLock> = LazyLock::new(|| { + try_create_int_counter( + "beacon_sync_contribution_subsets_total", + "Count of new sync contributions that are subsets of already known aggregates", + ) +}); +pub static AGGREGATED_ATTESTATION_SUBSETS: LazyLock> = LazyLock::new(|| { + try_create_int_counter( + "beacon_aggregated_attestation_subsets_total", + "Count of new aggregated attestations that are subsets of already known aggregates", + ) +}); +/* + * Attestation simulator metrics + */ +pub static VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_HEAD_ATTESTER_HIT: LazyLock> = + LazyLock::new(|| { + try_create_int_counter( + VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_HEAD_ATTESTER_HIT_TOTAL, + "Incremented if a validator is flagged as a previous slot head attester \ during per slot processing", - ); - pub static ref VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_HEAD_ATTESTER_MISS: Result = + ) + }); +pub static VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_HEAD_ATTESTER_MISS: LazyLock< + Result, +> = LazyLock::new(|| { try_create_int_counter( VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_HEAD_ATTESTER_MISS_TOTAL, "Incremented if a validator is not flagged as a previous slot head attester \ during 
per slot processing", - ); - pub static ref VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_TARGET_ATTESTER_HIT: Result = + ) +}); +pub static VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_TARGET_ATTESTER_HIT: LazyLock< + Result, +> = LazyLock::new(|| { try_create_int_counter( VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_TARGET_ATTESTER_HIT_TOTAL, "Incremented if a validator is flagged as a previous slot target attester \ during per slot processing", - ); - pub static ref VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_TARGET_ATTESTER_MISS: Result = + ) +}); +pub static VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_TARGET_ATTESTER_MISS: LazyLock< + Result, +> = LazyLock::new(|| { try_create_int_counter( VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_TARGET_ATTESTER_MISS_TOTAL, "Incremented if a validator is not flagged as a previous slot target attester \ during per slot processing", - ); - pub static ref VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_SOURCE_ATTESTER_HIT: Result = + ) +}); +pub static VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_SOURCE_ATTESTER_HIT: LazyLock< + Result, +> = LazyLock::new(|| { try_create_int_counter( VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_SOURCE_ATTESTER_HIT_TOTAL, "Incremented if a validator is flagged as a previous slot source attester \ during per slot processing", - ); - pub static ref VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_SOURCE_ATTESTER_MISS: Result = + ) +}); +pub static VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_SOURCE_ATTESTER_MISS: LazyLock< + Result, +> = LazyLock::new(|| { try_create_int_counter( VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_SOURCE_ATTESTER_MISS_TOTAL, "Incremented if a validator is not flagged as a previous slot source attester \ during per slot processing", - ); - /* - * Missed block metrics - */ - pub static ref VALIDATOR_MONITOR_MISSED_BLOCKS_TOTAL: Result = try_create_int_counter_vec( - "validator_monitor_missed_blocks_total", - "Number of non-finalized blocks missed", - &["validator"] - ); + ) +}); +/* + * Missed block metrics + */ +pub static 
VALIDATOR_MONITOR_MISSED_BLOCKS_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_counter_vec( + "validator_monitor_missed_blocks_total", + "Number of non-finalized blocks missed", + &["validator"], + ) + }); - /* - * Kzg related metrics - */ - pub static ref KZG_VERIFICATION_SINGLE_TIMES: Result = - try_create_histogram("kzg_verification_single_seconds", "Runtime of single kzg verification"); - pub static ref KZG_VERIFICATION_BATCH_TIMES: Result = - try_create_histogram("kzg_verification_batch_seconds", "Runtime of batched kzg verification"); +/* + * Kzg related metrics + */ +pub static KZG_VERIFICATION_SINGLE_TIMES: LazyLock> = LazyLock::new(|| { + try_create_histogram( + "kzg_verification_single_seconds", + "Runtime of single kzg verification", + ) +}); +pub static KZG_VERIFICATION_BATCH_TIMES: LazyLock> = LazyLock::new(|| { + try_create_histogram( + "kzg_verification_batch_seconds", + "Runtime of batched kzg verification", + ) +}); - pub static ref BLOCK_PRODUCTION_BLOBS_VERIFICATION_TIMES: Result = try_create_histogram( +pub static BLOCK_PRODUCTION_BLOBS_VERIFICATION_TIMES: LazyLock> = LazyLock::new( + || { + try_create_histogram( "beacon_block_production_blobs_verification_seconds", "Time taken to verify blobs against commitments and creating BlobSidecar objects in block production" - ); - /* - * Availability related metrics - */ - pub static ref BLOCK_AVAILABILITY_DELAY: Result = try_create_int_gauge( + ) + }, +); +/* + * Availability related metrics + */ +pub static BLOCK_AVAILABILITY_DELAY: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( "block_availability_delay", "Duration between start of the slot and the time at which all components of the block are available.", - ); + ) +}); - /* - * Data Availability cache metrics - */ - pub static ref DATA_AVAILABILITY_OVERFLOW_MEMORY_BLOCK_CACHE_SIZE: Result = +/* + * Data Availability cache metrics + */ +pub static DATA_AVAILABILITY_OVERFLOW_MEMORY_BLOCK_CACHE_SIZE: LazyLock> = + LazyLock::new(|| { 
try_create_int_gauge( "data_availability_overflow_memory_block_cache_size", - "Number of entries in the data availability overflow block memory cache." - ); - pub static ref DATA_AVAILABILITY_OVERFLOW_MEMORY_STATE_CACHE_SIZE: Result = + "Number of entries in the data availability overflow block memory cache.", + ) + }); +pub static DATA_AVAILABILITY_OVERFLOW_MEMORY_STATE_CACHE_SIZE: LazyLock> = + LazyLock::new(|| { try_create_int_gauge( "data_availability_overflow_memory_state_cache_size", - "Number of entries in the data availability overflow state memory cache." - ); - pub static ref DATA_AVAILABILITY_OVERFLOW_STORE_CACHE_SIZE: Result = + "Number of entries in the data availability overflow state memory cache.", + ) + }); +pub static DATA_AVAILABILITY_OVERFLOW_STORE_CACHE_SIZE: LazyLock> = + LazyLock::new(|| { try_create_int_gauge( "data_availability_overflow_store_cache_size", - "Number of entries in the data availability overflow store cache." - ); + "Number of entries in the data availability overflow store cache.", + ) + }); - /* - * light_client server metrics - */ - pub static ref LIGHT_CLIENT_SERVER_CACHE_STATE_DATA_TIMES: Result = try_create_histogram( - "beacon_light_client_server_cache_state_data_seconds", - "Time taken to produce and cache state data", - ); - pub static ref LIGHT_CLIENT_SERVER_CACHE_RECOMPUTE_UPDATES_TIMES: Result = try_create_histogram( - "beacon_light_client_server_cache_recompute_updates_seconds", - "Time taken to recompute and cache updates", - ); - pub static ref LIGHT_CLIENT_SERVER_CACHE_PREV_BLOCK_CACHE_MISS: Result = try_create_int_counter( - "beacon_light_client_server_cache_prev_block_cache_miss", - "Count of prev block cache misses", - ); -} +/* + * light_client server metrics + */ +pub static LIGHT_CLIENT_SERVER_CACHE_STATE_DATA_TIMES: LazyLock> = + LazyLock::new(|| { + try_create_histogram( + "beacon_light_client_server_cache_state_data_seconds", + "Time taken to produce and cache state data", + ) + }); +pub static 
LIGHT_CLIENT_SERVER_CACHE_RECOMPUTE_UPDATES_TIMES: LazyLock> = + LazyLock::new(|| { + try_create_histogram( + "beacon_light_client_server_cache_recompute_updates_seconds", + "Time taken to recompute and cache updates", + ) + }); +pub static LIGHT_CLIENT_SERVER_CACHE_PREV_BLOCK_CACHE_MISS: LazyLock> = + LazyLock::new(|| { + try_create_int_counter( + "beacon_light_client_server_cache_prev_block_cache_miss", + "Count of prev block cache misses", + ) + }); /// Scrape the `beacon_chain` for metrics that are not constantly updated (e.g., the present slot, /// head state info, etc) and update the Prometheus `DEFAULT_REGISTRY`. diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 4e33f1661b..87a3eeb359 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -31,7 +31,6 @@ use futures::channel::mpsc::Receiver; pub use genesis::{interop_genesis_state_with_eth1, DEFAULT_ETH1_BLOCK_HASH}; use int_to_bytes::int_to_bytes32; use kzg::{Kzg, TrustedSetup}; -use lazy_static::lazy_static; use merkle_proof::MerkleTree; use operation_pool::ReceivedPreCapella; use parking_lot::Mutex; @@ -52,7 +51,7 @@ use std::collections::{HashMap, HashSet}; use std::fmt; use std::str::FromStr; use std::sync::atomic::{AtomicUsize, Ordering}; -use std::sync::Arc; +use std::sync::{Arc, LazyLock}; use std::time::Duration; use store::{config::StoreConfig, HotColdDB, ItemStore, LevelDB, MemoryStore}; use task_executor::TaskExecutor; @@ -75,15 +74,13 @@ pub const FORK_NAME_ENV_VAR: &str = "FORK_NAME"; // a different value. pub const DEFAULT_TARGET_AGGREGATORS: u64 = u64::MAX; -lazy_static! 
{ - pub static ref KZG: Arc = { - let trusted_setup: TrustedSetup = serde_json::from_reader(TRUSTED_SETUP_BYTES) - .map_err(|e| format!("Unable to read trusted setup file: {}", e)) - .expect("should have trusted setup"); - let kzg = Kzg::new_from_trusted_setup(trusted_setup).expect("should create kzg"); - Arc::new(kzg) - }; -} +pub static KZG: LazyLock> = LazyLock::new(|| { + let trusted_setup: TrustedSetup = serde_json::from_reader(TRUSTED_SETUP_BYTES) + .map_err(|e| format!("Unable to read trusted setup file: {}", e)) + .expect("should have trusted setup"); + let kzg = Kzg::new_from_trusted_setup(trusted_setup).expect("should create kzg"); + Arc::new(kzg) +}); pub type BaseHarnessType = Witness, E, THotStore, TColdStore>; diff --git a/beacon_node/beacon_chain/tests/attestation_production.rs b/beacon_node/beacon_chain/tests/attestation_production.rs index 697e449dc6..e1f2cbb284 100644 --- a/beacon_node/beacon_chain/tests/attestation_production.rs +++ b/beacon_node/beacon_chain/tests/attestation_production.rs @@ -5,8 +5,7 @@ use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy}; use beacon_chain::validator_monitor::UNAGGREGATED_ATTESTATION_LAG_SLOTS; use beacon_chain::{metrics, StateSkipConfig, WhenSlotSkipped}; -use lazy_static::lazy_static; -use std::sync::Arc; +use std::sync::{Arc, LazyLock}; use tree_hash::TreeHash; use types::{ AggregateSignature, Attestation, EthSpec, Keypair, MainnetEthSpec, RelativeEpoch, Slot, @@ -14,10 +13,9 @@ use types::{ pub const VALIDATOR_COUNT: usize = 16; -lazy_static! { - /// A cached set of keys. - static ref KEYPAIRS: Vec = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); -} +/// A cached set of keys. 
+static KEYPAIRS: LazyLock> = + LazyLock::new(|| types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT)); /// This test builds a chain that is testing the performance of the unaggregated attestations /// produced by the attestation simulator service. diff --git a/beacon_node/beacon_chain/tests/attestation_verification.rs b/beacon_node/beacon_chain/tests/attestation_verification.rs index 19efe10c6d..a52437e003 100644 --- a/beacon_node/beacon_chain/tests/attestation_verification.rs +++ b/beacon_node/beacon_chain/tests/attestation_verification.rs @@ -14,11 +14,11 @@ use beacon_chain::{ }; use genesis::{interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH}; use int_to_bytes::int_to_bytes32; -use lazy_static::lazy_static; use ssz_types::BitVector; use state_processing::{ per_block_processing::errors::AttestationValidationError, per_slot_processing, }; +use std::sync::LazyLock; use tree_hash::TreeHash; use types::{ signed_aggregate_and_proof::SignedAggregateAndProofRefMut, @@ -36,10 +36,9 @@ pub const VALIDATOR_COUNT: usize = 256; pub const CAPELLA_FORK_EPOCH: usize = 1; -lazy_static! { - /// A cached set of keys. - static ref KEYPAIRS: Vec = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); -} +/// A cached set of keys. +static KEYPAIRS: LazyLock> = + LazyLock::new(|| types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT)); /// Returns a beacon chain harness. 
fn get_harness(validator_count: usize) -> BeaconChainHarness> { diff --git a/beacon_node/beacon_chain/tests/block_verification.rs b/beacon_node/beacon_chain/tests/block_verification.rs index d9c9a3b6a7..046a3468af 100644 --- a/beacon_node/beacon_chain/tests/block_verification.rs +++ b/beacon_node/beacon_chain/tests/block_verification.rs @@ -11,7 +11,6 @@ use beacon_chain::{ BeaconSnapshot, BlockError, ChainConfig, ChainSegmentResult, IntoExecutionPendingBlock, NotifyExecutionLayer, }; -use lazy_static::lazy_static; use logging::test_logger; use slasher::{Config as SlasherConfig, Slasher}; use state_processing::{ @@ -20,7 +19,7 @@ use state_processing::{ per_slot_processing, BlockProcessingError, ConsensusContext, VerifyBlockRoot, }; use std::marker::PhantomData; -use std::sync::Arc; +use std::sync::{Arc, LazyLock}; use tempfile::tempdir; use types::{test_utils::generate_deterministic_keypair, *}; @@ -31,10 +30,9 @@ const VALIDATOR_COUNT: usize = 24; const CHAIN_SEGMENT_LENGTH: usize = 64 * 5; const BLOCK_INDICES: &[usize] = &[0, 1, 32, 64, 68 + 1, 129, CHAIN_SEGMENT_LENGTH - 1]; -lazy_static! { - /// A cached set of keys. - static ref KEYPAIRS: Vec = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); -} +/// A cached set of keys. 
+static KEYPAIRS: LazyLock> = + LazyLock::new(|| types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT)); async fn get_chain_segment() -> (Vec>, Vec>>) { let harness = get_harness(VALIDATOR_COUNT); diff --git a/beacon_node/beacon_chain/tests/op_verification.rs b/beacon_node/beacon_chain/tests/op_verification.rs index 40910b9b9f..2f8fb6d2bc 100644 --- a/beacon_node/beacon_chain/tests/op_verification.rs +++ b/beacon_node/beacon_chain/tests/op_verification.rs @@ -9,23 +9,20 @@ use beacon_chain::{ }, BeaconChainError, }; -use lazy_static::lazy_static; use sloggers::{null::NullLoggerBuilder, Build}; use state_processing::per_block_processing::errors::{ AttesterSlashingInvalid, BlockOperationError, ExitInvalid, ProposerSlashingInvalid, }; -use std::sync::Arc; +use std::sync::{Arc, LazyLock}; use store::{LevelDB, StoreConfig}; use tempfile::{tempdir, TempDir}; use types::*; pub const VALIDATOR_COUNT: usize = 24; -lazy_static! { - /// A cached set of keys. - static ref KEYPAIRS: Vec = - types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); -} +/// A cached set of keys. 
+static KEYPAIRS: LazyLock> = + LazyLock::new(|| types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT)); type E = MinimalEthSpec; type TestHarness = BeaconChainHarness>; diff --git a/beacon_node/beacon_chain/tests/rewards.rs b/beacon_node/beacon_chain/tests/rewards.rs index 1c80525223..f04f4062f1 100644 --- a/beacon_node/beacon_chain/tests/rewards.rs +++ b/beacon_node/beacon_chain/tests/rewards.rs @@ -1,6 +1,7 @@ #![cfg(test)] use std::collections::HashMap; +use std::sync::LazyLock; use beacon_chain::test_utils::{ generate_deterministic_keypairs, BeaconChainHarness, EphemeralHarnessType, @@ -12,7 +13,6 @@ use beacon_chain::{ use eth2::lighthouse::attestation_rewards::TotalAttestationRewards; use eth2::lighthouse::StandardAttestationRewards; use eth2::types::ValidatorId; -use lazy_static::lazy_static; use types::beacon_state::Error as BeaconStateError; use types::{BeaconState, ChainSpec, ForkName, Slot}; @@ -20,9 +20,8 @@ pub const VALIDATOR_COUNT: usize = 64; type E = MinimalEthSpec; -lazy_static! 
{ - static ref KEYPAIRS: Vec = generate_deterministic_keypairs(VALIDATOR_COUNT); -} +static KEYPAIRS: LazyLock> = + LazyLock::new(|| generate_deterministic_keypairs(VALIDATOR_COUNT)); fn get_harness(spec: ChainSpec) -> BeaconChainHarness> { let harness = BeaconChainHarness::builder(E::default()) diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index e675d6956e..12f2702822 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -14,7 +14,6 @@ use beacon_chain::{ migrate::MigratorConfig, BeaconChain, BeaconChainError, BeaconChainTypes, BeaconSnapshot, BlockError, ChainConfig, NotifyExecutionLayer, ServerSentEventHandler, WhenSlotSkipped, }; -use lazy_static::lazy_static; use logging::test_logger; use maplit::hashset; use rand::Rng; @@ -23,7 +22,7 @@ use state_processing::{state_advance::complete_state_advance, BlockReplayer}; use std::collections::HashMap; use std::collections::HashSet; use std::convert::TryInto; -use std::sync::Arc; +use std::sync::{Arc, LazyLock}; use std::time::Duration; use store::chunked_vector::Chunk; use store::metadata::{SchemaVersion, CURRENT_SCHEMA_VERSION, STATE_UPPER_LIMIT_NO_RETAIN}; @@ -42,10 +41,9 @@ use types::*; pub const LOW_VALIDATOR_COUNT: usize = 24; pub const HIGH_VALIDATOR_COUNT: usize = 64; -lazy_static! { - /// A cached set of keys. - static ref KEYPAIRS: Vec = types::test_utils::generate_deterministic_keypairs(HIGH_VALIDATOR_COUNT); -} +/// A cached set of keys. 
+static KEYPAIRS: LazyLock> = + LazyLock::new(|| types::test_utils::generate_deterministic_keypairs(HIGH_VALIDATOR_COUNT)); type E = MinimalEthSpec; type TestHarness = BeaconChainHarness>; diff --git a/beacon_node/beacon_chain/tests/sync_committee_verification.rs b/beacon_node/beacon_chain/tests/sync_committee_verification.rs index 242ed55847..5cbb26ffbf 100644 --- a/beacon_node/beacon_chain/tests/sync_committee_verification.rs +++ b/beacon_node/beacon_chain/tests/sync_committee_verification.rs @@ -3,12 +3,12 @@ use beacon_chain::sync_committee_verification::{Error as SyncCommitteeError, SyncCommitteeData}; use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType, RelativeSyncCommittee}; use int_to_bytes::int_to_bytes32; -use lazy_static::lazy_static; use safe_arith::SafeArith; use state_processing::{ per_block_processing::{altair::sync_committee::process_sync_aggregate, VerifySignatures}, state_advance::complete_state_advance, }; +use std::sync::LazyLock; use store::{SignedContributionAndProof, SyncCommitteeMessage}; use tree_hash::TreeHash; use types::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT; @@ -21,10 +21,9 @@ pub type E = MainnetEthSpec; pub const VALIDATOR_COUNT: usize = 256; -lazy_static! { - /// A cached set of keys. - static ref KEYPAIRS: Vec = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); -} +/// A cached set of keys. +static KEYPAIRS: LazyLock> = + LazyLock::new(|| types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT)); /// Returns a beacon chain harness. 
fn get_harness(validator_count: usize) -> BeaconChainHarness> { diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index 2a0854e78f..7ae34ccf38 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -8,9 +8,9 @@ use beacon_chain::{ }, BeaconChain, ChainConfig, NotifyExecutionLayer, StateSkipConfig, WhenSlotSkipped, }; -use lazy_static::lazy_static; use operation_pool::PersistedOperationPool; use state_processing::{per_slot_processing, per_slot_processing::Error as SlotProcessingError}; +use std::sync::LazyLock; use types::{ BeaconState, BeaconStateError, BlockImportSource, EthSpec, Hash256, Keypair, MinimalEthSpec, RelativeEpoch, Slot, @@ -19,10 +19,9 @@ use types::{ // Should ideally be divisible by 3. pub const VALIDATOR_COUNT: usize = 48; -lazy_static! { - /// A cached set of keys. - static ref KEYPAIRS: Vec = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); -} +/// A cached set of keys. +static KEYPAIRS: LazyLock> = + LazyLock::new(|| types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT)); fn get_harness(validator_count: usize) -> BeaconChainHarness> { let harness = BeaconChainHarness::builder(MinimalEthSpec) diff --git a/beacon_node/beacon_chain/tests/validator_monitor.rs b/beacon_node/beacon_chain/tests/validator_monitor.rs index ea9ef73575..c2c03baff0 100644 --- a/beacon_node/beacon_chain/tests/validator_monitor.rs +++ b/beacon_node/beacon_chain/tests/validator_monitor.rs @@ -2,17 +2,16 @@ use beacon_chain::test_utils::{ AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, }; use beacon_chain::validator_monitor::{ValidatorMonitorConfig, MISSED_BLOCK_LAG_SLOTS}; -use lazy_static::lazy_static; use logging::test_logger; +use std::sync::LazyLock; use types::{Epoch, EthSpec, Keypair, MainnetEthSpec, PublicKeyBytes, Slot}; // Should ideally be divisible by 3. pub const VALIDATOR_COUNT: usize = 48; -lazy_static! 
{ - /// A cached set of keys. - static ref KEYPAIRS: Vec = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); -} +/// A cached set of keys. +static KEYPAIRS: LazyLock> = + LazyLock::new(|| types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT)); type E = MainnetEthSpec; diff --git a/beacon_node/beacon_processor/Cargo.toml b/beacon_node/beacon_processor/Cargo.toml index 3373dd1c72..554010be07 100644 --- a/beacon_node/beacon_processor/Cargo.toml +++ b/beacon_node/beacon_processor/Cargo.toml @@ -16,7 +16,6 @@ task_executor = { workspace = true } slot_clock = { workspace = true } lighthouse_network = { workspace = true } types = { workspace = true } -lazy_static = { workspace = true } lighthouse_metrics = { workspace = true } parking_lot = { workspace = true } num_cpus = { workspace = true } diff --git a/beacon_node/beacon_processor/src/metrics.rs b/beacon_node/beacon_processor/src/metrics.rs index bcd422b357..56105f1e10 100644 --- a/beacon_node/beacon_processor/src/metrics.rs +++ b/beacon_node/beacon_processor/src/metrics.rs @@ -1,165 +1,253 @@ pub use lighthouse_metrics::*; +use std::sync::LazyLock; -lazy_static::lazy_static! 
{ - - /* - * Gossip processor - */ - pub static ref BEACON_PROCESSOR_WORK_EVENTS_RX_COUNT: Result = try_create_int_counter_vec( - "beacon_processor_work_events_rx_count", - "Count of work events received (but not necessarily processed)", - &["type"] - ); - pub static ref BEACON_PROCESSOR_WORK_EVENTS_IGNORED_COUNT: Result = try_create_int_counter_vec( - "beacon_processor_work_events_ignored_count", - "Count of work events purposefully ignored", - &["type"] - ); - pub static ref BEACON_PROCESSOR_WORK_EVENTS_STARTED_COUNT: Result = try_create_int_counter_vec( - "beacon_processor_work_events_started_count", - "Count of work events which have been started by a worker", - &["type"] - ); - pub static ref BEACON_PROCESSOR_WORKER_TIME: Result = try_create_histogram_vec( +/* + * Gossip processor + */ +pub static BEACON_PROCESSOR_WORK_EVENTS_RX_COUNT: LazyLock> = + LazyLock::new(|| { + try_create_int_counter_vec( + "beacon_processor_work_events_rx_count", + "Count of work events received (but not necessarily processed)", + &["type"], + ) + }); +pub static BEACON_PROCESSOR_WORK_EVENTS_IGNORED_COUNT: LazyLock> = + LazyLock::new(|| { + try_create_int_counter_vec( + "beacon_processor_work_events_ignored_count", + "Count of work events purposefully ignored", + &["type"], + ) + }); +pub static BEACON_PROCESSOR_WORK_EVENTS_STARTED_COUNT: LazyLock> = + LazyLock::new(|| { + try_create_int_counter_vec( + "beacon_processor_work_events_started_count", + "Count of work events which have been started by a worker", + &["type"], + ) + }); +pub static BEACON_PROCESSOR_WORKER_TIME: LazyLock> = LazyLock::new(|| { + try_create_histogram_vec( "beacon_processor_worker_time", "Time taken for a worker to fully process some parcel of work.", - &["type"] - ); - pub static ref BEACON_PROCESSOR_WORKERS_SPAWNED_TOTAL: Result = try_create_int_counter( - "beacon_processor_workers_spawned_total", - "The number of workers ever spawned by the gossip processing pool." 
- ); - pub static ref BEACON_PROCESSOR_WORKERS_ACTIVE_TOTAL: Result = try_create_int_gauge( - "beacon_processor_workers_active_total", - "Count of active workers in the gossip processing pool." - ); - pub static ref BEACON_PROCESSOR_IDLE_EVENTS_TOTAL: Result = try_create_int_counter( + &["type"], + ) +}); +pub static BEACON_PROCESSOR_WORKERS_SPAWNED_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_counter( + "beacon_processor_workers_spawned_total", + "The number of workers ever spawned by the gossip processing pool.", + ) + }); +pub static BEACON_PROCESSOR_WORKERS_ACTIVE_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_gauge( + "beacon_processor_workers_active_total", + "Count of active workers in the gossip processing pool.", + ) + }); +pub static BEACON_PROCESSOR_IDLE_EVENTS_TOTAL: LazyLock> = LazyLock::new(|| { + try_create_int_counter( "beacon_processor_idle_events_total", - "Count of idle events processed by the gossip processor manager." - ); - pub static ref BEACON_PROCESSOR_EVENT_HANDLING_SECONDS: Result = try_create_histogram( - "beacon_processor_event_handling_seconds", - "Time spent handling a new message and allocating it to a queue or worker." - ); - // Gossip blocks. - pub static ref BEACON_PROCESSOR_GOSSIP_BLOCK_QUEUE_TOTAL: Result = try_create_int_gauge( - "beacon_processor_gossip_block_queue_total", - "Count of blocks from gossip waiting to be verified." - ); - // Gossip blobs. - pub static ref BEACON_PROCESSOR_GOSSIP_BLOB_QUEUE_TOTAL: Result = try_create_int_gauge( - "beacon_processor_gossip_blob_queue_total", - "Count of blobs from gossip waiting to be verified." - ); - // Gossip data column sidecars. - pub static ref BEACON_PROCESSOR_GOSSIP_DATA_COLUMN_QUEUE_TOTAL: Result = try_create_int_gauge( - "beacon_processor_gossip_data_column_queue_total", - "Count of data column sidecars from gossip waiting to be verified." - ); - // Gossip Exits. 
- pub static ref BEACON_PROCESSOR_EXIT_QUEUE_TOTAL: Result = try_create_int_gauge( + "Count of idle events processed by the gossip processor manager.", + ) +}); +pub static BEACON_PROCESSOR_EVENT_HANDLING_SECONDS: LazyLock> = + LazyLock::new(|| { + try_create_histogram( + "beacon_processor_event_handling_seconds", + "Time spent handling a new message and allocating it to a queue or worker.", + ) + }); +// Gossip blocks. +pub static BEACON_PROCESSOR_GOSSIP_BLOCK_QUEUE_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_gauge( + "beacon_processor_gossip_block_queue_total", + "Count of blocks from gossip waiting to be verified.", + ) + }); +// Gossip blobs. +pub static BEACON_PROCESSOR_GOSSIP_BLOB_QUEUE_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_gauge( + "beacon_processor_gossip_blob_queue_total", + "Count of blobs from gossip waiting to be verified.", + ) + }); +// Gossip data column sidecars. +pub static BEACON_PROCESSOR_GOSSIP_DATA_COLUMN_QUEUE_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_gauge( + "beacon_processor_gossip_data_column_queue_total", + "Count of data column sidecars from gossip waiting to be verified.", + ) + }); +// Gossip Exits. +pub static BEACON_PROCESSOR_EXIT_QUEUE_TOTAL: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( "beacon_processor_exit_queue_total", - "Count of exits from gossip waiting to be verified." - ); - // Gossip proposer slashings. - pub static ref BEACON_PROCESSOR_PROPOSER_SLASHING_QUEUE_TOTAL: Result = try_create_int_gauge( - "beacon_processor_proposer_slashing_queue_total", - "Count of proposer slashings from gossip waiting to be verified." - ); - // Gossip attester slashings. - pub static ref BEACON_PROCESSOR_ATTESTER_SLASHING_QUEUE_TOTAL: Result = try_create_int_gauge( - "beacon_processor_attester_slashing_queue_total", - "Count of attester slashings from gossip waiting to be verified." - ); - // Gossip BLS to execution changes. 
- pub static ref BEACON_PROCESSOR_BLS_TO_EXECUTION_CHANGE_QUEUE_TOTAL: Result = try_create_int_gauge( - "beacon_processor_bls_to_execution_change_queue_total", - "Count of address changes from gossip waiting to be verified." - ); - // Rpc blocks. - pub static ref BEACON_PROCESSOR_RPC_BLOCK_QUEUE_TOTAL: Result = try_create_int_gauge( - "beacon_processor_rpc_block_queue_total", - "Count of blocks from the rpc waiting to be verified." - ); - // Rpc blobs. - pub static ref BEACON_PROCESSOR_RPC_BLOB_QUEUE_TOTAL: Result = try_create_int_gauge( - "beacon_processor_rpc_blob_queue_total", - "Count of blobs from the rpc waiting to be verified." - ); - // Chain segments. - pub static ref BEACON_PROCESSOR_CHAIN_SEGMENT_QUEUE_TOTAL: Result = try_create_int_gauge( - "beacon_processor_chain_segment_queue_total", - "Count of chain segments from the rpc waiting to be verified." - ); - pub static ref BEACON_PROCESSOR_BACKFILL_CHAIN_SEGMENT_QUEUE_TOTAL: Result = try_create_int_gauge( - "beacon_processor_backfill_chain_segment_queue_total", - "Count of backfill chain segments from the rpc waiting to be verified." - ); - // Unaggregated attestations. - pub static ref BEACON_PROCESSOR_UNAGGREGATED_ATTESTATION_QUEUE_TOTAL: Result = try_create_int_gauge( - "beacon_processor_unaggregated_attestation_queue_total", - "Count of unagg. attestations waiting to be processed." - ); - // Aggregated attestations. - pub static ref BEACON_PROCESSOR_AGGREGATED_ATTESTATION_QUEUE_TOTAL: Result = try_create_int_gauge( - "beacon_processor_aggregated_attestation_queue_total", - "Count of agg. attestations waiting to be processed." - ); - // Sync committee messages. - pub static ref BEACON_PROCESSOR_SYNC_MESSAGE_QUEUE_TOTAL: Result = try_create_int_gauge( - "beacon_processor_sync_message_queue_total", - "Count of sync committee messages waiting to be processed." - ); - // Sync contribution. 
- pub static ref BEACON_PROCESSOR_SYNC_CONTRIBUTION_QUEUE_TOTAL: Result = try_create_int_gauge( - "beacon_processor_sync_contribution_queue_total", - "Count of sync committee contributions waiting to be processed." - ); - // HTTP API requests. - pub static ref BEACON_PROCESSOR_API_REQUEST_P0_QUEUE_TOTAL: Result = try_create_int_gauge( - "beacon_processor_api_request_p0_queue_total", - "Count of P0 HTTP requesets waiting to be processed." - ); - pub static ref BEACON_PROCESSOR_API_REQUEST_P1_QUEUE_TOTAL: Result = try_create_int_gauge( - "beacon_processor_api_request_p1_queue_total", - "Count of P1 HTTP requesets waiting to be processed." - ); + "Count of exits from gossip waiting to be verified.", + ) +}); +// Gossip proposer slashings. +pub static BEACON_PROCESSOR_PROPOSER_SLASHING_QUEUE_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_gauge( + "beacon_processor_proposer_slashing_queue_total", + "Count of proposer slashings from gossip waiting to be verified.", + ) + }); +// Gossip attester slashings. +pub static BEACON_PROCESSOR_ATTESTER_SLASHING_QUEUE_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_gauge( + "beacon_processor_attester_slashing_queue_total", + "Count of attester slashings from gossip waiting to be verified.", + ) + }); +// Gossip BLS to execution changes. +pub static BEACON_PROCESSOR_BLS_TO_EXECUTION_CHANGE_QUEUE_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_gauge( + "beacon_processor_bls_to_execution_change_queue_total", + "Count of address changes from gossip waiting to be verified.", + ) + }); +// Rpc blocks. +pub static BEACON_PROCESSOR_RPC_BLOCK_QUEUE_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_gauge( + "beacon_processor_rpc_block_queue_total", + "Count of blocks from the rpc waiting to be verified.", + ) + }); +// Rpc blobs. 
+pub static BEACON_PROCESSOR_RPC_BLOB_QUEUE_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_gauge( + "beacon_processor_rpc_blob_queue_total", + "Count of blobs from the rpc waiting to be verified.", + ) + }); +// Chain segments. +pub static BEACON_PROCESSOR_CHAIN_SEGMENT_QUEUE_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_gauge( + "beacon_processor_chain_segment_queue_total", + "Count of chain segments from the rpc waiting to be verified.", + ) + }); +pub static BEACON_PROCESSOR_BACKFILL_CHAIN_SEGMENT_QUEUE_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_gauge( + "beacon_processor_backfill_chain_segment_queue_total", + "Count of backfill chain segments from the rpc waiting to be verified.", + ) + }); +// Unaggregated attestations. +pub static BEACON_PROCESSOR_UNAGGREGATED_ATTESTATION_QUEUE_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_gauge( + "beacon_processor_unaggregated_attestation_queue_total", + "Count of unagg. attestations waiting to be processed.", + ) + }); +// Aggregated attestations. +pub static BEACON_PROCESSOR_AGGREGATED_ATTESTATION_QUEUE_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_gauge( + "beacon_processor_aggregated_attestation_queue_total", + "Count of agg. attestations waiting to be processed.", + ) + }); +// Sync committee messages. +pub static BEACON_PROCESSOR_SYNC_MESSAGE_QUEUE_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_gauge( + "beacon_processor_sync_message_queue_total", + "Count of sync committee messages waiting to be processed.", + ) + }); +// Sync contribution. +pub static BEACON_PROCESSOR_SYNC_CONTRIBUTION_QUEUE_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_gauge( + "beacon_processor_sync_contribution_queue_total", + "Count of sync committee contributions waiting to be processed.", + ) + }); +// HTTP API requests. 
+pub static BEACON_PROCESSOR_API_REQUEST_P0_QUEUE_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_gauge( + "beacon_processor_api_request_p0_queue_total", + "Count of P0 HTTP requesets waiting to be processed.", + ) + }); +pub static BEACON_PROCESSOR_API_REQUEST_P1_QUEUE_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_gauge( + "beacon_processor_api_request_p1_queue_total", + "Count of P1 HTTP requesets waiting to be processed.", + ) + }); - /* - * Attestation reprocessing queue metrics. - */ - pub static ref BEACON_PROCESSOR_REPROCESSING_QUEUE_TOTAL: Result = +/* + * Attestation reprocessing queue metrics. + */ +pub static BEACON_PROCESSOR_REPROCESSING_QUEUE_TOTAL: LazyLock> = + LazyLock::new(|| { try_create_int_gauge_vec( - "beacon_processor_reprocessing_queue_total", - "Count of items in a reprocessing queue.", - &["type"] - ); - pub static ref BEACON_PROCESSOR_REPROCESSING_QUEUE_EXPIRED_ATTESTATIONS: Result = try_create_int_counter( + "beacon_processor_reprocessing_queue_total", + "Count of items in a reprocessing queue.", + &["type"], + ) + }); +pub static BEACON_PROCESSOR_REPROCESSING_QUEUE_EXPIRED_ATTESTATIONS: LazyLock> = + LazyLock::new(|| { + try_create_int_counter( "beacon_processor_reprocessing_queue_expired_attestations", "Number of queued attestations which have expired before a matching block has been found." - ); - pub static ref BEACON_PROCESSOR_REPROCESSING_QUEUE_MATCHED_ATTESTATIONS: Result = try_create_int_counter( - "beacon_processor_reprocessing_queue_matched_attestations", - "Number of queued attestations where as matching block has been imported." - ); + ) + }); +pub static BEACON_PROCESSOR_REPROCESSING_QUEUE_MATCHED_ATTESTATIONS: LazyLock> = + LazyLock::new(|| { + try_create_int_counter( + "beacon_processor_reprocessing_queue_matched_attestations", + "Number of queued attestations where as matching block has been imported.", + ) + }); - /* - * Light client update reprocessing queue metrics. 
- */ - pub static ref BEACON_PROCESSOR_REPROCESSING_QUEUE_EXPIRED_OPTIMISTIC_UPDATES: Result = try_create_int_counter( +/* + * Light client update reprocessing queue metrics. + */ +pub static BEACON_PROCESSOR_REPROCESSING_QUEUE_EXPIRED_OPTIMISTIC_UPDATES: LazyLock< + Result, +> = LazyLock::new(|| { + try_create_int_counter( "beacon_processor_reprocessing_queue_expired_optimistic_updates", "Number of queued light client optimistic updates which have expired before a matching block has been found." - ); - pub static ref BEACON_PROCESSOR_REPROCESSING_QUEUE_MATCHED_OPTIMISTIC_UPDATES: Result = try_create_int_counter( + ) +}); +pub static BEACON_PROCESSOR_REPROCESSING_QUEUE_MATCHED_OPTIMISTIC_UPDATES: LazyLock< + Result, +> = LazyLock::new(|| { + try_create_int_counter( "beacon_processor_reprocessing_queue_matched_optimistic_updates", "Number of queued light client optimistic updates where as matching block has been imported." - ); + ) +}); - /// Errors and Debugging Stats - pub static ref BEACON_PROCESSOR_SEND_ERROR_PER_WORK_TYPE: Result = +/// Errors and Debugging Stats +pub static BEACON_PROCESSOR_SEND_ERROR_PER_WORK_TYPE: LazyLock> = + LazyLock::new(|| { try_create_int_counter_vec( "beacon_processor_send_error_per_work_type", "Total number of beacon processor send error per work type", - &["type"] - ); -} + &["type"], + ) + }); diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index 4ac035d17b..88ae650e72 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -31,7 +31,6 @@ sensitive_url = { workspace = true } genesis = { workspace = true } task_executor = { workspace = true } environment = { workspace = true } -lazy_static = { workspace = true } lighthouse_metrics = { workspace = true } time = "0.3.5" directory = { workspace = true } diff --git a/beacon_node/client/src/metrics.rs b/beacon_node/client/src/metrics.rs index f1027bb821..ebc4fe70a7 100644 --- a/beacon_node/client/src/metrics.rs +++ 
b/beacon_node/client/src/metrics.rs @@ -1,19 +1,23 @@ -use lazy_static::lazy_static; pub use lighthouse_metrics::*; +use std::sync::LazyLock; -lazy_static! { - pub static ref SYNC_SLOTS_PER_SECOND: Result = try_create_int_gauge( +pub static SYNC_SLOTS_PER_SECOND: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( "sync_slots_per_second", - "The number of blocks being imported per second" - ); + "The number of blocks being imported per second", + ) +}); - pub static ref IS_SYNCED: Result = try_create_int_gauge( +pub static IS_SYNCED: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( "sync_eth2_synced", "Metric to check if the beacon chain is synced to head. 0 if not synced and non-zero if synced" - ); + ) +}); - pub static ref NOTIFIER_HEAD_SLOT: Result = try_create_int_gauge( +pub static NOTIFIER_HEAD_SLOT: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( "notifier_head_slot", - "The head slot sourced from the beacon chain notifier" - ); -} + "The head slot sourced from the beacon chain notifier", + ) +}); diff --git a/beacon_node/eth1/Cargo.toml b/beacon_node/eth1/Cargo.toml index 2ffca4a571..6733c5bfd3 100644 --- a/beacon_node/eth1/Cargo.toml +++ b/beacon_node/eth1/Cargo.toml @@ -25,7 +25,6 @@ superstruct = { workspace = true } tokio = { workspace = true } state_processing = { workspace = true } lighthouse_metrics = { workspace = true } -lazy_static = { workspace = true } task_executor = { workspace = true } eth2 = { workspace = true } sensitive_url = { workspace = true } diff --git a/beacon_node/eth1/src/metrics.rs b/beacon_node/eth1/src/metrics.rs index ad94d42ecb..9a11e7a692 100644 --- a/beacon_node/eth1/src/metrics.rs +++ b/beacon_node/eth1/src/metrics.rs @@ -1,30 +1,41 @@ pub use lighthouse_metrics::*; +use std::sync::LazyLock; -use lazy_static::lazy_static; +/* + * Eth1 blocks + */ +pub static BLOCK_CACHE_LEN: LazyLock> = + LazyLock::new(|| try_create_int_gauge("eth1_block_cache_len", "Count of eth1 blocks in cache")); +pub static 
LATEST_CACHED_BLOCK_TIMESTAMP: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( + "eth1_latest_cached_block_timestamp", + "Timestamp of latest block in eth1 cache", + ) +}); -lazy_static! { - /* - * Eth1 blocks - */ - pub static ref BLOCK_CACHE_LEN: Result = - try_create_int_gauge("eth1_block_cache_len", "Count of eth1 blocks in cache"); - pub static ref LATEST_CACHED_BLOCK_TIMESTAMP: Result = - try_create_int_gauge("eth1_latest_cached_block_timestamp", "Timestamp of latest block in eth1 cache"); +/* + * Eth1 deposits + */ +pub static DEPOSIT_CACHE_LEN: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( + "eth1_deposit_cache_len", + "Number of deposits in the eth1 cache", + ) +}); +pub static HIGHEST_PROCESSED_DEPOSIT_BLOCK: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( + "eth1_highest_processed_deposit_block", + "Number of the last block checked for deposits", + ) +}); - /* - * Eth1 deposits - */ - pub static ref DEPOSIT_CACHE_LEN: Result = - try_create_int_gauge("eth1_deposit_cache_len", "Number of deposits in the eth1 cache"); - pub static ref HIGHEST_PROCESSED_DEPOSIT_BLOCK: Result = - try_create_int_gauge("eth1_highest_processed_deposit_block", "Number of the last block checked for deposits"); +/* + * Eth1 rpc connection + */ - /* - * Eth1 rpc connection - */ - - pub static ref ETH1_CONNECTED: Result = try_create_int_gauge( - "sync_eth1_connected", "Set to 1 if connected to an eth1 node, otherwise set to 0" - ); - -} +pub static ETH1_CONNECTED: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( + "sync_eth1_connected", + "Set to 1 if connected to an eth1 node, otherwise set to 0", + ) +}); diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml index f9f599c769..0009cd002e 100644 --- a/beacon_node/execution_layer/Cargo.toml +++ b/beacon_node/execution_layer/Cargo.toml @@ -36,7 +36,6 @@ tempfile = { workspace = true } rand = { workspace = true } zeroize = { workspace = true } lighthouse_metrics = { 
workspace = true } -lazy_static = { workspace = true } ethers-core = { workspace = true } builder_client = { path = "../builder_client" } fork_choice = { workspace = true } diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index 1c03cc81fc..ecaf9c6c23 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -3,13 +3,13 @@ use super::*; use crate::auth::Auth; use crate::json_structures::*; -use lazy_static::lazy_static; use lighthouse_version::{COMMIT_PREFIX, VERSION}; use reqwest::header::CONTENT_TYPE; use sensitive_url::SensitiveUrl; use serde::de::DeserializeOwned; use serde_json::json; use std::collections::HashSet; +use std::sync::LazyLock; use tokio::sync::Mutex; use std::time::{Duration, Instant}; @@ -81,18 +81,17 @@ pub static LIGHTHOUSE_CAPABILITIES: &[&str] = &[ ENGINE_GET_CLIENT_VERSION_V1, ]; -lazy_static! { - /// We opt to initialize the JsonClientVersionV1 rather than the ClientVersionV1 - /// for two reasons: - /// 1. This saves the overhead of converting into Json for every engine call - /// 2. The Json version lacks error checking so we can avoid calling `unwrap()` - pub static ref LIGHTHOUSE_JSON_CLIENT_VERSION: JsonClientVersionV1 = JsonClientVersionV1 { +/// We opt to initialize the JsonClientVersionV1 rather than the ClientVersionV1 +/// for two reasons: +/// 1. This saves the overhead of converting into Json for every engine call +/// 2. The Json version lacks error checking so we can avoid calling `unwrap()` +pub static LIGHTHOUSE_JSON_CLIENT_VERSION: LazyLock = + LazyLock::new(|| JsonClientVersionV1 { code: ClientCode::Lighthouse.to_string(), name: "Lighthouse".to_string(), version: VERSION.replace("Lighthouse/", ""), commit: COMMIT_PREFIX.to_string(), - }; -} + }); /// Contains methods to convert arbitrary bytes to an ETH2 deposit contract object. 
pub mod deposit_log { diff --git a/beacon_node/execution_layer/src/metrics.rs b/beacon_node/execution_layer/src/metrics.rs index 6aaada3dff..c3da449535 100644 --- a/beacon_node/execution_layer/src/metrics.rs +++ b/beacon_node/execution_layer/src/metrics.rs @@ -1,4 +1,5 @@ pub use lighthouse_metrics::*; +use std::sync::LazyLock; pub const HIT: &str = "hit"; pub const MISS: &str = "miss"; @@ -16,72 +17,109 @@ pub const BUILDER: &str = "builder"; pub const SUCCESS: &str = "success"; pub const FAILURE: &str = "failure"; -lazy_static::lazy_static! { - pub static ref EXECUTION_LAYER_PROPOSER_INSERTED: Result = try_create_int_counter( +pub static EXECUTION_LAYER_PROPOSER_INSERTED: LazyLock> = LazyLock::new(|| { + try_create_int_counter( "execution_layer_proposer_inserted", "Count of times a new proposer is known", - ); - pub static ref EXECUTION_LAYER_PROPOSER_DATA_UPDATED: Result = try_create_int_counter( - "execution_layer_proposer_data_updated", - "Count of times new proposer data is supplied", - ); - pub static ref EXECUTION_LAYER_REQUEST_TIMES: Result = - try_create_histogram_vec_with_buckets( + ) +}); +pub static EXECUTION_LAYER_PROPOSER_DATA_UPDATED: LazyLock> = + LazyLock::new(|| { + try_create_int_counter( + "execution_layer_proposer_data_updated", + "Count of times new proposer data is supplied", + ) + }); +pub static EXECUTION_LAYER_REQUEST_TIMES: LazyLock> = LazyLock::new(|| { + try_create_histogram_vec_with_buckets( "execution_layer_request_times", "Duration of calls to ELs", decimal_buckets(-2, 1), - &["method"] - ); - pub static ref EXECUTION_LAYER_PAYLOAD_ATTRIBUTES_LOOKAHEAD: Result = try_create_histogram( + &["method"], + ) +}); +pub static EXECUTION_LAYER_PAYLOAD_ATTRIBUTES_LOOKAHEAD: LazyLock> = + LazyLock::new(|| { + try_create_histogram( "execution_layer_payload_attributes_lookahead", "Duration between an fcU call with PayloadAttributes and when the block should be produced", - ); - pub static ref EXECUTION_LAYER_PRE_PREPARED_PAYLOAD_ID: Result = 
try_create_int_counter_vec( + ) + }); +pub static EXECUTION_LAYER_PRE_PREPARED_PAYLOAD_ID: LazyLock> = LazyLock::new( + || { + try_create_int_counter_vec( "execution_layer_pre_prepared_payload_id", "Indicates hits or misses for already having prepared a payload id before payload production", &["event"] - ); - pub static ref EXECUTION_LAYER_GET_PAYLOAD_BY_BLOCK_HASH: Result = try_create_histogram( - "execution_layer_get_payload_by_block_hash_time", - "Time to reconstruct a payload from the EE using eth_getBlockByHash" - ); - pub static ref EXECUTION_LAYER_GET_PAYLOAD_BODIES_BY_RANGE: Result = try_create_histogram( - "execution_layer_get_payload_bodies_by_range_time", - "Time to fetch a range of payload bodies from the EE" - ); - pub static ref EXECUTION_LAYER_VERIFY_BLOCK_HASH: Result = try_create_histogram_with_buckets( + ) + }, +); +pub static EXECUTION_LAYER_GET_PAYLOAD_BY_BLOCK_HASH: LazyLock> = + LazyLock::new(|| { + try_create_histogram( + "execution_layer_get_payload_by_block_hash_time", + "Time to reconstruct a payload from the EE using eth_getBlockByHash", + ) + }); +pub static EXECUTION_LAYER_GET_PAYLOAD_BODIES_BY_RANGE: LazyLock> = + LazyLock::new(|| { + try_create_histogram( + "execution_layer_get_payload_bodies_by_range_time", + "Time to fetch a range of payload bodies from the EE", + ) + }); +pub static EXECUTION_LAYER_VERIFY_BLOCK_HASH: LazyLock> = LazyLock::new(|| { + try_create_histogram_with_buckets( "execution_layer_verify_block_hash_time", "Time to verify the execution block hash in Lighthouse, without the EL", - Ok(vec![10e-6, 50e-6, 100e-6, 500e-6, 1e-3, 5e-3, 10e-3, 50e-3, 100e-3, 500e-3]), - ); - pub static ref EXECUTION_LAYER_PAYLOAD_STATUS: Result = try_create_int_counter_vec( + Ok(vec![ + 10e-6, 50e-6, 100e-6, 500e-6, 1e-3, 5e-3, 10e-3, 50e-3, 100e-3, 500e-3, + ]), + ) +}); +pub static EXECUTION_LAYER_PAYLOAD_STATUS: LazyLock> = LazyLock::new(|| { + try_create_int_counter_vec( "execution_layer_payload_status", "Indicates the payload status 
returned for a particular method", - &["method", "status"] - ); - pub static ref EXECUTION_LAYER_GET_PAYLOAD_OUTCOME: Result = try_create_int_counter_vec( - "execution_layer_get_payload_outcome", - "The success/failure outcomes from calling get_payload", - &["outcome"] - ); - pub static ref EXECUTION_LAYER_BUILDER_REVEAL_PAYLOAD_OUTCOME: Result = try_create_int_counter_vec( - "execution_layer_builder_reveal_payload_outcome", - "The success/failure outcomes from a builder un-blinding a payload", - &["outcome"] - ); - pub static ref EXECUTION_LAYER_GET_PAYLOAD_SOURCE: Result = try_create_int_counter_vec( - "execution_layer_get_payload_source", - "The source of each payload returned from get_payload", - &["source"] - ); - pub static ref EXECUTION_LAYER_GET_PAYLOAD_BUILDER_REJECTIONS: Result = try_create_int_counter_vec( - "execution_layer_get_payload_builder_rejections", - "The reasons why a payload from a builder was rejected", - &["reason"] - ); - pub static ref EXECUTION_LAYER_PAYLOAD_BIDS: Result = try_create_int_gauge_vec( + &["method", "status"], + ) +}); +pub static EXECUTION_LAYER_GET_PAYLOAD_OUTCOME: LazyLock> = + LazyLock::new(|| { + try_create_int_counter_vec( + "execution_layer_get_payload_outcome", + "The success/failure outcomes from calling get_payload", + &["outcome"], + ) + }); +pub static EXECUTION_LAYER_BUILDER_REVEAL_PAYLOAD_OUTCOME: LazyLock> = + LazyLock::new(|| { + try_create_int_counter_vec( + "execution_layer_builder_reveal_payload_outcome", + "The success/failure outcomes from a builder un-blinding a payload", + &["outcome"], + ) + }); +pub static EXECUTION_LAYER_GET_PAYLOAD_SOURCE: LazyLock> = + LazyLock::new(|| { + try_create_int_counter_vec( + "execution_layer_get_payload_source", + "The source of each payload returned from get_payload", + &["source"], + ) + }); +pub static EXECUTION_LAYER_GET_PAYLOAD_BUILDER_REJECTIONS: LazyLock> = + LazyLock::new(|| { + try_create_int_counter_vec( + "execution_layer_get_payload_builder_rejections", + 
"The reasons why a payload from a builder was rejected", + &["reason"], + ) + }); +pub static EXECUTION_LAYER_PAYLOAD_BIDS: LazyLock> = LazyLock::new(|| { + try_create_int_gauge_vec( "execution_layer_payload_bids", "The gwei bid value of payloads received by local EEs or builders. Only shows values up to i64::MAX.", &["source"] - ); -} + ) +}); diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs index 7b00ca9fbc..6fd853975d 100644 --- a/beacon_node/execution_layer/src/test_utils/mod.rs +++ b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -10,7 +10,6 @@ use environment::null_logger; use execution_block_generator::PoWBlock; use handle_rpc::handle_rpc; use kzg::Kzg; -use lazy_static::lazy_static; use parking_lot::{Mutex, RwLock, RwLockWriteGuard}; use serde::{Deserialize, Serialize}; use serde_json::json; @@ -20,7 +19,7 @@ use std::convert::Infallible; use std::future::Future; use std::marker::PhantomData; use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4}; -use std::sync::Arc; +use std::sync::{Arc, LazyLock}; use tokio::{runtime, sync::oneshot}; use types::{EthSpec, ExecutionBlockHash, Uint256}; use warp::{http::StatusCode, Filter, Rejection}; @@ -56,14 +55,13 @@ pub const DEFAULT_ENGINE_CAPABILITIES: EngineCapabilities = EngineCapabilities { get_client_version_v1: true, }; -lazy_static! 
{ - pub static ref DEFAULT_CLIENT_VERSION: JsonClientVersionV1 = JsonClientVersionV1 { +pub static DEFAULT_CLIENT_VERSION: LazyLock = + LazyLock::new(|| JsonClientVersionV1 { code: "MC".to_string(), // "mock client" name: "Mock Execution Client".to_string(), version: "0.1.0".to_string(), commit: "0xabcdef01".to_string(), - }; -} + }); mod execution_block_generator; mod handle_rpc; diff --git a/beacon_node/http_api/Cargo.toml b/beacon_node/http_api/Cargo.toml index b58e0442f7..2c54c1375a 100644 --- a/beacon_node/http_api/Cargo.toml +++ b/beacon_node/http_api/Cargo.toml @@ -21,7 +21,6 @@ eth1 = { workspace = true } state_processing = { workspace = true } lighthouse_version = { workspace = true } lighthouse_metrics = { workspace = true } -lazy_static = { workspace = true } warp_utils = { workspace = true } slot_clock = { workspace = true } ethereum_ssz = { workspace = true } diff --git a/beacon_node/http_api/src/metrics.rs b/beacon_node/http_api/src/metrics.rs index 3eada3a3d4..970eef8dd0 100644 --- a/beacon_node/http_api/src/metrics.rs +++ b/beacon_node/http_api/src/metrics.rs @@ -1,51 +1,41 @@ pub use lighthouse_metrics::*; +use std::sync::LazyLock; -lazy_static::lazy_static! 
{ - pub static ref HTTP_API_PATHS_TOTAL: Result = try_create_int_counter_vec( +pub static HTTP_API_PATHS_TOTAL: LazyLock> = LazyLock::new(|| { + try_create_int_counter_vec( "http_api_paths_total", "Count of HTTP requests received", - &["path"] - ); - pub static ref HTTP_API_STATUS_CODES_TOTAL: Result = try_create_int_counter_vec( + &["path"], + ) +}); +pub static HTTP_API_STATUS_CODES_TOTAL: LazyLock> = LazyLock::new(|| { + try_create_int_counter_vec( "http_api_status_codes_total", "Count of HTTP status codes returned", - &["status"] - ); - pub static ref HTTP_API_PATHS_TIMES: Result = try_create_histogram_vec( + &["status"], + ) +}); +pub static HTTP_API_PATHS_TIMES: LazyLock> = LazyLock::new(|| { + try_create_histogram_vec( "http_api_paths_times", "Duration to process HTTP requests per path", - &["path"] - ); + &["path"], + ) +}); - pub static ref HTTP_API_BEACON_PROPOSER_CACHE_TIMES: Result = try_create_histogram( - "http_api_beacon_proposer_cache_build_times", - "Duration to process HTTP requests per path", - ); - pub static ref HTTP_API_BEACON_PROPOSER_CACHE_HITS_TOTAL: Result = try_create_int_counter( - "http_api_beacon_proposer_cache_hits_total", - "Count of times the proposer cache has been hit", - ); - pub static ref HTTP_API_BEACON_PROPOSER_CACHE_MISSES_TOTAL: Result = try_create_int_counter( - "http_api_beacon_proposer_cache_misses_total", - "Count of times the proposer cache has been missed", - ); - pub static ref HTTP_API_BLOCK_BROADCAST_DELAY_TIMES: Result = try_create_histogram_vec( - "http_api_block_broadcast_delay_times", - "Time between start of the slot and when the block completed broadcast and processing", - &["provenance"] - ); - pub static ref HTTP_API_BLOCK_GOSSIP_TIMES: Result = try_create_histogram_vec_with_buckets( +pub static HTTP_API_BLOCK_BROADCAST_DELAY_TIMES: LazyLock> = + LazyLock::new(|| { + try_create_histogram_vec( + "http_api_block_broadcast_delay_times", + "Time between start of the slot and when the block completed broadcast 
and processing", + &["provenance"], + ) + }); +pub static HTTP_API_BLOCK_GOSSIP_TIMES: LazyLock> = LazyLock::new(|| { + try_create_histogram_vec_with_buckets( "http_api_block_gossip_times", "Time between receiving the block on HTTP and publishing it on gossip", decimal_buckets(-2, 2), - &["provenance"] - ); - pub static ref HTTP_API_BLOCK_PUBLISHED_LATE_TOTAL: Result = try_create_int_counter( - "http_api_block_published_late_total", - "The count of times a block was published beyond more than half way to the attestation deadline" - ); - pub static ref HTTP_API_BLOCK_PUBLISHED_VERY_LATE_TOTAL: Result = try_create_int_counter( - "http_api_block_published_very_late_total", - "The count of times a block was published beyond the attestation deadline" - ); -} + &["provenance"], + ) +}); diff --git a/beacon_node/http_metrics/src/metrics.rs b/beacon_node/http_metrics/src/metrics.rs index e6e06caa84..d68efff432 100644 --- a/beacon_node/http_metrics/src/metrics.rs +++ b/beacon_node/http_metrics/src/metrics.rs @@ -17,7 +17,7 @@ pub fn gather_prometheus_metrics( // - Statically updated: things which are only updated at the time of the scrape (used where we // can avoid cluttering up code with metrics calls). // - // The `lighthouse_metrics` crate has a `DEFAULT_REGISTRY` global singleton (via `lazy_static`) + // The `lighthouse_metrics` crate has a `DEFAULT_REGISTRY` global singleton // which keeps the state of all the metrics. Dynamically updated things will already be // up-to-date in the registry (because they update themselves) however statically updated // things need to be "scraped". 
diff --git a/beacon_node/lighthouse_network/Cargo.toml b/beacon_node/lighthouse_network/Cargo.toml index 3dfa24d467..cb89d492d1 100644 --- a/beacon_node/lighthouse_network/Cargo.toml +++ b/beacon_node/lighthouse_network/Cargo.toml @@ -20,7 +20,6 @@ futures = { workspace = true } error-chain = { workspace = true } dirs = { workspace = true } fnv = { workspace = true } -lazy_static = { workspace = true } lighthouse_metrics = { workspace = true } smallvec = { workspace = true } tokio-io-timeout = "1" diff --git a/beacon_node/lighthouse_network/src/metrics.rs b/beacon_node/lighthouse_network/src/metrics.rs index 8efed44eb4..9b11fe5a38 100644 --- a/beacon_node/lighthouse_network/src/metrics.rs +++ b/beacon_node/lighthouse_network/src/metrics.rs @@ -1,152 +1,194 @@ pub use lighthouse_metrics::*; +use std::sync::LazyLock; -use lazy_static::lazy_static; - -lazy_static! { - pub static ref NAT_OPEN: Result = try_create_int_gauge_vec( +pub static NAT_OPEN: LazyLock> = LazyLock::new(|| { + try_create_int_gauge_vec( "nat_open", "An estimate indicating if the local node is reachable from external nodes", - &["protocol"] - ); - pub static ref ADDRESS_UPDATE_COUNT: Result = try_create_int_counter( + &["protocol"], + ) +}); +pub static ADDRESS_UPDATE_COUNT: LazyLock> = LazyLock::new(|| { + try_create_int_counter( "libp2p_address_update_total", - "Count of libp2p socked updated events (when our view of our IP address has changed)" - ); - pub static ref PEERS_CONNECTED: Result = try_create_int_gauge( - "libp2p_peers", - "Count of libp2p peers currently connected" - ); + "Count of libp2p socked updated events (when our view of our IP address has changed)", + ) +}); +pub static PEERS_CONNECTED: LazyLock> = LazyLock::new(|| { + try_create_int_gauge("libp2p_peers", "Count of libp2p peers currently connected") +}); - pub static ref PEERS_CONNECTED_MULTI: Result = - try_create_int_gauge_vec("libp2p_peers_multi", "Count of libp2p peers currently connected", &["direction", "transport"]); 
+pub static PEERS_CONNECTED_MULTI: LazyLock> = LazyLock::new(|| { + try_create_int_gauge_vec( + "libp2p_peers_multi", + "Count of libp2p peers currently connected", + &["direction", "transport"], + ) +}); - pub static ref TCP_PEERS_CONNECTED: Result = try_create_int_gauge( +pub static TCP_PEERS_CONNECTED: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( "libp2p_tcp_peers", - "Count of libp2p peers currently connected via TCP" - ); + "Count of libp2p peers currently connected via TCP", + ) +}); - pub static ref QUIC_PEERS_CONNECTED: Result = try_create_int_gauge( +pub static QUIC_PEERS_CONNECTED: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( "libp2p_quic_peers", - "Count of libp2p peers currently connected via QUIC" - ); + "Count of libp2p peers currently connected via QUIC", + ) +}); - pub static ref PEER_CONNECT_EVENT_COUNT: Result = try_create_int_counter( +pub static PEER_CONNECT_EVENT_COUNT: LazyLock> = LazyLock::new(|| { + try_create_int_counter( "libp2p_peer_connect_event_total", - "Count of libp2p peer connect events (not the current number of connected peers)" - ); - pub static ref PEER_DISCONNECT_EVENT_COUNT: Result = try_create_int_counter( + "Count of libp2p peer connect events (not the current number of connected peers)", + ) +}); +pub static PEER_DISCONNECT_EVENT_COUNT: LazyLock> = LazyLock::new(|| { + try_create_int_counter( "libp2p_peer_disconnect_event_total", - "Count of libp2p peer disconnect events" - ); - pub static ref DISCOVERY_BYTES: Result = try_create_int_gauge_vec( + "Count of libp2p peer disconnect events", + ) +}); +pub static DISCOVERY_BYTES: LazyLock> = LazyLock::new(|| { + try_create_int_gauge_vec( "discovery_bytes", "The number of bytes sent and received in discovery", - &["direction"] - ); - pub static ref DISCOVERY_QUEUE: Result = try_create_int_gauge( + &["direction"], + ) +}); +pub static DISCOVERY_QUEUE: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( "discovery_queue_size", - "The number of discovery 
queries awaiting execution" - ); - pub static ref DISCOVERY_REQS: Result = try_create_float_gauge( + "The number of discovery queries awaiting execution", + ) +}); +pub static DISCOVERY_REQS: LazyLock> = LazyLock::new(|| { + try_create_float_gauge( "discovery_requests", - "The number of unsolicited discovery requests per second" - ); - pub static ref DISCOVERY_SESSIONS: Result = try_create_int_gauge( + "The number of unsolicited discovery requests per second", + ) +}); +pub static DISCOVERY_SESSIONS: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( "discovery_sessions", - "The number of active discovery sessions with peers" - ); + "The number of active discovery sessions with peers", + ) +}); - pub static ref PEERS_PER_CLIENT: Result = try_create_int_gauge_vec( +pub static PEERS_PER_CLIENT: LazyLock> = LazyLock::new(|| { + try_create_int_gauge_vec( "libp2p_peers_per_client", "The connected peers via client implementation", - &["Client"] - ); - pub static ref FAILED_ATTESTATION_PUBLISHES_PER_SUBNET: Result = + &["Client"], + ) +}); +pub static FAILED_ATTESTATION_PUBLISHES_PER_SUBNET: LazyLock> = + LazyLock::new(|| { try_create_int_gauge_vec( "gossipsub_failed_attestation_publishes_per_subnet", "Failed attestation publishes per subnet", - &["subnet"] - ); - pub static ref FAILED_PUBLISHES_PER_MAIN_TOPIC: Result = try_create_int_gauge_vec( + &["subnet"], + ) + }); +pub static FAILED_PUBLISHES_PER_MAIN_TOPIC: LazyLock> = LazyLock::new(|| { + try_create_int_gauge_vec( "gossipsub_failed_publishes_per_main_topic", "Failed gossip publishes", - &["topic_hash"] - ); - pub static ref TOTAL_RPC_ERRORS_PER_CLIENT: Result = try_create_int_counter_vec( + &["topic_hash"], + ) +}); +pub static TOTAL_RPC_ERRORS_PER_CLIENT: LazyLock> = LazyLock::new(|| { + try_create_int_counter_vec( "libp2p_rpc_errors_per_client", "RPC errors per client", - &["client", "rpc_error", "direction"] - ); - pub static ref TOTAL_RPC_REQUESTS: Result = try_create_int_counter_vec( - 
"libp2p_rpc_requests_total", - "RPC requests total", - &["type"] - ); - pub static ref PEER_ACTION_EVENTS_PER_CLIENT: Result = - try_create_int_counter_vec( - "libp2p_peer_actions_per_client", - "Score reports per client", - &["client", "action", "source"] - ); - pub static ref GOSSIP_UNACCEPTED_MESSAGES_PER_CLIENT: Result = + &["client", "rpc_error", "direction"], + ) +}); +pub static TOTAL_RPC_REQUESTS: LazyLock> = LazyLock::new(|| { + try_create_int_counter_vec("libp2p_rpc_requests_total", "RPC requests total", &["type"]) +}); +pub static PEER_ACTION_EVENTS_PER_CLIENT: LazyLock> = LazyLock::new(|| { + try_create_int_counter_vec( + "libp2p_peer_actions_per_client", + "Score reports per client", + &["client", "action", "source"], + ) +}); +pub static GOSSIP_UNACCEPTED_MESSAGES_PER_CLIENT: LazyLock> = + LazyLock::new(|| { try_create_int_counter_vec( "gossipsub_unaccepted_messages_per_client", "Gossipsub messages that we did not accept, per client", - &["client", "validation_result"] - ); - pub static ref GOSSIP_LATE_PUBLISH_PER_TOPIC_KIND: Result = + &["client", "validation_result"], + ) + }); +pub static GOSSIP_LATE_PUBLISH_PER_TOPIC_KIND: LazyLock> = + LazyLock::new(|| { try_create_int_counter_vec( "gossipsub_late_publish_per_topic_kind", "Messages published late to gossipsub per topic kind.", - &["topic_kind"] - ); - pub static ref GOSSIP_EXPIRED_LATE_PUBLISH_PER_TOPIC_KIND: Result = + &["topic_kind"], + ) + }); +pub static GOSSIP_EXPIRED_LATE_PUBLISH_PER_TOPIC_KIND: LazyLock> = + LazyLock::new(|| { try_create_int_counter_vec( "gossipsub_expired_late_publish_per_topic_kind", "Messages that expired waiting to be published on retry to gossipsub per topic kind.", - &["topic_kind"] - ); - pub static ref GOSSIP_FAILED_LATE_PUBLISH_PER_TOPIC_KIND: Result = + &["topic_kind"], + ) + }); +pub static GOSSIP_FAILED_LATE_PUBLISH_PER_TOPIC_KIND: LazyLock> = + LazyLock::new(|| { try_create_int_counter_vec( "gossipsub_failed_late_publish_per_topic_kind", "Messages that failed 
to be published on retry to gossipsub per topic kind.", - &["topic_kind"] - ); - pub static ref PEER_SCORE_DISTRIBUTION: Result = - try_create_int_gauge_vec( - "peer_score_distribution", - "The distribution of connected peer scores", - &["position"] - ); - pub static ref PEER_SCORE_PER_CLIENT: Result = - try_create_float_gauge_vec( - "peer_score_per_client", - "Average score per client", - &["client"] - ); + &["topic_kind"], + ) + }); +pub static PEER_SCORE_DISTRIBUTION: LazyLock> = LazyLock::new(|| { + try_create_int_gauge_vec( + "peer_score_distribution", + "The distribution of connected peer scores", + &["position"], + ) +}); +pub static PEER_SCORE_PER_CLIENT: LazyLock> = LazyLock::new(|| { + try_create_float_gauge_vec( + "peer_score_per_client", + "Average score per client", + &["client"], + ) +}); - pub static ref SUBNET_PEERS_FOUND: Result = - try_create_int_counter_vec( - "discovery_query_peers_found", - "Total number of peers found in attestation subnets and sync subnets", - &["type"] - ); - pub static ref TOTAL_SUBNET_QUERIES: Result = - try_create_int_counter_vec( - "discovery_total_queries", - "Total number of discovery subnet queries", - &["type"] - ); +pub static SUBNET_PEERS_FOUND: LazyLock> = LazyLock::new(|| { + try_create_int_counter_vec( + "discovery_query_peers_found", + "Total number of peers found in attestation subnets and sync subnets", + &["type"], + ) +}); +pub static TOTAL_SUBNET_QUERIES: LazyLock> = LazyLock::new(|| { + try_create_int_counter_vec( + "discovery_total_queries", + "Total number of discovery subnet queries", + &["type"], + ) +}); - /* - * Peer Reporting - */ - pub static ref REPORT_PEER_MSGS: Result = try_create_int_counter_vec( +/* + * Peer Reporting + */ +pub static REPORT_PEER_MSGS: LazyLock> = LazyLock::new(|| { + try_create_int_counter_vec( "libp2p_report_peer_msgs_total", "Number of peer reports per msg", - &["msg"] - ); -} + &["msg"], + ) +}); pub fn scrape_discovery_metrics() { let metrics = diff --git 
a/beacon_node/lighthouse_network/src/peer_manager/peerdb/score.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb/score.rs index 8187dc4ba4..c8425fc104 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb/score.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb/score.rs @@ -6,15 +6,13 @@ //! //! The scoring algorithms are currently experimental. use crate::service::gossipsub_scoring_parameters::GREYLIST_THRESHOLD as GOSSIPSUB_GREYLIST_THRESHOLD; -use lazy_static::lazy_static; use serde::Serialize; +use std::sync::LazyLock; use std::time::Instant; use strum::AsRefStr; use tokio::time::Duration; -lazy_static! { - static ref HALFLIFE_DECAY: f64 = -(2.0f64.ln()) / SCORE_HALFLIFE; -} +static HALFLIFE_DECAY: LazyLock = LazyLock::new(|| -(2.0f64.ln()) / SCORE_HALFLIFE); /// The default score for new peers. pub(crate) const DEFAULT_SCORE: f64 = 0.0; diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index bfaaef9b3b..2cdd730a2b 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -3,13 +3,12 @@ use crate::rpc::codec::{base::BaseInboundCodec, ssz_snappy::SSZSnappyInboundCode use futures::future::BoxFuture; use futures::prelude::{AsyncRead, AsyncWrite}; use futures::{FutureExt, StreamExt}; -use lazy_static::lazy_static; use libp2p::core::{InboundUpgrade, UpgradeInfo}; use ssz::Encode; use ssz_types::VariableList; use std::io; use std::marker::PhantomData; -use std::sync::Arc; +use std::sync::{Arc, LazyLock}; use std::time::Duration; use strum::{AsRefStr, Display, EnumString, IntoStaticStr}; use tokio_io_timeout::TimeoutStream; @@ -18,112 +17,131 @@ use tokio_util::{ compat::{Compat, FuturesAsyncReadCompatExt}, }; use types::{ - BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockBellatrix, BeaconBlockCapella, - BeaconBlockElectra, BlobSidecar, ChainSpec, EmptyBlock, EthSpec, ForkContext, 
ForkName, - LightClientBootstrap, LightClientBootstrapAltair, LightClientFinalityUpdate, - LightClientFinalityUpdateAltair, LightClientOptimisticUpdate, - LightClientOptimisticUpdateAltair, MainnetEthSpec, Signature, SignedBeaconBlock, + BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockCapella, BeaconBlockElectra, + BlobSidecar, ChainSpec, EmptyBlock, EthSpec, ForkContext, ForkName, LightClientBootstrap, + LightClientBootstrapAltair, LightClientFinalityUpdate, LightClientFinalityUpdateAltair, + LightClientOptimisticUpdate, LightClientOptimisticUpdateAltair, MainnetEthSpec, Signature, + SignedBeaconBlock, }; -lazy_static! { - // Note: Hardcoding the `EthSpec` type for `SignedBeaconBlock` as min/max values is - // same across different `EthSpec` implementations. - pub static ref SIGNED_BEACON_BLOCK_BASE_MIN: usize = SignedBeaconBlock::::from_block( - BeaconBlock::Base(BeaconBlockBase::::empty(&MainnetEthSpec::default_spec())), +// Note: Hardcoding the `EthSpec` type for `SignedBeaconBlock` as min/max values is +// same across different `EthSpec` implementations. 
+pub static SIGNED_BEACON_BLOCK_BASE_MIN: LazyLock = LazyLock::new(|| { + SignedBeaconBlock::::from_block( + BeaconBlock::Base(BeaconBlockBase::::empty( + &MainnetEthSpec::default_spec(), + )), Signature::empty(), ) .as_ssz_bytes() - .len(); - pub static ref SIGNED_BEACON_BLOCK_BASE_MAX: usize = SignedBeaconBlock::::from_block( + .len() +}); +pub static SIGNED_BEACON_BLOCK_BASE_MAX: LazyLock = LazyLock::new(|| { + SignedBeaconBlock::::from_block( BeaconBlock::Base(BeaconBlockBase::full(&MainnetEthSpec::default_spec())), Signature::empty(), ) .as_ssz_bytes() - .len(); + .len() +}); - pub static ref SIGNED_BEACON_BLOCK_ALTAIR_MIN: usize = SignedBeaconBlock::::from_block( - BeaconBlock::Altair(BeaconBlockAltair::::empty(&MainnetEthSpec::default_spec())), - Signature::empty(), - ) - .as_ssz_bytes() - .len(); - pub static ref SIGNED_BEACON_BLOCK_ALTAIR_MAX: usize = SignedBeaconBlock::::from_block( +pub static SIGNED_BEACON_BLOCK_ALTAIR_MAX: LazyLock = LazyLock::new(|| { + SignedBeaconBlock::::from_block( BeaconBlock::Altair(BeaconBlockAltair::full(&MainnetEthSpec::default_spec())), Signature::empty(), ) .as_ssz_bytes() - .len(); + .len() +}); - pub static ref SIGNED_BEACON_BLOCK_BELLATRIX_MIN: usize = SignedBeaconBlock::::from_block( - BeaconBlock::Bellatrix(BeaconBlockBellatrix::::empty(&MainnetEthSpec::default_spec())), - Signature::empty(), - ) - .as_ssz_bytes() - .len(); - - pub static ref SIGNED_BEACON_BLOCK_CAPELLA_MAX_WITHOUT_PAYLOAD: usize = SignedBeaconBlock::::from_block( +pub static SIGNED_BEACON_BLOCK_CAPELLA_MAX_WITHOUT_PAYLOAD: LazyLock = LazyLock::new(|| { + SignedBeaconBlock::::from_block( BeaconBlock::Capella(BeaconBlockCapella::full(&MainnetEthSpec::default_spec())), Signature::empty(), ) .as_ssz_bytes() - .len(); + .len() +}); - pub static ref SIGNED_BEACON_BLOCK_ELECTRA_MAX_WITHOUT_PAYLOAD: usize = SignedBeaconBlock::::from_block( +pub static SIGNED_BEACON_BLOCK_ELECTRA_MAX_WITHOUT_PAYLOAD: LazyLock = LazyLock::new(|| { + 
SignedBeaconBlock::::from_block( BeaconBlock::Electra(BeaconBlockElectra::full(&MainnetEthSpec::default_spec())), Signature::empty(), ) .as_ssz_bytes() - .len(); + .len() +}); - /// The `BeaconBlockBellatrix` block has an `ExecutionPayload` field which has a max size ~16 GiB for future proofing. - /// We calculate the value from its fields instead of constructing the block and checking the length. - /// Note: This is only the theoretical upper bound. We further bound the max size we receive over the network - /// with `max_chunk_size`. - pub static ref SIGNED_BEACON_BLOCK_BELLATRIX_MAX: usize = - // Size of a full altair block +/// The `BeaconBlockBellatrix` block has an `ExecutionPayload` field which has a max size ~16 GiB for future proofing. +/// We calculate the value from its fields instead of constructing the block and checking the length. +/// Note: This is only the theoretical upper bound. We further bound the max size we receive over the network +/// with `max_chunk_size`. +pub static SIGNED_BEACON_BLOCK_BELLATRIX_MAX: LazyLock = + LazyLock::new(|| // Size of a full altair block *SIGNED_BEACON_BLOCK_ALTAIR_MAX + types::ExecutionPayload::::max_execution_payload_bellatrix_size() // adding max size of execution payload (~16gb) - + ssz::BYTES_PER_LENGTH_OFFSET; // Adding the additional ssz offset for the `ExecutionPayload` field + + ssz::BYTES_PER_LENGTH_OFFSET); // Adding the additional ssz offset for the `ExecutionPayload` field - pub static ref SIGNED_BEACON_BLOCK_CAPELLA_MAX: usize = *SIGNED_BEACON_BLOCK_CAPELLA_MAX_WITHOUT_PAYLOAD +pub static SIGNED_BEACON_BLOCK_CAPELLA_MAX: LazyLock = LazyLock::new(|| { + *SIGNED_BEACON_BLOCK_CAPELLA_MAX_WITHOUT_PAYLOAD + types::ExecutionPayload::::max_execution_payload_capella_size() // adding max size of execution payload (~16gb) - + ssz::BYTES_PER_LENGTH_OFFSET; // Adding the additional ssz offset for the `ExecutionPayload` field + + ssz::BYTES_PER_LENGTH_OFFSET +}); // Adding the additional ssz offset for the 
`ExecutionPayload` field - pub static ref SIGNED_BEACON_BLOCK_DENEB_MAX: usize = *SIGNED_BEACON_BLOCK_CAPELLA_MAX_WITHOUT_PAYLOAD +pub static SIGNED_BEACON_BLOCK_DENEB_MAX: LazyLock = LazyLock::new(|| { + *SIGNED_BEACON_BLOCK_CAPELLA_MAX_WITHOUT_PAYLOAD + types::ExecutionPayload::::max_execution_payload_deneb_size() // adding max size of execution payload (~16gb) + ssz::BYTES_PER_LENGTH_OFFSET // Adding the additional offsets for the `ExecutionPayload` + (::ssz_fixed_len() * ::max_blobs_per_block()) - + ssz::BYTES_PER_LENGTH_OFFSET; // Length offset for the blob commitments field. - // - pub static ref SIGNED_BEACON_BLOCK_ELECTRA_MAX: usize = *SIGNED_BEACON_BLOCK_ELECTRA_MAX_WITHOUT_PAYLOAD + + ssz::BYTES_PER_LENGTH_OFFSET +}); // Length offset for the blob commitments field. + // +pub static SIGNED_BEACON_BLOCK_ELECTRA_MAX: LazyLock = LazyLock::new(|| { + *SIGNED_BEACON_BLOCK_ELECTRA_MAX_WITHOUT_PAYLOAD + types::ExecutionPayload::::max_execution_payload_electra_size() // adding max size of execution payload (~16gb) + ssz::BYTES_PER_LENGTH_OFFSET // Adding the additional ssz offset for the `ExecutionPayload` field + (::ssz_fixed_len() * ::max_blobs_per_block()) - + ssz::BYTES_PER_LENGTH_OFFSET; // Length offset for the blob commitments field. + + ssz::BYTES_PER_LENGTH_OFFSET +}); // Length offset for the blob commitments field. 
- pub static ref ERROR_TYPE_MIN: usize = - VariableList::::from(Vec::::new()) - .as_ssz_bytes() - .len(); - pub static ref ERROR_TYPE_MAX: usize = - VariableList::::from(vec![ - 0u8; - MAX_ERROR_LEN - as usize - ]) - .as_ssz_bytes() - .len(); +pub static ERROR_TYPE_MIN: LazyLock = LazyLock::new(|| { + VariableList::::from(Vec::::new()) + .as_ssz_bytes() + .len() +}); - pub static ref LIGHT_CLIENT_FINALITY_UPDATE_CAPELLA_MAX: usize = LightClientFinalityUpdate::::ssz_max_len_for_fork(ForkName::Capella); - pub static ref LIGHT_CLIENT_FINALITY_UPDATE_DENEB_MAX: usize = LightClientFinalityUpdate::::ssz_max_len_for_fork(ForkName::Deneb); - pub static ref LIGHT_CLIENT_FINALITY_UPDATE_ELECTRA_MAX: usize = LightClientFinalityUpdate::::ssz_max_len_for_fork(ForkName::Electra); - pub static ref LIGHT_CLIENT_OPTIMISTIC_UPDATE_CAPELLA_MAX: usize = LightClientOptimisticUpdate::::ssz_max_len_for_fork(ForkName::Capella); - pub static ref LIGHT_CLIENT_OPTIMISTIC_UPDATE_DENEB_MAX: usize = LightClientOptimisticUpdate::::ssz_max_len_for_fork(ForkName::Deneb); - pub static ref LIGHT_CLIENT_OPTIMISTIC_UPDATE_ELECTRA_MAX: usize = LightClientOptimisticUpdate::::ssz_max_len_for_fork(ForkName::Electra); - pub static ref LIGHT_CLIENT_BOOTSTRAP_CAPELLA_MAX: usize = LightClientBootstrap::::ssz_max_len_for_fork(ForkName::Capella); - pub static ref LIGHT_CLIENT_BOOTSTRAP_DENEB_MAX: usize = LightClientBootstrap::::ssz_max_len_for_fork(ForkName::Deneb); - pub static ref LIGHT_CLIENT_BOOTSTRAP_ELECTRA_MAX: usize = LightClientBootstrap::::ssz_max_len_for_fork(ForkName::Electra); -} +pub static ERROR_TYPE_MAX: LazyLock = LazyLock::new(|| { + VariableList::::from(vec![0u8; MAX_ERROR_LEN as usize]) + .as_ssz_bytes() + .len() +}); + +pub static LIGHT_CLIENT_FINALITY_UPDATE_CAPELLA_MAX: LazyLock = LazyLock::new(|| { + LightClientFinalityUpdate::::ssz_max_len_for_fork(ForkName::Capella) +}); +pub static LIGHT_CLIENT_FINALITY_UPDATE_DENEB_MAX: LazyLock = LazyLock::new(|| { + 
LightClientFinalityUpdate::::ssz_max_len_for_fork(ForkName::Deneb) +}); +pub static LIGHT_CLIENT_FINALITY_UPDATE_ELECTRA_MAX: LazyLock = LazyLock::new(|| { + LightClientFinalityUpdate::::ssz_max_len_for_fork(ForkName::Electra) +}); +pub static LIGHT_CLIENT_OPTIMISTIC_UPDATE_CAPELLA_MAX: LazyLock = LazyLock::new(|| { + LightClientOptimisticUpdate::::ssz_max_len_for_fork(ForkName::Capella) +}); +pub static LIGHT_CLIENT_OPTIMISTIC_UPDATE_DENEB_MAX: LazyLock = LazyLock::new(|| { + LightClientOptimisticUpdate::::ssz_max_len_for_fork(ForkName::Deneb) +}); +pub static LIGHT_CLIENT_OPTIMISTIC_UPDATE_ELECTRA_MAX: LazyLock = LazyLock::new(|| { + LightClientOptimisticUpdate::::ssz_max_len_for_fork(ForkName::Electra) +}); +pub static LIGHT_CLIENT_BOOTSTRAP_CAPELLA_MAX: LazyLock = LazyLock::new(|| { + LightClientBootstrap::::ssz_max_len_for_fork(ForkName::Capella) +}); +pub static LIGHT_CLIENT_BOOTSTRAP_DENEB_MAX: LazyLock = + LazyLock::new(|| LightClientBootstrap::::ssz_max_len_for_fork(ForkName::Deneb)); +pub static LIGHT_CLIENT_BOOTSTRAP_ELECTRA_MAX: LazyLock = LazyLock::new(|| { + LightClientBootstrap::::ssz_max_len_for_fork(ForkName::Electra) +}); /// The protocol prefix the RPC protocol id. 
const PROTOCOL_PREFIX: &str = "/eth2/beacon_chain/req"; diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index 0ad7f53ee7..511cbc3e3c 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -33,7 +33,6 @@ smallvec = { workspace = true } rand = { workspace = true } fnv = { workspace = true } rlp = "0.5.0" -lazy_static = { workspace = true } lighthouse_metrics = { workspace = true } logging = { workspace = true } task_executor = { workspace = true } diff --git a/beacon_node/network/src/metrics.rs b/beacon_node/network/src/metrics.rs index 0fadb51edb..bb1e546870 100644 --- a/beacon_node/network/src/metrics.rs +++ b/beacon_node/network/src/metrics.rs @@ -5,347 +5,500 @@ use beacon_chain::{ sync_committee_verification::Error as SyncCommitteeError, }; use fnv::FnvHashMap; -use lazy_static::lazy_static; pub use lighthouse_metrics::*; use lighthouse_network::{ peer_manager::peerdb::client::ClientKind, types::GossipKind, GossipTopic, Gossipsub, NetworkGlobals, }; -use std::sync::Arc; +use std::sync::{Arc, LazyLock}; use strum::IntoEnumIterator; use types::EthSpec; -lazy_static! 
{ +pub static BEACON_BLOCK_MESH_PEERS_PER_CLIENT: LazyLock> = + LazyLock::new(|| { + try_create_int_gauge_vec( + "block_mesh_peers_per_client", + "Number of mesh peers for BeaconBlock topic per client", + &["Client"], + ) + }); - pub static ref BEACON_BLOCK_MESH_PEERS_PER_CLIENT: Result = - try_create_int_gauge_vec( - "block_mesh_peers_per_client", - "Number of mesh peers for BeaconBlock topic per client", - &["Client"] - ); - - pub static ref BEACON_AGGREGATE_AND_PROOF_MESH_PEERS_PER_CLIENT: Result = +pub static BEACON_AGGREGATE_AND_PROOF_MESH_PEERS_PER_CLIENT: LazyLock> = + LazyLock::new(|| { try_create_int_gauge_vec( "beacon_aggregate_and_proof_mesh_peers_per_client", "Number of mesh peers for BeaconAggregateAndProof topic per client", - &["Client"] - ); + &["Client"], + ) + }); - /* - * Attestation subnet subscriptions - */ - pub static ref SUBNET_SUBSCRIPTION_REQUESTS: Result = try_create_int_counter( +/* + * Attestation subnet subscriptions + */ +pub static SUBNET_SUBSCRIPTION_REQUESTS: LazyLock> = LazyLock::new(|| { + try_create_int_counter( "validator_attestation_subnet_subscriptions_total", - "Count of validator attestation subscription requests." - ); - pub static ref SUBNET_SUBSCRIPTION_AGGREGATOR_REQUESTS: Result = try_create_int_counter( - "validator_subnet_subscriptions_aggregator_total", - "Count of validator subscription requests where the subscriber is an aggregator." - ); - pub static ref SYNC_COMMITTEE_SUBSCRIPTION_REQUESTS: Result = try_create_int_counter( - "validator_sync_committee_subnet_subscriptions_total", - "Count of validator sync committee subscription requests." 
- ); + "Count of validator attestation subscription requests.", + ) +}); +pub static SUBNET_SUBSCRIPTION_AGGREGATOR_REQUESTS: LazyLock> = + LazyLock::new(|| { + try_create_int_counter( + "validator_subnet_subscriptions_aggregator_total", + "Count of validator subscription requests where the subscriber is an aggregator.", + ) + }); +pub static SYNC_COMMITTEE_SUBSCRIPTION_REQUESTS: LazyLock> = + LazyLock::new(|| { + try_create_int_counter( + "validator_sync_committee_subnet_subscriptions_total", + "Count of validator sync committee subscription requests.", + ) + }); - /* - * Gossip processor - */ +/* + * Gossip processor + */ - // Gossip blocks. - pub static ref BEACON_PROCESSOR_GOSSIP_BLOCK_VERIFIED_TOTAL: Result = try_create_int_counter( - "beacon_processor_gossip_block_verified_total", - "Total number of gossip blocks verified for propagation." - ); - pub static ref BEACON_PROCESSOR_GOSSIP_BLOCK_IMPORTED_TOTAL: Result = try_create_int_counter( - "beacon_processor_gossip_block_imported_total", - "Total number of gossip blocks imported to fork choice, etc." - ); - pub static ref BEACON_PROCESSOR_GOSSIP_BLOCK_REQUEUED_TOTAL: Result = try_create_int_counter( +// Gossip blocks. +pub static BEACON_PROCESSOR_GOSSIP_BLOCK_VERIFIED_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_counter( + "beacon_processor_gossip_block_verified_total", + "Total number of gossip blocks verified for propagation.", + ) + }); +pub static BEACON_PROCESSOR_GOSSIP_BLOCK_IMPORTED_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_counter( + "beacon_processor_gossip_block_imported_total", + "Total number of gossip blocks imported to fork choice, etc.", + ) + }); +pub static BEACON_PROCESSOR_GOSSIP_BLOCK_REQUEUED_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_counter( "beacon_processor_gossip_block_requeued_total", "Total number of gossip blocks that arrived early and were re-queued for later processing." 
- ); - pub static ref BEACON_PROCESSOR_GOSSIP_BLOCK_EARLY_SECONDS: Result = try_create_histogram( + ) + }); +pub static BEACON_PROCESSOR_GOSSIP_BLOCK_EARLY_SECONDS: LazyLock> = + LazyLock::new(|| { + try_create_histogram( "beacon_processor_gossip_block_early_seconds", "Whenever a gossip block is received early this metrics is set to how early that block was." - ); - pub static ref BEACON_PROCESSOR_GOSSIP_BLOB_VERIFIED_TOTAL: Result = try_create_int_counter( - "beacon_processor_gossip_blob_verified_total", - "Total number of gossip blob verified for propagation." - ); - pub static ref BEACON_PROCESSOR_GOSSIP_DATA_COLUMN_SIDECAR_VERIFIED_TOTAL: Result = try_create_int_counter( + ) + }); +pub static BEACON_PROCESSOR_GOSSIP_BLOB_VERIFIED_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_counter( + "beacon_processor_gossip_blob_verified_total", + "Total number of gossip blob verified for propagation.", + ) + }); +pub static BEACON_PROCESSOR_GOSSIP_DATA_COLUMN_SIDECAR_VERIFIED_TOTAL: LazyLock< + Result, +> = LazyLock::new(|| { + try_create_int_counter( "beacon_processor_gossip_data_column_verified_total", - "Total number of gossip data column sidecar verified for propagation." - ); - // Gossip Exits. - pub static ref BEACON_PROCESSOR_EXIT_VERIFIED_TOTAL: Result = try_create_int_counter( - "beacon_processor_exit_verified_total", - "Total number of voluntary exits verified for propagation." - ); - pub static ref BEACON_PROCESSOR_EXIT_IMPORTED_TOTAL: Result = try_create_int_counter( - "beacon_processor_exit_imported_total", - "Total number of voluntary exits imported to the op pool." - ); - // Gossip proposer slashings. - pub static ref BEACON_PROCESSOR_PROPOSER_SLASHING_VERIFIED_TOTAL: Result = try_create_int_counter( - "beacon_processor_proposer_slashing_verified_total", - "Total number of proposer slashings verified for propagation." 
- ); - pub static ref BEACON_PROCESSOR_PROPOSER_SLASHING_IMPORTED_TOTAL: Result = try_create_int_counter( - "beacon_processor_proposer_slashing_imported_total", - "Total number of proposer slashings imported to the op pool." - ); - // Gossip attester slashings. - pub static ref BEACON_PROCESSOR_ATTESTER_SLASHING_VERIFIED_TOTAL: Result = try_create_int_counter( - "beacon_processor_attester_slashing_verified_total", - "Total number of attester slashings verified for propagation." - ); - pub static ref BEACON_PROCESSOR_ATTESTER_SLASHING_IMPORTED_TOTAL: Result = try_create_int_counter( - "beacon_processor_attester_slashing_imported_total", - "Total number of attester slashings imported to the op pool." - ); - // Gossip BLS to execution changes. - pub static ref BEACON_PROCESSOR_BLS_TO_EXECUTION_CHANGE_VERIFIED_TOTAL: Result = try_create_int_counter( - "beacon_processor_bls_to_execution_change_verified_total", - "Total number of address changes verified for propagation." - ); - pub static ref BEACON_PROCESSOR_BLS_TO_EXECUTION_CHANGE_IMPORTED_TOTAL: Result = try_create_int_counter( - "beacon_processor_bls_to_execution_change_imported_total", - "Total number of address changes imported to the op pool." - ); -} + "Total number of gossip data column sidecar verified for propagation.", + ) +}); +// Gossip Exits. +pub static BEACON_PROCESSOR_EXIT_VERIFIED_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_counter( + "beacon_processor_exit_verified_total", + "Total number of voluntary exits verified for propagation.", + ) + }); +pub static BEACON_PROCESSOR_EXIT_IMPORTED_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_counter( + "beacon_processor_exit_imported_total", + "Total number of voluntary exits imported to the op pool.", + ) + }); +// Gossip proposer slashings. 
+pub static BEACON_PROCESSOR_PROPOSER_SLASHING_VERIFIED_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_counter( + "beacon_processor_proposer_slashing_verified_total", + "Total number of proposer slashings verified for propagation.", + ) + }); +pub static BEACON_PROCESSOR_PROPOSER_SLASHING_IMPORTED_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_counter( + "beacon_processor_proposer_slashing_imported_total", + "Total number of proposer slashings imported to the op pool.", + ) + }); +// Gossip attester slashings. +pub static BEACON_PROCESSOR_ATTESTER_SLASHING_VERIFIED_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_counter( + "beacon_processor_attester_slashing_verified_total", + "Total number of attester slashings verified for propagation.", + ) + }); +pub static BEACON_PROCESSOR_ATTESTER_SLASHING_IMPORTED_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_counter( + "beacon_processor_attester_slashing_imported_total", + "Total number of attester slashings imported to the op pool.", + ) + }); +// Gossip BLS to execution changes. +pub static BEACON_PROCESSOR_BLS_TO_EXECUTION_CHANGE_VERIFIED_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_counter( + "beacon_processor_bls_to_execution_change_verified_total", + "Total number of address changes verified for propagation.", + ) + }); +pub static BEACON_PROCESSOR_BLS_TO_EXECUTION_CHANGE_IMPORTED_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_counter( + "beacon_processor_bls_to_execution_change_imported_total", + "Total number of address changes imported to the op pool.", + ) + }); -// Need to split up this `lazy_static!` due to recursion limits. -lazy_static! { - // Rpc blocks. - pub static ref BEACON_PROCESSOR_RPC_BLOCK_IMPORTED_TOTAL: Result = try_create_int_counter( - "beacon_processor_rpc_block_imported_total", - "Total number of gossip blocks imported to fork choice, etc." - ); - // Chain segments. 
- pub static ref BEACON_PROCESSOR_CHAIN_SEGMENT_SUCCESS_TOTAL: Result = try_create_int_counter( - "beacon_processor_chain_segment_success_total", - "Total number of chain segments successfully processed." - ); - pub static ref BEACON_PROCESSOR_BACKFILL_CHAIN_SEGMENT_SUCCESS_TOTAL: Result = try_create_int_counter( - "beacon_processor_backfill_chain_segment_success_total", - "Total number of chain segments successfully processed." - ); - pub static ref BEACON_PROCESSOR_CHAIN_SEGMENT_FAILED_TOTAL: Result = try_create_int_counter( - "beacon_processor_chain_segment_failed_total", - "Total number of chain segments that failed processing." - ); - pub static ref BEACON_PROCESSOR_BACKFILL_CHAIN_SEGMENT_FAILED_TOTAL: Result = try_create_int_counter( - "beacon_processor_backfill_chain_segment_failed_total", - "Total number of backfill chain segments that failed processing." - ); - // Unaggregated attestations. - pub static ref BEACON_PROCESSOR_UNAGGREGATED_ATTESTATION_VERIFIED_TOTAL: Result = try_create_int_counter( - "beacon_processor_unaggregated_attestation_verified_total", - "Total number of unaggregated attestations verified for gossip." - ); - pub static ref BEACON_PROCESSOR_UNAGGREGATED_ATTESTATION_IMPORTED_TOTAL: Result = try_create_int_counter( - "beacon_processor_unaggregated_attestation_imported_total", - "Total number of unaggregated attestations imported to fork choice, etc." - ); - pub static ref BEACON_PROCESSOR_UNAGGREGATED_ATTESTATION_REQUEUED_TOTAL: Result = try_create_int_counter( +// Rpc blocks. +pub static BEACON_PROCESSOR_RPC_BLOCK_IMPORTED_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_counter( + "beacon_processor_rpc_block_imported_total", + "Total number of gossip blocks imported to fork choice, etc.", + ) + }); +// Chain segments. 
+pub static BEACON_PROCESSOR_CHAIN_SEGMENT_SUCCESS_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_counter( + "beacon_processor_chain_segment_success_total", + "Total number of chain segments successfully processed.", + ) + }); +pub static BEACON_PROCESSOR_BACKFILL_CHAIN_SEGMENT_SUCCESS_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_counter( + "beacon_processor_backfill_chain_segment_success_total", + "Total number of chain segments successfully processed.", + ) + }); +pub static BEACON_PROCESSOR_CHAIN_SEGMENT_FAILED_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_counter( + "beacon_processor_chain_segment_failed_total", + "Total number of chain segments that failed processing.", + ) + }); +pub static BEACON_PROCESSOR_BACKFILL_CHAIN_SEGMENT_FAILED_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_counter( + "beacon_processor_backfill_chain_segment_failed_total", + "Total number of backfill chain segments that failed processing.", + ) + }); +// Unaggregated attestations. +pub static BEACON_PROCESSOR_UNAGGREGATED_ATTESTATION_VERIFIED_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_counter( + "beacon_processor_unaggregated_attestation_verified_total", + "Total number of unaggregated attestations verified for gossip.", + ) + }); +pub static BEACON_PROCESSOR_UNAGGREGATED_ATTESTATION_IMPORTED_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_counter( + "beacon_processor_unaggregated_attestation_imported_total", + "Total number of unaggregated attestations imported to fork choice, etc.", + ) + }); +pub static BEACON_PROCESSOR_UNAGGREGATED_ATTESTATION_REQUEUED_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_counter( "beacon_processor_unaggregated_attestation_requeued_total", "Total number of unaggregated attestations that referenced an unknown block and were re-queued." - ); - // Aggregated attestations. 
- pub static ref BEACON_PROCESSOR_AGGREGATED_ATTESTATION_VERIFIED_TOTAL: Result = try_create_int_counter( - "beacon_processor_aggregated_attestation_verified_total", - "Total number of aggregated attestations verified for gossip." - ); - pub static ref BEACON_PROCESSOR_AGGREGATED_ATTESTATION_IMPORTED_TOTAL: Result = try_create_int_counter( - "beacon_processor_aggregated_attestation_imported_total", - "Total number of aggregated attestations imported to fork choice, etc." - ); - pub static ref BEACON_PROCESSOR_AGGREGATED_ATTESTATION_REQUEUED_TOTAL: Result = try_create_int_counter( + ) + }); +// Aggregated attestations. +pub static BEACON_PROCESSOR_AGGREGATED_ATTESTATION_VERIFIED_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_counter( + "beacon_processor_aggregated_attestation_verified_total", + "Total number of aggregated attestations verified for gossip.", + ) + }); +pub static BEACON_PROCESSOR_AGGREGATED_ATTESTATION_IMPORTED_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_counter( + "beacon_processor_aggregated_attestation_imported_total", + "Total number of aggregated attestations imported to fork choice, etc.", + ) + }); +pub static BEACON_PROCESSOR_AGGREGATED_ATTESTATION_REQUEUED_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_counter( "beacon_processor_aggregated_attestation_requeued_total", "Total number of aggregated attestations that referenced an unknown block and were re-queued." - ); - // Sync committee messages. - pub static ref BEACON_PROCESSOR_SYNC_MESSAGE_VERIFIED_TOTAL: Result = try_create_int_counter( - "beacon_processor_sync_message_verified_total", - "Total number of sync committee messages verified for gossip." - ); - pub static ref BEACON_PROCESSOR_SYNC_MESSAGE_IMPORTED_TOTAL: Result = try_create_int_counter( - "beacon_processor_sync_message_imported_total", - "Total number of sync committee messages imported to fork choice, etc." - ); - // Sync contribution. 
- pub static ref BEACON_PROCESSOR_SYNC_CONTRIBUTION_VERIFIED_TOTAL: Result = try_create_int_counter( - "beacon_processor_sync_contribution_verified_total", - "Total number of sync committee contributions verified for gossip." - ); + ) + }); +// Sync committee messages. +pub static BEACON_PROCESSOR_SYNC_MESSAGE_VERIFIED_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_counter( + "beacon_processor_sync_message_verified_total", + "Total number of sync committee messages verified for gossip.", + ) + }); +pub static BEACON_PROCESSOR_SYNC_MESSAGE_IMPORTED_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_counter( + "beacon_processor_sync_message_imported_total", + "Total number of sync committee messages imported to fork choice, etc.", + ) + }); +// Sync contribution. +pub static BEACON_PROCESSOR_SYNC_CONTRIBUTION_VERIFIED_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_counter( + "beacon_processor_sync_contribution_verified_total", + "Total number of sync committee contributions verified for gossip.", + ) + }); - pub static ref BEACON_PROCESSOR_SYNC_CONTRIBUTION_IMPORTED_TOTAL: Result = try_create_int_counter( - "beacon_processor_sync_contribution_imported_total", - "Total number of sync committee contributions imported to fork choice, etc." 
- ); +pub static BEACON_PROCESSOR_SYNC_CONTRIBUTION_IMPORTED_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_counter( + "beacon_processor_sync_contribution_imported_total", + "Total number of sync committee contributions imported to fork choice, etc.", + ) + }); - /// Errors and Debugging Stats - pub static ref GOSSIP_ATTESTATION_ERRORS_PER_TYPE: Result = +/// Errors and Debugging Stats +pub static GOSSIP_ATTESTATION_ERRORS_PER_TYPE: LazyLock> = + LazyLock::new(|| { try_create_int_counter_vec( "gossipsub_attestation_errors_per_type", "Gossipsub attestation errors per error type", - &["type"] - ); - pub static ref GOSSIP_SYNC_COMMITTEE_ERRORS_PER_TYPE: Result = + &["type"], + ) + }); +pub static GOSSIP_SYNC_COMMITTEE_ERRORS_PER_TYPE: LazyLock> = + LazyLock::new(|| { try_create_int_counter_vec( "gossipsub_sync_committee_errors_per_type", "Gossipsub sync_committee errors per error type", - &["type"] - ); - pub static ref GOSSIP_FINALITY_UPDATE_ERRORS_PER_TYPE: Result = + &["type"], + ) + }); +pub static GOSSIP_FINALITY_UPDATE_ERRORS_PER_TYPE: LazyLock> = + LazyLock::new(|| { try_create_int_counter_vec( "gossipsub_light_client_finality_update_errors_per_type", "Gossipsub light_client_finality_update errors per error type", - &["type"] - ); - pub static ref GOSSIP_OPTIMISTIC_UPDATE_ERRORS_PER_TYPE: Result = + &["type"], + ) + }); +pub static GOSSIP_OPTIMISTIC_UPDATE_ERRORS_PER_TYPE: LazyLock> = + LazyLock::new(|| { try_create_int_counter_vec( "gossipsub_light_client_optimistic_update_errors_per_type", "Gossipsub light_client_optimistic_update errors per error type", - &["type"] - ); + &["type"], + ) + }); - - /* - * Network queue metrics - */ - pub static ref NETWORK_RECEIVE_EVENTS: Result = try_create_int_counter_vec( +/* + * Network queue metrics + */ +pub static NETWORK_RECEIVE_EVENTS: LazyLock> = LazyLock::new(|| { + try_create_int_counter_vec( "network_receive_events", "Count of events received by the channel to the network service", - &["type"] - ); - pub 
static ref NETWORK_RECEIVE_TIMES: Result = try_create_histogram_vec( + &["type"], + ) +}); +pub static NETWORK_RECEIVE_TIMES: LazyLock> = LazyLock::new(|| { + try_create_histogram_vec( "network_receive_times", "Time taken for network to handle an event sent to the network service.", - &["type"] - ); -} + &["type"], + ) +}); -lazy_static! { - - /* - * Sync related metrics - */ - pub static ref PEERS_PER_SYNC_TYPE: Result = try_create_int_gauge_vec( +/* + * Sync related metrics + */ +pub static PEERS_PER_SYNC_TYPE: LazyLock> = LazyLock::new(|| { + try_create_int_gauge_vec( "sync_peers_per_status", "Number of connected peers per sync status type", - &["sync_status"] - ); - pub static ref SYNCING_CHAINS_COUNT: Result = try_create_int_gauge_vec( + &["sync_status"], + ) +}); +pub static SYNCING_CHAINS_COUNT: LazyLock> = LazyLock::new(|| { + try_create_int_gauge_vec( "sync_range_chains", "Number of Syncing chains in range, per range type", - &["range_type"] - ); - pub static ref SYNCING_CHAINS_REMOVED: Result = try_create_int_counter_vec( + &["range_type"], + ) +}); +pub static SYNCING_CHAINS_REMOVED: LazyLock> = LazyLock::new(|| { + try_create_int_counter_vec( "sync_range_removed_chains_total", "Total count of range syncing chains removed per range type", - &["range_type"] - ); - pub static ref SYNCING_CHAINS_ADDED: Result = try_create_int_counter_vec( + &["range_type"], + ) +}); +pub static SYNCING_CHAINS_ADDED: LazyLock> = LazyLock::new(|| { + try_create_int_counter_vec( "sync_range_added_chains_total", "Total count of range syncing chains added per range type", - &["range_type"] - ); - pub static ref SYNCING_CHAINS_DROPPED_BLOCKS: Result = try_create_int_counter_vec( + &["range_type"], + ) +}); +pub static SYNCING_CHAINS_DROPPED_BLOCKS: LazyLock> = LazyLock::new(|| { + try_create_int_counter_vec( "sync_range_chains_dropped_blocks_total", "Total count of dropped blocks when removing a syncing chain per range type", - &["range_type"] - ); - pub static ref 
SYNCING_CHAINS_IGNORED_BLOCKS: Result = try_create_int_counter_vec( + &["range_type"], + ) +}); +pub static SYNCING_CHAINS_IGNORED_BLOCKS: LazyLock> = LazyLock::new(|| { + try_create_int_counter_vec( "sync_range_chains_ignored_blocks_total", "Total count of ignored blocks when processing a syncing chain batch per chain type", - &["chain_type"] - ); - pub static ref SYNCING_CHAINS_PROCESSED_BATCHES: Result = try_create_int_counter_vec( - "sync_range_chains_processed_batches_total", - "Total count of processed batches in a syncing chain batch per chain type", - &["chain_type"] - ); - pub static ref SYNCING_CHAIN_BATCH_AWAITING_PROCESSING: Result = try_create_histogram_with_buckets( - "sync_range_chain_batch_awaiting_processing_seconds", - "Time range sync batches spend in AwaitingProcessing state", - Ok(vec![0.01,0.02,0.05,0.1,0.2,0.5,1.0,2.0,5.0,10.0,20.0]) - ); - pub static ref SYNC_SINGLE_BLOCK_LOOKUPS: Result = try_create_int_gauge( + &["chain_type"], + ) +}); +pub static SYNCING_CHAINS_PROCESSED_BATCHES: LazyLock> = + LazyLock::new(|| { + try_create_int_counter_vec( + "sync_range_chains_processed_batches_total", + "Total count of processed batches in a syncing chain batch per chain type", + &["chain_type"], + ) + }); +pub static SYNCING_CHAIN_BATCH_AWAITING_PROCESSING: LazyLock> = + LazyLock::new(|| { + try_create_histogram_with_buckets( + "sync_range_chain_batch_awaiting_processing_seconds", + "Time range sync batches spend in AwaitingProcessing state", + Ok(vec![ + 0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1.0, 2.0, 5.0, 10.0, 20.0, + ]), + ) + }); +pub static SYNC_SINGLE_BLOCK_LOOKUPS: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( "sync_single_block_lookups", - "Number of single block lookups underway" - ); - pub static ref SYNC_PARENT_BLOCK_LOOKUPS: Result = try_create_int_gauge( - "sync_parent_block_lookups", - "Number of parent block lookups underway" - ); - pub static ref SYNC_LOOKUP_CREATED: Result = try_create_int_counter( + "Number of single block 
lookups underway", + ) +}); +pub static SYNC_LOOKUP_CREATED: LazyLock> = LazyLock::new(|| { + try_create_int_counter( "sync_lookups_created_total", "Total count of sync lookups created", - ); - pub static ref SYNC_LOOKUP_DROPPED: Result = try_create_int_counter_vec( + ) +}); +pub static SYNC_LOOKUP_DROPPED: LazyLock> = LazyLock::new(|| { + try_create_int_counter_vec( "sync_lookups_dropped_total", "Total count of sync lookups dropped by reason", - &["reason"] - ); - pub static ref SYNC_LOOKUP_COMPLETED: Result = try_create_int_counter( + &["reason"], + ) +}); +pub static SYNC_LOOKUP_COMPLETED: LazyLock> = LazyLock::new(|| { + try_create_int_counter( "sync_lookups_completed_total", "Total count of sync lookups completed", - ); - pub static ref SYNC_LOOKUPS_STUCK: Result = try_create_int_counter( + ) +}); +pub static SYNC_LOOKUPS_STUCK: LazyLock> = LazyLock::new(|| { + try_create_int_counter( "sync_lookups_stuck_total", "Total count of sync lookups that are stuck and dropped", - ); - pub static ref SYNC_ACTIVE_NETWORK_REQUESTS: Result = try_create_int_gauge_vec( + ) +}); +pub static SYNC_ACTIVE_NETWORK_REQUESTS: LazyLock> = LazyLock::new(|| { + try_create_int_gauge_vec( "sync_active_network_requests", "Current count of active network requests from sync", &["type"], - ); - pub static ref SYNC_UNKNOWN_NETWORK_REQUESTS: Result = try_create_int_counter_vec( + ) +}); +pub static SYNC_UNKNOWN_NETWORK_REQUESTS: LazyLock> = LazyLock::new(|| { + try_create_int_counter_vec( "sync_unknwon_network_request", "Total count of network messages received for unknown active requests", &["type"], - ); + ) +}); - /* - * Block Delay Metrics - */ - pub static ref BEACON_BLOCK_DELAY_GOSSIP: Result = try_create_int_gauge( +/* + * Block Delay Metrics + */ +pub static BEACON_BLOCK_DELAY_GOSSIP: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( "beacon_block_delay_gossip", - "The first time we see this block from gossip as a delay from the start of the slot" - ); - pub static ref 
BEACON_BLOCK_DELAY_GOSSIP_VERIFICATION: Result = try_create_int_gauge( + "The first time we see this block from gossip as a delay from the start of the slot", + ) +}); +pub static BEACON_BLOCK_DELAY_GOSSIP_VERIFICATION: LazyLock> = LazyLock::new( + || { + try_create_int_gauge( "beacon_block_delay_gossip_verification", "Keeps track of the time delay from the start of the slot to the point we propagate the block" - ); - pub static ref BEACON_BLOCK_DELAY_FULL_VERIFICATION: Result = try_create_int_gauge( + ) + }, +); +pub static BEACON_BLOCK_DELAY_FULL_VERIFICATION: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( "beacon_block_delay_full_verification", - "The time it takes to verify a beacon block." - ); + "The time it takes to verify a beacon block.", + ) +}); - pub static ref BEACON_BLOCK_DELAY_GOSSIP_ARRIVED_LATE_TOTAL: Result = try_create_int_counter( +pub static BEACON_BLOCK_DELAY_GOSSIP_ARRIVED_LATE_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_counter( "beacon_block_delay_gossip_arrived_late_total", "Count of times when a gossip block arrived from the network later than the attestation deadline.", - ); + ) + }); - /* - * Blob Delay Metrics - */ - pub static ref BEACON_BLOB_DELAY_GOSSIP: Result = try_create_int_gauge( +/* + * Blob Delay Metrics + */ +pub static BEACON_BLOB_DELAY_GOSSIP: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( "beacon_blob_delay_gossip_last_delay", - "The first time we see this blob as a delay from the start of the slot" - ); + "The first time we see this blob as a delay from the start of the slot", + ) +}); - pub static ref BEACON_BLOB_DELAY_GOSSIP_VERIFICATION: Result = try_create_int_gauge( +pub static BEACON_BLOB_DELAY_GOSSIP_VERIFICATION: LazyLock> = LazyLock::new( + || { + try_create_int_gauge( "beacon_blob_delay_gossip_verification", "Keeps track of the time delay from the start of the slot to the point we propagate the blob" - ); - pub static ref BEACON_BLOB_DELAY_FULL_VERIFICATION: Result = 
try_create_int_gauge( + ) + }, +); +pub static BEACON_BLOB_DELAY_FULL_VERIFICATION: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( "beacon_blob_last_full_verification_delay", - "The time it takes to verify a beacon blob" - ); + "The time it takes to verify a beacon blob", + ) +}); - pub static ref BEACON_BLOB_RPC_SLOT_START_DELAY_TIME: Result = try_create_histogram_with_buckets( +pub static BEACON_BLOB_RPC_SLOT_START_DELAY_TIME: LazyLock> = LazyLock::new( + || { + try_create_histogram_with_buckets( "beacon_blob_rpc_slot_start_delay_time", "Duration between when a blob is received over rpc and the start of the slot it belongs to.", // Create a custom bucket list for greater granularity in block delay @@ -354,37 +507,46 @@ lazy_static! { // [0.1, 0.2, 0.5, 1, 2, 5, 10, 20, 50] //decimal_buckets(-1,2) - ); + ) + }, +); - pub static ref BEACON_BLOB_GOSSIP_ARRIVED_LATE_TOTAL: Result = try_create_int_counter( +pub static BEACON_BLOB_GOSSIP_ARRIVED_LATE_TOTAL: LazyLock> = LazyLock::new( + || { + try_create_int_counter( "beacon_blob_gossip_arrived_late_total", "Count of times when a gossip blob arrived from the network later than the attestation deadline.", - ); + ) + }, +); - pub static ref BEACON_DATA_COLUMN_DELAY_GOSSIP: Result = try_create_int_gauge( +pub static BEACON_DATA_COLUMN_DELAY_GOSSIP: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( "beacon_data_column_delay_gossip_last_delay", - "The first time we see this data column as a delay from the start of the slot" - ); + "The first time we see this data column as a delay from the start of the slot", + ) +}); - pub static ref BEACON_DATA_COLUMN_DELAY_GOSSIP_VERIFICATION: Result = try_create_int_gauge( +pub static BEACON_DATA_COLUMN_DELAY_GOSSIP_VERIFICATION: LazyLock> = LazyLock::new( + || { + try_create_int_gauge( "beacon_data_column_delay_gossip_verification", "Keeps track of the time delay from the start of the slot to the point we propagate the data column" - ); + ) + }, +); - pub static ref 
BEACON_DATA_COLUMN_DELAY_FULL_VERIFICATION: Result = try_create_int_gauge( - "beacon_data_column_last_full_verification_delay", - "The time it takes to verify a beacon data column" - ); - - - /* - * Light client update reprocessing queue metrics. - */ - pub static ref BEACON_PROCESSOR_REPROCESSING_QUEUE_SENT_OPTIMISTIC_UPDATES: Result = try_create_int_counter( +/* + * Light client update reprocessing queue metrics. + */ +pub static BEACON_PROCESSOR_REPROCESSING_QUEUE_SENT_OPTIMISTIC_UPDATES: LazyLock< + Result, +> = LazyLock::new(|| { + try_create_int_counter( "beacon_processor_reprocessing_queue_sent_optimistic_updates", "Number of queued light client optimistic updates where as matching block has been imported." - ); -} + ) +}); pub fn register_finality_update_error(error: &LightClientFinalityUpdateError) { inc_counter_vec(&GOSSIP_FINALITY_UPDATE_ERRORS_PER_TYPE, &[error.as_ref()]); diff --git a/beacon_node/network/src/subnet_service/tests/mod.rs b/beacon_node/network/src/subnet_service/tests/mod.rs index 74f3f59df3..e8d9218ec4 100644 --- a/beacon_node/network/src/subnet_service/tests/mod.rs +++ b/beacon_node/network/src/subnet_service/tests/mod.rs @@ -6,12 +6,11 @@ use beacon_chain::{ }; use futures::prelude::*; use genesis::{generate_deterministic_keypairs, interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH}; -use lazy_static::lazy_static; use lighthouse_network::NetworkConfig; use slog::{o, Drain, Logger}; use sloggers::{null::NullLoggerBuilder, Build}; use slot_clock::{SlotClock, SystemTimeSlotClock}; -use std::sync::Arc; +use std::sync::{Arc, LazyLock}; use std::time::{Duration, SystemTime}; use store::config::StoreConfig; use store::{HotColdDB, MemoryStore}; @@ -110,9 +109,7 @@ fn get_logger(log_level: Option) -> Logger { } } -lazy_static! 
{ - static ref CHAIN: TestBeaconChain = TestBeaconChain::new_with_system_clock(); -} +static CHAIN: LazyLock = LazyLock::new(TestBeaconChain::new_with_system_clock); fn get_attestation_service( log_level: Option, diff --git a/beacon_node/operation_pool/Cargo.toml b/beacon_node/operation_pool/Cargo.toml index 36595994f0..cbf6284f2a 100644 --- a/beacon_node/operation_pool/Cargo.toml +++ b/beacon_node/operation_pool/Cargo.toml @@ -7,7 +7,6 @@ edition = { workspace = true } [dependencies] derivative = { workspace = true } itertools = { workspace = true } -lazy_static = { workspace = true } lighthouse_metrics = { workspace = true } parking_lot = { workspace = true } types = { workspace = true } diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index a1c9ada03a..c60480ef37 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -797,20 +797,19 @@ mod release_tests { use beacon_chain::test_utils::{ test_spec, BeaconChainHarness, EphemeralHarnessType, RelativeSyncCommittee, }; - use lazy_static::lazy_static; use maplit::hashset; use state_processing::epoch_cache::initialize_epoch_cache; use state_processing::{common::get_attesting_indices_from_state, VerifyOperation}; use std::collections::BTreeSet; + use std::sync::LazyLock; use types::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT; use types::*; pub const MAX_VALIDATOR_COUNT: usize = 4 * 32 * 128; - lazy_static! { - /// A cached set of keys. - static ref KEYPAIRS: Vec = types::test_utils::generate_deterministic_keypairs(MAX_VALIDATOR_COUNT); - } + /// A cached set of keys. 
+ static KEYPAIRS: LazyLock> = + LazyLock::new(|| types::test_utils::generate_deterministic_keypairs(MAX_VALIDATOR_COUNT)); fn get_harness( validator_count: usize, diff --git a/beacon_node/operation_pool/src/metrics.rs b/beacon_node/operation_pool/src/metrics.rs index 6fd8567cef..e2a8b43ed1 100644 --- a/beacon_node/operation_pool/src/metrics.rs +++ b/beacon_node/operation_pool/src/metrics.rs @@ -1,31 +1,40 @@ -use lazy_static::lazy_static; - pub use lighthouse_metrics::*; +use std::sync::LazyLock; -lazy_static! { - pub static ref BUILD_REWARD_CACHE_TIME: Result = try_create_histogram( +pub static BUILD_REWARD_CACHE_TIME: LazyLock> = LazyLock::new(|| { + try_create_histogram( "op_pool_build_reward_cache_time", - "Time to build the reward cache before packing attestations" - ); - pub static ref ATTESTATION_PREV_EPOCH_PACKING_TIME: Result = try_create_histogram( + "Time to build the reward cache before packing attestations", + ) +}); +pub static ATTESTATION_PREV_EPOCH_PACKING_TIME: LazyLock> = LazyLock::new(|| { + try_create_histogram( "op_pool_attestation_prev_epoch_packing_time", - "Time to pack previous epoch attestations" - ); - pub static ref ATTESTATION_CURR_EPOCH_PACKING_TIME: Result = try_create_histogram( + "Time to pack previous epoch attestations", + ) +}); +pub static ATTESTATION_CURR_EPOCH_PACKING_TIME: LazyLock> = LazyLock::new(|| { + try_create_histogram( "op_pool_attestation_curr_epoch_packing_time", - "Time to pack current epoch attestations" - ); - pub static ref NUM_PREV_EPOCH_ATTESTATIONS: Result = try_create_int_gauge( + "Time to pack current epoch attestations", + ) +}); +pub static NUM_PREV_EPOCH_ATTESTATIONS: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( "op_pool_prev_epoch_attestations", - "Number of valid attestations considered for packing from the previous epoch" - ); - pub static ref NUM_CURR_EPOCH_ATTESTATIONS: Result = try_create_int_gauge( + "Number of valid attestations considered for packing from the previous epoch", + ) +}); 
+pub static NUM_CURR_EPOCH_ATTESTATIONS: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( "op_pool_curr_epoch_attestations", - "Number of valid attestations considered for packing from the current epoch" - ); - pub static ref MAX_COVER_NON_ZERO_ITEMS: Result = try_create_int_gauge_vec( + "Number of valid attestations considered for packing from the current epoch", + ) +}); +pub static MAX_COVER_NON_ZERO_ITEMS: LazyLock> = LazyLock::new(|| { + try_create_int_gauge_vec( "op_pool_max_cover_non_zero_items", "Number of non-trivial items considered in a max coverage optimisation", - &["label"] - ); -} + &["label"], + ) +}); diff --git a/beacon_node/store/Cargo.toml b/beacon_node/store/Cargo.toml index 7bf1ef76be..b26eb2bb91 100644 --- a/beacon_node/store/Cargo.toml +++ b/beacon_node/store/Cargo.toml @@ -19,7 +19,6 @@ types = { workspace = true } state_processing = { workspace = true } slog = { workspace = true } serde = { workspace = true } -lazy_static = { workspace = true } lighthouse_metrics = { workspace = true } lru = { workspace = true } sloggers = { workspace = true } diff --git a/beacon_node/store/src/metrics.rs b/beacon_node/store/src/metrics.rs index 30c5207b53..af7b5e93e8 100644 --- a/beacon_node/store/src/metrics.rs +++ b/beacon_node/store/src/metrics.rs @@ -1,110 +1,156 @@ pub use lighthouse_metrics::{set_gauge, try_create_int_gauge, *}; use directory::size_of_dir; -use lazy_static::lazy_static; use std::path::Path; +use std::sync::LazyLock; -lazy_static! 
{ - /* - * General - */ - pub static ref DISK_DB_SIZE: Result = - try_create_int_gauge("store_disk_db_size", "Size of the hot on-disk database (bytes)"); - pub static ref FREEZER_DB_SIZE: Result = - try_create_int_gauge("store_freezer_db_size", "Size of the on-disk freezer database (bytes)"); - pub static ref DISK_DB_WRITE_BYTES: Result = try_create_int_counter_vec( +/* + * General + */ +pub static DISK_DB_SIZE: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( + "store_disk_db_size", + "Size of the hot on-disk database (bytes)", + ) +}); +pub static FREEZER_DB_SIZE: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( + "store_freezer_db_size", + "Size of the on-disk freezer database (bytes)", + ) +}); +pub static DISK_DB_WRITE_BYTES: LazyLock> = LazyLock::new(|| { + try_create_int_counter_vec( "store_disk_db_write_bytes_total", "Number of bytes attempted to be written to the hot on-disk DB", &["col"], - ); - pub static ref DISK_DB_READ_BYTES: Result = try_create_int_counter_vec( + ) +}); +pub static DISK_DB_READ_BYTES: LazyLock> = LazyLock::new(|| { + try_create_int_counter_vec( "store_disk_db_read_bytes_total", "Number of bytes read from the hot on-disk DB", &["col"], - ); - pub static ref DISK_DB_READ_COUNT: Result = try_create_int_counter_vec( + ) +}); +pub static DISK_DB_READ_COUNT: LazyLock> = LazyLock::new(|| { + try_create_int_counter_vec( "store_disk_db_read_count_total", "Total number of reads to the hot on-disk DB", &["col"], - ); - pub static ref DISK_DB_WRITE_COUNT: Result = try_create_int_counter_vec( + ) +}); +pub static DISK_DB_WRITE_COUNT: LazyLock> = LazyLock::new(|| { + try_create_int_counter_vec( "store_disk_db_write_count_total", "Total number of writes to the hot on-disk DB", &["col"], - ); - pub static ref DISK_DB_READ_TIMES: Result = try_create_histogram( + ) +}); +pub static DISK_DB_READ_TIMES: LazyLock> = LazyLock::new(|| { + try_create_histogram( "store_disk_db_read_seconds", - "Time taken to write bytes to store." 
- ); - pub static ref DISK_DB_WRITE_TIMES: Result = try_create_histogram( + "Time taken to write bytes to store.", + ) +}); +pub static DISK_DB_WRITE_TIMES: LazyLock> = LazyLock::new(|| { + try_create_histogram( "store_disk_db_write_seconds", - "Time taken to write bytes to store." - ); - pub static ref DISK_DB_EXISTS_COUNT: Result = try_create_int_counter_vec( + "Time taken to write bytes to store.", + ) +}); +pub static DISK_DB_EXISTS_COUNT: LazyLock> = LazyLock::new(|| { + try_create_int_counter_vec( "store_disk_db_exists_count_total", "Total number of checks if a key is in the hot on-disk DB", &["col"], - ); - pub static ref DISK_DB_DELETE_COUNT: Result = try_create_int_counter_vec( + ) +}); +pub static DISK_DB_DELETE_COUNT: LazyLock> = LazyLock::new(|| { + try_create_int_counter_vec( "store_disk_db_delete_count_total", "Total number of deletions from the hot on-disk DB", &["col"], - ); - /* - * Beacon State - */ - pub static ref BEACON_STATE_GET_COUNT: Result = try_create_int_counter( + ) +}); +/* + * Beacon State + */ +pub static BEACON_STATE_GET_COUNT: LazyLock> = LazyLock::new(|| { + try_create_int_counter( "store_beacon_state_get_total", - "Total number of beacon states requested from the store (cache or DB)" - ); - pub static ref BEACON_STATE_HOT_GET_COUNT: Result = try_create_int_counter( + "Total number of beacon states requested from the store (cache or DB)", + ) +}); +pub static BEACON_STATE_HOT_GET_COUNT: LazyLock> = LazyLock::new(|| { + try_create_int_counter( "store_beacon_state_hot_get_total", - "Total number of hot beacon states requested from the store (cache or DB)" - ); - pub static ref BEACON_STATE_READ_TIMES: Result = try_create_histogram( + "Total number of hot beacon states requested from the store (cache or DB)", + ) +}); +pub static BEACON_STATE_READ_TIMES: LazyLock> = LazyLock::new(|| { + try_create_histogram( "store_beacon_state_read_seconds", - "Total time required to read a BeaconState from the database" - ); - pub static ref 
BEACON_STATE_READ_OVERHEAD_TIMES: Result = try_create_histogram( + "Total time required to read a BeaconState from the database", + ) +}); +pub static BEACON_STATE_READ_OVERHEAD_TIMES: LazyLock> = LazyLock::new(|| { + try_create_histogram( "store_beacon_state_read_overhead_seconds", - "Overhead on reading a beacon state from the DB (e.g., decoding)" - ); - pub static ref BEACON_STATE_READ_COUNT: Result = try_create_int_counter( + "Overhead on reading a beacon state from the DB (e.g., decoding)", + ) +}); +pub static BEACON_STATE_READ_COUNT: LazyLock> = LazyLock::new(|| { + try_create_int_counter( "store_beacon_state_read_total", - "Total number of beacon state reads from the DB" - ); - pub static ref BEACON_STATE_READ_BYTES: Result = try_create_int_counter( + "Total number of beacon state reads from the DB", + ) +}); +pub static BEACON_STATE_READ_BYTES: LazyLock> = LazyLock::new(|| { + try_create_int_counter( "store_beacon_state_read_bytes_total", - "Total number of beacon state bytes read from the DB" - ); - pub static ref BEACON_STATE_WRITE_OVERHEAD_TIMES: Result = try_create_histogram( + "Total number of beacon state bytes read from the DB", + ) +}); +pub static BEACON_STATE_WRITE_OVERHEAD_TIMES: LazyLock> = LazyLock::new(|| { + try_create_histogram( "store_beacon_state_write_overhead_seconds", - "Overhead on writing a beacon state to the DB (e.g., encoding)" - ); - pub static ref BEACON_STATE_WRITE_COUNT: Result = try_create_int_counter( + "Overhead on writing a beacon state to the DB (e.g., encoding)", + ) +}); +pub static BEACON_STATE_WRITE_COUNT: LazyLock> = LazyLock::new(|| { + try_create_int_counter( "store_beacon_state_write_total", - "Total number of beacon state writes the DB" - ); - pub static ref BEACON_STATE_WRITE_BYTES: Result = try_create_int_counter( + "Total number of beacon state writes the DB", + ) +}); +pub static BEACON_STATE_WRITE_BYTES: LazyLock> = LazyLock::new(|| { + try_create_int_counter( "store_beacon_state_write_bytes_total", - "Total 
number of beacon state bytes written to the DB" - ); - /* - * Beacon Block - */ - pub static ref BEACON_BLOCK_GET_COUNT: Result = try_create_int_counter( + "Total number of beacon state bytes written to the DB", + ) +}); +/* + * Beacon Block + */ +pub static BEACON_BLOCK_GET_COUNT: LazyLock> = LazyLock::new(|| { + try_create_int_counter( "store_beacon_block_get_total", - "Total number of beacon blocks requested from the store (cache or DB)" - ); - pub static ref BEACON_BLOCK_CACHE_HIT_COUNT: Result = try_create_int_counter( + "Total number of beacon blocks requested from the store (cache or DB)", + ) +}); +pub static BEACON_BLOCK_CACHE_HIT_COUNT: LazyLock> = LazyLock::new(|| { + try_create_int_counter( "store_beacon_block_cache_hit_total", - "Number of hits to the store's block cache" - ); - pub static ref BEACON_BLOBS_CACHE_HIT_COUNT: Result = try_create_int_counter( + "Number of hits to the store's block cache", + ) +}); +pub static BEACON_BLOBS_CACHE_HIT_COUNT: LazyLock> = LazyLock::new(|| { + try_create_int_counter( "store_beacon_blobs_cache_hit_total", - "Number of hits to the store's blob cache" - ); -} + "Number of hits to the store's blob cache", + ) +}); /// Updates the global metrics registry with store-related information. 
pub fn scrape_for_metrics(db_path: &Path, freezer_db_path: &Path) { diff --git a/common/eth2_interop_keypairs/Cargo.toml b/common/eth2_interop_keypairs/Cargo.toml index 6f92acc84a..5971b934e0 100644 --- a/common/eth2_interop_keypairs/Cargo.toml +++ b/common/eth2_interop_keypairs/Cargo.toml @@ -7,7 +7,6 @@ edition = { workspace = true } # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -lazy_static = { workspace = true } num-bigint = "0.4.2" ethereum_hashing = { workspace = true } hex = { workspace = true } diff --git a/common/eth2_interop_keypairs/src/lib.rs b/common/eth2_interop_keypairs/src/lib.rs index 34c3d6f87c..0d24eb92f4 100644 --- a/common/eth2_interop_keypairs/src/lib.rs +++ b/common/eth2_interop_keypairs/src/lib.rs @@ -18,22 +18,21 @@ //! tests](https://github.com/ethereum/eth2.0-pm/blob/6e41fcf383ebeb5125938850d8e9b4e9888389b4/interop/mocked_start/keygen_test_vector.yaml). use bls::{Keypair, PublicKey, SecretKey}; use ethereum_hashing::hash; -use lazy_static::lazy_static; use num_bigint::BigUint; use serde::{Deserialize, Serialize}; use std::fs::File; use std::path::PathBuf; +use std::sync::LazyLock; pub const PRIVATE_KEY_BYTES: usize = 32; pub const PUBLIC_KEY_BYTES: usize = 48; pub const HASH_BYTES: usize = 32; -lazy_static! { - static ref CURVE_ORDER: BigUint = - "52435875175126190479447740508185965837690552500527637822603658699938581184513" - .parse::() - .expect("Curve order should be valid"); -} +static CURVE_ORDER: LazyLock = LazyLock::new(|| { + "52435875175126190479447740508185965837690552500527637822603658699938581184513" + .parse::() + .expect("Curve order should be valid") +}); /// Return a G1 point for the given `validator_index`, encoded as a compressed point in /// big-endian byte-ordering. 
diff --git a/common/lighthouse_metrics/src/lib.rs b/common/lighthouse_metrics/src/lib.rs index f2424ccabe..fa8f47e364 100644 --- a/common/lighthouse_metrics/src/lib.rs +++ b/common/lighthouse_metrics/src/lib.rs @@ -1,5 +1,5 @@ #![allow(clippy::needless_doctest_main)] -//! A wrapper around the `prometheus` crate that provides a global, `lazy_static` metrics registry +//! A wrapper around the `prometheus` crate that provides a global metrics registry //! and functions to add and use the following components (more info at //! [Prometheus docs](https://prometheus.io/docs/concepts/metric_types/)): //! @@ -20,23 +20,20 @@ //! ## Example //! //! ```rust -//! use lazy_static::lazy_static; //! use lighthouse_metrics::*; +//! use std::sync::LazyLock; //! //! // These metrics are "magically" linked to the global registry defined in `lighthouse_metrics`. -//! lazy_static! { -//! pub static ref RUN_COUNT: Result = try_create_int_counter( -//! "runs_total", -//! "Total number of runs" -//! ); -//! pub static ref CURRENT_VALUE: Result = try_create_int_gauge( -//! "current_value", -//! "The current value" -//! ); -//! pub static ref RUN_TIME: Result = -//! try_create_histogram("run_seconds", "Time taken (measured to high precision)"); -//! } -//! +//! pub static RUN_COUNT: LazyLock> = LazyLock::new(|| try_create_int_counter( +//! "runs_total", +//! "Total number of runs" +//! )); +//! pub static CURRENT_VALUE: LazyLock> = LazyLock::new(|| try_create_int_gauge( +//! "current_value", +//! "The current value" +//! )); +//! pub static RUN_TIME: LazyLock> = +//! LazyLock::new(|| try_create_histogram("run_seconds", "Time taken (measured to high precision)")); //! //! fn main() { //! 
for i in 0..100 { diff --git a/common/logging/Cargo.toml b/common/logging/Cargo.toml index 3a03d22f3c..cac6d073f2 100644 --- a/common/logging/Cargo.toml +++ b/common/logging/Cargo.toml @@ -9,7 +9,6 @@ test_logger = [] # Print log output to stderr when running tests instead of drop [dependencies] chrono = { version = "0.4", default-features = false, features = ["clock", "std"] } -lazy_static = { workspace = true } lighthouse_metrics = { workspace = true } parking_lot = { workspace = true } serde = { workspace = true } diff --git a/common/logging/src/lib.rs b/common/logging/src/lib.rs index 50d04fc088..5fc473c853 100644 --- a/common/logging/src/lib.rs +++ b/common/logging/src/lib.rs @@ -1,4 +1,3 @@ -use lazy_static::lazy_static; use lighthouse_metrics::{ inc_counter, try_create_int_counter, IntCounter, Result as MetricsResult, }; @@ -6,6 +5,7 @@ use slog::Logger; use slog_term::Decorator; use std::io::{Result, Write}; use std::path::PathBuf; +use std::sync::LazyLock; use std::time::{Duration, Instant}; use tracing_appender::non_blocking::NonBlocking; use tracing_appender::rolling::{RollingFileAppender, Rotation}; @@ -25,16 +25,14 @@ pub use tracing_metrics_layer::MetricsLayer; /// The minimum interval between log messages indicating that a queue is full. const LOG_DEBOUNCE_INTERVAL: Duration = Duration::from_secs(30); -lazy_static! 
{ - pub static ref INFOS_TOTAL: MetricsResult = - try_create_int_counter("info_total", "Count of infos logged"); - pub static ref WARNS_TOTAL: MetricsResult = - try_create_int_counter("warn_total", "Count of warns logged"); - pub static ref ERRORS_TOTAL: MetricsResult = - try_create_int_counter("error_total", "Count of errors logged"); - pub static ref CRITS_TOTAL: MetricsResult = - try_create_int_counter("crit_total", "Count of crits logged"); -} +pub static INFOS_TOTAL: LazyLock> = + LazyLock::new(|| try_create_int_counter("info_total", "Count of infos logged")); +pub static WARNS_TOTAL: LazyLock> = + LazyLock::new(|| try_create_int_counter("warn_total", "Count of warns logged")); +pub static ERRORS_TOTAL: LazyLock> = + LazyLock::new(|| try_create_int_counter("error_total", "Count of errors logged")); +pub static CRITS_TOTAL: LazyLock> = + LazyLock::new(|| try_create_int_counter("crit_total", "Count of crits logged")); pub struct AlignedTermDecorator { wrapped: D, diff --git a/common/logging/src/tracing_metrics_layer.rs b/common/logging/src/tracing_metrics_layer.rs index b9dde584b4..89a1f4d1f1 100644 --- a/common/logging/src/tracing_metrics_layer.rs +++ b/common/logging/src/tracing_metrics_layer.rs @@ -1,32 +1,36 @@ //! Exposes [`MetricsLayer`]: A tracing layer that registers metrics of logging events. -use lazy_static::lazy_static; use lighthouse_metrics as metrics; +use std::sync::LazyLock; use tracing_log::NormalizeEvent; -lazy_static! { - /// Count of `INFO` logs registered per enabled dependency. - pub static ref DEP_INFOS_TOTAL: metrics::Result = +/// Count of `INFO` logs registered per enabled dependency. +pub static DEP_INFOS_TOTAL: LazyLock> = + LazyLock::new(|| { metrics::try_create_int_counter_vec( "dep_info_total", "Count of infos logged per enabled dependency", - &["target"] - ); - /// Count of `WARN` logs registered per enabled dependency. 
- pub static ref DEP_WARNS_TOTAL: metrics::Result = + &["target"], + ) + }); +/// Count of `WARN` logs registered per enabled dependency. +pub static DEP_WARNS_TOTAL: LazyLock> = + LazyLock::new(|| { metrics::try_create_int_counter_vec( "dep_warn_total", "Count of warns logged per enabled dependency", - &["target"] - ); - /// Count of `ERROR` logs registered per enabled dependency. - pub static ref DEP_ERRORS_TOTAL: metrics::Result = + &["target"], + ) + }); +/// Count of `ERROR` logs registered per enabled dependency. +pub static DEP_ERRORS_TOTAL: LazyLock> = + LazyLock::new(|| { metrics::try_create_int_counter_vec( "dep_error_total", "Count of errors logged per enabled dependency", - &["target"] - ); -} + &["target"], + ) + }); /// Layer that registers Prometheus metrics for `INFO`, `WARN` and `ERROR` logs emitted per dependency. /// Dependencies are enabled via the `RUST_LOG` env flag. diff --git a/common/malloc_utils/Cargo.toml b/common/malloc_utils/Cargo.toml index 4a5f39b661..ac309cec9d 100644 --- a/common/malloc_utils/Cargo.toml +++ b/common/malloc_utils/Cargo.toml @@ -6,7 +6,6 @@ edition = { workspace = true } [dependencies] lighthouse_metrics = { workspace = true } -lazy_static = { workspace = true } libc = "0.2.79" parking_lot = { workspace = true } jemalloc-ctl = { version = "0.5.0", optional = true } diff --git a/common/malloc_utils/src/glibc.rs b/common/malloc_utils/src/glibc.rs index 681849a78c..9531102682 100644 --- a/common/malloc_utils/src/glibc.rs +++ b/common/malloc_utils/src/glibc.rs @@ -4,7 +4,6 @@ //! https://www.gnu.org/software/libc/manual/html_node/The-GNU-Allocator.html //! //! These functions are generally only suitable for Linux systems. -use lazy_static::lazy_static; use lighthouse_metrics::*; use parking_lot::Mutex; use std::env; @@ -33,50 +32,65 @@ const M_MMAP_THRESHOLD: c_int = -3; /// https://man7.org/linux/man-pages/man3/mallopt.3.html const ENV_VAR_MMAP_THRESHOLD: &str = "MALLOC_MMAP_THRESHOLD_"; -lazy_static! 
{ - pub static ref GLOBAL_LOCK: Mutex<()> = <_>::default(); -} +pub static GLOBAL_LOCK: LazyLock> = LazyLock::new(|| <_>::default()); // Metrics for the malloc. For more information, see: // // https://man7.org/linux/man-pages/man3/mallinfo.3.html -lazy_static! { - pub static ref MALLINFO_ARENA: lighthouse_metrics::Result = try_create_int_gauge( +pub static MALLINFO_ARENA: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( "mallinfo_arena", "The total amount of memory allocated by means other than mmap(2). \ This figure includes both in-use blocks and blocks on the free list.", - ); - pub static ref MALLINFO_ORDBLKS: lighthouse_metrics::Result = try_create_int_gauge( + ) +}); +pub static MALLINFO_ORDBLKS: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( "mallinfo_ordblks", "The number of ordinary (i.e., non-fastbin) free blocks.", - ); - pub static ref MALLINFO_SMBLKS: lighthouse_metrics::Result = - try_create_int_gauge("mallinfo_smblks", "The number of fastbin free blocks.",); - pub static ref MALLINFO_HBLKS: lighthouse_metrics::Result = try_create_int_gauge( + ) +}); +pub static MALLINFO_SMBLKS: LazyLock> = + LazyLock::new(|| try_create_int_gauge("mallinfo_smblks", "The number of fastbin free blocks.")); +pub static MALLINFO_HBLKS: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( "mallinfo_hblks", "The number of blocks currently allocated using mmap.", - ); - pub static ref MALLINFO_HBLKHD: lighthouse_metrics::Result = try_create_int_gauge( + ) +}); +pub static MALLINFO_HBLKHD: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( "mallinfo_hblkhd", "The number of bytes in blocks currently allocated using mmap.", - ); - pub static ref MALLINFO_FSMBLKS: lighthouse_metrics::Result = try_create_int_gauge( + ) +}); +pub static MALLINFO_FSMBLKS: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( "mallinfo_fsmblks", "The total number of bytes in fastbin free blocks.", - ); - pub static ref MALLINFO_UORDBLKS: lighthouse_metrics::Result = 
try_create_int_gauge( - "mallinfo_uordblks", - "The total number of bytes used by in-use allocations.", - ); - pub static ref MALLINFO_FORDBLKS: lighthouse_metrics::Result = try_create_int_gauge( - "mallinfo_fordblks", - "The total number of bytes in free blocks.", - ); - pub static ref MALLINFO_KEEPCOST: lighthouse_metrics::Result = try_create_int_gauge( - "mallinfo_keepcost", - "The total amount of releasable free space at the top of the heap..", - ); -} + ) +}); +pub static MALLINFO_UORDBLKS: LazyLock> = + LazyLock::new(|| { + try_create_int_gauge( + "mallinfo_uordblks", + "The total number of bytes used by in-use allocations.", + ) + }); +pub static MALLINFO_FORDBLKS: LazyLock> = + LazyLock::new(|| { + try_create_int_gauge( + "mallinfo_fordblks", + "The total number of bytes in free blocks.", + ) + }); +pub static MALLINFO_KEEPCOST: LazyLock> = + LazyLock::new(|| { + try_create_int_gauge( + "mallinfo_keepcost", + "The total amount of releasable free space at the top of the heap..", + ) + }); /// Calls `mallinfo` and updates Prometheus metrics with the results. pub fn scrape_mallinfo_metrics() { diff --git a/common/malloc_utils/src/jemalloc.rs b/common/malloc_utils/src/jemalloc.rs index 92533048c5..70685d5960 100644 --- a/common/malloc_utils/src/jemalloc.rs +++ b/common/malloc_utils/src/jemalloc.rs @@ -8,29 +8,31 @@ //! A) `JEMALLOC_SYS_WITH_MALLOC_CONF` at compile-time. //! B) `_RJEM_MALLOC_CONF` at runtime. use jemalloc_ctl::{arenas, epoch, stats, Error}; -use lazy_static::lazy_static; use lighthouse_metrics::{set_gauge, try_create_int_gauge, IntGauge}; +use std::sync::LazyLock; #[global_allocator] static ALLOC: jemallocator::Jemalloc = jemallocator::Jemalloc; // Metrics for jemalloc. -lazy_static! 
{ - pub static ref NUM_ARENAS: lighthouse_metrics::Result = - try_create_int_gauge("jemalloc_num_arenas", "The number of arenas in use"); - pub static ref BYTES_ALLOCATED: lighthouse_metrics::Result = - try_create_int_gauge("jemalloc_bytes_allocated", "Equivalent to stats.allocated"); - pub static ref BYTES_ACTIVE: lighthouse_metrics::Result = - try_create_int_gauge("jemalloc_bytes_active", "Equivalent to stats.active"); - pub static ref BYTES_MAPPED: lighthouse_metrics::Result = - try_create_int_gauge("jemalloc_bytes_mapped", "Equivalent to stats.mapped"); - pub static ref BYTES_METADATA: lighthouse_metrics::Result = - try_create_int_gauge("jemalloc_bytes_metadata", "Equivalent to stats.metadata"); - pub static ref BYTES_RESIDENT: lighthouse_metrics::Result = - try_create_int_gauge("jemalloc_bytes_resident", "Equivalent to stats.resident"); - pub static ref BYTES_RETAINED: lighthouse_metrics::Result = - try_create_int_gauge("jemalloc_bytes_retained", "Equivalent to stats.retained"); -} +pub static NUM_ARENAS: LazyLock> = + LazyLock::new(|| try_create_int_gauge("jemalloc_num_arenas", "The number of arenas in use")); +pub static BYTES_ALLOCATED: LazyLock> = LazyLock::new(|| { + try_create_int_gauge("jemalloc_bytes_allocated", "Equivalent to stats.allocated") +}); +pub static BYTES_ACTIVE: LazyLock> = + LazyLock::new(|| try_create_int_gauge("jemalloc_bytes_active", "Equivalent to stats.active")); +pub static BYTES_MAPPED: LazyLock> = + LazyLock::new(|| try_create_int_gauge("jemalloc_bytes_mapped", "Equivalent to stats.mapped")); +pub static BYTES_METADATA: LazyLock> = LazyLock::new(|| { + try_create_int_gauge("jemalloc_bytes_metadata", "Equivalent to stats.metadata") +}); +pub static BYTES_RESIDENT: LazyLock> = LazyLock::new(|| { + try_create_int_gauge("jemalloc_bytes_resident", "Equivalent to stats.resident") +}); +pub static BYTES_RETAINED: LazyLock> = LazyLock::new(|| { + try_create_int_gauge("jemalloc_bytes_retained", "Equivalent to stats.retained") +}); pub fn 
scrape_jemalloc_metrics() { scrape_jemalloc_metrics_fallible().unwrap() diff --git a/common/monitoring_api/Cargo.toml b/common/monitoring_api/Cargo.toml index 3731229c39..55f18edd52 100644 --- a/common/monitoring_api/Cargo.toml +++ b/common/monitoring_api/Cargo.toml @@ -17,6 +17,5 @@ lighthouse_version = { workspace = true } lighthouse_metrics = { workspace = true } slog = { workspace = true } store = { workspace = true } -lazy_static = { workspace = true } regex = { workspace = true } sensitive_url = { workspace = true } diff --git a/common/monitoring_api/src/gather.rs b/common/monitoring_api/src/gather.rs index b59a6dfb89..e157d82c11 100644 --- a/common/monitoring_api/src/gather.rs +++ b/common/monitoring_api/src/gather.rs @@ -1,9 +1,9 @@ use super::types::{BeaconProcessMetrics, ValidatorProcessMetrics}; -use lazy_static::lazy_static; use lighthouse_metrics::{MetricFamily, MetricType}; use serde_json::json; use std::collections::HashMap; use std::path::Path; +use std::sync::LazyLock; /// Represents a metric that needs to be fetched from lighthouse metrics registry /// and sent to the remote monitoring service. @@ -126,19 +126,20 @@ pub enum JsonType { Boolean, } -lazy_static! { - /// HashMap representing the `BEACON_PROCESS_METRICS`. - pub static ref BEACON_METRICS_MAP: HashMap = BEACON_PROCESS_METRICS +/// HashMap representing the `BEACON_PROCESS_METRICS`. +pub static BEACON_METRICS_MAP: LazyLock> = LazyLock::new(|| { + BEACON_PROCESS_METRICS .iter() .map(|metric| (metric.lighthouse_metric_name.to_string(), metric.clone())) - .collect(); - /// HashMap representing the `VALIDATOR_PROCESS_METRICS`. - pub static ref VALIDATOR_METRICS_MAP: HashMap = - VALIDATOR_PROCESS_METRICS + .collect() +}); +/// HashMap representing the `VALIDATOR_PROCESS_METRICS`. 
+pub static VALIDATOR_METRICS_MAP: LazyLock> = LazyLock::new(|| { + VALIDATOR_PROCESS_METRICS .iter() .map(|metric| (metric.lighthouse_metric_name.to_string(), metric.clone())) - .collect(); -} + .collect() +}); /// Returns the value from a Counter/Gauge `MetricType` assuming that it has no associated labels /// else it returns `None`. diff --git a/common/slot_clock/Cargo.toml b/common/slot_clock/Cargo.toml index 31e53779a8..13bcf006a9 100644 --- a/common/slot_clock/Cargo.toml +++ b/common/slot_clock/Cargo.toml @@ -6,6 +6,5 @@ edition = { workspace = true } [dependencies] types = { workspace = true } -lazy_static = { workspace = true } lighthouse_metrics = { workspace = true } parking_lot = { workspace = true } diff --git a/common/slot_clock/src/metrics.rs b/common/slot_clock/src/metrics.rs index ae3a9b599f..24023c9ed7 100644 --- a/common/slot_clock/src/metrics.rs +++ b/common/slot_clock/src/metrics.rs @@ -1,20 +1,22 @@ use crate::SlotClock; -use lazy_static::lazy_static; pub use lighthouse_metrics::*; +use std::sync::LazyLock; use types::{EthSpec, Slot}; -lazy_static! 
{ - pub static ref PRESENT_SLOT: Result = - try_create_int_gauge("slotclock_present_slot", "The present wall-clock slot"); - pub static ref PRESENT_EPOCH: Result = - try_create_int_gauge("slotclock_present_epoch", "The present wall-clock epoch"); - pub static ref SLOTS_PER_EPOCH: Result = - try_create_int_gauge("slotclock_slots_per_epoch", "Slots per epoch (constant)"); - pub static ref SECONDS_PER_SLOT: Result = try_create_int_gauge( +pub static PRESENT_SLOT: LazyLock> = + LazyLock::new(|| try_create_int_gauge("slotclock_present_slot", "The present wall-clock slot")); +pub static PRESENT_EPOCH: LazyLock> = LazyLock::new(|| { + try_create_int_gauge("slotclock_present_epoch", "The present wall-clock epoch") +}); +pub static SLOTS_PER_EPOCH: LazyLock> = LazyLock::new(|| { + try_create_int_gauge("slotclock_slots_per_epoch", "Slots per epoch (constant)") +}); +pub static SECONDS_PER_SLOT: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( "slotclock_slot_time_seconds", - "The duration in seconds between each slot" - ); -} + "The duration in seconds between each slot", + ) +}); /// Update the global metrics `DEFAULT_REGISTRY` with info from the slot clock. 
pub fn scrape_for_metrics(clock: &U) { diff --git a/common/task_executor/Cargo.toml b/common/task_executor/Cargo.toml index cc9a2c5097..7928d4a3c9 100644 --- a/common/task_executor/Cargo.toml +++ b/common/task_executor/Cargo.toml @@ -9,7 +9,6 @@ async-channel = { workspace = true } tokio = { workspace = true } slog = { workspace = true } futures = { workspace = true } -lazy_static = { workspace = true } lighthouse_metrics = { workspace = true } sloggers = { workspace = true } logging = { workspace = true } diff --git a/common/task_executor/src/metrics.rs b/common/task_executor/src/metrics.rs index 6ecea86d65..a40bfdf4e7 100644 --- a/common/task_executor/src/metrics.rs +++ b/common/task_executor/src/metrics.rs @@ -1,36 +1,46 @@ /// Handles async task metrics -use lazy_static::lazy_static; pub use lighthouse_metrics::*; +use std::sync::LazyLock; -lazy_static! { - pub static ref ASYNC_TASKS_COUNT: Result = try_create_int_gauge_vec( +pub static ASYNC_TASKS_COUNT: LazyLock> = LazyLock::new(|| { + try_create_int_gauge_vec( "async_tasks_count", "Total number of async tasks spawned using spawn", - &["async_task_count"] - ); - pub static ref BLOCKING_TASKS_COUNT: Result = try_create_int_gauge_vec( + &["async_task_count"], + ) +}); +pub static BLOCKING_TASKS_COUNT: LazyLock> = LazyLock::new(|| { + try_create_int_gauge_vec( "blocking_tasks_count", "Total number of async tasks spawned using spawn_blocking", - &["blocking_task_count"] - ); - pub static ref BLOCKING_TASKS_HISTOGRAM: Result = try_create_histogram_vec( + &["blocking_task_count"], + ) +}); +pub static BLOCKING_TASKS_HISTOGRAM: LazyLock> = LazyLock::new(|| { + try_create_histogram_vec( "blocking_tasks_histogram", "Time taken by blocking tasks", - &["blocking_task_hist"] - ); - pub static ref BLOCK_ON_TASKS_COUNT: Result = try_create_int_gauge_vec( + &["blocking_task_hist"], + ) +}); +pub static BLOCK_ON_TASKS_COUNT: LazyLock> = LazyLock::new(|| { + try_create_int_gauge_vec( "block_on_tasks_count", "Total number of 
block_on_dangerous tasks spawned", - &["name"] - ); - pub static ref BLOCK_ON_TASKS_HISTOGRAM: Result = try_create_histogram_vec( + &["name"], + ) +}); +pub static BLOCK_ON_TASKS_HISTOGRAM: LazyLock> = LazyLock::new(|| { + try_create_histogram_vec( "block_on_tasks_histogram", "Time taken by block_on_dangerous tasks", - &["name"] - ); - pub static ref TASKS_HISTOGRAM: Result = try_create_histogram_vec( + &["name"], + ) +}); +pub static TASKS_HISTOGRAM: LazyLock> = LazyLock::new(|| { + try_create_histogram_vec( "async_tasks_time_histogram", "Time taken by async tasks", - &["async_task_hist"] - ); -} + &["async_task_hist"], + ) +}); diff --git a/common/unused_port/Cargo.toml b/common/unused_port/Cargo.toml index 3d70cad272..95dbf59186 100644 --- a/common/unused_port/Cargo.toml +++ b/common/unused_port/Cargo.toml @@ -7,5 +7,4 @@ edition = { workspace = true } [dependencies] lru_cache = { workspace = true } -lazy_static = { workspace = true } parking_lot = { workspace = true } diff --git a/common/unused_port/src/lib.rs b/common/unused_port/src/lib.rs index 386f08a739..212ae963e3 100644 --- a/common/unused_port/src/lib.rs +++ b/common/unused_port/src/lib.rs @@ -1,7 +1,7 @@ -use lazy_static::lazy_static; use lru_cache::LRUTimeCache; use parking_lot::Mutex; use std::net::{SocketAddr, TcpListener, UdpSocket}; +use std::sync::LazyLock; use std::time::Duration; #[derive(Copy, Clone)] @@ -18,10 +18,8 @@ pub enum IpVersion { pub const CACHED_PORTS_TTL: Duration = Duration::from_secs(300); -lazy_static! { - static ref FOUND_PORTS_CACHE: Mutex> = - Mutex::new(LRUTimeCache::new(CACHED_PORTS_TTL)); -} +static FOUND_PORTS_CACHE: LazyLock>> = + LazyLock::new(|| Mutex::new(LRUTimeCache::new(CACHED_PORTS_TTL))); /// A convenience wrapper over [`zero_port`]. 
pub fn unused_tcp4_port() -> Result { diff --git a/common/warp_utils/Cargo.toml b/common/warp_utils/Cargo.toml index 0d33de998e..84f5ce5f18 100644 --- a/common/warp_utils/Cargo.toml +++ b/common/warp_utils/Cargo.toml @@ -18,6 +18,5 @@ serde_json = { workspace = true } tokio = { workspace = true } headers = "0.3.2" lighthouse_metrics = { workspace = true } -lazy_static = { workspace = true } serde_array_query = "0.1.0" bytes = { workspace = true } diff --git a/common/warp_utils/src/metrics.rs b/common/warp_utils/src/metrics.rs index eb061c7526..505d277583 100644 --- a/common/warp_utils/src/metrics.rs +++ b/common/warp_utils/src/metrics.rs @@ -1,86 +1,125 @@ use eth2::lighthouse::{ProcessHealth, SystemHealth}; use lighthouse_metrics::*; +use std::sync::LazyLock; -lazy_static::lazy_static! { - pub static ref PROCESS_NUM_THREADS: Result = try_create_int_gauge( +pub static PROCESS_NUM_THREADS: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( "process_num_threads", - "Number of threads used by the current process" - ); - pub static ref PROCESS_RES_MEM: Result = try_create_int_gauge( + "Number of threads used by the current process", + ) +}); +pub static PROCESS_RES_MEM: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( "process_resident_memory_bytes", - "Resident memory used by the current process" - ); - pub static ref PROCESS_VIRT_MEM: Result = try_create_int_gauge( + "Resident memory used by the current process", + ) +}); +pub static PROCESS_VIRT_MEM: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( "process_virtual_memory_bytes", - "Virtual memory used by the current process" - ); - pub static ref PROCESS_SHR_MEM: Result = try_create_int_gauge( + "Virtual memory used by the current process", + ) +}); +pub static PROCESS_SHR_MEM: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( "process_shared_memory_bytes", - "Shared memory used by the current process" - ); - pub static ref PROCESS_SECONDS: Result = try_create_int_gauge( + "Shared memory used 
by the current process", + ) +}); +pub static PROCESS_SECONDS: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( "process_cpu_seconds_total", - "Total cpu time taken by the current process" - ); - pub static ref SYSTEM_VIRT_MEM_TOTAL: Result = - try_create_int_gauge("system_virt_mem_total_bytes", "Total system virtual memory"); - pub static ref SYSTEM_VIRT_MEM_AVAILABLE: Result = try_create_int_gauge( + "Total cpu time taken by the current process", + ) +}); +pub static SYSTEM_VIRT_MEM_TOTAL: LazyLock> = LazyLock::new(|| { + try_create_int_gauge("system_virt_mem_total_bytes", "Total system virtual memory") +}); +pub static SYSTEM_VIRT_MEM_AVAILABLE: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( "system_virt_mem_available_bytes", - "Available system virtual memory" - ); - pub static ref SYSTEM_VIRT_MEM_USED: Result = - try_create_int_gauge("system_virt_mem_used_bytes", "Used system virtual memory"); - pub static ref SYSTEM_VIRT_MEM_FREE: Result = - try_create_int_gauge("system_virt_mem_free_bytes", "Free system virtual memory"); - pub static ref SYSTEM_VIRT_MEM_CACHED: Result = - try_create_int_gauge("system_virt_mem_cached_bytes", "Used system virtual memory"); - pub static ref SYSTEM_VIRT_MEM_BUFFERS: Result = - try_create_int_gauge("system_virt_mem_buffer_bytes", "Free system virtual memory"); - pub static ref SYSTEM_VIRT_MEM_PERCENTAGE: Result = try_create_float_gauge( + "Available system virtual memory", + ) +}); +pub static SYSTEM_VIRT_MEM_USED: LazyLock> = LazyLock::new(|| { + try_create_int_gauge("system_virt_mem_used_bytes", "Used system virtual memory") +}); +pub static SYSTEM_VIRT_MEM_FREE: LazyLock> = LazyLock::new(|| { + try_create_int_gauge("system_virt_mem_free_bytes", "Free system virtual memory") +}); +pub static SYSTEM_VIRT_MEM_CACHED: LazyLock> = LazyLock::new(|| { + try_create_int_gauge("system_virt_mem_cached_bytes", "Used system virtual memory") +}); +pub static SYSTEM_VIRT_MEM_BUFFERS: LazyLock> = LazyLock::new(|| { + 
try_create_int_gauge("system_virt_mem_buffer_bytes", "Free system virtual memory") +}); +pub static SYSTEM_VIRT_MEM_PERCENTAGE: LazyLock> = LazyLock::new(|| { + try_create_float_gauge( "system_virt_mem_percentage", - "Percentage of used virtual memory" - ); - pub static ref SYSTEM_LOADAVG_1: Result = - try_create_float_gauge("system_loadavg_1", "Loadavg over 1 minute"); - pub static ref SYSTEM_LOADAVG_5: Result = - try_create_float_gauge("system_loadavg_5", "Loadavg over 5 minutes"); - pub static ref SYSTEM_LOADAVG_15: Result = - try_create_float_gauge("system_loadavg_15", "Loadavg over 15 minutes"); + "Percentage of used virtual memory", + ) +}); +pub static SYSTEM_LOADAVG_1: LazyLock> = + LazyLock::new(|| try_create_float_gauge("system_loadavg_1", "Loadavg over 1 minute")); +pub static SYSTEM_LOADAVG_5: LazyLock> = + LazyLock::new(|| try_create_float_gauge("system_loadavg_5", "Loadavg over 5 minutes")); +pub static SYSTEM_LOADAVG_15: LazyLock> = + LazyLock::new(|| try_create_float_gauge("system_loadavg_15", "Loadavg over 15 minutes")); - pub static ref CPU_CORES: Result = - try_create_int_gauge("cpu_cores", "Number of physical cpu cores"); - pub static ref CPU_THREADS: Result = - try_create_int_gauge("cpu_threads", "Number of logical cpu cores"); +pub static CPU_CORES: LazyLock> = + LazyLock::new(|| try_create_int_gauge("cpu_cores", "Number of physical cpu cores")); +pub static CPU_THREADS: LazyLock> = + LazyLock::new(|| try_create_int_gauge("cpu_threads", "Number of logical cpu cores")); - pub static ref CPU_SYSTEM_SECONDS_TOTAL: Result = - try_create_int_gauge("cpu_system_seconds_total", "Total time spent in kernel mode"); - pub static ref CPU_USER_SECONDS_TOTAL: Result = - try_create_int_gauge("cpu_user_seconds_total", "Total time spent in user mode"); - pub static ref CPU_IOWAIT_SECONDS_TOTAL: Result = - try_create_int_gauge("cpu_iowait_seconds_total", "Total time spent waiting for io"); - pub static ref CPU_IDLE_SECONDS_TOTAL: Result = - 
try_create_int_gauge("cpu_idle_seconds_total", "Total time spent idle"); +pub static CPU_SYSTEM_SECONDS_TOTAL: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( + "cpu_system_seconds_total", + "Total time spent in kernel mode", + ) +}); +pub static CPU_USER_SECONDS_TOTAL: LazyLock> = LazyLock::new(|| { + try_create_int_gauge("cpu_user_seconds_total", "Total time spent in user mode") +}); +pub static CPU_IOWAIT_SECONDS_TOTAL: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( + "cpu_iowait_seconds_total", + "Total time spent waiting for io", + ) +}); +pub static CPU_IDLE_SECONDS_TOTAL: LazyLock> = + LazyLock::new(|| try_create_int_gauge("cpu_idle_seconds_total", "Total time spent idle")); - pub static ref DISK_BYTES_TOTAL: Result = - try_create_int_gauge("disk_node_bytes_total", "Total capacity of disk"); +pub static DISK_BYTES_TOTAL: LazyLock> = + LazyLock::new(|| try_create_int_gauge("disk_node_bytes_total", "Total capacity of disk")); - pub static ref DISK_BYTES_FREE: Result = - try_create_int_gauge("disk_node_bytes_free", "Free space in disk"); +pub static DISK_BYTES_FREE: LazyLock> = + LazyLock::new(|| try_create_int_gauge("disk_node_bytes_free", "Free space in disk")); - pub static ref DISK_READS: Result = - try_create_int_gauge("disk_node_reads_total", "Number of disk reads"); +pub static DISK_READS: LazyLock> = + LazyLock::new(|| try_create_int_gauge("disk_node_reads_total", "Number of disk reads")); - pub static ref DISK_WRITES: Result = - try_create_int_gauge("disk_node_writes_total", "Number of disk writes"); +pub static DISK_WRITES: LazyLock> = + LazyLock::new(|| try_create_int_gauge("disk_node_writes_total", "Number of disk writes")); - pub static ref NETWORK_BYTES_RECEIVED: Result = - try_create_int_gauge("network_node_bytes_total_received", "Total bytes received over all network interfaces"); - pub static ref NETWORK_BYTES_SENT: Result = - try_create_int_gauge("network_node_bytes_total_transmit", "Total bytes sent over all network 
interfaces"); +pub static NETWORK_BYTES_RECEIVED: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( + "network_node_bytes_total_received", + "Total bytes received over all network interfaces", + ) +}); +pub static NETWORK_BYTES_SENT: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( + "network_node_bytes_total_transmit", + "Total bytes sent over all network interfaces", + ) +}); - pub static ref BOOT_TIME: Result = - try_create_int_gauge("misc_node_boot_ts_seconds", "Boot time as unix epoch timestamp"); -} +pub static BOOT_TIME: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( + "misc_node_boot_ts_seconds", + "Boot time as unix epoch timestamp", + ) +}); pub fn scrape_health_metrics() { scrape_process_health_metrics(); diff --git a/consensus/merkle_proof/Cargo.toml b/consensus/merkle_proof/Cargo.toml index 3bee25eaac..15f65dfe4f 100644 --- a/consensus/merkle_proof/Cargo.toml +++ b/consensus/merkle_proof/Cargo.toml @@ -7,7 +7,6 @@ edition = { workspace = true } [dependencies] ethereum-types = { workspace = true } ethereum_hashing = { workspace = true } -lazy_static = { workspace = true } safe_arith = { workspace = true } [dev-dependencies] diff --git a/consensus/merkle_proof/src/lib.rs b/consensus/merkle_proof/src/lib.rs index 2d2d2afdda..b9457fffab 100644 --- a/consensus/merkle_proof/src/lib.rs +++ b/consensus/merkle_proof/src/lib.rs @@ -1,17 +1,14 @@ use ethereum_hashing::{hash, hash32_concat, ZERO_HASHES}; use ethereum_types::H256; -use lazy_static::lazy_static; use safe_arith::ArithError; +use std::sync::LazyLock; const MAX_TREE_DEPTH: usize = 32; const EMPTY_SLICE: &[H256] = &[]; -lazy_static! { - /// Zero nodes to act as "synthetic" left and right subtrees of other zero nodes. - static ref ZERO_NODES: Vec = { - (0..=MAX_TREE_DEPTH).map(MerkleTree::Zero).collect() - }; -} +/// Zero nodes to act as "synthetic" left and right subtrees of other zero nodes. 
+static ZERO_NODES: LazyLock> = + LazyLock::new(|| (0..=MAX_TREE_DEPTH).map(MerkleTree::Zero).collect()); /// Right-sparse Merkle tree. /// diff --git a/consensus/state_processing/Cargo.toml b/consensus/state_processing/Cargo.toml index e05c0bcfeb..7b7c6eb0c4 100644 --- a/consensus/state_processing/Cargo.toml +++ b/consensus/state_processing/Cargo.toml @@ -26,7 +26,6 @@ int_to_bytes = { workspace = true } smallvec = { workspace = true } arbitrary = { workspace = true } lighthouse_metrics = { workspace = true } -lazy_static = { workspace = true } derivative = { workspace = true } test_random_derive = { path = "../../common/test_random_derive" } rand = { workspace = true } diff --git a/consensus/state_processing/src/metrics.rs b/consensus/state_processing/src/metrics.rs index ac5c0f659c..e6fe483776 100644 --- a/consensus/state_processing/src/metrics.rs +++ b/consensus/state_processing/src/metrics.rs @@ -1,42 +1,62 @@ -use lazy_static::lazy_static; pub use lighthouse_metrics::*; +use std::sync::LazyLock; -lazy_static! 
{ - /* - * Participation Metrics - */ - pub static ref PARTICIPATION_PREV_EPOCH_HEAD_ATTESTING_GWEI_TOTAL: Result = try_create_int_gauge( +/* + * Participation Metrics + */ +pub static PARTICIPATION_PREV_EPOCH_HEAD_ATTESTING_GWEI_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_gauge( "beacon_participation_prev_epoch_head_attesting_gwei_total", "Total effective balance (gwei) of validators who attested to the head in the previous epoch" - ); - pub static ref PARTICIPATION_PREV_EPOCH_TARGET_ATTESTING_GWEI_TOTAL: Result = try_create_int_gauge( + ) + }); +pub static PARTICIPATION_PREV_EPOCH_TARGET_ATTESTING_GWEI_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_gauge( "beacon_participation_prev_epoch_target_attesting_gwei_total", "Total effective balance (gwei) of validators who attested to the target in the previous epoch" - ); - pub static ref PARTICIPATION_PREV_EPOCH_SOURCE_ATTESTING_GWEI_TOTAL: Result = try_create_int_gauge( + ) + }); +pub static PARTICIPATION_PREV_EPOCH_SOURCE_ATTESTING_GWEI_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_gauge( "beacon_participation_prev_epoch_source_attesting_gwei_total", "Total effective balance (gwei) of validators who attested to the source in the previous epoch" - ); - pub static ref PARTICIPATION_CURRENT_EPOCH_TOTAL_ACTIVE_GWEI_TOTAL: Result = try_create_int_gauge( - "beacon_participation_current_epoch_active_gwei_total", - "Total effective balance (gwei) of validators who are active in the current epoch" - ); - /* - * Processing metrics - */ - pub static ref PROCESS_EPOCH_TIME: Result = try_create_histogram( + ) + }); +pub static PARTICIPATION_CURRENT_EPOCH_TOTAL_ACTIVE_GWEI_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_gauge( + "beacon_participation_current_epoch_active_gwei_total", + "Total effective balance (gwei) of validators who are active in the current epoch", + ) + }); +/* + * Processing metrics + */ +pub static PROCESS_EPOCH_TIME: LazyLock> = LazyLock::new(|| { + 
try_create_histogram( "beacon_state_processing_process_epoch", "Time required for process_epoch", - ); - /* - * Participation Metrics (progressive balances) - */ - pub static ref PARTICIPATION_PREV_EPOCH_TARGET_ATTESTING_GWEI_PROGRESSIVE_TOTAL: Result = try_create_int_gauge( + ) +}); +/* + * Participation Metrics (progressive balances) + */ +pub static PARTICIPATION_PREV_EPOCH_TARGET_ATTESTING_GWEI_PROGRESSIVE_TOTAL: LazyLock< + Result, +> = LazyLock::new(|| { + try_create_int_gauge( "beacon_participation_prev_epoch_target_attesting_gwei_progressive_total", "Progressive total effective balance (gwei) of validators who attested to the target in the previous epoch" - ); - pub static ref PARTICIPATION_CURR_EPOCH_TARGET_ATTESTING_GWEI_PROGRESSIVE_TOTAL: Result = try_create_int_gauge( + ) +}); +pub static PARTICIPATION_CURR_EPOCH_TARGET_ATTESTING_GWEI_PROGRESSIVE_TOTAL: LazyLock< + Result, +> = LazyLock::new(|| { + try_create_int_gauge( "beacon_participation_curr_epoch_target_attesting_gwei_progressive_total", "Progressive total effective balance (gwei) of validators who attested to the target in the current epoch" - ); -} + ) +}); diff --git a/consensus/state_processing/src/per_block_processing/tests.rs b/consensus/state_processing/src/per_block_processing/tests.rs index 2774dd3d87..f8b354d92d 100644 --- a/consensus/state_processing/src/per_block_processing/tests.rs +++ b/consensus/state_processing/src/per_block_processing/tests.rs @@ -11,8 +11,8 @@ use crate::{ BlockSignatureStrategy, ConsensusContext, VerifyBlockRoot, VerifySignatures, }; use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; -use lazy_static::lazy_static; use ssz_types::Bitfield; +use std::sync::LazyLock; use test_utils::generate_deterministic_keypairs; use types::*; @@ -22,10 +22,9 @@ pub const VALIDATOR_COUNT: usize = 64; pub const EPOCH_OFFSET: u64 = 4; pub const NUM_ATTESTATIONS: u64 = 1; -lazy_static! { - /// A cached set of keys. 
- static ref KEYPAIRS: Vec = generate_deterministic_keypairs(MAX_VALIDATOR_COUNT); -} +/// A cached set of keys. +static KEYPAIRS: LazyLock> = + LazyLock::new(|| generate_deterministic_keypairs(MAX_VALIDATOR_COUNT)); async fn get_harness( epoch_offset: u64, diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index f623f3d101..c6c89de570 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -43,7 +43,6 @@ rusqlite = { workspace = true } arbitrary = { workspace = true, features = ["derive"] } ethereum_serde_utils = { workspace = true } regex = { workspace = true } -lazy_static = { workspace = true } parking_lot = { workspace = true } itertools = { workspace = true } superstruct = { workspace = true } diff --git a/consensus/types/src/beacon_state/committee_cache/tests.rs b/consensus/types/src/beacon_state/committee_cache/tests.rs index 4dc06feab3..1d2ca4ccdb 100644 --- a/consensus/types/src/beacon_state/committee_cache/tests.rs +++ b/consensus/types/src/beacon_state/committee_cache/tests.rs @@ -2,15 +2,14 @@ use crate::test_utils::*; use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; use beacon_chain::types::*; -use lazy_static::lazy_static; +use std::sync::LazyLock; use swap_or_not_shuffle::shuffle_list; pub const VALIDATOR_COUNT: usize = 16; -lazy_static! { - /// A cached set of keys. - static ref KEYPAIRS: Vec = generate_deterministic_keypairs(VALIDATOR_COUNT); -} +/// A cached set of keys. 
+static KEYPAIRS: LazyLock> = + LazyLock::new(|| generate_deterministic_keypairs(VALIDATOR_COUNT)); fn get_harness(validator_count: usize) -> BeaconChainHarness> { let harness = BeaconChainHarness::builder(E::default()) diff --git a/consensus/types/src/beacon_state/tests.rs b/consensus/types/src/beacon_state/tests.rs index 16c7ff152f..7d67e96bbc 100644 --- a/consensus/types/src/beacon_state/tests.rs +++ b/consensus/types/src/beacon_state/tests.rs @@ -6,18 +6,17 @@ use beacon_chain::types::{ ChainSpec, Domain, Epoch, EthSpec, Hash256, Keypair, MainnetEthSpec, MinimalEthSpec, RelativeEpoch, Slot, Vector, }; -use lazy_static::lazy_static; use ssz::Encode; use std::ops::Mul; +use std::sync::LazyLock; use swap_or_not_shuffle::compute_shuffled_index; pub const MAX_VALIDATOR_COUNT: usize = 129; pub const SLOT_OFFSET: Slot = Slot::new(1); -lazy_static! { - /// A cached set of keys. - static ref KEYPAIRS: Vec = generate_deterministic_keypairs(MAX_VALIDATOR_COUNT); -} +/// A cached set of keys. +static KEYPAIRS: LazyLock> = + LazyLock::new(|| generate_deterministic_keypairs(MAX_VALIDATOR_COUNT)); async fn get_harness( validator_count: usize, diff --git a/consensus/types/src/subnet_id.rs b/consensus/types/src/subnet_id.rs index 66786b5129..b2a6b6a2a0 100644 --- a/consensus/types/src/subnet_id.rs +++ b/consensus/types/src/subnet_id.rs @@ -1,23 +1,21 @@ //! Identifies each shard by an integer identifier. use crate::{AttestationRef, ChainSpec, CommitteeIndex, Epoch, EthSpec, Slot}; -use lazy_static::lazy_static; use safe_arith::{ArithError, SafeArith}; use serde::{Deserialize, Serialize}; use std::ops::{Deref, DerefMut}; +use std::sync::LazyLock; use swap_or_not_shuffle::compute_shuffled_index; const MAX_SUBNET_ID: usize = 64; -lazy_static! 
{ - static ref SUBNET_ID_TO_STRING: Vec = { - let mut v = Vec::with_capacity(MAX_SUBNET_ID); +static SUBNET_ID_TO_STRING: LazyLock> = LazyLock::new(|| { + let mut v = Vec::with_capacity(MAX_SUBNET_ID); - for i in 0..MAX_SUBNET_ID { - v.push(i.to_string()); - } - v - }; -} + for i in 0..MAX_SUBNET_ID { + v.push(i.to_string()); + } + v +}); #[derive(arbitrary::Arbitrary, Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)] #[serde(transparent)] diff --git a/consensus/types/src/sync_subnet_id.rs b/consensus/types/src/sync_subnet_id.rs index 7806aecfca..245ac5a6c4 100644 --- a/consensus/types/src/sync_subnet_id.rs +++ b/consensus/types/src/sync_subnet_id.rs @@ -1,24 +1,22 @@ //! Identifies each sync committee subnet by an integer identifier. use crate::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT; use crate::EthSpec; -use lazy_static::lazy_static; use safe_arith::{ArithError, SafeArith}; use serde::{Deserialize, Serialize}; use ssz_types::typenum::Unsigned; use std::collections::HashSet; use std::fmt::{self, Display}; use std::ops::{Deref, DerefMut}; +use std::sync::LazyLock; -lazy_static! 
{ - static ref SYNC_SUBNET_ID_TO_STRING: Vec = { - let mut v = Vec::with_capacity(SYNC_COMMITTEE_SUBNET_COUNT as usize); +static SYNC_SUBNET_ID_TO_STRING: LazyLock> = LazyLock::new(|| { + let mut v = Vec::with_capacity(SYNC_COMMITTEE_SUBNET_COUNT as usize); - for i in 0..SYNC_COMMITTEE_SUBNET_COUNT { - v.push(i.to_string()); - } - v - }; -} + for i in 0..SYNC_COMMITTEE_SUBNET_COUNT { + v.push(i.to_string()); + } + v +}); #[derive(arbitrary::Arbitrary, Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)] #[serde(transparent)] diff --git a/lcli/Dockerfile b/lcli/Dockerfile index 2cc9ce605b..d2cb6f6f14 100644 --- a/lcli/Dockerfile +++ b/lcli/Dockerfile @@ -1,7 +1,7 @@ # `lcli` requires the full project to be in scope, so this should be built either: # - from the `lighthouse` dir with the command: `docker build -f ./lcli/Dockerflie .` # - from the current directory with the command: `docker build -f ./Dockerfile ../` -FROM rust:1.75.0-bullseye AS builder +FROM rust:1.80.0-bullseye AS builder RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev COPY . 
lighthouse ARG FEATURES diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index b9d3eaf894..b720601e70 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -4,7 +4,7 @@ version = "5.2.1" authors = ["Sigma Prime "] edition = { workspace = true } autotests = false -rust-version = "1.78.0" +rust-version = "1.80.0" [features] default = ["slasher-lmdb"] @@ -50,7 +50,6 @@ eth2_network_config = { workspace = true } lighthouse_version = { workspace = true } account_utils = { workspace = true } lighthouse_metrics = { workspace = true } -lazy_static = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } serde_yaml = { workspace = true } diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index 481e17dbc8..1a1da45991 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -14,20 +14,20 @@ use environment::{EnvironmentBuilder, LoggerConfig}; use eth2_network_config::{Eth2NetworkConfig, DEFAULT_HARDCODED_NETWORK, HARDCODED_NET_NAMES}; use ethereum_hashing::have_sha_extensions; use futures::TryFutureExt; -use lazy_static::lazy_static; use lighthouse_version::VERSION; use malloc_utils::configure_memory_allocator; use slog::{crit, info}; use std::backtrace::Backtrace; use std::path::PathBuf; use std::process::exit; +use std::sync::LazyLock; use task_executor::ShutdownReason; use types::{EthSpec, EthSpecId}; use validator_client::ProductionValidatorClient; -lazy_static! { - pub static ref SHORT_VERSION: String = VERSION.replace("Lighthouse/", ""); - pub static ref LONG_VERSION: String = format!( +pub static SHORT_VERSION: LazyLock = LazyLock::new(|| VERSION.replace("Lighthouse/", "")); +pub static LONG_VERSION: LazyLock = LazyLock::new(|| { + format!( "{}\n\ BLS library: {}\n\ BLS hardware acceleration: {}\n\ @@ -43,8 +43,8 @@ lazy_static! 
{ build_profile_name(), cfg!(feature = "spec-minimal"), cfg!(feature = "gnosis"), - ); -} + ) +}); fn bls_library_name() -> &'static str { if cfg!(feature = "portable") { diff --git a/lighthouse/src/metrics.rs b/lighthouse/src/metrics.rs index ef3c33d298..0002b43e7b 100644 --- a/lighthouse/src/metrics.rs +++ b/lighthouse/src/metrics.rs @@ -1,23 +1,23 @@ -use lazy_static::lazy_static; pub use lighthouse_metrics::*; use lighthouse_version::VERSION; use slog::{error, Logger}; +use std::sync::LazyLock; use std::time::{SystemTime, UNIX_EPOCH}; -lazy_static! { - pub static ref PROCESS_START_TIME_SECONDS: Result = try_create_int_gauge( +pub static PROCESS_START_TIME_SECONDS: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( "process_start_time_seconds", - "The unix timestamp at which the process was started" - ); -} + "The unix timestamp at which the process was started", + ) +}); -lazy_static! { - pub static ref LIGHTHOUSE_VERSION: Result = try_create_int_gauge_vec( +pub static LIGHTHOUSE_VERSION: LazyLock> = LazyLock::new(|| { + try_create_int_gauge_vec( "lighthouse_info", "The build of Lighthouse running on the server", &["version"], - ); -} + ) +}); pub fn expose_process_start_time(log: &Logger) { match SystemTime::now().duration_since(UNIX_EPOCH) { diff --git a/slasher/Cargo.toml b/slasher/Cargo.toml index ad0bb00963..d74b0ac062 100644 --- a/slasher/Cargo.toml +++ b/slasher/Cargo.toml @@ -18,7 +18,6 @@ derivative = { workspace = true } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } flate2 = { version = "1.0.14", features = ["zlib"], default-features = false } -lazy_static = { workspace = true } lighthouse_metrics = { workspace = true } filesystem = { workspace = true } lru = { workspace = true } diff --git a/slasher/src/metrics.rs b/slasher/src/metrics.rs index b11d21d4b5..2e49bd4aeb 100644 --- a/slasher/src/metrics.rs +++ b/slasher/src/metrics.rs @@ -1,56 +1,78 @@ -use lazy_static::lazy_static; pub use lighthouse_metrics::*; +use 
std::sync::LazyLock; -lazy_static! { - pub static ref SLASHER_DATABASE_SIZE: Result = try_create_int_gauge( +pub static SLASHER_DATABASE_SIZE: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( "slasher_database_size", - "Size of the database backing the slasher, in bytes" - ); - pub static ref SLASHER_RUN_TIME: Result = try_create_histogram( + "Size of the database backing the slasher, in bytes", + ) +}); +pub static SLASHER_RUN_TIME: LazyLock> = LazyLock::new(|| { + try_create_histogram( "slasher_process_batch_time", - "Time taken to process a batch of blocks and attestations" - ); - pub static ref SLASHER_NUM_ATTESTATIONS_DROPPED: Result = try_create_int_gauge( + "Time taken to process a batch of blocks and attestations", + ) +}); +pub static SLASHER_NUM_ATTESTATIONS_DROPPED: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( "slasher_num_attestations_dropped", - "Number of attestations dropped per batch" - ); - pub static ref SLASHER_NUM_ATTESTATIONS_DEFERRED: Result = try_create_int_gauge( + "Number of attestations dropped per batch", + ) +}); +pub static SLASHER_NUM_ATTESTATIONS_DEFERRED: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( "slasher_num_attestations_deferred", - "Number of attestations deferred per batch" - ); - pub static ref SLASHER_NUM_ATTESTATIONS_VALID: Result = try_create_int_gauge( + "Number of attestations deferred per batch", + ) +}); +pub static SLASHER_NUM_ATTESTATIONS_VALID: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( "slasher_num_attestations_valid", - "Number of valid attestations per batch" - ); - pub static ref SLASHER_NUM_ATTESTATIONS_STORED_PER_BATCH: Result = + "Number of valid attestations per batch", + ) +}); +pub static SLASHER_NUM_ATTESTATIONS_STORED_PER_BATCH: LazyLock> = + LazyLock::new(|| { try_create_int_gauge( "slasher_num_attestations_stored_per_batch", - "Number of attestations stored per batch" - ); - pub static ref SLASHER_NUM_BLOCKS_PROCESSED: Result = try_create_int_gauge( + "Number of 
attestations stored per batch", + ) + }); +pub static SLASHER_NUM_BLOCKS_PROCESSED: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( "slasher_num_blocks_processed", "Number of blocks processed per batch", - ); - pub static ref SLASHER_NUM_CHUNKS_UPDATED: Result = try_create_int_counter_vec( + ) +}); +pub static SLASHER_NUM_CHUNKS_UPDATED: LazyLock> = LazyLock::new(|| { + try_create_int_counter_vec( "slasher_num_chunks_updated", "Number of min or max target chunks updated on disk", &["array"], - ); - pub static ref SLASHER_COMPRESSION_RATIO: Result = try_create_float_gauge( + ) +}); +pub static SLASHER_COMPRESSION_RATIO: LazyLock> = LazyLock::new(|| { + try_create_float_gauge( "slasher_compression_ratio", - "Compression ratio for min-max array chunks (higher is better)" - ); - pub static ref SLASHER_NUM_ATTESTATION_ROOT_QUERIES: Result = + "Compression ratio for min-max array chunks (higher is better)", + ) +}); +pub static SLASHER_NUM_ATTESTATION_ROOT_QUERIES: LazyLock> = + LazyLock::new(|| { try_create_int_counter( "slasher_num_attestation_root_queries", "Number of requests for an attestation data root", - ); - pub static ref SLASHER_NUM_ATTESTATION_ROOT_HITS: Result = try_create_int_counter( + ) + }); +pub static SLASHER_NUM_ATTESTATION_ROOT_HITS: LazyLock> = LazyLock::new(|| { + try_create_int_counter( "slasher_num_attestation_root_hits", "Number of requests for an attestation data root that hit the LRU cache", - ); - pub static ref SLASHER_ATTESTATION_ROOT_CACHE_SIZE: Result = try_create_int_gauge( + ) +}); +pub static SLASHER_ATTESTATION_ROOT_CACHE_SIZE: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( "slasher_attestation_root_cache_size", - "Number of attestation data roots cached in memory" - ); -} + "Number of attestation data roots cached in memory", + ) +}); diff --git a/testing/state_transition_vectors/Cargo.toml b/testing/state_transition_vectors/Cargo.toml index a1b6ed3b87..142a657f07 100644 --- 
a/testing/state_transition_vectors/Cargo.toml +++ b/testing/state_transition_vectors/Cargo.toml @@ -11,7 +11,6 @@ state_processing = { workspace = true } types = { workspace = true } ethereum_ssz = { workspace = true } beacon_chain = { workspace = true } -lazy_static = { workspace = true } tokio = { workspace = true } [features] diff --git a/testing/state_transition_vectors/src/main.rs b/testing/state_transition_vectors/src/main.rs index 3e7c37af54..58637b92d9 100644 --- a/testing/state_transition_vectors/src/main.rs +++ b/testing/state_transition_vectors/src/main.rs @@ -3,13 +3,13 @@ mod macros; mod exit; use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; -use lazy_static::lazy_static; use ssz::Encode; use std::env; use std::fs::{self, File}; use std::io::Write; use std::path::{Path, PathBuf}; use std::process::exit; +use std::sync::LazyLock; use types::{ test_utils::generate_deterministic_keypairs, BeaconState, EthSpec, Keypair, SignedBeaconBlock, }; @@ -45,10 +45,9 @@ pub struct TestVector { pub error: Option, } -lazy_static! { - /// A cached set of keys. - static ref KEYPAIRS: Vec = generate_deterministic_keypairs(VALIDATOR_COUNT); -} +/// A cached set of keys. 
+static KEYPAIRS: LazyLock> = + LazyLock::new(|| generate_deterministic_keypairs(VALIDATOR_COUNT)); async fn get_harness( slot: Slot, diff --git a/testing/web3signer_tests/Cargo.toml b/testing/web3signer_tests/Cargo.toml index 1bdf62cd22..7321fc1384 100644 --- a/testing/web3signer_tests/Cargo.toml +++ b/testing/web3signer_tests/Cargo.toml @@ -26,5 +26,4 @@ serde_yaml = { workspace = true } eth2_network_config = { workspace = true } serde_json = { workspace = true } zip = { workspace = true } -lazy_static = { workspace = true } parking_lot = { workspace = true } diff --git a/testing/web3signer_tests/src/lib.rs b/testing/web3signer_tests/src/lib.rs index 4187844cec..13d92d2d85 100644 --- a/testing/web3signer_tests/src/lib.rs +++ b/testing/web3signer_tests/src/lib.rs @@ -22,7 +22,6 @@ mod tests { }; use eth2_keystore::KeystoreBuilder; use eth2_network_config::Eth2NetworkConfig; - use lazy_static::lazy_static; use parking_lot::Mutex; use reqwest::Client; use serde::Serialize; @@ -33,7 +32,7 @@ mod tests { use std::future::Future; use std::path::PathBuf; use std::process::{Child, Command, Stdio}; - use std::sync::Arc; + use std::sync::{Arc, LazyLock}; use std::time::{Duration, Instant}; use task_executor::TaskExecutor; use tempfile::{tempdir, TempDir}; @@ -57,12 +56,13 @@ mod tests { /// debugging. const SUPPRESS_WEB3SIGNER_LOGS: bool = true; - lazy_static! 
{ - static ref TEMP_DIR: Arc> = Arc::new(Mutex::new( - tempdir().expect("Failed to create temporary directory") - )); - static ref GET_WEB3SIGNER_BIN: OnceCell<()> = OnceCell::new(); - } + static TEMP_DIR: LazyLock>> = LazyLock::new(|| { + Arc::new(Mutex::new( + tempdir().expect("Failed to create temporary directory"), + )) + }); + + static GET_WEB3SIGNER_BIN: OnceCell<()> = OnceCell::const_new(); type E = MainnetEthSpec; diff --git a/validator_client/Cargo.toml b/validator_client/Cargo.toml index 0df687abec..bff40b41d5 100644 --- a/validator_client/Cargo.toml +++ b/validator_client/Cargo.toml @@ -50,7 +50,6 @@ libsecp256k1 = { workspace = true } ring = { workspace = true } rand = { workspace = true, features = ["small_rng"] } lighthouse_metrics = { workspace = true } -lazy_static = { workspace = true } monitoring_api = { workspace = true } sensitive_url = { workspace = true } task_executor = { workspace = true } diff --git a/validator_client/slashing_protection/Cargo.toml b/validator_client/slashing_protection/Cargo.toml index baba14c538..6982958bd5 100644 --- a/validator_client/slashing_protection/Cargo.toml +++ b/validator_client/slashing_protection/Cargo.toml @@ -22,7 +22,6 @@ filesystem = { workspace = true } arbitrary = { workspace = true, features = ["derive"] } [dev-dependencies] -lazy_static = { workspace = true } rayon = { workspace = true } [features] diff --git a/validator_client/slashing_protection/tests/interop.rs b/validator_client/slashing_protection/tests/interop.rs index ee8f522cd6..c32aab55a2 100644 --- a/validator_client/slashing_protection/tests/interop.rs +++ b/validator_client/slashing_protection/tests/interop.rs @@ -1,11 +1,9 @@ -use lazy_static::lazy_static; use slashing_protection::interchange_test::MultiTestCase; use std::fs::File; use std::path::PathBuf; +use std::sync::LazyLock; -lazy_static! 
{ - pub static ref TEST_ROOT_DIR: PathBuf = test_root_dir(); -} +pub static TEST_ROOT_DIR: LazyLock = LazyLock::new(test_root_dir); fn download_tests() { let make_output = std::process::Command::new("make") diff --git a/validator_client/src/http_metrics/metrics.rs b/validator_client/src/http_metrics/metrics.rs index cc5b03bb19..8bc569c67a 100644 --- a/validator_client/src/http_metrics/metrics.rs +++ b/validator_client/src/http_metrics/metrics.rs @@ -1,6 +1,7 @@ use super::Context; use malloc_utils::scrape_allocator_metrics; use slot_clock::SlotClock; +use std::sync::LazyLock; use std::time::{SystemTime, UNIX_EPOCH}; use types::EthSpec; @@ -39,168 +40,233 @@ pub const WEB3SIGNER: &str = "web3signer"; pub use lighthouse_metrics::*; -lazy_static::lazy_static! { - pub static ref GENESIS_DISTANCE: Result = try_create_int_gauge( +pub static GENESIS_DISTANCE: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( "vc_genesis_distance_seconds", - "Distance between now and genesis time" - ); - pub static ref ENABLED_VALIDATORS_COUNT: Result = try_create_int_gauge( + "Distance between now and genesis time", + ) +}); +pub static ENABLED_VALIDATORS_COUNT: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( "vc_validators_enabled_count", - "Number of enabled validators" - ); - pub static ref TOTAL_VALIDATORS_COUNT: Result = try_create_int_gauge( + "Number of enabled validators", + ) +}); +pub static TOTAL_VALIDATORS_COUNT: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( "vc_validators_total_count", - "Number of total validators (enabled and disabled)" - ); + "Number of total validators (enabled and disabled)", + ) +}); - pub static ref SIGNED_BLOCKS_TOTAL: Result = try_create_int_counter_vec( +pub static SIGNED_BLOCKS_TOTAL: LazyLock> = LazyLock::new(|| { + try_create_int_counter_vec( "vc_signed_beacon_blocks_total", "Total count of attempted block signings", - &["status"] - ); - pub static ref SIGNED_ATTESTATIONS_TOTAL: Result = try_create_int_counter_vec( + 
&["status"], + ) +}); +pub static SIGNED_ATTESTATIONS_TOTAL: LazyLock> = LazyLock::new(|| { + try_create_int_counter_vec( "vc_signed_attestations_total", "Total count of attempted Attestation signings", - &["status"] - ); - pub static ref SIGNED_AGGREGATES_TOTAL: Result = try_create_int_counter_vec( + &["status"], + ) +}); +pub static SIGNED_AGGREGATES_TOTAL: LazyLock> = LazyLock::new(|| { + try_create_int_counter_vec( "vc_signed_aggregates_total", "Total count of attempted SignedAggregateAndProof signings", - &["status"] - ); - pub static ref SIGNED_SELECTION_PROOFS_TOTAL: Result = try_create_int_counter_vec( + &["status"], + ) +}); +pub static SIGNED_SELECTION_PROOFS_TOTAL: LazyLock> = LazyLock::new(|| { + try_create_int_counter_vec( "vc_signed_selection_proofs_total", "Total count of attempted SelectionProof signings", - &["status"] - ); - pub static ref SIGNED_SYNC_COMMITTEE_MESSAGES_TOTAL: Result = try_create_int_counter_vec( - "vc_signed_sync_committee_messages_total", - "Total count of attempted SyncCommitteeMessage signings", - &["status"] - ); - pub static ref SIGNED_SYNC_COMMITTEE_CONTRIBUTIONS_TOTAL: Result = try_create_int_counter_vec( - "vc_signed_sync_committee_contributions_total", - "Total count of attempted ContributionAndProof signings", - &["status"] - ); - pub static ref SIGNED_SYNC_SELECTION_PROOFS_TOTAL: Result = try_create_int_counter_vec( - "vc_signed_sync_selection_proofs_total", - "Total count of attempted SyncSelectionProof signings", - &["status"] - ); - pub static ref SIGNED_VOLUNTARY_EXITS_TOTAL: Result = try_create_int_counter_vec( + &["status"], + ) +}); +pub static SIGNED_SYNC_COMMITTEE_MESSAGES_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_counter_vec( + "vc_signed_sync_committee_messages_total", + "Total count of attempted SyncCommitteeMessage signings", + &["status"], + ) + }); +pub static SIGNED_SYNC_COMMITTEE_CONTRIBUTIONS_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_counter_vec( + 
"vc_signed_sync_committee_contributions_total", + "Total count of attempted ContributionAndProof signings", + &["status"], + ) + }); +pub static SIGNED_SYNC_SELECTION_PROOFS_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_counter_vec( + "vc_signed_sync_selection_proofs_total", + "Total count of attempted SyncSelectionProof signings", + &["status"], + ) + }); +pub static SIGNED_VOLUNTARY_EXITS_TOTAL: LazyLock> = LazyLock::new(|| { + try_create_int_counter_vec( "vc_signed_voluntary_exits_total", "Total count of VoluntaryExit signings", - &["status"] - ); - pub static ref SIGNED_VALIDATOR_REGISTRATIONS_TOTAL: Result = try_create_int_counter_vec( - "builder_validator_registrations_total", - "Total count of ValidatorRegistrationData signings", - &["status"] - ); - pub static ref DUTIES_SERVICE_TIMES: Result = try_create_histogram_vec( + &["status"], + ) +}); +pub static SIGNED_VALIDATOR_REGISTRATIONS_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_counter_vec( + "builder_validator_registrations_total", + "Total count of ValidatorRegistrationData signings", + &["status"], + ) + }); +pub static DUTIES_SERVICE_TIMES: LazyLock> = LazyLock::new(|| { + try_create_histogram_vec( "vc_duties_service_task_times_seconds", "Duration to perform duties service tasks", - &["task"] - ); - pub static ref ATTESTATION_SERVICE_TIMES: Result = try_create_histogram_vec( + &["task"], + ) +}); +pub static ATTESTATION_SERVICE_TIMES: LazyLock> = LazyLock::new(|| { + try_create_histogram_vec( "vc_attestation_service_task_times_seconds", "Duration to perform attestation service tasks", - &["task"] - ); - pub static ref SLASHING_PROTECTION_PRUNE_TIMES: Result = try_create_histogram( + &["task"], + ) +}); +pub static SLASHING_PROTECTION_PRUNE_TIMES: LazyLock> = LazyLock::new(|| { + try_create_histogram( "vc_slashing_protection_prune_times_seconds", "Time required to prune the slashing protection DB", - ); - pub static ref BLOCK_SERVICE_TIMES: Result = try_create_histogram_vec( + ) 
+}); +pub static BLOCK_SERVICE_TIMES: LazyLock> = LazyLock::new(|| { + try_create_histogram_vec( "vc_beacon_block_service_task_times_seconds", "Duration to perform beacon block service tasks", - &["task"] - ); - pub static ref PROPOSER_COUNT: Result = try_create_int_gauge_vec( + &["task"], + ) +}); +pub static PROPOSER_COUNT: LazyLock> = LazyLock::new(|| { + try_create_int_gauge_vec( "vc_beacon_block_proposer_count", "Number of beacon block proposers on this host", - &["task"] - ); - pub static ref ATTESTER_COUNT: Result = try_create_int_gauge_vec( + &["task"], + ) +}); +pub static ATTESTER_COUNT: LazyLock> = LazyLock::new(|| { + try_create_int_gauge_vec( "vc_beacon_attester_count", "Number of attesters on this host", - &["task"] - ); - pub static ref PROPOSAL_CHANGED: Result = try_create_int_counter( + &["task"], + ) +}); +pub static PROPOSAL_CHANGED: LazyLock> = LazyLock::new(|| { + try_create_int_counter( "vc_beacon_block_proposal_changed", "A duties update discovered a new block proposer for the current slot", - ); - /* - * Endpoint metrics - */ - pub static ref ENDPOINT_ERRORS: Result = try_create_int_counter_vec( + ) +}); +/* + * Endpoint metrics + */ +pub static ENDPOINT_ERRORS: LazyLock> = LazyLock::new(|| { + try_create_int_counter_vec( "bn_endpoint_errors", "The number of beacon node request errors for each endpoint", - &["endpoint"] - ); - pub static ref ENDPOINT_REQUESTS: Result = try_create_int_counter_vec( + &["endpoint"], + ) +}); +pub static ENDPOINT_REQUESTS: LazyLock> = LazyLock::new(|| { + try_create_int_counter_vec( "bn_endpoint_requests", "The number of beacon node requests for each endpoint", - &["endpoint"] - ); + &["endpoint"], + ) +}); - /* - * Beacon node availability metrics - */ - pub static ref AVAILABLE_BEACON_NODES_COUNT: Result = try_create_int_gauge( +/* + * Beacon node availability metrics + */ +pub static AVAILABLE_BEACON_NODES_COUNT: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( "vc_beacon_nodes_available_count", "Number 
of available beacon nodes", - ); - pub static ref SYNCED_BEACON_NODES_COUNT: Result = try_create_int_gauge( + ) +}); +pub static SYNCED_BEACON_NODES_COUNT: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( "vc_beacon_nodes_synced_count", "Number of synced beacon nodes", - ); - pub static ref TOTAL_BEACON_NODES_COUNT: Result = try_create_int_gauge( + ) +}); +pub static TOTAL_BEACON_NODES_COUNT: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( "vc_beacon_nodes_total_count", "Total number of beacon nodes", - ); + ) +}); - pub static ref ETH2_FALLBACK_CONFIGURED: Result = try_create_int_gauge( +pub static ETH2_FALLBACK_CONFIGURED: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( "sync_eth2_fallback_configured", "The number of configured eth2 fallbacks", - ); + ) +}); - pub static ref ETH2_FALLBACK_CONNECTED: Result = try_create_int_gauge( +pub static ETH2_FALLBACK_CONNECTED: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( "sync_eth2_fallback_connected", "Set to 1 if connected to atleast one synced eth2 fallback node, otherwise set to 0", - ); - /* - * Signing Metrics - */ - pub static ref SIGNING_TIMES: Result = try_create_histogram_vec( + ) +}); +/* + * Signing Metrics + */ +pub static SIGNING_TIMES: LazyLock> = LazyLock::new(|| { + try_create_histogram_vec( "vc_signing_times_seconds", "Duration to obtain a signature", - &["type"] - ); - pub static ref BLOCK_SIGNING_TIMES: Result = try_create_histogram( + &["type"], + ) +}); +pub static BLOCK_SIGNING_TIMES: LazyLock> = LazyLock::new(|| { + try_create_histogram( "vc_block_signing_times_seconds", "Duration to obtain a signature for a block", - ); + ) +}); - pub static ref ATTESTATION_DUTY: Result = try_create_int_gauge_vec( +pub static ATTESTATION_DUTY: LazyLock> = LazyLock::new(|| { + try_create_int_gauge_vec( "vc_attestation_duty_slot", "Attestation duty slot for all managed validators", - &["validator"] - ); - /* - * BN latency - */ - pub static ref VC_BEACON_NODE_LATENCY: Result = 
try_create_histogram_vec( + &["validator"], + ) +}); +/* + * BN latency + */ +pub static VC_BEACON_NODE_LATENCY: LazyLock> = LazyLock::new(|| { + try_create_histogram_vec( "vc_beacon_node_latency", "Round-trip latency for a simple API endpoint on each BN", - &["endpoint"] - ); - pub static ref VC_BEACON_NODE_LATENCY_PRIMARY_ENDPOINT: Result = try_create_histogram( - "vc_beacon_node_latency_primary_endpoint", - "Round-trip latency for the primary BN endpoint", - ); -} + &["endpoint"], + ) +}); +pub static VC_BEACON_NODE_LATENCY_PRIMARY_ENDPOINT: LazyLock> = + LazyLock::new(|| { + try_create_histogram( + "vc_beacon_node_latency_primary_endpoint", + "Round-trip latency for the primary BN endpoint", + ) + }); pub fn gather_prometheus_metrics( ctx: &Context, From 75e934842e1fc10f17acb96438d17761c1f5c6b8 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Tue, 30 Jul 2024 15:43:34 +1000 Subject: [PATCH 13/43] Swap finalized chains based on processed batches (#6203) * Shift from validated to processed --- beacon_node/network/src/sync/range_sync/chain.rs | 12 ++++-------- .../network/src/sync/range_sync/chain_collection.rs | 6 +++--- 2 files changed, 7 insertions(+), 11 deletions(-) diff --git a/beacon_node/network/src/sync/range_sync/chain.rs b/beacon_node/network/src/sync/range_sync/chain.rs index d63b2f95d8..1abd490b1f 100644 --- a/beacon_node/network/src/sync/range_sync/chain.rs +++ b/beacon_node/network/src/sync/range_sync/chain.rs @@ -111,9 +111,6 @@ pub struct SyncingChain { /// The current processing batch, if any. current_processing_batch: Option, - /// Batches validated by this chain. - validated_batches: u64, - /// The chain's log. 
log: slog::Logger, } @@ -161,7 +158,6 @@ impl SyncingChain { attempted_optimistic_starts: HashSet::default(), state: ChainSyncingState::Stopped, current_processing_batch: None, - validated_batches: 0, log: log.new(o!("chain" => id)), } } @@ -182,8 +178,10 @@ impl SyncingChain { } /// Progress in epochs made by the chain - pub fn validated_epochs(&self) -> u64 { - self.validated_batches * EPOCHS_PER_BATCH + pub fn processed_epochs(&self) -> u64 { + self.processing_target + .saturating_sub(self.start_epoch) + .into() } /// Returns the total count of pending blocks in all the batches of this chain @@ -654,7 +652,6 @@ impl SyncingChain { let removed_batches = std::mem::replace(&mut self.batches, remaining_batches); for (id, batch) in removed_batches.into_iter() { - self.validated_batches = self.validated_batches.saturating_add(1); // only for batches awaiting validation can we be sure the last attempt is // right, and thus, that any different attempt is wrong match batch.state() { @@ -1166,7 +1163,6 @@ impl slog::KV for SyncingChain { )?; serializer.emit_usize("batches", self.batches.len())?; serializer.emit_usize("peers", self.peers.len())?; - serializer.emit_u64("validated_batches", self.validated_batches)?; serializer.emit_arguments("state", &format_args!("{:?}", self.state))?; slog::Result::Ok(()) } diff --git a/beacon_node/network/src/sync/range_sync/chain_collection.rs b/beacon_node/network/src/sync/range_sync/chain_collection.rs index 3621a6605a..1217fbf8fe 100644 --- a/beacon_node/network/src/sync/range_sync/chain_collection.rs +++ b/beacon_node/network/src/sync/range_sync/chain_collection.rs @@ -24,7 +24,7 @@ use types::{Epoch, Hash256, Slot}; const PARALLEL_HEAD_CHAINS: usize = 2; /// Minimum work we require a finalized chain to do before picking a chain with more peers. -const MIN_FINALIZED_CHAIN_VALIDATED_EPOCHS: u64 = 10; +const MIN_FINALIZED_CHAIN_PROCESSED_EPOCHS: u64 = 10; /// The state of the long range/batch sync. 
#[derive(Clone)] @@ -273,8 +273,8 @@ impl ChainCollection { // chains are different, check that they don't have the same number of peers if let Some(syncing_chain) = self.finalized_chains.get_mut(&syncing_id) { if max_peers > syncing_chain.available_peers() - && syncing_chain.validated_epochs() - > MIN_FINALIZED_CHAIN_VALIDATED_EPOCHS + && syncing_chain.processed_epochs() + > MIN_FINALIZED_CHAIN_PROCESSED_EPOCHS { syncing_chain.stop_syncing(); old_id = Some(Some(syncing_id)); From c7ded108705704a474e1f8e681374cea209cccb1 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 30 Jul 2024 23:25:55 +1000 Subject: [PATCH 14/43] Use blinded blocks for light client proofs (#6201) * Use blinded blocks for light client proofs --- beacon_node/beacon_chain/src/beacon_chain.rs | 7 +- .../src/light_client_server_cache.rs | 15 ++- consensus/types/src/beacon_block_body.rs | 94 ++++++------------- consensus/types/src/light_client_bootstrap.rs | 6 +- .../types/src/light_client_finality_update.rs | 6 +- consensus/types/src/light_client_header.rs | 41 +++++--- .../src/light_client_optimistic_update.rs | 4 +- consensus/types/src/light_client_update.rs | 6 +- 8 files changed, 72 insertions(+), 107 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index c6ed979d68..4bc98a98da 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -6766,12 +6766,7 @@ impl BeaconChain { &self, block_root: &Hash256, ) -> Result, ForkName)>, Error> { - let handle = self - .task_executor - .handle() - .ok_or(BeaconChainError::RuntimeShutdown)?; - - let Some(block) = handle.block_on(async { self.get_block(block_root).await })? else { + let Some(block) = self.get_blinded_block(block_root)? 
else { return Ok(None); }; diff --git a/beacon_node/beacon_chain/src/light_client_server_cache.rs b/beacon_node/beacon_chain/src/light_client_server_cache.rs index ca02905737..87513885f7 100644 --- a/beacon_node/beacon_chain/src/light_client_server_cache.rs +++ b/beacon_node/beacon_chain/src/light_client_server_cache.rs @@ -84,13 +84,12 @@ impl LightClientServerCache { let signature_slot = block_slot; let attested_block_root = block_parent_root; - let attested_block = - store - .get_full_block(attested_block_root)? - .ok_or(BeaconChainError::DBInconsistent(format!( - "Block not available {:?}", - attested_block_root - )))?; + let attested_block = store.get_blinded_block(attested_block_root)?.ok_or( + BeaconChainError::DBInconsistent(format!( + "Block not available {:?}", + attested_block_root + )), + )?; let cached_parts = self.get_or_compute_prev_block_cache( store.clone(), @@ -130,7 +129,7 @@ impl LightClientServerCache { if is_latest_finality & !cached_parts.finalized_block_root.is_zero() { // Immediately after checkpoint sync the finalized block may not be available yet. if let Some(finalized_block) = - store.get_full_block(&cached_parts.finalized_block_root)? + store.get_blinded_block(&cached_parts.finalized_block_root)? { *self.latest_finality_update.write() = Some(LightClientFinalityUpdate::new( &attested_block, diff --git a/consensus/types/src/beacon_block_body.rs b/consensus/types/src/beacon_block_body.rs index 363ba08f7d..373e165e0b 100644 --- a/consensus/types/src/beacon_block_body.rs +++ b/consensus/types/src/beacon_block_body.rs @@ -129,6 +129,11 @@ impl> BeaconBlockBody { pub fn execution_payload(&self) -> Result, Error> { self.to_ref().execution_payload() } + + /// Returns the name of the fork pertaining to `self`. 
+ pub fn fork_name(&self) -> ForkName { + self.to_ref().fork_name() + } } impl<'a, E: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRef<'a, E, Payload> { @@ -239,6 +244,28 @@ impl<'a, E: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRef<'a, E, Ok(proof.into()) } + pub fn block_body_merkle_proof(&self, generalized_index: usize) -> Result, Error> { + let field_index = match generalized_index { + light_client_update::EXECUTION_PAYLOAD_INDEX => { + // Execution payload is a top-level field, subtract off the generalized indices + // for the internal nodes. Result should be 9, the field offset of the execution + // payload in the `BeaconBlockBody`: + // https://github.com/ethereum/consensus-specs/blob/dev/specs/deneb/beacon-chain.md#beaconblockbody + generalized_index + .checked_sub(NUM_BEACON_BLOCK_BODY_HASH_TREE_ROOT_LEAVES) + .ok_or(Error::IndexNotSupported(generalized_index))? + } + _ => return Err(Error::IndexNotSupported(generalized_index)), + }; + + let leaves = self.body_merkle_leaves(); + let depth = light_client_update::EXECUTION_PAYLOAD_PROOF_LEN; + let tree = merkle_proof::MerkleTree::create(&leaves, depth); + let (_, proof) = tree.generate_proof(field_index, depth)?; + + Ok(proof) + } + /// Return `true` if this block body has a non-zero number of blobs. pub fn has_blobs(self) -> bool { self.blob_kzg_commitments() @@ -832,73 +859,6 @@ impl From>> } } -impl BeaconBlockBody { - /// Returns the name of the fork pertaining to `self`. - pub fn fork_name(&self) -> ForkName { - self.to_ref().fork_name() - } - - pub fn block_body_merkle_proof(&self, generalized_index: usize) -> Result, Error> { - let field_index = match generalized_index { - light_client_update::EXECUTION_PAYLOAD_INDEX => { - // Execution payload is a top-level field, subtract off the generalized indices - // for the internal nodes. 
Result should be 9, the field offset of the execution - // payload in the `BeaconBlockBody`: - // https://github.com/ethereum/consensus-specs/blob/dev/specs/deneb/beacon-chain.md#beaconblockbody - generalized_index - .checked_sub(NUM_BEACON_BLOCK_BODY_HASH_TREE_ROOT_LEAVES) - .ok_or(Error::IndexNotSupported(generalized_index))? - } - _ => return Err(Error::IndexNotSupported(generalized_index)), - }; - - let attestations_root = if self.fork_name() > ForkName::Electra { - self.attestations_electra()?.tree_hash_root() - } else { - self.attestations_base()?.tree_hash_root() - }; - - let attester_slashings_root = if self.fork_name() > ForkName::Electra { - self.attester_slashings_electra()?.tree_hash_root() - } else { - self.attester_slashings_base()?.tree_hash_root() - }; - - let mut leaves = vec![ - self.randao_reveal().tree_hash_root(), - self.eth1_data().tree_hash_root(), - self.graffiti().tree_hash_root(), - self.proposer_slashings().tree_hash_root(), - attester_slashings_root, - attestations_root, - self.deposits().tree_hash_root(), - self.voluntary_exits().tree_hash_root(), - ]; - - if let Ok(sync_aggregate) = self.sync_aggregate() { - leaves.push(sync_aggregate.tree_hash_root()) - } - - if let Ok(execution_payload) = self.execution_payload() { - leaves.push(execution_payload.tree_hash_root()) - } - - if let Ok(bls_to_execution_changes) = self.bls_to_execution_changes() { - leaves.push(bls_to_execution_changes.tree_hash_root()) - } - - if let Ok(blob_kzg_commitments) = self.blob_kzg_commitments() { - leaves.push(blob_kzg_commitments.tree_hash_root()) - } - - let depth = light_client_update::EXECUTION_PAYLOAD_PROOF_LEN; - let tree = merkle_proof::MerkleTree::create(&leaves, depth); - let (_, proof) = tree.generate_proof(field_index, depth)?; - - Ok(proof) - } -} - /// Util method helpful for logging. 
pub fn format_kzg_commitments(commitments: &[KzgCommitment]) -> String { let commitment_strings: Vec = commitments.iter().map(|x| x.to_string()).collect(); diff --git a/consensus/types/src/light_client_bootstrap.rs b/consensus/types/src/light_client_bootstrap.rs index e3a85744de..f06a94adce 100644 --- a/consensus/types/src/light_client_bootstrap.rs +++ b/consensus/types/src/light_client_bootstrap.rs @@ -1,8 +1,8 @@ use crate::{ light_client_update::*, test_utils::TestRandom, BeaconState, ChainSpec, EthSpec, FixedVector, ForkName, ForkVersionDeserialize, Hash256, LightClientHeader, LightClientHeaderAltair, - LightClientHeaderCapella, LightClientHeaderDeneb, LightClientHeaderElectra, SignedBeaconBlock, - Slot, SyncCommittee, + LightClientHeaderCapella, LightClientHeaderDeneb, LightClientHeaderElectra, + SignedBlindedBeaconBlock, Slot, SyncCommittee, }; use derivative::Derivative; use serde::{Deserialize, Deserializer, Serialize}; @@ -114,7 +114,7 @@ impl LightClientBootstrap { pub fn from_beacon_state( beacon_state: &mut BeaconState, - block: &SignedBeaconBlock, + block: &SignedBlindedBeaconBlock, chain_spec: &ChainSpec, ) -> Result { let mut header = beacon_state.latest_block_header().clone(); diff --git a/consensus/types/src/light_client_finality_update.rs b/consensus/types/src/light_client_finality_update.rs index a9e24e03db..e65b057292 100644 --- a/consensus/types/src/light_client_finality_update.rs +++ b/consensus/types/src/light_client_finality_update.rs @@ -3,7 +3,7 @@ use crate::ChainSpec; use crate::{ light_client_update::*, test_utils::TestRandom, ForkName, ForkVersionDeserialize, LightClientHeaderAltair, LightClientHeaderCapella, LightClientHeaderDeneb, - LightClientHeaderElectra, SignedBeaconBlock, + LightClientHeaderElectra, SignedBlindedBeaconBlock, }; use derivative::Derivative; use serde::{Deserialize, Deserializer, Serialize}; @@ -73,8 +73,8 @@ pub struct LightClientFinalityUpdate { impl LightClientFinalityUpdate { pub fn new( - attested_block: 
&SignedBeaconBlock, - finalized_block: &SignedBeaconBlock, + attested_block: &SignedBlindedBeaconBlock, + finalized_block: &SignedBlindedBeaconBlock, finality_branch: FixedVector, sync_aggregate: SyncAggregate, signature_slot: Slot, diff --git a/consensus/types/src/light_client_header.rs b/consensus/types/src/light_client_header.rs index 1d6432ed6f..1feb748fae 100644 --- a/consensus/types/src/light_client_header.rs +++ b/consensus/types/src/light_client_header.rs @@ -4,7 +4,7 @@ use crate::ForkVersionDeserialize; use crate::{light_client_update::*, BeaconBlockBody}; use crate::{ test_utils::TestRandom, EthSpec, ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderDeneb, - ExecutionPayloadHeaderElectra, FixedVector, Hash256, SignedBeaconBlock, + ExecutionPayloadHeaderElectra, FixedVector, Hash256, SignedBlindedBeaconBlock, }; use crate::{BeaconBlockHeader, ExecutionPayloadHeader}; use derivative::Derivative; @@ -72,7 +72,7 @@ pub struct LightClientHeader { impl LightClientHeader { pub fn block_to_light_client_header( - block: &SignedBeaconBlock, + block: &SignedBlindedBeaconBlock, chain_spec: &ChainSpec, ) -> Result { let header = match block @@ -139,7 +139,9 @@ impl LightClientHeader { } impl LightClientHeaderAltair { - pub fn block_to_light_client_header(block: &SignedBeaconBlock) -> Result { + pub fn block_to_light_client_header( + block: &SignedBlindedBeaconBlock, + ) -> Result { Ok(LightClientHeaderAltair { beacon: block.message().block_header(), _phantom_data: PhantomData, @@ -148,7 +150,9 @@ impl LightClientHeaderAltair { } impl LightClientHeaderCapella { - pub fn block_to_light_client_header(block: &SignedBeaconBlock) -> Result { + pub fn block_to_light_client_header( + block: &SignedBlindedBeaconBlock, + ) -> Result { let payload = block .message() .execution_payload()? 
@@ -163,8 +167,9 @@ impl LightClientHeaderCapella { .to_owned(), ); - let execution_branch = - beacon_block_body.block_body_merkle_proof(EXECUTION_PAYLOAD_INDEX)?; + let execution_branch = beacon_block_body + .to_ref() + .block_body_merkle_proof(EXECUTION_PAYLOAD_INDEX)?; return Ok(LightClientHeaderCapella { beacon: block.message().block_header(), @@ -176,13 +181,15 @@ impl LightClientHeaderCapella { } impl LightClientHeaderDeneb { - pub fn block_to_light_client_header(block: &SignedBeaconBlock) -> Result { - let payload = block + pub fn block_to_light_client_header( + block: &SignedBlindedBeaconBlock, + ) -> Result { + let header = block .message() .execution_payload()? - .execution_payload_deneb()?; + .execution_payload_deneb()? + .clone(); - let header = ExecutionPayloadHeaderDeneb::from(payload); let beacon_block_body = BeaconBlockBody::from( block .message() @@ -191,8 +198,9 @@ impl LightClientHeaderDeneb { .to_owned(), ); - let execution_branch = - beacon_block_body.block_body_merkle_proof(EXECUTION_PAYLOAD_INDEX)?; + let execution_branch = beacon_block_body + .to_ref() + .block_body_merkle_proof(EXECUTION_PAYLOAD_INDEX)?; Ok(LightClientHeaderDeneb { beacon: block.message().block_header(), @@ -204,7 +212,9 @@ impl LightClientHeaderDeneb { } impl LightClientHeaderElectra { - pub fn block_to_light_client_header(block: &SignedBeaconBlock) -> Result { + pub fn block_to_light_client_header( + block: &SignedBlindedBeaconBlock, + ) -> Result { let payload = block .message() .execution_payload()? 
@@ -219,8 +229,9 @@ impl LightClientHeaderElectra { .to_owned(), ); - let execution_branch = - beacon_block_body.block_body_merkle_proof(EXECUTION_PAYLOAD_INDEX)?; + let execution_branch = beacon_block_body + .to_ref() + .block_body_merkle_proof(EXECUTION_PAYLOAD_INDEX)?; Ok(LightClientHeaderElectra { beacon: block.message().block_header(), diff --git a/consensus/types/src/light_client_optimistic_update.rs b/consensus/types/src/light_client_optimistic_update.rs index 708f24e770..f5b749be70 100644 --- a/consensus/types/src/light_client_optimistic_update.rs +++ b/consensus/types/src/light_client_optimistic_update.rs @@ -2,7 +2,7 @@ use super::{EthSpec, ForkName, ForkVersionDeserialize, LightClientHeader, Slot, use crate::test_utils::TestRandom; use crate::{ light_client_update::*, ChainSpec, LightClientHeaderAltair, LightClientHeaderCapella, - LightClientHeaderDeneb, LightClientHeaderElectra, SignedBeaconBlock, + LightClientHeaderDeneb, LightClientHeaderElectra, SignedBlindedBeaconBlock, }; use derivative::Derivative; use serde::{Deserialize, Deserializer, Serialize}; @@ -63,7 +63,7 @@ pub struct LightClientOptimisticUpdate { impl LightClientOptimisticUpdate { pub fn new( - attested_block: &SignedBeaconBlock, + attested_block: &SignedBlindedBeaconBlock, sync_aggregate: SyncAggregate, signature_slot: Slot, chain_spec: &ChainSpec, diff --git a/consensus/types/src/light_client_update.rs b/consensus/types/src/light_client_update.rs index 210fa0eeeb..8a3eaff487 100644 --- a/consensus/types/src/light_client_update.rs +++ b/consensus/types/src/light_client_update.rs @@ -3,7 +3,7 @@ use crate::light_client_header::LightClientHeaderElectra; use crate::{ beacon_state, test_utils::TestRandom, BeaconBlock, BeaconBlockHeader, BeaconState, ChainSpec, ForkName, ForkVersionDeserialize, LightClientHeaderAltair, LightClientHeaderCapella, - LightClientHeaderDeneb, SignedBeaconBlock, + LightClientHeaderDeneb, SignedBlindedBeaconBlock, }; use derivative::Derivative; use 
safe_arith::ArithError; @@ -156,8 +156,8 @@ impl LightClientUpdate { beacon_state: BeaconState, block: BeaconBlock, attested_state: &mut BeaconState, - attested_block: &SignedBeaconBlock, - finalized_block: &SignedBeaconBlock, + attested_block: &SignedBlindedBeaconBlock, + finalized_block: &SignedBlindedBeaconBlock, chain_spec: &ChainSpec, ) -> Result { let sync_aggregate = block.body().sync_aggregate()?; From 9b3b73015925a84fbb004f63516d28e45da675ce Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Tue, 30 Jul 2024 23:48:28 +1000 Subject: [PATCH 15/43] Avoid acquiring another read lock while holding one to avoid potential deadlock (#6200) * Avoid acquiring another read lock to avoid potential deadlock. --- beacon_node/eth1/src/service.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/beacon_node/eth1/src/service.rs b/beacon_node/eth1/src/service.rs index 9cc1da1382..e5d60fac49 100644 --- a/beacon_node/eth1/src/service.rs +++ b/beacon_node/eth1/src/service.rs @@ -1129,7 +1129,7 @@ impl Service { Ok(BlockCacheUpdateOutcome { blocks_imported, - head_block_number: self.inner.block_cache.read().highest_block_number(), + head_block_number: block_cache.highest_block_number(), }) } } From d9f8b13e36dd1612358657e267cc0b0ea6fd5173 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Thu, 1 Aug 2024 01:26:26 +0100 Subject: [PATCH 16/43] remove no longer required #[allow(clippy::mutable_key_type)] (#6210) * remove no longer required clippy::mutable_key_type lint --- beacon_node/lighthouse_network/src/lib.rs | 1 - beacon_node/lighthouse_network/src/peer_manager/mod.rs | 2 -- beacon_node/network/src/lib.rs | 3 --- 3 files changed, 6 deletions(-) diff --git a/beacon_node/lighthouse_network/src/lib.rs b/beacon_node/lighthouse_network/src/lib.rs index 0b827164fc..5c12290b97 100644 --- a/beacon_node/lighthouse_network/src/lib.rs +++ b/beacon_node/lighthouse_network/src/lib.rs @@ -5,7 +5,6 @@ mod config; pub mod service; 
-#[allow(clippy::mutable_key_type)] // PeerId in hashmaps are no longer permitted by clippy pub mod discovery; pub mod listen_addr; pub mod metrics; diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index ea3b51092e..4d3da0c8e4 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -23,7 +23,6 @@ use types::{EthSpec, SyncSubnetId}; pub use libp2p::core::Multiaddr; pub use libp2p::identity::Keypair; -#[allow(clippy::mutable_key_type)] // PeerId in hashmaps are no longer permitted by clippy pub mod peerdb; use crate::peer_manager::peerdb::client::ClientKind; @@ -320,7 +319,6 @@ impl PeerManager { /// returned here. /// /// This function decides whether or not to dial these peers. - #[allow(clippy::mutable_key_type)] pub fn peers_discovered(&mut self, results: HashMap>) { let mut to_dial_peers = 0; let connected_or_dialing = self.network_globals.connected_or_dialing_peers(); diff --git a/beacon_node/network/src/lib.rs b/beacon_node/network/src/lib.rs index 1149e6e6e3..13a2569b75 100644 --- a/beacon_node/network/src/lib.rs +++ b/beacon_node/network/src/lib.rs @@ -1,9 +1,7 @@ /// This crate provides the network server for Lighthouse. 
pub mod error; -#[allow(clippy::mutable_key_type)] // PeerId in hashmaps are no longer permitted by clippy pub mod service; -#[allow(clippy::mutable_key_type)] // PeerId in hashmaps are no longer permitted by clippy mod metrics; mod nat; mod network_beacon_processor; @@ -11,7 +9,6 @@ mod persisted_dht; mod router; mod status; mod subnet_service; -#[allow(clippy::mutable_key_type)] // PeerId in hashmaps are no longer permitted by clippy mod sync; pub use lighthouse_network::NetworkConfig; From 0bb2386ff59e41de5dcbdf4bb3b6d5269636bc5d Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 1 Aug 2024 16:46:37 +1000 Subject: [PATCH 17/43] Work around UB in LMDB bindings (#6211) * Work around UB in LMDB bindings --- slasher/src/database/lmdb_impl.rs | 6 +++++- slasher/tests/random.rs | 5 +++++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/slasher/src/database/lmdb_impl.rs b/slasher/src/database/lmdb_impl.rs index 20d89a36fb..74342968cf 100644 --- a/slasher/src/database/lmdb_impl.rs +++ b/slasher/src/database/lmdb_impl.rs @@ -165,8 +165,12 @@ impl<'env> Cursor<'env> { } pub fn get_current(&mut self) -> Result, Value<'env>)>, Error> { + // FIXME: lmdb has an extremely broken API which can mutate the SHARED REFERENCE + // `value` after `get_current` is called. We need to convert it to a Vec here in order + // to avoid `value` changing after another cursor operation. I think this represents a bug + // in the LMDB bindings, as shared references should be immutable. if let Some((Some(key), value)) = self.cursor.get(None, None, MDB_GET_CURRENT).optional()? 
{ - Ok(Some((Cow::Borrowed(key), Cow::Borrowed(value)))) + Ok(Some((Cow::Borrowed(key), Cow::Owned(value.to_vec())))) } else { Ok(None) } diff --git a/slasher/tests/random.rs b/slasher/tests/random.rs index 0aaaa63f65..0ba2986d44 100644 --- a/slasher/tests/random.rs +++ b/slasher/tests/random.rs @@ -235,3 +235,8 @@ fn no_crash_blocks_example1() { }, ); } + +#[test] +fn no_crash_aug_24() { + random_test(13519442335106054152, TestConfig::default()) +} From 0e96d4f1053d8a903140cc9e2de2d63b9d4dde0b Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Fri, 2 Aug 2024 16:58:37 +1000 Subject: [PATCH 18/43] Store changes to persist data columns (#6073) * Store changes to persist data columns. Co-authored-by: dapplion <35266934+dapplion@users.noreply.github.com> * Update to use `eip7594_fork_epoch` for data column slot in Store. * Fix formatting. * Merge branch 'unstable' into data-columns-store # Conflicts: # beacon_node/store/src/lib.rs # consensus/types/src/chain_spec.rs * Minor refactor. * Merge branch 'unstable' into data-columns-store # Conflicts: # beacon_node/store/src/metrics.rs * Init data colum info at PeerDAS epoch instead of Deneb fork epoch. Address review comments. * Remove Deneb-related comments --- beacon_node/store/src/errors.rs | 2 + beacon_node/store/src/hot_cold_store.rs | 278 ++++++++++++++++++++++-- beacon_node/store/src/leveldb_store.rs | 4 + beacon_node/store/src/lib.rs | 33 ++- beacon_node/store/src/memory_store.rs | 14 +- beacon_node/store/src/metadata.rs | 28 +++ beacon_node/store/src/metrics.rs | 7 + 7 files changed, 348 insertions(+), 18 deletions(-) diff --git a/beacon_node/store/src/errors.rs b/beacon_node/store/src/errors.rs index 91e6a920ba..e3b2d327b0 100644 --- a/beacon_node/store/src/errors.rs +++ b/beacon_node/store/src/errors.rs @@ -27,6 +27,8 @@ pub enum Error { AnchorInfoConcurrentMutation, /// The store's `blob_info` was mutated concurrently, the latest modification wasn't applied. 
BlobInfoConcurrentMutation, + /// The store's `data_column_info` was mutated concurrently, the latest modification wasn't applied. + DataColumnInfoConcurrentMutation, /// The block or state is unavailable due to weak subjectivity sync. HistoryUnavailable, /// State reconstruction cannot commence because not all historic blocks are known. diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 9c247c983a..8b144c1be9 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -12,12 +12,13 @@ use crate::leveldb_store::BytesKey; use crate::leveldb_store::LevelDB; use crate::memory_store::MemoryStore; use crate::metadata::{ - AnchorInfo, BlobInfo, CompactionTimestamp, PruningCheckpoint, SchemaVersion, ANCHOR_INFO_KEY, - BLOB_INFO_KEY, COMPACTION_TIMESTAMP_KEY, CONFIG_KEY, CURRENT_SCHEMA_VERSION, - PRUNING_CHECKPOINT_KEY, SCHEMA_VERSION_KEY, SPLIT_KEY, STATE_UPPER_LIMIT_NO_RETAIN, + AnchorInfo, BlobInfo, CompactionTimestamp, DataColumnInfo, PruningCheckpoint, SchemaVersion, + ANCHOR_INFO_KEY, BLOB_INFO_KEY, COMPACTION_TIMESTAMP_KEY, CONFIG_KEY, CURRENT_SCHEMA_VERSION, + DATA_COLUMN_INFO_KEY, PRUNING_CHECKPOINT_KEY, SCHEMA_VERSION_KEY, SPLIT_KEY, + STATE_UPPER_LIMIT_NO_RETAIN, }; -use crate::metrics; use crate::state_cache::{PutStateOutcome, StateCache}; +use crate::{get_data_column_key, metrics, parse_data_column_key}; use crate::{ get_key_for_col, ChunkWriter, DBColumn, DatabaseBlock, Error, ItemStore, KeyValueStoreOp, PartialBeaconState, StoreItem, StoreOp, @@ -35,11 +36,13 @@ use state_processing::{ SlotProcessingError, }; use std::cmp::min; +use std::collections::HashMap; use std::marker::PhantomData; use std::num::NonZeroUsize; use std::path::Path; use std::sync::Arc; use std::time::Duration; +use types::data_column_sidecar::{ColumnIndex, DataColumnSidecar, DataColumnSidecarList}; use types::*; /// On-disk database that stores finalized states efficiently. 
@@ -57,6 +60,8 @@ pub struct HotColdDB, Cold: ItemStore> { anchor_info: RwLock>, /// The starting slots for the range of blobs stored in the database. blob_info: RwLock, + /// The starting slots for the range of data columns stored in the database. + data_column_info: RwLock, pub(crate) config: StoreConfig, /// Cold database containing compact historical data. pub cold_db: Cold, @@ -86,6 +91,7 @@ pub struct HotColdDB, Cold: ItemStore> { struct BlockCache { block_cache: LruCache>, blob_cache: LruCache>, + data_column_cache: LruCache>>>, } impl BlockCache { @@ -93,6 +99,7 @@ impl BlockCache { Self { block_cache: LruCache::new(size), blob_cache: LruCache::new(size), + data_column_cache: LruCache::new(size), } } pub fn put_block(&mut self, block_root: Hash256, block: SignedBeaconBlock) { @@ -101,12 +108,26 @@ impl BlockCache { pub fn put_blobs(&mut self, block_root: Hash256, blobs: BlobSidecarList) { self.blob_cache.put(block_root, blobs); } + pub fn put_data_column(&mut self, block_root: Hash256, data_column: Arc>) { + self.data_column_cache + .get_or_insert_mut(block_root, Default::default) + .insert(data_column.index, data_column); + } pub fn get_block<'a>(&'a mut self, block_root: &Hash256) -> Option<&'a SignedBeaconBlock> { self.block_cache.get(block_root) } pub fn get_blobs<'a>(&'a mut self, block_root: &Hash256) -> Option<&'a BlobSidecarList> { self.blob_cache.get(block_root) } + pub fn get_data_column<'a>( + &'a mut self, + block_root: &Hash256, + column_index: &ColumnIndex, + ) -> Option<&'a Arc>> { + self.data_column_cache + .get(block_root) + .and_then(|map| map.get(column_index)) + } pub fn delete_block(&mut self, block_root: &Hash256) { let _ = self.block_cache.pop(block_root); } @@ -180,6 +201,7 @@ impl HotColdDB, MemoryStore> { split: RwLock::new(Split::default()), anchor_info: RwLock::new(None), blob_info: RwLock::new(BlobInfo::default()), + data_column_info: RwLock::new(DataColumnInfo::default()), cold_db: MemoryStore::open(), blobs_db: 
MemoryStore::open(), hot_db: MemoryStore::open(), @@ -216,6 +238,7 @@ impl HotColdDB, LevelDB> { split: RwLock::new(Split::default()), anchor_info: RwLock::new(None), blob_info: RwLock::new(BlobInfo::default()), + data_column_info: RwLock::new(DataColumnInfo::default()), cold_db: LevelDB::open(cold_path)?, blobs_db: LevelDB::open(blobs_db_path)?, hot_db: LevelDB::open(hot_path)?, @@ -294,11 +317,39 @@ impl HotColdDB, LevelDB> { }, }; db.compare_and_set_blob_info_with_write(<_>::default(), new_blob_info.clone())?; + + let data_column_info = db.load_data_column_info()?; + let eip7594_fork_slot = db + .spec + .eip7594_fork_epoch + .map(|epoch| epoch.start_slot(E::slots_per_epoch())); + let new_data_column_info = match &data_column_info { + Some(data_column_info) => { + // Set the oldest data column slot to the fork slot if it is not yet set. + let oldest_data_column_slot = data_column_info + .oldest_data_column_slot + .or(eip7594_fork_slot); + DataColumnInfo { + oldest_data_column_slot, + } + } + // First start. + None => DataColumnInfo { + // Set the oldest data column slot to the fork slot if it is not yet set. + oldest_data_column_slot: eip7594_fork_slot, + }, + }; + db.compare_and_set_data_column_info_with_write( + <_>::default(), + new_data_column_info.clone(), + )?; + info!( db.log, "Blob DB initialized"; "path" => ?blobs_db_path, "oldest_blob_slot" => ?new_blob_info.oldest_blob_slot, + "oldest_data_column_slot" => ?new_data_column_info.oldest_data_column_slot, ); // Ensure that the schema version of the on-disk database matches the software. 
@@ -626,6 +677,24 @@ impl, Cold: ItemStore> HotColdDB ops.push(KeyValueStoreOp::PutKeyValue(db_key, blobs.as_ssz_bytes())); } + pub fn data_columns_as_kv_store_ops( + &self, + block_root: &Hash256, + data_columns: DataColumnSidecarList, + ops: &mut Vec, + ) { + for data_column in data_columns { + let db_key = get_key_for_col( + DBColumn::BeaconDataColumn.into(), + &get_data_column_key(block_root, &data_column.index), + ); + ops.push(KeyValueStoreOp::PutKeyValue( + db_key, + data_column.as_ssz_bytes(), + )); + } + } + pub fn put_state_summary( &self, state_root: &Hash256, @@ -909,6 +978,14 @@ impl, Cold: ItemStore> HotColdDB self.blobs_as_kv_store_ops(&block_root, blobs, &mut key_value_batch); } + StoreOp::PutDataColumns(block_root, data_columns) => { + self.data_columns_as_kv_store_ops( + &block_root, + data_columns, + &mut key_value_batch, + ); + } + StoreOp::PutStateSummary(state_root, summary) => { key_value_batch.push(summary.as_kv_store_op(state_root)); } @@ -933,6 +1010,16 @@ impl, Cold: ItemStore> HotColdDB key_value_batch.push(KeyValueStoreOp::DeleteKey(key)); } + StoreOp::DeleteDataColumns(block_root, column_indices) => { + for index in column_indices { + let key = get_key_for_col( + DBColumn::BeaconDataColumn.into(), + &get_data_column_key(&block_root, &index), + ); + key_value_batch.push(KeyValueStoreOp::DeleteKey(key)); + } + } + StoreOp::DeleteState(state_root, slot) => { let state_summary_key = get_key_for_col(DBColumn::BeaconStateSummary.into(), state_root.as_bytes()); @@ -963,9 +1050,10 @@ impl, Cold: ItemStore> HotColdDB batch: Vec>, ) -> Result<(), Error> { let mut blobs_to_delete = Vec::new(); + let mut data_columns_to_delete = Vec::new(); let (blobs_ops, hot_db_ops): (Vec>, Vec>) = batch.into_iter().partition(|store_op| match store_op { - StoreOp::PutBlobs(_, _) => true, + StoreOp::PutBlobs(_, _) | StoreOp::PutDataColumns(_, _) => true, StoreOp::DeleteBlobs(block_root) => { match self.get_blobs(block_root) { Ok(Some(blob_sidecar_list)) => { @@ 
-982,6 +1070,31 @@ impl, Cold: ItemStore> HotColdDB } true } + StoreOp::DeleteDataColumns(block_root, indices) => { + match indices + .iter() + .map(|index| self.get_data_column(block_root, index)) + .collect::, _>>() + { + Ok(data_column_sidecar_list_opt) => { + let data_column_sidecar_list = data_column_sidecar_list_opt + .into_iter() + .flatten() + .collect::>(); + // Must push the same number of items as StoreOp::DeleteDataColumns items to + // prevent a `HotColdDBError::Rollback` error below in case of rollback + data_columns_to_delete.push((*block_root, data_column_sidecar_list)); + } + Err(e) => { + error!( + self.log, "Error getting data columns"; + "block_root" => %block_root, + "error" => ?e + ); + } + } + true + } StoreOp::PutBlock(_, _) | StoreOp::DeleteBlock(_) => false, _ => false, }); @@ -1013,10 +1126,20 @@ impl, Cold: ItemStore> HotColdDB for op in blob_cache_ops.iter_mut() { let reverse_op = match op { StoreOp::PutBlobs(block_root, _) => StoreOp::DeleteBlobs(*block_root), + StoreOp::PutDataColumns(block_root, data_columns) => { + let indices = data_columns.iter().map(|c| c.index).collect(); + StoreOp::DeleteDataColumns(*block_root, indices) + } StoreOp::DeleteBlobs(_) => match blobs_to_delete.pop() { Some((block_root, blobs)) => StoreOp::PutBlobs(block_root, blobs), None => return Err(HotColdDBError::Rollback.into()), }, + StoreOp::DeleteDataColumns(_, _) => match data_columns_to_delete.pop() { + Some((block_root, data_columns)) => { + StoreOp::PutDataColumns(block_root, data_columns) + } + None => return Err(HotColdDBError::Rollback.into()), + }, _ => return Err(HotColdDBError::Rollback.into()), }; *op = reverse_op; @@ -1034,6 +1157,8 @@ impl, Cold: ItemStore> HotColdDB StoreOp::PutBlobs(_, _) => (), + StoreOp::PutDataColumns(_, _) => (), + StoreOp::PutState(_, _) => (), StoreOp::PutStateSummary(_, _) => (), @@ -1053,6 +1178,8 @@ impl, Cold: ItemStore> HotColdDB StoreOp::DeleteBlobs(_) => (), + StoreOp::DeleteDataColumns(_, _) => (), + 
StoreOp::DeleteExecutionPayload(_) => (), StoreOp::KeyValueOp(_) => (), @@ -1552,6 +1679,45 @@ impl, Cold: ItemStore> HotColdDB } } + /// Fetch all keys in the data_column column with prefix `block_root` + pub fn get_data_column_keys(&self, block_root: Hash256) -> Result, Error> { + self.blobs_db + .iter_raw_keys(DBColumn::BeaconDataColumn, block_root.as_bytes()) + .map(|key| key.and_then(|key| parse_data_column_key(key).map(|key| key.1))) + .collect() + } + + /// Fetch a single data_column for a given block from the store. + pub fn get_data_column( + &self, + block_root: &Hash256, + column_index: &ColumnIndex, + ) -> Result>>, Error> { + // Check the cache. + if let Some(data_column) = self + .block_cache + .lock() + .get_data_column(block_root, column_index) + { + metrics::inc_counter(&metrics::BEACON_DATA_COLUMNS_CACHE_HIT_COUNT); + return Ok(Some(data_column.clone())); + } + + match self.blobs_db.get_bytes( + DBColumn::BeaconDataColumn.into(), + &get_data_column_key(block_root, column_index), + )? { + Some(ref data_column_bytes) => { + let data_column = Arc::new(DataColumnSidecar::from_ssz_bytes(data_column_bytes)?); + self.block_cache + .lock() + .put_data_column(*block_root, data_column.clone()); + Ok(Some(data_column)) + } + None => Ok(None), + } + } + /// Get a reference to the `ChainSpec` used by the database. pub fn get_chain_spec(&self) -> &ChainSpec { &self.spec @@ -1748,6 +1914,24 @@ impl, Cold: ItemStore> HotColdDB self.blob_info.read_recursive().clone() } + /// Initialize the `DataColumnInfo` when starting from genesis or a checkpoint. 
+ pub fn init_data_column_info(&self, anchor_slot: Slot) -> Result { + let oldest_data_column_slot = self.spec.eip7594_fork_epoch.map(|fork_epoch| { + std::cmp::max(anchor_slot, fork_epoch.start_slot(E::slots_per_epoch())) + }); + let data_column_info = DataColumnInfo { + oldest_data_column_slot, + }; + self.compare_and_set_data_column_info(self.get_data_column_info(), data_column_info) + } + + /// Get a clone of the store's data column info. + /// + /// To do mutations, use `compare_and_set_data_column_info`. + pub fn get_data_column_info(&self) -> DataColumnInfo { + self.data_column_info.read_recursive().clone() + } + /// Atomically update the blob info from `prev_value` to `new_value`. /// /// Return a `KeyValueStoreOp` which should be written to disk, possibly atomically with other @@ -1793,6 +1977,54 @@ impl, Cold: ItemStore> HotColdDB blob_info.as_kv_store_op(BLOB_INFO_KEY) } + /// Atomically update the data column info from `prev_value` to `new_value`. + /// + /// Return a `KeyValueStoreOp` which should be written to disk, possibly atomically with other + /// values. + /// + /// Return a `DataColumnInfoConcurrentMutation` error if the `prev_value` provided + /// is not correct. + pub fn compare_and_set_data_column_info( + &self, + prev_value: DataColumnInfo, + new_value: DataColumnInfo, + ) -> Result { + let mut data_column_info = self.data_column_info.write(); + if *data_column_info == prev_value { + let kv_op = self.store_data_column_info_in_batch(&new_value); + *data_column_info = new_value; + Ok(kv_op) + } else { + Err(Error::DataColumnInfoConcurrentMutation) + } + } + + /// As for `compare_and_set_data_column_info`, but also writes the data column info to disk immediately.
+ pub fn compare_and_set_data_column_info_with_write( + &self, + prev_value: DataColumnInfo, + new_value: DataColumnInfo, + ) -> Result<(), Error> { + let kv_store_op = self.compare_and_set_data_column_info(prev_value, new_value)?; + self.hot_db.do_atomically(vec![kv_store_op]) + } + + /// Load the data column info from disk, but do not set `self.data_column_info`. + fn load_data_column_info(&self) -> Result, Error> { + self.hot_db.get(&DATA_COLUMN_INFO_KEY) + } + + /// Store the given `data_column_info` to disk. + /// + /// The argument is intended to be `self.data_column_info`, but is passed manually to avoid issues + /// with recursive locking. + fn store_data_column_info_in_batch( + &self, + data_column_info: &DataColumnInfo, + ) -> KeyValueStoreOp { + data_column_info.as_kv_store_op(DATA_COLUMN_INFO_KEY) + } + /// Return the slot-window describing the available historic states. /// /// Returns `(lower_limit, upper_limit)`. @@ -2285,15 +2517,33 @@ impl, Cold: ItemStore> HotColdDB } }; - if Some(block_root) != last_pruned_block_root && self.blobs_exist(&block_root)? { - trace!( - self.log, - "Pruning blobs of block"; - "slot" => slot, - "block_root" => ?block_root, - ); - last_pruned_block_root = Some(block_root); - ops.push(StoreOp::DeleteBlobs(block_root)); + if Some(block_root) != last_pruned_block_root { + if self + .spec + .is_peer_das_enabled_for_epoch(slot.epoch(E::slots_per_epoch())) + { + // data columns + let indices = self.get_data_column_keys(block_root)?; + if !indices.is_empty() { + trace!( + self.log, + "Pruning data columns of block"; + "slot" => slot, + "block_root" => ?block_root, + ); + last_pruned_block_root = Some(block_root); + ops.push(StoreOp::DeleteDataColumns(block_root, indices)); + } + } else if self.blobs_exist(&block_root)?
{ + trace!( + self.log, + "Pruning blobs of block"; + "slot" => slot, + "block_root" => ?block_root, + ); + last_pruned_block_root = Some(block_root); + ops.push(StoreOp::DeleteBlobs(block_root)); + } } if slot >= end_slot { diff --git a/beacon_node/store/src/leveldb_store.rs b/beacon_node/store/src/leveldb_store.rs index b28bf689f8..32ff942ddc 100644 --- a/beacon_node/store/src/leveldb_store.rs +++ b/beacon_node/store/src/leveldb_store.rs @@ -270,6 +270,10 @@ impl db_key::Key for BytesKey { } impl BytesKey { + pub fn starts_with(&self, prefix: &Self) -> bool { + self.key.starts_with(&prefix.key) + } + /// Return `true` iff this `BytesKey` was created with the given `column`. pub fn matches_column(&self, column: DBColumn) -> bool { self.key.starts_with(column.as_bytes()) diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs index 3b6d9ddff6..1f8cc8ca01 100644 --- a/beacon_node/store/src/lib.rs +++ b/beacon_node/store/src/lib.rs @@ -44,6 +44,8 @@ use std::sync::Arc; use strum::{EnumString, IntoStaticStr}; pub use types::*; +const DATA_COLUMN_DB_KEY_SIZE: usize = 32 + 8; + pub type ColumnIter<'a, K> = Box), Error>> + 'a>; pub type ColumnKeyIter<'a, K> = Box> + 'a>; @@ -109,9 +111,7 @@ pub trait KeyValueStore: Sync + Send + Sized + 'static { Box::new(std::iter::empty()) } - fn iter_raw_keys(&self, _column: DBColumn, _prefix: &[u8]) -> RawKeyIter { - Box::new(std::iter::empty()) - } + fn iter_raw_keys(&self, column: DBColumn, prefix: &[u8]) -> RawKeyIter; /// Iterate through all keys in a particular column. 
fn iter_column_keys(&self, column: DBColumn) -> ColumnKeyIter; @@ -150,6 +150,28 @@ pub fn get_col_from_key(key: &[u8]) -> Option { String::from_utf8(key[0..3].to_vec()).ok() } +pub fn get_data_column_key(block_root: &Hash256, column_index: &ColumnIndex) -> Vec { + let mut result = block_root.as_bytes().to_vec(); + result.extend_from_slice(&column_index.to_le_bytes()); + result +} + +pub fn parse_data_column_key(data: Vec) -> Result<(Hash256, ColumnIndex), Error> { + if data.len() != DBColumn::BeaconDataColumn.key_size() { + return Err(Error::InvalidKey); + } + // split_at panics if 32 < 40 which will never happen after the length check above + let (block_root_bytes, column_index_bytes) = data.split_at(32); + let block_root = Hash256::from_slice(block_root_bytes); + // column_index_bytes is asserted to be 8 bytes after the length check above + let column_index = ColumnIndex::from_le_bytes( + column_index_bytes + .try_into() + .map_err(|_| Error::InvalidKey)?, + ); + Ok((block_root, column_index)) +} + #[must_use] #[derive(Clone)] pub enum KeyValueStoreOp { @@ -210,11 +232,13 @@ pub enum StoreOp<'a, E: EthSpec> { PutBlock(Hash256, Arc>), PutState(Hash256, &'a BeaconState), PutBlobs(Hash256, BlobSidecarList), + PutDataColumns(Hash256, DataColumnSidecarList), PutStateSummary(Hash256, HotStateSummary), PutStateTemporaryFlag(Hash256), DeleteStateTemporaryFlag(Hash256), DeleteBlock(Hash256), DeleteBlobs(Hash256), + DeleteDataColumns(Hash256, Vec), DeleteState(Hash256, Option), DeleteExecutionPayload(Hash256), KeyValueOp(KeyValueStoreOp), @@ -230,6 +254,8 @@ pub enum DBColumn { BeaconBlock, #[strum(serialize = "blb")] BeaconBlob, + #[strum(serialize = "bdc")] + BeaconDataColumn, /// For full `BeaconState`s in the hot database (finalized or fork-boundary states). 
#[strum(serialize = "ste")] BeaconState, @@ -317,6 +343,7 @@ impl DBColumn { | Self::BeaconHistoricalRoots | Self::BeaconHistoricalSummaries | Self::BeaconRandaoMixes => 8, + Self::BeaconDataColumn => DATA_COLUMN_DB_KEY_SIZE, } } } diff --git a/beacon_node/store/src/memory_store.rs b/beacon_node/store/src/memory_store.rs index 302d2c2add..4c7bfdf10f 100644 --- a/beacon_node/store/src/memory_store.rs +++ b/beacon_node/store/src/memory_store.rs @@ -1,6 +1,6 @@ use crate::{ get_key_for_col, leveldb_store::BytesKey, ColumnIter, ColumnKeyIter, DBColumn, Error, - ItemStore, Key, KeyValueStore, KeyValueStoreOp, + ItemStore, Key, KeyValueStore, KeyValueStoreOp, RawKeyIter, }; use parking_lot::{Mutex, MutexGuard, RwLock}; use std::collections::BTreeMap; @@ -100,6 +100,18 @@ impl KeyValueStore for MemoryStore { })) } + fn iter_raw_keys(&self, column: DBColumn, prefix: &[u8]) -> RawKeyIter { + let start_key = BytesKey::from_vec(get_key_for_col(column.as_str(), prefix)); + let keys = self + .db + .read() + .range(start_key.clone()..) + .take_while(|(k, _)| k.starts_with(&start_key)) + .filter_map(|(k, _)| k.remove_column_variable(column).map(|k| k.to_vec())) + .collect::>(); + Box::new(keys.into_iter().map(Ok)) + } + fn iter_column_keys(&self, column: DBColumn) -> ColumnKeyIter { Box::new(self.iter_column(column).map(|res| res.map(|(k, _)| k))) } diff --git a/beacon_node/store/src/metadata.rs b/beacon_node/store/src/metadata.rs index a22dc4aab4..0c93251fe2 100644 --- a/beacon_node/store/src/metadata.rs +++ b/beacon_node/store/src/metadata.rs @@ -16,6 +16,7 @@ pub const PRUNING_CHECKPOINT_KEY: Hash256 = Hash256::repeat_byte(3); pub const COMPACTION_TIMESTAMP_KEY: Hash256 = Hash256::repeat_byte(4); pub const ANCHOR_INFO_KEY: Hash256 = Hash256::repeat_byte(5); pub const BLOB_INFO_KEY: Hash256 = Hash256::repeat_byte(6); +pub const DATA_COLUMN_INFO_KEY: Hash256 = Hash256::repeat_byte(7); /// State upper limit value used to indicate that a node is not storing historic states. 
pub const STATE_UPPER_LIMIT_NO_RETAIN: Slot = Slot::new(u64::MAX); @@ -152,3 +153,30 @@ impl StoreItem for BlobInfo { Ok(Self::from_ssz_bytes(bytes)?) } } + +/// Database parameters relevant to data column sync. +#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode, Serialize, Deserialize, Default)] +pub struct DataColumnInfo { + /// The slot after which data columns are or *will be* available (>=). + /// + /// If this slot is in the future, then it is the first slot of the EIP-7594 fork, from which + /// data columns will be available. + /// + /// If the `oldest_data_column_slot` is `None` then this means that the EIP-7594 fork epoch is + /// not yet known. + pub oldest_data_column_slot: Option, +} + +impl StoreItem for DataColumnInfo { + fn db_column() -> DBColumn { + DBColumn::BeaconMeta + } + + fn as_store_bytes(&self) -> Vec { + self.as_ssz_bytes() + } + + fn from_store_bytes(bytes: &[u8]) -> Result { + Ok(Self::from_ssz_bytes(bytes)?) + } +} diff --git a/beacon_node/store/src/metrics.rs b/beacon_node/store/src/metrics.rs index af7b5e93e8..902c440be8 100644 --- a/beacon_node/store/src/metrics.rs +++ b/beacon_node/store/src/metrics.rs @@ -151,6 +151,13 @@ pub static BEACON_BLOBS_CACHE_HIT_COUNT: LazyLock> = LazyLock "Number of hits to the store's blob cache", ) }); +pub static BEACON_DATA_COLUMNS_CACHE_HIT_COUNT: LazyLock> = + LazyLock::new(|| { + try_create_int_counter( + "store_beacon_data_columns_cache_hit_total", + "Number of hits to the store's data column cache", + ) + }); /// Updates the global metrics registry with store-related information. 
pub fn scrape_for_metrics(db_path: &Path, freezer_db_path: &Path) { From acd31511843db3cce03551624a3f95447b4339ae Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Fri, 2 Aug 2024 22:42:11 +1000 Subject: [PATCH 19/43] Import gossip data column into data availability checker (#6197) * Import gossip data column into data availability checker --- beacon_node/beacon_chain/src/beacon_chain.rs | 36 ++++++++----- .../src/block_verification_types.rs | 3 +- .../src/data_availability_checker.rs | 33 +++++++++--- .../overflow_lru_cache.rs | 51 ++++++++++++------- .../src/data_column_verification.rs | 19 +++++++ .../beacon_chain/src/early_attester_cache.rs | 13 ++++- .../beacon_chain/src/historical_blocks.rs | 10 +++- beacon_node/beacon_chain/tests/store_tests.rs | 4 +- 8 files changed, 128 insertions(+), 41 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 4bc98a98da..8cd991cc10 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -2959,9 +2959,9 @@ impl BeaconChain { self: &Arc, data_columns: Vec>, ) -> Result> { - let Ok(block_root) = data_columns + let Ok((slot, block_root)) = data_columns .iter() - .map(|c| c.block_root()) + .map(|c| (c.slot(), c.block_root())) .unique() .exactly_one() else { @@ -2981,7 +2981,7 @@ impl BeaconChain { } let r = self - .check_gossip_data_columns_availability_and_import(data_columns) + .check_gossip_data_columns_availability_and_import(slot, block_root, data_columns) .await; self.remove_notified_custody_columns(&block_root, r) } @@ -3298,6 +3298,8 @@ impl BeaconChain { /// if so, otherwise caches the data column in the data availability checker. 
async fn check_gossip_data_columns_availability_and_import( self: &Arc, + slot: Slot, + block_root: Hash256, data_columns: Vec>, ) -> Result> { if let Some(slasher) = self.slasher.as_ref() { @@ -3306,15 +3308,11 @@ impl BeaconChain { } } - let Ok(slot) = data_columns.iter().map(|c| c.slot()).unique().exactly_one() else { - return Err(BlockError::InternalError( - "Columns for the same block should have matching slot".to_string(), - )); - }; - - let availability = self - .data_availability_checker - .put_gossip_data_columns(data_columns)?; + let availability = self.data_availability_checker.put_gossip_data_columns( + slot, + block_root, + data_columns, + )?; self.process_availability(slot, availability).await } @@ -3629,7 +3627,7 @@ impl BeaconChain { // If the write fails, revert fork choice to the version from disk, else we can // end up with blocks in fork choice that are missing from disk. // See https://github.com/sigp/lighthouse/issues/2028 - let (_, signed_block, blobs) = signed_block.deconstruct(); + let (_, signed_block, blobs, data_columns) = signed_block.deconstruct(); let block = signed_block.message(); ops.extend( confirmed_state_roots @@ -3650,6 +3648,18 @@ impl BeaconChain { } } + if let Some(_data_columns) = data_columns { + // TODO(das): depends on https://github.com/sigp/lighthouse/pull/6073 + // if !data_columns.is_empty() { + // debug!( + // self.log, "Writing data_columns to store"; + // "block_root" => %block_root, + // "count" => data_columns.len(), + // ); + // ops.push(StoreOp::PutDataColumns(block_root, data_columns)); + // } + } + let txn_lock = self.store.hot_db.begin_rw_transaction(); if let Err(e) = self.store.do_atomically_with_block_and_blobs_cache(ops) { diff --git a/beacon_node/beacon_chain/src/block_verification_types.rs b/beacon_node/beacon_chain/src/block_verification_types.rs index 70f1e99ef7..426c41bfea 100644 --- a/beacon_node/beacon_chain/src/block_verification_types.rs +++ 
b/beacon_node/beacon_chain/src/block_verification_types.rs @@ -517,7 +517,8 @@ impl AsBlock for AvailableBlock { } fn into_rpc_block(self) -> RpcBlock { - let (block_root, block, blobs_opt) = self.deconstruct(); + // TODO(das): rpc data columns to be merged from `das` branch + let (block_root, block, blobs_opt, _data_columns_opt) = self.deconstruct(); // Circumvent the constructor here, because an Available block will have already had // consistency checks performed. let inner = match blobs_opt { diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs index fdba60a69a..ce5995a558 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -14,13 +14,16 @@ use std::sync::Arc; use std::time::Duration; use task_executor::TaskExecutor; use types::blob_sidecar::{BlobIdentifier, BlobSidecar, FixedBlobSidecarList}; -use types::{BlobSidecarList, ChainSpec, Epoch, EthSpec, Hash256, SignedBeaconBlock}; +use types::{ + BlobSidecarList, ChainSpec, DataColumnSidecarList, Epoch, EthSpec, Hash256, SignedBeaconBlock, + Slot, +}; mod error; mod overflow_lru_cache; mod state_lru_cache; -use crate::data_column_verification::GossipVerifiedDataColumn; +use crate::data_column_verification::{GossipVerifiedDataColumn, KzgVerifiedCustodyDataColumn}; pub use error::{Error as AvailabilityCheckError, ErrorCategory as AvailabilityCheckErrorCategory}; use types::non_zero_usize::new_non_zero_usize; @@ -191,10 +194,18 @@ impl DataAvailabilityChecker { pub fn put_gossip_data_columns( &self, - _gossip_data_columns: Vec>, + slot: Slot, + block_root: Hash256, + gossip_data_columns: Vec>, ) -> Result, AvailabilityCheckError> { - // TODO(das) to be implemented - Err(AvailabilityCheckError::Unexpected) + let epoch = slot.epoch(T::EthSpec::slots_per_epoch()); + let custody_columns = gossip_data_columns + .into_iter() + .map(|c| 
KzgVerifiedCustodyDataColumn::from_asserted_custody(c.into_inner())) + .collect::>(); + + self.availability_cache + .put_kzg_verified_data_columns(block_root, epoch, custody_columns) } /// Check if we have all the blobs for a block. Returns `Availability` which has information @@ -231,6 +242,7 @@ impl DataAvailabilityChecker { block_root, block, blobs: None, + data_columns: None, blobs_available_timestamp: None, })) } @@ -251,6 +263,7 @@ impl DataAvailabilityChecker { block_root, block, blobs: verified_blobs, + data_columns: None, blobs_available_timestamp: None, })) } @@ -297,6 +310,7 @@ impl DataAvailabilityChecker { block_root, block, blobs: None, + data_columns: None, blobs_available_timestamp: None, })) } @@ -312,6 +326,7 @@ impl DataAvailabilityChecker { block_root, block, blobs: verified_blobs, + data_columns: None, blobs_available_timestamp: None, })) } @@ -477,6 +492,7 @@ pub struct AvailableBlock { block_root: Hash256, block: Arc>, blobs: Option>, + data_columns: Option>, /// Timestamp at which this block first became available (UNIX timestamp, time since 1970). 
blobs_available_timestamp: Option, } @@ -486,11 +502,13 @@ impl AvailableBlock { block_root: Hash256, block: Arc>, blobs: Option>, + data_columns: Option>, ) -> Self { Self { block_root, block, blobs, + data_columns, blobs_available_timestamp: None, } } @@ -510,20 +528,23 @@ impl AvailableBlock { self.blobs_available_timestamp } + #[allow(clippy::type_complexity)] pub fn deconstruct( self, ) -> ( Hash256, Arc>, Option>, + Option>, ) { let AvailableBlock { block_root, block, blobs, + data_columns, blobs_available_timestamp: _, } = self; - (block_root, block, blobs) + (block_root, block, blobs, data_columns) } } diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index e7bb2034fc..6c9964bdf8 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -217,7 +217,11 @@ impl PendingComponents { /// /// WARNING: This function can potentially take a lot of time if the state needs to be /// reconstructed from disk. Ensure you are not holding any write locks while calling this. 
- pub fn make_available(self, recover: R) -> Result, AvailabilityCheckError> + pub fn make_available( + self, + block_import_requirement: BlockImportRequirement, + recover: R, + ) -> Result, AvailabilityCheckError> where R: FnOnce( DietAvailabilityPendingExecutedBlock, @@ -226,7 +230,7 @@ impl PendingComponents { let Self { block_root, verified_blobs, - verified_data_columns: _, + verified_data_columns, executed_block, } = self; @@ -239,17 +243,29 @@ impl PendingComponents { let Some(diet_executed_block) = executed_block else { return Err(AvailabilityCheckError::Unexpected); }; - let num_blobs_expected = diet_executed_block.num_blobs_expected(); - let Some(verified_blobs) = verified_blobs - .into_iter() - .cloned() - .map(|b| b.map(|b| b.to_blob())) - .take(num_blobs_expected) - .collect::>>() - else { - return Err(AvailabilityCheckError::Unexpected); + + let (blobs, data_columns) = match block_import_requirement { + BlockImportRequirement::AllBlobs => { + let num_blobs_expected = diet_executed_block.num_blobs_expected(); + let Some(verified_blobs) = verified_blobs + .into_iter() + .cloned() + .map(|b| b.map(|b| b.to_blob())) + .take(num_blobs_expected) + .collect::>>() + else { + return Err(AvailabilityCheckError::Unexpected); + }; + (Some(VariableList::new(verified_blobs)?), None) + } + BlockImportRequirement::CustodyColumns(_) => { + let verified_data_columns = verified_data_columns + .into_iter() + .map(|d| d.into_inner()) + .collect(); + (None, Some(verified_data_columns)) + } }; - let verified_blobs = VariableList::new(verified_blobs)?; let executed_block = recover(diet_executed_block)?; @@ -262,7 +278,8 @@ impl PendingComponents { let available_block = AvailableBlock { block_root, block, - blobs: Some(verified_blobs), + blobs, + data_columns, blobs_available_timestamp, }; Ok(Availability::Available(Box::new( @@ -404,7 +421,7 @@ impl DataAvailabilityCheckerInner { write_lock.put(block_root, pending_components.clone()); // No need to hold the write lock 
anymore drop(write_lock); - pending_components.make_available(|diet_block| { + pending_components.make_available(block_import_requirement, |diet_block| { self.state_cache.recover_pending_executed_block(diet_block) }) } else { @@ -413,7 +430,7 @@ impl DataAvailabilityCheckerInner { } } - // TODO(das): gossip and rpc code paths to be implemented. + // TODO(das): rpc code paths to be implemented. #[allow(dead_code)] pub fn put_kzg_verified_data_columns< I: IntoIterator>, @@ -439,7 +456,7 @@ impl DataAvailabilityCheckerInner { write_lock.put(block_root, pending_components.clone()); // No need to hold the write lock anymore drop(write_lock); - pending_components.make_available(|diet_block| { + pending_components.make_available(block_import_requirement, |diet_block| { self.state_cache.recover_pending_executed_block(diet_block) }) } else { @@ -478,7 +495,7 @@ impl DataAvailabilityCheckerInner { write_lock.put(block_root, pending_components.clone()); // No need to hold the write lock anymore drop(write_lock); - pending_components.make_available(|diet_block| { + pending_components.make_available(block_import_requirement, |diet_block| { self.state_cache.recover_pending_executed_block(diet_block) }) } else { diff --git a/beacon_node/beacon_chain/src/data_column_verification.rs b/beacon_node/beacon_chain/src/data_column_verification.rs index 53e83a8061..fa31d6f2e8 100644 --- a/beacon_node/beacon_chain/src/data_column_verification.rs +++ b/beacon_node/beacon_chain/src/data_column_verification.rs @@ -190,6 +190,10 @@ impl GossipVerifiedDataColumn { pub fn signed_block_header(&self) -> SignedBeaconBlockHeader { self.data_column.data.signed_block_header.clone() } + + pub fn into_inner(self) -> KzgVerifiedDataColumn { + self.data_column + } } /// Wrapper over a `DataColumnSidecar` for which we have completed kzg verification. 
@@ -204,6 +208,9 @@ impl KzgVerifiedDataColumn { pub fn new(data_column: Arc>, kzg: &Kzg) -> Result { verify_kzg_for_data_column(data_column, kzg) } + pub fn to_data_column(self) -> Arc> { + self.data + } pub fn as_data_column(&self) -> &DataColumnSidecar { &self.data } @@ -226,9 +233,21 @@ pub struct KzgVerifiedCustodyDataColumn { } impl KzgVerifiedCustodyDataColumn { + /// Mark a column as custody column. Caller must ensure that our current custody requirements + /// include this column + pub fn from_asserted_custody(kzg_verified: KzgVerifiedDataColumn) -> Self { + Self { + data: kzg_verified.to_data_column(), + } + } + pub fn index(&self) -> ColumnIndex { self.data.index } + + pub fn into_inner(self) -> Arc> { + self.data + } } /// Complete kzg verification for a `DataColumnSidecar`. diff --git a/beacon_node/beacon_chain/src/early_attester_cache.rs b/beacon_node/beacon_chain/src/early_attester_cache.rs index dda699cc6c..606610a748 100644 --- a/beacon_node/beacon_chain/src/early_attester_cache.rs +++ b/beacon_node/beacon_chain/src/early_attester_cache.rs @@ -22,6 +22,7 @@ pub struct CacheItem { */ block: Arc>, blobs: Option>, + data_columns: Option>, proto_block: ProtoBlock, } @@ -69,7 +70,7 @@ impl EarlyAttesterCache { }, }; - let (_, block, blobs) = block.deconstruct(); + let (_, block, blobs, data_columns) = block.deconstruct(); let item = CacheItem { epoch, committee_lengths, @@ -78,6 +79,7 @@ impl EarlyAttesterCache { target, block, blobs, + data_columns, proto_block, }; @@ -164,6 +166,15 @@ impl EarlyAttesterCache { .and_then(|item| item.blobs.clone()) } + /// Returns the data columns, if `block_root` matches the cached item. + pub fn get_data_columns(&self, block_root: Hash256) -> Option> { + self.item + .read() + .as_ref() + .filter(|item| item.beacon_block_root == block_root) + .and_then(|item| item.data_columns.clone()) + } + /// Returns the proto-array block, if `block_root` matches the cached item. 
pub fn get_proto_block(&self, block_root: Hash256) -> Option { self.item diff --git a/beacon_node/beacon_chain/src/historical_blocks.rs b/beacon_node/beacon_chain/src/historical_blocks.rs index 85208c8ad6..aa2fac2afc 100644 --- a/beacon_node/beacon_chain/src/historical_blocks.rs +++ b/beacon_node/beacon_chain/src/historical_blocks.rs @@ -107,7 +107,8 @@ impl BeaconChain { let mut signed_blocks = Vec::with_capacity(blocks_to_import.len()); for available_block in blocks_to_import.into_iter().rev() { - let (block_root, block, maybe_blobs) = available_block.deconstruct(); + let (block_root, block, maybe_blobs, maybe_data_columns) = + available_block.deconstruct(); if block_root != expected_block_root { return Err(HistoricalBlockError::MismatchedBlockRoot { @@ -127,6 +128,13 @@ impl BeaconChain { self.store .blobs_as_kv_store_ops(&block_root, blobs, &mut blob_batch); } + // Store the data columns too + if let Some(_data_columns) = maybe_data_columns { + // TODO(das): depends on https://github.com/sigp/lighthouse/pull/6073 + // new_oldest_data_column_slot = Some(block.slot()); + // self.store + // .data_columns_as_kv_store_ops(&block_root, data_columns, &mut blob_batch); + } // Store block roots, including at all skip slots in the freezer DB. for slot in (block.slot().as_usize()..prev_block_slot.as_usize()).rev() { diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 12f2702822..01d7798b92 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -2542,10 +2542,10 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { // signatures correctly. Regression test for https://github.com/sigp/lighthouse/pull/5120. 
let mut batch_with_invalid_first_block = available_blocks.clone(); batch_with_invalid_first_block[0] = { - let (block_root, block, blobs) = available_blocks[0].clone().deconstruct(); + let (block_root, block, blobs, data_columns) = available_blocks[0].clone().deconstruct(); let mut corrupt_block = (*block).clone(); *corrupt_block.signature_mut() = Signature::empty(); - AvailableBlock::__new_for_testing(block_root, Arc::new(corrupt_block), blobs) + AvailableBlock::__new_for_testing(block_root, Arc::new(corrupt_block), blobs, data_columns) }; // Importing the invalid batch should error. From 05bc99e67b10c65b6f56bd538f21739926c7818f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Sat, 3 Aug 2024 05:49:09 +0100 Subject: [PATCH 20/43] patch quick-protobuf (#6217) * patch quick-protobuf --- Cargo.lock | 3 +-- Cargo.toml | 5 ++++- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 605cb4d2a5..872c6e3368 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6513,8 +6513,7 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quick-protobuf" version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d6da84cc204722a989e01ba2f6e1e276e190f22263d0cb6ce8526fcdb0d2e1f" +source = "git+https://github.com/sigp/quick-protobuf.git?rev=681f413312404ab6e51f0b46f39b0075c6f4ebfd#681f413312404ab6e51f0b46f39b0075c6f4ebfd" dependencies = [ "byteorder", ] diff --git a/Cargo.toml b/Cargo.toml index b2957842d5..6d8d9d43bb 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -107,7 +107,7 @@ bytes = "1" clap = { version = "4.5.4", features = ["derive", "cargo", "wrap_help"] } # Turn off c-kzg's default features which include `blst/portable`. We can turn on blst's portable # feature ourselves when desired. 
-c-kzg = { version = "1", default-features = false } +c-kzg = { version = "1", default-features = false } compare_fields_derive = { path = "common/compare_fields_derive" } criterion = "0.5" delay_map = "0.3" @@ -240,6 +240,9 @@ validator_client = { path = "validator_client" } validator_dir = { path = "common/validator_dir" } warp_utils = { path = "common/warp_utils" } +[patch.crates-io] +quick-protobuf = { git = "https://github.com/sigp/quick-protobuf.git", rev = "681f413312404ab6e51f0b46f39b0075c6f4ebfd" } + [profile.maxperf] inherits = "release" lto = "fat" From 612946b27358ad838e4d66492b084bed76948bbe Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 5 Aug 2024 21:30:48 +1000 Subject: [PATCH 21/43] Downgrade re-org log to INFO (#6220) * Downgrade re-org log to INFO * Update book Co-authored-by: chonghe <44791194+chong-he@users.noreply.github.com> --- beacon_node/beacon_chain/src/canonical_head.rs | 4 ++-- book/src/late-block-re-orgs.md | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs index a5d85d5603..40e2f60b0e 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -48,7 +48,7 @@ use fork_choice::{ }; use itertools::process_results; use parking_lot::{Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard}; -use slog::{crit, debug, error, warn, Logger}; +use slog::{crit, debug, error, info, warn, Logger}; use slot_clock::SlotClock; use state_processing::AllCaches; use std::sync::Arc; @@ -1212,7 +1212,7 @@ fn detect_reorg( &metrics::FORK_CHOICE_REORG_DISTANCE, reorg_distance.as_u64() as i64, ); - warn!( + info!( log, "Beacon chain re-org"; "previous_head" => ?old_block_root, diff --git a/book/src/late-block-re-orgs.md b/book/src/late-block-re-orgs.md index fc4530589d..4a00f33aa4 100644 --- a/book/src/late-block-re-orgs.md +++ b/book/src/late-block-re-orgs.md @@ -50,10 +50,10 @@ A pair 
of messages at `INFO` level will be logged if a re-org opportunity is det > INFO Proposing block to re-org current head head_to_reorg: 0xf64f…2b49, slot: 1105320 -This should be followed shortly after by a `WARN` log indicating that a re-org occurred. This is +This should be followed shortly after by a `INFO` log indicating that a re-org occurred. This is expected and normal: -> WARN Beacon chain re-org reorg_distance: 1, new_slot: 1105320, new_head: 0x72791549e4ca792f91053bc7cf1e55c6fbe745f78ce7a16fc3acb6f09161becd, previous_slot: 1105319, previous_head: 0xf64f8e5ed617dc18c1e759dab5d008369767c3678416dac2fe1d389562842b49 +> INFO Beacon chain re-org reorg_distance: 1, new_slot: 1105320, new_head: 0x72791549e4ca792f91053bc7cf1e55c6fbe745f78ce7a16fc3acb6f09161becd, previous_slot: 1105319, previous_head: 0xf64f8e5ed617dc18c1e759dab5d008369767c3678416dac2fe1d389562842b49 In case a re-org is not viable (which should be most of the time), Lighthouse will just propose a block as normal and log the reason the re-org was not attempted at debug level: From f7f0bfc9f2b9eb25f3e111265f29943ad8c47641 Mon Sep 17 00:00:00 2001 From: Lion - dapplion <35266934+dapplion@users.noreply.github.com> Date: Tue, 6 Aug 2024 04:32:56 +0200 Subject: [PATCH 22/43] Skip recursive discovery query if no useful ENRs (#6207) * Skip recursive discovery query if no useful ENRs --- beacon_node/lighthouse_network/src/metrics.rs | 6 ++++++ .../lighthouse_network/src/peer_manager/mod.rs | 16 ++++++++++++++-- 2 files changed, 20 insertions(+), 2 deletions(-) diff --git a/beacon_node/lighthouse_network/src/metrics.rs b/beacon_node/lighthouse_network/src/metrics.rs index 9b11fe5a38..85da8dc211 100644 --- a/beacon_node/lighthouse_network/src/metrics.rs +++ b/beacon_node/lighthouse_network/src/metrics.rs @@ -77,6 +77,12 @@ pub static DISCOVERY_SESSIONS: LazyLock> = LazyLock::new(|| { "The number of active discovery sessions with peers", ) }); +pub static DISCOVERY_NO_USEFUL_ENRS: LazyLock> = LazyLock::new(|| { 
+ try_create_int_counter( + "discovery_no_useful_enrs_found", + "Total number of counts a query returned no useful ENRs to dial", + ) +}); pub static PEERS_PER_CLIENT: LazyLock> = LazyLock::new(|| { try_create_int_gauge_vec( diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index 4d3da0c8e4..6423da56fe 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -321,6 +321,7 @@ impl PeerManager { /// This function decides whether or not to dial these peers. pub fn peers_discovered(&mut self, results: HashMap>) { let mut to_dial_peers = 0; + let results_count = results.len(); let connected_or_dialing = self.network_globals.connected_or_dialing_peers(); for (enr, min_ttl) in results { // There are two conditions in deciding whether to dial this peer. @@ -352,8 +353,19 @@ impl PeerManager { } } - // Queue another discovery if we need to - self.maintain_peer_count(to_dial_peers); + // The heartbeat will attempt new discovery queries every N seconds if the node needs more + // peers. As an optimization, this function can recursively trigger new discovery queries + // immediately if we don't fulfill our peers' needs after completing a query. This + // recursiveness results in an infinite loop in networks where there are not enough peers to + // reach our target. To prevent the infinite loop, if a query returns no useful peers, we + // will cancel the recursiveness and wait for the heartbeat to trigger another query later. + if results_count > 0 && to_dial_peers == 0 { + debug!(self.log, "Skipping recursive discovery query after finding no useful results"; "results" => results_count); + metrics::inc_counter(&metrics::DISCOVERY_NO_USEFUL_ENRS); + } else { + // Queue another discovery if we need to + self.maintain_peer_count(to_dial_peers); + } } /// A STATUS message has been received from a peer. This resets the status timer. 
From f126a42b7ef9467552bc7efaf799bc79e9077108 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 6 Aug 2024 12:32:58 +1000 Subject: [PATCH 23/43] Remove double-locking in `eth/v1/node/syncing` (#6202) * Remove double-locking in `eth/v1/node/syncing` --- beacon_node/http_api/src/lib.rs | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index ce62ed63f2..f98f449396 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -2872,7 +2872,11 @@ pub fn serve( task_spawner .blocking_json_task(Priority::P0, move || { - let head_slot = chain.canonical_head.cached_head().head_slot(); + let (head, head_execution_status) = chain + .canonical_head + .head_and_execution_status() + .map_err(warp_utils::reject::beacon_chain_error)?; + let head_slot = head.head_slot(); let current_slot = chain.slot_clock.now_or_genesis().ok_or_else(|| { warp_utils::reject::custom_server_error( @@ -2883,9 +2887,7 @@ pub fn serve( // Taking advantage of saturating subtraction on slot. 
let sync_distance = current_slot - head_slot; - let is_optimistic = chain - .is_optimistic_or_invalid_head() - .map_err(warp_utils::reject::beacon_chain_error)?; + let is_optimistic = head_execution_status.is_optimistic_or_invalid(); let syncing_data = api_types::SyncingData { is_syncing: !network_globals.sync_state.read().is_synced(), From 42a1cd81fb97b6d761a72baf82979425a9416609 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 6 Aug 2024 16:11:00 +1000 Subject: [PATCH 24/43] Don't expect DAS config in HTTP spec response (#6221) * Don't expect DAS config in HTTP spec response --- consensus/types/src/chain_spec.rs | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index ca4df32d1e..b347d78639 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -1365,10 +1365,13 @@ pub struct Config { #[serde(with = "serde_utils::quoted_u64")] max_per_epoch_activation_exit_churn_limit: u64, + #[serde(default = "default_custody_requirement")] #[serde(with = "serde_utils::quoted_u64")] custody_requirement: u64, + #[serde(default = "default_data_column_sidecar_subnet_count")] #[serde(with = "serde_utils::quoted_u64")] data_column_sidecar_subnet_count: u64, + #[serde(default = "default_number_of_columns")] #[serde(with = "serde_utils::quoted_u64")] number_of_columns: u64, } @@ -1509,6 +1512,18 @@ const fn default_maximum_gossip_clock_disparity_millis() -> u64 { 500 } +const fn default_custody_requirement() -> u64 { + 1 +} + +const fn default_data_column_sidecar_subnet_count() -> u64 { + 32 +} + +const fn default_number_of_columns() -> u64 { + 128 +} + fn max_blocks_by_root_request_common(max_request_blocks: u64) -> usize { let max_request_blocks = max_request_blocks as usize; RuntimeVariableList::::from_vec( From a68f34a014ea7fa4788497dd38b5e1a960957fe4 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 8 Aug 2024 09:31:35 +1000 Subject: [PATCH 25/43] 
Broadcast VC requests in parallel and fix subscription error (#6223) * Broadcast VC requests in parallel * Remove outdated comment * Try some things * Fix subscription error * Remove junk logging --- common/eth2/src/lib.rs | 9 +- validator_client/src/beacon_node_fallback.rs | 88 +++++++++++--------- validator_client/src/duties_service.rs | 53 ++++++++---- validator_client/src/lib.rs | 3 + 4 files changed, 94 insertions(+), 59 deletions(-) diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 6d000f576f..3642f4bfe4 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -121,6 +121,7 @@ impl fmt::Display for Error { pub struct Timeouts { pub attestation: Duration, pub attester_duties: Duration, + pub attestation_subscriptions: Duration, pub liveness: Duration, pub proposal: Duration, pub proposer_duties: Duration, @@ -137,6 +138,7 @@ impl Timeouts { Timeouts { attestation: timeout, attester_duties: timeout, + attestation_subscriptions: timeout, liveness: timeout, proposal: timeout, proposer_duties: timeout, @@ -2515,7 +2517,12 @@ impl BeaconNodeHttpClient { .push("validator") .push("beacon_committee_subscriptions"); - self.post(path, &subscriptions).await?; + self.post_with_timeout( + path, + &subscriptions, + self.timeouts.attestation_subscriptions, + ) + .await?; Ok(()) } diff --git a/validator_client/src/beacon_node_fallback.rs b/validator_client/src/beacon_node_fallback.rs index 4467b80786..58d7f9d8ee 100644 --- a/validator_client/src/beacon_node_fallback.rs +++ b/validator_client/src/beacon_node_fallback.rs @@ -134,6 +134,12 @@ impl fmt::Display for Errors { } } +impl Errors { + pub fn num_errors(&self) -> usize { + self.0.len() + } +} + /// Reasons why a candidate might not be ready. 
#[derive(Debug, Clone, Copy)] pub enum CandidateError { @@ -599,46 +605,41 @@ impl BeaconNodeFallback { F: Fn(&'a BeaconNodeHttpClient) -> R, R: Future>, { - let mut results = vec![]; let mut to_retry = vec![]; let mut retry_unsynced = vec![]; // Run `func` using a `candidate`, returning the value or capturing errors. - // - // We use a macro instead of a closure here since it is not trivial to move `func` into a - // closure. - macro_rules! try_func { - ($candidate: ident) => {{ - inc_counter_vec(&ENDPOINT_REQUESTS, &[$candidate.beacon_node.as_ref()]); + let run_on_candidate = |candidate: &'a CandidateBeaconNode| async { + inc_counter_vec(&ENDPOINT_REQUESTS, &[candidate.beacon_node.as_ref()]); - // There exists a race condition where `func` may be called when the candidate is - // actually not ready. We deem this an acceptable inefficiency. - match func(&$candidate.beacon_node).await { - Ok(val) => results.push(Ok(val)), - Err(e) => { - // If we have an error on this function, make the client as not-ready. - // - // There exists a race condition where the candidate may have been marked - // as ready between the `func` call and now. We deem this an acceptable - // inefficiency. - if matches!(offline_on_failure, OfflineOnFailure::Yes) { - $candidate.set_offline().await; - } - results.push(Err(( - $candidate.beacon_node.to_string(), - Error::RequestFailed(e), - ))); - inc_counter_vec(&ENDPOINT_ERRORS, &[$candidate.beacon_node.as_ref()]); + // There exists a race condition where `func` may be called when the candidate is + // actually not ready. We deem this an acceptable inefficiency. + match func(&candidate.beacon_node).await { + Ok(val) => Ok(val), + Err(e) => { + // If we have an error on this function, mark the client as not-ready. + // + // There exists a race condition where the candidate may have been marked + // as ready between the `func` call and now. We deem this an acceptable + // inefficiency. 
+ if matches!(offline_on_failure, OfflineOnFailure::Yes) { + candidate.set_offline().await; } + inc_counter_vec(&ENDPOINT_ERRORS, &[candidate.beacon_node.as_ref()]); + Err((candidate.beacon_node.to_string(), Error::RequestFailed(e))) } - }}; - } + } + }; // First pass: try `func` on all synced and ready candidates. // // This ensures that we always choose a synced node if it is available. + let mut first_batch_futures = vec![]; for candidate in &self.candidates { match candidate.status(RequireSynced::Yes).await { + Ok(_) => { + first_batch_futures.push(run_on_candidate(candidate)); + } Err(CandidateError::NotSynced) if require_synced == false => { // This client is unsynced we will try it after trying all synced clients retry_unsynced.push(candidate); @@ -647,22 +648,24 @@ impl BeaconNodeFallback { // This client was not ready on the first pass, we might try it again later. to_retry.push(candidate); } - Ok(_) => try_func!(candidate), } } + let first_batch_results = futures::future::join_all(first_batch_futures).await; // Second pass: try `func` on ready unsynced candidates. This only runs if we permit // unsynced candidates. // // Due to async race-conditions, it is possible that we will send a request to a candidate // that has been set to an offline/unready status. This is acceptable. - if require_synced == false { - for candidate in retry_unsynced { - try_func!(candidate); - } - } + let second_batch_results = if require_synced == false { + futures::future::join_all(retry_unsynced.into_iter().map(run_on_candidate)).await + } else { + vec![] + }; // Third pass: try again, attempting to make non-ready clients become ready. + let mut third_batch_futures = vec![]; + let mut third_batch_results = vec![]; for candidate in to_retry { // If the candidate hasn't luckily transferred into the correct state in the meantime, // force an update of the state. 
@@ -676,16 +679,21 @@ impl BeaconNodeFallback { }; match new_status { - Ok(()) => try_func!(candidate), - Err(CandidateError::NotSynced) if require_synced == false => try_func!(candidate), - Err(e) => { - results.push(Err(( - candidate.beacon_node.to_string(), - Error::Unavailable(e), - ))); + Ok(()) => third_batch_futures.push(run_on_candidate(candidate)), + Err(CandidateError::NotSynced) if require_synced == false => { + third_batch_futures.push(run_on_candidate(candidate)) } + Err(e) => third_batch_results.push(Err(( + candidate.beacon_node.to_string(), + Error::Unavailable(e), + ))), } } + third_batch_results.extend(futures::future::join_all(third_batch_futures).await); + + let mut results = first_batch_results; + results.extend(second_batch_results); + results.extend(third_batch_results); let errors: Vec<_> = results.into_iter().filter_map(|res| res.err()).collect(); diff --git a/validator_client/src/duties_service.rs b/validator_client/src/duties_service.rs index 880f0eaa48..faa157a859 100644 --- a/validator_client/src/duties_service.rs +++ b/validator_client/src/duties_service.rs @@ -86,7 +86,8 @@ const _: () = assert!({ /// This number is based upon `MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD` value in the /// `beacon_node::network::attestation_service` crate. It is not imported directly to avoid /// bringing in the entire crate. -const _: () = assert!(ATTESTATION_SUBSCRIPTION_OFFSETS[0] > 2); +const MIN_ATTESTATION_SUBSCRIPTION_LOOKAHEAD: u64 = 2; +const _: () = assert!(ATTESTATION_SUBSCRIPTION_OFFSETS[0] > MIN_ATTESTATION_SUBSCRIPTION_LOOKAHEAD); // The info in the enum variants is displayed in logging, clippy thinks it's dead code. #[derive(Debug)] @@ -121,6 +122,8 @@ pub struct DutyAndProof { pub struct SubscriptionSlots { /// Pairs of `(slot, already_sent)` in slot-descending order. slots: Vec<(Slot, AtomicBool)>, + /// The slot of the duty itself. + duty_slot: Slot, } /// Create a selection proof for `duty`. 
@@ -172,18 +175,20 @@ impl SubscriptionSlots { .filter(|scheduled_slot| *scheduled_slot > current_slot) .map(|scheduled_slot| (scheduled_slot, AtomicBool::new(false))) .collect(); - Arc::new(Self { slots }) + Arc::new(Self { slots, duty_slot }) } /// Return `true` if we should send a subscription at `slot`. fn should_send_subscription_at(&self, slot: Slot) -> bool { // Iterate slots from smallest to largest looking for one that hasn't been completed yet. - self.slots - .iter() - .rev() - .any(|(scheduled_slot, already_sent)| { - slot >= *scheduled_slot && !already_sent.load(Ordering::Relaxed) - }) + slot + MIN_ATTESTATION_SUBSCRIPTION_LOOKAHEAD <= self.duty_slot + && self + .slots + .iter() + .rev() + .any(|(scheduled_slot, already_sent)| { + slot >= *scheduled_slot && !already_sent.load(Ordering::Relaxed) + }) } /// Update our record of subscribed slots to account for successful subscription at `slot`. @@ -737,7 +742,7 @@ async fn poll_beacon_attesters( // If there are any subscriptions, push them out to beacon nodes if !subscriptions.is_empty() { let subscriptions_ref = &subscriptions; - if let Err(e) = duties_service + let subscription_result = duties_service .beacon_nodes .request( RequireSynced::No, @@ -753,15 +758,8 @@ async fn poll_beacon_attesters( .await }, ) - .await - { - error!( - log, - "Failed to subscribe validators"; - "error" => %e - ) - } else { - // Record that subscriptions were successfully sent. + .await; + if subscription_result.as_ref().is_ok() { debug!( log, "Broadcast attestation subscriptions"; @@ -770,6 +768,25 @@ async fn poll_beacon_attesters( for subscription_slots in subscription_slots_to_confirm { subscription_slots.record_successful_subscription_at(current_slot); } + } else if let Err(e) = subscription_result { + if e.num_errors() < duties_service.beacon_nodes.num_total() { + warn!( + log, + "Some subscriptions failed"; + "error" => %e, + ); + // If subscriptions were sent to at least one node, regard that as a success. 
+ // There is some redundancy built into the subscription schedule to handle failures. + for subscription_slots in subscription_slots_to_confirm { + subscription_slots.record_successful_subscription_at(current_slot); + } + } else { + error!( + log, + "All subscriptions failed"; + "error" => %e + ); + } } } diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index 729ff62ee3..dff50582df 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -75,6 +75,7 @@ const WAITING_FOR_GENESIS_POLL_TIME: Duration = Duration::from_secs(12); /// This can help ensure that proper endpoint fallback occurs. const HTTP_ATTESTATION_TIMEOUT_QUOTIENT: u32 = 4; const HTTP_ATTESTER_DUTIES_TIMEOUT_QUOTIENT: u32 = 4; +const HTTP_ATTESTATION_SUBSCRIPTIONS_TIMEOUT_QUOTIENT: u32 = 24; const HTTP_LIVENESS_TIMEOUT_QUOTIENT: u32 = 4; const HTTP_PROPOSAL_TIMEOUT_QUOTIENT: u32 = 2; const HTTP_PROPOSER_DUTIES_TIMEOUT_QUOTIENT: u32 = 4; @@ -323,6 +324,8 @@ impl ProductionValidatorClient { Timeouts { attestation: slot_duration / HTTP_ATTESTATION_TIMEOUT_QUOTIENT, attester_duties: slot_duration / HTTP_ATTESTER_DUTIES_TIMEOUT_QUOTIENT, + attestation_subscriptions: slot_duration + / HTTP_ATTESTATION_SUBSCRIPTIONS_TIMEOUT_QUOTIENT, liveness: slot_duration / HTTP_LIVENESS_TIMEOUT_QUOTIENT, proposal: slot_duration / HTTP_PROPOSAL_TIMEOUT_QUOTIENT, proposer_duties: slot_duration / HTTP_PROPOSER_DUTIES_TIMEOUT_QUOTIENT, From d6ba8c397557f5c977b70f0d822a9228e98ca214 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 8 Aug 2024 12:17:03 +1000 Subject: [PATCH 26/43] Release v5.3.0 (#6194) * Release v5.3.0 --- Cargo.lock | 8 ++++---- beacon_node/Cargo.toml | 2 +- boot_node/Cargo.toml | 2 +- common/lighthouse_version/src/lib.rs | 4 ++-- lcli/Cargo.toml | 2 +- lighthouse/Cargo.toml | 2 +- 6 files changed, 10 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 872c6e3368..0c7d10d6a5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -836,7 +836,7 @@ 
dependencies = [ [[package]] name = "beacon_node" -version = "5.2.1" +version = "5.3.0" dependencies = [ "beacon_chain", "clap", @@ -1043,7 +1043,7 @@ dependencies = [ [[package]] name = "boot_node" -version = "5.2.1" +version = "5.3.0" dependencies = [ "beacon_node", "clap", @@ -4376,7 +4376,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lcli" -version = "5.2.1" +version = "5.3.0" dependencies = [ "account_utils", "beacon_chain", @@ -4947,7 +4947,7 @@ dependencies = [ [[package]] name = "lighthouse" -version = "5.2.1" +version = "5.3.0" dependencies = [ "account_manager", "account_utils", diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index a5fd29c971..146f1c1018 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "beacon_node" -version = "5.2.1" +version = "5.3.0" authors = [ "Paul Hauner ", "Age Manning "] edition = { workspace = true } diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index d32d799468..f988dd86b1 100644 --- a/common/lighthouse_version/src/lib.rs +++ b/common/lighthouse_version/src/lib.rs @@ -17,8 +17,8 @@ pub const VERSION: &str = git_version!( // NOTE: using --match instead of --exclude for compatibility with old Git "--match=thiswillnevermatchlol" ], - prefix = "Lighthouse/v5.2.1-", - fallback = "Lighthouse/v5.2.1" + prefix = "Lighthouse/v5.3.0-", + fallback = "Lighthouse/v5.3.0" ); /// Returns the first eight characters of the latest commit hash for this build. 
diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index 3cddd8ee60..30721f3d5b 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lcli" description = "Lighthouse CLI (modeled after zcli)" -version = "5.2.1" +version = "5.3.0" authors = ["Paul Hauner "] edition = { workspace = true } diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index b9d3eaf894..b381a3fb0e 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lighthouse" -version = "5.2.1" +version = "5.3.0" authors = ["Sigma Prime "] edition = { workspace = true } autotests = false From aad8727f52fc3a53df77b9f7b7e08a591e6fef14 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Thu, 8 Aug 2024 06:55:47 +0100 Subject: [PATCH 27/43] add missing use std::sync::Lazylock to malloc_utils::glibc (#6234) * add missing use std::sync::Lazylock --- common/malloc_utils/src/glibc.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/common/malloc_utils/src/glibc.rs b/common/malloc_utils/src/glibc.rs index 9531102682..41d8d28291 100644 --- a/common/malloc_utils/src/glibc.rs +++ b/common/malloc_utils/src/glibc.rs @@ -9,6 +9,7 @@ use parking_lot::Mutex; use std::env; use std::os::raw::c_int; use std::result::Result; +use std::sync::LazyLock; /// The optimal mmap threshold for Lighthouse seems to be around 128KB. 
/// From 3913ea44c6dc412c6e723383dd398264a967faf3 Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Fri, 9 Aug 2024 00:36:20 -0700 Subject: [PATCH 28/43] Persist light client updates (#5545) * persist light client updates * update beacon chain to serve light client updates * resolve todos * cache best update * extend cache parts * is better light client update * resolve merge conflict * initial api changes * add lc update db column * fmt * added tests * add sim * Merge branch 'unstable' of https://github.com/sigp/lighthouse into persist-light-client-updates * fix some weird issues with the simulator * tests * Merge branch 'unstable' of https://github.com/sigp/lighthouse into persist-light-client-updates * test changes * merge conflict * testing * started work on ef tests and some code clean up * update tests * linting * noop pre altair, were still failing on electra though * allow for zeroed light client header * Merge branch 'unstable' of https://github.com/sigp/lighthouse into persist-light-client-updates * merge unstable * remove unwraps * remove unwraps * Update light_client_update.rs * merge unstable * move functionality to helper methods * refactor is best update fn * refactor is best update fn * improve organization of light client server cache logic * fork diget calc, and only spawn as many blcoks as we need for the lc update test * fetch lc update from the cache if it exists * fmt * Fix beacon_chain tests * Add debug code to update ranking_order ef test * Fix compare code * merge conflicts * fix test * Merge branch 'persist-light-client-updates' of https://github.com/eserilev/lighthouse into persist-light-client-updates * Use blinded blocks for light client proofs * fix ef test * merge conflicts * fix lc update check * Lint * resolve merge conflict * Merge branch 'persist-light-client-updates' of https://github.com/eserilev/lighthouse into persist-light-client-updates * revert basic sim * small fix * revert sim * Review PR * resolve merge conflicts * 
Merge branch 'unstable' into persist-light-client-updates --- beacon_node/beacon_chain/src/beacon_chain.rs | 15 +- .../src/light_client_server_cache.rs | 223 +++++++++++---- beacon_node/beacon_chain/tests/store_tests.rs | 251 +++++++++++++++++ beacon_node/http_api/src/lib.rs | 29 +- beacon_node/http_api/src/light_client.rs | 143 ++++++++++ beacon_node/http_api/tests/tests.rs | 42 +++ beacon_node/store/src/leveldb_store.rs | 1 - beacon_node/store/src/lib.rs | 6 +- common/eth2/src/lib.rs | 25 ++ common/eth2/src/types.rs | 18 ++ .../types/src/light_client_finality_update.rs | 13 + consensus/types/src/light_client_header.rs | 42 +++ .../src/light_client_optimistic_update.rs | 13 + consensus/types/src/light_client_update.rs | 265 +++++++++++++----- testing/ef_tests/check_all_files_accessed.py | 4 +- testing/ef_tests/src/cases.rs | 2 + .../light_client_verify_is_better_update.rs | 110 ++++++++ testing/ef_tests/src/decode.rs | 12 +- testing/ef_tests/src/error.rs | 3 + testing/ef_tests/src/handler.rs | 26 ++ testing/ef_tests/tests/tests.rs | 5 + 21 files changed, 1124 insertions(+), 124 deletions(-) create mode 100644 beacon_node/http_api/src/light_client.rs create mode 100644 testing/ef_tests/src/cases/light_client_verify_is_better_update.rs diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 8cd991cc10..3bf7528477 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -1351,14 +1351,27 @@ impl BeaconChain { ) -> Result<(), Error> { self.light_client_server_cache.recompute_and_cache_updates( self.store.clone(), - &parent_root, slot, + &parent_root, &sync_aggregate, &self.log, &self.spec, ) } + pub fn get_light_client_updates( + &self, + sync_committee_period: u64, + count: u64, + ) -> Result>, Error> { + self.light_client_server_cache.get_light_client_updates( + &self.store, + sync_committee_period, + count, + &self.spec, + ) + } + /// Returns the current 
heads of the `BeaconChain`. For the canonical head, see `Self::head`. /// /// Returns `(block_root, block_slot)`. diff --git a/beacon_node/beacon_chain/src/light_client_server_cache.rs b/beacon_node/beacon_chain/src/light_client_server_cache.rs index 87513885f7..efc746675d 100644 --- a/beacon_node/beacon_chain/src/light_client_server_cache.rs +++ b/beacon_node/beacon_chain/src/light_client_server_cache.rs @@ -1,14 +1,23 @@ use crate::errors::BeaconChainError; use crate::{metrics, BeaconChainTypes, BeaconStore}; use parking_lot::{Mutex, RwLock}; +use safe_arith::SafeArith; use slog::{debug, Logger}; +use ssz::Decode; +use ssz::Encode; use ssz_types::FixedVector; use std::num::NonZeroUsize; -use types::light_client_update::{FinalizedRootProofLen, FINALIZED_ROOT_INDEX}; +use std::sync::Arc; +use store::DBColumn; +use store::KeyValueStore; +use types::light_client_update::{ + FinalizedRootProofLen, NextSyncCommitteeProofLen, FINALIZED_ROOT_INDEX, + NEXT_SYNC_COMMITTEE_INDEX, +}; use types::non_zero_usize::new_non_zero_usize; use types::{ BeaconBlockRef, BeaconState, ChainSpec, EthSpec, ForkName, Hash256, LightClientFinalityUpdate, - LightClientOptimisticUpdate, Slot, SyncAggregate, + LightClientOptimisticUpdate, LightClientUpdate, Slot, SyncAggregate, SyncCommittee, }; /// A prev block cache miss requires to re-generate the state of the post-parent block. Items in the @@ -30,8 +39,10 @@ pub struct LightClientServerCache { latest_finality_update: RwLock>>, /// Tracks a single global latest optimistic update out of all imported blocks. 
latest_optimistic_update: RwLock>>, + /// Caches the most recent light client update + latest_light_client_update: RwLock>>, /// Caches state proofs by block root - prev_block_cache: Mutex>, + prev_block_cache: Mutex>>, } impl LightClientServerCache { @@ -39,13 +50,14 @@ impl LightClientServerCache { Self { latest_finality_update: None.into(), latest_optimistic_update: None.into(), + latest_light_client_update: None.into(), prev_block_cache: lru::LruCache::new(PREV_BLOCK_CACHE_SIZE).into(), } } /// Compute and cache state proofs for latter production of light-client messages. Does not /// trigger block replay. - pub fn cache_state_data( + pub(crate) fn cache_state_data( &self, spec: &ChainSpec, block: BeaconBlockRef, @@ -67,13 +79,13 @@ impl LightClientServerCache { Ok(()) } - /// Given a block with a SyncAggregte computes better or more recent light client updates. The + /// Given a block with a SyncAggregate computes better or more recent light client updates. The /// results are cached either on disk or memory to be served via p2p and rest API pub fn recompute_and_cache_updates( &self, store: BeaconStore, - block_parent_root: &Hash256, block_slot: Slot, + block_parent_root: &Hash256, sync_aggregate: &SyncAggregate, log: &Logger, chain_spec: &ChainSpec, @@ -100,11 +112,17 @@ impl LightClientServerCache { let attested_slot = attested_block.slot(); + let maybe_finalized_block = store.get_blinded_block(&cached_parts.finalized_block_root)?; + + let sync_period = block_slot + .epoch(T::EthSpec::slots_per_epoch()) + .sync_committee_period(chain_spec)?; + // Spec: Full nodes SHOULD provide the LightClientOptimisticUpdate with the highest // attested_header.beacon.slot (if multiple, highest signature_slot) as selected by fork choice let is_latest_optimistic = match &self.latest_optimistic_update.read().clone() { Some(latest_optimistic_update) => { - is_latest_optimistic_update(latest_optimistic_update, attested_slot, signature_slot) + 
latest_optimistic_update.is_latest(attested_slot, signature_slot) } None => true, }; @@ -122,18 +140,17 @@ impl LightClientServerCache { // attested_header.beacon.slot (if multiple, highest signature_slot) as selected by fork choice let is_latest_finality = match &self.latest_finality_update.read().clone() { Some(latest_finality_update) => { - is_latest_finality_update(latest_finality_update, attested_slot, signature_slot) + latest_finality_update.is_latest(attested_slot, signature_slot) } None => true, }; + if is_latest_finality & !cached_parts.finalized_block_root.is_zero() { // Immediately after checkpoint sync the finalized block may not be available yet. - if let Some(finalized_block) = - store.get_blinded_block(&cached_parts.finalized_block_root)? - { + if let Some(finalized_block) = maybe_finalized_block.as_ref() { *self.latest_finality_update.write() = Some(LightClientFinalityUpdate::new( &attested_block, - &finalized_block, + finalized_block, cached_parts.finality_branch.clone(), sync_aggregate.clone(), signature_slot, @@ -148,9 +165,142 @@ impl LightClientServerCache { } } + let new_light_client_update = LightClientUpdate::new( + sync_aggregate, + block_slot, + cached_parts.next_sync_committee, + cached_parts.next_sync_committee_branch, + cached_parts.finality_branch, + &attested_block, + maybe_finalized_block.as_ref(), + chain_spec, + )?; + + // Spec: Full nodes SHOULD provide the best derivable LightClientUpdate (according to is_better_update) + // for each sync committee period + let prev_light_client_update = match &self.latest_light_client_update.read().clone() { + Some(prev_light_client_update) => Some(prev_light_client_update.clone()), + None => self.get_light_client_update(&store, sync_period, chain_spec)?, + }; + + let should_persist_light_client_update = + if let Some(prev_light_client_update) = prev_light_client_update { + let prev_sync_period = prev_light_client_update + .signature_slot() + .epoch(T::EthSpec::slots_per_epoch()) + 
.sync_committee_period(chain_spec)?; + + if sync_period != prev_sync_period { + true + } else { + prev_light_client_update + .is_better_light_client_update(&new_light_client_update, chain_spec)? + } + } else { + true + }; + + if should_persist_light_client_update { + self.store_light_client_update(&store, sync_period, &new_light_client_update)?; + } + Ok(()) } + fn store_light_client_update( + &self, + store: &BeaconStore, + sync_committee_period: u64, + light_client_update: &LightClientUpdate, + ) -> Result<(), BeaconChainError> { + let column = DBColumn::LightClientUpdate; + + store.hot_db.put_bytes( + column.into(), + &sync_committee_period.to_le_bytes(), + &light_client_update.as_ssz_bytes(), + )?; + + *self.latest_light_client_update.write() = Some(light_client_update.clone()); + + Ok(()) + } + + // Used to fetch the most recently persisted "best" light client update. + // Should not be used outside the light client server, as it also caches the fetched + // light client update. + fn get_light_client_update( + &self, + store: &BeaconStore, + sync_committee_period: u64, + chain_spec: &ChainSpec, + ) -> Result>, BeaconChainError> { + if let Some(latest_light_client_update) = self.latest_light_client_update.read().clone() { + let latest_lc_update_sync_committee_period = latest_light_client_update + .signature_slot() + .epoch(T::EthSpec::slots_per_epoch()) + .sync_committee_period(chain_spec)?; + if latest_lc_update_sync_committee_period == sync_committee_period { + return Ok(Some(latest_light_client_update)); + } + } + + let column = DBColumn::LightClientUpdate; + let res = store + .hot_db + .get_bytes(column.into(), &sync_committee_period.to_le_bytes())?; + + if let Some(light_client_update_bytes) = res { + let epoch = sync_committee_period + .safe_mul(chain_spec.epochs_per_sync_committee_period.into())?; + + let fork_name = chain_spec.fork_name_at_epoch(epoch.into()); + + let light_client_update = + LightClientUpdate::from_ssz_bytes(&light_client_update_bytes, 
&fork_name) + .map_err(store::errors::Error::SszDecodeError)?; + + *self.latest_light_client_update.write() = Some(light_client_update.clone()); + return Ok(Some(light_client_update)); + } + + Ok(None) + } + + pub fn get_light_client_updates( + &self, + store: &BeaconStore, + start_period: u64, + count: u64, + chain_spec: &ChainSpec, + ) -> Result>, BeaconChainError> { + let column = DBColumn::LightClientUpdate; + let mut light_client_updates = vec![]; + for res in store + .hot_db + .iter_column_from::>(column, &start_period.to_le_bytes()) + { + let (sync_committee_bytes, light_client_update_bytes) = res?; + let sync_committee_period = u64::from_ssz_bytes(&sync_committee_bytes) + .map_err(store::errors::Error::SszDecodeError)?; + let epoch = sync_committee_period + .safe_mul(chain_spec.epochs_per_sync_committee_period.into())?; + + let fork_name = chain_spec.fork_name_at_epoch(epoch.into()); + + let light_client_update = + LightClientUpdate::from_ssz_bytes(&light_client_update_bytes, &fork_name) + .map_err(store::errors::Error::SszDecodeError)?; + + light_client_updates.push(light_client_update); + + if sync_committee_period >= start_period + count { + break; + } + } + Ok(light_client_updates) + } + /// Retrieves prev block cached data from cache. If not present re-computes by retrieving the /// parent state, and inserts an entry to the cache. /// @@ -161,7 +311,7 @@ impl LightClientServerCache { block_root: &Hash256, block_state_root: &Hash256, block_slot: Slot, - ) -> Result { + ) -> Result, BeaconChainError> { // Attempt to get the value from the cache first. 
if let Some(cached_parts) = self.prev_block_cache.lock().get(block_root) { return Ok(cached_parts.clone()); @@ -199,52 +349,25 @@ impl Default for LightClientServerCache { } type FinalityBranch = FixedVector; +type NextSyncCommitteeBranch = FixedVector; #[derive(Clone)] -struct LightClientCachedData { +struct LightClientCachedData { finality_branch: FinalityBranch, + next_sync_committee_branch: NextSyncCommitteeBranch, + next_sync_committee: Arc>, finalized_block_root: Hash256, } -impl LightClientCachedData { - fn from_state(state: &mut BeaconState) -> Result { +impl LightClientCachedData { + fn from_state(state: &mut BeaconState) -> Result { Ok(Self { finality_branch: state.compute_merkle_proof(FINALIZED_ROOT_INDEX)?.into(), + next_sync_committee: state.next_sync_committee()?.clone(), + next_sync_committee_branch: state + .compute_merkle_proof(NEXT_SYNC_COMMITTEE_INDEX)? + .into(), finalized_block_root: state.finalized_checkpoint().root, }) } } - -// Implements spec prioritization rules: -// > Full nodes SHOULD provide the LightClientFinalityUpdate with the highest attested_header.beacon.slot (if multiple, highest signature_slot) -// -// ref: https://github.com/ethereum/consensus-specs/blob/113c58f9bf9c08867f6f5f633c4d98e0364d612a/specs/altair/light-client/full-node.md#create_light_client_finality_update -fn is_latest_finality_update( - prev: &LightClientFinalityUpdate, - attested_slot: Slot, - signature_slot: Slot, -) -> bool { - let prev_slot = prev.get_attested_header_slot(); - if attested_slot > prev_slot { - true - } else { - attested_slot == prev_slot && signature_slot > *prev.signature_slot() - } -} - -// Implements spec prioritization rules: -// > Full nodes SHOULD provide the LightClientOptimisticUpdate with the highest attested_header.beacon.slot (if multiple, highest signature_slot) -// -// ref: 
https://github.com/ethereum/consensus-specs/blob/113c58f9bf9c08867f6f5f633c4d98e0364d612a/specs/altair/light-client/full-node.md#create_light_client_optimistic_update -fn is_latest_optimistic_update( - prev: &LightClientOptimisticUpdate, - attested_slot: Slot, - signature_slot: Slot, -) -> bool { - let prev_slot = prev.get_slot(); - if attested_slot > prev_slot { - true - } else { - attested_slot == prev_slot && signature_slot > *prev.signature_slot() - } -} diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 01d7798b92..7049bf14fd 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -5,6 +5,7 @@ use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::builder::BeaconChainBuilder; use beacon_chain::data_availability_checker::AvailableBlock; use beacon_chain::schema_change::migrate_schema; +use beacon_chain::test_utils::RelativeSyncCommittee; use beacon_chain::test_utils::{ mock_execution_layer_from_parts, test_spec, AttestationStrategy, BeaconChainHarness, BlockStrategy, DiskHarnessType, KZG, @@ -103,6 +104,256 @@ fn get_harness_generic( harness } +#[tokio::test] +async fn light_client_updates_test() { + let spec = test_spec::(); + let Some(_) = spec.altair_fork_epoch else { + // No-op prior to Altair. 
+ return; + }; + + let num_final_blocks = E::slots_per_epoch() * 2; + let checkpoint_slot = Slot::new(E::slots_per_epoch() * 9); + let db_path = tempdir().unwrap(); + let log = test_logger(); + + let seconds_per_slot = spec.seconds_per_slot; + let store = get_store_generic( + &db_path, + StoreConfig { + slots_per_restore_point: 2 * E::slots_per_epoch(), + ..Default::default() + }, + test_spec::(), + ); + let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); + let all_validators = (0..LOW_VALIDATOR_COUNT).collect::>(); + let num_initial_slots = E::slots_per_epoch() * 10; + let slots: Vec = (1..num_initial_slots).map(Slot::new).collect(); + + let (genesis_state, genesis_state_root) = harness.get_current_state_and_root(); + harness + .add_attested_blocks_at_slots( + genesis_state.clone(), + genesis_state_root, + &slots, + &all_validators, + ) + .await; + + let wss_block_root = harness + .chain + .block_root_at_slot(checkpoint_slot, WhenSlotSkipped::Prev) + .unwrap() + .unwrap(); + let wss_state_root = harness + .chain + .state_root_at_slot(checkpoint_slot) + .unwrap() + .unwrap(); + let wss_block = harness + .chain + .store + .get_full_block(&wss_block_root) + .unwrap() + .unwrap(); + let wss_blobs_opt = harness.chain.store.get_blobs(&wss_block_root).unwrap(); + let wss_state = store + .get_state(&wss_state_root, Some(checkpoint_slot)) + .unwrap() + .unwrap(); + + let kzg = spec.deneb_fork_epoch.map(|_| KZG.clone()); + + let mock = + mock_execution_layer_from_parts(&harness.spec, harness.runtime.task_executor.clone()); + + harness.advance_slot(); + harness + .extend_chain( + num_final_blocks as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + // Initialise a new beacon chain from the finalized checkpoint. + // The slot clock must be set to a time ahead of the checkpoint state. 
+ let slot_clock = TestingSlotClock::new( + Slot::new(0), + Duration::from_secs(harness.chain.genesis_time), + Duration::from_secs(seconds_per_slot), + ); + slot_clock.set_slot(harness.get_current_slot().as_u64()); + + let (shutdown_tx, _shutdown_rx) = futures::channel::mpsc::channel(1); + + let beacon_chain = BeaconChainBuilder::>::new(MinimalEthSpec) + .store(store.clone()) + .custom_spec(test_spec::()) + .task_executor(harness.chain.task_executor.clone()) + .logger(log.clone()) + .weak_subjectivity_state( + wss_state, + wss_block.clone(), + wss_blobs_opt.clone(), + genesis_state, + ) + .unwrap() + .store_migrator_config(MigratorConfig::default().blocking()) + .dummy_eth1_backend() + .expect("should build dummy backend") + .slot_clock(slot_clock) + .shutdown_sender(shutdown_tx) + .chain_config(ChainConfig::default()) + .event_handler(Some(ServerSentEventHandler::new_with_capacity( + log.clone(), + 1, + ))) + .execution_layer(Some(mock.el)) + .kzg(kzg) + .build() + .expect("should build"); + + let beacon_chain = Arc::new(beacon_chain); + + let current_state = harness.get_current_state(); + + if ForkName::Electra == current_state.fork_name_unchecked() { + // TODO(electra) fix beacon state `compute_merkle_proof` + return; + } + + let block_root = *current_state + .get_block_root(current_state.slot() - Slot::new(1)) + .unwrap(); + + let contributions = harness.make_sync_contributions( + ¤t_state, + block_root, + current_state.slot() - Slot::new(1), + RelativeSyncCommittee::Current, + ); + + // generate sync aggregates + for (_, contribution_and_proof) in contributions { + let contribution = contribution_and_proof + .expect("contribution exists for committee") + .message + .contribution; + beacon_chain + .op_pool + .insert_sync_contribution(contribution.clone()) + .unwrap(); + beacon_chain + .op_pool + .insert_sync_contribution(contribution) + .unwrap(); + } + + // check that we can fetch the newly generated sync aggregate + let sync_aggregate = beacon_chain + 
.op_pool + .get_sync_aggregate(¤t_state) + .unwrap() + .unwrap(); + + // cache light client data + beacon_chain + .light_client_server_cache + .recompute_and_cache_updates( + store.clone(), + current_state.slot() - Slot::new(1), + &block_root, + &sync_aggregate, + &log, + &spec, + ) + .unwrap(); + + // calculate the sync period from the previous slot + let sync_period = (current_state.slot() - Slot::new(1)) + .epoch(E::slots_per_epoch()) + .sync_committee_period(&spec) + .unwrap(); + + // fetch a range of light client updates. right now there should only be one light client update + // in the db. + let lc_updates = beacon_chain + .get_light_client_updates(sync_period, 100) + .unwrap(); + + assert_eq!(lc_updates.len(), 1); + + // Advance to the next sync committee period + for _i in 0..(E::slots_per_epoch() * u64::from(spec.epochs_per_sync_committee_period)) { + harness.advance_slot(); + } + + harness + .extend_chain( + num_final_blocks as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + let current_state = harness.get_current_state(); + + let block_root = *current_state + .get_block_root(current_state.slot() - Slot::new(1)) + .unwrap(); + + let contributions = harness.make_sync_contributions( + ¤t_state, + block_root, + current_state.slot() - Slot::new(1), + RelativeSyncCommittee::Current, + ); + + // generate new sync aggregates from this new state + for (_, contribution_and_proof) in contributions { + let contribution = contribution_and_proof + .expect("contribution exists for committee") + .message + .contribution; + beacon_chain + .op_pool + .insert_sync_contribution(contribution.clone()) + .unwrap(); + beacon_chain + .op_pool + .insert_sync_contribution(contribution) + .unwrap(); + } + + let sync_aggregate = beacon_chain + .op_pool + .get_sync_aggregate(¤t_state) + .unwrap() + .unwrap(); + + // cache new light client data + beacon_chain + .light_client_server_cache + .recompute_and_cache_updates( + store.clone(), 
+ current_state.slot() - Slot::new(1), + &block_root, + &sync_aggregate, + &log, + &spec, + ) + .unwrap(); + + // we should now have two light client updates in the db + let lc_updates = beacon_chain + .get_light_client_updates(sync_period, 100) + .unwrap(); + + assert_eq!(lc_updates.len(), 2); +} + /// Tests that `store.heal_freezer_block_roots_at_split` inserts block roots between last restore point /// slot and the split slot. #[tokio::test] diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index f98f449396..aa47d5c464 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -13,6 +13,7 @@ mod block_rewards; mod build_block_contents; mod builder_states; mod database; +mod light_client; mod metrics; mod produce_block; mod proposer_duties; @@ -30,6 +31,7 @@ mod validator_inclusion; mod validators; mod version; +use crate::light_client::get_light_client_updates; use crate::produce_block::{produce_blinded_block_v2, produce_block_v2, produce_block_v3}; use crate::version::fork_versioned_response; use beacon_chain::{ @@ -44,8 +46,8 @@ use bytes::Bytes; use directory::DEFAULT_ROOT_DIR; use eth2::types::{ self as api_types, BroadcastValidation, EndpointVersion, ForkChoice, ForkChoiceNode, - PublishBlockRequest, ValidatorBalancesRequestBody, ValidatorId, ValidatorStatus, - ValidatorsRequestBody, + LightClientUpdatesQuery, PublishBlockRequest, ValidatorBalancesRequestBody, ValidatorId, + ValidatorStatus, ValidatorsRequestBody, }; use eth2::{CONSENSUS_VERSION_HEADER, CONTENT_TYPE_HEADER, SSZ_CONTENT_TYPE_HEADER}; use lighthouse_network::{types::SyncState, EnrExt, NetworkGlobals, PeerId, PubsubMessage}; @@ -2484,6 +2486,25 @@ pub fn serve( }, ); + // GET beacon/light_client/updates + let get_beacon_light_client_updates = beacon_light_client_path + .clone() + .and(task_spawner_filter.clone()) + .and(warp::path("updates")) + .and(warp::path::end()) + .and(warp::query::()) + .and(warp::header::optional::("accept")) + 
.then( + |chain: Arc>, + task_spawner: TaskSpawner, + query: LightClientUpdatesQuery, + accept_header: Option| { + task_spawner.blocking_response_task(Priority::P1, move || { + get_light_client_updates::(chain, query, accept_header) + }) + }, + ); + /* * beacon/rewards */ @@ -4640,6 +4661,10 @@ pub fn serve( enable(ctx.config.enable_light_client_server) .and(get_beacon_light_client_bootstrap), ) + .uor( + enable(ctx.config.enable_light_client_server) + .and(get_beacon_light_client_updates), + ) .uor(get_lighthouse_block_packing_efficiency) .uor(get_lighthouse_merge_readiness) .uor(get_events) diff --git a/beacon_node/http_api/src/light_client.rs b/beacon_node/http_api/src/light_client.rs new file mode 100644 index 0000000000..a6543114b8 --- /dev/null +++ b/beacon_node/http_api/src/light_client.rs @@ -0,0 +1,143 @@ +use beacon_chain::{BeaconChain, BeaconChainTypes}; +use eth2::types::{ + self as api_types, ChainSpec, ForkVersionedResponse, LightClientUpdate, + LightClientUpdateResponseChunk, LightClientUpdateSszResponse, LightClientUpdatesQuery, +}; +use ssz::Encode; +use std::sync::Arc; +use warp::{ + hyper::{Body, Response}, + reply::Reply, + Rejection, +}; + +use crate::version::{add_ssz_content_type_header, fork_versioned_response, V1}; + +const MAX_REQUEST_LIGHT_CLIENT_UPDATES: u64 = 128; + +pub fn get_light_client_updates( + chain: Arc>, + query: LightClientUpdatesQuery, + accept_header: Option, +) -> Result, Rejection> { + validate_light_client_updates_request(&chain, &query)?; + + let light_client_updates = chain + .get_light_client_updates(query.start_period, query.count) + .map_err(|_| { + warp_utils::reject::custom_not_found("No LightClientUpdates found".to_string()) + })?; + + match accept_header { + Some(api_types::Accept::Ssz) => { + let response_chunks = light_client_updates + .iter() + .map(|update| map_light_client_update_to_ssz_chunk::(&chain, update)) + .collect::>(); + + let ssz_response = LightClientUpdateSszResponse { + response_chunk_len: 
(light_client_updates.len() as u64).to_le_bytes().to_vec(), + response_chunk: response_chunks.as_ssz_bytes(), + } + .as_ssz_bytes(); + + Response::builder() + .status(200) + .body(ssz_response) + .map(|res: Response>| add_ssz_content_type_header(res)) + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "failed to create response: {}", + e + )) + }) + } + _ => { + let fork_versioned_response = light_client_updates + .iter() + .map(|update| map_light_client_update_to_json_response::(&chain, update.clone())) + .collect::>>, Rejection>>()?; + Ok(warp::reply::json(&fork_versioned_response).into_response()) + } + } +} + +pub fn validate_light_client_updates_request( + chain: &BeaconChain, + query: &LightClientUpdatesQuery, +) -> Result<(), Rejection> { + if query.count > MAX_REQUEST_LIGHT_CLIENT_UPDATES { + return Err(warp_utils::reject::custom_bad_request( + "Invalid count requested".to_string(), + )); + } + + let current_sync_period = chain + .epoch() + .map_err(|_| { + warp_utils::reject::custom_server_error("failed to get current epoch".to_string()) + })? + .sync_committee_period(&chain.spec) + .map_err(|_| { + warp_utils::reject::custom_server_error( + "failed to get current sync committee period".to_string(), + ) + })?; + + if query.start_period > current_sync_period { + return Err(warp_utils::reject::custom_bad_request( + "Invalid sync committee period requested".to_string(), + )); + } + + let earliest_altair_sync_committee = chain + .spec + .altair_fork_epoch + .ok_or(warp_utils::reject::custom_server_error( + "failed to get altair fork epoch".to_string(), + ))? 
+ .sync_committee_period(&chain.spec) + .map_err(|_| { + warp_utils::reject::custom_server_error( + "failed to get earliest altair sync committee".to_string(), + ) + })?; + + if query.start_period < earliest_altair_sync_committee { + return Err(warp_utils::reject::custom_bad_request( + "Invalid sync committee period requested".to_string(), + )); + } + + Ok(()) +} + +fn map_light_client_update_to_ssz_chunk( + chain: &BeaconChain, + light_client_update: &LightClientUpdate, +) -> LightClientUpdateResponseChunk { + let fork_name = chain + .spec + .fork_name_at_slot::(*light_client_update.signature_slot()); + + let fork_digest = ChainSpec::compute_fork_digest( + chain.spec.fork_version_for_name(fork_name), + chain.genesis_validators_root, + ); + + LightClientUpdateResponseChunk { + context: fork_digest, + payload: light_client_update.as_ssz_bytes(), + } +} + +fn map_light_client_update_to_json_response( + chain: &BeaconChain, + light_client_update: LightClientUpdate, +) -> Result>, Rejection> { + let fork_name = chain + .spec + .fork_name_at_slot::(*light_client_update.signature_slot()); + + fork_versioned_response(V1, fork_name, light_client_update) +} diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index d51799b866..9377e277c2 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -1813,6 +1813,36 @@ impl ApiTester { self } + pub async fn test_get_beacon_light_client_updates(self) -> Self { + let current_epoch = self.chain.epoch().unwrap(); + let current_sync_committee_period = current_epoch + .sync_committee_period(&self.chain.spec) + .unwrap(); + + let result = match self + .client + .get_beacon_light_client_updates::(current_sync_committee_period as u64, 1) + .await + { + Ok(result) => result, + Err(e) => panic!("query failed incorrectly: {e:?}"), + }; + + let expected = self + .chain + .light_client_server_cache + .get_light_client_updates( + &self.chain.store, + 
current_sync_committee_period as u64, + 1, + &self.chain.spec, + ) + .unwrap(); + + assert_eq!(result.clone().unwrap().len(), expected.len()); + self + } + pub async fn test_get_beacon_light_client_bootstrap(self) -> Self { let block_id = BlockId(CoreBlockId::Finalized); let (block_root, _, _) = block_id.root(&self.chain).unwrap(); @@ -6171,6 +6201,18 @@ async fn node_get() { .await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn get_light_client_updates() { + let config = ApiTesterConfig { + spec: ForkName::Altair.make_genesis_spec(E::default_spec()), + ..<_>::default() + }; + ApiTester::new_from_config(config) + .await + .test_get_beacon_light_client_updates() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn get_light_client_bootstrap() { let config = ApiTesterConfig { diff --git a/beacon_node/store/src/leveldb_store.rs b/beacon_node/store/src/leveldb_store.rs index 32ff942ddc..28e04f5620 100644 --- a/beacon_node/store/src/leveldb_store.rs +++ b/beacon_node/store/src/leveldb_store.rs @@ -182,7 +182,6 @@ impl KeyValueStore for LevelDB { fn iter_column_from(&self, column: DBColumn, from: &[u8]) -> ColumnIter { let start_key = BytesKey::from_vec(get_key_for_col(column.into(), from)); - let iter = self.db.iter(self.read_options()); iter.seek(&start_key); diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs index 1f8cc8ca01..e8631cc5ec 100644 --- a/beacon_node/store/src/lib.rs +++ b/beacon_node/store/src/lib.rs @@ -300,6 +300,9 @@ pub enum DBColumn { BeaconHistoricalSummaries, #[strum(serialize = "olc")] OverflowLRUCache, + /// For persisting eagerly computed light client data + #[strum(serialize = "lcu")] + LightClientUpdate, } /// A block from the database, which might have an execution payload or not. 
@@ -342,7 +345,8 @@ impl DBColumn { | Self::BeaconStateRoots | Self::BeaconHistoricalRoots | Self::BeaconHistoricalSummaries - | Self::BeaconRandaoMixes => 8, + | Self::BeaconRandaoMixes + | Self::LightClientUpdate => 8, Self::BeaconDataColumn => DATA_COLUMN_DB_KEY_SIZE, } } diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 6d000f576f..48cdf7031a 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -763,6 +763,31 @@ impl BeaconNodeHttpClient { self.get_opt(path).await } + /// `GET beacon/light_client/updates` + /// + /// Returns `Ok(None)` on a 404 error. + pub async fn get_beacon_light_client_updates( + &self, + start_period: u64, + count: u64, + ) -> Result>>>, Error> { + let mut path = self.eth_path(V1)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("light_client") + .push("updates"); + + path.query_pairs_mut() + .append_pair("start_period", &start_period.to_string()); + + path.query_pairs_mut() + .append_pair("count", &count.to_string()); + + self.get_opt(path).await + } + /// `GET beacon/light_client/bootstrap` /// /// Returns `Ok(None)` on a 404 error. 
diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index fa5fb654b7..793d839cee 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -784,6 +784,24 @@ pub struct ValidatorAggregateAttestationQuery { pub committee_index: Option, } +#[derive(Clone, Deserialize)] +pub struct LightClientUpdatesQuery { + pub start_period: u64, + pub count: u64, +} + +#[derive(Encode, Decode)] +pub struct LightClientUpdateSszResponse { + pub response_chunk_len: Vec, + pub response_chunk: Vec, +} + +#[derive(Encode, Decode)] +pub struct LightClientUpdateResponseChunk { + pub context: [u8; 4], + pub payload: Vec, +} + #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Hash)] pub struct BeaconCommitteeSubscription { #[serde(with = "serde_utils::quoted_u64")] diff --git a/consensus/types/src/light_client_finality_update.rs b/consensus/types/src/light_client_finality_update.rs index e65b057292..dc7561f5fc 100644 --- a/consensus/types/src/light_client_finality_update.rs +++ b/consensus/types/src/light_client_finality_update.rs @@ -192,6 +192,19 @@ impl LightClientFinalityUpdate { // `2 *` because there are two headers in the update fixed_size + 2 * LightClientHeader::::ssz_max_var_len_for_fork(fork_name) } + + // Implements spec prioritization rules: + // > Full nodes SHOULD provide the LightClientFinalityUpdate with the highest attested_header.beacon.slot (if multiple, highest signature_slot) + // + // ref: https://github.com/ethereum/consensus-specs/blob/113c58f9bf9c08867f6f5f633c4d98e0364d612a/specs/altair/light-client/full-node.md#create_light_client_finality_update + pub fn is_latest(&self, attested_slot: Slot, signature_slot: Slot) -> bool { + let prev_slot = self.get_attested_header_slot(); + if attested_slot > prev_slot { + true + } else { + attested_slot == prev_slot && signature_slot > *self.signature_slot() + } + } } impl ForkVersionDeserialize for LightClientFinalityUpdate { diff --git a/consensus/types/src/light_client_header.rs 
b/consensus/types/src/light_client_header.rs index 1feb748fae..a1d5f85eac 100644 --- a/consensus/types/src/light_client_header.rs +++ b/consensus/types/src/light_client_header.rs @@ -149,6 +149,15 @@ impl LightClientHeaderAltair { } } +impl Default for LightClientHeaderAltair { + fn default() -> Self { + Self { + beacon: BeaconBlockHeader::empty(), + _phantom_data: PhantomData, + } + } +} + impl LightClientHeaderCapella { pub fn block_to_light_client_header( block: &SignedBlindedBeaconBlock, @@ -180,6 +189,17 @@ impl LightClientHeaderCapella { } } +impl Default for LightClientHeaderCapella { + fn default() -> Self { + Self { + beacon: BeaconBlockHeader::empty(), + execution: ExecutionPayloadHeaderCapella::default(), + execution_branch: FixedVector::default(), + _phantom_data: PhantomData, + } + } +} + impl LightClientHeaderDeneb { pub fn block_to_light_client_header( block: &SignedBlindedBeaconBlock, @@ -211,6 +231,17 @@ impl LightClientHeaderDeneb { } } +impl Default for LightClientHeaderDeneb { + fn default() -> Self { + Self { + beacon: BeaconBlockHeader::empty(), + execution: ExecutionPayloadHeaderDeneb::default(), + execution_branch: FixedVector::default(), + _phantom_data: PhantomData, + } + } +} + impl LightClientHeaderElectra { pub fn block_to_light_client_header( block: &SignedBlindedBeaconBlock, @@ -242,6 +273,17 @@ impl LightClientHeaderElectra { } } +impl Default for LightClientHeaderElectra { + fn default() -> Self { + Self { + beacon: BeaconBlockHeader::empty(), + execution: ExecutionPayloadHeaderElectra::default(), + execution_branch: FixedVector::default(), + _phantom_data: PhantomData, + } + } +} + impl ForkVersionDeserialize for LightClientHeader { fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>( value: serde_json::value::Value, diff --git a/consensus/types/src/light_client_optimistic_update.rs b/consensus/types/src/light_client_optimistic_update.rs index f5b749be70..3cae31edf8 100644 --- 
a/consensus/types/src/light_client_optimistic_update.rs +++ b/consensus/types/src/light_client_optimistic_update.rs @@ -178,6 +178,19 @@ impl LightClientOptimisticUpdate { }; fixed_len + LightClientHeader::::ssz_max_var_len_for_fork(fork_name) } + + // Implements spec prioritization rules: + // > Full nodes SHOULD provide the LightClientOptimisticUpdate with the highest attested_header.beacon.slot (if multiple, highest signature_slot) + // + // ref: https://github.com/ethereum/consensus-specs/blob/113c58f9bf9c08867f6f5f633c4d98e0364d612a/specs/altair/light-client/full-node.md#create_light_client_optimistic_update + pub fn is_latest(&self, attested_slot: Slot, signature_slot: Slot) -> bool { + let prev_slot = self.get_slot(); + if attested_slot > prev_slot { + true + } else { + attested_slot == prev_slot && signature_slot > *self.signature_slot() + } + } } impl ForkVersionDeserialize for LightClientOptimisticUpdate { diff --git a/consensus/types/src/light_client_update.rs b/consensus/types/src/light_client_update.rs index 8a3eaff487..3b48a68df3 100644 --- a/consensus/types/src/light_client_update.rs +++ b/consensus/types/src/light_client_update.rs @@ -1,12 +1,13 @@ use super::{EthSpec, FixedVector, Hash256, Slot, SyncAggregate, SyncCommittee}; use crate::light_client_header::LightClientHeaderElectra; use crate::{ - beacon_state, test_utils::TestRandom, BeaconBlock, BeaconBlockHeader, BeaconState, ChainSpec, - ForkName, ForkVersionDeserialize, LightClientHeaderAltair, LightClientHeaderCapella, - LightClientHeaderDeneb, SignedBlindedBeaconBlock, + beacon_state, test_utils::TestRandom, ChainSpec, Epoch, ForkName, ForkVersionDeserialize, + LightClientHeaderAltair, LightClientHeaderCapella, LightClientHeaderDeneb, + SignedBlindedBeaconBlock, }; use derivative::Derivative; use safe_arith::ArithError; +use safe_arith::SafeArith; use serde::{Deserialize, Deserializer, Serialize}; use serde_json::Value; use ssz::Decode; @@ -16,7 +17,6 @@ use ssz_types::typenum::{U4, U5, U6}; 
use std::sync::Arc; use superstruct::superstruct; use test_random_derive::TestRandom; -use tree_hash::TreeHash; use tree_hash_derive::TreeHash; pub const FINALIZED_ROOT_INDEX: usize = 105; @@ -35,6 +35,9 @@ pub const CURRENT_SYNC_COMMITTEE_PROOF_LEN: usize = 5; pub const NEXT_SYNC_COMMITTEE_PROOF_LEN: usize = 5; pub const EXECUTION_PAYLOAD_PROOF_LEN: usize = 4; +type FinalityBranch = FixedVector; +type NextSyncCommitteeBranch = FixedVector; + #[derive(Debug, PartialEq, Clone)] pub enum Error { SszTypesError(ssz_types::Error), @@ -117,7 +120,7 @@ pub struct LightClientUpdate { /// The `SyncCommittee` used in the next period. pub next_sync_committee: Arc>, /// Merkle proof for next sync committee - pub next_sync_committee_branch: FixedVector, + pub next_sync_committee_branch: NextSyncCommitteeBranch, /// The last `BeaconBlockHeader` from the last attested finalized block (end of epoch). #[superstruct(only(Altair), partial_getter(rename = "finalized_header_altair"))] pub finalized_header: LightClientHeaderAltair, @@ -128,7 +131,7 @@ pub struct LightClientUpdate { #[superstruct(only(Electra), partial_getter(rename = "finalized_header_electra"))] pub finalized_header: LightClientHeaderElectra, /// Merkle proof attesting finalized header. 
- pub finality_branch: FixedVector, + pub finality_branch: FinalityBranch, /// current sync aggreggate pub sync_aggregate: SyncAggregate, /// Slot of the sync aggregated signature @@ -152,45 +155,17 @@ impl ForkVersionDeserialize for LightClientUpdate { } impl LightClientUpdate { + #[allow(clippy::too_many_arguments)] pub fn new( - beacon_state: BeaconState, - block: BeaconBlock, - attested_state: &mut BeaconState, + sync_aggregate: &SyncAggregate, + block_slot: Slot, + next_sync_committee: Arc>, + next_sync_committee_branch: FixedVector, + finality_branch: FixedVector, attested_block: &SignedBlindedBeaconBlock, - finalized_block: &SignedBlindedBeaconBlock, + finalized_block: Option<&SignedBlindedBeaconBlock>, chain_spec: &ChainSpec, ) -> Result { - let sync_aggregate = block.body().sync_aggregate()?; - if sync_aggregate.num_set_bits() < chain_spec.min_sync_committee_participants as usize { - return Err(Error::NotEnoughSyncCommitteeParticipants); - } - - let signature_period = block.epoch().sync_committee_period(chain_spec)?; - // Compute and validate attested header. 
- let mut attested_header = attested_state.latest_block_header().clone(); - attested_header.state_root = attested_state.update_tree_hash_cache()?; - let attested_period = attested_header - .slot - .epoch(E::slots_per_epoch()) - .sync_committee_period(chain_spec)?; - if attested_period != signature_period { - return Err(Error::MismatchingPeriods); - } - // Build finalized header from finalized block - let finalized_header = BeaconBlockHeader { - slot: finalized_block.slot(), - proposer_index: finalized_block.message().proposer_index(), - parent_root: finalized_block.parent_root(), - state_root: finalized_block.state_root(), - body_root: finalized_block.message().body_root(), - }; - if finalized_header.tree_hash_root() != beacon_state.finalized_checkpoint().root { - return Err(Error::InvalidFinalizedBlock); - } - let next_sync_committee_branch = - attested_state.compute_merkle_proof(NEXT_SYNC_COMMITTEE_INDEX)?; - let finality_branch = attested_state.compute_merkle_proof(FINALIZED_ROOT_INDEX)?; - let light_client_update = match attested_block .fork_name(chain_spec) .map_err(|_| Error::InconsistentFork)? @@ -199,71 +174,91 @@ impl LightClientUpdate { ForkName::Altair | ForkName::Bellatrix => { let attested_header = LightClientHeaderAltair::block_to_light_client_header(attested_block)?; - let finalized_header = - LightClientHeaderAltair::block_to_light_client_header(finalized_block)?; + + let finalized_header = if let Some(finalized_block) = finalized_block { + LightClientHeaderAltair::block_to_light_client_header(finalized_block)? 
+ } else { + LightClientHeaderAltair::default() + }; + Self::Altair(LightClientUpdateAltair { attested_header, - next_sync_committee: attested_state.next_sync_committee()?.clone(), - next_sync_committee_branch: FixedVector::new(next_sync_committee_branch)?, + next_sync_committee, + next_sync_committee_branch, finalized_header, - finality_branch: FixedVector::new(finality_branch)?, + finality_branch, sync_aggregate: sync_aggregate.clone(), - signature_slot: block.slot(), + signature_slot: block_slot, }) } ForkName::Capella => { let attested_header = LightClientHeaderCapella::block_to_light_client_header(attested_block)?; - let finalized_header = - LightClientHeaderCapella::block_to_light_client_header(finalized_block)?; + + let finalized_header = if let Some(finalized_block) = finalized_block { + LightClientHeaderCapella::block_to_light_client_header(finalized_block)? + } else { + LightClientHeaderCapella::default() + }; + Self::Capella(LightClientUpdateCapella { attested_header, - next_sync_committee: attested_state.next_sync_committee()?.clone(), - next_sync_committee_branch: FixedVector::new(next_sync_committee_branch)?, + next_sync_committee, + next_sync_committee_branch, finalized_header, - finality_branch: FixedVector::new(finality_branch)?, + finality_branch, sync_aggregate: sync_aggregate.clone(), - signature_slot: block.slot(), + signature_slot: block_slot, }) } ForkName::Deneb => { let attested_header = LightClientHeaderDeneb::block_to_light_client_header(attested_block)?; - let finalized_header = - LightClientHeaderDeneb::block_to_light_client_header(finalized_block)?; + + let finalized_header = if let Some(finalized_block) = finalized_block { + LightClientHeaderDeneb::block_to_light_client_header(finalized_block)? 
+ } else { + LightClientHeaderDeneb::default() + }; + Self::Deneb(LightClientUpdateDeneb { attested_header, - next_sync_committee: attested_state.next_sync_committee()?.clone(), - next_sync_committee_branch: FixedVector::new(next_sync_committee_branch)?, + next_sync_committee, + next_sync_committee_branch, finalized_header, - finality_branch: FixedVector::new(finality_branch)?, + finality_branch, sync_aggregate: sync_aggregate.clone(), - signature_slot: block.slot(), + signature_slot: block_slot, }) } ForkName::Electra => { let attested_header = LightClientHeaderElectra::block_to_light_client_header(attested_block)?; - let finalized_header = - LightClientHeaderElectra::block_to_light_client_header(finalized_block)?; + + let finalized_header = if let Some(finalized_block) = finalized_block { + LightClientHeaderElectra::block_to_light_client_header(finalized_block)? + } else { + LightClientHeaderElectra::default() + }; + Self::Electra(LightClientUpdateElectra { attested_header, - next_sync_committee: attested_state.next_sync_committee()?.clone(), - next_sync_committee_branch: FixedVector::new(next_sync_committee_branch)?, + next_sync_committee, + next_sync_committee_branch, finalized_header, - finality_branch: FixedVector::new(finality_branch)?, + finality_branch, sync_aggregate: sync_aggregate.clone(), - signature_slot: block.slot(), + signature_slot: block_slot, }) } // To add a new fork, just append the new fork variant on the latest fork. 
Forks that - // have a distinct execution header will need a new LightClientUdpate variant only + // have a distinct execution header will need a new LightClientUpdate variant only // if you need to test or support lightclient usages }; Ok(light_client_update) } - pub fn from_ssz_bytes(bytes: &[u8], fork_name: ForkName) -> Result { + pub fn from_ssz_bytes(bytes: &[u8], fork_name: &ForkName) -> Result { let update = match fork_name { ForkName::Altair | ForkName::Bellatrix => { Self::Altair(LightClientUpdateAltair::from_ssz_bytes(bytes)?) @@ -280,6 +275,142 @@ impl LightClientUpdate { Ok(update) } + + pub fn attested_header_slot(&self) -> Slot { + match self { + LightClientUpdate::Altair(update) => update.attested_header.beacon.slot, + LightClientUpdate::Capella(update) => update.attested_header.beacon.slot, + LightClientUpdate::Deneb(update) => update.attested_header.beacon.slot, + LightClientUpdate::Electra(update) => update.attested_header.beacon.slot, + } + } + + pub fn finalized_header_slot(&self) -> Slot { + match self { + LightClientUpdate::Altair(update) => update.finalized_header.beacon.slot, + LightClientUpdate::Capella(update) => update.finalized_header.beacon.slot, + LightClientUpdate::Deneb(update) => update.finalized_header.beacon.slot, + LightClientUpdate::Electra(update) => update.finalized_header.beacon.slot, + } + } + + fn attested_header_sync_committee_period( + &self, + chain_spec: &ChainSpec, + ) -> Result { + compute_sync_committee_period_at_slot::(self.attested_header_slot(), chain_spec) + .map_err(Error::ArithError) + } + + fn signature_slot_sync_committee_period(&self, chain_spec: &ChainSpec) -> Result { + compute_sync_committee_period_at_slot::(*self.signature_slot(), chain_spec) + .map_err(Error::ArithError) + } + + pub fn is_sync_committee_update(&self, chain_spec: &ChainSpec) -> Result { + Ok(!self.is_next_sync_committee_branch_empty() + && (self.attested_header_sync_committee_period(chain_spec)? 
+ == self.signature_slot_sync_committee_period(chain_spec)?)) + } + + pub fn has_sync_committee_finality(&self, chain_spec: &ChainSpec) -> Result { + Ok( + compute_sync_committee_period_at_slot::(self.finalized_header_slot(), chain_spec)? + == self.attested_header_sync_committee_period(chain_spec)?, + ) + } + + // Implements spec prioritization rules: + // Full nodes SHOULD provide the best derivable LightClientUpdate for each sync committee period + // ref: https://github.com/ethereum/consensus-specs/blob/113c58f9bf9c08867f6f5f633c4d98e0364d612a/specs/altair/light-client/full-node.md#create_light_client_update + pub fn is_better_light_client_update( + &self, + new: &Self, + chain_spec: &ChainSpec, + ) -> Result { + // Compare super majority (> 2/3) sync committee participation + let max_active_participants = new.sync_aggregate().sync_committee_bits.len(); + + let new_active_participants = new.sync_aggregate().sync_committee_bits.num_set_bits(); + let prev_active_participants = self.sync_aggregate().sync_committee_bits.num_set_bits(); + + let new_has_super_majority = + new_active_participants.safe_mul(3)? >= max_active_participants.safe_mul(2)?; + let prev_has_super_majority = + prev_active_participants.safe_mul(3)? 
>= max_active_participants.safe_mul(2)?; + + if new_has_super_majority != prev_has_super_majority { + return Ok(new_has_super_majority); + } + + if !new_has_super_majority && new_active_participants != prev_active_participants { + return Ok(new_active_participants > prev_active_participants); + } + + // Compare presence of relevant sync committee + let new_has_relevant_sync_committee = new.is_sync_committee_update(chain_spec)?; + let prev_has_relevant_sync_committee = self.is_sync_committee_update(chain_spec)?; + if new_has_relevant_sync_committee != prev_has_relevant_sync_committee { + return Ok(new_has_relevant_sync_committee); + } + + // Compare indication of any finality + let new_has_finality = !new.is_finality_branch_empty(); + let prev_has_finality = !self.is_finality_branch_empty(); + if new_has_finality != prev_has_finality { + return Ok(new_has_finality); + } + + // Compare sync committee finality + if new_has_finality { + let new_has_sync_committee_finality = new.has_sync_committee_finality(chain_spec)?; + let prev_has_sync_committee_finality = self.has_sync_committee_finality(chain_spec)?; + if new_has_sync_committee_finality != prev_has_sync_committee_finality { + return Ok(new_has_sync_committee_finality); + } + } + + // Tiebreaker 1: Sync committee participation beyond super majority + if new_active_participants != prev_active_participants { + return Ok(new_active_participants > prev_active_participants); + } + + let new_attested_header_slot = new.attested_header_slot(); + let prev_attested_header_slot = self.attested_header_slot(); + + // Tiebreaker 2: Prefer older data (fewer changes to best) + if new_attested_header_slot != prev_attested_header_slot { + return Ok(new_attested_header_slot < prev_attested_header_slot); + } + + return Ok(new.signature_slot() < self.signature_slot()); + } + + fn is_next_sync_committee_branch_empty(&self) -> bool { + for index in self.next_sync_committee_branch().iter() { + if *index != Hash256::default() { + return 
false; + } + } + true + } + + pub fn is_finality_branch_empty(&self) -> bool { + for index in self.finality_branch().iter() { + if *index != Hash256::default() { + return false; + } + } + true + } +} + +fn compute_sync_committee_period_at_slot( + slot: Slot, + chain_spec: &ChainSpec, +) -> Result { + slot.epoch(E::slots_per_epoch()) + .safe_div(chain_spec.epochs_per_sync_committee_period) } #[cfg(test)] diff --git a/testing/ef_tests/check_all_files_accessed.py b/testing/ef_tests/check_all_files_accessed.py index e1a308f7a4..f1ab5ad600 100755 --- a/testing/ef_tests/check_all_files_accessed.py +++ b/testing/ef_tests/check_all_files_accessed.py @@ -26,7 +26,9 @@ excluded_paths = [ "tests/.*/.*/ssz_static/Eth1Block/", "tests/.*/.*/ssz_static/PowBlock/", # light_client - "tests/.*/.*/light_client", + # "tests/.*/.*/light_client", + "tests/.*/.*/light_client/single_merkle_proof", + "tests/.*/.*/light_client/sync", # LightClientStore "tests/.*/.*/ssz_static/LightClientStore", # LightClientSnapshot diff --git a/testing/ef_tests/src/cases.rs b/testing/ef_tests/src/cases.rs index f328fa6404..2d6f661f0e 100644 --- a/testing/ef_tests/src/cases.rs +++ b/testing/ef_tests/src/cases.rs @@ -24,6 +24,7 @@ mod kzg_compute_kzg_proof; mod kzg_verify_blob_kzg_proof; mod kzg_verify_blob_kzg_proof_batch; mod kzg_verify_kzg_proof; +mod light_client_verify_is_better_update; mod merkle_proof_validity; mod operations; mod rewards; @@ -54,6 +55,7 @@ pub use kzg_compute_kzg_proof::*; pub use kzg_verify_blob_kzg_proof::*; pub use kzg_verify_blob_kzg_proof_batch::*; pub use kzg_verify_kzg_proof::*; +pub use light_client_verify_is_better_update::*; pub use merkle_proof_validity::*; pub use operations::*; pub use rewards::RewardsTest; diff --git a/testing/ef_tests/src/cases/light_client_verify_is_better_update.rs b/testing/ef_tests/src/cases/light_client_verify_is_better_update.rs new file mode 100644 index 0000000000..de281d906c --- /dev/null +++ 
b/testing/ef_tests/src/cases/light_client_verify_is_better_update.rs @@ -0,0 +1,110 @@ +use super::*; +use decode::ssz_decode_light_client_update; +use serde::Deserialize; +use types::{LightClientUpdate, Slot}; + +#[derive(Debug, Clone, Deserialize)] +#[serde(deny_unknown_fields)] +pub struct LightClientVerifyIsBetterUpdate { + light_client_updates: Vec>, +} + +#[derive(Debug, Clone, Default, Deserialize)] +pub struct Metadata { + updates_count: u64, +} + +impl LoadCase for LightClientVerifyIsBetterUpdate { + fn load_from_dir(path: &Path, fork_name: ForkName) -> Result { + let mut light_client_updates = vec![]; + let metadata: Metadata = decode::yaml_decode_file(path.join("meta.yaml").as_path())?; + for index in 0..metadata.updates_count { + let light_client_update = ssz_decode_light_client_update( + &path.join(format!("updates_{}.ssz_snappy", index)), + &fork_name, + )?; + light_client_updates.push(light_client_update); + } + + Ok(Self { + light_client_updates, + }) + } +} + +impl Case for LightClientVerifyIsBetterUpdate { + // Light client updates in `self.light_client_updates` are ordered in descending precedence + // where the update at index = 0 is considered the best update. This test iterates through + // all light client updates in a nested loop to make all possible comparisons. If a light client update + // at index `i`` is considered 'better' than a light client update at index `j`` when `i > j`, this test fails. 
+ fn result(&self, _case_index: usize, fork_name: ForkName) -> Result<(), Error> { + let spec = fork_name.make_genesis_spec(E::default_spec()); + for (i, ith_light_client_update) in self.light_client_updates.iter().enumerate() { + for (j, jth_light_client_update) in self.light_client_updates.iter().enumerate() { + eprintln!("{i} {j}"); + if i == j { + continue; + } + + let is_better_update = ith_light_client_update + .is_better_light_client_update(jth_light_client_update, &spec) + .unwrap(); + + let ith_summary = + LightClientUpdateSummary::from_update(ith_light_client_update, &spec); + let jth_summary = + LightClientUpdateSummary::from_update(jth_light_client_update, &spec); + + let (best_index, other_index, best_update, other_update, failed) = if i < j { + // i is better, so is_better_update must return false + (i, j, ith_summary, jth_summary, is_better_update) + } else { + // j is better, so is_better must return true + (j, i, jth_summary, ith_summary, !is_better_update) + }; + + if failed { + eprintln!("is_better_update: {is_better_update}"); + eprintln!("index {best_index} update {best_update:?}"); + eprintln!("index {other_index} update {other_update:?}"); + eprintln!( + "update at index {best_index} must be considered better than update at index {other_index}" + ); + return Err(Error::FailedComparison(format!( + "update at index {best_index} must be considered better than update at index {other_index}" + ))); + } + } + } + + Ok(()) + } +} + +#[derive(Debug)] +#[allow(dead_code)] +struct LightClientUpdateSummary { + participants: usize, + supermajority: bool, + relevant_sync_committee: bool, + has_finality: bool, + has_sync_committee_finality: bool, + header_slot: Slot, + signature_slot: Slot, +} + +impl LightClientUpdateSummary { + fn from_update(update: &LightClientUpdate, spec: &ChainSpec) -> Self { + let max_participants = update.sync_aggregate().sync_committee_bits.len(); + let participants = update.sync_aggregate().sync_committee_bits.num_set_bits(); + 
Self { + participants, + supermajority: participants * 3 > max_participants * 2, + relevant_sync_committee: update.is_sync_committee_update(spec).unwrap(), + has_finality: !update.is_finality_branch_empty(), + has_sync_committee_finality: update.has_sync_committee_finality(spec).unwrap(), + header_slot: update.attested_header_slot(), + signature_slot: *update.signature_slot(), + } + } +} diff --git a/testing/ef_tests/src/decode.rs b/testing/ef_tests/src/decode.rs index 51ab682f3d..757b9bf3c4 100644 --- a/testing/ef_tests/src/decode.rs +++ b/testing/ef_tests/src/decode.rs @@ -5,7 +5,7 @@ use std::fs::{self}; use std::io::Write; use std::path::Path; use std::path::PathBuf; -use types::BeaconState; +use types::{BeaconState, LightClientUpdate}; /// See `log_file_access` for details. const ACCESSED_FILE_LOG_FILENAME: &str = ".accessed_file_log.txt"; @@ -95,3 +95,13 @@ pub fn ssz_decode_state( log_file_access(path); ssz_decode_file_with(path, |bytes| BeaconState::from_ssz_bytes(bytes, spec)) } + +pub fn ssz_decode_light_client_update( + path: &Path, + fork_name: &ForkName, +) -> Result, Error> { + log_file_access(path); + ssz_decode_file_with(path, |bytes| { + LightClientUpdate::from_ssz_bytes(bytes, fork_name) + }) +} diff --git a/testing/ef_tests/src/error.rs b/testing/ef_tests/src/error.rs index c5795777ad..389308377c 100644 --- a/testing/ef_tests/src/error.rs +++ b/testing/ef_tests/src/error.rs @@ -14,6 +14,8 @@ pub enum Error { SkippedKnownFailure, /// The test failed due to some internal error preventing the test from running. InternalError(String), + /// The test failed while making some comparison. 
+ FailedComparison(String), } impl Error { @@ -26,6 +28,7 @@ impl Error { Error::SkippedBls => "SkippedBls", Error::SkippedKnownFailure => "SkippedKnownFailure", Error::InternalError(_) => "InternalError", + Error::FailedComparison(_) => "FailedComparison", } } diff --git a/testing/ef_tests/src/handler.rs b/testing/ef_tests/src/handler.rs index 410a37e768..52fc58f3d8 100644 --- a/testing/ef_tests/src/handler.rs +++ b/testing/ef_tests/src/handler.rs @@ -837,6 +837,32 @@ impl Handler for KzgInclusionMerkleProofValidityHandler(PhantomData); + +impl Handler for LightClientUpdateHandler { + type Case = cases::LightClientVerifyIsBetterUpdate; + + fn config_name() -> &'static str { + E::name() + } + + fn runner_name() -> &'static str { + "light_client" + } + + fn handler_name(&self) -> String { + "update_ranking".into() + } + + fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool { + // Enabled in Altair + // TODO(electra) re-enable once https://github.com/sigp/lighthouse/issues/6002 is resolved + fork_name != ForkName::Base && fork_name != ForkName::Electra + } +} + #[derive(Derivative)] #[derivative(Default(bound = ""))] pub struct OperationsHandler(PhantomData<(E, O)>); diff --git a/testing/ef_tests/tests/tests.rs b/testing/ef_tests/tests/tests.rs index 10a57a6b45..9014385044 100644 --- a/testing/ef_tests/tests/tests.rs +++ b/testing/ef_tests/tests/tests.rs @@ -900,6 +900,11 @@ fn merkle_proof_validity() { MerkleProofValidityHandler::::default().run(); } +#[test] +fn light_client_update() { + LightClientUpdateHandler::::default().run(); +} + #[test] #[cfg(feature = "fake_crypto")] fn kzg_inclusion_merkle_proof_validity() { From 781c5ecb1f206f701fb1fdc77fe4269002c35ff5 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 12 Aug 2024 12:31:18 +1000 Subject: [PATCH 29/43] Add lcli command for manual rescue sync (#5458) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Rescue CLI * Allow tweaking start block * More 
caching * Merge branch 'unstable' into rescue-cli # Conflicts: # lcli/src/main.rs * Add `--known–common-ancestor` flag to optimise for download speed. * Rename rescue command to `http-sync` * Add logging * Add optional `--block-cache-dir` cli arg and create directory if it doesn't already exist. * Lint fix. * Merge branch 'unstable' into rescue-cli --- lcli/src/http_sync.rs | 152 ++++++++++++++++++++++++++++++++++++++++++ lcli/src/main.rs | 74 ++++++++++++++++++++ 2 files changed, 226 insertions(+) create mode 100644 lcli/src/http_sync.rs diff --git a/lcli/src/http_sync.rs b/lcli/src/http_sync.rs new file mode 100644 index 0000000000..1ef40e6397 --- /dev/null +++ b/lcli/src/http_sync.rs @@ -0,0 +1,152 @@ +use clap::ArgMatches; +use clap_utils::{parse_optional, parse_required}; +use environment::Environment; +use eth2::{ + types::{BlockId, ChainSpec, ForkName, PublishBlockRequest, SignedBlockContents}, + BeaconNodeHttpClient, Error, SensitiveUrl, Timeouts, +}; +use eth2_network_config::Eth2NetworkConfig; +use ssz::Encode; +use std::fs; +use std::fs::File; +use std::io::{Read, Write}; +use std::path::{Path, PathBuf}; +use std::sync::Arc; +use std::time::Duration; +use types::EthSpec; + +const HTTP_TIMEOUT: Duration = Duration::from_secs(3600); +const DEFAULT_CACHE_DIR: &str = "./cache"; + +pub fn run( + env: Environment, + network_config: Eth2NetworkConfig, + matches: &ArgMatches, +) -> Result<(), String> { + let executor = env.core_context().executor; + executor + .handle() + .ok_or("shutdown in progress")? 
+ .block_on(async move { run_async::(network_config, matches).await })
+}
+
+pub async fn run_async(
+ network_config: Eth2NetworkConfig,
+ matches: &ArgMatches,
+) -> Result<(), String> {
+ let spec = &network_config.chain_spec::()?;
+ let source_url: SensitiveUrl = parse_required(matches, "source-url")?;
+ let target_url: SensitiveUrl = parse_required(matches, "target-url")?;
+ let start_block: BlockId = parse_required(matches, "start-block")?;
+ let maybe_common_ancestor_block: Option =
+ parse_optional(matches, "known-common-ancestor")?;
+ let cache_dir_path: PathBuf =
+ parse_optional(matches, "block-cache-dir")?.unwrap_or(DEFAULT_CACHE_DIR.into());
+
+ let source = BeaconNodeHttpClient::new(source_url, Timeouts::set_all(HTTP_TIMEOUT));
+ let target = BeaconNodeHttpClient::new(target_url, Timeouts::set_all(HTTP_TIMEOUT));
+
+ if !cache_dir_path.exists() {
+ fs::create_dir_all(&cache_dir_path)
+ .map_err(|e| format!("Unable to create block cache dir: {:?}", e))?;
+ }
+
+ // 1. Download blocks back from head, looking for common ancestor.
+ let mut blocks = vec![];
+ let mut next_block_id = start_block;
+ loop {
+ println!("downloading {next_block_id:?}");
+
+ let publish_block_req =
+ get_block_from_source::(&source, next_block_id, spec, &cache_dir_path).await;
+ let block = publish_block_req.signed_block();
+
+ next_block_id = BlockId::Root(block.parent_root());
+ blocks.push((block.slot(), publish_block_req));
+
+ if let Some(ref common_ancestor_block) = maybe_common_ancestor_block {
+ if common_ancestor_block == &next_block_id {
+ println!("reached known common ancestor: {next_block_id:?}");
+ break;
+ }
+ }
+
+ let block_exists_in_target = target
+ .get_beacon_blocks_ssz::(next_block_id, spec)
+ .await
+ .unwrap()
+ .is_some();
+ if block_exists_in_target {
+ println!("common ancestor found: {next_block_id:?}");
+ break;
+ }
+ }
+
+ // 2. Apply blocks to target.
+ for (slot, block) in blocks.iter().rev() { + println!("posting block at slot {slot}"); + if let Err(e) = target.post_beacon_blocks(block).await { + if let Error::ServerMessage(ref e) = e { + if e.code == 202 { + println!("duplicate block detected while posting block at slot {slot}"); + continue; + } + } + return Err(format!("error posting {slot}: {e:?}")); + } else { + println!("success"); + } + } + + println!("SYNCED!!!!"); + + Ok(()) +} + +async fn get_block_from_source( + source: &BeaconNodeHttpClient, + block_id: BlockId, + spec: &ChainSpec, + cache_dir_path: &Path, +) -> PublishBlockRequest { + let mut cache_path = cache_dir_path.join(format!("block_{block_id}")); + + if cache_path.exists() { + let mut f = File::open(&cache_path).unwrap(); + let mut bytes = vec![]; + f.read_to_end(&mut bytes).unwrap(); + PublishBlockRequest::from_ssz_bytes(&bytes, ForkName::Deneb).unwrap() + } else { + let block_from_source = source + .get_beacon_blocks_ssz::(block_id, spec) + .await + .unwrap() + .unwrap(); + let blobs_from_source = source + .get_blobs::(block_id, None) + .await + .unwrap() + .unwrap() + .data; + + let (kzg_proofs, blobs): (Vec<_>, Vec<_>) = blobs_from_source + .iter() + .cloned() + .map(|sidecar| (sidecar.kzg_proof, sidecar.blob.clone())) + .unzip(); + + let block_root = block_from_source.canonical_root(); + let block_contents = SignedBlockContents { + signed_block: Arc::new(block_from_source), + kzg_proofs: kzg_proofs.into(), + blobs: blobs.into(), + }; + let publish_block_req = PublishBlockRequest::BlockContents(block_contents); + + cache_path = cache_dir_path.join(format!("block_{block_root:?}")); + let mut f = File::create(&cache_path).unwrap(); + f.write_all(&publish_block_req.as_ssz_bytes()).unwrap(); + + publish_block_req + } +} diff --git a/lcli/src/main.rs b/lcli/src/main.rs index 85898b60ee..380aeb6ace 100644 --- a/lcli/src/main.rs +++ b/lcli/src/main.rs @@ -1,6 +1,7 @@ mod block_root; mod check_deposit_data; mod generate_bootnode_enr; +mod 
http_sync; mod indexed_attestations; mod mnemonic_validators; mod mock_el; @@ -552,6 +553,74 @@ fn main() { .display_order(0) ) ) + .subcommand( + Command::new("http-sync") + .about("Manual sync") + .arg( + Arg::new("start-block") + .long("start-block") + .value_name("BLOCK_ID") + .action(ArgAction::Set) + .help("Block ID of source's head") + .default_value("head") + .required(true) + .display_order(0) + ) + .arg( + Arg::new("source-url") + .long("source-url") + .value_name("URL") + .action(ArgAction::Set) + .help("URL to a synced beacon-API provider") + .required(true) + .display_order(0) + ) + .arg( + Arg::new("target-url") + .long("target-url") + .value_name("URL") + .action(ArgAction::Set) + .help("URL to an unsynced beacon-API provider") + .required(true) + .display_order(0) + ) + .arg( + Arg::new("testnet-dir") + .short('d') + .long("testnet-dir") + .value_name("PATH") + .action(ArgAction::Set) + .global(true) + .help("The testnet dir.") + .display_order(0) + ) + .arg( + Arg::new("network") + .long("network") + .value_name("NAME") + .action(ArgAction::Set) + .global(true) + .help("The network to use. 
Defaults to mainnet.") + .conflicts_with("testnet-dir") + .display_order(0) + ) + .arg( + Arg::new("known-common-ancestor") + .long("known-common-ancestor") + .value_name("BLOCK_ID") + .action(ArgAction::Set) + .help("Block ID of common ancestor, if known.") + .display_order(0) + ) + .arg( + Arg::new("block-cache-dir") + .long("block-cache-dir") + .value_name("PATH") + .action(ArgAction::Set) + .help("Directory to keep a cache of the downloaded SSZ blocks.") + .display_order(0) + ) + ) .get_matches(); let result = matches @@ -656,6 +725,11 @@ fn run(env_builder: EnvironmentBuilder, matches: &ArgMatches) -> } Some(("mock-el", matches)) => mock_el::run::(env, matches) .map_err(|e| format!("Failed to run mock-el command: {}", e)), + Some(("http-sync", matches)) => { + let network_config = get_network_config()?; + http_sync::run::(env, network_config, matches) + .map_err(|e| format!("Failed to run http-sync command: {}", e)) + } Some((other, _)) => Err(format!("Unknown subcommand {}. See --help.", other)), _ => Err("No subcommand provided. See --help.".to_string()), } From f2fdbe7fbe82428a9458deecc2d580533f955ed9 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Mon, 12 Aug 2024 12:31:21 +1000 Subject: [PATCH 30/43] Add plumbing for PeerDAS supernodes (#5050, #5409, #5570, #5966) (#6216) * Add plumbing for peerdas supernodes (#5050, #5409, #5570, #5966) - add cli option `--subscribe-to-all-data-columns` - add custody subnet count to ENR, only if PeerDAS is scheduled - subscribe to data column topics, only if PeerDAS is scheduled Co-authored-by: Jacob Kaufmann * Merge branch 'unstable' into das-supernode * Update CLI docs. * Merge branch 'unstable' into das-supernode * Fix fork epoch comparison with `FAR_FUTURE_EPOCH`. * Merge branch 'unstable' into das-supernode * Hide `--subscribe-all-data-column-subnets` flag and update help. 
* Fix docs only * Merge branch 'unstable' into das-supernode --- beacon_node/beacon_chain/src/builder.rs | 19 ++- .../src/data_availability_checker.rs | 11 +- beacon_node/client/src/builder.rs | 1 + beacon_node/lighthouse_network/src/config.rs | 6 +- .../lighthouse_network/src/discovery/enr.rs | 111 +++++++++++++++++- .../lighthouse_network/src/discovery/mod.rs | 2 +- .../lighthouse_network/src/service/mod.rs | 1 + beacon_node/network/src/service.rs | 56 ++++++++- beacon_node/src/cli.rs | 12 ++ beacon_node/src/config.rs | 4 + consensus/types/src/chain_spec.rs | 7 ++ lcli/src/generate_bootnode_enr.rs | 4 +- lcli/src/main.rs | 6 +- 13 files changed, 223 insertions(+), 17 deletions(-) diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index c86e35980b..042d14a4fa 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -104,6 +104,7 @@ pub struct BeaconChainBuilder { kzg: Option>, task_executor: Option, validator_monitor_config: Option, + import_all_data_columns: bool, } impl @@ -145,6 +146,7 @@ where kzg: None, task_executor: None, validator_monitor_config: None, + import_all_data_columns: false, } } @@ -615,6 +617,12 @@ where self } + /// Sets whether to require and import all data columns when importing block. + pub fn import_all_data_columns(mut self, import_all_data_columns: bool) -> Self { + self.import_all_data_columns = import_all_data_columns; + self + } + /// Sets the `BeaconChain` event handler backend. /// /// For example, provide `ServerSentEventHandler` as a `handler`. 
@@ -965,8 +973,15 @@ where validator_monitor: RwLock::new(validator_monitor), genesis_backfill_slot, data_availability_checker: Arc::new( - DataAvailabilityChecker::new(slot_clock, self.kzg.clone(), store, &log, self.spec) - .map_err(|e| format!("Error initializing DataAvailabiltyChecker: {:?}", e))?, + DataAvailabilityChecker::new( + slot_clock, + self.kzg.clone(), + store, + self.import_all_data_columns, + &log, + self.spec, + ) + .map_err(|e| format!("Error initializing DataAvailabilityChecker: {:?}", e))?, ), kzg: self.kzg.clone(), }; diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs index ce5995a558..b4336a054e 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -95,11 +95,16 @@ impl DataAvailabilityChecker { slot_clock: T::SlotClock, kzg: Option>, store: BeaconStore, + import_all_data_columns: bool, log: &Logger, spec: ChainSpec, ) -> Result { - // TODO(das): support supernode or custom custody requirement - let custody_subnet_count = spec.custody_requirement as usize; + let custody_subnet_count = if import_all_data_columns { + spec.data_column_sidecar_subnet_count as usize + } else { + spec.custody_requirement as usize + }; + let custody_column_count = custody_subnet_count.saturating_mul(spec.data_columns_per_subnet()); @@ -112,8 +117,8 @@ impl DataAvailabilityChecker { Ok(Self { availability_cache: Arc::new(overflow_cache), slot_clock, - log: log.clone(), kzg, + log: log.clone(), spec, }) } diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 393ce35f00..6695f3c4bc 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -207,6 +207,7 @@ where .beacon_graffiti(beacon_graffiti) .event_handler(event_handler) .execution_layer(execution_layer) + .import_all_data_columns(config.network.subscribe_all_data_column_subnets) 
.validator_monitor_config(config.validator_monitor.clone()); let builder = if let Some(slasher) = self.slasher.clone() { diff --git a/beacon_node/lighthouse_network/src/config.rs b/beacon_node/lighthouse_network/src/config.rs index 91c5b62d0b..7c95977140 100644 --- a/beacon_node/lighthouse_network/src/config.rs +++ b/beacon_node/lighthouse_network/src/config.rs @@ -42,7 +42,7 @@ pub struct Config { pub network_dir: PathBuf, /// IP addresses to listen on. - listen_addresses: ListenAddress, + pub(crate) listen_addresses: ListenAddress, /// The address to broadcast to peers about which address we are listening on. None indicates /// that no discovery address has been set in the CLI args. @@ -100,6 +100,9 @@ pub struct Config { /// Attempt to construct external port mappings with UPnP. pub upnp_enabled: bool, + /// Subscribe to all data column subnets for the duration of the runtime. + pub subscribe_all_data_column_subnets: bool, + /// Subscribe to all subnets for the duration of the runtime. pub subscribe_all_subnets: bool, @@ -338,6 +341,7 @@ impl Default for Config { upnp_enabled: true, network_load: 4, private: false, + subscribe_all_data_column_subnets: false, subscribe_all_subnets: false, import_all_attestations: false, shutdown_after_sync: false, diff --git a/beacon_node/lighthouse_network/src/discovery/enr.rs b/beacon_node/lighthouse_network/src/discovery/enr.rs index 51e50808e1..04ae997150 100644 --- a/beacon_node/lighthouse_network/src/discovery/enr.rs +++ b/beacon_node/lighthouse_network/src/discovery/enr.rs @@ -14,7 +14,7 @@ use std::fs::File; use std::io::prelude::*; use std::path::Path; use std::str::FromStr; -use types::{EnrForkId, EthSpec}; +use types::{ChainSpec, EnrForkId, EthSpec}; use super::enr_ext::{EnrExt, QUIC6_ENR_KEY, QUIC_ENR_KEY}; @@ -24,6 +24,8 @@ pub const ETH2_ENR_KEY: &str = "eth2"; pub const ATTESTATION_BITFIELD_ENR_KEY: &str = "attnets"; /// The ENR field specifying the sync committee subnet bitfield. 
pub const SYNC_COMMITTEE_BITFIELD_ENR_KEY: &str = "syncnets"; +/// The ENR field specifying the peerdas custody subnet count. +pub const PEERDAS_CUSTODY_SUBNET_COUNT_ENR_KEY: &str = "csc"; /// Extension trait for ENR's within Eth2. pub trait Eth2Enr { @@ -35,6 +37,9 @@ pub trait Eth2Enr { &self, ) -> Result, &'static str>; + /// The peerdas custody subnet count associated with the ENR. + fn custody_subnet_count(&self, spec: &ChainSpec) -> u64; + fn eth2(&self) -> Result; } @@ -59,6 +64,16 @@ impl Eth2Enr for Enr { .map_err(|_| "Could not decode the ENR syncnets bitfield") } + /// if the custody value is non-existent in the ENR, then we assume the minimum custody value + /// defined in the spec. + fn custody_subnet_count(&self, spec: &ChainSpec) -> u64 { + self.get_decodable::(PEERDAS_CUSTODY_SUBNET_COUNT_ENR_KEY) + .and_then(|r| r.ok()) + // If value supplied in ENR is invalid, fallback to `custody_requirement` + .filter(|csc| csc <= &spec.data_column_sidecar_subnet_count) + .unwrap_or(spec.custody_requirement) + } + fn eth2(&self) -> Result { let eth2_bytes = self.get(ETH2_ENR_KEY).ok_or("ENR has no eth2 field")?; @@ -126,12 +141,13 @@ pub fn build_or_load_enr( config: &NetworkConfig, enr_fork_id: &EnrForkId, log: &slog::Logger, + spec: &ChainSpec, ) -> Result { // Build the local ENR. // Note: Discovery should update the ENR record's IP to the external IP as seen by the // majority of our peers, if the CLI doesn't expressly forbid it. 
let enr_key = CombinedKey::from_libp2p(local_key)?; - let mut local_enr = build_enr::(&enr_key, config, enr_fork_id)?; + let mut local_enr = build_enr::(&enr_key, config, enr_fork_id, spec)?; use_or_load_enr(&enr_key, &mut local_enr, config, log)?; Ok(local_enr) @@ -142,6 +158,7 @@ pub fn build_enr( enr_key: &CombinedKey, config: &NetworkConfig, enr_fork_id: &EnrForkId, + spec: &ChainSpec, ) -> Result { let mut builder = discv5::enr::Enr::builder(); let (maybe_ipv4_address, maybe_ipv6_address) = &config.enr_address; @@ -221,6 +238,16 @@ pub fn build_enr( builder.add_value(SYNC_COMMITTEE_BITFIELD_ENR_KEY, &bitfield.as_ssz_bytes()); + // only set `csc` if PeerDAS fork epoch has been scheduled + if spec.is_peer_das_scheduled() { + let custody_subnet_count = if config.subscribe_all_data_column_subnets { + spec.data_column_sidecar_subnet_count + } else { + spec.custody_requirement + }; + builder.add_value(PEERDAS_CUSTODY_SUBNET_COUNT_ENR_KEY, &custody_subnet_count); + } + builder .build(enr_key) .map_err(|e| format!("Could not build Local ENR: {:?}", e)) @@ -244,10 +271,12 @@ fn compare_enr(local_enr: &Enr, disk_enr: &Enr) -> bool { // take preference over disk udp port if one is not specified && (local_enr.udp4().is_none() || local_enr.udp4() == disk_enr.udp4()) && (local_enr.udp6().is_none() || local_enr.udp6() == disk_enr.udp6()) - // we need the ATTESTATION_BITFIELD_ENR_KEY and SYNC_COMMITTEE_BITFIELD_ENR_KEY key to match, - // otherwise we use a new ENR. This will likely only be true for non-validating nodes + // we need the ATTESTATION_BITFIELD_ENR_KEY and SYNC_COMMITTEE_BITFIELD_ENR_KEY and + // PEERDAS_CUSTODY_SUBNET_COUNT_ENR_KEY key to match, otherwise we use a new ENR. This will + // likely only be true for non-validating nodes. 
&& local_enr.get(ATTESTATION_BITFIELD_ENR_KEY) == disk_enr.get(ATTESTATION_BITFIELD_ENR_KEY) && local_enr.get(SYNC_COMMITTEE_BITFIELD_ENR_KEY) == disk_enr.get(SYNC_COMMITTEE_BITFIELD_ENR_KEY) + && local_enr.get(PEERDAS_CUSTODY_SUBNET_COUNT_ENR_KEY) == disk_enr.get(PEERDAS_CUSTODY_SUBNET_COUNT_ENR_KEY) } /// Loads enr from the given directory @@ -280,3 +309,77 @@ pub fn save_enr_to_disk(dir: &Path, enr: &Enr, log: &slog::Logger) { } } } + +#[cfg(test)] +mod test { + use super::*; + use crate::config::Config as NetworkConfig; + use types::{Epoch, MainnetEthSpec}; + + type E = MainnetEthSpec; + + fn make_eip7594_spec() -> ChainSpec { + let mut spec = E::default_spec(); + spec.eip7594_fork_epoch = Some(Epoch::new(10)); + spec + } + + #[test] + fn custody_subnet_count_default() { + let config = NetworkConfig { + subscribe_all_data_column_subnets: false, + ..NetworkConfig::default() + }; + let spec = make_eip7594_spec(); + + let enr = build_enr_with_config(config, &spec).0; + + assert_eq!( + enr.custody_subnet_count::(&spec), + spec.custody_requirement, + ); + } + + #[test] + fn custody_subnet_count_all() { + let config = NetworkConfig { + subscribe_all_data_column_subnets: true, + ..NetworkConfig::default() + }; + let spec = make_eip7594_spec(); + let enr = build_enr_with_config(config, &spec).0; + + assert_eq!( + enr.custody_subnet_count::(&spec), + spec.data_column_sidecar_subnet_count, + ); + } + + #[test] + fn custody_subnet_count_fallback_default() { + let config = NetworkConfig::default(); + let spec = make_eip7594_spec(); + let (mut enr, enr_key) = build_enr_with_config(config, &spec); + let invalid_subnet_count = 99u64; + + enr.insert( + PEERDAS_CUSTODY_SUBNET_COUNT_ENR_KEY, + &invalid_subnet_count, + &enr_key, + ) + .unwrap(); + + assert_eq!( + enr.custody_subnet_count::(&spec), + spec.custody_requirement, + ); + } + + fn build_enr_with_config(config: NetworkConfig, spec: &ChainSpec) -> (Enr, CombinedKey) { + let keypair = 
libp2p::identity::secp256k1::Keypair::generate(); + let enr_key = CombinedKey::from_secp256k1(&keypair); + let enr_fork_id = EnrForkId::default(); + let enr = build_enr::(&enr_key, &config, &enr_fork_id, spec).unwrap(); + (enr, enr_key) + } +} diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index 865d707495..300c190cda 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -1220,7 +1220,7 @@ mod tests { let mut config = NetworkConfig::default(); config.set_listening_addr(crate::ListenAddress::unused_v4_ports()); let enr_key: CombinedKey = CombinedKey::from_secp256k1(&keypair); - let enr: Enr = build_enr::(&enr_key, &config, &EnrForkId::default()).unwrap(); + let enr: Enr = build_enr::(&enr_key, &config, &EnrForkId::default(), &spec).unwrap(); let log = build_log(slog::Level::Debug, false); let globals = NetworkGlobals::new( enr, diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index c2a2a03fe8..fe649f4199 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -162,6 +162,7 @@ impl Network { &config, &ctx.enr_fork_id, &log, + ctx.chain_spec, )?; // Construct the metadata let meta_data = utils::load_or_build_metadata(&config.network_dir, &log); diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index e522285a9e..db5fc7636e 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -16,6 +16,7 @@ use futures::prelude::*; use futures::StreamExt; use lighthouse_network::service::Network; use lighthouse_network::types::GossipKind; +use lighthouse_network::Eth2Enr; use lighthouse_network::{prometheus_client::registry::Registry, MessageAcceptance}; use lighthouse_network::{ rpc::{GoodbyeReason, RPCResponseErrorCode}, @@ -35,8 +36,8 @@ 
use task_executor::ShutdownReason; use tokio::sync::mpsc; use tokio::time::Sleep; use types::{ - ChainSpec, EthSpec, ForkContext, Slot, SubnetId, SyncCommitteeSubscription, SyncSubnetId, - Unsigned, ValidatorSubscription, + ChainSpec, DataColumnSubnetId, EthSpec, ForkContext, Slot, SubnetId, SyncCommitteeSubscription, + SyncSubnetId, Unsigned, ValidatorSubscription, }; mod tests; @@ -183,6 +184,8 @@ pub struct NetworkService { next_fork_subscriptions: Pin>>, /// A delay that expires when we need to unsubscribe from old fork topics. next_unsubscribe: Pin>>, + /// Subscribe to all the data column subnets. + subscribe_all_data_column_subnets: bool, /// Subscribe to all the subnets once synced. subscribe_all_subnets: bool, /// Shutdown beacon node after sync is complete. @@ -349,6 +352,7 @@ impl NetworkService { next_fork_update, next_fork_subscriptions, next_unsubscribe, + subscribe_all_data_column_subnets: config.subscribe_all_data_column_subnets, subscribe_all_subnets: config.subscribe_all_subnets, shutdown_after_sync: config.shutdown_after_sync, metrics_enabled: config.metrics_enabled, @@ -733,6 +737,15 @@ impl NetworkService { } } + // TODO(das): This is added here for the purpose of testing, *without* having to + // activate Electra. 
This should happen as part of the Electra upgrade and we should + // move the subscription logic once it's ready to rebase PeerDAS on Electra, or if + // we decide to activate via the soft fork route: + // https://github.com/sigp/lighthouse/pull/5899 + if self.fork_context.spec.is_peer_das_scheduled() { + self.subscribe_to_peer_das_topics(&mut subscribed_topics); + } + // If we are to subscribe to all subnets we do it here if self.subscribe_all_subnets { for subnet_id in 0..<::EthSpec as EthSpec>::SubnetBitfieldLength::to_u64() { @@ -779,6 +792,45 @@ impl NetworkService { } } + fn subscribe_to_peer_das_topics(&mut self, subscribed_topics: &mut Vec) { + if self.subscribe_all_data_column_subnets { + for column_subnet in 0..self.fork_context.spec.data_column_sidecar_subnet_count { + for fork_digest in self.required_gossip_fork_digests() { + let gossip_kind = + Subnet::DataColumn(DataColumnSubnetId::new(column_subnet)).into(); + let topic = + GossipTopic::new(gossip_kind, GossipEncoding::default(), fork_digest); + if self.libp2p.subscribe(topic.clone()) { + subscribed_topics.push(topic); + } else { + warn!(self.log, "Could not subscribe to topic"; "topic" => %topic); + } + } + } + } else { + for column_subnet in DataColumnSubnetId::compute_custody_subnets::( + self.network_globals.local_enr().node_id().raw().into(), + self.network_globals + .local_enr() + .custody_subnet_count::<::EthSpec>( + &self.fork_context.spec, + ), + &self.fork_context.spec, + ) { + for fork_digest in self.required_gossip_fork_digests() { + let gossip_kind = Subnet::DataColumn(column_subnet).into(); + let topic = + GossipTopic::new(gossip_kind, GossipEncoding::default(), fork_digest); + if self.libp2p.subscribe(topic.clone()) { + subscribed_topics.push(topic); + } else { + warn!(self.log, "Could not subscribe to topic"; "topic" => %topic); + } + } + } + } + } + /// Handle a message sent to the network service. 
async fn on_validator_subscription_msg(&mut self, msg: ValidatorSubscriptionMessage) { match msg { diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 2e1b1c093c..3f991d4db2 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -54,6 +54,18 @@ pub fn cli_app() -> Command { /* * Network parameters. */ + .arg( + Arg::new("subscribe-all-data-column-subnets") + .long("subscribe-all-data-column-subnets") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .help("Subscribe to all data column subnets and participate in data custody for \ + all columns. This will also advertise the beacon node as being long-lived \ + subscribed to all data column subnets. \ + NOTE: this is an experimental flag and may change any time without notice!") + .display_order(0) + .hide(true) + ) .arg( Arg::new("subscribe-all-subnets") .long("subscribe-all-subnets") diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index b4fa38da7d..24bef73f7c 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -1130,6 +1130,10 @@ pub fn set_network_config( config.network_dir = data_dir.join(DEFAULT_NETWORK_DIR); }; + if parse_flag(cli_args, "subscribe-all-data-column-subnets") { + config.subscribe_all_data_column_subnets = true; + } + if parse_flag(cli_args, "subscribe-all-subnets") { config.subscribe_all_subnets = true; } diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index ca4df32d1e..ed929061ff 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -426,6 +426,13 @@ impl ChainSpec { }) } + /// Returns true if `EIP7594_FORK_EPOCH` is set and is not set to `FAR_FUTURE_EPOCH`. + pub fn is_peer_das_scheduled(&self) -> bool { + self.eip7594_fork_epoch.map_or(false, |eip7594_fork_epoch| { + eip7594_fork_epoch != self.far_future_epoch + }) + } + /// Returns a full `Fork` struct for a given epoch. 
pub fn fork_at_epoch(&self, epoch: Epoch) -> Fork { let current_fork_name = self.fork_name_at_epoch(epoch); diff --git a/lcli/src/generate_bootnode_enr.rs b/lcli/src/generate_bootnode_enr.rs index 52960b929d..26e17ba73e 100644 --- a/lcli/src/generate_bootnode_enr.rs +++ b/lcli/src/generate_bootnode_enr.rs @@ -10,7 +10,7 @@ use std::{fs, net::Ipv4Addr}; use std::{fs::File, num::NonZeroU16}; use types::{ChainSpec, EnrForkId, Epoch, EthSpec, Hash256}; -pub fn run(matches: &ArgMatches) -> Result<(), String> { +pub fn run(matches: &ArgMatches, spec: &ChainSpec) -> Result<(), String> { let ip: Ipv4Addr = clap_utils::parse_required(matches, "ip")?; let udp_port: NonZeroU16 = clap_utils::parse_required(matches, "udp-port")?; let tcp_port: NonZeroU16 = clap_utils::parse_required(matches, "tcp-port")?; @@ -37,7 +37,7 @@ pub fn run(matches: &ArgMatches) -> Result<(), String> { next_fork_version: genesis_fork_version, next_fork_epoch: Epoch::max_value(), // FAR_FUTURE_EPOCH }; - let enr = build_enr::(&enr_key, &config, &enr_fork_id) + let enr = build_enr::(&enr_key, &config, &enr_fork_id, spec) .map_err(|e| format!("Unable to create ENR: {:?}", e))?; fs::create_dir_all(&output_dir).map_err(|e| format!("Unable to create output-dir: {:?}", e))?; diff --git a/lcli/src/main.rs b/lcli/src/main.rs index 380aeb6ace..f055a23b36 100644 --- a/lcli/src/main.rs +++ b/lcli/src/main.rs @@ -707,8 +707,10 @@ fn run(env_builder: EnvironmentBuilder, matches: &ArgMatches) -> } Some(("check-deposit-data", matches)) => check_deposit_data::run(matches) .map_err(|e| format!("Failed to run check-deposit-data command: {}", e)), - Some(("generate-bootnode-enr", matches)) => generate_bootnode_enr::run::(matches) - .map_err(|e| format!("Failed to run generate-bootnode-enr command: {}", e)), + Some(("generate-bootnode-enr", matches)) => { + generate_bootnode_enr::run::(matches, &env.eth2_config.spec) + .map_err(|e| format!("Failed to run generate-bootnode-enr command: {}", e)) + } 
Some(("mnemonic-validators", matches)) => mnemonic_validators::run(matches) .map_err(|e| format!("Failed to run mnemonic-validators command: {}", e)), Some(("indexed-attestations", matches)) => indexed_attestations::run::(matches) From ff15c78ced26388ed882df494f922ce7bebba74c Mon Sep 17 00:00:00 2001 From: Lion - dapplion <35266934+dapplion@users.noreply.github.com> Date: Tue, 13 Aug 2024 08:16:14 +0800 Subject: [PATCH 31/43] Implement data columns by network boilerplate (#6224) * Implement data columns by network boilerplate * Use correct quota values * Address PR review * Update currently_supported * Merge remote-tracking branch 'sigp/unstable' into peerdas-network-boilerplate * PR reviews * Fix data column rpc request not being sent due to incorrect limits set. (#6000) --- beacon_node/beacon_processor/src/lib.rs | 28 +++- .../src/peer_manager/mod.rs | 6 + .../src/rpc/codec/ssz_snappy.rs | 140 +++++++++++++++++- .../lighthouse_network/src/rpc/config.rs | 22 +++ .../lighthouse_network/src/rpc/methods.rs | 88 ++++++++++- beacon_node/lighthouse_network/src/rpc/mod.rs | 2 + .../lighthouse_network/src/rpc/outbound.rs | 22 +++ .../lighthouse_network/src/rpc/protocol.rs | 56 ++++++- .../src/rpc/rate_limiter.rs | 28 ++++ .../src/service/api_types.rs | 33 ++++- .../lighthouse_network/src/service/mod.rs | 30 ++++ .../src/network_beacon_processor/mod.rs | 38 ++++- .../network_beacon_processor/rpc_methods.rs | 33 ++++- beacon_node/network/src/router.rs | 99 +++++++++++-- beacon_node/network/src/sync/manager.rs | 33 +++++ 15 files changed, 624 insertions(+), 34 deletions(-) diff --git a/beacon_node/beacon_processor/src/lib.rs b/beacon_node/beacon_processor/src/lib.rs index f491dc7ffb..68c33e99ba 100644 --- a/beacon_node/beacon_processor/src/lib.rs +++ b/beacon_node/beacon_processor/src/lib.rs @@ -119,6 +119,8 @@ pub struct BeaconProcessorQueueLengths { bbroots_queue: usize, blbroots_queue: usize, blbrange_queue: usize, + dcbroots_queue: usize, + dcbrange_queue: usize, 
gossip_bls_to_execution_change_queue: usize, lc_bootstrap_queue: usize, lc_optimistic_update_queue: usize, @@ -172,6 +174,9 @@ impl BeaconProcessorQueueLengths { bbroots_queue: 1024, blbroots_queue: 1024, blbrange_queue: 1024, + // TODO(das): pick proper values + dcbroots_queue: 1024, + dcbrange_queue: 1024, gossip_bls_to_execution_change_queue: 16384, lc_bootstrap_queue: 1024, lc_optimistic_update_queue: 512, @@ -230,6 +235,8 @@ pub const BLOCKS_BY_RANGE_REQUEST: &str = "blocks_by_range_request"; pub const BLOCKS_BY_ROOTS_REQUEST: &str = "blocks_by_roots_request"; pub const BLOBS_BY_RANGE_REQUEST: &str = "blobs_by_range_request"; pub const BLOBS_BY_ROOTS_REQUEST: &str = "blobs_by_roots_request"; +pub const DATA_COLUMNS_BY_ROOTS_REQUEST: &str = "data_columns_by_roots_request"; +pub const DATA_COLUMNS_BY_RANGE_REQUEST: &str = "data_columns_by_range_request"; pub const LIGHT_CLIENT_BOOTSTRAP_REQUEST: &str = "light_client_bootstrap"; pub const LIGHT_CLIENT_FINALITY_UPDATE_REQUEST: &str = "light_client_finality_update_request"; pub const LIGHT_CLIENT_OPTIMISTIC_UPDATE_REQUEST: &str = "light_client_optimistic_update_request"; @@ -609,6 +616,8 @@ pub enum Work { BlocksByRootsRequest(AsyncFn), BlobsByRangeRequest(BlockingFn), BlobsByRootsRequest(BlockingFn), + DataColumnsByRootsRequest(BlockingFn), + DataColumnsByRangeRequest(BlockingFn), GossipBlsToExecutionChange(BlockingFn), LightClientBootstrapRequest(BlockingFn), LightClientOptimisticUpdateRequest(BlockingFn), @@ -652,6 +661,8 @@ impl Work { Work::BlocksByRootsRequest(_) => BLOCKS_BY_ROOTS_REQUEST, Work::BlobsByRangeRequest(_) => BLOBS_BY_RANGE_REQUEST, Work::BlobsByRootsRequest(_) => BLOBS_BY_ROOTS_REQUEST, + Work::DataColumnsByRootsRequest(_) => DATA_COLUMNS_BY_ROOTS_REQUEST, + Work::DataColumnsByRangeRequest(_) => DATA_COLUMNS_BY_RANGE_REQUEST, Work::LightClientBootstrapRequest(_) => LIGHT_CLIENT_BOOTSTRAP_REQUEST, Work::LightClientOptimisticUpdateRequest(_) => LIGHT_CLIENT_OPTIMISTIC_UPDATE_REQUEST, 
Work::LightClientFinalityUpdateRequest(_) => LIGHT_CLIENT_FINALITY_UPDATE_REQUEST, @@ -816,6 +827,8 @@ impl BeaconProcessor { let mut bbroots_queue = FifoQueue::new(queue_lengths.bbroots_queue); let mut blbroots_queue = FifoQueue::new(queue_lengths.blbroots_queue); let mut blbrange_queue = FifoQueue::new(queue_lengths.blbrange_queue); + let mut dcbroots_queue = FifoQueue::new(queue_lengths.dcbroots_queue); + let mut dcbrange_queue = FifoQueue::new(queue_lengths.dcbrange_queue); let mut gossip_bls_to_execution_change_queue = FifoQueue::new(queue_lengths.gossip_bls_to_execution_change_queue); @@ -1118,6 +1131,10 @@ impl BeaconProcessor { self.spawn_worker(item, idle_tx); } else if let Some(item) = blbroots_queue.pop() { self.spawn_worker(item, idle_tx); + } else if let Some(item) = dcbroots_queue.pop() { + self.spawn_worker(item, idle_tx); + } else if let Some(item) = dcbrange_queue.pop() { + self.spawn_worker(item, idle_tx); // Check slashings after all other consensus messages so we prioritize // following head. // @@ -1282,6 +1299,12 @@ impl BeaconProcessor { Work::BlobsByRootsRequest { .. } => { blbroots_queue.push(work, work_id, &self.log) } + Work::DataColumnsByRootsRequest { .. } => { + dcbroots_queue.push(work, work_id, &self.log) + } + Work::DataColumnsByRangeRequest { .. } => { + dcbrange_queue.push(work, work_id, &self.log) + } Work::UnknownLightClientOptimisticUpdate { .. 
} => { unknown_light_client_update_queue.push(work, work_id, &self.log) } @@ -1483,7 +1506,10 @@ impl BeaconProcessor { | Work::GossipDataColumnSidecar(work) => task_spawner.spawn_async(async move { work.await; }), - Work::BlobsByRangeRequest(process_fn) | Work::BlobsByRootsRequest(process_fn) => { + Work::BlobsByRangeRequest(process_fn) + | Work::BlobsByRootsRequest(process_fn) + | Work::DataColumnsByRootsRequest(process_fn) + | Work::DataColumnsByRangeRequest(process_fn) => { task_spawner.spawn_blocking(process_fn) } Work::BlocksByRangeRequest(work) | Work::BlocksByRootsRequest(work) => { diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index 6423da56fe..4c9551507e 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -569,6 +569,8 @@ impl PeerManager { Protocol::LightClientOptimisticUpdate => return, Protocol::LightClientFinalityUpdate => return, Protocol::BlobsByRoot => PeerAction::MidToleranceError, + Protocol::DataColumnsByRoot => PeerAction::MidToleranceError, + Protocol::DataColumnsByRange => PeerAction::MidToleranceError, Protocol::Goodbye => PeerAction::LowToleranceError, Protocol::MetaData => PeerAction::LowToleranceError, Protocol::Status => PeerAction::LowToleranceError, @@ -587,6 +589,8 @@ impl PeerManager { Protocol::BlocksByRoot => return, Protocol::BlobsByRange => return, Protocol::BlobsByRoot => return, + Protocol::DataColumnsByRoot => return, + Protocol::DataColumnsByRange => return, Protocol::Goodbye => return, Protocol::LightClientBootstrap => return, Protocol::LightClientOptimisticUpdate => return, @@ -607,6 +611,8 @@ impl PeerManager { Protocol::BlocksByRoot => PeerAction::MidToleranceError, Protocol::BlobsByRange => PeerAction::MidToleranceError, Protocol::BlobsByRoot => PeerAction::MidToleranceError, + Protocol::DataColumnsByRoot => PeerAction::MidToleranceError, + 
Protocol::DataColumnsByRange => PeerAction::MidToleranceError, Protocol::LightClientBootstrap => return, Protocol::LightClientOptimisticUpdate => return, Protocol::LightClientFinalityUpdate => return, diff --git a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs index 482d1d96b4..f5d8b58dce 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs @@ -16,10 +16,11 @@ use std::marker::PhantomData; use std::sync::Arc; use tokio_util::codec::{Decoder, Encoder}; use types::{ - BlobSidecar, ChainSpec, EthSpec, ForkContext, ForkName, Hash256, LightClientBootstrap, - LightClientFinalityUpdate, LightClientOptimisticUpdate, RuntimeVariableList, SignedBeaconBlock, - SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockBellatrix, - SignedBeaconBlockCapella, SignedBeaconBlockDeneb, SignedBeaconBlockElectra, + BlobSidecar, ChainSpec, DataColumnSidecar, EthSpec, ForkContext, ForkName, Hash256, + LightClientBootstrap, LightClientFinalityUpdate, LightClientOptimisticUpdate, + RuntimeVariableList, SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, + SignedBeaconBlockBellatrix, SignedBeaconBlockCapella, SignedBeaconBlockDeneb, + SignedBeaconBlockElectra, }; use unsigned_varint::codec::Uvi; @@ -70,6 +71,8 @@ impl Encoder> for SSZSnappyInboundCodec { RPCResponse::BlocksByRoot(res) => res.as_ssz_bytes(), RPCResponse::BlobsByRange(res) => res.as_ssz_bytes(), RPCResponse::BlobsByRoot(res) => res.as_ssz_bytes(), + RPCResponse::DataColumnsByRoot(res) => res.as_ssz_bytes(), + RPCResponse::DataColumnsByRange(res) => res.as_ssz_bytes(), RPCResponse::LightClientBootstrap(res) => res.as_ssz_bytes(), RPCResponse::LightClientOptimisticUpdate(res) => res.as_ssz_bytes(), RPCResponse::LightClientFinalityUpdate(res) => res.as_ssz_bytes(), @@ -224,6 +227,8 @@ impl Encoder> for SSZSnappyOutboundCodec { }, 
OutboundRequest::BlobsByRange(req) => req.as_ssz_bytes(), OutboundRequest::BlobsByRoot(req) => req.blob_ids.as_ssz_bytes(), + OutboundRequest::DataColumnsByRange(req) => req.as_ssz_bytes(), + OutboundRequest::DataColumnsByRoot(req) => req.data_column_ids.as_ssz_bytes(), OutboundRequest::Ping(req) => req.as_ssz_bytes(), OutboundRequest::MetaData(_) => return Ok(()), // no metadata to encode }; @@ -414,7 +419,12 @@ fn context_bytes( } }; } - RPCResponse::BlobsByRange(_) | RPCResponse::BlobsByRoot(_) => { + RPCResponse::BlobsByRange(_) + | RPCResponse::BlobsByRoot(_) + | RPCResponse::DataColumnsByRoot(_) + | RPCResponse::DataColumnsByRange(_) => { + // TODO(das): If DataColumnSidecar is defined as an Electra type, update the + // context bytes to point to ForkName::Electra return fork_context.to_context_bytes(ForkName::Deneb); } RPCResponse::LightClientBootstrap(lc_bootstrap) => { @@ -512,6 +522,17 @@ fn handle_rpc_request( )?, }))) } + SupportedProtocol::DataColumnsByRootV1 => Ok(Some(InboundRequest::DataColumnsByRoot( + DataColumnsByRootRequest { + data_column_ids: RuntimeVariableList::from_ssz_bytes( + decoded_buffer, + spec.max_request_data_column_sidecars as usize, + )?, + }, + ))), + SupportedProtocol::DataColumnsByRangeV1 => Ok(Some(InboundRequest::DataColumnsByRange( + DataColumnsByRangeRequest::from_ssz_bytes(decoded_buffer)?, + ))), SupportedProtocol::PingV1 => Ok(Some(InboundRequest::Ping(Ping { data: u64::from_ssz_bytes(decoded_buffer)?, }))), @@ -604,6 +625,51 @@ fn handle_rpc_response( ), )), }, + SupportedProtocol::DataColumnsByRootV1 => match fork_name { + Some(fork_name) => { + // TODO(das): PeerDAS is currently supported for both deneb and electra. This check + // does not advertise the topic on deneb, simply allows it to decode it. Advertise + // logic is in `SupportedTopic::currently_supported`. 
+ if fork_name.deneb_enabled() { + Ok(Some(RPCResponse::DataColumnsByRoot(Arc::new( + DataColumnSidecar::from_ssz_bytes(decoded_buffer)?, + )))) + } else { + Err(RPCError::ErrorResponse( + RPCResponseErrorCode::InvalidRequest, + "Invalid fork name for data columns by root".to_string(), + )) + } + } + None => Err(RPCError::ErrorResponse( + RPCResponseErrorCode::InvalidRequest, + format!( + "No context bytes provided for {:?} response", + versioned_protocol + ), + )), + }, + SupportedProtocol::DataColumnsByRangeV1 => match fork_name { + Some(fork_name) => { + if fork_name.deneb_enabled() { + Ok(Some(RPCResponse::DataColumnsByRange(Arc::new( + DataColumnSidecar::from_ssz_bytes(decoded_buffer)?, + )))) + } else { + Err(RPCError::ErrorResponse( + RPCResponseErrorCode::InvalidRequest, + "Invalid fork name for data columns by range".to_string(), + )) + } + } + None => Err(RPCError::ErrorResponse( + RPCResponseErrorCode::InvalidRequest, + format!( + "No context bytes provided for {:?} response", + versioned_protocol + ), + )), + }, SupportedProtocol::PingV1 => Ok(Some(RPCResponse::Pong(Ping { data: u64::from_ssz_bytes(decoded_buffer)?, }))), @@ -747,7 +813,8 @@ mod tests { use crate::types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield}; use types::{ blob_sidecar::BlobIdentifier, BeaconBlock, BeaconBlockAltair, BeaconBlockBase, - BeaconBlockBellatrix, EmptyBlock, Epoch, FullPayload, Signature, Slot, + BeaconBlockBellatrix, DataColumnIdentifier, EmptyBlock, Epoch, FullPayload, Signature, + Slot, }; type Spec = types::MainnetEthSpec; @@ -794,6 +861,10 @@ mod tests { Arc::new(BlobSidecar::empty()) } + fn empty_data_column_sidecar() -> Arc> { + Arc::new(DataColumnSidecar::empty()) + } + /// Bellatrix block with length < max_rpc_size. 
fn bellatrix_block_small( fork_context: &ForkContext, @@ -855,6 +926,27 @@ mod tests { } } + fn dcbrange_request() -> DataColumnsByRangeRequest { + DataColumnsByRangeRequest { + start_slot: 0, + count: 10, + columns: vec![1, 2, 3], + } + } + + fn dcbroot_request(spec: &ChainSpec) -> DataColumnsByRootRequest { + DataColumnsByRootRequest { + data_column_ids: RuntimeVariableList::new( + vec![DataColumnIdentifier { + block_root: Hash256::zero(), + index: 0, + }], + spec.max_request_data_column_sidecars as usize, + ) + .unwrap(), + } + } + fn bbroot_request_v1(spec: &ChainSpec) -> BlocksByRootRequest { BlocksByRootRequest::new_v1(vec![Hash256::zero()], spec) } @@ -1012,6 +1104,12 @@ mod tests { OutboundRequest::BlobsByRoot(bbroot) => { assert_eq!(decoded, InboundRequest::BlobsByRoot(bbroot)) } + OutboundRequest::DataColumnsByRoot(dcbroot) => { + assert_eq!(decoded, InboundRequest::DataColumnsByRoot(dcbroot)) + } + OutboundRequest::DataColumnsByRange(dcbrange) => { + assert_eq!(decoded, InboundRequest::DataColumnsByRange(dcbrange)) + } OutboundRequest::Ping(ping) => { assert_eq!(decoded, InboundRequest::Ping(ping)) } @@ -1138,6 +1236,34 @@ mod tests { ), Ok(Some(RPCResponse::BlobsByRoot(empty_blob_sidecar()))), ); + + assert_eq!( + encode_then_decode_response( + SupportedProtocol::DataColumnsByRangeV1, + RPCCodedResponse::Success(RPCResponse::DataColumnsByRange( + empty_data_column_sidecar() + )), + ForkName::Deneb, + &chain_spec + ), + Ok(Some(RPCResponse::DataColumnsByRange( + empty_data_column_sidecar() + ))), + ); + + assert_eq!( + encode_then_decode_response( + SupportedProtocol::DataColumnsByRootV1, + RPCCodedResponse::Success(RPCResponse::DataColumnsByRoot( + empty_data_column_sidecar() + )), + ForkName::Deneb, + &chain_spec + ), + Ok(Some(RPCResponse::DataColumnsByRoot( + empty_data_column_sidecar() + ))), + ); } // Test RPCResponse encoding/decoding for V1 messages @@ -1491,6 +1617,8 @@ mod tests { OutboundRequest::MetaData(MetadataRequest::new_v1()), 
OutboundRequest::BlobsByRange(blbrange_request()), OutboundRequest::BlobsByRoot(blbroot_request(&chain_spec)), + OutboundRequest::DataColumnsByRange(dcbrange_request()), + OutboundRequest::DataColumnsByRoot(dcbroot_request(&chain_spec)), OutboundRequest::MetaData(MetadataRequest::new_v2()), ]; diff --git a/beacon_node/lighthouse_network/src/rpc/config.rs b/beacon_node/lighthouse_network/src/rpc/config.rs index d17fa112a1..7ff189b981 100644 --- a/beacon_node/lighthouse_network/src/rpc/config.rs +++ b/beacon_node/lighthouse_network/src/rpc/config.rs @@ -91,6 +91,8 @@ pub struct RateLimiterConfig { pub(super) blocks_by_root_quota: Quota, pub(super) blobs_by_range_quota: Quota, pub(super) blobs_by_root_quota: Quota, + pub(super) data_columns_by_root_quota: Quota, + pub(super) data_columns_by_range_quota: Quota, pub(super) light_client_bootstrap_quota: Quota, pub(super) light_client_optimistic_update_quota: Quota, pub(super) light_client_finality_update_quota: Quota, @@ -110,6 +112,12 @@ impl RateLimiterConfig { // measured against the maximum request size. pub const DEFAULT_BLOBS_BY_RANGE_QUOTA: Quota = Quota::n_every(6144, 10); pub const DEFAULT_BLOBS_BY_ROOT_QUOTA: Quota = Quota::n_every(768, 10); + // 320 blocks worth of columns for regular node, or 40 blocks for supernode. + // Range sync load balances when requesting blocks, and each batch is 32 blocks. + pub const DEFAULT_DATA_COLUMNS_BY_RANGE_QUOTA: Quota = Quota::n_every(5120, 10); + // 512 columns per request from spec. This should be plenty as peers are unlikely to send all + // sampling requests to a single peer. 
+ pub const DEFAULT_DATA_COLUMNS_BY_ROOT_QUOTA: Quota = Quota::n_every(512, 10); pub const DEFAULT_LIGHT_CLIENT_BOOTSTRAP_QUOTA: Quota = Quota::one_every(10); pub const DEFAULT_LIGHT_CLIENT_OPTIMISTIC_UPDATE_QUOTA: Quota = Quota::one_every(10); pub const DEFAULT_LIGHT_CLIENT_FINALITY_UPDATE_QUOTA: Quota = Quota::one_every(10); @@ -126,6 +134,8 @@ impl Default for RateLimiterConfig { blocks_by_root_quota: Self::DEFAULT_BLOCKS_BY_ROOT_QUOTA, blobs_by_range_quota: Self::DEFAULT_BLOBS_BY_RANGE_QUOTA, blobs_by_root_quota: Self::DEFAULT_BLOBS_BY_ROOT_QUOTA, + data_columns_by_root_quota: Self::DEFAULT_DATA_COLUMNS_BY_ROOT_QUOTA, + data_columns_by_range_quota: Self::DEFAULT_DATA_COLUMNS_BY_RANGE_QUOTA, light_client_bootstrap_quota: Self::DEFAULT_LIGHT_CLIENT_BOOTSTRAP_QUOTA, light_client_optimistic_update_quota: Self::DEFAULT_LIGHT_CLIENT_OPTIMISTIC_UPDATE_QUOTA, @@ -175,6 +185,8 @@ impl FromStr for RateLimiterConfig { let mut blocks_by_root_quota = None; let mut blobs_by_range_quota = None; let mut blobs_by_root_quota = None; + let mut data_columns_by_root_quota = None; + let mut data_columns_by_range_quota = None; let mut light_client_bootstrap_quota = None; let mut light_client_optimistic_update_quota = None; let mut light_client_finality_update_quota = None; @@ -189,6 +201,12 @@ impl FromStr for RateLimiterConfig { Protocol::BlocksByRoot => blocks_by_root_quota = blocks_by_root_quota.or(quota), Protocol::BlobsByRange => blobs_by_range_quota = blobs_by_range_quota.or(quota), Protocol::BlobsByRoot => blobs_by_root_quota = blobs_by_root_quota.or(quota), + Protocol::DataColumnsByRoot => { + data_columns_by_root_quota = data_columns_by_root_quota.or(quota) + } + Protocol::DataColumnsByRange => { + data_columns_by_range_quota = data_columns_by_range_quota.or(quota) + } Protocol::Ping => ping_quota = ping_quota.or(quota), Protocol::MetaData => meta_data_quota = meta_data_quota.or(quota), Protocol::LightClientBootstrap => { @@ -216,6 +234,10 @@ impl FromStr for 
RateLimiterConfig { blobs_by_range_quota: blobs_by_range_quota .unwrap_or(Self::DEFAULT_BLOBS_BY_RANGE_QUOTA), blobs_by_root_quota: blobs_by_root_quota.unwrap_or(Self::DEFAULT_BLOBS_BY_ROOT_QUOTA), + data_columns_by_root_quota: data_columns_by_root_quota + .unwrap_or(Self::DEFAULT_DATA_COLUMNS_BY_ROOT_QUOTA), + data_columns_by_range_quota: data_columns_by_range_quota + .unwrap_or(Self::DEFAULT_DATA_COLUMNS_BY_RANGE_QUOTA), light_client_bootstrap_quota: light_client_bootstrap_quota .unwrap_or(Self::DEFAULT_LIGHT_CLIENT_BOOTSTRAP_QUOTA), light_client_optimistic_update_quota: light_client_optimistic_update_quota diff --git a/beacon_node/lighthouse_network/src/rpc/methods.rs b/beacon_node/lighthouse_network/src/rpc/methods.rs index 1b0486ff77..8849a5433d 100644 --- a/beacon_node/lighthouse_network/src/rpc/methods.rs +++ b/beacon_node/lighthouse_network/src/rpc/methods.rs @@ -14,9 +14,9 @@ use strum::IntoStaticStr; use superstruct::superstruct; use types::blob_sidecar::BlobIdentifier; use types::{ - blob_sidecar::BlobSidecar, ChainSpec, Epoch, EthSpec, Hash256, LightClientBootstrap, - LightClientFinalityUpdate, LightClientOptimisticUpdate, RuntimeVariableList, SignedBeaconBlock, - Slot, + blob_sidecar::BlobSidecar, ChainSpec, ColumnIndex, DataColumnIdentifier, DataColumnSidecar, + Epoch, EthSpec, Hash256, LightClientBootstrap, LightClientFinalityUpdate, + LightClientOptimisticUpdate, RuntimeVariableList, SignedBeaconBlock, Slot, }; /// Maximum length of error message. @@ -293,6 +293,43 @@ impl BlobsByRangeRequest { } } +/// Request a number of beacon data columns from a peer. +#[derive(Encode, Decode, Clone, Debug, PartialEq)] +pub struct DataColumnsByRangeRequest { + /// The starting slot to request data columns. + pub start_slot: u64, + /// The number of slots from the start slot. + pub count: u64, + /// The list of column indices being requested. 
+ pub columns: Vec, +} + +impl DataColumnsByRangeRequest { + pub fn max_requested(&self) -> u64 { + self.count.saturating_mul(self.columns.len() as u64) + } + + pub fn ssz_min_len() -> usize { + DataColumnsByRangeRequest { + start_slot: 0, + count: 0, + columns: vec![0], + } + .as_ssz_bytes() + .len() + } + + pub fn ssz_max_len(spec: &ChainSpec) -> usize { + DataColumnsByRangeRequest { + start_slot: 0, + count: 0, + columns: vec![0; spec.number_of_columns], + } + .as_ssz_bytes() + .len() + } +} + /// Request a number of beacon block roots from a peer. #[superstruct( variants(V1, V2), @@ -370,6 +407,27 @@ impl BlobsByRootRequest { } } +/// Request a number of data columns from a peer. +#[derive(Clone, Debug, PartialEq)] +pub struct DataColumnsByRootRequest { + /// The list of beacon block roots and column indices being requested. + pub data_column_ids: RuntimeVariableList, +} + +impl DataColumnsByRootRequest { + pub fn new(data_column_ids: Vec, spec: &ChainSpec) -> Self { + let data_column_ids = RuntimeVariableList::from_vec( + data_column_ids, + spec.max_request_data_column_sidecars as usize, + ); + Self { data_column_ids } + } + + pub fn new_single(block_root: Hash256, index: ColumnIndex, spec: &ChainSpec) -> Self { + Self::new(vec![DataColumnIdentifier { block_root, index }], spec) + } +} + /* RPC Handling and Grouping */ // Collection of enums and structs used by the Codecs to encode/decode RPC messages @@ -400,6 +458,12 @@ pub enum RPCResponse { /// A response to a get BLOBS_BY_ROOT request. BlobsByRoot(Arc>), + /// A response to a get DATA_COLUMN_SIDECARS_BY_ROOT request. + DataColumnsByRoot(Arc>), + + /// A response to a get DATA_COLUMN_SIDECARS_BY_RANGE request. + DataColumnsByRange(Arc>), + /// A PONG response to a PING request. Pong(Ping), @@ -421,6 +485,12 @@ pub enum ResponseTermination { /// Blobs by root stream termination. BlobsByRoot, + + /// Data column sidecars by root stream termination. 
+ DataColumnsByRoot, + + /// Data column sidecars by range stream termination. + DataColumnsByRange, } /// The structured response containing a result/code indicating success or failure @@ -511,6 +581,8 @@ impl RPCResponse { RPCResponse::BlocksByRoot(_) => Protocol::BlocksByRoot, RPCResponse::BlobsByRange(_) => Protocol::BlobsByRange, RPCResponse::BlobsByRoot(_) => Protocol::BlobsByRoot, + RPCResponse::DataColumnsByRoot(_) => Protocol::DataColumnsByRoot, + RPCResponse::DataColumnsByRange(_) => Protocol::DataColumnsByRange, RPCResponse::Pong(_) => Protocol::Ping, RPCResponse::MetaData(_) => Protocol::MetaData, RPCResponse::LightClientBootstrap(_) => Protocol::LightClientBootstrap, @@ -556,6 +628,16 @@ impl std::fmt::Display for RPCResponse { RPCResponse::BlobsByRoot(sidecar) => { write!(f, "BlobsByRoot: Blob slot: {}", sidecar.slot()) } + RPCResponse::DataColumnsByRoot(sidecar) => { + write!(f, "DataColumnsByRoot: Data column slot: {}", sidecar.slot()) + } + RPCResponse::DataColumnsByRange(sidecar) => { + write!( + f, + "DataColumnsByRange: Data column slot: {}", + sidecar.slot() + ) + } RPCResponse::Pong(ping) => write!(f, "Pong: {}", ping.data), RPCResponse::MetaData(metadata) => write!(f, "Metadata: {}", metadata.seq_number()), RPCResponse::LightClientBootstrap(bootstrap) => { diff --git a/beacon_node/lighthouse_network/src/rpc/mod.rs b/beacon_node/lighthouse_network/src/rpc/mod.rs index 027af89edf..666cbe6fbc 100644 --- a/beacon_node/lighthouse_network/src/rpc/mod.rs +++ b/beacon_node/lighthouse_network/src/rpc/mod.rs @@ -471,6 +471,8 @@ where ResponseTermination::BlocksByRoot => Protocol::BlocksByRoot, ResponseTermination::BlobsByRange => Protocol::BlobsByRange, ResponseTermination::BlobsByRoot => Protocol::BlobsByRoot, + ResponseTermination::DataColumnsByRoot => Protocol::DataColumnsByRoot, + ResponseTermination::DataColumnsByRange => Protocol::DataColumnsByRange, }, ), }; diff --git a/beacon_node/lighthouse_network/src/rpc/outbound.rs 
b/beacon_node/lighthouse_network/src/rpc/outbound.rs index 8ea7b84bc9..7752d27e75 100644 --- a/beacon_node/lighthouse_network/src/rpc/outbound.rs +++ b/beacon_node/lighthouse_network/src/rpc/outbound.rs @@ -36,6 +36,8 @@ pub enum OutboundRequest { BlocksByRoot(BlocksByRootRequest), BlobsByRange(BlobsByRangeRequest), BlobsByRoot(BlobsByRootRequest), + DataColumnsByRoot(DataColumnsByRootRequest), + DataColumnsByRange(DataColumnsByRangeRequest), Ping(Ping), MetaData(MetadataRequest), } @@ -79,6 +81,14 @@ impl OutboundRequest { SupportedProtocol::BlobsByRootV1, Encoding::SSZSnappy, )], + OutboundRequest::DataColumnsByRoot(_) => vec![ProtocolId::new( + SupportedProtocol::DataColumnsByRootV1, + Encoding::SSZSnappy, + )], + OutboundRequest::DataColumnsByRange(_) => vec![ProtocolId::new( + SupportedProtocol::DataColumnsByRangeV1, + Encoding::SSZSnappy, + )], OutboundRequest::Ping(_) => vec![ProtocolId::new( SupportedProtocol::PingV1, Encoding::SSZSnappy, @@ -100,6 +110,8 @@ impl OutboundRequest { OutboundRequest::BlocksByRoot(req) => req.block_roots().len() as u64, OutboundRequest::BlobsByRange(req) => req.max_blobs_requested::(), OutboundRequest::BlobsByRoot(req) => req.blob_ids.len() as u64, + OutboundRequest::DataColumnsByRoot(req) => req.data_column_ids.len() as u64, + OutboundRequest::DataColumnsByRange(req) => req.max_requested::(), OutboundRequest::Ping(_) => 1, OutboundRequest::MetaData(_) => 1, } @@ -113,6 +125,8 @@ impl OutboundRequest { OutboundRequest::BlocksByRoot(_) => false, OutboundRequest::BlobsByRange(_) => false, OutboundRequest::BlobsByRoot(_) => false, + OutboundRequest::DataColumnsByRoot(_) => false, + OutboundRequest::DataColumnsByRange(_) => false, OutboundRequest::Ping(_) => true, OutboundRequest::MetaData(_) => true, } @@ -133,6 +147,8 @@ impl OutboundRequest { }, OutboundRequest::BlobsByRange(_) => SupportedProtocol::BlobsByRangeV1, OutboundRequest::BlobsByRoot(_) => SupportedProtocol::BlobsByRootV1, + OutboundRequest::DataColumnsByRoot(_) => 
SupportedProtocol::DataColumnsByRootV1, + OutboundRequest::DataColumnsByRange(_) => SupportedProtocol::DataColumnsByRangeV1, OutboundRequest::Ping(_) => SupportedProtocol::PingV1, OutboundRequest::MetaData(req) => match req { MetadataRequest::V1(_) => SupportedProtocol::MetaDataV1, @@ -151,6 +167,8 @@ impl OutboundRequest { OutboundRequest::BlocksByRoot(_) => ResponseTermination::BlocksByRoot, OutboundRequest::BlobsByRange(_) => ResponseTermination::BlobsByRange, OutboundRequest::BlobsByRoot(_) => ResponseTermination::BlobsByRoot, + OutboundRequest::DataColumnsByRoot(_) => ResponseTermination::DataColumnsByRoot, + OutboundRequest::DataColumnsByRange(_) => ResponseTermination::DataColumnsByRange, OutboundRequest::Status(_) => unreachable!(), OutboundRequest::Goodbye(_) => unreachable!(), OutboundRequest::Ping(_) => unreachable!(), @@ -208,6 +226,10 @@ impl std::fmt::Display for OutboundRequest { OutboundRequest::BlocksByRoot(req) => write!(f, "Blocks by root: {:?}", req), OutboundRequest::BlobsByRange(req) => write!(f, "Blobs by range: {:?}", req), OutboundRequest::BlobsByRoot(req) => write!(f, "Blobs by root: {:?}", req), + OutboundRequest::DataColumnsByRoot(req) => write!(f, "Data columns by root: {:?}", req), + OutboundRequest::DataColumnsByRange(req) => { + write!(f, "Data columns by range: {:?}", req) + } OutboundRequest::Ping(ping) => write!(f, "Ping: {}", ping.data), OutboundRequest::MetaData(_) => write!(f, "MetaData request"), } diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index 2cdd730a2b..6f7f034834 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -18,10 +18,10 @@ use tokio_util::{ }; use types::{ BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockCapella, BeaconBlockElectra, - BlobSidecar, ChainSpec, EmptyBlock, EthSpec, ForkContext, ForkName, LightClientBootstrap, - LightClientBootstrapAltair, 
LightClientFinalityUpdate, LightClientFinalityUpdateAltair, - LightClientOptimisticUpdate, LightClientOptimisticUpdateAltair, MainnetEthSpec, Signature, - SignedBeaconBlock, + BlobSidecar, ChainSpec, DataColumnSidecar, EmptyBlock, EthSpec, ForkContext, ForkName, + LightClientBootstrap, LightClientBootstrapAltair, LightClientFinalityUpdate, + LightClientFinalityUpdateAltair, LightClientOptimisticUpdate, + LightClientOptimisticUpdateAltair, MainnetEthSpec, Signature, SignedBeaconBlock, }; // Note: Hardcoding the `EthSpec` type for `SignedBeaconBlock` as min/max values is @@ -268,6 +268,12 @@ pub enum Protocol { /// The `BlobsByRoot` protocol name. #[strum(serialize = "blob_sidecars_by_root")] BlobsByRoot, + /// The `DataColumnSidecarsByRoot` protocol name. + #[strum(serialize = "data_column_sidecars_by_root")] + DataColumnsByRoot, + /// The `DataColumnSidecarsByRange` protocol name. + #[strum(serialize = "data_column_sidecars_by_range")] + DataColumnsByRange, /// The `Ping` protocol name. Ping, /// The `MetaData` protocol name. 
@@ -293,6 +299,8 @@ impl Protocol { Protocol::BlocksByRoot => Some(ResponseTermination::BlocksByRoot), Protocol::BlobsByRange => Some(ResponseTermination::BlobsByRange), Protocol::BlobsByRoot => Some(ResponseTermination::BlobsByRoot), + Protocol::DataColumnsByRoot => Some(ResponseTermination::DataColumnsByRoot), + Protocol::DataColumnsByRange => Some(ResponseTermination::DataColumnsByRange), Protocol::Ping => None, Protocol::MetaData => None, Protocol::LightClientBootstrap => None, @@ -319,6 +327,8 @@ pub enum SupportedProtocol { BlocksByRootV2, BlobsByRangeV1, BlobsByRootV1, + DataColumnsByRootV1, + DataColumnsByRangeV1, PingV1, MetaDataV1, MetaDataV2, @@ -338,6 +348,8 @@ impl SupportedProtocol { SupportedProtocol::BlocksByRootV2 => "2", SupportedProtocol::BlobsByRangeV1 => "1", SupportedProtocol::BlobsByRootV1 => "1", + SupportedProtocol::DataColumnsByRootV1 => "1", + SupportedProtocol::DataColumnsByRangeV1 => "1", SupportedProtocol::PingV1 => "1", SupportedProtocol::MetaDataV1 => "1", SupportedProtocol::MetaDataV2 => "2", @@ -357,6 +369,8 @@ impl SupportedProtocol { SupportedProtocol::BlocksByRootV2 => Protocol::BlocksByRoot, SupportedProtocol::BlobsByRangeV1 => Protocol::BlobsByRange, SupportedProtocol::BlobsByRootV1 => Protocol::BlobsByRoot, + SupportedProtocol::DataColumnsByRootV1 => Protocol::DataColumnsByRoot, + SupportedProtocol::DataColumnsByRangeV1 => Protocol::DataColumnsByRange, SupportedProtocol::PingV1 => Protocol::Ping, SupportedProtocol::MetaDataV1 => Protocol::MetaData, SupportedProtocol::MetaDataV2 => Protocol::MetaData, @@ -387,6 +401,12 @@ impl SupportedProtocol { ProtocolId::new(SupportedProtocol::BlobsByRangeV1, Encoding::SSZSnappy), ]); } + if fork_context.spec.is_peer_das_scheduled() { + supported.extend_from_slice(&[ + ProtocolId::new(SupportedProtocol::DataColumnsByRootV1, Encoding::SSZSnappy), + ProtocolId::new(SupportedProtocol::DataColumnsByRangeV1, Encoding::SSZSnappy), + ]); + } supported } } @@ -495,6 +515,11 @@ impl ProtocolId { 
::ssz_fixed_len(), ), Protocol::BlobsByRoot => RpcLimits::new(0, spec.max_blobs_by_root_request), + Protocol::DataColumnsByRoot => RpcLimits::new(0, spec.max_data_columns_by_root_request), + Protocol::DataColumnsByRange => RpcLimits::new( + DataColumnsByRangeRequest::ssz_min_len(), + DataColumnsByRangeRequest::ssz_max_len(spec), + ), Protocol::Ping => RpcLimits::new( ::ssz_fixed_len(), ::ssz_fixed_len(), @@ -521,6 +546,8 @@ impl ProtocolId { Protocol::BlocksByRoot => rpc_block_limits_by_fork(fork_context.current_fork()), Protocol::BlobsByRange => rpc_blob_limits::(), Protocol::BlobsByRoot => rpc_blob_limits::(), + Protocol::DataColumnsByRoot => rpc_data_column_limits::(), + Protocol::DataColumnsByRange => rpc_data_column_limits::(), Protocol::Ping => RpcLimits::new( ::ssz_fixed_len(), ::ssz_fixed_len(), @@ -549,6 +576,8 @@ impl ProtocolId { | SupportedProtocol::BlocksByRootV2 | SupportedProtocol::BlobsByRangeV1 | SupportedProtocol::BlobsByRootV1 + | SupportedProtocol::DataColumnsByRootV1 + | SupportedProtocol::DataColumnsByRangeV1 | SupportedProtocol::LightClientBootstrapV1 | SupportedProtocol::LightClientOptimisticUpdateV1 | SupportedProtocol::LightClientFinalityUpdateV1 => true, @@ -589,6 +618,13 @@ pub fn rpc_blob_limits() -> RpcLimits { ) } +pub fn rpc_data_column_limits() -> RpcLimits { + RpcLimits::new( + DataColumnSidecar::::empty().as_ssz_bytes().len(), + DataColumnSidecar::::max_size(), + ) +} + /* Inbound upgrade */ // The inbound protocol reads the request, decodes it and returns the stream to the protocol @@ -668,6 +704,8 @@ pub enum InboundRequest { BlocksByRoot(BlocksByRootRequest), BlobsByRange(BlobsByRangeRequest), BlobsByRoot(BlobsByRootRequest), + DataColumnsByRoot(DataColumnsByRootRequest), + DataColumnsByRange(DataColumnsByRangeRequest), LightClientBootstrap(LightClientBootstrapRequest), LightClientOptimisticUpdate, LightClientFinalityUpdate, @@ -688,6 +726,8 @@ impl InboundRequest { InboundRequest::BlocksByRoot(req) => req.block_roots().len() 
as u64, InboundRequest::BlobsByRange(req) => req.max_blobs_requested::(), InboundRequest::BlobsByRoot(req) => req.blob_ids.len() as u64, + InboundRequest::DataColumnsByRoot(req) => req.data_column_ids.len() as u64, + InboundRequest::DataColumnsByRange(req) => req.max_requested::(), InboundRequest::Ping(_) => 1, InboundRequest::MetaData(_) => 1, InboundRequest::LightClientBootstrap(_) => 1, @@ -711,6 +751,8 @@ impl InboundRequest { }, InboundRequest::BlobsByRange(_) => SupportedProtocol::BlobsByRangeV1, InboundRequest::BlobsByRoot(_) => SupportedProtocol::BlobsByRootV1, + InboundRequest::DataColumnsByRoot(_) => SupportedProtocol::DataColumnsByRootV1, + InboundRequest::DataColumnsByRange(_) => SupportedProtocol::DataColumnsByRangeV1, InboundRequest::Ping(_) => SupportedProtocol::PingV1, InboundRequest::MetaData(req) => match req { MetadataRequest::V1(_) => SupportedProtocol::MetaDataV1, @@ -736,6 +778,8 @@ impl InboundRequest { InboundRequest::BlocksByRoot(_) => ResponseTermination::BlocksByRoot, InboundRequest::BlobsByRange(_) => ResponseTermination::BlobsByRange, InboundRequest::BlobsByRoot(_) => ResponseTermination::BlobsByRoot, + InboundRequest::DataColumnsByRoot(_) => ResponseTermination::DataColumnsByRoot, + InboundRequest::DataColumnsByRange(_) => ResponseTermination::DataColumnsByRange, InboundRequest::Status(_) => unreachable!(), InboundRequest::Goodbye(_) => unreachable!(), InboundRequest::Ping(_) => unreachable!(), @@ -846,6 +890,10 @@ impl std::fmt::Display for InboundRequest { InboundRequest::BlocksByRoot(req) => write!(f, "Blocks by root: {:?}", req), InboundRequest::BlobsByRange(req) => write!(f, "Blobs by range: {:?}", req), InboundRequest::BlobsByRoot(req) => write!(f, "Blobs by root: {:?}", req), + InboundRequest::DataColumnsByRoot(req) => write!(f, "Data columns by root: {:?}", req), + InboundRequest::DataColumnsByRange(req) => { + write!(f, "Data columns by range: {:?}", req) + } InboundRequest::Ping(ping) => write!(f, "Ping: {}", ping.data), 
InboundRequest::MetaData(_) => write!(f, "MetaData request"), InboundRequest::LightClientBootstrap(bootstrap) => { diff --git a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs index b304eb546d..9fb085efd8 100644 --- a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs +++ b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs @@ -97,6 +97,10 @@ pub struct RPCRateLimiter { blbrange_rl: Limiter, /// BlobsByRoot rate limiter. blbroot_rl: Limiter, + /// DataColumnsByRoot rate limiter. + dcbroot_rl: Limiter, + /// DataColumnsByRange rate limiter. + dcbrange_rl: Limiter, /// LightClientBootstrap rate limiter. lc_bootstrap_rl: Limiter, /// LightClientOptimisticUpdate rate limiter. @@ -133,6 +137,10 @@ pub struct RPCRateLimiterBuilder { blbrange_quota: Option, /// Quota for the BlobsByRoot protocol. blbroot_quota: Option, + /// Quota for the DataColumnsByRoot protocol. + dcbroot_quota: Option, + /// Quota for the DataColumnsByRange protocol. + dcbrange_quota: Option, /// Quota for the LightClientBootstrap protocol. lcbootstrap_quota: Option, /// Quota for the LightClientOptimisticUpdate protocol. 
@@ -154,6 +162,8 @@ impl RPCRateLimiterBuilder { Protocol::BlocksByRoot => self.bbroots_quota = q, Protocol::BlobsByRange => self.blbrange_quota = q, Protocol::BlobsByRoot => self.blbroot_quota = q, + Protocol::DataColumnsByRoot => self.dcbroot_quota = q, + Protocol::DataColumnsByRange => self.dcbrange_quota = q, Protocol::LightClientBootstrap => self.lcbootstrap_quota = q, Protocol::LightClientOptimisticUpdate => self.lc_optimistic_update_quota = q, Protocol::LightClientFinalityUpdate => self.lc_finality_update_quota = q, @@ -191,6 +201,14 @@ impl RPCRateLimiterBuilder { .blbroot_quota .ok_or("BlobsByRoot quota not specified")?; + let dcbroot_quota = self + .dcbroot_quota + .ok_or("DataColumnsByRoot quota not specified")?; + + let dcbrange_quota = self + .dcbrange_quota + .ok_or("DataColumnsByRange quota not specified")?; + // create the rate limiters let ping_rl = Limiter::from_quota(ping_quota)?; let metadata_rl = Limiter::from_quota(metadata_quota)?; @@ -200,6 +218,8 @@ impl RPCRateLimiterBuilder { let bbrange_rl = Limiter::from_quota(bbrange_quota)?; let blbrange_rl = Limiter::from_quota(blbrange_quota)?; let blbroot_rl = Limiter::from_quota(blbroots_quota)?; + let dcbroot_rl = Limiter::from_quota(dcbroot_quota)?; + let dcbrange_rl = Limiter::from_quota(dcbrange_quota)?; let lc_bootstrap_rl = Limiter::from_quota(lc_bootstrap_quota)?; let lc_optimistic_update_rl = Limiter::from_quota(lc_optimistic_update_quota)?; let lc_finality_update_rl = Limiter::from_quota(lc_finality_update_quota)?; @@ -218,6 +238,8 @@ impl RPCRateLimiterBuilder { bbrange_rl, blbrange_rl, blbroot_rl, + dcbroot_rl, + dcbrange_rl, lc_bootstrap_rl, lc_optimistic_update_rl, lc_finality_update_rl, @@ -262,6 +284,8 @@ impl RPCRateLimiter { blocks_by_root_quota, blobs_by_range_quota, blobs_by_root_quota, + data_columns_by_root_quota, + data_columns_by_range_quota, light_client_bootstrap_quota, light_client_optimistic_update_quota, light_client_finality_update_quota, @@ -276,6 +300,8 @@ impl 
RPCRateLimiter { .set_quota(Protocol::BlocksByRoot, blocks_by_root_quota) .set_quota(Protocol::BlobsByRange, blobs_by_range_quota) .set_quota(Protocol::BlobsByRoot, blobs_by_root_quota) + .set_quota(Protocol::DataColumnsByRoot, data_columns_by_root_quota) + .set_quota(Protocol::DataColumnsByRange, data_columns_by_range_quota) .set_quota(Protocol::LightClientBootstrap, light_client_bootstrap_quota) .set_quota( Protocol::LightClientOptimisticUpdate, @@ -312,6 +338,8 @@ impl RPCRateLimiter { Protocol::BlocksByRoot => &mut self.bbroots_rl, Protocol::BlobsByRange => &mut self.blbrange_rl, Protocol::BlobsByRoot => &mut self.blbroot_rl, + Protocol::DataColumnsByRoot => &mut self.dcbroot_rl, + Protocol::DataColumnsByRange => &mut self.dcbrange_rl, Protocol::LightClientBootstrap => &mut self.lc_bootstrap_rl, Protocol::LightClientOptimisticUpdate => &mut self.lc_optimistic_update_rl, Protocol::LightClientFinalityUpdate => &mut self.lc_finality_update_rl, diff --git a/beacon_node/lighthouse_network/src/service/api_types.rs b/beacon_node/lighthouse_network/src/service/api_types.rs index 376ac34dee..11df591ae4 100644 --- a/beacon_node/lighthouse_network/src/service/api_types.rs +++ b/beacon_node/lighthouse_network/src/service/api_types.rs @@ -2,11 +2,13 @@ use std::sync::Arc; use libp2p::swarm::ConnectionId; use types::{ - BlobSidecar, EthSpec, LightClientBootstrap, LightClientFinalityUpdate, + BlobSidecar, DataColumnSidecar, EthSpec, LightClientBootstrap, LightClientFinalityUpdate, LightClientOptimisticUpdate, SignedBeaconBlock, }; -use crate::rpc::methods::{BlobsByRangeRequest, BlobsByRootRequest}; +use crate::rpc::methods::{ + BlobsByRangeRequest, BlobsByRootRequest, DataColumnsByRangeRequest, DataColumnsByRootRequest, +}; use crate::rpc::{ methods::{ BlocksByRangeRequest, BlocksByRootRequest, LightClientBootstrapRequest, @@ -27,6 +29,11 @@ pub struct SingleLookupReqId { pub req_id: Id, } +/// Request ID for data_columns_by_root requests. 
Block lookups do not issue these requests directly. +/// Wrapping this particular req_id ensures these requests are not mixed with a custody req_id. +#[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)] +pub struct DataColumnsByRootRequestId(pub Id); + /// Id of rpc requests sent by sync to the network. #[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)] pub enum SyncRequestId { @@ -34,6 +41,8 @@ pub enum SyncRequestId { SingleBlock { id: SingleLookupReqId }, /// Request searching for a set of blobs given a hash. SingleBlob { id: SingleLookupReqId }, + /// Request searching for a set of data columns given a hash and list of column indices. + DataColumnsByRoot(DataColumnsByRootRequestId, SingleLookupReqId), /// Range request that is composed by both a block range request and a blob range request. RangeBlockAndBlobs { id: Id }, } @@ -75,6 +84,10 @@ pub enum Request { LightClientFinalityUpdate, /// A request blobs root request. BlobsByRoot(BlobsByRootRequest), + /// A request data columns root request. + DataColumnsByRoot(DataColumnsByRootRequest), + /// A request data columns by range request. + DataColumnsByRange(DataColumnsByRangeRequest), } impl std::convert::From for OutboundRequest { @@ -104,6 +117,8 @@ impl std::convert::From for OutboundRequest { } Request::BlobsByRange(r) => OutboundRequest::BlobsByRange(r), Request::BlobsByRoot(r) => OutboundRequest::BlobsByRoot(r), + Request::DataColumnsByRoot(r) => OutboundRequest::DataColumnsByRoot(r), + Request::DataColumnsByRange(r) => OutboundRequest::DataColumnsByRange(r), Request::Status(s) => OutboundRequest::Status(s), } } @@ -123,10 +138,14 @@ pub enum Response { BlocksByRange(Option>>), /// A response to a get BLOBS_BY_RANGE request. A None response signals the end of the batch. BlobsByRange(Option>>), + /// A response to a get DATA_COLUMN_SIDECARS_BY_RANGE request. + DataColumnsByRange(Option>>), /// A response to a get BLOCKS_BY_ROOT request. BlocksByRoot(Option>>), /// A response to a get BLOBS_BY_ROOT request. 
BlobsByRoot(Option>>), + /// A response to a get DATA_COLUMN_SIDECARS_BY_ROOT request. + DataColumnsByRoot(Option>>), /// A response to a LightClientUpdate request. LightClientBootstrap(Arc>), /// A response to a LightClientOptimisticUpdate request. @@ -154,6 +173,16 @@ impl std::convert::From> for RPCCodedResponse { Some(b) => RPCCodedResponse::Success(RPCResponse::BlobsByRange(b)), None => RPCCodedResponse::StreamTermination(ResponseTermination::BlobsByRange), }, + Response::DataColumnsByRoot(r) => match r { + Some(d) => RPCCodedResponse::Success(RPCResponse::DataColumnsByRoot(d)), + None => RPCCodedResponse::StreamTermination(ResponseTermination::DataColumnsByRoot), + }, + Response::DataColumnsByRange(r) => match r { + Some(d) => RPCCodedResponse::Success(RPCResponse::DataColumnsByRange(d)), + None => { + RPCCodedResponse::StreamTermination(ResponseTermination::DataColumnsByRange) + } + }, Response::Status(s) => RPCCodedResponse::Success(RPCResponse::Status(s)), Response::LightClientBootstrap(b) => { RPCCodedResponse::Success(RPCResponse::LightClientBootstrap(b)) diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index fe649f4199..4ef080619e 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -1204,6 +1204,12 @@ impl Network { Request::BlobsByRoot { .. } => { metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["blobs_by_root"]) } + Request::DataColumnsByRoot { .. } => { + metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["data_columns_by_root"]) + } + Request::DataColumnsByRange { .. 
} => { + metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["data_columns_by_range"]) + } } NetworkEvent::RequestReceived { peer_id, @@ -1523,6 +1529,22 @@ impl Network { self.build_request(peer_request_id, peer_id, Request::BlobsByRoot(req)); Some(event) } + InboundRequest::DataColumnsByRoot(req) => { + let event = self.build_request( + peer_request_id, + peer_id, + Request::DataColumnsByRoot(req), + ); + Some(event) + } + InboundRequest::DataColumnsByRange(req) => { + let event = self.build_request( + peer_request_id, + peer_id, + Request::DataColumnsByRange(req), + ); + Some(event) + } InboundRequest::LightClientBootstrap(req) => { let event = self.build_request( peer_request_id, @@ -1580,6 +1602,12 @@ impl Network { RPCResponse::BlobsByRoot(resp) => { self.build_response(id, peer_id, Response::BlobsByRoot(Some(resp))) } + RPCResponse::DataColumnsByRoot(resp) => { + self.build_response(id, peer_id, Response::DataColumnsByRoot(Some(resp))) + } + RPCResponse::DataColumnsByRange(resp) => { + self.build_response(id, peer_id, Response::DataColumnsByRange(Some(resp))) + } // Should never be reached RPCResponse::LightClientBootstrap(bootstrap) => { self.build_response(id, peer_id, Response::LightClientBootstrap(bootstrap)) @@ -1602,6 +1630,8 @@ impl Network { ResponseTermination::BlocksByRoot => Response::BlocksByRoot(None), ResponseTermination::BlobsByRange => Response::BlobsByRange(None), ResponseTermination::BlobsByRoot => Response::BlobsByRoot(None), + ResponseTermination::DataColumnsByRoot => Response::DataColumnsByRoot(None), + ResponseTermination::DataColumnsByRange => Response::DataColumnsByRange(None), }; self.build_response(id, peer_id, response) } diff --git a/beacon_node/network/src/network_beacon_processor/mod.rs b/beacon_node/network/src/network_beacon_processor/mod.rs index ffb01a99ef..9fb14fdcb8 100644 --- a/beacon_node/network/src/network_beacon_processor/mod.rs +++ b/beacon_node/network/src/network_beacon_processor/mod.rs @@ -8,7 +8,9 @@ use 
beacon_processor::{ DuplicateCache, GossipAggregatePackage, GossipAttestationPackage, Work, WorkEvent as BeaconWorkEvent, }; -use lighthouse_network::rpc::methods::{BlobsByRangeRequest, BlobsByRootRequest}; +use lighthouse_network::rpc::methods::{ + BlobsByRangeRequest, BlobsByRootRequest, DataColumnsByRangeRequest, DataColumnsByRootRequest, +}; use lighthouse_network::{ rpc::{BlocksByRangeRequest, BlocksByRootRequest, LightClientBootstrapRequest, StatusMessage}, Client, MessageId, NetworkGlobals, PeerId, PeerRequestId, @@ -602,6 +604,40 @@ impl NetworkBeaconProcessor { }) } + /// Create a new work event to process `DataColumnsByRootRequest`s from the RPC network. + pub fn send_data_columns_by_roots_request( + self: &Arc, + peer_id: PeerId, + request_id: PeerRequestId, + request: DataColumnsByRootRequest, + ) -> Result<(), Error> { + let processor = self.clone(); + let process_fn = + move || processor.handle_data_columns_by_root_request(peer_id, request_id, request); + + self.try_send(BeaconWorkEvent { + drop_during_sync: false, + work: Work::DataColumnsByRootsRequest(Box::new(process_fn)), + }) + } + + /// Create a new work event to process `DataColumnsByRange`s from the RPC network. + pub fn send_data_columns_by_range_request( + self: &Arc, + peer_id: PeerId, + request_id: PeerRequestId, + request: DataColumnsByRangeRequest, + ) -> Result<(), Error> { + let processor = self.clone(); + let process_fn = + move || processor.handle_data_columns_by_range_request(peer_id, request_id, request); + + self.try_send(BeaconWorkEvent { + drop_during_sync: false, + work: Work::DataColumnsByRangeRequest(Box::new(process_fn)), + }) + } + /// Create a new work event to process `LightClientBootstrap`s from the RPC network. 
pub fn send_light_client_bootstrap_request( self: &Arc, diff --git a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs index 2a0c7ea089..3f8cf14dcb 100644 --- a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs @@ -4,7 +4,9 @@ use crate::status::ToStatusMessage; use crate::sync::SyncMessage; use beacon_chain::{BeaconChainError, BeaconChainTypes, HistoricalBlockError, WhenSlotSkipped}; use itertools::process_results; -use lighthouse_network::rpc::methods::{BlobsByRangeRequest, BlobsByRootRequest}; +use lighthouse_network::rpc::methods::{ + BlobsByRangeRequest, BlobsByRootRequest, DataColumnsByRangeRequest, DataColumnsByRootRequest, +}; use lighthouse_network::rpc::*; use lighthouse_network::{PeerId, PeerRequestId, ReportSource, Response, SyncInfo}; use slog::{debug, error, warn}; @@ -314,6 +316,20 @@ impl NetworkBeaconProcessor { Ok(()) } + /// Handle a `DataColumnsByRoot` request from the peer. + pub fn handle_data_columns_by_root_request( + self: Arc, + peer_id: PeerId, + _request_id: PeerRequestId, + request: DataColumnsByRootRequest, + ) { + // TODO(das): implement handler + debug!(self.log, "Received DataColumnsByRoot Request"; + "peer_id" => %peer_id, + "count" => request.data_column_ids.len() + ); + } + /// Handle a `LightClientBootstrap` request from the peer. pub fn handle_light_client_bootstrap( self: &Arc, @@ -815,6 +831,21 @@ impl NetworkBeaconProcessor { Ok(()) } + /// Handle a `DataColumnsByRange` request from the peer. 
+ pub fn handle_data_columns_by_range_request( + self: Arc, + peer_id: PeerId, + _request_id: PeerRequestId, + req: DataColumnsByRangeRequest, + ) { + // TODO(das): implement handler + debug!(self.log, "Received DataColumnsByRange Request"; + "peer_id" => %peer_id, + "count" => req.count, + "start_slot" => req.start_slot, + ); + } + /// Helper function to ensure single item protocol always end with either a single chunk or an /// error fn terminate_response_single_item Response>( diff --git a/beacon_node/network/src/router.rs b/beacon_node/network/src/router.rs index c162d52d02..a5e27f582a 100644 --- a/beacon_node/network/src/router.rs +++ b/beacon_node/network/src/router.rs @@ -27,7 +27,7 @@ use std::sync::Arc; use std::time::{Duration, SystemTime, UNIX_EPOCH}; use tokio::sync::mpsc; use tokio_stream::wrappers::UnboundedReceiverStream; -use types::{BlobSidecar, EthSpec, SignedBeaconBlock}; +use types::{BlobSidecar, DataColumnSidecar, EthSpec, SignedBeaconBlock}; /// Handles messages from the network and routes them to the appropriate service to be handled. 
pub struct Router { @@ -216,6 +216,14 @@ impl Router { self.network_beacon_processor .send_blobs_by_roots_request(peer_id, request_id, request), ), + Request::DataColumnsByRoot(request) => self.handle_beacon_processor_send_result( + self.network_beacon_processor + .send_data_columns_by_roots_request(peer_id, request_id, request), + ), + Request::DataColumnsByRange(request) => self.handle_beacon_processor_send_result( + self.network_beacon_processor + .send_data_columns_by_range_request(peer_id, request_id, request), + ), Request::LightClientBootstrap(request) => self.handle_beacon_processor_send_result( self.network_beacon_processor .send_light_client_bootstrap_request(peer_id, request_id, request), @@ -258,6 +266,12 @@ impl Router { Response::BlobsByRoot(blob) => { self.on_blobs_by_root_response(peer_id, request_id, blob); } + Response::DataColumnsByRoot(data_column) => { + self.on_data_columns_by_root_response(peer_id, request_id, data_column); + } + Response::DataColumnsByRange(data_column) => { + self.on_data_columns_by_range_response(peer_id, request_id, data_column); + } // Light client responses should not be received Response::LightClientBootstrap(_) | Response::LightClientOptimisticUpdate(_) @@ -507,11 +521,11 @@ impl Router { ) { let request_id = match request_id { AppRequestId::Sync(sync_id) => match sync_id { - SyncRequestId::SingleBlock { .. } | SyncRequestId::SingleBlob { .. } => { - crit!(self.log, "Block lookups do not request BBRange requests"; "peer_id" => %peer_id); + id @ SyncRequestId::RangeBlockAndBlobs { .. } => id, + other => { + crit!(self.log, "BlocksByRange response on incorrect request"; "request" => ?other); return; } - id @ SyncRequestId::RangeBlockAndBlobs { .. } => id, }, AppRequestId::Router => { crit!(self.log, "All BBRange requests belong to sync"; "peer_id" => %peer_id); @@ -570,12 +584,8 @@ impl Router { let request_id = match request_id { AppRequestId::Sync(sync_id) => match sync_id { id @ SyncRequestId::SingleBlock { .. 
} => id, - SyncRequestId::RangeBlockAndBlobs { .. } => { - crit!(self.log, "Batch syncing do not request BBRoot requests"; "peer_id" => %peer_id); - return; - } - SyncRequestId::SingleBlob { .. } => { - crit!(self.log, "Blob response to block by roots request"; "peer_id" => %peer_id); + other => { + crit!(self.log, "BlocksByRoot response on incorrect request"; "request" => ?other); return; } }, @@ -608,12 +618,8 @@ impl Router { let request_id = match request_id { AppRequestId::Sync(sync_id) => match sync_id { id @ SyncRequestId::SingleBlob { .. } => id, - SyncRequestId::SingleBlock { .. } => { - crit!(self.log, "Block response to blobs by roots request"; "peer_id" => %peer_id); - return; - } - SyncRequestId::RangeBlockAndBlobs { .. } => { - crit!(self.log, "Batch syncing does not request BBRoot requests"; "peer_id" => %peer_id); + other => { + crit!(self.log, "BlobsByRoot response on incorrect request"; "request" => ?other); return; } }, @@ -636,6 +642,67 @@ impl Router { }); } + /// Handle a `DataColumnsByRoot` response from the peer. + pub fn on_data_columns_by_root_response( + &mut self, + peer_id: PeerId, + request_id: AppRequestId, + data_column: Option>>, + ) { + let request_id = match request_id { + AppRequestId::Sync(sync_id) => match sync_id { + id @ SyncRequestId::DataColumnsByRoot { .. 
} => id, + other => { + crit!(self.log, "DataColumnsByRoot response on incorrect request"; "request" => ?other); + return; + } + }, + AppRequestId::Router => { + crit!(self.log, "All DataColumnsByRoot requests belong to sync"; "peer_id" => %peer_id); + return; + } + }; + + trace!( + self.log, + "Received DataColumnsByRoot Response"; + "peer" => %peer_id, + ); + self.send_to_sync(SyncMessage::RpcDataColumn { + request_id, + peer_id, + data_column, + seen_timestamp: timestamp_now(), + }); + } + + pub fn on_data_columns_by_range_response( + &mut self, + peer_id: PeerId, + request_id: AppRequestId, + data_column: Option>>, + ) { + trace!( + self.log, + "Received DataColumnsByRange Response"; + "peer" => %peer_id, + ); + + if let AppRequestId::Sync(id) = request_id { + self.send_to_sync(SyncMessage::RpcDataColumn { + peer_id, + request_id: id, + data_column, + seen_timestamp: timestamp_now(), + }); + } else { + crit!( + self.log, + "All data columns by range responses should belong to sync" + ); + } + } + fn handle_beacon_processor_send_result( &mut self, result: Result<(), crate::network_beacon_processor::Error>, diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 7149395839..e8e6896cd6 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -101,6 +101,14 @@ pub enum SyncMessage { seen_timestamp: Duration, }, + /// A data columns has been received from the RPC + RpcDataColumn { + request_id: SyncRequestId, + peer_id: PeerId, + data_column: Option>>, + seen_timestamp: Duration, + }, + /// A block with an unknown parent has been received. UnknownParentBlock(PeerId, RpcBlock, Hash256), @@ -337,6 +345,9 @@ impl SyncManager { SyncRequestId::SingleBlob { id } => { self.on_single_blob_response(id, peer_id, RpcEvent::RPCError(error)) } + SyncRequestId::DataColumnsByRoot { .. 
} => { + // TODO(das) + } SyncRequestId::RangeBlockAndBlobs { id } => { if let Some(sender_id) = self.network.range_request_failed(id) { match sender_id { @@ -614,6 +625,12 @@ impl SyncManager { blob_sidecar, seen_timestamp, } => self.rpc_blob_received(request_id, peer_id, blob_sidecar, seen_timestamp), + SyncMessage::RpcDataColumn { + request_id, + peer_id, + data_column, + seen_timestamp, + } => self.rpc_data_column_received(request_id, peer_id, data_column, seen_timestamp), SyncMessage::UnknownParentBlock(peer_id, block, block_root) => { let block_slot = block.slot(); let parent_root = block.parent_root(); @@ -846,6 +863,9 @@ impl SyncManager { SyncRequestId::SingleBlob { .. } => { crit!(self.log, "Block received during blob request"; "peer_id" => %peer_id ); } + SyncRequestId::DataColumnsByRoot { .. } => { + // TODO(das) + } SyncRequestId::RangeBlockAndBlobs { id } => { self.range_block_and_blobs_response(id, peer_id, block.into()) } @@ -888,12 +908,25 @@ impl SyncManager { None => RpcEvent::StreamTermination, }, ), + SyncRequestId::DataColumnsByRoot { .. } => { + // TODO(das) + } SyncRequestId::RangeBlockAndBlobs { id } => { self.range_block_and_blobs_response(id, peer_id, blob.into()) } } } + fn rpc_data_column_received( + &mut self, + _request_id: SyncRequestId, + _peer_id: PeerId, + _data_column: Option>>, + _seen_timestamp: Duration, + ) { + // TODO(das): implement handler + } + fn on_single_blob_response( &mut self, id: SingleLookupReqId, From 6dc614fede50be066f7db7e1289c7fa7275c8b0f Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Tue, 13 Aug 2024 10:16:17 +1000 Subject: [PATCH 32/43] Add PeerDAS KZG lib integration (construction & KZG verification) (#6212) * Add peerdas KZG library and use it for data column construction and cell kzg verification (#5701, #5941, #6118, #6179) Co-authored-by: kevaundray * Update `rust_eth_kzg` crate to published version. * Update kzg metrics buckets. 
* Merge branch 'unstable' into peerdas-kzg * Update KZG version to fix windows mem allocation. * Refactor common logic from build sidecar and reconstruction. Remove unnecessary `needless_lifetimes`. Co-authored-by: realbigsean * Copy existing trusted setup into `PeerDASTrustedSetup` for consistency and maintain `--trusted-setup` functionality. * Merge branch 'unstable' into peerdas-kzg * Merge branch 'peerdas-kzg' of github.com:jimmygchen/lighthouse into peerdas-kzg * Merge branch 'unstable' into peerdas-kzg * Merge branch 'unstable' into peerdas-kzg * Load PeerDAS KZG only if PeerDAS is enabled. --- Cargo.lock | 93 +++++ Cargo.toml | 1 + .../src/data_column_verification.rs | 16 +- beacon_node/beacon_chain/src/kzg_utils.rs | 328 +++++++++++++++++- beacon_node/beacon_chain/src/metrics.rs | 27 ++ beacon_node/client/src/builder.rs | 19 +- consensus/types/src/data_column_sidecar.rs | 212 +---------- crypto/kzg/Cargo.toml | 10 + crypto/kzg/benches/benchmark.rs | 31 ++ crypto/kzg/src/lib.rs | 151 ++++---- crypto/kzg/src/trusted_setup.rs | 23 ++ 11 files changed, 627 insertions(+), 284 deletions(-) create mode 100644 crypto/kzg/benches/benchmark.rs diff --git a/Cargo.lock b/Cargo.lock index 9afb3635f1..df005da696 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1029,6 +1029,22 @@ dependencies = [ "zeroize", ] +[[package]] +name = "blstrs" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a8a8ed6fefbeef4a8c7b460e4110e12c5e22a5b7cf32621aae6ad650c4dcf29" +dependencies = [ + "blst", + "byte-slice-cast", + "ff 0.13.0", + "group 0.13.0", + "pairing", + "rand_core", + "serde", + "subtle", +] + [[package]] name = "bollard-stubs" version = "1.42.0-rc.3" @@ -1514,6 +1530,52 @@ dependencies = [ "libc", ] +[[package]] +name = "crate_crypto_internal_eth_kzg_bls12_381" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8761b04feb6031ffaf93933c955a0c91a2f3ce15dcac6b9586d2487fe55abf0b" 
+dependencies = [ + "blst", + "blstrs", + "ff 0.13.0", + "group 0.13.0", + "pairing", + "rayon", +] + +[[package]] +name = "crate_crypto_internal_eth_kzg_erasure_codes" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eca410dff79524a2babe8a0d9ab5fdce21b16808f8189eb8b6da6159681f8de2" +dependencies = [ + "crate_crypto_internal_eth_kzg_bls12_381", + "crate_crypto_internal_eth_kzg_polynomial", +] + +[[package]] +name = "crate_crypto_internal_eth_kzg_polynomial" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68be1a5f16bc1c09254dec5209e22278d7d395284443576886a5890e7131234f" +dependencies = [ + "crate_crypto_internal_eth_kzg_bls12_381", +] + +[[package]] +name = "crate_crypto_kzg_multi_open_fk20" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "702fe5b687fe8c5a46851b8bc624ad49603a339dc93c920d4f7e61592c201ee8" +dependencies = [ + "crate_crypto_internal_eth_kzg_bls12_381", + "crate_crypto_internal_eth_kzg_polynomial", + "hex", + "rayon", + "sha2 0.10.8", +] + [[package]] name = "crc32fast" version = "1.4.2" @@ -3001,6 +3063,7 @@ version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ded41244b729663b1e574f1b4fb731469f69f79c17667b5d776b16cda0479449" dependencies = [ + "bitvec 1.0.1", "rand_core", "subtle", ] @@ -3431,7 +3494,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" dependencies = [ "ff 0.13.0", + "rand", "rand_core", + "rand_xorshift", "subtle", ] @@ -4342,13 +4407,17 @@ version = "0.1.0" dependencies = [ "arbitrary", "c-kzg", + "criterion", "derivative", + "eth2_network_config", "ethereum_hashing", "ethereum_serde_utils", "ethereum_ssz", "ethereum_ssz_derive", "hex", + "rust_eth_kzg", "serde", + "serde_json", "tree_hash", ] @@ -5897,6 +5966,15 @@ dependencies = [ "sha2 0.10.8", ] 
+[[package]] +name = "pairing" +version = "0.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81fec4625e73cf41ef4bb6846cafa6d44736525f442ba45e407c4a000a13996f" +dependencies = [ + "group 0.13.0", +] + [[package]] name = "parity-scale-codec" version = "2.3.1" @@ -7015,6 +7093,21 @@ dependencies = [ "smallvec", ] +[[package]] +name = "rust_eth_kzg" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "013a850c7e131a8f9651ffbb151dc33240234f21dd357b692bd5ff4cdc84bf9a" +dependencies = [ + "crate_crypto_internal_eth_kzg_bls12_381", + "crate_crypto_internal_eth_kzg_erasure_codes", + "crate_crypto_kzg_multi_open_fk20", + "hex", + "rayon", + "serde", + "serde_json", +] + [[package]] name = "rustc-demangle" version = "0.1.24" diff --git a/Cargo.toml b/Cargo.toml index cf3fd0ab04..901fff2ea6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -114,6 +114,7 @@ delay_map = "0.3" derivative = "2" dirs = "3" either = "1.9" +rust_eth_kzg = "0.3.4" discv5 = { version = "0.4.1", features = ["libp2p"] } env_logger = "0.9" error-chain = "0.12" diff --git a/beacon_node/beacon_chain/src/data_column_verification.rs b/beacon_node/beacon_chain/src/data_column_verification.rs index fa31d6f2e8..da639e3695 100644 --- a/beacon_node/beacon_chain/src/data_column_verification.rs +++ b/beacon_node/beacon_chain/src/data_column_verification.rs @@ -2,7 +2,8 @@ use crate::block_verification::{ cheap_state_advance_to_obtain_committees, get_validator_pubkey_cache, process_block_slash_info, BlockSlashInfo, }; -use crate::{BeaconChain, BeaconChainError, BeaconChainTypes}; +use crate::kzg_utils::validate_data_columns; +use crate::{metrics, BeaconChain, BeaconChainError, BeaconChainTypes}; use derivative::Derivative; use fork_choice::ProtoBlock; use kzg::{Error as KzgError, Kzg}; @@ -11,6 +12,7 @@ use slasher::test_utils::E; use slog::debug; use slot_clock::SlotClock; use ssz_derive::{Decode, Encode}; +use std::iter; use std::sync::Arc; 
use types::data_column_sidecar::{ColumnIndex, DataColumnIdentifier}; use types::{ @@ -255,9 +257,10 @@ impl KzgVerifiedCustodyDataColumn { /// Returns an error if the kzg verification check fails. pub fn verify_kzg_for_data_column( data_column: Arc>, - _kzg: &Kzg, + kzg: &Kzg, ) -> Result, KzgError> { - // TODO(das): KZG verification to be implemented + let _timer = metrics::start_timer(&metrics::KZG_VERIFICATION_DATA_COLUMN_SINGLE_TIMES); + validate_data_columns(kzg, iter::once(&data_column))?; Ok(KzgVerifiedDataColumn { data: data_column }) } @@ -267,13 +270,14 @@ pub fn verify_kzg_for_data_column( /// Note: This function should be preferred over calling `verify_kzg_for_data_column` /// in a loop since this function kzg verifies a list of data columns more efficiently. pub fn verify_kzg_for_data_column_list<'a, E: EthSpec, I>( - _data_column_iter: I, - _kzg: &'a Kzg, + data_column_iter: I, + kzg: &'a Kzg, ) -> Result<(), KzgError> where I: Iterator>> + Clone, { - // TODO(das): implement KZG verification + let _timer = metrics::start_timer(&metrics::KZG_VERIFICATION_DATA_COLUMN_BATCH_TIMES); + validate_data_columns(kzg, data_column_iter)?; Ok(()) } diff --git a/beacon_node/beacon_chain/src/kzg_utils.rs b/beacon_node/beacon_chain/src/kzg_utils.rs index b554133875..55c1ee9e98 100644 --- a/beacon_node/beacon_chain/src/kzg_utils.rs +++ b/beacon_node/beacon_chain/src/kzg_utils.rs @@ -1,5 +1,15 @@ -use kzg::{Blob as KzgBlob, Error as KzgError, Kzg}; -use types::{Blob, EthSpec, Hash256, KzgCommitment, KzgProof}; +use kzg::{ + Blob as KzgBlob, Bytes48, CellRef as KzgCellRef, CellsAndKzgProofs, Error as KzgError, Kzg, +}; +use rayon::prelude::*; +use ssz_types::FixedVector; +use std::sync::Arc; +use types::beacon_block_body::KzgCommitments; +use types::data_column_sidecar::{Cell, DataColumn, DataColumnSidecarError}; +use types::{ + Blob, BlobsList, ChainSpec, ColumnIndex, DataColumnSidecar, DataColumnSidecarList, EthSpec, + Hash256, KzgCommitment, KzgProof, KzgProofs, 
SignedBeaconBlock, SignedBeaconBlockHeader, +}; /// Converts a blob ssz List object to an array to be used with the kzg /// crypto library. @@ -7,6 +17,15 @@ fn ssz_blob_to_crypto_blob(blob: &Blob) -> Result(cell: &Cell) -> Result { + let cell_bytes: &[u8] = cell.as_ref(); + Ok(cell_bytes + .try_into() + .expect("expected cell to have size {BYTES_PER_CELL}. This should be guaranteed by the `FixedVector type")) +} + /// Validate a single blob-commitment-proof triplet from a `BlobSidecar`. pub fn validate_blob( kzg: &Kzg, @@ -19,6 +38,50 @@ pub fn validate_blob( kzg.verify_blob_kzg_proof(&kzg_blob, kzg_commitment, kzg_proof) } +/// Validate a batch of `DataColumnSidecar`. +pub fn validate_data_columns<'a, E: EthSpec, I>( + kzg: &Kzg, + data_column_iter: I, +) -> Result<(), KzgError> +where + I: Iterator>> + Clone, +{ + let cells = data_column_iter + .clone() + .flat_map(|data_column| data_column.column.iter().map(ssz_cell_to_crypto_cell::)) + .collect::, KzgError>>()?; + + let proofs = data_column_iter + .clone() + .flat_map(|data_column| { + data_column + .kzg_proofs + .iter() + .map(|&proof| Bytes48::from(proof)) + }) + .collect::>(); + + let column_indices = data_column_iter + .clone() + .flat_map(|data_column| { + let col_index = data_column.index; + data_column.column.iter().map(move |_| col_index) + }) + .collect::>(); + + let commitments = data_column_iter + .clone() + .flat_map(|data_column| { + data_column + .kzg_commitments + .iter() + .map(|&commitment| Bytes48::from(commitment)) + }) + .collect::>(); + + kzg.verify_cell_proof_batch(&cells, &proofs, column_indices, &commitments) +} + /// Validate a batch of blob-commitment-proof triplets from multiple `BlobSidecars`. pub fn validate_blobs( kzg: &Kzg, @@ -76,3 +139,264 @@ pub fn verify_kzg_proof( ) -> Result { kzg.verify_kzg_proof(kzg_commitment, &z.0.into(), &y.0.into(), kzg_proof) } + +/// Build data column sidecars from a signed beacon block and its blobs. 
+pub fn blobs_to_data_column_sidecars( + blobs: &BlobsList, + block: &SignedBeaconBlock, + kzg: &Kzg, + spec: &ChainSpec, +) -> Result, DataColumnSidecarError> { + if blobs.is_empty() { + return Ok(vec![]); + } + let kzg_commitments = block + .message() + .body() + .blob_kzg_commitments() + .map_err(|_err| DataColumnSidecarError::PreDeneb)?; + let kzg_commitments_inclusion_proof = block.message().body().kzg_commitments_merkle_proof()?; + let signed_block_header = block.signed_block_header(); + + // NOTE: assumes blob sidecars are ordered by index + let blob_cells_and_proofs_vec = blobs + .into_par_iter() + .map(|blob| { + let blob = blob + .as_ref() + .try_into() + .expect("blob should have a guaranteed size due to FixedVector"); + kzg.compute_cells_and_proofs(blob) + }) + .collect::, KzgError>>()?; + + build_data_column_sidecars( + kzg_commitments.clone(), + kzg_commitments_inclusion_proof, + signed_block_header, + blob_cells_and_proofs_vec, + spec, + ) + .map_err(DataColumnSidecarError::BuildSidecarFailed) +} + +fn build_data_column_sidecars( + kzg_commitments: KzgCommitments, + kzg_commitments_inclusion_proof: FixedVector, + signed_block_header: SignedBeaconBlockHeader, + blob_cells_and_proofs_vec: Vec, + spec: &ChainSpec, +) -> Result, String> { + let number_of_columns = spec.number_of_columns; + let mut columns = vec![Vec::with_capacity(E::max_blobs_per_block()); number_of_columns]; + let mut column_kzg_proofs = + vec![Vec::with_capacity(E::max_blobs_per_block()); number_of_columns]; + + for (blob_cells, blob_cell_proofs) in blob_cells_and_proofs_vec { + // we iterate over each column, and we construct the column from "top to bottom", + // pushing on the cell and the corresponding proof at each column index. we do this for + // each blob (i.e. the outer loop). 
+ for col in 0..number_of_columns { + let cell = blob_cells + .get(col) + .ok_or(format!("Missing blob cell at index {col}"))?; + let cell: Vec = cell.to_vec(); + let cell = Cell::::from(cell); + + let proof = blob_cell_proofs + .get(col) + .ok_or(format!("Missing blob cell KZG proof at index {col}"))?; + + let column = columns + .get_mut(col) + .ok_or(format!("Missing data column at index {col}"))?; + let column_proofs = column_kzg_proofs + .get_mut(col) + .ok_or(format!("Missing data column proofs at index {col}"))?; + + column.push(cell); + column_proofs.push(*proof); + } + } + + let sidecars: Vec>> = columns + .into_iter() + .zip(column_kzg_proofs) + .enumerate() + .map(|(index, (col, proofs))| { + Arc::new(DataColumnSidecar { + index: index as u64, + column: DataColumn::::from(col), + kzg_commitments: kzg_commitments.clone(), + kzg_proofs: KzgProofs::::from(proofs), + signed_block_header: signed_block_header.clone(), + kzg_commitments_inclusion_proof: kzg_commitments_inclusion_proof.clone(), + }) + }) + .collect(); + + Ok(sidecars) +} + +/// Reconstruct all data columns from a subset of data column sidecars (requires at least 50%). 
+pub fn reconstruct_data_columns( + kzg: &Kzg, + data_columns: &[Arc>], + spec: &ChainSpec, +) -> Result, KzgError> { + let first_data_column = data_columns + .first() + .ok_or(KzgError::InconsistentArrayLength( + "data_columns should have at least one element".to_string(), + ))?; + let num_of_blobs = first_data_column.kzg_commitments.len(); + + let blob_cells_and_proofs_vec = + (0..num_of_blobs) + .into_par_iter() + .map(|row_index| { + let mut cells: Vec = vec![]; + let mut cell_ids: Vec = vec![]; + for data_column in data_columns { + let cell = data_column.column.get(row_index).ok_or( + KzgError::InconsistentArrayLength(format!( + "Missing data column at index {row_index}" + )), + )?; + + cells.push(ssz_cell_to_crypto_cell::(cell)?); + cell_ids.push(data_column.index); + } + kzg.recover_cells_and_compute_kzg_proofs(&cell_ids, &cells) + }) + .collect::, KzgError>>()?; + + // Clone sidecar elements from existing data column, no need to re-compute + build_data_column_sidecars( + first_data_column.kzg_commitments.clone(), + first_data_column.kzg_commitments_inclusion_proof.clone(), + first_data_column.signed_block_header.clone(), + blob_cells_and_proofs_vec, + spec, + ) + .map_err(KzgError::ReconstructFailed) +} + +#[cfg(test)] +mod test { + use crate::kzg_utils::{blobs_to_data_column_sidecars, reconstruct_data_columns}; + use bls::Signature; + use eth2_network_config::TRUSTED_SETUP_BYTES; + use kzg::{Kzg, KzgCommitment, TrustedSetup}; + use types::{ + beacon_block_body::KzgCommitments, BeaconBlock, BeaconBlockDeneb, Blob, BlobsList, + ChainSpec, EmptyBlock, EthSpec, MainnetEthSpec, SignedBeaconBlock, + }; + + type E = MainnetEthSpec; + + // Loading and initializing PeerDAS KZG is expensive and slow, so we group the tests together + // only load it once. 
+ #[test] + fn test_build_data_columns_sidecars() { + let spec = E::default_spec(); + let kzg = get_kzg(); + test_build_data_columns_empty(&kzg, &spec); + test_build_data_columns(&kzg, &spec); + test_reconstruct_data_columns(&kzg, &spec); + } + + #[track_caller] + fn test_build_data_columns_empty(kzg: &Kzg, spec: &ChainSpec) { + let num_of_blobs = 0; + let (signed_block, blob_sidecars) = create_test_block_and_blobs::(num_of_blobs, spec); + let column_sidecars = + blobs_to_data_column_sidecars(&blob_sidecars, &signed_block, kzg, spec).unwrap(); + assert!(column_sidecars.is_empty()); + } + + #[track_caller] + fn test_build_data_columns(kzg: &Kzg, spec: &ChainSpec) { + let num_of_blobs = 6; + let (signed_block, blob_sidecars) = create_test_block_and_blobs::(num_of_blobs, spec); + + let column_sidecars = + blobs_to_data_column_sidecars(&blob_sidecars, &signed_block, kzg, spec).unwrap(); + + let block_kzg_commitments = signed_block + .message() + .body() + .blob_kzg_commitments() + .unwrap() + .clone(); + let block_kzg_commitments_inclusion_proof = signed_block + .message() + .body() + .kzg_commitments_merkle_proof() + .unwrap(); + + assert_eq!(column_sidecars.len(), spec.number_of_columns); + for (idx, col_sidecar) in column_sidecars.iter().enumerate() { + assert_eq!(col_sidecar.index, idx as u64); + + assert_eq!(col_sidecar.kzg_commitments.len(), num_of_blobs); + assert_eq!(col_sidecar.column.len(), num_of_blobs); + assert_eq!(col_sidecar.kzg_proofs.len(), num_of_blobs); + + assert_eq!(col_sidecar.kzg_commitments, block_kzg_commitments); + assert_eq!( + col_sidecar.kzg_commitments_inclusion_proof, + block_kzg_commitments_inclusion_proof + ); + assert!(col_sidecar.verify_inclusion_proof()); + } + } + + #[track_caller] + fn test_reconstruct_data_columns(kzg: &Kzg, spec: &ChainSpec) { + let num_of_blobs = 6; + let (signed_block, blob_sidecars) = create_test_block_and_blobs::(num_of_blobs, spec); + let column_sidecars = + blobs_to_data_column_sidecars(&blob_sidecars, 
&signed_block, kzg, spec).unwrap(); + + // Now reconstruct + let reconstructed_columns = reconstruct_data_columns( + kzg, + &column_sidecars.iter().as_slice()[0..column_sidecars.len() / 2], + spec, + ) + .unwrap(); + + for i in 0..spec.number_of_columns { + assert_eq!(reconstructed_columns.get(i), column_sidecars.get(i), "{i}"); + } + } + + fn get_kzg() -> Kzg { + let trusted_setup: TrustedSetup = serde_json::from_reader(TRUSTED_SETUP_BYTES) + .map_err(|e| format!("Unable to read trusted setup file: {}", e)) + .expect("should have trusted setup"); + Kzg::new_from_trusted_setup_das_enabled(trusted_setup).expect("should create kzg") + } + + fn create_test_block_and_blobs( + num_of_blobs: usize, + spec: &ChainSpec, + ) -> (SignedBeaconBlock, BlobsList) { + let mut block = BeaconBlock::Deneb(BeaconBlockDeneb::empty(spec)); + let mut body = block.body_mut(); + let blob_kzg_commitments = body.blob_kzg_commitments_mut().unwrap(); + *blob_kzg_commitments = + KzgCommitments::::new(vec![KzgCommitment::empty_for_testing(); num_of_blobs]) + .unwrap(); + + let signed_block = SignedBeaconBlock::from_block(block, Signature::empty()); + + let blobs = (0..num_of_blobs) + .map(|_| Blob::::default()) + .collect::>() + .into(); + + (signed_block, blobs) + } +} diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index b8969b31f1..0309c4995e 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -1645,6 +1645,13 @@ pub static BLOB_SIDECAR_INCLUSION_PROOF_COMPUTATION: LazyLock> "Time taken to compute blob sidecar inclusion proof", ) }); +pub static DATA_COLUMN_SIDECAR_COMPUTATION: LazyLock> = LazyLock::new(|| { + try_create_histogram_with_buckets( + "data_column_sidecar_computation_seconds", + "Time taken to compute data column sidecar, including cells, proofs and inclusion proof", + Ok(vec![0.04, 0.05, 0.1, 0.2, 0.3, 0.5, 0.7, 1.0]), + ) +}); pub static DATA_COLUMN_SIDECAR_PROCESSING_REQUESTS: 
LazyLock> = LazyLock::new(|| { try_create_int_counter( @@ -1785,6 +1792,26 @@ pub static KZG_VERIFICATION_BATCH_TIMES: LazyLock> = LazyLock: "Runtime of batched kzg verification", ) }); +pub static KZG_VERIFICATION_DATA_COLUMN_SINGLE_TIMES: LazyLock> = + LazyLock::new(|| { + try_create_histogram_with_buckets( + "kzg_verification_data_column_single_seconds", + "Runtime of single data column kzg verification", + Ok(vec![ + 0.0005, 0.001, 0.0015, 0.002, 0.003, 0.004, 0.005, 0.007, 0.01, 0.02, 0.05, + ]), + ) + }); +pub static KZG_VERIFICATION_DATA_COLUMN_BATCH_TIMES: LazyLock> = + LazyLock::new(|| { + try_create_histogram_with_buckets( + "kzg_verification_data_column_batch_seconds", + "Runtime of batched data column kzg verification", + Ok(vec![ + 0.002, 0.004, 0.006, 0.008, 0.01, 0.012, 0.015, 0.02, 0.03, 0.05, 0.07, + ]), + ) + }); pub static BLOCK_PRODUCTION_BLOBS_VERIFICATION_TIMES: LazyLock> = LazyLock::new( || { diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 6695f3c4bc..d299eebec8 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -10,7 +10,6 @@ use beacon_chain::graffiti_calculator::start_engine_version_cache_refresh_servic use beacon_chain::otb_verification_service::start_otb_verification_service; use beacon_chain::proposer_prep_service::start_proposer_prep_service; use beacon_chain::schema_change::migrate_schema; -use beacon_chain::LightClientProducerEvent; use beacon_chain::{ builder::{BeaconChainBuilder, Witness}, eth1_chain::{CachingEth1Backend, Eth1Chain}, @@ -19,6 +18,7 @@ use beacon_chain::{ store::{HotColdDB, ItemStore, LevelDB, StoreConfig}, BeaconChain, BeaconChainTypes, Eth1ChainBackend, MigratorConfig, ServerSentEventHandler, }; +use beacon_chain::{Kzg, LightClientProducerEvent}; use beacon_processor::{BeaconProcessor, BeaconProcessorChannels}; use beacon_processor::{BeaconProcessorConfig, BeaconProcessorQueueLengths}; use environment::RuntimeContext; @@ -505,7 +505,7 
@@ where deposit_snapshot.and_then(|snapshot| match Eth1Service::from_deposit_snapshot( config.eth1, context.log().clone(), - spec, + spec.clone(), &snapshot, ) { Ok(service) => { @@ -624,12 +624,15 @@ where }; let beacon_chain_builder = if let Some(trusted_setup) = config.trusted_setup { - let kzg = trusted_setup - .try_into() - .map(Arc::new) - .map(Some) - .map_err(|e| format!("Failed to load trusted setup: {:?}", e))?; - beacon_chain_builder.kzg(kzg) + let kzg_err_msg = |e| format!("Failed to load trusted setup: {:?}", e); + + let kzg = if spec.is_peer_das_scheduled() { + Kzg::new_from_trusted_setup_das_enabled(trusted_setup).map_err(kzg_err_msg)? + } else { + Kzg::new_from_trusted_setup(trusted_setup).map_err(kzg_err_msg)? + }; + + beacon_chain_builder.kzg(Some(Arc::new(kzg))) } else { beacon_chain_builder }; diff --git a/consensus/types/src/data_column_sidecar.rs b/consensus/types/src/data_column_sidecar.rs index a0e3ca6cce..90c05aea1f 100644 --- a/consensus/types/src/data_column_sidecar.rs +++ b/consensus/types/src/data_column_sidecar.rs @@ -1,17 +1,12 @@ use crate::beacon_block_body::{KzgCommitments, BLOB_KZG_COMMITMENTS_INDEX}; use crate::test_utils::TestRandom; -use crate::{ - BeaconBlockHeader, ChainSpec, EthSpec, Hash256, KzgProofs, SignedBeaconBlock, - SignedBeaconBlockHeader, Slot, -}; -use crate::{BeaconStateError, BlobsList}; +use crate::BeaconStateError; +use crate::{BeaconBlockHeader, EthSpec, Hash256, KzgProofs, SignedBeaconBlockHeader, Slot}; use bls::Signature; use derivative::Derivative; -use kzg::Kzg; -use kzg::{Blob as KzgBlob, Cell as KzgCell, Error as KzgError}; +use kzg::Error as KzgError; use kzg::{KzgCommitment, KzgProof}; use merkle_proof::verify_merkle_proof; -use rayon::prelude::*; use safe_arith::ArithError; use serde::{Deserialize, Serialize}; use ssz::Encode; @@ -60,7 +55,7 @@ pub struct DataColumnSidecar { pub index: ColumnIndex, #[serde(with = "ssz_types::serde_utils::list_of_hex_fixed_vec")] pub column: DataColumn, - /// All of 
the KZG commitments and proofs associated with the block, used for verifying sample cells. + /// All the KZG commitments and proofs associated with the block, used for verifying sample cells. pub kzg_commitments: KzgCommitments, pub kzg_proofs: KzgProofs, pub signed_block_header: SignedBeaconBlockHeader, @@ -98,197 +93,6 @@ impl DataColumnSidecar { ) } - pub fn build_sidecars( - blobs: &BlobsList, - block: &SignedBeaconBlock, - kzg: &Kzg, - spec: &ChainSpec, - ) -> Result, DataColumnSidecarError> { - let number_of_columns = spec.number_of_columns; - if blobs.is_empty() { - return Ok(vec![]); - } - let kzg_commitments = block - .message() - .body() - .blob_kzg_commitments() - .map_err(|_err| DataColumnSidecarError::PreDeneb)?; - let kzg_commitments_inclusion_proof = - block.message().body().kzg_commitments_merkle_proof()?; - let signed_block_header = block.signed_block_header(); - - let mut columns = vec![Vec::with_capacity(E::max_blobs_per_block()); number_of_columns]; - let mut column_kzg_proofs = - vec![Vec::with_capacity(E::max_blobs_per_block()); number_of_columns]; - - // NOTE: assumes blob sidecars are ordered by index - let blob_cells_and_proofs_vec = blobs - .into_par_iter() - .map(|blob| { - let blob = KzgBlob::from_bytes(blob).map_err(KzgError::from)?; - kzg.compute_cells_and_proofs(&blob) - }) - .collect::, KzgError>>()?; - - for (blob_cells, blob_cell_proofs) in blob_cells_and_proofs_vec { - // we iterate over each column, and we construct the column from "top to bottom", - // pushing on the cell and the corresponding proof at each column index. we do this for - // each blob (i.e. the outer loop). 
- for col in 0..number_of_columns { - let cell = - blob_cells - .get(col) - .ok_or(DataColumnSidecarError::InconsistentArrayLength(format!( - "Missing blob cell at index {col}" - )))?; - let cell: Vec = cell.into_inner().into_iter().collect(); - let cell = Cell::::from(cell); - - let proof = blob_cell_proofs.get(col).ok_or( - DataColumnSidecarError::InconsistentArrayLength(format!( - "Missing blob cell KZG proof at index {col}" - )), - )?; - - let column = - columns - .get_mut(col) - .ok_or(DataColumnSidecarError::InconsistentArrayLength(format!( - "Missing data column at index {col}" - )))?; - let column_proofs = column_kzg_proofs.get_mut(col).ok_or( - DataColumnSidecarError::InconsistentArrayLength(format!( - "Missing data column proofs at index {col}" - )), - )?; - - column.push(cell); - column_proofs.push(*proof); - } - } - - let sidecars: Vec>> = columns - .into_iter() - .zip(column_kzg_proofs) - .enumerate() - .map(|(index, (col, proofs))| { - Arc::new(DataColumnSidecar { - index: index as u64, - column: DataColumn::::from(col), - kzg_commitments: kzg_commitments.clone(), - kzg_proofs: KzgProofs::::from(proofs), - signed_block_header: signed_block_header.clone(), - kzg_commitments_inclusion_proof: kzg_commitments_inclusion_proof.clone(), - }) - }) - .collect(); - - Ok(sidecars) - } - - pub fn reconstruct( - kzg: &Kzg, - data_columns: &[Arc], - spec: &ChainSpec, - ) -> Result>, KzgError> { - let number_of_columns = spec.number_of_columns; - let mut columns = vec![Vec::with_capacity(E::max_blobs_per_block()); number_of_columns]; - let mut column_kzg_proofs = - vec![Vec::with_capacity(E::max_blobs_per_block()); number_of_columns]; - - let first_data_column = data_columns - .first() - .ok_or(KzgError::InconsistentArrayLength( - "data_columns should have at least one element".to_string(), - ))?; - let num_of_blobs = first_data_column.kzg_commitments.len(); - - let blob_cells_and_proofs_vec = (0..num_of_blobs) - .into_par_iter() - .map(|row_index| { - let mut 
cells: Vec = vec![]; - let mut cell_ids: Vec = vec![]; - for data_column in data_columns { - let cell = data_column.column.get(row_index).ok_or( - KzgError::InconsistentArrayLength(format!( - "Missing data column at index {row_index}" - )), - )?; - - cells.push(ssz_cell_to_crypto_cell::(cell)?); - cell_ids.push(data_column.index); - } - // recover_all_cells does not expect sorted - let all_cells = kzg.recover_all_cells(&cell_ids, &cells)?; - let blob = kzg.cells_to_blob(&all_cells)?; - - // Note: This function computes all cells and proofs. According to Justin this is okay, - // computing a partial set may be more expensive and requires code paths that don't exist. - // Computing the blobs cells is technically unnecessary but very cheap. It's done here again - // for simplicity. - kzg.compute_cells_and_proofs(&blob) - }) - .collect::, KzgError>>()?; - - for (blob_cells, blob_cell_proofs) in blob_cells_and_proofs_vec { - // we iterate over each column, and we construct the column from "top to bottom", - // pushing on the cell and the corresponding proof at each column index. we do this for - // each blob (i.e. the outer loop). 
- for col in 0..number_of_columns { - let cell = blob_cells - .get(col) - .ok_or(KzgError::InconsistentArrayLength(format!( - "Missing blob cell at index {col}" - )))?; - let cell: Vec = cell.into_inner().into_iter().collect(); - let cell = Cell::::from(cell); - - let proof = blob_cell_proofs - .get(col) - .ok_or(KzgError::InconsistentArrayLength(format!( - "Missing blob cell KZG proof at index {col}" - )))?; - - let column = columns - .get_mut(col) - .ok_or(KzgError::InconsistentArrayLength(format!( - "Missing data column at index {col}" - )))?; - let column_proofs = - column_kzg_proofs - .get_mut(col) - .ok_or(KzgError::InconsistentArrayLength(format!( - "Missing data column proofs at index {col}" - )))?; - - column.push(cell); - column_proofs.push(*proof); - } - } - - // Clone sidecar elements from existing data column, no need to re-compute - let kzg_commitments = &first_data_column.kzg_commitments; - let signed_block_header = &first_data_column.signed_block_header; - let kzg_commitments_inclusion_proof = &first_data_column.kzg_commitments_inclusion_proof; - - let sidecars: Vec>> = columns - .into_iter() - .zip(column_kzg_proofs) - .enumerate() - .map(|(index, (col, proofs))| { - Arc::new(DataColumnSidecar { - index: index as u64, - column: DataColumn::::from(col), - kzg_commitments: kzg_commitments.clone(), - kzg_proofs: KzgProofs::::from(proofs), - signed_block_header: signed_block_header.clone(), - kzg_commitments_inclusion_proof: kzg_commitments_inclusion_proof.clone(), - }) - }) - .collect(); - Ok(sidecars) - } - pub fn min_size() -> usize { // min size is one cell Self { @@ -360,7 +164,7 @@ pub enum DataColumnSidecarError { MissingBlobSidecars, PreDeneb, SszError(SszError), - InconsistentArrayLength(String), + BuildSidecarFailed(String), } impl From for DataColumnSidecarError { @@ -386,9 +190,3 @@ impl From for DataColumnSidecarError { Self::SszError(e) } } - -/// Converts a cell ssz List object to an array to be used with the kzg -/// crypto library. 
-fn ssz_cell_to_crypto_cell(cell: &Cell) -> Result { - KzgCell::from_bytes(cell.as_ref()).map_err(Into::into) -} diff --git a/crypto/kzg/Cargo.toml b/crypto/kzg/Cargo.toml index d26dfe4992..e940fe2e20 100644 --- a/crypto/kzg/Cargo.toml +++ b/crypto/kzg/Cargo.toml @@ -17,3 +17,13 @@ ethereum_serde_utils = { workspace = true } hex = { workspace = true } ethereum_hashing = { workspace = true } c-kzg = { workspace = true } +rust_eth_kzg = { workspace = true } + +[dev-dependencies] +criterion = { workspace = true } +serde_json = { workspace = true } +eth2_network_config = { workspace = true } + +[[bench]] +name = "benchmark" +harness = false diff --git a/crypto/kzg/benches/benchmark.rs b/crypto/kzg/benches/benchmark.rs new file mode 100644 index 0000000000..69ec94c0b1 --- /dev/null +++ b/crypto/kzg/benches/benchmark.rs @@ -0,0 +1,31 @@ +use c_kzg::KzgSettings; +use criterion::{criterion_group, criterion_main, Criterion}; +use eth2_network_config::TRUSTED_SETUP_BYTES; +use kzg::TrustedSetup; +use rust_eth_kzg::{DASContext, TrustedSetup as PeerDASTrustedSetup}; + +pub fn bench_init_context(c: &mut Criterion) { + let trusted_setup: TrustedSetup = serde_json::from_reader(TRUSTED_SETUP_BYTES) + .map_err(|e| format!("Unable to read trusted setup file: {}", e)) + .expect("should have trusted setup"); + + c.bench_function(&format!("Initialize context rust_eth_kzg"), |b| { + b.iter(|| { + const NUM_THREADS: usize = 1; + let trusted_setup = PeerDASTrustedSetup::from(&trusted_setup); + DASContext::with_threads(&trusted_setup, NUM_THREADS) + }) + }); + c.bench_function(&format!("Initialize context c-kzg (4844)"), |b| { + b.iter(|| { + let trusted_setup: TrustedSetup = serde_json::from_reader(TRUSTED_SETUP_BYTES) + .map_err(|e| format!("Unable to read trusted setup file: {}", e)) + .expect("should have trusted setup"); + KzgSettings::load_trusted_setup(&trusted_setup.g1_points(), &trusted_setup.g2_points()) + .unwrap() + }) + }); +} + +criterion_group!(benches, bench_init_context); 
+criterion_main!(benches); diff --git a/crypto/kzg/src/lib.rs b/crypto/kzg/src/lib.rs index 181642df39..507db05cd5 100644 --- a/crypto/kzg/src/lib.rs +++ b/crypto/kzg/src/lib.rs @@ -2,6 +2,7 @@ mod kzg_commitment; mod kzg_proof; mod trusted_setup; +use rust_eth_kzg::{CellIndex, DASContext}; use std::fmt::Debug; pub use crate::{ @@ -9,18 +10,35 @@ pub use crate::{ kzg_proof::KzgProof, trusted_setup::TrustedSetup, }; + pub use c_kzg::{ Blob, Bytes32, Bytes48, KzgSettings, BYTES_PER_BLOB, BYTES_PER_COMMITMENT, BYTES_PER_FIELD_ELEMENT, BYTES_PER_PROOF, FIELD_ELEMENTS_PER_BLOB, }; + +pub use rust_eth_kzg::{ + constants::{BYTES_PER_CELL, CELLS_PER_EXT_BLOB}, + Cell, CellIndex as CellID, CellRef, TrustedSetup as PeerDASTrustedSetup, +}; + +pub type CellsAndKzgProofs = ([Cell; CELLS_PER_EXT_BLOB], [KzgProof; CELLS_PER_EXT_BLOB]); + +pub type KzgBlobRef<'a> = &'a [u8; BYTES_PER_BLOB]; + #[derive(Debug)] pub enum Error { /// An error from the underlying kzg library. Kzg(c_kzg::Error), + /// A prover/verifier error from the rust-eth-kzg library. + PeerDASKZG(rust_eth_kzg::Error), /// The kzg verification failed KzgVerificationFailed, /// Misc indexing error InconsistentArrayLength(String), + /// Error reconstructing data columns. + ReconstructFailed(String), + /// Kzg was not initialized with PeerDAS enabled. + DASContextUninitialized, } impl From for Error { @@ -29,32 +47,11 @@ impl From for Error { } } -pub const CELLS_PER_EXT_BLOB: usize = 128; - -// TODO(das): use proper crypto once ckzg merges das branch -#[allow(dead_code)] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct Cell { - bytes: [u8; 2048usize], -} - -impl Cell { - pub fn from_bytes(b: &[u8]) -> Result { - Ok(Self { - bytes: b - .try_into() - .map_err(|_| Error::Kzg(c_kzg::Error::MismatchLength("".to_owned())))?, - }) - } - pub fn into_inner(self) -> [u8; 2048usize] { - self.bytes - } -} - /// A wrapper over a kzg library that holds the trusted setup parameters. 
#[derive(Debug)] pub struct Kzg { trusted_setup: KzgSettings, + context: Option, } impl Kzg { @@ -65,9 +62,36 @@ impl Kzg { &trusted_setup.g1_points(), &trusted_setup.g2_points(), )?, + context: None, }) } + pub fn new_from_trusted_setup_das_enabled(trusted_setup: TrustedSetup) -> Result { + // Initialize the trusted setup using default parameters + // + // Note: One can also use `from_json` to initialize it from the consensus-specs + // json string. + let peerdas_trusted_setup = PeerDASTrustedSetup::from(&trusted_setup); + // Set the number of threads to be used + // + // we set it to 1 to match the c-kzg performance + const NUM_THREADS: usize = 1; + + let context = DASContext::with_threads(&peerdas_trusted_setup, NUM_THREADS); + + Ok(Self { + trusted_setup: KzgSettings::load_trusted_setup( + &trusted_setup.g1_points(), + &trusted_setup.g2_points(), + )?, + context: Some(context), + }) + } + + fn context(&self) -> Result<&DASContext, Error> { + self.context.as_ref().ok_or(Error::DASContextUninitialized) + } + /// Compute the kzg proof given a blob and its kzg commitment. pub fn compute_blob_kzg_proof( &self, @@ -167,21 +191,18 @@ impl Kzg { } /// Computes the cells and associated proofs for a given `blob` at index `index`. - #[allow(clippy::type_complexity)] pub fn compute_cells_and_proofs( &self, - _blob: &Blob, - ) -> Result< - ( - Box<[Cell; CELLS_PER_EXT_BLOB]>, - Box<[KzgProof; CELLS_PER_EXT_BLOB]>, - ), - Error, - > { - // TODO(das): use proper crypto once ckzg merges das branch - let cells = Box::new(core::array::from_fn(|_| Cell { bytes: [0u8; 2048] })); - let proofs = Box::new([KzgProof([0u8; BYTES_PER_PROOF]); CELLS_PER_EXT_BLOB]); - Ok((cells, proofs)) + blob: KzgBlobRef<'_>, + ) -> Result { + let (cells, proofs) = self + .context()? 
+ .compute_cells_and_kzg_proofs(blob) + .map_err(Error::PeerDASKZG)?; + + // Convert the proof type to a c-kzg proof type + let c_kzg_proof = proofs.map(KzgProof); + Ok((cells, c_kzg_proof)) } /// Verifies a batch of cell-proof-commitment triplets. @@ -191,35 +212,43 @@ impl Kzg { /// to the data column index. pub fn verify_cell_proof_batch( &self, - _cells: &[Cell], - _kzg_proofs: &[Bytes48], - _coordinates: &[(u64, u64)], - _kzg_commitments: &[Bytes48], + cells: &[CellRef<'_>], + kzg_proofs: &[Bytes48], + columns: Vec, + kzg_commitments: &[Bytes48], ) -> Result<(), Error> { - // TODO(das): use proper crypto once ckzg merges das branch - Ok(()) + let proofs: Vec<_> = kzg_proofs.iter().map(|proof| proof.as_ref()).collect(); + let commitments: Vec<_> = kzg_commitments + .iter() + .map(|commitment| commitment.as_ref()) + .collect(); + let verification_result = self.context()?.verify_cell_kzg_proof_batch( + commitments.to_vec(), + columns, + cells.to_vec(), + proofs.to_vec(), + ); + + // Modify the result so it matches roughly what the previous method was doing. + match verification_result { + Ok(_) => Ok(()), + Err(e) if e.invalid_proof() => Err(Error::KzgVerificationFailed), + Err(e) => Err(Error::PeerDASKZG(e)), + } } - pub fn cells_to_blob(&self, _cells: &[Cell; CELLS_PER_EXT_BLOB]) -> Result { - // TODO(das): use proper crypto once ckzg merges das branch - Ok(Blob::new([0u8; 131072usize])) - } - - pub fn recover_all_cells( + pub fn recover_cells_and_compute_kzg_proofs( &self, - _cell_ids: &[u64], - _cells: &[Cell], - ) -> Result, Error> { - // TODO(das): use proper crypto once ckzg merges das branch - let cells = Box::new(core::array::from_fn(|_| Cell { bytes: [0u8; 2048] })); - Ok(cells) - } -} - -impl TryFrom for Kzg { - type Error = Error; - - fn try_from(trusted_setup: TrustedSetup) -> Result { - Kzg::new_from_trusted_setup(trusted_setup) + cell_ids: &[u64], + cells: &[CellRef<'_>], + ) -> Result { + let (cells, proofs) = self + .context()? 
+ .recover_cells_and_proofs(cell_ids.to_vec(), cells.to_vec()) + .map_err(Error::PeerDASKZG)?; + + // Convert the proof type to a c-kzg proof type + let c_kzg_proof = proofs.map(KzgProof); + Ok((cells, c_kzg_proof)) } } diff --git a/crypto/kzg/src/trusted_setup.rs b/crypto/kzg/src/trusted_setup.rs index d930eabe22..6ddc33df5a 100644 --- a/crypto/kzg/src/trusted_setup.rs +++ b/crypto/kzg/src/trusted_setup.rs @@ -1,3 +1,4 @@ +use crate::PeerDASTrustedSetup; use c_kzg::{BYTES_PER_G1_POINT, BYTES_PER_G2_POINT}; use serde::{ de::{self, Deserializer, Visitor}, @@ -43,6 +44,28 @@ impl TrustedSetup { } } +impl From<&TrustedSetup> for PeerDASTrustedSetup { + fn from(trusted_setup: &TrustedSetup) -> Self { + Self { + g1_monomial: trusted_setup + .g1_monomial_points + .iter() + .map(|g1_point| format!("0x{}", hex::encode(g1_point.0))) + .collect::>(), + g1_lagrange: trusted_setup + .g1_points + .iter() + .map(|g1_point| format!("0x{}", hex::encode(g1_point.0))) + .collect::>(), + g2_monomial: trusted_setup + .g2_points + .iter() + .map(|g2_point| format!("0x{}", hex::encode(g2_point.0))) + .collect::>(), + } + } +} + impl Serialize for G1Point { fn serialize(&self, serializer: S) -> Result where From 22ccdb6c23965bdb231ec93fa7c709ae555ff42e Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Mon, 12 Aug 2024 17:16:21 -0700 Subject: [PATCH 33/43] Reuse password option prompts again on a wrong password (#4380) * Prompt for password if incorrect in import * lint and fmt * Use if instead of match * Fix issue raised by @chong-he * Merge branch 'unstable' into reuse-pw-break * Remove unused function --- account_manager/src/validator/import.rs | 50 ++++++++++++++++++------- 1 file changed, 36 insertions(+), 14 deletions(-) diff --git a/account_manager/src/validator/import.rs b/account_manager/src/validator/import.rs index a7c72679f7..8f04e9059a 100644 --- a/account_manager/src/validator/import.rs +++ b/account_manager/src/validator/import.rs @@ -178,7 +178,13 @@ pub fn 
cli_run(matches: &ArgMatches, validator_dir: PathBuf) -> Result<(), Strin let password_opt = loop { if let Some(password) = previous_password.clone() { eprintln!("Reuse previous password."); - break Some(password); + if check_password_on_keystore(&keystore, &password)? { + break Some(password); + } else { + eprintln!("Reused password incorrect. Retry!"); + previous_password = None; + continue; + } } eprintln!(); eprintln!("{}", PASSWORD_PROMPT); @@ -201,20 +207,12 @@ pub fn cli_run(matches: &ArgMatches, validator_dir: PathBuf) -> Result<(), Strin } }; - match keystore.decrypt_keypair(password.as_ref()) { - Ok(_) => { - eprintln!("Password is correct."); - eprintln!(); - sleep(Duration::from_secs(1)); // Provides nicer UX. - if reuse_password { - previous_password = Some(password.clone()); - } - break Some(password); + // Check if the password unlocks the keystore + if check_password_on_keystore(&keystore, &password)? { + if reuse_password { + previous_password = Some(password.clone()); } - Err(eth2_keystore::Error::InvalidPassword) => { - eprintln!("Invalid password"); - } - Err(e) => return Err(format!("Error whilst decrypting keypair: {:?}", e)), + break Some(password); } }; @@ -317,3 +315,27 @@ pub fn cli_run(matches: &ArgMatches, validator_dir: PathBuf) -> Result<(), Strin Ok(()) } + +/// Checks if the given password unlocks the keystore. +/// +/// Returns `Ok(true)` if password unlocks the keystore successfully. +/// Returns `Ok(false` if password is incorrect. +/// Otherwise, returns the keystore error. +fn check_password_on_keystore( + keystore: &Keystore, + password: &ZeroizeString, +) -> Result { + match keystore.decrypt_keypair(password.as_ref()) { + Ok(_) => { + eprintln!("Password is correct."); + eprintln!(); + sleep(Duration::from_secs(1)); // Provides nicer UX. 
+ Ok(true) + } + Err(eth2_keystore::Error::InvalidPassword) => { + eprintln!("Invalid password"); + Ok(false) + } + Err(e) => Err(format!("Error whilst decrypting keypair: {:?}", e)), + } +} From 18df7010c3befe4450703235f48e4e2c3358eef5 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Wed, 14 Aug 2024 14:36:24 +1000 Subject: [PATCH 34/43] Persist data columns to store (#6255) * Persist data columns (from das PR #5196) --- beacon_node/beacon_chain/src/beacon_chain.rs | 19 ++++----- .../src/data_availability_checker.rs | 10 ++++- .../overflow_lru_cache.rs | 16 +++++++- .../src/data_column_verification.rs | 4 ++ .../beacon_chain/src/historical_blocks.rs | 41 +++++++++++++++---- 5 files changed, 67 insertions(+), 23 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 3bf7528477..dcfc8e8412 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -3661,16 +3661,15 @@ impl BeaconChain { } } - if let Some(_data_columns) = data_columns { - // TODO(das): depends on https://github.com/sigp/lighthouse/pull/6073 - // if !data_columns.is_empty() { - // debug!( - // self.log, "Writing data_columns to store"; - // "block_root" => %block_root, - // "count" => data_columns.len(), - // ); - // ops.push(StoreOp::PutDataColumns(block_root, data_columns)); - // } + if let Some(data_columns) = data_columns { + if !data_columns.is_empty() { + debug!( + self.log, "Writing data_columns to store"; + "block_root" => %block_root, + "count" => data_columns.len(), + ); + ops.push(StoreOp::PutDataColumns(block_root, data_columns)); + } } let txn_lock = self.store.hot_db.begin_rw_transaction(); diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs index b4336a054e..2178798bb6 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker.rs +++ 
b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -108,14 +108,14 @@ impl DataAvailabilityChecker { let custody_column_count = custody_subnet_count.saturating_mul(spec.data_columns_per_subnet()); - let overflow_cache = DataAvailabilityCheckerInner::new( + let inner = DataAvailabilityCheckerInner::new( OVERFLOW_LRU_CAPACITY, store, custody_column_count, spec.clone(), )?; Ok(Self { - availability_cache: Arc::new(overflow_cache), + availability_cache: Arc::new(inner), slot_clock, kzg, log: log.clone(), @@ -123,6 +123,12 @@ impl DataAvailabilityChecker { }) } + pub fn get_custody_columns_count(&self) -> usize { + self.availability_cache + .custody_subnet_count() + .saturating_mul(self.spec.data_columns_per_subnet()) + } + /// Checks if the block root is currenlty in the availability cache awaiting import because /// of missing components. pub fn get_execution_valid_block( diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index 6c9964bdf8..34a4028273 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -9,7 +9,6 @@ use crate::data_column_verification::KzgVerifiedCustodyDataColumn; use crate::BeaconChainTypes; use lru::LruCache; use parking_lot::RwLock; -use ssz_derive::{Decode, Encode}; use ssz_types::{FixedVector, VariableList}; use std::num::NonZeroUsize; use std::sync::Arc; @@ -20,7 +19,7 @@ use types::{BlobSidecar, ChainSpec, Epoch, EthSpec, Hash256, SignedBeaconBlock}; /// /// The blobs are all gossip and kzg verified. /// The block has completed all verifications except the availability check. 
-#[derive(Encode, Decode, Clone)] +#[derive(Clone)] pub struct PendingComponents { pub block_root: Hash256, pub verified_blobs: FixedVector>, E::MaxBlobsPerBlock>, @@ -303,6 +302,15 @@ impl PendingComponents { }); } } + + if let Some(kzg_verified_data_column) = self.verified_data_columns.first() { + let epoch = kzg_verified_data_column + .as_data_column() + .slot() + .epoch(E::slots_per_epoch()); + return Some(epoch); + } + None }) } @@ -336,6 +344,10 @@ impl DataAvailabilityCheckerInner { }) } + pub fn custody_subnet_count(&self) -> usize { + self.custody_column_count + } + /// Returns true if the block root is known, without altering the LRU ordering pub fn get_execution_valid_block( &self, diff --git a/beacon_node/beacon_chain/src/data_column_verification.rs b/beacon_node/beacon_chain/src/data_column_verification.rs index da639e3695..af3fbab6ae 100644 --- a/beacon_node/beacon_chain/src/data_column_verification.rs +++ b/beacon_node/beacon_chain/src/data_column_verification.rs @@ -250,6 +250,10 @@ impl KzgVerifiedCustodyDataColumn { pub fn into_inner(self) -> Arc> { self.data } + + pub fn as_data_column(&self) -> &DataColumnSidecar { + &self.data + } } /// Complete kzg verification for a `DataColumnSidecar`. 
diff --git a/beacon_node/beacon_chain/src/historical_blocks.rs b/beacon_node/beacon_chain/src/historical_blocks.rs index aa2fac2afc..4b63ac6915 100644 --- a/beacon_node/beacon_chain/src/historical_blocks.rs +++ b/beacon_node/beacon_chain/src/historical_blocks.rs @@ -9,6 +9,7 @@ use state_processing::{ use std::borrow::Cow; use std::iter; use std::time::Duration; +use store::metadata::DataColumnInfo; use store::{chunked_vector::BlockRoots, AnchorInfo, BlobInfo, ChunkWriter, KeyValueStore}; use types::{Hash256, Slot}; @@ -66,6 +67,7 @@ impl BeaconChain { .get_anchor_info() .ok_or(HistoricalBlockError::NoAnchorInfo)?; let blob_info = self.store.get_blob_info(); + let data_column_info = self.store.get_data_column_info(); // Take all blocks with slots less than the oldest block slot. let num_relevant = blocks.partition_point(|available_block| { @@ -90,18 +92,27 @@ impl BeaconChain { return Ok(0); } - let n_blobs_lists_to_import = blocks_to_import + // Blobs are stored per block, and data columns are each stored individually + let n_blob_ops_per_block = if self.spec.is_peer_das_scheduled() { + self.data_availability_checker.get_custody_columns_count() + } else { + 1 + }; + + let blob_batch_size = blocks_to_import .iter() .filter(|available_block| available_block.blobs().is_some()) - .count(); + .count() + .saturating_mul(n_blob_ops_per_block); let mut expected_block_root = anchor_info.oldest_block_parent; let mut prev_block_slot = anchor_info.oldest_block_slot; let mut chunk_writer = ChunkWriter::::new(&self.store.cold_db, prev_block_slot.as_usize())?; let mut new_oldest_blob_slot = blob_info.oldest_blob_slot; + let mut new_oldest_data_column_slot = data_column_info.oldest_data_column_slot; - let mut blob_batch = Vec::with_capacity(n_blobs_lists_to_import); + let mut blob_batch = Vec::with_capacity(blob_batch_size); let mut cold_batch = Vec::with_capacity(blocks_to_import.len()); let mut hot_batch = Vec::with_capacity(blocks_to_import.len()); let mut signed_blocks = 
Vec::with_capacity(blocks_to_import.len()); @@ -129,11 +140,10 @@ impl BeaconChain { .blobs_as_kv_store_ops(&block_root, blobs, &mut blob_batch); } // Store the data columns too - if let Some(_data_columns) = maybe_data_columns { - // TODO(das): depends on https://github.com/sigp/lighthouse/pull/6073 - // new_oldest_data_column_slot = Some(block.slot()); - // self.store - // .data_columns_as_kv_store_ops(&block_root, data_columns, &mut blob_batch); + if let Some(data_columns) = maybe_data_columns { + new_oldest_data_column_slot = Some(block.slot()); + self.store + .data_columns_as_kv_store_ops(&block_root, data_columns, &mut blob_batch); } // Store block roots, including at all skip slots in the freezer DB. @@ -212,7 +222,7 @@ impl BeaconChain { self.store.hot_db.do_atomically(hot_batch)?; self.store.cold_db.do_atomically(cold_batch)?; - let mut anchor_and_blob_batch = Vec::with_capacity(2); + let mut anchor_and_blob_batch = Vec::with_capacity(3); // Update the blob info. if new_oldest_blob_slot != blob_info.oldest_blob_slot { @@ -228,6 +238,19 @@ impl BeaconChain { } } + // Update the data column info. + if new_oldest_data_column_slot != data_column_info.oldest_data_column_slot { + if let Some(oldest_data_column_slot) = new_oldest_data_column_slot { + let new_data_column_info = DataColumnInfo { + oldest_data_column_slot: Some(oldest_data_column_slot), + }; + anchor_and_blob_batch.push( + self.store + .compare_and_set_data_column_info(data_column_info, new_data_column_info)?, + ); + } + } + // Update the anchor. 
let new_anchor = AnchorInfo { oldest_block_slot: prev_block_slot, From a91f432a3ea5847d2bc3ee1255c705e5e9eea91e Mon Sep 17 00:00:00 2001 From: Braqzen <103777923+Braqzen@users.noreply.github.com> Date: Thu, 15 Aug 2024 13:36:09 +0100 Subject: [PATCH 35/43] Add Reth to execution clients suggested for installation (#6172) * Added Reth among execution clients * Update book/src/run_a_node.md Co-authored-by: chonghe <44791194+chong-he@users.noreply.github.com> * Merge branch 'unstable' into braqzen-6171 --- book/src/installation.md | 2 +- book/src/merge-migration.md | 7 ++++--- book/src/run_a_node.md | 1 + 3 files changed, 6 insertions(+), 4 deletions(-) diff --git a/book/src/installation.md b/book/src/installation.md index a0df394bd2..137a00b918 100644 --- a/book/src/installation.md +++ b/book/src/installation.md @@ -23,7 +23,7 @@ There are also community-maintained installation methods: Before [The Merge](https://ethereum.org/en/roadmap/merge/), Lighthouse was able to run on its own with low to mid-range consumer hardware, but would perform best when provided with ample system resources. -After [The Merge](https://ethereum.org/en/roadmap/merge/) on 15th September 2022, it is necessary to run Lighthouse together with an execution client ([Nethermind](https://nethermind.io/), [Besu](https://www.hyperledger.org/use/besu), [Erigon](https://github.com/ledgerwatch/erigon), [Geth](https://geth.ethereum.org/)). The following system requirements listed are therefore for running a Lighthouse beacon node combined with an execution client , and a validator client with a modest number of validator keys (less than 100): +After [The Merge](https://ethereum.org/en/roadmap/merge/) on 15th September 2022, it is necessary to run Lighthouse together with an execution client ([Nethermind](https://nethermind.io/), [Besu](https://www.hyperledger.org/use/besu), [Erigon](https://github.com/ledgerwatch/erigon), [Geth](https://geth.ethereum.org/), [Reth](https://github.com/paradigmxyz/reth)). 
The following system requirements listed are therefore for running a Lighthouse beacon node combined with an execution client , and a validator client with a modest number of validator keys (less than 100): - CPU: Quad-core AMD Ryzen, Intel Broadwell, ARMv8 or newer - Memory: 32 GB RAM* diff --git a/book/src/merge-migration.md b/book/src/merge-migration.md index 6de05cff2a..7a123254bf 100644 --- a/book/src/merge-migration.md +++ b/book/src/merge-migration.md @@ -7,7 +7,7 @@ There are two configuration changes required for a Lighthouse node to operate correctly throughout the merge: -1. You *must* run your own execution engine such as Besu, Erigon, Geth or Nethermind alongside Lighthouse. +1. You *must* run your own execution engine such as Besu, Erigon, Reth, Geth or Nethermind alongside Lighthouse. You *must* update your `lighthouse bn` configuration to connect to the execution engine using new flags which are documented on this page in the [Connecting to an execution engine](#connecting-to-an-execution-engine) section. @@ -65,6 +65,7 @@ Each execution engine has its own flags for configuring the engine API and JWT. 
the relevant page for your execution engine for the required flags: - [Geth: Connecting to Consensus Clients](https://geth.ethereum.org/docs/getting-started/consensus-clients) +- [Reth: Running the Consensus Layer](https://reth.rs/run/mainnet.html?highlight=consensus#running-the-consensus-layer) - [Nethermind: Running Nethermind Post Merge](https://docs.nethermind.io/nethermind/first-steps-with-nethermind/running-nethermind-post-merge) - [Besu: Prepare For The Merge](https://besu.hyperledger.org/en/stable/HowTo/Upgrade/Prepare-for-The-Merge/) - [Erigon: Beacon Chain (Consensus Layer)](https://github.com/ledgerwatch/erigon#beacon-chain-consensus-layer) @@ -123,7 +124,7 @@ a deprecation warning will be logged and Lighthouse *may* remove these flags in ### The relationship between `--eth1-endpoints` and `--execution-endpoint` Pre-merge users will be familiar with the `--eth1-endpoints` flag. This provides a list of Ethereum -"eth1" nodes (Besu, Erigon, Geth or Nethermind). Each beacon node (BN) can have multiple eth1 endpoints +"eth1" nodes (Besu, Erigon, Reth, Geth or Nethermind). Each beacon node (BN) can have multiple eth1 endpoints and each eth1 endpoint can have many BNs connection (many-to-many relationship). The eth1 node provides a source of truth for the [deposit contract](https://ethereum.org/en/staking/deposit-contract/) and beacon chain proposers include this @@ -134,7 +135,7 @@ achieve this. To progress through the Bellatrix upgrade nodes will need a *new* connection to an "eth1" node; `--execution-endpoint`. This connection has a few different properties. Firstly, the term "eth1 node" has been deprecated and replaced with "execution engine". 
Whilst "eth1 node" and "execution -engine" still refer to the same projects (Besu, Erigon, Geth or Nethermind), the former refers to the pre-merge +engine" still refer to the same projects (Besu, Erigon, Reth, Geth or Nethermind), the former refers to the pre-merge versions and the latter refers to post-merge versions. Secondly, there is a strict one-to-one relationship between Lighthouse and the execution engine; only one Lighthouse node can connect to one execution engine. Thirdly, it is impossible to fully verify the post-merge chain without an diff --git a/book/src/run_a_node.md b/book/src/run_a_node.md index 6c1f23d8e8..9b9e0cba8e 100644 --- a/book/src/run_a_node.md +++ b/book/src/run_a_node.md @@ -26,6 +26,7 @@ per beacon node. The reason for this is that the beacon node *controls* the exec - [Besu](https://besu.hyperledger.org/en/stable/public-networks/get-started/connect/mainnet/) - [Erigon](https://github.com/ledgerwatch/erigon#beacon-chain-consensus-layer) - [Geth](https://geth.ethereum.org/docs/getting-started/consensus-clients) +- [Reth](https://reth.rs/run/mainnet.html) > Note: Each execution engine has its own flags for configuring the engine API and JWT secret to connect to a beacon node. Please consult the relevant page of your execution engine as above for the required flags. 
From 5169e03721ac61a32f34da60afdbbbe33491a6cb Mon Sep 17 00:00:00 2001 From: Lion - dapplion <35266934+dapplion@users.noreply.github.com> Date: Thu, 15 Aug 2024 17:59:58 +0200 Subject: [PATCH 36/43] Add PeerDAS RPC import boilerplate (#6238) * Add PeerDAS RPC import boilerplate * revert refactor * Remove allow --- beacon_node/beacon_chain/src/beacon_chain.rs | 76 +++++++++++++++++++ .../src/data_availability_checker.rs | 35 ++++++++- .../overflow_lru_cache.rs | 2 - .../src/data_column_verification.rs | 4 +- beacon_node/beacon_processor/src/lib.rs | 17 ++++- .../src/network_beacon_processor/mod.rs | 24 ++++++ .../network_beacon_processor/sync_methods.rs | 56 +++++++++++++- 7 files changed, 205 insertions(+), 9 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index dcfc8e8412..5610e6aa63 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -3033,6 +3033,41 @@ impl BeaconChain { self.remove_notified(&block_root, r) } + /// Cache the columns in the processing cache, process it, then evict it from the cache if it was + /// imported or errors. + pub async fn process_rpc_custody_columns( + self: &Arc, + custody_columns: DataColumnSidecarList, + ) -> Result> { + let Ok((slot, block_root)) = custody_columns + .iter() + .map(|c| (c.slot(), c.block_root())) + .unique() + .exactly_one() + else { + return Err(BlockError::InternalError( + "Columns should be from the same block".to_string(), + )); + }; + + // If this block has already been imported to forkchoice it must have been available, so + // we don't need to process its columns again. 
+ if self + .canonical_head + .fork_choice_read_lock() + .contains_block(&block_root) + { + return Err(BlockError::BlockIsAlreadyKnown(block_root)); + } + + // TODO(das): custody column SSE event + + let r = self + .check_rpc_custody_columns_availability_and_import(slot, block_root, custody_columns) + .await; + self.remove_notified(&block_root, r) + } + /// Remove any block components from the *processing cache* if we no longer require them. If the /// block was imported full or erred, we no longer require them. fn remove_notified( @@ -3369,6 +3404,47 @@ impl BeaconChain { self.process_availability(slot, availability).await } + /// Checks if the provided columns can make any cached blocks available, and imports immediately + /// if so, otherwise caches the columns in the data availability checker. + async fn check_rpc_custody_columns_availability_and_import( + self: &Arc, + slot: Slot, + block_root: Hash256, + custody_columns: DataColumnSidecarList, + ) -> Result> { + // Need to scope this to ensure the lock is dropped before calling `process_availability` + // Even an explicit drop is not enough to convince the borrow checker. + { + let mut slashable_cache = self.observed_slashable.write(); + // Assumes all items in custody_columns are for the same block_root + if let Some(column) = custody_columns.first() { + let header = &column.signed_block_header; + if verify_header_signature::>(self, header).is_ok() { + slashable_cache + .observe_slashable( + header.message.slot, + header.message.proposer_index, + block_root, + ) + .map_err(|e| BlockError::BeaconChainError(e.into()))?; + if let Some(slasher) = self.slasher.as_ref() { + slasher.accept_block_header(header.clone()); + } + } + } + } + + // This slot value is purely informative for the consumers of + // `AvailabilityProcessingStatus::MissingComponents` to log an error with a slot. 
+ let availability = self.data_availability_checker.put_rpc_custody_columns( + block_root, + slot.epoch(T::EthSpec::slots_per_epoch()), + custody_columns, + )?; + + self.process_availability(slot, availability).await + } + /// Imports a fully available block. Otherwise, returns `AvailabilityProcessingStatus::MissingComponents` /// /// An error is returned if the block was unable to be imported. It may be partially imported diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs index 2178798bb6..c4ce93d266 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -23,7 +23,9 @@ mod error; mod overflow_lru_cache; mod state_lru_cache; -use crate::data_column_verification::{GossipVerifiedDataColumn, KzgVerifiedCustodyDataColumn}; +use crate::data_column_verification::{ + GossipVerifiedDataColumn, KzgVerifiedCustodyDataColumn, KzgVerifiedDataColumn, +}; pub use error::{Error as AvailabilityCheckError, ErrorCategory as AvailabilityCheckErrorCategory}; use types::non_zero_usize::new_non_zero_usize; @@ -187,6 +189,37 @@ impl DataAvailabilityChecker { .put_kzg_verified_blobs(block_root, epoch, verified_blobs) } + /// Put a list of custody columns received via RPC into the availability cache. This performs KZG + /// verification on the blobs in the list. 
+ pub fn put_rpc_custody_columns( + &self, + block_root: Hash256, + epoch: Epoch, + custody_columns: DataColumnSidecarList, + ) -> Result, AvailabilityCheckError> { + let Some(kzg) = self.kzg.as_ref() else { + return Err(AvailabilityCheckError::KzgNotInitialized); + }; + + // TODO(das): report which column is invalid for proper peer scoring + // TODO(das): batch KZG verification here + let verified_custody_columns = custody_columns + .iter() + .map(|column| { + Ok(KzgVerifiedCustodyDataColumn::from_asserted_custody( + KzgVerifiedDataColumn::new(column.clone(), kzg) + .map_err(AvailabilityCheckError::Kzg)?, + )) + }) + .collect::, AvailabilityCheckError>>()?; + + self.availability_cache.put_kzg_verified_data_columns( + block_root, + epoch, + verified_custody_columns, + ) + } + /// Check if we've cached other blobs for this block. If it completes a set and we also /// have a block cached, return the `Availability` variant triggering block import. /// Otherwise cache the blob sidecar. diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index 34a4028273..a85ef6476f 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -442,8 +442,6 @@ impl DataAvailabilityCheckerInner { } } - // TODO(das): rpc code paths to be implemented. 
- #[allow(dead_code)] pub fn put_kzg_verified_data_columns< I: IntoIterator>, >( diff --git a/beacon_node/beacon_chain/src/data_column_verification.rs b/beacon_node/beacon_chain/src/data_column_verification.rs index af3fbab6ae..6a4efd605d 100644 --- a/beacon_node/beacon_chain/src/data_column_verification.rs +++ b/beacon_node/beacon_chain/src/data_column_verification.rs @@ -177,7 +177,7 @@ impl GossipVerifiedDataColumn { pub fn id(&self) -> DataColumnIdentifier { DataColumnIdentifier { block_root: self.block_root, - index: self.data_column.data_column_index(), + index: self.data_column.index(), } } @@ -221,7 +221,7 @@ impl KzgVerifiedDataColumn { self.data.clone() } - pub fn data_column_index(&self) -> u64 { + pub fn index(&self) -> ColumnIndex { self.data.index } } diff --git a/beacon_node/beacon_processor/src/lib.rs b/beacon_node/beacon_processor/src/lib.rs index 68c33e99ba..6ce3b64acf 100644 --- a/beacon_node/beacon_processor/src/lib.rs +++ b/beacon_node/beacon_processor/src/lib.rs @@ -108,6 +108,7 @@ pub struct BeaconProcessorQueueLengths { unknown_light_client_update_queue: usize, rpc_block_queue: usize, rpc_blob_queue: usize, + rpc_custody_column_queue: usize, chain_segment_queue: usize, backfill_chain_segment: usize, gossip_block_queue: usize, @@ -163,6 +164,7 @@ impl BeaconProcessorQueueLengths { unknown_light_client_update_queue: 128, rpc_block_queue: 1024, rpc_blob_queue: 1024, + rpc_custody_column_queue: 1024, chain_segment_queue: 64, backfill_chain_segment: 64, gossip_block_queue: 1024, @@ -228,6 +230,7 @@ pub const GOSSIP_LIGHT_CLIENT_OPTIMISTIC_UPDATE: &str = "light_client_optimistic pub const RPC_BLOCK: &str = "rpc_block"; pub const IGNORED_RPC_BLOCK: &str = "ignored_rpc_block"; pub const RPC_BLOBS: &str = "rpc_blob"; +pub const RPC_CUSTODY_COLUMN: &str = "rpc_custody_column"; pub const CHAIN_SEGMENT: &str = "chain_segment"; pub const CHAIN_SEGMENT_BACKFILL: &str = "chain_segment_backfill"; pub const STATUS_PROCESSING: &str = "status_processing"; @@ 
-606,6 +609,7 @@ pub enum Work { RpcBlobs { process_fn: AsyncFn, }, + RpcCustodyColumn(AsyncFn), IgnoredRpcBlock { process_fn: BlockingFn, }, @@ -653,6 +657,7 @@ impl Work { Work::GossipLightClientOptimisticUpdate(_) => GOSSIP_LIGHT_CLIENT_OPTIMISTIC_UPDATE, Work::RpcBlock { .. } => RPC_BLOCK, Work::RpcBlobs { .. } => RPC_BLOBS, + Work::RpcCustodyColumn { .. } => RPC_CUSTODY_COLUMN, Work::IgnoredRpcBlock { .. } => IGNORED_RPC_BLOCK, Work::ChainSegment { .. } => CHAIN_SEGMENT, Work::ChainSegmentBackfill(_) => CHAIN_SEGMENT_BACKFILL, @@ -815,6 +820,7 @@ impl BeaconProcessor { // Using a FIFO queue since blocks need to be imported sequentially. let mut rpc_block_queue = FifoQueue::new(queue_lengths.rpc_block_queue); let mut rpc_blob_queue = FifoQueue::new(queue_lengths.rpc_blob_queue); + let mut rpc_custody_column_queue = FifoQueue::new(queue_lengths.rpc_custody_column_queue); let mut chain_segment_queue = FifoQueue::new(queue_lengths.chain_segment_queue); let mut backfill_chain_segment = FifoQueue::new(queue_lengths.backfill_chain_segment); let mut gossip_block_queue = FifoQueue::new(queue_lengths.gossip_block_queue); @@ -970,6 +976,8 @@ impl BeaconProcessor { self.spawn_worker(item, idle_tx); } else if let Some(item) = rpc_blob_queue.pop() { self.spawn_worker(item, idle_tx); + } else if let Some(item) = rpc_custody_column_queue.pop() { + self.spawn_worker(item, idle_tx); // Check delayed blocks before gossip blocks, the gossip blocks might rely // on the delayed ones. } else if let Some(item) = delayed_block_queue.pop() { @@ -1262,6 +1270,9 @@ impl BeaconProcessor { rpc_block_queue.push(work, work_id, &self.log) } Work::RpcBlobs { .. } => rpc_blob_queue.push(work, work_id, &self.log), + Work::RpcCustodyColumn { .. } => { + rpc_custody_column_queue.push(work, work_id, &self.log) + } Work::ChainSegment { .. 
} => { chain_segment_queue.push(work, work_id, &self.log) } @@ -1497,9 +1508,9 @@ impl BeaconProcessor { beacon_block_root: _, process_fn, } => task_spawner.spawn_async(process_fn), - Work::RpcBlock { process_fn } | Work::RpcBlobs { process_fn } => { - task_spawner.spawn_async(process_fn) - } + Work::RpcBlock { process_fn } + | Work::RpcBlobs { process_fn } + | Work::RpcCustodyColumn(process_fn) => task_spawner.spawn_async(process_fn), Work::IgnoredRpcBlock { process_fn } => task_spawner.spawn_blocking(process_fn), Work::GossipBlock(work) | Work::GossipBlobSidecar(work) diff --git a/beacon_node/network/src/network_beacon_processor/mod.rs b/beacon_node/network/src/network_beacon_processor/mod.rs index 9fb14fdcb8..cb21b6dfb5 100644 --- a/beacon_node/network/src/network_beacon_processor/mod.rs +++ b/beacon_node/network/src/network_beacon_processor/mod.rs @@ -476,6 +476,30 @@ impl NetworkBeaconProcessor { }) } + /// Create a new `Work` event for some custody columns. `process_rpc_custody_columns` reports + /// the result back to sync. + pub fn send_rpc_custody_columns( + self: &Arc, + block_root: Hash256, + custody_columns: DataColumnSidecarList, + seen_timestamp: Duration, + process_type: BlockProcessType, + ) -> Result<(), Error> { + let s = self.clone(); + self.try_send(BeaconWorkEvent { + drop_during_sync: false, + work: Work::RpcCustodyColumn(Box::pin(async move { + s.process_rpc_custody_columns( + block_root, + custody_columns, + seen_timestamp, + process_type, + ) + .await; + })), + }) + } + /// Create a new work event to import `blocks` as a beacon chain segment. 
pub fn send_chain_segment( self: &Arc, diff --git a/beacon_node/network/src/network_beacon_processor/sync_methods.rs b/beacon_node/network/src/network_beacon_processor/sync_methods.rs index 68bd674514..495d1cd92b 100644 --- a/beacon_node/network/src/network_beacon_processor/sync_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/sync_methods.rs @@ -24,7 +24,7 @@ use store::KzgCommitment; use tokio::sync::mpsc; use types::beacon_block_body::format_kzg_commitments; use types::blob_sidecar::FixedBlobSidecarList; -use types::BlockImportSource; +use types::{BlockImportSource, DataColumnSidecarList}; use types::{Epoch, Hash256}; /// Id associated to a batch processing request, either a sync batch or a parent lookup. @@ -307,6 +307,60 @@ impl NetworkBeaconProcessor { }); } + pub async fn process_rpc_custody_columns( + self: Arc>, + block_root: Hash256, + custody_columns: DataColumnSidecarList, + _seen_timestamp: Duration, + process_type: BlockProcessType, + ) { + let result = self + .chain + .process_rpc_custody_columns(custody_columns) + .await; + + match &result { + Ok(availability) => match availability { + AvailabilityProcessingStatus::Imported(hash) => { + debug!( + self.log, + "Block components retrieved"; + "result" => "imported block and custody columns", + "block_hash" => %hash, + ); + self.chain.recompute_head_at_current_slot().await; + } + AvailabilityProcessingStatus::MissingComponents(_, _) => { + debug!( + self.log, + "Missing components over rpc"; + "block_hash" => %block_root, + ); + } + }, + Err(BlockError::BlockIsAlreadyKnown(_)) => { + debug!( + self.log, + "Custody columns have already been imported"; + "block_hash" => %block_root, + ); + } + Err(e) => { + warn!( + self.log, + "Error when importing rpc custody columns"; + "error" => ?e, + "block_hash" => %block_root, + ); + } + } + + self.send_sync_message(SyncMessage::BlockComponentProcessed { + process_type, + result: result.into(), + }); + } + /// Attempt to import the chain segment 
(`blocks`) to the beacon chain, informing the sync /// thread if more blocks are needed to process it. pub async fn process_chain_segment( From 9fc0a662c3d6cfb7994a064de8a0e1785b108c94 Mon Sep 17 00:00:00 2001 From: Lion - dapplion <35266934+dapplion@users.noreply.github.com> Date: Thu, 15 Aug 2024 18:26:39 +0200 Subject: [PATCH 37/43] Add sync lookup custody request state (#6257) * Add sync lookup custody request state * Review PR * clippy * Merge branch 'unstable' of https://github.com/sigp/lighthouse into peerdas-network-lookup --- beacon_node/beacon_chain/src/beacon_chain.rs | 18 +++ .../src/data_availability_checker.rs | 9 ++ .../overflow_lru_cache.rs | 10 +- .../lighthouse_network/src/types/globals.rs | 9 +- .../network/src/sync/block_lookups/common.rs | 66 +++++++++-- .../network/src/sync/block_lookups/mod.rs | 5 +- .../sync/block_lookups/single_block_lookup.rs | 49 +++++--- .../network/src/sync/block_lookups/tests.rs | 2 + .../network/src/sync/network_context.rs | 105 +++++++++++++++++- 9 files changed, 239 insertions(+), 34 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 5610e6aa63..427866ba10 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -6852,6 +6852,24 @@ impl BeaconChain { self.data_availability_checker.data_availability_boundary() } + /// Returns true if epoch is within the data availability boundary + pub fn da_check_required_for_epoch(&self, epoch: Epoch) -> bool { + self.data_availability_checker + .da_check_required_for_epoch(epoch) + } + + /// Returns true if we should fetch blobs for this block + pub fn should_fetch_blobs(&self, block_epoch: Epoch) -> bool { + self.da_check_required_for_epoch(block_epoch) + && !self.spec.is_peer_das_enabled_for_epoch(block_epoch) + } + + /// Returns true if we should fetch custody columns for this block + pub fn should_fetch_custody_columns(&self, block_epoch: Epoch) -> 
bool { + self.da_check_required_for_epoch(block_epoch) + && self.spec.is_peer_das_enabled_for_epoch(block_epoch) + } + pub fn logger(&self) -> &Logger { &self.log } diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs index c4ce93d266..bf9f94b986 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -156,6 +156,15 @@ impl DataAvailabilityChecker { }) } + /// Return the set of imported custody column indexes for `block_root`. Returns None if there is + /// no block component for `block_root`. + pub fn imported_custody_column_indexes(&self, block_root: &Hash256) -> Option> { + self.availability_cache + .peek_pending_components(block_root, |components| { + components.map(|components| components.get_cached_data_columns_indices()) + }) + } + /// Get a blob from the availability cache. pub fn get_blob( &self, diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index a85ef6476f..7108f7153c 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -13,7 +13,7 @@ use ssz_types::{FixedVector, VariableList}; use std::num::NonZeroUsize; use std::sync::Arc; use types::blob_sidecar::BlobIdentifier; -use types::{BlobSidecar, ChainSpec, Epoch, EthSpec, Hash256, SignedBeaconBlock}; +use types::{BlobSidecar, ChainSpec, ColumnIndex, Epoch, EthSpec, Hash256, SignedBeaconBlock}; /// This represents the components of a partially available block /// @@ -108,6 +108,14 @@ impl PendingComponents { self.verified_data_columns.len() } + /// Returns the indices of cached custody columns + pub fn get_cached_data_columns_indices(&self) -> Vec { + self.verified_data_columns + .iter() + .map(|d| d.index()) + 
.collect() + } + /// Inserts a block into the cache. pub fn insert_block(&mut self, block: DietAvailabilityPendingExecutedBlock) { *self.get_cached_block_mut() = Some(block) diff --git a/beacon_node/lighthouse_network/src/types/globals.rs b/beacon_node/lighthouse_network/src/types/globals.rs index f9ed2c9f74..1c7c7f07d0 100644 --- a/beacon_node/lighthouse_network/src/types/globals.rs +++ b/beacon_node/lighthouse_network/src/types/globals.rs @@ -7,7 +7,7 @@ use crate::EnrExt; use crate::{Enr, GossipTopic, Multiaddr, PeerId}; use parking_lot::RwLock; use std::collections::HashSet; -use types::EthSpec; +use types::{ChainSpec, ColumnIndex, EthSpec}; pub struct NetworkGlobals { /// The current local ENR. @@ -110,6 +110,13 @@ impl NetworkGlobals { std::mem::replace(&mut *self.sync_state.write(), new_state) } + /// Compute custody data columns the node is assigned to custody. + pub fn custody_columns(&self, _spec: &ChainSpec) -> Vec { + let _enr = self.local_enr(); + //TODO(das): implement ENR changes + vec![] + } + /// TESTING ONLY. Build a dummy NetworkGlobals instance. 
pub fn new_test_globals(trusted_peers: Vec, log: &slog::Logger) -> NetworkGlobals { use crate::CombinedKeyExt; diff --git a/beacon_node/network/src/sync/block_lookups/common.rs b/beacon_node/network/src/sync/block_lookups/common.rs index e94e9589c0..a7be72556e 100644 --- a/beacon_node/network/src/sync/block_lookups/common.rs +++ b/beacon_node/network/src/sync/block_lookups/common.rs @@ -1,14 +1,16 @@ use crate::sync::block_lookups::single_block_lookup::{ LookupRequestError, SingleBlockLookup, SingleLookupRequestState, }; -use crate::sync::block_lookups::{BlobRequestState, BlockRequestState, PeerId}; +use crate::sync::block_lookups::{ + BlobRequestState, BlockRequestState, CustodyRequestState, PeerId, +}; use crate::sync::network_context::{LookupRequestResult, SyncNetworkContext}; use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::BeaconChainTypes; use lighthouse_network::service::api_types::Id; use std::sync::Arc; use types::blob_sidecar::FixedBlobSidecarList; -use types::SignedBeaconBlock; +use types::{DataColumnSidecarList, SignedBeaconBlock}; use super::single_block_lookup::DownloadResult; use super::SingleLookupId; @@ -17,6 +19,7 @@ use super::SingleLookupId; pub enum ResponseType { Block, Blob, + CustodyColumn, } /// This trait unifies common single block lookup functionality across blocks and blobs. 
This @@ -38,7 +41,7 @@ pub trait RequestState { &self, id: Id, peer_id: PeerId, - downloaded_block_expected_blobs: Option, + downloaded_block: Option>>, cx: &mut SyncNetworkContext, ) -> Result; @@ -73,7 +76,7 @@ impl RequestState for BlockRequestState { &self, id: SingleLookupId, peer_id: PeerId, - _: Option, + _: Option>>, cx: &mut SyncNetworkContext, ) -> Result { cx.block_lookup_request(id, peer_id, self.requested_block_root) @@ -121,16 +124,11 @@ impl RequestState for BlobRequestState { &self, id: Id, peer_id: PeerId, - downloaded_block_expected_blobs: Option, + downloaded_block: Option>>, cx: &mut SyncNetworkContext, ) -> Result { - cx.blob_lookup_request( - id, - peer_id, - self.block_root, - downloaded_block_expected_blobs, - ) - .map_err(LookupRequestError::SendFailedNetwork) + cx.blob_lookup_request(id, peer_id, self.block_root, downloaded_block) + .map_err(LookupRequestError::SendFailedNetwork) } fn send_for_processing( @@ -161,3 +159,47 @@ impl RequestState for BlobRequestState { &mut self.state } } + +impl RequestState for CustodyRequestState { + type VerifiedResponseType = DataColumnSidecarList; + + fn make_request( + &self, + id: Id, + // TODO(das): consider selecting peers that have custody but are in this set + _peer_id: PeerId, + downloaded_block: Option>>, + cx: &mut SyncNetworkContext, + ) -> Result { + cx.custody_lookup_request(id, self.block_root, downloaded_block) + .map_err(LookupRequestError::SendFailedNetwork) + } + + fn send_for_processing( + id: Id, + download_result: DownloadResult, + cx: &SyncNetworkContext, + ) -> Result<(), LookupRequestError> { + let DownloadResult { + value, + block_root, + seen_timestamp, + .. 
+ } = download_result; + cx.send_custody_columns_for_processing(id, block_root, value, seen_timestamp) + .map_err(LookupRequestError::SendFailedProcessor) + } + + fn response_type() -> ResponseType { + ResponseType::CustodyColumn + } + fn request_state_mut(request: &mut SingleBlockLookup) -> &mut Self { + &mut request.custody_request_state + } + fn get_state(&self) -> &SingleLookupRequestState { + &self.state + } + fn get_state_mut(&mut self) -> &mut SingleLookupRequestState { + &mut self.state + } +} diff --git a/beacon_node/network/src/sync/block_lookups/mod.rs b/beacon_node/network/src/sync/block_lookups/mod.rs index 3b93b8072c..7194faa286 100644 --- a/beacon_node/network/src/sync/block_lookups/mod.rs +++ b/beacon_node/network/src/sync/block_lookups/mod.rs @@ -36,7 +36,7 @@ use fnv::FnvHashMap; use lighthouse_network::service::api_types::SingleLookupReqId; use lighthouse_network::{PeerAction, PeerId}; use lru_cache::LRUTimeCache; -pub use single_block_lookup::{BlobRequestState, BlockRequestState}; +pub use single_block_lookup::{BlobRequestState, BlockRequestState, CustodyRequestState}; use slog::{debug, error, warn, Logger}; use std::collections::hash_map::Entry; use std::sync::Arc; @@ -527,7 +527,7 @@ impl BlockLookups { // if both components have been processed. request_state.on_processing_success()?; - if lookup.both_components_processed() { + if lookup.all_components_processed() { // We don't request for other block components until being sure that the block has // data. If we request blobs / columns to a peer we are sure those must exist. 
// Therefore if all components are processed and we still receive `MissingComponents` @@ -599,6 +599,7 @@ impl BlockLookups { match R::response_type() { ResponseType::Block => "lookup_block_processing_failure", ResponseType::Blob => "lookup_blobs_processing_failure", + ResponseType::CustodyColumn => "lookup_custody_processing_failure", }, ); diff --git a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs index 0466636fb7..b9cd4e3e03 100644 --- a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs +++ b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs @@ -15,7 +15,7 @@ use std::time::{Duration, Instant}; use store::Hash256; use strum::IntoStaticStr; use types::blob_sidecar::FixedBlobSidecarList; -use types::{EthSpec, SignedBeaconBlock}; +use types::{DataColumnSidecarList, EthSpec, SignedBeaconBlock}; // Dedicated enum for LookupResult to force its usage #[must_use = "LookupResult must be handled with on_lookup_result"] @@ -63,6 +63,7 @@ pub struct SingleBlockLookup { pub id: Id, pub block_request_state: BlockRequestState, pub blob_request_state: BlobRequestState, + pub custody_request_state: CustodyRequestState, /// Peers that claim to have imported this set of block components #[derivative(Debug(format_with = "fmt_peer_set_as_len"))] peers: HashSet, @@ -82,6 +83,7 @@ impl SingleBlockLookup { id, block_request_state: BlockRequestState::new(requested_block_root), blob_request_state: BlobRequestState::new(requested_block_root), + custody_request_state: CustodyRequestState::new(requested_block_root), peers: HashSet::from_iter(peers.iter().copied()), block_root: requested_block_root, awaiting_parent, @@ -138,9 +140,10 @@ impl SingleBlockLookup { } /// Returns true if the block has already been downloaded. 
- pub fn both_components_processed(&self) -> bool { + pub fn all_components_processed(&self) -> bool { self.block_request_state.state.is_processed() && self.blob_request_state.state.is_processed() + && self.custody_request_state.state.is_processed() } /// Returns true if this request is expecting some event to make progress @@ -148,6 +151,7 @@ impl SingleBlockLookup { self.awaiting_parent.is_some() || self.block_request_state.state.is_awaiting_event() || self.blob_request_state.state.is_awaiting_event() + || self.custody_request_state.state.is_awaiting_event() } /// Makes progress on all requests of this lookup. Any error is not recoverable and must result @@ -159,13 +163,12 @@ impl SingleBlockLookup { // TODO: Check what's necessary to download, specially for blobs self.continue_request::>(cx)?; self.continue_request::>(cx)?; + self.continue_request::>(cx)?; // If all components of this lookup are already processed, there will be no future events // that can make progress so it must be dropped. Consider the lookup completed. // This case can happen if we receive the components from gossip during a retry. - if self.block_request_state.state.is_processed() - && self.blob_request_state.state.is_processed() - { + if self.all_components_processed() { Ok(LookupResult::Completed) } else { Ok(LookupResult::Pending) @@ -179,11 +182,11 @@ impl SingleBlockLookup { ) -> Result<(), LookupRequestError> { let id = self.id; let awaiting_parent = self.awaiting_parent.is_some(); - let downloaded_block_expected_blobs = self + let downloaded_block = self .block_request_state .state .peek_downloaded_data() - .map(|block| block.num_expected_blobs()); + .cloned(); let block_is_processed = self.block_request_state.state.is_processed(); let request = R::request_state_mut(self); @@ -210,7 +213,7 @@ impl SingleBlockLookup { }; let request = R::request_state_mut(self); - match request.make_request(id, peer_id, downloaded_block_expected_blobs, cx)? 
{ + match request.make_request(id, peer_id, downloaded_block, cx)? { LookupRequestResult::RequestSent(req_id) => { // Lookup sync event safety: If make_request returns `RequestSent`, we are // guaranteed that `BlockLookups::on_download_response` will be called exactly @@ -289,6 +292,24 @@ impl SingleBlockLookup { } } +/// The state of the block request component of a `SingleBlockLookup`. +#[derive(Derivative)] +#[derivative(Debug)] +pub struct BlockRequestState { + #[derivative(Debug = "ignore")] + pub requested_block_root: Hash256, + pub state: SingleLookupRequestState>>, +} + +impl BlockRequestState { + pub fn new(block_root: Hash256) -> Self { + Self { + requested_block_root: block_root, + state: SingleLookupRequestState::new(), + } + } +} + /// The state of the blob request component of a `SingleBlockLookup`. #[derive(Derivative)] #[derivative(Debug)] @@ -307,19 +328,19 @@ impl BlobRequestState { } } -/// The state of the block request component of a `SingleBlockLookup`. +/// The state of the custody request component of a `SingleBlockLookup`. 
#[derive(Derivative)] #[derivative(Debug)] -pub struct BlockRequestState { +pub struct CustodyRequestState { #[derivative(Debug = "ignore")] - pub requested_block_root: Hash256, - pub state: SingleLookupRequestState>>, + pub block_root: Hash256, + pub state: SingleLookupRequestState>, } -impl BlockRequestState { +impl CustodyRequestState { pub fn new(block_root: Hash256) -> Self { Self { - requested_block_root: block_root, + block_root, state: SingleLookupRequestState::new(), } } diff --git a/beacon_node/network/src/sync/block_lookups/tests.rs b/beacon_node/network/src/sync/block_lookups/tests.rs index ef2822fe56..fcd0d768b7 100644 --- a/beacon_node/network/src/sync/block_lookups/tests.rs +++ b/beacon_node/network/src/sync/block_lookups/tests.rs @@ -723,6 +723,8 @@ impl TestRig { (ev.work_type() == beacon_processor::RPC_BLOBS).then_some(()) }) .unwrap_or_else(|e| panic!("Expected blobs work event: {e}")), + // TODO(das): remove todo when adding tests for custody sync lookup + ResponseType::CustodyColumn => todo!(), } } diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index df8be9f6d5..7bcc8ae9f2 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -25,7 +25,7 @@ use std::sync::Arc; use std::time::Duration; use tokio::sync::mpsc; use types::blob_sidecar::FixedBlobSidecarList; -use types::{BlobSidecar, EthSpec, Hash256, SignedBeaconBlock}; +use types::{BlobSidecar, DataColumnSidecarList, EthSpec, Hash256, SignedBeaconBlock}; mod requests; @@ -447,16 +447,16 @@ impl SyncNetworkContext { lookup_id: SingleLookupId, peer_id: PeerId, block_root: Hash256, - downloaded_block_expected_blobs: Option, + downloaded_block: Option>>, ) -> Result { - let Some(expected_blobs) = downloaded_block_expected_blobs.or_else(|| { + let Some(block) = downloaded_block.or_else(|| { // If the block is already being processed or fully validated, retrieve how many blobs 
// it expects. Consider any stage of the block. If the block root has been validated, we // can assert that this is the correct value of `blob_kzg_commitments_count`. match self.chain.get_block_process_status(&block_root) { BlockProcessStatus::Unknown => None, BlockProcessStatus::NotValidated(block) - | BlockProcessStatus::ExecutionValidated(block) => Some(block.num_expected_blobs()), + | BlockProcessStatus::ExecutionValidated(block) => Some(block.clone()), } }) else { // Wait to download the block before downloading blobs. Then we can be sure that the @@ -473,6 +473,13 @@ impl SyncNetworkContext { // get dropped as completed. return Ok(LookupRequestResult::Pending("waiting for block download")); }; + let expected_blobs = block.num_expected_blobs(); + let block_epoch = block.slot().epoch(T::EthSpec::slots_per_epoch()); + + // Check if we are into peerdas + if !self.chain.should_fetch_blobs(block_epoch) { + return Ok(LookupRequestResult::NoRequestNeeded); + } let imported_blob_indexes = self .chain @@ -522,6 +529,76 @@ impl SyncNetworkContext { Ok(LookupRequestResult::RequestSent(req_id)) } + pub fn custody_lookup_request( + &mut self, + lookup_id: SingleLookupId, + block_root: Hash256, + downloaded_block: Option>>, + ) -> Result { + let Some(block) = + downloaded_block.or_else(|| match self.chain.get_block_process_status(&block_root) { + BlockProcessStatus::Unknown => None, + BlockProcessStatus::NotValidated(block) + | BlockProcessStatus::ExecutionValidated(block) => Some(block.clone()), + }) + else { + // Wait to download the block before downloading columns. Then we can be sure that the + // block has data, so there's no need to do "blind" requests for all possible columns and + // latter handle the case where if the peer sent no columns, penalize. + // - if `downloaded_block_expected_blobs` is Some = block is downloading or processing. + // - if `num_expected_blobs` returns Some = block is processed. 
+ return Ok(LookupRequestResult::Pending("waiting for block download")); + }; + let expected_blobs = block.num_expected_blobs(); + let block_epoch = block.slot().epoch(T::EthSpec::slots_per_epoch()); + + // Check if we are into peerdas + if !self.chain.should_fetch_custody_columns(block_epoch) { + return Ok(LookupRequestResult::NoRequestNeeded); + } + + // No data required for this block + if expected_blobs == 0 { + return Ok(LookupRequestResult::NoRequestNeeded); + } + + let custody_indexes_imported = self + .chain + .data_availability_checker + .imported_custody_column_indexes(&block_root) + .unwrap_or_default(); + + // TODO(das): figure out how to pass block.slot if we end up doing rotation + let custody_indexes_duty = self.network_globals().custody_columns(&self.chain.spec); + + // Include only the blob indexes not yet imported (received through gossip) + let custody_indexes_to_fetch = custody_indexes_duty + .into_iter() + .filter(|index| !custody_indexes_imported.contains(index)) + .collect::>(); + + if custody_indexes_to_fetch.is_empty() { + // No indexes required, do not issue any request + return Ok(LookupRequestResult::NoRequestNeeded); + } + + let req_id = self.next_id(); + let id = SingleLookupReqId { lookup_id, req_id }; + + debug!( + self.log, + "Starting custody columns request"; + "block_root" => ?block_root, + "indices" => ?custody_indexes_to_fetch, + "id" => ?id + ); + + // TODO(das): Issue a custody request with `id` for the set of columns + // `custody_indexes_to_fetch` and block `block_root`. 
+ + Ok(LookupRequestResult::RequestSent(req_id)) + } + pub fn is_execution_engine_online(&self) -> bool { self.execution_engine_state == EngineState::Online } @@ -776,6 +853,26 @@ impl SyncNetworkContext { }) } + pub fn send_custody_columns_for_processing( + &self, + id: Id, + block_root: Hash256, + _custody_columns: DataColumnSidecarList, + _duration: Duration, + ) -> Result<(), SendErrorProcessor> { + let _beacon_processor = self + .beacon_processor_if_enabled() + .ok_or(SendErrorProcessor::ProcessorNotAvailable)?; + + debug!(self.log, "Sending custody columns for processing"; "block" => ?block_root, "id" => id); + + // Lookup sync event safety: If `beacon_processor.send_rpc_custody_columns` returns Ok() sync + // must receive a single `SyncMessage::BlockComponentProcessed` event with this process type + // + // TODO(das): After merging processor import PR, actually send columns to beacon processor. + Ok(()) + } + pub(crate) fn register_metrics(&self) { metrics::set_gauge_vec( &metrics::SYNC_ACTIVE_NETWORK_REQUESTS, From 6566705505b417965dbaeafe107367cdb45bdf08 Mon Sep 17 00:00:00 2001 From: Lion - dapplion <35266934+dapplion@users.noreply.github.com> Date: Thu, 15 Aug 2024 23:15:09 +0200 Subject: [PATCH 38/43] Implement PeerDAS RPC handlers (#6237) * Implement PeerDAS RPC handlers * use terminate_response_stream * Merge branch 'unstable' of https://github.com/sigp/lighthouse into peerdas-network-rpc-handler * cargo fmt --- beacon_node/beacon_chain/src/beacon_chain.rs | 31 +++ .../src/data_availability_checker.rs | 12 +- .../overflow_lru_cache.rs | 21 +- .../src/data_column_verification.rs | 3 + .../lighthouse_network/src/rpc/methods.rs | 12 + .../network_beacon_processor/rpc_methods.rs | 245 +++++++++++++++++- 6 files changed, 313 insertions(+), 11 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 427866ba10..b6de128dbb 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ 
b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -1155,6 +1155,25 @@ impl BeaconChain { .map_or_else(|| self.get_blobs(block_root), Ok) } + pub fn get_data_column_checking_all_caches( + &self, + block_root: Hash256, + index: ColumnIndex, + ) -> Result>>, Error> { + if let Some(column) = self + .data_availability_checker + .get_data_column(&DataColumnIdentifier { block_root, index })? + { + return Ok(Some(column)); + } + + if let Some(columns) = self.early_attester_cache.get_data_columns(block_root) { + return Ok(columns.iter().find(|c| c.index == index).cloned()); + } + + self.get_data_column(&block_root, &index) + } + /// Returns the block at the given root, if any. /// /// ## Errors @@ -1230,6 +1249,18 @@ impl BeaconChain { } } + /// Returns the data columns at the given root, if any. + /// + /// ## Errors + /// May return a database error. + pub fn get_data_column( + &self, + block_root: &Hash256, + column_index: &ColumnIndex, + ) -> Result>>, Error> { + Ok(self.store.get_data_column(block_root, column_index)?) + } + pub fn get_blinded_block( &self, block_root: &Hash256, diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs index bf9f94b986..1bfe377ad0 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -15,8 +15,8 @@ use std::time::Duration; use task_executor::TaskExecutor; use types::blob_sidecar::{BlobIdentifier, BlobSidecar, FixedBlobSidecarList}; use types::{ - BlobSidecarList, ChainSpec, DataColumnSidecarList, Epoch, EthSpec, Hash256, SignedBeaconBlock, - Slot, + BlobSidecarList, ChainSpec, DataColumnIdentifier, DataColumnSidecar, DataColumnSidecarList, + Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot, }; mod error; @@ -173,6 +173,14 @@ impl DataAvailabilityChecker { self.availability_cache.peek_blob(blob_id) } + /// Get a data column from the availability cache. 
+ pub fn get_data_column( + &self, + data_column_id: &DataColumnIdentifier, + ) -> Result>>, AvailabilityCheckError> { + self.availability_cache.peek_data_column(data_column_id) + } + /// Put a list of blobs received via RPC into the availability cache. This performs KZG /// verification on the blobs in the list. pub fn put_rpc_blobs( diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index 7108f7153c..50fae09119 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -13,7 +13,10 @@ use ssz_types::{FixedVector, VariableList}; use std::num::NonZeroUsize; use std::sync::Arc; use types::blob_sidecar::BlobIdentifier; -use types::{BlobSidecar, ChainSpec, ColumnIndex, Epoch, EthSpec, Hash256, SignedBeaconBlock}; +use types::{ + BlobSidecar, ChainSpec, ColumnIndex, DataColumnIdentifier, DataColumnSidecar, Epoch, EthSpec, + Hash256, SignedBeaconBlock, +}; /// This represents the components of a partially available block /// @@ -389,6 +392,22 @@ impl DataAvailabilityCheckerInner { } } + /// Fetch a data column from the cache without affecting the LRU ordering + pub fn peek_data_column( + &self, + data_column_id: &DataColumnIdentifier, + ) -> Result>>, AvailabilityCheckError> { + if let Some(pending_components) = self.critical.read().peek(&data_column_id.block_root) { + Ok(pending_components + .verified_data_columns + .iter() + .find(|data_column| data_column.as_data_column().index == data_column_id.index) + .map(|data_column| data_column.clone_arc())) + } else { + Ok(None) + } + } + pub fn peek_pending_components>) -> R>( &self, block_root: &Hash256, diff --git a/beacon_node/beacon_chain/src/data_column_verification.rs b/beacon_node/beacon_chain/src/data_column_verification.rs index 6a4efd605d..279af20909 100644 --- 
a/beacon_node/beacon_chain/src/data_column_verification.rs +++ b/beacon_node/beacon_chain/src/data_column_verification.rs @@ -254,6 +254,9 @@ impl KzgVerifiedCustodyDataColumn { pub fn as_data_column(&self) -> &DataColumnSidecar { &self.data } + pub fn clone_arc(&self) -> Arc> { + self.data.clone() + } } /// Complete kzg verification for a `DataColumnSidecar`. diff --git a/beacon_node/lighthouse_network/src/rpc/methods.rs b/beacon_node/lighthouse_network/src/rpc/methods.rs index 8849a5433d..7c7dca02f5 100644 --- a/beacon_node/lighthouse_network/src/rpc/methods.rs +++ b/beacon_node/lighthouse_network/src/rpc/methods.rs @@ -6,6 +6,7 @@ use serde::Serialize; use ssz::Encode; use ssz_derive::{Decode, Encode}; use ssz_types::{typenum::U256, VariableList}; +use std::collections::BTreeMap; use std::fmt::Display; use std::marker::PhantomData; use std::ops::Deref; @@ -426,6 +427,17 @@ impl DataColumnsByRootRequest { pub fn new_single(block_root: Hash256, index: ColumnIndex, spec: &ChainSpec) -> Self { Self::new(vec![DataColumnIdentifier { block_root, index }], spec) } + + pub fn group_by_ordered_block_root(&self) -> Vec<(Hash256, Vec)> { + let mut column_indexes_by_block = BTreeMap::>::new(); + for request_id in self.data_column_ids.as_slice() { + column_indexes_by_block + .entry(request_id.block_root) + .or_default() + .push(request_id.index); + } + column_indexes_by_block.into_iter().collect() + } } /* RPC Handling and Grouping */ diff --git a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs index 3f8cf14dcb..0defe7ad87 100644 --- a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs @@ -320,16 +320,66 @@ impl NetworkBeaconProcessor { pub fn handle_data_columns_by_root_request( self: Arc, peer_id: PeerId, - _request_id: PeerRequestId, + request_id: PeerRequestId, request: DataColumnsByRootRequest, ) { - // 
TODO(das): implement handler - debug!(self.log, "Received DataColumnsByRoot Request"; - "peer_id" => %peer_id, - "count" => request.data_column_ids.len() + self.terminate_response_stream( + peer_id, + request_id, + self.handle_data_columns_by_root_request_inner(peer_id, request_id, request), + Response::DataColumnsByRoot, ); } + /// Handle a `DataColumnsByRoot` request from the peer. + pub fn handle_data_columns_by_root_request_inner( + &self, + peer_id: PeerId, + request_id: PeerRequestId, + request: DataColumnsByRootRequest, + ) -> Result<(), (RPCResponseErrorCode, &'static str)> { + let mut send_data_column_count = 0; + + for data_column_id in request.data_column_ids.as_slice() { + match self.chain.get_data_column_checking_all_caches( + data_column_id.block_root, + data_column_id.index, + ) { + Ok(Some(data_column)) => { + send_data_column_count += 1; + self.send_response( + peer_id, + Response::DataColumnsByRoot(Some(data_column)), + request_id, + ); + } + Ok(None) => {} // no-op + Err(e) => { + // TODO(das): lower log level when feature is stabilized + error!(self.log, "Error getting data column"; + "block_root" => ?data_column_id.block_root, + "peer" => %peer_id, + "error" => ?e + ); + return Err(( + RPCResponseErrorCode::ServerError, + "Error getting data column", + )); + } + } + } + + debug!( + self.log, + "Received DataColumnsByRoot Request"; + "peer" => %peer_id, + "request" => ?request.group_by_ordered_block_root(), + "returned" => send_data_column_count + ); + + Ok(()) + } + /// Handle a `LightClientBootstrap` request from the peer. pub fn handle_light_client_bootstrap( self: &Arc, @@ -833,17 +883,196 @@ impl NetworkBeaconProcessor { /// Handle a `DataColumnsByRange` request from the peer. 
pub fn handle_data_columns_by_range_request( - self: Arc, + &self, peer_id: PeerId, - _request_id: PeerRequestId, + request_id: PeerRequestId, req: DataColumnsByRangeRequest, ) { - // TODO(das): implement handler + self.terminate_response_stream( + peer_id, + request_id, + self.handle_data_columns_by_range_request_inner(peer_id, request_id, req), + Response::DataColumnsByRange, + ); + } + + /// Handle a `DataColumnsByRange` request from the peer. + pub fn handle_data_columns_by_range_request_inner( + &self, + peer_id: PeerId, + request_id: PeerRequestId, + req: DataColumnsByRangeRequest, + ) -> Result<(), (RPCResponseErrorCode, &'static str)> { debug!(self.log, "Received DataColumnsByRange Request"; "peer_id" => %peer_id, "count" => req.count, "start_slot" => req.start_slot, ); + + // Should not send more than max request data columns + if req.max_requested::() > self.chain.spec.max_request_data_column_sidecars { + return Err(( + RPCResponseErrorCode::InvalidRequest, + "Request exceeded `MAX_REQUEST_BLOBS_SIDECARS`", + )); + } + + let request_start_slot = Slot::from(req.start_slot); + + let data_availability_boundary_slot = match self.chain.data_availability_boundary() { + Some(boundary) => boundary.start_slot(T::EthSpec::slots_per_epoch()), + None => { + debug!(self.log, "Deneb fork is disabled"); + return Err(( + RPCResponseErrorCode::InvalidRequest, + "Deneb fork is disabled", + )); + } + }; + + let oldest_data_column_slot = self + .chain + .store + .get_data_column_info() + .oldest_data_column_slot + .unwrap_or(data_availability_boundary_slot); + + if request_start_slot < oldest_data_column_slot { + debug!( + self.log, + "Range request start slot is older than data availability boundary."; + "requested_slot" => request_start_slot, + "oldest_data_column_slot" => oldest_data_column_slot, + "data_availability_boundary" => data_availability_boundary_slot + ); + + return if data_availability_boundary_slot < oldest_data_column_slot { + Err(( + 
RPCResponseErrorCode::ResourceUnavailable, + "blobs pruned within boundary", + )) + } else { + Err(( + RPCResponseErrorCode::InvalidRequest, + "Req outside availability period", + )) + }; + } + + let forwards_block_root_iter = + match self.chain.forwards_iter_block_roots(request_start_slot) { + Ok(iter) => iter, + Err(BeaconChainError::HistoricalBlockError( + HistoricalBlockError::BlockOutOfRange { + slot, + oldest_block_slot, + }, + )) => { + debug!(self.log, "Range request failed during backfill"; + "requested_slot" => slot, + "oldest_known_slot" => oldest_block_slot + ); + return Err((RPCResponseErrorCode::ResourceUnavailable, "Backfilling")); + } + Err(e) => { + error!(self.log, "Unable to obtain root iter"; + "request" => ?req, + "peer" => %peer_id, + "error" => ?e + ); + return Err((RPCResponseErrorCode::ServerError, "Database error")); + } + }; + + // Use `WhenSlotSkipped::Prev` to get the most recent block root prior to + // `request_start_slot` in order to check whether the `request_start_slot` is a skip. + let mut last_block_root = req.start_slot.checked_sub(1).and_then(|prev_slot| { + self.chain + .block_root_at_slot(Slot::new(prev_slot), WhenSlotSkipped::Prev) + .ok() + .flatten() + }); + + // Pick out the required blocks, ignoring skip-slots. 
+ let maybe_block_roots = process_results(forwards_block_root_iter, |iter| { + iter.take_while(|(_, slot)| slot.as_u64() < req.start_slot.saturating_add(req.count)) + // map skip slots to None + .map(|(root, _)| { + let result = if Some(root) == last_block_root { + None + } else { + Some(root) + }; + last_block_root = Some(root); + result + }) + .collect::>>() + }); + + let block_roots = match maybe_block_roots { + Ok(block_roots) => block_roots, + Err(e) => { + error!(self.log, "Error during iteration over blocks"; + "request" => ?req, + "peer" => %peer_id, + "error" => ?e + ); + return Err((RPCResponseErrorCode::ServerError, "Database error")); + } + }; + + // remove all skip slots + let block_roots = block_roots.into_iter().flatten(); + let mut data_columns_sent = 0; + + for root in block_roots { + for index in &req.columns { + match self.chain.get_data_column(&root, index) { + Ok(Some(data_column_sidecar)) => { + data_columns_sent += 1; + self.send_network_message(NetworkMessage::SendResponse { + peer_id, + response: Response::DataColumnsByRange(Some( + data_column_sidecar.clone(), + )), + id: request_id, + }); + } + Ok(None) => {} // no-op + Err(e) => { + error!( + self.log, + "Error fetching data columns block root"; + "request" => ?req, + "peer" => %peer_id, + "block_root" => ?root, + "error" => ?e + ); + return Err(( + RPCResponseErrorCode::ServerError, + "No data columns and failed fetching corresponding block", + )); + } + } + } + } + + let current_slot = self + .chain + .slot() + .unwrap_or_else(|_| self.chain.slot_clock.genesis_slot()); + + debug!( + self.log, + "DataColumnsByRange Response processed"; + "peer" => %peer_id, + "start_slot" => req.start_slot, + "current_slot" => current_slot, + "requested" => req.count, + "returned" => data_columns_sent + ); + + Ok(()) } /// Helper function to ensure single item protocol always end with either a single chunk or an From 9bc5643319305698199490409a81e8ce5a0bde5e Mon Sep 17 00:00:00 2001 From: Lion - dapplion 
<35266934+dapplion@users.noreply.github.com> Date: Mon, 19 Aug 2024 09:56:31 +0200 Subject: [PATCH 39/43] Add metrics inside fork-choice crate (#6205) * Add metrics inside fork-choice crate --- Cargo.lock | 1 + beacon_node/beacon_chain/src/beacon_chain.rs | 4 -- .../beacon_chain/src/block_verification.rs | 3 - beacon_node/beacon_chain/src/metrics.rs | 18 ++---- consensus/fork_choice/Cargo.toml | 1 + consensus/fork_choice/src/fork_choice.rs | 17 +++++ consensus/fork_choice/src/lib.rs | 1 + consensus/fork_choice/src/metrics.rs | 62 +++++++++++++++++++ 8 files changed, 87 insertions(+), 20 deletions(-) create mode 100644 consensus/fork_choice/src/metrics.rs diff --git a/Cargo.lock b/Cargo.lock index 92911f18e9..8f0a4cac6a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3162,6 +3162,7 @@ dependencies = [ "beacon_chain", "ethereum_ssz", "ethereum_ssz_derive", + "lighthouse_metrics", "proto_array", "slog", "state_processing", diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index b6de128dbb..e290b4903a 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -2192,8 +2192,6 @@ impl BeaconChain { &self, verified: &impl VerifiedAttestation, ) -> Result<(), Error> { - let _timer = metrics::start_timer(&metrics::FORK_CHOICE_PROCESS_ATTESTATION_TIMES); - self.canonical_head .fork_choice_write_lock() .on_attestation( @@ -3634,8 +3632,6 @@ impl BeaconChain { // Register the new block with the fork choice service. 
{ - let _fork_choice_block_timer = - metrics::start_timer(&metrics::FORK_CHOICE_PROCESS_BLOCK_TIMES); let block_delay = self .slot_clock .seconds_from_current_slot_start() diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 832eaccc80..68fccee959 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -1666,9 +1666,6 @@ impl ExecutionPendingBlock { // Register each attestation in the block with fork choice. for (i, attestation) in block.message().body().attestations().enumerate() { - let _fork_choice_attestation_timer = - metrics::start_timer(&metrics::FORK_CHOICE_PROCESS_ATTESTATION_TIMES); - let indexed_attestation = consensus_context .get_indexed_attestation(&state, attestation) .map_err(|e| BlockError::PerBlockProcessingError(e.into_with_index(i)))?; diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index 0309c4995e..9610001be1 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -569,19 +569,6 @@ pub static FORK_CHOICE_AFTER_FINALIZATION_TIMES: LazyLock> = exponential_buckets(1e-3, 2.0, 10), ) }); -pub static FORK_CHOICE_PROCESS_BLOCK_TIMES: LazyLock> = LazyLock::new(|| { - try_create_histogram( - "beacon_fork_choice_process_block_seconds", - "Time taken to add a block and all attestations to fork choice", - ) -}); -pub static FORK_CHOICE_PROCESS_ATTESTATION_TIMES: LazyLock> = - LazyLock::new(|| { - try_create_histogram( - "beacon_fork_choice_process_attestation_seconds", - "Time taken to add an attestation to fork choice", - ) - }); pub static FORK_CHOICE_SET_HEAD_LAG_TIMES: LazyLock> = LazyLock::new(|| { try_create_histogram( "beacon_fork_choice_set_head_lag_times", @@ -1955,6 +1942,11 @@ pub fn scrape_for_metrics(beacon_chain: &BeaconChain) { .validator_monitor .read() .scrape_metrics(&beacon_chain.slot_clock, 
&beacon_chain.spec); + + beacon_chain + .canonical_head + .fork_choice_read_lock() + .scrape_for_metrics(); } /// Scrape the given `state` assuming it's the head state, updating the `DEFAULT_REGISTRY`. diff --git a/consensus/fork_choice/Cargo.toml b/consensus/fork_choice/Cargo.toml index 7a06d7352b..4a4f6e9086 100644 --- a/consensus/fork_choice/Cargo.toml +++ b/consensus/fork_choice/Cargo.toml @@ -12,6 +12,7 @@ state_processing = { workspace = true } proto_array = { workspace = true } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } +lighthouse_metrics = { workspace = true } slog = { workspace = true } [dev-dependencies] diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index c55219a676..1482e2beb4 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -1,3 +1,4 @@ +use crate::metrics::{self, scrape_for_metrics}; use crate::{ForkChoiceStore, InvalidationOperation}; use proto_array::{ Block as ProtoBlock, DisallowedReOrgOffsets, ExecutionStatus, ProposerHeadError, @@ -262,6 +263,11 @@ fn dequeue_attestations( .unwrap_or(queued_attestations.len()), ); + metrics::inc_counter_by( + &metrics::FORK_CHOICE_DEQUEUED_ATTESTATIONS, + queued_attestations.len() as u64, + ); + std::mem::replace(queued_attestations, remaining) } @@ -649,6 +655,8 @@ where payload_verification_status: PayloadVerificationStatus, spec: &ChainSpec, ) -> Result<(), Error> { + let _timer = metrics::start_timer(&metrics::FORK_CHOICE_ON_BLOCK_TIMES); + // If this block has already been processed we do not need to reprocess it. // We check this immediately in case re-processing the block mutates some property of the // global fork choice store, e.g. the justified checkpoints or the proposer boost root. 
@@ -1040,6 +1048,8 @@ where attestation: IndexedAttestationRef, is_from_block: AttestationFromBlock, ) -> Result<(), Error> { + let _timer = metrics::start_timer(&metrics::FORK_CHOICE_ON_ATTESTATION_TIMES); + self.update_time(system_time_current_slot)?; // Ignore any attestations to the zero hash. @@ -1087,6 +1097,8 @@ where /// /// We assume that the attester slashing provided to this function has already been verified. pub fn on_attester_slashing(&mut self, slashing: AttesterSlashingRef<'_, E>) { + let _timer = metrics::start_timer(&metrics::FORK_CHOICE_ON_ATTESTER_SLASHING_TIMES); + let attesting_indices_set = |att: IndexedAttestationRef<'_, E>| { att.attesting_indices_iter() .copied() @@ -1502,6 +1514,11 @@ where queued_attestations: self.queued_attestations().to_vec(), } } + + /// Update the global metrics `DEFAULT_REGISTRY` with info from the fork choice + pub fn scrape_for_metrics(&self) { + scrape_for_metrics(self); + } } /// Helper struct that is used to encode/decode the state of the `ForkChoice` as SSZ bytes. 
diff --git a/consensus/fork_choice/src/lib.rs b/consensus/fork_choice/src/lib.rs index 5e8cfb1ee4..17f1dc38a6 100644 --- a/consensus/fork_choice/src/lib.rs +++ b/consensus/fork_choice/src/lib.rs @@ -1,5 +1,6 @@ mod fork_choice; mod fork_choice_store; +mod metrics; pub use crate::fork_choice::{ AttestationFromBlock, Error, ForkChoice, ForkChoiceView, ForkchoiceUpdateParameters, diff --git a/consensus/fork_choice/src/metrics.rs b/consensus/fork_choice/src/metrics.rs new file mode 100644 index 0000000000..eb0dbf435e --- /dev/null +++ b/consensus/fork_choice/src/metrics.rs @@ -0,0 +1,62 @@ +pub use lighthouse_metrics::*; +use std::sync::LazyLock; +use types::EthSpec; + +use crate::{ForkChoice, ForkChoiceStore}; + +pub static FORK_CHOICE_QUEUED_ATTESTATIONS: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( + "fork_choice_queued_attestations", + "Current count of queued attestations", + ) +}); +pub static FORK_CHOICE_NODES: LazyLock> = LazyLock::new(|| { + try_create_int_gauge("fork_choice_nodes", "Current count of proto array nodes") +}); +pub static FORK_CHOICE_INDICES: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( + "fork_choice_indices", + "Current count of proto array indices", + ) +}); +pub static FORK_CHOICE_DEQUEUED_ATTESTATIONS: LazyLock> = LazyLock::new(|| { + try_create_int_counter( + "fork_choice_dequeued_attestations_total", + "Total count of dequeued attestations", + ) +}); +pub static FORK_CHOICE_ON_BLOCK_TIMES: LazyLock> = LazyLock::new(|| { + try_create_histogram( + "beacon_fork_choice_process_block_seconds", + "The duration in seconds of on_block runs", + ) +}); +pub static FORK_CHOICE_ON_ATTESTATION_TIMES: LazyLock> = LazyLock::new(|| { + try_create_histogram( + "beacon_fork_choice_process_attestation_seconds", + "The duration in seconds of on_attestation runs", + ) +}); +pub static FORK_CHOICE_ON_ATTESTER_SLASHING_TIMES: LazyLock> = + LazyLock::new(|| { + try_create_histogram( + "beacon_fork_choice_on_attester_slashing_seconds", + "The 
duration in seconds on on_attester_slashing runs", + ) + }); + +/// Update the global metrics `DEFAULT_REGISTRY` with info from the fork choice. +pub fn scrape_for_metrics, E: EthSpec>(fork_choice: &ForkChoice) { + set_gauge( + &FORK_CHOICE_QUEUED_ATTESTATIONS, + fork_choice.queued_attestations().len() as i64, + ); + set_gauge( + &FORK_CHOICE_NODES, + fork_choice.proto_array().core_proto_array().nodes.len() as i64, + ); + set_gauge( + &FORK_CHOICE_INDICES, + fork_choice.proto_array().core_proto_array().indices.len() as i64, + ); +} From b6d15bc29921df0c810222730373521295830000 Mon Sep 17 00:00:00 2001 From: chonghe <44791194+chong-he@users.noreply.github.com> Date: Mon, 19 Aug 2024 15:56:33 +0800 Subject: [PATCH 40/43] Make `portable` the default in Lighthouse book and Makefile, update workflows (#6247) * Remove portable in book * Add back modern in docker.md * Update Makefile * Update release.yml * Update docker.yml * Update book * Minor revision * delete portable tarballs * delete portable in Make command * Fix link in book * mdlint * Remove modernity in docker * Merge branch 'unstable' into update-book-portable * Merge branch 'unstable' into update-book-portable * Remove `-dev` docker images * Merge remote-tracking branch 'origin/unstable' into update-book-portable --- .github/workflows/docker.yml | 3 --- .github/workflows/release.yml | 4 ---- Makefile | 12 +---------- book/src/cross-compiling.md | 10 +-------- book/src/docker.md | 27 ++++++------------------ book/src/installation-binaries.md | 34 ------------------------------- book/src/installation-source.md | 3 +-- 7 files changed, 9 insertions(+), 84 deletions(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index d1a8c9f614..28a84048c0 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -124,9 +124,6 @@ jobs: push: true tags: | ${{ github.repository_owner}}/${{ matrix.binary }}:${{ env.VERSION }}-${{ env.SHORT_ARCH }}${{ env.VERSION_SUFFIX }} - ${{ 
github.repository_owner}}/${{ matrix.binary }}:${{ env.VERSION }}-${{ env.SHORT_ARCH }}${{ env.VERSION_SUFFIX }}-dev - ${{ github.repository_owner}}/${{ matrix.binary }}:${{ env.VERSION }}-${{ env.SHORT_ARCH }}${{ env.VERSION_SUFFIX }}-modern - ${{ github.repository_owner}}/${{ matrix.binary }}:${{ env.VERSION }}-${{ env.SHORT_ARCH }}${{ env.VERSION_SUFFIX }}-modern-dev - name: Build and push (lcli) if: startsWith(matrix.binary, 'lcli') diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 86f99b53e1..76925a8be0 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -253,13 +253,9 @@ jobs: | System | Architecture | Binary | PGP Signature | |:---:|:---:|:---:|:---| | | x86_64 | [lighthouse-${{ env.VERSION }}-x86_64-apple-darwin.tar.gz](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-x86_64-apple-darwin.tar.gz) | [PGP Signature](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-x86_64-apple-darwin.tar.gz.asc) | - | | x86_64 | [lighthouse-${{ env.VERSION }}-x86_64-apple-darwin-portable.tar.gz](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-x86_64-apple-darwin-portable.tar.gz) | [PGP Signature](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-x86_64-apple-darwin-portable.tar.gz.asc) | | | x86_64 | [lighthouse-${{ env.VERSION }}-x86_64-unknown-linux-gnu.tar.gz](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-x86_64-unknown-linux-gnu.tar.gz) | [PGP Signature](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-x86_64-unknown-linux-gnu.tar.gz.asc) | - | | x86_64 | [lighthouse-${{ env.VERSION }}-x86_64-unknown-linux-gnu-portable.tar.gz](https://github.com/${{ env.REPO_NAME 
}}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-x86_64-unknown-linux-gnu-portable.tar.gz) | [PGP Signature](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-x86_64-unknown-linux-gnu-portable.tar.gz.asc) | | | aarch64 | [lighthouse-${{ env.VERSION }}-aarch64-unknown-linux-gnu.tar.gz](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-aarch64-unknown-linux-gnu.tar.gz) | [PGP Signature](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-aarch64-unknown-linux-gnu.tar.gz.asc) | - | | aarch64 | [lighthouse-${{ env.VERSION }}-aarch64-unknown-linux-gnu-portable.tar.gz](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-aarch64-unknown-linux-gnu-portable.tar.gz) | [PGP Signature](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-aarch64-unknown-linux-gnu-portable.tar.gz.asc) | | | x86_64 | [lighthouse-${{ env.VERSION }}-x86_64-windows.tar.gz](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-x86_64-windows.tar.gz) | [PGP Signature](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-x86_64-windows.tar.gz.asc) | - | | x86_64 | [lighthouse-${{ env.VERSION }}-x86_64-windows-portable.tar.gz](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-x86_64-windows-portable.tar.gz) | [PGP Signature](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-x86_64-windows-portable.tar.gz.asc) | | | | | | | **System** | **Option** | - | **Resource** | | | Docker | [${{ env.VERSION }}](https://hub.docker.com/r/${{ env.IMAGE_NAME }}/tags?page=1&ordering=last_updated&name=${{ 
env.VERSION }}) | [${{ env.IMAGE_NAME }}](https://hub.docker.com/r/${{ env.IMAGE_NAME }}) | diff --git a/Makefile b/Makefile index d18a673880..a4cb70f687 100644 --- a/Makefile +++ b/Makefile @@ -62,16 +62,10 @@ install-lcli: # # The resulting binaries will be created in the `target/` directory. # -# The *-portable options compile the blst library *without* the use of some -# optimized CPU functions that may not be available on some systems. This -# results in a more portable binary with ~20% slower BLS verification. +# The *-portable options is the default feature. build-x86_64: - cross build --bin lighthouse --target x86_64-unknown-linux-gnu --features "modern,$(CROSS_FEATURES)" --profile "$(CROSS_PROFILE)" --locked -build-x86_64-portable: cross build --bin lighthouse --target x86_64-unknown-linux-gnu --features "portable,$(CROSS_FEATURES)" --profile "$(CROSS_PROFILE)" --locked build-aarch64: - cross build --bin lighthouse --target aarch64-unknown-linux-gnu --features "$(CROSS_FEATURES)" --profile "$(CROSS_PROFILE)" --locked -build-aarch64-portable: cross build --bin lighthouse --target aarch64-unknown-linux-gnu --features "portable,$(CROSS_FEATURES)" --profile "$(CROSS_PROFILE)" --locked build-lcli-x86_64: @@ -96,12 +90,8 @@ build-release-tarballs: [ -d $(BIN_DIR) ] || mkdir -p $(BIN_DIR) $(MAKE) build-x86_64 $(call tarball_release_binary,$(BUILD_PATH_X86_64),$(X86_64_TAG),"") - $(MAKE) build-x86_64-portable - $(call tarball_release_binary,$(BUILD_PATH_X86_64),$(X86_64_TAG),"-portable") $(MAKE) build-aarch64 $(call tarball_release_binary,$(BUILD_PATH_AARCH64),$(AARCH64_TAG),"") - $(MAKE) build-aarch64-portable - $(call tarball_release_binary,$(BUILD_PATH_AARCH64),$(AARCH64_TAG),"-portable") # Runs the full workspace tests in **release**, without downloading any additional # test vectors. 
diff --git a/book/src/cross-compiling.md b/book/src/cross-compiling.md index dfddcbc294..c90001d561 100644 --- a/book/src/cross-compiling.md +++ b/book/src/cross-compiling.md @@ -15,18 +15,10 @@ project. ### Targets -The `Makefile` in the project contains four targets for cross-compiling: +The `Makefile` in the project contains two targets for cross-compiling: - `build-x86_64`: builds an optimized version for x86_64 processors (suitable for most users). -- `build-x86_64-portable`: builds a version for x86_64 processors which avoids using some modern CPU - instructions that are incompatible with older CPUs. - `build-aarch64`: builds an optimized version for 64-bit ARM processors (suitable for Raspberry Pi 4). -- `build-aarch64-portable`: builds a version for 64-bit ARM processors which avoids using some - modern CPU instructions. In practice, very few ARM processors lack the instructions necessary to - run the faster non-portable build. - -For more information about optimized vs portable builds see -[Portability](./installation-binaries.md#portability). ### Example diff --git a/book/src/docker.md b/book/src/docker.md index 16e685491e..8ee0c56bb4 100644 --- a/book/src/docker.md +++ b/book/src/docker.md @@ -35,28 +35,23 @@ Lighthouse vx.x.xx-xxxxxxxxx BLS Library: xxxx-xxxxxxx ``` -> Pro tip: try the `latest-modern` image for a 20-30% speed-up! See [Available Docker -> Images](#available-docker-images) below. - ### Available Docker Images There are several images available on Docker Hub. -Most users should use the `latest-modern` tag, which corresponds to the latest stable release of -Lighthouse with optimizations enabled. If you are running on older hardware then the default -`latest` image bundles a _portable_ version of Lighthouse which is slower but with better hardware -compatibility (see [Portability](./installation-binaries.md#portability)). 
+Most users should use the `latest` tag, which corresponds to the latest stable release of +Lighthouse with optimizations enabled. -To install a specific tag (in this case `latest-modern`), add the tag name to your `docker` commands: +To install a specific tag (in this case `latest`), add the tag name to your `docker` commands: ```bash -docker pull sigp/lighthouse:latest-modern +docker pull sigp/lighthouse:latest ``` Image tags follow this format: ```text -${version}${arch}${stability}${modernity}${features} +${version}${arch}${stability} ``` The `version` is: @@ -75,19 +70,9 @@ The `stability` is: * `-unstable` for the `unstable` branch * empty for a tagged release or the `stable` branch -The `modernity` is: - -* `-modern` for optimized builds -* empty for a `portable` unoptimized build - -The `features` is: - -* `-dev` for a development build with `minimal` preset enabled (`spec-minimal` feature). -* empty for a standard build with no custom feature enabled. - Examples: -* `latest-unstable-modern`: most recent `unstable` build for all modern CPUs (x86_64 or ARM) +* `latest-unstable`: most recent `unstable` build * `latest-amd64`: most recent Lighthouse release for older x86_64 CPUs * `latest-amd64-unstable`: most recent `unstable` build for older x86_64 CPUs diff --git a/book/src/installation-binaries.md b/book/src/installation-binaries.md index 580b5c19d4..e3a2bfb8a0 100644 --- a/book/src/installation-binaries.md +++ b/book/src/installation-binaries.md @@ -13,13 +13,6 @@ Binaries are supplied for four platforms: - `x86_64-apple-darwin`: macOS with Intel chips - `x86_64-windows`: Windows with 64-bit processors -Additionally there is also a `-portable` suffix which indicates if the `portable` feature is used: - -- Without `portable`: uses modern CPU instructions to provide the fastest signature verification times (may cause `Illegal instruction` error on older CPUs) -- With `portable`: approx. 20% slower, but should work on all modern 64-bit processors. 
- -For details, see [Portability](#portability). - ## Usage Each binary is contained in a `.tar.gz` archive. For this example, lets assume the user needs @@ -41,30 +34,3 @@ a `x86_64` binary. 1. (Optional) Move the `lighthouse` binary to a location in your `PATH`, so the `lighthouse` command can be called from anywhere. For example, to copy `lighthouse` from the current directory to `usr/bin`, run `sudo cp lighthouse /usr/bin`. > Windows users will need to execute the commands in Step 2 from PowerShell. - -## Portability - -Portable builds of Lighthouse are designed to run on the widest range of hardware possible, but -sacrifice the ability to make use of modern CPU instructions. - -If you have a modern CPU then you should try running a non-portable build to get a 20-30% speed up. - -- For **x86_64**, any CPU supporting the [ADX](https://en.wikipedia.org/wiki/Intel_ADX) instruction set -extension is compatible with the optimized build. This includes Intel Broadwell (2014) -and newer, and AMD Ryzen (2017) and newer. -- For **ARMv8**, most CPUs are compatible with the optimized build, including the Cortex-A72 used by -the Raspberry Pi 4. - -## Troubleshooting - -If you get a SIGILL (exit code 132), then your CPU is incompatible with the optimized build -of Lighthouse and you should switch to the `-portable` build. In this case, you will see a -warning like this on start-up: - -``` -WARN CPU seems incompatible with optimized Lighthouse build, advice: If you get a SIGILL, please try Lighthouse portable build -``` - -On some VPS providers, the virtualization can make it appear as if CPU features are not available, -even when they are. In this case you might see the warning above, but so long as the client -continues to function, it's nothing to worry about. 
diff --git a/book/src/installation-source.md b/book/src/installation-source.md index be03a189de..3c9f27d236 100644 --- a/book/src/installation-source.md +++ b/book/src/installation-source.md @@ -160,8 +160,7 @@ FEATURES=gnosis,slasher-lmdb make Commonly used features include: - `gnosis`: support for the Gnosis Beacon Chain. -- `portable`: support for legacy hardware. -- `modern`: support for exclusively modern hardware. +- `portable`: the default feature as Lighthouse now uses runtime detection of hardware CPU features. - `slasher-lmdb`: support for the LMDB slasher backend. Enabled by default. - `slasher-mdbx`: support for the MDBX slasher backend. - `jemalloc`: use [`jemalloc`][jemalloc] to allocate memory. Enabled by default on Linux and macOS. From 042915859d80a33f70e8873bd300acffc4efc435 Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Mon, 19 Aug 2024 01:28:45 -0700 Subject: [PATCH 41/43] add missing fields to get blob sidecars request (#5987) * add missing fields to get blob sidecars request * add fork versioned response impl * only compute the block root once * Merge branch 'unstable' of https://github.com/sigp/lighthouse into add-missing-fields-get-blob-sidecars * Merge branch 'unstable' of https://github.com/sigp/lighthouse into add-missing-fields-get-blob-sidecars * fetch root first fetch from cache if its a head block * fmt * always load from db --- beacon_node/http_api/src/block_id.rs | 101 +++++++++++++++------------ beacon_node/http_api/src/lib.rs | 24 +++++-- common/eth2/src/lib.rs | 3 +- consensus/types/src/blob_sidecar.rs | 12 +++- 4 files changed, 89 insertions(+), 51 deletions(-) diff --git a/beacon_node/http_api/src/block_id.rs b/beacon_node/http_api/src/block_id.rs index 45fc651f05..f35df2f5e8 100644 --- a/beacon_node/http_api/src/block_id.rs +++ b/beacon_node/http_api/src/block_id.rs @@ -123,6 +123,15 @@ impl BlockId { } } + pub fn blinded_block_by_root( + root: &Hash256, + chain: &BeaconChain, + ) -> Result>, warp::Rejection> { + chain + 
.get_blinded_block(root) + .map_err(warp_utils::reject::beacon_chain_error) + } + /// Return the `SignedBeaconBlock` identified by `self`. pub fn blinded_block( &self, @@ -149,38 +158,32 @@ impl BlockId { } CoreBlockId::Slot(slot) => { let (root, execution_optimistic, finalized) = self.root(chain)?; - chain - .get_blinded_block(&root) - .map_err(warp_utils::reject::beacon_chain_error) - .and_then(|block_opt| match block_opt { - Some(block) => { - if block.slot() != *slot { - return Err(warp_utils::reject::custom_not_found(format!( - "slot {} was skipped", - slot - ))); - } - Ok((block, execution_optimistic, finalized)) + BlockId::blinded_block_by_root(&root, chain).and_then(|block_opt| match block_opt { + Some(block) => { + if block.slot() != *slot { + return Err(warp_utils::reject::custom_not_found(format!( + "slot {} was skipped", + slot + ))); } - None => Err(warp_utils::reject::custom_not_found(format!( - "beacon block with root {}", - root - ))), - }) + Ok((block, execution_optimistic, finalized)) + } + None => Err(warp_utils::reject::custom_not_found(format!( + "beacon block with root {}", + root + ))), + }) } _ => { let (root, execution_optimistic, finalized) = self.root(chain)?; - let block = chain - .get_blinded_block(&root) - .map_err(warp_utils::reject::beacon_chain_error) - .and_then(|root_opt| { - root_opt.ok_or_else(|| { - warp_utils::reject::custom_not_found(format!( - "beacon block with root {}", - root - )) - }) - })?; + let block = BlockId::blinded_block_by_root(&root, chain).and_then(|root_opt| { + root_opt.ok_or_else(|| { + warp_utils::reject::custom_not_found(format!( + "beacon block with root {}", + root + )) + }) + })?; Ok((block, execution_optimistic, finalized)) } } @@ -252,23 +255,30 @@ impl BlockId { } } - /// Return the `BlobSidecarList` identified by `self`. 
- pub fn blob_sidecar_list( - &self, - chain: &BeaconChain, - ) -> Result, warp::Rejection> { - let root = self.root(chain)?.0; - chain - .get_blobs(&root) - .map_err(warp_utils::reject::beacon_chain_error) - } - - pub fn blob_sidecar_list_filtered( + #[allow(clippy::type_complexity)] + pub fn get_blinded_block_and_blob_list_filtered( &self, indices: BlobIndicesQuery, chain: &BeaconChain, - ) -> Result, warp::Rejection> { - let blob_sidecar_list = self.blob_sidecar_list(chain)?; + ) -> Result< + ( + SignedBlindedBeaconBlock, + BlobSidecarList, + ExecutionOptimistic, + Finalized, + ), + warp::Rejection, + > { + let (root, execution_optimistic, finalized) = self.root(chain)?; + let block = BlockId::blinded_block_by_root(&root, chain)?.ok_or_else(|| { + warp_utils::reject::custom_not_found(format!("beacon block with root {}", root)) + })?; + + // Return the `BlobSidecarList` identified by `self`. + let blob_sidecar_list = chain + .get_blobs(&root) + .map_err(warp_utils::reject::beacon_chain_error)?; + let blob_sidecar_list_filtered = match indices.indices { Some(vec) => { let list = blob_sidecar_list @@ -280,7 +290,12 @@ impl BlockId { } None => blob_sidecar_list, }; - Ok(blob_sidecar_list_filtered) + Ok(( + block, + blob_sidecar_list_filtered, + execution_optimistic, + finalized, + )) } } diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index aa47d5c464..93499b7c38 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -1736,8 +1736,12 @@ pub fn serve( accept_header: Option| { task_spawner.blocking_response_task(Priority::P1, move || { let indices = indices_res?; - let blob_sidecar_list_filtered = - block_id.blob_sidecar_list_filtered(indices, &chain)?; + let (block, blob_sidecar_list_filtered, execution_optimistic, finalized) = + block_id.get_blinded_block_and_blob_list_filtered(indices, &chain)?; + let fork_name = block + .fork_name(&chain.spec) + .map_err(inconsistent_fork_rejection)?; + match 
accept_header { Some(api_types::Accept::Ssz) => Response::builder() .status(200) @@ -1749,11 +1753,19 @@ pub fn serve( e )) }), - _ => Ok(warp::reply::json(&api_types::GenericResponse::from( - blob_sidecar_list_filtered, - )) - .into_response()), + _ => { + // Post as a V2 endpoint so we return the fork version. + let res = execution_optimistic_finalized_fork_versioned_response( + V2, + fork_name, + execution_optimistic, + finalized, + &blob_sidecar_list_filtered, + )?; + Ok(warp::reply::json(&res).into_response()) + } } + .map(|resp| add_consensus_version_header(resp, fork_name)) }) }, ); diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index f0c25124dd..2805d36b90 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -1141,7 +1141,8 @@ impl BeaconNodeHttpClient { &self, block_id: BlockId, indices: Option<&[u64]>, - ) -> Result>>, Error> { + ) -> Result>>, Error> + { let mut path = self.get_blobs_path(block_id)?; if let Some(indices) = indices { let indices_string = indices diff --git a/consensus/types/src/blob_sidecar.rs b/consensus/types/src/blob_sidecar.rs index 6b32523c35..0f7dbb2673 100644 --- a/consensus/types/src/blob_sidecar.rs +++ b/consensus/types/src/blob_sidecar.rs @@ -1,9 +1,10 @@ use crate::test_utils::TestRandom; +use crate::ForkName; use crate::{ beacon_block_body::BLOB_KZG_COMMITMENTS_INDEX, BeaconBlockHeader, BeaconStateError, Blob, Epoch, EthSpec, FixedVector, Hash256, SignedBeaconBlockHeader, Slot, VariableList, }; -use crate::{KzgProofs, SignedBeaconBlock}; +use crate::{ForkVersionDeserialize, KzgProofs, SignedBeaconBlock}; use bls::Signature; use derivative::Derivative; use kzg::{Blob as KzgBlob, Kzg, KzgCommitment, KzgProof, BYTES_PER_BLOB, BYTES_PER_FIELD_ELEMENT}; @@ -273,3 +274,12 @@ pub type BlobSidecarList = VariableList>, :: pub type FixedBlobSidecarList = FixedVector>>, ::MaxBlobsPerBlock>; pub type BlobsList = VariableList, ::MaxBlobCommitmentsPerBlock>; + +impl ForkVersionDeserialize for BlobSidecarList { + 
fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>( + value: serde_json::value::Value, + _: ForkName, + ) -> Result { + serde_json::from_value::>(value).map_err(serde::de::Error::custom) + } +} From 6faa9c678ed237d04bea7d317ad225019be8ec7e Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 19 Aug 2024 19:21:07 +1000 Subject: [PATCH 42/43] Prevent fd leak in random slasher tests (#6254) * Prevent fd leak in random slasher tests * Clippy --- slasher/src/database.rs | 60 ++++++++++++++++++++++++++++++++++++++-- slasher/src/slasher.rs | 18 ++++++++++++ slasher/tests/random.rs | 61 ++++++++++++++++++++++++++++++----------- 3 files changed, 121 insertions(+), 18 deletions(-) diff --git a/slasher/src/database.rs b/slasher/src/database.rs index 5c22c60982..b5d7ab5ce8 100644 --- a/slasher/src/database.rs +++ b/slasher/src/database.rs @@ -4,8 +4,8 @@ mod mdbx_impl; mod redb_impl; use crate::{ - metrics, AttesterRecord, AttesterSlashingStatus, CompactAttesterRecord, Config, Error, - ProposerSlashingStatus, + metrics, AttesterRecord, AttesterSlashingStatus, CompactAttesterRecord, Config, Database, + Error, ProposerSlashingStatus, }; use byteorder::{BigEndian, ByteOrder}; use interface::{Environment, OpenDatabases, RwTransaction}; @@ -350,6 +350,18 @@ impl SlasherDB { Ok(()) } + pub fn get_config(&self) -> &Config { + &self.config + } + + /// TESTING ONLY. + /// + /// Replace the config for this database. This is only a sane thing to do if the database + /// is empty (has been `reset`). + pub fn update_config(&mut self, config: Arc) { + self.config = config; + } + /// Load a config from disk. /// /// This is generic in order to allow loading of configs for different schema versions. @@ -799,6 +811,50 @@ impl SlasherDB { Ok(()) } + + /// Delete all data from the database, essentially re-initialising it. + /// + /// We use this reset pattern in tests instead of leaking tonnes of file descriptors and + /// exhausting our allocation by creating (and leaking) databases. 
+ /// + /// THIS FUNCTION SHOULD ONLY BE USED IN TESTS. + pub fn reset(&self) -> Result<(), Error> { + // Clear the cache(s) first. + self.attestation_root_cache.lock().clear(); + + // Pattern match to avoid missing any database. + let OpenDatabases { + indexed_attestation_db, + indexed_attestation_id_db, + attesters_db, + attesters_max_targets_db, + min_targets_db, + max_targets_db, + current_epochs_db, + proposers_db, + metadata_db, + } = &self.databases; + let mut txn = self.begin_rw_txn()?; + self.reset_db(&mut txn, indexed_attestation_db)?; + self.reset_db(&mut txn, indexed_attestation_id_db)?; + self.reset_db(&mut txn, attesters_db)?; + self.reset_db(&mut txn, attesters_max_targets_db)?; + self.reset_db(&mut txn, min_targets_db)?; + self.reset_db(&mut txn, max_targets_db)?; + self.reset_db(&mut txn, current_epochs_db)?; + self.reset_db(&mut txn, proposers_db)?; + self.reset_db(&mut txn, metadata_db)?; + txn.commit() + } + + fn reset_db(&self, txn: &mut RwTransaction<'_>, db: &Database<'static>) -> Result<(), Error> { + let mut cursor = txn.cursor(db)?; + if cursor.first_key()?.is_none() { + return Ok(()); + } + cursor.delete_while(|_| Ok(true))?; + Ok(()) + } } #[cfg(test)] diff --git a/slasher/src/slasher.rs b/slasher/src/slasher.rs index 0bb7c9c3ff..19f2cd138d 100644 --- a/slasher/src/slasher.rs +++ b/slasher/src/slasher.rs @@ -33,6 +33,19 @@ impl Slasher { config.validate()?; let config = Arc::new(config); let db = SlasherDB::open(config.clone(), spec, log.clone())?; + Self::from_config_and_db(config, db, log) + } + + /// TESTING ONLY. + /// + /// Initialise a slasher database from an existing `db`. The caller must ensure that the + /// database's config matches the one provided. 
+ pub fn from_config_and_db( + config: Arc, + db: SlasherDB, + log: Logger, + ) -> Result { + config.validate()?; let attester_slashings = Mutex::new(HashSet::new()); let proposer_slashings = Mutex::new(HashSet::new()); let attestation_queue = AttestationQueue::default(); @@ -48,6 +61,11 @@ impl Slasher { }) } + pub fn into_reset_db(self) -> Result, Error> { + self.db.reset()?; + Ok(self.db) + } + /// Harvest all attester slashings found, removing them from the slasher. pub fn get_attester_slashings(&self) -> HashSet> { std::mem::take(&mut self.attester_slashings.lock()) diff --git a/slasher/tests/random.rs b/slasher/tests/random.rs index 0ba2986d44..ff234dff3f 100644 --- a/slasher/tests/random.rs +++ b/slasher/tests/random.rs @@ -7,10 +7,11 @@ use slasher::{ block, chain_spec, indexed_att, slashed_validators_from_attestations, slashed_validators_from_slashings, E, }, - Config, Slasher, + Config, Slasher, SlasherDB, }; use std::cmp::max; -use tempfile::tempdir; +use std::sync::Arc; +use tempfile::{tempdir, TempDir}; use types::{Epoch, EthSpec}; #[derive(Debug)] @@ -32,7 +33,16 @@ impl Default for TestConfig { } } -fn random_test(seed: u64, test_config: TestConfig) { +fn make_db() -> (TempDir, SlasherDB) { + let tempdir = tempdir().unwrap(); + let initial_config = Arc::new(Config::new(tempdir.path().into())); + let logger = test_logger(); + let spec = chain_spec(); + let db = SlasherDB::open(initial_config.clone(), spec, logger).unwrap(); + (tempdir, db) +} + +fn random_test(seed: u64, mut db: SlasherDB, test_config: TestConfig) -> SlasherDB { let check_slashings = test_config.check_slashings; let num_validators = test_config.num_validators; let max_attestations = test_config.max_attestations; @@ -40,18 +50,17 @@ fn random_test(seed: u64, test_config: TestConfig) { println!("Running with seed {}", seed); let mut rng = StdRng::seed_from_u64(seed); - let tempdir = tempdir().unwrap(); - - let mut config = Config::new(tempdir.path().into()); + let mut config = 
Config::new(db.get_config().database_path.clone()); config.validator_chunk_size = 1 << rng.gen_range(1..4); let chunk_size_exponent = rng.gen_range(1..4); config.chunk_size = 1 << chunk_size_exponent; config.history_length = 1 << rng.gen_range(chunk_size_exponent..chunk_size_exponent + 3); - let spec = chain_spec(); + let config = Arc::new(config); + db.update_config(config.clone()); - let slasher = Slasher::::open(config.clone(), spec, test_logger()).unwrap(); + let slasher = Slasher::::from_config_and_db(config.clone(), db, test_logger()).unwrap(); let validators = (0..num_validators as u64).collect::>(); @@ -121,7 +130,7 @@ fn random_test(seed: u64, test_config: TestConfig) { } if !check_slashings { - return; + return slasher.into_reset_db().unwrap(); } slasher.process_queued(current_epoch).unwrap(); @@ -131,6 +140,9 @@ fn random_test(seed: u64, test_config: TestConfig) { let slashed_validators = slashed_validators_from_slashings(&slashings); let expected_slashed_validators = slashed_validators_from_attestations(&attestations); assert_eq!(slashed_validators, expected_slashed_validators); + + // Return the database for reuse. + slasher.into_reset_db().unwrap() } // Fuzz-like test that runs forever on different seeds looking for crashes. 
@@ -138,8 +150,9 @@ fn random_test(seed: u64, test_config: TestConfig) { #[ignore] fn no_crash() { let mut rng = thread_rng(); + let (_tempdir, mut db) = make_db(); loop { - random_test(rng.gen(), TestConfig::default()); + db = random_test(rng.gen(), db, TestConfig::default()); } } @@ -148,9 +161,11 @@ fn no_crash() { #[ignore] fn no_crash_with_blocks() { let mut rng = thread_rng(); + let (_tempdir, mut db) = make_db(); loop { - random_test( + db = random_test( rng.gen(), + db, TestConfig { add_blocks: true, ..TestConfig::default() @@ -164,9 +179,11 @@ fn no_crash_with_blocks() { #[ignore] fn check_slashings() { let mut rng = thread_rng(); + let (_tempdir, mut db) = make_db(); loop { - random_test( + db = random_test( rng.gen(), + db, TestConfig { check_slashings: true, ..TestConfig::default() @@ -177,8 +194,10 @@ fn check_slashings() { #[test] fn check_slashings_example1() { + let (_tempdir, db) = make_db(); random_test( 1, + db, TestConfig { check_slashings: true, ..TestConfig::default() @@ -188,8 +207,10 @@ fn check_slashings_example1() { #[test] fn check_slashings_example2() { + let (_tempdir, db) = make_db(); random_test( 2, + db, TestConfig { check_slashings: true, max_attestations: 3, @@ -200,8 +221,10 @@ fn check_slashings_example2() { #[test] fn check_slashings_example3() { + let (_tempdir, db) = make_db(); random_test( 3, + db, TestConfig { check_slashings: true, max_attestations: 100, @@ -212,23 +235,28 @@ fn check_slashings_example3() { #[test] fn no_crash_example1() { - random_test(1, TestConfig::default()); + let (_tempdir, db) = make_db(); + random_test(1, db, TestConfig::default()); } #[test] fn no_crash_example2() { - random_test(2, TestConfig::default()); + let (_tempdir, db) = make_db(); + random_test(2, db, TestConfig::default()); } #[test] fn no_crash_example3() { - random_test(3, TestConfig::default()); + let (_tempdir, db) = make_db(); + random_test(3, db, TestConfig::default()); } #[test] fn no_crash_blocks_example1() { + let (_tempdir, db) 
= make_db(); random_test( 1, + db, TestConfig { add_blocks: true, ..TestConfig::default() @@ -238,5 +266,6 @@ fn no_crash_blocks_example1() { #[test] fn no_crash_aug_24() { - random_test(13519442335106054152, TestConfig::default()) + let (_tempdir, db) = make_db(); + random_test(13519442335106054152, db, TestConfig::default()); } From d9571617403fa13d72683af0cefcf5a1b2de5a28 Mon Sep 17 00:00:00 2001 From: Lion - dapplion <35266934+dapplion@users.noreply.github.com> Date: Mon, 19 Aug 2024 11:21:10 +0200 Subject: [PATCH 43/43] Drop null_logger (#6013) * Drop null_logger --- Cargo.lock | 9 ++++--- beacon_node/beacon_chain/Cargo.toml | 1 - beacon_node/beacon_chain/src/eth1_chain.rs | 4 +-- .../beacon_chain/src/shuffling_cache.rs | 4 +-- beacon_node/eth1/Cargo.toml | 1 + beacon_node/eth1/tests/test.rs | 26 +++++++------------ beacon_node/execution_layer/Cargo.toml | 2 +- .../execution_layer/src/test_utils/mod.rs | 4 +-- beacon_node/genesis/tests/tests.rs | 2 +- beacon_node/http_api/Cargo.toml | 2 +- beacon_node/http_api/tests/tests.rs | 6 ++--- beacon_node/http_metrics/Cargo.toml | 2 +- beacon_node/http_metrics/tests/tests.rs | 4 +-- beacon_node/tests/test.rs | 2 +- common/logging/src/lib.rs | 4 +-- common/task_executor/src/test_utils.rs | 10 +------ lcli/Cargo.toml | 1 + lcli/src/transition_blocks.rs | 7 +++-- lighthouse/environment/src/lib.rs | 10 +++---- .../environment/tests/environment_builder.rs | 2 +- testing/web3signer_tests/Cargo.toml | 1 + testing/web3signer_tests/src/lib.rs | 3 ++- validator_client/src/doppelganger_service.rs | 4 +-- 23 files changed, 51 insertions(+), 60 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8f0a4cac6a..b2449529ab 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -780,7 +780,6 @@ dependencies = [ "bitvec 1.0.1", "bls", "derivative", - "environment", "eth1", "eth2", "eth2_network_config", @@ -2473,6 +2472,7 @@ dependencies = [ "execution_layer", "futures", "lighthouse_metrics", + "logging", "merkle_proof", "parking_lot 0.12.3", 
"sensitive_url", @@ -2957,7 +2957,6 @@ dependencies = [ "arc-swap", "builder_client", "bytes", - "environment", "eth2", "eth2_network_config", "ethereum_serde_utils", @@ -2972,6 +2971,7 @@ dependencies = [ "kzg", "lighthouse_metrics", "lighthouse_version", + "logging", "lru", "parking_lot 0.12.3", "pretty_reqwest_error", @@ -3834,7 +3834,6 @@ dependencies = [ "bs58 0.4.0", "bytes", "directory", - "environment", "eth1", "eth2", "ethereum_serde_utils", @@ -3876,10 +3875,10 @@ name = "http_metrics" version = "0.1.0" dependencies = [ "beacon_chain", - "environment", "lighthouse_metrics", "lighthouse_network", "lighthouse_version", + "logging", "malloc_utils", "reqwest", "serde", @@ -4464,6 +4463,7 @@ dependencies = [ "serde", "serde_json", "serde_yaml", + "sloggers", "snap", "state_processing", "store", @@ -9495,6 +9495,7 @@ dependencies = [ "eth2_keystore", "eth2_network_config", "futures", + "logging", "parking_lot 0.12.3", "reqwest", "serde", diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index cf1c6be33f..0deccfb622 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -15,7 +15,6 @@ test_backfill = [] [dev-dependencies] maplit = { workspace = true } -environment = { workspace = true } serde_json = { workspace = true } [dependencies] diff --git a/beacon_node/beacon_chain/src/eth1_chain.rs b/beacon_node/beacon_chain/src/eth1_chain.rs index b4005f22fd..20706ab820 100644 --- a/beacon_node/beacon_chain/src/eth1_chain.rs +++ b/beacon_node/beacon_chain/src/eth1_chain.rs @@ -685,7 +685,6 @@ fn is_candidate_block(block: &Eth1Block, period_start: u64, spec: &ChainSpec) -> #[cfg(test)] mod test { use super::*; - use environment::null_logger; use types::{DepositData, MinimalEthSpec, Signature}; type E = MinimalEthSpec; @@ -743,6 +742,7 @@ mod test { mod eth1_chain_json_backend { use super::*; use eth1::DepositLog; + use logging::test_logger; use types::{test_utils::generate_deterministic_keypair, 
MainnetEthSpec}; fn get_eth1_chain() -> Eth1Chain, E> { @@ -750,7 +750,7 @@ mod test { ..Eth1Config::default() }; - let log = null_logger().unwrap(); + let log = test_logger(); Eth1Chain::new( CachingEth1Backend::new(eth1_config, log, MainnetEthSpec::default_spec()).unwrap(), ) diff --git a/beacon_node/beacon_chain/src/shuffling_cache.rs b/beacon_node/beacon_chain/src/shuffling_cache.rs index 04d5888263..a662cc49c9 100644 --- a/beacon_node/beacon_chain/src/shuffling_cache.rs +++ b/beacon_node/beacon_chain/src/shuffling_cache.rs @@ -294,7 +294,7 @@ impl BlockShufflingIds { #[cfg(not(debug_assertions))] #[cfg(test)] mod test { - use task_executor::test_utils::null_logger; + use task_executor::test_utils::test_logger; use types::*; use crate::test_utils::EphemeralHarnessType; @@ -315,7 +315,7 @@ mod test { previous: Some(shuffling_id(current_epoch - 1)), block_root: Hash256::from_low_u64_le(0), }; - let logger = null_logger().unwrap(); + let logger = test_logger(); ShufflingCache::new(TEST_CACHE_SIZE, head_shuffling_ids, logger) } diff --git a/beacon_node/eth1/Cargo.toml b/beacon_node/eth1/Cargo.toml index 6733c5bfd3..4910cfd2e1 100644 --- a/beacon_node/eth1/Cargo.toml +++ b/beacon_node/eth1/Cargo.toml @@ -21,6 +21,7 @@ ethereum_ssz_derive = { workspace = true } tree_hash = { workspace = true } parking_lot = { workspace = true } slog = { workspace = true } +logging = { workspace = true } superstruct = { workspace = true } tokio = { workspace = true } state_processing = { workspace = true } diff --git a/beacon_node/eth1/tests/test.rs b/beacon_node/eth1/tests/test.rs index 0479ea7c58..cd35c521b6 100644 --- a/beacon_node/eth1/tests/test.rs +++ b/beacon_node/eth1/tests/test.rs @@ -4,10 +4,9 @@ use eth1::{Config, Eth1Endpoint, Service}; use eth1::{DepositCache, DEFAULT_CHAIN_ID}; use eth1_test_rig::{AnvilEth1Instance, Http, Middleware, Provider}; use execution_layer::http::{deposit_methods::*, HttpJsonRpc, Log}; +use logging::test_logger; use 
merkle_proof::verify_merkle_proof; use sensitive_url::SensitiveUrl; -use slog::Logger; -use sloggers::{null::NullLoggerBuilder, Build}; use std::ops::Range; use std::time::Duration; use tree_hash::TreeHash; @@ -15,16 +14,11 @@ use types::{DepositData, EthSpec, Hash256, Keypair, MainnetEthSpec, MinimalEthSp const DEPOSIT_CONTRACT_TREE_DEPTH: usize = 32; -pub fn null_logger() -> Logger { - let log_builder = NullLoggerBuilder; - log_builder.build().expect("should build logger") -} - pub fn new_env() -> Environment { EnvironmentBuilder::minimal() .multi_threaded_tokio_runtime() .expect("should start tokio runtime") - .null_logger() + .test_logger() .expect("should start null logger") .build() .expect("should build env") @@ -103,7 +97,7 @@ mod eth1_cache { #[tokio::test] async fn simple_scenario() { async { - let log = null_logger(); + let log = test_logger(); for follow_distance in 0..3 { let eth1 = new_anvil_instance() @@ -185,7 +179,7 @@ mod eth1_cache { #[tokio::test] async fn big_skip() { async { - let log = null_logger(); + let log = test_logger(); let eth1 = new_anvil_instance() .await @@ -240,7 +234,7 @@ mod eth1_cache { #[tokio::test] async fn pruning() { async { - let log = null_logger(); + let log = test_logger(); let eth1 = new_anvil_instance() .await @@ -292,7 +286,7 @@ mod eth1_cache { #[tokio::test] async fn double_update() { async { - let log = null_logger(); + let log = test_logger(); let n = 16; @@ -345,7 +339,7 @@ mod deposit_tree { #[tokio::test] async fn updating() { async { - let log = null_logger(); + let log = test_logger(); let n = 4; @@ -426,7 +420,7 @@ mod deposit_tree { #[tokio::test] async fn double_update() { async { - let log = null_logger(); + let log = test_logger(); let n = 8; @@ -688,7 +682,7 @@ mod fast { #[tokio::test] async fn deposit_cache_query() { async { - let log = null_logger(); + let log = test_logger(); let eth1 = new_anvil_instance() .await @@ -771,7 +765,7 @@ mod persist { #[tokio::test] async fn test_persist_caches() { 
async { - let log = null_logger(); + let log = test_logger(); let eth1 = new_anvil_instance() .await diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml index 0009cd002e..4cc373f295 100644 --- a/beacon_node/execution_layer/Cargo.toml +++ b/beacon_node/execution_layer/Cargo.toml @@ -10,6 +10,7 @@ alloy-primitives = { workspace = true } types = { workspace = true } tokio = { workspace = true } slog = { workspace = true } +logging = { workspace = true } sensitive_url = { workspace = true } reqwest = { workspace = true } ethereum_serde_utils = { workspace = true } @@ -17,7 +18,6 @@ serde_json = { workspace = true } serde = { workspace = true } warp = { workspace = true } jsonwebtoken = "8" -environment = { workspace = true } bytes = { workspace = true } task_executor = { workspace = true } hex = { workspace = true } diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs index 6fd853975d..004bb17616 100644 --- a/beacon_node/execution_layer/src/test_utils/mod.rs +++ b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -6,10 +6,10 @@ use crate::engine_api::{ }; use crate::json_structures::JsonClientVersionV1; use bytes::Bytes; -use environment::null_logger; use execution_block_generator::PoWBlock; use handle_rpc::handle_rpc; use kzg::Kzg; +use logging::test_logger; use parking_lot::{Mutex, RwLock, RwLockWriteGuard}; use serde::{Deserialize, Serialize}; use serde_json::json; @@ -149,7 +149,7 @@ impl MockServer { let ctx: Arc> = Arc::new(Context { config: server_config, jwt_key, - log: null_logger().unwrap(), + log: test_logger(), last_echo_request: last_echo_request.clone(), execution_block_generator: RwLock::new(execution_block_generator), previous_request: <_>::default(), diff --git a/beacon_node/genesis/tests/tests.rs b/beacon_node/genesis/tests/tests.rs index 1252e0100b..f33794814b 100644 --- a/beacon_node/genesis/tests/tests.rs +++ 
b/beacon_node/genesis/tests/tests.rs @@ -12,7 +12,7 @@ pub fn new_env() -> Environment { EnvironmentBuilder::minimal() .multi_threaded_tokio_runtime() .expect("should start tokio runtime") - .null_logger() + .test_logger() .expect("should start null logger") .build() .expect("should build env") diff --git a/beacon_node/http_api/Cargo.toml b/beacon_node/http_api/Cargo.toml index 2c54c1375a..068feea1df 100644 --- a/beacon_node/http_api/Cargo.toml +++ b/beacon_node/http_api/Cargo.toml @@ -44,10 +44,10 @@ bytes = { workspace = true } beacon_processor = { workspace = true } [dev-dependencies] -environment = { workspace = true } serde_json = { workspace = true } proto_array = { workspace = true } genesis = { workspace = true } +logging = { workspace = true } [[test]] name = "bn_http_api_tests" diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 9377e277c2..afed095dba 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -3,7 +3,6 @@ use beacon_chain::{ test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType}, BeaconChain, ChainConfig, StateSkipConfig, WhenSlotSkipped, }; -use environment::null_logger; use eth2::{ mixin::{RequestAccept, ResponseForkName, ResponseOptional}, reqwest::RequestBuilder, @@ -24,6 +23,7 @@ use http_api::{ BlockId, StateId, }; use lighthouse_network::{types::SyncState, Enr, EnrExt, PeerId}; +use logging::test_logger; use network::NetworkReceivers; use proto_array::ExecutionStatus; use sensitive_url::SensitiveUrl; @@ -251,7 +251,7 @@ impl ApiTester { "precondition: justification" ); - let log = null_logger().unwrap(); + let log = test_logger(); let ApiServer { ctx, @@ -349,7 +349,7 @@ impl ApiTester { let chain = harness.chain.clone(); - let log = null_logger().unwrap(); + let log = test_logger(); let ApiServer { ctx, diff --git a/beacon_node/http_metrics/Cargo.toml b/beacon_node/http_metrics/Cargo.toml index f8c93ad8fc..f835d13fb6 
100644 --- a/beacon_node/http_metrics/Cargo.toml +++ b/beacon_node/http_metrics/Cargo.toml @@ -22,5 +22,5 @@ malloc_utils = { workspace = true } [dev-dependencies] tokio = { workspace = true } reqwest = { workspace = true } -environment = { workspace = true } types = { workspace = true } +logging = { workspace = true } diff --git a/beacon_node/http_metrics/tests/tests.rs b/beacon_node/http_metrics/tests/tests.rs index b88a790afd..d903e233fb 100644 --- a/beacon_node/http_metrics/tests/tests.rs +++ b/beacon_node/http_metrics/tests/tests.rs @@ -1,6 +1,6 @@ use beacon_chain::test_utils::EphemeralHarnessType; -use environment::null_logger; use http_metrics::Config; +use logging::test_logger; use reqwest::header::HeaderValue; use reqwest::StatusCode; use std::net::{IpAddr, Ipv4Addr}; @@ -13,7 +13,7 @@ type Context = http_metrics::Context>; #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn returns_200_ok() { async { - let log = null_logger().unwrap(); + let log = test_logger(); let context = Arc::new(Context { config: Config { diff --git a/beacon_node/tests/test.rs b/beacon_node/tests/test.rs index bbec70330b..4be6536df9 100644 --- a/beacon_node/tests/test.rs +++ b/beacon_node/tests/test.rs @@ -25,7 +25,7 @@ fn build_node(env: &mut Environment) -> LocalBeaconNode { #[test] fn http_server_genesis_state() { let mut env = env_builder() - .null_logger() + .test_logger() //.async_logger("debug", None) .expect("should build env logger") .multi_threaded_tokio_runtime() diff --git a/common/logging/src/lib.rs b/common/logging/src/lib.rs index 5fc473c853..d3d91497cc 100644 --- a/common/logging/src/lib.rs +++ b/common/logging/src/lib.rs @@ -289,10 +289,10 @@ pub fn test_logger() -> Logger { sloggers::terminal::TerminalLoggerBuilder::new() .level(sloggers::types::Severity::Debug) .build() - .expect("Should build test_logger") + .expect("Should build TerminalLoggerBuilder") } else { sloggers::null::NullLoggerBuilder .build() - .expect("Should build null_logger") + 
.expect("Should build NullLoggerBuilder") } } diff --git a/common/task_executor/src/test_utils.rs b/common/task_executor/src/test_utils.rs index ec8f45d850..46fbff7eac 100644 --- a/common/task_executor/src/test_utils.rs +++ b/common/task_executor/src/test_utils.rs @@ -1,7 +1,6 @@ use crate::TaskExecutor; -use logging::test_logger; +pub use logging::test_logger; use slog::Logger; -use sloggers::{null::NullLoggerBuilder, Build}; use std::sync::Arc; use tokio::runtime; @@ -67,10 +66,3 @@ impl TestRuntime { self.task_executor.log = log; } } - -pub fn null_logger() -> Result { - let log_builder = NullLoggerBuilder; - log_builder - .build() - .map_err(|e| format!("Failed to start null logger: {:?}", e)) -} diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index 30721f3d5b..77d122efb7 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -14,6 +14,7 @@ jemalloc = ["malloc_utils/jemalloc"] bls = { workspace = true } clap = { workspace = true } log = { workspace = true } +sloggers = { workspace = true } serde = { workspace = true } serde_yaml = { workspace = true } serde_json = { workspace = true } diff --git a/lcli/src/transition_blocks.rs b/lcli/src/transition_blocks.rs index 62ae602187..ec3bb5b9ed 100644 --- a/lcli/src/transition_blocks.rs +++ b/lcli/src/transition_blocks.rs @@ -66,13 +66,14 @@ use beacon_chain::{ }; use clap::ArgMatches; use clap_utils::{parse_optional, parse_required}; -use environment::{null_logger, Environment}; +use environment::Environment; use eth2::{ types::{BlockId, StateId}, BeaconNodeHttpClient, SensitiveUrl, Timeouts, }; use eth2_network_config::Eth2NetworkConfig; use log::{debug, info}; +use sloggers::{null::NullLoggerBuilder, Build}; use ssz::Encode; use state_processing::state_advance::complete_state_advance; use state_processing::{ @@ -196,7 +197,9 @@ pub fn run( let store = HotColdDB::open_ephemeral( <_>::default(), spec.clone(), - null_logger().map_err(|e| format!("Failed to create null_logger: {:?}", e))?, + NullLoggerBuilder + .build() + 
.map_err(|e| format!("Error on NullLoggerBuilder: {:?}", e))?, ) .map_err(|e| format!("Failed to create ephemeral store: {:?}", e))?; let store = Arc::new(store); diff --git a/lighthouse/environment/src/lib.rs b/lighthouse/environment/src/lib.rs index a83a7a9157..aa2caa2350 100644 --- a/lighthouse/environment/src/lib.rs +++ b/lighthouse/environment/src/lib.rs @@ -12,7 +12,7 @@ use eth2_network_config::Eth2NetworkConfig; use futures::channel::mpsc::{channel, Receiver, Sender}; use futures::{future, StreamExt}; -use logging::SSELoggingComponents; +use logging::{test_logger, SSELoggingComponents}; use serde::{Deserialize, Serialize}; use slog::{error, info, o, warn, Drain, Duplicate, Level, Logger}; use sloggers::{file::FileLoggerBuilder, types::Format, types::Severity, Build}; @@ -34,8 +34,6 @@ use { #[cfg(not(target_family = "unix"))] use {futures::channel::oneshot, std::cell::RefCell}; -pub use task_executor::test_utils::null_logger; - const LOG_CHANNEL_SIZE: usize = 16384; const SSE_LOG_CHANNEL_SIZE: usize = 2048; /// The maximum time in seconds the client will wait for all internal tasks to shutdown. @@ -184,9 +182,9 @@ impl EnvironmentBuilder { Ok(self) } - /// Specifies that all logs should be sent to `null` (i.e., ignored). - pub fn null_logger(mut self) -> Result { - self.log = Some(null_logger()?); + /// Sets a logger suitable for test usage. 
+ pub fn test_logger(mut self) -> Result { + self.log = Some(test_logger()); Ok(self) } diff --git a/lighthouse/environment/tests/environment_builder.rs b/lighthouse/environment/tests/environment_builder.rs index ad775c99f5..b0c847612a 100644 --- a/lighthouse/environment/tests/environment_builder.rs +++ b/lighthouse/environment/tests/environment_builder.rs @@ -9,7 +9,7 @@ fn builder() -> EnvironmentBuilder { EnvironmentBuilder::mainnet() .multi_threaded_tokio_runtime() .expect("should set runtime") - .null_logger() + .test_logger() .expect("should set logger") } diff --git a/testing/web3signer_tests/Cargo.toml b/testing/web3signer_tests/Cargo.toml index 7321fc1384..db5c53e0ac 100644 --- a/testing/web3signer_tests/Cargo.toml +++ b/testing/web3signer_tests/Cargo.toml @@ -27,3 +27,4 @@ eth2_network_config = { workspace = true } serde_json = { workspace = true } zip = { workspace = true } parking_lot = { workspace = true } +logging = { workspace = true } diff --git a/testing/web3signer_tests/src/lib.rs b/testing/web3signer_tests/src/lib.rs index 13d92d2d85..f6ee01a4ba 100644 --- a/testing/web3signer_tests/src/lib.rs +++ b/testing/web3signer_tests/src/lib.rs @@ -22,6 +22,7 @@ mod tests { }; use eth2_keystore::KeystoreBuilder; use eth2_network_config::Eth2NetworkConfig; + use logging::test_logger; use parking_lot::Mutex; use reqwest::Client; use serde::Serialize; @@ -318,7 +319,7 @@ mod tests { using_web3signer: bool, spec: ChainSpec, ) -> Self { - let log = environment::null_logger().unwrap(); + let log = test_logger(); let validator_dir = TempDir::new().unwrap(); let config = validator_client::Config::default(); diff --git a/validator_client/src/doppelganger_service.rs b/validator_client/src/doppelganger_service.rs index 9f93795e29..2c8eca8560 100644 --- a/validator_client/src/doppelganger_service.rs +++ b/validator_client/src/doppelganger_service.rs @@ -687,8 +687,8 @@ impl DoppelgangerService { #[cfg(test)] mod test { use super::*; - use environment::null_logger; use 
futures::executor::block_on; + use logging::test_logger; use slot_clock::TestingSlotClock; use std::future; use std::time::Duration; @@ -732,7 +732,7 @@ mod test { fn build(self) -> TestScenario { let mut rng = XorShiftRng::from_seed([42; 16]); let slot_clock = TestingSlotClock::new(Slot::new(0), GENESIS_TIME, SLOT_DURATION); - let log = null_logger().unwrap(); + let log = test_logger(); TestScenario { validators: (0..self.validator_count)