From 561898fc1c74c11a1a765f252ae504f35263f6ed Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Thu, 19 Feb 2026 06:08:56 +0530 Subject: [PATCH 1/6] Process head_chains in descending order of number of peers (#8859) N/A Another find by @gitToki. Sort the preferred_ids in descending order as originally intended from the comment in the function. Co-Authored-By: Pawan Dhananjay --- beacon_node/network/src/sync/range_sync/chain_collection.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/beacon_node/network/src/sync/range_sync/chain_collection.rs b/beacon_node/network/src/sync/range_sync/chain_collection.rs index 1d57ee6c3d..bd4dd6c181 100644 --- a/beacon_node/network/src/sync/range_sync/chain_collection.rs +++ b/beacon_node/network/src/sync/range_sync/chain_collection.rs @@ -351,7 +351,8 @@ impl ChainCollection { .iter() .map(|(id, chain)| (chain.available_peers(), !chain.is_syncing(), *id)) .collect::>(); - preferred_ids.sort_unstable(); + // Sort in descending order + preferred_ids.sort_unstable_by(|a, b| b.cmp(a)); let mut syncing_chains = SmallVec::<[Id; PARALLEL_HEAD_CHAINS]>::new(); for (_, _, id) in preferred_ids { From 4588971085840dc56cedc85ba0f12bcaa99be8ed Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Thu, 19 Feb 2026 12:57:53 +1100 Subject: [PATCH 2/6] Add sync batch state metrics (#8847) Co-Authored-By: Jimmy Chen --- beacon_node/network/src/metrics.rs | 7 +++++ .../network/src/sync/backfill_sync/mod.rs | 20 +++++++++++++- beacon_node/network/src/sync/batch.rs | 26 ++++++++++++++++++- .../src/sync/custody_backfill_sync/mod.rs | 21 +++++++++++++-- beacon_node/network/src/sync/manager.rs | 3 +++ .../network/src/sync/range_sync/chain.rs | 11 +++++++- .../src/sync/range_sync/chain_collection.rs | 21 +++++++++++++++ .../network/src/sync/range_sync/range.rs | 4 +++ 8 files changed, 108 insertions(+), 5 deletions(-) diff --git a/beacon_node/network/src/metrics.rs b/beacon_node/network/src/metrics.rs index cea06a28c8..0fa95b4758 100644 
--- a/beacon_node/network/src/metrics.rs +++ b/beacon_node/network/src/metrics.rs @@ -462,6 +462,13 @@ pub static SYNCING_CHAIN_BATCH_AWAITING_PROCESSING: LazyLock> ]), ) }); +pub static SYNCING_CHAIN_BATCHES: LazyLock> = LazyLock::new(|| { + try_create_int_gauge_vec( + "sync_batches", + "Number of batches in sync chains by sync type and state", + &["sync_type", "state"], + ) +}); pub static SYNC_SINGLE_BLOCK_LOOKUPS: LazyLock> = LazyLock::new(|| { try_create_int_gauge( "sync_single_block_lookups", diff --git a/beacon_node/network/src/sync/backfill_sync/mod.rs b/beacon_node/network/src/sync/backfill_sync/mod.rs index 9802ec56a1..f18d31863b 100644 --- a/beacon_node/network/src/sync/backfill_sync/mod.rs +++ b/beacon_node/network/src/sync/backfill_sync/mod.rs @@ -8,9 +8,11 @@ //! If a batch fails, the backfill sync cannot progress. In this scenario, we mark the backfill //! sync as failed, log an error and attempt to retry once a new peer joins the node. +use crate::metrics; use crate::network_beacon_processor::ChainSegmentProcessId; use crate::sync::batch::{ - BatchConfig, BatchId, BatchInfo, BatchOperationOutcome, BatchProcessingResult, BatchState, + BatchConfig, BatchId, BatchInfo, BatchMetricsState, BatchOperationOutcome, + BatchProcessingResult, BatchState, }; use crate::sync::block_sidecar_coupling::CouplingError; use crate::sync::manager::BatchProcessResult; @@ -31,6 +33,7 @@ use std::collections::{ use std::hash::{Hash, Hasher}; use std::marker::PhantomData; use std::sync::Arc; +use strum::IntoEnumIterator; use tracing::{debug, error, info, warn}; use types::{ColumnIndex, Epoch, EthSpec}; @@ -1181,6 +1184,21 @@ impl BackFillSync { .epoch(T::EthSpec::slots_per_epoch()) } + pub fn register_metrics(&self) { + for state in BatchMetricsState::iter() { + let count = self + .batches + .values() + .filter(|b| b.state().metrics_state() == state) + .count(); + metrics::set_gauge_vec( + &metrics::SYNCING_CHAIN_BATCHES, + &["backfill", state.into()], + count as i64, + ); 
+ } + } + /// Updates the global network state indicating the current state of a backfill sync. fn set_state(&self, state: BackFillState) { *self.network_globals.backfill_state.write() = state; diff --git a/beacon_node/network/src/sync/batch.rs b/beacon_node/network/src/sync/batch.rs index 8de386f5be..8f8d39ca4b 100644 --- a/beacon_node/network/src/sync/batch.rs +++ b/beacon_node/network/src/sync/batch.rs @@ -10,10 +10,22 @@ use std::marker::PhantomData; use std::ops::Sub; use std::time::Duration; use std::time::Instant; -use strum::Display; +use strum::{Display, EnumIter, IntoStaticStr}; use types::Slot; use types::{DataColumnSidecarList, Epoch, EthSpec}; +/// Batch states used as metrics labels. +#[derive(Debug, Clone, Copy, PartialEq, Eq, EnumIter, IntoStaticStr)] +#[strum(serialize_all = "snake_case")] +pub enum BatchMetricsState { + AwaitingDownload, + Downloading, + AwaitingProcessing, + Processing, + AwaitingValidation, + Failed, +} + pub type BatchId = Epoch; /// Type of expected batch. @@ -142,6 +154,18 @@ impl BatchState { pub fn poison(&mut self) -> BatchState { std::mem::replace(self, BatchState::Poisoned) } + + /// Returns the metrics state for this batch. + pub fn metrics_state(&self) -> BatchMetricsState { + match self { + BatchState::AwaitingDownload => BatchMetricsState::AwaitingDownload, + BatchState::Downloading(_) => BatchMetricsState::Downloading, + BatchState::AwaitingProcessing(..) 
=> BatchMetricsState::AwaitingProcessing, + BatchState::Processing(_) => BatchMetricsState::Processing, + BatchState::AwaitingValidation(_) => BatchMetricsState::AwaitingValidation, + BatchState::Poisoned | BatchState::Failed => BatchMetricsState::Failed, + } + } } impl BatchInfo { diff --git a/beacon_node/network/src/sync/custody_backfill_sync/mod.rs b/beacon_node/network/src/sync/custody_backfill_sync/mod.rs index fa8b70c8b4..893aa849d3 100644 --- a/beacon_node/network/src/sync/custody_backfill_sync/mod.rs +++ b/beacon_node/network/src/sync/custody_backfill_sync/mod.rs @@ -12,14 +12,16 @@ use lighthouse_network::{ }; use logging::crit; use std::hash::{DefaultHasher, Hash, Hasher}; +use strum::IntoEnumIterator; use tracing::{debug, error, info, info_span, warn}; use types::{DataColumnSidecarList, Epoch, EthSpec}; +use crate::metrics; use crate::sync::{ backfill_sync::{BACKFILL_EPOCHS_PER_BATCH, ProcessResult, SyncStart}, batch::{ - BatchConfig, BatchId, BatchInfo, BatchOperationOutcome, BatchProcessingResult, BatchState, - ByRangeRequestType, + BatchConfig, BatchId, BatchInfo, BatchMetricsState, BatchOperationOutcome, + BatchProcessingResult, BatchState, ByRangeRequestType, }, block_sidecar_coupling::CouplingError, manager::CustodyBatchProcessResult, @@ -1114,6 +1116,21 @@ impl CustodyBackFillSync { *self.network_globals.custody_sync_state.write() = state; } + pub fn register_metrics(&self) { + for state in BatchMetricsState::iter() { + let count = self + .batches + .values() + .filter(|b| b.state().metrics_state() == state) + .count(); + metrics::set_gauge_vec( + &metrics::SYNCING_CHAIN_BATCHES, + &["custody_backfill", state.into()], + count as i64, + ); + } + } + /// A fully synced peer has joined us. /// If we are in a failed state, update a local variable to indicate we are able to restart /// the failed sync on the next attempt. 
diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 096ed9c328..c2faff5b62 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -784,6 +784,9 @@ impl SyncManager { } _ = register_metrics_interval.tick() => { self.network.register_metrics(); + self.range_sync.register_metrics(); + self.backfill_sync.register_metrics(); + self.custody_backfill_sync.register_metrics(); } _ = epoch_interval.tick() => { self.update_sync_state(); diff --git a/beacon_node/network/src/sync/range_sync/chain.rs b/beacon_node/network/src/sync/range_sync/chain.rs index d67d6468a9..61161ae6f4 100644 --- a/beacon_node/network/src/sync/range_sync/chain.rs +++ b/beacon_node/network/src/sync/range_sync/chain.rs @@ -3,7 +3,8 @@ use crate::metrics; use crate::network_beacon_processor::ChainSegmentProcessId; use crate::sync::batch::BatchId; use crate::sync::batch::{ - BatchConfig, BatchInfo, BatchOperationOutcome, BatchProcessingResult, BatchState, + BatchConfig, BatchInfo, BatchMetricsState, BatchOperationOutcome, BatchProcessingResult, + BatchState, }; use crate::sync::block_sidecar_coupling::CouplingError; use crate::sync::network_context::{RangeRequestId, RpcRequestSendError, RpcResponseError}; @@ -234,6 +235,14 @@ impl SyncingChain { .sum() } + /// Returns the number of batches in the given metrics state. + pub fn count_batches_in_state(&self, state: BatchMetricsState) -> usize { + self.batches + .values() + .filter(|b| b.state().metrics_state() == state) + .count() + } + /// Removes a peer from the chain. /// If the peer has active batches, those are considered failed and re-requested. 
pub fn remove_peer(&mut self, peer_id: &PeerId) -> ProcessingResult { diff --git a/beacon_node/network/src/sync/range_sync/chain_collection.rs b/beacon_node/network/src/sync/range_sync/chain_collection.rs index bd4dd6c181..b430b7c572 100644 --- a/beacon_node/network/src/sync/range_sync/chain_collection.rs +++ b/beacon_node/network/src/sync/range_sync/chain_collection.rs @@ -6,6 +6,7 @@ use super::chain::{ChainId, ProcessingResult, RemoveChain, SyncingChain}; use super::sync_type::RangeSyncType; use crate::metrics; +use crate::sync::batch::BatchMetricsState; use crate::sync::network_context::SyncNetworkContext; use beacon_chain::{BeaconChain, BeaconChainTypes}; use fnv::FnvHashMap; @@ -17,6 +18,7 @@ use smallvec::SmallVec; use std::collections::HashMap; use std::collections::hash_map::Entry; use std::sync::Arc; +use strum::IntoEnumIterator; use tracing::{debug, error}; use types::EthSpec; use types::{Epoch, Hash256, Slot}; @@ -516,6 +518,25 @@ impl ChainCollection { } } + pub fn register_metrics(&self) { + for (sync_type, chains) in [ + ("range_finalized", &self.finalized_chains), + ("range_head", &self.head_chains), + ] { + for state in BatchMetricsState::iter() { + let count: usize = chains + .values() + .map(|chain| chain.count_batches_in_state(state)) + .sum(); + metrics::set_gauge_vec( + &metrics::SYNCING_CHAIN_BATCHES, + &[sync_type, state.into()], + count as i64, + ); + } + } + } + fn update_metrics(&self) { metrics::set_gauge_vec( &metrics::SYNCING_CHAINS_COUNT, diff --git a/beacon_node/network/src/sync/range_sync/range.rs b/beacon_node/network/src/sync/range_sync/range.rs index c9656ad1d0..4c2123451a 100644 --- a/beacon_node/network/src/sync/range_sync/range.rs +++ b/beacon_node/network/src/sync/range_sync/range.rs @@ -371,6 +371,10 @@ where .update(network, &local, &mut self.awaiting_head_peers); } + pub fn register_metrics(&self) { + self.chains.register_metrics(); + } + /// Kickstarts sync. 
pub fn resume(&mut self, network: &mut SyncNetworkContext) { for (removed_chain, sync_type, remove_reason) in From 8cf6ffac4b1954bfc61939e116afd5e2ab349dbb Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Wed, 25 Feb 2026 23:10:41 +1100 Subject: [PATCH 3/6] Update yanked keccak 0.1.5 to 0.1.6 (#8900) Co-Authored-By: Jimmy Chen --- Cargo.lock | 7 +++---- Cargo.toml | 1 + 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7d75f5c197..dd1637045b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4832,9 +4832,9 @@ dependencies = [ [[package]] name = "keccak" -version = "0.1.5" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecc2af9a1119c51f12a14607e783cb977bde58bc069ff0c3da1095e635d70654" +checksum = "cb26cec98cce3a3d96cbb7bced3c4b16e3d13f27ec56dbd62cbc8f39cfb9d653" dependencies = [ "cpufeatures", ] @@ -10607,8 +10607,7 @@ dependencies = [ [[package]] name = "yamux" version = "0.13.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "deab71f2e20691b4728b349c6cee8fc7223880fa67b6b4f92225ec32225447e5" +source = "git+https://github.com/sigp/rust-yamux?rev=575b17c0f44f4253079a6bafaa2de74ca1d6dfaa#575b17c0f44f4253079a6bafaa2de74ca1d6dfaa" dependencies = [ "futures", "log", diff --git a/Cargo.toml b/Cargo.toml index aac26e060b..61caacf5df 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -303,3 +303,4 @@ debug = true [patch.crates-io] quick-protobuf = { git = "https://github.com/sigp/quick-protobuf.git", rev = "681f413312404ab6e51f0b46f39b0075c6f4ebfd" } +yamux = { git = "https://github.com/sigp/rust-yamux", rev = "575b17c0f44f4253079a6bafaa2de74ca1d6dfaa" } From 95f12d0927831971ca9e1acf7ca7e87fdda56f4a Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Fri, 27 Feb 2026 16:48:56 +1100 Subject: [PATCH 4/6] Bump version to v8.1.1 (#8853) --- Cargo.lock | 14 +++++++------- Cargo.toml | 2 +- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock 
index dd1637045b..40c550f4c6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4,7 +4,7 @@ version = 4 [[package]] name = "account_manager" -version = "8.1.0" +version = "8.1.1" dependencies = [ "account_utils", "bls", @@ -1276,7 +1276,7 @@ dependencies = [ [[package]] name = "beacon_node" -version = "8.1.0" +version = "8.1.1" dependencies = [ "account_utils", "beacon_chain", @@ -1513,7 +1513,7 @@ dependencies = [ [[package]] name = "boot_node" -version = "8.1.0" +version = "8.1.1" dependencies = [ "beacon_node", "bytes", @@ -4897,7 +4897,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lcli" -version = "8.1.0" +version = "8.1.1" dependencies = [ "account_utils", "beacon_chain", @@ -5383,7 +5383,7 @@ dependencies = [ [[package]] name = "lighthouse" -version = "8.1.0" +version = "8.1.1" dependencies = [ "account_manager", "account_utils", @@ -5515,7 +5515,7 @@ dependencies = [ [[package]] name = "lighthouse_version" -version = "8.1.0" +version = "8.1.1" dependencies = [ "regex", ] @@ -9622,7 +9622,7 @@ dependencies = [ [[package]] name = "validator_client" -version = "8.1.0" +version = "8.1.1" dependencies = [ "account_utils", "beacon_node_fallback", diff --git a/Cargo.toml b/Cargo.toml index 61caacf5df..5f6f43d2f2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -91,7 +91,7 @@ resolver = "2" [workspace.package] edition = "2024" -version = "8.1.0" +version = "8.1.1" [workspace.dependencies] account_utils = { path = "common/account_utils" } From 6194dddc5b9ea176fc38796fb5de6c7fac8a8143 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 2 Mar 2026 17:43:51 +1100 Subject: [PATCH 5/6] Persist custody context more readily (#8921) We received a bug report of a node restarting custody backfill unnecessarily after upgrading to Lighthouse v8.1.1. What happened is: - User started LH v8.0.1 many months ago, CGC updated 0 -> N but the CGC was not eagerly persisted. - LH experienced an unclean shutdown (not sure of what type). 
- Upon restarting (still running v8.0.1), the custody context read from disk contains CGC=0: `DEBUG Loaded persisted custody context custody_context: CustodyContext { validator_custody_count: 0, ...`). - CGC updates again to N, retriggering custody backfill: `DEBUG Validator count at head updated old_count: 0, new_count: N`. - Custody backfill does a bunch of downloading for no gain: `DEBUG Imported historical data columns epoch: Epoch(428433), total_imported: 0` - While custody backfill is running user updated to v8.1.1, and we see logs for the CGC=N being persisted upon clean shutdown, and then correctly read on startup with v8.1.1. - Custody backfill keeps running and downloading due to the CGC change still being considered in progress. - Call `persist_custody_context` inside the `register_validators` handler so that it is written to disk eagerly whenever it changes. The performance impact of this should be minimal as the amount of data is very small and this call can only happen at most ~128 times (once for each change) in the entire life of a beacon node. - Call `persist_custody_context` inside `BeaconChainBuilder::build` so that changes caused by CLI flags are persisted (otherwise starting a node with `--semi-supernode` and no validators, then shutting it down uncleanly would cause us to forget the CGC). These changes greatly reduce the timespan during which an unclean shutdown can create inconsistency. In the worst case, we only lose backfill progress that runs concurrently with the `register_validators` handler (should be extremely minimal, nigh impossible).
Co-Authored-By: Michael Sproul --- beacon_node/beacon_chain/src/beacon_chain.rs | 13 ++++++++++++- beacon_node/beacon_chain/src/builder.rs | 5 +++++ beacon_node/http_api/src/validator/mod.rs | 12 ++++++++++++ 3 files changed, 29 insertions(+), 1 deletion(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 9d204ac7f2..703ed24420 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -662,7 +662,18 @@ impl BeaconChain { .custody_context() .as_ref() .into(); - debug!(?custody_context, "Persisting custody context to store"); + + // Pattern match to avoid accidentally missing fields and to ignore deprecated fields. + let CustodyContextSsz { + validator_custody_at_head, + epoch_validator_custody_requirements, + persisted_is_supernode: _, + } = &custody_context; + debug!( + validator_custody_at_head, + ?epoch_validator_custody_requirements, + "Persisting custody context to store" + ); persist_custody_context::( self.store.clone(), diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 2c1dae9215..66a54d46e8 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -1083,6 +1083,11 @@ where let cgc_change_effective_slot = cgc_changed.effective_epoch.start_slot(E::slots_per_epoch()); beacon_chain.update_data_column_custody_info(Some(cgc_change_effective_slot)); + + // Persist change to disk. + beacon_chain + .persist_custody_context() + .map_err(|e| format!("Failed writing updated CGC: {e:?}"))?; } info!( diff --git a/beacon_node/http_api/src/validator/mod.rs b/beacon_node/http_api/src/validator/mod.rs index df237d9f9b..a9082df715 100644 --- a/beacon_node/http_api/src/validator/mod.rs +++ b/beacon_node/http_api/src/validator/mod.rs @@ -727,6 +727,18 @@ pub fn post_validator_prepare_beacon_proposer( debug!(error = %e, "Could not send message to the network service. 
\ Likely shutdown") }); + + // Write the updated custody context to disk. This happens at most 128 + // times ever, so the I/O burden should be extremely minimal. Without a + // write here we risk forgetting custody backfill progress upon an + // unclean shutdown. The custody context is otherwise only persisted in + // `BeaconChain::drop`. + if let Err(error) = chain.persist_custody_context() { + error!( + ?error, + "Failed to persist custody context after CGC update" + ); + } } } From f4b5b033a227fcacdb8e8514bbca6cf6702f3a24 Mon Sep 17 00:00:00 2001 From: Mac L Date: Mon, 2 Mar 2026 09:19:41 +0200 Subject: [PATCH 6/6] Add `testing` feature to validator_client/http_api (#8909) Create a `testing` feature which we can use to gate off `test_utils.rs` and its associated dependencies from the rest of the crate. Co-Authored-By: Mac L --- validator_client/http_api/Cargo.toml | 12 +++++++++--- validator_client/http_api/src/lib.rs | 4 +++- validator_manager/Cargo.toml | 2 +- 3 files changed, 13 insertions(+), 5 deletions(-) diff --git a/validator_client/http_api/Cargo.toml b/validator_client/http_api/Cargo.toml index 2bd57867ac..e334ab9db0 100644 --- a/validator_client/http_api/Cargo.toml +++ b/validator_client/http_api/Cargo.toml @@ -8,14 +8,17 @@ authors = ["Sigma Prime "] name = "validator_http_api" path = "src/lib.rs" +[features] +testing = ["dep:deposit_contract", "dep:doppelganger_service", "dep:tempfile"] + [dependencies] account_utils = { workspace = true } beacon_node_fallback = { workspace = true } bls = { workspace = true } -deposit_contract = { workspace = true } +deposit_contract = { workspace = true, optional = true } directory = { workspace = true } dirs = { workspace = true } -doppelganger_service = { workspace = true } +doppelganger_service = { workspace = true, optional = true } eth2 = { workspace = true, features = ["lighthouse"] } eth2_keystore = { workspace = true } ethereum_serde_utils = { workspace = true } @@ -38,7 +41,7 @@ slot_clock = { workspace = 
true } sysinfo = { workspace = true } system_health = { workspace = true } task_executor = { workspace = true } -tempfile = { workspace = true } +tempfile = { workspace = true, optional = true } tokio = { workspace = true } tokio-stream = { workspace = true } tracing = { workspace = true } @@ -53,7 +56,10 @@ warp_utils = { workspace = true } zeroize = { workspace = true } [dev-dependencies] +deposit_contract = { workspace = true } +doppelganger_service = { workspace = true } futures = { workspace = true } itertools = { workspace = true } rand = { workspace = true, features = ["small_rng"] } ssz_types = { workspace = true } +tempfile = { workspace = true } diff --git a/validator_client/http_api/src/lib.rs b/validator_client/http_api/src/lib.rs index a35b4ec6c6..8e9c077e57 100644 --- a/validator_client/http_api/src/lib.rs +++ b/validator_client/http_api/src/lib.rs @@ -1,3 +1,6 @@ +#[cfg(feature = "testing")] +pub mod test_utils; + mod api_secret; mod create_signed_voluntary_exit; mod create_validator; @@ -6,7 +9,6 @@ mod keystores; mod remotekeys; mod tests; -pub mod test_utils; pub use api_secret::PK_FILENAME; use graffiti::{delete_graffiti, get_graffiti, set_graffiti}; diff --git a/validator_manager/Cargo.toml b/validator_manager/Cargo.toml index 16ce1e023f..d0155698b4 100644 --- a/validator_manager/Cargo.toml +++ b/validator_manager/Cargo.toml @@ -29,4 +29,4 @@ beacon_chain = { workspace = true } http_api = { workspace = true } regex = { workspace = true } tempfile = { workspace = true } -validator_http_api = { workspace = true } +validator_http_api = { workspace = true, features = ["testing"] }