Integrate tracing (#6339)

Tracing Integration
- [reference](5bbf1859e9/projects/project-ideas.md#L297)


- [x] replace slog & log with tracing throughout the codebase
- [x] implement custom crit log
- [x] make relevant changes in the formatter
- [x] replace sloggers
- [x] re-write SSE logging components

cc: @macladson @eserilev
This commit is contained in:
ThreeHrSleep
2025-03-13 04:01:05 +05:30
committed by GitHub
parent f23f984f85
commit d60c24ef1c
241 changed files with 9485 additions and 9328 deletions

View File

@@ -33,7 +33,6 @@ safe_arith = { workspace = true }
sensitive_url = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
slog = { workspace = true }
slot_clock = { workspace = true }
state_processing = { workspace = true }
store = { workspace = true }
@@ -42,6 +41,7 @@ system_health = { path = "../../common/system_health" }
task_executor = { workspace = true }
tokio = { workspace = true }
tokio-stream = { workspace = true }
tracing = { workspace = true }
tree_hash = { workspace = true }
types = { workspace = true }
warp = { workspace = true }
@@ -49,7 +49,6 @@ warp_utils = { workspace = true }
[dev-dependencies]
genesis = { workspace = true }
logging = { workspace = true }
proto_array = { workspace = true }
serde_json = { workspace = true }

View File

@@ -1,10 +1,10 @@
use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes, WhenSlotSkipped};
use eth2::lighthouse::{BlockReward, BlockRewardsQuery};
use lru::LruCache;
use slog::{debug, warn, Logger};
use state_processing::BlockReplayer;
use std::num::NonZeroUsize;
use std::sync::Arc;
use tracing::{debug, warn};
use types::beacon_block::BlindedBeaconBlock;
use types::non_zero_usize::new_non_zero_usize;
use warp_utils::reject::{beacon_state_error, custom_bad_request, unhandled_error};
@@ -15,7 +15,6 @@ const STATE_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(2);
pub fn get_block_rewards<T: BeaconChainTypes>(
query: BlockRewardsQuery,
chain: Arc<BeaconChain<T>>,
log: Logger,
) -> Result<Vec<BlockReward>, warp::Rejection> {
let start_slot = query.start_slot;
let end_slot = query.end_slot;
@@ -81,12 +80,7 @@ pub fn get_block_rewards<T: BeaconChainTypes>(
.map_err(unhandled_error)?;
if block_replayer.state_root_miss() {
warn!(
log,
"Block reward state root miss";
"start_slot" => start_slot,
"end_slot" => end_slot,
);
warn!(%start_slot, %end_slot, "Block reward state root miss");
}
drop(block_replayer);
@@ -98,7 +92,6 @@ pub fn get_block_rewards<T: BeaconChainTypes>(
pub fn compute_block_rewards<T: BeaconChainTypes>(
blocks: Vec<BlindedBeaconBlock<T::EthSpec>>,
chain: Arc<BeaconChain<T>>,
log: Logger,
) -> Result<Vec<BlockReward>, warp::Rejection> {
let mut block_rewards = Vec::with_capacity(blocks.len());
let mut state_cache = LruCache::new(STATE_CACHE_SIZE);
@@ -110,18 +103,16 @@ pub fn compute_block_rewards<T: BeaconChainTypes>(
// Check LRU cache for a constructed state from a previous iteration.
let state = if let Some(state) = state_cache.get(&(parent_root, block.slot())) {
debug!(
log,
"Re-using cached state for block rewards";
"parent_root" => ?parent_root,
"slot" => block.slot(),
?parent_root,
slot = %block.slot(),
"Re-using cached state for block rewards"
);
state
} else {
debug!(
log,
"Fetching state for block rewards";
"parent_root" => ?parent_root,
"slot" => block.slot()
?parent_root,
slot = %block.slot(),
"Fetching state for block rewards"
);
let parent_block = chain
.get_blinded_block(&parent_root)
@@ -152,10 +143,9 @@ pub fn compute_block_rewards<T: BeaconChainTypes>(
if block_replayer.state_root_miss() {
warn!(
log,
"Block reward state root miss";
"parent_slot" => parent_block.slot(),
"slot" => block.slot(),
parent_slot = %parent_block.slot(),
slot = %block.slot(),
"Block reward state root miss"
);
}

View File

@@ -55,7 +55,7 @@ use health_metrics::observe::Observe;
use lighthouse_network::rpc::methods::MetaData;
use lighthouse_network::{types::SyncState, EnrExt, NetworkGlobals, PeerId, PubsubMessage};
use lighthouse_version::version_with_platform;
use logging::SSELoggingComponents;
use logging::{crit, SSELoggingComponents};
use network::{NetworkMessage, NetworkSenders, ValidatorSubscriptionMessage};
use operation_pool::ReceivedPreCapella;
use parking_lot::RwLock;
@@ -64,7 +64,6 @@ pub use publish_blocks::{
};
use serde::{Deserialize, Serialize};
use serde_json::Value;
use slog::{crit, debug, error, info, warn, Logger};
use slot_clock::SlotClock;
use ssz::Encode;
pub use state_id::StateId;
@@ -84,6 +83,7 @@ use tokio_stream::{
wrappers::{errors::BroadcastStreamRecvError, BroadcastStream},
StreamExt,
};
use tracing::{debug, error, info, warn};
use types::{
fork_versioned_response::EmptyMetadata, Attestation, AttestationData, AttestationShufflingId,
AttesterSlashing, BeaconStateError, ChainSpec, CommitteeCache, ConfigAndPreset, Epoch, EthSpec,
@@ -132,7 +132,6 @@ pub struct Context<T: BeaconChainTypes> {
pub beacon_processor_reprocess_send: Option<Sender<ReprocessQueueMessage>>,
pub eth1_service: Option<eth1::Service>,
pub sse_logging_components: Option<SSELoggingComponents>,
pub log: Logger,
}
/// Configuration for the HTTP server.
@@ -186,40 +185,6 @@ impl From<String> for Error {
}
}
/// Builds a `warp` request-logging filter that emits `slog` records.
///
/// Routine responses (200 OK, 404 Not Found, 206 Partial Content) are logged
/// at `debug` level; every other status is treated as noteworthy and logged
/// at `warn` level. Each record carries the elapsed time, status code, path
/// and HTTP method of the request.
pub fn slog_logging(
    log: Logger,
) -> warp::filters::log::Log<impl Fn(warp::filters::log::Info) + Clone> {
    warp::log::custom(move |info| {
        let status = info.status();
        // Expected/benign statuses stay at debug so they don't flood the logs.
        let is_routine = status == StatusCode::OK
            || status == StatusCode::NOT_FOUND
            || status == StatusCode::PARTIAL_CONTENT;
        if is_routine {
            debug!(
                log,
                "Processed HTTP API request";
                "elapsed" => format!("{:?}", info.elapsed()),
                "status" => status.to_string(),
                "path" => info.path(),
                "method" => info.method().to_string(),
            );
        } else {
            warn!(
                log,
                "Error processing HTTP API request";
                "elapsed" => format!("{:?}", info.elapsed()),
                "status" => status.to_string(),
                "path" => info.path(),
                "method" => info.method().to_string(),
            );
        }
    })
}
/// Creates a `warp` logging wrapper which we use for Prometheus metrics (not necessarily logging,
/// per se).
pub fn prometheus_metrics() -> warp::filters::log::Log<impl Fn(warp::filters::log::Info) + Clone> {
@@ -307,7 +272,6 @@ pub fn serve<T: BeaconChainTypes>(
shutdown: impl Future<Output = ()> + Send + Sync + 'static,
) -> Result<HttpServer, Error> {
let config = ctx.config.clone();
let log = ctx.log.clone();
// Configure CORS.
let cors_builder = {
@@ -324,7 +288,7 @@ pub fn serve<T: BeaconChainTypes>(
// Sanity check.
if !config.enabled {
crit!(log, "Cannot start disabled HTTP server");
crit!("Cannot start disabled HTTP server");
return Err(Error::Other(
"A disabled server should not be started".to_string(),
));
@@ -485,10 +449,6 @@ pub fn serve<T: BeaconChainTypes>(
}
});
// Create a `warp` filter that provides access to the logger.
let inner_ctx = ctx.clone();
let log_filter = warp::any().map(move || inner_ctx.log.clone());
let inner_components = ctx.sse_logging_components.clone();
let sse_component_filter = warp::any().map(move || inner_components.clone());
@@ -1284,21 +1244,18 @@ pub fn serve<T: BeaconChainTypes>(
.and(chain_filter.clone())
.and(network_tx_filter.clone())
.and(network_globals.clone())
.and(log_filter.clone())
.then(
move |block_contents: PublishBlockRequest<T::EthSpec>,
task_spawner: TaskSpawner<T::EthSpec>,
chain: Arc<BeaconChain<T>>,
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
network_globals: Arc<NetworkGlobals<T::EthSpec>>,
log: Logger| {
network_globals: Arc<NetworkGlobals<T::EthSpec>>| {
task_spawner.spawn_async_with_rejection(Priority::P0, async move {
publish_blocks::publish_block(
None,
ProvenancedBlock::local_from_publish_request(block_contents),
chain,
&network_tx,
log,
BroadcastValidation::default(),
duplicate_block_status_code,
network_globals,
@@ -1318,15 +1275,13 @@ pub fn serve<T: BeaconChainTypes>(
.and(chain_filter.clone())
.and(network_tx_filter.clone())
.and(network_globals.clone())
.and(log_filter.clone())
.then(
move |block_bytes: Bytes,
consensus_version: ForkName,
task_spawner: TaskSpawner<T::EthSpec>,
chain: Arc<BeaconChain<T>>,
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
network_globals: Arc<NetworkGlobals<T::EthSpec>>,
log: Logger| {
network_globals: Arc<NetworkGlobals<T::EthSpec>>| {
task_spawner.spawn_async_with_rejection(Priority::P0, async move {
let block_contents = PublishBlockRequest::<T::EthSpec>::from_ssz_bytes(
&block_bytes,
@@ -1340,7 +1295,6 @@ pub fn serve<T: BeaconChainTypes>(
ProvenancedBlock::local_from_publish_request(block_contents),
chain,
&network_tx,
log,
BroadcastValidation::default(),
duplicate_block_status_code,
network_globals,
@@ -1360,22 +1314,19 @@ pub fn serve<T: BeaconChainTypes>(
.and(chain_filter.clone())
.and(network_tx_filter.clone())
.and(network_globals.clone())
.and(log_filter.clone())
.then(
move |validation_level: api_types::BroadcastValidationQuery,
block_contents: PublishBlockRequest<T::EthSpec>,
task_spawner: TaskSpawner<T::EthSpec>,
chain: Arc<BeaconChain<T>>,
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
network_globals: Arc<NetworkGlobals<T::EthSpec>>,
log: Logger| {
network_globals: Arc<NetworkGlobals<T::EthSpec>>| {
task_spawner.spawn_async_with_rejection(Priority::P0, async move {
publish_blocks::publish_block(
None,
ProvenancedBlock::local_from_publish_request(block_contents),
chain,
&network_tx,
log,
validation_level.broadcast_validation,
duplicate_block_status_code,
network_globals,
@@ -1396,7 +1347,6 @@ pub fn serve<T: BeaconChainTypes>(
.and(chain_filter.clone())
.and(network_tx_filter.clone())
.and(network_globals.clone())
.and(log_filter.clone())
.then(
move |validation_level: api_types::BroadcastValidationQuery,
block_bytes: Bytes,
@@ -1404,8 +1354,7 @@ pub fn serve<T: BeaconChainTypes>(
task_spawner: TaskSpawner<T::EthSpec>,
chain: Arc<BeaconChain<T>>,
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
network_globals: Arc<NetworkGlobals<T::EthSpec>>,
log: Logger| {
network_globals: Arc<NetworkGlobals<T::EthSpec>>| {
task_spawner.spawn_async_with_rejection(Priority::P0, async move {
let block_contents = PublishBlockRequest::<T::EthSpec>::from_ssz_bytes(
&block_bytes,
@@ -1419,7 +1368,6 @@ pub fn serve<T: BeaconChainTypes>(
ProvenancedBlock::local_from_publish_request(block_contents),
chain,
&network_tx,
log,
validation_level.broadcast_validation,
duplicate_block_status_code,
network_globals,
@@ -1443,20 +1391,17 @@ pub fn serve<T: BeaconChainTypes>(
.and(chain_filter.clone())
.and(network_tx_filter.clone())
.and(network_globals.clone())
.and(log_filter.clone())
.then(
move |block_contents: Arc<SignedBlindedBeaconBlock<T::EthSpec>>,
task_spawner: TaskSpawner<T::EthSpec>,
chain: Arc<BeaconChain<T>>,
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
network_globals: Arc<NetworkGlobals<T::EthSpec>>,
log: Logger| {
network_globals: Arc<NetworkGlobals<T::EthSpec>>| {
task_spawner.spawn_async_with_rejection(Priority::P0, async move {
publish_blocks::publish_blinded_block(
block_contents,
chain,
&network_tx,
log,
BroadcastValidation::default(),
duplicate_block_status_code,
network_globals,
@@ -1476,14 +1421,12 @@ pub fn serve<T: BeaconChainTypes>(
.and(chain_filter.clone())
.and(network_tx_filter.clone())
.and(network_globals.clone())
.and(log_filter.clone())
.then(
move |block_bytes: Bytes,
task_spawner: TaskSpawner<T::EthSpec>,
chain: Arc<BeaconChain<T>>,
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
network_globals: Arc<NetworkGlobals<T::EthSpec>>,
log: Logger| {
network_globals: Arc<NetworkGlobals<T::EthSpec>>| {
task_spawner.spawn_async_with_rejection(Priority::P0, async move {
let block = SignedBlindedBeaconBlock::<T::EthSpec>::from_ssz_bytes(
&block_bytes,
@@ -1497,7 +1440,6 @@ pub fn serve<T: BeaconChainTypes>(
block,
chain,
&network_tx,
log,
BroadcastValidation::default(),
duplicate_block_status_code,
network_globals,
@@ -1517,21 +1459,18 @@ pub fn serve<T: BeaconChainTypes>(
.and(chain_filter.clone())
.and(network_tx_filter.clone())
.and(network_globals.clone())
.and(log_filter.clone())
.then(
move |validation_level: api_types::BroadcastValidationQuery,
blinded_block: Arc<SignedBlindedBeaconBlock<T::EthSpec>>,
task_spawner: TaskSpawner<T::EthSpec>,
chain: Arc<BeaconChain<T>>,
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
network_globals: Arc<NetworkGlobals<T::EthSpec>>,
log: Logger| {
network_globals: Arc<NetworkGlobals<T::EthSpec>>| {
task_spawner.spawn_async_with_rejection(Priority::P0, async move {
publish_blocks::publish_blinded_block(
blinded_block,
chain,
&network_tx,
log,
validation_level.broadcast_validation,
duplicate_block_status_code,
network_globals,
@@ -1551,15 +1490,13 @@ pub fn serve<T: BeaconChainTypes>(
.and(chain_filter.clone())
.and(network_tx_filter.clone())
.and(network_globals.clone())
.and(log_filter.clone())
.then(
move |validation_level: api_types::BroadcastValidationQuery,
block_bytes: Bytes,
task_spawner: TaskSpawner<T::EthSpec>,
chain: Arc<BeaconChain<T>>,
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
network_globals: Arc<NetworkGlobals<T::EthSpec>>,
log: Logger| {
network_globals: Arc<NetworkGlobals<T::EthSpec>>| {
task_spawner.spawn_async_with_rejection(Priority::P0, async move {
let block = SignedBlindedBeaconBlock::<T::EthSpec>::from_ssz_bytes(
&block_bytes,
@@ -1573,7 +1510,6 @@ pub fn serve<T: BeaconChainTypes>(
block,
chain,
&network_tx,
log,
validation_level.broadcast_validation,
duplicate_block_status_code,
network_globals,
@@ -1843,14 +1779,12 @@ pub fn serve<T: BeaconChainTypes>(
.and(warp_utils::json::json())
.and(network_tx_filter.clone())
.and(reprocess_send_filter.clone())
.and(log_filter.clone())
.then(
|task_spawner: TaskSpawner<T::EthSpec>,
chain: Arc<BeaconChain<T>>,
attestations: Vec<Attestation<T::EthSpec>>,
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
reprocess_tx: Option<Sender<ReprocessQueueMessage>>,
log: Logger| async move {
reprocess_tx: Option<Sender<ReprocessQueueMessage>>| async move {
let attestations = attestations.into_iter().map(Either::Left).collect();
let result = crate::publish_attestations::publish_attestations(
task_spawner,
@@ -1858,7 +1792,6 @@ pub fn serve<T: BeaconChainTypes>(
attestations,
network_tx,
reprocess_tx,
log,
)
.await
.map(|()| warp::reply::json(&()));
@@ -1874,25 +1807,22 @@ pub fn serve<T: BeaconChainTypes>(
.and(optional_consensus_version_header_filter)
.and(network_tx_filter.clone())
.and(reprocess_send_filter.clone())
.and(log_filter.clone())
.then(
|task_spawner: TaskSpawner<T::EthSpec>,
chain: Arc<BeaconChain<T>>,
payload: Value,
fork_name: Option<ForkName>,
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
reprocess_tx: Option<Sender<ReprocessQueueMessage>>,
log: Logger| async move {
reprocess_tx: Option<Sender<ReprocessQueueMessage>>| async move {
let attestations =
match crate::publish_attestations::deserialize_attestation_payload::<T>(
payload, fork_name, &log,
payload, fork_name,
) {
Ok(attestations) => attestations,
Err(err) => {
warn!(
log,
"Unable to deserialize attestation POST request";
"error" => ?err
error = ?err,
"Unable to deserialize attestation POST request"
);
return warp::reply::with_status(
warp::reply::json(
@@ -1910,7 +1840,6 @@ pub fn serve<T: BeaconChainTypes>(
attestations,
network_tx,
reprocess_tx,
log,
)
.await
.map(|()| warp::reply::json(&()));
@@ -2185,16 +2114,14 @@ pub fn serve<T: BeaconChainTypes>(
.and(warp::path::end())
.and(warp_utils::json::json())
.and(network_tx_filter.clone())
.and(log_filter.clone())
.then(
|task_spawner: TaskSpawner<T::EthSpec>,
chain: Arc<BeaconChain<T>>,
signatures: Vec<SyncCommitteeMessage>,
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
log: Logger| {
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>| {
task_spawner.blocking_json_task(Priority::P0, move || {
sync_committees::process_sync_committee_signatures(
signatures, network_tx, &chain, log,
signatures, network_tx, &chain,
)?;
Ok(api_types::GenericResponse::from(()))
})
@@ -2222,13 +2149,11 @@ pub fn serve<T: BeaconChainTypes>(
.and(warp::path::end())
.and(warp_utils::json::json())
.and(network_tx_filter.clone())
.and(log_filter.clone())
.then(
|task_spawner: TaskSpawner<T::EthSpec>,
chain: Arc<BeaconChain<T>>,
address_changes: Vec<SignedBlsToExecutionChange>,
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
log: Logger| {
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>| {
task_spawner.blocking_json_task(Priority::P0, move || {
let mut failures = vec![];
@@ -2245,11 +2170,12 @@ pub fn serve<T: BeaconChainTypes>(
.to_execution_address;
// New to P2P *and* op pool, gossip immediately if post-Capella.
let received_pre_capella = if chain.current_slot_is_post_capella().unwrap_or(false) {
ReceivedPreCapella::No
} else {
ReceivedPreCapella::Yes
};
let received_pre_capella =
if chain.current_slot_is_post_capella().unwrap_or(false) {
ReceivedPreCapella::No
} else {
ReceivedPreCapella::Yes
};
if matches!(received_pre_capella, ReceivedPreCapella::No) {
publish_pubsub_message(
&network_tx,
@@ -2260,32 +2186,29 @@ pub fn serve<T: BeaconChainTypes>(
}
// Import to op pool (may return `false` if there's a race).
let imported =
chain.import_bls_to_execution_change(verified_address_change, received_pre_capella);
let imported = chain.import_bls_to_execution_change(
verified_address_change,
received_pre_capella,
);
info!(
log,
"Processed BLS to execution change";
"validator_index" => validator_index,
"address" => ?address,
"published" => matches!(received_pre_capella, ReceivedPreCapella::No),
"imported" => imported,
%validator_index,
?address,
published =
matches!(received_pre_capella, ReceivedPreCapella::No),
imported,
"Processed BLS to execution change"
);
}
Ok(ObservationOutcome::AlreadyKnown) => {
debug!(
log,
"BLS to execution change already known";
"validator_index" => validator_index,
);
debug!(%validator_index, "BLS to execution change already known");
}
Err(e) => {
warn!(
log,
"Invalid BLS to execution change";
"validator_index" => validator_index,
"reason" => ?e,
"source" => "HTTP",
validator_index,
reason = ?e,
source = "HTTP",
"Invalid BLS to execution change"
);
failures.push(api_types::Failure::new(
index,
@@ -2658,17 +2581,15 @@ pub fn serve<T: BeaconChainTypes>(
.and(block_id_or_err)
.and(warp::path::end())
.and(warp_utils::json::json())
.and(log_filter.clone())
.then(
|task_spawner: TaskSpawner<T::EthSpec>,
chain: Arc<BeaconChain<T>>,
block_id: BlockId,
validators: Vec<ValidatorId>,
log: Logger| {
validators: Vec<ValidatorId>| {
task_spawner.blocking_json_task(Priority::P1, move || {
let (rewards, execution_optimistic, finalized) =
sync_committee_rewards::compute_sync_committee_rewards(
chain, block_id, validators, log,
chain, block_id, validators,
)?;
Ok(api_types::GenericResponse::from(rewards)).map(|resp| {
@@ -2755,14 +2676,12 @@ pub fn serve<T: BeaconChainTypes>(
.and(warp::header::optional::<api_types::Accept>("accept"))
.and(task_spawner_filter.clone())
.and(chain_filter.clone())
.and(log_filter.clone())
.then(
|endpoint_version: EndpointVersion,
state_id: StateId,
accept_header: Option<api_types::Accept>,
task_spawner: TaskSpawner<T::EthSpec>,
chain: Arc<BeaconChain<T>>,
log: Logger| {
chain: Arc<BeaconChain<T>>| {
task_spawner.blocking_response_task(Priority::P1, move || match accept_header {
Some(api_types::Accept::Ssz) => {
// We can ignore the optimistic status for the "fork" since it's a
@@ -2777,10 +2696,9 @@ pub fn serve<T: BeaconChainTypes>(
let response_bytes = state.as_ssz_bytes();
drop(timer);
debug!(
log,
"HTTP state load";
"total_time_ms" => t.elapsed().as_millis(),
"target_slot" => state.slot()
total_time_ms = t.elapsed().as_millis(),
target_slot = %state.slot(),
"HTTP state load"
);
Response::builder()
@@ -3248,16 +3166,14 @@ pub fn serve<T: BeaconChainTypes>(
.and(not_while_syncing_filter.clone())
.and(task_spawner_filter.clone())
.and(chain_filter.clone())
.and(log_filter.clone())
.then(
|epoch: Epoch,
not_synced_filter: Result<(), Rejection>,
task_spawner: TaskSpawner<T::EthSpec>,
chain: Arc<BeaconChain<T>>,
log: Logger| {
chain: Arc<BeaconChain<T>>| {
task_spawner.blocking_json_task(Priority::P0, move || {
not_synced_filter?;
proposer_duties::proposer_duties(epoch, &chain, &log)
proposer_duties::proposer_duties(epoch, &chain)
})
},
);
@@ -3277,7 +3193,6 @@ pub fn serve<T: BeaconChainTypes>(
.and(warp::query::<api_types::ValidatorBlocksQuery>())
.and(task_spawner_filter.clone())
.and(chain_filter.clone())
.and(log_filter.clone())
.then(
|endpoint_version: EndpointVersion,
slot: Slot,
@@ -3285,14 +3200,9 @@ pub fn serve<T: BeaconChainTypes>(
not_synced_filter: Result<(), Rejection>,
query: api_types::ValidatorBlocksQuery,
task_spawner: TaskSpawner<T::EthSpec>,
chain: Arc<BeaconChain<T>>,
log: Logger| {
chain: Arc<BeaconChain<T>>| {
task_spawner.spawn_async_with_rejection(Priority::P0, async move {
debug!(
log,
"Block production request from HTTP API";
"slot" => slot
);
debug!(?slot, "Block production request from HTTP API");
not_synced_filter?;
@@ -3499,7 +3409,6 @@ pub fn serve<T: BeaconChainTypes>(
.and(chain_filter.clone())
.and(warp_utils::json::json())
.and(network_tx_filter.clone())
.and(log_filter.clone())
.then(
// V1 and V2 are identical except V2 has a consensus version header in the request.
// We only require this header for SSZ deserialization, which isn't supported for
@@ -3509,7 +3418,7 @@ pub fn serve<T: BeaconChainTypes>(
task_spawner: TaskSpawner<T::EthSpec>,
chain: Arc<BeaconChain<T>>,
aggregates: Vec<SignedAggregateAndProof<T::EthSpec>>,
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>, log: Logger| {
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>| {
task_spawner.blocking_json_task(Priority::P0, move || {
not_synced_filter?;
let seen_timestamp = timestamp_now();
@@ -3556,13 +3465,13 @@ pub fn serve<T: BeaconChainTypes>(
// aggregate has been successfully published by some other node.
Err(AttnError::AggregatorAlreadyKnown(_)) => continue,
Err(e) => {
error!(log,
"Failure verifying aggregate and proofs";
"error" => format!("{:?}", e),
"request_index" => index,
"aggregator_index" => aggregate.message().aggregator_index(),
"attestation_index" => aggregate.message().aggregate().committee_index(),
"attestation_slot" => aggregate.message().aggregate().data().slot,
error!(
error = ?e,
request_index = index,
aggregator_index = aggregate.message().aggregator_index(),
attestation_index = aggregate.message().aggregate().committee_index(),
attestation_slot = %aggregate.message().aggregate().data().slot,
"Failure verifying aggregate and proofs"
);
failures.push(api_types::Failure::new(index, format!("Verification: {:?}", e)));
}
@@ -3577,22 +3486,21 @@ pub fn serve<T: BeaconChainTypes>(
// Import aggregate attestations
for (index, verified_aggregate) in verified_aggregates {
if let Err(e) = chain.apply_attestation_to_fork_choice(&verified_aggregate) {
error!(log,
"Failure applying verified aggregate attestation to fork choice";
"error" => format!("{:?}", e),
"request_index" => index,
"aggregator_index" => verified_aggregate.aggregate().message().aggregator_index(),
"attestation_index" => verified_aggregate.attestation().committee_index(),
"attestation_slot" => verified_aggregate.attestation().data().slot,
error!(
error = ?e,
request_index = index,
aggregator_index = verified_aggregate.aggregate().message().aggregator_index(),
attestation_index = verified_aggregate.attestation().committee_index(),
attestation_slot = %verified_aggregate.attestation().data().slot,
"Failure applying verified aggregate attestation to fork choice"
);
failures.push(api_types::Failure::new(index, format!("Fork choice: {:?}", e)));
}
if let Err(e) = chain.add_to_block_inclusion_pool(verified_aggregate) {
warn!(
log,
"Could not add verified aggregate attestation to the inclusion pool";
"error" => ?e,
"request_index" => index,
error = ?e,
request_index = index,
"Could not add verified aggregate attestation to the inclusion pool"
);
failures.push(api_types::Failure::new(index, format!("Op pool: {:?}", e)));
}
@@ -3618,21 +3526,18 @@ pub fn serve<T: BeaconChainTypes>(
.and(chain_filter.clone())
.and(warp_utils::json::json())
.and(network_tx_filter)
.and(log_filter.clone())
.then(
|not_synced_filter: Result<(), Rejection>,
task_spawner: TaskSpawner<T::EthSpec>,
chain: Arc<BeaconChain<T>>,
contributions: Vec<SignedContributionAndProof<T::EthSpec>>,
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
log: Logger| {
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>| {
task_spawner.blocking_json_task(Priority::P0, move || {
not_synced_filter?;
sync_committees::process_signed_contribution_and_proofs(
contributions,
network_tx,
&chain,
log,
)?;
Ok(api_types::GenericResponse::from(()))
})
@@ -3648,13 +3553,11 @@ pub fn serve<T: BeaconChainTypes>(
.and(validator_subscription_tx_filter.clone())
.and(task_spawner_filter.clone())
.and(chain_filter.clone())
.and(log_filter.clone())
.then(
|subscriptions: Vec<api_types::BeaconCommitteeSubscription>,
validator_subscription_tx: Sender<ValidatorSubscriptionMessage>,
task_spawner: TaskSpawner<T::EthSpec>,
chain: Arc<BeaconChain<T>>,
log: Logger| {
chain: Arc<BeaconChain<T>>| {
task_spawner.blocking_json_task(Priority::P0, move || {
let subscriptions: std::collections::BTreeSet<_> = subscriptions
.iter()
@@ -3675,10 +3578,9 @@ pub fn serve<T: BeaconChainTypes>(
ValidatorSubscriptionMessage::AttestationSubscribe { subscriptions };
if let Err(e) = validator_subscription_tx.try_send(message) {
warn!(
log,
"Unable to process committee subscriptions";
"info" => "the host may be overloaded or resource-constrained",
"error" => ?e,
info = "the host may be overloaded or resource-constrained",
error = ?e,
"Unable to process committee subscriptions"
);
return Err(warp_utils::reject::custom_server_error(
"unable to queue subscription, host may be overloaded or shutting down"
@@ -3699,13 +3601,11 @@ pub fn serve<T: BeaconChainTypes>(
.and(not_while_syncing_filter.clone())
.and(task_spawner_filter.clone())
.and(chain_filter.clone())
.and(log_filter.clone())
.and(warp_utils::json::json())
.then(
|not_synced_filter: Result<(), Rejection>,
task_spawner: TaskSpawner<T::EthSpec>,
chain: Arc<BeaconChain<T>>,
log: Logger,
preparation_data: Vec<ProposerPreparationData>| {
task_spawner.spawn_async_with_rejection(Priority::P0, async move {
not_synced_filter?;
@@ -3719,9 +3619,8 @@ pub fn serve<T: BeaconChainTypes>(
let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch());
debug!(
log,
"Received proposer preparation data";
"count" => preparation_data.len(),
count = preparation_data.len(),
"Received proposer preparation data"
);
execution_layer
@@ -3753,12 +3652,10 @@ pub fn serve<T: BeaconChainTypes>(
.and(warp::path::end())
.and(task_spawner_filter.clone())
.and(chain_filter.clone())
.and(log_filter.clone())
.and(warp_utils::json::json())
.then(
|task_spawner: TaskSpawner<T::EthSpec>,
chain: Arc<BeaconChain<T>>,
log: Logger,
register_val_data: Vec<SignedValidatorRegistrationData>| async {
let (tx, rx) = oneshot::channel();
@@ -3777,9 +3674,8 @@ pub fn serve<T: BeaconChainTypes>(
let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch());
debug!(
log,
"Received register validator request";
"count" => register_val_data.len(),
count = register_val_data.len(),
"Received register validator request"
);
let head_snapshot = chain.head_snapshot();
@@ -3854,9 +3750,8 @@ pub fn serve<T: BeaconChainTypes>(
})?;
info!(
log,
"Forwarding register validator request to connected builder";
"count" => filtered_registration_data.len(),
count = filtered_registration_data.len(),
"Forwarding register validator request to connected builder"
);
// It's a waste of a `BeaconProcessor` worker to just
@@ -3881,10 +3776,9 @@ pub fn serve<T: BeaconChainTypes>(
.map(|resp| warp::reply::json(&resp).into_response())
.map_err(|e| {
warn!(
log,
"Relay error when registering validator(s)";
"num_registrations" => filtered_registration_data.len(),
"error" => ?e
num_registrations = filtered_registration_data.len(),
error = ?e,
"Relay error when registering validator(s)"
);
// Forward the HTTP status code if we are able to, otherwise fall back
// to a server error.
@@ -3938,13 +3832,11 @@ pub fn serve<T: BeaconChainTypes>(
.and(validator_subscription_tx_filter)
.and(task_spawner_filter.clone())
.and(chain_filter.clone())
.and(log_filter.clone())
.then(
|subscriptions: Vec<types::SyncCommitteeSubscription>,
validator_subscription_tx: Sender<ValidatorSubscriptionMessage>,
task_spawner: TaskSpawner<T::EthSpec>,
chain: Arc<BeaconChain<T>>,
log: Logger
| {
task_spawner.blocking_json_task(Priority::P0, move || {
for subscription in subscriptions {
@@ -3958,10 +3850,9 @@ pub fn serve<T: BeaconChainTypes>(
};
if let Err(e) = validator_subscription_tx.try_send(message) {
warn!(
log,
"Unable to process sync subscriptions";
"info" => "the host may be overloaded or resource-constrained",
"error" => ?e
info = "the host may be overloaded or resource-constrained",
error = ?e,
"Unable to process sync subscriptions"
);
return Err(warp_utils::reject::custom_server_error(
"unable to queue subscription, host may be overloaded or shutting down".to_string(),
@@ -4431,10 +4322,9 @@ pub fn serve<T: BeaconChainTypes>(
.and(warp::path::end())
.and(task_spawner_filter.clone())
.and(chain_filter.clone())
.and(log_filter.clone())
.then(|query, task_spawner: TaskSpawner<T::EthSpec>, chain, log| {
.then(|query, task_spawner: TaskSpawner<T::EthSpec>, chain| {
task_spawner.blocking_json_task(Priority::P1, move || {
block_rewards::get_block_rewards(query, chain, log)
block_rewards::get_block_rewards(query, chain)
})
});
@@ -4446,14 +4336,11 @@ pub fn serve<T: BeaconChainTypes>(
.and(warp::path::end())
.and(task_spawner_filter.clone())
.and(chain_filter.clone())
.and(log_filter.clone())
.then(
|blocks, task_spawner: TaskSpawner<T::EthSpec>, chain, log| {
task_spawner.blocking_json_task(Priority::P1, move || {
block_rewards::compute_block_rewards(blocks, chain, log)
})
},
);
.then(|blocks, task_spawner: TaskSpawner<T::EthSpec>, chain| {
task_spawner.blocking_json_task(Priority::P1, move || {
block_rewards::compute_block_rewards(blocks, chain)
})
});
// GET lighthouse/analysis/attestation_performance/{index}
let get_lighthouse_attestation_performance = warp::path("lighthouse")
@@ -4631,7 +4518,9 @@ pub fn serve<T: BeaconChainTypes>(
match msg {
Ok(data) => {
// Serialize to json
match data.to_json_string() {
match serde_json::to_string(&data)
.map_err(|e| format!("{:?}", e))
{
// Send the json as a Server Side Event
Ok(json) => Ok(Event::default().data(json)),
Err(e) => {
@@ -4779,7 +4668,6 @@ pub fn serve<T: BeaconChainTypes>(
),
)
.recover(warp_utils::reject::handle_rejection)
.with(slog_logging(log.clone()))
.with(prometheus_metrics())
// Add a `Server` header.
.map(|reply| warp::reply::with_header(reply, "Server", &version_with_platform()))
@@ -4797,7 +4685,7 @@ pub fn serve<T: BeaconChainTypes>(
shutdown.await;
})?;
info!(log, "HTTP API is being served over TLS";);
info!("HTTP API is being served over TLS");
(socket, Box::pin(server))
}
@@ -4811,9 +4699,8 @@ pub fn serve<T: BeaconChainTypes>(
};
info!(
log,
"HTTP API started";
"listen_address" => %http_server.0,
listen_address = %http_server.0,
"HTTP API started"
);
Ok(http_server)

View File

@@ -7,9 +7,9 @@ use beacon_chain::{
};
use eth2::types::{self as api_types};
use safe_arith::SafeArith;
use slog::{debug, Logger};
use slot_clock::SlotClock;
use std::cmp::Ordering;
use tracing::debug;
use types::{Epoch, EthSpec, Hash256, Slot};
/// The struct that is returned to the requesting HTTP client.
@@ -19,7 +19,6 @@ type ApiDuties = api_types::DutiesResponse<Vec<api_types::ProposerData>>;
pub fn proposer_duties<T: BeaconChainTypes>(
request_epoch: Epoch,
chain: &BeaconChain<T>,
log: &Logger,
) -> Result<ApiDuties, warp::reject::Rejection> {
let current_epoch = chain
.slot_clock
@@ -52,11 +51,7 @@ pub fn proposer_duties<T: BeaconChainTypes>(
if let Some(duties) = try_proposer_duties_from_cache(request_epoch, chain)? {
Ok(duties)
} else {
debug!(
log,
"Proposer cache miss";
"request_epoch" => request_epoch,
);
debug!(%request_epoch, "Proposer cache miss");
compute_and_cache_proposer_duties(request_epoch, chain)
}
} else if request_epoch

View File

@@ -45,7 +45,6 @@ use eth2::types::Failure;
use lighthouse_network::PubsubMessage;
use network::NetworkMessage;
use serde_json::Value;
use slog::{debug, error, warn, Logger};
use std::borrow::Cow;
use std::sync::Arc;
use std::time::Duration;
@@ -53,6 +52,7 @@ use tokio::sync::{
mpsc::{Sender, UnboundedSender},
oneshot,
};
use tracing::{debug, error, warn};
use types::{Attestation, EthSpec, ForkName, SingleAttestation};
// Error variants are only used in `Debug` and considered `dead_code` by the compiler.
@@ -80,14 +80,10 @@ enum PublishAttestationResult {
pub fn deserialize_attestation_payload<T: BeaconChainTypes>(
payload: Value,
fork_name: Option<ForkName>,
log: &Logger,
) -> Result<Vec<Either<Attestation<T::EthSpec>, SingleAttestation>>, Error> {
if fork_name.is_some_and(|fork_name| fork_name.electra_enabled()) || fork_name.is_none() {
if fork_name.is_none() {
warn!(
log,
"No Consensus Version header specified.";
);
warn!("No Consensus Version header specified.");
}
Ok(serde_json::from_value::<Vec<SingleAttestation>>(payload)
@@ -111,7 +107,6 @@ fn verify_and_publish_attestation<T: BeaconChainTypes>(
either_attestation: &Either<Attestation<T::EthSpec>, SingleAttestation>,
seen_timestamp: Duration,
network_tx: &UnboundedSender<NetworkMessage<T::EthSpec>>,
log: &Logger,
) -> Result<(), Error> {
let attestation = convert_to_attestation(chain, either_attestation)?;
let verified_attestation = chain
@@ -157,16 +152,14 @@ fn verify_and_publish_attestation<T: BeaconChainTypes>(
if let Err(e) = &fc_result {
warn!(
log,
"Attestation invalid for fork choice";
"err" => ?e,
err = ?e,
"Attestation invalid for fork choice"
);
}
if let Err(e) = &naive_aggregation_result {
warn!(
log,
"Attestation invalid for aggregation";
"err" => ?e
err = ?e,
"Attestation invalid for aggregation"
);
}
@@ -232,7 +225,6 @@ pub async fn publish_attestations<T: BeaconChainTypes>(
attestations: Vec<Either<Attestation<T::EthSpec>, SingleAttestation>>,
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
reprocess_send: Option<Sender<ReprocessQueueMessage>>,
log: Logger,
) -> Result<(), warp::Rejection> {
// Collect metadata about attestations which we'll use to report failures. We need to
// move the `attestations` vec into the blocking task, so this small overhead is unavoidable.
@@ -246,7 +238,6 @@ pub async fn publish_attestations<T: BeaconChainTypes>(
// Gossip validate and publish attestations that can be immediately processed.
let seen_timestamp = timestamp_now();
let inner_log = log.clone();
let mut prelim_results = task_spawner
.blocking_task(Priority::P0, move || {
Ok(attestations
@@ -257,7 +248,6 @@ pub async fn publish_attestations<T: BeaconChainTypes>(
&attestation,
seen_timestamp,
&network_tx,
&inner_log,
) {
Ok(()) => PublishAttestationResult::Success,
Err(Error::Validation(AttestationError::UnknownHeadBlock {
@@ -270,14 +260,12 @@ pub async fn publish_attestations<T: BeaconChainTypes>(
let (tx, rx) = oneshot::channel();
let reprocess_chain = chain.clone();
let reprocess_network_tx = network_tx.clone();
let reprocess_log = inner_log.clone();
let reprocess_fn = move || {
let result = verify_and_publish_attestation(
&reprocess_chain,
&attestation,
seen_timestamp,
&reprocess_network_tx,
&reprocess_log,
);
// Ignore failure on the oneshot that reports the result. This
// shouldn't happen unless some catastrophe befalls the waiting
@@ -330,10 +318,9 @@ pub async fn publish_attestations<T: BeaconChainTypes>(
for (i, reprocess_result) in reprocess_indices.into_iter().zip(reprocess_results) {
let Some(result_entry) = prelim_results.get_mut(i) else {
error!(
log,
"Unreachable case in attestation publishing";
"case" => "prelim out of bounds",
"request_index" => i,
case = "prelim out of bounds",
request_index = i,
"Unreachable case in attestation publishing"
);
continue;
};
@@ -361,39 +348,35 @@ pub async fn publish_attestations<T: BeaconChainTypes>(
Some(PublishAttestationResult::Failure(e)) => {
if let Some((slot, committee_index)) = attestation_metadata.get(index) {
error!(
log,
"Failure verifying attestation for gossip";
"error" => ?e,
"request_index" => index,
"committee_index" => committee_index,
"attestation_slot" => slot,
error = ?e,
request_index = index,
committee_index,
attestation_slot = %slot,
"Failure verifying attestation for gossip"
);
failures.push(Failure::new(index, format!("{e:?}")));
} else {
error!(
log,
"Unreachable case in attestation publishing";
"case" => "out of bounds",
"request_index" => index
case = "out of bounds",
request_index = index,
"Unreachable case in attestation publishing"
);
failures.push(Failure::new(index, "metadata logic error".into()));
}
}
Some(PublishAttestationResult::Reprocessing(_)) => {
error!(
log,
"Unreachable case in attestation publishing";
"case" => "reprocessing",
"request_index" => index
case = "reprocessing",
request_index = index,
"Unreachable case in attestation publishing"
);
failures.push(Failure::new(index, "reprocess logic error".into()));
}
None => {
error!(
log,
"Unreachable case in attestation publishing";
"case" => "result is None",
"request_index" => index
case = "result is None",
request_index = index,
"Unreachable case in attestation publishing"
);
failures.push(Failure::new(index, "result logic error".into()));
}
@@ -402,9 +385,8 @@ pub async fn publish_attestations<T: BeaconChainTypes>(
if num_already_known > 0 {
debug!(
log,
"Some unagg attestations already known";
"count" => num_already_known
count = num_already_known,
"Some unagg attestations already known"
);
}

View File

@@ -18,13 +18,13 @@ use futures::TryFutureExt;
use lighthouse_network::{NetworkGlobals, PubsubMessage};
use network::NetworkMessage;
use rand::prelude::SliceRandom;
use slog::{debug, error, info, warn, Logger};
use slot_clock::SlotClock;
use std::marker::PhantomData;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::time::Duration;
use tokio::sync::mpsc::UnboundedSender;
use tracing::{debug, error, info, warn};
use tree_hash::TreeHash;
use types::{
AbstractExecPayload, BeaconBlockRef, BlobSidecar, BlobsList, BlockImportSource,
@@ -80,7 +80,6 @@ pub async fn publish_block<T: BeaconChainTypes, B: IntoGossipVerifiedBlock<T>>(
provenanced_block: ProvenancedBlock<T, B>,
chain: Arc<BeaconChain<T>>,
network_tx: &UnboundedSender<NetworkMessage<T::EthSpec>>,
log: Logger,
validation_level: BroadcastValidation,
duplicate_status_code: StatusCode,
network_globals: Arc<NetworkGlobals<T::EthSpec>>,
@@ -99,12 +98,12 @@ pub async fn publish_block<T: BeaconChainTypes, B: IntoGossipVerifiedBlock<T>>(
"builder"
};
let block = unverified_block.inner_block();
debug!(log, "Signed block received in HTTP API"; "slot" => block.slot());
debug!(slot = %block.slot(), "Signed block received in HTTP API");
/* actually publish a block */
let publish_block_p2p = move |block: Arc<SignedBeaconBlock<T::EthSpec>>,
sender,
log,
seen_timestamp|
-> Result<(), BlockError> {
let publish_timestamp = timestamp_now();
@@ -119,10 +118,9 @@ pub async fn publish_block<T: BeaconChainTypes, B: IntoGossipVerifiedBlock<T>>(
);
info!(
log,
"Signed block published to network via HTTP API";
"slot" => block.slot(),
"publish_delay_ms" => publish_delay.as_millis(),
slot = %block.slot(),
publish_delay_ms = publish_delay.as_millis(),
"Signed block published to network via HTTP API"
);
crate::publish_pubsub_message(&sender, PubsubMessage::BeaconBlock(block.clone()))
@@ -136,7 +134,7 @@ pub async fn publish_block<T: BeaconChainTypes, B: IntoGossipVerifiedBlock<T>>(
let sender_clone = network_tx.clone();
let build_sidecar_task_handle =
spawn_build_data_sidecar_task(chain.clone(), block.clone(), unverified_blobs, log.clone())?;
spawn_build_data_sidecar_task(chain.clone(), block.clone(), unverified_blobs)?;
// Gossip verify the block and blobs/data columns separately.
let gossip_verified_block_result = unverified_block.into_gossip_verified_block(&chain);
@@ -151,19 +149,13 @@ pub async fn publish_block<T: BeaconChainTypes, B: IntoGossipVerifiedBlock<T>>(
if BroadcastValidation::Gossip == validation_level && should_publish_block {
if let Some(block_publishing_delay) = block_publishing_delay_for_testing {
debug!(
log,
"Publishing block with artificial delay";
"block_publishing_delay" => ?block_publishing_delay
?block_publishing_delay,
"Publishing block with artificial delay"
);
tokio::time::sleep(block_publishing_delay).await;
}
publish_block_p2p(
block.clone(),
sender_clone.clone(),
log.clone(),
seen_timestamp,
)
.map_err(|_| warp_utils::reject::custom_server_error("unable to publish".into()))?;
publish_block_p2p(block.clone(), sender_clone.clone(), seen_timestamp)
.map_err(|_| warp_utils::reject::custom_server_error("unable to publish".into()))?;
}
let publish_fn_completed = Arc::new(AtomicBool::new(false));
@@ -175,15 +167,13 @@ pub async fn publish_block<T: BeaconChainTypes, B: IntoGossipVerifiedBlock<T>>(
BroadcastValidation::Consensus => publish_block_p2p(
block_to_publish.clone(),
sender_clone.clone(),
log.clone(),
seen_timestamp,
)?,
BroadcastValidation::ConsensusAndEquivocation => {
check_slashable(&chain, block_root, &block_to_publish, &log)?;
check_slashable(&chain, block_root, &block_to_publish)?;
publish_block_p2p(
block_to_publish.clone(),
sender_clone.clone(),
log.clone(),
seen_timestamp,
)?;
}
@@ -206,11 +196,7 @@ pub async fn publish_block<T: BeaconChainTypes, B: IntoGossipVerifiedBlock<T>>(
return if let BroadcastValidation::Gossip = validation_level {
Err(warp_utils::reject::broadcast_without_import(msg))
} else {
error!(
log,
"Invalid blob provided to HTTP API";
"reason" => &msg
);
error!(reason = &msg, "Invalid blob provided to HTTP API");
Err(warp_utils::reject::custom_bad_request(msg))
};
}
@@ -227,9 +213,8 @@ pub async fn publish_block<T: BeaconChainTypes, B: IntoGossipVerifiedBlock<T>>(
let delay = data_column_publishing_delay.saturating_sub(block_publishing_delay);
if !delay.is_zero() {
debug!(
log,
"Publishing data columns with artificial delay";
"data_column_publishing_delay" => ?data_column_publishing_delay
?data_column_publishing_delay,
"Publishing data columns with artificial delay"
);
tokio::time::sleep(delay).await;
}
@@ -254,9 +239,8 @@ pub async fn publish_block<T: BeaconChainTypes, B: IntoGossipVerifiedBlock<T>>(
Err(warp_utils::reject::broadcast_without_import(msg))
} else {
error!(
log,
"Invalid data column during block publication";
"reason" => &msg
reason = &msg,
"Invalid data column during block publication"
);
Err(warp_utils::reject::custom_bad_request(msg))
};
@@ -280,7 +264,6 @@ pub async fn publish_block<T: BeaconChainTypes, B: IntoGossipVerifiedBlock<T>>(
is_locally_built_block,
seen_timestamp,
&chain,
&log,
)
.await
}
@@ -293,7 +276,6 @@ pub async fn publish_block<T: BeaconChainTypes, B: IntoGossipVerifiedBlock<T>>(
is_locally_built_block,
seen_timestamp,
&chain,
&log,
)
.await
} else {
@@ -313,10 +295,9 @@ pub async fn publish_block<T: BeaconChainTypes, B: IntoGossipVerifiedBlock<T>>(
}
Err(BlockError::DuplicateImportStatusUnknown(root)) => {
debug!(
log,
"Block previously seen";
"block_root" => ?root,
"slot" => block.slot(),
block_root = ?root,
slot = %block.slot(),
"Block previously seen"
);
let import_result = Box::pin(chain.process_block(
block_root,
@@ -333,16 +314,14 @@ pub async fn publish_block<T: BeaconChainTypes, B: IntoGossipVerifiedBlock<T>>(
is_locally_built_block,
seen_timestamp,
&chain,
&log,
)
.await
}
Err(e) => {
warn!(
log,
"Not publishing block - not gossip verified";
"slot" => slot,
"error" => %e
%slot,
error = %e,
"Not publishing block - not gossip verified"
);
Err(warp_utils::reject::custom_bad_request(e.to_string()))
}
@@ -365,7 +344,6 @@ fn spawn_build_data_sidecar_task<T: BeaconChainTypes>(
chain: Arc<BeaconChain<T>>,
block: Arc<SignedBeaconBlock<T::EthSpec, FullPayload<T::EthSpec>>>,
proofs_and_blobs: UnverifiedBlobs<T>,
log: Logger,
) -> Result<impl Future<Output = BuildDataSidecarTaskResult<T>>, Rejection> {
chain
.clone()
@@ -380,12 +358,12 @@ fn spawn_build_data_sidecar_task<T: BeaconChainTypes>(
if !peer_das_enabled {
// Pre-PeerDAS: construct blob sidecars for the network.
let gossip_verified_blobs =
build_gossip_verified_blobs(&chain, &block, blobs, kzg_proofs, &log)?;
build_gossip_verified_blobs(&chain, &block, blobs, kzg_proofs)?;
Ok((gossip_verified_blobs, vec![]))
} else {
// Post PeerDAS: construct data columns.
let gossip_verified_data_columns =
build_gossip_verified_data_columns(&chain, &block, blobs, &log)?;
build_gossip_verified_data_columns(&chain, &block, blobs)?;
Ok((vec![], gossip_verified_data_columns))
}
},
@@ -404,16 +382,14 @@ fn build_gossip_verified_data_columns<T: BeaconChainTypes>(
chain: &BeaconChain<T>,
block: &SignedBeaconBlock<T::EthSpec, FullPayload<T::EthSpec>>,
blobs: BlobsList<T::EthSpec>,
log: &Logger,
) -> Result<Vec<Option<GossipVerifiedDataColumn<T>>>, Rejection> {
let slot = block.slot();
let data_column_sidecars =
build_blob_data_column_sidecars(chain, block, blobs).map_err(|e| {
error!(
log,
"Invalid data column - not publishing block";
"error" => ?e,
"slot" => slot
error = ?e,
%slot,
"Invalid data column - not publishing block"
);
warp_utils::reject::custom_bad_request(format!("{e:?}"))
})?;
@@ -434,21 +410,19 @@ fn build_gossip_verified_data_columns<T: BeaconChainTypes>(
// or some of the other data columns if the block & data columns are only
// partially published by the other publisher.
debug!(
log,
"Data column for publication already known";
"column_index" => column_index,
"slot" => slot,
"proposer" => proposer,
column_index,
%slot,
proposer,
"Data column for publication already known"
);
Ok(None)
}
Err(e) => {
error!(
log,
"Data column for publication is gossip-invalid";
"column_index" => column_index,
"slot" => slot,
"error" => ?e,
column_index,
%slot,
error = ?e,
"Data column for publication is gossip-invalid"
);
Err(warp_utils::reject::custom_bad_request(format!("{e:?}")))
}
@@ -464,7 +438,6 @@ fn build_gossip_verified_blobs<T: BeaconChainTypes>(
block: &SignedBeaconBlock<T::EthSpec, FullPayload<T::EthSpec>>,
blobs: BlobsList<T::EthSpec>,
kzg_proofs: KzgProofs<T::EthSpec>,
log: &Logger,
) -> Result<Vec<Option<GossipVerifiedBlob<T>>>, Rejection> {
let slot = block.slot();
let gossip_verified_blobs = kzg_proofs
@@ -479,11 +452,10 @@ fn build_gossip_verified_blobs<T: BeaconChainTypes>(
.map(Arc::new)
.map_err(|e| {
error!(
log,
"Invalid blob - not publishing block";
"error" => ?e,
"blob_index" => i,
"slot" => slot,
error = ?e,
blob_index = i,
%slot,
"Invalid blob - not publishing block"
);
warp_utils::reject::custom_bad_request(format!("{e:?}"))
})?;
@@ -499,21 +471,19 @@ fn build_gossip_verified_blobs<T: BeaconChainTypes>(
// or some of the other blobs if the block & blobs are only partially published
// by the other publisher.
debug!(
log,
"Blob for publication already known";
"blob_index" => blob_sidecar.index,
"slot" => slot,
"proposer" => proposer,
blob_index = blob_sidecar.index,
%slot,
proposer,
"Blob for publication already known"
);
Ok(None)
}
Err(e) => {
error!(
log,
"Blob for publication is gossip-invalid";
"blob_index" => blob_sidecar.index,
"slot" => slot,
"error" => ?e,
blob_index = blob_sidecar.index,
%slot,
error = ?e,
"Blob for publication is gossip-invalid"
);
Err(warp_utils::reject::custom_bad_request(e.to_string()))
}
@@ -524,6 +494,15 @@ fn build_gossip_verified_blobs<T: BeaconChainTypes>(
Ok(gossip_verified_blobs)
}
fn publish_blob_sidecars<T: BeaconChainTypes>(
sender_clone: &UnboundedSender<NetworkMessage<T::EthSpec>>,
blob: &GossipVerifiedBlob<T>,
) -> Result<(), BlockError> {
let pubsub_message = PubsubMessage::BlobSidecar(Box::new((blob.index(), blob.clone_blob())));
crate::publish_pubsub_message(sender_clone, pubsub_message)
.map_err(|_| BlockError::BeaconChainError(BeaconChainError::UnableToPublish))
}
fn publish_column_sidecars<T: BeaconChainTypes>(
sender_clone: &UnboundedSender<NetworkMessage<T::EthSpec>>,
data_column_sidecars: &[Option<GossipVerifiedDataColumn<T>>],
@@ -554,15 +533,6 @@ fn publish_column_sidecars<T: BeaconChainTypes>(
.map_err(|_| BlockError::BeaconChainError(BeaconChainError::UnableToPublish))
}
fn publish_blob_sidecars<T: BeaconChainTypes>(
sender_clone: &UnboundedSender<NetworkMessage<T::EthSpec>>,
blob: &GossipVerifiedBlob<T>,
) -> Result<(), BlockError> {
let pubsub_message = PubsubMessage::BlobSidecar(Box::new((blob.index(), blob.clone_blob())));
crate::publish_pubsub_message(sender_clone, pubsub_message)
.map_err(|_| BlockError::BeaconChainError(BeaconChainError::UnableToPublish))
}
async fn post_block_import_logging_and_response<T: BeaconChainTypes>(
result: Result<AvailabilityProcessingStatus, BlockError>,
validation_level: BroadcastValidation,
@@ -570,7 +540,6 @@ async fn post_block_import_logging_and_response<T: BeaconChainTypes>(
is_locally_built_block: bool,
seen_timestamp: Duration,
chain: &Arc<BeaconChain<T>>,
log: &Logger,
) -> Result<Response, Rejection> {
match result {
// The `DuplicateFullyImported` case here captures the case where the block finishes
@@ -582,12 +551,11 @@ async fn post_block_import_logging_and_response<T: BeaconChainTypes>(
| Err(BlockError::DuplicateFullyImported(root)) => {
let delay = get_block_delay_ms(seen_timestamp, block.message(), &chain.slot_clock);
info!(
log,
"Valid block from HTTP API";
"block_delay" => ?delay,
"root" => %root,
"proposer_index" => block.message().proposer_index(),
"slot" => block.slot(),
block_delay = ?delay,
root = %root,
proposer_index = block.message().proposer_index(),
slot = %block.slot(),
"Valid block from HTTP API"
);
// Notify the validator monitor.
@@ -606,7 +574,7 @@ async fn post_block_import_logging_and_response<T: BeaconChainTypes>(
// blocks built with builders we consider the broadcast time to be
// when the blinded block is published to the builder.
if is_locally_built_block {
late_block_logging(chain, seen_timestamp, block.message(), root, "local", log)
late_block_logging(chain, seen_timestamp, block.message(), root, "local")
}
Ok(warp::reply().into_response())
}
@@ -615,11 +583,7 @@ async fn post_block_import_logging_and_response<T: BeaconChainTypes>(
if let BroadcastValidation::Gossip = validation_level {
Err(warp_utils::reject::broadcast_without_import(msg))
} else {
error!(
log,
"Invalid block provided to HTTP API";
"reason" => &msg
);
error!(reason = &msg, "Invalid block provided to HTTP API");
Err(warp_utils::reject::custom_bad_request(msg))
}
}
@@ -636,9 +600,8 @@ async fn post_block_import_logging_and_response<T: BeaconChainTypes>(
Err(warp_utils::reject::broadcast_without_import(format!("{e}")))
} else {
error!(
log,
"Invalid block provided to HTTP API";
"reason" => ?e,
reason = ?e,
"Invalid block provided to HTTP API"
);
Err(warp_utils::reject::custom_bad_request(format!(
"Invalid block: {e}"
@@ -654,20 +617,17 @@ pub async fn publish_blinded_block<T: BeaconChainTypes>(
blinded_block: Arc<SignedBlindedBeaconBlock<T::EthSpec>>,
chain: Arc<BeaconChain<T>>,
network_tx: &UnboundedSender<NetworkMessage<T::EthSpec>>,
log: Logger,
validation_level: BroadcastValidation,
duplicate_status_code: StatusCode,
network_globals: Arc<NetworkGlobals<T::EthSpec>>,
) -> Result<Response, Rejection> {
let block_root = blinded_block.canonical_root();
let full_block =
reconstruct_block(chain.clone(), block_root, blinded_block, log.clone()).await?;
let full_block = reconstruct_block(chain.clone(), block_root, blinded_block).await?;
publish_block::<T, _>(
Some(block_root),
full_block,
chain,
network_tx,
log,
validation_level,
duplicate_status_code,
network_globals,
@@ -682,7 +642,6 @@ pub async fn reconstruct_block<T: BeaconChainTypes>(
chain: Arc<BeaconChain<T>>,
block_root: Hash256,
block: Arc<SignedBlindedBeaconBlock<T::EthSpec>>,
log: Logger,
) -> Result<ProvenancedBlock<T, Arc<SignedBeaconBlock<T::EthSpec>>>, Rejection> {
let full_payload_opt = if let Ok(payload_header) = block.message().body().execution_payload() {
let el = chain.execution_layer.as_ref().ok_or_else(|| {
@@ -706,7 +665,7 @@ pub async fn reconstruct_block<T: BeaconChainTypes>(
} else if let Some(cached_payload) =
el.get_payload_by_root(&payload_header.tree_hash_root())
{
info!(log, "Reconstructing a full block using a local payload"; "block_hash" => ?cached_payload.block_hash());
info!(block_hash = ?cached_payload.block_hash(), "Reconstructing a full block using a local payload");
ProvenancedPayload::Local(cached_payload)
// Otherwise, this means we are attempting a blind block proposal.
} else {
@@ -721,7 +680,6 @@ pub async fn reconstruct_block<T: BeaconChainTypes>(
block.message(),
block_root,
"builder",
&log,
);
let full_payload = el
@@ -733,7 +691,7 @@ pub async fn reconstruct_block<T: BeaconChainTypes>(
e
))
})?;
info!(log, "Successfully published a block to the builder network"; "block_hash" => ?full_payload.block_hash());
info!(block_hash = ?full_payload.block_hash(), "Successfully published a block to the builder network");
ProvenancedPayload::Builder(full_payload)
};
@@ -775,7 +733,6 @@ fn late_block_logging<T: BeaconChainTypes, P: AbstractExecPayload<T::EthSpec>>(
block: BeaconBlockRef<T::EthSpec, P>,
root: Hash256,
provenance: &str,
log: &Logger,
) {
let delay = get_block_delay_ms(seen_timestamp, block, &chain.slot_clock);
@@ -794,23 +751,21 @@ fn late_block_logging<T: BeaconChainTypes, P: AbstractExecPayload<T::EthSpec>>(
let delayed_threshold = too_late_threshold / 2;
if delay >= too_late_threshold {
error!(
log,
"Block was broadcast too late";
"msg" => "system may be overloaded, block likely to be orphaned",
"provenance" => provenance,
"delay_ms" => delay.as_millis(),
"slot" => block.slot(),
"root" => ?root,
msg = "system may be overloaded, block likely to be orphaned",
provenance,
delay_ms = delay.as_millis(),
slot = %block.slot(),
?root,
"Block was broadcast too late"
)
} else if delay >= delayed_threshold {
error!(
log,
"Block broadcast was delayed";
"msg" => "system may be overloaded, block may be orphaned",
"provenance" => provenance,
"delay_ms" => delay.as_millis(),
"slot" => block.slot(),
"root" => ?root,
msg = "system may be overloaded, block may be orphaned",
provenance,
delay_ms = delay.as_millis(),
slot = %block.slot(),
?root,
"Block broadcast was delayed"
)
}
}
@@ -820,7 +775,6 @@ fn check_slashable<T: BeaconChainTypes>(
chain_clone: &BeaconChain<T>,
block_root: Hash256,
block_clone: &SignedBeaconBlock<T::EthSpec, FullPayload<T::EthSpec>>,
log_clone: &Logger,
) -> Result<(), BlockError> {
let slashable_cache = chain_clone.observed_slashable.read();
if slashable_cache
@@ -832,9 +786,8 @@ fn check_slashable<T: BeaconChainTypes>(
.map_err(|e| BlockError::BeaconChainError(e.into()))?
{
warn!(
log_clone,
"Not publishing equivocating block";
"slot" => block_clone.slot()
slot = %block_clone.slot(),
"Not publishing equivocating block"
);
return Err(BlockError::Slashable);
}

View File

@@ -2,9 +2,9 @@ use crate::{BlockId, ExecutionOptimistic};
use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes};
use eth2::lighthouse::SyncCommitteeReward;
use eth2::types::ValidatorId;
use slog::{debug, Logger};
use state_processing::BlockReplayer;
use std::sync::Arc;
use tracing::debug;
use types::{BeaconState, SignedBlindedBeaconBlock};
use warp_utils::reject::{custom_not_found, unhandled_error};
@@ -12,7 +12,6 @@ pub fn compute_sync_committee_rewards<T: BeaconChainTypes>(
chain: Arc<BeaconChain<T>>,
block_id: BlockId,
validators: Vec<ValidatorId>,
log: Logger,
) -> Result<(Option<Vec<SyncCommitteeReward>>, ExecutionOptimistic, bool), warp::Rejection> {
let (block, execution_optimistic, finalized) = block_id.blinded_block(&chain)?;
@@ -23,7 +22,7 @@ pub fn compute_sync_committee_rewards<T: BeaconChainTypes>(
.map_err(unhandled_error)?;
let data = if reward_payload.is_empty() {
debug!(log, "compute_sync_committee_rewards returned empty");
debug!("compute_sync_committee_rewards returned empty");
None
} else if validators.is_empty() {
Some(reward_payload)

View File

@@ -11,11 +11,11 @@ use beacon_chain::{
use eth2::types::{self as api_types};
use lighthouse_network::PubsubMessage;
use network::NetworkMessage;
use slog::{debug, error, warn, Logger};
use slot_clock::SlotClock;
use std::cmp::max;
use std::collections::HashMap;
use tokio::sync::mpsc::UnboundedSender;
use tracing::{debug, error, warn};
use types::{
slot_data::SlotData, BeaconStateError, Epoch, EthSpec, SignedContributionAndProof,
SyncCommitteeMessage, SyncDuty, SyncSubnetId,
@@ -178,7 +178,6 @@ pub fn process_sync_committee_signatures<T: BeaconChainTypes>(
sync_committee_signatures: Vec<SyncCommitteeMessage>,
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
chain: &BeaconChain<T>,
log: Logger,
) -> Result<(), warp::reject::Rejection> {
let mut failures = vec![];
@@ -192,10 +191,9 @@ pub fn process_sync_committee_signatures<T: BeaconChainTypes>(
Ok(positions) => positions,
Err(e) => {
error!(
log,
"Unable to compute subnet positions for sync message";
"error" => ?e,
"slot" => sync_committee_signature.slot,
error = ?e,
slot = %sync_committee_signature.slot,
"Unable to compute subnet positions for sync message"
);
failures.push(api_types::Failure::new(i, format!("Verification: {:?}", e)));
continue;
@@ -248,22 +246,20 @@ pub fn process_sync_committee_signatures<T: BeaconChainTypes>(
new_root,
}) => {
debug!(
log,
"Ignoring already-known sync message";
"new_root" => ?new_root,
"prev_root" => ?prev_root,
"slot" => slot,
"validator_index" => validator_index,
?new_root,
?prev_root,
%slot,
validator_index,
"Ignoring already-known sync message"
);
}
Err(e) => {
error!(
log,
"Failure verifying sync committee signature for gossip";
"error" => ?e,
"request_index" => i,
"slot" => sync_committee_signature.slot,
"validator_index" => sync_committee_signature.validator_index,
error = ?e,
request_index = i,
slot = %sync_committee_signature.slot,
validator_index = sync_committee_signature.validator_index,
"Failure verifying sync committee signature for gossip"
);
failures.push(api_types::Failure::new(i, format!("Verification: {:?}", e)));
}
@@ -273,11 +269,10 @@ pub fn process_sync_committee_signatures<T: BeaconChainTypes>(
if let Some(verified) = verified_for_pool {
if let Err(e) = chain.add_to_naive_sync_aggregation_pool(verified) {
error!(
log,
"Unable to add sync committee signature to pool";
"error" => ?e,
"slot" => sync_committee_signature.slot,
"validator_index" => sync_committee_signature.validator_index,
error = ?e,
slot = %sync_committee_signature.slot,
validator_index = sync_committee_signature.validator_index,
"Unable to add sync committee signature to pool"
);
}
}
@@ -312,7 +307,6 @@ pub fn process_signed_contribution_and_proofs<T: BeaconChainTypes>(
signed_contribution_and_proofs: Vec<SignedContributionAndProof<T::EthSpec>>,
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
chain: &BeaconChain<T>,
log: Logger,
) -> Result<(), warp::reject::Rejection> {
let mut verified_contributions = Vec::with_capacity(signed_contribution_and_proofs.len());
let mut failures = vec![];
@@ -362,13 +356,12 @@ pub fn process_signed_contribution_and_proofs<T: BeaconChainTypes>(
Err(SyncVerificationError::AggregatorAlreadyKnown(_)) => continue,
Err(e) => {
error!(
log,
"Failure verifying signed contribution and proof";
"error" => ?e,
"request_index" => index,
"aggregator_index" => aggregator_index,
"subcommittee_index" => subcommittee_index,
"contribution_slot" => contribution_slot,
error = ?e,
request_index = index,
aggregator_index = aggregator_index,
subcommittee_index = subcommittee_index,
contribution_slot = %contribution_slot,
"Failure verifying signed contribution and proof"
);
failures.push(api_types::Failure::new(
index,
@@ -382,10 +375,9 @@ pub fn process_signed_contribution_and_proofs<T: BeaconChainTypes>(
for (index, verified_contribution) in verified_contributions {
if let Err(e) = chain.add_contribution_to_block_inclusion_pool(verified_contribution) {
warn!(
log,
"Could not add verified sync contribution to the inclusion pool";
"error" => ?e,
"request_index" => index,
error = ?e,
request_index = index,
"Could not add verified sync contribution to the inclusion pool"
);
failures.push(api_types::Failure::new(index, format!("Op pool: {:?}", e)));
}

View File

@@ -19,10 +19,8 @@ use lighthouse_network::{
types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield, SyncState},
ConnectedPoint, Enr, NetworkConfig, NetworkGlobals, PeerId, PeerManager,
};
use logging::test_logger;
use network::{NetworkReceivers, NetworkSenders};
use sensitive_url::SensitiveUrl;
use slog::Logger;
use std::future::Future;
use std::net::SocketAddr;
use std::sync::Arc;
@@ -75,7 +73,6 @@ impl<E: EthSpec> InteractiveTester<E> {
) -> Self {
let mut harness_builder = BeaconChainHarness::builder(E::default())
.spec_or_default(spec.map(Arc::new))
.logger(test_logger())
.mock_execution_layer();
harness_builder = if let Some(initializer) = initializer {
@@ -102,13 +99,7 @@ impl<E: EthSpec> InteractiveTester<E> {
listening_socket,
network_rx,
..
} = create_api_server_with_config(
harness.chain.clone(),
config,
&harness.runtime,
harness.logger().clone(),
)
.await;
} = create_api_server_with_config(harness.chain.clone(), config, &harness.runtime).await;
tokio::spawn(server);
@@ -134,16 +125,14 @@ impl<E: EthSpec> InteractiveTester<E> {
pub async fn create_api_server<T: BeaconChainTypes>(
chain: Arc<BeaconChain<T>>,
test_runtime: &TestRuntime,
log: Logger,
) -> ApiServer<T, impl Future<Output = ()>> {
create_api_server_with_config(chain, Config::default(), test_runtime, log).await
create_api_server_with_config(chain, Config::default(), test_runtime).await
}
pub async fn create_api_server_with_config<T: BeaconChainTypes>(
chain: Arc<BeaconChain<T>>,
http_config: Config,
test_runtime: &TestRuntime,
log: Logger,
) -> ApiServer<T, impl Future<Output = ()>> {
// Use port 0 to allocate a new unused port.
let port = 0;
@@ -174,14 +163,13 @@ pub async fn create_api_server_with_config<T: BeaconChainTypes>(
meta_data,
vec![],
false,
&log,
network_config,
chain.spec.clone(),
));
// Only a peer manager can add peers, so we create a dummy manager.
let config = lighthouse_network::peer_manager::config::Config::default();
let mut pm = PeerManager::new(config, network_globals.clone(), &log).unwrap();
let mut pm = PeerManager::new(config, network_globals.clone()).unwrap();
// add a peer
let peer_id = PeerId::random();
@@ -200,8 +188,7 @@ pub async fn create_api_server_with_config<T: BeaconChainTypes>(
}));
*network_globals.sync_state.write() = SyncState::Synced;
let eth1_service =
eth1::Service::new(eth1::Config::default(), log.clone(), chain.spec.clone()).unwrap();
let eth1_service = eth1::Service::new(eth1::Config::default(), chain.spec.clone()).unwrap();
let beacon_processor_config = BeaconProcessorConfig {
// The number of workers must be greater than one. Tests which use the
@@ -225,7 +212,6 @@ pub async fn create_api_server_with_config<T: BeaconChainTypes>(
executor: test_runtime.task_executor.clone(),
current_workers: 0,
config: beacon_processor_config,
log: log.clone(),
}
.spawn_manager(
beacon_processor_rx,
@@ -258,7 +244,6 @@ pub async fn create_api_server_with_config<T: BeaconChainTypes>(
beacon_processor_reprocess_send: Some(reprocess_send),
eth1_service: Some(eth1_service),
sse_logging_components: None,
log,
});
let (listening_socket, server) =

View File

@@ -331,7 +331,6 @@ pub async fn consensus_partial_pass_only_consensus() {
let validator_count = 64;
let num_initial: u64 = 31;
let tester = InteractiveTester::<E>::new(None, validator_count).await;
let test_logger = tester.harness.logger().clone();
// Create some chain depth.
tester.harness.advance_slot();
@@ -379,7 +378,6 @@ pub async fn consensus_partial_pass_only_consensus() {
ProvenancedBlock::local(gossip_block_b.unwrap(), blobs_b),
tester.harness.chain.clone(),
&channel.0,
test_logger,
validation_level,
StatusCode::ACCEPTED,
network_globals,
@@ -624,7 +622,6 @@ pub async fn equivocation_consensus_late_equivocation() {
let validator_count = 64;
let num_initial: u64 = 31;
let tester = InteractiveTester::<E>::new(None, validator_count).await;
let test_logger = tester.harness.logger().clone();
// Create some chain depth.
tester.harness.advance_slot();
@@ -671,7 +668,6 @@ pub async fn equivocation_consensus_late_equivocation() {
ProvenancedBlock::local(gossip_block_b.unwrap(), blobs_b),
tester.harness.chain,
&channel.0,
test_logger,
validation_level,
StatusCode::ACCEPTED,
network_globals,
@@ -1236,7 +1232,6 @@ pub async fn blinded_equivocation_consensus_late_equivocation() {
let validator_count = 64;
let num_initial: u64 = 31;
let tester = InteractiveTester::<E>::new(None, validator_count).await;
let test_logger = tester.harness.logger().clone();
// Create some chain depth.
tester.harness.advance_slot();
@@ -1276,7 +1271,6 @@ pub async fn blinded_equivocation_consensus_late_equivocation() {
tester.harness.chain.clone(),
block_a.canonical_root(),
Arc::new(block_a),
test_logger.clone(),
)
.await
.unwrap();
@@ -1284,7 +1278,6 @@ pub async fn blinded_equivocation_consensus_late_equivocation() {
tester.harness.chain.clone(),
block_b.canonical_root(),
block_b.clone(),
test_logger.clone(),
)
.await
.unwrap();
@@ -1310,7 +1303,6 @@ pub async fn blinded_equivocation_consensus_late_equivocation() {
block_b,
tester.harness.chain,
&channel.0,
test_logger,
validation_level,
StatusCode::ACCEPTED,
network_globals,

View File

@@ -26,7 +26,6 @@ use http_api::{
BlockId, StateId,
};
use lighthouse_network::{types::SyncState, Enr, EnrExt, PeerId};
use logging::test_logger;
use network::NetworkReceivers;
use proto_array::ExecutionStatus;
use sensitive_url::SensitiveUrl;
@@ -135,7 +134,6 @@ impl ApiTester {
reconstruct_historic_states: config.retain_historic_states,
..ChainConfig::default()
})
.logger(logging::test_logger())
.deterministic_keypairs(VALIDATOR_COUNT)
.deterministic_withdrawal_keypairs(VALIDATOR_COUNT)
.fresh_ephemeral_store()
@@ -277,8 +275,6 @@ impl ApiTester {
"precondition: justification"
);
let log = test_logger();
let ApiServer {
ctx,
server,
@@ -286,7 +282,7 @@ impl ApiTester {
network_rx,
local_enr,
external_peer_id,
} = create_api_server(chain.clone(), &harness.runtime, log).await;
} = create_api_server(chain.clone(), &harness.runtime).await;
harness.runtime.task_executor.spawn(server, "api_server");
@@ -375,7 +371,6 @@ impl ApiTester {
let bls_to_execution_change = harness.make_bls_to_execution_change(4, Address::zero());
let chain = harness.chain.clone();
let log = test_logger();
let ApiServer {
ctx,
@@ -384,7 +379,7 @@ impl ApiTester {
network_rx,
local_enr,
external_peer_id,
} = create_api_server(chain.clone(), &harness.runtime, log).await;
} = create_api_server(chain.clone(), &harness.runtime).await;
harness.runtime.task_executor.spawn(server, "api_server");