Integrate tracing (#6339)

Tracing Integration
- [reference](5bbf1859e9/projects/project-ideas.md#L297)


- [x] replace slog & log with tracing throughout the codebase
- [x] implement custom crit log
- [x] make relevant changes in the formatter
- [x] replace sloggers
- [x] re-write SSE logging components

cc: @macladson @eserilev
This commit is contained in:
ThreeHrSleep
2025-03-13 04:01:05 +05:30
committed by GitHub
parent f23f984f85
commit d60c24ef1c
241 changed files with 9485 additions and 9328 deletions

View File

@@ -55,7 +55,7 @@ use health_metrics::observe::Observe;
use lighthouse_network::rpc::methods::MetaData;
use lighthouse_network::{types::SyncState, EnrExt, NetworkGlobals, PeerId, PubsubMessage};
use lighthouse_version::version_with_platform;
use logging::SSELoggingComponents;
use logging::{crit, SSELoggingComponents};
use network::{NetworkMessage, NetworkSenders, ValidatorSubscriptionMessage};
use operation_pool::ReceivedPreCapella;
use parking_lot::RwLock;
@@ -64,7 +64,6 @@ pub use publish_blocks::{
};
use serde::{Deserialize, Serialize};
use serde_json::Value;
use slog::{crit, debug, error, info, warn, Logger};
use slot_clock::SlotClock;
use ssz::Encode;
pub use state_id::StateId;
@@ -84,6 +83,7 @@ use tokio_stream::{
wrappers::{errors::BroadcastStreamRecvError, BroadcastStream},
StreamExt,
};
use tracing::{debug, error, info, warn};
use types::{
fork_versioned_response::EmptyMetadata, Attestation, AttestationData, AttestationShufflingId,
AttesterSlashing, BeaconStateError, ChainSpec, CommitteeCache, ConfigAndPreset, Epoch, EthSpec,
@@ -132,7 +132,6 @@ pub struct Context<T: BeaconChainTypes> {
pub beacon_processor_reprocess_send: Option<Sender<ReprocessQueueMessage>>,
pub eth1_service: Option<eth1::Service>,
pub sse_logging_components: Option<SSELoggingComponents>,
pub log: Logger,
}
/// Configuration for the HTTP server.
@@ -186,40 +185,6 @@ impl From<String> for Error {
}
}
/// Creates a `warp` logging wrapper which we use to create `slog` logs.
///
/// Wraps every completed HTTP request in a log line carrying the elapsed
/// time, status code, path, and method as structured slog key/value pairs.
pub fn slog_logging(
log: Logger,
) -> warp::filters::log::Log<impl Fn(warp::filters::log::Info) + Clone> {
// The closure captures the slog `Logger` and is invoked once per
// completed request by warp's logging filter.
warp::log::custom(move |info| {
match info.status() {
// 200/404/206 are treated as routine outcomes and logged at DEBUG.
// NOTE(review): 404 being "routine" presumably avoids noise from
// probes/missing resources — confirm this is intentional.
status
if status == StatusCode::OK
|| status == StatusCode::NOT_FOUND
|| status == StatusCode::PARTIAL_CONTENT =>
{
debug!(
log,
"Processed HTTP API request";
"elapsed" => format!("{:?}", info.elapsed()),
"status" => status.to_string(),
"path" => info.path(),
"method" => info.method().to_string(),
);
}
// Any other status code is surfaced at WARN with the same fields.
status => {
warn!(
log,
"Error processing HTTP API request";
"elapsed" => format!("{:?}", info.elapsed()),
"status" => status.to_string(),
"path" => info.path(),
"method" => info.method().to_string(),
);
}
};
})
}
/// Creates a `warp` logging wrapper which we use for Prometheus metrics (not necessarily logging,
/// per se).
pub fn prometheus_metrics() -> warp::filters::log::Log<impl Fn(warp::filters::log::Info) + Clone> {
@@ -307,7 +272,6 @@ pub fn serve<T: BeaconChainTypes>(
shutdown: impl Future<Output = ()> + Send + Sync + 'static,
) -> Result<HttpServer, Error> {
let config = ctx.config.clone();
let log = ctx.log.clone();
// Configure CORS.
let cors_builder = {
@@ -324,7 +288,7 @@ pub fn serve<T: BeaconChainTypes>(
// Sanity check.
if !config.enabled {
crit!(log, "Cannot start disabled HTTP server");
crit!("Cannot start disabled HTTP server");
return Err(Error::Other(
"A disabled server should not be started".to_string(),
));
@@ -485,10 +449,6 @@ pub fn serve<T: BeaconChainTypes>(
}
});
// Create a `warp` filter that provides access to the logger.
let inner_ctx = ctx.clone();
let log_filter = warp::any().map(move || inner_ctx.log.clone());
let inner_components = ctx.sse_logging_components.clone();
let sse_component_filter = warp::any().map(move || inner_components.clone());
@@ -1284,21 +1244,18 @@ pub fn serve<T: BeaconChainTypes>(
.and(chain_filter.clone())
.and(network_tx_filter.clone())
.and(network_globals.clone())
.and(log_filter.clone())
.then(
move |block_contents: PublishBlockRequest<T::EthSpec>,
task_spawner: TaskSpawner<T::EthSpec>,
chain: Arc<BeaconChain<T>>,
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
network_globals: Arc<NetworkGlobals<T::EthSpec>>,
log: Logger| {
network_globals: Arc<NetworkGlobals<T::EthSpec>>| {
task_spawner.spawn_async_with_rejection(Priority::P0, async move {
publish_blocks::publish_block(
None,
ProvenancedBlock::local_from_publish_request(block_contents),
chain,
&network_tx,
log,
BroadcastValidation::default(),
duplicate_block_status_code,
network_globals,
@@ -1318,15 +1275,13 @@ pub fn serve<T: BeaconChainTypes>(
.and(chain_filter.clone())
.and(network_tx_filter.clone())
.and(network_globals.clone())
.and(log_filter.clone())
.then(
move |block_bytes: Bytes,
consensus_version: ForkName,
task_spawner: TaskSpawner<T::EthSpec>,
chain: Arc<BeaconChain<T>>,
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
network_globals: Arc<NetworkGlobals<T::EthSpec>>,
log: Logger| {
network_globals: Arc<NetworkGlobals<T::EthSpec>>| {
task_spawner.spawn_async_with_rejection(Priority::P0, async move {
let block_contents = PublishBlockRequest::<T::EthSpec>::from_ssz_bytes(
&block_bytes,
@@ -1340,7 +1295,6 @@ pub fn serve<T: BeaconChainTypes>(
ProvenancedBlock::local_from_publish_request(block_contents),
chain,
&network_tx,
log,
BroadcastValidation::default(),
duplicate_block_status_code,
network_globals,
@@ -1360,22 +1314,19 @@ pub fn serve<T: BeaconChainTypes>(
.and(chain_filter.clone())
.and(network_tx_filter.clone())
.and(network_globals.clone())
.and(log_filter.clone())
.then(
move |validation_level: api_types::BroadcastValidationQuery,
block_contents: PublishBlockRequest<T::EthSpec>,
task_spawner: TaskSpawner<T::EthSpec>,
chain: Arc<BeaconChain<T>>,
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
network_globals: Arc<NetworkGlobals<T::EthSpec>>,
log: Logger| {
network_globals: Arc<NetworkGlobals<T::EthSpec>>| {
task_spawner.spawn_async_with_rejection(Priority::P0, async move {
publish_blocks::publish_block(
None,
ProvenancedBlock::local_from_publish_request(block_contents),
chain,
&network_tx,
log,
validation_level.broadcast_validation,
duplicate_block_status_code,
network_globals,
@@ -1396,7 +1347,6 @@ pub fn serve<T: BeaconChainTypes>(
.and(chain_filter.clone())
.and(network_tx_filter.clone())
.and(network_globals.clone())
.and(log_filter.clone())
.then(
move |validation_level: api_types::BroadcastValidationQuery,
block_bytes: Bytes,
@@ -1404,8 +1354,7 @@ pub fn serve<T: BeaconChainTypes>(
task_spawner: TaskSpawner<T::EthSpec>,
chain: Arc<BeaconChain<T>>,
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
network_globals: Arc<NetworkGlobals<T::EthSpec>>,
log: Logger| {
network_globals: Arc<NetworkGlobals<T::EthSpec>>| {
task_spawner.spawn_async_with_rejection(Priority::P0, async move {
let block_contents = PublishBlockRequest::<T::EthSpec>::from_ssz_bytes(
&block_bytes,
@@ -1419,7 +1368,6 @@ pub fn serve<T: BeaconChainTypes>(
ProvenancedBlock::local_from_publish_request(block_contents),
chain,
&network_tx,
log,
validation_level.broadcast_validation,
duplicate_block_status_code,
network_globals,
@@ -1443,20 +1391,17 @@ pub fn serve<T: BeaconChainTypes>(
.and(chain_filter.clone())
.and(network_tx_filter.clone())
.and(network_globals.clone())
.and(log_filter.clone())
.then(
move |block_contents: Arc<SignedBlindedBeaconBlock<T::EthSpec>>,
task_spawner: TaskSpawner<T::EthSpec>,
chain: Arc<BeaconChain<T>>,
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
network_globals: Arc<NetworkGlobals<T::EthSpec>>,
log: Logger| {
network_globals: Arc<NetworkGlobals<T::EthSpec>>| {
task_spawner.spawn_async_with_rejection(Priority::P0, async move {
publish_blocks::publish_blinded_block(
block_contents,
chain,
&network_tx,
log,
BroadcastValidation::default(),
duplicate_block_status_code,
network_globals,
@@ -1476,14 +1421,12 @@ pub fn serve<T: BeaconChainTypes>(
.and(chain_filter.clone())
.and(network_tx_filter.clone())
.and(network_globals.clone())
.and(log_filter.clone())
.then(
move |block_bytes: Bytes,
task_spawner: TaskSpawner<T::EthSpec>,
chain: Arc<BeaconChain<T>>,
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
network_globals: Arc<NetworkGlobals<T::EthSpec>>,
log: Logger| {
network_globals: Arc<NetworkGlobals<T::EthSpec>>| {
task_spawner.spawn_async_with_rejection(Priority::P0, async move {
let block = SignedBlindedBeaconBlock::<T::EthSpec>::from_ssz_bytes(
&block_bytes,
@@ -1497,7 +1440,6 @@ pub fn serve<T: BeaconChainTypes>(
block,
chain,
&network_tx,
log,
BroadcastValidation::default(),
duplicate_block_status_code,
network_globals,
@@ -1517,21 +1459,18 @@ pub fn serve<T: BeaconChainTypes>(
.and(chain_filter.clone())
.and(network_tx_filter.clone())
.and(network_globals.clone())
.and(log_filter.clone())
.then(
move |validation_level: api_types::BroadcastValidationQuery,
blinded_block: Arc<SignedBlindedBeaconBlock<T::EthSpec>>,
task_spawner: TaskSpawner<T::EthSpec>,
chain: Arc<BeaconChain<T>>,
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
network_globals: Arc<NetworkGlobals<T::EthSpec>>,
log: Logger| {
network_globals: Arc<NetworkGlobals<T::EthSpec>>| {
task_spawner.spawn_async_with_rejection(Priority::P0, async move {
publish_blocks::publish_blinded_block(
blinded_block,
chain,
&network_tx,
log,
validation_level.broadcast_validation,
duplicate_block_status_code,
network_globals,
@@ -1551,15 +1490,13 @@ pub fn serve<T: BeaconChainTypes>(
.and(chain_filter.clone())
.and(network_tx_filter.clone())
.and(network_globals.clone())
.and(log_filter.clone())
.then(
move |validation_level: api_types::BroadcastValidationQuery,
block_bytes: Bytes,
task_spawner: TaskSpawner<T::EthSpec>,
chain: Arc<BeaconChain<T>>,
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
network_globals: Arc<NetworkGlobals<T::EthSpec>>,
log: Logger| {
network_globals: Arc<NetworkGlobals<T::EthSpec>>| {
task_spawner.spawn_async_with_rejection(Priority::P0, async move {
let block = SignedBlindedBeaconBlock::<T::EthSpec>::from_ssz_bytes(
&block_bytes,
@@ -1573,7 +1510,6 @@ pub fn serve<T: BeaconChainTypes>(
block,
chain,
&network_tx,
log,
validation_level.broadcast_validation,
duplicate_block_status_code,
network_globals,
@@ -1843,14 +1779,12 @@ pub fn serve<T: BeaconChainTypes>(
.and(warp_utils::json::json())
.and(network_tx_filter.clone())
.and(reprocess_send_filter.clone())
.and(log_filter.clone())
.then(
|task_spawner: TaskSpawner<T::EthSpec>,
chain: Arc<BeaconChain<T>>,
attestations: Vec<Attestation<T::EthSpec>>,
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
reprocess_tx: Option<Sender<ReprocessQueueMessage>>,
log: Logger| async move {
reprocess_tx: Option<Sender<ReprocessQueueMessage>>| async move {
let attestations = attestations.into_iter().map(Either::Left).collect();
let result = crate::publish_attestations::publish_attestations(
task_spawner,
@@ -1858,7 +1792,6 @@ pub fn serve<T: BeaconChainTypes>(
attestations,
network_tx,
reprocess_tx,
log,
)
.await
.map(|()| warp::reply::json(&()));
@@ -1874,25 +1807,22 @@ pub fn serve<T: BeaconChainTypes>(
.and(optional_consensus_version_header_filter)
.and(network_tx_filter.clone())
.and(reprocess_send_filter.clone())
.and(log_filter.clone())
.then(
|task_spawner: TaskSpawner<T::EthSpec>,
chain: Arc<BeaconChain<T>>,
payload: Value,
fork_name: Option<ForkName>,
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
reprocess_tx: Option<Sender<ReprocessQueueMessage>>,
log: Logger| async move {
reprocess_tx: Option<Sender<ReprocessQueueMessage>>| async move {
let attestations =
match crate::publish_attestations::deserialize_attestation_payload::<T>(
payload, fork_name, &log,
payload, fork_name,
) {
Ok(attestations) => attestations,
Err(err) => {
warn!(
log,
"Unable to deserialize attestation POST request";
"error" => ?err
error = ?err,
"Unable to deserialize attestation POST request"
);
return warp::reply::with_status(
warp::reply::json(
@@ -1910,7 +1840,6 @@ pub fn serve<T: BeaconChainTypes>(
attestations,
network_tx,
reprocess_tx,
log,
)
.await
.map(|()| warp::reply::json(&()));
@@ -2185,16 +2114,14 @@ pub fn serve<T: BeaconChainTypes>(
.and(warp::path::end())
.and(warp_utils::json::json())
.and(network_tx_filter.clone())
.and(log_filter.clone())
.then(
|task_spawner: TaskSpawner<T::EthSpec>,
chain: Arc<BeaconChain<T>>,
signatures: Vec<SyncCommitteeMessage>,
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
log: Logger| {
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>| {
task_spawner.blocking_json_task(Priority::P0, move || {
sync_committees::process_sync_committee_signatures(
signatures, network_tx, &chain, log,
signatures, network_tx, &chain,
)?;
Ok(api_types::GenericResponse::from(()))
})
@@ -2222,13 +2149,11 @@ pub fn serve<T: BeaconChainTypes>(
.and(warp::path::end())
.and(warp_utils::json::json())
.and(network_tx_filter.clone())
.and(log_filter.clone())
.then(
|task_spawner: TaskSpawner<T::EthSpec>,
chain: Arc<BeaconChain<T>>,
address_changes: Vec<SignedBlsToExecutionChange>,
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
log: Logger| {
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>| {
task_spawner.blocking_json_task(Priority::P0, move || {
let mut failures = vec![];
@@ -2245,11 +2170,12 @@ pub fn serve<T: BeaconChainTypes>(
.to_execution_address;
// New to P2P *and* op pool, gossip immediately if post-Capella.
let received_pre_capella = if chain.current_slot_is_post_capella().unwrap_or(false) {
ReceivedPreCapella::No
} else {
ReceivedPreCapella::Yes
};
let received_pre_capella =
if chain.current_slot_is_post_capella().unwrap_or(false) {
ReceivedPreCapella::No
} else {
ReceivedPreCapella::Yes
};
if matches!(received_pre_capella, ReceivedPreCapella::No) {
publish_pubsub_message(
&network_tx,
@@ -2260,32 +2186,29 @@ pub fn serve<T: BeaconChainTypes>(
}
// Import to op pool (may return `false` if there's a race).
let imported =
chain.import_bls_to_execution_change(verified_address_change, received_pre_capella);
let imported = chain.import_bls_to_execution_change(
verified_address_change,
received_pre_capella,
);
info!(
log,
"Processed BLS to execution change";
"validator_index" => validator_index,
"address" => ?address,
"published" => matches!(received_pre_capella, ReceivedPreCapella::No),
"imported" => imported,
%validator_index,
?address,
published =
matches!(received_pre_capella, ReceivedPreCapella::No),
imported,
"Processed BLS to execution change"
);
}
Ok(ObservationOutcome::AlreadyKnown) => {
debug!(
log,
"BLS to execution change already known";
"validator_index" => validator_index,
);
debug!(%validator_index, "BLS to execution change already known");
}
Err(e) => {
warn!(
log,
"Invalid BLS to execution change";
"validator_index" => validator_index,
"reason" => ?e,
"source" => "HTTP",
validator_index,
reason = ?e,
source = "HTTP",
"Invalid BLS to execution change"
);
failures.push(api_types::Failure::new(
index,
@@ -2658,17 +2581,15 @@ pub fn serve<T: BeaconChainTypes>(
.and(block_id_or_err)
.and(warp::path::end())
.and(warp_utils::json::json())
.and(log_filter.clone())
.then(
|task_spawner: TaskSpawner<T::EthSpec>,
chain: Arc<BeaconChain<T>>,
block_id: BlockId,
validators: Vec<ValidatorId>,
log: Logger| {
validators: Vec<ValidatorId>| {
task_spawner.blocking_json_task(Priority::P1, move || {
let (rewards, execution_optimistic, finalized) =
sync_committee_rewards::compute_sync_committee_rewards(
chain, block_id, validators, log,
chain, block_id, validators,
)?;
Ok(api_types::GenericResponse::from(rewards)).map(|resp| {
@@ -2755,14 +2676,12 @@ pub fn serve<T: BeaconChainTypes>(
.and(warp::header::optional::<api_types::Accept>("accept"))
.and(task_spawner_filter.clone())
.and(chain_filter.clone())
.and(log_filter.clone())
.then(
|endpoint_version: EndpointVersion,
state_id: StateId,
accept_header: Option<api_types::Accept>,
task_spawner: TaskSpawner<T::EthSpec>,
chain: Arc<BeaconChain<T>>,
log: Logger| {
chain: Arc<BeaconChain<T>>| {
task_spawner.blocking_response_task(Priority::P1, move || match accept_header {
Some(api_types::Accept::Ssz) => {
// We can ignore the optimistic status for the "fork" since it's a
@@ -2777,10 +2696,9 @@ pub fn serve<T: BeaconChainTypes>(
let response_bytes = state.as_ssz_bytes();
drop(timer);
debug!(
log,
"HTTP state load";
"total_time_ms" => t.elapsed().as_millis(),
"target_slot" => state.slot()
total_time_ms = t.elapsed().as_millis(),
target_slot = %state.slot(),
"HTTP state load"
);
Response::builder()
@@ -3248,16 +3166,14 @@ pub fn serve<T: BeaconChainTypes>(
.and(not_while_syncing_filter.clone())
.and(task_spawner_filter.clone())
.and(chain_filter.clone())
.and(log_filter.clone())
.then(
|epoch: Epoch,
not_synced_filter: Result<(), Rejection>,
task_spawner: TaskSpawner<T::EthSpec>,
chain: Arc<BeaconChain<T>>,
log: Logger| {
chain: Arc<BeaconChain<T>>| {
task_spawner.blocking_json_task(Priority::P0, move || {
not_synced_filter?;
proposer_duties::proposer_duties(epoch, &chain, &log)
proposer_duties::proposer_duties(epoch, &chain)
})
},
);
@@ -3277,7 +3193,6 @@ pub fn serve<T: BeaconChainTypes>(
.and(warp::query::<api_types::ValidatorBlocksQuery>())
.and(task_spawner_filter.clone())
.and(chain_filter.clone())
.and(log_filter.clone())
.then(
|endpoint_version: EndpointVersion,
slot: Slot,
@@ -3285,14 +3200,9 @@ pub fn serve<T: BeaconChainTypes>(
not_synced_filter: Result<(), Rejection>,
query: api_types::ValidatorBlocksQuery,
task_spawner: TaskSpawner<T::EthSpec>,
chain: Arc<BeaconChain<T>>,
log: Logger| {
chain: Arc<BeaconChain<T>>| {
task_spawner.spawn_async_with_rejection(Priority::P0, async move {
debug!(
log,
"Block production request from HTTP API";
"slot" => slot
);
debug!(?slot, "Block production request from HTTP API");
not_synced_filter?;
@@ -3499,7 +3409,6 @@ pub fn serve<T: BeaconChainTypes>(
.and(chain_filter.clone())
.and(warp_utils::json::json())
.and(network_tx_filter.clone())
.and(log_filter.clone())
.then(
// V1 and V2 are identical except V2 has a consensus version header in the request.
// We only require this header for SSZ deserialization, which isn't supported for
@@ -3509,7 +3418,7 @@ pub fn serve<T: BeaconChainTypes>(
task_spawner: TaskSpawner<T::EthSpec>,
chain: Arc<BeaconChain<T>>,
aggregates: Vec<SignedAggregateAndProof<T::EthSpec>>,
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>, log: Logger| {
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>| {
task_spawner.blocking_json_task(Priority::P0, move || {
not_synced_filter?;
let seen_timestamp = timestamp_now();
@@ -3556,13 +3465,13 @@ pub fn serve<T: BeaconChainTypes>(
// aggregate has been successfully published by some other node.
Err(AttnError::AggregatorAlreadyKnown(_)) => continue,
Err(e) => {
error!(log,
"Failure verifying aggregate and proofs";
"error" => format!("{:?}", e),
"request_index" => index,
"aggregator_index" => aggregate.message().aggregator_index(),
"attestation_index" => aggregate.message().aggregate().committee_index(),
"attestation_slot" => aggregate.message().aggregate().data().slot,
error!(
error = ?e,
request_index = index,
aggregator_index = aggregate.message().aggregator_index(),
attestation_index = aggregate.message().aggregate().committee_index(),
attestation_slot = %aggregate.message().aggregate().data().slot,
"Failure verifying aggregate and proofs"
);
failures.push(api_types::Failure::new(index, format!("Verification: {:?}", e)));
}
@@ -3577,22 +3486,21 @@ pub fn serve<T: BeaconChainTypes>(
// Import aggregate attestations
for (index, verified_aggregate) in verified_aggregates {
if let Err(e) = chain.apply_attestation_to_fork_choice(&verified_aggregate) {
error!(log,
"Failure applying verified aggregate attestation to fork choice";
"error" => format!("{:?}", e),
"request_index" => index,
"aggregator_index" => verified_aggregate.aggregate().message().aggregator_index(),
"attestation_index" => verified_aggregate.attestation().committee_index(),
"attestation_slot" => verified_aggregate.attestation().data().slot,
error!(
error = ?e,
request_index = index,
aggregator_index = verified_aggregate.aggregate().message().aggregator_index(),
attestation_index = verified_aggregate.attestation().committee_index(),
attestation_slot = %verified_aggregate.attestation().data().slot,
"Failure applying verified aggregate attestation to fork choice"
);
failures.push(api_types::Failure::new(index, format!("Fork choice: {:?}", e)));
}
if let Err(e) = chain.add_to_block_inclusion_pool(verified_aggregate) {
warn!(
log,
"Could not add verified aggregate attestation to the inclusion pool";
"error" => ?e,
"request_index" => index,
error = ?e,
request_index = index,
"Could not add verified aggregate attestation to the inclusion pool"
);
failures.push(api_types::Failure::new(index, format!("Op pool: {:?}", e)));
}
@@ -3618,21 +3526,18 @@ pub fn serve<T: BeaconChainTypes>(
.and(chain_filter.clone())
.and(warp_utils::json::json())
.and(network_tx_filter)
.and(log_filter.clone())
.then(
|not_synced_filter: Result<(), Rejection>,
task_spawner: TaskSpawner<T::EthSpec>,
chain: Arc<BeaconChain<T>>,
contributions: Vec<SignedContributionAndProof<T::EthSpec>>,
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
log: Logger| {
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>| {
task_spawner.blocking_json_task(Priority::P0, move || {
not_synced_filter?;
sync_committees::process_signed_contribution_and_proofs(
contributions,
network_tx,
&chain,
log,
)?;
Ok(api_types::GenericResponse::from(()))
})
@@ -3648,13 +3553,11 @@ pub fn serve<T: BeaconChainTypes>(
.and(validator_subscription_tx_filter.clone())
.and(task_spawner_filter.clone())
.and(chain_filter.clone())
.and(log_filter.clone())
.then(
|subscriptions: Vec<api_types::BeaconCommitteeSubscription>,
validator_subscription_tx: Sender<ValidatorSubscriptionMessage>,
task_spawner: TaskSpawner<T::EthSpec>,
chain: Arc<BeaconChain<T>>,
log: Logger| {
chain: Arc<BeaconChain<T>>| {
task_spawner.blocking_json_task(Priority::P0, move || {
let subscriptions: std::collections::BTreeSet<_> = subscriptions
.iter()
@@ -3675,10 +3578,9 @@ pub fn serve<T: BeaconChainTypes>(
ValidatorSubscriptionMessage::AttestationSubscribe { subscriptions };
if let Err(e) = validator_subscription_tx.try_send(message) {
warn!(
log,
"Unable to process committee subscriptions";
"info" => "the host may be overloaded or resource-constrained",
"error" => ?e,
info = "the host may be overloaded or resource-constrained",
error = ?e,
"Unable to process committee subscriptions"
);
return Err(warp_utils::reject::custom_server_error(
"unable to queue subscription, host may be overloaded or shutting down"
@@ -3699,13 +3601,11 @@ pub fn serve<T: BeaconChainTypes>(
.and(not_while_syncing_filter.clone())
.and(task_spawner_filter.clone())
.and(chain_filter.clone())
.and(log_filter.clone())
.and(warp_utils::json::json())
.then(
|not_synced_filter: Result<(), Rejection>,
task_spawner: TaskSpawner<T::EthSpec>,
chain: Arc<BeaconChain<T>>,
log: Logger,
preparation_data: Vec<ProposerPreparationData>| {
task_spawner.spawn_async_with_rejection(Priority::P0, async move {
not_synced_filter?;
@@ -3719,9 +3619,8 @@ pub fn serve<T: BeaconChainTypes>(
let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch());
debug!(
log,
"Received proposer preparation data";
"count" => preparation_data.len(),
count = preparation_data.len(),
"Received proposer preparation data"
);
execution_layer
@@ -3753,12 +3652,10 @@ pub fn serve<T: BeaconChainTypes>(
.and(warp::path::end())
.and(task_spawner_filter.clone())
.and(chain_filter.clone())
.and(log_filter.clone())
.and(warp_utils::json::json())
.then(
|task_spawner: TaskSpawner<T::EthSpec>,
chain: Arc<BeaconChain<T>>,
log: Logger,
register_val_data: Vec<SignedValidatorRegistrationData>| async {
let (tx, rx) = oneshot::channel();
@@ -3777,9 +3674,8 @@ pub fn serve<T: BeaconChainTypes>(
let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch());
debug!(
log,
"Received register validator request";
"count" => register_val_data.len(),
count = register_val_data.len(),
"Received register validator request"
);
let head_snapshot = chain.head_snapshot();
@@ -3854,9 +3750,8 @@ pub fn serve<T: BeaconChainTypes>(
})?;
info!(
log,
"Forwarding register validator request to connected builder";
"count" => filtered_registration_data.len(),
count = filtered_registration_data.len(),
"Forwarding register validator request to connected builder"
);
// It's a waste of a `BeaconProcessor` worker to just
@@ -3881,10 +3776,9 @@ pub fn serve<T: BeaconChainTypes>(
.map(|resp| warp::reply::json(&resp).into_response())
.map_err(|e| {
warn!(
log,
"Relay error when registering validator(s)";
"num_registrations" => filtered_registration_data.len(),
"error" => ?e
num_registrations = filtered_registration_data.len(),
error = ?e,
"Relay error when registering validator(s)"
);
// Forward the HTTP status code if we are able to, otherwise fall back
// to a server error.
@@ -3938,13 +3832,11 @@ pub fn serve<T: BeaconChainTypes>(
.and(validator_subscription_tx_filter)
.and(task_spawner_filter.clone())
.and(chain_filter.clone())
.and(log_filter.clone())
.then(
|subscriptions: Vec<types::SyncCommitteeSubscription>,
validator_subscription_tx: Sender<ValidatorSubscriptionMessage>,
task_spawner: TaskSpawner<T::EthSpec>,
chain: Arc<BeaconChain<T>>,
log: Logger
| {
task_spawner.blocking_json_task(Priority::P0, move || {
for subscription in subscriptions {
@@ -3958,10 +3850,9 @@ pub fn serve<T: BeaconChainTypes>(
};
if let Err(e) = validator_subscription_tx.try_send(message) {
warn!(
log,
"Unable to process sync subscriptions";
"info" => "the host may be overloaded or resource-constrained",
"error" => ?e
info = "the host may be overloaded or resource-constrained",
error = ?e,
"Unable to process sync subscriptions"
);
return Err(warp_utils::reject::custom_server_error(
"unable to queue subscription, host may be overloaded or shutting down".to_string(),
@@ -4431,10 +4322,9 @@ pub fn serve<T: BeaconChainTypes>(
.and(warp::path::end())
.and(task_spawner_filter.clone())
.and(chain_filter.clone())
.and(log_filter.clone())
.then(|query, task_spawner: TaskSpawner<T::EthSpec>, chain, log| {
.then(|query, task_spawner: TaskSpawner<T::EthSpec>, chain| {
task_spawner.blocking_json_task(Priority::P1, move || {
block_rewards::get_block_rewards(query, chain, log)
block_rewards::get_block_rewards(query, chain)
})
});
@@ -4446,14 +4336,11 @@ pub fn serve<T: BeaconChainTypes>(
.and(warp::path::end())
.and(task_spawner_filter.clone())
.and(chain_filter.clone())
.and(log_filter.clone())
.then(
|blocks, task_spawner: TaskSpawner<T::EthSpec>, chain, log| {
task_spawner.blocking_json_task(Priority::P1, move || {
block_rewards::compute_block_rewards(blocks, chain, log)
})
},
);
.then(|blocks, task_spawner: TaskSpawner<T::EthSpec>, chain| {
task_spawner.blocking_json_task(Priority::P1, move || {
block_rewards::compute_block_rewards(blocks, chain)
})
});
// GET lighthouse/analysis/attestation_performance/{index}
let get_lighthouse_attestation_performance = warp::path("lighthouse")
@@ -4631,7 +4518,9 @@ pub fn serve<T: BeaconChainTypes>(
match msg {
Ok(data) => {
// Serialize to json
match data.to_json_string() {
match serde_json::to_string(&data)
.map_err(|e| format!("{:?}", e))
{
// Send the json as a Server Side Event
Ok(json) => Ok(Event::default().data(json)),
Err(e) => {
@@ -4779,7 +4668,6 @@ pub fn serve<T: BeaconChainTypes>(
),
)
.recover(warp_utils::reject::handle_rejection)
.with(slog_logging(log.clone()))
.with(prometheus_metrics())
// Add a `Server` header.
.map(|reply| warp::reply::with_header(reply, "Server", &version_with_platform()))
@@ -4797,7 +4685,7 @@ pub fn serve<T: BeaconChainTypes>(
shutdown.await;
})?;
info!(log, "HTTP API is being served over TLS";);
info!("HTTP API is being served over TLS");
(socket, Box::pin(server))
}
@@ -4811,9 +4699,8 @@ pub fn serve<T: BeaconChainTypes>(
};
info!(
log,
"HTTP API started";
"listen_address" => %http_server.0,
listen_address = %http_server.0,
"HTTP API started"
);
Ok(http_server)