Integrate tracing (#6339)

Tracing Integration
- [reference](5bbf1859e9/projects/project-ideas.md#L297)


- [x] replace slog & log with tracing throughout the codebase
- [x] implement custom crit log
- [x] make relevant changes in the formatter
- [x] replace sloggers
- [x] re-write SSE logging components

cc: @macladson @eserilev
This commit is contained in:
ThreeHrSleep
2025-03-13 04:01:05 +05:30
committed by GitHub
parent f23f984f85
commit d60c24ef1c
241 changed files with 9485 additions and 9328 deletions

View File

@@ -35,7 +35,6 @@ use monitoring_api::{MonitoringHttpClient, ProcessType};
use network::{NetworkConfig, NetworkSenders, NetworkService};
use slasher::Slasher;
use slasher_service::SlasherService;
use slog::{debug, info, warn, Logger};
use std::net::TcpListener;
use std::path::{Path, PathBuf};
use std::sync::Arc;
@@ -44,6 +43,7 @@ use std::time::{SystemTime, UNIX_EPOCH};
use store::database::interface::BeaconNodeBackend;
use timer::spawn_timer;
use tokio::sync::oneshot;
use tracing::{debug, info, warn};
use types::{
test_utils::generate_deterministic_keypairs, BeaconState, BlobSidecarList, ChainSpec, EthSpec,
ExecutionBlockHash, Hash256, SignedBeaconBlock,
@@ -170,11 +170,9 @@ where
let runtime_context =
runtime_context.ok_or("beacon_chain_start_method requires a runtime context")?;
let context = runtime_context.service_context("beacon".into());
let log = context.log();
let spec = chain_spec.ok_or("beacon_chain_start_method requires a chain spec")?;
let event_handler = if self.http_api_config.enabled {
Some(ServerSentEventHandler::new(
context.log().clone(),
self.http_api_config.sse_capacity_multiplier,
))
} else {
@@ -183,12 +181,8 @@ where
let execution_layer = if let Some(config) = config.execution_layer.clone() {
let context = runtime_context.service_context("exec".into());
let execution_layer = ExecutionLayer::from_config(
config,
context.executor.clone(),
context.log().clone(),
)
.map_err(|e| format!("unable to start execution layer endpoints: {:?}", e))?;
let execution_layer = ExecutionLayer::from_config(config, context.executor.clone())
.map_err(|e| format!("unable to start execution layer endpoints: {:?}", e))?;
Some(execution_layer)
} else {
None
@@ -205,7 +199,6 @@ where
};
let builder = BeaconChainBuilder::new(eth_spec_instance, Arc::new(kzg))
.logger(context.log().clone())
.store(store)
.task_executor(context.executor.clone())
.custom_spec(spec.clone())
@@ -245,7 +238,7 @@ where
// using it.
let client_genesis = if matches!(client_genesis, ClientGenesis::FromStore) && !chain_exists
{
info!(context.log(), "Defaulting to deposit contract genesis");
info!("Defaulting to deposit contract genesis");
ClientGenesis::DepositContract
} else if chain_exists {
@@ -253,9 +246,8 @@ where
|| matches!(client_genesis, ClientGenesis::CheckpointSyncUrl { .. })
{
info!(
context.log(),
"Refusing to checkpoint sync";
"msg" => "database already exists, use --purge-db to force checkpoint sync"
msg = "database already exists, use --purge-db to force checkpoint sync",
"Refusing to checkpoint sync"
);
}
@@ -295,12 +287,9 @@ where
builder.genesis_state(genesis_state).map(|v| (v, None))?
}
ClientGenesis::GenesisState => {
info!(
context.log(),
"Starting from known genesis state";
);
info!("Starting from known genesis state");
let genesis_state = genesis_state(&runtime_context, &config, log).await?;
let genesis_state = genesis_state(&runtime_context, &config).await?;
// If the user has not explicitly allowed genesis sync, prevent
// them from trying to sync from genesis if we're outside of the
@@ -348,12 +337,9 @@ where
anchor_block_bytes,
anchor_blobs_bytes,
} => {
info!(context.log(), "Starting checkpoint sync");
info!("Starting checkpoint sync");
if config.chain.genesis_backfill {
info!(
context.log(),
"Blocks will downloaded all the way back to genesis"
);
info!("Blocks will downloaded all the way back to genesis");
}
let anchor_state = BeaconState::from_ssz_bytes(&anchor_state_bytes, &spec)
@@ -371,7 +357,7 @@ where
} else {
None
};
let genesis_state = genesis_state(&runtime_context, &config, log).await?;
let genesis_state = genesis_state(&runtime_context, &config).await?;
builder
.weak_subjectivity_state(
@@ -384,15 +370,11 @@ where
}
ClientGenesis::CheckpointSyncUrl { url } => {
info!(
context.log(),
"Starting checkpoint sync";
"remote_url" => %url,
remote_url = %url,
"Starting checkpoint sync"
);
if config.chain.genesis_backfill {
info!(
context.log(),
"Blocks will be downloaded all the way back to genesis"
);
info!("Blocks will be downloaded all the way back to genesis");
}
let remote = BeaconNodeHttpClient::new(
@@ -406,7 +388,7 @@ where
// We want to fetch deposit snapshot before fetching the finalized beacon state to
// ensure that the snapshot is not newer than the beacon state that satisfies the
// deposit finalization conditions
debug!(context.log(), "Downloading deposit snapshot");
debug!("Downloading deposit snapshot");
let deposit_snapshot_result = remote
.get_deposit_snapshot()
.await
@@ -423,22 +405,18 @@ where
if deposit_snapshot.is_valid() {
Some(deposit_snapshot)
} else {
warn!(context.log(), "Remote BN sent invalid deposit snapshot!");
warn!("Remote BN sent invalid deposit snapshot!");
None
}
}
Ok(None) => {
warn!(
context.log(),
"Remote BN does not support EIP-4881 fast deposit sync"
);
warn!("Remote BN does not support EIP-4881 fast deposit sync");
None
}
Err(e) => {
warn!(
context.log(),
"Remote BN does not support EIP-4881 fast deposit sync";
"error" => e
error = e,
"Remote BN does not support EIP-4881 fast deposit sync"
);
None
}
@@ -447,21 +425,18 @@ where
None
};
debug!(
context.log(),
"Downloading finalized state";
);
debug!("Downloading finalized state");
let state = remote
.get_debug_beacon_states_ssz::<E>(StateId::Finalized, &spec)
.await
.map_err(|e| format!("Error loading checkpoint state from remote: {:?}", e))?
.ok_or_else(|| "Checkpoint state missing from remote".to_string())?;
debug!(context.log(), "Downloaded finalized state"; "slot" => ?state.slot());
debug!(slot = ?state.slot(), "Downloaded finalized state");
let finalized_block_slot = state.latest_block_header().slot;
debug!(context.log(), "Downloading finalized block"; "block_slot" => ?finalized_block_slot);
debug!(block_slot = ?finalized_block_slot,"Downloading finalized block");
let block = remote
.get_beacon_blocks_ssz::<E>(BlockId::Slot(finalized_block_slot), &spec)
.await
@@ -476,24 +451,23 @@ where
.ok_or("Finalized block missing from remote, it returned 404")?;
let block_root = block.canonical_root();
debug!(context.log(), "Downloaded finalized block");
debug!("Downloaded finalized block");
let blobs = if block.message().body().has_blobs() {
debug!(context.log(), "Downloading finalized blobs");
debug!("Downloading finalized blobs");
if let Some(response) = remote
.get_blobs::<E>(BlockId::Root(block_root), None)
.await
.map_err(|e| format!("Error fetching finalized blobs from remote: {e:?}"))?
{
debug!(context.log(), "Downloaded finalized blobs");
debug!("Downloaded finalized blobs");
Some(response.data)
} else {
warn!(
context.log(),
"Checkpoint server is missing blobs";
"block_root" => %block_root,
"hint" => "use a different URL or ask the provider to update",
"impact" => "db will be slightly corrupt until these blobs are pruned",
block_root = %block_root,
hint = "use a different URL or ask the provider to update",
impact = "db will be slightly corrupt until these blobs are pruned",
"Checkpoint server is missing blobs"
);
None
}
@@ -501,35 +475,31 @@ where
None
};
let genesis_state = genesis_state(&runtime_context, &config, log).await?;
let genesis_state = genesis_state(&runtime_context, &config).await?;
info!(
context.log(),
"Loaded checkpoint block and state";
"block_slot" => block.slot(),
"state_slot" => state.slot(),
"block_root" => ?block_root,
block_slot = %block.slot(),
state_slot = %state.slot(),
block_root = ?block_root,
"Loaded checkpoint block and state"
);
let service =
deposit_snapshot.and_then(|snapshot| match Eth1Service::from_deposit_snapshot(
config.eth1,
context.log().clone(),
spec.clone(),
&snapshot,
) {
Ok(service) => {
info!(
context.log(),
"Loaded deposit tree snapshot";
"deposits loaded" => snapshot.deposit_count,
deposits_loaded = snapshot.deposit_count,
"Loaded deposit tree snapshot"
);
Some(service)
}
Err(e) => {
warn!(context.log(),
"Unable to load deposit snapshot";
"error" => ?e
warn!(error = ?e,
"Unable to load deposit snapshot"
);
None
}
@@ -541,18 +511,14 @@ where
}
ClientGenesis::DepositContract => {
info!(
context.log(),
"Waiting for eth2 genesis from eth1";
"eth1_endpoints" => format!("{:?}", &config.eth1.endpoint),
"contract_deploy_block" => config.eth1.deposit_contract_deploy_block,
"deposit_contract" => &config.eth1.deposit_contract_address
eth1_endpoints = ?config.eth1.endpoint,
contract_deploy_block = config.eth1.deposit_contract_deploy_block,
deposit_contract = &config.eth1.deposit_contract_address,
"Waiting for eth2 genesis from eth1"
);
let genesis_service = Eth1GenesisService::new(
config.eth1,
context.log().clone(),
context.eth2_config().spec.clone(),
)?;
let genesis_service =
Eth1GenesisService::new(config.eth1, context.eth2_config().spec.clone())?;
// If the HTTP API server is enabled, start an instance of it where it only
// contains a reference to the eth1 service (all non-eth1 endpoints will fail
@@ -575,7 +541,6 @@ where
beacon_processor_send: None,
beacon_processor_reprocess_send: None,
eth1_service: Some(genesis_service.eth1_service.clone()),
log: context.log().clone(),
sse_logging_components: runtime_context.sse_logging_components.clone(),
});
@@ -587,10 +552,9 @@ where
let (listen_addr, server) = http_api::serve(ctx, exit_future)
.map_err(|e| format!("Unable to start HTTP API server: {:?}", e))?;
let log_clone = context.log().clone();
let http_api_task = async move {
server.await;
debug!(log_clone, "HTTP API server task ended");
debug!("HTTP API server task ended");
};
context
@@ -617,9 +581,8 @@ where
// We will restart it again after we've finished setting up for genesis.
while TcpListener::bind(http_listen).is_err() {
warn!(
context.log(),
"Waiting for HTTP server port to open";
"port" => http_listen
port = %http_listen,
"Waiting for HTTP server port to open"
);
tokio::time::sleep(Duration::from_secs(1)).await;
}
@@ -738,7 +701,7 @@ where
.as_ref()
.ok_or("monitoring_client requires a runtime_context")?
.service_context("monitoring_client".into());
let monitoring_client = MonitoringHttpClient::new(config, context.log().clone())?;
let monitoring_client = MonitoringHttpClient::new(config)?;
monitoring_client.auto_update(
context.executor,
vec![ProcessType::BeaconNode, ProcessType::System],
@@ -798,7 +761,6 @@ where
.beacon_processor_config
.take()
.ok_or("build requires a beacon_processor_config")?;
let log = runtime_context.log().clone();
let http_api_listen_addr = if self.http_api_config.enabled {
let ctx = Arc::new(http_api::Context {
@@ -812,7 +774,6 @@ where
beacon_processor_channels.work_reprocessing_tx.clone(),
),
sse_logging_components: runtime_context.sse_logging_components.clone(),
log: log.clone(),
});
let exit = runtime_context.executor.exit();
@@ -820,10 +781,9 @@ where
let (listen_addr, server) = http_api::serve(ctx, exit)
.map_err(|e| format!("Unable to start HTTP API server: {:?}", e))?;
let http_log = runtime_context.log().clone();
let http_api_task = async move {
server.await;
debug!(http_log, "HTTP API server task ended");
debug!("HTTP API server task ended");
};
runtime_context
@@ -833,7 +793,7 @@ where
Some(listen_addr)
} else {
info!(log, "HTTP server is disabled");
info!("HTTP server is disabled");
None
};
@@ -844,7 +804,6 @@ where
db_path: self.db_path.clone(),
freezer_db_path: self.freezer_db_path.clone(),
gossipsub_registry: self.libp2p_registry.take().map(std::sync::Mutex::new),
log: log.clone(),
});
let exit = runtime_context.executor.exit();
@@ -858,7 +817,7 @@ where
Some(listen_addr)
} else {
debug!(log, "Metrics server is disabled");
debug!("Metrics server is disabled");
None
};
@@ -874,7 +833,6 @@ where
executor: beacon_processor_context.executor.clone(),
current_workers: 0,
config: beacon_processor_config,
log: beacon_processor_context.log().clone(),
}
.spawn_manager(
beacon_processor_channels.beacon_processor_rx,
@@ -895,12 +853,7 @@ where
}
let state_advance_context = runtime_context.service_context("state_advance".into());
let state_advance_log = state_advance_context.log().clone();
spawn_state_advance_timer(
state_advance_context.executor,
beacon_chain.clone(),
state_advance_log,
);
spawn_state_advance_timer(state_advance_context.executor, beacon_chain.clone());
if let Some(execution_layer) = beacon_chain.execution_layer.as_ref() {
// Only send a head update *after* genesis.
@@ -929,9 +882,8 @@ where
// node comes online.
if let Err(e) = result {
warn!(
log,
"Failed to update head on execution engines";
"error" => ?e
error = ?e,
"Failed to update head on execution engines"
);
}
},
@@ -954,14 +906,12 @@ where
let inner_chain = beacon_chain.clone();
let light_client_update_context =
runtime_context.service_context("lc_update".to_string());
let log = light_client_update_context.log().clone();
light_client_update_context.executor.spawn(
async move {
compute_light_client_updates(
&inner_chain,
light_client_server_rv,
beacon_processor_channels.work_reprocessing_tx,
&log,
)
.await
},
@@ -1044,7 +994,6 @@ where
cold_path: &Path,
blobs_path: &Path,
config: StoreConfig,
log: Logger,
) -> Result<Self, String> {
let context = self
.runtime_context
@@ -1073,7 +1022,6 @@ where
genesis_state_root,
from,
to,
log,
)
};
@@ -1084,7 +1032,6 @@ where
schema_upgrade,
config,
spec,
context.log().clone(),
)
.map_err(|e| format!("Unable to open database: {:?}", e))?;
self.store = Some(store);
@@ -1132,22 +1079,15 @@ where
CachingEth1Backend::from_service(eth1_service_from_genesis)
} else if config.purge_cache {
CachingEth1Backend::new(config, context.log().clone(), spec)?
CachingEth1Backend::new(config, spec)?
} else {
beacon_chain_builder
.get_persisted_eth1_backend()?
.map(|persisted| {
Eth1Chain::from_ssz_container(
&persisted,
config.clone(),
&context.log().clone(),
spec.clone(),
)
.map(|chain| chain.into_backend())
Eth1Chain::from_ssz_container(&persisted, config.clone(), spec.clone())
.map(|chain| chain.into_backend())
})
.unwrap_or_else(|| {
CachingEth1Backend::new(config, context.log().clone(), spec.clone())
})?
.unwrap_or_else(|| CachingEth1Backend::new(config, spec.clone()))?
};
self.eth1_service = Some(backend.core.clone());
@@ -1230,7 +1170,6 @@ where
async fn genesis_state<E: EthSpec>(
context: &RuntimeContext<E>,
config: &ClientConfig,
log: &Logger,
) -> Result<BeaconState<E>, String> {
let eth2_network_config = context
.eth2_network_config
@@ -1240,7 +1179,6 @@ async fn genesis_state<E: EthSpec>(
.genesis_state::<E>(
config.genesis_state_url.as_deref(),
config.genesis_state_url_timeout,
log,
)
.await?
.ok_or_else(|| "Genesis state is unknown".to_string())

View File

@@ -2,8 +2,8 @@ use beacon_chain::{BeaconChain, BeaconChainTypes, LightClientProducerEvent};
use beacon_processor::work_reprocessing_queue::ReprocessQueueMessage;
use futures::channel::mpsc::Receiver;
use futures::StreamExt;
use slog::{error, Logger};
use tokio::sync::mpsc::Sender;
use tracing::error;
// Each `LightClientProducerEvent` is ~200 bytes. With the light_client server producing only recent
// updates it is okay to drop some events in case of overloading. In normal network conditions
@@ -15,7 +15,6 @@ pub async fn compute_light_client_updates<T: BeaconChainTypes>(
chain: &BeaconChain<T>,
mut light_client_server_rv: Receiver<LightClientProducerEvent<T::EthSpec>>,
reprocess_tx: Sender<ReprocessQueueMessage>,
log: &Logger,
) {
// Should only receive events for recent blocks, import_block filters by blocks close to clock.
//
@@ -28,12 +27,12 @@ pub async fn compute_light_client_updates<T: BeaconChainTypes>(
chain
.recompute_and_cache_light_client_updates(event)
.unwrap_or_else(|e| {
error!(log, "error computing light_client updates {:?}", e);
error!("error computing light_client updates {:?}", e);
});
let msg = ReprocessQueueMessage::NewLightClientOptimisticUpdate { parent_root };
if reprocess_tx.try_send(msg).is_err() {
error!(log, "Failed to inform light client update"; "parent_root" => %parent_root)
error!(%parent_root,"Failed to inform light client update")
};
}
}

View File

@@ -8,12 +8,13 @@ use beacon_chain::{
BeaconChain, BeaconChainTypes, ExecutionStatus,
};
use lighthouse_network::{types::SyncState, NetworkGlobals};
use slog::{crit, debug, error, info, warn, Logger};
use logging::crit;
use slot_clock::SlotClock;
use std::sync::Arc;
use std::time::{Duration, Instant};
use tokio::sync::Mutex;
use tokio::time::sleep;
use tracing::{debug, error, info, warn};
use types::*;
/// Create a warning log whenever the peer count is at or below this value.
@@ -39,7 +40,6 @@ pub fn spawn_notifier<T: BeaconChainTypes>(
let slot_duration = Duration::from_secs(seconds_per_slot);
let speedo = Mutex::new(Speedo::default());
let log = executor.log().clone();
// Keep track of sync state and reset the speedo on specific sync state changes.
// Specifically, if we switch between a sync and a backfill sync, reset the speedo.
@@ -56,15 +56,14 @@ pub fn spawn_notifier<T: BeaconChainTypes>(
// waiting for genesis.
Some(next_slot) if next_slot > slot_duration => {
info!(
log,
"Waiting for genesis";
"peers" => peer_count_pretty(network.connected_peers()),
"wait_time" => estimated_time_pretty(Some(next_slot.as_secs() as f64)),
peers = peer_count_pretty(network.connected_peers()),
wait_time = estimated_time_pretty(Some(next_slot.as_secs() as f64)),
"Waiting for genesis"
);
eth1_logging(&beacon_chain, &log);
bellatrix_readiness_logging(Slot::new(0), &beacon_chain, &log).await;
capella_readiness_logging(Slot::new(0), &beacon_chain, &log).await;
genesis_execution_payload_logging(&beacon_chain, &log).await;
eth1_logging(&beacon_chain);
bellatrix_readiness_logging(Slot::new(0), &beacon_chain).await;
capella_readiness_logging(Slot::new(0), &beacon_chain).await;
genesis_execution_payload_logging(&beacon_chain).await;
sleep(slot_duration).await;
}
_ => break,
@@ -82,7 +81,7 @@ pub fn spawn_notifier<T: BeaconChainTypes>(
let wait = match beacon_chain.slot_clock.duration_to_next_slot() {
Some(duration) => duration + slot_duration / 2,
None => {
warn!(log, "Unable to read current slot");
warn!("Unable to read current slot");
sleep(slot_duration).await;
continue;
}
@@ -120,11 +119,7 @@ pub fn spawn_notifier<T: BeaconChainTypes>(
let current_slot = match beacon_chain.slot() {
Ok(slot) => slot,
Err(e) => {
error!(
log,
"Unable to read current slot";
"error" => format!("{:?}", e)
);
error!(error = ?e, "Unable to read current slot");
break;
}
};
@@ -168,19 +163,21 @@ pub fn spawn_notifier<T: BeaconChainTypes>(
);
if connected_peer_count <= WARN_PEER_COUNT {
warn!(log, "Low peer count"; "peer_count" => peer_count_pretty(connected_peer_count));
warn!(
peer_count = peer_count_pretty(connected_peer_count),
"Low peer count"
);
}
debug!(
log,
"Slot timer";
"peers" => peer_count_pretty(connected_peer_count),
"finalized_root" => format!("{}", finalized_checkpoint.root),
"finalized_epoch" => finalized_checkpoint.epoch,
"head_block" => format!("{}", head_root),
"head_slot" => head_slot,
"current_slot" => current_slot,
"sync_state" =>format!("{}", current_sync_state)
peers = peer_count_pretty(connected_peer_count),
finalized_root = %finalized_checkpoint.root,
finalized_epoch = %finalized_checkpoint.epoch,
head_block = %head_root,
%head_slot,
%current_slot,
sync_state = %current_sync_state,
"Slot timer"
);
// Log if we are backfilling.
@@ -202,26 +199,31 @@ pub fn spawn_notifier<T: BeaconChainTypes>(
if display_speed {
info!(
log,
"Downloading historical blocks";
"distance" => distance,
"speed" => sync_speed_pretty(speed),
"est_time" => estimated_time_pretty(speedo.estimated_time_till_slot(original_oldest_block_slot.saturating_sub(beacon_chain.genesis_backfill_slot))),
distance,
speed = sync_speed_pretty(speed),
est_time = estimated_time_pretty(
speedo.estimated_time_till_slot(
original_oldest_block_slot
.saturating_sub(beacon_chain.genesis_backfill_slot)
)
),
"Downloading historical blocks"
);
} else {
info!(
log,
"Downloading historical blocks";
"distance" => distance,
"est_time" => estimated_time_pretty(speedo.estimated_time_till_slot(original_oldest_block_slot.saturating_sub(beacon_chain.genesis_backfill_slot))),
distance,
est_time = estimated_time_pretty(
speedo.estimated_time_till_slot(
original_oldest_block_slot
.saturating_sub(beacon_chain.genesis_backfill_slot)
)
),
"Downloading historical blocks"
);
}
} else if !is_backfilling && last_backfill_log_slot.is_some() {
last_backfill_log_slot = None;
info!(
log,
"Historical block download complete";
);
info!("Historical block download complete");
}
// Log if we are syncing
@@ -238,20 +240,20 @@ pub fn spawn_notifier<T: BeaconChainTypes>(
if display_speed {
info!(
log,
"Syncing";
"peers" => peer_count_pretty(connected_peer_count),
"distance" => distance,
"speed" => sync_speed_pretty(speed),
"est_time" => estimated_time_pretty(speedo.estimated_time_till_slot(current_slot)),
peers = peer_count_pretty(connected_peer_count),
distance,
speed = sync_speed_pretty(speed),
est_time =
estimated_time_pretty(speedo.estimated_time_till_slot(current_slot)),
"Syncing"
);
} else {
info!(
log,
"Syncing";
"peers" => peer_count_pretty(connected_peer_count),
"distance" => distance,
"est_time" => estimated_time_pretty(speedo.estimated_time_till_slot(current_slot)),
peers = peer_count_pretty(connected_peer_count),
distance,
est_time =
estimated_time_pretty(speedo.estimated_time_till_slot(current_slot)),
"Syncing"
);
}
} else if current_sync_state.is_synced() {
@@ -267,20 +269,18 @@ pub fn spawn_notifier<T: BeaconChainTypes>(
Ok(ExecutionStatus::Valid(hash)) => format!("{} (verified)", hash),
Ok(ExecutionStatus::Optimistic(hash)) => {
warn!(
log,
"Head is optimistic";
"info" => "chain not fully verified, \
block and attestation production disabled until execution engine syncs",
"execution_block_hash" => ?hash,
info = "chain not fully verified, \
block and attestation production disabled until execution engine syncs",
execution_block_hash = ?hash,
"Head is optimistic"
);
format!("{} (unverified)", hash)
}
Ok(ExecutionStatus::Invalid(hash)) => {
crit!(
log,
"Head execution payload is invalid";
"msg" => "this scenario may be unrecoverable",
"execution_block_hash" => ?hash,
msg = "this scenario may be unrecoverable",
execution_block_hash = ?hash,
"Head execution payload is invalid"
);
format!("{} (invalid)", hash)
}
@@ -288,35 +288,33 @@ pub fn spawn_notifier<T: BeaconChainTypes>(
};
info!(
log,
"Synced";
"peers" => peer_count_pretty(connected_peer_count),
"exec_hash" => block_hash,
"finalized_root" => format!("{}", finalized_checkpoint.root),
"finalized_epoch" => finalized_checkpoint.epoch,
"epoch" => current_epoch,
"block" => block_info,
"slot" => current_slot,
peers = peer_count_pretty(connected_peer_count),
exec_hash = block_hash,
finalized_root = %finalized_checkpoint.root,
finalized_epoch = %finalized_checkpoint.epoch,
epoch = %current_epoch,
block = block_info,
slot = %current_slot,
"Synced"
);
} else {
metrics::set_gauge(&metrics::IS_SYNCED, 0);
info!(
log,
"Searching for peers";
"peers" => peer_count_pretty(connected_peer_count),
"finalized_root" => format!("{}", finalized_checkpoint.root),
"finalized_epoch" => finalized_checkpoint.epoch,
"head_slot" => head_slot,
"current_slot" => current_slot,
peers = peer_count_pretty(connected_peer_count),
finalized_root = %finalized_checkpoint.root,
finalized_epoch = %finalized_checkpoint.epoch,
%head_slot,
%current_slot,
"Searching for peers"
);
}
eth1_logging(&beacon_chain, &log);
bellatrix_readiness_logging(current_slot, &beacon_chain, &log).await;
capella_readiness_logging(current_slot, &beacon_chain, &log).await;
deneb_readiness_logging(current_slot, &beacon_chain, &log).await;
electra_readiness_logging(current_slot, &beacon_chain, &log).await;
fulu_readiness_logging(current_slot, &beacon_chain, &log).await;
eth1_logging(&beacon_chain);
bellatrix_readiness_logging(current_slot, &beacon_chain).await;
capella_readiness_logging(current_slot, &beacon_chain).await;
deneb_readiness_logging(current_slot, &beacon_chain).await;
electra_readiness_logging(current_slot, &beacon_chain).await;
fulu_readiness_logging(current_slot, &beacon_chain).await;
}
};
@@ -331,7 +329,6 @@ pub fn spawn_notifier<T: BeaconChainTypes>(
async fn bellatrix_readiness_logging<T: BeaconChainTypes>(
current_slot: Slot,
beacon_chain: &BeaconChain<T>,
log: &Logger,
) {
let merge_completed = beacon_chain
.canonical_head
@@ -355,10 +352,9 @@ async fn bellatrix_readiness_logging<T: BeaconChainTypes>(
// Logging of the EE being offline is handled in the other readiness logging functions.
if !beacon_chain.is_time_to_prepare_for_capella(current_slot) {
error!(
log,
"Execution endpoint required";
"info" => "you need an execution engine to validate blocks, see: \
https://lighthouse-book.sigmaprime.io/merge-migration.html"
info = "you need an execution engine to validate blocks, see: \
https://lighthouse-book.sigmaprime.io/merge-migration.html",
"Execution endpoint required"
);
}
return;
@@ -375,12 +371,11 @@ async fn bellatrix_readiness_logging<T: BeaconChainTypes>(
terminal_block_hash_epoch: None,
} => {
info!(
log,
"Ready for Bellatrix";
"terminal_total_difficulty" => %ttd,
"current_difficulty" => current_difficulty
terminal_total_difficulty = %ttd,
current_difficulty = current_difficulty
.map(|d| d.to_string())
.unwrap_or_else(|| "??".into()),
"Ready for Bellatrix"
)
}
MergeConfig {
@@ -389,29 +384,25 @@ async fn bellatrix_readiness_logging<T: BeaconChainTypes>(
terminal_block_hash_epoch: Some(terminal_block_hash_epoch),
} => {
info!(
log,
"Ready for Bellatrix";
"info" => "you are using override parameters, please ensure that you \
understand these parameters and their implications.",
"terminal_block_hash" => ?terminal_block_hash,
"terminal_block_hash_epoch" => ?terminal_block_hash_epoch,
info = "you are using override parameters, please ensure that you \
understand these parameters and their implications.",
?terminal_block_hash,
?terminal_block_hash_epoch,
"Ready for Bellatrix"
)
}
other => error!(
log,
"Inconsistent merge configuration";
"config" => ?other
config = ?other,
"Inconsistent merge configuration"
),
},
readiness @ BellatrixReadiness::NotSynced => warn!(
log,
"Not ready Bellatrix";
"info" => %readiness,
info = %readiness,
"Not ready Bellatrix"
),
readiness @ BellatrixReadiness::NoExecutionEndpoint => warn!(
log,
"Not ready for Bellatrix";
"info" => %readiness,
info = %readiness,
"Not ready for Bellatrix"
),
}
}
@@ -420,7 +411,6 @@ async fn bellatrix_readiness_logging<T: BeaconChainTypes>(
async fn capella_readiness_logging<T: BeaconChainTypes>(
current_slot: Slot,
beacon_chain: &BeaconChain<T>,
log: &Logger,
) {
let capella_completed = beacon_chain
.canonical_head
@@ -442,10 +432,9 @@ async fn capella_readiness_logging<T: BeaconChainTypes>(
// Logging of the EE being offline is handled in the other readiness logging functions.
if !beacon_chain.is_time_to_prepare_for_deneb(current_slot) {
error!(
log,
"Execution endpoint required";
"info" => "you need a Capella enabled execution engine to validate blocks, see: \
https://lighthouse-book.sigmaprime.io/merge-migration.html"
info = "you need a Capella enabled execution engine to validate blocks, see: \
https://lighthouse-book.sigmaprime.io/merge-migration.html",
"Execution endpoint required"
);
}
return;
@@ -454,24 +443,21 @@ async fn capella_readiness_logging<T: BeaconChainTypes>(
match beacon_chain.check_capella_readiness().await {
CapellaReadiness::Ready => {
info!(
log,
"Ready for Capella";
"info" => "ensure the execution endpoint is updated to the latest Capella/Shanghai release"
info = "ensure the execution endpoint is updated to the latest Capella/Shanghai release",
"Ready for Capella"
)
}
readiness @ CapellaReadiness::ExchangeCapabilitiesFailed { error: _ } => {
error!(
log,
"Not ready for Capella";
"hint" => "the execution endpoint may be offline",
"info" => %readiness,
hint = "the execution endpoint may be offline",
info = %readiness,
"Not ready for Capella"
)
}
readiness => warn!(
log,
"Not ready for Capella";
"hint" => "try updating the execution endpoint",
"info" => %readiness,
hint = "try updating the execution endpoint",
info = %readiness,
"Not ready for Capella"
),
}
}
@@ -480,7 +466,6 @@ async fn capella_readiness_logging<T: BeaconChainTypes>(
async fn deneb_readiness_logging<T: BeaconChainTypes>(
current_slot: Slot,
beacon_chain: &BeaconChain<T>,
log: &Logger,
) {
let deneb_completed = beacon_chain
.canonical_head
@@ -500,9 +485,8 @@ async fn deneb_readiness_logging<T: BeaconChainTypes>(
if deneb_completed && !has_execution_layer {
error!(
log,
"Execution endpoint required";
"info" => "you need a Deneb enabled execution engine to validate blocks."
info = "you need a Deneb enabled execution engine to validate blocks.",
"Execution endpoint required"
);
return;
}
@@ -510,24 +494,22 @@ async fn deneb_readiness_logging<T: BeaconChainTypes>(
match beacon_chain.check_deneb_readiness().await {
DenebReadiness::Ready => {
info!(
log,
"Ready for Deneb";
"info" => "ensure the execution endpoint is updated to the latest Deneb/Cancun release"
info =
"ensure the execution endpoint is updated to the latest Deneb/Cancun release",
"Ready for Deneb"
)
}
readiness @ DenebReadiness::ExchangeCapabilitiesFailed { error: _ } => {
error!(
log,
"Not ready for Deneb";
"hint" => "the execution endpoint may be offline",
"info" => %readiness,
hint = "the execution endpoint may be offline",
info = %readiness,
"Not ready for Deneb"
)
}
readiness => warn!(
log,
"Not ready for Deneb";
"hint" => "try updating the execution endpoint",
"info" => %readiness,
hint = "try updating the execution endpoint",
info = %readiness,
"Not ready for Deneb"
),
}
}
@@ -535,7 +517,6 @@ async fn deneb_readiness_logging<T: BeaconChainTypes>(
async fn electra_readiness_logging<T: BeaconChainTypes>(
current_slot: Slot,
beacon_chain: &BeaconChain<T>,
log: &Logger,
) {
let electra_completed = beacon_chain
.canonical_head
@@ -556,9 +537,8 @@ async fn electra_readiness_logging<T: BeaconChainTypes>(
if electra_completed && !has_execution_layer {
// When adding a new fork, add a check for the next fork readiness here.
error!(
log,
"Execution endpoint required";
"info" => "you need a Electra enabled execution engine to validate blocks."
info = "you need a Electra enabled execution engine to validate blocks.",
"Execution endpoint required"
);
return;
}
@@ -566,24 +546,22 @@ async fn electra_readiness_logging<T: BeaconChainTypes>(
match beacon_chain.check_electra_readiness().await {
ElectraReadiness::Ready => {
info!(
log,
"Ready for Electra";
"info" => "ensure the execution endpoint is updated to the latest Electra/Prague release"
info =
"ensure the execution endpoint is updated to the latest Electra/Prague release",
"Ready for Electra"
)
}
readiness @ ElectraReadiness::ExchangeCapabilitiesFailed { error: _ } => {
error!(
log,
"Not ready for Electra";
"hint" => "the execution endpoint may be offline",
"info" => %readiness,
hint = "the execution endpoint may be offline",
info = %readiness,
"Not ready for Electra"
)
}
readiness => warn!(
log,
"Not ready for Electra";
"hint" => "try updating the execution endpoint",
"info" => %readiness,
hint = "try updating the execution endpoint",
info = %readiness,
"Not ready for Electra"
),
}
}
@@ -592,7 +570,6 @@ async fn electra_readiness_logging<T: BeaconChainTypes>(
/// Logs the node's readiness for the Fulu fork, based on
/// `beacon_chain.check_fulu_readiness()` and execution-layer availability:
/// ready -> info, capabilities-exchange failure -> error, anything else -> warn.
///
/// NOTE(review): this span is diff residue from the slog -> tracing migration.
/// Removed slog-style lines (`log,`, `"key" => value`, message-first) are
/// interleaved with the surviving tracing-style lines, and `@@ ... @@` hunk
/// markers elide interior lines (e.g. the computation of `fulu_completed` and
/// `has_execution_layer`). Code left byte-identical; reconcile against the
/// post-migration file before building.
async fn fulu_readiness_logging<T: BeaconChainTypes>(
    current_slot: Slot,
    beacon_chain: &BeaconChain<T>,
    // Pre-migration slog handle; deleted by the tracing migration (residue).
    log: &Logger,
) {
    let fulu_completed = beacon_chain
        .canonical_head
// Hunk marker below: the lines deriving `fulu_completed` / `has_execution_layer`
// from the canonical head are elided here.
@@ -612,9 +589,8 @@ async fn fulu_readiness_logging<T: BeaconChainTypes>(
    // A Fulu-capable execution engine is mandatory once the fork has completed.
    if fulu_completed && !has_execution_layer {
        error!(
            log,
            "Execution endpoint required";
            "info" => "you need a Fulu enabled execution engine to validate blocks."
            info = "you need a Fulu enabled execution engine to validate blocks.",
            "Execution endpoint required"
        );
        return;
    }
@@ -622,102 +598,86 @@ async fn fulu_readiness_logging<T: BeaconChainTypes>(
    // Map each readiness state to the appropriate log level.
    match beacon_chain.check_fulu_readiness().await {
        FuluReadiness::Ready => {
            info!(
                log,
                "Ready for Fulu";
                "info" => "ensure the execution endpoint is updated to the latest Fulu release"
                info = "ensure the execution endpoint is updated to the latest Fulu release",
                "Ready for Fulu"
            )
        }
        // `%readiness` records the state via its Display impl (tracing sigil).
        readiness @ FuluReadiness::ExchangeCapabilitiesFailed { error: _ } => {
            error!(
                log,
                "Not ready for Fulu";
                "hint" => "the execution endpoint may be offline",
                "info" => %readiness,
                hint = "the execution endpoint may be offline",
                info = %readiness,
                "Not ready for Fulu"
            )
        }
        // Any other not-ready state is recoverable by updating the endpoint.
        readiness => warn!(
            log,
            "Not ready for Fulu";
            "hint" => "try updating the execution endpoint",
            "info" => %readiness,
            hint = "try updating the execution endpoint",
            info = %readiness,
            "Not ready for Fulu"
        ),
    }
}
async fn genesis_execution_payload_logging<T: BeaconChainTypes>(
beacon_chain: &BeaconChain<T>,
log: &Logger,
) {
async fn genesis_execution_payload_logging<T: BeaconChainTypes>(beacon_chain: &BeaconChain<T>) {
match beacon_chain
.check_genesis_execution_payload_is_correct()
.await
{
Ok(GenesisExecutionPayloadStatus::Correct(block_hash)) => {
info!(
log,
"Execution enabled from genesis";
"genesis_payload_block_hash" => ?block_hash,
genesis_payload_block_hash = ?block_hash,
"Execution enabled from genesis"
);
}
Ok(GenesisExecutionPayloadStatus::BlockHashMismatch { got, expected }) => {
error!(
log,
"Genesis payload block hash mismatch";
"info" => "genesis is misconfigured and likely to fail",
"consensus_node_block_hash" => ?expected,
"execution_node_block_hash" => ?got,
info = "genesis is misconfigured and likely to fail",
consensus_node_block_hash = ?expected,
execution_node_block_hash = ?got,
"Genesis payload block hash mismatch"
);
}
Ok(GenesisExecutionPayloadStatus::TransactionsRootMismatch { got, expected }) => {
error!(
log,
"Genesis payload transactions root mismatch";
"info" => "genesis is misconfigured and likely to fail",
"consensus_node_transactions_root" => ?expected,
"execution_node_transactions_root" => ?got,
info = "genesis is misconfigured and likely to fail",
consensus_node_transactions_root = ?expected,
execution_node_transactions_root = ?got,
"Genesis payload transactions root mismatch"
);
}
Ok(GenesisExecutionPayloadStatus::WithdrawalsRootMismatch { got, expected }) => {
error!(
log,
"Genesis payload withdrawals root mismatch";
"info" => "genesis is misconfigured and likely to fail",
"consensus_node_withdrawals_root" => ?expected,
"execution_node_withdrawals_root" => ?got,
info = "genesis is misconfigured and likely to fail",
consensus_node_withdrawals_root = ?expected,
execution_node_withdrawals_root = ?got,
"Genesis payload withdrawals root mismatch"
);
}
Ok(GenesisExecutionPayloadStatus::OtherMismatch) => {
error!(
log,
"Genesis payload header mismatch";
"info" => "genesis is misconfigured and likely to fail",
"detail" => "see debug logs for payload headers"
info = "genesis is misconfigured and likely to fail",
detail = "see debug logs for payload headers",
"Genesis payload header mismatch"
);
}
Ok(GenesisExecutionPayloadStatus::Irrelevant) => {
info!(
log,
"Execution is not enabled from genesis";
);
info!("Execution is not enabled from genesis");
}
Ok(GenesisExecutionPayloadStatus::AlreadyHappened) => {
warn!(
log,
"Unable to check genesis which has already occurred";
"info" => "this is probably a race condition or a bug"
info = "this is probably a race condition or a bug",
"Unable to check genesis which has already occurred"
);
}
Err(e) => {
error!(
log,
"Unable to check genesis execution payload";
"error" => ?e
error = ?e,
"Unable to check genesis execution payload"
);
}
}
}
/// Logs the sync status of the eth1 (deposit contract) block cache: a debug
/// summary when the status is available, a warning with the estimated blocks
/// remaining while the cache is still syncing, and an error when the status
/// cannot be determined.
///
/// NOTE(review): this span is diff residue from the slog -> tracing migration.
/// Both the old (`log: &Logger`) and new signatures are present, slog-style
/// argument lines sit next to their tracing replacements, and `@@ ... @@` hunk
/// markers elide interior lines (the opening of the `if let` whose argument
/// `&beacon_chain.spec` appears below, and the binding of `distance`). Code
/// left byte-identical; reconcile against the post-migration file.
fn eth1_logging<T: BeaconChainTypes>(beacon_chain: &BeaconChain<T>, log: &Logger) {
fn eth1_logging<T: BeaconChainTypes>(beacon_chain: &BeaconChain<T>) {
    let current_slot_opt = beacon_chain.slot().ok();
    // Perform some logging about the eth1 chain
// Hunk marker below: the start of the `if let Some(status) = ...` call whose
// final argument is `&beacon_chain.spec` is elided here.
@@ -733,13 +693,12 @@ fn eth1_logging<T: BeaconChainTypes>(beacon_chain: &BeaconChain<T>, log: &Logger
            &beacon_chain.spec,
        ) {
            debug!(
                log,
                "Eth1 cache sync status";
                "eth1_head_block" => status.head_block_number,
                "latest_cached_block_number" => status.latest_cached_block_number,
                "latest_cached_timestamp" => status.latest_cached_block_timestamp,
                "voting_target_timestamp" => status.voting_target_timestamp,
                "ready" => status.lighthouse_is_cached_and_ready
                eth1_head_block = status.head_block_number,
                latest_cached_block_number = status.latest_cached_block_number,
                latest_cached_timestamp = status.latest_cached_block_timestamp,
                voting_target_timestamp = status.voting_target_timestamp,
                ready = status.lighthouse_is_cached_and_ready,
                "Eth1 cache sync status"
            );
            if !status.lighthouse_is_cached_and_ready {
// Hunk marker below: the expression producing `distance` (falling back to
// "initializing deposits") is elided here.
@@ -755,16 +714,12 @@ fn eth1_logging<T: BeaconChainTypes>(beacon_chain: &BeaconChain<T>, log: &Logger
                .unwrap_or_else(|| "initializing deposits".to_string());
                warn!(
                    log,
                    "Syncing deposit contract block cache";
                    "est_blocks_remaining" => distance,
                    est_blocks_remaining = distance,
                    "Syncing deposit contract block cache"
                );
            }
        } else {
            error!(
                log,
                "Unable to determine deposit contract sync status";
            );
            error!("Unable to determine deposit contract sync status");
        }
    }
}