Tracing cleanup (#7168)

#7153
#7146
#7147
#7148 -> Thanks to @ackintosh


  This PR does the following:
1. Disable logging to file when using either `--logfile-max-number 0` or `--logfile-max-size 0`. Note that disabling the log file in this way will also disable `discv5` and `libp2p` logging.
1.  `discv5` and `libp2p` logging will be disabled by default unless running `beacon_node` or `boot_node`. This also should fix the VC panic we were seeing.
1. Removes log rotation and compression from `libp2p` and `discv5` logs. It is now limited to 1 file and will rotate based on the value of the `--logfile-max-size` flag. We could potentially add flags specifically to control the size/number of these, however I felt a single log file was sufficient. Perhaps @AgeManning has opinions about this?
1. Removes all dependency logging and references to `dep_log`.
1.  Introduces workspace filtering to file and stdout. This explicitly allows logs from members of the Lighthouse workspace, disallowing all others. It uses a proc macro which pulls the member list from cargo metadata at compile time. This might be over-engineered but my hope is that this list will not require maintenance.
1. Unifies file and stdout JSON format. With slog, the formats were slightly different. @threehrsleep worked to maintain that format difference, to ensure there were no breaking changes. If these format differences are actually problematic we can restore it, however I felt the added complexity wasn't worth it.
1. General code improvements and cleanup.
This commit is contained in:
Mac L
2025-04-01 21:51:09 +11:00
committed by GitHub
parent fb7ec0d151
commit 4839ed620f
14 changed files with 382 additions and 365 deletions

29
Cargo.lock generated
View File

@@ -1187,6 +1187,20 @@ dependencies = [
"thiserror 1.0.69",
]
[[package]]
name = "cargo_metadata"
version = "0.19.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dd5eb614ed4c27c5d706420e4320fbe3216ab31fa1c33cd8246ac36dae4479ba"
dependencies = [
"camino",
"cargo-platform",
"semver 1.0.26",
"serde",
"serde_json",
"thiserror 2.0.12",
]
[[package]]
name = "cast"
version = "0.3.0"
@@ -1915,7 +1929,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "18e4fdb82bd54a12e42fb58a800dcae6b9e13982238ce2296dc3570b92148e1f"
dependencies = [
"data-encoding",
"syn 1.0.109",
"syn 2.0.100",
]
[[package]]
@@ -2853,7 +2867,7 @@ checksum = "ade3e9c97727343984e1ceada4fdab11142d2ee3472d2c67027d56b1251d4f15"
dependencies = [
"arrayvec",
"bytes",
"cargo_metadata",
"cargo_metadata 0.15.4",
"chrono",
"elliptic-curve 0.12.3",
"ethabi 18.0.0",
@@ -4761,7 +4775,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fc2f4eb4bc735547cfed7c0a4922cbd04a4655978c09b54f1f7b228750664c34"
dependencies = [
"cfg-if",
"windows-targets 0.48.5",
"windows-targets 0.52.6",
]
[[package]]
@@ -5458,6 +5472,7 @@ dependencies = [
"tracing-core",
"tracing-log",
"tracing-subscriber",
"workspace_members",
]
[[package]]
@@ -10152,6 +10167,14 @@ dependencies = [
"bitflags 2.9.0",
]
[[package]]
name = "workspace_members"
version = "0.1.0"
dependencies = [
"cargo_metadata 0.19.2",
"quote",
]
[[package]]
name = "write16"
version = "1.0.0"

View File

@@ -51,6 +51,7 @@ members = [
"common/unused_port",
"common/validator_dir",
"common/warp_utils",
"common/workspace_members",
"consensus/fixed_bytes",
"consensus/fork_choice",
@@ -120,6 +121,7 @@ bincode = "1"
bitvec = "1"
byteorder = "1"
bytes = "1"
cargo_metadata = "0.19"
clap = { version = "4.5.4", features = ["derive", "cargo", "wrap_help"] }
# Turn off c-kzg's default features which include `blst/portable`. We can turn on blst's portable
# feature ourselves when desired.
@@ -246,6 +248,7 @@ kzg = { path = "crypto/kzg" }
metrics = { path = "common/metrics" }
lighthouse_network = { path = "beacon_node/lighthouse_network" }
lighthouse_version = { path = "common/lighthouse_version" }
workspace_members = { path = "common/workspace_members" }
lockfile = { path = "common/lockfile" }
logging = { path = "common/logging" }
lru_cache = { path = "common/lru_cache" }

View File

@@ -16,8 +16,9 @@ parking_lot = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
tokio = { workspace = true, features = [ "time" ] }
tracing = "0.1"
tracing = { workspace = true }
tracing-appender = { workspace = true }
tracing-core = { workspace = true }
tracing-log = { workspace = true }
tracing-subscriber = { workspace = true }
workspace_members = { workspace = true }

View File

@@ -1,24 +1,24 @@
use chrono::Local;
use logroller::{Compression, LogRollerBuilder, Rotation, RotationSize};
use metrics::{try_create_int_counter, IntCounter, Result as MetricsResult};
use std::io::Write;
use std::path::PathBuf;
use std::sync::LazyLock;
use std::time::{Duration, Instant};
use tracing::Subscriber;
use tracing_appender::non_blocking::{NonBlocking, WorkerGuard};
use tracing_subscriber::layer::Context;
use tracing_subscriber::{EnvFilter, Layer};
use tracing_subscriber::EnvFilter;
pub const MAX_MESSAGE_WIDTH: usize = 40;
pub mod macros;
mod sse_logging_components;
mod tracing_libp2p_discv5_logging_layer;
pub mod tracing_logging_layer;
mod tracing_metrics_layer;
mod utils;
pub use sse_logging_components::SSELoggingComponents;
pub use tracing_libp2p_discv5_logging_layer::{
create_libp2p_discv5_tracing_layer, Libp2pDiscv5TracingLayer,
};
pub use tracing_logging_layer::LoggingLayer;
pub use tracing_metrics_layer::MetricsLayer;
pub use utils::build_workspace_filter;
/// The minimum interval between log messages indicating that a queue is full.
const LOG_DEBOUNCE_INTERVAL: Duration = Duration::from_secs(30);
@@ -51,132 +51,6 @@ impl TimeLatch {
}
}
pub struct Libp2pDiscv5TracingLayer {
pub libp2p_non_blocking_writer: NonBlocking,
pub _libp2p_guard: WorkerGuard,
pub discv5_non_blocking_writer: NonBlocking,
pub _discv5_guard: WorkerGuard,
}
impl<S> Layer<S> for Libp2pDiscv5TracingLayer
where
S: Subscriber,
{
fn on_event(&self, event: &tracing::Event<'_>, _ctx: Context<S>) {
let meta = event.metadata();
let log_level = meta.level();
let timestamp = Local::now().format("%Y-%m-%d %H:%M:%S").to_string();
let target = match meta.target().split_once("::") {
Some((crate_name, _)) => crate_name,
None => "unknown",
};
let mut writer = match target {
"gossipsub" => self.libp2p_non_blocking_writer.clone(),
"discv5" => self.discv5_non_blocking_writer.clone(),
_ => return,
};
let mut visitor = LogMessageExtractor {
message: String::default(),
};
event.record(&mut visitor);
let message = format!("{} {} {}\n", timestamp, log_level, visitor.message);
if let Err(e) = writer.write_all(message.as_bytes()) {
eprintln!("Failed to write log: {}", e);
}
}
}
struct LogMessageExtractor {
message: String,
}
impl tracing_core::field::Visit for LogMessageExtractor {
fn record_debug(&mut self, _: &tracing_core::Field, value: &dyn std::fmt::Debug) {
self.message = format!("{} {:?}", self.message, value);
}
}
pub fn create_libp2p_discv5_tracing_layer(
base_tracing_log_path: Option<PathBuf>,
max_log_size: u64,
compression: bool,
max_log_number: usize,
) -> Libp2pDiscv5TracingLayer {
if let Some(mut tracing_log_path) = base_tracing_log_path {
// Ensure that `tracing_log_path` only contains directories.
for p in tracing_log_path.clone().iter() {
tracing_log_path = tracing_log_path.join(p);
if let Ok(metadata) = tracing_log_path.metadata() {
if !metadata.is_dir() {
tracing_log_path.pop();
break;
}
}
}
let mut libp2p_writer =
LogRollerBuilder::new(tracing_log_path.clone(), PathBuf::from("libp2p.log"))
.rotation(Rotation::SizeBased(RotationSize::MB(max_log_size)))
.max_keep_files(max_log_number.try_into().unwrap_or_else(|e| {
eprintln!("Failed to convert max_log_number to u64: {}", e);
10
}));
let mut discv5_writer =
LogRollerBuilder::new(tracing_log_path.clone(), PathBuf::from("discv5.log"))
.rotation(Rotation::SizeBased(RotationSize::MB(max_log_size)))
.max_keep_files(max_log_number.try_into().unwrap_or_else(|e| {
eprintln!("Failed to convert max_log_number to u64: {}", e);
10
}));
if compression {
libp2p_writer = libp2p_writer.compression(Compression::Gzip);
discv5_writer = discv5_writer.compression(Compression::Gzip);
}
let libp2p_writer = match libp2p_writer.build() {
Ok(writer) => writer,
Err(e) => {
eprintln!("Failed to initialize libp2p rolling file appender: {e}");
std::process::exit(1);
}
};
let discv5_writer = match discv5_writer.build() {
Ok(writer) => writer,
Err(e) => {
eprintln!("Failed to initialize discv5 rolling file appender: {e}");
std::process::exit(1);
}
};
let (libp2p_non_blocking_writer, _libp2p_guard) = NonBlocking::new(libp2p_writer);
let (discv5_non_blocking_writer, _discv5_guard) = NonBlocking::new(discv5_writer);
Libp2pDiscv5TracingLayer {
libp2p_non_blocking_writer,
_libp2p_guard,
discv5_non_blocking_writer,
_discv5_guard,
}
} else {
let (libp2p_non_blocking_writer, _libp2p_guard) = NonBlocking::new(std::io::sink());
let (discv5_non_blocking_writer, _discv5_guard) = NonBlocking::new(std::io::sink());
Libp2pDiscv5TracingLayer {
libp2p_non_blocking_writer,
_libp2p_guard,
discv5_non_blocking_writer,
_discv5_guard,
}
}
}
/// Return a tracing subscriber suitable for test usage.
///
/// By default no logs will be printed, but they can be enabled via

View File

@@ -0,0 +1,113 @@
use chrono::Local;
use logroller::{LogRollerBuilder, Rotation, RotationSize};
use std::io::Write;
use std::path::PathBuf;
use tracing::Subscriber;
use tracing_appender::non_blocking::{NonBlocking, WorkerGuard};
use tracing_subscriber::{layer::Context, Layer};
/// Holds the non-blocking writers (and their background worker guards) used
/// to route `libp2p` (gossipsub) and `discv5` events to dedicated log files.
pub struct Libp2pDiscv5TracingLayer {
    pub libp2p_non_blocking_writer: NonBlocking,
    // Keeps the libp2p background writer thread alive; dropped with the layer.
    _libp2p_guard: WorkerGuard,
    pub discv5_non_blocking_writer: NonBlocking,
    // Keeps the discv5 background writer thread alive; dropped with the layer.
    _discv5_guard: WorkerGuard,
}
impl<S> Layer<S> for Libp2pDiscv5TracingLayer
where
    S: Subscriber,
{
    /// Writes matching events to the `libp2p` or `discv5` log file.
    ///
    /// Only events whose target crate is `libp2p_gossipsub` or `discv5` are
    /// written; all other events are ignored by this layer.
    fn on_event(&self, event: &tracing::Event<'_>, _ctx: Context<S>) {
        let meta = event.metadata();
        let log_level = meta.level();
        // Local wall-clock timestamp at second resolution.
        let timestamp = Local::now().format("%Y-%m-%d %H:%M:%S").to_string();

        // Extract the crate name from the event target (`crate::module::…`).
        // NOTE(review): a bare target containing no `::` (e.g. just "discv5")
        // maps to "unknown" and is silently dropped below — confirm these
        // dependencies always emit module-qualified targets.
        let target = match meta.target().split_once("::") {
            Some((crate_name, _)) => crate_name,
            None => "unknown",
        };

        // Pick the destination file for this event, or bail out early.
        let mut writer = match target {
            "libp2p_gossipsub" => self.libp2p_non_blocking_writer.clone(),
            "discv5" => self.discv5_non_blocking_writer.clone(),
            _ => return,
        };

        // Fold every recorded field into one space-separated message string.
        let mut visitor = LogMessageExtractor {
            message: String::default(),
        };
        event.record(&mut visitor);

        let message = format!("{} {} {}\n", timestamp, log_level, visitor.message);

        if let Err(e) = writer.write_all(message.as_bytes()) {
            eprintln!("Failed to write log: {}", e);
        }
    }
}
/// Field visitor that concatenates every recorded field's `Debug`
/// representation into a single space-separated message string.
struct LogMessageExtractor {
    message: String,
}
impl tracing_core::field::Visit for LogMessageExtractor {
    /// Appends the field value's `Debug` rendering, preceded by a space,
    /// to the accumulated message.
    fn record_debug(&mut self, _: &tracing_core::Field, value: &dyn std::fmt::Debug) {
        self.message.push(' ');
        self.message.push_str(&format!("{:?}", value));
    }
}
/// Builds the optional tracing layer that routes `libp2p` (gossipsub) and
/// `discv5` events to dedicated `libp2p.log` / `discv5.log` files.
///
/// Each dependency log is capped at a single file, rotated once it exceeds
/// `max_log_size` MB.
///
/// Returns `None` when no log path is configured, or when either file
/// appender cannot be created — these logs are auxiliary, so failing to set
/// them up should not abort the process.
pub fn create_libp2p_discv5_tracing_layer(
    base_tracing_log_path: Option<PathBuf>,
    max_log_size: u64,
) -> Option<Libp2pDiscv5TracingLayer> {
    let base_tracing_log_path = base_tracing_log_path?;

    // Trim the path at the first existing component that is not a directory,
    // so the appenders always receive a directory path. Building the result
    // up from an empty `PathBuf` (rather than joining components back onto
    // the original path) also handles relative paths correctly.
    let mut tracing_log_path = PathBuf::new();
    for component in base_tracing_log_path.components() {
        tracing_log_path.push(component);
        if let Ok(metadata) = tracing_log_path.metadata() {
            if !metadata.is_dir() {
                tracing_log_path.pop();
                break;
            }
        }
    }

    // Shared construction for both dependency log appenders.
    let build_writer = |filename: &str| {
        LogRollerBuilder::new(tracing_log_path.clone(), PathBuf::from(filename))
            .rotation(Rotation::SizeBased(RotationSize::MB(max_log_size)))
            .max_keep_files(1)
            .build()
    };

    let libp2p_writer = match build_writer("libp2p.log") {
        Ok(writer) => writer,
        Err(e) => {
            // Dependency logging is best-effort: report and disable it rather
            // than killing the whole process.
            eprintln!("Failed to initialize libp2p rolling file appender: {e}");
            return None;
        }
    };
    let discv5_writer = match build_writer("discv5.log") {
        Ok(writer) => writer,
        Err(e) => {
            eprintln!("Failed to initialize discv5 rolling file appender: {e}");
            return None;
        }
    };

    let (libp2p_non_blocking_writer, _libp2p_guard) = NonBlocking::new(libp2p_writer);
    let (discv5_non_blocking_writer, _discv5_guard) = NonBlocking::new(discv5_writer);

    Some(Libp2pDiscv5TracingLayer {
        libp2p_non_blocking_writer,
        _libp2p_guard,
        discv5_non_blocking_writer,
        _discv5_guard,
    })
}

View File

@@ -1,3 +1,5 @@
use crate::utils::is_ascii_control;
use chrono::prelude::*;
use serde_json::{Map, Value};
use std::collections::HashMap;
@@ -13,14 +15,11 @@ use tracing_subscriber::Layer;
pub struct LoggingLayer {
pub non_blocking_writer: NonBlocking,
pub guard: WorkerGuard,
_guard: WorkerGuard,
pub disable_log_timestamp: bool,
pub log_color: bool,
pub logfile_color: bool,
pub log_format: Option<String>,
pub logfile_format: Option<String>,
pub extra_info: bool,
pub dep_logs: bool,
span_fields: Arc<Mutex<HashMap<Id, SpanData>>>,
}
@@ -28,25 +27,19 @@ impl LoggingLayer {
#[allow(clippy::too_many_arguments)]
pub fn new(
non_blocking_writer: NonBlocking,
guard: WorkerGuard,
_guard: WorkerGuard,
disable_log_timestamp: bool,
log_color: bool,
logfile_color: bool,
log_format: Option<String>,
logfile_format: Option<String>,
extra_info: bool,
dep_logs: bool,
) -> Self {
Self {
non_blocking_writer,
guard,
_guard,
disable_log_timestamp,
log_color,
logfile_color,
log_format,
logfile_format,
extra_info,
dep_logs,
span_fields: Arc::new(Mutex::new(HashMap::new())),
}
}
@@ -84,16 +77,6 @@ where
String::new()
};
if !self.dep_logs {
if let Some(file) = meta.file() {
if file.contains("/.cargo/") {
return;
}
} else {
return;
}
}
let mut writer = self.non_blocking_writer.clone();
let mut visitor = LogMessageExtractor {
@@ -122,16 +105,10 @@ where
None => "<unknown_line>".to_string(),
};
if module.contains("discv5") {
visitor
.fields
.push(("service".to_string(), "\"discv5\"".to_string()));
}
let gray = "\x1b[90m";
let reset = "\x1b[0m";
let location = if self.extra_info {
if self.logfile_color {
if self.log_color {
format!("{}{}::{}:{}{}", gray, module, file, line, reset)
} else {
format!("{}::{}:{}", module, file, line)
@@ -164,33 +141,16 @@ where
}
};
if self.dep_logs {
if self.logfile_format.as_deref() == Some("JSON") {
build_json_log_file(
&visitor,
plain_level_str,
meta,
&ctx,
&self.span_fields,
event,
&mut writer,
);
} else {
build_log_text(
&visitor,
plain_level_str,
&timestamp,
&ctx,
&self.span_fields,
event,
&location,
color_level_str,
self.logfile_color,
&mut writer,
);
}
} else if self.log_format.as_deref() == Some("JSON") {
build_json_log_stdout(&visitor, plain_level_str, &timestamp, &mut writer);
if self.log_format.as_deref() == Some("JSON") {
build_log_json(
&visitor,
plain_level_str,
meta,
&ctx,
&self.span_fields,
event,
&mut writer,
);
} else {
build_log_text(
&visitor,
@@ -300,49 +260,7 @@ impl tracing_core::field::Visit for LogMessageExtractor {
}
}
/// Function to filter out ascii control codes.
///
/// This helps to keep log formatting consistent.
/// Whitespace and padding control codes are excluded.
fn is_ascii_control(character: &u8) -> bool {
matches!(
character,
b'\x00'..=b'\x08' |
b'\x0b'..=b'\x0c' |
b'\x0e'..=b'\x1f' |
b'\x7f' |
b'\x81'..=b'\x9f'
)
}
fn build_json_log_stdout(
visitor: &LogMessageExtractor,
plain_level_str: &str,
timestamp: &str,
writer: &mut impl Write,
) {
let mut log_map = Map::new();
log_map.insert("msg".to_string(), Value::String(visitor.message.clone()));
log_map.insert(
"level".to_string(),
Value::String(plain_level_str.to_string()),
);
log_map.insert("ts".to_string(), Value::String(timestamp.to_string()));
for (key, val) in visitor.fields.clone().into_iter() {
let parsed_val = parse_field(&val);
log_map.insert(key, parsed_val);
}
let json_obj = Value::Object(log_map);
let output = format!("{}\n", json_obj);
if let Err(e) = writer.write_all(output.as_bytes()) {
eprintln!("Failed to write log: {}", e);
}
}
fn build_json_log_file<'a, S>(
fn build_log_json<'a, S>(
visitor: &LogMessageExtractor,
plain_level_str: &str,
meta: &tracing::Metadata<'_>,

View File

@@ -0,0 +1,31 @@
use std::collections::HashSet;
use tracing_subscriber::filter::FilterFn;
use workspace_members::workspace_crates;
// Crate names of every workspace member, captured at compile time by the
// `workspace_members` proc macro.
const WORKSPACE_CRATES: &[&str] = workspace_crates!();

/// Constructs a filter which only permits logging from crates which are members of the workspace.
///
/// # Errors
///
/// Currently infallible; the `Result` is kept for interface stability.
pub fn build_workspace_filter(
) -> Result<FilterFn<impl Fn(&tracing::Metadata) -> bool + Clone>, String> {
    // Cargo package names may contain hyphens, but tracing targets are Rust
    // module paths, which always use underscores. Normalize here so that any
    // hyphenated workspace member is not silently filtered out.
    let workspace_crates: HashSet<String> = WORKSPACE_CRATES
        .iter()
        .map(|crate_name| crate_name.replace('-', "_"))
        .collect();
    Ok(tracing_subscriber::filter::FilterFn::new(move |metadata| {
        // The leading path segment of the target is the crate name.
        let target_crate = metadata.target().split("::").next().unwrap_or("");
        workspace_crates.contains(target_crate)
    }))
}
/// Function to filter out ascii control codes.
///
/// This helps to keep log formatting consistent.
/// Whitespace and padding control codes are excluded.
pub fn is_ascii_control(character: &u8) -> bool {
    match *character {
        // C0 controls, excluding tab (0x09), line feed (0x0a) and
        // carriage return (0x0d), which are allowed through.
        0x00..=0x08 | 0x0b..=0x0c | 0x0e..=0x1f => true,
        // DEL.
        0x7f => true,
        // C1 control range (0x80 itself is deliberately not matched,
        // mirroring the original range).
        0x81..=0x9f => true,
        _ => false,
    }
}

View File

@@ -0,0 +1,11 @@
[package]
name = "workspace_members"
version = "0.1.0"
edition = { workspace = true }
[lib]
proc-macro = true
[dependencies]
cargo_metadata = { workspace = true }
quote = { workspace = true }

View File

@@ -0,0 +1,39 @@
use cargo_metadata::MetadataCommand;
use proc_macro::TokenStream;
use quote::quote;
use std::error::Error;
/// Queries `cargo metadata` (without dependencies) and returns the package
/// names of all workspace members, preserving the member ordering reported
/// by cargo.
fn get_workspace_crates() -> Result<Vec<String>, Box<dyn Error>> {
    let metadata = MetadataCommand::new().no_deps().exec()?;
    let mut names = Vec::with_capacity(metadata.workspace_members.len());
    for member_id in &metadata.workspace_members {
        // Resolve each member id to its package record to obtain the name.
        if let Some(package) = metadata.packages.iter().find(|p| &p.id == member_id) {
            names.push(package.name.clone());
        }
    }
    Ok(names)
}
/// Proc macro expanding to a `&[&str]` literal containing the names of all
/// crates in the enclosing cargo workspace.
///
/// If `cargo metadata` cannot be queried, expands to a `compile_error!` so
/// the failure surfaces at build time.
#[proc_macro]
pub fn workspace_crates(_input: TokenStream) -> TokenStream {
    let tokens = match get_workspace_crates() {
        Ok(crate_names) => {
            let crate_strs = crate_names.iter().map(String::as_str);
            quote! { &[#(#crate_strs),*] }
        }
        Err(e) => {
            let msg = format!("Failed to get workspace crates: {e}");
            quote! { compile_error!(#msg); }
        }
    };
    tokens.into()
}

View File

@@ -197,6 +197,13 @@ impl<E: EthSpec> EnvironmentBuilder<E> {
Ok(self)
}
/// Initialize the Lighthouse-specific tracing logging components from
/// the provided config.
///
/// This consists of 3 tracing `Layers`:
/// - A `Layer` which logs to `stdout`
/// - An `Option<Layer>` which logs to a log file
/// - An `Option<Layer>` which emits logs to an SSE stream
pub fn init_tracing(
mut self,
config: LoggerConfig,
@@ -204,7 +211,7 @@ impl<E: EthSpec> EnvironmentBuilder<E> {
) -> (
Self,
LoggingLayer,
LoggingLayer,
Option<LoggingLayer>,
Option<SSELoggingComponents>,
) {
let filename_prefix = match logfile_prefix {
@@ -216,72 +223,48 @@ impl<E: EthSpec> EnvironmentBuilder<E> {
#[cfg(target_family = "unix")]
let file_mode = if config.is_restricted { 0o600 } else { 0o644 };
let file_logging_layer = {
if let Some(path) = config.path {
let mut appender = LogRollerBuilder::new(
path.clone(),
PathBuf::from(format!("{}.log", filename_prefix)),
)
.rotation(Rotation::SizeBased(RotationSize::MB(config.max_log_size)))
.max_keep_files(config.max_log_number.try_into().unwrap_or_else(|e| {
eprintln!("Failed to convert max_log_number to u64: {}", e);
10
}));
let file_logging_layer = match config.path {
None => {
eprintln!("No logfile path provided, logging to file is disabled");
None
}
Some(_) if config.max_log_number == 0 || config.max_log_size == 0 => {
// User has explicitly disabled logging to file, so don't emit a message.
None
}
Some(path) => {
let log_filename = PathBuf::from(format!("{}.log", filename_prefix));
let mut appender = LogRollerBuilder::new(path.clone(), log_filename)
.rotation(Rotation::SizeBased(RotationSize::MB(config.max_log_size)))
.max_keep_files(config.max_log_number.try_into().unwrap_or_else(|e| {
eprintln!("Failed to convert max_log_number to u64: {}", e);
10
}));
if config.compression {
appender = appender.compression(Compression::Gzip);
}
match appender.build() {
Ok(file_appender) => {
#[cfg(target_family = "unix")]
set_logfile_permissions(&path, filename_prefix, file_mode);
let (file_non_blocking_writer, file_guard) =
tracing_appender::non_blocking(file_appender);
LoggingLayer::new(
file_non_blocking_writer,
file_guard,
let (writer, guard) = tracing_appender::non_blocking(file_appender);
Some(LoggingLayer::new(
writer,
guard,
config.disable_log_timestamp,
false,
config.logfile_color,
config.log_format.clone(),
config.logfile_format.clone(),
config.extra_info,
false,
)
))
}
Err(e) => {
eprintln!("Failed to initialize rolling file appender: {}", e);
let (sink_writer, sink_guard) =
tracing_appender::non_blocking(std::io::sink());
LoggingLayer::new(
sink_writer,
sink_guard,
config.disable_log_timestamp,
false,
config.logfile_color,
config.log_format.clone(),
config.logfile_format.clone(),
config.extra_info,
false,
)
None
}
}
} else {
eprintln!("No path provided. File logging is disabled.");
let (sink_writer, sink_guard) = tracing_appender::non_blocking(std::io::sink());
LoggingLayer::new(
sink_writer,
sink_guard,
config.disable_log_timestamp,
false,
true,
config.log_format.clone(),
config.logfile_format.clone(),
config.extra_info,
false,
)
}
};
@@ -293,11 +276,8 @@ impl<E: EthSpec> EnvironmentBuilder<E> {
stdout_guard,
config.disable_log_timestamp,
config.log_color,
true,
config.log_format,
config.logfile_format,
config.extra_info,
false,
);
let sse_logging_layer_opt = if config.sse_logging {
@@ -310,8 +290,8 @@ impl<E: EthSpec> EnvironmentBuilder<E> {
(
self,
file_logging_layer,
stdout_logging_layer,
file_logging_layer,
sse_logging_layer_opt,
)
}

View File

@@ -1,47 +1,67 @@
use crate::{EnvironmentBuilder, LoggerConfig};
use clap::ArgMatches;
use logging::Libp2pDiscv5TracingLayer;
use logging::{tracing_logging_layer::LoggingLayer, SSELoggingComponents};
use logging::{
create_libp2p_discv5_tracing_layer, tracing_logging_layer::LoggingLayer, SSELoggingComponents,
};
use std::process;
use tracing_subscriber::filter::{FilterFn, LevelFilter};
use tracing_subscriber::filter::LevelFilter;
use types::EthSpec;
/// Constructs all logging layers including both Lighthouse-specific and
/// dependency logging.
///
/// The `Layer`s are as follows:
/// - A `Layer` which logs to `stdout`
/// - An `Option<Layer>` which logs to a log file
/// - An `Option<Layer>` which emits logs to an SSE stream
/// - An `Option<Layer>` which logs relevant dependencies to their
/// own log files. (Currently only `libp2p` and `discv5`)
pub fn construct_logger<E: EthSpec>(
logger_config: LoggerConfig,
matches: &ArgMatches,
environment_builder: EnvironmentBuilder<E>,
) -> (
EnvironmentBuilder<E>,
Libp2pDiscv5TracingLayer,
LoggingLayer,
LoggingLayer,
Option<SSELoggingComponents>,
LoggerConfig,
FilterFn,
LoggingLayer,
Option<LoggingLayer>,
Option<SSELoggingComponents>,
Option<Libp2pDiscv5TracingLayer>,
) {
let libp2p_discv5_layer = logging::create_libp2p_discv5_tracing_layer(
logger_config.path.clone(),
logger_config.max_log_size,
logger_config.compression,
logger_config.max_log_number,
);
let subcommand_name = matches.subcommand_name();
let logfile_prefix = subcommand_name.unwrap_or("lighthouse");
let logfile_prefix = matches.subcommand_name().unwrap_or("lighthouse");
let (builder, file_logging_layer, stdout_logging_layer, sse_logging_layer_opt) =
let (builder, stdout_logging_layer, file_logging_layer, sse_logging_layer_opt) =
environment_builder.init_tracing(logger_config.clone(), logfile_prefix);
let dependency_log_filter =
FilterFn::new(filter_dependency_log as fn(&tracing::Metadata<'_>) -> bool);
let libp2p_discv5_layer = if let Some(subcommand_name) = subcommand_name {
if subcommand_name == "beacon_node" || subcommand_name == "boot_node" {
if logger_config.max_log_size == 0 || logger_config.max_log_number == 0 {
// User has explicitly disabled logging to file.
None
} else {
create_libp2p_discv5_tracing_layer(
logger_config.path.clone(),
logger_config.max_log_size,
)
}
} else {
// Disable libp2p and discv5 logs when running other subcommands.
None
}
} else {
None
};
(
builder,
libp2p_discv5_layer,
file_logging_layer,
stdout_logging_layer,
sse_logging_layer_opt,
logger_config,
dependency_log_filter,
stdout_logging_layer,
file_logging_layer,
sse_logging_layer_opt,
libp2p_discv5_layer,
)
}
@@ -58,15 +78,3 @@ pub fn parse_level(level: &str) -> LevelFilter {
}
}
}
fn filter_dependency_log(meta: &tracing::Metadata<'_>) -> bool {
if let Some(file) = meta.file() {
let target = meta.target();
if file.contains("/.cargo/") {
return target.contains("discv5") || target.contains("libp2p");
} else {
return !file.contains("gossipsub") && !target.contains("hyper");
}
}
true
}

View File

@@ -17,17 +17,15 @@ use eth2_network_config::{Eth2NetworkConfig, DEFAULT_HARDCODED_NETWORK, HARDCODE
use ethereum_hashing::have_sha_extensions;
use futures::TryFutureExt;
use lighthouse_version::VERSION;
use logging::crit;
use logging::MetricsLayer;
use logging::{build_workspace_filter, crit, MetricsLayer};
use malloc_utils::configure_memory_allocator;
use std::backtrace::Backtrace;
use std::path::PathBuf;
use std::process::exit;
use std::sync::LazyLock;
use task_executor::ShutdownReason;
use tracing::{info, warn};
use tracing_subscriber::prelude::*;
use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt};
use tracing::{info, warn, Level};
use tracing_subscriber::{filter::EnvFilter, layer::SubscriberExt, util::SubscriberInitExt, Layer};
use types::{EthSpec, EthSpecId};
use validator_client::ProductionValidatorClient;
@@ -592,12 +590,11 @@ fn run<E: EthSpec>(
let (
builder,
libp2p_discv5_layer,
file_logging_layer,
stdout_logging_layer,
sse_logging_layer_opt,
logger_config,
dependency_log_filter,
stdout_logging_layer,
file_logging_layer,
sse_logging_layer_opt,
libp2p_discv5_layer,
) = tracing_common::construct_logger(
LoggerConfig {
path: log_path.clone(),
@@ -619,21 +616,50 @@ fn run<E: EthSpec>(
environment_builder,
);
let logging = tracing_subscriber::registry()
.with(dependency_log_filter)
.with(file_logging_layer.with_filter(logger_config.logfile_debug_level))
.with(stdout_logging_layer.with_filter(logger_config.debug_level))
.with(MetricsLayer)
.with(libp2p_discv5_layer);
let workspace_filter = build_workspace_filter()?;
let logging_result = if let Some(sse_logging_layer) = sse_logging_layer_opt {
logging.with(sse_logging_layer).try_init()
} else {
logging.try_init()
};
let mut logging_layers = Vec::new();
logging_layers.push(
stdout_logging_layer
.with_filter(logger_config.debug_level)
.with_filter(workspace_filter.clone())
.boxed(),
);
if let Some(file_logging_layer) = file_logging_layer {
logging_layers.push(
file_logging_layer
.with_filter(logger_config.logfile_debug_level)
.with_filter(workspace_filter)
.boxed(),
);
}
if let Some(sse_logging_layer) = sse_logging_layer_opt {
logging_layers.push(sse_logging_layer.boxed());
}
if let Some(libp2p_discv5_layer) = libp2p_discv5_layer {
logging_layers.push(
libp2p_discv5_layer
.with_filter(
EnvFilter::builder()
.with_default_directive(Level::DEBUG.into())
.from_env_lossy(),
)
.boxed(),
);
}
logging_layers.push(MetricsLayer.boxed());
let logging_result = tracing_subscriber::registry()
.with(logging_layers)
.try_init();
if let Err(e) = logging_result {
eprintln!("Failed to initialize dependency logging: {e}");
eprintln!("Failed to initialize logger: {e}");
}
let mut environment = builder

View File

@@ -15,7 +15,6 @@ use std::sync::Arc;
use std::time::Duration;
use environment::tracing_common;
use logging::MetricsLayer;
use tracing_subscriber::prelude::*;
use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt};
@@ -90,12 +89,11 @@ pub fn run_basic_sim(matches: &ArgMatches) -> Result<(), String> {
let (
env_builder,
_libp2p_discv5_layer,
file_logging_layer,
stdout_logging_layer,
_sse_logging_layer_opt,
logger_config,
_dependency_log_filter,
stdout_logging_layer,
_file_logging_layer,
_sse_logging_layer_opt,
_libp2p_discv5_layer,
) = tracing_common::construct_logger(
LoggerConfig {
path: None,
@@ -118,9 +116,7 @@ pub fn run_basic_sim(matches: &ArgMatches) -> Result<(), String> {
);
if let Err(e) = tracing_subscriber::registry()
.with(file_logging_layer.with_filter(logger_config.logfile_debug_level))
.with(stdout_logging_layer.with_filter(logger_config.debug_level))
.with(MetricsLayer)
.try_init()
{
eprintln!("Failed to initialize dependency logging: {e}");

View File

@@ -5,7 +5,6 @@ use clap::ArgMatches;
use crate::retry::with_retry;
use environment::tracing_common;
use futures::prelude::*;
use logging::MetricsLayer;
use node_test_rig::{
environment::{EnvironmentBuilder, LoggerConfig},
testing_validator_config, ValidatorFiles,
@@ -94,12 +93,11 @@ pub fn run_fallback_sim(matches: &ArgMatches) -> Result<(), String> {
let (
env_builder,
libp2p_discv5_layer,
file_logging_layer,
stdout_logging_layer,
_sse_logging_layer_opt,
logger_config,
dependency_log_filter,
stdout_logging_layer,
_file_logging_layer,
_sse_logging_layer_opt,
_libp2p_discv5_layer,
) = tracing_common::construct_logger(
LoggerConfig {
path: None,
@@ -122,11 +120,7 @@ pub fn run_fallback_sim(matches: &ArgMatches) -> Result<(), String> {
);
if let Err(e) = tracing_subscriber::registry()
.with(dependency_log_filter)
.with(file_logging_layer.with_filter(logger_config.logfile_debug_level))
.with(stdout_logging_layer.with_filter(logger_config.debug_level))
.with(libp2p_discv5_layer)
.with(MetricsLayer)
.try_init()
{
eprintln!("Failed to initialize dependency logging: {e}");