Tracing cleanup (#7168)

#7153
#7146
#7147
#7148 -> Thanks to @ackintosh


  This PR does the following:
1. Disable logging to file when using either `--logfile-max-number 0` or `--logfile-max-size 0`. Note that disabling the log file in this way will also disable `discv5` and `libp2p` logging.
1.  `discv5` and `libp2p` logging will be disabled by default unless running `beacon_node` or `boot_node`. This should also fix the VC panic we were seeing.
1. Removes log rotation and compression from `libp2p` and `discv5` logs. It is now limited to 1 file and will rotate based on the value of the `--logfile-max-size` flag. We could potentially add flags specifically to control the size/number of these, however I felt a single log file was sufficient. Perhaps @AgeManning has opinions about this?
1. Removes all dependency logging and references to `dep_log`.
1.  Introduces workspace filtering to file and stdout. This explicitly allows logs from members of the Lighthouse workspace, disallowing all others. It uses a proc macro which pulls the member list from cargo metadata at compile time. This might be over-engineered but my hope is that this list will not require maintenance.
1. Unifies file and stdout JSON format. With slog, the formats were slightly different. @threehrsleep worked to maintain that format difference, to ensure there were no breaking changes. If these format differences are actually problematic we can restore them, however I felt the added complexity wasn't worth it.
1. General code improvements and cleanup.
This commit is contained in:
Mac L
2025-04-01 21:51:09 +11:00
committed by GitHub
parent fb7ec0d151
commit 4839ed620f
14 changed files with 382 additions and 365 deletions

View File

@@ -197,6 +197,13 @@ impl<E: EthSpec> EnvironmentBuilder<E> {
Ok(self)
}
/// Initialize the Lighthouse-specific tracing logging components from
/// the provided config.
///
/// This consists of 3 tracing `Layers`:
/// - A `Layer` which logs to `stdout`
/// - An `Option<Layer>` which logs to a log file
/// - An `Option<Layer>` which emits logs to an SSE stream
pub fn init_tracing(
mut self,
config: LoggerConfig,
@@ -204,7 +211,7 @@ impl<E: EthSpec> EnvironmentBuilder<E> {
) -> (
Self,
LoggingLayer,
LoggingLayer,
Option<LoggingLayer>,
Option<SSELoggingComponents>,
) {
let filename_prefix = match logfile_prefix {
@@ -216,72 +223,48 @@ impl<E: EthSpec> EnvironmentBuilder<E> {
#[cfg(target_family = "unix")]
let file_mode = if config.is_restricted { 0o600 } else { 0o644 };
let file_logging_layer = {
if let Some(path) = config.path {
let mut appender = LogRollerBuilder::new(
path.clone(),
PathBuf::from(format!("{}.log", filename_prefix)),
)
.rotation(Rotation::SizeBased(RotationSize::MB(config.max_log_size)))
.max_keep_files(config.max_log_number.try_into().unwrap_or_else(|e| {
eprintln!("Failed to convert max_log_number to u64: {}", e);
10
}));
let file_logging_layer = match config.path {
None => {
eprintln!("No logfile path provided, logging to file is disabled");
None
}
Some(_) if config.max_log_number == 0 || config.max_log_size == 0 => {
// User has explicitly disabled logging to file, so don't emit a message.
None
}
Some(path) => {
let log_filename = PathBuf::from(format!("{}.log", filename_prefix));
let mut appender = LogRollerBuilder::new(path.clone(), log_filename)
.rotation(Rotation::SizeBased(RotationSize::MB(config.max_log_size)))
.max_keep_files(config.max_log_number.try_into().unwrap_or_else(|e| {
eprintln!("Failed to convert max_log_number to u64: {}", e);
10
}));
if config.compression {
appender = appender.compression(Compression::Gzip);
}
match appender.build() {
Ok(file_appender) => {
#[cfg(target_family = "unix")]
set_logfile_permissions(&path, filename_prefix, file_mode);
let (file_non_blocking_writer, file_guard) =
tracing_appender::non_blocking(file_appender);
LoggingLayer::new(
file_non_blocking_writer,
file_guard,
let (writer, guard) = tracing_appender::non_blocking(file_appender);
Some(LoggingLayer::new(
writer,
guard,
config.disable_log_timestamp,
false,
config.logfile_color,
config.log_format.clone(),
config.logfile_format.clone(),
config.extra_info,
false,
)
))
}
Err(e) => {
eprintln!("Failed to initialize rolling file appender: {}", e);
let (sink_writer, sink_guard) =
tracing_appender::non_blocking(std::io::sink());
LoggingLayer::new(
sink_writer,
sink_guard,
config.disable_log_timestamp,
false,
config.logfile_color,
config.log_format.clone(),
config.logfile_format.clone(),
config.extra_info,
false,
)
None
}
}
} else {
eprintln!("No path provided. File logging is disabled.");
let (sink_writer, sink_guard) = tracing_appender::non_blocking(std::io::sink());
LoggingLayer::new(
sink_writer,
sink_guard,
config.disable_log_timestamp,
false,
true,
config.log_format.clone(),
config.logfile_format.clone(),
config.extra_info,
false,
)
}
};
@@ -293,11 +276,8 @@ impl<E: EthSpec> EnvironmentBuilder<E> {
stdout_guard,
config.disable_log_timestamp,
config.log_color,
true,
config.log_format,
config.logfile_format,
config.extra_info,
false,
);
let sse_logging_layer_opt = if config.sse_logging {
@@ -310,8 +290,8 @@ impl<E: EthSpec> EnvironmentBuilder<E> {
(
self,
file_logging_layer,
stdout_logging_layer,
file_logging_layer,
sse_logging_layer_opt,
)
}

View File

@@ -1,47 +1,67 @@
use crate::{EnvironmentBuilder, LoggerConfig};
use clap::ArgMatches;
use logging::Libp2pDiscv5TracingLayer;
use logging::{tracing_logging_layer::LoggingLayer, SSELoggingComponents};
use logging::{
create_libp2p_discv5_tracing_layer, tracing_logging_layer::LoggingLayer, SSELoggingComponents,
};
use std::process;
use tracing_subscriber::filter::{FilterFn, LevelFilter};
use tracing_subscriber::filter::LevelFilter;
use types::EthSpec;
/// Constructs all logging layers including both Lighthouse-specific and
/// dependency logging.
///
/// The `Layer`s are as follows:
/// - A `Layer` which logs to `stdout`
/// - An `Option<Layer>` which logs to a log file
/// - An `Option<Layer>` which emits logs to an SSE stream
/// - An `Option<Layer>` which logs relevant dependencies to their
/// own log files. (Currently only `libp2p` and `discv5`)
pub fn construct_logger<E: EthSpec>(
logger_config: LoggerConfig,
matches: &ArgMatches,
environment_builder: EnvironmentBuilder<E>,
) -> (
EnvironmentBuilder<E>,
Libp2pDiscv5TracingLayer,
LoggingLayer,
LoggingLayer,
Option<SSELoggingComponents>,
LoggerConfig,
FilterFn,
LoggingLayer,
Option<LoggingLayer>,
Option<SSELoggingComponents>,
Option<Libp2pDiscv5TracingLayer>,
) {
let libp2p_discv5_layer = logging::create_libp2p_discv5_tracing_layer(
logger_config.path.clone(),
logger_config.max_log_size,
logger_config.compression,
logger_config.max_log_number,
);
let subcommand_name = matches.subcommand_name();
let logfile_prefix = subcommand_name.unwrap_or("lighthouse");
let logfile_prefix = matches.subcommand_name().unwrap_or("lighthouse");
let (builder, file_logging_layer, stdout_logging_layer, sse_logging_layer_opt) =
let (builder, stdout_logging_layer, file_logging_layer, sse_logging_layer_opt) =
environment_builder.init_tracing(logger_config.clone(), logfile_prefix);
let dependency_log_filter =
FilterFn::new(filter_dependency_log as fn(&tracing::Metadata<'_>) -> bool);
let libp2p_discv5_layer = if let Some(subcommand_name) = subcommand_name {
if subcommand_name == "beacon_node" || subcommand_name == "boot_node" {
if logger_config.max_log_size == 0 || logger_config.max_log_number == 0 {
// User has explicitly disabled logging to file.
None
} else {
create_libp2p_discv5_tracing_layer(
logger_config.path.clone(),
logger_config.max_log_size,
)
}
} else {
// Disable libp2p and discv5 logs when running other subcommands.
None
}
} else {
None
};
(
builder,
libp2p_discv5_layer,
file_logging_layer,
stdout_logging_layer,
sse_logging_layer_opt,
logger_config,
dependency_log_filter,
stdout_logging_layer,
file_logging_layer,
sse_logging_layer_opt,
libp2p_discv5_layer,
)
}
@@ -58,15 +78,3 @@ pub fn parse_level(level: &str) -> LevelFilter {
}
}
}
fn filter_dependency_log(meta: &tracing::Metadata<'_>) -> bool {
if let Some(file) = meta.file() {
let target = meta.target();
if file.contains("/.cargo/") {
return target.contains("discv5") || target.contains("libp2p");
} else {
return !file.contains("gossipsub") && !target.contains("hyper");
}
}
true
}