Tracing cleanup (#7168)

#7153
#7146
#7147
#7148 -> Thanks to @ackintosh


  This PR does the following:
1. Disable logging to file when using either `--logfile-max-number 0` or `--logfile-max-size 0`. Note that disabling the log file in this way will also disable `discv5` and `libp2p` logging.
1. `discv5` and `libp2p` logging will be disabled by default unless running `beacon_node` or `boot_node`. This should also fix the VC panic we were seeing.
1. Removes log rotation and compression from `libp2p` and `discv5` logs. It is now limited to 1 file and will rotate based on the value of the `--logfile-max-size` flag. We could potentially add flags specifically to control the size/number of these, however I felt a single log file was sufficient. Perhaps @AgeManning has opinions about this?
1. Removes all dependency logging and references to `dep_log`.
1.  Introduces workspace filtering to file and stdout. This explicitly allows logs from members of the Lighthouse workspace, disallowing all others. It uses a proc macro which pulls the member list from cargo metadata at compile time. This might be over-engineered but my hope is that this list will not require maintenance.
1. Unifies file and stdout JSON format. With slog, the formats were slightly different. @threehrsleep worked to maintain that format difference, to ensure there were no breaking changes. If these format differences are actually problematic we can restore them, however I felt the added complexity wasn't worth it.
1. General code improvements and cleanup.
This commit is contained in:
Mac L
2025-04-01 21:51:09 +11:00
committed by GitHub
parent fb7ec0d151
commit 4839ed620f
14 changed files with 382 additions and 365 deletions

View File

@@ -1,24 +1,24 @@
use chrono::Local;
use logroller::{Compression, LogRollerBuilder, Rotation, RotationSize};
use metrics::{try_create_int_counter, IntCounter, Result as MetricsResult};
use std::io::Write;
use std::path::PathBuf;
use std::sync::LazyLock;
use std::time::{Duration, Instant};
use tracing::Subscriber;
use tracing_appender::non_blocking::{NonBlocking, WorkerGuard};
use tracing_subscriber::layer::Context;
use tracing_subscriber::{EnvFilter, Layer};
use tracing_subscriber::EnvFilter;
pub const MAX_MESSAGE_WIDTH: usize = 40;
pub mod macros;
mod sse_logging_components;
mod tracing_libp2p_discv5_logging_layer;
pub mod tracing_logging_layer;
mod tracing_metrics_layer;
mod utils;
pub use sse_logging_components::SSELoggingComponents;
pub use tracing_libp2p_discv5_logging_layer::{
create_libp2p_discv5_tracing_layer, Libp2pDiscv5TracingLayer,
};
pub use tracing_logging_layer::LoggingLayer;
pub use tracing_metrics_layer::MetricsLayer;
pub use utils::build_workspace_filter;
/// The minimum interval between log messages indicating that a queue is full.
const LOG_DEBOUNCE_INTERVAL: Duration = Duration::from_secs(30);
@@ -51,132 +51,6 @@ impl TimeLatch {
}
}
/// Tracing layer that diverts `gossipsub` and `discv5` events into their own
/// dedicated log files, separate from the main log output.
///
/// The guards are retained only to keep the non-blocking writers' background
/// worker threads alive for the lifetime of the layer.
pub struct Libp2pDiscv5TracingLayer {
    // Writer for events whose target crate is `gossipsub` (see `on_event`).
    pub libp2p_non_blocking_writer: NonBlocking,
    pub _libp2p_guard: WorkerGuard,
    // Writer for events whose target crate is `discv5`.
    pub discv5_non_blocking_writer: NonBlocking,
    pub _discv5_guard: WorkerGuard,
}
impl<S> Layer<S> for Libp2pDiscv5TracingLayer
where
    S: Subscriber,
{
    /// Formats each event as `"<timestamp> <LEVEL> <message>"` and routes it
    /// to the libp2p or discv5 file writer based on the event's target crate.
    /// Events from any other crate are dropped.
    fn on_event(&self, event: &tracing::Event<'_>, _ctx: Context<S>) {
        let meta = event.metadata();
        let log_level = meta.level();
        let timestamp = Local::now().format("%Y-%m-%d %H:%M:%S").to_string();
        // The crate name is the target up to the first `::`; a target with no
        // module path maps to "unknown" and falls through to the `return` below.
        let target = match meta.target().split_once("::") {
            Some((crate_name, _)) => crate_name,
            None => "unknown",
        };
        let mut writer = match target {
            "gossipsub" => self.libp2p_non_blocking_writer.clone(),
            "discv5" => self.discv5_non_blocking_writer.clone(),
            // Not a libp2p/discv5 event — this layer ignores it.
            _ => return,
        };
        // Flatten all of the event's fields into one string.
        let mut visitor = LogMessageExtractor {
            message: String::default(),
        };
        event.record(&mut visitor);
        // NOTE(review): the extractor prefixes every field (including the
        // first) with a space, so level and message end up separated by a
        // double space here.
        let message = format!("{} {} {}\n", timestamp, log_level, visitor.message);
        if let Err(e) = writer.write_all(message.as_bytes()) {
            eprintln!("Failed to write log: {}", e);
        }
    }
}
/// Field visitor that accumulates every recorded field of an event into a
/// single space-separated string.
struct LogMessageExtractor {
    // Accumulated message; each recorded field is appended with a leading space.
    message: String,
}
impl tracing_core::field::Visit for LogMessageExtractor {
    fn record_debug(&mut self, _: &tracing_core::Field, value: &dyn std::fmt::Debug) {
        // NOTE(review): this rebuilds (and reallocates) the whole string for
        // every field — accidentally O(n^2) — and leaves a leading space
        // before the first field.
        self.message = format!("{} {:?}", self.message, value);
    }
}
/// Builds the dedicated libp2p/discv5 tracing layer.
///
/// When `base_tracing_log_path` is `Some`, events are written to size-rotated
/// `libp2p.log` / `discv5.log` files under that path (optionally gzipped,
/// keeping up to `max_log_number` files); when it is `None`, both writers
/// discard everything via `std::io::sink()`.
///
/// Exits the process if either rolling file appender fails to build.
pub fn create_libp2p_discv5_tracing_layer(
    base_tracing_log_path: Option<PathBuf>,
    max_log_size: u64,
    compression: bool,
    max_log_number: usize,
) -> Libp2pDiscv5TracingLayer {
    if let Some(mut tracing_log_path) = base_tracing_log_path {
        // Ensure that `tracing_log_path` only contains directories.
        // NOTE(review): joining each component back onto the full path only
        // reconstructs the path correctly when it is absolute (the root
        // component resets the join); for a relative path the components are
        // appended to the original path instead — confirm callers always pass
        // absolute paths.
        for p in tracing_log_path.clone().iter() {
            tracing_log_path = tracing_log_path.join(p);
            if let Ok(metadata) = tracing_log_path.metadata() {
                if !metadata.is_dir() {
                    // First existing non-directory prefix: trim it and stop.
                    tracing_log_path.pop();
                    break;
                }
            }
        }
        let mut libp2p_writer =
            LogRollerBuilder::new(tracing_log_path.clone(), PathBuf::from("libp2p.log"))
                .rotation(Rotation::SizeBased(RotationSize::MB(max_log_size)))
                // usize -> u64 conversion; falls back to keeping 10 files if
                // it somehow fails.
                .max_keep_files(max_log_number.try_into().unwrap_or_else(|e| {
                    eprintln!("Failed to convert max_log_number to u64: {}", e);
                    10
                }));
        let mut discv5_writer =
            LogRollerBuilder::new(tracing_log_path.clone(), PathBuf::from("discv5.log"))
                .rotation(Rotation::SizeBased(RotationSize::MB(max_log_size)))
                .max_keep_files(max_log_number.try_into().unwrap_or_else(|e| {
                    eprintln!("Failed to convert max_log_number to u64: {}", e);
                    10
                }));
        // Gzip rotated-out files when compression is requested.
        if compression {
            libp2p_writer = libp2p_writer.compression(Compression::Gzip);
            discv5_writer = discv5_writer.compression(Compression::Gzip);
        }
        let libp2p_writer = match libp2p_writer.build() {
            Ok(writer) => writer,
            Err(e) => {
                eprintln!("Failed to initialize libp2p rolling file appender: {e}");
                std::process::exit(1);
            }
        };
        let discv5_writer = match discv5_writer.build() {
            Ok(writer) => writer,
            Err(e) => {
                eprintln!("Failed to initialize discv5 rolling file appender: {e}");
                std::process::exit(1);
            }
        };
        // Guards must outlive the layer so the background writer threads stay up.
        let (libp2p_non_blocking_writer, _libp2p_guard) = NonBlocking::new(libp2p_writer);
        let (discv5_non_blocking_writer, _discv5_guard) = NonBlocking::new(discv5_writer);
        Libp2pDiscv5TracingLayer {
            libp2p_non_blocking_writer,
            _libp2p_guard,
            discv5_non_blocking_writer,
            _discv5_guard,
        }
    } else {
        // No log path: keep the same layer shape but discard all output.
        let (libp2p_non_blocking_writer, _libp2p_guard) = NonBlocking::new(std::io::sink());
        let (discv5_non_blocking_writer, _discv5_guard) = NonBlocking::new(std::io::sink());
        Libp2pDiscv5TracingLayer {
            libp2p_non_blocking_writer,
            _libp2p_guard,
            discv5_non_blocking_writer,
            _discv5_guard,
        }
    }
}
/// Return a tracing subscriber suitable for test usage.
///
/// By default no logs will be printed, but they can be enabled via

View File

@@ -0,0 +1,113 @@
use chrono::Local;
use logroller::{LogRollerBuilder, Rotation, RotationSize};
use std::io::Write;
use std::path::PathBuf;
use tracing::Subscriber;
use tracing_appender::non_blocking::{NonBlocking, WorkerGuard};
use tracing_subscriber::{layer::Context, Layer};
/// Tracing layer that diverts `libp2p_gossipsub` and `discv5` events into
/// their own dedicated log files, separate from the main log output.
///
/// The guards are retained only to keep the non-blocking writers' background
/// worker threads alive for the lifetime of the layer.
pub struct Libp2pDiscv5TracingLayer {
    // Writer for events whose target crate is `libp2p_gossipsub` (see `on_event`).
    pub libp2p_non_blocking_writer: NonBlocking,
    _libp2p_guard: WorkerGuard,
    // Writer for events whose target crate is `discv5`.
    pub discv5_non_blocking_writer: NonBlocking,
    _discv5_guard: WorkerGuard,
}
impl<S> Layer<S> for Libp2pDiscv5TracingLayer
where
    S: Subscriber,
{
    /// Formats each event as `"<timestamp> <LEVEL> <message>"` and routes it
    /// to the libp2p or discv5 file writer based on the event's target crate.
    /// Events from any other crate are dropped.
    fn on_event(&self, event: &tracing::Event<'_>, _ctx: Context<S>) {
        let meta = event.metadata();
        let log_level = meta.level();
        let timestamp = Local::now().format("%Y-%m-%d %H:%M:%S").to_string();

        // The crate name is the target up to the first `::`; targets with no
        // module path are not libp2p/discv5 and fall through to the `return`.
        let crate_name = meta
            .target()
            .split_once("::")
            .map_or("unknown", |(name, _)| name);

        let mut writer = if crate_name == "libp2p_gossipsub" {
            self.libp2p_non_blocking_writer.clone()
        } else if crate_name == "discv5" {
            self.discv5_non_blocking_writer.clone()
        } else {
            // Not a libp2p/discv5 event — this layer ignores it.
            return;
        };

        // Flatten all of the event's fields into one string.
        let mut visitor = LogMessageExtractor {
            message: String::default(),
        };
        event.record(&mut visitor);

        let message = format!("{} {} {}\n", timestamp, log_level, visitor.message);
        if let Err(e) = writer.write_all(message.as_bytes()) {
            eprintln!("Failed to write log: {}", e);
        }
    }
}
/// Field visitor that accumulates every recorded field of an event into a
/// single space-separated string.
struct LogMessageExtractor {
    // Accumulated message; each recorded field is appended with a leading space.
    message: String,
}

impl tracing_core::field::Visit for LogMessageExtractor {
    fn record_debug(&mut self, _: &tracing_core::Field, value: &dyn std::fmt::Debug) {
        use std::fmt::Write;
        // Append in place rather than rebuilding the whole string with
        // `format!` for every field, which reallocated the accumulated
        // message each time (accidentally O(n^2)). The output is unchanged:
        // every field, including the first, is preceded by a single space.
        self.message.push(' ');
        // Writing into a `String` cannot fail, so the result is ignored.
        let _ = write!(self.message, "{:?}", value);
    }
}
/// Builds the dedicated libp2p/discv5 tracing layer.
///
/// Returns `None` when `base_tracing_log_path` is `None` (i.e. file logging
/// is disabled). Otherwise each of the two logs is written to its own
/// size-rotated file (`libp2p.log` / `discv5.log`) under the given path,
/// keeping a single file of at most `max_log_size` MB each.
///
/// Exits the process if either rolling file appender fails to build.
pub fn create_libp2p_discv5_tracing_layer(
    base_tracing_log_path: Option<PathBuf>,
    max_log_size: u64,
) -> Option<Libp2pDiscv5TracingLayer> {
    let base_tracing_log_path = base_tracing_log_path?;

    // Ensure that the log path only contains directories: walk the path
    // prefix-by-prefix and truncate at the first existing component that is
    // not a directory (e.g. a trailing file name).
    //
    // Rebuild from an empty `PathBuf` so this also works for relative paths.
    // The previous version joined each component onto the *full* path, which
    // only behaved correctly for absolute paths (where the leading root
    // component resets `join`); a relative path ended up doubled.
    let mut tracing_log_path = PathBuf::new();
    for component in base_tracing_log_path.iter() {
        tracing_log_path.push(component);
        if let Ok(metadata) = tracing_log_path.metadata() {
            if !metadata.is_dir() {
                tracing_log_path.pop();
                break;
            }
        }
    }

    let libp2p_writer =
        LogRollerBuilder::new(tracing_log_path.clone(), PathBuf::from("libp2p.log"))
            .rotation(Rotation::SizeBased(RotationSize::MB(max_log_size)))
            // Rotation is limited to a single kept file; see PR discussion.
            .max_keep_files(1);
    let discv5_writer =
        LogRollerBuilder::new(tracing_log_path, PathBuf::from("discv5.log"))
            .rotation(Rotation::SizeBased(RotationSize::MB(max_log_size)))
            .max_keep_files(1);

    let libp2p_writer = match libp2p_writer.build() {
        Ok(writer) => writer,
        Err(e) => {
            eprintln!("Failed to initialize libp2p rolling file appender: {e}");
            std::process::exit(1);
        }
    };
    let discv5_writer = match discv5_writer.build() {
        Ok(writer) => writer,
        Err(e) => {
            eprintln!("Failed to initialize discv5 rolling file appender: {e}");
            std::process::exit(1);
        }
    };

    // Guards must outlive the layer so the background writer threads stay up.
    let (libp2p_non_blocking_writer, _libp2p_guard) = NonBlocking::new(libp2p_writer);
    let (discv5_non_blocking_writer, _discv5_guard) = NonBlocking::new(discv5_writer);

    Some(Libp2pDiscv5TracingLayer {
        libp2p_non_blocking_writer,
        _libp2p_guard,
        discv5_non_blocking_writer,
        _discv5_guard,
    })
}

View File

@@ -1,3 +1,5 @@
use crate::utils::is_ascii_control;
use chrono::prelude::*;
use serde_json::{Map, Value};
use std::collections::HashMap;
@@ -13,14 +15,11 @@ use tracing_subscriber::Layer;
pub struct LoggingLayer {
pub non_blocking_writer: NonBlocking,
pub guard: WorkerGuard,
_guard: WorkerGuard,
pub disable_log_timestamp: bool,
pub log_color: bool,
pub logfile_color: bool,
pub log_format: Option<String>,
pub logfile_format: Option<String>,
pub extra_info: bool,
pub dep_logs: bool,
span_fields: Arc<Mutex<HashMap<Id, SpanData>>>,
}
@@ -28,25 +27,19 @@ impl LoggingLayer {
#[allow(clippy::too_many_arguments)]
pub fn new(
non_blocking_writer: NonBlocking,
guard: WorkerGuard,
_guard: WorkerGuard,
disable_log_timestamp: bool,
log_color: bool,
logfile_color: bool,
log_format: Option<String>,
logfile_format: Option<String>,
extra_info: bool,
dep_logs: bool,
) -> Self {
Self {
non_blocking_writer,
guard,
_guard,
disable_log_timestamp,
log_color,
logfile_color,
log_format,
logfile_format,
extra_info,
dep_logs,
span_fields: Arc::new(Mutex::new(HashMap::new())),
}
}
@@ -84,16 +77,6 @@ where
String::new()
};
if !self.dep_logs {
if let Some(file) = meta.file() {
if file.contains("/.cargo/") {
return;
}
} else {
return;
}
}
let mut writer = self.non_blocking_writer.clone();
let mut visitor = LogMessageExtractor {
@@ -122,16 +105,10 @@ where
None => "<unknown_line>".to_string(),
};
if module.contains("discv5") {
visitor
.fields
.push(("service".to_string(), "\"discv5\"".to_string()));
}
let gray = "\x1b[90m";
let reset = "\x1b[0m";
let location = if self.extra_info {
if self.logfile_color {
if self.log_color {
format!("{}{}::{}:{}{}", gray, module, file, line, reset)
} else {
format!("{}::{}:{}", module, file, line)
@@ -164,33 +141,16 @@ where
}
};
if self.dep_logs {
if self.logfile_format.as_deref() == Some("JSON") {
build_json_log_file(
&visitor,
plain_level_str,
meta,
&ctx,
&self.span_fields,
event,
&mut writer,
);
} else {
build_log_text(
&visitor,
plain_level_str,
&timestamp,
&ctx,
&self.span_fields,
event,
&location,
color_level_str,
self.logfile_color,
&mut writer,
);
}
} else if self.log_format.as_deref() == Some("JSON") {
build_json_log_stdout(&visitor, plain_level_str, &timestamp, &mut writer);
if self.log_format.as_deref() == Some("JSON") {
build_log_json(
&visitor,
plain_level_str,
meta,
&ctx,
&self.span_fields,
event,
&mut writer,
);
} else {
build_log_text(
&visitor,
@@ -300,49 +260,7 @@ impl tracing_core::field::Visit for LogMessageExtractor {
}
}
/// Function to filter out ascii control codes.
///
/// This helps to keep log formatting consistent.
/// Whitespace and padding control codes are excluded.
fn is_ascii_control(character: &u8) -> bool {
    matches!(
        character,
        // C0 controls, minus tab (0x09), LF (0x0a) and CR (0x0d).
        b'\x00'..=b'\x08' |
        b'\x0b'..=b'\x0c' |
        b'\x0e'..=b'\x1f' |
        // DEL.
        b'\x7f' |
        // C1 controls. NOTE(review): the range starts at 0x81, so 0x80 is
        // not treated as a control byte — confirm this is intentional.
        b'\x81'..=b'\x9f'
    )
}
/// Serializes a log event as a single-line JSON object and writes it to
/// `writer` (the stdout path of the old split stdout/file JSON formats).
///
/// The object always carries `msg`, `level` and `ts`, followed by the
/// event's recorded fields.
fn build_json_log_stdout(
    visitor: &LogMessageExtractor,
    plain_level_str: &str,
    timestamp: &str,
    writer: &mut impl Write,
) {
    let mut log_map = Map::new();
    log_map.insert("msg".to_string(), Value::String(visitor.message.clone()));
    log_map.insert(
        "level".to_string(),
        Value::String(plain_level_str.to_string()),
    );
    log_map.insert("ts".to_string(), Value::String(timestamp.to_string()));
    // NOTE(review): a recorded field named "msg"/"level"/"ts" would overwrite
    // the entries inserted above.
    for (key, val) in visitor.fields.clone().into_iter() {
        // `parse_field` (defined elsewhere in this file) converts the raw
        // field string into a typed JSON value.
        let parsed_val = parse_field(&val);
        log_map.insert(key, parsed_val);
    }
    let json_obj = Value::Object(log_map);
    let output = format!("{}\n", json_obj);
    if let Err(e) = writer.write_all(output.as_bytes()) {
        eprintln!("Failed to write log: {}", e);
    }
}
fn build_json_log_file<'a, S>(
fn build_log_json<'a, S>(
visitor: &LogMessageExtractor,
plain_level_str: &str,
meta: &tracing::Metadata<'_>,

View File

@@ -0,0 +1,31 @@
use std::collections::HashSet;
use tracing_subscriber::filter::FilterFn;
use workspace_members::workspace_crates;
const WORKSPACE_CRATES: &[&str] = workspace_crates!();
/// Constructs a filter which only permits logging from crates which are members of the workspace.
pub fn build_workspace_filter(
) -> Result<FilterFn<impl Fn(&tracing::Metadata) -> bool + Clone>, String> {
let workspace_crates: HashSet<&str> = WORKSPACE_CRATES.iter().copied().collect();
Ok(tracing_subscriber::filter::FilterFn::new(move |metadata| {
let target_crate = metadata.target().split("::").next().unwrap_or("");
workspace_crates.contains(target_crate)
}))
}
/// Function to filter out ascii control codes.
///
/// This helps to keep log formatting consistent.
/// Whitespace and padding control codes are excluded.
pub fn is_ascii_control(character: &u8) -> bool {
    let byte = *character;
    // C0 controls, minus tab (0x09), LF (0x0a) and CR (0x0d)...
    byte <= 0x08
        || byte == 0x0b
        || byte == 0x0c
        || (0x0e..=0x1f).contains(&byte)
        // ...plus DEL...
        || byte == 0x7f
        // ...and the C1 controls. NOTE(review): 0x80 (itself a C1 control) is
        // not included — confirm this is intentional.
        || (0x81..=0x9f).contains(&byte)
}
}