Integrate tracing (#6339)

Tracing Integration
- [reference](5bbf1859e9/projects/project-ideas.md#L297)


- [x] replace slog & log with tracing throughout the codebase
- [x] implement custom crit log
- [x] make relevant changes in the formatter
- [x] replace sloggers
- [x] re-write SSE logging components

cc: @macladson @eserilev
This commit is contained in:
ThreeHrSleep
2025-03-13 04:01:05 +05:30
committed by GitHub
parent f23f984f85
commit d60c24ef1c
241 changed files with 9485 additions and 9328 deletions

View File

@@ -14,7 +14,7 @@ regex = { workspace = true }
rpassword = "5.0.0"
serde = { workspace = true }
serde_yaml = { workspace = true }
slog = { workspace = true }
tracing = { workspace = true }
types = { workspace = true }
validator_dir = { workspace = true }
zeroize = { workspace = true }

View File

@@ -7,11 +7,11 @@ use crate::{default_keystore_password_path, read_password_string, write_file_via
use eth2_keystore::Keystore;
use regex::Regex;
use serde::{Deserialize, Serialize};
use slog::{error, Logger};
use std::collections::HashSet;
use std::fs::{self, create_dir_all, File};
use std::io;
use std::path::{Path, PathBuf};
use tracing::error;
use types::{graffiti::GraffitiString, Address, PublicKey};
use validator_dir::VOTING_KEYSTORE_FILE;
use zeroize::Zeroizing;
@@ -266,7 +266,6 @@ impl ValidatorDefinitions {
&mut self,
validators_dir: P,
secrets_dir: P,
log: &Logger,
) -> Result<usize, Error> {
let mut keystore_paths = vec![];
recursively_find_voting_keystores(validators_dir, &mut keystore_paths)
@@ -311,10 +310,9 @@ impl ValidatorDefinitions {
Ok(keystore) => keystore,
Err(e) => {
error!(
log,
"Unable to read validator keystore";
"error" => e,
"keystore" => format!("{:?}", voting_keystore_path)
error = ?e,
keystore = ?voting_keystore_path,
"Unable to read validator keystore"
);
return None;
}
@@ -336,9 +334,8 @@ impl ValidatorDefinitions {
}
None => {
error!(
log,
"Invalid keystore public key";
"keystore" => format!("{:?}", voting_keystore_path)
keystore = ?voting_keystore_path,
"Invalid keystore public key"
);
return None;
}

View File

@@ -20,12 +20,11 @@ bytes = { workspace = true }
discv5 = { workspace = true }
eth2_config = { workspace = true }
kzg = { workspace = true }
logging = { workspace = true }
pretty_reqwest_error = { workspace = true }
reqwest = { workspace = true }
sensitive_url = { workspace = true }
serde_yaml = { workspace = true }
sha2 = { workspace = true }
slog = { workspace = true }
tracing = { workspace = true }
types = { workspace = true }
url = { workspace = true }

View File

@@ -19,12 +19,12 @@ use pretty_reqwest_error::PrettyReqwestError;
use reqwest::{Client, Error};
use sensitive_url::SensitiveUrl;
use sha2::{Digest, Sha256};
use slog::{info, warn, Logger};
use std::fs::{create_dir_all, File};
use std::io::{Read, Write};
use std::path::PathBuf;
use std::str::FromStr;
use std::time::Duration;
use tracing::{info, warn};
use types::{BeaconState, ChainSpec, Config, EthSpec, EthSpecId, Hash256};
use url::Url;
@@ -198,7 +198,6 @@ impl Eth2NetworkConfig {
&self,
genesis_state_url: Option<&str>,
timeout: Duration,
log: &Logger,
) -> Result<Option<BeaconState<E>>, String> {
let spec = self.chain_spec::<E>()?;
match &self.genesis_state_source {
@@ -217,9 +216,9 @@ impl Eth2NetworkConfig {
format!("Unable to parse genesis state bytes checksum: {:?}", e)
})?;
let bytes = if let Some(specified_url) = genesis_state_url {
download_genesis_state(&[specified_url], timeout, checksum, log).await
download_genesis_state(&[specified_url], timeout, checksum).await
} else {
download_genesis_state(built_in_urls, timeout, checksum, log).await
download_genesis_state(built_in_urls, timeout, checksum).await
}?;
let state = BeaconState::from_ssz_bytes(bytes.as_ref(), &spec).map_err(|e| {
format!("Downloaded genesis state SSZ bytes are invalid: {:?}", e)
@@ -387,7 +386,6 @@ async fn download_genesis_state(
urls: &[&str],
timeout: Duration,
checksum: Hash256,
log: &Logger,
) -> Result<Vec<u8>, String> {
if urls.is_empty() {
return Err(
@@ -407,11 +405,10 @@ async fn download_genesis_state(
.unwrap_or_else(|_| "<REDACTED>".to_string());
info!(
log,
"Downloading genesis state";
"server" => &redacted_url,
"timeout" => ?timeout,
"info" => "this may take some time on testnets with large validator counts"
server = &redacted_url,
timeout = ?timeout,
info = "this may take some time on testnets with large validator counts",
"Downloading genesis state"
);
let client = Client::new();
@@ -424,10 +421,9 @@ async fn download_genesis_state(
return Ok(bytes.into());
} else {
warn!(
log,
"Genesis state download failed";
"server" => &redacted_url,
"timeout" => ?timeout,
server = &redacted_url,
timeout = ?timeout,
"Genesis state download failed"
);
errors.push(format!(
"Response from {} did not match local checksum",
@@ -505,7 +501,7 @@ mod tests {
async fn mainnet_genesis_state() {
let config = Eth2NetworkConfig::from_hardcoded_net(&MAINNET).unwrap();
config
.genesis_state::<E>(None, Duration::from_secs(1), &logging::test_logger())
.genesis_state::<E>(None, Duration::from_secs(1))
.await
.expect("beacon state can decode");
}

View File

@@ -1,7 +1,7 @@
[package]
name = "lighthouse_version"
version = "0.1.0"
authors = ["Paul Hauner <paul@paulhauner.com>"]
authors = ["Sigma Prime <contact@sigmaprime.io>"]
edition = { workspace = true }
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

View File

@@ -9,14 +9,12 @@ test_logger = [] # Print log output to stderr when running tests instead of drop
[dependencies]
chrono = { version = "0.4", default-features = false, features = ["clock", "std"] }
logroller = { workspace = true }
metrics = { workspace = true }
once_cell = "1.17.1"
parking_lot = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
slog = { workspace = true }
slog-term = { workspace = true }
sloggers = { workspace = true }
take_mut = "0.2.2"
tokio = { workspace = true, features = [ "time" ] }
tracing = "0.1"
tracing-appender = { workspace = true }

View File

@@ -1,307 +0,0 @@
//! An object that can be used to pass through a channel and be cloned. It can therefore be used
//! via the broadcast channel.
use parking_lot::Mutex;
use serde::ser::SerializeMap;
use serde::serde_if_integer128;
use serde::Serialize;
use slog::{BorrowedKV, Key, Level, OwnedKVList, Record, RecordStatic, Serializer, SingleKV, KV};
use std::cell::RefCell;
use std::fmt;
use std::fmt::Write;
use std::sync::Arc;
use take_mut::take;
thread_local! {
static TL_BUF: RefCell<String> = RefCell::new(String::with_capacity(128))
}
/// Serialized record.
///
/// An owned, cloneable snapshot of an `slog` record, suitable for sending
/// through a broadcast channel (see module docs). All borrowed parts of the
/// original `Record` are copied into owned storage.
#[derive(Clone)]
pub struct AsyncRecord {
    // Fully formatted log message.
    msg: String,
    // Severity level of the original record.
    level: Level,
    // Source location (file/line/module) of the log call site.
    location: Box<slog::RecordLocation>,
    // The record's tag.
    tag: String,
    // Key/value pairs attached to the logger itself.
    logger_values: OwnedKVList,
    // Key/value pairs attached to this specific record. The `Arc<Mutex<..>>`
    // wrapper makes the otherwise non-cloneable KV chain `Clone + Send`.
    kv: Arc<Mutex<dyn KV + Send>>,
}
impl AsyncRecord {
    /// Serializes a `Record` and an `OwnedKVList`.
    ///
    /// Eagerly formats the message and copies every key/value pair into owned
    /// storage so the result can outlive the borrowed `Record`.
    pub fn from(record: &Record, logger_values: &OwnedKVList) -> Self {
        // Drain the record's KV pairs into an owned, sendable chain.
        let mut ser = ToSendSerializer::new();
        record
            .kv()
            .serialize(record, &mut ser)
            .expect("`ToSendSerializer` can't fail");

        AsyncRecord {
            msg: fmt::format(*record.msg()),
            level: record.level(),
            location: Box::new(*record.location()),
            tag: String::from(record.tag()),
            logger_values: logger_values.clone(),
            kv: Arc::new(Mutex::new(ser.finish())),
        }
    }

    /// Renders this record as a JSON object string, mapping any serializer
    /// failure to its `Debug` representation.
    pub fn to_json_string(&self) -> Result<String, String> {
        serde_json::to_string(&self).map_err(|e| format!("{:?}", e))
    }
}
/// An `slog` serializer that accumulates key/value pairs into an owned,
/// `Send`-able KV chain so they can be moved across threads.
pub struct ToSendSerializer {
    // The chain built so far; starts as the empty KV `()` and grows by
    // nesting tuples (see the `emit_*` impls below).
    kv: Box<dyn KV + Send>,
}

impl ToSendSerializer {
    fn new() -> Self {
        ToSendSerializer { kv: Box::new(()) }
    }

    // Consume the serializer, returning the accumulated KV chain.
    fn finish(self) -> Box<dyn KV + Send> {
        self.kv
    }
}
// Each `emit_*` replaces the current chain with a tuple of (old chain, new
// single KV pair). `take_mut::take` is used because the chain must be moved
// out of `self.kv` to be nested, then put back.
impl Serializer for ToSendSerializer {
    fn emit_bool(&mut self, key: Key, val: bool) -> slog::Result {
        take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val))));
        Ok(())
    }
    fn emit_unit(&mut self, key: Key) -> slog::Result {
        take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, ()))));
        Ok(())
    }
    fn emit_none(&mut self, key: Key) -> slog::Result {
        let val: Option<()> = None;
        take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val))));
        Ok(())
    }
    fn emit_char(&mut self, key: Key, val: char) -> slog::Result {
        take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val))));
        Ok(())
    }
    fn emit_u8(&mut self, key: Key, val: u8) -> slog::Result {
        take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val))));
        Ok(())
    }
    fn emit_i8(&mut self, key: Key, val: i8) -> slog::Result {
        take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val))));
        Ok(())
    }
    fn emit_u16(&mut self, key: Key, val: u16) -> slog::Result {
        take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val))));
        Ok(())
    }
    fn emit_i16(&mut self, key: Key, val: i16) -> slog::Result {
        take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val))));
        Ok(())
    }
    fn emit_u32(&mut self, key: Key, val: u32) -> slog::Result {
        take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val))));
        Ok(())
    }
    fn emit_i32(&mut self, key: Key, val: i32) -> slog::Result {
        take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val))));
        Ok(())
    }
    fn emit_f32(&mut self, key: Key, val: f32) -> slog::Result {
        take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val))));
        Ok(())
    }
    fn emit_u64(&mut self, key: Key, val: u64) -> slog::Result {
        take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val))));
        Ok(())
    }
    fn emit_i64(&mut self, key: Key, val: i64) -> slog::Result {
        take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val))));
        Ok(())
    }
    fn emit_f64(&mut self, key: Key, val: f64) -> slog::Result {
        take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val))));
        Ok(())
    }
    fn emit_u128(&mut self, key: Key, val: u128) -> slog::Result {
        take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val))));
        Ok(())
    }
    fn emit_i128(&mut self, key: Key, val: i128) -> slog::Result {
        take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val))));
        Ok(())
    }
    fn emit_usize(&mut self, key: Key, val: usize) -> slog::Result {
        take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val))));
        Ok(())
    }
    fn emit_isize(&mut self, key: Key, val: isize) -> slog::Result {
        take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val))));
        Ok(())
    }
    // Borrowed strings are copied to owned `String`s so the chain is `Send`.
    fn emit_str(&mut self, key: Key, val: &str) -> slog::Result {
        let val = val.to_owned();
        take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val))));
        Ok(())
    }
    // `fmt::Arguments` borrows its captures, so it is formatted eagerly here.
    fn emit_arguments(&mut self, key: Key, val: &fmt::Arguments) -> slog::Result {
        let val = fmt::format(*val);
        take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val))));
        Ok(())
    }
}
// Serializes the record as a flat JSON-style map: `time` and `level` first,
// followed by the message and all logger/record key/value pairs.
impl Serialize for AsyncRecord {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        // Get the current time (serialization time, not the original log time).
        let dt = chrono::Local::now().format("%b %e %T").to_string();

        // Rebuild the static part of a `Record` from our owned copies so the
        // KV pairs can be replayed through the slog serializer bridge below.
        let rs = RecordStatic {
            location: &self.location,
            level: self.level,
            tag: &self.tag,
        };
        let mut map_serializer = SerdeSerializer::new(serializer)?;

        // Serialize the time and log level first
        map_serializer.serialize_entry("time", &dt)?;
        map_serializer.serialize_entry("level", self.level.as_short_str())?;

        let kv = self.kv.lock();
        // Convoluted pattern to avoid binding `format_args!` to a temporary.
        // See: https://stackoverflow.com/questions/56304313/cannot-use-format-args-due-to-temporary-value-is-freed-at-the-end-of-this-state
        let mut f = |msg: std::fmt::Arguments| {
            map_serializer.serialize_entry("msg", msg.to_string())?;

            let record = Record::new(&rs, &msg, BorrowedKV(&(*kv)));
            // Logger-level KV pairs first, then record-level pairs; slog
            // errors are adapted into serde custom errors.
            self.logger_values
                .serialize(&record, &mut map_serializer)
                .map_err(serde::ser::Error::custom)?;
            record
                .kv()
                .serialize(&record, &mut map_serializer)
                .map_err(serde::ser::Error::custom)
        };
        f(format_args!("{}", self.msg))?;
        map_serializer.end()
    }
}
/// Bridges an in-progress serde map serializer so slog KV pairs can be
/// written directly as map entries (see the `slog::Serializer` impl below).
struct SerdeSerializer<S: serde::Serializer> {
    /// Current state of map serializing: `serde::Serializer::MapState`
    ser_map: S::SerializeMap,
}

impl<S: serde::Serializer> SerdeSerializer<S> {
    // Begin a map of unknown length on the underlying serializer.
    fn new(ser: S) -> Result<Self, S::Error> {
        let ser_map = ser.serialize_map(None)?;
        Ok(SerdeSerializer { ser_map })
    }

    // Write a single key/value entry into the open map.
    fn serialize_entry<K, V>(&mut self, key: K, value: V) -> Result<(), S::Error>
    where
        K: serde::Serialize,
        V: serde::Serialize,
    {
        self.ser_map.serialize_entry(&key, &value)
    }

    /// Finish serialization, and return the serializer
    fn end(self) -> Result<S::Ok, S::Error> {
        self.ser_map.end()
    }
}
// NOTE: This is borrowed from slog_json
//
// Writes one `$key`/`$val` entry into the wrapped serde map, adapting any
// serde error into an `std::io::Error` (which `?` then converts into the
// `slog::Result` error type expected by the `emit_*` methods below).
macro_rules! impl_m(
    ($s:expr, $key:expr, $val:expr) => ({
        let k_s: &str = $key.as_ref();
        $s.ser_map.serialize_entry(k_s, $val)
            .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, format!("serde serialization error: {}", e)))?;
        Ok(())
    });
);
// Forwards every slog primitive straight into the serde map via `impl_m!`.
impl<S> slog::Serializer for SerdeSerializer<S>
where
    S: serde::Serializer,
{
    fn emit_bool(&mut self, key: Key, val: bool) -> slog::Result {
        impl_m!(self, key, &val)
    }

    fn emit_unit(&mut self, key: Key) -> slog::Result {
        impl_m!(self, key, &())
    }

    fn emit_char(&mut self, key: Key, val: char) -> slog::Result {
        impl_m!(self, key, &val)
    }

    // `None` values are serialized as JSON `null`.
    fn emit_none(&mut self, key: Key) -> slog::Result {
        let val: Option<()> = None;
        impl_m!(self, key, &val)
    }
    fn emit_u8(&mut self, key: Key, val: u8) -> slog::Result {
        impl_m!(self, key, &val)
    }
    fn emit_i8(&mut self, key: Key, val: i8) -> slog::Result {
        impl_m!(self, key, &val)
    }
    fn emit_u16(&mut self, key: Key, val: u16) -> slog::Result {
        impl_m!(self, key, &val)
    }
    fn emit_i16(&mut self, key: Key, val: i16) -> slog::Result {
        impl_m!(self, key, &val)
    }
    fn emit_usize(&mut self, key: Key, val: usize) -> slog::Result {
        impl_m!(self, key, &val)
    }
    fn emit_isize(&mut self, key: Key, val: isize) -> slog::Result {
        impl_m!(self, key, &val)
    }
    fn emit_u32(&mut self, key: Key, val: u32) -> slog::Result {
        impl_m!(self, key, &val)
    }
    fn emit_i32(&mut self, key: Key, val: i32) -> slog::Result {
        impl_m!(self, key, &val)
    }
    fn emit_f32(&mut self, key: Key, val: f32) -> slog::Result {
        impl_m!(self, key, &val)
    }
    fn emit_u64(&mut self, key: Key, val: u64) -> slog::Result {
        impl_m!(self, key, &val)
    }
    fn emit_i64(&mut self, key: Key, val: i64) -> slog::Result {
        impl_m!(self, key, &val)
    }
    fn emit_f64(&mut self, key: Key, val: f64) -> slog::Result {
        impl_m!(self, key, &val)
    }
    // 128-bit ints are only available when the serde version supports them.
    serde_if_integer128! {
        fn emit_u128(&mut self, key: Key, val: u128) -> slog::Result {
            impl_m!(self, key, &val)
        }
        fn emit_i128(&mut self, key: Key, val: i128) -> slog::Result {
            impl_m!(self, key, &val)
        }
    }
    fn emit_str(&mut self, key: Key, val: &str) -> slog::Result {
        impl_m!(self, key, &val)
    }
    // Format lazily-captured arguments into a reused thread-local buffer to
    // avoid allocating a fresh `String` per entry.
    fn emit_arguments(&mut self, key: Key, val: &fmt::Arguments) -> slog::Result {
        TL_BUF.with(|buf| {
            let mut buf = buf.borrow_mut();

            buf.write_fmt(*val).unwrap();

            let res = { || impl_m!(self, key, &*buf) }();
            buf.clear();
            res
        })
    }
}

View File

@@ -1,20 +1,20 @@
use metrics::{inc_counter, try_create_int_counter, IntCounter, Result as MetricsResult};
use slog::Logger;
use slog_term::Decorator;
use std::io::{Result, Write};
use chrono::Local;
use logroller::{Compression, LogRollerBuilder, Rotation, RotationSize};
use metrics::{try_create_int_counter, IntCounter, Result as MetricsResult};
use std::io::Write;
use std::path::PathBuf;
use std::sync::LazyLock;
use std::time::{Duration, Instant};
use tracing_appender::non_blocking::NonBlocking;
use tracing_appender::rolling::{RollingFileAppender, Rotation};
use tracing_logging_layer::LoggingLayer;
use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt};
use tracing::Subscriber;
use tracing_appender::non_blocking::{NonBlocking, WorkerGuard};
use tracing_subscriber::layer::Context;
use tracing_subscriber::{EnvFilter, Layer};
pub const MAX_MESSAGE_WIDTH: usize = 40;
pub mod async_record;
pub mod macros;
mod sse_logging_components;
mod tracing_logging_layer;
pub mod tracing_logging_layer;
mod tracing_metrics_layer;
pub use sse_logging_components::SSELoggingComponents;
@@ -32,169 +32,6 @@ pub static ERRORS_TOTAL: LazyLock<MetricsResult<IntCounter>> =
pub static CRITS_TOTAL: LazyLock<MetricsResult<IntCounter>> =
LazyLock::new(|| try_create_int_counter("crit_total", "Count of crits logged"));
/// A `slog_term::Decorator` wrapper that pads log messages to a fixed width
/// (so key/value pairs line up across lines) and counts log records per level
/// into the module's metrics counters.
pub struct AlignedTermDecorator<D: Decorator> {
    // The underlying decorator that does the actual terminal output.
    wrapped: D,
    // Column at which key/value pairs should start (message padding target).
    message_width: usize,
}

impl<D: Decorator> AlignedTermDecorator<D> {
    pub fn new(decorator: D, message_width: usize) -> Self {
        AlignedTermDecorator {
            wrapped: decorator,
            message_width,
        }
    }
}
impl<D: Decorator> Decorator for AlignedTermDecorator<D> {
    /// Bumps the per-level metrics counter for the record, then delegates to
    /// the wrapped decorator with an `AlignedRecordDecorator` inserted to
    /// handle message padding.
    fn with_record<F>(
        &self,
        record: &slog::Record,
        _logger_values: &slog::OwnedKVList,
        f: F,
    ) -> Result<()>
    where
        F: FnOnce(&mut dyn slog_term::RecordDecorator) -> std::io::Result<()>,
    {
        // Count INFO/WARN/ERROR/CRIT records; lower levels are not tracked.
        match record.level() {
            slog::Level::Info => inc_counter(&INFOS_TOTAL),
            slog::Level::Warning => inc_counter(&WARNS_TOTAL),
            slog::Level::Error => inc_counter(&ERRORS_TOTAL),
            slog::Level::Critical => inc_counter(&CRITS_TOTAL),
            _ => (),
        }

        self.wrapped.with_record(record, _logger_values, |deco| {
            f(&mut AlignedRecordDecorator::new(deco, self.message_width))
        })
    }
}
/// Per-record decorator state used by `AlignedTermDecorator` to pad the
/// message section out to `message_width` before key/value pairs are written.
struct AlignedRecordDecorator<'a> {
    // The real decorator being wrapped for this one record.
    wrapped: &'a mut dyn slog_term::RecordDecorator,
    // Bytes of the message written so far (used to compute padding).
    message_count: usize,
    // True while the message section of the record is being written.
    message_active: bool,
    // When set, the next write (a separating comma) is swallowed.
    ignore_comma: bool,
    // Target column for the start of key/value pairs.
    message_width: usize,
}

impl<'a> AlignedRecordDecorator<'a> {
    fn new(
        decorator: &'a mut dyn slog_term::RecordDecorator,
        message_width: usize,
    ) -> AlignedRecordDecorator<'a> {
        AlignedRecordDecorator {
            wrapped: decorator,
            message_count: 0,
            ignore_comma: false,
            message_active: false,
            message_width,
        }
    }

    // Write `buf`, honouring the comma-suppression flag and tracking how many
    // bytes of the message have been emitted.
    fn filtered_write(&mut self, buf: &[u8]) -> Result<usize> {
        if self.ignore_comma {
            //don't write comma
            self.ignore_comma = false;
            Ok(buf.len())
        } else if self.message_active {
            self.wrapped.write(buf).inspect(|n| self.message_count += n)
        } else {
            self.wrapped.write(buf)
        }
    }
}
impl Write for AlignedRecordDecorator<'_> {
    /// Writes `buf`, replacing disallowed ASCII control codes with `_`.
    ///
    /// Note the asymmetry: the cheap *detection* uses std's
    /// `u8::is_ascii_control`, but the *replacement* uses the local
    /// `is_ascii_control`, which deliberately exempts whitespace/padding
    /// codes (tab, newline, CR) so normal formatting survives.
    fn write(&mut self, buf: &[u8]) -> Result<usize> {
        if buf.iter().any(u8::is_ascii_control) {
            let filtered = buf
                .iter()
                .cloned()
                .map(|c| if !is_ascii_control(&c) { c } else { b'_' })
                .collect::<Vec<u8>>();
            self.filtered_write(&filtered)
        } else {
            self.filtered_write(buf)
        }
    }

    fn flush(&mut self) -> Result<()> {
        self.wrapped.flush()
    }
}
// Tracks which section of the record is being written and injects padding
// between the message and the first key so key/value pairs align at
// `message_width`.
impl slog_term::RecordDecorator for AlignedRecordDecorator<'_> {
    fn reset(&mut self) -> Result<()> {
        // Clear per-record state before the next record starts.
        self.message_active = false;
        self.message_count = 0;
        self.ignore_comma = false;
        self.wrapped.reset()
    }

    fn start_whitespace(&mut self) -> Result<()> {
        self.wrapped.start_whitespace()
    }

    fn start_msg(&mut self) -> Result<()> {
        // Entering the message section: begin counting message bytes.
        self.message_active = true;
        self.ignore_comma = false;
        self.wrapped.start_msg()
    }

    fn start_timestamp(&mut self) -> Result<()> {
        self.wrapped.start_timestamp()
    }

    fn start_level(&mut self) -> Result<()> {
        self.wrapped.start_level()
    }

    fn start_comma(&mut self) -> Result<()> {
        // If the message is short enough to be padded, suppress the comma that
        // would otherwise separate it from the first key.
        if self.message_active && self.message_count + 1 < self.message_width {
            self.ignore_comma = true;
        }
        self.wrapped.start_comma()
    }

    fn start_key(&mut self) -> Result<()> {
        // First key after a short message: pad with spaces up to
        // `message_width`, then leave message mode.
        if self.message_active && self.message_count + 1 < self.message_width {
            write!(
                self,
                "{}",
                " ".repeat(self.message_width - self.message_count)
            )?;
            self.message_active = false;
            self.message_count = 0;
            self.ignore_comma = false;
        }
        self.wrapped.start_key()
    }

    fn start_value(&mut self) -> Result<()> {
        self.wrapped.start_value()
    }

    fn start_separator(&mut self) -> Result<()> {
        self.wrapped.start_separator()
    }
}
/// Function to filter out ascii control codes.
///
/// This helps to keep log formatting consistent.
/// Whitespace and padding control codes are excluded: tab (0x09), newline
/// (0x0a), carriage return (0x0d), 0x80 and 0xa0 all pass through.
fn is_ascii_control(character: &u8) -> bool {
    let byte = *character;
    // u8 is unsigned, so `byte <= 0x08` covers the 0x00..=0x08 range.
    byte <= 0x08
        || (0x0b..=0x0c).contains(&byte)
        || (0x0e..=0x1f).contains(&byte)
        || byte == 0x7f
        || (0x81..=0x9f).contains(&byte)
}
/// Provides de-bounce functionality for logging.
// NOTE(review): stores an `Instant`; presumably the time of the last trigger,
// used to rate-limit repeated log lines — confirm against `impl TimeLatch`
// (not fully visible here). `None` until first use (via `Default`).
#[derive(Default)]
pub struct TimeLatch(Option<Instant>);
@@ -214,75 +51,127 @@ impl TimeLatch {
}
}
pub fn create_tracing_layer(base_tracing_log_path: PathBuf) {
let mut tracing_log_path = PathBuf::new();
pub struct Libp2pDiscv5TracingLayer {
pub libp2p_non_blocking_writer: NonBlocking,
pub _libp2p_guard: WorkerGuard,
pub discv5_non_blocking_writer: NonBlocking,
pub _discv5_guard: WorkerGuard,
}
// Ensure that `tracing_log_path` only contains directories.
for p in base_tracing_log_path.iter() {
tracing_log_path = tracing_log_path.join(p);
if let Ok(metadata) = tracing_log_path.metadata() {
if !metadata.is_dir() {
tracing_log_path.pop();
break;
}
impl<S> Layer<S> for Libp2pDiscv5TracingLayer
where
S: Subscriber,
{
fn on_event(&self, event: &tracing::Event<'_>, _ctx: Context<S>) {
let meta = event.metadata();
let log_level = meta.level();
let timestamp = Local::now().format("%Y-%m-%d %H:%M:%S").to_string();
let target = match meta.target().split_once("::") {
Some((crate_name, _)) => crate_name,
None => "unknown",
};
let mut writer = match target {
"gossipsub" => self.libp2p_non_blocking_writer.clone(),
"discv5" => self.discv5_non_blocking_writer.clone(),
_ => return,
};
let mut visitor = LogMessageExtractor {
message: String::default(),
};
event.record(&mut visitor);
let message = format!("{} {} {}\n", timestamp, log_level, visitor.message);
if let Err(e) = writer.write_all(message.as_bytes()) {
eprintln!("Failed to write log: {}", e);
}
}
let filter_layer = match tracing_subscriber::EnvFilter::try_from_default_env()
.or_else(|_| tracing_subscriber::EnvFilter::try_new("warn"))
{
Ok(filter) => filter,
Err(e) => {
eprintln!("Failed to initialize dependency logging {e}");
return;
}
};
let Ok(libp2p_writer) = RollingFileAppender::builder()
.rotation(Rotation::DAILY)
.max_log_files(2)
.filename_prefix("libp2p")
.filename_suffix("log")
.build(tracing_log_path.clone())
else {
eprintln!("Failed to initialize libp2p rolling file appender");
return;
};
let Ok(discv5_writer) = RollingFileAppender::builder()
.rotation(Rotation::DAILY)
.max_log_files(2)
.filename_prefix("discv5")
.filename_suffix("log")
.build(tracing_log_path)
else {
eprintln!("Failed to initialize discv5 rolling file appender");
return;
};
let (libp2p_non_blocking_writer, _libp2p_guard) = NonBlocking::new(libp2p_writer);
let (discv5_non_blocking_writer, _discv5_guard) = NonBlocking::new(discv5_writer);
let custom_layer = LoggingLayer {
libp2p_non_blocking_writer,
_libp2p_guard,
discv5_non_blocking_writer,
_discv5_guard,
};
if let Err(e) = tracing_subscriber::fmt()
.with_env_filter(filter_layer)
.with_writer(std::io::sink)
.finish()
.with(MetricsLayer)
.with(custom_layer)
.try_init()
{
eprintln!("Failed to initialize dependency logging {e}");
}
}
/// Return a logger suitable for test usage.
/// Tracing field visitor that flattens every field of an event into one
/// space-separated string (used when writing dependency logs to file).
struct LogMessageExtractor {
    // Accumulated text; each recorded field is appended in `Debug` form.
    message: String,
}

impl tracing_core::field::Visit for LogMessageExtractor {
    // All field types funnel through `record_debug`, so every value
    // (including the `message` field itself) is appended via `{:?}`.
    fn record_debug(&mut self, _: &tracing_core::Field, value: &dyn std::fmt::Debug) {
        self.message = format!("{} {:?}", self.message, value);
    }
}
/// Build the tracing layer that captures `libp2p` (gossipsub) and `discv5`
/// dependency logs and writes them to size-rotated files.
///
/// * `base_tracing_log_path` - directory for `libp2p.log` / `discv5.log`; when
///   `None`, both writers are backed by `std::io::sink()` so dependency logs
///   are discarded.
/// * `max_log_size` - rotation threshold, in megabytes, for each log file.
/// * `compression` - gzip-compress rotated files when `true`.
/// * `max_log_number` - how many rotated files to keep (falls back to 10 if
///   the `usize` -> `u64` conversion fails).
///
/// Exits the process if either rolling file appender cannot be created.
pub fn create_libp2p_discv5_tracing_layer(
    base_tracing_log_path: Option<PathBuf>,
    max_log_size: u64,
    compression: bool,
    max_log_number: usize,
) -> Libp2pDiscv5TracingLayer {
    if let Some(base_path) = base_tracing_log_path {
        // Ensure that `tracing_log_path` only contains directories: rebuild
        // the path component-by-component, stopping (and dropping the
        // offending component) at the first existing non-directory.
        //
        // NOTE: accumulate from an *empty* `PathBuf`. The previous version
        // iterated over a clone of the path while joining components onto the
        // already-complete path, which duplicated components for relative
        // paths (e.g. `a/b` became `a/b/a/b`).
        let mut tracing_log_path = PathBuf::new();
        for p in base_path.iter() {
            tracing_log_path = tracing_log_path.join(p);
            if let Ok(metadata) = tracing_log_path.metadata() {
                if !metadata.is_dir() {
                    tracing_log_path.pop();
                    break;
                }
            }
        }

        let mut libp2p_writer =
            LogRollerBuilder::new(tracing_log_path.clone(), PathBuf::from("libp2p.log"))
                .rotation(Rotation::SizeBased(RotationSize::MB(max_log_size)))
                .max_keep_files(max_log_number.try_into().unwrap_or_else(|e| {
                    eprintln!("Failed to convert max_log_number to u64: {}", e);
                    10
                }));

        let mut discv5_writer =
            LogRollerBuilder::new(tracing_log_path.clone(), PathBuf::from("discv5.log"))
                .rotation(Rotation::SizeBased(RotationSize::MB(max_log_size)))
                .max_keep_files(max_log_number.try_into().unwrap_or_else(|e| {
                    eprintln!("Failed to convert max_log_number to u64: {}", e);
                    10
                }));

        if compression {
            libp2p_writer = libp2p_writer.compression(Compression::Gzip);
            discv5_writer = discv5_writer.compression(Compression::Gzip);
        }

        let Ok(libp2p_writer) = libp2p_writer.build() else {
            eprintln!("Failed to initialize libp2p rolling file appender");
            std::process::exit(1);
        };

        let Ok(discv5_writer) = discv5_writer.build() else {
            eprintln!("Failed to initialize discv5 rolling file appender");
            std::process::exit(1);
        };

        // Non-blocking writers hand the actual I/O to a background worker;
        // the guards must be kept alive for logs to be flushed.
        let (libp2p_non_blocking_writer, _libp2p_guard) = NonBlocking::new(libp2p_writer);
        let (discv5_non_blocking_writer, _discv5_guard) = NonBlocking::new(discv5_writer);

        Libp2pDiscv5TracingLayer {
            libp2p_non_blocking_writer,
            _libp2p_guard,
            discv5_non_blocking_writer,
            _discv5_guard,
        }
    } else {
        // No log path supplied: install sink-backed writers so dependency
        // logging is disabled without callers needing a special case.
        let (libp2p_non_blocking_writer, _libp2p_guard) = NonBlocking::new(std::io::sink());
        let (discv5_non_blocking_writer, _discv5_guard) = NonBlocking::new(std::io::sink());

        Libp2pDiscv5TracingLayer {
            libp2p_non_blocking_writer,
            _libp2p_guard,
            discv5_non_blocking_writer,
            _discv5_guard,
        }
    }
}
/// Return a tracing subscriber suitable for test usage.
///
/// By default no logs will be printed, but they can be enabled via
/// the `test_logger` feature. This feature can be enabled for any
@@ -290,17 +179,10 @@ pub fn create_tracing_layer(base_tracing_log_path: PathBuf) {
/// ```bash
/// cargo test -p beacon_chain --features logging/test_logger
/// ```
pub fn test_logger() -> Logger {
use sloggers::Build;
pub fn create_test_tracing_subscriber() {
if cfg!(feature = "test_logger") {
sloggers::terminal::TerminalLoggerBuilder::new()
.level(sloggers::types::Severity::Debug)
.build()
.expect("Should build TerminalLoggerBuilder")
} else {
sloggers::null::NullLoggerBuilder
.build()
.expect("Should build NullLoggerBuilder")
let _ = tracing_subscriber::fmt()
.with_env_filter(EnvFilter::try_new("debug").unwrap())
.try_init();
}
}

View File

@@ -0,0 +1,6 @@
/// Log at `tracing::error!` severity with an `error_type = "crit"` field.
///
/// `tracing` has no level above ERROR, so critical messages are emitted as
/// errors tagged with `error_type = "crit"`; the formatting/SSE layers detect
/// this field and render the level as `CRIT` instead of `ERROR`.
#[macro_export]
macro_rules! crit {
    ($($arg:tt)*) => {
        tracing::error!(error_type = "crit", $($arg)*);
    };
}

View File

@@ -1,46 +1,108 @@
//! This module provides an implementation of `slog::Drain` that optionally writes to a channel if
//! there are subscribers to a HTTP SSE stream.
use crate::async_record::AsyncRecord;
use slog::{Drain, OwnedKVList, Record};
use std::panic::AssertUnwindSafe;
use serde_json::json;
use serde_json::Value;
use std::sync::Arc;
use tokio::sync::broadcast::Sender;
use tracing::field::{Field, Visit};
use tracing::{Event, Subscriber};
use tracing_subscriber::layer::{Context, Layer};
/// Default log level for SSE Events.
// NOTE: Made this a constant. Debug level seems to be pretty intense. Can make this
// configurable later if needed.
const LOG_LEVEL: slog::Level = slog::Level::Info;
const LOG_LEVEL: tracing::Level = tracing::Level::INFO;
/// The components required in the HTTP API task to receive logged events.
#[derive(Clone)]
pub struct SSELoggingComponents {
/// The channel to receive events from.
pub sender: Arc<AssertUnwindSafe<Sender<AsyncRecord>>>,
pub sender: Arc<Sender<Arc<Value>>>,
}
impl SSELoggingComponents {
/// Create a new SSE drain.
pub fn new(channel_size: usize) -> Self {
let (sender, _receiver) = tokio::sync::broadcast::channel(channel_size);
let sender = Arc::new(AssertUnwindSafe(sender));
SSELoggingComponents { sender }
SSELoggingComponents {
sender: Arc::new(sender),
}
}
}
impl Drain for SSELoggingComponents {
type Ok = ();
type Err = &'static str;
impl<S: Subscriber> Layer<S> for SSELoggingComponents {
fn on_event(&self, event: &Event<'_>, _ctx: Context<'_, S>) {
if *event.metadata().level() > LOG_LEVEL {
return;
}
fn log(&self, record: &Record, logger_values: &OwnedKVList) -> Result<Self::Ok, Self::Err> {
if record.level().is_at_least(LOG_LEVEL) {
// Attempt to send the logs
match self.sender.send(AsyncRecord::from(record, logger_values)) {
Ok(_num_sent) => {} // Everything got sent
Err(_err) => {} // There are no subscribers, do nothing
let mut visitor = TracingEventVisitor::new();
event.record(&mut visitor);
let mut log_entry = visitor.finish(event.metadata());
if let Some(error_type) = log_entry
.get("fields")
.and_then(|fields| fields.get("error_type"))
.and_then(|val| val.as_str())
{
if error_type.eq_ignore_ascii_case("crit") {
log_entry["level"] = json!("CRIT");
if let Some(Value::Object(ref mut map)) = log_entry.get_mut("fields") {
map.remove("error_type");
}
}
}
Ok(())
let _ = self.sender.send(Arc::new(log_entry));
}
}
/// Field visitor that collects a tracing event's fields into a JSON map and
/// then wraps them, with metadata, into a single JSON log entry for SSE.
struct TracingEventVisitor {
    // Event fields keyed by field name.
    fields: serde_json::Map<String, Value>,
}

impl TracingEventVisitor {
    fn new() -> Self {
        TracingEventVisitor {
            fields: serde_json::Map::new(),
        }
    }

    /// Consume the collected fields and produce the final log entry:
    /// `{ "time", "level", "target", "fields": {...} }`.
    fn finish(self, metadata: &tracing::Metadata<'_>) -> Value {
        let mut log_entry = serde_json::Map::new();
        // Timestamp is taken at serialization time, local timezone.
        log_entry.insert(
            "time".to_string(),
            json!(chrono::Local::now()
                .format("%b %d %H:%M:%S%.3f")
                .to_string()),
        );
        log_entry.insert("level".to_string(), json!(metadata.level().to_string()));
        log_entry.insert("target".to_string(), json!(metadata.target()));
        log_entry.insert("fields".to_string(), Value::Object(self.fields));
        Value::Object(log_entry)
    }
}
// Typed recorders keep native JSON types where possible; everything else
// falls back to the `Debug` representation as a string.
impl Visit for TracingEventVisitor {
    fn record_debug(&mut self, field: &Field, value: &dyn std::fmt::Debug) {
        self.fields
            .insert(field.name().to_string(), json!(format!("{:?}", value)));
    }

    fn record_str(&mut self, field: &Field, value: &str) {
        self.fields.insert(field.name().to_string(), json!(value));
    }

    fn record_i64(&mut self, field: &Field, value: i64) {
        self.fields.insert(field.name().to_string(), json!(value));
    }

    fn record_u64(&mut self, field: &Field, value: u64) {
        self.fields.insert(field.name().to_string(), json!(value));
    }

    fn record_bool(&mut self, field: &Field, value: bool) {
        self.fields.insert(field.name().to_string(), json!(value));
    }
}

View File

@@ -1,56 +1,531 @@
use chrono::prelude::*;
use serde_json::{Map, Value};
use std::collections::HashMap;
use std::io::Write;
use std::sync::{Arc, Mutex};
use tracing::field::Field;
use tracing::span::Id;
use tracing::Subscriber;
use tracing_appender::non_blocking::{NonBlocking, WorkerGuard};
use tracing_subscriber::layer::Context;
use tracing_subscriber::registry::LookupSpan;
use tracing_subscriber::Layer;
pub struct LoggingLayer {
pub libp2p_non_blocking_writer: NonBlocking,
pub _libp2p_guard: WorkerGuard,
pub discv5_non_blocking_writer: NonBlocking,
pub _discv5_guard: WorkerGuard,
pub non_blocking_writer: NonBlocking,
pub guard: WorkerGuard,
pub disable_log_timestamp: bool,
pub log_color: bool,
pub logfile_color: bool,
pub log_format: Option<String>,
pub logfile_format: Option<String>,
pub extra_info: bool,
pub dep_logs: bool,
span_fields: Arc<Mutex<HashMap<Id, SpanData>>>,
}
impl LoggingLayer {
    /// Construct the main formatting layer.
    ///
    /// * `non_blocking_writer` / `guard` - the output writer and the worker
    ///   guard that must stay alive for it to flush.
    /// * `disable_log_timestamp` - omit timestamps from formatted lines.
    /// * `log_color` / `logfile_color` - ANSI color toggles for terminal and
    ///   file output respectively.
    /// * `log_format` / `logfile_format` - optional format names (e.g. JSON).
    /// * `extra_info` - include module/file/line location in output.
    /// * `dep_logs` - whether to include logs from dependency crates.
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        non_blocking_writer: NonBlocking,
        guard: WorkerGuard,
        disable_log_timestamp: bool,
        log_color: bool,
        logfile_color: bool,
        log_format: Option<String>,
        logfile_format: Option<String>,
        extra_info: bool,
        dep_logs: bool,
    ) -> Self {
        Self {
            non_blocking_writer,
            guard,
            disable_log_timestamp,
            log_color,
            logfile_color,
            log_format,
            logfile_format,
            extra_info,
            dep_logs,
            // Span fields are captured in `on_new_span` and looked up when
            // formatting events; keyed by span `Id`.
            span_fields: Arc::new(Mutex::new(HashMap::new())),
        }
    }
}
impl<S> Layer<S> for LoggingLayer
where
S: Subscriber,
S: Subscriber + for<'a> LookupSpan<'a>,
{
fn on_event(&self, event: &tracing::Event<'_>, _ctx: Context<S>) {
// Capture a new span's name and fields so they can be attached to events
// formatted later. `SpanFieldsExtractor` is defined elsewhere in this file;
// presumably it records each attribute into `fields` — confirm there.
fn on_new_span(&self, attrs: &tracing::span::Attributes<'_>, id: &Id, _ctx: Context<S>) {
    let metadata = attrs.metadata();
    let span_name = metadata.name();

    let mut visitor = SpanFieldsExtractor::default();
    attrs.record(&mut visitor);

    let span_data = SpanData {
        name: span_name.to_string(),
        fields: visitor.fields,
    };

    // Recover from a poisoned mutex rather than panicking inside the
    // logging path.
    let mut span_fields = match self.span_fields.lock() {
        Ok(guard) => guard,
        Err(poisoned) => poisoned.into_inner(),
    };
    span_fields.insert(id.clone(), span_data);
}
fn on_event(&self, event: &tracing::Event<'_>, ctx: Context<S>) {
let meta = event.metadata();
let log_level = meta.level();
let timestamp = Local::now().format("%Y-%m-%d %H:%M:%S").to_string();
let target = match meta.target().split_once("::") {
Some((crate_name, _)) => crate_name,
None => "unknown",
let timestamp = if !self.disable_log_timestamp {
Local::now().format("%b %d %H:%M:%S%.3f").to_string()
} else {
String::new()
};
let mut writer = match target {
"gossipsub" => self.libp2p_non_blocking_writer.clone(),
"discv5" => self.discv5_non_blocking_writer.clone(),
_ => return,
};
if !self.dep_logs {
if let Some(file) = meta.file() {
if file.contains("/.cargo/") {
return;
}
} else {
return;
}
}
let mut writer = self.non_blocking_writer.clone();
let mut visitor = LogMessageExtractor {
message: String::default(),
message: String::new(),
fields: Vec::new(),
is_crit: false,
};
event.record(&mut visitor);
// Remove ascii control codes from message.
// All following formatting and logs components are predetermined or known.
if visitor.message.as_bytes().iter().any(u8::is_ascii_control) {
let filtered = visitor
.message
.as_bytes()
.iter()
.map(|c| if is_ascii_control(c) { b'_' } else { *c })
.collect::<Vec<u8>>();
visitor.message = String::from_utf8(filtered).unwrap_or_default();
};
event.record(&mut visitor);
let message = format!("{} {} {}\n", timestamp, log_level, visitor.message);
let module = meta.module_path().unwrap_or("<unknown_module>");
let file = meta.file().unwrap_or("<unknown_file>");
let line = match meta.line() {
Some(line) => line.to_string(),
None => "<unknown_line>".to_string(),
};
if let Err(e) = writer.write_all(message.as_bytes()) {
eprintln!("Failed to write log: {}", e);
if module.contains("discv5") {
visitor
.fields
.push(("service".to_string(), "\"discv5\"".to_string()));
}
let gray = "\x1b[90m";
let reset = "\x1b[0m";
let location = if self.extra_info {
if self.logfile_color {
format!("{}{}::{}:{}{}", gray, module, file, line, reset)
} else {
format!("{}::{}:{}", module, file, line)
}
} else {
String::new()
};
let plain_level_str = if visitor.is_crit {
"CRIT"
} else {
match *log_level {
tracing::Level::ERROR => "ERROR",
tracing::Level::WARN => "WARN",
tracing::Level::INFO => "INFO",
tracing::Level::DEBUG => "DEBUG",
tracing::Level::TRACE => "TRACE",
}
};
let color_level_str = if visitor.is_crit {
"\x1b[35mCRIT\x1b[0m"
} else {
match *log_level {
tracing::Level::ERROR => "\x1b[31mERROR\x1b[0m",
tracing::Level::WARN => "\x1b[33mWARN\x1b[0m",
tracing::Level::INFO => "\x1b[32mINFO\x1b[0m",
tracing::Level::DEBUG => "\x1b[34mDEBUG\x1b[0m",
tracing::Level::TRACE => "\x1b[35mTRACE\x1b[0m",
}
};
if self.dep_logs {
if self.logfile_format.as_deref() == Some("JSON") {
build_json_log_file(
&visitor,
plain_level_str,
meta,
&ctx,
&self.span_fields,
event,
&mut writer,
);
} else {
build_log_text(
&visitor,
plain_level_str,
&timestamp,
&ctx,
&self.span_fields,
event,
&location,
color_level_str,
self.logfile_color,
&mut writer,
);
}
} else if self.log_format.as_deref() == Some("JSON") {
build_json_log_stdout(&visitor, plain_level_str, &timestamp, &mut writer);
} else {
build_log_text(
&visitor,
plain_level_str,
&timestamp,
&ctx,
&self.span_fields,
event,
&location,
color_level_str,
self.log_color,
&mut writer,
);
}
}
}
/// Cached snapshot of one span's name and recorded fields, keyed by span
/// `Id` in `LoggingLayer::span_fields` so events can render the context of
/// their enclosing spans.
struct SpanData {
    // Span name taken from its static metadata.
    name: String,
    // (field name, pre-formatted value) pairs captured in `on_new_span`.
    fields: Vec<(String, String)>,
}
/// Field visitor that renders each span field to a display string at
/// span-creation time (strings quoted, primitives via `to_string`,
/// everything else via `Debug`).
#[derive(Default)]
struct SpanFieldsExtractor {
    // (field name, pre-formatted value) pairs in recording order.
    fields: Vec<(String, String)>,
}
/// Renders span fields to `(name, value)` string pairs: string values are
/// wrapped in quotes, numeric/bool values use their natural text form, and
/// anything else falls back to its `Debug` representation.
impl tracing_core::field::Visit for SpanFieldsExtractor {
    fn record_str(&mut self, field: &Field, value: &str) {
        let rendered = format!("\"{}\"", value);
        self.fields.push((field.name().to_owned(), rendered));
    }

    fn record_i64(&mut self, field: &Field, value: i64) {
        self.fields.push((field.name().to_owned(), value.to_string()));
    }

    fn record_u64(&mut self, field: &Field, value: u64) {
        self.fields.push((field.name().to_owned(), value.to_string()));
    }

    fn record_bool(&mut self, field: &Field, value: bool) {
        self.fields.push((field.name().to_owned(), value.to_string()));
    }

    // Fallback for every type without a dedicated `record_*` hook.
    fn record_debug(&mut self, field: &Field, value: &dyn std::fmt::Debug) {
        let rendered = format!("{:?}", value);
        self.fields.push((field.name().to_owned(), rendered));
    }
}
/// Field visitor that splits an event's fields into the log message itself,
/// the remaining key/value fields, and a custom `CRIT` flag.
struct LogMessageExtractor {
    // The event's `message` field once recorded; empty until then.
    message: String,
    // All non-`message` fields as (name, pre-formatted value) pairs.
    fields: Vec<(String, String)>,
    // Set when the event carries `error_type = "crit"`.
    is_crit: bool,
}
impl tracing_core::field::Visit for LogMessageExtractor {
fn record_debug(&mut self, _: &tracing_core::Field, value: &dyn std::fmt::Debug) {
self.message = format!("{} {:?}", self.message, value);
fn record_str(&mut self, field: &Field, value: &str) {
if field.name() == "message" {
if self.message.is_empty() {
self.message = value.to_string();
} else {
self.fields
.push(("msg_id".to_string(), format!("\"{}\"", value)));
}
} else if field.name() == "error_type" && value == "crit" {
self.is_crit = true;
} else {
self.fields
.push((field.name().to_string(), format!("\"{}\"", value)));
}
}
fn record_debug(&mut self, field: &Field, value: &dyn std::fmt::Debug) {
if field.name() == "message" {
if self.message.is_empty() {
self.message = format!("{:?}", value);
} else {
self.fields
.push(("msg_id".to_string(), format!("{:?}", value)));
}
} else if field.name() == "error_type" && format!("{:?}", value) == "\"crit\"" {
self.is_crit = true;
} else {
self.fields
.push((field.name().to_string(), format!("{:?}", value)));
}
}
fn record_i64(&mut self, field: &Field, value: i64) {
self.fields
.push((field.name().to_string(), value.to_string()));
}
fn record_u64(&mut self, field: &Field, value: u64) {
self.fields
.push((field.name().to_string(), value.to_string()));
}
fn record_bool(&mut self, field: &Field, value: bool) {
self.fields
.push((field.name().to_string(), value.to_string()));
}
}
/// Returns `true` for byte values that should be stripped from log
/// messages to keep formatting consistent.
///
/// Covered: the C0 controls minus the whitespace/padding bytes (tab `\x09`,
/// newline `\x0a`, carriage return `\x0d`), DEL (`\x7f`), and the
/// `\x81`..=`\x9f` range. `\x80` and printable bytes pass through.
fn is_ascii_control(character: &u8) -> bool {
    let byte = *character;
    byte <= 0x08
        || byte == 0x0b
        || byte == 0x0c
        || (0x0e..=0x1f).contains(&byte)
        || byte == 0x7f
        || (0x81..=0x9f).contains(&byte)
}
/// Serializes one event as a single-line JSON object and writes it to
/// `writer` (the stdout path).
///
/// Keys: `msg`, `level`, `ts`, plus one key per recorded event field —
/// field values are parsed as JSON when possible, otherwise kept as
/// strings. Write failures are reported to stderr, never panicked on.
fn build_json_log_stdout(
    visitor: &LogMessageExtractor,
    plain_level_str: &str,
    timestamp: &str,
    writer: &mut impl Write,
) {
    let mut log_map = Map::new();
    log_map.insert("msg".to_string(), Value::String(visitor.message.clone()));
    log_map.insert(
        "level".to_string(),
        Value::String(plain_level_str.to_string()),
    );
    log_map.insert("ts".to_string(), Value::String(timestamp.to_string()));
    // Borrow the fields instead of cloning the whole vector.
    for (key, val) in &visitor.fields {
        log_map.insert(key.clone(), parse_field(val));
    }
    let json_obj = Value::Object(log_map);
    let output = format!("{}\n", json_obj);
    if let Err(e) = writer.write_all(output.as_bytes()) {
        eprintln!("Failed to write log: {}", e);
    }
}
/// Serializes one event as a single-line JSON object and writes it to the
/// file writer.
///
/// Differs from the stdout variant by using a UTC RFC 3339 timestamp, a
/// `module` key (`module_path:line`), and by merging in the fields of every
/// span enclosing the event (looked up in the shared `span_fields` cache).
fn build_json_log_file<'a, S>(
    visitor: &LogMessageExtractor,
    plain_level_str: &str,
    meta: &tracing::Metadata<'_>,
    ctx: &Context<'_, S>,
    span_fields: &Arc<Mutex<HashMap<Id, SpanData>>>,
    event: &tracing::Event<'_>,
    writer: &mut impl Write,
) where
    S: Subscriber + for<'lookup> LookupSpan<'lookup>,
{
    let utc_timestamp = Utc::now().to_rfc3339_opts(chrono::SecondsFormat::Micros, true);
    let mut log_map = Map::new();
    log_map.insert("msg".to_string(), Value::String(visitor.message.clone()));
    log_map.insert(
        "level".to_string(),
        Value::String(plain_level_str.to_string()),
    );
    log_map.insert("ts".to_string(), Value::String(utc_timestamp));
    let module_path = meta.module_path().unwrap_or("<unknown_module>");
    let line_number = meta
        .line()
        .map_or("<unknown_line>".to_string(), |l| l.to_string());
    log_map.insert(
        "module".to_string(),
        Value::String(format!("{}:{}", module_path, line_number)),
    );
    // Event fields: reuse `parse_field` instead of duplicating its
    // quote-stripping / JSON-parsing logic inline, and borrow rather than
    // cloning the whole vector.
    for (key, val) in &visitor.fields {
        log_map.insert(key.clone(), parse_field(val));
    }
    // Merge fields from every span enclosing the event; skip silently if
    // the cache lock is poisoned.
    if let Some(scope) = ctx.event_scope(event) {
        if let Ok(span_map) = span_fields.lock() {
            for span in scope {
                if let Some(span_data) = span_map.get(&span.id()) {
                    for (key, val) in &span_data.fields {
                        log_map.insert(key.clone(), parse_field(val));
                    }
                }
            }
        }
    }
    let output = format!("{}\n", Value::Object(log_map));
    if let Err(e) = writer.write_all(output.as_bytes()) {
        eprintln!("Failed to write log: {}", e);
    }
}
/// Formats one event as a human-readable text line and writes it to
/// `writer`.
///
/// Layout: `<timestamp> <level> [<location>] <message padded to 44 cols>
/// <event fields> <span fields>`. With `use_color`, the level string keeps
/// its ANSI colors and message/field names are rendered bold.
///
/// Fix over the original: the span-field cache lock is taken once (not
/// re-acquired per enclosing span) and a poisoned lock is recovered via
/// `into_inner` instead of panicking — consistent with `on_new_span`.
#[allow(clippy::too_many_arguments)]
fn build_log_text<'a, S>(
    visitor: &LogMessageExtractor,
    plain_level_str: &str,
    timestamp: &str,
    ctx: &Context<'_, S>,
    span_fields: &Arc<Mutex<HashMap<Id, SpanData>>>,
    event: &tracing::Event<'_>,
    location: &str,
    color_level_str: &str,
    use_color: bool,
    writer: &mut impl Write,
) where
    S: Subscriber + for<'lookup> LookupSpan<'lookup>,
{
    let bold_start = "\x1b[1m";
    let bold_end = "\x1b[0m";
    // Collect (name, fields) for every span enclosing this event.
    let mut collected_span_fields = Vec::new();
    if let Some(scope) = ctx.event_scope(event) {
        let span_fields_map = match span_fields.lock() {
            Ok(guard) => guard,
            Err(poisoned) => poisoned.into_inner(),
        };
        for span in scope {
            if let Some(span_data) = span_fields_map.get(&span.id()) {
                collected_span_fields.push((span_data.name.clone(), span_data.fields.clone()));
            }
        }
    }
    // `event_scope` yields spans innermost-first; reverse so the outermost
    // span's fields print first.
    let mut formatted_spans = String::new();
    for (_, fields) in collected_span_fields.iter().rev() {
        for (i, (field_name, field_value)) in fields.iter().enumerate() {
            if i > 0 && !visitor.fields.is_empty() {
                formatted_spans.push_str(", ");
            }
            if use_color {
                formatted_spans.push_str(&format!(
                    "{}{}{}: {}",
                    bold_start, field_name, bold_end, field_value
                ));
            } else {
                formatted_spans.push_str(&format!("{}: {}", field_name, field_value));
            }
        }
    }
    let level_str = if use_color {
        color_level_str
    } else {
        plain_level_str
    };
    // Pad the message so fields line up across lines; when colored, widen
    // the pad target by the ANSI escape length (those bytes are invisible).
    let fixed_message_width = 44;
    let message_len = visitor.message.len();
    let message_content = if use_color {
        format!("{}{}{}", bold_start, visitor.message, bold_end)
    } else {
        visitor.message.clone()
    };
    let padded_message = if message_len < fixed_message_width {
        let extra_color_len = if use_color {
            bold_start.len() + bold_end.len()
        } else {
            0
        };
        format!(
            "{:<width$}",
            message_content,
            width = fixed_message_width + extra_color_len
        )
    } else {
        message_content
    };
    let mut formatted_fields = String::new();
    for (i, (field_name, field_value)) in visitor.fields.iter().enumerate() {
        if i > 0 {
            formatted_fields.push_str(", ");
        }
        if use_color {
            formatted_fields.push_str(&format!(
                "{}{}{}: {}",
                bold_start, field_name, bold_end, field_value
            ));
        } else {
            formatted_fields.push_str(&format!("{}: {}", field_name, field_value));
        }
        // Trailing comma separates event fields from the span-field block.
        if i == visitor.fields.len() - 1 && !collected_span_fields.is_empty() {
            formatted_fields.push(',');
        }
    }
    let full_message = if formatted_fields.is_empty() {
        padded_message
    } else {
        format!("{} {}", padded_message, formatted_fields)
    };
    let message = if location.is_empty() {
        format!(
            "{} {} {} {}\n",
            timestamp, level_str, full_message, formatted_spans
        )
    } else {
        format!(
            "{} {} {} {} {}\n",
            timestamp, level_str, location, full_message, formatted_spans
        )
    };
    if let Err(e) = writer.write_all(message.as_bytes()) {
        eprintln!("Failed to write log: {}", e);
    }
}
/// Converts a pre-formatted field value back into a JSON `Value`.
///
/// A value wrapped in double quotes is unwrapped first; the result is then
/// parsed as JSON where possible, falling back to a plain JSON string.
fn parse_field(val: &str) -> Value {
    let cleaned = val
        .strip_prefix('"')
        .and_then(|rest| rest.strip_suffix('"'))
        .unwrap_or(val);
    serde_json::from_str(cleaned).unwrap_or_else(|_| Value::String(cleaned.to_owned()))
}

View File

@@ -1,51 +0,0 @@
use std::env;
use std::process::Command;
use std::process::Output;
/// Runs `cmd_line` through the platform shell (`cmd /C` on Windows,
/// `sh -c` elsewhere) and returns the finished process output.
fn run_cmd(cmd_line: &str) -> Result<Output, std::io::Error> {
    let (shell, flag) = if cfg!(target_os = "windows") {
        ("cmd", "/C")
    } else {
        ("sh", "-c")
    };
    Command::new(shell).args([flag, cmd_line]).output()
}
/// End-to-end check: with the `logging/test_logger` feature enabled, the
/// fixture crate's test run must print the emitted `INFO hi, ` line.
#[test]
fn test_test_logger_with_feature_test_logger() {
    let test_dir = env::current_dir()
        .unwrap()
        .join("..")
        .join("..")
        .join("testing")
        .join("test-test_logger");
    let cmd_line = format!(
        "cd {} && cargo test --features logging/test_logger",
        test_dir.to_str().unwrap()
    );
    let output = run_cmd(&cmd_line);
    // Assert output data DOES contain "INFO hi, "
    let data = String::from_utf8(output.unwrap().stderr).unwrap();
    println!("data={}", data);
    assert!(data.contains("INFO hi, "));
}
/// Counterpart check: without the `test_logger` feature the logger stays
/// silent, so the `INFO hi, ` line must be absent from the test output.
#[test]
fn test_test_logger_no_features() {
    // Test without features
    let test_dir = env::current_dir()
        .unwrap()
        .join("..")
        .join("..")
        .join("testing")
        .join("test-test_logger");
    let cmd_line = format!("cd {} && cargo test", test_dir.to_str().unwrap());
    let output = run_cmd(&cmd_line);
    // Assert output data does NOT contain "INFO hi, "
    let data = String::from_utf8(output.unwrap().stderr).unwrap();
    println!("data={}", data);
    assert!(!data.contains("INFO hi, "));
}

View File

@@ -15,7 +15,7 @@ reqwest = { workspace = true }
sensitive_url = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
slog = { workspace = true }
store = { workspace = true }
task_executor = { workspace = true }
tokio = { workspace = true }
tracing = { workspace = true }

View File

@@ -9,9 +9,9 @@ use reqwest::{IntoUrl, Response};
pub use reqwest::{StatusCode, Url};
use sensitive_url::SensitiveUrl;
use serde::{Deserialize, Serialize};
use slog::{debug, error, info};
use task_executor::TaskExecutor;
use tokio::time::{interval_at, Instant};
use tracing::{debug, error, info};
use types::*;
pub use types::ProcessType;
@@ -69,11 +69,10 @@ pub struct MonitoringHttpClient {
freezer_db_path: Option<PathBuf>,
update_period: Duration,
monitoring_endpoint: SensitiveUrl,
log: slog::Logger,
}
impl MonitoringHttpClient {
pub fn new(config: &Config, log: slog::Logger) -> Result<Self, String> {
pub fn new(config: &Config) -> Result<Self, String> {
Ok(Self {
client: reqwest::Client::new(),
db_path: config.db_path.clone(),
@@ -83,7 +82,6 @@ impl MonitoringHttpClient {
),
monitoring_endpoint: SensitiveUrl::parse(&config.monitoring_endpoint)
.map_err(|e| format!("Invalid monitoring endpoint: {:?}", e))?,
log,
})
}
@@ -111,10 +109,9 @@ impl MonitoringHttpClient {
);
info!(
self.log,
"Starting monitoring API";
"endpoint" => %self.monitoring_endpoint,
"update_period" => format!("{}s", self.update_period.as_secs()),
endpoint = %self.monitoring_endpoint,
update_period = format!("{}s", self.update_period.as_secs()),
"Starting monitoring API"
);
let update_future = async move {
@@ -122,10 +119,10 @@ impl MonitoringHttpClient {
interval.tick().await;
match self.send_metrics(&processes).await {
Ok(()) => {
debug!(self.log, "Metrics sent to remote server"; "endpoint" => %self.monitoring_endpoint);
debug!(endpoint = %self.monitoring_endpoint, "Metrics sent to remote server");
}
Err(e) => {
error!(self.log, "Failed to send metrics to remote endpoint"; "error" => %e)
error!(error = %e, "Failed to send metrics to remote endpoint")
}
}
}
@@ -187,18 +184,16 @@ impl MonitoringHttpClient {
for process in processes {
match self.get_metrics(process).await {
Err(e) => error!(
self.log,
"Failed to get metrics";
"process_type" => ?process,
"error" => %e
process_type = ?process,
error = %e,
"Failed to get metrics"
),
Ok(metric) => metrics.push(metric),
}
}
info!(
self.log,
"Sending metrics to remote endpoint";
"endpoint" => %self.monitoring_endpoint
endpoint = %self.monitoring_endpoint,
"Sending metrics to remote endpoint"
);
self.post(self.monitoring_endpoint.full.clone(), &metrics)
.await

View File

@@ -4,17 +4,9 @@ version = "0.1.0"
authors = ["Sigma Prime <contact@sigmaprime.io>"]
edition = { workspace = true }
[features]
default = ["slog"]
slog = ["dep:slog", "dep:sloggers", "dep:logging"]
tracing = ["dep:tracing"]
[dependencies]
async-channel = { workspace = true }
futures = { workspace = true }
logging = { workspace = true, optional = true }
metrics = { workspace = true }
slog = { workspace = true, optional = true }
sloggers = { workspace = true, optional = true }
tokio = { workspace = true, features = ["rt-multi-thread", "macros"] }
tracing = { workspace = true, optional = true }
tracing = { workspace = true }

View File

@@ -1,20 +1,14 @@
mod metrics;
#[cfg(not(feature = "tracing"))]
pub mod test_utils;
use futures::channel::mpsc::Sender;
use futures::prelude::*;
use std::sync::Weak;
use tokio::runtime::{Handle, Runtime};
use tracing::{debug, instrument};
pub use tokio::task::JoinHandle;
// Set up logging framework
#[cfg(not(feature = "tracing"))]
use slog::{debug, o};
#[cfg(feature = "tracing")]
use tracing::debug;
/// Provides a reason when Lighthouse is shut down.
#[derive(Copy, Clone, Debug, PartialEq)]
pub enum ShutdownReason {
@@ -85,8 +79,9 @@ pub struct TaskExecutor {
///
/// The task must provide a reason for shutting down.
signal_tx: Sender<ShutdownReason>,
#[cfg(not(feature = "tracing"))]
log: slog::Logger,
/// The name of the service for inclusion in the logger output.
service_name: String,
}
impl TaskExecutor {
@@ -97,39 +92,29 @@ impl TaskExecutor {
/// This function should only be used during testing. In production, prefer to obtain an
/// instance of `Self` via a `environment::RuntimeContext` (see the `lighthouse/environment`
/// crate).
#[instrument(parent = None,level = "info", fields(service = service_name), name = "task_executor", skip_all)]
pub fn new<T: Into<HandleProvider>>(
handle: T,
exit: async_channel::Receiver<()>,
#[cfg(not(feature = "tracing"))] log: slog::Logger,
signal_tx: Sender<ShutdownReason>,
service_name: String,
) -> Self {
Self {
handle_provider: handle.into(),
exit,
signal_tx,
#[cfg(not(feature = "tracing"))]
log,
service_name,
}
}
/// Clones the task executor adding a service name.
#[cfg(not(feature = "tracing"))]
#[instrument(parent = None,level = "info", fields(service = service_name), name = "task_executor", skip_all)]
pub fn clone_with_name(&self, service_name: String) -> Self {
TaskExecutor {
handle_provider: self.handle_provider.clone(),
exit: self.exit.clone(),
signal_tx: self.signal_tx.clone(),
log: self.log.new(o!("service" => service_name)),
}
}
/// Clones the task executor adding a service name.
#[cfg(feature = "tracing")]
pub fn clone(&self) -> Self {
TaskExecutor {
handle_provider: self.handle_provider.clone(),
exit: self.exit.clone(),
signal_tx: self.signal_tx.clone(),
service_name,
}
}
@@ -139,6 +124,7 @@ impl TaskExecutor {
/// The purpose of this function is to create a compile error if some function which previously
/// returned `()` starts returning something else. Such a case may otherwise result in
/// accidental error suppression.
#[instrument(parent = None,level = "info", fields(service = self.service_name), name = "task_executor", skip_all)]
pub fn spawn_ignoring_error(
&self,
task: impl Future<Output = Result<(), ()>> + Send + 'static,
@@ -150,6 +136,7 @@ impl TaskExecutor {
/// Spawn a task to monitor the completion of another task.
///
/// If the other task exits by panicking, then the monitor task will shut down the executor.
#[instrument(parent = None,level = "info", fields(service = self.service_name), name = "task_executor", skip_all)]
fn spawn_monitor<R: Send>(
&self,
task_handle: impl Future<Output = Result<R, tokio::task::JoinError>> + Send + 'static,
@@ -168,13 +155,7 @@ impl TaskExecutor {
drop(timer);
});
} else {
#[cfg(not(feature = "tracing"))]
debug!(
self.log,
"Couldn't spawn monitor task. Runtime shutting down"
);
#[cfg(feature = "tracing")]
debug!("Couldn't spawn monitor task. Runtime shutting down");
debug!("Couldn't spawn monitor task. Runtime shutting down")
}
}
@@ -187,6 +168,7 @@ impl TaskExecutor {
/// of a panic, the executor will be shut down via `self.signal_tx`.
///
/// This function generates prometheus metrics on number of tasks and task duration.
#[instrument(parent = None,level = "info", fields(service = self.service_name), name = "task_executor", skip_all)]
pub fn spawn(&self, task: impl Future<Output = ()> + Send + 'static, name: &'static str) {
if let Some(task_handle) = self.spawn_handle(task, name) {
self.spawn_monitor(task_handle, name)
@@ -202,6 +184,7 @@ impl TaskExecutor {
/// This is useful in cases where the future to be spawned needs to do additional cleanup work when
/// the task is completed/canceled (e.g. writing local variables to disk) or the task is created from
/// some framework which does its own cleanup (e.g. a hyper server).
#[instrument(parent = None,level = "info", fields(service = self.service_name), name = "task_executor", skip_all)]
pub fn spawn_without_exit(
&self,
task: impl Future<Output = ()> + Send + 'static,
@@ -218,9 +201,6 @@ impl TaskExecutor {
if let Some(handle) = self.handle() {
handle.spawn(future);
} else {
#[cfg(not(feature = "tracing"))]
debug!(self.log, "Couldn't spawn task. Runtime shutting down");
#[cfg(feature = "tracing")]
debug!("Couldn't spawn task. Runtime shutting down");
}
}
@@ -242,16 +222,13 @@ impl TaskExecutor {
/// The task is cancelled when the corresponding async-channel is dropped.
///
/// This function generates prometheus metrics on number of tasks and task duration.
#[instrument(parent = None,level = "info", fields(service = self.service_name), name = "task_executor", skip_all)]
pub fn spawn_handle<R: Send + 'static>(
&self,
task: impl Future<Output = R> + Send + 'static,
name: &'static str,
) -> Option<tokio::task::JoinHandle<Option<R>>> {
let exit = self.exit();
#[cfg(not(feature = "tracing"))]
let log = self.log.clone();
if let Some(int_gauge) = metrics::get_int_gauge(&metrics::ASYNC_TASKS_COUNT, &[name]) {
// Task is shutdown before it completes if `exit` receives
let int_gauge_1 = int_gauge.clone();
@@ -262,9 +239,6 @@ impl TaskExecutor {
let result = match future::select(Box::pin(task), exit).await {
future::Either::Left((value, _)) => Some(value),
future::Either::Right(_) => {
#[cfg(not(feature = "tracing"))]
debug!(log, "Async task shutdown, exit received"; "task" => name);
#[cfg(feature = "tracing")]
debug!(task = name, "Async task shutdown, exit received");
None
}
@@ -273,9 +247,6 @@ impl TaskExecutor {
result
}))
} else {
#[cfg(not(feature = "tracing"))]
debug!(log, "Couldn't spawn task. Runtime shutting down");
#[cfg(feature = "tracing")]
debug!("Couldn't spawn task. Runtime shutting down");
None
}
@@ -290,6 +261,7 @@ impl TaskExecutor {
/// The Future returned behaves like the standard JoinHandle which can return an error if the
/// task failed.
/// This function generates prometheus metrics on number of tasks and task duration.
#[instrument(parent = None,level = "info", fields(service = self.service_name), name = "task_executor", skip_all)]
pub fn spawn_blocking_handle<F, R>(
&self,
task: F,
@@ -299,18 +271,12 @@ impl TaskExecutor {
F: FnOnce() -> R + Send + 'static,
R: Send + 'static,
{
#[cfg(not(feature = "tracing"))]
let log = self.log.clone();
let timer = metrics::start_timer_vec(&metrics::BLOCKING_TASKS_HISTOGRAM, &[name]);
metrics::inc_gauge_vec(&metrics::BLOCKING_TASKS_COUNT, &[name]);
let join_handle = if let Some(handle) = self.handle() {
handle.spawn_blocking(task)
} else {
#[cfg(not(feature = "tracing"))]
debug!(self.log, "Couldn't spawn task. Runtime shutting down");
#[cfg(feature = "tracing")]
debug!("Couldn't spawn task. Runtime shutting down");
return None;
};
@@ -319,9 +285,6 @@ impl TaskExecutor {
let result = match join_handle.await {
Ok(result) => Ok(result),
Err(error) => {
#[cfg(not(feature = "tracing"))]
debug!(log, "Blocking task ended unexpectedly"; "error" => %error);
#[cfg(feature = "tracing")]
debug!(%error, "Blocking task ended unexpectedly");
Err(error)
}
@@ -347,6 +310,7 @@ impl TaskExecutor {
/// a `tokio` context present in the thread-local storage due to some `rayon` funkiness. Talk to
/// @paulhauner if you plan to use this function in production. He has put metrics in here to
/// track any use of it, so don't think you can pull a sneaky one on him.
#[instrument(parent = None,level = "info", fields(service = self.service_name), name = "task_executor", skip_all)]
pub fn block_on_dangerous<F: Future>(
&self,
future: F,
@@ -354,44 +318,20 @@ impl TaskExecutor {
) -> Option<F::Output> {
let timer = metrics::start_timer_vec(&metrics::BLOCK_ON_TASKS_HISTOGRAM, &[name]);
metrics::inc_gauge_vec(&metrics::BLOCK_ON_TASKS_COUNT, &[name]);
#[cfg(not(feature = "tracing"))]
let log = self.log.clone();
let handle = self.handle()?;
let exit = self.exit();
#[cfg(not(feature = "tracing"))]
debug!(
log,
"Starting block_on task";
"name" => name
);
#[cfg(feature = "tracing")]
debug!(name, "Starting block_on task");
handle.block_on(async {
let output = tokio::select! {
output = future => {
#[cfg(not(feature = "tracing"))]
debug!(
log,
"Completed block_on task";
"name" => name
);
#[cfg(feature = "tracing")]
debug!(
name,
"Completed block_on task"
);
Some(output)
},
}
_ = exit => {
#[cfg(not(feature = "tracing"))]
debug!(
log,
"Cancelled block_on task";
"name" => name,
);
#[cfg(feature = "tracing")]
debug!(
name,
"Cancelled block_on task"
@@ -406,6 +346,7 @@ impl TaskExecutor {
}
/// Returns a `Handle` to the current runtime.
#[instrument(parent = None,level = "info", fields(service = self.service_name), name = "task_executor", skip_all)]
pub fn handle(&self) -> Option<Handle> {
self.handle_provider.handle()
}
@@ -420,13 +361,8 @@ impl TaskExecutor {
}
/// Get a channel to request shutting down.
#[instrument(parent = None,level = "info", fields(service = self.service_name), name = "task_executor", skip_all)]
pub fn shutdown_sender(&self) -> Sender<ShutdownReason> {
self.signal_tx.clone()
}
/// Returns a reference to the logger.
#[cfg(not(feature = "tracing"))]
pub fn log(&self) -> &slog::Logger {
&self.log
}
}

View File

@@ -1,6 +1,4 @@
use crate::TaskExecutor;
pub use logging::test_logger;
use slog::Logger;
use std::sync::Arc;
use tokio::runtime;
@@ -16,7 +14,6 @@ pub struct TestRuntime {
runtime: Option<Arc<tokio::runtime::Runtime>>,
_runtime_shutdown: async_channel::Sender<()>,
pub task_executor: TaskExecutor,
pub log: Logger,
}
impl Default for TestRuntime {
@@ -26,7 +23,6 @@ impl Default for TestRuntime {
fn default() -> Self {
let (runtime_shutdown, exit) = async_channel::bounded(1);
let (shutdown_tx, _) = futures::channel::mpsc::channel(1);
let log = test_logger();
let (runtime, handle) = if let Ok(handle) = runtime::Handle::try_current() {
(None, handle)
@@ -41,13 +37,12 @@ impl Default for TestRuntime {
(Some(runtime), handle)
};
let task_executor = TaskExecutor::new(handle, exit, log.clone(), shutdown_tx);
let task_executor = TaskExecutor::new(handle, exit, shutdown_tx, "test".to_string());
Self {
runtime,
_runtime_shutdown: runtime_shutdown,
task_executor,
log,
}
}
}
@@ -59,10 +54,3 @@ impl Drop for TestRuntime {
}
}
}
impl TestRuntime {
pub fn set_logger(&mut self, log: Logger) {
self.log = log.clone();
self.task_executor.log = log;
}
}