Replace tracing::debug! with debug! same for other levels (#8300)

Just visual clean-up, making logging statements look uniform. There's no reason to invoke `tracing::debug!` with the full path instead of importing and using `debug!`. If we ever need to migrate our logging lib in the future, it would make things easier too.


  


Co-Authored-By: dapplion <35266934+dapplion@users.noreply.github.com>

Co-Authored-By: Jimmy Chen <jchen.tc@gmail.com>

Co-Authored-By: Michael Sproul <michaelsproul@users.noreply.github.com>
This commit is contained in:
Lion - dapplion
2026-01-08 04:04:44 -03:00
committed by GitHub
parent 0706e62f52
commit 6166ad2eb2
7 changed files with 26 additions and 24 deletions

View File

@@ -9,7 +9,7 @@ use network_utils::enr_ext::EnrExt;
use parking_lot::RwLock; use parking_lot::RwLock;
use std::collections::HashSet; use std::collections::HashSet;
use std::sync::Arc; use std::sync::Arc;
use tracing::error; use tracing::{debug, error};
use types::data_column_custody_group::{compute_subnets_from_custody_group, get_custody_groups}; use types::data_column_custody_group::{compute_subnets_from_custody_group, get_custody_groups};
use types::{ChainSpec, ColumnIndex, DataColumnSubnetId, EthSpec}; use types::{ChainSpec, ColumnIndex, DataColumnSubnetId, EthSpec};
@@ -79,7 +79,7 @@ impl<E: EthSpec> NetworkGlobals<E> {
sampling_subnets.extend(subnets); sampling_subnets.extend(subnets);
} }
tracing::debug!( debug!(
cgc = custody_group_count, cgc = custody_group_count,
?sampling_subnets, ?sampling_subnets,
"Starting node with custody params" "Starting node with custody params"

View File

@@ -14,7 +14,7 @@ use std::sync::Arc;
use std::time::{Duration, Instant}; use std::time::{Duration, Instant};
use tokio::runtime::Runtime; use tokio::runtime::Runtime;
use tokio::time::sleep; use tokio::time::sleep;
use tracing::{Instrument, debug, error, info_span, warn}; use tracing::{Instrument, debug, error, info, info_span, warn};
use types::{ use types::{
BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockBellatrix, BeaconBlockHeader, BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockBellatrix, BeaconBlockHeader,
BlobSidecar, ChainSpec, DataColumnSidecar, DataColumnsByRootIdentifier, EmptyBlock, Epoch, BlobSidecar, ChainSpec, DataColumnSidecar, DataColumnsByRootIdentifier, EmptyBlock, Epoch,
@@ -1041,7 +1041,7 @@ fn test_tcp_columns_by_root_chunked_rpc() {
loop { loop {
match sender.next_event().await { match sender.next_event().await {
NetworkEvent::PeerConnectedOutgoing(peer_id) => { NetworkEvent::PeerConnectedOutgoing(peer_id) => {
tracing::info!("Sending RPC"); info!("Sending RPC");
tokio::time::sleep(Duration::from_secs(1)).await; tokio::time::sleep(Duration::from_secs(1)).await;
sender sender
.send_request(peer_id, AppRequestId::Router, rpc_request.clone()) .send_request(peer_id, AppRequestId::Router, rpc_request.clone())
@@ -1055,7 +1055,7 @@ fn test_tcp_columns_by_root_chunked_rpc() {
Response::DataColumnsByRoot(Some(sidecar)) => { Response::DataColumnsByRoot(Some(sidecar)) => {
assert_eq!(sidecar, data_column.clone()); assert_eq!(sidecar, data_column.clone());
messages_received += 1; messages_received += 1;
tracing::info!("Chunk received"); info!("Chunk received");
} }
Response::DataColumnsByRoot(None) => { Response::DataColumnsByRoot(None) => {
// should be exactly messages_to_send // should be exactly messages_to_send
@@ -1082,7 +1082,7 @@ fn test_tcp_columns_by_root_chunked_rpc() {
} => { } => {
if request_type == rpc_request { if request_type == rpc_request {
// send the response // send the response
tracing::info!("Receiver got request"); info!("Receiver got request");
for _ in 0..messages_to_send { for _ in 0..messages_to_send {
receiver.send_response( receiver.send_response(
@@ -1090,7 +1090,7 @@ fn test_tcp_columns_by_root_chunked_rpc() {
inbound_request_id, inbound_request_id,
rpc_response.clone(), rpc_response.clone(),
); );
tracing::info!("Sending message"); info!("Sending message");
} }
// send the stream termination // send the stream termination
receiver.send_response( receiver.send_response(
@@ -1098,11 +1098,11 @@ fn test_tcp_columns_by_root_chunked_rpc() {
inbound_request_id, inbound_request_id,
Response::DataColumnsByRoot(None), Response::DataColumnsByRoot(None),
); );
tracing::info!("Send stream term"); info!("Send stream term");
} }
} }
e => { e => {
tracing::info!(?e, "Got event"); info!(?e, "Got event");
} // Ignore other events } // Ignore other events
} }
} }
@@ -1186,7 +1186,7 @@ fn test_tcp_columns_by_range_chunked_rpc() {
loop { loop {
match sender.next_event().await { match sender.next_event().await {
NetworkEvent::PeerConnectedOutgoing(peer_id) => { NetworkEvent::PeerConnectedOutgoing(peer_id) => {
tracing::info!("Sending RPC"); info!("Sending RPC");
sender sender
.send_request(peer_id, AppRequestId::Router, rpc_request.clone()) .send_request(peer_id, AppRequestId::Router, rpc_request.clone())
.unwrap(); .unwrap();
@@ -1199,7 +1199,7 @@ fn test_tcp_columns_by_range_chunked_rpc() {
Response::DataColumnsByRange(Some(sidecar)) => { Response::DataColumnsByRange(Some(sidecar)) => {
assert_eq!(sidecar, data_column.clone()); assert_eq!(sidecar, data_column.clone());
messages_received += 1; messages_received += 1;
tracing::info!("Chunk received"); info!("Chunk received");
} }
Response::DataColumnsByRange(None) => { Response::DataColumnsByRange(None) => {
// should be exactly messages_to_send // should be exactly messages_to_send
@@ -1226,7 +1226,7 @@ fn test_tcp_columns_by_range_chunked_rpc() {
} => { } => {
if request_type == rpc_request { if request_type == rpc_request {
// send the response // send the response
tracing::info!("Receiver got request"); info!("Receiver got request");
for _ in 0..messages_to_send { for _ in 0..messages_to_send {
receiver.send_response( receiver.send_response(
@@ -1234,7 +1234,7 @@ fn test_tcp_columns_by_range_chunked_rpc() {
inbound_request_id, inbound_request_id,
rpc_response.clone(), rpc_response.clone(),
); );
tracing::info!("Sending message"); info!("Sending message");
} }
// send the stream termination // send the stream termination
receiver.send_response( receiver.send_response(
@@ -1242,7 +1242,7 @@ fn test_tcp_columns_by_range_chunked_rpc() {
inbound_request_id, inbound_request_id,
Response::DataColumnsByRange(None), Response::DataColumnsByRange(None),
); );
tracing::info!("Send stream term"); info!("Send stream term");
} }
} }
_ => {} // Ignore other events _ => {} // Ignore other events

View File

@@ -346,7 +346,7 @@ impl<T: BeaconChainTypes> BackFillSync<T> {
} }
} }
CouplingError::BlobPeerFailure(msg) => { CouplingError::BlobPeerFailure(msg) => {
tracing::debug!(?batch_id, msg, "Blob peer failure"); debug!(?batch_id, msg, "Blob peer failure");
} }
CouplingError::InternalError(msg) => { CouplingError::InternalError(msg) => {
error!(?batch_id, msg, "Block components coupling internal error"); error!(?batch_id, msg, "Block components coupling internal error");

View File

@@ -357,7 +357,7 @@ impl<E: EthSpec> RangeBlockComponentsRequest<E> {
// we request the data from. // we request the data from.
// If there are duplicated indices, its likely a peer sending us the same index multiple times. // If there are duplicated indices, its likely a peer sending us the same index multiple times.
// However we can still proceed even if there are extra columns, just log an error. // However we can still proceed even if there are extra columns, just log an error.
tracing::debug!(?block_root, ?index, "Repeated column for block_root"); debug!(?block_root, ?index, "Repeated column for block_root");
continue; continue;
} }
} }
@@ -408,7 +408,7 @@ impl<E: EthSpec> RangeBlockComponentsRequest<E> {
if !data_columns_by_index.is_empty() { if !data_columns_by_index.is_empty() {
let remaining_indices = data_columns_by_index.keys().collect::<Vec<_>>(); let remaining_indices = data_columns_by_index.keys().collect::<Vec<_>>();
// log the error but don't return an error, we can still progress with extra columns. // log the error but don't return an error, we can still progress with extra columns.
tracing::debug!( debug!(
?block_root, ?block_root,
?remaining_indices, ?remaining_indices,
"Not all columns consumed for block" "Not all columns consumed for block"
@@ -428,7 +428,7 @@ impl<E: EthSpec> RangeBlockComponentsRequest<E> {
let remaining_roots = data_columns_by_block.keys().collect::<Vec<_>>(); let remaining_roots = data_columns_by_block.keys().collect::<Vec<_>>();
// log the error but don't return an error, we can still progress with responses. // log the error but don't return an error, we can still progress with responses.
// this is most likely an internal error with overrequesting or a client bug. // this is most likely an internal error with overrequesting or a client bug.
tracing::debug!(?remaining_roots, "Not all columns consumed for block"); debug!(?remaining_roots, "Not all columns consumed for block");
} }
Ok(rpc_blocks) Ok(rpc_blocks)

View File

@@ -18,7 +18,7 @@ use std::collections::{BTreeMap, HashSet, btree_map::Entry};
use std::hash::{Hash, Hasher}; use std::hash::{Hash, Hasher};
use std::marker::PhantomData; use std::marker::PhantomData;
use strum::IntoStaticStr; use strum::IntoStaticStr;
use tracing::{Span, debug, instrument, warn}; use tracing::{Span, debug, error, instrument, warn};
use types::{ColumnIndex, Epoch, EthSpec, Hash256, Slot}; use types::{ColumnIndex, Epoch, EthSpec, Hash256, Slot};
/// Blocks are downloaded in batches from peers. This constant specifies how many epochs worth of /// Blocks are downloaded in batches from peers. This constant specifies how many epochs worth of
@@ -942,10 +942,10 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
} }
} }
CouplingError::BlobPeerFailure(msg) => { CouplingError::BlobPeerFailure(msg) => {
tracing::debug!(?batch_id, msg, "Blob peer failure"); debug!(?batch_id, msg, "Blob peer failure");
} }
CouplingError::InternalError(msg) => { CouplingError::InternalError(msg) => {
tracing::error!(?batch_id, msg, "Block components coupling internal error"); error!(?batch_id, msg, "Block components coupling internal error");
} }
} }
} }

View File

@@ -14,6 +14,7 @@ use ssz::Encode;
use std::net::{SocketAddrV4, SocketAddrV6}; use std::net::{SocketAddrV4, SocketAddrV6};
use std::time::Duration; use std::time::Duration;
use std::{marker::PhantomData, path::PathBuf}; use std::{marker::PhantomData, path::PathBuf};
use tracing::{info, warn};
use types::EthSpec; use types::EthSpec;
/// A set of configuration parameters for the bootnode, established from CLI arguments. /// A set of configuration parameters for the bootnode, established from CLI arguments.
@@ -117,7 +118,7 @@ impl<E: EthSpec> BootNodeConfig<E> {
let genesis_state_root = genesis_state let genesis_state_root = genesis_state
.canonical_root() .canonical_root()
.map_err(|e| format!("Error hashing genesis state: {e:?}"))?; .map_err(|e| format!("Error hashing genesis state: {e:?}"))?;
tracing::info!(root = ?genesis_state_root, "Genesis state found"); info!(root = ?genesis_state_root, "Genesis state found");
let enr_fork = spec.enr_fork_id::<E>( let enr_fork = spec.enr_fork_id::<E>(
types::Slot::from(0u64), types::Slot::from(0u64),
genesis_state.genesis_validators_root(), genesis_state.genesis_validators_root(),
@@ -125,7 +126,7 @@ impl<E: EthSpec> BootNodeConfig<E> {
Some(enr_fork.as_ssz_bytes()) Some(enr_fork.as_ssz_bytes())
} else { } else {
tracing::warn!("No genesis state provided. No Eth2 field added to the ENR"); warn!("No genesis state provided. No Eth2 field added to the ENR");
None None
} }
}; };

View File

@@ -730,7 +730,8 @@ fn run<E: EthSpec>(
#[cfg(all(feature = "modern", target_arch = "x86_64"))] #[cfg(all(feature = "modern", target_arch = "x86_64"))]
if !std::is_x86_feature_detected!("adx") { if !std::is_x86_feature_detected!("adx") {
tracing::warn!( use tracing::warn;
warn!(
advice = "If you get a SIGILL, please try Lighthouse portable build", advice = "If you get a SIGILL, please try Lighthouse portable build",
"CPU seems incompatible with optimized Lighthouse build" "CPU seems incompatible with optimized Lighthouse build"
); );