Integrate tracing (#6339)

Tracing Integration
- [reference](5bbf1859e9/projects/project-ideas.md#L297)


- [x] replace slog & log with tracing throughout the codebase
- [x] implement custom crit log
- [x] make relevant changes in the formatter
- [x] replace sloggers
- [x] re-write SSE logging components

cc: @macladson @eserilev
This commit is contained in:
ThreeHrSleep
2025-03-13 04:01:05 +05:30
committed by GitHub
parent f23f984f85
commit d60c24ef1c
241 changed files with 9485 additions and 9328 deletions

View File

@@ -2,16 +2,15 @@ use crate::beacon_chain::{BeaconChainTypes, OP_POOL_DB_KEY};
use operation_pool::{
PersistedOperationPool, PersistedOperationPoolV15, PersistedOperationPoolV20,
};
use slog::{debug, info, Logger};
use std::sync::Arc;
use store::{Error, HotColdDB, KeyValueStoreOp, StoreItem};
use tracing::{debug, info};
use types::Attestation;
pub fn upgrade_to_v20<T: BeaconChainTypes>(
db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
log: Logger,
) -> Result<Vec<KeyValueStoreOp>, Error> {
info!(log, "Upgrading from v19 to v20");
info!("Upgrading from v19 to v20");
// Load a V15 op pool and transform it to V20.
let Some(PersistedOperationPoolV15::<T::EthSpec> {
@@ -24,7 +23,7 @@ pub fn upgrade_to_v20<T: BeaconChainTypes>(
capella_bls_change_broadcast_indices,
}) = db.get_item(&OP_POOL_DB_KEY)?
else {
debug!(log, "Nothing to do, no operation pool stored");
debug!("Nothing to do, no operation pool stored");
return Ok(vec![]);
};
@@ -52,9 +51,8 @@ pub fn upgrade_to_v20<T: BeaconChainTypes>(
pub fn downgrade_from_v20<T: BeaconChainTypes>(
db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
log: Logger,
) -> Result<Vec<KeyValueStoreOp>, Error> {
info!(log, "Downgrading from v20 to v19");
info!("Downgrading from v20 to v19");
// Load a V20 op pool and transform it to V15.
let Some(PersistedOperationPoolV20::<T::EthSpec> {
@@ -67,7 +65,7 @@ pub fn downgrade_from_v20<T: BeaconChainTypes>(
capella_bls_change_broadcast_indices,
}) = db.get_item(&OP_POOL_DB_KEY)?
else {
debug!(log, "Nothing to do, no operation pool stored");
debug!("Nothing to do, no operation pool stored");
return Ok(vec![]);
};
@@ -77,7 +75,10 @@ pub fn downgrade_from_v20<T: BeaconChainTypes>(
if let Attestation::Base(attestation) = attestation.into() {
Some((attestation, indices))
} else {
info!(log, "Dropping attestation during downgrade"; "reason" => "not a base attestation");
info!(
reason = "not a base attestation",
"Dropping attestation during downgrade"
);
None
}
})
@@ -88,7 +89,10 @@ pub fn downgrade_from_v20<T: BeaconChainTypes>(
.filter_map(|slashing| match slashing.try_into() {
Ok(slashing) => Some(slashing),
Err(_) => {
info!(log, "Dropping attester slashing during downgrade"; "reason" => "not a base attester slashing");
info!(
reason = "not a base attester slashing",
"Dropping attester slashing during downgrade"
);
None
}
})

View File

@@ -1,18 +1,17 @@
use crate::beacon_chain::BeaconChainTypes;
use crate::validator_pubkey_cache::DatabasePubkey;
use slog::{info, Logger};
use ssz::{Decode, Encode};
use std::sync::Arc;
use store::{DBColumn, Error, HotColdDB, KeyValueStore, KeyValueStoreOp, StoreItem};
use tracing::info;
use types::{Hash256, PublicKey};
const LOG_EVERY: usize = 200_000;
pub fn upgrade_to_v21<T: BeaconChainTypes>(
db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
log: Logger,
) -> Result<Vec<KeyValueStoreOp>, Error> {
info!(log, "Upgrading from v20 to v21");
info!("Upgrading from v20 to v21");
let mut ops = vec![];
@@ -29,22 +28,20 @@ pub fn upgrade_to_v21<T: BeaconChainTypes>(
if i > 0 && i % LOG_EVERY == 0 {
info!(
log,
"Public key decompression in progress";
"keys_decompressed" => i
keys_decompressed = i,
"Public key decompression in progress"
);
}
}
info!(log, "Public key decompression complete");
info!("Public key decompression complete");
Ok(ops)
}
pub fn downgrade_from_v21<T: BeaconChainTypes>(
db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
log: Logger,
) -> Result<Vec<KeyValueStoreOp>, Error> {
info!(log, "Downgrading from v21 to v20");
info!("Downgrading from v21 to v20");
let mut ops = vec![];
@@ -67,15 +64,11 @@ pub fn downgrade_from_v21<T: BeaconChainTypes>(
));
if i > 0 && i % LOG_EVERY == 0 {
info!(
log,
"Public key compression in progress";
"keys_compressed" => i
);
info!(keys_compressed = i, "Public key compression in progress");
}
}
info!(log, "Public key compression complete");
info!("Public key compression complete");
Ok(ops)
}

View File

@@ -1,5 +1,4 @@
use crate::beacon_chain::BeaconChainTypes;
use slog::{info, Logger};
use std::sync::Arc;
use store::chunked_iter::ChunkedVectorIter;
use store::{
@@ -10,6 +9,7 @@ use store::{
partial_beacon_state::PartialBeaconState,
AnchorInfo, DBColumn, Error, HotColdDB, KeyValueStore, KeyValueStoreOp,
};
use tracing::info;
use types::{BeaconState, Hash256, Slot};
const LOG_EVERY: usize = 200_000;
@@ -40,9 +40,8 @@ fn load_old_schema_frozen_state<T: BeaconChainTypes>(
pub fn upgrade_to_v22<T: BeaconChainTypes>(
db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
genesis_state_root: Option<Hash256>,
log: Logger,
) -> Result<(), Error> {
info!(log, "Upgrading from v21 to v22");
info!("Upgrading from v21 to v22");
let old_anchor = db.get_anchor_info();
@@ -71,9 +70,8 @@ pub fn upgrade_to_v22<T: BeaconChainTypes>(
// this write.
if split_slot > 0 {
info!(
log,
"Re-storing genesis state";
"state_root" => ?genesis_state_root,
state_root = ?genesis_state_root,
"Re-storing genesis state"
);
db.store_cold_state(&genesis_state_root, &genesis_state, &mut cold_ops)?;
}
@@ -87,7 +85,6 @@ pub fn upgrade_to_v22<T: BeaconChainTypes>(
oldest_block_slot,
split_slot,
&mut cold_ops,
&log,
)?;
// Commit this first batch of non-destructive cold database ops.
@@ -107,14 +104,13 @@ pub fn upgrade_to_v22<T: BeaconChainTypes>(
db.store_schema_version_atomically(SchemaVersion(22), hot_ops)?;
// Finally, clean up the old-format data from the freezer database.
delete_old_schema_freezer_data::<T>(&db, &log)?;
delete_old_schema_freezer_data::<T>(&db)?;
Ok(())
}
pub fn delete_old_schema_freezer_data<T: BeaconChainTypes>(
db: &Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
log: &Logger,
) -> Result<(), Error> {
let mut cold_ops = vec![];
@@ -140,11 +136,7 @@ pub fn delete_old_schema_freezer_data<T: BeaconChainTypes>(
}
let delete_ops = cold_ops.len();
info!(
log,
"Deleting historic states";
"delete_ops" => delete_ops,
);
info!(delete_ops, "Deleting historic states");
db.cold_db.do_atomically(cold_ops)?;
// In order to reclaim space, we need to compact the freezer DB as well.
@@ -159,13 +151,11 @@ pub fn write_new_schema_block_roots<T: BeaconChainTypes>(
oldest_block_slot: Slot,
split_slot: Slot,
cold_ops: &mut Vec<KeyValueStoreOp>,
log: &Logger,
) -> Result<(), Error> {
info!(
log,
"Starting beacon block root migration";
"oldest_block_slot" => oldest_block_slot,
"genesis_block_root" => ?genesis_block_root,
%oldest_block_slot,
?genesis_block_root,
"Starting beacon block root migration"
);
// Store the genesis block root if it would otherwise not be stored.
@@ -196,9 +186,8 @@ pub fn write_new_schema_block_roots<T: BeaconChainTypes>(
if i > 0 && i % LOG_EVERY == 0 {
info!(
log,
"Beacon block root migration in progress";
"roots_migrated" => i
roots_migrated = i,
"Beacon block root migration in progress"
);
}
}