mirror of
https://github.com/sigp/lighthouse.git
synced 2026-03-16 11:22:56 +00:00
Integrate tracing (#6339)
Tracing Integration
- [reference](5bbf1859e9/projects/project-ideas.md#L297)
- [x] replace slog & log with tracing throughout the codebase
- [x] implement custom crit log
- [x] make relevant changes in the formatter
- [x] replace sloggers
- [x] rewrite SSE logging components
cc: @macladson @eserilev
This commit is contained in:
@@ -22,7 +22,6 @@ use lru::LruCache;
|
||||
use parking_lot::{Mutex, RwLock};
|
||||
use safe_arith::SafeArith;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use slog::{debug, error, info, trace, warn, Logger};
|
||||
use ssz::{Decode, Encode};
|
||||
use ssz_derive::{Decode, Encode};
|
||||
use state_processing::{
|
||||
@@ -37,6 +36,7 @@ use std::num::NonZeroUsize;
|
||||
use std::path::Path;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
use tracing::{debug, error, info, trace, warn};
|
||||
use types::data_column_sidecar::{ColumnIndex, DataColumnSidecar, DataColumnSidecarList};
|
||||
use types::*;
|
||||
use zstd::{Decoder, Encoder};
|
||||
@@ -81,8 +81,6 @@ pub struct HotColdDB<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> {
|
||||
historic_state_cache: Mutex<HistoricStateCache<E>>,
|
||||
/// Chain spec.
|
||||
pub(crate) spec: Arc<ChainSpec>,
|
||||
/// Logger.
|
||||
pub log: Logger,
|
||||
/// Mere vessel for E.
|
||||
_phantom: PhantomData<E>,
|
||||
}
|
||||
@@ -203,7 +201,6 @@ impl<E: EthSpec> HotColdDB<E, MemoryStore<E>, MemoryStore<E>> {
|
||||
pub fn open_ephemeral(
|
||||
config: StoreConfig,
|
||||
spec: Arc<ChainSpec>,
|
||||
log: Logger,
|
||||
) -> Result<HotColdDB<E, MemoryStore<E>, MemoryStore<E>>, Error> {
|
||||
config.verify::<E>()?;
|
||||
|
||||
@@ -226,7 +223,6 @@ impl<E: EthSpec> HotColdDB<E, MemoryStore<E>, MemoryStore<E>> {
|
||||
config,
|
||||
hierarchy,
|
||||
spec,
|
||||
log,
|
||||
_phantom: PhantomData,
|
||||
};
|
||||
|
||||
@@ -246,7 +242,6 @@ impl<E: EthSpec> HotColdDB<E, BeaconNodeBackend<E>, BeaconNodeBackend<E>> {
|
||||
migrate_schema: impl FnOnce(Arc<Self>, SchemaVersion, SchemaVersion) -> Result<(), Error>,
|
||||
config: StoreConfig,
|
||||
spec: Arc<ChainSpec>,
|
||||
log: Logger,
|
||||
) -> Result<Arc<Self>, Error> {
|
||||
config.verify::<E>()?;
|
||||
|
||||
@@ -272,10 +267,8 @@ impl<E: EthSpec> HotColdDB<E, BeaconNodeBackend<E>, BeaconNodeBackend<E>> {
|
||||
config,
|
||||
hierarchy,
|
||||
spec,
|
||||
log,
|
||||
_phantom: PhantomData,
|
||||
};
|
||||
|
||||
// Load the config from disk but don't error on a failed read because the config itself may
|
||||
// need migrating.
|
||||
let _ = db.load_config();
|
||||
@@ -287,10 +280,9 @@ impl<E: EthSpec> HotColdDB<E, BeaconNodeBackend<E>, BeaconNodeBackend<E>> {
|
||||
*db.split.write() = split;
|
||||
|
||||
info!(
|
||||
db.log,
|
||||
"Hot-Cold DB initialized";
|
||||
"split_slot" => split.slot,
|
||||
"split_state" => ?split.state_root
|
||||
%split.slot,
|
||||
split_state = ?split.state_root,
|
||||
"Hot-Cold DB initialized"
|
||||
);
|
||||
}
|
||||
|
||||
@@ -352,11 +344,10 @@ impl<E: EthSpec> HotColdDB<E, BeaconNodeBackend<E>, BeaconNodeBackend<E>> {
|
||||
)?;
|
||||
|
||||
info!(
|
||||
db.log,
|
||||
"Blob DB initialized";
|
||||
"path" => ?blobs_db_path,
|
||||
"oldest_blob_slot" => ?new_blob_info.oldest_blob_slot,
|
||||
"oldest_data_column_slot" => ?new_data_column_info.oldest_data_column_slot,
|
||||
path = ?blobs_db_path,
|
||||
oldest_blob_slot = ?new_blob_info.oldest_blob_slot,
|
||||
oldest_data_column_slot = ?new_data_column_info.oldest_data_column_slot,
|
||||
"Blob DB initialized"
|
||||
);
|
||||
|
||||
// Ensure that the schema version of the on-disk database matches the software.
|
||||
@@ -364,10 +355,9 @@ impl<E: EthSpec> HotColdDB<E, BeaconNodeBackend<E>, BeaconNodeBackend<E>> {
|
||||
let db = Arc::new(db);
|
||||
if let Some(schema_version) = db.load_schema_version()? {
|
||||
debug!(
|
||||
db.log,
|
||||
"Attempting schema migration";
|
||||
"from_version" => schema_version.as_u64(),
|
||||
"to_version" => CURRENT_SCHEMA_VERSION.as_u64(),
|
||||
from_version = schema_version.as_u64(),
|
||||
to_version = CURRENT_SCHEMA_VERSION.as_u64(),
|
||||
"Attempting schema migration"
|
||||
);
|
||||
migrate_schema(db.clone(), schema_version, CURRENT_SCHEMA_VERSION)?;
|
||||
} else {
|
||||
@@ -385,10 +375,9 @@ impl<E: EthSpec> HotColdDB<E, BeaconNodeBackend<E>, BeaconNodeBackend<E>> {
|
||||
if let Ok(hierarchy_config) = disk_config.hierarchy_config() {
|
||||
if &db.config.hierarchy_config != hierarchy_config {
|
||||
info!(
|
||||
db.log,
|
||||
"Updating historic state config";
|
||||
"previous_config" => %hierarchy_config,
|
||||
"new_config" => %db.config.hierarchy_config,
|
||||
previous_config = %hierarchy_config,
|
||||
new_config = %db.config.hierarchy_config,
|
||||
"Updating historic state config"
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -400,9 +389,9 @@ impl<E: EthSpec> HotColdDB<E, BeaconNodeBackend<E>, BeaconNodeBackend<E>> {
|
||||
|
||||
// If configured, run a foreground compaction pass.
|
||||
if db.config.compact_on_init {
|
||||
info!(db.log, "Running foreground compaction");
|
||||
info!("Running foreground compaction");
|
||||
db.compact()?;
|
||||
info!(db.log, "Foreground compaction complete");
|
||||
info!("Foreground compaction complete");
|
||||
}
|
||||
|
||||
Ok(db)
|
||||
@@ -991,12 +980,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
|
||||
let split = self.split.read_recursive();
|
||||
|
||||
if state_root != split.state_root {
|
||||
warn!(
|
||||
self.log,
|
||||
"State cache missed";
|
||||
"state_root" => ?state_root,
|
||||
"block_root" => ?block_root,
|
||||
);
|
||||
warn!(?state_root, ?block_root, "State cache missed");
|
||||
}
|
||||
|
||||
// Sanity check max-slot against the split slot.
|
||||
@@ -1025,10 +1009,9 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
|
||||
.lock()
|
||||
.put_state(*state_root, block_root, state)?;
|
||||
debug!(
|
||||
self.log,
|
||||
"Cached state";
|
||||
"state_root" => ?state_root,
|
||||
"slot" => state.slot(),
|
||||
?state_root,
|
||||
slot = %state.slot(),
|
||||
"Cached state"
|
||||
);
|
||||
}
|
||||
drop(split);
|
||||
@@ -1308,9 +1291,9 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
|
||||
Ok(BlobSidecarListFromRoot::NoBlobs | BlobSidecarListFromRoot::NoRoot) => {}
|
||||
Err(e) => {
|
||||
error!(
|
||||
self.log, "Error getting blobs";
|
||||
"block_root" => %block_root,
|
||||
"error" => ?e
|
||||
%block_root,
|
||||
error = ?e,
|
||||
"Error getting blobs"
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -1333,9 +1316,9 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
|
||||
}
|
||||
Err(e) => {
|
||||
error!(
|
||||
self.log, "Error getting data columns";
|
||||
"block_root" => %block_root,
|
||||
"error" => ?e
|
||||
%block_root,
|
||||
error = ?e,
|
||||
"Error getting data columns"
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -1363,10 +1346,9 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
|
||||
// Rollback on failure
|
||||
if let Err(e) = tx_res {
|
||||
error!(
|
||||
self.log,
|
||||
"Database write failed";
|
||||
"error" => ?e,
|
||||
"action" => "reverting blob DB changes"
|
||||
error = ?e,
|
||||
action = "reverting blob DB changes",
|
||||
"Database write failed"
|
||||
);
|
||||
let mut blob_cache_ops = blob_cache_ops;
|
||||
for op in blob_cache_ops.iter_mut() {
|
||||
@@ -1475,10 +1457,9 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
|
||||
.put_state(*state_root, block_root, state)?
|
||||
{
|
||||
debug!(
|
||||
self.log,
|
||||
"Skipping storage of cached state";
|
||||
"slot" => state.slot(),
|
||||
"state_root" => ?state_root
|
||||
slot = %state.slot(),
|
||||
?state_root,
|
||||
"Skipping storage of cached state"
|
||||
);
|
||||
return Ok(());
|
||||
}
|
||||
@@ -1486,10 +1467,9 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
|
||||
// On the epoch boundary, store the full state.
|
||||
if state.slot() % E::slots_per_epoch() == 0 {
|
||||
trace!(
|
||||
self.log,
|
||||
"Storing full state on epoch boundary";
|
||||
"slot" => state.slot().as_u64(),
|
||||
"state_root" => format!("{:?}", state_root)
|
||||
slot = %state.slot().as_u64(),
|
||||
?state_root,
|
||||
"Storing full state on epoch boundary"
|
||||
);
|
||||
store_full_state(state_root, state, ops)?;
|
||||
}
|
||||
@@ -1512,11 +1492,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
|
||||
|
||||
if *state_root != self.get_split_info().state_root {
|
||||
// Do not warn on start up when loading the split state.
|
||||
warn!(
|
||||
self.log,
|
||||
"State cache missed";
|
||||
"state_root" => ?state_root,
|
||||
);
|
||||
warn!(?state_root, "State cache missed");
|
||||
}
|
||||
|
||||
let state_from_disk = self.load_hot_state(state_root)?;
|
||||
@@ -1528,10 +1504,9 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
|
||||
.lock()
|
||||
.put_state(*state_root, block_root, &state)?;
|
||||
debug!(
|
||||
self.log,
|
||||
"Cached state";
|
||||
"state_root" => ?state_root,
|
||||
"slot" => state.slot(),
|
||||
?state_root,
|
||||
slot = %state.slot(),
|
||||
"Cached state"
|
||||
);
|
||||
Ok(Some(state))
|
||||
} else {
|
||||
@@ -1595,10 +1570,9 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
|
||||
.put_state(state_root, latest_block_root, state)?
|
||||
{
|
||||
debug!(
|
||||
self.log,
|
||||
"Cached ancestor state";
|
||||
"state_root" => ?state_root,
|
||||
"slot" => slot,
|
||||
?state_root,
|
||||
%slot,
|
||||
"Cached ancestor state"
|
||||
);
|
||||
}
|
||||
Ok(())
|
||||
@@ -1650,35 +1624,31 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
|
||||
match self.hierarchy.storage_strategy(slot)? {
|
||||
StorageStrategy::ReplayFrom(from) => {
|
||||
debug!(
|
||||
self.log,
|
||||
"Storing cold state";
|
||||
"strategy" => "replay",
|
||||
"from_slot" => from,
|
||||
"slot" => state.slot(),
|
||||
strategy = "replay",
|
||||
from_slot = %from,
|
||||
%slot,
|
||||
"Storing cold state",
|
||||
);
|
||||
// Already have persisted the state summary, don't persist anything else
|
||||
}
|
||||
StorageStrategy::Snapshot => {
|
||||
debug!(
|
||||
self.log,
|
||||
"Storing cold state";
|
||||
"strategy" => "snapshot",
|
||||
"slot" => state.slot(),
|
||||
strategy = "snapshot",
|
||||
%slot,
|
||||
"Storing cold state"
|
||||
);
|
||||
self.store_cold_state_as_snapshot(state, ops)?;
|
||||
}
|
||||
StorageStrategy::DiffFrom(from) => {
|
||||
debug!(
|
||||
self.log,
|
||||
"Storing cold state";
|
||||
"strategy" => "diff",
|
||||
"from_slot" => from,
|
||||
"slot" => state.slot(),
|
||||
strategy = "diff",
|
||||
from_slot = %from,
|
||||
%slot,
|
||||
"Storing cold state"
|
||||
);
|
||||
self.store_cold_state_as_diff(state, from, ops)?;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -1837,10 +1807,9 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
|
||||
metrics::start_timer(&metrics::STORE_BEACON_COLD_BUILD_BEACON_CACHES_TIME);
|
||||
base_state.build_all_caches(&self.spec)?;
|
||||
debug!(
|
||||
self.log,
|
||||
"Built caches for historic state";
|
||||
"target_slot" => slot,
|
||||
"build_time_ms" => metrics::stop_timer_with_duration(cache_timer).as_millis()
|
||||
target_slot = %slot,
|
||||
build_time_ms = metrics::stop_timer_with_duration(cache_timer).as_millis(),
|
||||
"Built caches for historic state"
|
||||
);
|
||||
self.historic_state_cache
|
||||
.lock()
|
||||
@@ -1862,10 +1831,9 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
|
||||
})?;
|
||||
let state = self.replay_blocks(base_state, blocks, slot, Some(state_root_iter), None)?;
|
||||
debug!(
|
||||
self.log,
|
||||
"Replayed blocks for historic state";
|
||||
"target_slot" => slot,
|
||||
"replay_time_ms" => metrics::stop_timer_with_duration(replay_timer).as_millis()
|
||||
target_slot = %slot,
|
||||
replay_time_ms = metrics::stop_timer_with_duration(replay_timer).as_millis(),
|
||||
"Replayed blocks for historic state"
|
||||
);
|
||||
|
||||
self.historic_state_cache
|
||||
@@ -1893,9 +1861,8 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
|
||||
fn load_hdiff_buffer_for_slot(&self, slot: Slot) -> Result<(Slot, HDiffBuffer), Error> {
|
||||
if let Some(buffer) = self.historic_state_cache.lock().get_hdiff_buffer(slot) {
|
||||
debug!(
|
||||
self.log,
|
||||
"Hit hdiff buffer cache";
|
||||
"slot" => slot
|
||||
%slot,
|
||||
"Hit hdiff buffer cache"
|
||||
);
|
||||
metrics::inc_counter(&metrics::STORE_BEACON_HDIFF_BUFFER_CACHE_HIT);
|
||||
return Ok((slot, buffer));
|
||||
@@ -1919,10 +1886,9 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
|
||||
|
||||
let load_time_ms = t.elapsed().as_millis();
|
||||
debug!(
|
||||
self.log,
|
||||
"Cached state and hdiff buffer";
|
||||
"load_time_ms" => load_time_ms,
|
||||
"slot" => slot
|
||||
load_time_ms,
|
||||
%slot,
|
||||
"Cached state and hdiff buffer"
|
||||
);
|
||||
|
||||
Ok((slot, buffer))
|
||||
@@ -1945,10 +1911,9 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
|
||||
|
||||
let load_time_ms = t.elapsed().as_millis();
|
||||
debug!(
|
||||
self.log,
|
||||
"Cached hdiff buffer";
|
||||
"load_time_ms" => load_time_ms,
|
||||
"slot" => slot
|
||||
load_time_ms,
|
||||
%slot,
|
||||
"Cached hdiff buffer"
|
||||
);
|
||||
|
||||
Ok((slot, buffer))
|
||||
@@ -2052,9 +2017,8 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
|
||||
.map(|block_replayer| {
|
||||
if have_state_root_iterator && block_replayer.state_root_miss() {
|
||||
warn!(
|
||||
self.log,
|
||||
"State root cache miss during block replay";
|
||||
"slot" => target_slot,
|
||||
slot = %target_slot,
|
||||
"State root cache miss during block replay"
|
||||
);
|
||||
}
|
||||
block_replayer.into_state()
|
||||
@@ -2180,11 +2144,6 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
|
||||
&self.spec
|
||||
}
|
||||
|
||||
/// Get a reference to the `Logger` used by the database.
|
||||
pub fn logger(&self) -> &Logger {
|
||||
&self.log
|
||||
}
|
||||
|
||||
/// Fetch a copy of the current split slot from memory.
|
||||
pub fn get_split_slot(&self) -> Slot {
|
||||
self.split.read_recursive().slot
|
||||
@@ -2579,17 +2538,9 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
|
||||
columns.extend(previous_schema_columns);
|
||||
|
||||
for column in columns {
|
||||
info!(
|
||||
self.log,
|
||||
"Starting compaction";
|
||||
"column" => ?column
|
||||
);
|
||||
info!(?column, "Starting compaction");
|
||||
self.cold_db.compact_column(column)?;
|
||||
info!(
|
||||
self.log,
|
||||
"Finishing compaction";
|
||||
"column" => ?column
|
||||
);
|
||||
info!(?column, "Finishing compaction");
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
@@ -2690,16 +2641,15 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
|
||||
})??;
|
||||
|
||||
if already_pruned && !force {
|
||||
info!(self.log, "Execution payloads are pruned");
|
||||
info!("Execution payloads are pruned");
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
// Iterate block roots backwards to the Bellatrix fork or the anchor slot, whichever comes
|
||||
// first.
|
||||
warn!(
|
||||
self.log,
|
||||
"Pruning finalized payloads";
|
||||
"info" => "you may notice degraded I/O performance while this runs"
|
||||
info = "you may notice degraded I/O performance while this runs",
|
||||
"Pruning finalized payloads"
|
||||
);
|
||||
let anchor_info = self.get_anchor_info();
|
||||
|
||||
@@ -2713,58 +2663,41 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
|
||||
Ok(tuple) => tuple,
|
||||
Err(e) => {
|
||||
warn!(
|
||||
self.log,
|
||||
"Stopping payload pruning early";
|
||||
"error" => ?e,
|
||||
error = ?e,
|
||||
"Stopping payload pruning early"
|
||||
);
|
||||
break;
|
||||
}
|
||||
};
|
||||
|
||||
if slot < bellatrix_fork_slot {
|
||||
info!(
|
||||
self.log,
|
||||
"Payload pruning reached Bellatrix boundary";
|
||||
);
|
||||
info!("Payload pruning reached Bellatrix boundary");
|
||||
break;
|
||||
}
|
||||
|
||||
if Some(block_root) != last_pruned_block_root
|
||||
&& self.execution_payload_exists(&block_root)?
|
||||
{
|
||||
debug!(
|
||||
self.log,
|
||||
"Pruning execution payload";
|
||||
"slot" => slot,
|
||||
"block_root" => ?block_root,
|
||||
);
|
||||
debug!(%slot, ?block_root, "Pruning execution payload");
|
||||
last_pruned_block_root = Some(block_root);
|
||||
ops.push(StoreOp::DeleteExecutionPayload(block_root));
|
||||
}
|
||||
|
||||
if slot <= anchor_info.oldest_block_slot {
|
||||
info!(
|
||||
self.log,
|
||||
"Payload pruning reached anchor oldest block slot";
|
||||
"slot" => slot
|
||||
);
|
||||
info!(%slot, "Payload pruning reached anchor oldest block slot");
|
||||
break;
|
||||
}
|
||||
}
|
||||
let payloads_pruned = ops.len();
|
||||
self.do_atomically_with_block_and_blobs_cache(ops)?;
|
||||
info!(
|
||||
self.log,
|
||||
"Execution payload pruning complete";
|
||||
"payloads_pruned" => payloads_pruned,
|
||||
);
|
||||
info!(%payloads_pruned, "Execution payload pruning complete");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Try to prune blobs, approximating the current epoch from the split slot.
|
||||
pub fn try_prune_most_blobs(&self, force: bool) -> Result<(), Error> {
|
||||
let Some(deneb_fork_epoch) = self.spec.deneb_fork_epoch else {
|
||||
debug!(self.log, "Deneb fork is disabled");
|
||||
debug!("Deneb fork is disabled");
|
||||
return Ok(());
|
||||
};
|
||||
// The current epoch is >= split_epoch + 2. It could be greater if the database is
|
||||
@@ -2795,7 +2728,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
|
||||
data_availability_boundary: Epoch,
|
||||
) -> Result<(), Error> {
|
||||
if self.spec.deneb_fork_epoch.is_none() {
|
||||
debug!(self.log, "Deneb fork is disabled");
|
||||
debug!("Deneb fork is disabled");
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
@@ -2804,17 +2737,13 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
|
||||
let epochs_per_blob_prune = self.get_config().epochs_per_blob_prune;
|
||||
|
||||
if !force && !pruning_enabled {
|
||||
debug!(
|
||||
self.log,
|
||||
"Blob pruning is disabled";
|
||||
"prune_blobs" => pruning_enabled
|
||||
);
|
||||
debug!(prune_blobs = pruning_enabled, "Blob pruning is disabled");
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let blob_info = self.get_blob_info();
|
||||
let Some(oldest_blob_slot) = blob_info.oldest_blob_slot else {
|
||||
error!(self.log, "Slot of oldest blob is not known");
|
||||
error!("Slot of oldest blob is not known");
|
||||
return Err(HotColdDBError::BlobPruneLogicError.into());
|
||||
};
|
||||
|
||||
@@ -2837,13 +2766,12 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
|
||||
|
||||
if !force && !should_prune || !can_prune {
|
||||
debug!(
|
||||
self.log,
|
||||
"Blobs are pruned";
|
||||
"oldest_blob_slot" => oldest_blob_slot,
|
||||
"data_availability_boundary" => data_availability_boundary,
|
||||
"split_slot" => split.slot,
|
||||
"end_epoch" => end_epoch,
|
||||
"start_epoch" => start_epoch,
|
||||
%oldest_blob_slot,
|
||||
%data_availability_boundary,
|
||||
%split.slot,
|
||||
%end_epoch,
|
||||
%start_epoch,
|
||||
"Blobs are pruned"
|
||||
);
|
||||
return Ok(());
|
||||
}
|
||||
@@ -2852,21 +2780,19 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
|
||||
let anchor = self.get_anchor_info();
|
||||
if oldest_blob_slot < anchor.oldest_block_slot {
|
||||
error!(
|
||||
self.log,
|
||||
"Oldest blob is older than oldest block";
|
||||
"oldest_blob_slot" => oldest_blob_slot,
|
||||
"oldest_block_slot" => anchor.oldest_block_slot
|
||||
%oldest_blob_slot,
|
||||
oldest_block_slot = %anchor.oldest_block_slot,
|
||||
"Oldest blob is older than oldest block"
|
||||
);
|
||||
return Err(HotColdDBError::BlobPruneLogicError.into());
|
||||
}
|
||||
|
||||
// Iterate block roots forwards from the oldest blob slot.
|
||||
debug!(
|
||||
self.log,
|
||||
"Pruning blobs";
|
||||
"start_epoch" => start_epoch,
|
||||
"end_epoch" => end_epoch,
|
||||
"data_availability_boundary" => data_availability_boundary,
|
||||
%start_epoch,
|
||||
%end_epoch,
|
||||
%data_availability_boundary,
|
||||
"Pruning blobs"
|
||||
);
|
||||
|
||||
// We collect block roots of deleted blobs in memory. Even for 10y of blob history this
|
||||
@@ -2922,10 +2848,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
|
||||
let op = self.compare_and_set_blob_info(blob_info, new_blob_info)?;
|
||||
self.do_atomically_with_block_and_blobs_cache(vec![StoreOp::KeyValueOp(op)])?;
|
||||
|
||||
debug!(
|
||||
self.log,
|
||||
"Blob pruning complete";
|
||||
);
|
||||
debug!("Blob pruning complete");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -2995,18 +2918,13 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
|
||||
// If we just deleted the genesis state, re-store it using the current* schema.
|
||||
if self.get_split_slot() > 0 {
|
||||
info!(
|
||||
self.log,
|
||||
"Re-storing genesis state";
|
||||
"state_root" => ?genesis_state_root,
|
||||
state_root = ?genesis_state_root,
|
||||
"Re-storing genesis state"
|
||||
);
|
||||
self.store_cold_state(&genesis_state_root, genesis_state, &mut cold_ops)?;
|
||||
}
|
||||
|
||||
info!(
|
||||
self.log,
|
||||
"Deleting historic states";
|
||||
"delete_ops" => delete_ops,
|
||||
);
|
||||
info!(delete_ops, "Deleting historic states");
|
||||
self.cold_db.do_atomically(cold_ops)?;
|
||||
|
||||
// In order to reclaim space, we need to compact the freezer DB as well.
|
||||
@@ -3022,9 +2940,8 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
|
||||
pub fn prune_old_hot_states(&self) -> Result<(), Error> {
|
||||
let split = self.get_split_info();
|
||||
debug!(
|
||||
self.log,
|
||||
"Database state pruning started";
|
||||
"split_slot" => split.slot,
|
||||
%split.slot,
|
||||
"Database state pruning started"
|
||||
);
|
||||
let mut state_delete_batch = vec![];
|
||||
for res in self
|
||||
@@ -3046,11 +2963,10 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
|
||||
"non-canonical"
|
||||
};
|
||||
debug!(
|
||||
self.log,
|
||||
"Deleting state";
|
||||
"state_root" => ?state_root,
|
||||
"slot" => summary.slot,
|
||||
"reason" => reason,
|
||||
?state_root,
|
||||
slot = %summary.slot,
|
||||
%reason,
|
||||
"Deleting state"
|
||||
);
|
||||
state_delete_batch.push(StoreOp::DeleteState(state_root, Some(summary.slot)));
|
||||
}
|
||||
@@ -3058,11 +2974,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
|
||||
}
|
||||
let num_deleted_states = state_delete_batch.len();
|
||||
self.do_atomically_with_block_and_blobs_cache(state_delete_batch)?;
|
||||
debug!(
|
||||
self.log,
|
||||
"Database state pruning complete";
|
||||
"num_deleted_states" => num_deleted_states,
|
||||
);
|
||||
debug!(%num_deleted_states, "Database state pruning complete");
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
@@ -3075,9 +2987,8 @@ pub fn migrate_database<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>(
|
||||
finalized_state: &BeaconState<E>,
|
||||
) -> Result<(), Error> {
|
||||
debug!(
|
||||
store.log,
|
||||
"Freezer migration started";
|
||||
"slot" => finalized_state.slot()
|
||||
slot = %finalized_state.slot(),
|
||||
"Freezer migration started"
|
||||
);
|
||||
|
||||
// 0. Check that the migration is sensible.
|
||||
@@ -3153,7 +3064,7 @@ pub fn migrate_database<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>(
|
||||
// stored (see `STATE_UPPER_LIMIT_NO_RETAIN`). Make an exception for the genesis state
|
||||
// which always needs to be copied from the hot DB to the freezer and should not be deleted.
|
||||
if slot != 0 && slot < anchor_info.state_upper_limit {
|
||||
debug!(store.log, "Pruning finalized state"; "slot" => slot);
|
||||
debug!(%slot, "Pruning finalized state");
|
||||
continue;
|
||||
}
|
||||
|
||||
@@ -3213,10 +3124,9 @@ pub fn migrate_database<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>(
|
||||
// place in code.
|
||||
if latest_split_slot != current_split_slot {
|
||||
error!(
|
||||
store.log,
|
||||
"Race condition detected: Split point changed while moving states to the freezer";
|
||||
"previous split slot" => current_split_slot,
|
||||
"current split slot" => latest_split_slot,
|
||||
previous_split_slot = %current_split_slot,
|
||||
current_split_slot = %latest_split_slot,
|
||||
"Race condition detected: Split point changed while moving states to the freezer"
|
||||
);
|
||||
|
||||
// Assume the freezing procedure will be retried in case this happens.
|
||||
@@ -3252,9 +3162,8 @@ pub fn migrate_database<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>(
|
||||
)?;
|
||||
|
||||
debug!(
|
||||
store.log,
|
||||
"Freezer migration complete";
|
||||
"slot" => finalized_state.slot()
|
||||
slot = %finalized_state.slot(),
|
||||
"Freezer migration complete"
|
||||
);
|
||||
|
||||
Ok(())
|
||||
|
||||
Reference in New Issue
Block a user