Prevent writing to state cache when migrating the database (#7067)

* Add an `update_cache` flag to `get_state` to allow more granular control over when we write to the cache

* State cache tweaks

- add state-cache-headroom flag to control pruning
- prune old epoch boundary states ahead of mid-epoch states
- never prune head block's state
- avoid caching ancestor states unless they are on an epoch boundary

---------

Co-authored-by: Michael Sproul <michael@sigmaprime.io>
Author: Eitan Seri-Levi
Date: 2025-03-03 17:15:12 -07:00
Committed via the GitHub web interface
parent 8f62b1934a
commit defdc595fc
28 changed files with 178 additions and 77 deletions

View File

@@ -73,7 +73,7 @@ pub struct HotColdDB<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> {
/// Cache of beacon states.
///
/// LOCK ORDERING: this lock must always be locked *after* the `split` if both are required.
state_cache: Mutex<StateCache<E>>,
pub state_cache: Mutex<StateCache<E>>,
/// Cache of historic states and hierarchical diff buffers.
///
/// This cache is never pruned. It is only populated in response to historical queries from the
@@ -218,7 +218,10 @@ impl<E: EthSpec> HotColdDB<E, MemoryStore<E>, MemoryStore<E>> {
blobs_db: MemoryStore::open(),
hot_db: MemoryStore::open(),
block_cache: Mutex::new(BlockCache::new(config.block_cache_size)),
state_cache: Mutex::new(StateCache::new(config.state_cache_size)),
state_cache: Mutex::new(StateCache::new(
config.state_cache_size,
config.state_cache_headroom,
)),
historic_state_cache: Mutex::new(HistoricStateCache::new(
config.hdiff_buffer_cache_size,
config.historic_state_cache_size,
@@ -264,7 +267,10 @@ impl<E: EthSpec> HotColdDB<E, BeaconNodeBackend<E>, BeaconNodeBackend<E>> {
cold_db: BeaconNodeBackend::open(&config, cold_path)?,
hot_db,
block_cache: Mutex::new(BlockCache::new(config.block_cache_size)),
state_cache: Mutex::new(StateCache::new(config.state_cache_size)),
state_cache: Mutex::new(StateCache::new(
config.state_cache_size,
config.state_cache_headroom,
)),
historic_state_cache: Mutex::new(HistoricStateCache::new(
config.hdiff_buffer_cache_size,
config.historic_state_cache_size,
@@ -945,6 +951,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
&self,
state_root: &Hash256,
slot: Option<Slot>,
update_cache: bool,
) -> Result<Option<BeaconState<E>>, Error> {
metrics::inc_counter(&metrics::BEACON_STATE_GET_COUNT);
@@ -956,10 +963,10 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
// chain. This way we avoid returning a state that doesn't match `state_root`.
self.load_cold_state(state_root)
} else {
self.get_hot_state(state_root)
self.get_hot_state(state_root, update_cache)
}
} else {
match self.get_hot_state(state_root)? {
match self.get_hot_state(state_root, update_cache)? {
Some(state) => Ok(Some(state)),
None => self.load_cold_state(state_root),
}
@@ -1015,7 +1022,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
state_root
};
let mut opt_state = self
.load_hot_state(&state_root)?
.load_hot_state(&state_root, true)?
.map(|(state, _block_root)| (state_root, state));
if let Some((state_root, state)) = opt_state.as_mut() {
@@ -1126,6 +1133,8 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
/// Load an epoch boundary state by using the hot state summary look-up.
///
/// Will fall back to the cold DB if a hot state summary is not found.
///
/// NOTE: only used in tests at the moment
pub fn load_epoch_boundary_state(
&self,
state_root: &Hash256,
@@ -1136,9 +1145,11 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
}) = self.load_hot_state_summary(state_root)?
{
// NOTE: minor inefficiency here because we load an unnecessary hot state summary
let (state, _) = self.load_hot_state(&epoch_boundary_state_root)?.ok_or(
HotColdDBError::MissingEpochBoundaryState(epoch_boundary_state_root),
)?;
let (state, _) = self
.load_hot_state(&epoch_boundary_state_root, true)?
.ok_or(HotColdDBError::MissingEpochBoundaryState(
epoch_boundary_state_root,
))?;
Ok(Some(state))
} else {
// Try the cold DB
@@ -1505,7 +1516,11 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
}
/// Get a post-finalization state from the database or store.
pub fn get_hot_state(&self, state_root: &Hash256) -> Result<Option<BeaconState<E>>, Error> {
pub fn get_hot_state(
&self,
state_root: &Hash256,
update_cache: bool,
) -> Result<Option<BeaconState<E>>, Error> {
if let Some(state) = self.state_cache.lock().get_by_state_root(*state_root) {
return Ok(Some(state));
}
@@ -1519,20 +1534,30 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
);
}
let state_from_disk = self.load_hot_state(state_root)?;
let state_from_disk = self.load_hot_state(state_root, update_cache)?;
if let Some((mut state, block_root)) = state_from_disk {
state.update_tree_hash_cache()?;
state.build_all_caches(&self.spec)?;
self.state_cache
.lock()
.put_state(*state_root, block_root, &state)?;
debug!(
self.log,
"Cached state";
"state_root" => ?state_root,
"slot" => state.slot(),
);
if update_cache {
state.update_tree_hash_cache()?;
state.build_all_caches(&self.spec)?;
self.state_cache
.lock()
.put_state(*state_root, block_root, &state)?;
debug!(
self.log,
"Cached state";
"state_root" => ?state_root,
"slot" => state.slot(),
);
} else {
debug!(
self.log,
"Did not cache state";
"state_root" => ?state_root,
"slot" => state.slot(),
);
}
Ok(Some(state))
} else {
Ok(None)
@@ -1548,6 +1573,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
pub fn load_hot_state(
&self,
state_root: &Hash256,
update_cache: bool,
) -> Result<Option<(BeaconState<E>, Hash256)>, Error> {
metrics::inc_counter(&metrics::BEACON_STATE_HOT_GET_COUNT);
@@ -1579,11 +1605,13 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
let mut state = if slot % E::slots_per_epoch() == 0 {
boundary_state
} else {
// Cache ALL intermediate states that are reached during block replay. We may want
// to restrict this in future to only cache epoch boundary states. At worst we will
// cache up to 32 states for each state loaded, which should not flush out the cache
// entirely.
// If replaying blocks, and `update_cache` is true, also cache the epoch boundary
// state that this state is based on. It may be useful as the basis of more states
// in the same epoch.
let state_cache_hook = |state_root, state: &mut BeaconState<E>| {
if !update_cache || state.slot() % E::slots_per_epoch() != 0 {
return Ok(());
}
// Ensure all caches are built before attempting to cache.
state.update_tree_hash_cache()?;
state.build_all_caches(&self.spec)?;
@@ -1598,7 +1626,8 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
self.log,
"Cached ancestor state";
"state_root" => ?state_root,
"slot" => slot,
"state_slot" => state.slot(),
"descendant_slot" => slot,
);
}
Ok(())
@@ -2668,10 +2697,15 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
return Ok(());
};
// Load the split state so we can backtrack to find execution payloads.
let split_state = self.get_state(&split.state_root, Some(split.slot))?.ok_or(
HotColdDBError::MissingSplitState(split.state_root, split.slot),
)?;
// Load the split state so we can backtrack to find execution payloads. The split state
// should be in the state cache as the enshrined finalized state, so this should never
// cache miss.
let split_state = self
.get_state(&split.state_root, Some(split.slot), true)?
.ok_or(HotColdDBError::MissingSplitState(
split.state_root,
split.slot,
))?;
// The finalized block may or may not have its execution payload stored, depending on
// whether it was at a skipped slot. However for a fully pruned database its parent
@@ -3169,8 +3203,9 @@ pub fn migrate_database<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>(
// Store slot -> state_root and state_root -> slot mappings.
store.store_cold_state_summary(&state_root, slot, &mut cold_db_ops)?;
} else {
// We purposely do not update the state cache to store this state
let state: BeaconState<E> = store
.get_hot_state(&state_root)?
.get_hot_state(&state_root, false)?
.ok_or(HotColdDBError::MissingStateToFreeze(state_root))?;
store.store_cold_state(&state_root, &state, &mut cold_db_ops)?;