mirror of https://github.com/sigp/lighthouse.git
Merge remote-tracking branch 'origin/unstable' into tree-states
@@ -28,6 +28,8 @@ pub struct StoreConfig {
     pub compact_on_init: bool,
     /// Whether to compact the database during database pruning.
     pub compact_on_prune: bool,
+    /// Whether to prune payloads on initialization and finalization.
+    pub prune_payloads: bool,
 }

 /// Variant of `StoreConfig` that gets written to disk. Contains immutable configuration params.
@@ -53,6 +55,7 @@ impl Default for StoreConfig {
             compression_level: DEFAULT_COMPRESSION_LEVEL,
             compact_on_init: false,
             compact_on_prune: true,
+            prune_payloads: true,
         }
     }
 }
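The two config hunks above add the `prune_payloads` flag, on by default, which the block-loading and migration changes below consult. A cut-down, standalone sketch of the resulting shape (field names from the diff; the real struct has more fields, such as `compression_level`):

    // Sketch only: a reduced StoreConfig with the new flag, not the full struct.
    #[derive(Debug, Clone)]
    pub struct StoreConfig {
        pub compact_on_init: bool,
        pub compact_on_prune: bool,
        /// Whether to prune payloads on initialization and finalization.
        pub prune_payloads: bool,
    }

    impl Default for StoreConfig {
        fn default() -> Self {
            Self {
                compact_on_init: false,
                compact_on_prune: true,
                prune_payloads: true, // pruning is on by default
            }
        }
    }

    fn main() {
        // An operator who wants to keep serving full payloads for finalized
        // blocks would override the default:
        let archive_config = StoreConfig {
            prune_payloads: false,
            ..StoreConfig::default()
        };
        assert!(!archive_config.prune_payloads);
    }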
@@ -8,7 +8,7 @@ use crate::config::{
 use crate::forwards_iter::{HybridForwardsBlockRootsIterator, HybridForwardsStateRootsIterator};
 use crate::hot_state_iter::HotStateRootIter;
 use crate::impls::beacon_state::{get_full_state, store_full_state};
-use crate::iter::{ParentRootBlockIterator, StateRootsIterator};
+use crate::iter::{BlockRootsIterator, ParentRootBlockIterator, RootsIterator};
 use crate::leveldb_store::{BytesKey, LevelDB};
 use crate::memory_store::MemoryStore;
 use crate::metadata::{
@@ -22,6 +22,7 @@ use crate::{
     get_key_for_col, DBColumn, DatabaseBlock, Error, ItemStore, KeyValueStoreOp,
     PartialBeaconState, StoreItem, StoreOp,
 };
+use itertools::process_results;
 use leveldb::iterator::LevelDBIterator;
 use lru::LruCache;
 use milhouse::Diff;
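The new `itertools::process_results` import supports the pruning code added further down: it lets a closure consume an iterator of `Result`s as if it were infallible, short-circuiting on the first error. A minimal usage sketch:

    use itertools::process_results;

    fn main() {
        let items: Vec<Result<u64, &'static str>> = vec![Ok(1), Ok(2), Ok(3)];
        // The closure sees plain u64s; an Err anywhere aborts and is returned.
        let sum = process_results(items, |iter| iter.sum::<u64>());
        assert_eq!(sum, Ok(6));

        let broken: Vec<Result<u64, &'static str>> = vec![Ok(1), Err("db error")];
        assert_eq!(process_results(broken, |iter| iter.sum::<u64>()), Err("db error"));
    }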
@@ -363,8 +364,10 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
         };

         // If the block is after the split point then we should have the full execution payload
-        // stored in the database. Otherwise, just return the blinded block.
-        // Hold the split lock so that it can't change.
+        // stored in the database. If it isn't but payload pruning is disabled, try to load it
+        // on-demand.
+        //
+        // Hold the split lock so that it can't change while loading the payload.
         let split = self.split.read_recursive();

         let block = if blinded_block.message().execution_payload().is_err()
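The rewritten comment makes explicit that the lock is held across the payload load. `read_recursive` is a parking_lot API (the assumption here is that `self.split` is a parking_lot `RwLock`, since std's `RwLock` has no such method): it acquires a read lock without respecting queued writers, so a thread that already holds a read guard cannot deadlock against a waiting writer. A toy illustration under that assumption:

    use parking_lot::RwLock;

    fn main() {
        let split = RwLock::new(0u64);
        // With a fair read(), a nested read on the same thread could deadlock
        // if a writer queued up in between; read_recursive never blocks on
        // queued writers, so nested reads are safe.
        let outer = split.read_recursive();
        let inner = split.read_recursive();
        assert_eq!(*outer, *inner);
    }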
@@ -377,6 +380,18 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
             self.block_cache.lock().put(*block_root, full_block.clone());

             DatabaseBlock::Full(full_block)
+        } else if !self.config.prune_payloads {
+            // If payload pruning is disabled there's a chance we may have the payload of
+            // this finalized block. Attempt to load it but don't error in case it's missing.
+            if let Some(payload) = self.get_execution_payload(block_root)? {
+                DatabaseBlock::Full(
+                    blinded_block
+                        .try_into_full_block(Some(payload))
+                        .ok_or(Error::AddPayloadLogicError)?,
+                )
+            } else {
+                DatabaseBlock::Blinded(blinded_block)
+            }
         } else {
             DatabaseBlock::Blinded(blinded_block)
         };
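The new `else if` arm gives the block loader three outcomes: a full block (post-split, payload guaranteed), a full block via best-effort load (pruning disabled), or a blinded block. A condensed model of that control flow, with stand-in types in place of Lighthouse's:

    enum DatabaseBlock {
        Full(&'static str),
        Blinded(&'static str),
    }

    // Stand-in for the decision above: `post_split` plays the role of the
    // split-slot comparison, `stored_payload` the on-disk lookup.
    fn load_block(
        post_split: bool,
        prune_payloads: bool,
        stored_payload: Option<&'static str>,
    ) -> DatabaseBlock {
        if post_split {
            // Post-split blocks must have payloads stored; a miss is a hard
            // error (MissingExecutionPayload) in the real code.
            DatabaseBlock::Full(stored_payload.expect("post-split payload"))
        } else if !prune_payloads {
            // Pruning disabled: the payload may still be on disk. Use it if
            // present, but a miss is not an error.
            match stored_payload {
                Some(p) => DatabaseBlock::Full(p),
                None => DatabaseBlock::Blinded("no payload"),
            }
        } else {
            // Pruning enabled: finalized payloads are deleted, return blinded.
            DatabaseBlock::Blinded("pruned")
        }
    }

    fn main() {
        assert!(matches!(load_block(false, true, None), DatabaseBlock::Blinded(_)));
    }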
@@ -417,7 +432,9 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
         blinded_block: SignedBeaconBlock<E, BlindedPayload<E>>,
     ) -> Result<SignedBeaconBlock<E>, Error> {
         if blinded_block.message().execution_payload().is_ok() {
-            let execution_payload = self.get_execution_payload(block_root)?;
+            let execution_payload = self
+                .get_execution_payload(block_root)?
+                .ok_or(HotColdDBError::MissingExecutionPayload(*block_root))?;
             blinded_block.try_into_full_block(Some(execution_payload))
         } else {
             blinded_block.try_into_full_block(None)
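`make_full_block` keeps its strict contract by converting the new `Option` back into the old error at the call site. The pattern in miniature, with a stand-in error type:

    #[derive(Debug, PartialEq)]
    enum Error {
        MissingExecutionPayload,
    }

    // Callers that *require* a payload re-impose the old invariant with ok_or.
    fn require_payload(loaded: Option<String>) -> Result<String, Error> {
        loaded.ok_or(Error::MissingExecutionPayload)
    }

    fn main() {
        assert_eq!(require_payload(None), Err(Error::MissingExecutionPayload));
    }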
@@ -462,9 +479,14 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
     pub fn get_execution_payload(
         &self,
         block_root: &Hash256,
-    ) -> Result<ExecutionPayload<E>, Error> {
-        self.get_item(block_root)?
-            .ok_or_else(|| HotColdDBError::MissingExecutionPayload(*block_root).into())
+    ) -> Result<Option<ExecutionPayload<E>>, Error> {
+        self.get_item(block_root)
     }

+    /// Check if the execution payload for a block exists on disk.
+    pub fn execution_payload_exists(&self, block_root: &Hash256) -> Result<bool, Error> {
+        self.get_item::<ExecutionPayload<E>>(block_root)
+            .map(|payload| payload.is_some())
+    }
+
     /// Determine whether a block exists in the database.
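With `get_execution_payload` now returning `Result<Option<_>, Error>`, "not found" is data rather than an error, and `execution_payload_exists` falls out as a one-liner. A hypothetical caller sketch (names and types invented for illustration):

    // What the new API shape looks like from the caller's side: a database
    // error and a pruned payload are now distinct cases.
    fn describe_payload(
        loaded: Result<Option<Vec<u8>>, String>,
    ) -> Result<&'static str, String> {
        match loaded? {
            Some(_) => Ok("payload on disk"),
            None => Ok("payload pruned or pre-Bellatrix"),
        }
    }

    fn main() {
        assert_eq!(describe_payload(Ok(None)), Ok("payload pruned or pre-Bellatrix"));
    }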
@@ -1642,6 +1664,113 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
             &CompactionTimestamp(compaction_timestamp.as_secs()),
         )
     }
+
+    /// Try to prune all execution payloads, returning early if there is no need to prune.
+    pub fn try_prune_execution_payloads(&self, force: bool) -> Result<(), Error> {
+        let split = self.get_split_info();
+
+        if split.slot == 0 {
+            return Ok(());
+        }
+
+        let bellatrix_fork_slot = if let Some(epoch) = self.spec.bellatrix_fork_epoch {
+            epoch.start_slot(E::slots_per_epoch())
+        } else {
+            return Ok(());
+        };
+
+        // Load the split state so we can backtrack to find execution payloads.
+        let split_state = self.get_state(&split.state_root, Some(split.slot))?.ok_or(
+            HotColdDBError::MissingSplitState(split.state_root, split.slot),
+        )?;
+
+        // The finalized block may or may not have its execution payload stored, depending on
+        // whether it was at a skipped slot. However for a fully pruned database its parent
+        // should *always* have been pruned. In case of a long split (no parent found) we
+        // continue as if the payloads are pruned, as the node probably has other things to worry
+        // about.
+        let split_block_root = split_state.get_latest_block_root(split.state_root);
+
+        let already_pruned =
+            process_results(split_state.rev_iter_block_roots(&self.spec), |mut iter| {
+                iter.find(|(_, block_root)| *block_root != split_block_root)
+                    .map_or(Ok(true), |(_, split_parent_root)| {
+                        self.execution_payload_exists(&split_parent_root)
+                            .map(|exists| !exists)
+                    })
+            })??;
+
+        if already_pruned && !force {
+            info!(self.log, "Execution payloads are pruned");
+            return Ok(());
+        }
+
+        // Iterate block roots backwards to the Bellatrix fork or the anchor slot, whichever comes
+        // first.
+        warn!(
+            self.log,
+            "Pruning finalized payloads";
+            "info" => "you may notice degraded I/O performance while this runs"
+        );
+        let anchor_slot = self.get_anchor_info().map(|info| info.anchor_slot);
+
+        let mut ops = vec![];
+        let mut last_pruned_block_root = None;
+
+        for res in std::iter::once(Ok((split_block_root, split.slot)))
+            .chain(BlockRootsIterator::new(self, &split_state))
+        {
+            let (block_root, slot) = match res {
+                Ok(tuple) => tuple,
+                Err(e) => {
+                    warn!(
+                        self.log,
+                        "Stopping payload pruning early";
+                        "error" => ?e,
+                    );
+                    break;
+                }
+            };
+
+            if slot < bellatrix_fork_slot {
+                info!(
+                    self.log,
+                    "Payload pruning reached Bellatrix boundary";
+                );
+                break;
+            }
+
+            if Some(block_root) != last_pruned_block_root
+                && self.execution_payload_exists(&block_root)?
+            {
+                debug!(
+                    self.log,
+                    "Pruning execution payload";
+                    "slot" => slot,
+                    "block_root" => ?block_root,
+                );
+                last_pruned_block_root = Some(block_root);
+                ops.push(StoreOp::DeleteExecutionPayload(block_root));
+            }
+
+            if Some(slot) == anchor_slot {
+                info!(
+                    self.log,
+                    "Payload pruning reached anchor state";
+                    "slot" => slot
+                );
+                break;
+            }
+        }
+        let payloads_pruned = ops.len();
+        self.do_atomically(ops)?;
+        info!(
+            self.log,
+            "Execution payload pruning complete";
+            "payloads_pruned" => payloads_pruned,
+        );
+        Ok(())
+    }
 }

 /// Advance the split point of the store, moving new finalized states to the freezer.
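The cheapest part of the new routine is the "already pruned?" probe: the finalized block itself may legitimately lack a payload (skipped slot), but if a previous prune completed, its parent's payload must be gone. The walk then runs backwards until the Bellatrix fork slot or the anchor slot, whichever comes first. A standalone model of the probe (stand-in types; the real code threads DB errors through process_results):

    fn already_pruned(
        split_block_root: u64,
        roots_back_from_split: impl IntoIterator<Item = u64>,
        payload_exists: impl Fn(u64) -> bool,
    ) -> bool {
        roots_back_from_split
            .into_iter()
            // Skip repeats of the split block root (skipped slots repeat the
            // same root) until we reach the parent block.
            .find(|root| *root != split_block_root)
            // No parent found (very long skip run): treat as pruned, move on.
            .map_or(true, |parent_root| !payload_exists(parent_root))
    }

    fn main() {
        // Parent root 6 has no payload on disk => pruning already done.
        assert!(already_pruned(7, [7, 7, 6, 5], |_| false));
        // Parent root 6 still has a payload => pruning is needed.
        assert!(!already_pruned(7, [7, 7, 6, 5], |_| true));
    }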
@@ -1687,15 +1816,15 @@ pub fn migrate_database<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>(
     // the cold DB.
     let mut hot_db_ops: Vec<StoreOp<E>> = Vec::new();

-    let state_root_iter = StateRootsIterator::new(&store, finalized_state);
-    for maybe_pair in state_root_iter.take_while(|result| match result {
-        Ok((_, slot)) => {
+    let state_root_iter = RootsIterator::new(&store, finalized_state);
+    for maybe_tuple in state_root_iter.take_while(|result| match result {
+        Ok((_, _, slot)) => {
             slot >= &current_split_slot
                 && anchor_slot.map_or(true, |anchor_slot| slot >= &anchor_slot)
         }
         Err(_) => true,
     }) {
-        let (state_root, slot) = maybe_pair?;
+        let (block_root, state_root, slot) = maybe_tuple?;

         let mut cold_db_ops: Vec<KeyValueStoreOp> = Vec::new();
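Switching from `StateRootsIterator` to `RootsIterator` means each item now carries the block root as well, which the loop needs in order to delete payloads. Note that the `take_while` deliberately lets errors through so the `?` inside the loop can surface them. The same pattern with stand-in tuples:

    fn main() {
        let current_split_slot = 4u64;
        // (block_root, state_root, slot) triples, newest first, as Results.
        let results: Vec<Result<(u64, u64, u64), &'static str>> =
            vec![Ok((0xa, 0x1, 6)), Ok((0xb, 0x2, 5)), Ok((0xc, 0x3, 3))];

        for maybe_tuple in results.into_iter().take_while(|result| match result {
            // Stop once we walk past the split slot...
            Ok((_, _, slot)) => slot >= &current_split_slot,
            // ...but keep errors so they surface inside the loop body.
            Err(_) => true,
        }) {
            let (block_root, _state_root, slot) = maybe_tuple.expect("db error");
            println!("would migrate block {block_root:#x} at slot {slot}");
        }
    }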
@@ -1719,6 +1848,14 @@ pub fn migrate_database<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>(

         // Delete the old summary, and the full state if we lie on an epoch boundary.
         hot_db_ops.push(StoreOp::DeleteState(state_root, Some(slot)));
+
+        // Delete the execution payload if payload pruning is enabled. At a skipped slot we may
+        // delete the payload for the finalized block itself, but that's OK as we only guarantee
+        // that payloads are present for slots >= the split slot. The payload fetching code is also
+        // forgiving of missing payloads.
+        if store.config.prune_payloads {
+            hot_db_ops.push(StoreOp::DeleteExecutionPayload(block_root));
+        }
     }

     // Warning: Critical section. We have to take care not to put any of the two databases in an
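As the added comment notes, skipped slots make the block-roots walk repeat the same root, so deleting per iteration can emit duplicate ops; `try_prune_execution_payloads` above dedups with a last-seen check, while the migration path tolerates over-deletion because the fetch code is forgiving. A toy illustration of the dedup (stand-in data, not Lighthouse's API):

    fn main() {
        let roots_by_slot = [7u64, 7, 7, 6, 5, 5, 4]; // repeats = skipped slots
        let mut last = None;
        let mut ops = vec![];
        for root in roots_by_slot {
            if Some(root) != last {
                last = Some(root);
                ops.push(root); // one delete op per distinct block root
            }
        }
        assert_eq!(ops, vec![7, 6, 5, 4]);
    }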