mirror of
https://github.com/sigp/lighthouse.git
synced 2026-04-27 01:33:33 +00:00
Prune abandoned forks (#916)
* Address compiler warning
* Prune abandoned fork choice forks
* New approach to pruning
* Wrap some block hashes in a newtype pattern
For increased type safety.
* Add Graphviz chain dump emitter for debugging
* Fix broken test case
* Make prunes_abandoned_forks use real DiskStore
* Mark finalized blocks in the GraphViz output
* Refine debug stringification of Slot and Epoch
Before this commit: print!("{:?}", Slot(123)) == "Slot(\n123\n)".
After this commit: print!("{:?}", Slot(123)) == "Slot(123)".
* Simplify build_block()
* Rewrite test case using more composable test primitives
* Working rewritten test case
* Tighten fork pruning test checks
* Add another pruning test case
* Bugfix: Finalized blocks weren't always properly detected
* Pruning: Add pruning_does_not_touch_blocks_prior_to_finalization test case
* Tighten pruning tests: check if heads are tracked properly
* Add a failing test case for a buggy scenario
* Change name of function to a more accurate one
* Fix failing test case
* Test case: Were skipped slots' states pruned?
* Style fix: Simplify dereferencing
* Tighten pruning tests: check if abandoned states are deleted
* Towards atomicity of db ops
* Correct typo
* Prune also skipped slots' states
* New logic for handling skipped states
* Make skipped slots test pass
* Post conflict resolution fixes
* Formatting fixes
* Tests passing
* Block hashes in Graphviz node labels
* Removed unused changes
* Fix bug with states having < SlotsPerHistoricalRoot roots
* Consolidate State/BlockRootsIterator for pruning
* Address review feedback
* Fix a bug in pruning tests
* Detach prune_abandoned_forks() from its object
* Move migrate.rs from store to beacon_chain
* Move forks pruning onto a background thread
* Bugfix: Heads weren't pruned when prune set contained only the head
* Rename: freeze_to_state() -> process_finalization()
* Eliminate redundant function parameter
Co-authored-by: Michael Sproul <michael@sigmaprime.io>
This commit is contained in:
@@ -4,6 +4,7 @@ use crate::events::{EventHandler, EventKind};
|
||||
use crate::fork_choice::{Error as ForkChoiceError, ForkChoice};
|
||||
use crate::head_tracker::HeadTracker;
|
||||
use crate::metrics;
|
||||
use crate::migrate::Migrate;
|
||||
use crate::persisted_beacon_chain::PersistedBeaconChain;
|
||||
use crate::shuffling_cache::ShufflingCache;
|
||||
use crate::snapshot_cache::SnapshotCache;
|
||||
@@ -26,14 +27,16 @@ use state_processing::{
|
||||
use std::borrow::Cow;
|
||||
use std::cmp::Ordering;
|
||||
use std::collections::HashMap;
|
||||
use std::collections::HashSet;
|
||||
use std::fs;
|
||||
use std::io::prelude::*;
|
||||
use std::sync::Arc;
|
||||
use std::time::{Duration, Instant};
|
||||
use store::iter::{
|
||||
BlockRootsIterator, ReverseBlockRootIterator, ReverseStateRootIterator, StateRootsIterator,
|
||||
BlockRootsIterator, ParentRootBlockIterator, ReverseBlockRootIterator,
|
||||
ReverseStateRootIterator, StateRootsIterator,
|
||||
};
|
||||
use store::{Error as DBError, Migrate, StateBatch, Store};
|
||||
use store::{Error as DBError, StateBatch, Store};
|
||||
use tree_hash::TreeHash;
|
||||
use types::*;
|
||||
|
||||
@@ -170,7 +173,7 @@ pub struct HeadInfo {
|
||||
|
||||
pub trait BeaconChainTypes: Send + Sync + 'static {
|
||||
type Store: store::Store<Self::EthSpec>;
|
||||
type StoreMigrator: store::Migrate<Self::Store, Self::EthSpec>;
|
||||
type StoreMigrator: Migrate<Self::Store, Self::EthSpec>;
|
||||
type SlotClock: slot_clock::SlotClock;
|
||||
type Eth1Chain: Eth1ChainBackend<Self::EthSpec, Self::Store>;
|
||||
type EthSpec: types::EthSpec;
|
||||
@@ -202,7 +205,7 @@ pub struct BeaconChain<T: BeaconChainTypes> {
|
||||
/// A handler for events generated by the beacon chain.
|
||||
pub event_handler: T::EventHandler,
|
||||
/// Used to track the heads of the beacon chain.
|
||||
pub(crate) head_tracker: HeadTracker,
|
||||
pub(crate) head_tracker: Arc<HeadTracker>,
|
||||
/// A cache dedicated to block processing.
|
||||
pub(crate) block_processing_cache: TimeoutRwLock<SnapshotCache<T::EthSpec>>,
|
||||
/// Caches the shuffling for a given epoch and state root.
|
||||
@@ -498,6 +501,10 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
|
||||
self.head_tracker.heads()
|
||||
}
|
||||
|
||||
pub fn knows_head(&self, block_hash: &SignedBeaconBlockHash) -> bool {
|
||||
self.head_tracker.contains_head((*block_hash).into())
|
||||
}
|
||||
|
||||
/// Returns the `BeaconState` at the given slot.
|
||||
///
|
||||
/// Returns `None` when the state is not found in the database or there is an error skipping
|
||||
@@ -1796,6 +1803,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
|
||||
let beacon_block_root = self.fork_choice.find_head(&self)?;
|
||||
|
||||
let current_head = self.head_info()?;
|
||||
let old_finalized_root = current_head.finalized_checkpoint.root;
|
||||
|
||||
if beacon_block_root == current_head.block_root {
|
||||
return Ok(());
|
||||
@@ -1923,7 +1931,11 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
|
||||
});
|
||||
|
||||
if new_finalized_epoch != old_finalized_epoch {
|
||||
self.after_finalization(old_finalized_epoch, finalized_root)?;
|
||||
self.after_finalization(
|
||||
old_finalized_epoch,
|
||||
finalized_root,
|
||||
old_finalized_root.into(),
|
||||
)?;
|
||||
}
|
||||
|
||||
let _ = self.event_handler.register(EventKind::BeaconHeadChanged {
|
||||
@@ -1942,6 +1954,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
|
||||
&self,
|
||||
old_finalized_epoch: Epoch,
|
||||
finalized_block_root: Hash256,
|
||||
old_finalized_root: SignedBeaconBlockHash,
|
||||
) -> Result<(), Error> {
|
||||
let finalized_block = self
|
||||
.store
|
||||
@@ -1981,10 +1994,13 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
|
||||
|
||||
// TODO: configurable max finality distance
|
||||
let max_finality_distance = 0;
|
||||
self.store_migrator.freeze_to_state(
|
||||
self.store_migrator.process_finalization(
|
||||
finalized_block.state_root,
|
||||
finalized_state,
|
||||
max_finality_distance,
|
||||
Arc::clone(&self.head_tracker),
|
||||
old_finalized_root,
|
||||
finalized_block_root.into(),
|
||||
);
|
||||
|
||||
let _ = self.event_handler.register(EventKind::BeaconFinalization {
|
||||
@@ -2052,6 +2068,100 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
|
||||
|
||||
Ok(dump)
|
||||
}
|
||||
|
||||
pub fn dump_as_dot<W: Write>(&self, output: &mut W) {
|
||||
let canonical_head_hash = self
|
||||
.canonical_head
|
||||
.try_read_for(HEAD_LOCK_TIMEOUT)
|
||||
.ok_or_else(|| Error::CanonicalHeadLockTimeout)
|
||||
.unwrap()
|
||||
.beacon_block_root;
|
||||
let mut visited: HashSet<Hash256> = HashSet::new();
|
||||
let mut finalized_blocks: HashSet<Hash256> = HashSet::new();
|
||||
|
||||
let genesis_block_hash = Hash256::zero();
|
||||
write!(output, "digraph beacon {{\n").unwrap();
|
||||
write!(output, "\t_{:?}[label=\"genesis\"];\n", genesis_block_hash).unwrap();
|
||||
|
||||
// Canonical head needs to be processed first as otherwise finalized blocks aren't detected
|
||||
// properly.
|
||||
let heads = {
|
||||
let mut heads = self.heads();
|
||||
let canonical_head_index = heads
|
||||
.iter()
|
||||
.position(|(block_hash, _)| *block_hash == canonical_head_hash)
|
||||
.unwrap();
|
||||
let (canonical_head_hash, canonical_head_slot) =
|
||||
heads.swap_remove(canonical_head_index);
|
||||
heads.insert(0, (canonical_head_hash, canonical_head_slot));
|
||||
heads
|
||||
};
|
||||
|
||||
for (head_hash, _head_slot) in heads {
|
||||
for (block_hash, signed_beacon_block) in
|
||||
ParentRootBlockIterator::new(&*self.store, head_hash)
|
||||
{
|
||||
if visited.contains(&block_hash) {
|
||||
break;
|
||||
}
|
||||
visited.insert(block_hash);
|
||||
|
||||
if signed_beacon_block.slot() % T::EthSpec::slots_per_epoch() == 0 {
|
||||
let block = self.get_block(&block_hash).unwrap().unwrap();
|
||||
let state = self
|
||||
.get_state(&block.state_root(), Some(block.slot()))
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
finalized_blocks.insert(state.finalized_checkpoint.root);
|
||||
}
|
||||
|
||||
if block_hash == canonical_head_hash {
|
||||
write!(
|
||||
output,
|
||||
"\t_{:?}[label=\"{} ({})\" shape=box3d];\n",
|
||||
block_hash,
|
||||
block_hash,
|
||||
signed_beacon_block.slot()
|
||||
)
|
||||
.unwrap();
|
||||
} else if finalized_blocks.contains(&block_hash) {
|
||||
write!(
|
||||
output,
|
||||
"\t_{:?}[label=\"{} ({})\" shape=Msquare];\n",
|
||||
block_hash,
|
||||
block_hash,
|
||||
signed_beacon_block.slot()
|
||||
)
|
||||
.unwrap();
|
||||
} else {
|
||||
write!(
|
||||
output,
|
||||
"\t_{:?}[label=\"{} ({})\" shape=box];\n",
|
||||
block_hash,
|
||||
block_hash,
|
||||
signed_beacon_block.slot()
|
||||
)
|
||||
.unwrap();
|
||||
}
|
||||
write!(
|
||||
output,
|
||||
"\t_{:?} -> _{:?};\n",
|
||||
block_hash,
|
||||
signed_beacon_block.parent_root()
|
||||
)
|
||||
.unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
write!(output, "}}\n").unwrap();
|
||||
}
|
||||
|
||||
// Used for debugging
|
||||
#[allow(dead_code)]
|
||||
pub fn dump_dot_file(&self, file_name: &str) {
|
||||
let mut file = std::fs::File::create(file_name).unwrap();
|
||||
self.dump_as_dot(&mut file);
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: BeaconChainTypes> Drop for BeaconChain<T> {
|
||||
|
||||
Reference in New Issue
Block a user