mirror of
https://github.com/sigp/lighthouse.git
synced 2026-05-07 00:42:42 +00:00
Add command for pruning states (#4835)
## Issue Addressed Closes #4481. (Continuation of #4648) ## Proposed Changes - [x] Add `lighthouse db prune-states` - [x] Make it work - [x] Ensure block roots are handled correctly (to be addressed in 4735) - [x] Check perf on mainnet/Goerli/Gnosis (takes a few seconds max) - [x] Run block root healing logic (#4875 ) at the beginning - [x] Add some tests - [x] Update docs - [x] Add `--freezer` flag and other improvements to `lighthouse db inspect` Co-authored-by: Michael Sproul <michael@sigmaprime.io> Co-authored-by: Jimmy Chen <jimmy@sigmaprime.io> Co-authored-by: Michael Sproul <micsproul@gmail.com>
This commit is contained in:
@@ -27,7 +27,7 @@ use std::collections::HashSet;
|
||||
use std::convert::TryInto;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
use store::metadata::{SchemaVersion, CURRENT_SCHEMA_VERSION};
|
||||
use store::metadata::{SchemaVersion, CURRENT_SCHEMA_VERSION, STATE_UPPER_LIMIT_NO_RETAIN};
|
||||
use store::{
|
||||
chunked_vector::{chunk_key, Field},
|
||||
get_key_for_col,
|
||||
@@ -3306,6 +3306,77 @@ fn check_blob_existence(
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn prune_historic_states() {
|
||||
let num_blocks_produced = E::slots_per_epoch() * 5;
|
||||
let db_path = tempdir().unwrap();
|
||||
let store = get_store(&db_path);
|
||||
let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT);
|
||||
let genesis_state_root = harness.chain.genesis_state_root;
|
||||
let genesis_state = harness
|
||||
.chain
|
||||
.get_state(&genesis_state_root, None)
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
|
||||
harness
|
||||
.extend_chain(
|
||||
num_blocks_produced as usize,
|
||||
BlockStrategy::OnCanonicalHead,
|
||||
AttestationStrategy::AllValidators,
|
||||
)
|
||||
.await;
|
||||
|
||||
// Check historical state is present.
|
||||
let state_roots_iter = harness
|
||||
.chain
|
||||
.forwards_iter_state_roots(Slot::new(0))
|
||||
.unwrap();
|
||||
for (state_root, slot) in state_roots_iter
|
||||
.take(E::slots_per_epoch() as usize)
|
||||
.map(Result::unwrap)
|
||||
{
|
||||
assert!(store.get_state(&state_root, Some(slot)).unwrap().is_some());
|
||||
}
|
||||
|
||||
store
|
||||
.prune_historic_states(genesis_state_root, &genesis_state)
|
||||
.unwrap();
|
||||
|
||||
// Check that anchor info is updated.
|
||||
let anchor_info = store.get_anchor_info().unwrap();
|
||||
assert_eq!(anchor_info.state_lower_limit, 0);
|
||||
assert_eq!(anchor_info.state_upper_limit, STATE_UPPER_LIMIT_NO_RETAIN);
|
||||
|
||||
// Historical states should be pruned.
|
||||
let state_roots_iter = harness
|
||||
.chain
|
||||
.forwards_iter_state_roots(Slot::new(1))
|
||||
.unwrap();
|
||||
for (state_root, slot) in state_roots_iter
|
||||
.take(E::slots_per_epoch() as usize)
|
||||
.map(Result::unwrap)
|
||||
{
|
||||
assert!(store.get_state(&state_root, Some(slot)).unwrap().is_none());
|
||||
}
|
||||
|
||||
// Ensure that genesis state is still accessible
|
||||
let genesis_state_root = harness.chain.genesis_state_root;
|
||||
assert!(store
|
||||
.get_state(&genesis_state_root, Some(Slot::new(0)))
|
||||
.unwrap()
|
||||
.is_some());
|
||||
|
||||
// Run for another two epochs.
|
||||
let additional_blocks_produced = 2 * E::slots_per_epoch();
|
||||
harness
|
||||
.extend_slots(additional_blocks_produced as usize)
|
||||
.await;
|
||||
|
||||
check_finalization(&harness, num_blocks_produced + additional_blocks_produced);
|
||||
check_split_slot(&harness, store);
|
||||
}
|
||||
|
||||
/// Checks that two chains are the same, for the purpose of these tests.
|
||||
///
|
||||
/// Several fields that are hard/impossible to check are ignored (e.g., the store).
|
||||
|
||||
Reference in New Issue
Block a user