mirror of
https://github.com/sigp/lighthouse.git
synced 2026-03-17 03:42:46 +00:00
* Start extracting freezer changes for tree-states * Remove unused config args * Add comments * Remove unwraps * Subjective more clear implementation * Clean up hdiff * Update xdelta3 * Tree states archive metrics (#6040) * Add store cache size metrics * Add compress timer metrics * Add diff apply compute timer metrics * Add diff buffer cache hit metrics * Add hdiff buffer load times * Add blocks replayed metric * Move metrics to store * Future proof some metrics --------- Co-authored-by: Michael Sproul <michael@sigmaprime.io> * Port and clean up forwards iterator changes * Add and polish hierarchy-config flag * Merge remote-tracking branch 'origin/unstable' into tree-states-archive * Cleaner errors * Fix beacon_chain test compilation * Merge remote-tracking branch 'origin/unstable' into tree-states-archive * Patch a few more freezer block roots * Fix genesis block root bug * Fix test failing due to pending updates * Beacon chain tests passing * Merge remote-tracking branch 'origin/unstable' into tree-states-archive * Merge remote-tracking branch 'origin/unstable' into tree-states-archive * Fix doc lint * Implement DB schema upgrade for hierarchical state diffs (#6193) * DB upgrade * Add flag * Delete RestorePointHash * Update docs * Update docs * Implement hierarchical state diffs config migration (#6245) * Implement hierarchical state diffs config migration * Review PR * Remove TODO * Set CURRENT_SCHEMA_VERSION correctly * Fix genesis state loading * Re-delete some PartialBeaconState stuff --------- Co-authored-by: Michael Sproul <michael@sigmaprime.io> * Merge remote-tracking branch 'origin/unstable' into tree-states-archive * Fix test compilation * Update schema downgrade test * Fix tests * Fix null anchor migration * Merge remote-tracking branch 'origin/unstable' into tree-states-archive * Fix tree states upgrade migration (#6328) * Towards crash safety * Fix compilation * Move cold summaries and state roots to new columns * Rename StateRoots chunked field * 
Update prune states * Clean hdiff CLI flag and metrics * Fix "staged reconstruction" * Merge remote-tracking branch 'origin/unstable' into tree-states-archive * Fix alloy issues * Fix staged reconstruction logic * Prevent weird slot drift * Remove "allow" flag * Update CLI help * Remove FIXME about downgrade * Merge remote-tracking branch 'origin/unstable' into tree-states-archive * Remove some unnecessary error variants * Fix new test * Tree states archive - review comments and metrics (#6386) * Review PR comments and metrics * Comments * Add anchor metrics * drop prev comment * Update metadata.rs * Apply suggestions from code review --------- Co-authored-by: Michael Sproul <micsproul@gmail.com> * Update beacon_node/store/src/hot_cold_store.rs Co-authored-by: Lion - dapplion <35266934+dapplion@users.noreply.github.com> * Merge remote-tracking branch 'origin/unstable' into tree-states-archive * Clarify comment and remove anchor_slot garbage * Simplify database anchor (#6397) * Simplify database anchor * Update beacon_node/store/src/reconstruct.rs * Add migration for anchor * Fix and simplify light_client store tests * Fix incompatible config test * Merge remote-tracking branch 'origin/unstable' into tree-states-archive * Merge remote-tracking branch 'origin/unstable' into tree-states-archive * More metrics * Merge remote-tracking branch 'origin/unstable' into tree-states-archive * New historic state cache (#6475) * New historic state cache * Add more metrics * State cache hit rate metrics * Fix store metrics * More logs and metrics * Fix logger * Ensure cached states have built caches :O * Replay blocks in preference to diffing * Two separate caches * Distribute cache build time to next slot * Re-plumb historic-state-cache flag * Clean up metrics * Update book * Update beacon_node/store/src/hdiff.rs Co-authored-by: Lion - dapplion <35266934+dapplion@users.noreply.github.com> * Update beacon_node/store/src/historic_state_cache.rs Co-authored-by: Lion - dapplion 
<35266934+dapplion@users.noreply.github.com> --------- Co-authored-by: Lion - dapplion <35266934+dapplion@users.noreply.github.com> * Update database docs * Update diagram * Merge remote-tracking branch 'origin/unstable' into tree-states-archive * Update lockbud to work with bindgen/etc * Correct pkg name for Debian * Remove vestigial epochs_per_state_diff * Merge remote-tracking branch 'origin/unstable' into tree-states-archive * Markdown lint * Merge remote-tracking branch 'origin/unstable' into tree-states-archive * Address Jimmy's review comments * Simplify ReplayFrom case * Fix and document genesis_state_root * Typo Co-authored-by: Jimmy Chen <jchen.tc@gmail.com> * Merge branch 'unstable' into tree-states-archive * Compute diff of validators list manually (#6556) * Split hdiff computation * Dedicated logic for historical roots and summaries * Benchmark against real states * Mutated source? * Version the hdiff * Add lighthouse DB config for hierarchy exponents * Tidy up hierarchy exponents flag * Apply suggestions from code review Co-authored-by: Michael Sproul <micsproul@gmail.com> * Address PR review * Remove hardcoded paths in benchmarks * Delete unused function in benches * lint --------- Co-authored-by: Michael Sproul <michael@sigmaprime.io> * Test hdiff binary format stability (#6585) * Merge remote-tracking branch 'origin/unstable' into tree-states-archive * Add deprecation warning for SPRP * Update xdelta to get rid of duplicate deps * Document test
249 lines · 8.3 KiB · Rust
use crate::{DBColumn, Error, StoreItem};
|
|
use serde::{Deserialize, Serialize};
|
|
use ssz::{Decode, Encode};
|
|
use ssz_derive::{Decode, Encode};
|
|
use types::{Checkpoint, Hash256, Slot};
|
|
|
|
/// Current version of the on-disk database schema, bumped whenever a migration is required.
pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(22);

// All the keys that get stored under the `BeaconMeta` column.
//
// We use `repeat_byte` because it's a const fn.
pub const SCHEMA_VERSION_KEY: Hash256 = Hash256::repeat_byte(0);
pub const CONFIG_KEY: Hash256 = Hash256::repeat_byte(1);
pub const SPLIT_KEY: Hash256 = Hash256::repeat_byte(2);
pub const PRUNING_CHECKPOINT_KEY: Hash256 = Hash256::repeat_byte(3);
pub const COMPACTION_TIMESTAMP_KEY: Hash256 = Hash256::repeat_byte(4);
pub const ANCHOR_INFO_KEY: Hash256 = Hash256::repeat_byte(5);
pub const BLOB_INFO_KEY: Hash256 = Hash256::repeat_byte(6);
pub const DATA_COLUMN_INFO_KEY: Hash256 = Hash256::repeat_byte(7);

/// State upper limit value used to indicate that a node is not storing historic states.
pub const STATE_UPPER_LIMIT_NO_RETAIN: Slot = Slot::new(u64::MAX);
|
/// The `AnchorInfo` encoding full availability of all historic blocks & states.
pub const ANCHOR_FOR_ARCHIVE_NODE: AnchorInfo = AnchorInfo {
    // All slot fields at 0: everything back to genesis is present.
    anchor_slot: Slot::new(0),
    oldest_block_slot: Slot::new(0),
    // Zero parent root: no further blocks are needed to fill in history.
    oldest_block_parent: Hash256::ZERO,
    state_upper_limit: Slot::new(0),
    state_lower_limit: Slot::new(0),
};
/// The `AnchorInfo` encoding an uninitialized anchor.
///
/// This value should never exist except on initial start-up prior to the anchor being initialised
/// by `init_anchor_info`.
pub const ANCHOR_UNINITIALIZED: AnchorInfo = AnchorInfo {
    // `u64::MAX` sentinels mark the slot fields as not-yet-initialised.
    anchor_slot: Slot::new(u64::MAX),
    oldest_block_slot: Slot::new(u64::MAX),
    oldest_block_parent: Hash256::ZERO,
    state_upper_limit: Slot::new(u64::MAX),
    state_lower_limit: Slot::new(0),
};
/// Version number for the on-disk database schema.
///
/// Ordered (`PartialOrd`/`Ord`) so migrations can compare versions directly.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct SchemaVersion(pub u64);

impl SchemaVersion {
    /// Return the underlying integer version number.
    pub fn as_u64(self) -> u64 {
        self.0
    }
}
impl StoreItem for SchemaVersion {
|
|
fn db_column() -> DBColumn {
|
|
DBColumn::BeaconMeta
|
|
}
|
|
|
|
fn as_store_bytes(&self) -> Vec<u8> {
|
|
self.0.as_ssz_bytes()
|
|
}
|
|
|
|
fn from_store_bytes(bytes: &[u8]) -> Result<Self, Error> {
|
|
Ok(SchemaVersion(u64::from_ssz_bytes(bytes)?))
|
|
}
|
|
}
|
|
|
|
/// The checkpoint used for pruning the database.
///
/// Updated whenever pruning is successful.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct PruningCheckpoint {
    // The last checkpoint up to which pruning has completed.
    pub checkpoint: Checkpoint,
}
impl StoreItem for PruningCheckpoint {
|
|
fn db_column() -> DBColumn {
|
|
DBColumn::BeaconMeta
|
|
}
|
|
|
|
fn as_store_bytes(&self) -> Vec<u8> {
|
|
self.checkpoint.as_ssz_bytes()
|
|
}
|
|
|
|
fn from_store_bytes(bytes: &[u8]) -> Result<Self, Error> {
|
|
Ok(PruningCheckpoint {
|
|
checkpoint: Checkpoint::from_ssz_bytes(bytes)?,
|
|
})
|
|
}
|
|
}
|
|
|
|
/// The last time the database was compacted, as a Unix-style integer timestamp.
///
/// Derives added for consistency with the sibling metadata types (e.g.
/// `PruningCheckpoint`), so the value can be debug-printed and compared.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct CompactionTimestamp(pub u64);
impl StoreItem for CompactionTimestamp {
|
|
fn db_column() -> DBColumn {
|
|
DBColumn::BeaconMeta
|
|
}
|
|
|
|
fn as_store_bytes(&self) -> Vec<u8> {
|
|
self.0.as_ssz_bytes()
|
|
}
|
|
|
|
fn from_store_bytes(bytes: &[u8]) -> Result<Self, Error> {
|
|
Ok(CompactionTimestamp(u64::from_ssz_bytes(bytes)?))
|
|
}
|
|
}
|
|
|
|
/// Database parameters relevant to weak subjectivity sync.
#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode, Serialize, Deserialize)]
pub struct AnchorInfo {
    /// The slot at which the anchor state is present and which we cannot revert. Values on start:
    /// - Genesis start: 0
    /// - Checkpoint sync: Slot of the finalized checkpoint block
    ///
    /// Immutable
    pub anchor_slot: Slot,
    /// All blocks with slots greater than or equal to this value are available in the database.
    /// Additionally, the genesis block is always available.
    ///
    /// Values on start:
    /// - Genesis start: 0
    /// - Checkpoint sync: Slot of the finalized checkpoint block
    ///
    /// Progressively decreases during backfill sync until reaching 0.
    pub oldest_block_slot: Slot,
    /// The block root of the next block that needs to be added to fill in the history.
    ///
    /// Zero if we know all blocks back to genesis.
    pub oldest_block_parent: Hash256,
    /// All states with slots _greater than or equal to_ `min(split.slot, state_upper_limit)` are
    /// available in the database. If `state_upper_limit` is higher than `split.slot`, states are
    /// not being written to the freezer database.
    ///
    /// Values on start if state reconstruction is enabled:
    /// - Genesis start: 0
    /// - Checkpoint sync: Slot of the next scheduled snapshot
    ///
    /// Value on start if state reconstruction is disabled:
    /// - 2^64 - 1 (`STATE_UPPER_LIMIT_NO_RETAIN`) representing no historic state storage.
    ///
    /// Immutable until state reconstruction completes.
    pub state_upper_limit: Slot,
    /// All states with slots _less than or equal to_ this value are available in the database.
    /// The minimum value is 0, indicating that the genesis state is always available.
    ///
    /// Values on start:
    /// - Genesis start: 0
    /// - Checkpoint sync: 0
    ///
    /// When full block backfill completes (`oldest_block_slot == 0`) state reconstruction starts and
    /// this value will progressively increase until reaching `state_upper_limit`.
    pub state_lower_limit: Slot,
}
impl AnchorInfo {
|
|
/// Returns true if the block backfill has completed.
|
|
/// This is a comparison between the oldest block slot and the target backfill slot (which is
|
|
/// likely to be the closest WSP).
|
|
pub fn block_backfill_complete(&self, target_slot: Slot) -> bool {
|
|
self.oldest_block_slot <= target_slot
|
|
}
|
|
|
|
/// Return true if all historic states are stored, i.e. if state reconstruction is complete.
|
|
pub fn all_historic_states_stored(&self) -> bool {
|
|
self.state_lower_limit == self.state_upper_limit
|
|
}
|
|
|
|
/// Return true if no historic states other than genesis are stored in the database.
|
|
pub fn no_historic_states_stored(&self, split_slot: Slot) -> bool {
|
|
self.state_lower_limit == 0 && self.state_upper_limit >= split_slot
|
|
}
|
|
|
|
/// Return true if no historic states other than genesis *will ever be stored*.
|
|
pub fn full_state_pruning_enabled(&self) -> bool {
|
|
self.state_lower_limit == 0 && self.state_upper_limit == STATE_UPPER_LIMIT_NO_RETAIN
|
|
}
|
|
}
|
|
|
|
impl StoreItem for AnchorInfo {
|
|
fn db_column() -> DBColumn {
|
|
DBColumn::BeaconMeta
|
|
}
|
|
|
|
fn as_store_bytes(&self) -> Vec<u8> {
|
|
self.as_ssz_bytes()
|
|
}
|
|
|
|
fn from_store_bytes(bytes: &[u8]) -> Result<Self, Error> {
|
|
Ok(Self::from_ssz_bytes(bytes)?)
|
|
}
|
|
}
|
|
|
|
/// Database parameters relevant to blob sync.
#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode, Serialize, Deserialize, Default)]
pub struct BlobInfo {
    /// The slot after which blobs are or *will be* available (>=).
    ///
    /// If this slot is in the future, then it is the first slot of the Deneb fork, from which blobs
    /// will be available.
    ///
    /// If the `oldest_blob_slot` is `None` then this means that the Deneb fork epoch is not yet
    /// known.
    pub oldest_blob_slot: Option<Slot>,
    /// A separate blobs database is in use (deprecated, always `true`).
    pub blobs_db: bool,
}
impl StoreItem for BlobInfo {
|
|
fn db_column() -> DBColumn {
|
|
DBColumn::BeaconMeta
|
|
}
|
|
|
|
fn as_store_bytes(&self) -> Vec<u8> {
|
|
self.as_ssz_bytes()
|
|
}
|
|
|
|
fn from_store_bytes(bytes: &[u8]) -> Result<Self, Error> {
|
|
Ok(Self::from_ssz_bytes(bytes)?)
|
|
}
|
|
}
|
|
|
|
/// Database parameters relevant to data column sync.
#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode, Serialize, Deserialize, Default)]
pub struct DataColumnInfo {
    /// The slot after which data columns are or *will be* available (>=).
    ///
    /// If this slot is in the future, then it is the first slot of the EIP-7594 fork, from which
    /// data columns will be available.
    ///
    /// If the `oldest_data_column_slot` is `None` then this means that the EIP-7594 fork epoch is
    /// not yet known.
    pub oldest_data_column_slot: Option<Slot>,
}
impl StoreItem for DataColumnInfo {
|
|
fn db_column() -> DBColumn {
|
|
DBColumn::BeaconMeta
|
|
}
|
|
|
|
fn as_store_bytes(&self) -> Vec<u8> {
|
|
self.as_ssz_bytes()
|
|
}
|
|
|
|
fn from_store_bytes(bytes: &[u8]) -> Result<Self, Error> {
|
|
Ok(Self::from_ssz_bytes(bytes)?)
|
|
}
|
|
}
|