mirror of
https://github.com/sigp/lighthouse.git
synced 2026-03-15 10:52:43 +00:00
## Issue Addressed Closes #1891 Closes #1784 ## Proposed Changes Implement checkpoint sync for Lighthouse, enabling it to start from a weak subjectivity checkpoint. ## Additional Info - [x] Return unavailable status for out-of-range blocks requested by peers (#2561) - [x] Implement sync daemon for fetching historical blocks (#2561) - [x] Verify chain hashes (either in `historical_blocks.rs` or the calling module) - [x] Consistency check for initial block + state - [x] Fetch the initial state and block from a beacon node HTTP endpoint - [x] Don't crash fetching beacon states by slot from the API - [x] Background service for state reconstruction, triggered by CLI flag or API call. Considered out of scope for this PR: - Drop the requirement to provide the `--checkpoint-block` (this would require some pretty heavy refactoring of block verification) Co-authored-by: Diva M <divma@protonmail.com>
80 lines
2.5 KiB
Rust
use crate::{DBColumn, Error, StoreItem};
|
|
use serde_derive::{Deserialize, Serialize};
|
|
use ssz::{Decode, Encode};
|
|
use ssz_derive::{Decode, Encode};
|
|
use types::{EthSpec, MinimalEthSpec};
|
|
|
|
/// Default number of slots between restore points stored in the freezer database.
pub const DEFAULT_SLOTS_PER_RESTORE_POINT: u64 = 2048;

/// Default maximum number of blocks held in the in-memory block cache.
pub const DEFAULT_BLOCK_CACHE_SIZE: usize = 5;
|
|
|
|
/// Database configuration parameters.
///
/// This is the full runtime configuration; the immutable subset that is persisted
/// to disk lives in `OnDiskStoreConfig`.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct StoreConfig {
    /// Number of slots to wait between storing restore points in the freezer database.
    pub slots_per_restore_point: u64,
    /// Maximum number of blocks to store in the in-memory block cache.
    pub block_cache_size: usize,
    /// Whether to compact the database on initialization.
    pub compact_on_init: bool,
    /// Whether to compact the database during database pruning.
    pub compact_on_prune: bool,
}
|
|
|
|
/// Variant of `StoreConfig` that gets written to disk. Contains immutable configuration params.
#[derive(Debug, Clone, PartialEq, Eq, Encode, Decode)]
pub struct OnDiskStoreConfig {
    /// Number of slots between restore points; must match the runtime
    /// `StoreConfig::slots_per_restore_point` (see `check_compatibility`).
    pub slots_per_restore_point: u64,
}
|
|
|
|
/// Errors arising from comparing the runtime configuration against the on-disk configuration.
#[derive(Debug, Clone)]
pub enum StoreConfigError {
    /// The configured `slots_per_restore_point` disagrees with the value the
    /// database was created with; changing it would corrupt freezer lookups.
    MismatchedSlotsPerRestorePoint { config: u64, on_disk: u64 },
}
|
|
|
|
impl Default for StoreConfig {
|
|
fn default() -> Self {
|
|
Self {
|
|
// Safe default for tests, shouldn't ever be read by a CLI node.
|
|
slots_per_restore_point: MinimalEthSpec::slots_per_historical_root() as u64,
|
|
block_cache_size: DEFAULT_BLOCK_CACHE_SIZE,
|
|
compact_on_init: false,
|
|
compact_on_prune: true,
|
|
}
|
|
}
|
|
}
|
|
|
|
impl StoreConfig {
|
|
pub fn as_disk_config(&self) -> OnDiskStoreConfig {
|
|
OnDiskStoreConfig {
|
|
slots_per_restore_point: self.slots_per_restore_point,
|
|
}
|
|
}
|
|
|
|
pub fn check_compatibility(
|
|
&self,
|
|
on_disk_config: &OnDiskStoreConfig,
|
|
) -> Result<(), StoreConfigError> {
|
|
if self.slots_per_restore_point != on_disk_config.slots_per_restore_point {
|
|
return Err(StoreConfigError::MismatchedSlotsPerRestorePoint {
|
|
config: self.slots_per_restore_point,
|
|
on_disk: on_disk_config.slots_per_restore_point,
|
|
});
|
|
}
|
|
Ok(())
|
|
}
|
|
}
|
|
|
|
impl StoreItem for OnDiskStoreConfig {
|
|
fn db_column() -> DBColumn {
|
|
DBColumn::BeaconMeta
|
|
}
|
|
|
|
fn as_store_bytes(&self) -> Vec<u8> {
|
|
self.as_ssz_bytes()
|
|
}
|
|
|
|
fn from_store_bytes(bytes: &[u8]) -> Result<Self, Error> {
|
|
Ok(Self::from_ssz_bytes(bytes)?)
|
|
}
|
|
}
|