mirror of
https://github.com/sigp/lighthouse.git
synced 2026-03-22 06:14:38 +00:00
Implement checkpoint sync (#2244)
## Issue Addressed

Closes #1891
Closes #1784

## Proposed Changes

Implement checkpoint sync for Lighthouse, enabling it to start from a weak subjectivity checkpoint.

## Additional Info

- [x] Return unavailable status for out-of-range blocks requested by peers (#2561)
- [x] Implement sync daemon for fetching historical blocks (#2561)
- [x] Verify chain hashes (either in `historical_blocks.rs` or the calling module)
- [x] Consistency check for initial block + state
- [x] Fetch the initial state and block from a beacon node HTTP endpoint
- [x] Don't crash fetching beacon states by slot from the API
- [x] Background service for state reconstruction, triggered by CLI flag or API call.

Considered out of scope for this PR:

- Drop the requirement to provide the `--checkpoint-block` (this would require some pretty heavy refactoring of block verification)

Co-authored-by: Diva M <divma@protonmail.com>
This commit is contained in:
@@ -1,8 +1,10 @@
|
||||
use crate::{DBColumn, Error, StoreItem};
|
||||
use serde_derive::{Deserialize, Serialize};
|
||||
use ssz::{Decode, Encode};
|
||||
use types::{Checkpoint, Hash256};
|
||||
use ssz_derive::{Decode, Encode};
|
||||
use types::{Checkpoint, Hash256, Slot};
|
||||
|
||||
pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(4);
|
||||
pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(5);
|
||||
|
||||
// All the keys that get stored under the `BeaconMeta` column.
|
||||
//
|
||||
@@ -12,6 +14,7 @@ pub const CONFIG_KEY: Hash256 = Hash256::repeat_byte(1);
|
||||
pub const SPLIT_KEY: Hash256 = Hash256::repeat_byte(2);
|
||||
pub const PRUNING_CHECKPOINT_KEY: Hash256 = Hash256::repeat_byte(3);
|
||||
pub const COMPACTION_TIMESTAMP_KEY: Hash256 = Hash256::repeat_byte(4);
|
||||
pub const ANCHOR_INFO_KEY: Hash256 = Hash256::repeat_byte(5);
|
||||
|
||||
/// Version number for the on-disk database schema.
///
/// Ordered (`PartialOrd`/`Ord`) so migration code can compare the stored
/// version against `CURRENT_SCHEMA_VERSION`.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct SchemaVersion(pub u64);
|
||||
@@ -76,3 +79,41 @@ impl StoreItem for CompactionTimestamp {
|
||||
Ok(CompactionTimestamp(u64::from_ssz_bytes(bytes)?))
|
||||
}
|
||||
}
|
||||
|
||||
/// Database parameters relevant to weak subjectivity sync.
///
/// Persisted via its `StoreItem` impl (SSZ-encoded) in the `BeaconMeta`
/// column. NOTE(review): field order is load-bearing for the SSZ
/// `Encode`/`Decode` derives — do not reorder fields.
#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode, Serialize, Deserialize)]
pub struct AnchorInfo {
    /// The slot at which the anchor state is present and which we cannot revert.
    pub anchor_slot: Slot,
    /// The slot from which historical blocks are available (>=).
    pub oldest_block_slot: Slot,
    /// The block root of the next block that needs to be added to fill in the history.
    ///
    /// Zero if we know all blocks back to genesis.
    pub oldest_block_parent: Hash256,
    /// The slot from which historical states are available (>=).
    pub state_upper_limit: Slot,
    /// The slot before which historical states are available (<=).
    pub state_lower_limit: Slot,
}
|
||||
|
||||
impl AnchorInfo {
|
||||
/// Returns true if the block backfill has completed.
|
||||
pub fn block_backfill_complete(&self) -> bool {
|
||||
self.oldest_block_slot == 0
|
||||
}
|
||||
}
|
||||
|
||||
impl StoreItem for AnchorInfo {
|
||||
fn db_column() -> DBColumn {
|
||||
DBColumn::BeaconMeta
|
||||
}
|
||||
|
||||
fn as_store_bytes(&self) -> Vec<u8> {
|
||||
self.as_ssz_bytes()
|
||||
}
|
||||
|
||||
fn from_store_bytes(bytes: &[u8]) -> Result<Self, Error> {
|
||||
Ok(Self::from_ssz_bytes(bytes)?)
|
||||
}
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user