Implement checkpoint sync (#2244)

## Issue Addressed

Closes #1891
Closes #1784

## Proposed Changes

Implement checkpoint sync for Lighthouse, enabling it to start from a weak subjectivity checkpoint.

## Additional Info

- [x] Return unavailable status for out-of-range blocks requested by peers (#2561)
- [x] Implement sync daemon for fetching historical blocks (#2561)
- [x] Verify chain hashes (either in `historical_blocks.rs` or the calling module)
- [x] Consistency check for initial block + state
- [x] Fetch the initial state and block from a beacon node HTTP endpoint
- [x] Don't crash fetching beacon states by slot from the API
- [x] Background service for state reconstruction, triggered by CLI flag or API call

Considered out of scope for this PR:

- Drop the requirement to provide the `--checkpoint-block` flag (this would require some pretty heavy refactoring of block verification)


Co-authored-by: Diva M <divma@protonmail.com>
Author: Michael Sproul
Date: 2021-09-22 00:37:28 +00:00
Parent: 280e4fe23d
Commit: 9667dc2f03
71 changed files with 4012 additions and 459 deletions


@@ -0,0 +1,75 @@
use crate::chunked_vector::{chunk_key, Chunk, ChunkError, Field};
use crate::{Error, KeyValueStore, KeyValueStoreOp};
use types::EthSpec;
/// Buffered writer for chunked vectors (block roots mainly).
pub struct ChunkWriter<'a, F, E, S>
where
F: Field<E>,
E: EthSpec,
S: KeyValueStore<E>,
{
/// Buffered chunk awaiting writing to disk (always dirty).
chunk: Chunk<F::Value>,
/// Chunk index of `chunk`.
index: usize,
store: &'a S,
}
impl<'a, F, E, S> ChunkWriter<'a, F, E, S>
where
F: Field<E>,
E: EthSpec,
S: KeyValueStore<E>,
{
pub fn new(store: &'a S, vindex: usize) -> Result<Self, Error> {
let chunk_index = F::chunk_index(vindex);
let chunk = Chunk::load(store, F::column(), &chunk_key(chunk_index))?
.unwrap_or_else(|| Chunk::new(vec![F::Value::default(); F::chunk_size()]));
Ok(Self {
chunk,
index: chunk_index,
store,
})
}
/// Set the value at a given vector index, writing the current chunk and moving on if necessary.
pub fn set(
&mut self,
vindex: usize,
value: F::Value,
batch: &mut Vec<KeyValueStoreOp>,
) -> Result<(), Error> {
let chunk_index = F::chunk_index(vindex);
// Advance to the next chunk.
if chunk_index != self.index {
self.write(batch)?;
*self = Self::new(self.store, vindex)?;
}
let i = vindex % F::chunk_size();
let existing_value = &self.chunk.values[i];
if existing_value == &value || existing_value == &F::Value::default() {
self.chunk.values[i] = value;
Ok(())
} else {
Err(ChunkError::Inconsistent {
field: F::column(),
chunk_index,
existing_value: format!("{:?}", existing_value),
new_value: format!("{:?}", value),
}
.into())
}
}
/// Write the current chunk to disk.
///
/// Should be called before the writer is dropped, in order to write the final chunk to disk.
pub fn write(&self, batch: &mut Vec<KeyValueStoreOp>) -> Result<(), Error> {
self.chunk.store(F::column(), &chunk_key(self.index), batch)
}
}
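
For context, a minimal, hypothetical caller sketch (not part of the diff) showing how `ChunkWriter` is intended to be driven: set values sequentially, then flush the final chunk before committing the batch. The helper name `write_values` is made up, and it assumes the crate's existing `Field`, `KeyValueStore` and `do_atomically` items as used elsewhere in this PR.

```rust
use crate::chunked_vector::Field;
use crate::{ChunkWriter, Error, KeyValueStore};
use types::EthSpec;

/// Hypothetical helper: write a contiguous run of values starting at `start_vindex`.
fn write_values<F, E, S>(store: &S, start_vindex: usize, values: &[F::Value]) -> Result<(), Error>
where
    F: Field<E>,
    E: EthSpec,
    S: KeyValueStore<E>,
{
    let mut batch = vec![];
    let mut writer = ChunkWriter::<F, E, S>::new(store, start_vindex)?;

    for (i, value) in values.iter().enumerate() {
        // `set` flushes the buffered chunk and loads the next one whenever the
        // vector index crosses a chunk boundary.
        writer.set(start_vindex + i, value.clone(), &mut batch)?;
    }

    // Flush the final, partially written chunk before the writer is dropped.
    writer.write(&mut batch)?;
    store.do_atomically(batch)
}
```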


@@ -97,7 +97,7 @@ where
self.current_chunk = Chunk::load(
&self.store.cold_db,
F::column(),
&chunk_key(self.next_cindex as u64),
&chunk_key(self.next_cindex),
)
.map_err(|e| {
error!(


@@ -34,8 +34,8 @@ pub enum UpdatePattern {
/// Map a chunk index to bytes that can be used to key the NoSQL database.
///
/// We shift chunks up by 1 to make room for a genesis chunk that is handled separately.
pub fn chunk_key(cindex: u64) -> [u8; 8] {
(cindex + 1).to_be_bytes()
pub fn chunk_key(cindex: usize) -> [u8; 8] {
(cindex as u64 + 1).to_be_bytes()
}
/// Return the database key for the genesis value.
@@ -73,6 +73,11 @@ pub trait Field<E: EthSpec>: Copy {
128
}
/// Convert a v-index (vector index) to a chunk index.
fn chunk_index(vindex: usize) -> usize {
vindex / Self::chunk_size()
}
/// Get the value of this field at the given vector index, from the state.
fn get_value(
state: &BeaconState<E>,
@@ -399,7 +404,7 @@ where
I: Iterator<Item = usize>,
{
for chunk_index in range {
let chunk_key = &chunk_key(chunk_index as u64)[..];
let chunk_key = &chunk_key(chunk_index)[..];
let existing_chunk =
Chunk::<F::Value>::load(store, F::column(), chunk_key)?.unwrap_or_else(Chunk::default);
@@ -440,7 +445,7 @@ fn range_query<S: KeyValueStore<E>, E: EthSpec, T: Decode + Encode>(
let mut result = Vec::with_capacity(len);
for chunk_index in range {
let key = &chunk_key(chunk_index as u64)[..];
let key = &chunk_key(chunk_index)[..];
let chunk = Chunk::load(store, column, key)?.ok_or(ChunkError::Missing { chunk_index })?;
result.push(chunk);
}
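
To make the keying scheme above concrete, a small hypothetical example using the default chunk size of 128: vector index 300 lands in chunk 2, which is stored under the big-endian key for 3, because chunks are shifted up by one to leave room for the genesis value.

```rust
// Standalone illustration of `chunk_index` / `chunk_key` with chunk_size = 128.
fn chunk_index(vindex: usize) -> usize {
    vindex / 128
}

fn chunk_key(cindex: usize) -> [u8; 8] {
    // Shift up by 1 so that key 0 remains reserved for the genesis value.
    (cindex as u64 + 1).to_be_bytes()
}

fn main() {
    // Vector index 300 falls in chunk 2 (values 256..384), stored under key 3.
    assert_eq!(chunk_index(300), 2);
    assert_eq!(chunk_key(chunk_index(300)), 3u64.to_be_bytes());
}
```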


@@ -24,8 +24,6 @@ pub struct StoreConfig {
#[derive(Debug, Clone, PartialEq, Eq, Encode, Decode)]
pub struct OnDiskStoreConfig {
pub slots_per_restore_point: u64,
// NOTE: redundant, see https://github.com/sigp/lighthouse/issues/1784
pub _block_cache_size: usize,
}
#[derive(Debug, Clone)]
@@ -49,7 +47,6 @@ impl StoreConfig {
pub fn as_disk_config(&self) -> OnDiskStoreConfig {
OnDiskStoreConfig {
slots_per_restore_point: self.slots_per_restore_point,
_block_cache_size: DEFAULT_BLOCK_CACHE_SIZE,
}
}


@@ -13,13 +13,46 @@ pub enum Error {
BeaconStateError(BeaconStateError),
PartialBeaconStateError,
HotColdDBError(HotColdDBError),
DBError { message: String },
DBError {
message: String,
},
RlpError(String),
BlockNotFound(Hash256),
NoContinuationData,
SplitPointModified(Slot, Slot),
ConfigError(StoreConfigError),
SchemaMigrationError(String),
/// The store's `anchor_info` was mutated concurrently, the latest modification wasn't applied.
AnchorInfoConcurrentMutation,
/// The block or state is unavailable due to weak subjectivity sync.
HistoryUnavailable,
/// State reconstruction cannot commence because not all historic blocks are known.
MissingHistoricBlocks {
oldest_block_slot: Slot,
},
/// State reconstruction failed because it didn't reach the upper limit slot.
///
/// This should never happen (it's a logic error).
StateReconstructionDidNotComplete,
StateReconstructionRootMismatch {
slot: Slot,
expected: Hash256,
computed: Hash256,
},
}
pub trait HandleUnavailable<T> {
fn handle_unavailable(self) -> std::result::Result<Option<T>, Error>;
}
impl<T> HandleUnavailable<T> for Result<T> {
fn handle_unavailable(self) -> std::result::Result<Option<T>, Error> {
match self {
Ok(x) => Ok(Some(x)),
Err(Error::HistoryUnavailable) => Ok(None),
Err(e) => Err(e),
}
}
}
impl From<DecodeError> for Error {

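A hypothetical call site (not in the diff) showing the intended use of `HandleUnavailable`: pre-checkpoint history is treated as absent rather than fatal, while every other error still propagates. The helper name and the zero-root fallback are illustrative only, and `Result<T>` is assumed to be the crate's alias for `std::result::Result<T, Error>`.

```rust
use crate::errors::{Error, HandleUnavailable};
use types::Hash256;

/// Hypothetical helper: fall back to the zero root when the requested data
/// predates the weak subjectivity checkpoint.
fn state_root_or_zero(lookup: Result<Hash256, Error>) -> Result<Hash256, Error> {
    // `HistoryUnavailable` becomes `Ok(None)`; all other errors still bubble up.
    Ok(lookup.handle_unavailable()?.unwrap_or_else(Hash256::zero))
}
```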

@@ -3,17 +3,15 @@ use crate::chunked_vector::{
};
use crate::config::{OnDiskStoreConfig, StoreConfig};
use crate::forwards_iter::{HybridForwardsBlockRootsIterator, HybridForwardsStateRootsIterator};
use crate::impls::{
beacon_block_as_kv_store_op,
beacon_state::{get_full_state, store_full_state},
};
use crate::impls::beacon_state::{get_full_state, store_full_state};
use crate::iter::{ParentRootBlockIterator, StateRootsIterator};
use crate::leveldb_store::BytesKey;
use crate::leveldb_store::LevelDB;
use crate::memory_store::MemoryStore;
use crate::metadata::{
CompactionTimestamp, PruningCheckpoint, SchemaVersion, COMPACTION_TIMESTAMP_KEY, CONFIG_KEY,
CURRENT_SCHEMA_VERSION, PRUNING_CHECKPOINT_KEY, SCHEMA_VERSION_KEY, SPLIT_KEY,
AnchorInfo, CompactionTimestamp, PruningCheckpoint, SchemaVersion, ANCHOR_INFO_KEY,
COMPACTION_TIMESTAMP_KEY, CONFIG_KEY, CURRENT_SCHEMA_VERSION, PRUNING_CHECKPOINT_KEY,
SCHEMA_VERSION_KEY, SPLIT_KEY,
};
use crate::metrics;
use crate::{
@@ -23,13 +21,15 @@ use crate::{
use leveldb::iterator::LevelDBIterator;
use lru::LruCache;
use parking_lot::{Mutex, RwLock};
use slog::{debug, error, info, trace, warn, Logger};
use serde_derive::{Deserialize, Serialize};
use slog::{debug, error, info, trace, Logger};
use ssz::{Decode, Encode};
use ssz_derive::{Decode, Encode};
use state_processing::{
per_block_processing, per_slot_processing, BlockProcessingError, BlockSignatureStrategy,
SlotProcessingError,
};
use std::cmp::min;
use std::convert::TryInto;
use std::marker::PhantomData;
use std::path::Path;
@@ -57,8 +57,10 @@ pub struct HotColdDB<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> {
///
/// States with slots less than `split.slot` are in the cold DB, while states with slots
/// greater than or equal are in the hot DB.
split: RwLock<Split>,
config: StoreConfig,
pub(crate) split: RwLock<Split>,
/// The starting slots for the range of blocks & states stored in the database.
anchor_info: RwLock<Option<AnchorInfo>>,
pub(crate) config: StoreConfig,
/// Cold database containing compact historical data.
pub cold_db: Cold,
/// Hot database containing duplicated but quick-to-access recent data.
@@ -68,7 +70,7 @@ pub struct HotColdDB<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> {
/// LRU cache of deserialized blocks. Updated whenever a block is loaded.
block_cache: Mutex<LruCache<Hash256, SignedBeaconBlock<E>>>,
/// Chain spec.
spec: ChainSpec,
pub(crate) spec: ChainSpec,
/// Logger.
pub(crate) log: Logger,
/// Mere vessel for E.
@@ -95,11 +97,13 @@ pub enum HotColdDBError {
MissingHotStateSummary(Hash256),
MissingEpochBoundaryState(Hash256),
MissingSplitState(Hash256, Slot),
MissingAnchorInfo,
HotStateSummaryError(BeaconStateError),
RestorePointDecodeError(ssz::DecodeError),
BlockReplayBeaconError(BeaconStateError),
BlockReplaySlotError(SlotProcessingError),
BlockReplayBlockError(BlockProcessingError),
MissingLowerLimitState(Slot),
InvalidSlotsPerRestorePoint {
slots_per_restore_point: u64,
slots_per_historical_root: u64,
@@ -126,6 +130,7 @@ impl<E: EthSpec> HotColdDB<E, MemoryStore<E>, MemoryStore<E>> {
let db = HotColdDB {
split: RwLock::new(Split::default()),
anchor_info: RwLock::new(None),
cold_db: MemoryStore::open(),
hot_db: MemoryStore::open(),
block_cache: Mutex::new(LruCache::new(config.block_cache_size)),
@@ -158,6 +163,7 @@ impl<E: EthSpec> HotColdDB<E, LevelDB<E>, LevelDB<E>> {
let db = Arc::new(HotColdDB {
split: RwLock::new(Split::default()),
anchor_info: RwLock::new(None),
cold_db: LevelDB::open(cold_path)?,
hot_db: LevelDB::open(hot_path)?,
block_cache: Mutex::new(LruCache::new(config.block_cache_size)),
@@ -190,6 +196,9 @@ impl<E: EthSpec> HotColdDB<E, LevelDB<E>, LevelDB<E>> {
// Load the previous split slot from the database (if any). This ensures we can
// stop and restart correctly.
if let Some(split) = db.load_split()? {
*db.split.write() = split;
*db.anchor_info.write() = db.load_anchor_info()?;
info!(
db.log,
"Hot-Cold DB initialized";
@@ -197,7 +206,6 @@ impl<E: EthSpec> HotColdDB<E, LevelDB<E>, LevelDB<E>> {
"split_slot" => split.slot,
"split_state" => format!("{:?}", split.state_root)
);
*db.split.write() = split;
}
// Run a garbage collection pass.
@@ -243,8 +251,8 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
block: SignedBeaconBlock<E>,
) -> Result<(), Error> {
// Store on disk.
self.hot_db
.do_atomically(vec![beacon_block_as_kv_store_op(block_root, &block)])?;
let op = self.block_as_kv_store_op(block_root, &block);
self.hot_db.do_atomically(vec![op])?;
// Update cache.
self.block_cache.lock().put(*block_root, block);
@@ -252,6 +260,18 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
Ok(())
}
/// Prepare a signed beacon block for storage in the database.
#[must_use]
pub fn block_as_kv_store_op(
&self,
key: &Hash256,
block: &SignedBeaconBlock<E>,
) -> KeyValueStoreOp {
// FIXME(altair): re-add block write/overhead metrics, or remove them
let db_key = get_key_for_col(DBColumn::BeaconBlock.into(), key.as_bytes());
KeyValueStoreOp::PutKeyValue(db_key, block.as_ssz_bytes())
}
/// Fetch a block from the store.
pub fn get_block(&self, block_root: &Hash256) -> Result<Option<SignedBeaconBlock<E>>, Error> {
metrics::inc_counter(&metrics::BEACON_BLOCK_GET_COUNT);
@@ -467,7 +487,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
Some(state_slot) => {
let epoch_boundary_slot =
state_slot / E::slots_per_epoch() * E::slots_per_epoch();
self.load_cold_state_by_slot(epoch_boundary_slot).map(Some)
self.load_cold_state_by_slot(epoch_boundary_slot)
}
None => Ok(None),
}
@@ -492,7 +512,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
for op in batch {
match op {
StoreOp::PutBlock(block_root, block) => {
key_value_batch.push(beacon_block_as_kv_store_op(block_root, block));
key_value_batch.push(self.block_as_kv_store_op(block_root, block));
}
StoreOp::PutState(state_root, state) => {
@@ -563,6 +583,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
}
Ok(())
}
/// Store a post-finalization state efficiently in the hot database.
///
/// On an epoch boundary, store a full state. On an intermediate slot, store
@@ -639,21 +660,16 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
/// Store a pre-finalization state in the freezer database.
///
/// Will log a warning and not store anything if the state does not lie on a restore point
/// boundary.
/// If the state doesn't lie on a restore point boundary then just its summary will be stored.
pub fn store_cold_state(
&self,
state_root: &Hash256,
state: &BeaconState<E>,
ops: &mut Vec<KeyValueStoreOp>,
) -> Result<(), Error> {
ops.push(ColdStateSummary { slot: state.slot() }.as_kv_store_op(*state_root));
if state.slot() % self.config.slots_per_restore_point != 0 {
warn!(
self.log,
"Not storing non-restore point state in freezer";
"slot" => state.slot().as_u64(),
"state_root" => format!("{:?}", state_root)
);
return Ok(());
}
@@ -688,7 +704,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
/// Return `None` if no state with `state_root` lies in the freezer.
pub fn load_cold_state(&self, state_root: &Hash256) -> Result<Option<BeaconState<E>>, Error> {
match self.load_cold_state_slot(state_root)? {
Some(slot) => self.load_cold_state_by_slot(slot).map(Some),
Some(slot) => self.load_cold_state_by_slot(slot),
None => Ok(None),
}
}
@@ -696,12 +712,22 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
/// Load a pre-finalization state from the freezer database.
///
/// Will reconstruct the state if it lies between restore points.
pub fn load_cold_state_by_slot(&self, slot: Slot) -> Result<BeaconState<E>, Error> {
if slot % self.config.slots_per_restore_point == 0 {
let restore_point_idx = slot.as_u64() / self.config.slots_per_restore_point;
self.load_restore_point_by_index(restore_point_idx)
pub fn load_cold_state_by_slot(&self, slot: Slot) -> Result<Option<BeaconState<E>>, Error> {
// Guard against fetching states that do not exist due to gaps in the historic state
// database, which can occur due to checkpoint sync or re-indexing.
// See the comments in `get_historic_state_limits` for more information.
let (lower_limit, upper_limit) = self.get_historic_state_limits();
if slot <= lower_limit || slot >= upper_limit {
if slot % self.config.slots_per_restore_point == 0 {
let restore_point_idx = slot.as_u64() / self.config.slots_per_restore_point;
self.load_restore_point_by_index(restore_point_idx)
} else {
self.load_cold_intermediate_state(slot)
}
.map(Some)
} else {
self.load_cold_intermediate_state(slot)
Ok(None)
}
}
@@ -742,17 +768,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
let split = self.split.read_recursive();
let low_restore_point = self.load_restore_point_by_index(low_restore_point_idx)?;
// If the slot of the high point lies outside the freezer, use the split state
// as the upper restore point.
let high_restore_point = if high_restore_point_idx * self.config.slots_per_restore_point
>= split.slot.as_u64()
{
self.get_state(&split.state_root, Some(split.slot))?.ok_or(
HotColdDBError::MissingSplitState(split.state_root, split.slot),
)?
} else {
self.load_restore_point_by_index(high_restore_point_idx)?
};
let high_restore_point = self.get_restore_point(high_restore_point_idx, &split)?;
// 2. Load the blocks from the high restore point back to the low restore point.
let blocks = self.load_blocks_to_replay(
@@ -765,6 +781,24 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
self.replay_blocks(low_restore_point, blocks, slot, BlockReplay::Accurate)
}
/// Get the restore point with the given index, or if it is out of bounds, the split state.
pub(crate) fn get_restore_point(
&self,
restore_point_idx: u64,
split: &Split,
) -> Result<BeaconState<E>, Error> {
if restore_point_idx * self.config.slots_per_restore_point >= split.slot.as_u64() {
self.get_state(&split.state_root, Some(split.slot))?
.ok_or(HotColdDBError::MissingSplitState(
split.state_root,
split.slot,
))
.map_err(Into::into)
} else {
self.load_restore_point_by_index(restore_point_idx)
}
}
/// Get a suitable block root for backtracking from `high_restore_point` to the state at `slot`.
///
/// Defaults to the block root for `slot`, which *should* be in range.
@@ -800,12 +834,21 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
.as_ref()
.map_or(true, |block| block.slot() <= end_slot)
})
// Include the block at the start slot (if any). Whilst it doesn't need to be applied
// to the state, it contains a potentially useful state root.
.take_while(|result| {
result
.as_ref()
.map_or(true, |block| block.slot() >= start_slot)
// Include the block at the start slot (if any). Whilst it doesn't need to be
// applied to the state, it contains a potentially useful state root.
//
// Return `true` on an `Err` so that the `collect` fails, unless the error is a
// `BlockNotFound` error and some blocks are intentionally missing from the DB.
// This complexity is unfortunately necessary to avoid loading the parent of the
// oldest known block -- we can't know that we have all the required blocks until we
// load a block with slot less than the start slot, which is impossible if there are
// no blocks with slot less than the start slot.
.take_while(|result| match result {
Ok(block) => block.slot() >= start_slot,
Err(Error::BlockNotFound(_)) => {
self.get_oldest_block_slot() == self.spec.genesis_slot
}
Err(_) => true,
})
.collect::<Result<_, _>>()?;
blocks.reverse();
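
The `take_while` predicate above is subtle, so here is a stripped-down, hypothetical model of its behaviour (simplified types, made-up names): a `BlockNotFound` error terminates collection quietly when history is known to be incomplete, but is surfaced by the subsequent `collect` when all blocks back to genesis should be present.

```rust
#[derive(Debug)]
enum LoadError {
    BlockNotFound(u64),
    Other,
}

/// Simplified stand-in for the predicate: `history_complete` models
/// `get_oldest_block_slot() == genesis_slot`.
fn keep(result: &Result<u64, LoadError>, start_slot: u64, history_complete: bool) -> bool {
    match result {
        Ok(slot) => *slot >= start_slot,
        // Missing parent of the oldest known block: expected after checkpoint sync.
        Err(LoadError::BlockNotFound(_)) => history_complete,
        // Any other error must propagate via the subsequent `collect`.
        Err(LoadError::Other) => true,
    }
}

fn main() {
    // Incomplete history: hitting the missing block stops collection cleanly.
    let results = vec![Ok(5), Ok(4), Err(LoadError::BlockNotFound(3))];
    let collected: Result<Vec<u64>, _> =
        results.into_iter().take_while(|r| keep(r, 4, false)).collect();
    assert_eq!(collected.unwrap(), vec![5, 4]);

    // Complete history: the same missing block is a hard error.
    let results = vec![Ok(5), Ok(4), Err(LoadError::BlockNotFound(3))];
    let collected: Result<Vec<u64>, _> =
        results.into_iter().take_while(|r| keep(r, 4, true)).collect();
    assert!(collected.is_err());
}
```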
@@ -904,6 +947,15 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
self.split.read_recursive().slot
}
/// Fetch a copy of the current split slot from memory.
pub fn get_split_info(&self) -> Split {
*self.split.read_recursive()
}
pub fn set_split(&self, slot: Slot, state_root: Hash256) {
*self.split.write() = Split { slot, state_root };
}
/// Fetch the slot of the most recently stored restore point.
pub fn get_latest_restore_point_slot(&self) -> Slot {
(self.get_split_slot() - 1) / self.config.slots_per_restore_point
@@ -920,6 +972,122 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
self.hot_db.put(&SCHEMA_VERSION_KEY, &schema_version)
}
/// Initialise the anchor info for checkpoint sync starting from `block`.
pub fn init_anchor_info(&self, block: BeaconBlockRef<'_, E>) -> Result<(), Error> {
let anchor_slot = block.slot();
let slots_per_restore_point = self.config.slots_per_restore_point;
// Set the `state_upper_limit` to the slot of the *next* restore point.
// See `get_state_upper_limit` for rationale.
let next_restore_point_slot = if anchor_slot % slots_per_restore_point == 0 {
anchor_slot
} else {
(anchor_slot / slots_per_restore_point + 1) * slots_per_restore_point
};
let anchor_info = AnchorInfo {
anchor_slot,
oldest_block_slot: anchor_slot,
oldest_block_parent: block.parent_root(),
state_upper_limit: next_restore_point_slot,
state_lower_limit: self.spec.genesis_slot,
};
self.compare_and_set_anchor_info(None, Some(anchor_info))
}
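
A worked example (hypothetical helper, not part of the diff) of the rounding used for `state_upper_limit`: with `slots_per_restore_point = 2048`, an anchor at slot 3072 maps to the next restore point at slot 4096, while an anchor already on a boundary is kept as-is.

```rust
fn next_restore_point_slot(anchor_slot: u64, slots_per_restore_point: u64) -> u64 {
    if anchor_slot % slots_per_restore_point == 0 {
        anchor_slot
    } else {
        // Round up to the next multiple of `slots_per_restore_point`.
        (anchor_slot / slots_per_restore_point + 1) * slots_per_restore_point
    }
}

fn main() {
    assert_eq!(next_restore_point_slot(3072, 2048), 4096);
    // An anchor already on a restore point boundary is kept unchanged.
    assert_eq!(next_restore_point_slot(4096, 2048), 4096);
}
```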
/// Get a clone of the store's anchor info.
///
/// To do mutations, use `compare_and_set_anchor_info`.
pub fn get_anchor_info(&self) -> Option<AnchorInfo> {
self.anchor_info.read_recursive().clone()
}
/// Atomically update the anchor info from `prev_value` to `new_value`.
///
/// Return an `AnchorInfoConcurrentMutation` error if the `prev_value` provided
/// is not correct.
pub fn compare_and_set_anchor_info(
&self,
prev_value: Option<AnchorInfo>,
new_value: Option<AnchorInfo>,
) -> Result<(), Error> {
let mut anchor_info = self.anchor_info.write();
if *anchor_info == prev_value {
self.store_anchor_info(&new_value)?;
*anchor_info = new_value;
Ok(())
} else {
Err(Error::AnchorInfoConcurrentMutation)
}
}
/// Load the anchor info from disk, but do not set `self.anchor_info`.
fn load_anchor_info(&self) -> Result<Option<AnchorInfo>, Error> {
self.hot_db.get(&ANCHOR_INFO_KEY)
}
/// Store the given `anchor_info` to disk.
///
/// The argument is intended to be `self.anchor_info`, but is passed manually to avoid issues
/// with recursive locking.
fn store_anchor_info(&self, anchor_info: &Option<AnchorInfo>) -> Result<(), Error> {
if let Some(ref anchor_info) = anchor_info {
self.hot_db.put(&ANCHOR_INFO_KEY, anchor_info)?;
} else {
self.hot_db.delete::<AnchorInfo>(&ANCHOR_INFO_KEY)?;
}
Ok(())
}
/// If an anchor exists, return its `anchor_slot` field.
pub fn get_anchor_slot(&self) -> Option<Slot> {
self.anchor_info
.read_recursive()
.as_ref()
.map(|a| a.anchor_slot)
}
/// Return the slot-window describing the available historic states.
///
/// Returns `(lower_limit, upper_limit)`.
///
/// The lower limit is the maximum slot such that frozen states are available for all
/// previous slots (<=).
///
/// The upper limit is the minimum slot such that frozen states are available for all
/// subsequent slots (>=).
///
/// If `lower_limit >= upper_limit` then all states are available. This will be true
/// if the database is completely filled in, as we'll return `(split_slot, 0)` in this
/// instance.
pub fn get_historic_state_limits(&self) -> (Slot, Slot) {
// If checkpoint sync is used then states in the hot DB will always be available, but may
// become unavailable as finalisation advances due to the lack of a restore point in the
// database. For this reason we take the minimum of the split slot and the
// restore-point-aligned `state_upper_limit`, which should be set _ahead_ of the checkpoint
// slot during initialisation.
//
// E.g. if we start from a checkpoint at slot 2048+1024=3072 with SPRP=2048, then states
// with slots 3072-4095 will be available only while they are in the hot database, and this
// function will return the current split slot as the upper limit. Once slot 4096 is reached
// a new restore point will be created at that slot, making all states from 4096 onwards
// permanently available.
let split_slot = self.get_split_slot();
self.anchor_info
.read_recursive()
.as_ref()
.map_or((split_slot, self.spec.genesis_slot), |a| {
(a.state_lower_limit, min(a.state_upper_limit, split_slot))
})
}
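
To illustrate the limits above with the same numbers as the comment (checkpoint at slot 3072, SPRP = 2048), a hypothetical standalone model: with the anchor set and the split at slot 3300, states are reported available only for slots <= 0 or >= 3300, leaving 1..=3299 as the gap until reconstruction or further finalization fills it; without an anchor the function returns `(split_slot, 0)`, i.e. everything is available.

```rust
/// Simplified model: `anchor` is `(state_lower_limit, state_upper_limit)`.
fn historic_state_limits(anchor: Option<(u64, u64)>, split_slot: u64) -> (u64, u64) {
    match anchor {
        // No anchor: every state back to genesis is present.
        None => (split_slot, 0),
        // Cap the upper limit at the split, since hot states are always available.
        Some((lower, upper)) => (lower, upper.min(split_slot)),
    }
}

fn main() {
    // Checkpoint-synced node: gap covers slots 1..=3299.
    assert_eq!(historic_state_limits(Some((0, 4096)), 3300), (0, 3300));
    // Fully-synced node: lower >= upper, so all states are available.
    assert_eq!(historic_state_limits(None, 3300), (3300, 0));
}
```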
/// Return the minimum slot such that blocks are available for all subsequent slots.
pub fn get_oldest_block_slot(&self) -> Slot {
self.anchor_info
.read_recursive()
.as_ref()
.map_or(self.spec.genesis_slot, |anchor| anchor.oldest_block_slot)
}
/// Load previously-stored config from disk.
fn load_config(&self) -> Result<Option<OnDiskStoreConfig>, Error> {
self.hot_db.get(&CONFIG_KEY)
@@ -935,6 +1103,12 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
self.hot_db.get(&SPLIT_KEY)
}
/// Store the split point to disk.
pub fn store_split(&self) -> Result<(), Error> {
self.hot_db.put_sync(&SPLIT_KEY, &*self.split.read())?;
Ok(())
}
/// Load the state root of a restore point.
fn load_restore_point_hash(&self, restore_point_index: u64) -> Result<Hash256, Error> {
let key = Self::restore_point_key(restore_point_index);
@@ -1037,6 +1211,12 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
.map(|pc: PruningCheckpoint| pc.checkpoint))
}
/// Store the checkpoint to begin pruning from (the "old finalized checkpoint").
pub fn store_pruning_checkpoint(&self, checkpoint: Checkpoint) -> Result<(), Error> {
self.hot_db
.do_atomically(vec![self.pruning_checkpoint_store_op(checkpoint)])
}
/// Create a staged store for the pruning checkpoint.
pub fn pruning_checkpoint_store_op(&self, checkpoint: Checkpoint) -> KeyValueStoreOp {
PruningCheckpoint { checkpoint }.as_kv_store_op(PRUNING_CHECKPOINT_KEY)
@@ -1075,6 +1255,11 @@ pub fn migrate_database<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>(
// The new frozen head must increase the current split slot, and lie on an epoch
// boundary (in order for the hot state summary scheme to work).
let current_split_slot = store.split.read_recursive().slot;
let anchor_slot = store
.anchor_info
.read_recursive()
.as_ref()
.map(|a| a.anchor_slot);
if frozen_head.slot() < current_split_slot {
return Err(HotColdDBError::FreezeSlotError {
@@ -1094,7 +1279,10 @@ pub fn migrate_database<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>(
// to the cold DB.
let state_root_iter = StateRootsIterator::new(store.clone(), frozen_head);
for maybe_pair in state_root_iter.take_while(|result| match result {
Ok((_, slot)) => slot >= &current_split_slot,
Ok((_, slot)) => {
slot >= &current_split_slot
&& anchor_slot.map_or(true, |anchor_slot| slot >= &anchor_slot)
}
Err(_) => true,
}) {
let (state_root, slot) = maybe_pair?;
@@ -1183,10 +1371,10 @@ pub fn migrate_database<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>(
}
/// Struct for storing the split slot and state root in the database.
#[derive(Debug, Clone, Copy, Default, Encode, Decode)]
#[derive(Debug, Clone, Copy, PartialEq, Default, Encode, Decode, Deserialize, Serialize)]
pub struct Split {
slot: Slot,
state_root: Hash256,
pub(crate) slot: Slot,
pub(crate) state_root: Hash256,
}
impl StoreItem for Split {
@@ -1252,8 +1440,8 @@ impl HotStateSummary {
/// Struct for summarising a state in the freezer database.
#[derive(Debug, Clone, Copy, Default, Encode, Decode)]
struct ColdStateSummary {
slot: Slot,
pub(crate) struct ColdStateSummary {
pub slot: Slot,
}
impl StoreItem for ColdStateSummary {


@@ -1,15 +1 @@
use crate::*;
use ssz::Encode;
pub mod beacon_state;
/// Prepare a signed beacon block for storage in the database.
#[must_use]
pub fn beacon_block_as_kv_store_op<T: EthSpec>(
key: &Hash256,
block: &SignedBeaconBlock<T>,
) -> KeyValueStoreOp {
// FIXME(altair): re-add block write/overhead metrics, or remove them
let db_key = get_key_for_col(DBColumn::BeaconBlock.into(), key.as_bytes());
KeyValueStoreOp::PutKeyValue(db_key, block.as_ssz_bytes())
}


@@ -1,3 +1,4 @@
use crate::errors::HandleUnavailable;
use crate::{Error, HotColdDB, ItemStore};
use std::borrow::Cow;
use std::marker::PhantomData;
@@ -201,15 +202,20 @@ impl<'a, T: EthSpec, Hot: ItemStore<T>, Cold: ItemStore<T>> RootsIterator<'a, T,
(Ok(block_root), Ok(state_root)) => Ok(Some((*block_root, *state_root, self.slot))),
(Err(BeaconStateError::SlotOutOfBounds), Err(BeaconStateError::SlotOutOfBounds)) => {
// Read a `BeaconState` from the store that has access to prior historical roots.
let beacon_state =
next_historical_root_backtrack_state(&*self.store, &self.beacon_state)?;
if let Some(beacon_state) =
next_historical_root_backtrack_state(&*self.store, &self.beacon_state)
.handle_unavailable()?
{
self.beacon_state = Cow::Owned(beacon_state);
self.beacon_state = Cow::Owned(beacon_state);
let block_root = *self.beacon_state.get_block_root(self.slot)?;
let state_root = *self.beacon_state.get_state_root(self.slot)?;
let block_root = *self.beacon_state.get_block_root(self.slot)?;
let state_root = *self.beacon_state.get_state_root(self.slot)?;
Ok(Some((block_root, state_root, self.slot)))
Ok(Some((block_root, state_root, self.slot)))
} else {
// No more states available due to weak subjectivity sync.
Ok(None)
}
}
(Err(e), _) => Err(e.into()),
(Ok(_), Err(e)) => Err(e.into()),
@@ -329,6 +335,9 @@ impl<'a, T: EthSpec, Hot: ItemStore<T>, Cold: ItemStore<T>> Iterator
}
/// Fetch the next state to use whilst backtracking in `*RootsIterator`.
///
/// Return `Err(HistoryUnavailable)` in the case where no more backtrack states are available
/// due to weak subjectivity sync.
fn next_historical_root_backtrack_state<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>(
store: &HotColdDB<E, Hot, Cold>,
current_state: &BeaconState<E>,
@@ -338,10 +347,17 @@ fn next_historical_root_backtrack_state<E: EthSpec, Hot: ItemStore<E>, Cold: Ite
// not frozen, this just means we might not jump back by the maximum amount on
// our first jump (i.e. at most 1 extra state load).
let new_state_slot = slot_of_prev_restore_point::<E>(current_state.slot());
let new_state_root = current_state.get_state_root(new_state_slot)?;
Ok(store
.get_state(new_state_root, Some(new_state_slot))?
.ok_or_else(|| BeaconStateError::MissingBeaconState((*new_state_root).into()))?)
let (_, historic_state_upper_limit) = store.get_historic_state_limits();
if new_state_slot >= historic_state_upper_limit {
let new_state_root = current_state.get_state_root(new_state_slot)?;
Ok(store
.get_state(new_state_root, Some(new_state_slot))?
.ok_or_else(|| BeaconStateError::MissingBeaconState((*new_state_root).into()))?)
} else {
Err(Error::HistoryUnavailable)
}
}
/// Compute the slot of the last guaranteed restore point in the freezer database.


@@ -10,6 +10,7 @@
#[macro_use]
extern crate lazy_static;
mod chunk_writer;
pub mod chunked_iter;
pub mod chunked_vector;
pub mod config;
@@ -23,9 +24,11 @@ mod memory_store;
pub mod metadata;
pub mod metrics;
mod partial_beacon_state;
pub mod reconstruct;
pub mod iter;
pub use self::chunk_writer::ChunkWriter;
pub use self::config::StoreConfig;
pub use self::hot_cold_store::{BlockReplay, HotColdDB, HotStateSummary, Split};
pub use self::leveldb_store::LevelDB;
@@ -33,6 +36,7 @@ pub use self::memory_store::MemoryStore;
pub use self::partial_beacon_state::PartialBeaconState;
pub use errors::Error;
pub use impls::beacon_state::StorageContainer as BeaconStateStorageContainer;
pub use metadata::AnchorInfo;
pub use metrics::scrape_for_metrics;
use parking_lot::MutexGuard;
pub use types::*;


@@ -1,8 +1,10 @@
use crate::{DBColumn, Error, StoreItem};
use serde_derive::{Deserialize, Serialize};
use ssz::{Decode, Encode};
use types::{Checkpoint, Hash256};
use ssz_derive::{Decode, Encode};
use types::{Checkpoint, Hash256, Slot};
pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(4);
pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(5);
// All the keys that get stored under the `BeaconMeta` column.
//
@@ -12,6 +14,7 @@ pub const CONFIG_KEY: Hash256 = Hash256::repeat_byte(1);
pub const SPLIT_KEY: Hash256 = Hash256::repeat_byte(2);
pub const PRUNING_CHECKPOINT_KEY: Hash256 = Hash256::repeat_byte(3);
pub const COMPACTION_TIMESTAMP_KEY: Hash256 = Hash256::repeat_byte(4);
pub const ANCHOR_INFO_KEY: Hash256 = Hash256::repeat_byte(5);
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct SchemaVersion(pub u64);
@@ -76,3 +79,41 @@ impl StoreItem for CompactionTimestamp {
Ok(CompactionTimestamp(u64::from_ssz_bytes(bytes)?))
}
}
/// Database parameters relevant to weak subjectivity sync.
#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode, Serialize, Deserialize)]
pub struct AnchorInfo {
/// The slot at which the anchor state is present and which we cannot revert.
pub anchor_slot: Slot,
/// The slot from which historical blocks are available (>=).
pub oldest_block_slot: Slot,
/// The block root of the next block that needs to be added to fill in the history.
///
/// Zero if we know all blocks back to genesis.
pub oldest_block_parent: Hash256,
/// The slot from which historical states are available (>=).
pub state_upper_limit: Slot,
/// The slot before which historical states are available (<=).
pub state_lower_limit: Slot,
}
impl AnchorInfo {
/// Returns true if the block backfill has completed.
pub fn block_backfill_complete(&self) -> bool {
self.oldest_block_slot == 0
}
}
impl StoreItem for AnchorInfo {
fn db_column() -> DBColumn {
DBColumn::BeaconMeta
}
fn as_store_bytes(&self) -> Vec<u8> {
self.as_ssz_bytes()
}
fn from_store_bytes(bytes: &[u8]) -> Result<Self, Error> {
Ok(Self::from_ssz_bytes(bytes)?)
}
}
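
An illustrative (hypothetical) instantiation of `AnchorInfo` for a node that has just checkpoint-synced at slot 3072 and not yet backfilled any blocks; the values are made up but follow the field documentation above.

```rust
use store::AnchorInfo;
use types::{Hash256, Slot};

fn example_anchor() -> AnchorInfo {
    AnchorInfo {
        anchor_slot: Slot::new(3072),
        oldest_block_slot: Slot::new(3072),
        oldest_block_parent: Hash256::repeat_byte(0xaa),
        state_upper_limit: Slot::new(4096),
        state_lower_limit: Slot::new(0),
    }
}

fn main() {
    let anchor = example_anchor();
    // Backfill is complete only once blocks all the way back to slot 0 are stored.
    assert!(!anchor.block_backfill_complete());
}
```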


@@ -0,0 +1,160 @@
//! Implementation of historic state reconstruction (given complete block history).
use crate::hot_cold_store::{HotColdDB, HotColdDBError};
use crate::{Error, ItemStore, KeyValueStore};
use itertools::{process_results, Itertools};
use slog::info;
use state_processing::{per_block_processing, per_slot_processing, BlockSignatureStrategy};
use std::sync::Arc;
use types::{EthSpec, Hash256};
impl<E, Hot, Cold> HotColdDB<E, Hot, Cold>
where
E: EthSpec,
Hot: KeyValueStore<E> + ItemStore<E>,
Cold: KeyValueStore<E> + ItemStore<E>,
{
pub fn reconstruct_historic_states(self: &Arc<Self>) -> Result<(), Error> {
let mut anchor = if let Some(anchor) = self.get_anchor_info() {
anchor
} else {
// Nothing to do, history is complete.
return Ok(());
};
// Check that all historic blocks are known.
if anchor.oldest_block_slot != 0 {
return Err(Error::MissingHistoricBlocks {
oldest_block_slot: anchor.oldest_block_slot,
});
}
info!(
self.log,
"Beginning historic state reconstruction";
"start_slot" => anchor.state_lower_limit,
);
let slots_per_restore_point = self.config.slots_per_restore_point;
// Iterate blocks from the state lower limit to the upper limit.
let lower_limit_slot = anchor.state_lower_limit;
let split = self.get_split_info();
let upper_limit_state = self.get_restore_point(
anchor.state_upper_limit.as_u64() / slots_per_restore_point,
&split,
)?;
let upper_limit_slot = upper_limit_state.slot();
// Use a dummy root, as we never read the block for the upper limit state.
let upper_limit_block_root = Hash256::repeat_byte(0xff);
let block_root_iter = Self::forwards_block_roots_iterator(
self.clone(),
lower_limit_slot,
upper_limit_state,
upper_limit_block_root,
&self.spec,
)?;
// The state to be advanced.
let mut state = self
.load_cold_state_by_slot(lower_limit_slot)?
.ok_or(HotColdDBError::MissingLowerLimitState(lower_limit_slot))?;
state.build_all_caches(&self.spec)?;
process_results(block_root_iter, |iter| -> Result<(), Error> {
let mut io_batch = vec![];
let mut prev_state_root = None;
for ((prev_block_root, _), (block_root, slot)) in iter.tuple_windows() {
let is_skipped_slot = prev_block_root == block_root;
let block = if is_skipped_slot {
None
} else {
Some(
self.get_block(&block_root)?
.ok_or(Error::BlockNotFound(block_root))?,
)
};
// Advance state to slot.
per_slot_processing(&mut state, prev_state_root.take(), &self.spec)
.map_err(HotColdDBError::BlockReplaySlotError)?;
// Apply block.
if let Some(block) = block {
per_block_processing(
&mut state,
&block,
Some(block_root),
BlockSignatureStrategy::NoVerification,
&self.spec,
)
.map_err(HotColdDBError::BlockReplayBlockError)?;
prev_state_root = Some(block.state_root());
}
let state_root = prev_state_root
.ok_or(())
.or_else(|_| state.update_tree_hash_cache())?;
// Stage state for storage in freezer DB.
self.store_cold_state(&state_root, &state, &mut io_batch)?;
// If the slot lies on an epoch boundary, commit the batch and update the anchor.
if slot % slots_per_restore_point == 0 || slot + 1 == upper_limit_slot {
info!(
self.log,
"State reconstruction in progress";
"slot" => slot,
"remaining" => upper_limit_slot - 1 - slot
);
self.cold_db.do_atomically(std::mem::take(&mut io_batch))?;
// Update anchor.
let old_anchor = Some(anchor.clone());
if slot + 1 == upper_limit_slot {
// The two limits have met in the middle! We're done!
// Perform one last integrity check on the state reached.
let computed_state_root = state.update_tree_hash_cache()?;
if computed_state_root != state_root {
return Err(Error::StateReconstructionRootMismatch {
slot,
expected: state_root,
computed: computed_state_root,
});
}
self.compare_and_set_anchor_info(old_anchor, None)?;
return Ok(());
} else {
// The lower limit has been raised, store it.
anchor.state_lower_limit = slot;
self.compare_and_set_anchor_info(old_anchor, Some(anchor.clone()))?;
}
}
}
// Should always reach the `upper_limit_slot` and return early above.
Err(Error::StateReconstructionDidNotComplete)
})??;
// Check that the split point wasn't mutated during the state reconstruction process.
// It shouldn't have been, due to the serialization of requests through the store migrator,
// so this is just a paranoid check.
let latest_split = self.get_split_info();
if split != latest_split {
return Err(Error::SplitPointModified(latest_split.slot, split.slot));
}
Ok(())
}
}
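
Finally, a hypothetical sketch (not part of the diff) of how a background task or API handler might trigger reconstruction, gated on block backfill being complete so that `MissingHistoricBlocks` is never hit. It assumes `HotColdDB`, `ItemStore` and `KeyValueStore` are importable from the store crate root as in the `lib.rs` changes above.

```rust
use std::sync::Arc;
use store::{Error, HotColdDB, ItemStore, KeyValueStore};
use types::EthSpec;

/// Hypothetical trigger: reconstruct historic states once backfill has finished.
fn maybe_reconstruct<E, Hot, Cold>(db: &Arc<HotColdDB<E, Hot, Cold>>) -> Result<(), Error>
where
    E: EthSpec,
    Hot: KeyValueStore<E> + ItemStore<E>,
    Cold: KeyValueStore<E> + ItemStore<E>,
{
    // Only start once all historic blocks are present; otherwise
    // `reconstruct_historic_states` would return `MissingHistoricBlocks`.
    if db
        .get_anchor_info()
        .map_or(false, |anchor| anchor.block_backfill_complete())
    {
        db.reconstruct_historic_states()?;
    }
    Ok(())
}
```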