mirror of https://github.com/sigp/lighthouse.git
Commit: Fix
@@ -1,151 +0,0 @@
use std::sync::Arc;

use bls::Hash256;
use execution_layer::ExecutionLayer;
use futures::Stream;
use task_executor::TaskExecutor;
use tokio::sync::mpsc::{self, UnboundedSender};
use tokio_stream::wrappers::UnboundedReceiverStream;
use tracing::debug;
use types::{EthSpec, SignedExecutionPayloadEnvelope};

use crate::{BeaconChainError, BeaconChainTypes, BeaconStore, beacon_block_streamer::CheckCaches};

type PayloadEnvelopeResult<E> =
    Result<Option<Arc<SignedExecutionPayloadEnvelope<E>>>, BeaconChainError>;

pub struct PayloadEnvelopeStreamer<T: BeaconChainTypes> {
    // TODO(gloas) remove _ when we use the execution layer
    // to load payload envelopes
    _execution_layer: ExecutionLayer<T::EthSpec>,
    store: BeaconStore<T>,
    task_executor: TaskExecutor,
    _check_caches: CheckCaches,
}

// TODO(gloas) eventually we'll need to expand this to support loading blinded payload envelopes from the db
// and fetching the execution payload from the EL. See BlockStreamer impl as an example
impl<T: BeaconChainTypes> PayloadEnvelopeStreamer<T> {
    pub fn new(
        execution_layer_opt: Option<ExecutionLayer<T::EthSpec>>,
        store: BeaconStore<T>,
        task_executor: TaskExecutor,
        check_caches: CheckCaches,
    ) -> Result<Arc<Self>, BeaconChainError> {
        let execution_layer = execution_layer_opt
            .as_ref()
            .ok_or(BeaconChainError::ExecutionLayerMissing)?
            .clone();

        Ok(Arc::new(Self {
            _execution_layer: execution_layer,
            store,
            task_executor,
            _check_caches: check_caches,
        }))
    }

    // TODO(gloas) simply a stub impl for now. Should check some exec payload envelope cache
    // and return the envelope if it exists in the cache
    fn check_payload_envelope_cache(
        &self,
        _beacon_block_root: Hash256,
    ) -> Option<Arc<SignedExecutionPayloadEnvelope<T::EthSpec>>> {
        // if self.check_caches == CheckCaches::Yes
        None
    }

    async fn load_envelopes(
        self: &Arc<Self>,
        beacon_block_roots: &[Hash256],
    ) -> Result<Vec<(Hash256, PayloadEnvelopeResult<T::EthSpec>)>, BeaconChainError> {
        let streamer = self.clone();
        let roots = beacon_block_roots.to_vec();
        // Loading from the DB is slow -> spawn a blocking task
        self.task_executor
            .spawn_blocking_and_await(
                move || {
                    let mut results = Vec::new();
                    for root in roots {
                        if let Some(cached) = streamer.check_payload_envelope_cache(root) {
                            results.push((root, Ok(Some(cached))));
                            continue;
                        }
                        // TODO(gloas) we'll want to use the execution layer directly to call
                        // the engine api method eth_getBlockByHash()
                        match streamer.store.get_payload_envelope(&root) {
                            Ok(opt_envelope) => {
                                results.push((root, Ok(opt_envelope.map(Arc::new))));
                            }
                            Err(e) => {
                                results.push((root, Err(BeaconChainError::DBError(e))));
                            }
                        }
                    }
                    results
                },
                "load_execution_payload_envelopes",
            )
            .await
            .map_err(BeaconChainError::from)
    }

    async fn stream_payload_envelopes(
        self: Arc<Self>,
        beacon_block_roots: Vec<Hash256>,
        sender: UnboundedSender<(Hash256, Arc<PayloadEnvelopeResult<T::EthSpec>>)>,
    ) {
        let results = match self.load_envelopes(&beacon_block_roots).await {
            Ok(results) => results,
            Err(e) => {
                send_errors(beacon_block_roots, sender, e).await;
                return;
            }
        };

        for (root, result) in results {
            if sender.send((root, Arc::new(result))).is_err() {
                break;
            }
        }
    }

    pub async fn stream(
        self: Arc<Self>,
        beacon_block_roots: Vec<Hash256>,
        sender: UnboundedSender<(Hash256, Arc<PayloadEnvelopeResult<T::EthSpec>>)>,
    ) {
        self.stream_payload_envelopes(beacon_block_roots, sender)
            .await;
    }

    pub fn launch_stream(
        self: Arc<Self>,
        beacon_block_roots: Vec<Hash256>,
    ) -> impl Stream<Item = (Hash256, Arc<PayloadEnvelopeResult<T::EthSpec>>)> {
        let (envelope_tx, envelope_rx) = mpsc::unbounded_channel();
        debug!(
            envelopes = beacon_block_roots.len(),
            "Launching a PayloadEnvelopeStreamer"
        );
        let executor = self.task_executor.clone();
        executor.spawn(
            self.stream(beacon_block_roots, envelope_tx),
            "get_payload_envelopes_sender",
        );
        UnboundedReceiverStream::new(envelope_rx)
    }
}

async fn send_errors<E: EthSpec>(
    beacon_block_roots: Vec<Hash256>,
    sender: UnboundedSender<(Hash256, Arc<PayloadEnvelopeResult<E>>)>,
    beacon_chain_error: BeaconChainError,
) {
    let result = Arc::new(Err(beacon_chain_error));
    for beacon_block_root in beacon_block_roots {
        if sender.send((beacon_block_root, result.clone())).is_err() {
            break;
        }
    }
}
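For orientation, a minimal consumption sketch (not from this commit): draining the stream returned by `launch_stream`. The helper name and surrounding setup are hypothetical; only `launch_stream` and its item type come from the code above.

use futures::StreamExt;

// Hypothetical helper: count how many requested roots had a stored envelope.
async fn count_loaded_envelopes<T: BeaconChainTypes>(
    streamer: Arc<PayloadEnvelopeStreamer<T>>,
    roots: Vec<Hash256>,
) -> usize {
    // `launch_stream` returns an `impl Stream`, so pin it before polling.
    let mut stream = Box::pin(streamer.launch_stream(roots));
    let mut loaded = 0;
    while let Some((_root, result)) = stream.next().await {
        // `Ok(None)` means the load succeeded but no envelope is stored for this root.
        if matches!(result.as_ref(), Ok(Some(_))) {
            loaded += 1;
        }
    }
    loaded
}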
@@ -24,7 +24,6 @@ pub mod envelope_times_cache;
mod errors;
pub mod events;
pub mod execution_payload;
pub mod execution_payload_envelope_streamer;
pub mod fetch_blobs;
pub mod fork_choice_signal;
pub mod graffiti_calculator;
@@ -1,608 +0,0 @@
use crate::{
    beacon_chain::BeaconChainTypes,
    summaries_dag::{DAGStateSummary, DAGStateSummaryV22, StateSummariesDAG},
};
use ssz::{Decode, DecodeError, Encode};
use ssz_derive::Encode;
use std::{
    sync::Arc,
    time::{Duration, Instant},
};
use store::{
    DBColumn, Error, HotColdDB, HotStateSummary, KeyValueStore, KeyValueStoreOp, StoreItem,
    hdiff::StorageStrategy,
    hot_cold_store::{HotStateSummaryV22, OptionalDiffBaseState},
};
use tracing::{debug, info, warn};
use types::{
    BeaconState, CACHED_EPOCHS, ChainSpec, Checkpoint, CommitteeCache, EthSpec, Hash256, Slot,
    execution::StatePayloadStatus,
};

/// We stopped using the pruning checkpoint in schema v23 but never explicitly deleted it.
///
/// We delete it as part of the v24 migration.
pub const PRUNING_CHECKPOINT_KEY: Hash256 = Hash256::repeat_byte(3);

pub fn store_full_state_v22<E: EthSpec>(
    state_root: &Hash256,
    state: &BeaconState<E>,
    ops: &mut Vec<KeyValueStoreOp>,
) -> Result<(), Error> {
    let bytes = StorageContainer::new(state).as_ssz_bytes();
    ops.push(KeyValueStoreOp::PutKeyValue(
        DBColumn::BeaconState,
        state_root.as_slice().to_vec(),
        bytes,
    ));
    Ok(())
}

/// Fetch a V22 state from the database either as a full state or using block replay.
pub fn get_state_v22<T: BeaconChainTypes>(
    db: &Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
    state_root: &Hash256,
    spec: &ChainSpec,
) -> Result<Option<BeaconState<T::EthSpec>>, Error> {
    let Some(summary) = db.get_item::<HotStateSummaryV22>(state_root)? else {
        return Ok(None);
    };
    let Some(base_state) =
        get_full_state_v22(&db.hot_db, &summary.epoch_boundary_state_root, spec)?
    else {
        return Ok(None);
    };
    // Loading hot states via block replay doesn't care about the schema version, so we can use
    // the DB's current method for this.
    let update_cache = false;
    db.load_hot_state_using_replay(
        base_state,
        summary.slot,
        summary.latest_block_root,
        StatePayloadStatus::Pending,
        update_cache,
    )
    .map(Some)
}

pub fn get_full_state_v22<KV: KeyValueStore<E>, E: EthSpec>(
    db: &KV,
    state_root: &Hash256,
    spec: &ChainSpec,
) -> Result<Option<BeaconState<E>>, Error> {
    match db.get_bytes(DBColumn::BeaconState, state_root.as_slice())? {
        Some(bytes) => {
            let container = StorageContainer::from_ssz_bytes(&bytes, spec)?;
            Ok(Some(container.try_into()?))
        }
        None => Ok(None),
    }
}

/// A container for storing `BeaconState` components.
///
/// DEPRECATED.
#[derive(Encode)]
pub struct StorageContainer<E: EthSpec> {
    state: BeaconState<E>,
    committee_caches: Vec<Arc<CommitteeCache>>,
}

impl<E: EthSpec> StorageContainer<E> {
    /// Create a new instance for storing a `BeaconState`.
    pub fn new(state: &BeaconState<E>) -> Self {
        Self {
            state: state.clone(),
            committee_caches: state.committee_caches().to_vec(),
        }
    }

    pub fn from_ssz_bytes(bytes: &[u8], spec: &ChainSpec) -> Result<Self, ssz::DecodeError> {
        // We need to use the slot-switching `from_ssz_bytes` of `BeaconState`, which doesn't
        // compose with the other SSZ utils, so we duplicate some parts of `ssz_derive` here.
        let mut builder = ssz::SszDecoderBuilder::new(bytes);

        builder.register_anonymous_variable_length_item()?;
        builder.register_type::<Vec<CommitteeCache>>()?;

        let mut decoder = builder.build()?;

        let state = decoder.decode_next_with(|bytes| BeaconState::from_ssz_bytes(bytes, spec))?;
        let committee_caches = decoder.decode_next()?;

        Ok(Self {
            state,
            committee_caches,
        })
    }
}

impl<E: EthSpec> TryInto<BeaconState<E>> for StorageContainer<E> {
    type Error = Error;

    fn try_into(mut self) -> Result<BeaconState<E>, Error> {
        let mut state = self.state;

        for i in (0..CACHED_EPOCHS).rev() {
            if i >= self.committee_caches.len() {
                return Err(Error::SszDecodeError(DecodeError::BytesInvalid(
                    "Insufficient committees for BeaconState".to_string(),
                )));
            };

            state.committee_caches_mut()[i] = self.committee_caches.remove(i);
        }

        Ok(state)
    }
}

/// The checkpoint used for pruning the database.
///
/// Updated whenever pruning is successful.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct PruningCheckpoint {
    pub checkpoint: Checkpoint,
}

impl StoreItem for PruningCheckpoint {
    fn db_column() -> DBColumn {
        DBColumn::BeaconMeta
    }

    fn as_store_bytes(&self) -> Vec<u8> {
        self.checkpoint.as_ssz_bytes()
    }

    fn from_store_bytes(bytes: &[u8]) -> Result<Self, Error> {
        Ok(PruningCheckpoint {
            checkpoint: Checkpoint::from_ssz_bytes(bytes)?,
        })
    }
}

pub fn upgrade_to_v24<T: BeaconChainTypes>(
    db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
) -> Result<Vec<KeyValueStoreOp>, Error> {
    let mut migrate_ops = vec![];
    let split = db.get_split_info();
    let hot_hdiff_start_slot = split.slot;

    // Delete the `PruningCheckpoint` (no longer used).
    migrate_ops.push(KeyValueStoreOp::DeleteKey(
        DBColumn::BeaconMeta,
        PRUNING_CHECKPOINT_KEY.as_slice().to_vec(),
    ));

    // Sanity check to make sure the HDiff grid is aligned with the epoch start.
    if hot_hdiff_start_slot % T::EthSpec::slots_per_epoch() != 0 {
        return Err(Error::MigrationError(format!(
            "hot_hdiff_start_slot is not first slot in epoch {hot_hdiff_start_slot}"
        )));
    }

    // After V24 hot tree states, the in-memory `anchor_info.anchor_slot` is the start slot of the
    // hot HDiff grid. Before the migration, it's set to the slot of the anchor state in the DB:
    // - the genesis state on a genesis sync, or
    // - the checkpoint state on a checkpoint sync.
    //
    // If the node has been running for a while, the `anchor_slot` might be less than the slot of
    // the finalized checkpoint. This upgrade constructs a grid only with unfinalized states,
    // rooted in the current finalized state. So we set the `anchor_slot` to `split.slot` to root
    // the grid in the current finalized state. Each migration sets the split to
    // ```
    // Split { slot: finalized_state.slot(), state_root: finalized_state_root }
    // ```
    {
        let anchor_info = db.get_anchor_info();

        // If the node is already an archive node, we can set the anchor slot to 0 and copy
        // snapshots and diffs from the freezer DB to the hot DB in order to establish an initial
        // hot grid that is aligned/"perfect" (no `start_slot`/`anchor_slot` to worry about).
        //
        // This only works if all of the following are true:
        //
        // - We have the previous snapshot for the split state stored in the freezer DB, i.e.
        //   if `previous_snapshot_slot >= state_upper_limit`.
        // - The split state itself will be stored as a diff or snapshot in the new grid. We choose
        //   not to support a split state that requires block replay, because computing its
        //   previous state root from the DAG is not straightforward.
        let dummy_start_slot = Slot::new(0);
        let closest_layer_points = db
            .hierarchy
            .closest_layer_points(split.slot, dummy_start_slot);

        let previous_snapshot_slot =
            closest_layer_points
                .iter()
                .copied()
                .min()
                .ok_or(Error::MigrationError(
                    "closest_layer_points must not be empty".to_string(),
                ))?;

        if previous_snapshot_slot >= anchor_info.state_upper_limit
            && db
                .hierarchy
                .storage_strategy(split.slot, dummy_start_slot, StatePayloadStatus::Pending)
                .is_ok_and(|strategy| !strategy.is_replay_from())
        {
            info!(
                %previous_snapshot_slot,
                split_slot = %split.slot,
                "Aligning hot diff grid to freezer"
            );

            // Set anchor slot to 0 in case it was set to something else by a previous checkpoint
            // sync.
            let mut new_anchor_info = anchor_info.clone();
            new_anchor_info.anchor_slot = Slot::new(0);

            // Update the anchor on disk atomically if migration is successful.
            migrate_ops.push(db.compare_and_set_anchor_info(anchor_info, new_anchor_info)?);

            // Copy each of the freezer layers to the hot DB in slot ascending order.
            for layer_slot in closest_layer_points.into_iter().rev() {
                // Do not try to load the split state itself from the freezer, it won't be there.
                // It will be migrated in the main loop below.
                if layer_slot == split.slot {
                    continue;
                }

                let mut freezer_state = db.load_cold_state_by_slot(layer_slot)?;

                let state_root = freezer_state.canonical_root()?;

                let mut state_ops = vec![];
                db.store_hot_state(&state_root, &freezer_state, &mut state_ops)?;
                db.hot_db.do_atomically(state_ops)?;
            }
        } else {
            // Otherwise for non-archive nodes, set the anchor slot for the hot grid to the current
            // split slot (the oldest slot available).
            let mut new_anchor_info = anchor_info.clone();
            new_anchor_info.anchor_slot = hot_hdiff_start_slot;

            // Update the anchor on disk atomically if migration is successful.
            migrate_ops.push(db.compare_and_set_anchor_info(anchor_info, new_anchor_info)?);
        }
    }

    let state_summaries_dag = new_dag::<T>(&db)?;

    // We compute the state summaries DAG outside of a DB migration. Therefore if the DB is
    // properly pruned, it should have a single root equal to the split.
    let state_summaries_dag_roots = state_summaries_dag.tree_roots();
    if state_summaries_dag_roots.len() == 1 {
        let (root_summary_state_root, root_summary) =
            state_summaries_dag_roots.first().expect("len == 1");
        if *root_summary_state_root != split.state_root {
            warn!(
                ?root_summary_state_root,
                ?root_summary,
                ?split,
                "State summaries DAG root is not the split"
            );
        }
    } else {
        warn!(
            location = "migration",
            state_summaries_dag_roots = ?state_summaries_dag_roots,
            "State summaries DAG found more than one root"
        );
    }

    // Sort summaries by slot so we have their ancestor diffs already stored when we store them.
    // If the summaries are sorted topologically we can insert them into the DB as if they were
    // new states, re-using existing code. As states are likely to be sequential, the diff cache
    // should kick in, making the migration more efficient. If we just iterated the column of
    // summaries we might get a distant state on each iteration.
    let summaries_by_slot = state_summaries_dag.summaries_by_slot_ascending();
    debug!(
        summaries_count = state_summaries_dag.summaries_count(),
        slots_count = summaries_by_slot.len(),
        min_slot = ?summaries_by_slot.first_key_value().map(|(slot, _)| slot),
        max_slot = ?summaries_by_slot.last_key_value().map(|(slot, _)| slot),
        ?state_summaries_dag_roots,
        %hot_hdiff_start_slot,
        split_state_root = ?split.state_root,
        "Starting hot states migration"
    );

    // Upgrade all hot DB state summaries to the new type:
    // - Set all summaries of boundary states to `Snapshot` type
    // - Set all others to `Replay` pointing to `epoch_boundary_state_root`

    let mut diffs_written = 0;
    let mut summaries_written = 0;
    let mut last_log_time = Instant::now();

    for (slot, old_hot_state_summaries) in summaries_by_slot {
        for (state_root, old_summary) in old_hot_state_summaries {
            if slot < hot_hdiff_start_slot {
                // To reach here, there must be some pruning issue with the DB where we still have
                // hot states below the split slot. These states can't be migrated as we can't
                // compute a storage strategy for them. After this if-else block, the summary and
                // state are scheduled for deletion.
                debug!(
                    %slot,
                    ?state_root,
                    "Ignoring state summary prior to split slot"
                );
            } else {
                // 1. Store snapshot or diff at this slot (if required).
                let storage_strategy =
                    db.hot_storage_strategy(slot, StatePayloadStatus::Pending)?;
                debug!(
                    %slot,
                    ?state_root,
                    ?storage_strategy,
                    "Migrating state summary"
                );

                match storage_strategy {
                    StorageStrategy::DiffFrom(_) | StorageStrategy::Snapshot => {
                        // Load the state and re-store it as a snapshot or diff.
                        let state = get_state_v22::<T>(&db, &state_root, &db.spec)?
                            .ok_or(Error::MissingState(state_root))?;

                        // Store immediately so that future diffs can load and diff from it.
                        let mut ops = vec![];
                        // We must commit the hot state summary immediately, otherwise we can't
                        // diff against it and future writes will fail. That's why we write the new
                        // hot summaries in a different column, to have both new and old data
                        // present at once. Otherwise, if the process crashed during the migration
                        // the database would be broken.
                        db.store_hot_state_summary(&state_root, &state, &mut ops)?;
                        db.store_hot_state_diffs(&state_root, &state, &mut ops)?;
                        db.hot_db.do_atomically(ops)?;
                        diffs_written += 1;
                    }
                    StorageStrategy::ReplayFrom(diff_base_slot) => {
                        // Optimization: instead of having to load the state for each summary, we
                        // load ~32x fewer states by manually computing the HotStateSummary roots
                        // using the computed state DAG.
                        //
                        // No need to store diffs for states that will be reconstructed by
                        // replaying blocks.
                        //
                        // 2. Convert the summary to the new format.
                        if state_root == split.state_root {
                            return Err(Error::MigrationError(
                                "unreachable: split state should be stored as a snapshot or diff"
                                    .to_string(),
                            ));
                        }
                        let previous_state_root = state_summaries_dag
                            .previous_state_root(state_root)
                            .map_err(|e| {
                                Error::MigrationError(format!(
                                    "error computing previous_state_root {e:?}"
                                ))
                            })?;

                        let diff_base_state = OptionalDiffBaseState::new(
                            diff_base_slot,
                            state_summaries_dag
                                .ancestor_state_root_at_slot(state_root, diff_base_slot)
                                .map_err(|e| {
                                    Error::MigrationError(format!(
                                        "error computing ancestor_state_root_at_slot \
                                         ({state_root:?}, {diff_base_slot}): {e:?}"
                                    ))
                                })?,
                        );

                        let new_summary = HotStateSummary {
                            slot,
                            latest_block_root: old_summary.latest_block_root,
                            latest_block_slot: old_summary.latest_block_slot,
                            previous_state_root,
                            diff_base_state,
                        };
                        let op = new_summary.as_kv_store_op(state_root);
                        // It's not necessary to immediately commit the summaries of states that
                        // are ReplayFrom. However, we do so for simplicity.
                        db.hot_db.do_atomically(vec![op])?;
                    }
                }
            }

            // 3. Stage old data for deletion.
            if slot % T::EthSpec::slots_per_epoch() == 0 {
                migrate_ops.push(KeyValueStoreOp::DeleteKey(
                    DBColumn::BeaconState,
                    state_root.as_slice().to_vec(),
                ));
            }

            // Delete previous summaries.
            migrate_ops.push(KeyValueStoreOp::DeleteKey(
                DBColumn::BeaconStateSummary,
                state_root.as_slice().to_vec(),
            ));

            summaries_written += 1;
            if last_log_time.elapsed() > Duration::from_secs(5) {
                last_log_time = Instant::now();
                info!(
                    diffs_written,
                    summaries_written,
                    summaries_count = state_summaries_dag.summaries_count(),
                    "Hot states migration in progress"
                );
            }
        }
    }

    info!(
        diffs_written,
        summaries_written,
        summaries_count = state_summaries_dag.summaries_count(),
        "Hot states migration complete"
    );

    Ok(migrate_ops)
}
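// [Editor's sketch, not part of this file.] The essence of the grid the
// migration builds: epoch-boundary slots hold snapshots/diffs, and every other
// slot is reconstructed by replaying blocks from the preceding boundary. The
// real decision is made by `db.hot_storage_strategy`, which also consults the
// configured diff hierarchy; these helpers only illustrate the boundary maths
// reused by `epoch_boundary_state_slot` in the downgrade below.
fn is_epoch_boundary(slot: u64, slots_per_epoch: u64) -> bool {
    slot % slots_per_epoch == 0
}

fn replay_base_slot(slot: u64, slots_per_epoch: u64) -> u64 {
    // Last epoch-boundary slot at or before `slot`, e.g. 37 -> 32 when
    // slots_per_epoch == 32.
    slot - slot % slots_per_epoch
}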
pub fn downgrade_from_v24<T: BeaconChainTypes>(
    db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
) -> Result<Vec<KeyValueStoreOp>, Error> {
    let state_summaries = db
        .load_hot_state_summaries()?
        .into_iter()
        .map(|(state_root, summary)| (state_root, summary.into()))
        .collect::<Vec<(Hash256, DAGStateSummary)>>();

    info!(
        summaries_count = state_summaries.len(),
        "DB downgrade of v24 state summaries started"
    );

    let state_summaries_dag = StateSummariesDAG::new(state_summaries)
        .map_err(|e| Error::MigrationError(format!("Error on new StateSummariesDAG {e:?}")))?;

    let mut migrate_ops = vec![];
    let mut states_written = 0;
    let mut summaries_written = 0;
    let mut summaries_skipped = 0;
    let mut last_log_time = Instant::now();

    // Rebuild the PruningCheckpoint from the split.
    let split = db.get_split_info();
    let pruning_checkpoint = PruningCheckpoint {
        checkpoint: Checkpoint {
            epoch: split.slot.epoch(T::EthSpec::slots_per_epoch()),
            root: split.block_root,
        },
    };
    migrate_ops.push(pruning_checkpoint.as_kv_store_op(PRUNING_CHECKPOINT_KEY));

    // Convert state summaries back to the old format.
    for (state_root, summary) in state_summaries_dag
        .summaries_by_slot_ascending()
        .into_iter()
        .flat_map(|(_, summaries)| summaries)
    {
        // No need to migrate any states prior to the split. The v22 schema does not need them,
        // and they would generate warnings about a disjoint DAG when re-upgrading to V24.
        if summary.slot < split.slot {
            debug!(
                slot = %summary.slot,
                ?state_root,
                "Skipping migration of pre-split state"
            );
            summaries_skipped += 1;
            continue;
        }

        // If boundary state: persist.
        // Do not cache these states as they are unlikely to be relevant later.
        let update_cache = false;
        if summary.slot % T::EthSpec::slots_per_epoch() == 0 {
            let (state, _) = db
                .load_hot_state(&state_root, update_cache)?
                .ok_or(Error::MissingState(state_root))?;

            // Immediately commit the state, so we don't OOM. It's stored in a different
            // column so if the migration crashes we'll just store extra harmless junk in the DB.
            let mut state_write_ops = vec![];
            store_full_state_v22(&state_root, &state, &mut state_write_ops)?;
            db.hot_db.do_atomically(state_write_ops)?;
            states_written += 1;
        }

        // Persist old summary.
        let epoch_boundary_state_slot = summary.slot - summary.slot % T::EthSpec::slots_per_epoch();
        let old_summary = HotStateSummaryV22 {
            slot: summary.slot,
            latest_block_root: summary.latest_block_root,
            epoch_boundary_state_root: state_summaries_dag
                .ancestor_state_root_at_slot(state_root, epoch_boundary_state_slot)
                .map_err(|e| {
                    Error::MigrationError(format!(
                        "error computing ancestor_state_root_at_slot({state_root:?}, {epoch_boundary_state_slot}) {e:?}"
                    ))
                })?,
        };
        migrate_ops.push(KeyValueStoreOp::PutKeyValue(
            DBColumn::BeaconStateSummary,
            state_root.as_slice().to_vec(),
            old_summary.as_ssz_bytes(),
        ));
        summaries_written += 1;

        if last_log_time.elapsed() > Duration::from_secs(5) {
            last_log_time = Instant::now();
            info!(
                states_written,
                summaries_written,
                summaries_count = state_summaries_dag.summaries_count(),
                "DB downgrade of v24 state summaries in progress"
            );
        }
    }

    // Delete all V24 schema data. We do this outside the loop over summaries to ensure we cover
    // every piece of data and to simplify logic around skipping certain summaries that do not get
    // migrated.
    for db_column in [
        DBColumn::BeaconStateHotSummary,
        DBColumn::BeaconStateHotDiff,
        DBColumn::BeaconStateHotSnapshot,
    ] {
        for key in db.hot_db.iter_column_keys::<Hash256>(db_column) {
            let state_root = key?;
            migrate_ops.push(KeyValueStoreOp::DeleteKey(
                db_column,
                state_root.as_slice().to_vec(),
            ));
        }
    }

    info!(
        states_written,
        summaries_written,
        summaries_skipped,
        summaries_count = state_summaries_dag.summaries_count(),
        "DB downgrade of v24 state summaries completed"
    );

    Ok(migrate_ops)
}

fn new_dag<T: BeaconChainTypes>(
    db: &HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>,
) -> Result<StateSummariesDAG, Error> {
    // Collect all summaries for unfinalized states.
    let state_summaries_v22 = db
        .hot_db
        // Collect summaries from the legacy V22 column BeaconStateSummary
        .iter_column::<Hash256>(DBColumn::BeaconStateSummary)
        .map(|res| {
            let (key, value) = res?;
            let state_root: Hash256 = key;
            let summary = HotStateSummaryV22::from_ssz_bytes(&value)?;
            let block_root = summary.latest_block_root;
            // Read blocks to get the block slot and parent root. In Holesky forced finalization it
            // took 5100 ms to read 15072 state summaries, so it's not really necessary to
            // de-duplicate block reads.
            let block = db
                .get_blinded_block(&block_root)?
                .ok_or(Error::MissingBlock(block_root))?;

            Ok((
                state_root,
                DAGStateSummaryV22 {
                    slot: summary.slot,
                    latest_block_root: summary.latest_block_root,
                    block_slot: block.slot(),
                    block_parent_root: block.parent_root(),
                },
            ))
        })
        .collect::<Result<Vec<_>, Error>>()?;

    StateSummariesDAG::new_from_v22(state_summaries_v22)
        .map_err(|e| Error::MigrationError(format!("error computing state summaries DAG {e:?}")))
}
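A hedged sketch of how these entry points might be wired into a schema-migration dispatcher; the version matching, version types, and commit step in Lighthouse's real migration code differ, so treat every name here as an assumption.

// Hypothetical dispatcher: route between the v23<->v24 migrations and commit
// the accumulated ops in one atomic batch.
fn apply_v24_migration<T: BeaconChainTypes>(
    db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
    from: u64,
    to: u64,
) -> Result<(), Error> {
    let ops = match (from, to) {
        (23, 24) => upgrade_to_v24::<T>(db.clone())?,
        (24, 23) => downgrade_from_v24::<T>(db.clone())?,
        _ => {
            return Err(Error::MigrationError(format!(
                "unsupported migration {from} -> {to}"
            )));
        }
    };
    db.hot_db.do_atomically(ops)
}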
@@ -1,217 +0,0 @@
use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes};
use eth2::lighthouse::{
    AttestationPerformance, AttestationPerformanceQuery, AttestationPerformanceStatistics,
};
use state_processing::{
    BlockReplayError, BlockReplayer, per_epoch_processing::EpochProcessingSummary,
};
use std::sync::Arc;
use types::{BeaconState, BeaconStateError, EthSpec, Hash256};
use warp_utils::reject::{custom_bad_request, custom_server_error, unhandled_error};

const MAX_REQUEST_RANGE_EPOCHS: usize = 100;
const BLOCK_ROOT_CHUNK_SIZE: usize = 100;

#[derive(Debug)]
// We don't use the inner values directly, but they're used in the Debug impl.
enum AttestationPerformanceError {
    BlockReplay(#[allow(dead_code)] BlockReplayError),
    BeaconState(#[allow(dead_code)] BeaconStateError),
    UnableToFindValidator(#[allow(dead_code)] usize),
}

impl From<BlockReplayError> for AttestationPerformanceError {
    fn from(e: BlockReplayError) -> Self {
        Self::BlockReplay(e)
    }
}

impl From<BeaconStateError> for AttestationPerformanceError {
    fn from(e: BeaconStateError) -> Self {
        Self::BeaconState(e)
    }
}

pub fn get_attestation_performance<T: BeaconChainTypes>(
    target: String,
    query: AttestationPerformanceQuery,
    chain: Arc<BeaconChain<T>>,
) -> Result<Vec<AttestationPerformance>, warp::Rejection> {
    let spec = &chain.spec;
    // We increment by 2 here so that when we build the state from the `prior_slot` it is
    // still 1 epoch ahead of the first epoch we want to analyse.
    // This ensures the `.is_previous_epoch_X` functions on `EpochProcessingSummary` return results
    // for the correct epoch.
    let start_epoch = query.start_epoch + 2;
    let start_slot = start_epoch.start_slot(T::EthSpec::slots_per_epoch());
    let prior_slot = start_slot - 1;

    let end_epoch = query.end_epoch + 2;
    let end_slot = end_epoch.end_slot(T::EthSpec::slots_per_epoch());

    // Ensure end_epoch is smaller than the current epoch - 1.
    let current_epoch = chain.epoch().map_err(unhandled_error)?;
    if query.end_epoch >= current_epoch - 1 {
        return Err(custom_bad_request(format!(
            "end_epoch must be less than the current epoch - 1. current: {}, end: {}",
            current_epoch, query.end_epoch
        )));
    }

    // Check query is valid.
    if start_epoch > end_epoch {
        return Err(custom_bad_request(format!(
            "start_epoch must not be larger than end_epoch. start: {}, end: {}",
            query.start_epoch, query.end_epoch
        )));
    }

    // The response size can grow exceptionally large, therefore we should check that the
    // query is within permitted bounds to prevent potential OOM errors.
    if (end_epoch - start_epoch).as_usize() > MAX_REQUEST_RANGE_EPOCHS {
        return Err(custom_bad_request(format!(
            "end_epoch must not exceed start_epoch by more than {} epochs. start: {}, end: {}",
            MAX_REQUEST_RANGE_EPOCHS, query.start_epoch, query.end_epoch
        )));
    }

    // Either use the global validator set, or the specified index.
    //
    // Does no further validation of the indices, so in the event an index has not yet been
    // activated or does not yet exist (according to the head state), it will return all fields as
    // `false`.
    let index_range = if target.to_lowercase() == "global" {
        chain
            .with_head(|head| Ok((0..head.beacon_state.validators().len() as u64).collect()))
            .map_err(unhandled_error::<BeaconChainError>)?
    } else {
        vec![target.parse::<u64>().map_err(|_| {
            custom_bad_request(format!(
                "Invalid validator index: {:?}",
                target.to_lowercase()
            ))
        })?]
    };

    // Load block roots.
    let mut block_roots: Vec<Hash256> = chain
        .forwards_iter_block_roots_until(start_slot, end_slot)
        .map_err(unhandled_error)?
        .map(|res| res.map(|(root, _)| root))
        .collect::<Result<Vec<Hash256>, _>>()
        .map_err(unhandled_error)?;
    block_roots.dedup();

    // Load first block so we can get its parent.
    let first_block_root = block_roots.first().ok_or_else(|| {
        custom_server_error(
            "No block roots could be loaded. Ensure the beacon node is synced.".to_string(),
        )
    })?;
    let first_block = chain
        .get_blinded_block(first_block_root)
        .and_then(|maybe_block| {
            maybe_block.ok_or(BeaconChainError::MissingBeaconBlock(*first_block_root))
        })
        .map_err(unhandled_error)?;

    // Load the block of the prior slot which will be used to build the starting state.
    let prior_block = chain
        .get_blinded_block(&first_block.parent_root())
        .and_then(|maybe_block| {
            maybe_block
                .ok_or_else(|| BeaconChainError::MissingBeaconBlock(first_block.parent_root()))
        })
        .map_err(unhandled_error)?;

    // Load state for block replay.
    let state_root = prior_block.state_root();

    // This branch is reached from the HTTP API. We assume the user wants
    // to cache states so that future calls are faster.
    let state = chain
        .get_state(&state_root, Some(prior_slot), true)
        .and_then(|maybe_state| maybe_state.ok_or(BeaconChainError::MissingBeaconState(state_root)))
        .map_err(unhandled_error)?;

    // Allocate an AttestationPerformance vector for each validator in the range.
    let mut perfs: Vec<AttestationPerformance> =
        AttestationPerformance::initialize(index_range.clone());

    let post_slot_hook = |state: &mut BeaconState<T::EthSpec>,
                          summary: Option<EpochProcessingSummary<T::EthSpec>>,
                          _is_skip_slot: bool|
     -> Result<(), AttestationPerformanceError> {
        // If a `summary` was not output then an epoch boundary was not crossed,
        // so we move on to the next slot.
        if let Some(summary) = summary {
            for (position, i) in index_range.iter().enumerate() {
                let index = *i as usize;

                let val = perfs
                    .get_mut(position)
                    .ok_or(AttestationPerformanceError::UnableToFindValidator(index))?;

                // We are two epochs ahead: the summary is generated for
                // `state.previous_epoch()`, and the `summary.is_previous_epoch_X` functions
                // return data for the epoch before that.
                let epoch = state.previous_epoch().as_u64() - 1;

                let is_active = summary.is_active_unslashed_in_previous_epoch(index);

                let received_source_reward = summary.is_previous_epoch_source_attester(index)?;

                let received_head_reward = summary.is_previous_epoch_head_attester(index)?;

                let received_target_reward = summary.is_previous_epoch_target_attester(index)?;

                let inclusion_delay = summary
                    .previous_epoch_inclusion_info(index)
                    .map(|info| info.delay);

                let perf = AttestationPerformanceStatistics {
                    active: is_active,
                    head: received_head_reward,
                    target: received_target_reward,
                    source: received_source_reward,
                    delay: inclusion_delay,
                };

                val.epochs.insert(epoch, perf);
            }
        }
        Ok(())
    };

    // Initialize block replayer.
    let mut replayer = BlockReplayer::new(state, spec)
        .no_state_root_iter()
        .no_signature_verification()
        .minimal_block_root_verification()
        .post_slot_hook(Box::new(post_slot_hook));

    // Iterate through block roots in chunks to reduce load on memory.
    for block_root_chunks in block_roots.chunks(BLOCK_ROOT_CHUNK_SIZE) {
        // Load blocks from the block root chunks.
        let blocks = block_root_chunks
            .iter()
            .map(|root| {
                chain
                    .get_blinded_block(root)
                    .and_then(|maybe_block| {
                        maybe_block.ok_or(BeaconChainError::MissingBeaconBlock(*root))
                    })
                    .map_err(unhandled_error)
            })
            .collect::<Result<Vec<_>, _>>()?;

        // TODO(gloas): add payloads
        replayer = replayer
            .apply_blocks(blocks, vec![], None)
            .map_err(|e| custom_server_error(format!("{:?}", e)))?;
    }

    drop(replayer);

    Ok(perfs)
}
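A worked example of the `+2` epoch offset described in the comments above (mainnet, 32 slots per epoch); the function name is illustrative only.

fn offset_example() {
    let slots_per_epoch = 32u64;
    let query_start_epoch = 10u64;
    let start_epoch = query_start_epoch + 2; // 12
    let start_slot = start_epoch * slots_per_epoch; // 384
    let prior_slot = start_slot - 1; // 383, the last slot of epoch 11
    // The replay state sits one epoch ahead of the first analysed epoch (10),
    // so the first `EpochProcessingSummary` reports on epoch 10 as intended.
    assert_eq!(prior_slot / slots_per_epoch, 11);
}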
@@ -1,410 +0,0 @@
use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes};
use eth2::lighthouse::{
    BlockPackingEfficiency, BlockPackingEfficiencyQuery, ProposerInfo, UniqueAttestation,
};
use parking_lot::Mutex;
use state_processing::{
    BlockReplayError, BlockReplayer, per_epoch_processing::EpochProcessingSummary,
};
use std::collections::{HashMap, HashSet};
use std::marker::PhantomData;
use std::sync::Arc;
use types::{
    AttestationRef, BeaconCommittee, BeaconState, BeaconStateError, BlindedPayload, ChainSpec,
    Epoch, EthSpec, Hash256, OwnedBeaconCommittee, RelativeEpoch, SignedBeaconBlock, Slot,
};
use warp_utils::reject::{custom_bad_request, custom_server_error, unhandled_error};

/// Load blocks from block roots in chunks to reduce load on memory.
const BLOCK_ROOT_CHUNK_SIZE: usize = 100;

#[derive(Debug)]
// We don't use the inner values directly, but they're used in the Debug impl.
enum PackingEfficiencyError {
    BlockReplay(#[allow(dead_code)] BlockReplayError),
    BeaconState(#[allow(dead_code)] BeaconStateError),
    CommitteeStoreError(#[allow(dead_code)] Slot),
    InvalidAttestationError,
}

impl From<BlockReplayError> for PackingEfficiencyError {
    fn from(e: BlockReplayError) -> Self {
        Self::BlockReplay(e)
    }
}

impl From<BeaconStateError> for PackingEfficiencyError {
    fn from(e: BeaconStateError) -> Self {
        Self::BeaconState(e)
    }
}

struct CommitteeStore {
    current_epoch_committees: Vec<OwnedBeaconCommittee>,
    previous_epoch_committees: Vec<OwnedBeaconCommittee>,
}

impl CommitteeStore {
    fn new() -> Self {
        CommitteeStore {
            current_epoch_committees: Vec::new(),
            previous_epoch_committees: Vec::new(),
        }
    }
}

struct PackingEfficiencyHandler<E: EthSpec> {
    current_slot: Slot,
    current_epoch: Epoch,
    prior_skip_slots: u64,
    available_attestations: HashSet<UniqueAttestation>,
    included_attestations: HashMap<UniqueAttestation, u64>,
    committee_store: CommitteeStore,
    _phantom: PhantomData<E>,
}

impl<E: EthSpec> PackingEfficiencyHandler<E> {
    fn new(
        start_epoch: Epoch,
        starting_state: BeaconState<E>,
        spec: &ChainSpec,
    ) -> Result<Self, PackingEfficiencyError> {
        let mut handler = PackingEfficiencyHandler {
            current_slot: start_epoch.start_slot(E::slots_per_epoch()),
            current_epoch: start_epoch,
            prior_skip_slots: 0,
            available_attestations: HashSet::new(),
            included_attestations: HashMap::new(),
            committee_store: CommitteeStore::new(),
            _phantom: PhantomData,
        };

        handler.compute_epoch(start_epoch, &starting_state, spec)?;
        Ok(handler)
    }

    fn update_slot(&mut self, slot: Slot) {
        self.current_slot = slot;
        if slot % E::slots_per_epoch() == 0 {
            self.current_epoch = Epoch::new(slot.as_u64() / E::slots_per_epoch());
        }
    }

    fn prune_included_attestations(&mut self) {
        let epoch = self.current_epoch;
        self.included_attestations.retain(|x, _| {
            x.slot >= Epoch::new(epoch.as_u64().saturating_sub(2)).start_slot(E::slots_per_epoch())
        });
    }

    fn prune_available_attestations(&mut self) {
        let slot = self.current_slot;
        self.available_attestations
            .retain(|x| x.slot >= (slot.as_u64().saturating_sub(E::slots_per_epoch())));
    }

    fn apply_block(
        &mut self,
        block: &SignedBeaconBlock<E, BlindedPayload<E>>,
    ) -> Result<usize, PackingEfficiencyError> {
        let block_body = block.message().body();
        let attestations = block_body.attestations();

        let mut attestations_in_block = HashMap::new();
        for attestation in attestations {
            match attestation {
                AttestationRef::Base(attn) => {
                    for (position, voted) in attn.aggregation_bits.iter().enumerate() {
                        if voted {
                            let unique_attestation = UniqueAttestation {
                                slot: attn.data.slot,
                                committee_index: attn.data.index,
                                committee_position: position,
                            };
                            let inclusion_distance: u64 = block
                                .slot()
                                .as_u64()
                                .checked_sub(attn.data.slot.as_u64())
                                .ok_or(PackingEfficiencyError::InvalidAttestationError)?;

                            self.available_attestations.remove(&unique_attestation);
                            attestations_in_block.insert(unique_attestation, inclusion_distance);
                        }
                    }
                }
                AttestationRef::Electra(attn) => {
                    for (position, voted) in attn.aggregation_bits.iter().enumerate() {
                        if voted {
                            let unique_attestation = UniqueAttestation {
                                slot: attn.data.slot,
                                committee_index: attn.data.index,
                                committee_position: position,
                            };
                            let inclusion_distance: u64 = block
                                .slot()
                                .as_u64()
                                .checked_sub(attn.data.slot.as_u64())
                                .ok_or(PackingEfficiencyError::InvalidAttestationError)?;

                            self.available_attestations.remove(&unique_attestation);
                            attestations_in_block.insert(unique_attestation, inclusion_distance);
                        }
                    }
                }
            }
        }

        // Remove duplicate attestations as these yield no reward.
        attestations_in_block.retain(|x, _| !self.included_attestations.contains_key(x));
        self.included_attestations
            .extend(attestations_in_block.clone());

        Ok(attestations_in_block.len())
    }

    fn add_attestations(&mut self, slot: Slot) -> Result<(), PackingEfficiencyError> {
        let committees = self.get_committees_at_slot(slot)?;
        for committee in committees {
            for position in 0..committee.committee.len() {
                let unique_attestation = UniqueAttestation {
                    slot,
                    committee_index: committee.index,
                    committee_position: position,
                };
                self.available_attestations.insert(unique_attestation);
            }
        }

        Ok(())
    }

    fn compute_epoch(
        &mut self,
        epoch: Epoch,
        state: &BeaconState<E>,
        spec: &ChainSpec,
    ) -> Result<(), PackingEfficiencyError> {
        // Free some memory by pruning old attestations from the included set.
        self.prune_included_attestations();

        let new_committees = if state.committee_cache_is_initialized(RelativeEpoch::Current) {
            state
                .get_beacon_committees_at_epoch(RelativeEpoch::Current)?
                .into_iter()
                .map(BeaconCommittee::into_owned)
                .collect::<Vec<_>>()
        } else {
            state
                .initialize_committee_cache(epoch, spec)?
                .get_all_beacon_committees()?
                .into_iter()
                .map(BeaconCommittee::into_owned)
                .collect::<Vec<_>>()
        };

        self.committee_store
            .previous_epoch_committees
            .clone_from(&self.committee_store.current_epoch_committees);

        self.committee_store.current_epoch_committees = new_committees;

        Ok(())
    }

    fn get_committees_at_slot(
        &self,
        slot: Slot,
    ) -> Result<Vec<OwnedBeaconCommittee>, PackingEfficiencyError> {
        let mut committees = Vec::new();

        for committee in &self.committee_store.current_epoch_committees {
            if committee.slot == slot {
                committees.push(committee.clone());
            }
        }
        for committee in &self.committee_store.previous_epoch_committees {
            if committee.slot == slot {
                committees.push(committee.clone());
            }
        }

        if committees.is_empty() {
            return Err(PackingEfficiencyError::CommitteeStoreError(slot));
        }

        Ok(committees)
    }
}

pub fn get_block_packing_efficiency<T: BeaconChainTypes>(
    query: BlockPackingEfficiencyQuery,
    chain: Arc<BeaconChain<T>>,
) -> Result<Vec<BlockPackingEfficiency>, warp::Rejection> {
    let spec = &chain.spec;

    let start_epoch = query.start_epoch;
    let start_slot = start_epoch.start_slot(T::EthSpec::slots_per_epoch());
    let prior_slot = start_slot - 1;

    let end_epoch = query.end_epoch;
    let end_slot = end_epoch.end_slot(T::EthSpec::slots_per_epoch());

    // Check query is valid.
    if start_epoch > end_epoch || start_epoch == 0 {
        return Err(custom_bad_request(format!(
            "invalid start and end epochs: {}, {}",
            start_epoch, end_epoch
        )));
    }

    let prior_epoch = start_epoch - 1;
    let start_slot_of_prior_epoch = prior_epoch.start_slot(T::EthSpec::slots_per_epoch());

    // Load block roots.
    let mut block_roots: Vec<Hash256> = chain
        .forwards_iter_block_roots_until(start_slot_of_prior_epoch, end_slot)
        .map_err(unhandled_error)?
        .collect::<Result<Vec<(Hash256, Slot)>, _>>()
        .map_err(unhandled_error)?
        .iter()
        .map(|(root, _)| *root)
        .collect();
    block_roots.dedup();

    let first_block_root = block_roots
        .first()
        .ok_or_else(|| custom_server_error("no blocks were loaded".to_string()))?;

    let first_block = chain
        .get_blinded_block(first_block_root)
        .and_then(|maybe_block| {
            maybe_block.ok_or(BeaconChainError::MissingBeaconBlock(*first_block_root))
        })
        .map_err(unhandled_error)?;

    // Load state for block replay.
    let starting_state_root = first_block.state_root();

    // This branch is reached from the HTTP API. We assume the user wants
    // to cache states so that future calls are faster.
    let starting_state = chain
        .get_state(&starting_state_root, Some(prior_slot), true)
        .and_then(|maybe_state| {
            maybe_state.ok_or(BeaconChainError::MissingBeaconState(starting_state_root))
        })
        .map_err(unhandled_error)?;

    // Initialize response vector.
    let mut response = Vec::new();

    // Initialize handler.
    let handler = Arc::new(Mutex::new(
        PackingEfficiencyHandler::new(prior_epoch, starting_state.clone(), spec)
            .map_err(|e| custom_server_error(format!("{:?}", e)))?,
    ));

    let pre_slot_hook =
        |_, state: &mut BeaconState<T::EthSpec>| -> Result<(), PackingEfficiencyError> {
            // Add attestations to `available_attestations`.
            handler.lock().add_attestations(state.slot())?;
            Ok(())
        };

    let post_slot_hook = |state: &mut BeaconState<T::EthSpec>,
                          _summary: Option<EpochProcessingSummary<T::EthSpec>>,
                          is_skip_slot: bool|
     -> Result<(), PackingEfficiencyError> {
        handler.lock().update_slot(state.slot());

        // Check if this is a new epoch.
        if state.slot() % T::EthSpec::slots_per_epoch() == 0 {
            handler.lock().compute_epoch(
                state.slot().epoch(T::EthSpec::slots_per_epoch()),
                state,
                spec,
            )?;
        }

        if is_skip_slot {
            handler.lock().prior_skip_slots += 1;
        }

        // Remove expired attestations.
        handler.lock().prune_available_attestations();

        Ok(())
    };

    let pre_block_hook = |_state: &mut BeaconState<T::EthSpec>,
                          block: &SignedBeaconBlock<_, BlindedPayload<_>>|
     -> Result<(), PackingEfficiencyError> {
        let slot = block.slot();

        let block_message = block.message();
        // Get block proposer info.
        let proposer_info = ProposerInfo {
            validator_index: block_message.proposer_index(),
            graffiti: block_message.body().graffiti().as_utf8_lossy(),
        };

        // Store the count of available attestations at this point.
        // In the future it may be desirable to check that the number of available attestations
        // does not exceed the maximum possible amount given the length of available committees.
        let available_count = handler.lock().available_attestations.len();

        // Get all attestations included in the block.
        let included = handler.lock().apply_block(block)?;

        let efficiency = BlockPackingEfficiency {
            slot,
            block_hash: block.canonical_root(),
            proposer_info,
            available_attestations: available_count,
            included_attestations: included,
            prior_skip_slots: handler.lock().prior_skip_slots,
        };

        // Write to response.
        if slot >= start_slot {
            response.push(efficiency);
        }

        handler.lock().prior_skip_slots = 0;

        Ok(())
    };

    // Build BlockReplayer.
    let mut replayer = BlockReplayer::new(starting_state, spec)
        .no_state_root_iter()
        .no_signature_verification()
        .minimal_block_root_verification()
        .pre_slot_hook(Box::new(pre_slot_hook))
        .post_slot_hook(Box::new(post_slot_hook))
        .pre_block_hook(Box::new(pre_block_hook));

    // Iterate through the block roots, loading blocks in chunks to reduce load on memory.
    for block_root_chunks in block_roots.chunks(BLOCK_ROOT_CHUNK_SIZE) {
        // Load blocks from the block root chunks.
        let blocks = block_root_chunks
            .iter()
            .map(|root| {
                chain
                    .get_blinded_block(root)
                    .and_then(|maybe_block| {
                        maybe_block.ok_or(BeaconChainError::MissingBeaconBlock(*root))
                    })
                    .map_err(unhandled_error)
            })
            .collect::<Result<Vec<_>, _>>()?;

        // TODO(gloas): add payloads
        replayer = replayer
            .apply_blocks(blocks, vec![], None)
            .map_err(|e: PackingEfficiencyError| custom_server_error(format!("{:?}", e)))?;
    }

    drop(replayer);

    Ok(response)
}
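A small post-processing sketch (not part of the commit): a conventional "packing efficiency" ratio derived from the `available_attestations` and `included_attestations` fields populated above. The helper name is hypothetical.

fn packing_efficiency(available: usize, included: usize) -> f64 {
    if available == 0 {
        // A block with nothing available to pack is trivially fully packed.
        1.0
    } else {
        included as f64 / available as f64
    }
}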
@@ -1,185 +0,0 @@
use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes, WhenSlotSkipped};
use eth2::lighthouse::{BlockReward, BlockRewardsQuery};
use lru::LruCache;
use state_processing::BlockReplayer;
use std::num::NonZeroUsize;
use std::sync::Arc;
use tracing::{debug, warn};
use types::block::BlindedBeaconBlock;
use types::execution::StatePayloadStatus;
use types::new_non_zero_usize;
use warp_utils::reject::{beacon_state_error, custom_bad_request, unhandled_error};

const STATE_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(2);

/// Fetch block rewards for blocks from the canonical chain.
pub fn get_block_rewards<T: BeaconChainTypes>(
    query: BlockRewardsQuery,
    chain: Arc<BeaconChain<T>>,
) -> Result<Vec<BlockReward>, warp::Rejection> {
    let start_slot = query.start_slot;
    let end_slot = query.end_slot;
    let prior_slot = start_slot - 1;

    if start_slot > end_slot || start_slot == 0 {
        return Err(custom_bad_request(format!(
            "invalid start and end: {}, {}",
            start_slot, end_slot
        )));
    }

    let end_block_root = chain
        .block_root_at_slot(end_slot, WhenSlotSkipped::Prev)
        .map_err(unhandled_error)?
        .ok_or_else(|| custom_bad_request(format!("block at end slot {} unknown", end_slot)))?;

    let (blocks, envelopes) = chain
        .store
        .load_blocks_to_replay(
            start_slot,
            end_slot,
            end_block_root,
            StatePayloadStatus::Pending,
        )
        .map_err(|e| unhandled_error(BeaconChainError::from(e)))?;

    let state_root = chain
        .state_root_at_slot(prior_slot)
        .map_err(unhandled_error)?
        .ok_or_else(|| custom_bad_request(format!("prior state at slot {} unknown", prior_slot)))?;

    // This branch is reached from the HTTP API. We assume the user wants
    // to cache states so that future calls are faster.
    let mut state = chain
        .get_state(&state_root, Some(prior_slot), true)
        .and_then(|maybe_state| maybe_state.ok_or(BeaconChainError::MissingBeaconState(state_root)))
        .map_err(unhandled_error)?;

    state
        .build_caches(&chain.spec)
        .map_err(beacon_state_error)?;

    let mut reward_cache = Default::default();
    let mut block_rewards = Vec::with_capacity(blocks.len());

    let block_replayer = BlockReplayer::new(state, &chain.spec)
        .pre_block_hook(Box::new(|state, block| {
            state.build_all_committee_caches(&chain.spec)?;

            // Compute block reward.
            let block_reward = chain.compute_block_reward(
                block.message(),
                block.canonical_root(),
                state,
                &mut reward_cache,
                query.include_attestations,
            )?;
            block_rewards.push(block_reward);
            Ok(())
        }))
        .state_root_iter(
            chain
                .forwards_iter_state_roots_until(prior_slot, end_slot)
                .map_err(unhandled_error)?,
        )
        .no_signature_verification()
        .minimal_block_root_verification()
        .apply_blocks(blocks, envelopes, None)
        .map_err(unhandled_error)?;

    if block_replayer.state_root_miss() {
        warn!(%start_slot, %end_slot, "Block reward state root miss");
    }

    drop(block_replayer);

    Ok(block_rewards)
}

/// Compute block rewards for blocks passed in as input.
pub fn compute_block_rewards<T: BeaconChainTypes>(
    blocks: Vec<BlindedBeaconBlock<T::EthSpec>>,
    chain: Arc<BeaconChain<T>>,
) -> Result<Vec<BlockReward>, warp::Rejection> {
    let mut block_rewards = Vec::with_capacity(blocks.len());
    let mut state_cache = LruCache::new(STATE_CACHE_SIZE);
    let mut reward_cache = Default::default();

    for block in blocks {
        let parent_root = block.parent_root();

        // Check LRU cache for a constructed state from a previous iteration.
        let state = if let Some(state) = state_cache.get(&(parent_root, block.slot())) {
            debug!(
                ?parent_root,
                slot = %block.slot(),
                "Re-using cached state for block rewards"
            );
            state
        } else {
            debug!(
                ?parent_root,
                slot = %block.slot(),
                "Fetching state for block rewards"
            );
            let parent_block = chain
                .get_blinded_block(&parent_root)
                .map_err(unhandled_error)?
                .ok_or_else(|| {
                    custom_bad_request(format!(
                        "parent block not known or not canonical: {:?}",
                        parent_root
                    ))
                })?;

            // This branch is reached from the HTTP API. We assume the user wants
            // to cache states so that future calls are faster.
            let parent_state = chain
                .get_state(&parent_block.state_root(), Some(parent_block.slot()), true)
                .map_err(unhandled_error)?
                .ok_or_else(|| {
                    custom_bad_request(format!(
                        "no state known for parent block: {:?}",
                        parent_root
                    ))
                })?;

            // TODO(gloas): handle payloads?
            let block_replayer = BlockReplayer::new(parent_state, &chain.spec)
                .no_signature_verification()
                .state_root_iter([Ok((parent_block.state_root(), parent_block.slot()))].into_iter())
                .minimal_block_root_verification()
                .apply_blocks(vec![], vec![], Some(block.slot()))
                .map_err(unhandled_error::<BeaconChainError>)?;

            if block_replayer.state_root_miss() {
                warn!(
                    parent_slot = %parent_block.slot(),
                    slot = %block.slot(),
                    "Block reward state root miss"
                );
            }

            let mut state = block_replayer.into_state();
            state
                .build_all_committee_caches(&chain.spec)
                .map_err(beacon_state_error)?;

            state_cache.get_or_insert((parent_root, block.slot()), || state)
        };

        // Compute block reward.
        let block_reward = chain
            .compute_block_reward(
                block.to_ref(),
                block.canonical_root(),
                state,
                &mut reward_cache,
                true,
            )
            .map_err(unhandled_error)?;
        block_rewards.push(block_reward);
    }

    Ok(block_rewards)
}
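A standalone sketch of why the state cache above is keyed by `(parent_root, slot)`: repeated requests for a block at the same slot on the same parent reuse the replayed state, while a block at a different slot on the same parent needs further replay and must miss. Plain integers stand in for `Hash256` and `Slot`.

use lru::LruCache;
use std::num::NonZeroUsize;

fn cache_key_demo() {
    let mut cache: LruCache<(u64, u64), &'static str> =
        LruCache::new(NonZeroUsize::new(2).expect("non-zero"));
    cache.put((1, 100), "state replayed up to slot 100 on parent 1");
    assert!(cache.get(&(1, 100)).is_some()); // same parent + slot: hit
    assert!(cache.get(&(1, 101)).is_none()); // same parent, later slot: miss
}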