mirror of
https://github.com/sigp/lighthouse.git
synced 2026-03-20 21:34:46 +00:00
Remove lighthouse/analysis endpoints (#8968)
Some of our custom `lighthouse/analysis` endpoints would require maintenance for the Gloas hard fork. We have decided instead to remove those endpoints: we don't use them internally, and they have fairly limited utility, so we feel they are not worth maintaining. Remove the `lighthouse/analysis/attestation_performance` and `lighthouse/analysis/block_packing_efficiency` endpoints. Co-Authored-By: Mac L <mjladson@pm.me>
This commit is contained in:
@@ -1,217 +0,0 @@
|
||||
use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes};
|
||||
use eth2::lighthouse::{
|
||||
AttestationPerformance, AttestationPerformanceQuery, AttestationPerformanceStatistics,
|
||||
};
|
||||
use state_processing::{
|
||||
BlockReplayError, BlockReplayer, per_epoch_processing::EpochProcessingSummary,
|
||||
};
|
||||
use std::sync::Arc;
|
||||
use types::{BeaconState, BeaconStateError, EthSpec, Hash256};
|
||||
use warp_utils::reject::{custom_bad_request, custom_server_error, unhandled_error};
|
||||
|
||||
const MAX_REQUEST_RANGE_EPOCHS: usize = 100;
|
||||
const BLOCK_ROOT_CHUNK_SIZE: usize = 100;
|
||||
|
||||
#[derive(Debug)]
|
||||
// We don't use the inner values directly, but they're used in the Debug impl.
|
||||
enum AttestationPerformanceError {
|
||||
BlockReplay(#[allow(dead_code)] BlockReplayError),
|
||||
BeaconState(#[allow(dead_code)] BeaconStateError),
|
||||
UnableToFindValidator(#[allow(dead_code)] usize),
|
||||
}
|
||||
|
||||
impl From<BlockReplayError> for AttestationPerformanceError {
|
||||
fn from(e: BlockReplayError) -> Self {
|
||||
Self::BlockReplay(e)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<BeaconStateError> for AttestationPerformanceError {
|
||||
fn from(e: BeaconStateError) -> Self {
|
||||
Self::BeaconState(e)
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_attestation_performance<T: BeaconChainTypes>(
|
||||
target: String,
|
||||
query: AttestationPerformanceQuery,
|
||||
chain: Arc<BeaconChain<T>>,
|
||||
) -> Result<Vec<AttestationPerformance>, warp::Rejection> {
|
||||
let spec = &chain.spec;
|
||||
// We increment by 2 here so that when we build the state from the `prior_slot` it is
|
||||
// still 1 epoch ahead of the first epoch we want to analyse.
|
||||
// This ensures the `.is_previous_epoch_X` functions on `EpochProcessingSummary` return results
|
||||
// for the correct epoch.
|
||||
let start_epoch = query.start_epoch + 2;
|
||||
let start_slot = start_epoch.start_slot(T::EthSpec::slots_per_epoch());
|
||||
let prior_slot = start_slot - 1;
|
||||
|
||||
let end_epoch = query.end_epoch + 2;
|
||||
let end_slot = end_epoch.end_slot(T::EthSpec::slots_per_epoch());
|
||||
|
||||
// Ensure end_epoch is smaller than the current epoch - 1.
|
||||
let current_epoch = chain.epoch().map_err(unhandled_error)?;
|
||||
if query.end_epoch >= current_epoch - 1 {
|
||||
return Err(custom_bad_request(format!(
|
||||
"end_epoch must be less than the current epoch - 1. current: {}, end: {}",
|
||||
current_epoch, query.end_epoch
|
||||
)));
|
||||
}
|
||||
|
||||
// Check query is valid.
|
||||
if start_epoch > end_epoch {
|
||||
return Err(custom_bad_request(format!(
|
||||
"start_epoch must not be larger than end_epoch. start: {}, end: {}",
|
||||
query.start_epoch, query.end_epoch
|
||||
)));
|
||||
}
|
||||
|
||||
// The response size can grow exceptionally large therefore we should check that the
|
||||
// query is within permitted bounds to prevent potential OOM errors.
|
||||
if (end_epoch - start_epoch).as_usize() > MAX_REQUEST_RANGE_EPOCHS {
|
||||
return Err(custom_bad_request(format!(
|
||||
"end_epoch must not exceed start_epoch by more than {} epochs. start: {}, end: {}",
|
||||
MAX_REQUEST_RANGE_EPOCHS, query.start_epoch, query.end_epoch
|
||||
)));
|
||||
}
|
||||
|
||||
// Either use the global validator set, or the specified index.
|
||||
//
|
||||
// Does no further validation of the indices, so in the event an index has not yet been
|
||||
// activated or does not yet exist (according to the head state), it will return all fields as
|
||||
// `false`.
|
||||
let index_range = if target.to_lowercase() == "global" {
|
||||
chain
|
||||
.with_head(|head| Ok((0..head.beacon_state.validators().len() as u64).collect()))
|
||||
.map_err(unhandled_error::<BeaconChainError>)?
|
||||
} else {
|
||||
vec![target.parse::<u64>().map_err(|_| {
|
||||
custom_bad_request(format!(
|
||||
"Invalid validator index: {:?}",
|
||||
target.to_lowercase()
|
||||
))
|
||||
})?]
|
||||
};
|
||||
|
||||
// Load block roots.
|
||||
let mut block_roots: Vec<Hash256> = chain
|
||||
.forwards_iter_block_roots_until(start_slot, end_slot)
|
||||
.map_err(unhandled_error)?
|
||||
.map(|res| res.map(|(root, _)| root))
|
||||
.collect::<Result<Vec<Hash256>, _>>()
|
||||
.map_err(unhandled_error)?;
|
||||
block_roots.dedup();
|
||||
|
||||
// Load first block so we can get its parent.
|
||||
let first_block_root = block_roots.first().ok_or_else(|| {
|
||||
custom_server_error(
|
||||
"No blocks roots could be loaded. Ensure the beacon node is synced.".to_string(),
|
||||
)
|
||||
})?;
|
||||
let first_block = chain
|
||||
.get_blinded_block(first_block_root)
|
||||
.and_then(|maybe_block| {
|
||||
maybe_block.ok_or(BeaconChainError::MissingBeaconBlock(*first_block_root))
|
||||
})
|
||||
.map_err(unhandled_error)?;
|
||||
|
||||
// Load the block of the prior slot which will be used to build the starting state.
|
||||
let prior_block = chain
|
||||
.get_blinded_block(&first_block.parent_root())
|
||||
.and_then(|maybe_block| {
|
||||
maybe_block
|
||||
.ok_or_else(|| BeaconChainError::MissingBeaconBlock(first_block.parent_root()))
|
||||
})
|
||||
.map_err(unhandled_error)?;
|
||||
|
||||
// Load state for block replay.
|
||||
let state_root = prior_block.state_root();
|
||||
|
||||
// This branch is reached from the HTTP API. We assume the user wants
|
||||
// to cache states so that future calls are faster.
|
||||
let state = chain
|
||||
.get_state(&state_root, Some(prior_slot), true)
|
||||
.and_then(|maybe_state| maybe_state.ok_or(BeaconChainError::MissingBeaconState(state_root)))
|
||||
.map_err(unhandled_error)?;
|
||||
|
||||
// Allocate an AttestationPerformance vector for each validator in the range.
|
||||
let mut perfs: Vec<AttestationPerformance> =
|
||||
AttestationPerformance::initialize(index_range.clone());
|
||||
|
||||
let post_slot_hook = |state: &mut BeaconState<T::EthSpec>,
|
||||
summary: Option<EpochProcessingSummary<T::EthSpec>>,
|
||||
_is_skip_slot: bool|
|
||||
-> Result<(), AttestationPerformanceError> {
|
||||
// If a `summary` was not output then an epoch boundary was not crossed
|
||||
// so we move onto the next slot.
|
||||
if let Some(summary) = summary {
|
||||
for (position, i) in index_range.iter().enumerate() {
|
||||
let index = *i as usize;
|
||||
|
||||
let val = perfs
|
||||
.get_mut(position)
|
||||
.ok_or(AttestationPerformanceError::UnableToFindValidator(index))?;
|
||||
|
||||
// We are two epochs ahead since the summary is generated for
|
||||
// `state.previous_epoch()` then `summary.is_previous_epoch_X` functions return
|
||||
// data for the epoch before that.
|
||||
let epoch = state.previous_epoch().as_u64() - 1;
|
||||
|
||||
let is_active = summary.is_active_unslashed_in_previous_epoch(index);
|
||||
|
||||
let received_source_reward = summary.is_previous_epoch_source_attester(index)?;
|
||||
|
||||
let received_head_reward = summary.is_previous_epoch_head_attester(index)?;
|
||||
|
||||
let received_target_reward = summary.is_previous_epoch_target_attester(index)?;
|
||||
|
||||
let inclusion_delay = summary
|
||||
.previous_epoch_inclusion_info(index)
|
||||
.map(|info| info.delay);
|
||||
|
||||
let perf = AttestationPerformanceStatistics {
|
||||
active: is_active,
|
||||
head: received_head_reward,
|
||||
target: received_target_reward,
|
||||
source: received_source_reward,
|
||||
delay: inclusion_delay,
|
||||
};
|
||||
|
||||
val.epochs.insert(epoch, perf);
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
};
|
||||
|
||||
// Initialize block replayer
|
||||
let mut replayer = BlockReplayer::new(state, spec)
|
||||
.no_state_root_iter()
|
||||
.no_signature_verification()
|
||||
.minimal_block_root_verification()
|
||||
.post_slot_hook(Box::new(post_slot_hook));
|
||||
|
||||
// Iterate through block roots in chunks to reduce load on memory.
|
||||
for block_root_chunks in block_roots.chunks(BLOCK_ROOT_CHUNK_SIZE) {
|
||||
// Load blocks from the block root chunks.
|
||||
let blocks = block_root_chunks
|
||||
.iter()
|
||||
.map(|root| {
|
||||
chain
|
||||
.get_blinded_block(root)
|
||||
.and_then(|maybe_block| {
|
||||
maybe_block.ok_or(BeaconChainError::MissingBeaconBlock(*root))
|
||||
})
|
||||
.map_err(unhandled_error)
|
||||
})
|
||||
.collect::<Result<Vec<_>, _>>()?;
|
||||
|
||||
// TODO(gloas): add payloads
|
||||
replayer = replayer
|
||||
.apply_blocks(blocks, vec![], None)
|
||||
.map_err(|e| custom_server_error(format!("{:?}", e)))?;
|
||||
}
|
||||
|
||||
drop(replayer);
|
||||
|
||||
Ok(perfs)
|
||||
}
|
||||
@@ -1,410 +0,0 @@
|
||||
use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes};
|
||||
use eth2::lighthouse::{
|
||||
BlockPackingEfficiency, BlockPackingEfficiencyQuery, ProposerInfo, UniqueAttestation,
|
||||
};
|
||||
use parking_lot::Mutex;
|
||||
use state_processing::{
|
||||
BlockReplayError, BlockReplayer, per_epoch_processing::EpochProcessingSummary,
|
||||
};
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::marker::PhantomData;
|
||||
use std::sync::Arc;
|
||||
use types::{
|
||||
AttestationRef, BeaconCommittee, BeaconState, BeaconStateError, BlindedPayload, ChainSpec,
|
||||
Epoch, EthSpec, Hash256, OwnedBeaconCommittee, RelativeEpoch, SignedBeaconBlock, Slot,
|
||||
};
|
||||
use warp_utils::reject::{custom_bad_request, custom_server_error, unhandled_error};
|
||||
|
||||
/// Load blocks from block roots in chunks to reduce load on memory.
|
||||
const BLOCK_ROOT_CHUNK_SIZE: usize = 100;
|
||||
|
||||
#[derive(Debug)]
|
||||
// We don't use the inner values directly, but they're used in the Debug impl.
|
||||
enum PackingEfficiencyError {
|
||||
BlockReplay(#[allow(dead_code)] BlockReplayError),
|
||||
BeaconState(#[allow(dead_code)] BeaconStateError),
|
||||
CommitteeStoreError(#[allow(dead_code)] Slot),
|
||||
InvalidAttestationError,
|
||||
}
|
||||
|
||||
impl From<BlockReplayError> for PackingEfficiencyError {
|
||||
fn from(e: BlockReplayError) -> Self {
|
||||
Self::BlockReplay(e)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<BeaconStateError> for PackingEfficiencyError {
|
||||
fn from(e: BeaconStateError) -> Self {
|
||||
Self::BeaconState(e)
|
||||
}
|
||||
}
|
||||
|
||||
struct CommitteeStore {
|
||||
current_epoch_committees: Vec<OwnedBeaconCommittee>,
|
||||
previous_epoch_committees: Vec<OwnedBeaconCommittee>,
|
||||
}
|
||||
|
||||
impl CommitteeStore {
|
||||
fn new() -> Self {
|
||||
CommitteeStore {
|
||||
current_epoch_committees: Vec::new(),
|
||||
previous_epoch_committees: Vec::new(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
struct PackingEfficiencyHandler<E: EthSpec> {
|
||||
current_slot: Slot,
|
||||
current_epoch: Epoch,
|
||||
prior_skip_slots: u64,
|
||||
available_attestations: HashSet<UniqueAttestation>,
|
||||
included_attestations: HashMap<UniqueAttestation, u64>,
|
||||
committee_store: CommitteeStore,
|
||||
_phantom: PhantomData<E>,
|
||||
}
|
||||
|
||||
impl<E: EthSpec> PackingEfficiencyHandler<E> {
|
||||
fn new(
|
||||
start_epoch: Epoch,
|
||||
starting_state: BeaconState<E>,
|
||||
spec: &ChainSpec,
|
||||
) -> Result<Self, PackingEfficiencyError> {
|
||||
let mut handler = PackingEfficiencyHandler {
|
||||
current_slot: start_epoch.start_slot(E::slots_per_epoch()),
|
||||
current_epoch: start_epoch,
|
||||
prior_skip_slots: 0,
|
||||
available_attestations: HashSet::new(),
|
||||
included_attestations: HashMap::new(),
|
||||
committee_store: CommitteeStore::new(),
|
||||
_phantom: PhantomData,
|
||||
};
|
||||
|
||||
handler.compute_epoch(start_epoch, &starting_state, spec)?;
|
||||
Ok(handler)
|
||||
}
|
||||
|
||||
fn update_slot(&mut self, slot: Slot) {
|
||||
self.current_slot = slot;
|
||||
if slot % E::slots_per_epoch() == 0 {
|
||||
self.current_epoch = Epoch::new(slot.as_u64() / E::slots_per_epoch());
|
||||
}
|
||||
}
|
||||
|
||||
fn prune_included_attestations(&mut self) {
|
||||
let epoch = self.current_epoch;
|
||||
self.included_attestations.retain(|x, _| {
|
||||
x.slot >= Epoch::new(epoch.as_u64().saturating_sub(2)).start_slot(E::slots_per_epoch())
|
||||
});
|
||||
}
|
||||
|
||||
fn prune_available_attestations(&mut self) {
|
||||
let slot = self.current_slot;
|
||||
self.available_attestations
|
||||
.retain(|x| x.slot >= (slot.as_u64().saturating_sub(E::slots_per_epoch())));
|
||||
}
|
||||
|
||||
fn apply_block(
|
||||
&mut self,
|
||||
block: &SignedBeaconBlock<E, BlindedPayload<E>>,
|
||||
) -> Result<usize, PackingEfficiencyError> {
|
||||
let block_body = block.message().body();
|
||||
let attestations = block_body.attestations();
|
||||
|
||||
let mut attestations_in_block = HashMap::new();
|
||||
for attestation in attestations {
|
||||
match attestation {
|
||||
AttestationRef::Base(attn) => {
|
||||
for (position, voted) in attn.aggregation_bits.iter().enumerate() {
|
||||
if voted {
|
||||
let unique_attestation = UniqueAttestation {
|
||||
slot: attn.data.slot,
|
||||
committee_index: attn.data.index,
|
||||
committee_position: position,
|
||||
};
|
||||
let inclusion_distance: u64 = block
|
||||
.slot()
|
||||
.as_u64()
|
||||
.checked_sub(attn.data.slot.as_u64())
|
||||
.ok_or(PackingEfficiencyError::InvalidAttestationError)?;
|
||||
|
||||
self.available_attestations.remove(&unique_attestation);
|
||||
attestations_in_block.insert(unique_attestation, inclusion_distance);
|
||||
}
|
||||
}
|
||||
}
|
||||
AttestationRef::Electra(attn) => {
|
||||
for (position, voted) in attn.aggregation_bits.iter().enumerate() {
|
||||
if voted {
|
||||
let unique_attestation = UniqueAttestation {
|
||||
slot: attn.data.slot,
|
||||
committee_index: attn.data.index,
|
||||
committee_position: position,
|
||||
};
|
||||
let inclusion_distance: u64 = block
|
||||
.slot()
|
||||
.as_u64()
|
||||
.checked_sub(attn.data.slot.as_u64())
|
||||
.ok_or(PackingEfficiencyError::InvalidAttestationError)?;
|
||||
|
||||
self.available_attestations.remove(&unique_attestation);
|
||||
attestations_in_block.insert(unique_attestation, inclusion_distance);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Remove duplicate attestations as these yield no reward.
|
||||
attestations_in_block.retain(|x, _| !self.included_attestations.contains_key(x));
|
||||
self.included_attestations
|
||||
.extend(attestations_in_block.clone());
|
||||
|
||||
Ok(attestations_in_block.len())
|
||||
}
|
||||
|
||||
fn add_attestations(&mut self, slot: Slot) -> Result<(), PackingEfficiencyError> {
|
||||
let committees = self.get_committees_at_slot(slot)?;
|
||||
for committee in committees {
|
||||
for position in 0..committee.committee.len() {
|
||||
let unique_attestation = UniqueAttestation {
|
||||
slot,
|
||||
committee_index: committee.index,
|
||||
committee_position: position,
|
||||
};
|
||||
self.available_attestations.insert(unique_attestation);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn compute_epoch(
|
||||
&mut self,
|
||||
epoch: Epoch,
|
||||
state: &BeaconState<E>,
|
||||
spec: &ChainSpec,
|
||||
) -> Result<(), PackingEfficiencyError> {
|
||||
// Free some memory by pruning old attestations from the included set.
|
||||
self.prune_included_attestations();
|
||||
|
||||
let new_committees = if state.committee_cache_is_initialized(RelativeEpoch::Current) {
|
||||
state
|
||||
.get_beacon_committees_at_epoch(RelativeEpoch::Current)?
|
||||
.into_iter()
|
||||
.map(BeaconCommittee::into_owned)
|
||||
.collect::<Vec<_>>()
|
||||
} else {
|
||||
state
|
||||
.initialize_committee_cache(epoch, spec)?
|
||||
.get_all_beacon_committees()?
|
||||
.into_iter()
|
||||
.map(BeaconCommittee::into_owned)
|
||||
.collect::<Vec<_>>()
|
||||
};
|
||||
|
||||
self.committee_store
|
||||
.previous_epoch_committees
|
||||
.clone_from(&self.committee_store.current_epoch_committees);
|
||||
|
||||
self.committee_store.current_epoch_committees = new_committees;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn get_committees_at_slot(
|
||||
&self,
|
||||
slot: Slot,
|
||||
) -> Result<Vec<OwnedBeaconCommittee>, PackingEfficiencyError> {
|
||||
let mut committees = Vec::new();
|
||||
|
||||
for committee in &self.committee_store.current_epoch_committees {
|
||||
if committee.slot == slot {
|
||||
committees.push(committee.clone());
|
||||
}
|
||||
}
|
||||
for committee in &self.committee_store.previous_epoch_committees {
|
||||
if committee.slot == slot {
|
||||
committees.push(committee.clone());
|
||||
}
|
||||
}
|
||||
|
||||
if committees.is_empty() {
|
||||
return Err(PackingEfficiencyError::CommitteeStoreError(slot));
|
||||
}
|
||||
|
||||
Ok(committees)
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_block_packing_efficiency<T: BeaconChainTypes>(
|
||||
query: BlockPackingEfficiencyQuery,
|
||||
chain: Arc<BeaconChain<T>>,
|
||||
) -> Result<Vec<BlockPackingEfficiency>, warp::Rejection> {
|
||||
let spec = &chain.spec;
|
||||
|
||||
let start_epoch = query.start_epoch;
|
||||
let start_slot = start_epoch.start_slot(T::EthSpec::slots_per_epoch());
|
||||
let prior_slot = start_slot - 1;
|
||||
|
||||
let end_epoch = query.end_epoch;
|
||||
let end_slot = end_epoch.end_slot(T::EthSpec::slots_per_epoch());
|
||||
|
||||
// Check query is valid.
|
||||
if start_epoch > end_epoch || start_epoch == 0 {
|
||||
return Err(custom_bad_request(format!(
|
||||
"invalid start and end epochs: {}, {}",
|
||||
start_epoch, end_epoch
|
||||
)));
|
||||
}
|
||||
|
||||
let prior_epoch = start_epoch - 1;
|
||||
let start_slot_of_prior_epoch = prior_epoch.start_slot(T::EthSpec::slots_per_epoch());
|
||||
|
||||
// Load block roots.
|
||||
let mut block_roots: Vec<Hash256> = chain
|
||||
.forwards_iter_block_roots_until(start_slot_of_prior_epoch, end_slot)
|
||||
.map_err(unhandled_error)?
|
||||
.collect::<Result<Vec<(Hash256, Slot)>, _>>()
|
||||
.map_err(unhandled_error)?
|
||||
.iter()
|
||||
.map(|(root, _)| *root)
|
||||
.collect();
|
||||
block_roots.dedup();
|
||||
|
||||
let first_block_root = block_roots
|
||||
.first()
|
||||
.ok_or_else(|| custom_server_error("no blocks were loaded".to_string()))?;
|
||||
|
||||
let first_block = chain
|
||||
.get_blinded_block(first_block_root)
|
||||
.and_then(|maybe_block| {
|
||||
maybe_block.ok_or(BeaconChainError::MissingBeaconBlock(*first_block_root))
|
||||
})
|
||||
.map_err(unhandled_error)?;
|
||||
|
||||
// Load state for block replay.
|
||||
let starting_state_root = first_block.state_root();
|
||||
|
||||
// This branch is reached from the HTTP API. We assume the user wants
|
||||
// to cache states so that future calls are faster.
|
||||
let starting_state = chain
|
||||
.get_state(&starting_state_root, Some(prior_slot), true)
|
||||
.and_then(|maybe_state| {
|
||||
maybe_state.ok_or(BeaconChainError::MissingBeaconState(starting_state_root))
|
||||
})
|
||||
.map_err(unhandled_error)?;
|
||||
|
||||
// Initialize response vector.
|
||||
let mut response = Vec::new();
|
||||
|
||||
// Initialize handler.
|
||||
let handler = Arc::new(Mutex::new(
|
||||
PackingEfficiencyHandler::new(prior_epoch, starting_state.clone(), spec)
|
||||
.map_err(|e| custom_server_error(format!("{:?}", e)))?,
|
||||
));
|
||||
|
||||
let pre_slot_hook =
|
||||
|_, state: &mut BeaconState<T::EthSpec>| -> Result<(), PackingEfficiencyError> {
|
||||
// Add attestations to `available_attestations`.
|
||||
handler.lock().add_attestations(state.slot())?;
|
||||
Ok(())
|
||||
};
|
||||
|
||||
let post_slot_hook = |state: &mut BeaconState<T::EthSpec>,
|
||||
_summary: Option<EpochProcessingSummary<T::EthSpec>>,
|
||||
is_skip_slot: bool|
|
||||
-> Result<(), PackingEfficiencyError> {
|
||||
handler.lock().update_slot(state.slot());
|
||||
|
||||
// Check if this a new epoch.
|
||||
if state.slot() % T::EthSpec::slots_per_epoch() == 0 {
|
||||
handler.lock().compute_epoch(
|
||||
state.slot().epoch(T::EthSpec::slots_per_epoch()),
|
||||
state,
|
||||
spec,
|
||||
)?;
|
||||
}
|
||||
|
||||
if is_skip_slot {
|
||||
handler.lock().prior_skip_slots += 1;
|
||||
}
|
||||
|
||||
// Remove expired attestations.
|
||||
handler.lock().prune_available_attestations();
|
||||
|
||||
Ok(())
|
||||
};
|
||||
|
||||
let pre_block_hook = |_state: &mut BeaconState<T::EthSpec>,
|
||||
block: &SignedBeaconBlock<_, BlindedPayload<_>>|
|
||||
-> Result<(), PackingEfficiencyError> {
|
||||
let slot = block.slot();
|
||||
|
||||
let block_message = block.message();
|
||||
// Get block proposer info.
|
||||
let proposer_info = ProposerInfo {
|
||||
validator_index: block_message.proposer_index(),
|
||||
graffiti: block_message.body().graffiti().as_utf8_lossy(),
|
||||
};
|
||||
|
||||
// Store the count of available attestations at this point.
|
||||
// In the future it may be desirable to check that the number of available attestations
|
||||
// does not exceed the maximum possible amount given the length of available committees.
|
||||
let available_count = handler.lock().available_attestations.len();
|
||||
|
||||
// Get all attestations included in the block.
|
||||
let included = handler.lock().apply_block(block)?;
|
||||
|
||||
let efficiency = BlockPackingEfficiency {
|
||||
slot,
|
||||
block_hash: block.canonical_root(),
|
||||
proposer_info,
|
||||
available_attestations: available_count,
|
||||
included_attestations: included,
|
||||
prior_skip_slots: handler.lock().prior_skip_slots,
|
||||
};
|
||||
|
||||
// Write to response.
|
||||
if slot >= start_slot {
|
||||
response.push(efficiency);
|
||||
}
|
||||
|
||||
handler.lock().prior_skip_slots = 0;
|
||||
|
||||
Ok(())
|
||||
};
|
||||
|
||||
// Build BlockReplayer.
|
||||
let mut replayer = BlockReplayer::new(starting_state, spec)
|
||||
.no_state_root_iter()
|
||||
.no_signature_verification()
|
||||
.minimal_block_root_verification()
|
||||
.pre_slot_hook(Box::new(pre_slot_hook))
|
||||
.post_slot_hook(Box::new(post_slot_hook))
|
||||
.pre_block_hook(Box::new(pre_block_hook));
|
||||
|
||||
// Iterate through the block roots, loading blocks in chunks to reduce load on memory.
|
||||
for block_root_chunks in block_roots.chunks(BLOCK_ROOT_CHUNK_SIZE) {
|
||||
// Load blocks from the block root chunks.
|
||||
let blocks = block_root_chunks
|
||||
.iter()
|
||||
.map(|root| {
|
||||
chain
|
||||
.get_blinded_block(root)
|
||||
.and_then(|maybe_block| {
|
||||
maybe_block.ok_or(BeaconChainError::MissingBeaconBlock(*root))
|
||||
})
|
||||
.map_err(unhandled_error)
|
||||
})
|
||||
.collect::<Result<Vec<_>, _>>()?;
|
||||
|
||||
// TODO(gloas): add payloads
|
||||
replayer = replayer
|
||||
.apply_blocks(blocks, vec![], None)
|
||||
.map_err(|e: PackingEfficiencyError| custom_server_error(format!("{:?}", e)))?;
|
||||
}
|
||||
|
||||
drop(replayer);
|
||||
|
||||
Ok(response)
|
||||
}
|
||||
@@ -7,11 +7,9 @@
|
||||
//! used for development.
|
||||
|
||||
mod aggregate_attestation;
|
||||
mod attestation_performance;
|
||||
mod attester_duties;
|
||||
mod beacon;
|
||||
mod block_id;
|
||||
mod block_packing_efficiency;
|
||||
mod build_block_contents;
|
||||
mod builder_states;
|
||||
mod custody;
|
||||
@@ -3091,39 +3089,6 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
},
|
||||
);
|
||||
|
||||
// GET lighthouse/analysis/attestation_performance/{index}
|
||||
let get_lighthouse_attestation_performance = warp::path("lighthouse")
|
||||
.and(warp::path("analysis"))
|
||||
.and(warp::path("attestation_performance"))
|
||||
.and(warp::path::param::<String>())
|
||||
.and(warp::query::<eth2::lighthouse::AttestationPerformanceQuery>())
|
||||
.and(warp::path::end())
|
||||
.and(task_spawner_filter.clone())
|
||||
.and(chain_filter.clone())
|
||||
.then(
|
||||
|target, query, task_spawner: TaskSpawner<T::EthSpec>, chain: Arc<BeaconChain<T>>| {
|
||||
task_spawner.blocking_json_task(Priority::P1, move || {
|
||||
attestation_performance::get_attestation_performance(target, query, chain)
|
||||
})
|
||||
},
|
||||
);
|
||||
|
||||
// GET lighthouse/analysis/block_packing_efficiency
|
||||
let get_lighthouse_block_packing_efficiency = warp::path("lighthouse")
|
||||
.and(warp::path("analysis"))
|
||||
.and(warp::path("block_packing_efficiency"))
|
||||
.and(warp::query::<eth2::lighthouse::BlockPackingEfficiencyQuery>())
|
||||
.and(warp::path::end())
|
||||
.and(task_spawner_filter.clone())
|
||||
.and(chain_filter.clone())
|
||||
.then(
|
||||
|query, task_spawner: TaskSpawner<T::EthSpec>, chain: Arc<BeaconChain<T>>| {
|
||||
task_spawner.blocking_json_task(Priority::P1, move || {
|
||||
block_packing_efficiency::get_block_packing_efficiency(query, chain)
|
||||
})
|
||||
},
|
||||
);
|
||||
|
||||
let get_events = eth_v1
|
||||
.clone()
|
||||
.and(warp::path("events"))
|
||||
@@ -3359,12 +3324,10 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
.uor(get_lighthouse_database_info)
|
||||
.uor(get_lighthouse_database_invariants)
|
||||
.uor(get_lighthouse_custody_info)
|
||||
.uor(get_lighthouse_attestation_performance)
|
||||
.uor(get_beacon_light_client_optimistic_update)
|
||||
.uor(get_beacon_light_client_finality_update)
|
||||
.uor(get_beacon_light_client_bootstrap)
|
||||
.uor(get_beacon_light_client_updates)
|
||||
.uor(get_lighthouse_block_packing_efficiency)
|
||||
.uor(get_events)
|
||||
.uor(get_expected_withdrawals)
|
||||
.uor(lighthouse_log_events.boxed())
|
||||
|
||||
Reference in New Issue
Block a user