This commit is contained in:
Eitan Seri-Levi
2026-03-26 22:16:00 -07:00
parent 45b2a6eddc
commit 85ed39040a
6 changed files with 0 additions and 1572 deletions

View File

@@ -1,217 +0,0 @@
use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes};
use eth2::lighthouse::{
AttestationPerformance, AttestationPerformanceQuery, AttestationPerformanceStatistics,
};
use state_processing::{
BlockReplayError, BlockReplayer, per_epoch_processing::EpochProcessingSummary,
};
use std::sync::Arc;
use types::{BeaconState, BeaconStateError, EthSpec, Hash256};
use warp_utils::reject::{custom_bad_request, custom_server_error, unhandled_error};
/// Maximum number of epochs that may be requested in one query, bounding response size.
const MAX_REQUEST_RANGE_EPOCHS: usize = 100;
/// Load blocks from block roots in chunks to reduce load on memory.
const BLOCK_ROOT_CHUNK_SIZE: usize = 100;
/// Errors that can arise while replaying blocks to compute attestation performance.
#[derive(Debug)]
// We don't use the inner values directly, but they're used in the Debug impl.
enum AttestationPerformanceError {
    // An error surfaced by the `BlockReplayer` while applying blocks.
    BlockReplay(#[allow(dead_code)] BlockReplayError),
    // An error raised while reading from the `BeaconState`.
    BeaconState(#[allow(dead_code)] BeaconStateError),
    // No entry for the validator at this index in the performance vector.
    UnableToFindValidator(#[allow(dead_code)] usize),
}
impl From<BlockReplayError> for AttestationPerformanceError {
fn from(e: BlockReplayError) -> Self {
Self::BlockReplay(e)
}
}
impl From<BeaconStateError> for AttestationPerformanceError {
fn from(e: BeaconStateError) -> Self {
Self::BeaconState(e)
}
}
/// Compute per-validator attestation performance (head/target/source votes and
/// inclusion delay) over `[query.start_epoch, query.end_epoch]` by replaying
/// canonical blocks and inspecting each `EpochProcessingSummary`.
///
/// `target` is either the string `"global"` (every validator in the head state) or a
/// single validator index as a decimal string. Returns one `AttestationPerformance`
/// per requested index.
pub fn get_attestation_performance<T: BeaconChainTypes>(
    target: String,
    query: AttestationPerformanceQuery,
    chain: Arc<BeaconChain<T>>,
) -> Result<Vec<AttestationPerformance>, warp::Rejection> {
    let spec = &chain.spec;
    // We increment by 2 here so that when we build the state from the `prior_slot` it is
    // still 1 epoch ahead of the first epoch we want to analyse.
    // This ensures the `.is_previous_epoch_X` functions on `EpochProcessingSummary` return results
    // for the correct epoch.
    let start_epoch = query.start_epoch + 2;
    let start_slot = start_epoch.start_slot(T::EthSpec::slots_per_epoch());
    let prior_slot = start_slot - 1;
    let end_epoch = query.end_epoch + 2;
    let end_slot = end_epoch.end_slot(T::EthSpec::slots_per_epoch());
    // Ensure end_epoch is smaller than the current epoch - 1.
    // NOTE(review): `current_epoch - 1` would underflow at epoch 0 — presumably the
    // node cannot be queried that early, but worth confirming.
    let current_epoch = chain.epoch().map_err(unhandled_error)?;
    if query.end_epoch >= current_epoch - 1 {
        return Err(custom_bad_request(format!(
            "end_epoch must be less than the current epoch - 1. current: {}, end: {}",
            current_epoch, query.end_epoch
        )));
    }
    // Check query is valid.
    if start_epoch > end_epoch {
        return Err(custom_bad_request(format!(
            "start_epoch must not be larger than end_epoch. start: {}, end: {}",
            query.start_epoch, query.end_epoch
        )));
    }
    // The response size can grow exceptionally large therefore we should check that the
    // query is within permitted bounds to prevent potential OOM errors.
    if (end_epoch - start_epoch).as_usize() > MAX_REQUEST_RANGE_EPOCHS {
        return Err(custom_bad_request(format!(
            "end_epoch must not exceed start_epoch by more than {} epochs. start: {}, end: {}",
            MAX_REQUEST_RANGE_EPOCHS, query.start_epoch, query.end_epoch
        )));
    }
    // Either use the global validator set, or the specified index.
    //
    // Does no further validation of the indices, so in the event an index has not yet been
    // activated or does not yet exist (according to the head state), it will return all fields as
    // `false`.
    let index_range = if target.to_lowercase() == "global" {
        chain
            .with_head(|head| Ok((0..head.beacon_state.validators().len() as u64).collect()))
            .map_err(unhandled_error::<BeaconChainError>)?
    } else {
        vec![target.parse::<u64>().map_err(|_| {
            custom_bad_request(format!(
                "Invalid validator index: {:?}",
                target.to_lowercase()
            ))
        })?]
    };
    // Load block roots.
    let mut block_roots: Vec<Hash256> = chain
        .forwards_iter_block_roots_until(start_slot, end_slot)
        .map_err(unhandled_error)?
        .map(|res| res.map(|(root, _)| root))
        .collect::<Result<Vec<Hash256>, _>>()
        .map_err(unhandled_error)?;
    // Collapse consecutive duplicate roots (presumably repeats from skip slots).
    block_roots.dedup();
    // Load first block so we can get its parent.
    let first_block_root = block_roots.first().ok_or_else(|| {
        custom_server_error(
            "No blocks roots could be loaded. Ensure the beacon node is synced.".to_string(),
        )
    })?;
    let first_block = chain
        .get_blinded_block(first_block_root)
        .and_then(|maybe_block| {
            maybe_block.ok_or(BeaconChainError::MissingBeaconBlock(*first_block_root))
        })
        .map_err(unhandled_error)?;
    // Load the block of the prior slot which will be used to build the starting state.
    let prior_block = chain
        .get_blinded_block(&first_block.parent_root())
        .and_then(|maybe_block| {
            maybe_block
                .ok_or_else(|| BeaconChainError::MissingBeaconBlock(first_block.parent_root()))
        })
        .map_err(unhandled_error)?;
    // Load state for block replay.
    let state_root = prior_block.state_root();
    // This branch is reached from the HTTP API. We assume the user wants
    // to cache states so that future calls are faster.
    let state = chain
        .get_state(&state_root, Some(prior_slot), true)
        .and_then(|maybe_state| maybe_state.ok_or(BeaconChainError::MissingBeaconState(state_root)))
        .map_err(unhandled_error)?;
    // Allocate an AttestationPerformance vector for each validator in the range.
    let mut perfs: Vec<AttestationPerformance> =
        AttestationPerformance::initialize(index_range.clone());
    // Runs after every replayed slot; only epoch-boundary slots yield a `summary`,
    // from which each requested validator's per-epoch statistics are recorded.
    let post_slot_hook = |state: &mut BeaconState<T::EthSpec>,
                          summary: Option<EpochProcessingSummary<T::EthSpec>>,
                          _is_skip_slot: bool|
     -> Result<(), AttestationPerformanceError> {
        // If a `summary` was not output then an epoch boundary was not crossed
        // so we move onto the next slot.
        if let Some(summary) = summary {
            for (position, i) in index_range.iter().enumerate() {
                let index = *i as usize;
                let val = perfs
                    .get_mut(position)
                    .ok_or(AttestationPerformanceError::UnableToFindValidator(index))?;
                // We are two epochs ahead since the summary is generated for
                // `state.previous_epoch()` then `summary.is_previous_epoch_X` functions return
                // data for the epoch before that.
                let epoch = state.previous_epoch().as_u64() - 1;
                let is_active = summary.is_active_unslashed_in_previous_epoch(index);
                let received_source_reward = summary.is_previous_epoch_source_attester(index)?;
                let received_head_reward = summary.is_previous_epoch_head_attester(index)?;
                let received_target_reward = summary.is_previous_epoch_target_attester(index)?;
                let inclusion_delay = summary
                    .previous_epoch_inclusion_info(index)
                    .map(|info| info.delay);
                let perf = AttestationPerformanceStatistics {
                    active: is_active,
                    head: received_head_reward,
                    target: received_target_reward,
                    source: received_source_reward,
                    delay: inclusion_delay,
                };
                val.epochs.insert(epoch, perf);
            }
        }
        Ok(())
    };
    // Initialize block replayer
    let mut replayer = BlockReplayer::new(state, spec)
        .no_state_root_iter()
        .no_signature_verification()
        .minimal_block_root_verification()
        .post_slot_hook(Box::new(post_slot_hook));
    // Iterate through block roots in chunks to reduce load on memory.
    for block_root_chunks in block_roots.chunks(BLOCK_ROOT_CHUNK_SIZE) {
        // Load blocks from the block root chunks.
        let blocks = block_root_chunks
            .iter()
            .map(|root| {
                chain
                    .get_blinded_block(root)
                    .and_then(|maybe_block| {
                        maybe_block.ok_or(BeaconChainError::MissingBeaconBlock(*root))
                    })
                    .map_err(unhandled_error)
            })
            .collect::<Result<Vec<_>, _>>()?;
        // TODO(gloas): add payloads
        replayer = replayer
            .apply_blocks(blocks, vec![], None)
            .map_err(|e| custom_server_error(format!("{:?}", e)))?;
    }
    // Drop the replayer so the hook's mutable borrow of `perfs` ends before returning it.
    drop(replayer);
    Ok(perfs)
}

View File

@@ -1,410 +0,0 @@
use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes};
use eth2::lighthouse::{
BlockPackingEfficiency, BlockPackingEfficiencyQuery, ProposerInfo, UniqueAttestation,
};
use parking_lot::Mutex;
use state_processing::{
BlockReplayError, BlockReplayer, per_epoch_processing::EpochProcessingSummary,
};
use std::collections::{HashMap, HashSet};
use std::marker::PhantomData;
use std::sync::Arc;
use types::{
AttestationRef, BeaconCommittee, BeaconState, BeaconStateError, BlindedPayload, ChainSpec,
Epoch, EthSpec, Hash256, OwnedBeaconCommittee, RelativeEpoch, SignedBeaconBlock, Slot,
};
use warp_utils::reject::{custom_bad_request, custom_server_error, unhandled_error};
/// Load blocks from block roots in chunks to reduce load on memory.
const BLOCK_ROOT_CHUNK_SIZE: usize = 100;
/// Errors that can arise while computing block packing efficiency.
#[derive(Debug)]
// We don't use the inner values directly, but they're used in the Debug impl.
enum PackingEfficiencyError {
    // An error surfaced by the `BlockReplayer` while applying blocks.
    BlockReplay(#[allow(dead_code)] BlockReplayError),
    // An error raised while reading from the `BeaconState`.
    BeaconState(#[allow(dead_code)] BeaconStateError),
    // No committees were cached for the given slot.
    CommitteeStoreError(#[allow(dead_code)] Slot),
    // An attestation's slot was later than the slot of the block including it.
    InvalidAttestationError,
}
impl From<BlockReplayError> for PackingEfficiencyError {
fn from(e: BlockReplayError) -> Self {
Self::BlockReplay(e)
}
}
impl From<BeaconStateError> for PackingEfficiencyError {
fn from(e: BeaconStateError) -> Self {
Self::BeaconState(e)
}
}
/// Caches beacon committees for the current and previous epochs so that committee
/// lookups during replay do not need to touch the state.
struct CommitteeStore {
    // Committees for the epoch currently being processed.
    current_epoch_committees: Vec<OwnedBeaconCommittee>,
    // Committees for the epoch before the current one.
    previous_epoch_committees: Vec<OwnedBeaconCommittee>,
}
impl CommitteeStore {
fn new() -> Self {
CommitteeStore {
current_epoch_committees: Vec::new(),
previous_epoch_committees: Vec::new(),
}
}
}
/// Tracks attestation availability and inclusion while blocks are replayed, so each
/// block's packing efficiency can be computed.
struct PackingEfficiencyHandler<E: EthSpec> {
    // Slot most recently processed via `update_slot`.
    current_slot: Slot,
    // Epoch of `current_slot`, rolled forward on epoch boundaries.
    current_epoch: Epoch,
    // Count of skip slots observed since the last block.
    prior_skip_slots: u64,
    // Attestations that could still be included by a future block.
    available_attestations: HashSet<UniqueAttestation>,
    // Attestations already included, mapped to their inclusion distance.
    included_attestations: HashMap<UniqueAttestation, u64>,
    // Committee cache for the current and previous epochs.
    committee_store: CommitteeStore,
    // Ties the handler to a specific `EthSpec` without storing one.
    _phantom: PhantomData<E>,
}
impl<E: EthSpec> PackingEfficiencyHandler<E> {
fn new(
start_epoch: Epoch,
starting_state: BeaconState<E>,
spec: &ChainSpec,
) -> Result<Self, PackingEfficiencyError> {
let mut handler = PackingEfficiencyHandler {
current_slot: start_epoch.start_slot(E::slots_per_epoch()),
current_epoch: start_epoch,
prior_skip_slots: 0,
available_attestations: HashSet::new(),
included_attestations: HashMap::new(),
committee_store: CommitteeStore::new(),
_phantom: PhantomData,
};
handler.compute_epoch(start_epoch, &starting_state, spec)?;
Ok(handler)
}
fn update_slot(&mut self, slot: Slot) {
self.current_slot = slot;
if slot % E::slots_per_epoch() == 0 {
self.current_epoch = Epoch::new(slot.as_u64() / E::slots_per_epoch());
}
}
fn prune_included_attestations(&mut self) {
let epoch = self.current_epoch;
self.included_attestations.retain(|x, _| {
x.slot >= Epoch::new(epoch.as_u64().saturating_sub(2)).start_slot(E::slots_per_epoch())
});
}
fn prune_available_attestations(&mut self) {
let slot = self.current_slot;
self.available_attestations
.retain(|x| x.slot >= (slot.as_u64().saturating_sub(E::slots_per_epoch())));
}
fn apply_block(
&mut self,
block: &SignedBeaconBlock<E, BlindedPayload<E>>,
) -> Result<usize, PackingEfficiencyError> {
let block_body = block.message().body();
let attestations = block_body.attestations();
let mut attestations_in_block = HashMap::new();
for attestation in attestations {
match attestation {
AttestationRef::Base(attn) => {
for (position, voted) in attn.aggregation_bits.iter().enumerate() {
if voted {
let unique_attestation = UniqueAttestation {
slot: attn.data.slot,
committee_index: attn.data.index,
committee_position: position,
};
let inclusion_distance: u64 = block
.slot()
.as_u64()
.checked_sub(attn.data.slot.as_u64())
.ok_or(PackingEfficiencyError::InvalidAttestationError)?;
self.available_attestations.remove(&unique_attestation);
attestations_in_block.insert(unique_attestation, inclusion_distance);
}
}
}
AttestationRef::Electra(attn) => {
for (position, voted) in attn.aggregation_bits.iter().enumerate() {
if voted {
let unique_attestation = UniqueAttestation {
slot: attn.data.slot,
committee_index: attn.data.index,
committee_position: position,
};
let inclusion_distance: u64 = block
.slot()
.as_u64()
.checked_sub(attn.data.slot.as_u64())
.ok_or(PackingEfficiencyError::InvalidAttestationError)?;
self.available_attestations.remove(&unique_attestation);
attestations_in_block.insert(unique_attestation, inclusion_distance);
}
}
}
}
}
// Remove duplicate attestations as these yield no reward.
attestations_in_block.retain(|x, _| !self.included_attestations.contains_key(x));
self.included_attestations
.extend(attestations_in_block.clone());
Ok(attestations_in_block.len())
}
fn add_attestations(&mut self, slot: Slot) -> Result<(), PackingEfficiencyError> {
let committees = self.get_committees_at_slot(slot)?;
for committee in committees {
for position in 0..committee.committee.len() {
let unique_attestation = UniqueAttestation {
slot,
committee_index: committee.index,
committee_position: position,
};
self.available_attestations.insert(unique_attestation);
}
}
Ok(())
}
fn compute_epoch(
&mut self,
epoch: Epoch,
state: &BeaconState<E>,
spec: &ChainSpec,
) -> Result<(), PackingEfficiencyError> {
// Free some memory by pruning old attestations from the included set.
self.prune_included_attestations();
let new_committees = if state.committee_cache_is_initialized(RelativeEpoch::Current) {
state
.get_beacon_committees_at_epoch(RelativeEpoch::Current)?
.into_iter()
.map(BeaconCommittee::into_owned)
.collect::<Vec<_>>()
} else {
state
.initialize_committee_cache(epoch, spec)?
.get_all_beacon_committees()?
.into_iter()
.map(BeaconCommittee::into_owned)
.collect::<Vec<_>>()
};
self.committee_store
.previous_epoch_committees
.clone_from(&self.committee_store.current_epoch_committees);
self.committee_store.current_epoch_committees = new_committees;
Ok(())
}
fn get_committees_at_slot(
&self,
slot: Slot,
) -> Result<Vec<OwnedBeaconCommittee>, PackingEfficiencyError> {
let mut committees = Vec::new();
for committee in &self.committee_store.current_epoch_committees {
if committee.slot == slot {
committees.push(committee.clone());
}
}
for committee in &self.committee_store.previous_epoch_committees {
if committee.slot == slot {
committees.push(committee.clone());
}
}
if committees.is_empty() {
return Err(PackingEfficiencyError::CommitteeStoreError(slot));
}
Ok(committees)
}
}
/// Compute the packing efficiency (attestations included vs. available) of every
/// canonical block in `[query.start_epoch, query.end_epoch]`, replaying from the
/// epoch prior to `start_epoch` so the first reported epoch has warm committee data.
pub fn get_block_packing_efficiency<T: BeaconChainTypes>(
    query: BlockPackingEfficiencyQuery,
    chain: Arc<BeaconChain<T>>,
) -> Result<Vec<BlockPackingEfficiency>, warp::Rejection> {
    let spec = &chain.spec;
    let start_epoch = query.start_epoch;
    let start_slot = start_epoch.start_slot(T::EthSpec::slots_per_epoch());
    let end_epoch = query.end_epoch;
    let end_slot = end_epoch.end_slot(T::EthSpec::slots_per_epoch());
    // Check query is valid. This must run BEFORE the `start_slot - 1` /
    // `start_epoch - 1` computations below: for `start_epoch == 0` those would
    // underflow (panicking) instead of returning this 400 response.
    if start_epoch > end_epoch || start_epoch == 0 {
        return Err(custom_bad_request(format!(
            "invalid start and end epochs: {}, {}",
            start_epoch, end_epoch
        )));
    }
    let prior_slot = start_slot - 1;
    let prior_epoch = start_epoch - 1;
    let start_slot_of_prior_epoch = prior_epoch.start_slot(T::EthSpec::slots_per_epoch());
    // Load block roots.
    let mut block_roots: Vec<Hash256> = chain
        .forwards_iter_block_roots_until(start_slot_of_prior_epoch, end_slot)
        .map_err(unhandled_error)?
        .collect::<Result<Vec<(Hash256, Slot)>, _>>()
        .map_err(unhandled_error)?
        .iter()
        .map(|(root, _)| *root)
        .collect();
    // Collapse consecutive duplicate roots (presumably repeats from skip slots).
    block_roots.dedup();
    let first_block_root = block_roots
        .first()
        .ok_or_else(|| custom_server_error("no blocks were loaded".to_string()))?;
    let first_block = chain
        .get_blinded_block(first_block_root)
        .and_then(|maybe_block| {
            maybe_block.ok_or(BeaconChainError::MissingBeaconBlock(*first_block_root))
        })
        .map_err(unhandled_error)?;
    // Load state for block replay.
    let starting_state_root = first_block.state_root();
    // This branch is reached from the HTTP API. We assume the user wants
    // to cache states so that future calls are faster.
    let starting_state = chain
        .get_state(&starting_state_root, Some(prior_slot), true)
        .and_then(|maybe_state| {
            maybe_state.ok_or(BeaconChainError::MissingBeaconState(starting_state_root))
        })
        .map_err(unhandled_error)?;
    // Initialize response vector.
    let mut response = Vec::new();
    // Initialize handler. The `Mutex` lets the three replay hooks below share
    // mutable access to the handler.
    let handler = Arc::new(Mutex::new(
        PackingEfficiencyHandler::new(prior_epoch, starting_state.clone(), spec)
            .map_err(|e| custom_server_error(format!("{:?}", e)))?,
    ));
    let pre_slot_hook =
        |_, state: &mut BeaconState<T::EthSpec>| -> Result<(), PackingEfficiencyError> {
            // Add attestations to `available_attestations`.
            handler.lock().add_attestations(state.slot())?;
            Ok(())
        };
    let post_slot_hook = |state: &mut BeaconState<T::EthSpec>,
                          _summary: Option<EpochProcessingSummary<T::EthSpec>>,
                          is_skip_slot: bool|
     -> Result<(), PackingEfficiencyError> {
        handler.lock().update_slot(state.slot());
        // Check if this a new epoch.
        if state.slot() % T::EthSpec::slots_per_epoch() == 0 {
            handler.lock().compute_epoch(
                state.slot().epoch(T::EthSpec::slots_per_epoch()),
                state,
                spec,
            )?;
        }
        if is_skip_slot {
            handler.lock().prior_skip_slots += 1;
        }
        // Remove expired attestations.
        handler.lock().prune_available_attestations();
        Ok(())
    };
    let pre_block_hook = |_state: &mut BeaconState<T::EthSpec>,
                          block: &SignedBeaconBlock<_, BlindedPayload<_>>|
     -> Result<(), PackingEfficiencyError> {
        let slot = block.slot();
        let block_message = block.message();
        // Get block proposer info.
        let proposer_info = ProposerInfo {
            validator_index: block_message.proposer_index(),
            graffiti: block_message.body().graffiti().as_utf8_lossy(),
        };
        // Store the count of available attestations at this point.
        // In the future it may be desirable to check that the number of available attestations
        // does not exceed the maximum possible amount given the length of available committees.
        let available_count = handler.lock().available_attestations.len();
        // Get all attestations included in the block.
        let included = handler.lock().apply_block(block)?;
        let efficiency = BlockPackingEfficiency {
            slot,
            block_hash: block.canonical_root(),
            proposer_info,
            available_attestations: available_count,
            included_attestations: included,
            prior_skip_slots: handler.lock().prior_skip_slots,
        };
        // Write to response. Blocks from the warm-up (prior) epoch are replayed but
        // not reported.
        if slot >= start_slot {
            response.push(efficiency);
        }
        handler.lock().prior_skip_slots = 0;
        Ok(())
    };
    // Build BlockReplayer.
    let mut replayer = BlockReplayer::new(starting_state, spec)
        .no_state_root_iter()
        .no_signature_verification()
        .minimal_block_root_verification()
        .pre_slot_hook(Box::new(pre_slot_hook))
        .post_slot_hook(Box::new(post_slot_hook))
        .pre_block_hook(Box::new(pre_block_hook));
    // Iterate through the block roots, loading blocks in chunks to reduce load on memory.
    for block_root_chunks in block_roots.chunks(BLOCK_ROOT_CHUNK_SIZE) {
        // Load blocks from the block root chunks.
        let blocks = block_root_chunks
            .iter()
            .map(|root| {
                chain
                    .get_blinded_block(root)
                    .and_then(|maybe_block| {
                        maybe_block.ok_or(BeaconChainError::MissingBeaconBlock(*root))
                    })
                    .map_err(unhandled_error)
            })
            .collect::<Result<Vec<_>, _>>()?;
        // TODO(gloas): add payloads
        replayer = replayer
            .apply_blocks(blocks, vec![], None)
            .map_err(|e: PackingEfficiencyError| custom_server_error(format!("{:?}", e)))?;
    }
    // Drop the replayer so the hooks' borrows of `handler` and `response` end.
    drop(replayer);
    Ok(response)
}

View File

@@ -1,185 +0,0 @@
use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes, WhenSlotSkipped};
use eth2::lighthouse::{BlockReward, BlockRewardsQuery};
use lru::LruCache;
use state_processing::BlockReplayer;
use std::num::NonZeroUsize;
use std::sync::Arc;
use tracing::{debug, warn};
use types::block::BlindedBeaconBlock;
use types::execution::StatePayloadStatus;
use types::new_non_zero_usize;
use warp_utils::reject::{beacon_state_error, custom_bad_request, unhandled_error};
/// Number of recently-built states retained by `compute_block_rewards` between iterations.
const STATE_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(2);
/// Fetch block rewards for blocks from the canonical chain.
///
/// Replays the canonical blocks in `[query.start_slot, query.end_slot]` on top of the
/// state at `start_slot - 1`, computing each block's reward in a pre-block hook.
pub fn get_block_rewards<T: BeaconChainTypes>(
    query: BlockRewardsQuery,
    chain: Arc<BeaconChain<T>>,
) -> Result<Vec<BlockReward>, warp::Rejection> {
    let start_slot = query.start_slot;
    let end_slot = query.end_slot;
    // Validate BEFORE computing `prior_slot`: `start_slot - 1` would underflow
    // (panicking) for slot 0 instead of returning this 400 response.
    if start_slot > end_slot || start_slot == 0 {
        return Err(custom_bad_request(format!(
            "invalid start and end: {}, {}",
            start_slot, end_slot
        )));
    }
    let prior_slot = start_slot - 1;
    let end_block_root = chain
        .block_root_at_slot(end_slot, WhenSlotSkipped::Prev)
        .map_err(unhandled_error)?
        .ok_or_else(|| custom_bad_request(format!("block at end slot {} unknown", end_slot)))?;
    let (blocks, envelopes) = chain
        .store
        .load_blocks_to_replay(
            start_slot,
            end_slot,
            end_block_root,
            StatePayloadStatus::Pending,
        )
        .map_err(|e| unhandled_error(BeaconChainError::from(e)))?;
    let state_root = chain
        .state_root_at_slot(prior_slot)
        .map_err(unhandled_error)?
        .ok_or_else(|| custom_bad_request(format!("prior state at slot {} unknown", prior_slot)))?;
    // This branch is reached from the HTTP API. We assume the user wants
    // to cache states so that future calls are faster.
    let mut state = chain
        .get_state(&state_root, Some(prior_slot), true)
        .and_then(|maybe_state| maybe_state.ok_or(BeaconChainError::MissingBeaconState(state_root)))
        .map_err(unhandled_error)?;
    state
        .build_caches(&chain.spec)
        .map_err(beacon_state_error)?;
    let mut reward_cache = Default::default();
    let mut block_rewards = Vec::with_capacity(blocks.len());
    let block_replayer = BlockReplayer::new(state, &chain.spec)
        .pre_block_hook(Box::new(|state, block| {
            // Committee caches are required by the reward computation below.
            state.build_all_committee_caches(&chain.spec)?;
            // Compute block reward.
            let block_reward = chain.compute_block_reward(
                block.message(),
                block.canonical_root(),
                state,
                &mut reward_cache,
                query.include_attestations,
            )?;
            block_rewards.push(block_reward);
            Ok(())
        }))
        .state_root_iter(
            chain
                .forwards_iter_state_roots_until(prior_slot, end_slot)
                .map_err(unhandled_error)?,
        )
        .no_signature_verification()
        .minimal_block_root_verification()
        .apply_blocks(blocks, envelopes, None)
        .map_err(unhandled_error)?;
    // A state-root miss is slow but not fatal; surface it in the logs.
    if block_replayer.state_root_miss() {
        warn!(%start_slot, %end_slot, "Block reward state root miss");
    }
    // Drop the replayer so the hook's borrow of `block_rewards` ends.
    drop(block_replayer);
    Ok(block_rewards)
}
/// Compute block rewards for blocks passed in as input.
///
/// For each block, the parent state is fetched (or reused from a small LRU cache keyed
/// by `(parent_root, slot)`), advanced to the block's slot, and used to compute the
/// block's reward. Attestation details are always included (`true` below).
pub fn compute_block_rewards<T: BeaconChainTypes>(
    blocks: Vec<BlindedBeaconBlock<T::EthSpec>>,
    chain: Arc<BeaconChain<T>>,
) -> Result<Vec<BlockReward>, warp::Rejection> {
    let mut block_rewards = Vec::with_capacity(blocks.len());
    let mut state_cache = LruCache::new(STATE_CACHE_SIZE);
    let mut reward_cache = Default::default();
    for block in blocks {
        let parent_root = block.parent_root();
        // Check LRU cache for a constructed state from a previous iteration.
        let state = if let Some(state) = state_cache.get(&(parent_root, block.slot())) {
            debug!(
                ?parent_root,
                slot = %block.slot(),
                "Re-using cached state for block rewards"
            );
            state
        } else {
            debug!(
                ?parent_root,
                slot = %block.slot(),
                "Fetching state for block rewards"
            );
            let parent_block = chain
                .get_blinded_block(&parent_root)
                .map_err(unhandled_error)?
                .ok_or_else(|| {
                    custom_bad_request(format!(
                        "parent block not known or not canonical: {:?}",
                        parent_root
                    ))
                })?;
            // This branch is reached from the HTTP API. We assume the user wants
            // to cache states so that future calls are faster.
            let parent_state = chain
                .get_state(&parent_block.state_root(), Some(parent_block.slot()), true)
                .map_err(unhandled_error)?
                .ok_or_else(|| {
                    custom_bad_request(format!(
                        "no state known for parent block: {:?}",
                        parent_root
                    ))
                })?;
            // TODO(gloas): handle payloads?
            // No blocks are applied here (empty vecs) — the replayer only advances the
            // parent state through any skip slots up to the block's slot.
            let block_replayer = BlockReplayer::new(parent_state, &chain.spec)
                .no_signature_verification()
                .state_root_iter([Ok((parent_block.state_root(), parent_block.slot()))].into_iter())
                .minimal_block_root_verification()
                .apply_blocks(vec![], vec![], Some(block.slot()))
                .map_err(unhandled_error::<BeaconChainError>)?;
            if block_replayer.state_root_miss() {
                warn!(
                    parent_slot = %parent_block.slot(),
                    slot = %block.slot(),
                    "Block reward state root miss"
                );
            }
            let mut state = block_replayer.into_state();
            state
                .build_all_committee_caches(&chain.spec)
                .map_err(beacon_state_error)?;
            // Insert the freshly-built state and use a reference to the cached copy.
            state_cache.get_or_insert((parent_root, block.slot()), || state)
        };
        // Compute block reward.
        let block_reward = chain
            .compute_block_reward(
                block.to_ref(),
                block.canonical_root(),
                state,
                &mut reward_cache,
                true,
            )
            .map_err(unhandled_error)?;
        block_rewards.push(block_reward);
    }
    Ok(block_rewards)
}