Mirror of https://github.com/sigp/lighthouse.git, synced 2026-04-20 22:38:34 +00:00.

Merge in staging, update validator store/cache

Merge remote-tracking branch 'origin/staging' into tree-states
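
The recurring change in this diff is the validator pubkey cache's write path: `import_new_pubkeys` no longer takes a store handle and writes to disk eagerly, it returns a batch of key-value store operations that the caller commits atomically, either bundled with the block write or via `hot_db.do_atomically` at startup. A minimal sketch of that pattern, using simplified, illustrative types rather than Lighthouse's actual API:

struct KeyValueStoreOp(Vec<u8>, Vec<u8>); // a pending (key, value) write

struct PubkeyCache {
    pubkeys: Vec<[u8; 48]>, // compressed BLS pubkeys, indexed by validator index
}

impl PubkeyCache {
    // Add any unseen pubkeys to the in-memory cache and return the store ops
    // for the caller to commit, rather than writing to disk here.
    fn import_new_pubkeys(&mut self, new: &[[u8; 48]]) -> Vec<KeyValueStoreOp> {
        let mut ops = Vec::new();
        for pk in new {
            if !self.pubkeys.contains(pk) {
                let index = self.pubkeys.len() as u64;
                self.pubkeys.push(*pk);
                ops.push(KeyValueStoreOp(index.to_be_bytes().to_vec(), pk.to_vec()));
            }
        }
        ops
    }
}

Deferring the write lets the caller put the new keys into the same atomic batch as the block and state, so a failed database write cannot leave the on-disk pubkey cache ahead of the chain database.
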
@@ -72,7 +72,7 @@ use slog::{crit, debug, error, info, trace, warn, Logger};
use slot_clock::SlotClock;
use ssz::Encode;
use state_processing::{
    common::{get_attesting_indices_from_state, get_indexed_attestation},
    common::get_attesting_indices_from_state,
    per_block_processing,
    per_block_processing::{
        errors::AttestationValidationError, verify_attestation_for_block_inclusion,
@@ -2516,6 +2516,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
            confirmed_state_roots,
            payload_verification_handle,
            parent_eth1_finalization_data,
            consensus_context,
        } = execution_pending_block;

        let PayloadVerificationOutcome {
@@ -2569,6 +2570,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
                    count_unrealized,
                    parent_block,
                    parent_eth1_finalization_data,
                    consensus_context,
                )
            },
            "payload_verification_handle",
@@ -2594,68 +2596,34 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
        count_unrealized: CountUnrealized,
        parent_block: SignedBlindedBeaconBlock<T::EthSpec>,
        parent_eth1_finalization_data: Eth1FinalizationData,
        mut consensus_context: ConsensusContext<T::EthSpec>,
    ) -> Result<Hash256, BlockError<T::EthSpec>> {
        // ----------------------------- BLOCK NOT YET ATTESTABLE ----------------------------------
        // Everything in this initial section is on the hot path between processing the block and
        // being able to attest to it. DO NOT add any extra processing in this initial section
        // unless it must run before fork choice.
        // -----------------------------------------------------------------------------------------
        let current_slot = self.slot()?;
        let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch());
        let block = signed_block.message();
        let post_exec_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_POST_EXEC_PROCESSING);

        let attestation_observation_timer =
            metrics::start_timer(&metrics::BLOCK_PROCESSING_ATTESTATION_OBSERVATION);

        // Iterate through the attestations in the block and register them as an "observed
        // attestation". This will stop us from propagating them on the gossip network.
        for a in signed_block.message().body().attestations() {
            match self.observed_attestations.write().observe_item(a, None) {
                // If the observation was successful or if the slot for the attestation was too
                // low, continue.
                //
                // We ignore `SlotTooLow` since this will be very common whilst syncing.
                Ok(_) | Err(AttestationObservationError::SlotTooLow { .. }) => {}
                Err(e) => return Err(BlockError::BeaconChainError(e.into())),
            }
        }

        metrics::stop_timer(attestation_observation_timer);

        // If a slasher is configured, provide the attestations from the block.
        if let Some(slasher) = self.slasher.as_ref() {
            for attestation in signed_block.message().body().attestations() {
                let committee =
                    state.get_beacon_committee(attestation.data.slot, attestation.data.index)?;
                let indexed_attestation = get_indexed_attestation(committee.committee, attestation)
                    .map_err(|e| BlockError::BeaconChainError(e.into()))?;
                slasher.accept_attestation(indexed_attestation);
            }
        }
        // Check against weak subjectivity checkpoint.
        self.check_block_against_weak_subjectivity_checkpoint(block, block_root, &state)?;

        // If there are new validators in this block, update our pubkey cache.
        //
        // We perform this _before_ adding the block to fork choice because the pubkey cache is
        // used by attestation processing which will only process an attestation if the block is
        // known to fork choice. This ordering ensures that the pubkey cache is always up-to-date.
        self.validator_pubkey_cache
        // The only keys imported here will be ones for validators deposited in this block, because
        // the cache *must* already have been updated for the parent block when it was imported.
        // Newly deposited validators are not active and their keys are not required by other parts
        // of block processing. The reason we do this here and not after making the block attestable
        // is so we don't have to think about lock ordering with respect to the fork choice lock.
        // There are a bunch of places where we lock both fork choice and the pubkey cache and it
        // would be difficult to check that they all lock fork choice first.
        let mut kv_store_ops = self
            .validator_pubkey_cache
            .write()
            .import_new_pubkeys(&state, &self.store)?;

        // For the current and next epoch of this state, ensure we have the shuffling from this
        // block in our cache.
        for relative_epoch in &[RelativeEpoch::Current, RelativeEpoch::Next] {
            let shuffling_id = AttestationShufflingId::new(block_root, &state, *relative_epoch)?;

            let shuffling_is_cached = self
                .shuffling_cache
                .try_read_for(ATTESTATION_CACHE_LOCK_TIMEOUT)
                .ok_or(Error::AttestationCacheLockTimeout)?
                .contains(&shuffling_id);

            if !shuffling_is_cached {
                state.build_committee_cache(*relative_epoch, &self.spec)?;
                let committee_cache = state.committee_cache(*relative_epoch)?;
                self.shuffling_cache
                    .try_write_for(ATTESTATION_CACHE_LOCK_TIMEOUT)
                    .ok_or(Error::AttestationCacheLockTimeout)?
                    .insert_committee_cache(shuffling_id, committee_cache);
            }
        }
            .import_new_pubkeys(&state)?;

        // Apply the state to the attester cache, only if it is from the previous epoch or later.
        //
@@ -2668,52 +2636,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
            .map_err(BeaconChainError::from)?;
        }

        // Alias for readability.
        let block = signed_block.message();

        // Only perform the weak subjectivity check if it was configured.
        if let Some(wss_checkpoint) = self.config.weak_subjectivity_checkpoint {
            // Note: we're using the finalized checkpoint from the head state, rather than fork
            // choice.
            //
            // We are doing this to ensure that we detect changes in finalization. It's possible
            // that fork choice has already been updated to the finalized checkpoint in the block
            // we're importing.
            let current_head_finalized_checkpoint =
                self.canonical_head.cached_head().finalized_checkpoint();
            // Compare the existing finalized checkpoint with the incoming block's finalized checkpoint.
            let new_finalized_checkpoint = state.finalized_checkpoint();

            // This ensures we only perform the check once.
            if (current_head_finalized_checkpoint.epoch < wss_checkpoint.epoch)
                && (wss_checkpoint.epoch <= new_finalized_checkpoint.epoch)
            {
                if let Err(e) =
                    self.verify_weak_subjectivity_checkpoint(wss_checkpoint, block_root, &state)
                {
                    let mut shutdown_sender = self.shutdown_sender();
                    crit!(
                        self.log,
                        "Weak subjectivity checkpoint verification failed while importing block!";
                        "block_root" => ?block_root,
                        "parent_root" => ?block.parent_root(),
                        "old_finalized_epoch" => ?current_head_finalized_checkpoint.epoch,
                        "new_finalized_epoch" => ?new_finalized_checkpoint.epoch,
                        "weak_subjectivity_epoch" => ?wss_checkpoint.epoch,
                        "error" => ?e,
                    );
                    crit!(self.log, "You must use the `--purge-db` flag to clear the database and restart sync. You may be on a hostile network.");
                    shutdown_sender
                        .try_send(ShutdownReason::Failure(
                            "Weak subjectivity checkpoint verification failed. Provided block root is not a checkpoint."
                        ))
                        .map_err(|err| BlockError::BeaconChainError(BeaconChainError::WeakSubjectivtyShutdownError(err)))?;
                    return Err(BlockError::WeakSubjectivityConflict);
                }
            }
        }

        // Take an exclusive write-lock on fork choice. It's very important prevent deadlocks by
        // Take an exclusive write-lock on fork choice. It's very important to prevent deadlocks by
        // avoiding taking other locks whilst holding this lock.
        let mut fork_choice = self.canonical_head.fork_choice_write_lock();

@@ -2743,77 +2666,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
            .map_err(|e| BlockError::BeaconChainError(e.into()))?;
        }

        // Allow the validator monitor to learn about a new valid state.
        self.validator_monitor
            .write()
            .process_valid_state(current_slot.epoch(T::EthSpec::slots_per_epoch()), &state);
        let validator_monitor = self.validator_monitor.read();

        // Register each attester slashing in the block with fork choice.
        for attester_slashing in block.body().attester_slashings() {
            fork_choice.on_attester_slashing(attester_slashing);
        }

        // Register each attestation in the block with the fork choice service.
        for attestation in block.body().attestations() {
            let _fork_choice_attestation_timer =
                metrics::start_timer(&metrics::FORK_CHOICE_PROCESS_ATTESTATION_TIMES);
            let attestation_target_epoch = attestation.data.target.epoch;

            let committee =
                state.get_beacon_committee(attestation.data.slot, attestation.data.index)?;
            let indexed_attestation = get_indexed_attestation(committee.committee, attestation)
                .map_err(|e| BlockError::BeaconChainError(e.into()))?;

            match fork_choice.on_attestation(
                current_slot,
                &indexed_attestation,
                AttestationFromBlock::True,
                &self.spec,
            ) {
                Ok(()) => Ok(()),
                // Ignore invalid attestations whilst importing attestations from a block. The
                // block might be very old and therefore the attestations useless to fork choice.
                Err(ForkChoiceError::InvalidAttestation(_)) => Ok(()),
                Err(e) => Err(BlockError::BeaconChainError(e.into())),
            }?;

            // To avoid slowing down sync, only register attestations for the
            // `observed_block_attesters` if they are from the previous epoch or later.
            if attestation_target_epoch + 1 >= current_epoch {
                let mut observed_block_attesters = self.observed_block_attesters.write();
                for &validator_index in &indexed_attestation.attesting_indices {
                    if let Err(e) = observed_block_attesters
                        .observe_validator(attestation_target_epoch, validator_index as usize)
                    {
                        debug!(
                            self.log,
                            "Failed to register observed block attester";
                            "error" => ?e,
                            "epoch" => attestation_target_epoch,
                            "validator_index" => validator_index,
                        )
                    }
                }
            }

            // Only register this with the validator monitor when the block is sufficiently close to
            // the current slot.
            if VALIDATOR_MONITOR_HISTORIC_EPOCHS as u64 * T::EthSpec::slots_per_epoch()
                + block.slot().as_u64()
                >= current_slot.as_u64()
            {
                match fork_choice.get_block(&block.parent_root()) {
                    Some(parent_block) => validator_monitor.register_attestation_in_block(
                        &indexed_attestation,
                        parent_block.slot,
                        &self.spec,
                    ),
                    None => warn!(self.log, "Failed to get parent block"; "slot" => %block.slot()),
                }
            }
        }

        // If the block is recent enough and it was not optimistically imported, check to see if it
        // becomes the head block. If so, apply it to the early attester cache. This will allow
        // attestations to the block without waiting for the block and state to be inserted to the
@@ -2869,56 +2721,28 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
                ),
            }
        }
        drop(post_exec_timer);

        // Register sync aggregate with validator monitor
        if let Ok(sync_aggregate) = block.body().sync_aggregate() {
            // `SyncCommittee` for the sync_aggregate should correspond to the duty slot
            let duty_epoch = block.slot().epoch(T::EthSpec::slots_per_epoch());
            let sync_committee = self.sync_committee_at_epoch(duty_epoch)?;
            let participant_pubkeys = sync_committee
                .pubkeys
                .iter()
                .zip(sync_aggregate.sync_committee_bits.iter())
                .filter_map(|(pubkey, bit)| bit.then_some(pubkey))
                .collect::<Vec<_>>();
        // ---------------------------- BLOCK PROBABLY ATTESTABLE ----------------------------------
        // Most blocks are now capable of being attested to thanks to the `early_attester_cache`
        // cache above. Resume non-essential processing.
        // -----------------------------------------------------------------------------------------

            validator_monitor.register_sync_aggregate_in_block(
                block.slot(),
                block.parent_root(),
                participant_pubkeys,
            );
        }

        for exit in block.body().voluntary_exits() {
            validator_monitor.register_block_voluntary_exit(&exit.message)
        }

        for slashing in block.body().attester_slashings() {
            validator_monitor.register_block_attester_slashing(slashing)
        }

        for slashing in block.body().proposer_slashings() {
            validator_monitor.register_block_proposer_slashing(slashing)
        }

        drop(validator_monitor);

        // Only present some metrics for blocks from the previous epoch or later.
        //
        // This helps avoid noise in the metrics during sync.
        if block.slot().epoch(T::EthSpec::slots_per_epoch()) + 1 >= self.epoch()? {
            metrics::observe(
                &metrics::OPERATIONS_PER_BLOCK_ATTESTATION,
                block.body().attestations().len() as f64,
            );

            if let Ok(sync_aggregate) = block.body().sync_aggregate() {
                metrics::set_gauge(
                    &metrics::BLOCK_SYNC_AGGREGATE_SET_BITS,
                    sync_aggregate.num_set_bits() as i64,
                );
            }
        }
        self.import_block_update_shuffling_cache(block_root, &mut state)?;
        self.import_block_observe_attestations(
            block,
            &state,
            &mut consensus_context,
            current_epoch,
        );
        self.import_block_update_validator_monitor(
            block,
            &state,
            &mut consensus_context,
            current_slot,
            parent_block.slot(),
        );
        self.import_block_update_slasher(block, &state, &mut consensus_context);

        let db_write_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_DB_WRITE);

@@ -2935,7 +2759,9 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
        ops.push(StoreOp::PutState(block.state_root(), &state));
        let txn_lock = self.store.hot_db.begin_rw_transaction();

        if let Err(e) = self.store.do_atomically(ops) {
        kv_store_ops.extend(self.store.convert_to_kv_batch(ops)?);

        if let Err(e) = self.store.hot_db.do_atomically(kv_store_ops) {
            error!(
                self.log,
                "Database write failed!";
@@ -2943,6 +2769,10 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
                "error" => ?e,
            );

            // Clear the early attester cache to prevent attestations which we would later be unable
            // to verify due to the failure.
            self.early_attester_cache.clear();

            // Since the write failed, try to revert the canonical head back to what was stored
            // in the database. This attempts to prevent inconsistency between the database and
            // fork choice.
@@ -2989,22 +2819,312 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
        self.head_tracker
            .register_block(block_root, parent_root, slot);

        // Send an event to the `events` endpoint after fully processing the block.
        if let Some(event_handler) = self.event_handler.as_ref() {
            if event_handler.has_block_subscribers() {
                event_handler.register(EventKind::Block(SseBlock {
                    slot,
                    block: block_root,
                    execution_optimistic: payload_verification_status.is_optimistic(),
                }));
            }
        }

        metrics::stop_timer(db_write_timer);

        metrics::inc_counter(&metrics::BLOCK_PROCESSING_SUCCESSES);

        let block_delay_total = get_slot_delay_ms(block_time_imported, slot, &self.slot_clock);
        // Update the deposit contract cache.
        self.import_block_update_deposit_contract_finalization(
            block,
            block_root,
            current_epoch,
            current_finalized_checkpoint,
            current_eth1_finalization_data,
            parent_eth1_finalization_data,
            parent_block.slot(),
        );

        // Inform the unknown block cache, in case it was waiting on this block.
        self.pre_finalization_block_cache
            .block_processed(block_root);

        self.import_block_update_metrics_and_events(
            block,
            block_root,
            block_time_imported,
            payload_verification_status,
            current_slot,
        );

        Ok(block_root)
    }

    /// Check the block's consistency with any configured weak subjectivity checkpoint.
    fn check_block_against_weak_subjectivity_checkpoint(
        &self,
        block: BeaconBlockRef<T::EthSpec>,
        block_root: Hash256,
        state: &BeaconState<T::EthSpec>,
    ) -> Result<(), BlockError<T::EthSpec>> {
        // Only perform the weak subjectivity check if it was configured.
        let wss_checkpoint = if let Some(checkpoint) = self.config.weak_subjectivity_checkpoint {
            checkpoint
        } else {
            return Ok(());
        };
        // Note: we're using the finalized checkpoint from the head state, rather than fork
        // choice.
        //
        // We are doing this to ensure that we detect changes in finalization. It's possible
        // that fork choice has already been updated to the finalized checkpoint in the block
        // we're importing.
        let current_head_finalized_checkpoint =
            self.canonical_head.cached_head().finalized_checkpoint();
        // Compare the existing finalized checkpoint with the incoming block's finalized checkpoint.
        let new_finalized_checkpoint = state.finalized_checkpoint();

        // This ensures we only perform the check once.
        if current_head_finalized_checkpoint.epoch < wss_checkpoint.epoch
            && wss_checkpoint.epoch <= new_finalized_checkpoint.epoch
        {
            if let Err(e) =
                self.verify_weak_subjectivity_checkpoint(wss_checkpoint, block_root, state)
            {
                let mut shutdown_sender = self.shutdown_sender();
                crit!(
                    self.log,
                    "Weak subjectivity checkpoint verification failed while importing block!";
                    "block_root" => ?block_root,
                    "parent_root" => ?block.parent_root(),
                    "old_finalized_epoch" => ?current_head_finalized_checkpoint.epoch,
                    "new_finalized_epoch" => ?new_finalized_checkpoint.epoch,
                    "weak_subjectivity_epoch" => ?wss_checkpoint.epoch,
                    "error" => ?e
                );
                crit!(
                    self.log,
                    "You must use the `--purge-db` flag to clear the database and restart sync. \
                     You may be on a hostile network."
                );
                shutdown_sender
                    .try_send(ShutdownReason::Failure(
                        "Weak subjectivity checkpoint verification failed. \
                         Provided block root is not a checkpoint.",
                    ))
                    .map_err(|err| {
                        BlockError::BeaconChainError(
                            BeaconChainError::WeakSubjectivtyShutdownError(err),
                        )
                    })?;
                return Err(BlockError::WeakSubjectivityConflict);
            }
        }
        Ok(())
    }
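
The interval test above is what makes the expensive verification run exactly once: it fires only for the block whose finalized epoch first reaches the weak subjectivity epoch. A small worked example of the gate, with plain integers standing in for epochs (illustrative only):

let wss_epoch = 100u64;
// Finalization still below the WS epoch (head 98, block 99): skip.
assert!(!(98 < wss_epoch && wss_epoch <= 99));
// The block that crosses it (head 99, block 100): the check runs, once.
assert!(99 < wss_epoch && wss_epoch <= 100);
// Already past it (head 100, block 101): never again.
assert!(!(100 < wss_epoch && wss_epoch <= 101));
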
    /// Process a block for the validator monitor, including all its constituent messages.
    fn import_block_update_validator_monitor(
        &self,
        block: BeaconBlockRef<T::EthSpec>,
        state: &BeaconState<T::EthSpec>,
        ctxt: &mut ConsensusContext<T::EthSpec>,
        current_slot: Slot,
        parent_block_slot: Slot,
    ) {
        // Only register blocks with the validator monitor when the block is sufficiently close to
        // the current slot.
        if VALIDATOR_MONITOR_HISTORIC_EPOCHS as u64 * T::EthSpec::slots_per_epoch()
            + block.slot().as_u64()
            < current_slot.as_u64()
        {
            return;
        }

        // Allow the validator monitor to learn about a new valid state.
        self.validator_monitor
            .write()
            .process_valid_state(current_slot.epoch(T::EthSpec::slots_per_epoch()), state);

        let validator_monitor = self.validator_monitor.read();

        // Sync aggregate.
        if let Ok(sync_aggregate) = block.body().sync_aggregate() {
            // `SyncCommittee` for the sync_aggregate should correspond to the duty slot
            let duty_epoch = block.slot().epoch(T::EthSpec::slots_per_epoch());

            match self.sync_committee_at_epoch(duty_epoch) {
                Ok(sync_committee) => {
                    let participant_pubkeys = sync_committee
                        .pubkeys
                        .iter()
                        .zip(sync_aggregate.sync_committee_bits.iter())
                        .filter_map(|(pubkey, bit)| bit.then_some(pubkey))
                        .collect::<Vec<_>>();

                    validator_monitor.register_sync_aggregate_in_block(
                        block.slot(),
                        block.parent_root(),
                        participant_pubkeys,
                    );
                }
                Err(e) => {
                    warn!(
                        self.log,
                        "Unable to fetch sync committee";
                        "epoch" => duty_epoch,
                        "purpose" => "validator monitor",
                        "error" => ?e,
                    );
                }
            }
        }

        // Attestations.
        for attestation in block.body().attestations() {
            let indexed_attestation = match ctxt.get_indexed_attestation(state, attestation) {
                Ok(indexed) => indexed,
                Err(e) => {
                    debug!(
                        self.log,
                        "Failed to get indexed attestation";
                        "purpose" => "validator monitor",
                        "attestation_slot" => attestation.data.slot,
                        "error" => ?e,
                    );
                    continue;
                }
            };
            validator_monitor.register_attestation_in_block(
                indexed_attestation,
                parent_block_slot,
                &self.spec,
            );
        }

        for exit in block.body().voluntary_exits() {
            validator_monitor.register_block_voluntary_exit(&exit.message)
        }

        for slashing in block.body().attester_slashings() {
            validator_monitor.register_block_attester_slashing(slashing)
        }

        for slashing in block.body().proposer_slashings() {
            validator_monitor.register_block_proposer_slashing(slashing)
        }
    }
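
The early return above keeps the validator monitor focused on recent history: a block is only registered if it lies within `VALIDATOR_MONITOR_HISTORIC_EPOCHS` of the current slot. A quick check of the inequality, assuming 32 slots per epoch and a hypothetical 4-epoch window (the real constant is imported from `validator_monitor` as `HISTORIC_EPOCHS`):

let slots_per_epoch = 32u64;
let window = 4 * slots_per_epoch; // assumed 4-epoch window, 128 slots
let current_slot = 1_000u64;
// A block 128 slots old still qualifies: window + block_slot >= current_slot.
assert!(window + (current_slot - 128) >= current_slot);
// A block 129 slots old is skipped.
assert!(window + (current_slot - 129) < current_slot);
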
    /// Iterate through the attestations in the block and register them as "observed".
    ///
    /// This will stop us from propagating them on the gossip network.
    fn import_block_observe_attestations(
        &self,
        block: BeaconBlockRef<T::EthSpec>,
        state: &BeaconState<T::EthSpec>,
        ctxt: &mut ConsensusContext<T::EthSpec>,
        current_epoch: Epoch,
    ) {
        // To avoid slowing down sync, only observe attestations if the block is from the
        // previous epoch or later.
        if state.current_epoch() + 1 < current_epoch {
            return;
        }

        let _timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_ATTESTATION_OBSERVATION);

        for a in block.body().attestations() {
            match self.observed_attestations.write().observe_item(a, None) {
                // If the observation was successful or if the slot for the attestation was too
                // low, continue.
                //
                // We ignore `SlotTooLow` since this will be very common whilst syncing.
                Ok(_) | Err(AttestationObservationError::SlotTooLow { .. }) => {}
                Err(e) => {
                    debug!(
                        self.log,
                        "Failed to register observed attestation";
                        "error" => ?e,
                        "epoch" => a.data.target.epoch
                    );
                }
            }

            let indexed_attestation = match ctxt.get_indexed_attestation(state, a) {
                Ok(indexed) => indexed,
                Err(e) => {
                    debug!(
                        self.log,
                        "Failed to get indexed attestation";
                        "purpose" => "observation",
                        "attestation_slot" => a.data.slot,
                        "error" => ?e,
                    );
                    continue;
                }
            };

            let mut observed_block_attesters = self.observed_block_attesters.write();

            for &validator_index in &indexed_attestation.attesting_indices {
                if let Err(e) = observed_block_attesters
                    .observe_validator(a.data.target.epoch, validator_index as usize)
                {
                    debug!(
                        self.log,
                        "Failed to register observed block attester";
                        "error" => ?e,
                        "epoch" => a.data.target.epoch,
                        "validator_index" => validator_index,
                    )
                }
            }
        }
    }
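
The guard `state.current_epoch() + 1 < current_epoch` is the "previous epoch or later" test with the constant moved to the left-hand side, presumably so that no `current_epoch - 1` subtraction (which could underflow near genesis) is needed. Concretely, with `current_epoch = 10`:

let current_epoch = 10u64;
assert!(!(10 + 1 < current_epoch)); // block state in the current epoch: observe
assert!(!(9 + 1 < current_epoch)); // previous epoch: observe
assert!(8 + 1 < current_epoch); // two or more epochs behind (deep sync): skip
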
    /// If a slasher is configured, provide the attestations from the block.
    fn import_block_update_slasher(
        &self,
        block: BeaconBlockRef<T::EthSpec>,
        state: &BeaconState<T::EthSpec>,
        ctxt: &mut ConsensusContext<T::EthSpec>,
    ) {
        if let Some(slasher) = self.slasher.as_ref() {
            for attestation in block.body().attestations() {
                let indexed_attestation = match ctxt.get_indexed_attestation(state, attestation) {
                    Ok(indexed) => indexed,
                    Err(e) => {
                        debug!(
                            self.log,
                            "Failed to get indexed attestation";
                            "purpose" => "slasher",
                            "attestation_slot" => attestation.data.slot,
                            "error" => ?e,
                        );
                        continue;
                    }
                };
                slasher.accept_attestation(indexed_attestation.clone());
            }
        }
    }

    fn import_block_update_metrics_and_events(
        &self,
        block: BeaconBlockRef<T::EthSpec>,
        block_root: Hash256,
        block_time_imported: Duration,
        payload_verification_status: PayloadVerificationStatus,
        current_slot: Slot,
    ) {
        // Only present some metrics for blocks from the previous epoch or later.
        //
        // This helps avoid noise in the metrics during sync.
        if block.slot() + 2 * T::EthSpec::slots_per_epoch() >= current_slot {
            metrics::observe(
                &metrics::OPERATIONS_PER_BLOCK_ATTESTATION,
                block.body().attestations().len() as f64,
            );

            if let Ok(sync_aggregate) = block.body().sync_aggregate() {
                metrics::set_gauge(
                    &metrics::BLOCK_SYNC_AGGREGATE_SET_BITS,
                    sync_aggregate.num_set_bits() as i64,
                );
            }
        }

        let block_delay_total =
            get_slot_delay_ms(block_time_imported, block.slot(), &self.slot_clock);

        // Do not write to the cache for blocks older than 2 epochs, this helps reduce writes to
        // the cache during sync.
@@ -3036,62 +3156,105 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
            );
        }

        // Do not write to eth1 finalization cache for blocks older than 5 epochs,
        // this helps reduce noise during sync.
        if block_delay_total
            < self.slot_clock.slot_duration() * 5 * (T::EthSpec::slots_per_epoch() as u32)
        {
            let parent_block_epoch = parent_block.slot().epoch(T::EthSpec::slots_per_epoch());
            if parent_block_epoch < current_epoch {
                // we've crossed epoch boundary, store Eth1FinalizationData
                let (checkpoint, eth1_finalization_data) =
                    if current_slot % T::EthSpec::slots_per_epoch() == 0 {
                        // current block is the checkpoint
                        (
                            Checkpoint {
                                epoch: current_epoch,
                                root: block_root,
                            },
                            current_eth1_finalization_data,
                        )
                    } else {
                        // parent block is the checkpoint
                        (
                            Checkpoint {
                                epoch: current_epoch,
                                root: parent_block.canonical_root(),
                            },
                            parent_eth1_finalization_data,
                        )
                    };
        if let Some(event_handler) = self.event_handler.as_ref() {
            if event_handler.has_block_subscribers() {
                event_handler.register(EventKind::Block(SseBlock {
                    slot: block.slot(),
                    block: block_root,
                    execution_optimistic: payload_verification_status.is_optimistic(),
                }));
            }
        }
    }

                if let Some(finalized_eth1_data) = self
                    .eth1_finalization_cache
                    .try_write_for(ETH1_FINALIZATION_CACHE_LOCK_TIMEOUT)
                    .and_then(|mut cache| {
                        cache.insert(checkpoint, eth1_finalization_data);
                        cache.finalize(&current_finalized_checkpoint)
                    })
                {
                    if let Some(eth1_chain) = self.eth1_chain.as_ref() {
                        let finalized_deposit_count = finalized_eth1_data.deposit_count;
                        eth1_chain.finalize_eth1_data(finalized_eth1_data);
                        debug!(
                            self.log,
                            "called eth1_chain.finalize_eth1_data()";
                            "epoch" => current_finalized_checkpoint.epoch,
                            "deposit count" => finalized_deposit_count,
                        );
                    }
    fn import_block_update_shuffling_cache(
        &self,
        block_root: Hash256,
        state: &mut BeaconState<T::EthSpec>,
    ) -> Result<(), BlockError<T::EthSpec>> {
        // For the current and next epoch of this state, ensure we have the shuffling from this
        // block in our cache.
        for relative_epoch in [RelativeEpoch::Current, RelativeEpoch::Next] {
            let shuffling_id = AttestationShufflingId::new(block_root, state, relative_epoch)?;

            let shuffling_is_cached = self
                .shuffling_cache
                .try_read_for(ATTESTATION_CACHE_LOCK_TIMEOUT)
                .ok_or(Error::AttestationCacheLockTimeout)?
                .contains(&shuffling_id);

            if !shuffling_is_cached {
                state.build_committee_cache(relative_epoch, &self.spec)?;
                let committee_cache = state.committee_cache(relative_epoch)?;
                self.shuffling_cache
                    .try_write_for(ATTESTATION_CACHE_LOCK_TIMEOUT)
                    .ok_or(Error::AttestationCacheLockTimeout)?
                    .insert_committee_cache(shuffling_id, committee_cache);
            }
        }
        Ok(())
    }

    #[allow(clippy::too_many_arguments)]
    fn import_block_update_deposit_contract_finalization(
        &self,
        block: BeaconBlockRef<T::EthSpec>,
        block_root: Hash256,
        current_epoch: Epoch,
        current_finalized_checkpoint: Checkpoint,
        current_eth1_finalization_data: Eth1FinalizationData,
        parent_eth1_finalization_data: Eth1FinalizationData,
        parent_block_slot: Slot,
    ) {
        // Do not write to eth1 finalization cache for blocks older than 5 epochs.
        if block.slot().epoch(T::EthSpec::slots_per_epoch()) + 5 < current_epoch {
            return;
        }

        let parent_block_epoch = parent_block_slot.epoch(T::EthSpec::slots_per_epoch());
        if parent_block_epoch < current_epoch {
            // we've crossed epoch boundary, store Eth1FinalizationData
            let (checkpoint, eth1_finalization_data) =
                if block.slot() % T::EthSpec::slots_per_epoch() == 0 {
                    // current block is the checkpoint
                    (
                        Checkpoint {
                            epoch: current_epoch,
                            root: block_root,
                        },
                        current_eth1_finalization_data,
                    )
                } else {
                    // parent block is the checkpoint
                    (
                        Checkpoint {
                            epoch: current_epoch,
                            root: block.parent_root(),
                        },
                        parent_eth1_finalization_data,
                    )
                };

            if let Some(finalized_eth1_data) = self
                .eth1_finalization_cache
                .try_write_for(ETH1_FINALIZATION_CACHE_LOCK_TIMEOUT)
                .and_then(|mut cache| {
                    cache.insert(checkpoint, eth1_finalization_data);
                    cache.finalize(&current_finalized_checkpoint)
                })
            {
                if let Some(eth1_chain) = self.eth1_chain.as_ref() {
                    let finalized_deposit_count = finalized_eth1_data.deposit_count;
                    eth1_chain.finalize_eth1_data(finalized_eth1_data);
                    debug!(
                        self.log,
                        "called eth1_chain.finalize_eth1_data()";
                        "epoch" => current_finalized_checkpoint.epoch,
                        "deposit count" => finalized_deposit_count,
                    );
                }
            }
        }
    }
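
The checkpoint selection in this function follows how epoch-boundary blocks work: if the imported block sits exactly on the boundary (`slot % slots_per_epoch == 0`) it is itself the epoch's checkpoint block; otherwise the boundary slot was skipped and the checkpoint is the last block before it, i.e. the parent. A concrete illustration with 32-slot epochs:

let slots_per_epoch = 32u64;
// A block at slot 64 opens epoch 2, so it is epoch 2's checkpoint itself.
assert_eq!(64 % slots_per_epoch, 0);
// A block at slot 65 whose parent is at slot 63 (epoch 1) means slot 64 was
// empty, so the parent is epoch 2's checkpoint.
assert_ne!(65 % slots_per_epoch, 0);
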
        // Inform the unknown block cache, in case it was waiting on this block.
        self.pre_finalization_block_cache
            .block_processed(block_root);

        Ok(block_root)
    }

    /// If configured, wait for the fork choice run at the start of the slot to complete.

@@ -52,21 +52,22 @@ use crate::validator_monitor::HISTORIC_EPOCHS as VALIDATOR_MONITOR_HISTORIC_EPOCHS;
use crate::validator_pubkey_cache::ValidatorPubkeyCache;
use crate::{
    beacon_chain::{
        BeaconForkChoice, MAXIMUM_GOSSIP_CLOCK_DISPARITY, VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT,
        BeaconForkChoice, ForkChoiceError, MAXIMUM_GOSSIP_CLOCK_DISPARITY,
        VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT,
    },
    metrics, BeaconChain, BeaconChainError, BeaconChainTypes,
};
use derivative::Derivative;
use eth2::types::EventKind;
use execution_layer::PayloadStatus;
use fork_choice::PayloadVerificationStatus;
use fork_choice::{AttestationFromBlock, PayloadVerificationStatus};
use parking_lot::RwLockReadGuard;
use proto_array::Block as ProtoBlock;
use safe_arith::ArithError;
use slog::{debug, error, warn, Logger};
use slot_clock::SlotClock;
use ssz::Encode;
use state_processing::per_block_processing::is_merge_transition_block;
use state_processing::per_block_processing::{errors::IntoWithIndex, is_merge_transition_block};
use state_processing::{
    block_signature_verifier::{BlockSignatureVerifier, Error as BlockSignatureVerifierError},
    per_block_processing, per_slot_processing,
@@ -554,7 +555,7 @@ pub fn signature_verify_chain_segment<T: BeaconChainTypes>(
    let mut consensus_context =
        ConsensusContext::new(block.slot()).set_current_block_root(*block_root);

    signature_verifier.include_all_signatures(&block, &mut consensus_context)?;
    signature_verifier.include_all_signatures(block, &mut consensus_context)?;

    // Save the block and its consensus context. The context will have had its proposer index
    // and attesting indices filled in, which can be used to accelerate later block processing.
@@ -621,6 +622,7 @@ pub struct ExecutionPendingBlock<T: BeaconChainTypes> {
    pub parent_block: SignedBeaconBlock<T::EthSpec, BlindedPayload<T::EthSpec>>,
    pub parent_eth1_finalization_data: Eth1FinalizationData,
    pub confirmed_state_roots: Vec<Hash256>,
    pub consensus_context: ConsensusContext<T::EthSpec>,
    pub payload_verification_handle: PayloadVerificationHandle<T::EthSpec>,
}

@@ -1137,6 +1139,79 @@ impl<T: BeaconChainTypes> ExecutionPendingBlock<T> {

        check_block_relevancy(&block, block_root, chain)?;

        // Define a future that will verify the execution payload with an execution engine.
        //
        // We do this as early as possible so that later parts of this function can run in parallel
        // with the payload verification.
        let payload_notifier = PayloadNotifier::new(
            chain.clone(),
            block.clone(),
            &parent.pre_state,
            notify_execution_layer,
        )?;
        let is_valid_merge_transition_block =
            is_merge_transition_block(&parent.pre_state, block.message().body());
        let payload_verification_future = async move {
            let chain = payload_notifier.chain.clone();
            let block = payload_notifier.block.clone();

            // If this block triggers the merge, check to ensure that it references valid execution
            // blocks.
            //
            // The specification defines this check inside `on_block` in the fork-choice specification,
            // however we perform the check here for two reasons:
            //
            // - There's no point in importing a block that will fail fork choice, so it's best to fail
            //   early.
            // - Doing the check here means we can keep our fork-choice implementation "pure". I.e., no
            //   calls to remote servers.
            if is_valid_merge_transition_block {
                validate_merge_block(&chain, block.message(), AllowOptimisticImport::Yes).await?;
            };

            // The specification declares that this should be run *inside* `per_block_processing`,
            // however we run it here to keep `per_block_processing` pure (i.e., no calls to external
            // servers).
            let payload_verification_status = payload_notifier.notify_new_payload().await?;

            // If the payload did not validate or invalidate the block, check to see if this block is
            // valid for optimistic import.
            if payload_verification_status.is_optimistic() {
                let block_hash_opt = block
                    .message()
                    .body()
                    .execution_payload()
                    .map(|full_payload| full_payload.execution_payload.block_hash);

                // Ensure the block is a candidate for optimistic import.
                if !is_optimistic_candidate_block(&chain, block.slot(), block.parent_root()).await?
                {
                    warn!(
                        chain.log,
                        "Rejecting optimistic block";
                        "block_hash" => ?block_hash_opt,
                        "msg" => "the execution engine is not synced"
                    );
                    return Err(ExecutionPayloadError::UnverifiedNonOptimisticCandidate.into());
                }
            }

            Ok(PayloadVerificationOutcome {
                payload_verification_status,
                is_valid_merge_transition_block,
            })
        };
        // Spawn the payload verification future as a new task, but don't wait for it to complete.
        // The `payload_verification_future` will be awaited later to ensure verification completed
        // successfully.
        let payload_verification_handle = chain
            .task_executor
            .spawn_handle(
                payload_verification_future,
                "execution_payload_verification",
            )
            .ok_or(BeaconChainError::RuntimeShutdown)?;
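
The handle returned by `spawn_handle` is what gives this function its shape: payload verification starts on the executor immediately, the rest of block verification continues on the current task, and the handle is only awaited once its result is genuinely needed. A minimal sketch of the same spawn-now/await-later pattern, written against plain tokio rather than Lighthouse's `task_executor` (illustrative, not the real API):

async fn verify_block() -> Result<(), String> {
    // Kick off the slow payload verification straight away.
    let handle = tokio::spawn(async {
        // ... e.g. ask the execution engine to validate the payload ...
        Ok::<&str, String>("verified")
    });

    // ... do the rest of the per-block work here, in parallel ...

    // Only block on the verification result when nothing else can proceed.
    let outcome = handle.await.map_err(|e| e.to_string())??;
    assert_eq!(outcome, "verified");
    Ok(())
}
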
        /*
         * Advance the given `parent.beacon_state` to the slot of the given `block`.
         */
@@ -1221,80 +1296,11 @@ impl<T: BeaconChainTypes> ExecutionPendingBlock<T> {
                summaries.push(summary);
            }
        }
        metrics::stop_timer(catchup_timer);

        let block_slot = block.slot();
        let state_current_epoch = state.current_epoch();

        // Define a future that will verify the execution payload with an execution engine (but
        // don't execute it yet).
        let payload_notifier =
            PayloadNotifier::new(chain.clone(), block.clone(), &state, notify_execution_layer)?;
        let is_valid_merge_transition_block =
            is_merge_transition_block(&state, block.message().body());
        let payload_verification_future = async move {
            let chain = payload_notifier.chain.clone();
            let block = payload_notifier.block.clone();

            // If this block triggers the merge, check to ensure that it references valid execution
            // blocks.
            //
            // The specification defines this check inside `on_block` in the fork-choice specification,
            // however we perform the check here for two reasons:
            //
            // - There's no point in importing a block that will fail fork choice, so it's best to fail
            //   early.
            // - Doing the check here means we can keep our fork-choice implementation "pure". I.e., no
            //   calls to remote servers.
            if is_valid_merge_transition_block {
                validate_merge_block(&chain, block.message(), AllowOptimisticImport::Yes).await?;
            };

            // The specification declares that this should be run *inside* `per_block_processing`,
            // however we run it here to keep `per_block_processing` pure (i.e., no calls to external
            // servers).
            //
            // It is important that this function is called *after* `per_slot_processing`, since the
            // `randao` may change.
            let payload_verification_status = payload_notifier.notify_new_payload().await?;

            // If the payload did not validate or invalidate the block, check to see if this block is
            // valid for optimistic import.
            if payload_verification_status.is_optimistic() {
                let block_hash_opt = block
                    .message()
                    .body()
                    .execution_payload()
                    .map(|full_payload| full_payload.execution_payload.block_hash);

                // Ensure the block is a candidate for optimistic import.
                if !is_optimistic_candidate_block(&chain, block.slot(), block.parent_root()).await?
                {
                    warn!(
                        chain.log,
                        "Rejecting optimistic block";
                        "block_hash" => ?block_hash_opt,
                        "msg" => "the execution engine is not synced"
                    );
                    return Err(ExecutionPayloadError::UnverifiedNonOptimisticCandidate.into());
                }
            }

            Ok(PayloadVerificationOutcome {
                payload_verification_status,
                is_valid_merge_transition_block,
            })
        };
        // Spawn the payload verification future as a new task, but don't wait for it to complete.
        // The `payload_verification_future` will be awaited later to ensure verification completed
        // successfully.
        let payload_verification_handle = chain
            .task_executor
            .spawn_handle(
                payload_verification_future,
                "execution_payload_verification",
            )
            .ok_or(BeaconChainError::RuntimeShutdown)?;

        // If the block is sufficiently recent, notify the validator monitor.
        if let Some(slot) = chain.slot_clock.now() {
            let epoch = slot.epoch(T::EthSpec::slots_per_epoch());
@@ -1321,8 +1327,6 @@ impl<T: BeaconChainTypes> ExecutionPendingBlock<T> {
            }
        }

        metrics::stop_timer(catchup_timer);

        /*
         * Build the committee caches on the state.
         */
@@ -1411,6 +1415,44 @@ impl<T: BeaconChainTypes> ExecutionPendingBlock<T> {
        });
        }

        /*
         * Apply the block's attestations to fork choice.
         *
         * We're running in parallel with the payload verification at this point, so this is
         * free real estate.
         */
        let current_slot = chain.slot()?;
        let mut fork_choice = chain.canonical_head.fork_choice_write_lock();

        // Register each attester slashing in the block with fork choice.
        for attester_slashing in block.message().body().attester_slashings() {
            fork_choice.on_attester_slashing(attester_slashing);
        }

        // Register each attestation in the block with fork choice.
        for (i, attestation) in block.message().body().attestations().iter().enumerate() {
            let _fork_choice_attestation_timer =
                metrics::start_timer(&metrics::FORK_CHOICE_PROCESS_ATTESTATION_TIMES);

            let indexed_attestation = consensus_context
                .get_indexed_attestation(&state, attestation)
                .map_err(|e| BlockError::PerBlockProcessingError(e.into_with_index(i)))?;

            match fork_choice.on_attestation(
                current_slot,
                indexed_attestation,
                AttestationFromBlock::True,
                &chain.spec,
            ) {
                Ok(()) => Ok(()),
                // Ignore invalid attestations whilst importing attestations from a block. The
                // block might be very old and therefore the attestations useless to fork choice.
                Err(ForkChoiceError::InvalidAttestation(_)) => Ok(()),
                Err(e) => Err(BlockError::BeaconChainError(e.into())),
            }?;
        }
        drop(fork_choice);

        Ok(Self {
            block,
            block_root,
@@ -1418,6 +1460,7 @@ impl<T: BeaconChainTypes> ExecutionPendingBlock<T> {
            parent_block: parent.beacon_block,
            parent_eth1_finalization_data,
            confirmed_state_roots,
            consensus_context,
            payload_verification_handle,
        })
    }

@@ -711,10 +711,14 @@ where
        let validator_pubkey_cache = store.immutable_validators.clone();

        // Update pubkey cache on first start in case we have started from genesis.
        validator_pubkey_cache
        let kv_store_ops = validator_pubkey_cache
            .write()
            .import_new_pubkeys(&head_snapshot.beacon_state, &store)
            .import_new_pubkeys(&head_snapshot.beacon_state)
            .map_err(|e| format!("error initializing pubkey cache: {e:?}"))?;
        store
            .hot_db
            .do_atomically(kv_store_ops)
            .map_err(|e| format!("error writing validator store: {e:?}"))?;

        let migrator_config = self.store_migrator_config.unwrap_or_default();
        let store_migrator = BackgroundMigrator::new(

@@ -69,6 +69,7 @@ impl<T: BeaconChainTypes> PayloadNotifier<T> {
        // are cheap and doing them here ensures we protect the execution engine from junk.
        partially_verify_execution_payload(
            state,
            block.slot(),
            block.message().execution_payload()?,
            &chain.spec,
        )
@@ -373,7 +374,8 @@ pub fn get_execution_payload<
    let spec = &chain.spec;
    let current_epoch = state.current_epoch();
    let is_merge_transition_complete = is_merge_transition_complete(state);
    let timestamp = compute_timestamp_at_slot(state, spec).map_err(BeaconStateError::from)?;
    let timestamp =
        compute_timestamp_at_slot(state, state.slot(), spec).map_err(BeaconStateError::from)?;
    let random = *state.get_randao_mix(current_epoch)?;
    let latest_execution_payload_header_block_hash =
        state.latest_execution_payload_header()?.block_hash;

@@ -60,6 +60,11 @@ lazy_static! {
        "beacon_block_processing_state_root_seconds",
        "Time spent calculating the state root when processing a block."
    );
    pub static ref BLOCK_PROCESSING_POST_EXEC_PROCESSING: Result<Histogram> = try_create_histogram_with_buckets(
        "beacon_block_processing_post_exec_pre_attestable_seconds",
        "Time between finishing execution processing and the block becoming attestable",
        linear_buckets(5e-3, 5e-3, 10)
    );
    pub static ref BLOCK_PROCESSING_DB_WRITE: Result<Histogram> = try_create_histogram(
        "beacon_block_processing_db_write_seconds",
        "Time spent writing a newly processed block and state to DB"

@@ -589,7 +589,7 @@ where

    pub fn get_timestamp_at_slot(&self) -> u64 {
        let state = self.get_current_state();
        compute_timestamp_at_slot(&state, &self.spec).unwrap()
        compute_timestamp_at_slot(&state, state.slot(), &self.spec).unwrap()
    }

    pub fn get_current_state_and_root(&self) -> (BeaconState<E>, Hash256) {