Mirror of https://github.com/sigp/lighthouse.git, synced 2026-03-11 18:04:18 +00:00
* First pass
* Add restrictions to RuntimeVariableList api
* Use empty_uninitialized and fix warnings
* Fix some todos
* Merge branch 'unstable' into max-blobs-preset
* Fix take impl on RuntimeFixedList
* cleanup
* Fix test compilations
* Fix some more tests
* Fix test from unstable
* Merge branch 'unstable' into max-blobs-preset
* SingleAttestation
* Add post attestation v2 endpoint logic to attestation service
* Merge branch 'unstable' of https://github.com/sigp/lighthouse into single_attestation
* Implement "Bugfix and more withdrawal tests"
* Implement "Add missed exit checks to consolidation processing"
* Implement "Update initial earliest_exit_epoch calculation"
* Implement "Limit consolidating balance by validator.effective_balance"
* Implement "Use 16-bit random value in validator filter"
* Implement "Do not change creds type on consolidation"
* some tests and fixed attestation calc
* Merge branch 'unstable' of https://github.com/sigp/lighthouse into single_attestation
* Rename PendingPartialWithdraw index field to validator_index
* Skip slots to get test to pass and add TODO
* Implement "Synchronously check all transactions to have non-zero length"
* Merge remote-tracking branch 'origin/unstable' into max-blobs-preset
* Remove footgun function
* Minor simplifications
* Move from preset to config
* Fix typo
* Revert "Remove footgun function" (this reverts commit de01f923c7)
* Try fixing tests
* Implement "bump minimal preset MAX_BLOB_COMMITMENTS_PER_BLOCK and KZG_COMMITMENT_INCLUSION_PROOF_DEPTH"
* Thread through ChainSpec
* Fix release tests
* Move RuntimeFixedVector into module and rename
* Add test
* Merge branch 'unstable' of https://github.com/sigp/lighthouse into single_attestation
* Added more test coverage, simplified Attestation conversion, and other minor refactors
* Removed unused codepaths
* Fix failing test
* Implement "Remove post-altair `initialize_beacon_state_from_eth1` from specs"
* Update preset YAML
* Remove empty RuntimeVarList awfulness
* Make max_blobs_per_block a config parameter (#6329). Squashed commit of the following:
  - commit 04b3743ec1, Michael Sproul <michael@sigmaprime.io>, Mon Jan 6 17:36:58 2025 +1100: Add test
  - commit 440e854199, Michael Sproul <michael@sigmaprime.io>, Mon Jan 6 17:24:50 2025 +1100: Move RuntimeFixedVector into module and rename
  - commit f66e179a40, Michael Sproul <michael@sigmaprime.io>, Mon Jan 6 17:17:17 2025 +1100: Fix release tests
  - commit e4bfe71cd1, Michael Sproul <michael@sigmaprime.io>, Mon Jan 6 17:05:30 2025 +1100: Thread through ChainSpec
  - commit 063b79c16a, Michael Sproul <michael@sigmaprime.io>, Mon Jan 6 15:32:16 2025 +1100: Try fixing tests
  - commit 88bedf09bc, Michael Sproul <michael@sigmaprime.io>, Mon Jan 6 15:04:37 2025 +1100: Revert "Remove footgun function" (this reverts commit de01f923c7)
  - commit 32483d385b, Michael Sproul <michael@sigmaprime.io>, Mon Jan 6 15:04:32 2025 +1100: Fix typo
  - commit 2e86585b47, Michael Sproul <michael@sigmaprime.io>, Mon Jan 6 15:04:15 2025 +1100: Move from preset to config
  - commit 1095d60a40, Michael Sproul <michael@sigmaprime.io>, Mon Jan 6 14:38:40 2025 +1100: Minor simplifications
  - commit de01f923c7, Michael Sproul <michael@sigmaprime.io>, Mon Jan 6 14:06:57 2025 +1100: Remove footgun function
  - commit 0c2c8c4224 (merge of 21ecb58ff and f51a292f7), Michael Sproul <michael@sigmaprime.io>, Mon Jan 6 14:02:50 2025 +1100: Merge remote-tracking branch 'origin/unstable' into max-blobs-preset
  - commit f51a292f77, Daniel Knopik <107140945+dknopik@users.noreply.github.com>, Fri Jan 3 20:27:21 2025 +0100: fully lint only explicitly to avoid unnecessary rebuilds (#6753)
  - commit 7e0cddef32, Akihito Nakano <sora.akatsuki@gmail.com>, Tue Dec 24 10:38:56 2024 +0900: Make sure we have fanout peers when publish (#6738); ensure that `fanout_peers` is always non-empty if it's `Some`
  - commit 21ecb58ff8 (merge of 2fcb2935e and 9aefb5539), Pawan Dhananjay <pawandhananjay@gmail.com>, Mon Oct 21 14:46:00 2024 -0700: Merge branch 'unstable' into max-blobs-preset
  - commit 2fcb2935ec, Pawan Dhananjay <pawandhananjay@gmail.com>, Fri Sep 6 18:28:31 2024 -0700: Fix test from unstable
  - commit 12c6ef118a, Pawan Dhananjay <pawandhananjay@gmail.com>, Wed Sep 4 16:16:36 2024 -0700: Fix some more tests
  - commit d37733b846, Pawan Dhananjay <pawandhananjay@gmail.com>, Wed Sep 4 12:47:36 2024 -0700: Fix test compilations
  - commit 52bb581e07, Pawan Dhananjay <pawandhananjay@gmail.com>, Tue Sep 3 18:38:19 2024 -0700: cleanup
  - commit e71020e3e6, Pawan Dhananjay <pawandhananjay@gmail.com>, Tue Sep 3 17:16:10 2024 -0700: Fix take impl on RuntimeFixedList
  - commit 13f9bba647 (merge of 60100fc6b and 4e675cf5d), Pawan Dhananjay <pawandhananjay@gmail.com>, Tue Sep 3 16:08:59 2024 -0700: Merge branch 'unstable' into max-blobs-preset
  - commit 60100fc6be, Pawan Dhananjay <pawandhananjay@gmail.com>, Fri Aug 30 16:04:11 2024 -0700: Fix some todos
  - commit a9cb329a22, Pawan Dhananjay <pawandhananjay@gmail.com>, Fri Aug 30 15:54:00 2024 -0700: Use empty_uninitialized and fix warnings
  - commit 4dc6e6515e, Pawan Dhananjay <pawandhananjay@gmail.com>, Fri Aug 30 15:53:18 2024 -0700: Add restrictions to RuntimeVariableList api
  - commit 25feedfde3, Pawan Dhananjay <pawandhananjay@gmail.com>, Thu Aug 29 16:11:19 2024 -0700: First pass
* Fix tests
* Implement max_blobs_per_block_electra
* Fix config issues
* Simplify BlobSidecarListFromRoot
* Disable PeerDAS tests
* Cleanup single attestation imports
* Fix some single attestation network plumbing
* Merge remote-tracking branch 'origin/unstable' into max-blobs-preset
* Bump quota to account for new target (6)
* Remove clone
* Fix issue from review
* Try to remove ugliness
* Merge branch 'unstable' into max-blobs-preset
* Merge remote-tracking branch 'origin/unstable' into electra-alpha10
* Merge commit '04b3743ec1e0b650269dd8e58b540c02430d1c0d' into electra-alpha10
* Merge remote-tracking branch 'pawan/max-blobs-preset' into electra-alpha10
* Update tests to v1.5.0-beta.0
* Merge remote-tracking branch 'origin/electra-alpha10' into single_attestation
* Fix some tests
* Cargo fmt
* lint
* fmt
* Resolve merge conflicts
* Merge branch 'electra-alpha10' of https://github.com/sigp/lighthouse into single_attestation
* lint
* Linting
* fmt
* Merge branch 'electra-alpha10' of https://github.com/sigp/lighthouse into single_attestation
* Fmt
* Fix test and add TODO
* Gracefully handle slashed proposers in fork choice tests
* Merge remote-tracking branch 'origin/unstable' into electra-alpha10
* Keep latest changes from max_blobs_per_block PR in codec.rs
* Revert a few more regressions and add a comment
* Merge branch 'electra-alpha10' of https://github.com/sigp/lighthouse into single_attestation
* Disable more DAS tests
* Improve validator monitor test a little
* Make test more robust
* Fix sync test that didn't understand blobs
* Fill out cropped comment
* Merge remote-tracking branch 'origin/electra-alpha10' into single_attestation
* Merge remote-tracking branch 'origin/unstable' into single_attestation
* Merge remote-tracking branch 'origin/unstable' into single_attestation
* Merge branch 'unstable' of https://github.com/sigp/lighthouse into single_attestation
* publish_attestations should accept Either<Attestation, SingleAttestation>
* log an error when failing to convert to SingleAttestation
* Use Cow to avoid clone
* Avoid reconverting to SingleAttestation
* Tweak VC error message
* update comments
* update comments
* pass in single attestation as ref to subnetid calculation method
* Improved API, new error variants and other minor tweaks
* Fix single_attestation event topic boilerplate
* fix sse event failure
* Add single_attestation event topic test coverage
750 lines · 29 KiB · Rust
use crate::duties_service::{DutiesService, DutyAndProof};
use beacon_node_fallback::{ApiTopic, BeaconNodeFallback};
use environment::RuntimeContext;
use futures::future::join_all;
use slog::{crit, debug, error, info, trace, warn};
use slot_clock::SlotClock;
use std::collections::HashMap;
use std::ops::Deref;
use std::sync::Arc;
use tokio::time::{sleep, sleep_until, Duration, Instant};
use tree_hash::TreeHash;
use types::{Attestation, AttestationData, ChainSpec, CommitteeIndex, EthSpec, Slot};
use validator_store::{Error as ValidatorStoreError, ValidatorStore};

/// Builds an `AttestationService`.
#[derive(Default)]
pub struct AttestationServiceBuilder<T: SlotClock + 'static, E: EthSpec> {
    duties_service: Option<Arc<DutiesService<T, E>>>,
    validator_store: Option<Arc<ValidatorStore<T, E>>>,
    slot_clock: Option<T>,
    beacon_nodes: Option<Arc<BeaconNodeFallback<T, E>>>,
    context: Option<RuntimeContext<E>>,
}

impl<T: SlotClock + 'static, E: EthSpec> AttestationServiceBuilder<T, E> {
    pub fn new() -> Self {
        Self {
            duties_service: None,
            validator_store: None,
            slot_clock: None,
            beacon_nodes: None,
            context: None,
        }
    }

    pub fn duties_service(mut self, service: Arc<DutiesService<T, E>>) -> Self {
        self.duties_service = Some(service);
        self
    }

    pub fn validator_store(mut self, store: Arc<ValidatorStore<T, E>>) -> Self {
        self.validator_store = Some(store);
        self
    }

    pub fn slot_clock(mut self, slot_clock: T) -> Self {
        self.slot_clock = Some(slot_clock);
        self
    }

    pub fn beacon_nodes(mut self, beacon_nodes: Arc<BeaconNodeFallback<T, E>>) -> Self {
        self.beacon_nodes = Some(beacon_nodes);
        self
    }

    pub fn runtime_context(mut self, context: RuntimeContext<E>) -> Self {
        self.context = Some(context);
        self
    }

    pub fn build(self) -> Result<AttestationService<T, E>, String> {
        Ok(AttestationService {
            inner: Arc::new(Inner {
                duties_service: self
                    .duties_service
                    .ok_or("Cannot build AttestationService without duties_service")?,
                validator_store: self
                    .validator_store
                    .ok_or("Cannot build AttestationService without validator_store")?,
                slot_clock: self
                    .slot_clock
                    .ok_or("Cannot build AttestationService without slot_clock")?,
                beacon_nodes: self
                    .beacon_nodes
                    .ok_or("Cannot build AttestationService without beacon_nodes")?,
                context: self
                    .context
                    .ok_or("Cannot build AttestationService without runtime_context")?,
            }),
        })
    }
}

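// Usage sketch (hedged, variable names assumed): inside a function returning
// `Result<_, String>`, construction chains the setters and `build()` fails
// fast on any missing dependency:
//
//     let attestation_service = AttestationServiceBuilder::new()
//         .duties_service(duties_service)
//         .validator_store(validator_store)
//         .slot_clock(slot_clock)
//         .beacon_nodes(beacon_nodes)
//         .runtime_context(context)
//         .build()?;
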
/// Helper to minimise `Arc` usage.
pub struct Inner<T, E: EthSpec> {
    duties_service: Arc<DutiesService<T, E>>,
    validator_store: Arc<ValidatorStore<T, E>>,
    slot_clock: T,
    beacon_nodes: Arc<BeaconNodeFallback<T, E>>,
    context: RuntimeContext<E>,
}

/// Attempts to produce attestations for all known validators 1/3rd of the way through each slot.
///
/// If any validators are on the same committee, a single attestation will be downloaded and
/// returned to the beacon node. This attestation will have a signature from each of the
/// validators.
pub struct AttestationService<T, E: EthSpec> {
    inner: Arc<Inner<T, E>>,
}

impl<T, E: EthSpec> Clone for AttestationService<T, E> {
    fn clone(&self) -> Self {
        Self {
            inner: self.inner.clone(),
        }
    }
}

impl<T, E: EthSpec> Deref for AttestationService<T, E> {
    type Target = Inner<T, E>;

    fn deref(&self) -> &Self::Target {
        self.inner.deref()
    }
}

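// The `Arc<Inner>` + `Deref` pattern above lets `AttestationService` be cloned
// cheaply into spawned tasks while callers write `self.slot_clock` rather than
// `self.inner.slot_clock`. (`Clone` is implemented manually because
// `#[derive(Clone)]` would add unwanted `T: Clone` and `E: Clone` bounds.)
// A minimal standalone sketch of the same pattern, with types assumed for
// illustration:
//
//     use std::{ops::Deref, sync::Arc};
//
//     struct Inner {
//         counter: u64,
//     }
//
//     struct Service {
//         inner: Arc<Inner>,
//     }
//
//     impl Clone for Service {
//         fn clone(&self) -> Self {
//             Self { inner: self.inner.clone() } // bumps the refcount only
//         }
//     }
//
//     impl Deref for Service {
//         type Target = Inner;
//         fn deref(&self) -> &Inner {
//             &self.inner
//         }
//     }
//
//     fn read(svc: &Service) -> u64 {
//         svc.counter // resolves through `Deref` to `Inner::counter`
//     }
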
impl<T: SlotClock + 'static, E: EthSpec> AttestationService<T, E> {
    /// Starts the service which periodically produces attestations.
    pub fn start_update_service(self, spec: &ChainSpec) -> Result<(), String> {
        let log = self.context.log().clone();

        let slot_duration = Duration::from_secs(spec.seconds_per_slot);
        let duration_to_next_slot = self
            .slot_clock
            .duration_to_next_slot()
            .ok_or("Unable to determine duration to next slot")?;

        info!(
            log,
            "Attestation production service started";
            "next_update_millis" => duration_to_next_slot.as_millis()
        );

        let executor = self.context.executor.clone();

        let interval_fut = async move {
            loop {
                if let Some(duration_to_next_slot) = self.slot_clock.duration_to_next_slot() {
                    sleep(duration_to_next_slot + slot_duration / 3).await;
                    let log = self.context.log();

                    if let Err(e) = self.spawn_attestation_tasks(slot_duration) {
                        crit!(
                            log,
                            "Failed to spawn attestation tasks";
                            "error" => e
                        )
                    } else {
                        trace!(
                            log,
                            "Spawned attestation tasks";
                        )
                    }
                } else {
                    error!(log, "Failed to read slot clock");
                    // If we can't read the slot clock, just wait another slot.
                    sleep(slot_duration).await;
                    continue;
                }
            }
        };

        executor.spawn(interval_fut, "attestation_service");
        Ok(())
    }

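    // Timing sketch (hedged): the loop above wakes one third of the way into
    // each slot, which with mainnet's 12-second slots is 4 seconds after the
    // slot boundary:
    //
    //     use std::time::Duration;
    //
    //     let slot_duration = Duration::from_secs(12);
    //     let attestation_offset = slot_duration / 3;
    //     assert_eq!(attestation_offset, Duration::from_secs(4));
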
    /// For each required attestation, spawn a new task that downloads, signs and uploads the
    /// attestation to the beacon node.
    fn spawn_attestation_tasks(&self, slot_duration: Duration) -> Result<(), String> {
        let slot = self.slot_clock.now().ok_or("Failed to read slot clock")?;
        let duration_to_next_slot = self
            .slot_clock
            .duration_to_next_slot()
            .ok_or("Unable to determine duration to next slot")?;

        // If a validator needs to publish an aggregate attestation, they must do so at 2/3
        // through the slot. This instant is when that delay triggers.
        let aggregate_production_instant = Instant::now()
            + duration_to_next_slot
                .checked_sub(slot_duration / 3)
                .unwrap_or_else(|| Duration::from_secs(0));

        let duties_by_committee_index: HashMap<CommitteeIndex, Vec<DutyAndProof>> = self
            .duties_service
            .attesters(slot)
            .into_iter()
            .fold(HashMap::new(), |mut map, duty_and_proof| {
                map.entry(duty_and_proof.duty.committee_index)
                    .or_default()
                    .push(duty_and_proof);
                map
            });

        // For each committee index for this slot:
        //
        // - Create and publish an `Attestation` for all required validators.
        // - Create and publish `SignedAggregateAndProof` for all aggregating validators.
        duties_by_committee_index
            .into_iter()
            .for_each(|(committee_index, validator_duties)| {
                // Spawn a separate task for each attestation.
                self.inner.context.executor.spawn_ignoring_error(
                    self.clone().publish_attestations_and_aggregates(
                        slot,
                        committee_index,
                        validator_duties,
                        aggregate_production_instant,
                    ),
                    "attestation publish",
                );
            });

        // Schedule pruning of the slashing protection database once all unaggregated
        // attestations have (hopefully) been signed, i.e. at the same time as aggregate
        // production.
        self.spawn_slashing_protection_pruning_task(slot, aggregate_production_instant);

        Ok(())
    }

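    // Grouping sketch (hedged): the `fold` above buckets duties by committee
    // index. The same shape on stand-in `(committee, validator)` pairs:
    //
    //     use std::collections::HashMap;
    //
    //     let duties = vec![(0u64, 11u64), (1, 22), (0, 33)];
    //     let by_committee: HashMap<u64, Vec<u64>> = duties
    //         .into_iter()
    //         .fold(HashMap::new(), |mut map, (committee, validator)| {
    //             map.entry(committee).or_default().push(validator);
    //             map
    //         });
    //     assert_eq!(by_committee[&0], vec![11, 33]);
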
    /// Performs both steps of the attesting process for a single `slot` and `committee_index`:
    /// producing, signing and publishing an `Attestation` for each validator, then (for any
    /// validators elected to aggregate) producing and publishing a `SignedAggregateAndProof`.
    ///
    /// https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/validator.md#attesting
    ///
    /// ## Detail
    ///
    /// The given `validator_duties` should already be filtered to only contain those that match
    /// `slot` and `committee_index`. Critical errors will be logged if this is not the case.
    async fn publish_attestations_and_aggregates(
        self,
        slot: Slot,
        committee_index: CommitteeIndex,
        validator_duties: Vec<DutyAndProof>,
        aggregate_production_instant: Instant,
    ) -> Result<(), ()> {
        let log = self.context.log();
        let attestations_timer = validator_metrics::start_timer_vec(
            &validator_metrics::ATTESTATION_SERVICE_TIMES,
            &[validator_metrics::ATTESTATIONS],
        );

        // There's no need to produce `Attestation` or `SignedAggregateAndProof` if we do not have
        // any validators for the given `slot` and `committee_index`.
        if validator_duties.is_empty() {
            return Ok(());
        }

        // Step 1.
        //
        // Download, sign and publish an `Attestation` for each validator.
        let attestation_opt = self
            .produce_and_publish_attestations(slot, committee_index, &validator_duties)
            .await
            .map_err(move |e| {
                crit!(
                    log,
                    "Error during attestation routine";
                    "error" => format!("{:?}", e),
                    "committee_index" => committee_index,
                    "slot" => slot.as_u64(),
                )
            })?;

        drop(attestations_timer);

        // Step 2.
        //
        // If an attestation was produced, make an aggregate.
        if let Some(attestation_data) = attestation_opt {
            // First, wait until the `aggregate_production_instant` (2/3rds
            // of the way through the slot). As verified in the
            // `delay_triggers_when_in_the_past` test, this code will still run
            // even if the instant has already elapsed.
            sleep_until(aggregate_production_instant).await;

            // Start the metrics timer *after* we've done the delay.
            let _aggregates_timer = validator_metrics::start_timer_vec(
                &validator_metrics::ATTESTATION_SERVICE_TIMES,
                &[validator_metrics::AGGREGATES],
            );

            // Then download, sign and publish a `SignedAggregateAndProof` for each
            // validator that is elected to aggregate for this `slot` and
            // `committee_index`.
            self.produce_and_publish_aggregates(
                &attestation_data,
                committee_index,
                &validator_duties,
            )
            .await
            .map_err(move |e| {
                crit!(
                    log,
                    "Error during attestation routine";
                    "error" => format!("{:?}", e),
                    "committee_index" => committee_index,
                    "slot" => slot.as_u64(),
                )
            })?;
        }

        Ok(())
    }

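    // Scheduling sketch (hedged): `aggregate_production_instant` is computed in
    // `spawn_attestation_tasks` as "now + time to next slot - 1/3 slot", i.e.
    // 2/3 through the current slot, with `checked_sub` clamping to zero on a
    // late wake-up. Because a `sleep_until` deadline in the past resolves
    // immediately (see `delay_triggers_when_in_the_past` below), a slow step 1
    // only delays step 2, it never skips it:
    //
    //     use tokio::time::{Duration, Instant};
    //
    //     let slot_duration = Duration::from_secs(12);
    //     let duration_to_next_slot = Duration::from_secs(2); // woke up late
    //     let instant = Instant::now()
    //         + duration_to_next_slot
    //             .checked_sub(slot_duration / 3)
    //             .unwrap_or_else(|| Duration::from_secs(0)); // clamps to "now"
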
    /// Performs the first step of the attesting process: downloading `Attestation` objects,
    /// signing them and returning them to the beacon node.
    ///
    /// https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/validator.md#attesting
    ///
    /// ## Detail
    ///
    /// The given `validator_duties` should already be filtered to only contain those that match
    /// `slot` and `committee_index`. Critical errors will be logged if this is not the case.
    ///
    /// Only one `Attestation` is downloaded from the BN. It is then cloned and signed by each
    /// validator and the list of individually-signed `Attestation` objects is returned to the BN.
    async fn produce_and_publish_attestations(
        &self,
        slot: Slot,
        committee_index: CommitteeIndex,
        validator_duties: &[DutyAndProof],
    ) -> Result<Option<AttestationData>, String> {
        let log = self.context.log();

        if validator_duties.is_empty() {
            return Ok(None);
        }

        let current_epoch = self
            .slot_clock
            .now()
            .ok_or("Unable to determine current slot from clock")?
            .epoch(E::slots_per_epoch());

        let attestation_data = self
            .beacon_nodes
            .first_success(|beacon_node| async move {
                let _timer = validator_metrics::start_timer_vec(
                    &validator_metrics::ATTESTATION_SERVICE_TIMES,
                    &[validator_metrics::ATTESTATIONS_HTTP_GET],
                );
                beacon_node
                    .get_validator_attestation_data(slot, committee_index)
                    .await
                    .map_err(|e| format!("Failed to produce attestation data: {:?}", e))
                    .map(|result| result.data)
            })
            .await
            .map_err(|e| e.to_string())?;

        // Create futures to produce signed `Attestation` objects.
        let attestation_data_ref = &attestation_data;
        let signing_futures = validator_duties.iter().map(|duty_and_proof| async move {
            let duty = &duty_and_proof.duty;
            let attestation_data = attestation_data_ref;

            // Ensure that the attestation matches the duties.
            if !duty.match_attestation_data::<E>(attestation_data, &self.context.eth2_config.spec) {
                crit!(
                    log,
                    "Inconsistent validator duties during signing";
                    "validator" => ?duty.pubkey,
                    "duty_slot" => duty.slot,
                    "attestation_slot" => attestation_data.slot,
                    "duty_index" => duty.committee_index,
                    "attestation_index" => attestation_data.index,
                );
                return None;
            }

            let mut attestation = match Attestation::<E>::empty_for_signing(
                duty.committee_index,
                duty.committee_length as usize,
                attestation_data.slot,
                attestation_data.beacon_block_root,
                attestation_data.source,
                attestation_data.target,
                &self.context.eth2_config.spec,
            ) {
                Ok(attestation) => attestation,
                Err(err) => {
                    crit!(
                        log,
                        "Invalid validator duties during signing";
                        "validator" => ?duty.pubkey,
                        "duty" => ?duty,
                        "err" => ?err,
                    );
                    return None;
                }
            };

            match self
                .validator_store
                .sign_attestation(
                    duty.pubkey,
                    duty.validator_committee_index as usize,
                    &mut attestation,
                    current_epoch,
                )
                .await
            {
                Ok(()) => Some((attestation, duty.validator_index)),
                Err(ValidatorStoreError::UnknownPubkey(pubkey)) => {
                    // A pubkey can be missing when a validator was recently
                    // removed via the API.
                    warn!(
                        log,
                        "Missing pubkey for attestation";
                        "info" => "a validator may have recently been removed from this VC",
                        "pubkey" => ?pubkey,
                        "validator" => ?duty.pubkey,
                        "committee_index" => committee_index,
                        "slot" => slot.as_u64(),
                    );
                    None
                }
                Err(e) => {
                    crit!(
                        log,
                        "Failed to sign attestation";
                        "error" => ?e,
                        "validator" => ?duty.pubkey,
                        "committee_index" => committee_index,
                        "slot" => slot.as_u64(),
                    );
                    None
                }
            }
        });

        // Execute all the futures in parallel, collecting any successful results.
        let (ref attestations, ref validator_indices): (Vec<_>, Vec<_>) = join_all(signing_futures)
            .await
            .into_iter()
            .flatten()
            .unzip();

        if attestations.is_empty() {
            warn!(log, "No attestations were published");
            return Ok(None);
        }
        let fork_name = self
            .context
            .eth2_config
            .spec
            .fork_name_at_slot::<E>(attestation_data.slot);

        // Post the attestations to the BN.
        match self
            .beacon_nodes
            .request(ApiTopic::Attestations, |beacon_node| async move {
                let _timer = validator_metrics::start_timer_vec(
                    &validator_metrics::ATTESTATION_SERVICE_TIMES,
                    &[validator_metrics::ATTESTATIONS_HTTP_POST],
                );
                if fork_name.electra_enabled() {
                    let single_attestations = attestations
                        .iter()
                        .zip(validator_indices)
                        .filter_map(|(a, i)| {
                            match a.to_single_attestation_with_attester_index(*i as usize) {
                                Ok(a) => Some(a),
                                Err(e) => {
                                    // This shouldn't happen unless BN and VC are out of sync with
                                    // respect to the Electra fork.
                                    error!(
                                        log,
                                        "Unable to convert to SingleAttestation";
                                        "error" => ?e,
                                        "committee_index" => attestation_data.index,
                                        "slot" => slot.as_u64(),
                                        "type" => "unaggregated",
                                    );
                                    None
                                }
                            }
                        })
                        .collect::<Vec<_>>();
                    beacon_node
                        .post_beacon_pool_attestations_v2(&single_attestations, fork_name)
                        .await
                } else {
                    beacon_node
                        .post_beacon_pool_attestations_v1(attestations)
                        .await
                }
            })
            .await
        {
            Ok(()) => info!(
                log,
                "Successfully published attestations";
                "count" => attestations.len(),
                "validator_indices" => ?validator_indices,
                "head_block" => ?attestation_data.beacon_block_root,
                "committee_index" => attestation_data.index,
                "slot" => attestation_data.slot.as_u64(),
                "type" => "unaggregated",
            ),
            Err(e) => error!(
                log,
                "Unable to publish attestations";
                "error" => %e,
                "committee_index" => attestation_data.index,
                "slot" => slot.as_u64(),
                "type" => "unaggregated",
            ),
        }

        Ok(Some(attestation_data))
    }

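    // Conversion sketch (hedged): the Electra branch above drops (and logs)
    // per-attestation conversion failures instead of failing the whole batch.
    // The same `filter_map` shape over plain `Result`s:
    //
    //     let results: Vec<Result<u64, &str>> = vec![Ok(1), Err("out of sync"), Ok(3)];
    //     let converted: Vec<u64> = results
    //         .into_iter()
    //         .filter_map(|r| match r {
    //             Ok(v) => Some(v),
    //             Err(e) => {
    //                 eprintln!("skipping attestation: {e}"); // stand-in for `error!`
    //                 None
    //             }
    //         })
    //         .collect();
    //     assert_eq!(converted, vec![1, 3]);
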
    /// Performs the second step of the attesting process: downloading an aggregated `Attestation`,
    /// converting it into a `SignedAggregateAndProof` and returning it to the BN.
    ///
    /// https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/validator.md#broadcast-aggregate
    ///
    /// ## Detail
    ///
    /// The given `validator_duties` should already be filtered to only contain those that match
    /// `slot` and `committee_index`. Critical errors will be logged if this is not the case.
    ///
    /// Only one aggregated `Attestation` is downloaded from the BN. It is then cloned and signed
    /// by each validator and the list of individually-signed `SignedAggregateAndProof` objects is
    /// returned to the BN.
    async fn produce_and_publish_aggregates(
        &self,
        attestation_data: &AttestationData,
        committee_index: CommitteeIndex,
        validator_duties: &[DutyAndProof],
    ) -> Result<(), String> {
        let log = self.context.log();

        if !validator_duties
            .iter()
            .any(|duty_and_proof| duty_and_proof.selection_proof.is_some())
        {
            // Exit early if no validator is an aggregator.
            return Ok(());
        }

        let fork_name = self
            .context
            .eth2_config
            .spec
            .fork_name_at_slot::<E>(attestation_data.slot);

        let aggregated_attestation = &self
            .beacon_nodes
            .first_success(|beacon_node| async move {
                let _timer = validator_metrics::start_timer_vec(
                    &validator_metrics::ATTESTATION_SERVICE_TIMES,
                    &[validator_metrics::AGGREGATES_HTTP_GET],
                );
                if fork_name.electra_enabled() {
                    beacon_node
                        .get_validator_aggregate_attestation_v2(
                            attestation_data.slot,
                            attestation_data.tree_hash_root(),
                            committee_index,
                        )
                        .await
                        .map_err(|e| {
                            format!("Failed to produce an aggregate attestation: {:?}", e)
                        })?
                        .ok_or_else(|| format!("No aggregate available for {:?}", attestation_data))
                        .map(|result| result.data)
                } else {
                    beacon_node
                        .get_validator_aggregate_attestation_v1(
                            attestation_data.slot,
                            attestation_data.tree_hash_root(),
                        )
                        .await
                        .map_err(|e| {
                            format!("Failed to produce an aggregate attestation: {:?}", e)
                        })?
                        .ok_or_else(|| format!("No aggregate available for {:?}", attestation_data))
                        .map(|result| result.data)
                }
            })
            .await
            .map_err(|e| e.to_string())?;

        // Create futures to produce the signed aggregated attestations.
        let signing_futures = validator_duties.iter().map(|duty_and_proof| async move {
            let duty = &duty_and_proof.duty;
            let selection_proof = duty_and_proof.selection_proof.as_ref()?;

            if !duty.match_attestation_data::<E>(attestation_data, &self.context.eth2_config.spec) {
                crit!(log, "Inconsistent validator duties during signing");
                return None;
            }

            match self
                .validator_store
                .produce_signed_aggregate_and_proof(
                    duty.pubkey,
                    duty.validator_index,
                    aggregated_attestation.clone(),
                    selection_proof.clone(),
                )
                .await
            {
                Ok(aggregate) => Some(aggregate),
                Err(ValidatorStoreError::UnknownPubkey(pubkey)) => {
                    // A pubkey can be missing when a validator was recently
                    // removed via the API.
                    debug!(
                        log,
                        "Missing pubkey for aggregate";
                        "pubkey" => ?pubkey,
                    );
                    None
                }
                Err(e) => {
                    crit!(
                        log,
                        "Failed to sign aggregate";
                        "error" => ?e,
                        "pubkey" => ?duty.pubkey,
                    );
                    None
                }
            }
        });

        // Execute all the futures in parallel, collecting any successful results.
        let signed_aggregate_and_proofs = join_all(signing_futures)
            .await
            .into_iter()
            .flatten()
            .collect::<Vec<_>>();

        if !signed_aggregate_and_proofs.is_empty() {
            let signed_aggregate_and_proofs_slice = signed_aggregate_and_proofs.as_slice();
            match self
                .beacon_nodes
                .first_success(|beacon_node| async move {
                    let _timer = validator_metrics::start_timer_vec(
                        &validator_metrics::ATTESTATION_SERVICE_TIMES,
                        &[validator_metrics::AGGREGATES_HTTP_POST],
                    );
                    if fork_name.electra_enabled() {
                        beacon_node
                            .post_validator_aggregate_and_proof_v2(
                                signed_aggregate_and_proofs_slice,
                                fork_name,
                            )
                            .await
                    } else {
                        beacon_node
                            .post_validator_aggregate_and_proof_v1(
                                signed_aggregate_and_proofs_slice,
                            )
                            .await
                    }
                })
                .await
            {
                Ok(()) => {
                    for signed_aggregate_and_proof in signed_aggregate_and_proofs {
                        let attestation = signed_aggregate_and_proof.message().aggregate();
                        info!(
                            log,
                            "Successfully published attestation";
                            "aggregator" => signed_aggregate_and_proof.message().aggregator_index(),
                            "signatures" => attestation.num_set_aggregation_bits(),
                            "head_block" => format!("{:?}", attestation.data().beacon_block_root),
                            "committee_index" => attestation.committee_index(),
                            "slot" => attestation.data().slot.as_u64(),
                            "type" => "aggregated",
                        );
                    }
                }
                Err(e) => {
                    for signed_aggregate_and_proof in signed_aggregate_and_proofs {
                        let attestation = &signed_aggregate_and_proof.message().aggregate();
                        crit!(
                            log,
                            "Failed to publish attestation";
                            "error" => %e,
                            "aggregator" => signed_aggregate_and_proof.message().aggregator_index(),
                            "committee_index" => attestation.committee_index(),
                            "slot" => attestation.data().slot.as_u64(),
                            "type" => "aggregated",
                        );
                    }
                }
            }
        }

        Ok(())
    }

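    // Fallback sketch (hedged): `first_success` above tries each configured
    // beacon node until one call succeeds. A minimal synchronous analogue
    // (not the real `BeaconNodeFallback` API):
    //
    //     fn first_success<T, E>(
    //         nodes: &[&str],
    //         mut call: impl FnMut(&str) -> Result<T, E>,
    //     ) -> Option<T> {
    //         nodes.iter().find_map(|node| call(node).ok())
    //     }
    //
    //     let result = first_success(&["http://bn-1", "http://bn-2"], |node| {
    //         if node.ends_with('2') { Ok(42u64) } else { Err("unreachable") }
    //     });
    //     assert_eq!(result, Some(42));
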
    /// Spawn a blocking task to run the slashing protection pruning process.
    ///
    /// Start the task at `pruning_instant` to avoid interference with other tasks.
    fn spawn_slashing_protection_pruning_task(&self, slot: Slot, pruning_instant: Instant) {
        let attestation_service = self.clone();
        let executor = self.inner.context.executor.clone();
        let current_epoch = slot.epoch(E::slots_per_epoch());

        // Wait for `pruning_instant` in a regular task, and then switch to a blocking one.
        self.inner.context.executor.spawn(
            async move {
                sleep_until(pruning_instant).await;

                executor.spawn_blocking(
                    move || {
                        attestation_service
                            .validator_store
                            .prune_slashing_protection_db(current_epoch, false)
                    },
                    "slashing_protection_pruning",
                )
            },
            "slashing_protection_pre_pruning",
        );
    }
}

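// Handoff sketch (hedged): the pruning task above waits asynchronously, then
// moves the blocking database work onto a dedicated thread. The same shape
// with plain tokio instead of lighthouse's `TaskExecutor`:
//
//     use tokio::time::{sleep_until, Duration, Instant};
//
//     #[tokio::main]
//     async fn main() {
//         let pruning_instant = Instant::now() + Duration::from_secs(1);
//         tokio::spawn(async move {
//             sleep_until(pruning_instant).await;
//             // Blocking work must not run on the runtime's core threads.
//             tokio::task::spawn_blocking(|| {
//                 // ... prune the database here ...
//             });
//         });
//     }
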
#[cfg(test)]
mod tests {
    use super::*;
    use futures::future::FutureExt;
    use parking_lot::RwLock;

    /// This test is to ensure that a `tokio_timer::Sleep` with an instant in the past will still
    /// trigger.
    #[tokio::test]
    async fn delay_triggers_when_in_the_past() {
        let in_the_past = Instant::now() - Duration::from_secs(2);
        let state_1 = Arc::new(RwLock::new(in_the_past));
        let state_2 = state_1.clone();

        sleep_until(in_the_past)
            .map(move |()| *state_1.write() = Instant::now())
            .await;

        assert!(
            *state_2.read() > in_the_past,
            "state should have been updated"
        );
    }

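    /// Added illustration (not from upstream): `checked_sub` clamps the 2/3-slot
    /// offset used for `aggregate_production_instant`, so waking later than
    /// expected yields a zero delay rather than an underflow panic.
    #[test]
    fn aggregate_offset_clamps_to_zero() {
        let slot_duration = Duration::from_secs(12);
        // Less than a third of a slot remains: a plain subtraction would underflow.
        let duration_to_next_slot = Duration::from_secs(2);
        let offset = duration_to_next_slot
            .checked_sub(slot_duration / 3)
            .unwrap_or_else(|| Duration::from_secs(0));
        assert_eq!(offset, Duration::from_secs(0));
    }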
}