mirror of
https://github.com/sigp/lighthouse.git
synced 2026-03-11 18:04:18 +00:00
* Attestation superstruct changes for EIP 7549 (#5644)
* update
* experiment
* superstruct changes
* revert
* superstruct changes
* fix tests
* indexed attestation
* indexed attestation superstruct
* updated TODOs
* `superstruct` the `AttesterSlashing` (#5636)
* `superstruct` Attester Fork Variants
* Push a little further
* Deal with Encode / Decode of AttesterSlashing
* not so sure about this..
* Stop Encode/Decode Bounds from Propagating Out
* Tons of Changes..
* More Conversions to AttestationRef
* Add AsReference trait (#15)
* Add AsReference trait
* Fix some snafus
* Got it Compiling! :D
* Got Tests Building
* Get beacon chain tests compiling
---------
Co-authored-by: Michael Sproul <micsproul@gmail.com>
* Merge remote-tracking branch 'upstream/unstable' into electra_attestation_changes
* Make EF Tests Fork-Agnostic (#5713)
* Finish EF Test Fork Agnostic (#5714)
* Superstruct `AggregateAndProof` (#5715)
* Upgrade `superstruct` to `0.8.0`
* superstruct `AggregateAndProof`
* Merge remote-tracking branch 'sigp/unstable' into electra_attestation_changes
* cargo fmt
* Merge pull request #5726 from realbigsean/electra_attestation_changes
Merge unstable into Electra attestation changes
* process withdrawals updates
* cleanup withdrawals processing
* update `process_operations` deposit length check
* add apply_deposit changes
* add execution layer withdrawal request processing
* process deposit receipts
* add consolidation processing
* update process operations function
* exit updates
* clean up
* update slash_validator
* EIP7549 `get_attestation_indices` (#5657)
* get attesting indices electra impl
* fmt
* get tests to pass
* fmt
* fix some beacon chain tests
* fmt
* fix slasher test
* fmt got me again
* fix more tests
* fix tests
* Some small changes (#5739)
* Merge branch 'electra_attestation_changes' of https://github.com/sigp/lighthouse into block-processing-electra
* cargo fmt (#5740)
* Merge branch 'electra_attestation_changes' of https://github.com/sigp/lighthouse into block-processing-electra
* fix attestation verification
* Add new engine api methods
* Fix the versioning of v4 requests
* Handle new engine api methods in mock EL
* Note todo
* Fix todos
* Add support for electra fields in getPayloadBodies
* Add comments for potential versioning confusion
* updates for aggregate attestation endpoint
* Merge branch 'electra-engine-api' of https://github.com/sigp/lighthouse into beacon-api-electra
* Sketch op pool changes
* fix get attesting indices (#5742)
* fix get attesting indices
* better errors
* fix compile
* only get committee index once
* Merge branch 'electra_attestation_changes' of https://github.com/sigp/lighthouse into block-processing-electra
* Ef test fixes (#5753)
* attestation related ef test fixes
* delete commented out stuff
* Merge branch 'electra_attestation_changes' of https://github.com/sigp/lighthouse into block-processing-electra
* Merge branch 'block-processing-electra' of https://github.com/sigp/lighthouse into electra-engine-api
* Merge branch 'electra-engine-api' of https://github.com/sigp/lighthouse into beacon-api-electra
* Fix Aggregation Pool for Electra (#5754)
* Fix Aggregation Pool for Electra
* Remove Outdated Interface
* fix ssz (#5755)
* Get `electra_op_pool` up to date (#5756)
* fix get attesting indices (#5742)
* fix get attesting indices
* better errors
* fix compile
* only get committee index once
* Ef test fixes (#5753)
* attestation related ef test fixes
* delete commented out stuff
* Fix Aggregation Pool for Electra (#5754)
* Fix Aggregation Pool for Electra
* Remove Outdated Interface
* fix ssz (#5755)
---------
Co-authored-by: realbigsean <sean@sigmaprime.io>
* Revert "Get `electra_op_pool` up to date (#5756)" (#5757)
This reverts commit ab9e58aa3d.
* Merge branch 'electra_attestation_changes' of https://github.com/sigp/lighthouse into electra_op_pool
* Merge branch 'electra_attestation_changes' of https://github.com/sigp/lighthouse into block-processing-electra
* Merge branch 'block-processing-electra' of https://github.com/sigp/lighthouse into electra-engine-api
* Merge branch 'electra-engine-api' of https://github.com/sigp/lighthouse into beacon-api-electra
* Compute on chain aggregate impl (#5752)
* add compute_on_chain_agg impl to op pool changes
* fmt
* get op pool tests to pass
* update beacon api aggregate attestationendpoint
* update the naive agg pool interface (#5760)
* Merge branch 'electra_attestation_changes' of https://github.com/sigp/lighthouse into block-processing-electra
* Merge branch 'block-processing-electra' of https://github.com/sigp/lighthouse into electra-engine-api
* Merge branch 'electra-engine-api' of https://github.com/sigp/lighthouse into beacon-api-electra
* updates after merge
* Fix bugs in cross-committee aggregation
* Add comment to max cover optimisation
* Fix assert
* Electra epoch processing
* add deposit limit for old deposit queue
* Merge branch 'electra-epoch-proc' of https://github.com/sigp/lighthouse into electra-engine-api
* Merge branch 'electra-engine-api' of https://github.com/sigp/lighthouse into beacon-api-electra
* Merge pull request #5749 from sigp/electra_op_pool
Optimise Electra op pool aggregation
* don't fail on empty consolidations
* Merge branch 'electra_attestation_changes' of https://github.com/sigp/lighthouse into block-processing-electra
* Merge branch 'block-processing-electra' of https://github.com/sigp/lighthouse into electra-epoch-proc
* Merge branch 'electra-epoch-proc' of https://github.com/sigp/lighthouse into electra-engine-api
* Merge branch 'electra-engine-api' of https://github.com/sigp/lighthouse into beacon-api-electra
* update committee offset
* Merge branch 'electra_attestation_changes' of https://github.com/sigp/lighthouse into block-processing-electra
* update committee offset
* update committee offset
* update committee offset
* only increment the state deposit index on old deposit flow
* Merge branch 'block-processing-electra' of https://github.com/sigp/lighthouse into electra-epoch-proc
* Merge branch 'electra-epoch-proc' of https://github.com/sigp/lighthouse into electra-engine-api
* Merge branch 'electra-engine-api' of https://github.com/sigp/lighthouse into beacon-api-electra
* use correct max eb in epoch cache initialization
* drop initiate validator ordering optimization
* fix initiate exit for single pass
* Merge branch 'electra-epoch-proc' of https://github.com/sigp/lighthouse into electra-engine-api
* accept new payload v4 in mock el
* Merge branch 'electra-engine-api' of https://github.com/sigp/lighthouse into beacon-api-electra
* Fix Electra Fork Choice Tests (#5764)
* Fix Electra Fork Choice Tests (#5764)
* Fix Electra Fork Choice Tests (#5764)
* Merge branch 'electra_attestation_changes' of https://github.com/sigp/lighthouse into block-processing-electra
* Merge branch 'block-processing-electra' of https://github.com/sigp/lighthouse into electra-epoch-proc
* Merge branch 'electra-epoch-proc' of https://github.com/sigp/lighthouse into electra-engine-api
* Merge branch 'electra-engine-api' of https://github.com/sigp/lighthouse into beacon-api-electra
* Fix Consolidation Sigs & Withdrawals
* Merge pull request #5766 from ethDreamer/two_fixes
Fix Consolidation Sigs & Withdrawals
* Merge branches 'block-processing-electra' and 'electra-epoch-proc' of https://github.com/sigp/lighthouse into electra-epoch-proc
* Merge branch 'electra-epoch-proc' of https://github.com/sigp/lighthouse into electra-engine-api
* Merge branch 'electra-engine-api' of https://github.com/sigp/lighthouse into beacon-api-electra
* Send unagg attestation based on fork
* Fix ser/de
* Merge branch 'electra-engine-api' into beacon-api-electra
* Subscribe to the correct subnets for electra attestations (#5782)
* subscribe to the correct att subnets for electra
* subscribe to the correct att subnets for electra
* cargo fmt
* Subscribe to the correct subnets for electra attestations (#5782)
* subscribe to the correct att subnets for electra
* subscribe to the correct att subnets for electra
* cargo fmt
* Subscribe to the correct subnets for electra attestations (#5782)
* subscribe to the correct att subnets for electra
* subscribe to the correct att subnets for electra
* cargo fmt
* Subscribe to the correct subnets for electra attestations (#5782)
* subscribe to the correct att subnets for electra
* subscribe to the correct att subnets for electra
* cargo fmt
* Subscribe to the correct subnets for electra attestations (#5782)
* subscribe to the correct att subnets for electra
* subscribe to the correct att subnets for electra
* cargo fmt
* update electra readiness with new endpoints
* fix slashing handling
* Fix Bug In Block Processing with 0x02 Credentials
* Merge remote-tracking branch 'upstream/unstable'
* Send unagg attestation based on fork
* Publish all aggregates
* just one more check bro plz..
* Merge pull request #5832 from ethDreamer/electra_attestation_changes_merge_unstable
Merge `unstable` into `electra_attestation_changes`
* Merge pull request #5835 from realbigsean/fix-validator-logic
Fix validator logic
* Merge pull request #5816 from realbigsean/electra-attestation-slashing-handling
Electra slashing handling
* Merge branch 'electra_attestation_changes' of https://github.com/sigp/lighthouse into block-processing-electra
* Merge branch 'block-processing-electra' of https://github.com/sigp/lighthouse into electra-epoch-proc
* Merge branch 'electra-epoch-proc' of https://github.com/sigp/lighthouse into electra-engine-api
* Merge branch 'electra-engine-api' of https://github.com/sigp/lighthouse into beacon-api-electra
* fix: serde rename camel case for execution payload body (#5846)
* Merge branch 'electra-engine-api' into beacon-api-electra
* Electra attestation changes rm decode impl (#5856)
* Remove Crappy Decode impl for Attestation
* Remove Inefficient Attestation Decode impl
* Implement Schema Upgrade / Downgrade
* Update beacon_node/beacon_chain/src/schema_change/migration_schema_v20.rs
Co-authored-by: Michael Sproul <micsproul@gmail.com>
---------
Co-authored-by: Michael Sproul <micsproul@gmail.com>
* Fix failing attestation tests and misc electra attestation cleanup (#5810)
* - get attestation related beacon chain tests to pass
- observed attestations are now keyed off of data + committee index
- rename op pool attestationref to compactattestationref
- remove unwraps in agg pool and use options instead
- cherry pick some changes from ef-tests-electra
* cargo fmt
* fix failing test
* Revert dockerfile changes
* make committee_index return option
* function args shouldnt be a ref to attestation ref
* fmt
* fix dup imports
---------
Co-authored-by: realbigsean <seananderson33@GMAIL.com>
* fix some todos (#5817)
* Merge branch 'unstable' of https://github.com/sigp/lighthouse into electra_attestation_changes
* add consolidations to merkle calc for inclusion proof
* Merge branch 'electra_attestation_changes' of https://github.com/sigp/lighthouse into block-processing-electra
* Merge branch 'block-processing-electra' of https://github.com/sigp/lighthouse into electra-epoch-proc
* Merge branch 'electra-epoch-proc' of https://github.com/sigp/lighthouse into electra-engine-api
* Merge branch 'electra-engine-api' of https://github.com/sigp/lighthouse into beacon-api-electra
* Remove Duplicate KZG Commitment Merkle Proof Code (#5874)
* Remove Duplicate KZG Commitment Merkle Proof Code
* s/tree_lists/fields/
* Merge branch 'unstable' of https://github.com/sigp/lighthouse into electra_attestation_changes
* Merge branch 'electra_attestation_changes' of https://github.com/sigp/lighthouse into block-processing-electra
* Merge branch 'block-processing-electra' of https://github.com/sigp/lighthouse into electra-epoch-proc
* Merge branch 'electra-epoch-proc' of https://github.com/sigp/lighthouse into electra-engine-api
* Merge branch 'electra-engine-api' of https://github.com/sigp/lighthouse into beacon-api-electra
* fix compile
* Merge branch 'electra_attestation_changes' of https://github.com/sigp/lighthouse into block-processing-electra
* Merge branch 'block-processing-electra' of https://github.com/sigp/lighthouse into electra-epoch-proc
* Merge branch 'electra-epoch-proc' of https://github.com/sigp/lighthouse into electra-engine-api
* Merge branch 'electra-engine-api' of https://github.com/sigp/lighthouse into beacon-api-electra
* Fix slasher tests (#5906)
* Fix electra tests
* Add electra attestations to double vote tests
* Update superstruct to 0.8
* Merge remote-tracking branch 'origin/unstable' into electra_attestation_changes
* Small cleanup in slasher tests
* Clean up Electra observed aggregates (#5929)
* Use consistent key in observed_attestations
* Remove unwraps from observed aggregates
* Merge branch 'unstable' of https://github.com/sigp/lighthouse into electra_attestation_changes
* De-dup attestation constructor logic
* Remove unwraps in Attestation construction
* Dedup match_attestation_data
* Remove outdated TODO
* Use ForkName Ord in fork-choice tests
* Use ForkName Ord in BeaconBlockBody
* Make to_electra not fallible
* Remove TestRandom impl for IndexedAttestation
* Remove IndexedAttestation faulty Decode impl
* Drop TestRandom impl
* Add PendingAttestationInElectra
* Indexed att on disk (#35)
* indexed att on disk
* fix lints
* Update slasher/src/migrate.rs
Co-authored-by: ethDreamer <37123614+ethDreamer@users.noreply.github.com>
---------
Co-authored-by: Lion - dapplion <35266934+dapplion@users.noreply.github.com>
Co-authored-by: ethDreamer <37123614+ethDreamer@users.noreply.github.com>
* add electra fork enabled fn to ForkName impl (#36)
* add electra fork enabled fn to ForkName impl
* remove inadvertent file
* Update common/eth2/src/types.rs
Co-authored-by: ethDreamer <37123614+ethDreamer@users.noreply.github.com>
* Dedup attestation constructor logic in attester cache
* Use if let Ok for committee_bits
* Dedup Attestation constructor code
* Diff reduction in tests
* Fix beacon_chain tests
* Diff reduction
* Use Ord for ForkName in pubsub
* Resolve into_attestation_and_indices todo
* Remove stale TODO
* Fix beacon_chain tests
* Test spec invariant
* Use electra_enabled in pubsub
* Remove get_indexed_attestation_from_signed_aggregate
* Use ok_or instead of if let else
* committees are sorted
* remove dup method `get_indexed_attestation_from_committees`
* Merge pull request #5940 from dapplion/electra_attestation_changes_lionreview
Electra attestations #5712 review
* update default persisted op pool deserialization
* ensure aggregate and proof uses serde untagged on ref
* Fork aware ssz static attestation tests
* Electra attestation changes from Lions review (#5971)
* dedup/cleanup and remove unneeded hashset use
* remove irrelevant TODOs
* Merge branch 'unstable' of https://github.com/sigp/lighthouse into electra_attestation_changes
* Merge branch 'electra_attestation_changes' of https://github.com/sigp/lighthouse into block-processing-electra
* Merge branch 'block-processing-electra' of https://github.com/sigp/lighthouse into electra-epoch-proc
* Merge branch 'electra-epoch-proc' of https://github.com/sigp/lighthouse into electra-engine-api
* Merge branch 'electra-engine-api' of https://github.com/sigp/lighthouse into beacon-api-electra
* Fix Compilation Break
* Merge pull request #5973 from ethDreamer/beacon-api-electra
Fix Compilation Break
* Electra attestation changes sean review (#5972)
* instantiate empty bitlist in unreachable code
* clean up error conversion
* fork enabled bool cleanup
* remove a couple todos
* return bools instead of options in `aggregate` and use the result
* delete commented out code
* use map macros in simple transformations
* remove signers_disjoint_from
* get ef tests compiling
* get ef tests compiling
* update intentionally excluded files
* Avoid changing slasher schema for Electra
* Delete slasher schema v4
* Fix clippy
* Fix compilation of beacon_chain tests
* Update database.rs
* Update per_block_processing.rs
* Add electra lightclient types
* Update slasher/src/database.rs
* fix imports
* Merge pull request #5980 from dapplion/electra-lightclient
Add electra lightclient types
* Merge pull request #5975 from michaelsproul/electra-slasher-no-migration
Avoid changing slasher schema for Electra
* Update beacon_node/beacon_chain/src/attestation_verification.rs
* Update beacon_node/beacon_chain/src/attestation_verification.rs
* Merge branch 'unstable' of https://github.com/sigp/lighthouse into electra_attestation_changes
* Merge branch 'electra_attestation_changes' of https://github.com/realbigsean/lighthouse into block-processing-electra
* Merge branch 'block-processing-electra' of https://github.com/sigp/lighthouse into electra-epoch-proc
* Merge branch 'electra-epoch-proc' of https://github.com/sigp/lighthouse into electra-engine-api
* Merge branch 'electra-engine-api' of https://github.com/sigp/lighthouse into beacon-api-electra
* The great renaming receipt -> request
* Address some more review comments
* Merge branch 'unstable' of https://github.com/sigp/lighthouse into electra-engine-api
* Update beacon_node/beacon_chain/src/electra_readiness.rs
* Update consensus/types/src/chain_spec.rs
* update GET requests
* update POST requests
* add client updates and test updates
* Merge branch 'unstable' of https://github.com/sigp/lighthouse into electra-engine-api
* Merge branch 'electra-engine-api' of https://github.com/sigp/lighthouse into beacon-api-electra
* compile after merge
* unwrap -> unwrap_err
* self review
* fix tests
* convert op pool messages to electra in electra
* remove methods to post without content header
* filter instead of convert
752 lines
28 KiB
Rust
752 lines
28 KiB
Rust
use crate::beacon_node_fallback::{ApiTopic, BeaconNodeFallback, RequireSynced};
|
|
use crate::{
|
|
duties_service::{DutiesService, DutyAndProof},
|
|
http_metrics::metrics,
|
|
validator_store::{Error as ValidatorStoreError, ValidatorStore},
|
|
OfflineOnFailure,
|
|
};
|
|
use environment::RuntimeContext;
|
|
use futures::future::join_all;
|
|
use slog::{crit, debug, error, info, trace, warn};
|
|
use slot_clock::SlotClock;
|
|
use std::collections::HashMap;
|
|
use std::ops::Deref;
|
|
use std::sync::Arc;
|
|
use tokio::time::{sleep, sleep_until, Duration, Instant};
|
|
use tree_hash::TreeHash;
|
|
use types::{Attestation, AttestationData, ChainSpec, CommitteeIndex, EthSpec, Slot};
|
|
|
|
/// Builds an `AttestationService`.
///
/// Every component starts as `None`; each setter fills one in and `build`
/// fails with a descriptive error if any component is still missing.
pub struct AttestationServiceBuilder<T: SlotClock + 'static, E: EthSpec> {
    // Source of attester duties for each slot.
    duties_service: Option<Arc<DutiesService<T, E>>>,
    // Signs attestations on behalf of the managed validators.
    validator_store: Option<Arc<ValidatorStore<T, E>>>,
    // Tracks the current slot and the time remaining until the next one.
    slot_clock: Option<T>,
    // Beacon node(s) used to fetch attestation data and publish signed messages.
    beacon_nodes: Option<Arc<BeaconNodeFallback<T, E>>>,
    // Runtime handle providing the logger and task executor.
    context: Option<RuntimeContext<E>>,
}
|
|
|
|
impl<T: SlotClock + 'static, E: EthSpec> AttestationServiceBuilder<T, E> {
|
|
pub fn new() -> Self {
|
|
Self {
|
|
duties_service: None,
|
|
validator_store: None,
|
|
slot_clock: None,
|
|
beacon_nodes: None,
|
|
context: None,
|
|
}
|
|
}
|
|
|
|
pub fn duties_service(mut self, service: Arc<DutiesService<T, E>>) -> Self {
|
|
self.duties_service = Some(service);
|
|
self
|
|
}
|
|
|
|
pub fn validator_store(mut self, store: Arc<ValidatorStore<T, E>>) -> Self {
|
|
self.validator_store = Some(store);
|
|
self
|
|
}
|
|
|
|
pub fn slot_clock(mut self, slot_clock: T) -> Self {
|
|
self.slot_clock = Some(slot_clock);
|
|
self
|
|
}
|
|
|
|
pub fn beacon_nodes(mut self, beacon_nodes: Arc<BeaconNodeFallback<T, E>>) -> Self {
|
|
self.beacon_nodes = Some(beacon_nodes);
|
|
self
|
|
}
|
|
|
|
pub fn runtime_context(mut self, context: RuntimeContext<E>) -> Self {
|
|
self.context = Some(context);
|
|
self
|
|
}
|
|
|
|
pub fn build(self) -> Result<AttestationService<T, E>, String> {
|
|
Ok(AttestationService {
|
|
inner: Arc::new(Inner {
|
|
duties_service: self
|
|
.duties_service
|
|
.ok_or("Cannot build AttestationService without duties_service")?,
|
|
validator_store: self
|
|
.validator_store
|
|
.ok_or("Cannot build AttestationService without validator_store")?,
|
|
slot_clock: self
|
|
.slot_clock
|
|
.ok_or("Cannot build AttestationService without slot_clock")?,
|
|
beacon_nodes: self
|
|
.beacon_nodes
|
|
.ok_or("Cannot build AttestationService without beacon_nodes")?,
|
|
context: self
|
|
.context
|
|
.ok_or("Cannot build AttestationService without runtime_context")?,
|
|
}),
|
|
})
|
|
}
|
|
}
|
|
|
|
/// Helper to minimise `Arc` usage.
///
/// All of the service's state lives here behind a single `Arc` (see
/// `AttestationService`), rather than wrapping each field individually.
pub struct Inner<T, E: EthSpec> {
    // Components supplied via `AttestationServiceBuilder`; see its setters
    // for what each one provides.
    duties_service: Arc<DutiesService<T, E>>,
    validator_store: Arc<ValidatorStore<T, E>>,
    slot_clock: T,
    beacon_nodes: Arc<BeaconNodeFallback<T, E>>,
    context: RuntimeContext<E>,
}
|
|
|
|
/// Attempts to produce attestations for all known validators 1/3rd of the way through each slot.
///
/// If any validators are on the same committee, a single attestation will be downloaded and
/// returned to the beacon node. This attestation will have a signature from each of the
/// validators.
pub struct AttestationService<T, E: EthSpec> {
    // Single `Arc` around all state so the service can be cloned cheaply
    // into spawned per-committee tasks (see the `Clone` impl below).
    inner: Arc<Inner<T, E>>,
}
|
|
|
|
impl<T, E: EthSpec> Clone for AttestationService<T, E> {
|
|
fn clone(&self) -> Self {
|
|
Self {
|
|
inner: self.inner.clone(),
|
|
}
|
|
}
|
|
}
|
|
|
|
impl<T, E: EthSpec> Deref for AttestationService<T, E> {
|
|
type Target = Inner<T, E>;
|
|
|
|
fn deref(&self) -> &Self::Target {
|
|
self.inner.deref()
|
|
}
|
|
}
|
|
|
|
impl<T: SlotClock + 'static, E: EthSpec> AttestationService<T, E> {
|
|
/// Starts the service which periodically produces attestations.
|
|
pub fn start_update_service(self, spec: &ChainSpec) -> Result<(), String> {
|
|
let log = self.context.log().clone();
|
|
|
|
let slot_duration = Duration::from_secs(spec.seconds_per_slot);
|
|
let duration_to_next_slot = self
|
|
.slot_clock
|
|
.duration_to_next_slot()
|
|
.ok_or("Unable to determine duration to next slot")?;
|
|
|
|
info!(
|
|
log,
|
|
"Attestation production service started";
|
|
"next_update_millis" => duration_to_next_slot.as_millis()
|
|
);
|
|
|
|
let executor = self.context.executor.clone();
|
|
|
|
let interval_fut = async move {
|
|
loop {
|
|
if let Some(duration_to_next_slot) = self.slot_clock.duration_to_next_slot() {
|
|
sleep(duration_to_next_slot + slot_duration / 3).await;
|
|
let log = self.context.log();
|
|
|
|
if let Err(e) = self.spawn_attestation_tasks(slot_duration) {
|
|
crit!(
|
|
log,
|
|
"Failed to spawn attestation tasks";
|
|
"error" => e
|
|
)
|
|
} else {
|
|
trace!(
|
|
log,
|
|
"Spawned attestation tasks";
|
|
)
|
|
}
|
|
} else {
|
|
error!(log, "Failed to read slot clock");
|
|
// If we can't read the slot clock, just wait another slot.
|
|
sleep(slot_duration).await;
|
|
continue;
|
|
}
|
|
}
|
|
};
|
|
|
|
executor.spawn(interval_fut, "attestation_service");
|
|
Ok(())
|
|
}
|
|
|
|
/// For each each required attestation, spawn a new task that downloads, signs and uploads the
|
|
/// attestation to the beacon node.
|
|
fn spawn_attestation_tasks(&self, slot_duration: Duration) -> Result<(), String> {
|
|
let slot = self.slot_clock.now().ok_or("Failed to read slot clock")?;
|
|
let duration_to_next_slot = self
|
|
.slot_clock
|
|
.duration_to_next_slot()
|
|
.ok_or("Unable to determine duration to next slot")?;
|
|
|
|
// If a validator needs to publish an aggregate attestation, they must do so at 2/3
|
|
// through the slot. This delay triggers at this time
|
|
let aggregate_production_instant = Instant::now()
|
|
+ duration_to_next_slot
|
|
.checked_sub(slot_duration / 3)
|
|
.unwrap_or_else(|| Duration::from_secs(0));
|
|
|
|
let duties_by_committee_index: HashMap<CommitteeIndex, Vec<DutyAndProof>> = self
|
|
.duties_service
|
|
.attesters(slot)
|
|
.into_iter()
|
|
.fold(HashMap::new(), |mut map, duty_and_proof| {
|
|
map.entry(duty_and_proof.duty.committee_index)
|
|
.or_default()
|
|
.push(duty_and_proof);
|
|
map
|
|
});
|
|
|
|
// For each committee index for this slot:
|
|
//
|
|
// - Create and publish an `Attestation` for all required validators.
|
|
// - Create and publish `SignedAggregateAndProof` for all aggregating validators.
|
|
duties_by_committee_index
|
|
.into_iter()
|
|
.for_each(|(committee_index, validator_duties)| {
|
|
// Spawn a separate task for each attestation.
|
|
self.inner.context.executor.spawn_ignoring_error(
|
|
self.clone().publish_attestations_and_aggregates(
|
|
slot,
|
|
committee_index,
|
|
validator_duties,
|
|
aggregate_production_instant,
|
|
),
|
|
"attestation publish",
|
|
);
|
|
});
|
|
|
|
// Schedule pruning of the slashing protection database once all unaggregated
|
|
// attestations have (hopefully) been signed, i.e. at the same time as aggregate
|
|
// production.
|
|
self.spawn_slashing_protection_pruning_task(slot, aggregate_production_instant);
|
|
|
|
Ok(())
|
|
}
|
|
|
|
/// Performs the first step of the attesting process: downloading `Attestation` objects,
|
|
/// signing them and returning them to the validator.
|
|
///
|
|
/// https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/validator.md#attesting
|
|
///
|
|
/// ## Detail
|
|
///
|
|
/// The given `validator_duties` should already be filtered to only contain those that match
|
|
/// `slot` and `committee_index`. Critical errors will be logged if this is not the case.
|
|
async fn publish_attestations_and_aggregates(
|
|
self,
|
|
slot: Slot,
|
|
committee_index: CommitteeIndex,
|
|
validator_duties: Vec<DutyAndProof>,
|
|
aggregate_production_instant: Instant,
|
|
) -> Result<(), ()> {
|
|
let log = self.context.log();
|
|
let attestations_timer = metrics::start_timer_vec(
|
|
&metrics::ATTESTATION_SERVICE_TIMES,
|
|
&[metrics::ATTESTATIONS],
|
|
);
|
|
|
|
// There's not need to produce `Attestation` or `SignedAggregateAndProof` if we do not have
|
|
// any validators for the given `slot` and `committee_index`.
|
|
if validator_duties.is_empty() {
|
|
return Ok(());
|
|
}
|
|
|
|
// Step 1.
|
|
//
|
|
// Download, sign and publish an `Attestation` for each validator.
|
|
let attestation_opt = self
|
|
.produce_and_publish_attestations(slot, committee_index, &validator_duties)
|
|
.await
|
|
.map_err(move |e| {
|
|
crit!(
|
|
log,
|
|
"Error during attestation routine";
|
|
"error" => format!("{:?}", e),
|
|
"committee_index" => committee_index,
|
|
"slot" => slot.as_u64(),
|
|
)
|
|
})?;
|
|
|
|
drop(attestations_timer);
|
|
|
|
// Step 2.
|
|
//
|
|
// If an attestation was produced, make an aggregate.
|
|
if let Some(attestation_data) = attestation_opt {
|
|
// First, wait until the `aggregation_production_instant` (2/3rds
|
|
// of the way though the slot). As verified in the
|
|
// `delay_triggers_when_in_the_past` test, this code will still run
|
|
// even if the instant has already elapsed.
|
|
sleep_until(aggregate_production_instant).await;
|
|
|
|
// Start the metrics timer *after* we've done the delay.
|
|
let _aggregates_timer = metrics::start_timer_vec(
|
|
&metrics::ATTESTATION_SERVICE_TIMES,
|
|
&[metrics::AGGREGATES],
|
|
);
|
|
|
|
// Then download, sign and publish a `SignedAggregateAndProof` for each
|
|
// validator that is elected to aggregate for this `slot` and
|
|
// `committee_index`.
|
|
self.produce_and_publish_aggregates(
|
|
&attestation_data,
|
|
committee_index,
|
|
&validator_duties,
|
|
)
|
|
.await
|
|
.map_err(move |e| {
|
|
crit!(
|
|
log,
|
|
"Error during attestation routine";
|
|
"error" => format!("{:?}", e),
|
|
"committee_index" => committee_index,
|
|
"slot" => slot.as_u64(),
|
|
)
|
|
})?;
|
|
}
|
|
|
|
Ok(())
|
|
}
|
|
|
|
/// Performs the first step of the attesting process: downloading `Attestation` objects,
/// signing them and returning them to the validator.
///
/// https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/validator.md#attesting
///
/// ## Detail
///
/// The given `validator_duties` should already be filtered to only contain those that match
/// `slot` and `committee_index`. Critical errors will be logged if this is not the case.
///
/// Only one `Attestation` is downloaded from the BN. It is then cloned and signed by each
/// validator and the list of individually-signed `Attestation` objects is returned to the BN.
///
/// Returns `Ok(None)` when there were no duties or no attestation could be signed;
/// otherwise returns the `AttestationData` so the caller can aggregate on it.
async fn produce_and_publish_attestations(
    &self,
    slot: Slot,
    committee_index: CommitteeIndex,
    validator_duties: &[DutyAndProof],
) -> Result<Option<AttestationData>, String> {
    let log = self.context.log();

    // Nothing to do for an empty duty set.
    if validator_duties.is_empty() {
        return Ok(None);
    }

    // Epoch of the *current* slot (not necessarily `slot`) — used for
    // slashing-protection when signing below.
    let current_epoch = self
        .slot_clock
        .now()
        .ok_or("Unable to determine current slot from clock")?
        .epoch(E::slots_per_epoch());

    // Download a single `AttestationData` from the first beacon node that
    // responds successfully; the HTTP GET is timed for metrics.
    let attestation_data = self
        .beacon_nodes
        .first_success(
            RequireSynced::No,
            OfflineOnFailure::Yes,
            |beacon_node| async move {
                let _timer = metrics::start_timer_vec(
                    &metrics::ATTESTATION_SERVICE_TIMES,
                    &[metrics::ATTESTATIONS_HTTP_GET],
                );
                beacon_node
                    .get_validator_attestation_data(slot, committee_index)
                    .await
                    .map_err(|e| format!("Failed to produce attestation data: {:?}", e))
                    .map(|result| result.data)
            },
        )
        .await
        .map_err(|e| e.to_string())?;

    // Create futures to produce signed `Attestation` objects.
    // (A reference is captured so each async closure can share the one download.)
    let attestation_data_ref = &attestation_data;
    let signing_futures = validator_duties.iter().map(|duty_and_proof| async move {
        let duty = &duty_and_proof.duty;
        let attestation_data = attestation_data_ref;

        // Ensure that the attestation matches the duties.
        if !duty.match_attestation_data::<E>(attestation_data, &self.context.eth2_config.spec) {
            crit!(
                log,
                "Inconsistent validator duties during signing";
                "validator" => ?duty.pubkey,
                "duty_slot" => duty.slot,
                "attestation_slot" => attestation_data.slot,
                "duty_index" => duty.committee_index,
                "attestation_index" => attestation_data.index,
            );
            return None;
        }

        // Build an unsigned attestation for this validator's committee; a
        // failure here indicates bad duty data, so log and skip the validator.
        let mut attestation = match Attestation::<E>::empty_for_signing(
            duty.committee_index,
            duty.committee_length as usize,
            attestation_data.slot,
            attestation_data.beacon_block_root,
            attestation_data.source,
            attestation_data.target,
            &self.context.eth2_config.spec,
        ) {
            Ok(attestation) => attestation,
            Err(err) => {
                crit!(
                    log,
                    "Invalid validator duties during signing";
                    "validator" => ?duty.pubkey,
                    "duty" => ?duty,
                    "err" => ?err,
                );
                return None;
            }
        };

        // Sign in-place; yields `(attestation, validator_index)` on success,
        // `None` (with logging) on any failure so other validators still publish.
        match self
            .validator_store
            .sign_attestation(
                duty.pubkey,
                duty.validator_committee_index as usize,
                &mut attestation,
                current_epoch,
            )
            .await
        {
            Ok(()) => Some((attestation, duty.validator_index)),
            Err(ValidatorStoreError::UnknownPubkey(pubkey)) => {
                // A pubkey can be missing when a validator was recently
                // removed via the API.
                warn!(
                    log,
                    "Missing pubkey for attestation";
                    "info" => "a validator may have recently been removed from this VC",
                    "pubkey" => ?pubkey,
                    "validator" => ?duty.pubkey,
                    "committee_index" => committee_index,
                    "slot" => slot.as_u64(),
                );
                None
            }
            Err(e) => {
                crit!(
                    log,
                    "Failed to sign attestation";
                    "error" => ?e,
                    "validator" => ?duty.pubkey,
                    "committee_index" => committee_index,
                    "slot" => slot.as_u64(),
                );
                None
            }
        }
    });

    // Execute all the futures in parallel, collecting any successful results.
    let (ref attestations, ref validator_indices): (Vec<_>, Vec<_>) = join_all(signing_futures)
        .await
        .into_iter()
        .flatten()
        .unzip();

    if attestations.is_empty() {
        warn!(log, "No attestations were published");
        return Ok(None);
    }
    // Fork at the attestation's slot decides which BN endpoint version to use.
    let fork_name = self
        .context
        .eth2_config
        .spec
        .fork_name_at_slot::<E>(attestation_data.slot);

    // Post the attestations to the BN.
    match self
        .beacon_nodes
        .request(
            RequireSynced::No,
            OfflineOnFailure::Yes,
            ApiTopic::Attestations,
            |beacon_node| async move {
                let _timer = metrics::start_timer_vec(
                    &metrics::ATTESTATION_SERVICE_TIMES,
                    &[metrics::ATTESTATIONS_HTTP_POST],
                );
                // v2 endpoint is fork-aware and required from Electra onwards.
                if fork_name.electra_enabled() {
                    beacon_node
                        .post_beacon_pool_attestations_v2(attestations, fork_name)
                        .await
                } else {
                    beacon_node
                        .post_beacon_pool_attestations_v1(attestations)
                        .await
                }
            },
        )
        .await
    {
        Ok(()) => info!(
            log,
            "Successfully published attestations";
            "count" => attestations.len(),
            "validator_indices" => ?validator_indices,
            "head_block" => ?attestation_data.beacon_block_root,
            "committee_index" => attestation_data.index,
            "slot" => attestation_data.slot.as_u64(),
            "type" => "unaggregated",
        ),
        // Publish failure is logged but not fatal: we still return the data
        // so aggregation can proceed.
        Err(e) => error!(
            log,
            "Unable to publish attestations";
            "error" => %e,
            "committee_index" => attestation_data.index,
            "slot" => slot.as_u64(),
            "type" => "unaggregated",
        ),
    }

    Ok(Some(attestation_data))
}
|
|
|
|
/// Performs the second step of the attesting process: downloading an aggregated `Attestation`,
/// converting it into a `SignedAggregateAndProof` and returning it to the BN.
///
/// https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/validator.md#broadcast-aggregate
///
/// ## Detail
///
/// The given `validator_duties` should already be filtered to only contain those that match
/// `slot` and `committee_index`. Critical errors will be logged if this is not the case.
///
/// Only one aggregated `Attestation` is downloaded from the BN. It is then cloned and signed
/// by each validator and the list of individually-signed `SignedAggregateAndProof` objects is
/// returned to the BN.
async fn produce_and_publish_aggregates(
    &self,
    attestation_data: &AttestationData,
    committee_index: CommitteeIndex,
    validator_duties: &[DutyAndProof],
) -> Result<(), String> {
    let log = self.context.log();

    // A duty carries a `selection_proof` only when that validator won the aggregator
    // selection for this slot/committee; if nobody did, there is nothing to do.
    if !validator_duties
        .iter()
        .any(|duty_and_proof| duty_and_proof.selection_proof.is_some())
    {
        // Exit early if no validator is aggregator
        return Ok(());
    }

    // The fork at the attestation's slot decides which HTTP API version we must use
    // below (v2 endpoints carry the committee index / fork name for Electra, EIP-7549).
    let fork_name = self
        .context
        .eth2_config
        .spec
        .fork_name_at_slot::<E>(attestation_data.slot);

    // Fetch ONE aggregate from the first responsive BN. Note the leading `&`: the
    // result is bound by reference so the per-duty signing futures below can capture
    // it by shared reference inside their `async move` closures and clone per duty.
    let aggregated_attestation = &self
        .beacon_nodes
        .first_success(
            RequireSynced::No,
            OfflineOnFailure::Yes,
            |beacon_node| async move {
                // Timer records the HTTP GET latency; stopped when dropped.
                let _timer = metrics::start_timer_vec(
                    &metrics::ATTESTATION_SERVICE_TIMES,
                    &[metrics::AGGREGATES_HTTP_GET],
                );
                if fork_name.electra_enabled() {
                    // Post-Electra: the v2 endpoint requires the committee index
                    // since EIP-7549 moved it out of `AttestationData`.
                    beacon_node
                        .get_validator_aggregate_attestation_v2(
                            attestation_data.slot,
                            attestation_data.tree_hash_root(),
                            committee_index,
                        )
                        .await
                        .map_err(|e| {
                            format!("Failed to produce an aggregate attestation: {:?}", e)
                        })?
                        .ok_or_else(|| {
                            format!("No aggregate available for {:?}", attestation_data)
                        })
                        .map(|result| result.data)
                } else {
                    // Pre-Electra: v1 endpoint, keyed only by slot + data root.
                    beacon_node
                        .get_validator_aggregate_attestation_v1(
                            attestation_data.slot,
                            attestation_data.tree_hash_root(),
                        )
                        .await
                        .map_err(|e| {
                            format!("Failed to produce an aggregate attestation: {:?}", e)
                        })?
                        .ok_or_else(|| {
                            format!("No aggregate available for {:?}", attestation_data)
                        })
                        .map(|result| result.data)
                }
            },
        )
        .await
        .map_err(|e| e.to_string())?;

    // Create futures to produce the signed aggregated attestations.
    // Each future yields `Option<SignedAggregateAndProof>`; `None` means this duty is
    // either not an aggregator or failed to sign (already logged), and is dropped.
    let signing_futures = validator_duties.iter().map(|duty_and_proof| async move {
        let duty = &duty_and_proof.duty;
        // `?` on the Option: non-aggregators short-circuit to `None` here.
        let selection_proof = duty_and_proof.selection_proof.as_ref()?;

        // Defensive check: the caller promised duties matching this attestation data.
        if !duty.match_attestation_data::<E>(attestation_data, &self.context.eth2_config.spec) {
            crit!(log, "Inconsistent validator duties during signing");
            return None;
        }

        match self
            .validator_store
            .produce_signed_aggregate_and_proof(
                duty.pubkey,
                duty.validator_index,
                aggregated_attestation.clone(),
                selection_proof.clone(),
            )
            .await
        {
            Ok(aggregate) => Some(aggregate),
            Err(ValidatorStoreError::UnknownPubkey(pubkey)) => {
                // A pubkey can be missing when a validator was recently
                // removed via the API.
                debug!(
                    log,
                    "Missing pubkey for aggregate";
                    "pubkey" => ?pubkey,
                );
                None
            }
            Err(e) => {
                crit!(
                    log,
                    "Failed to sign aggregate";
                    "error" => ?e,
                    "pubkey" => ?duty.pubkey,
                );
                None
            }
        }
    });

    // Execute all the futures in parallel, collecting any successful results.
    let signed_aggregate_and_proofs = join_all(signing_futures)
        .await
        .into_iter()
        .flatten()
        .collect::<Vec<_>>();

    if !signed_aggregate_and_proofs.is_empty() {
        // Borrow as a slice so the `async move` closure below captures a copyable
        // reference rather than consuming the Vec (it may be retried on another BN).
        let signed_aggregate_and_proofs_slice = signed_aggregate_and_proofs.as_slice();
        match self
            .beacon_nodes
            .first_success(
                RequireSynced::No,
                OfflineOnFailure::Yes,
                |beacon_node| async move {
                    let _timer = metrics::start_timer_vec(
                        &metrics::ATTESTATION_SERVICE_TIMES,
                        &[metrics::AGGREGATES_HTTP_POST],
                    );
                    if fork_name.electra_enabled() {
                        // v2 publish endpoint requires the fork name for SSZ/JSON
                        // (de)serialization of the superstruct variants.
                        beacon_node
                            .post_validator_aggregate_and_proof_v2(
                                signed_aggregate_and_proofs_slice,
                                fork_name,
                            )
                            .await
                    } else {
                        beacon_node
                            .post_validator_aggregate_and_proof_v1(
                                signed_aggregate_and_proofs_slice,
                            )
                            .await
                    }
                },
            )
            .await
        {
            Ok(()) => {
                // Log one line per published aggregate for observability.
                for signed_aggregate_and_proof in signed_aggregate_and_proofs {
                    let attestation = signed_aggregate_and_proof.message().aggregate();
                    info!(
                        log,
                        "Successfully published attestation";
                        "aggregator" => signed_aggregate_and_proof.message().aggregator_index(),
                        "signatures" => attestation.num_set_aggregation_bits(),
                        "head_block" => format!("{:?}", attestation.data().beacon_block_root),
                        "committee_index" => attestation.committee_index(),
                        "slot" => attestation.data().slot.as_u64(),
                        "type" => "aggregated",
                    );
                }
            }
            Err(e) => {
                // Publishing failed on every BN; surface each aggregate we lost.
                for signed_aggregate_and_proof in signed_aggregate_and_proofs {
                    let attestation = &signed_aggregate_and_proof.message().aggregate();
                    crit!(
                        log,
                        "Failed to publish attestation";
                        "error" => %e,
                        "aggregator" => signed_aggregate_and_proof.message().aggregator_index(),
                        "committee_index" => attestation.committee_index(),
                        "slot" => attestation.data().slot.as_u64(),
                        "type" => "aggregated",
                    );
                }
            }
        }
    }

    // Publish failures are only logged, not propagated: the BN rejecting aggregates
    // should not abort the service loop. `Err` is reserved for produce-side failures.
    Ok(())
}
|
|
|
|
/// Spawn a blocking task to run the slashing protection pruning process.
|
|
///
|
|
/// Start the task at `pruning_instant` to avoid interference with other tasks.
|
|
fn spawn_slashing_protection_pruning_task(&self, slot: Slot, pruning_instant: Instant) {
|
|
let attestation_service = self.clone();
|
|
let executor = self.inner.context.executor.clone();
|
|
let current_epoch = slot.epoch(E::slots_per_epoch());
|
|
|
|
// Wait for `pruning_instant` in a regular task, and then switch to a blocking one.
|
|
self.inner.context.executor.spawn(
|
|
async move {
|
|
sleep_until(pruning_instant).await;
|
|
|
|
executor.spawn_blocking(
|
|
move || {
|
|
attestation_service
|
|
.validator_store
|
|
.prune_slashing_protection_db(current_epoch, false)
|
|
},
|
|
"slashing_protection_pruning",
|
|
)
|
|
},
|
|
"slashing_protection_pre_pruning",
|
|
);
|
|
}
|
|
}
|
|
|
|
#[cfg(test)]
mod tests {
    use super::*;
    use futures::future::FutureExt;
    use parking_lot::RwLock;

    /// This test is to ensure that a `tokio_timer::Sleep` with an instant in the past will still
    /// trigger.
    #[tokio::test]
    async fn delay_triggers_when_in_the_past() {
        let past_instant = Instant::now() - Duration::from_secs(2);
        // Shared cell recording when (if ever) the sleep completed.
        let fired_at = Arc::new(RwLock::new(past_instant));
        let observer = fired_at.clone();

        // If the timer never fires, this await hangs and the test times out.
        let record_completion = move |()| *fired_at.write() = Instant::now();
        sleep_until(past_instant).map(record_completion).await;

        assert!(
            *observer.read() > past_instant,
            "state should have been updated"
        );
    }
}
|