Port validator_client to stable futures (#1114)

* Add PH & MS slot clock changes

* Account for genesis time

* Add progress on duties refactor

* Add simple is_aggregator bool to val subscription

* Start work on attestation_verification.rs

* Add progress on ObservedAttestations

* Progress with ObservedAttestations

* Fix tests

* Add observed attestations to the beacon chain

* Add attestation observation to processing code

* Add progress on attestation verification

* Add first draft of ObservedAttesters

* Add more tests

* Add observed attesters to beacon chain

* Add observers to attestation processing

* Add more attestation verification

* Create ObservedAggregators map

* Remove commented-out code

* Add observed aggregators into chain

* Add progress

* Finish adding features to attestation verification

* Ensure beacon chain compiles

* Link attn verification into chain

* Integrate new attn verification in chain

* Remove old attestation processing code

* Start trying to fix beacon_chain tests

* Split adding into pools into two functions

* Add aggregation to harness

* Get test harness working again

* Adjust the number of aggregators for test harness

* Fix edge-case in harness

* Integrate new attn processing in network

* Fix compile bug in validator_client

* Update validator API endpoints

* Fix aggregation in test harness

* Fix enum thing

* Fix attestation observation bug

* Patch failing API tests

* Start adding comments to attestation verification

* Remove unused attestation field

* Unify "is block known" logic

* Update comments

* Suppress fork choice errors for network processing

* Add todos

* Tidy

* Add gossip attn tests

* Disallow test harness to produce old attns

* Comment out in-progress tests

* Partially address pruning tests

* Fix failing store test

* Add aggregate tests

* Add comments about which spec conditions we check

* Don't re-aggregate

* Split apart test harness attn production

* Fix compile error in network

* Make progress on commented-out test

* Fix skipping attestation test

* Add fork choice verification tests

* Tidy attn tests, remove dead code

* Remove some accidentally added code

* Fix clippy lint

* Rename test file

* Add block tests, add cheap block proposer check

* Rename block testing file

* Add observed_block_producers

* Tidy

* Switch around block signature verification

* Finish block testing

* Remove gossip from signature tests

* First pass of self review

* Fix deviation in spec

* Update test spec tags

* Start moving over to hashset

* Finish moving observed attesters to hashmap

* Move aggregation pool over to hashmap

* Make fc attn borrow again

* Fix rest_api compile error

* Fix missing comments

* Fix monster test

* Uncomment increasing slots test

* Address remaining comments

* Remove unsafe, use cfg test

* Remove cfg test flag

* Fix dodgy comment

* Revert "Update hashmap hashset to stable futures"

This reverts commit d432378a3c.

* Revert "Adds panic test to hashset delay"

This reverts commit 281502396f.

* Ported attestation_service

* Ported duties_service

* Ported fork_service

* More ports

* Port block_service

* Minor fixes

* VC compiles

* Update TODOS

* Borrow self where possible

* Ignore aggregates that are already known.

* Unify aggregator modulo logic

* Fix typo in logs

* Refactor validator subscription logic

* Avoid reproducing selection proof

* Skip HTTP call if no subscriptions

* Rename DutyAndState -> DutyAndProof

* Tidy logs

* Print root as dbg

* Fix compile errors in tests

* Fix compile error in test

* Re-Fix attestation and duties service

* Minor fixes

Co-authored-by: Paul Hauner <paul@paulhauner.com>
This commit is contained in:
Pawan Dhananjay
2020-05-07 10:29:26 +05:30
committed by GitHub
parent 3e44d7a258
commit c99b134e0f
9 changed files with 826 additions and 970 deletions

1
Cargo.lock generated
View File

@@ -5253,7 +5253,6 @@ dependencies = [
"slot_clock", "slot_clock",
"tempdir", "tempdir",
"tokio 0.2.20", "tokio 0.2.20",
"tokio-timer 0.2.13",
"tree_hash", "tree_hash",
"types", "types",
"web3", "web3",

View File

@@ -23,11 +23,10 @@ serde_json = "1.0.52"
slog = { version = "2.5.2", features = ["max_level_trace", "release_max_level_trace"] } slog = { version = "2.5.2", features = ["max_level_trace", "release_max_level_trace"] }
slog-async = "2.5.0" slog-async = "2.5.0"
slog-term = "2.5.0" slog-term = "2.5.0"
tokio = "0.2.20" tokio = {version = "0.2.20", features = ["time"]}
tokio-timer = "0.2.13"
error-chain = "0.12.2" error-chain = "0.12.2"
bincode = "1.2.1" bincode = "1.2.1"
futures = "0.3.4" futures = {version ="0.3.4", features = ["compat"]}
dirs = "2.0.2" dirs = "2.0.2"
logging = { path = "../eth2/utils/logging" } logging = { path = "../eth2/utils/logging" }
environment = { path = "../lighthouse/environment" } environment = { path = "../lighthouse/environment" }

View File

@@ -4,15 +4,14 @@ use crate::{
}; };
use environment::RuntimeContext; use environment::RuntimeContext;
use exit_future::Signal; use exit_future::Signal;
use futures::{future, Future, Stream}; use futures::{FutureExt, StreamExt};
use remote_beacon_node::{PublishStatus, RemoteBeaconNode}; use remote_beacon_node::{PublishStatus, RemoteBeaconNode};
use slog::{crit, debug, info, trace}; use slog::{crit, debug, info, trace};
use slot_clock::SlotClock; use slot_clock::SlotClock;
use std::collections::HashMap; use std::collections::HashMap;
use std::ops::Deref; use std::ops::Deref;
use std::sync::Arc; use std::sync::Arc;
use std::time::{Duration, Instant}; use tokio::time::{delay_until, interval_at, Duration, Instant};
use tokio::timer::{Delay, Interval};
use types::{Attestation, ChainSpec, CommitteeIndex, EthSpec, Slot}; use types::{Attestation, ChainSpec, CommitteeIndex, EthSpec, Slot};
/// Builds an `AttestationService`. /// Builds an `AttestationService`.
@@ -130,7 +129,8 @@ impl<T: SlotClock + 'static, E: EthSpec> AttestationService<T, E> {
.ok_or_else(|| "Unable to determine duration to next slot".to_string())?; .ok_or_else(|| "Unable to determine duration to next slot".to_string())?;
let interval = { let interval = {
Interval::new( // Note: `interval_at` panics if `slot_duration` is 0
interval_at(
Instant::now() + duration_to_next_slot + slot_duration / 3, Instant::now() + duration_to_next_slot + slot_duration / 3,
slot_duration, slot_duration,
) )
@@ -140,38 +140,28 @@ impl<T: SlotClock + 'static, E: EthSpec> AttestationService<T, E> {
let service = self.clone(); let service = self.clone();
let log_1 = log.clone(); let log_1 = log.clone();
let log_2 = log.clone(); let log_2 = log.clone();
let log_3 = log.clone();
context.executor.spawn( let interval_fut = interval.for_each(move |_| {
exit_fut
.until(
interval
.map_err(move |e| {
crit! {
log_1,
"Timer thread failed";
"error" => format!("{}", e)
}
})
.for_each(move |_| {
if let Err(e) = service.spawn_attestation_tasks(slot_duration) { if let Err(e) = service.spawn_attestation_tasks(slot_duration) {
crit!( crit!(
log_2, log_1,
"Failed to spawn attestation tasks"; "Failed to spawn attestation tasks";
"error" => e "error" => e
) )
} else { } else {
trace!( trace!(
log_2, log_1,
"Spawned attestation tasks"; "Spawned attestation tasks";
) )
} }
futures::future::ready(())
});
Ok(()) let future = futures::future::select(
}), interval_fut,
) exit_fut.map(move |_| info!(log_2, "Shutdown complete")),
.map(move |_| info!(log_3, "Shutdown complete")),
); );
tokio::task::spawn(future);
Ok(exit_signal) Ok(exit_signal)
} }
@@ -181,11 +171,11 @@ impl<T: SlotClock + 'static, E: EthSpec> AttestationService<T, E> {
fn spawn_attestation_tasks(&self, slot_duration: Duration) -> Result<(), String> { fn spawn_attestation_tasks(&self, slot_duration: Duration) -> Result<(), String> {
let service = self.clone(); let service = self.clone();
let slot = service let slot = self
.slot_clock .slot_clock
.now() .now()
.ok_or_else(|| "Failed to read slot clock".to_string())?; .ok_or_else(|| "Failed to read slot clock".to_string())?;
let duration_to_next_slot = service let duration_to_next_slot = self
.slot_clock .slot_clock
.duration_to_next_slot() .duration_to_next_slot()
.ok_or_else(|| "Unable to determine duration to next slot".to_string())?; .ok_or_else(|| "Unable to determine duration to next slot".to_string())?;
@@ -197,7 +187,7 @@ impl<T: SlotClock + 'static, E: EthSpec> AttestationService<T, E> {
.checked_sub(slot_duration / 3) .checked_sub(slot_duration / 3)
.unwrap_or_else(|| Duration::from_secs(0)); .unwrap_or_else(|| Duration::from_secs(0));
let duties_by_committee_index: HashMap<CommitteeIndex, Vec<DutyAndProof>> = service let duties_by_committee_index: HashMap<CommitteeIndex, Vec<DutyAndProof>> = self
.duties_service .duties_service
.attesters(slot) .attesters(slot)
.into_iter() .into_iter()
@@ -219,10 +209,7 @@ impl<T: SlotClock + 'static, E: EthSpec> AttestationService<T, E> {
.into_iter() .into_iter()
.for_each(|(committee_index, validator_duties)| { .for_each(|(committee_index, validator_duties)| {
// Spawn a separate task for each attestation. // Spawn a separate task for each attestation.
service tokio::task::spawn(service.clone().publish_attestations_and_aggregates(
.context
.executor
.spawn(self.clone().publish_attestations_and_aggregates(
slot, slot,
committee_index, committee_index,
validator_duties, validator_duties,
@@ -242,33 +229,40 @@ impl<T: SlotClock + 'static, E: EthSpec> AttestationService<T, E> {
/// ///
/// The given `validator_duties` should already be filtered to only contain those that match /// The given `validator_duties` should already be filtered to only contain those that match
/// `slot` and `committee_index`. Critical errors will be logged if this is not the case. /// `slot` and `committee_index`. Critical errors will be logged if this is not the case.
fn publish_attestations_and_aggregates( async fn publish_attestations_and_aggregates(
&self, self,
slot: Slot, slot: Slot,
committee_index: CommitteeIndex, committee_index: CommitteeIndex,
validator_duties: Vec<DutyAndProof>, validator_duties: Vec<DutyAndProof>,
aggregate_production_instant: Instant, aggregate_production_instant: Instant,
) -> Box<dyn Future<Item = (), Error = ()> + Send> { ) -> Result<(), ()> {
// There's not need to produce `Attestation` or `SignedAggregateAndProof` if we do not have // There's not need to produce `Attestation` or `SignedAggregateAndProof` if we do not have
// any validators for the given `slot` and `committee_index`. // any validators for the given `slot` and `committee_index`.
if validator_duties.is_empty() { if validator_duties.is_empty() {
return Box::new(future::ok(())); return Ok(());
} }
let service_1 = self.clone();
let log_1 = self.context.log.clone(); let log_1 = self.context.log.clone();
let log_2 = self.context.log.clone();
let validator_duties_1 = Arc::new(validator_duties); let validator_duties_1 = Arc::new(validator_duties);
let validator_duties_2 = validator_duties_1.clone(); let validator_duties_2 = validator_duties_1.clone();
Box::new(
// Step 1. // Step 1.
// //
// Download, sign and publish an `Attestation` for each validator. // Download, sign and publish an `Attestation` for each validator.
self.produce_and_publish_attestations(slot, committee_index, validator_duties_1) let attestation_opt = self
.and_then::<_, Box<dyn Future<Item = _, Error = _> + Send>>( .produce_and_publish_attestations(slot, committee_index, validator_duties_1)
move |attestation_opt| { .await
.map_err(move |e| {
crit!(
log_1,
"Error during attestation routine";
"error" => format!("{:?}", e),
"committee_index" => committee_index,
"slot" => slot.as_u64(),
)
})?;
if let Some(attestation) = attestation_opt { if let Some(attestation) = attestation_opt {
Box::new(
// Step 2. (Only if step 1 produced an attestation) // Step 2. (Only if step 1 produced an attestation)
// //
// First, wait until the `aggregation_production_instant` (2/3rds // First, wait until the `aggregation_production_instant` (2/3rds
@@ -279,38 +273,24 @@ impl<T: SlotClock + 'static, E: EthSpec> AttestationService<T, E> {
// Then download, sign and publish a `SignedAggregateAndProof` for each // Then download, sign and publish a `SignedAggregateAndProof` for each
// validator that is elected to aggregate for this `slot` and // validator that is elected to aggregate for this `slot` and
// `committee_index`. // `committee_index`.
Delay::new(aggregate_production_instant) delay_until(aggregate_production_instant).await;
.map_err(|e| { self.produce_and_publish_aggregates(attestation, validator_duties_2)
format!( .await
"Unable to create aggregate production delay: {:?}",
e
)
})
.and_then(move |()| {
service_1.produce_and_publish_aggregates(
attestation,
validator_duties_2,
)
}),
)
} else { } else {
// If `produce_and_publish_attestations` did not download any // If `produce_and_publish_attestations` did not download any
// attestations then there is no need to produce any // attestations then there is no need to produce any
// `SignedAggregateAndProof`. // `SignedAggregateAndProof`.
Box::new(future::ok(())) Ok(())
} }
},
)
.map_err(move |e| { .map_err(move |e| {
crit!( crit!(
log_1, log_2,
"Error during attestation routine"; "Error during attestation routine";
"error" => format!("{:?}", e), "error" => format!("{:?}", e),
"committee_index" => committee_index, "committee_index" => committee_index,
"slot" => slot.as_u64(), "slot" => slot.as_u64(),
) )
}), })
)
} }
/// Performs the first step of the attesting process: downloading `Attestation` objects, /// Performs the first step of the attesting process: downloading `Attestation` objects,
@@ -325,26 +305,25 @@ impl<T: SlotClock + 'static, E: EthSpec> AttestationService<T, E> {
/// ///
/// Only one `Attestation` is downloaded from the BN. It is then cloned and signed by each /// Only one `Attestation` is downloaded from the BN. It is then cloned and signed by each
/// validator and the list of individually-signed `Attestation` objects is returned to the BN. /// validator and the list of individually-signed `Attestation` objects is returned to the BN.
fn produce_and_publish_attestations( async fn produce_and_publish_attestations(
&self, &self,
slot: Slot, slot: Slot,
committee_index: CommitteeIndex, committee_index: CommitteeIndex,
validator_duties: Arc<Vec<DutyAndProof>>, validator_duties: Arc<Vec<DutyAndProof>>,
) -> Box<dyn Future<Item = Option<Attestation<E>>, Error = String> + Send> { ) -> Result<Option<Attestation<E>>, String> {
if validator_duties.is_empty() { if validator_duties.is_empty() {
return Box::new(future::ok(None)); return Ok(None);
} }
let service = self.clone(); let attestation = self
.beacon_node
Box::new(
self.beacon_node
.http .http
.validator() .validator()
.produce_attestation(slot, committee_index) .produce_attestation(slot, committee_index)
.map_err(|e| format!("Failed to produce attestation: {:?}", e)) .await
.and_then::<_, Box<dyn Future<Item = _, Error = _> + Send>>(move |attestation| { .map_err(|e| format!("Failed to produce attestation: {:?}", e))?;
let log = service.context.log.clone();
let log = self.context.log.clone();
// For each validator in `validator_duties`, clone the `attestation` and add // For each validator in `validator_duties`, clone the `attestation` and add
// their signature. // their signature.
@@ -353,8 +332,7 @@ impl<T: SlotClock + 'static, E: EthSpec> AttestationService<T, E> {
let signed_attestations = validator_duties let signed_attestations = validator_duties
.iter() .iter()
.filter_map(|duty| { .filter_map(|duty| {
let log = service.context.log.clone(); let log = self.context.log.clone();
// Ensure that all required fields are present in the validator duty. // Ensure that all required fields are present in the validator duty.
let (duty_slot, duty_committee_index, validator_committee_position, _) = let (duty_slot, duty_committee_index, validator_committee_position, _) =
if let Some(tuple) = duty.attestation_duties() { if let Some(tuple) = duty.attestation_duties() {
@@ -386,7 +364,7 @@ impl<T: SlotClock + 'static, E: EthSpec> AttestationService<T, E> {
let mut attestation = attestation.clone(); let mut attestation = attestation.clone();
if service if self
.validator_store .validator_store
.sign_attestation( .sign_attestation(
duty.validator_pubkey(), duty.validator_pubkey(),
@@ -415,12 +393,11 @@ impl<T: SlotClock + 'static, E: EthSpec> AttestationService<T, E> {
let num_attestations = signed_attestations.len(); let num_attestations = signed_attestations.len();
let beacon_block_root = attestation.data.beacon_block_root; let beacon_block_root = attestation.data.beacon_block_root;
Box::new( self.beacon_node
service
.beacon_node
.http .http
.validator() .validator()
.publish_attestations(signed_attestations) .publish_attestations(signed_attestations)
.await
.map_err(|e| format!("Failed to publish attestation: {:?}", e)) .map_err(|e| format!("Failed to publish attestation: {:?}", e))
.map(move |publish_status| match publish_status { .map(move |publish_status| match publish_status {
PublishStatus::Valid => info!( PublishStatus::Valid => info!(
@@ -440,13 +417,11 @@ impl<T: SlotClock + 'static, E: EthSpec> AttestationService<T, E> {
"slot" => slot.as_u64(), "slot" => slot.as_u64(),
"type" => "unaggregated", "type" => "unaggregated",
), ),
PublishStatus::Unknown => crit!( PublishStatus::Unknown => {
log, crit!(log, "Unknown condition when publishing unagg. attestation")
"Unknown condition when publishing unagg. attestation" }
),
}) })
.map(|()| Some(attestation)), .map(|()| Some(attestation))
)
} else { } else {
debug!( debug!(
log, log,
@@ -454,10 +429,8 @@ impl<T: SlotClock + 'static, E: EthSpec> AttestationService<T, E> {
"committee_index" => committee_index, "committee_index" => committee_index,
"slot" => slot.as_u64(), "slot" => slot.as_u64(),
); );
Box::new(future::ok(None)) return Ok(None);
} }
}),
)
} }
/// Performs the second step of the attesting process: downloading an aggregated `Attestation`, /// Performs the second step of the attesting process: downloading an aggregated `Attestation`,
@@ -473,21 +446,21 @@ impl<T: SlotClock + 'static, E: EthSpec> AttestationService<T, E> {
/// Only one aggregated `Attestation` is downloaded from the BN. It is then cloned and signed /// Only one aggregated `Attestation` is downloaded from the BN. It is then cloned and signed
/// by each validator and the list of individually-signed `SignedAggregateAndProof` objects is /// by each validator and the list of individually-signed `SignedAggregateAndProof` objects is
/// returned to the BN. /// returned to the BN.
fn produce_and_publish_aggregates( async fn produce_and_publish_aggregates(
&self, &self,
attestation: Attestation<E>, attestation: Attestation<E>,
validator_duties: Arc<Vec<DutyAndProof>>, validator_duties: Arc<Vec<DutyAndProof>>,
) -> impl Future<Item = (), Error = String> { ) -> Result<(), String> {
let service_1 = self.clone();
let log_1 = self.context.log.clone(); let log_1 = self.context.log.clone();
self.beacon_node let aggregated_attestation = self
.beacon_node
.http .http
.validator() .validator()
.produce_aggregate_attestation(&attestation.data) .produce_aggregate_attestation(&attestation.data)
.map_err(|e| format!("Failed to produce an aggregate attestation: {:?}", e)) .await
.and_then::<_, Box<dyn Future<Item = _, Error = _> + Send>>( .map_err(|e| format!("Failed to produce an aggregate attestation: {:?}", e))?;
move |aggregated_attestation| {
// For each validator, clone the `aggregated_attestation` and convert it into // For each validator, clone the `aggregated_attestation` and convert it into
// a `SignedAggregateAndProof` // a `SignedAggregateAndProof`
let signed_aggregate_and_proofs = validator_duties let signed_aggregate_and_proofs = validator_duties
@@ -512,9 +485,8 @@ impl<T: SlotClock + 'static, E: EthSpec> AttestationService<T, E> {
return None; return None;
} }
if let Some(signed_aggregate_and_proof) = service_1 if let Some(signed_aggregate_and_proof) =
.validator_store self.validator_store.produce_signed_aggregate_and_proof(
.produce_signed_aggregate_and_proof(
pubkey, pubkey,
validator_index, validator_index,
aggregated_attestation.clone(), aggregated_attestation.clone(),
@@ -534,14 +506,14 @@ impl<T: SlotClock + 'static, E: EthSpec> AttestationService<T, E> {
if let Some(first) = signed_aggregate_and_proofs.first().cloned() { if let Some(first) = signed_aggregate_and_proofs.first().cloned() {
let attestation = first.message.aggregate; let attestation = first.message.aggregate;
Box::new(service_1 let publish_status = self
.beacon_node .beacon_node
.http .http
.validator() .validator()
.publish_aggregate_and_proof(signed_aggregate_and_proofs) .publish_aggregate_and_proof(signed_aggregate_and_proofs)
.map(|publish_status| (attestation, publish_status)) .await
.map_err(|e| format!("Failed to publish aggregate and proofs: {:?}", e)) .map_err(|e| format!("Failed to publish aggregate and proofs: {:?}", e))?;
.map(move |(attestation, publish_status)| match publish_status { match publish_status {
PublishStatus::Valid => info!( PublishStatus::Valid => info!(
log_1, log_1,
"Successfully published attestations"; "Successfully published attestations";
@@ -562,7 +534,8 @@ impl<T: SlotClock + 'static, E: EthSpec> AttestationService<T, E> {
PublishStatus::Unknown => { PublishStatus::Unknown => {
crit!(log_1, "Unknown condition when publishing agg. attestation") crit!(log_1, "Unknown condition when publishing agg. attestation")
} }
})) };
Ok(())
} else { } else {
debug!( debug!(
log_1, log_1,
@@ -570,10 +543,8 @@ impl<T: SlotClock + 'static, E: EthSpec> AttestationService<T, E> {
"committee_index" => attestation.data.index, "committee_index" => attestation.data.index,
"slot" => attestation.data.slot.as_u64(), "slot" => attestation.data.slot.as_u64(),
); );
Box::new(future::ok(())) Ok(())
} }
},
)
} }
} }
@@ -591,16 +562,14 @@ mod tests {
let state_1 = Arc::new(RwLock::new(in_the_past)); let state_1 = Arc::new(RwLock::new(in_the_past));
let state_2 = state_1.clone(); let state_2 = state_1.clone();
let future = Delay::new(in_the_past) let future = delay_until(in_the_past).map(move |()| *state_1.write() = Instant::now());
.map_err(|_| panic!("Failed to create duration"))
.map(move |()| *state_1.write() = Instant::now());
let mut runtime = RuntimeBuilder::new() let mut runtime = RuntimeBuilder::new()
.core_threads(1) .core_threads(1)
.build() .build()
.expect("failed to start runtime"); .expect("failed to start runtime");
runtime.block_on(future).expect("failed to complete future"); runtime.block_on(future);
assert!( assert!(
*state_2.read() > in_the_past, *state_2.read() > in_the_past,

View File

@@ -1,15 +1,14 @@
use crate::{duties_service::DutiesService, validator_store::ValidatorStore}; use crate::{duties_service::DutiesService, validator_store::ValidatorStore};
use environment::RuntimeContext; use environment::RuntimeContext;
use exit_future::Signal; use exit_future::Signal;
use futures::{stream, Future, IntoFuture, Stream}; use futures::{FutureExt, StreamExt};
use remote_beacon_node::{PublishStatus, RemoteBeaconNode}; use remote_beacon_node::{PublishStatus, RemoteBeaconNode};
use slog::{crit, error, info, trace}; use slog::{crit, error, info, trace};
use slot_clock::SlotClock; use slot_clock::SlotClock;
use std::ops::Deref; use std::ops::Deref;
use std::sync::Arc; use std::sync::Arc;
use std::time::{Duration, Instant}; use tokio::time::{interval_at, Duration, Instant};
use tokio::timer::Interval; use types::{ChainSpec, EthSpec, PublicKey, Slot};
use types::{ChainSpec, EthSpec};
/// Delay this period of time after the slot starts. This allows the node to process the new slot. /// Delay this period of time after the slot starts. This allows the node to process the new slot.
const TIME_DELAY_FROM_SLOT: Duration = Duration::from_millis(100); const TIME_DELAY_FROM_SLOT: Duration = Duration::from_millis(100);
@@ -124,7 +123,8 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockService<T, E> {
let interval = { let interval = {
let slot_duration = Duration::from_millis(spec.milliseconds_per_slot); let slot_duration = Duration::from_millis(spec.milliseconds_per_slot);
Interval::new( // Note: interval_at panics if slot_duration = 0
interval_at(
Instant::now() + duration_to_next_slot + TIME_DELAY_FROM_SLOT, Instant::now() + duration_to_next_slot + TIME_DELAY_FROM_SLOT,
slot_duration, slot_duration,
) )
@@ -132,42 +132,29 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockService<T, E> {
let (exit_signal, exit_fut) = exit_future::signal(); let (exit_signal, exit_fut) = exit_future::signal();
let service = self.clone(); let service = self.clone();
let log_1 = log.clone(); let interval_fut = interval.for_each(move |_| {
let log_2 = log.clone(); let _ = service.clone().do_update();
futures::future::ready(())
});
self.context.executor.spawn( let future = futures::future::select(
exit_fut interval_fut,
.until( exit_fut.map(move |_| info!(log, "Shutdown complete")),
interval
.map_err(move |e| {
crit! {
log_1,
"Timer thread failed";
"error" => format!("{}", e)
}
})
.for_each(move |_| service.clone().do_update().then(|_| Ok(()))),
)
.map(move |_| info!(log_2, "Shutdown complete")),
); );
tokio::task::spawn(future);
Ok(exit_signal) Ok(exit_signal)
} }
/// Attempt to produce a block for any block producers in the `ValidatorStore`. /// Attempt to produce a block for any block producers in the `ValidatorStore`.
fn do_update(self) -> impl Future<Item = (), Error = ()> { fn do_update(&self) -> Result<(), ()> {
let service = self.clone();
let log_1 = self.context.log.clone(); let log_1 = self.context.log.clone();
let log_2 = self.context.log.clone(); let log_2 = self.context.log.clone();
self.slot_clock let slot = self.slot_clock.now().ok_or_else(move || {
.now()
.ok_or_else(move || {
crit!(log_1, "Duties manager failed to read slot clock"); crit!(log_1, "Duties manager failed to read slot clock");
}) })?;
.into_future() let iter = self.duties_service.block_producers(slot).into_iter();
.and_then(move |slot| {
let iter = service.duties_service.block_producers(slot).into_iter();
if iter.len() == 0 { if iter.len() == 0 {
trace!( trace!(
@@ -185,82 +172,62 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockService<T, E> {
) )
} }
stream::unfold(iter, move |mut block_producers| { // TODO: check if the logic is same as stream::unfold version
let log_1 = service.context.log.clone(); let _ = futures::stream::iter(iter).for_each(|validator_pubkey| async {
let log_2 = service.context.log.clone(); match self.publish_block(slot, validator_pubkey).await {
let service_1 = service.clone(); Ok(()) => (),
let service_2 = service.clone(); Err(e) => crit!(
let service_3 = service.clone(); log_2,
"Error whilst producing block";
"message" => e
),
}
});
block_producers.next().map(move |validator_pubkey| { Ok(())
service_1 }
/// Produce a block at the given slot for validator_pubkey
async fn publish_block(&self, slot: Slot, validator_pubkey: PublicKey) -> Result<(), String> {
let log_1 = self.context.log.clone();
let randao_reveal = self
.validator_store .validator_store
.randao_reveal(&validator_pubkey, slot.epoch(E::slots_per_epoch())) .randao_reveal(&validator_pubkey, slot.epoch(E::slots_per_epoch()))
.ok_or_else(|| "Unable to produce randao reveal".to_string()) .ok_or_else(|| "Unable to produce randao reveal".to_string())?;
.into_future() let block = self
.and_then(move |randao_reveal| {
service_1
.beacon_node .beacon_node
.http .http
.validator() .validator()
.produce_block(slot, randao_reveal) .produce_block(slot, randao_reveal)
.map_err(|e| { .await
format!( .map_err(|e| format!("Error from beacon node when producing block: {:?}", e))?;
"Error from beacon node when producing block: {:?}", let signed_block = self
e
)
})
})
.and_then(move |block| {
service_2
.validator_store .validator_store
.sign_block(&validator_pubkey, block) .sign_block(&validator_pubkey, block)
.ok_or_else(|| "Unable to sign block".to_string()) .ok_or_else(|| "Unable to sign block".to_string())?;
}) let publish_status = self
.and_then(move |block| {
service_3
.beacon_node .beacon_node
.http .http
.validator() .validator()
.publish_block(block.clone()) .publish_block(signed_block.clone())
.map(|publish_status| (block, publish_status)) .await
.map_err(|e| { .map_err(|e| format!("Error from beacon node when publishing block: {:?}", e))?;
format!( match publish_status {
"Error from beacon node when publishing block: {:?}",
e
)
})
})
.map(move |(block, publish_status)| match publish_status {
PublishStatus::Valid => info!( PublishStatus::Valid => info!(
log_1, log_1,
"Successfully published block"; "Successfully published block";
"deposits" => block.message.body.deposits.len(), "deposits" => signed_block.message.body.deposits.len(),
"attestations" => block.message.body.attestations.len(), "attestations" => signed_block.message.body.attestations.len(),
"slot" => block.slot().as_u64(), "slot" => signed_block.slot().as_u64(),
), ),
PublishStatus::Invalid(msg) => crit!( PublishStatus::Invalid(msg) => crit!(
log_1, log_1,
"Published block was invalid"; "Published block was invalid";
"message" => msg, "message" => msg,
"slot" => block.slot().as_u64(), "slot" => signed_block.slot().as_u64(),
), ),
PublishStatus::Unknown => { PublishStatus::Unknown => crit!(log_1, "Unknown condition when publishing block"),
crit!(log_1, "Unknown condition when publishing block")
} }
}) Ok(())
.map_err(move |e| {
crit!(
log_2,
"Error whilst producing block";
"message" => e
)
})
.then(|_| Ok(((), block_producers)))
})
})
.collect()
.map(|_| ())
})
} }
} }

View File

@@ -1,18 +1,17 @@
use crate::validator_store::ValidatorStore; use crate::validator_store::ValidatorStore;
use environment::RuntimeContext; use environment::RuntimeContext;
use exit_future::Signal; use exit_future::Signal;
use futures::{future, Future, IntoFuture, Stream}; use futures::{FutureExt, StreamExt};
use parking_lot::RwLock; use parking_lot::RwLock;
use remote_beacon_node::{PublishStatus, RemoteBeaconNode}; use remote_beacon_node::{PublishStatus, RemoteBeaconNode};
use rest_types::{ValidatorDuty, ValidatorDutyBytes, ValidatorSubscription}; use rest_types::{ValidatorDuty, ValidatorDutyBytes, ValidatorSubscription};
use slog::{crit, debug, error, info, trace, warn}; use slog::{debug, error, info, trace, warn};
use slot_clock::SlotClock; use slot_clock::SlotClock;
use std::collections::HashMap; use std::collections::HashMap;
use std::convert::TryInto; use std::convert::TryInto;
use std::ops::Deref; use std::ops::Deref;
use std::sync::Arc; use std::sync::Arc;
use std::time::{Duration, Instant}; use tokio::time::{interval_at, Duration, Instant};
use tokio::timer::Interval;
use types::{ChainSpec, CommitteeIndex, Epoch, EthSpec, PublicKey, SelectionProof, Slot}; use types::{ChainSpec, CommitteeIndex, Epoch, EthSpec, PublicKey, SelectionProof, Slot};
/// Delay this period of time after the slot starts. This allows the node to process the new slot. /// Delay this period of time after the slot starts. This allows the node to process the new slot.
@@ -440,54 +439,53 @@ impl<T: SlotClock + 'static, E: EthSpec> DutiesService<T, E> {
let interval = { let interval = {
let slot_duration = Duration::from_millis(spec.milliseconds_per_slot); let slot_duration = Duration::from_millis(spec.milliseconds_per_slot);
Interval::new( // Note: `interval_at` panics if `slot_duration` is 0
interval_at(
Instant::now() + duration_to_next_slot + TIME_DELAY_FROM_SLOT, Instant::now() + duration_to_next_slot + TIME_DELAY_FROM_SLOT,
slot_duration, slot_duration,
) )
}; };
let (exit_signal, exit_fut) = exit_future::signal(); let (exit_signal, exit_fut) = exit_future::signal();
let service = self.clone(); let service_1 = self.clone();
let log_1 = log.clone(); let service_2 = self.clone();
let log_2 = log.clone();
// Run an immediate update before starting the updater service. // Run an immediate update before starting the updater service.
self.context.executor.spawn(service.clone().do_update()); tokio::task::spawn(service_1.do_update());
self.context.executor.spawn( let interval_fut = interval.for_each(move |_| {
exit_fut let _ = service_2.clone().do_update();
.until( futures::future::ready(())
interval });
.map_err(move |e| {
crit! { let future = futures::future::select(
log_1, interval_fut,
"Timer thread failed"; exit_fut.map(move |_| info!(log, "Shutdown complete")),
"error" => format!("{}", e)
}
})
.for_each(move |_| service.clone().do_update().then(|_| Ok(()))),
)
.map(move |_| info!(log_2, "Shutdown complete")),
); );
tokio::task::spawn(future);
Ok(exit_signal) Ok(exit_signal)
} }
/// Attempt to download the duties of all managed validators for this epoch and the next. /// Attempt to download the duties of all managed validators for this epoch and the next.
fn do_update(&self) -> impl Future<Item = (), Error = ()> { async fn do_update(self) -> Result<(), ()> {
let log_1 = self.context.log.clone();
let log_2 = self.context.log.clone();
let log_3 = self.context.log.clone();
let log = self.context.log.clone();
let service_1 = self.clone(); let service_1 = self.clone();
let service_2 = self.clone(); let service_2 = self.clone();
let service_3 = self.clone(); let service_3 = self.clone();
let service_4 = self.clone(); let service_4 = self.clone();
let log_1 = self.context.log.clone(); let service_5 = self.clone();
let log_2 = self.context.log.clone();
self.slot_clock let current_epoch = service_1
.slot_clock
.now() .now()
.ok_or_else(move || { .ok_or_else(move || {
error!(log_1, "Duties manager failed to read slot clock"); error!(log_1, "Duties manager failed to read slot clock");
}) })
.into_future()
.map(move |slot| { .map(move |slot| {
let epoch = slot.epoch(E::slots_per_epoch()); let epoch = slot.epoch(E::slots_per_epoch());
@@ -501,45 +499,36 @@ impl<T: SlotClock + 'static, E: EthSpec> DutiesService<T, E> {
"current_epoch" => epoch.as_u64(), "current_epoch" => epoch.as_u64(),
); );
service_1.store.prune(prune_below); self.store.prune(prune_below);
} }
epoch epoch
}) })?;
.and_then(move |epoch| {
let log = service_2.context.log.clone();
service_2 let beacon_head_epoch = service_2
.beacon_node .beacon_node
.http .http
.beacon() .beacon()
.get_head() .get_head()
.map(move |head| (epoch, head.slot.epoch(E::slots_per_epoch()))) .await
.map(|head| head.slot.epoch(E::slots_per_epoch()))
.map_err(move |e| { .map_err(move |e| {
error!( error!(
log, log_3,
"Failed to contact beacon node"; "Failed to contact beacon node";
"error" => format!("{:?}", e) "error" => format!("{:?}", e)
) )
}) })?;
})
.and_then(move |(current_epoch, beacon_head_epoch)| {
let log = service_3.context.log.clone();
let future: Box<dyn Future<Item = (), Error = ()> + Send> = if beacon_head_epoch + 1 if beacon_head_epoch + 1 < current_epoch && !service_3.allow_unsynced_beacon_node {
< current_epoch
&& !service_3.allow_unsynced_beacon_node
{
error!( error!(
log, log,
"Beacon node is not synced"; "Beacon node is not synced";
"node_head_epoch" => format!("{}", beacon_head_epoch), "node_head_epoch" => format!("{}", beacon_head_epoch),
"current_epoch" => format!("{}", current_epoch), "current_epoch" => format!("{}", current_epoch),
); );
Box::new(future::ok(()))
} else { } else {
Box::new(service_3.update_epoch(current_epoch).then(move |result| { let result = service_4.clone().update_epoch(current_epoch).await;
if let Err(e) = result { if let Err(e) = result {
error!( error!(
log, log,
@@ -548,38 +537,33 @@ impl<T: SlotClock + 'static, E: EthSpec> DutiesService<T, E> {
); );
} }
let log = service_4.context.log.clone(); service_5
service_4.update_epoch(current_epoch + 1).map_err(move |e| { .clone()
.update_epoch(current_epoch + 1)
.await
.map_err(move |e| {
error!( error!(
log, log,
"Failed to get next epoch duties"; "Failed to get next epoch duties";
"http_error" => format!("{:?}", e) "http_error" => format!("{:?}", e)
); );
}) })?;
}))
}; };
Ok(())
future
})
.map(|_| ())
} }
/// Attempt to download the duties of all managed validators for the given `epoch`. /// Attempt to download the duties of all managed validators for the given `epoch`.
fn update_epoch(self, epoch: Epoch) -> impl Future<Item = (), Error = String> { async fn update_epoch(self, epoch: Epoch) -> Result<(), String> {
let service_1 = self.clone(); let pubkeys = self.validator_store.voting_pubkeys();
let service_2 = self.clone(); let all_duties = self
let service_3 = self;
let pubkeys = service_1.validator_store.voting_pubkeys();
service_1
.beacon_node .beacon_node
.http .http
.validator() .validator()
.get_duties(epoch, pubkeys.as_slice()) .get_duties(epoch, pubkeys.as_slice())
.map(move |all_duties| (epoch, all_duties)) .await
.map_err(move |e| format!("Failed to get duties for epoch {}: {:?}", epoch, e)) .map_err(move |e| format!("Failed to get duties for epoch {}: {:?}", epoch, e))?;
.and_then(move |(epoch, all_duties)| {
let log = service_2.context.log.clone(); let log = self.context.log.clone();
let mut new_validator = 0; let mut new_validator = 0;
let mut new_epoch = 0; let mut new_epoch = 0;
@@ -589,26 +573,37 @@ impl<T: SlotClock + 'static, E: EthSpec> DutiesService<T, E> {
// For each of the duties, attempt to insert them into our local store and build a // For each of the duties, attempt to insert them into our local store and build a
// list of new or changed selections proofs for any aggregating validators. // list of new or changed selections proofs for any aggregating validators.
let validator_subscriptions = all_duties.into_iter().filter_map(|remote_duties| { let validator_subscriptions = all_duties
.into_iter()
.filter_map(|remote_duties| {
// Convert the remote duties into our local representation. // Convert the remote duties into our local representation.
let duties: DutyAndProof = remote_duties let duties: DutyAndProof = remote_duties
.try_into() .try_into()
.map_err(|e| error!( .map_err(|e| {
error!(
log, log,
"Unable to convert remote duties"; "Unable to convert remote duties";
"error" => e "error" => e
)) )
})
.ok()?; .ok()?;
// Attempt to update our local store. // Attempt to update our local store.
let outcome = service_2 let outcome = self
.store .store
.insert(epoch, duties.clone(), E::slots_per_epoch(), &service_2.validator_store) .insert(
.map_err(|e| error!( epoch,
duties.clone(),
E::slots_per_epoch(),
&self.validator_store,
)
.map_err(|e| {
error!(
log, log,
"Unable to store duties"; "Unable to store duties";
"error" => e "error" => e
)) )
})
.ok()?; .ok()?;
match &outcome { match &outcome {
@@ -638,7 +633,8 @@ impl<T: SlotClock + 'static, E: EthSpec> DutiesService<T, E> {
} else { } else {
None None
} }
}).collect::<Vec<_>>(); })
.collect::<Vec<_>>();
if invalid > 0 { if invalid > 0 {
error!( error!(
@@ -667,53 +663,40 @@ impl<T: SlotClock + 'static, E: EthSpec> DutiesService<T, E> {
) )
} }
Ok(validator_subscriptions) let log = self.context.log.clone();
})
.and_then::<_, Box<dyn Future<Item = _, Error = _> + Send>>(move |validator_subscriptions| {
let log = service_3.context.log.clone();
let count = validator_subscriptions.len(); let count = validator_subscriptions.len();
if count == 0 { if count == 0 {
debug!( debug!(log, "No new subscriptions required");
log,
"No new subscriptions required"
);
Box::new(future::ok(())) Ok(())
} else { } else {
Box::new(service_3.beacon_node self.beacon_node
.http .http
.validator() .validator()
.subscribe(validator_subscriptions) .subscribe(validator_subscriptions)
.await
.map_err(|e| format!("Failed to subscribe validators: {:?}", e)) .map_err(|e| format!("Failed to subscribe validators: {:?}", e))
.map(move |status| { .map(move |status| {
match status { match status {
PublishStatus::Valid => { PublishStatus::Valid => debug!(
debug!(
log, log,
"Successfully subscribed validators"; "Successfully subscribed validators";
"count" => count "count" => count
) ),
}, PublishStatus::Unknown => error!(
PublishStatus::Unknown => {
error!(
log, log,
"Unknown response from subscription"; "Unknown response from subscription";
) ),
}, PublishStatus::Invalid(e) => error!(
PublishStatus::Invalid(e) => {
error!(
log, log,
"Failed to subscribe validator"; "Failed to subscribe validator";
"error" => e "error" => e
) ),
},
}; };
}))
}
}) })
} }
}
} }
/// Returns `true` if the slots in the `duties` are from the given `epoch` /// Returns `true` if the slots in the `duties` are from the given `epoch`

View File

@@ -1,14 +1,13 @@
use environment::RuntimeContext; use environment::RuntimeContext;
use exit_future::Signal; use exit_future::Signal;
use futures::{Future, Stream}; use futures::{FutureExt, StreamExt};
use parking_lot::RwLock; use parking_lot::RwLock;
use remote_beacon_node::RemoteBeaconNode; use remote_beacon_node::RemoteBeaconNode;
use slog::{crit, info, trace}; use slog::{info, trace};
use slot_clock::SlotClock; use slot_clock::SlotClock;
use std::ops::Deref; use std::ops::Deref;
use std::sync::Arc; use std::sync::Arc;
use std::time::{Duration, Instant}; use tokio::time::{interval_at, Duration, Instant};
use tokio::timer::Interval;
use types::{ChainSpec, EthSpec, Fork}; use types::{ChainSpec, EthSpec, Fork};
/// Delay this period of time after the slot starts. This allows the node to process the new slot. /// Delay this period of time after the slot starts. This allows the node to process the new slot.
@@ -111,51 +110,46 @@ impl<T: SlotClock + 'static, E: EthSpec> ForkService<T, E> {
let interval = { let interval = {
let slot_duration = Duration::from_millis(spec.milliseconds_per_slot); let slot_duration = Duration::from_millis(spec.milliseconds_per_slot);
Interval::new( // Note: interval_at panics if `slot_duration * E::slots_per_epoch()` = 0
interval_at(
Instant::now() + duration_to_next_epoch + TIME_DELAY_FROM_SLOT, Instant::now() + duration_to_next_epoch + TIME_DELAY_FROM_SLOT,
slot_duration * E::slots_per_epoch() as u32, slot_duration * E::slots_per_epoch() as u32,
) )
}; };
let (exit_signal, exit_fut) = exit_future::signal(); let (exit_signal, exit_fut) = exit_future::signal();
let service = self.clone();
let log_1 = log.clone();
let log_2 = log.clone();
// Run an immediate update before starting the updater service. // Run an immediate update before starting the updater service.
self.context.executor.spawn(service.clone().do_update()); let service_1 = self.clone();
let service_2 = self.clone();
tokio::task::spawn(service_1.do_update());
self.context.executor.spawn( let interval_fut = interval.for_each(move |_| {
exit_fut let _ = service_2.clone().do_update();
.until( futures::future::ready(())
interval });
.map_err(move |e| {
crit! { let future = futures::future::select(
log_1, interval_fut,
"Timer thread failed"; exit_fut.map(move |_| info!(log, "Shutdown complete")),
"error" => format!("{}", e)
}
})
.for_each(move |_| service.do_update().then(|_| Ok(()))),
)
.map(move |_| info!(log_2, "Shutdown complete")),
); );
tokio::task::spawn(future);
Ok(exit_signal) Ok(exit_signal)
} }
/// Attempts to download the `Fork` from the server. /// Attempts to download the `Fork` from the server.
fn do_update(&self) -> impl Future<Item = (), Error = ()> { async fn do_update(self) -> Result<(), ()> {
let service_1 = self.clone(); let log_1 = self.context.log.clone();
let log_1 = service_1.context.log.clone(); let log_2 = self.context.log.clone();
let log_2 = service_1.context.log.clone(); let _ = self
.inner
self.inner
.beacon_node .beacon_node
.http .http
.beacon() .beacon()
.get_fork() .get_fork()
.map(move |fork| *(service_1.fork.write()) = Some(fork)) .await
.map(move |fork| *(self.fork.write()) = Some(fork))
.map(move |_| trace!(log_1, "Fork update success")) .map(move |_| trace!(log_1, "Fork update success"))
.map_err(move |e| { .map_err(move |e| {
trace!( trace!(
@@ -163,9 +157,9 @@ impl<T: SlotClock + 'static, E: EthSpec> ForkService<T, E> {
"Fork update failed"; "Fork update failed";
"error" => format!("Error retrieving fork: {:?}", e) "error" => format!("Error retrieving fork: {:?}", e)
) )
}) });
// Returning an error will stop the interval. This is not desired, a single failure // Returning an error will stop the interval. This is not desired, a single failure
// should not stop all future attempts. // should not stop all future attempts.
.then(|_| Ok(())) Ok(())
} }
} }

View File

@@ -19,18 +19,13 @@ use duties_service::{DutiesService, DutiesServiceBuilder};
use environment::RuntimeContext; use environment::RuntimeContext;
use exit_future::Signal; use exit_future::Signal;
use fork_service::{ForkService, ForkServiceBuilder}; use fork_service::{ForkService, ForkServiceBuilder};
use futures::{
future::{self, loop_fn, Loop},
Future, IntoFuture,
};
use notifier::spawn_notifier; use notifier::spawn_notifier;
use remote_beacon_node::RemoteBeaconNode; use remote_beacon_node::RemoteBeaconNode;
use slog::{error, info, Logger}; use slog::{error, info, Logger};
use slot_clock::SlotClock; use slot_clock::SlotClock;
use slot_clock::SystemTimeSlotClock; use slot_clock::SystemTimeSlotClock;
use std::time::{Duration, Instant};
use std::time::{SystemTime, UNIX_EPOCH}; use std::time::{SystemTime, UNIX_EPOCH};
use tokio::timer::Delay; use tokio::time::{delay_for, Duration};
use types::EthSpec; use types::EthSpec;
use validator_store::ValidatorStore; use validator_store::ValidatorStore;
@@ -52,22 +47,18 @@ pub struct ProductionValidatorClient<T: EthSpec> {
impl<T: EthSpec> ProductionValidatorClient<T> { impl<T: EthSpec> ProductionValidatorClient<T> {
/// Instantiates the validator client, _without_ starting the timers to trigger block /// Instantiates the validator client, _without_ starting the timers to trigger block
/// and attestation production. /// and attestation production.
pub fn new_from_cli( pub async fn new_from_cli(
context: RuntimeContext<T>, context: RuntimeContext<T>,
cli_args: &ArgMatches, cli_args: &ArgMatches<'_>,
) -> impl Future<Item = Self, Error = String> { ) -> Result<Self, String> {
Config::from_cli(&cli_args) let config = Config::from_cli(&cli_args)
.into_future() .map_err(|e| format!("Unable to initialize config: {}", e))?;
.map_err(|e| format!("Unable to initialize config: {}", e)) Self::new(context, config).await
.and_then(|config| Self::new(context, config))
} }
/// Instantiates the validator client, _without_ starting the timers to trigger block /// Instantiates the validator client, _without_ starting the timers to trigger block
/// and attestation production. /// and attestation production.
pub fn new( pub async fn new(mut context: RuntimeContext<T>, config: Config) -> Result<Self, String> {
mut context: RuntimeContext<T>,
config: Config,
) -> impl Future<Item = Self, Error = String> {
let log_1 = context.log.clone(); let log_1 = context.log.clone();
let log_2 = context.log.clone(); let log_2 = context.log.clone();
let log_3 = context.log.clone(); let log_3 = context.log.clone();
@@ -80,32 +71,27 @@ impl<T: EthSpec> ProductionValidatorClient<T> {
"datadir" => format!("{:?}", config.data_dir), "datadir" => format!("{:?}", config.data_dir),
); );
let beacon_node =
RemoteBeaconNode::new_with_timeout(config.http_server.clone(), HTTP_TIMEOUT) RemoteBeaconNode::new_with_timeout(config.http_server.clone(), HTTP_TIMEOUT)
.map_err(|e| format!("Unable to init beacon node http client: {}", e)) .map_err(|e| format!("Unable to init beacon node http client: {}", e))?;
.into_future()
.and_then(move |beacon_node| wait_for_node(beacon_node, log_2)) // TODO: check if all logs in wait_for_node are produed while awaiting
.and_then(|beacon_node| { let beacon_node = wait_for_node(beacon_node, log_2).await?;
beacon_node let eth2_config = beacon_node
.http .http
.spec() .spec()
.get_eth2_config() .get_eth2_config()
.map(|eth2_config| (beacon_node, eth2_config)) .await
.map_err(|e| format!("Unable to read eth2 config from beacon node: {:?}", e)) .map_err(|e| format!("Unable to read eth2 config from beacon node: {:?}", e))?;
}) let genesis_time = beacon_node
.and_then(|(beacon_node, eth2_config)| {
beacon_node
.http .http
.beacon() .beacon()
.get_genesis_time() .get_genesis_time()
.map(|genesis_time| (beacon_node, eth2_config, genesis_time)) .await
.map_err(|e| format!("Unable to read genesis time from beacon node: {:?}", e)) .map_err(|e| format!("Unable to read genesis time from beacon node: {:?}", e))?;
}) let now = SystemTime::now()
.and_then(move |(beacon_node, remote_eth2_config, genesis_time)| {
SystemTime::now()
.duration_since(UNIX_EPOCH) .duration_since(UNIX_EPOCH)
.into_future() .map_err(|e| format!("Unable to read system time: {:?}", e))?;
.map_err(|e| format!("Unable to read system time: {:?}", e))
.and_then::<_, Box<dyn Future<Item = _, Error = _> + Send>>(move |now| {
let log = log_3.clone(); let log = log_3.clone();
let genesis = Duration::from_secs(genesis_time); let genesis = Duration::from_secs(genesis_time);
@@ -121,53 +107,32 @@ impl<T: EthSpec> ProductionValidatorClient<T> {
"seconds_to_wait" => (genesis - now).as_secs() "seconds_to_wait" => (genesis - now).as_secs()
); );
Box::new( delay_for(genesis - now).await
Delay::new(Instant::now() + (genesis - now))
.map_err(|e| {
format!("Unable to create genesis wait delay: {:?}", e)
})
.map(move |_| (beacon_node, remote_eth2_config, genesis_time)),
)
} else { } else {
info!( info!(
log, log,
"Genesis has already occurred"; "Genesis has already occurred";
"seconds_ago" => (now - genesis).as_secs() "seconds_ago" => (now - genesis).as_secs()
); );
Box::new(future::ok((beacon_node, remote_eth2_config, genesis_time)))
} }
}) let genesis_validators_root = beacon_node
})
.and_then(|(beacon_node, eth2_config, genesis_time)| {
beacon_node
.http .http
.beacon() .beacon()
.get_genesis_validators_root() .get_genesis_validators_root()
.map(move |genesis_validators_root| { .await
(
beacon_node,
eth2_config,
genesis_time,
genesis_validators_root,
)
})
.map_err(|e| { .map_err(|e| {
format!( format!(
"Unable to read genesis validators root from beacon node: {:?}", "Unable to read genesis validators root from beacon node: {:?}",
e e
) )
}) })?;
})
.and_then(
move |(beacon_node, remote_eth2_config, genesis_time, genesis_validators_root)| {
let log = log_4.clone(); let log = log_4.clone();
// Do not permit a connection to a beacon node using different spec constants. // Do not permit a connection to a beacon node using different spec constants.
if context.eth2_config.spec_constants != remote_eth2_config.spec_constants { if context.eth2_config.spec_constants != eth2_config.spec_constants {
return Err(format!( return Err(format!(
"Beacon node is using an incompatible spec. Got {}, expected {}", "Beacon node is using an incompatible spec. Got {}, expected {}",
remote_eth2_config.spec_constants, context.eth2_config.spec_constants eth2_config.spec_constants, context.eth2_config.spec_constants
)); ));
} }
@@ -178,7 +143,7 @@ impl<T: EthSpec> ProductionValidatorClient<T> {
// different `SLOTS_PER_EPOCH` variable. This could result in slashable messages being // different `SLOTS_PER_EPOCH` variable. This could result in slashable messages being
// produced. We are safe from this because `SLOTS_PER_EPOCH` is a type-level constant // produced. We are safe from this because `SLOTS_PER_EPOCH` is a type-level constant
// for Lighthouse. // for Lighthouse.
context.eth2_config = remote_eth2_config; context.eth2_config = eth2_config;
let slot_clock = SystemTimeSlotClock::new( let slot_clock = SystemTimeSlotClock::new(
context.eth2_config.spec.genesis_slot, context.eth2_config.spec.genesis_slot,
@@ -192,8 +157,7 @@ impl<T: EthSpec> ProductionValidatorClient<T> {
.runtime_context(context.service_context("fork".into())) .runtime_context(context.service_context("fork".into()))
.build()?; .build()?;
let validator_store: ValidatorStore<SystemTimeSlotClock, T> = let validator_store: ValidatorStore<SystemTimeSlotClock, T> = match &config.key_source {
match &config.key_source {
// Load pre-existing validators from the data dir. // Load pre-existing validators from the data dir.
// //
// Use the `account_manager` to generate these files. // Use the `account_manager` to generate these files.
@@ -207,15 +171,13 @@ impl<T: EthSpec> ProductionValidatorClient<T> {
// Generate ephemeral insecure keypairs for testing purposes. // Generate ephemeral insecure keypairs for testing purposes.
// //
// Do not use in production. // Do not use in production.
KeySource::InsecureKeypairs(indices) => { KeySource::InsecureKeypairs(indices) => ValidatorStore::insecure_ephemeral_validators(
ValidatorStore::insecure_ephemeral_validators(
&indices, &indices,
genesis_validators_root, genesis_validators_root,
context.eth2_config.spec.clone(), context.eth2_config.spec.clone(),
fork_service.clone(), fork_service.clone(),
log.clone(), log.clone(),
)? )?,
}
}; };
info!( info!(
@@ -256,8 +218,6 @@ impl<T: EthSpec> ProductionValidatorClient<T> {
attestation_service, attestation_service,
exit_signals: vec![], exit_signals: vec![],
}) })
},
)
} }
pub fn start_service(&mut self) -> Result<(), String> { pub fn start_service(&mut self) -> Result<(), String> {
@@ -298,22 +258,22 @@ impl<T: EthSpec> ProductionValidatorClient<T> {
/// Request the version from the node, looping back and trying again on failure. Exit once the node /// Request the version from the node, looping back and trying again on failure. Exit once the node
/// has been contacted. /// has been contacted.
fn wait_for_node<E: EthSpec>( async fn wait_for_node<E: EthSpec>(
beacon_node: RemoteBeaconNode<E>, beacon_node: RemoteBeaconNode<E>,
log: Logger, log: Logger,
) -> impl Future<Item = RemoteBeaconNode<E>, Error = String> { ) -> Result<RemoteBeaconNode<E>, String> {
// Try to get the version string from the node, looping until success is returned. // Try to get the version string from the node, looping until success is returned.
loop_fn(beacon_node.clone(), move |beacon_node| { loop {
let log = log.clone(); let log = log.clone();
beacon_node let result = beacon_node
.clone() .clone()
.http .http
.node() .node()
.get_version() .get_version()
.map_err(|e| format!("{:?}", e)) .await
.then(move |result| { .map_err(|e| format!("{:?}", e));
let future: Box<dyn Future<Item = Loop<_, _>, Error = String> + Send> = match result
{ match result {
Ok(version) => { Ok(version) => {
info!( info!(
log, log,
@@ -321,7 +281,7 @@ fn wait_for_node<E: EthSpec>(
"version" => version, "version" => version,
); );
Box::new(future::ok(Loop::Break(beacon_node))) return Ok(beacon_node);
} }
Err(e) => { Err(e) => {
error!( error!(
@@ -329,17 +289,8 @@ fn wait_for_node<E: EthSpec>(
"Unable to connect to beacon node"; "Unable to connect to beacon node";
"error" => format!("{:?}", e), "error" => format!("{:?}", e),
); );
delay_for(RETRY_DELAY).await;
Box::new( }
Delay::new(Instant::now() + RETRY_DELAY) }
.map_err(|e| format!("Failed to trigger delay: {:?}", e))
.and_then(|_| future::ok(Loop::Continue(beacon_node))),
)
} }
};
future
})
})
.map(|_| beacon_node)
} }

View File

@@ -1,10 +1,9 @@
use crate::ProductionValidatorClient; use crate::ProductionValidatorClient;
use exit_future::Signal; use exit_future::Signal;
use futures::{Future, Stream}; use futures::{FutureExt, StreamExt};
use slog::{error, info}; use slog::{error, info};
use slot_clock::SlotClock; use slot_clock::SlotClock;
use std::time::{Duration, Instant}; use tokio::time::{interval_at, Duration, Instant};
use tokio::timer::Interval;
use types::EthSpec; use types::EthSpec;
/// Spawns a notifier service which periodically logs information about the node. /// Spawns a notifier service which periodically logs information about the node.
@@ -26,14 +25,10 @@ pub fn spawn_notifier<T: EthSpec>(client: &ProductionValidatorClient<T>) -> Resu
let duties_service = client.duties_service.clone(); let duties_service = client.duties_service.clone();
let log_1 = context.log.clone(); let log_1 = context.log.clone();
let log_2 = context.log.clone();
let interval_future = Interval::new(start_instant, interval_duration) // Note: interval_at panics if `interval_duration` is 0
.map_err( let interval_fut = interval_at(start_instant, interval_duration).for_each(move |_| {
move |e| error!(log_1, "Slot notifier timer failed"; "error" => format!("{:?}", e)), let log = log_1.clone();
)
.for_each(move |_| {
let log = log_2.clone();
if let Some(slot) = duties_service.slot_clock.now() { if let Some(slot) = duties_service.slot_clock.now() {
let epoch = slot.epoch(T::slots_per_epoch()); let epoch = slot.epoch(T::slots_per_epoch());
@@ -46,7 +41,7 @@ pub fn spawn_notifier<T: EthSpec>(client: &ProductionValidatorClient<T>) -> Resu
error!(log, "No validators present") error!(log, "No validators present")
} else if total_validators == attesting_validators { } else if total_validators == attesting_validators {
info!( info!(
log_2, log_1,
"All validators active"; "All validators active";
"proposers" => proposing_validators, "proposers" => proposing_validators,
"active_validators" => attesting_validators, "active_validators" => attesting_validators,
@@ -56,7 +51,7 @@ pub fn spawn_notifier<T: EthSpec>(client: &ProductionValidatorClient<T>) -> Resu
); );
} else if attesting_validators > 0 { } else if attesting_validators > 0 {
info!( info!(
log_2, log_1,
"Some validators active"; "Some validators active";
"proposers" => proposing_validators, "proposers" => proposing_validators,
"active_validators" => attesting_validators, "active_validators" => attesting_validators,
@@ -66,7 +61,7 @@ pub fn spawn_notifier<T: EthSpec>(client: &ProductionValidatorClient<T>) -> Resu
); );
} else { } else {
info!( info!(
log_2, log_1,
"Awaiting activation"; "Awaiting activation";
"validators" => total_validators, "validators" => total_validators,
"epoch" => format!("{}", epoch), "epoch" => format!("{}", epoch),
@@ -77,15 +72,16 @@ pub fn spawn_notifier<T: EthSpec>(client: &ProductionValidatorClient<T>) -> Resu
error!(log, "Unable to read slot clock"); error!(log, "Unable to read slot clock");
} }
Ok(()) futures::future::ready(())
}); });
let (exit_signal, exit) = exit_future::signal(); let (exit_signal, exit) = exit_future::signal();
let log = context.log.clone(); let log = context.log.clone();
client.context.executor.spawn( let future = futures::future::select(
exit.until(interval_future) interval_fut,
.map(move |_| info!(log, "Shutdown complete")), exit.map(move |_| info!(log, "Shutdown complete")),
); );
tokio::task::spawn(future);
Ok(exit_signal) Ok(exit_signal)
} }

View File

@@ -1,6 +1,6 @@
use bls::get_withdrawal_credentials; use bls::get_withdrawal_credentials;
use deposit_contract::{encode_eth1_tx_data, DEPOSIT_GAS}; use deposit_contract::{encode_eth1_tx_data, DEPOSIT_GAS};
use futures::{Future, IntoFuture}; use futures::compat::Future01CompatExt;
use hex; use hex;
use ssz::{Decode, Encode}; use ssz::{Decode, Encode};
use ssz_derive::{Decode, Encode}; use ssz_derive::{Decode, Encode};
@@ -303,15 +303,13 @@ impl ValidatorDirectoryBuilder {
Ok(self) Ok(self)
} }
pub fn submit_eth1_deposit<T: Transport>( pub async fn submit_eth1_deposit<T: Transport>(
self, &self,
web3: Web3<T>, web3: Web3<T>,
from: Address, from: Address,
deposit_contract: Address, deposit_contract: Address,
) -> impl Future<Item = (Self, Hash256), Error = String> { ) -> Result<Hash256, String> {
self.get_deposit_data() let (deposit_data, deposit_amount) = self.get_deposit_data()?;
.into_future()
.and_then(move |(deposit_data, deposit_amount)| {
web3.eth() web3.eth()
.send_transaction(TransactionRequest { .send_transaction(TransactionRequest {
from, from,
@@ -323,9 +321,9 @@ impl ValidatorDirectoryBuilder {
nonce: None, nonce: None,
condition: None, condition: None,
}) })
.compat()
.await
.map_err(|e| format!("Failed to send transaction: {:?}", e)) .map_err(|e| format!("Failed to send transaction: {:?}", e))
})
.map(|tx| (self, tx))
} }
pub fn build(self) -> Result<ValidatorDirectory, String> { pub fn build(self) -> Result<ValidatorDirectory, String> {