Upgrade to tokio 0.3 (#1839)

## Description

This PR updates Lighthouse to tokio 0.3. It includes a number of dependency updates and some structural changes to how we create and spawn tasks.

This also brings with it a number of various improvements:

- Discv5 update
- Libp2p update
- Fix for recompilation issues
- Improved UPnP port mapping handling
- Futures dependency update
- Downgrade logs to trace level when rejecting peers because we've reached our maximum peer count



Co-authored-by: blacktemplar <blacktemplar@a1.net>
This commit is contained in:
Age Manning
2020-11-28 05:30:57 +00:00
parent 5a3b94cbb4
commit a567f788bd
81 changed files with 3666 additions and 2762 deletions

View File

@@ -9,9 +9,10 @@ name = "validator_client"
path = "src/lib.rs"
[dev-dependencies]
tokio = { version = "0.2.22", features = ["time", "rt-threaded", "macros"] }
tokio = { version = "0.3.2", features = ["time", "rt-multi-thread", "macros"] }
tempfile = "3.1.0"
deposit_contract = { path = "../common/deposit_contract" }
tokio-compat-02 = "0.1"
[dependencies]
eth2_ssz = "0.1.2"
@@ -30,8 +31,8 @@ serde_yaml = "0.8.13"
slog = { version = "2.5.2", features = ["max_level_trace", "release_max_level_trace"] }
slog-async = "2.5.0"
slog-term = "2.6.0"
tokio = { version = "0.2.22", features = ["time"] }
futures = { version = "0.3.5", features = ["compat"] }
tokio = { version = "0.3.2", features = ["time"] }
futures = { version = "0.3.7", features = ["compat"] }
dirs = "3.0.1"
directory = { path = "../common/directory" }
lockfile = { path = "../common/lockfile" }
@@ -53,7 +54,7 @@ eth2_keystore = { path = "../crypto/eth2_keystore" }
account_utils = { path = "../common/account_utils" }
lighthouse_version = { path = "../common/lighthouse_version" }
warp_utils = { path = "../common/warp_utils" }
warp = { git = "https://github.com/paulhauner/warp", branch = "cors-wildcard" }
warp = { git = "https://github.com/sigp/warp", branch = "lighthouse" }
hyper = "0.13.8"
serde_utils = { path = "../consensus/serde_utils" }
libsecp256k1 = "0.3.5"

View File

@@ -5,13 +5,14 @@ use crate::{
};
use environment::RuntimeContext;
use eth2::BeaconNodeHttpClient;
use futures::future::FutureExt;
use futures::StreamExt;
use slog::{crit, error, info, trace};
use slot_clock::SlotClock;
use std::collections::HashMap;
use std::ops::Deref;
use std::sync::Arc;
use tokio::time::{delay_until, interval_at, Duration, Instant};
use tokio::time::{interval_at, sleep_until, Duration, Instant};
use tree_hash::TreeHash;
use types::{
AggregateSignature, Attestation, AttestationData, BitList, ChainSpec, CommitteeIndex, EthSpec,
@@ -211,13 +212,16 @@ impl<T: SlotClock + 'static, E: EthSpec> AttestationService<T, E> {
.into_iter()
.for_each(|(committee_index, validator_duties)| {
// Spawn a separate task for each attestation.
self.inner.context.executor.runtime_handle().spawn(
self.clone().publish_attestations_and_aggregates(
slot,
committee_index,
validator_duties,
aggregate_production_instant,
),
self.inner.context.executor.spawn(
self.clone()
.publish_attestations_and_aggregates(
slot,
committee_index,
validator_duties,
aggregate_production_instant,
)
.map(|_| ()),
"attestation publish",
);
});
@@ -278,7 +282,7 @@ impl<T: SlotClock + 'static, E: EthSpec> AttestationService<T, E> {
// of the way though the slot). As verified in the
// `delay_triggers_when_in_the_past` test, this code will still run
// even if the instant has already elapsed.
delay_until(aggregate_production_instant).await;
sleep_until(aggregate_production_instant).await;
// Start the metrics timer *after* we've done the delay.
let _aggregates_timer = metrics::start_timer_vec(
@@ -552,7 +556,7 @@ mod tests {
use futures::future::FutureExt;
use parking_lot::RwLock;
/// This test is to ensure that a `tokio_timer::Delay` with an instant in the past will still
/// This test is to ensure that a `tokio_timer::Sleep` with an instant in the past will still
/// trigger.
#[tokio::test]
async fn delay_triggers_when_in_the_past() {
@@ -560,7 +564,7 @@ mod tests {
let state_1 = Arc::new(RwLock::new(in_the_past));
let state_2 = state_1.clone();
delay_until(in_the_past)
sleep_until(in_the_past)
.map(move |()| *state_1.write() = Instant::now())
.await;

View File

@@ -188,21 +188,22 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockService<T, E> {
)
}
proposers.into_iter().for_each(|validator_pubkey| {
for validator_pubkey in proposers {
let service = self.clone();
let log = log.clone();
self.inner.context.executor.runtime_handle().spawn(
self.inner.context.executor.spawn(
service
.publish_block(slot, validator_pubkey)
.map_err(move |e| {
.unwrap_or_else(move |e| {
crit!(
log,
"Error whilst producing block";
"message" => e
)
);
}),
"block service",
);
});
}
Ok(())
}

View File

@@ -481,15 +481,14 @@ impl<T: SlotClock + 'static, E: EthSpec> DutiesService<T, E> {
let duties_service = self.clone();
let mut block_service_tx_clone = block_service_tx.clone();
let inner_spec = spec.clone();
self.inner
.context
.executor
.runtime_handle()
.spawn(async move {
self.inner.context.executor.spawn(
async move {
duties_service
.do_update(&mut block_service_tx_clone, &inner_spec)
.await
});
},
"duties update",
);
let executor = self.inner.context.executor.clone();

View File

@@ -1,6 +1,7 @@
use crate::http_metrics::metrics;
use environment::RuntimeContext;
use eth2::{types::StateId, BeaconNodeHttpClient};
use futures::future::FutureExt;
use futures::StreamExt;
use parking_lot::RwLock;
use slog::Logger;
@@ -144,8 +145,7 @@ impl<T: SlotClock + 'static> ForkService<T> {
// Run an immediate update before starting the updater service.
context
.executor
.runtime_handle()
.spawn(self.clone().do_update());
.spawn(self.clone().do_update().map(|_| ()), "fork service update");
let executor = context.executor.clone();

View File

@@ -21,7 +21,7 @@ use validator_dir::Builder as ValidatorDirBuilder;
///
/// If `key_derivation_path_offset` is supplied then the EIP-2334 validator index will start at
/// this point.
pub fn create_validators<P: AsRef<Path>, T: 'static + SlotClock, E: EthSpec>(
pub async fn create_validators<P: AsRef<Path>, T: 'static + SlotClock, E: EthSpec>(
mnemonic_opt: Option<Mnemonic>,
key_derivation_path_offset: Option<u32>,
validator_requests: &[api_types::ValidatorRequest],
@@ -129,12 +129,9 @@ pub fn create_validators<P: AsRef<Path>, T: 'static + SlotClock, E: EthSpec>(
let voting_keystore_path = validator_dir.voting_keystore_path();
drop(validator_dir);
tokio::runtime::Handle::current()
.block_on(validator_store.add_validator_keystore(
voting_keystore_path,
voting_password_string,
request.enable,
))
validator_store
.add_validator_keystore(voting_keystore_path, voting_password_string, request.enable)
.await
.map_err(|e| {
warp_utils::reject::custom_server_error(format!(
"failed to initialize validator: {:?}",

View File

@@ -14,7 +14,8 @@ use std::future::Future;
use std::marker::PhantomData;
use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4};
use std::path::PathBuf;
use std::sync::Arc;
use std::sync::{Arc, Weak};
use tokio::runtime::Runtime;
use types::{ChainSpec, EthSpec, YamlConfig};
use validator_dir::Builder as ValidatorDirBuilder;
use warp::{
@@ -50,6 +51,7 @@ impl From<String> for Error {
///
/// The server will gracefully handle the case where any fields are `None`.
pub struct Context<T: Clone, E: EthSpec> {
pub runtime: Weak<Runtime>,
pub api_secret: ApiSecret,
pub validator_store: Option<ValidatorStore<T, E>>,
pub validator_dir: Option<PathBuf>,
@@ -138,6 +140,9 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>(
})
});
let inner_runtime = ctx.runtime.clone();
let runtime_filter = warp::any().map(move || inner_runtime.clone());
let inner_validator_dir = ctx.validator_dir.clone();
let validator_dir_filter = warp::any()
.map(move || inner_validator_dir.clone())
@@ -258,26 +263,34 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>(
.and(validator_store_filter.clone())
.and(spec_filter.clone())
.and(signer.clone())
.and(runtime_filter.clone())
.and_then(
|body: Vec<api_types::ValidatorRequest>,
validator_dir: PathBuf,
validator_store: ValidatorStore<T, E>,
spec: Arc<ChainSpec>,
signer| {
signer,
runtime: Weak<Runtime>| {
blocking_signed_json_task(signer, move || {
let (validators, mnemonic) = create_validators(
None,
None,
&body,
&validator_dir,
&validator_store,
&spec,
)?;
let response = api_types::PostValidatorsResponseData {
mnemonic: mnemonic.into_phrase().into(),
validators,
};
Ok(api_types::GenericResponse::from(response))
if let Some(runtime) = runtime.upgrade() {
let (validators, mnemonic) = runtime.block_on(create_validators(
None,
None,
&body,
&validator_dir,
&validator_store,
&spec,
))?;
let response = api_types::PostValidatorsResponseData {
mnemonic: mnemonic.into_phrase().into(),
validators,
};
Ok(api_types::GenericResponse::from(response))
} else {
Err(warp_utils::reject::custom_server_error(
"Runtime shutdown".into(),
))
}
})
},
);
@@ -292,25 +305,37 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>(
.and(validator_store_filter.clone())
.and(spec_filter)
.and(signer.clone())
.and(runtime_filter.clone())
.and_then(
|body: api_types::CreateValidatorsMnemonicRequest,
validator_dir: PathBuf,
validator_store: ValidatorStore<T, E>,
spec: Arc<ChainSpec>,
signer| {
signer,
runtime: Weak<Runtime>| {
blocking_signed_json_task(signer, move || {
let mnemonic = mnemonic_from_phrase(body.mnemonic.as_str()).map_err(|e| {
warp_utils::reject::custom_bad_request(format!("invalid mnemonic: {:?}", e))
})?;
let (validators, _mnemonic) = create_validators(
Some(mnemonic),
Some(body.key_derivation_path_offset),
&body.validators,
&validator_dir,
&validator_store,
&spec,
)?;
Ok(api_types::GenericResponse::from(validators))
if let Some(runtime) = runtime.upgrade() {
let mnemonic =
mnemonic_from_phrase(body.mnemonic.as_str()).map_err(|e| {
warp_utils::reject::custom_bad_request(format!(
"invalid mnemonic: {:?}",
e
))
})?;
let (validators, _mnemonic) = runtime.block_on(create_validators(
Some(mnemonic),
Some(body.key_derivation_path_offset),
&body.validators,
&validator_dir,
&validator_store,
&spec,
))?;
Ok(api_types::GenericResponse::from(validators))
} else {
Err(warp_utils::reject::custom_server_error(
"Runtime shutdown".into(),
))
}
})
},
);
@@ -324,11 +349,13 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>(
.and(validator_dir_filter)
.and(validator_store_filter.clone())
.and(signer.clone())
.and(runtime_filter.clone())
.and_then(
|body: api_types::KeystoreValidatorsPostRequest,
validator_dir: PathBuf,
validator_store: ValidatorStore<T, E>,
signer| {
signer,
runtime: Weak<Runtime>| {
blocking_signed_json_task(signer, move || {
// Check to ensure the password is correct.
let keypair = body
@@ -357,18 +384,26 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>(
drop(validator_dir);
let voting_password = body.password.clone();
let validator_def = tokio::runtime::Handle::current()
.block_on(validator_store.add_validator_keystore(
voting_keystore_path,
voting_password,
body.enable,
))
.map_err(|e| {
warp_utils::reject::custom_server_error(format!(
"failed to initialize validator: {:?}",
e
))
})?;
let validator_def = {
if let Some(runtime) = runtime.upgrade() {
runtime
.block_on(validator_store.add_validator_keystore(
voting_keystore_path,
voting_password,
body.enable,
))
.map_err(|e| {
warp_utils::reject::custom_server_error(format!(
"failed to initialize validator: {:?}",
e
))
})?
} else {
return Err(warp_utils::reject::custom_server_error(
"Runtime shutdown".into(),
));
}
};
Ok(api_types::GenericResponse::from(api_types::ValidatorData {
enabled: body.enable,
@@ -387,11 +422,13 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>(
.and(warp::body::json())
.and(validator_store_filter)
.and(signer)
.and(runtime_filter)
.and_then(
|validator_pubkey: PublicKey,
body: api_types::ValidatorPatchRequest,
validator_store: ValidatorStore<T, E>,
signer| {
signer,
runtime: Weak<Runtime>| {
blocking_signed_json_task(signer, move || {
let initialized_validators_rw_lock = validator_store.initialized_validators();
let mut initialized_validators = initialized_validators_rw_lock.write();
@@ -403,19 +440,24 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>(
))),
Some(enabled) if enabled == body.enabled => Ok(()),
Some(_) => {
tokio::runtime::Handle::current()
.block_on(
initialized_validators
.set_validator_status(&validator_pubkey, body.enabled),
)
.map_err(|e| {
warp_utils::reject::custom_server_error(format!(
"unable to set validator status: {:?}",
e
))
})?;
Ok(())
if let Some(runtime) = runtime.upgrade() {
runtime
.block_on(
initialized_validators
.set_validator_status(&validator_pubkey, body.enabled),
)
.map_err(|e| {
warp_utils::reject::custom_server_error(format!(
"unable to set validator status: {:?}",
e
))
})?;
Ok(())
} else {
Err(warp_utils::reject::custom_server_error(
"Runtime shutdown".into(),
))
}
}
}
})
@@ -471,8 +513,8 @@ pub async fn blocking_signed_json_task<S, F, T>(
) -> Result<impl warp::Reply, warp::Rejection>
where
S: Fn(&[u8]) -> String,
F: Fn() -> Result<T, warp::Rejection>,
T: Serialize,
F: Fn() -> Result<T, warp::Rejection> + Send + 'static,
T: Serialize + Send + 'static,
{
warp_utils::task::blocking_task(func)
.await

View File

@@ -23,7 +23,9 @@ use std::marker::PhantomData;
use std::net::Ipv4Addr;
use std::sync::Arc;
use tempfile::{tempdir, TempDir};
use tokio::runtime::Runtime;
use tokio::sync::oneshot;
use tokio_compat_02::FutureExt;
const PASSWORD_BYTES: &[u8] = &[42, 50, 37];
@@ -37,8 +39,18 @@ struct ApiTester {
_validator_dir: TempDir,
}
// Builds a runtime to be used in the testing configuration.
fn build_runtime() -> Arc<Runtime> {
Arc::new(
tokio::runtime::Builder::new_multi_thread()
.enable_all()
.build()
.expect("Should be able to build a testing runtime"),
)
}
impl ApiTester {
pub async fn new() -> Self {
pub async fn new(runtime: std::sync::Weak<Runtime>) -> Self {
let log = null_logger().unwrap();
let validator_dir = tempdir().unwrap();
@@ -80,6 +92,7 @@ impl ApiTester {
let initialized_validators = validator_store.initialized_validators();
let context: Arc<Context<TestingSlotClock, E>> = Arc::new(Context {
runtime,
api_secret,
validator_dir: Some(validator_dir.path().into()),
validator_store: Some(validator_store),
@@ -420,110 +433,145 @@ struct KeystoreValidatorScenario {
correct_password: bool,
}
#[tokio::test(core_threads = 2)]
async fn invalid_pubkey() {
ApiTester::new()
.await
.invalidate_api_token()
.test_get_lighthouse_version_invalid()
.await;
#[test]
fn invalid_pubkey() {
let runtime = build_runtime();
let weak_runtime = Arc::downgrade(&runtime);
runtime.block_on(
async {
ApiTester::new(weak_runtime)
.await
.invalidate_api_token()
.test_get_lighthouse_version_invalid()
.await;
}
.compat(),
);
}
#[tokio::test(core_threads = 2)]
async fn simple_getters() {
ApiTester::new()
.await
.test_get_lighthouse_version()
.await
.test_get_lighthouse_health()
.await
.test_get_lighthouse_spec()
.await;
#[test]
fn simple_getters() {
let runtime = build_runtime();
let weak_runtime = Arc::downgrade(&runtime);
runtime.block_on(
async {
ApiTester::new(weak_runtime)
.await
.test_get_lighthouse_version()
.await
.test_get_lighthouse_health()
.await
.test_get_lighthouse_spec()
.await;
}
.compat(),
);
}
#[tokio::test(core_threads = 2)]
async fn hd_validator_creation() {
ApiTester::new()
.await
.assert_enabled_validators_count(0)
.assert_validators_count(0)
.create_hd_validators(HdValidatorScenario {
count: 2,
specify_mnemonic: true,
key_derivation_path_offset: 0,
disabled: vec![],
})
.await
.assert_enabled_validators_count(2)
.assert_validators_count(2)
.create_hd_validators(HdValidatorScenario {
count: 1,
specify_mnemonic: false,
key_derivation_path_offset: 0,
disabled: vec![0],
})
.await
.assert_enabled_validators_count(2)
.assert_validators_count(3)
.create_hd_validators(HdValidatorScenario {
count: 0,
specify_mnemonic: true,
key_derivation_path_offset: 4,
disabled: vec![],
})
.await
.assert_enabled_validators_count(2)
.assert_validators_count(3);
#[test]
fn hd_validator_creation() {
let runtime = build_runtime();
let weak_runtime = Arc::downgrade(&runtime);
runtime.block_on(
async {
ApiTester::new(weak_runtime)
.await
.assert_enabled_validators_count(0)
.assert_validators_count(0)
.create_hd_validators(HdValidatorScenario {
count: 2,
specify_mnemonic: true,
key_derivation_path_offset: 0,
disabled: vec![],
})
.await
.assert_enabled_validators_count(2)
.assert_validators_count(2)
.create_hd_validators(HdValidatorScenario {
count: 1,
specify_mnemonic: false,
key_derivation_path_offset: 0,
disabled: vec![0],
})
.await
.assert_enabled_validators_count(2)
.assert_validators_count(3)
.create_hd_validators(HdValidatorScenario {
count: 0,
specify_mnemonic: true,
key_derivation_path_offset: 4,
disabled: vec![],
})
.await
.assert_enabled_validators_count(2)
.assert_validators_count(3);
}
.compat(),
);
}
#[tokio::test(core_threads = 2)]
async fn validator_enabling() {
ApiTester::new()
.await
.create_hd_validators(HdValidatorScenario {
count: 2,
specify_mnemonic: false,
key_derivation_path_offset: 0,
disabled: vec![],
})
.await
.assert_enabled_validators_count(2)
.assert_validators_count(2)
.set_validator_enabled(0, false)
.await
.assert_enabled_validators_count(1)
.assert_validators_count(2)
.set_validator_enabled(0, true)
.await
.assert_enabled_validators_count(2)
.assert_validators_count(2);
#[test]
fn validator_enabling() {
let runtime = build_runtime();
let weak_runtime = Arc::downgrade(&runtime);
runtime.block_on(
async {
ApiTester::new(weak_runtime)
.await
.create_hd_validators(HdValidatorScenario {
count: 2,
specify_mnemonic: false,
key_derivation_path_offset: 0,
disabled: vec![],
})
.await
.assert_enabled_validators_count(2)
.assert_validators_count(2)
.set_validator_enabled(0, false)
.await
.assert_enabled_validators_count(1)
.assert_validators_count(2)
.set_validator_enabled(0, true)
.await
.assert_enabled_validators_count(2)
.assert_validators_count(2);
}
.compat(),
);
}
#[tokio::test(core_threads = 2)]
async fn keystore_validator_creation() {
ApiTester::new()
.await
.assert_enabled_validators_count(0)
.assert_validators_count(0)
.create_keystore_validators(KeystoreValidatorScenario {
correct_password: true,
enabled: true,
})
.await
.assert_enabled_validators_count(1)
.assert_validators_count(1)
.create_keystore_validators(KeystoreValidatorScenario {
correct_password: false,
enabled: true,
})
.await
.assert_enabled_validators_count(1)
.assert_validators_count(1)
.create_keystore_validators(KeystoreValidatorScenario {
correct_password: true,
enabled: false,
})
.await
.assert_enabled_validators_count(1)
.assert_validators_count(2);
#[test]
fn keystore_validator_creation() {
let runtime = build_runtime();
let weak_runtime = Arc::downgrade(&runtime);
runtime.block_on(
async {
ApiTester::new(weak_runtime)
.await
.assert_enabled_validators_count(0)
.assert_validators_count(0)
.create_keystore_validators(KeystoreValidatorScenario {
correct_password: true,
enabled: true,
})
.await
.assert_enabled_validators_count(1)
.assert_validators_count(1)
.create_keystore_validators(KeystoreValidatorScenario {
correct_password: false,
enabled: true,
})
.await
.assert_enabled_validators_count(1)
.assert_validators_count(1)
.create_keystore_validators(KeystoreValidatorScenario {
correct_password: true,
enabled: false,
})
.await
.assert_enabled_validators_count(1)
.assert_validators_count(2);
}
.compat(),
);
}

View File

@@ -38,7 +38,7 @@ use std::marker::PhantomData;
use std::net::SocketAddr;
use std::sync::Arc;
use std::time::{SystemTime, UNIX_EPOCH};
use tokio::time::{delay_for, Duration};
use tokio::time::{sleep, Duration};
use types::{EthSpec, Hash256};
use validator_store::ValidatorStore;
@@ -337,6 +337,7 @@ impl<T: EthSpec> ProductionValidatorClient<T> {
self.http_api_listen_addr = if self.config.http_api.enabled {
let ctx: Arc<http_api::Context<SystemTimeSlotClock, T>> = Arc::new(http_api::Context {
runtime: self.context.executor.runtime(),
api_secret,
validator_store: Some(self.validator_store.clone()),
validator_dir: Some(self.config.validator_dir.clone()),
@@ -415,7 +416,7 @@ async fn init_from_beacon_node<E: EthSpec>(
}
}
delay_for(RETRY_DELAY).await;
sleep(RETRY_DELAY).await;
};
Ok((genesis.genesis_time, genesis.genesis_validators_root))
@@ -447,7 +448,7 @@ async fn wait_for_genesis<E: EthSpec>(
// timer runs out.
tokio::select! {
result = poll_whilst_waiting_for_genesis(beacon_node, genesis_time, context.log()) => result?,
() = delay_for(genesis_time - now) => ()
() = sleep(genesis_time - now) => ()
};
info!(
@@ -497,7 +498,7 @@ async fn wait_for_connectivity(
"Unable to connect to beacon node";
"error" => format!("{:?}", e),
);
delay_for(RETRY_DELAY).await;
sleep(RETRY_DELAY).await;
}
}
}
@@ -546,6 +547,6 @@ async fn poll_whilst_waiting_for_genesis(
}
}
delay_for(WAITING_FOR_GENESIS_POLL_TIME).await;
sleep(WAITING_FOR_GENESIS_POLL_TIME).await;
}
}