mirror of
https://github.com/sigp/lighthouse.git
synced 2026-04-24 00:08:27 +00:00
Update to tokio 1.1 (#2172)
## Issue Addressed resolves #2129 resolves #2099 addresses some of #1712 unblocks #2076 unblocks #2153 ## Proposed Changes - Updates all the dependencies mentioned in #2129, except for web3. They haven't merged their tokio 1.0 update because they are waiting on some dependencies of their own. Since we only use web3 in tests, I think updating it in a separate issue is fine. If they are able to merge soon though, I can update in this PR. - Updates `tokio_util` to 0.6.2 and `bytes` to 1.0.1. - We haven't made a discv5 release since merging tokio 1.0 updates so I'm using a commit rather than release atm. **Edit:** I think we should merge an update of `tokio_util` to 0.6.2 into discv5 before this release because it has panic fixes in `DelayQueue` --> PR in discv5: https://github.com/sigp/discv5/pull/58 ## Additional Info tokio 1.0 changes that required some changes in lighthouse: - `interval.next().await.is_some()` -> `interval.tick().await` - `sleep` future is now `!Unpin` -> https://github.com/tokio-rs/tokio/issues/3028 - `try_recv` has been temporarily removed from `mpsc` -> https://github.com/tokio-rs/tokio/issues/3350 - stream features have moved to `tokio-stream` and `broadcast::Receiver::into_stream()` has been temporarily removed -> https://github.com/tokio-rs/tokio/issues/2870 - I've copied over the `BroadcastStream` wrapper from this PR, but can update to use `tokio-stream` once it's merged https://github.com/tokio-rs/tokio/pull/3384 Co-authored-by: realbigsean <seananderson33@gmail.com>
This commit is contained in:
@@ -10,7 +10,6 @@ path = "src/lib.rs"
|
||||
|
||||
[dev-dependencies]
|
||||
node_test_rig = { path = "../testing/node_test_rig" }
|
||||
tokio-compat-02 = "0.1"
|
||||
|
||||
[features]
|
||||
write_ssz_files = ["beacon_chain/write_ssz_files"] # Writes debugging .ssz files to /tmp during block processing.
|
||||
@@ -27,7 +26,7 @@ slog = { version = "2.5.2", features = ["max_level_trace", "release_max_level_tr
|
||||
slog-term = "2.6.0"
|
||||
slog-async = "2.5.0"
|
||||
ctrlc = { version = "3.1.6", features = ["termination"] }
|
||||
tokio = { version = "0.3.2", features = ["time"] }
|
||||
tokio = { version = "1.1.0", features = ["time"] }
|
||||
exit-future = "0.2.0"
|
||||
dirs = "3.0.1"
|
||||
logging = { path = "../common/logging" }
|
||||
@@ -41,7 +40,7 @@ eth2_libp2p = { path = "./eth2_libp2p" }
|
||||
eth2_ssz = "0.1.2"
|
||||
serde = "1.0.116"
|
||||
clap_utils = { path = "../common/clap_utils" }
|
||||
hyper = "0.13.8"
|
||||
hyper = "0.14.4"
|
||||
lighthouse_version = { path = "../common/lighthouse_version" }
|
||||
hex = "0.4.2"
|
||||
slasher = { path = "../slasher" }
|
||||
|
||||
@@ -40,7 +40,7 @@ eth2_ssz_derive = "0.1.0"
|
||||
state_processing = { path = "../../consensus/state_processing" }
|
||||
tree_hash = "0.1.1"
|
||||
types = { path = "../../consensus/types" }
|
||||
tokio = "0.3.2"
|
||||
tokio = "1.1.0"
|
||||
eth1 = { path = "../eth1" }
|
||||
futures = "0.3.7"
|
||||
genesis = { path = "../genesis" }
|
||||
|
||||
@@ -26,10 +26,10 @@ error-chain = "0.12.4"
|
||||
serde_yaml = "0.8.13"
|
||||
slog = { version = "2.5.2", features = ["max_level_trace"] }
|
||||
slog-async = "2.5.0"
|
||||
tokio = "0.3.2"
|
||||
tokio = "1.1.0"
|
||||
dirs = "3.0.1"
|
||||
futures = "0.3.7"
|
||||
reqwest = { version = "0.10.8", features = ["native-tls-vendored"] }
|
||||
reqwest = { version = "0.11.0", features = ["native-tls-vendored"] }
|
||||
url = "2.1.1"
|
||||
eth1 = { path = "../eth1" }
|
||||
genesis = { path = "../genesis" }
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
use crate::metrics;
|
||||
use beacon_chain::{BeaconChain, BeaconChainTypes};
|
||||
use eth2_libp2p::NetworkGlobals;
|
||||
use futures::prelude::*;
|
||||
use parking_lot::Mutex;
|
||||
use slog::{debug, error, info, warn, Logger};
|
||||
use slot_clock::SlotClock;
|
||||
@@ -64,26 +63,32 @@ pub fn spawn_notifier<T: BeaconChainTypes>(
|
||||
}
|
||||
|
||||
// Perform post-genesis logging.
|
||||
while interval.next().await.is_some() {
|
||||
loop {
|
||||
interval.tick().await;
|
||||
let connected_peer_count = network.connected_peers();
|
||||
let sync_state = network.sync_state();
|
||||
|
||||
let head_info = beacon_chain.head_info().map_err(|e| {
|
||||
error!(
|
||||
log,
|
||||
"Failed to get beacon chain head info";
|
||||
"error" => format!("{:?}", e)
|
||||
)
|
||||
})?;
|
||||
let head_info = match beacon_chain.head_info() {
|
||||
Ok(head_info) => head_info,
|
||||
Err(e) => {
|
||||
error!(log, "Failed to get beacon chain head info"; "error" => format!("{:?}", e));
|
||||
break;
|
||||
}
|
||||
};
|
||||
|
||||
let head_slot = head_info.slot;
|
||||
let current_slot = beacon_chain.slot().map_err(|e| {
|
||||
error!(
|
||||
log,
|
||||
"Unable to read current slot";
|
||||
"error" => format!("{:?}", e)
|
||||
)
|
||||
})?;
|
||||
let current_slot = match beacon_chain.slot() {
|
||||
Ok(slot) => slot,
|
||||
Err(e) => {
|
||||
error!(
|
||||
log,
|
||||
"Unable to read current slot";
|
||||
"error" => format!("{:?}", e)
|
||||
);
|
||||
break;
|
||||
}
|
||||
};
|
||||
|
||||
let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch());
|
||||
let finalized_epoch = head_info.finalized_checkpoint.epoch;
|
||||
let finalized_root = head_info.finalized_checkpoint.root;
|
||||
@@ -175,11 +180,10 @@ pub fn spawn_notifier<T: BeaconChainTypes>(
|
||||
|
||||
eth1_logging(&beacon_chain, &log);
|
||||
}
|
||||
Ok::<(), ()>(())
|
||||
};
|
||||
|
||||
// run the notifier on the current executor
|
||||
executor.spawn(interval_future.unwrap_or_else(|_| ()), "notifier");
|
||||
executor.spawn(interval_future, "notifier");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -13,7 +13,7 @@ environment = { path = "../../lighthouse/environment" }
|
||||
tokio-compat-02 = "0.1"
|
||||
|
||||
[dependencies]
|
||||
reqwest = { version = "0.10.8", features = ["native-tls-vendored"] }
|
||||
reqwest = { version = "0.11.0", features = ["native-tls-vendored"] }
|
||||
futures = "0.3.7"
|
||||
serde_json = "1.0.58"
|
||||
serde = { version = "1.0.116", features = ["derive"] }
|
||||
@@ -26,7 +26,7 @@ tree_hash = "0.1.1"
|
||||
eth2_hashing = "0.1.0"
|
||||
parking_lot = "0.11.0"
|
||||
slog = "2.5.2"
|
||||
tokio = { version = "0.3.2", features = ["full"] }
|
||||
tokio = { version = "1.1.0", features = ["full"] }
|
||||
state_processing = { path = "../../consensus/state_processing" }
|
||||
libflate = "1.0.2"
|
||||
lighthouse_metrics = { path = "../../common/lighthouse_metrics"}
|
||||
|
||||
@@ -9,7 +9,7 @@ use crate::{
|
||||
inner::{DepositUpdater, Inner},
|
||||
};
|
||||
use fallback::{Fallback, FallbackError};
|
||||
use futures::{future::TryFutureExt, StreamExt};
|
||||
use futures::future::TryFutureExt;
|
||||
use parking_lot::{RwLock, RwLockReadGuard};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use slog::{crit, debug, error, info, trace, warn, Logger};
|
||||
@@ -721,7 +721,8 @@ impl Service {
|
||||
let mut interval = interval_at(Instant::now(), update_interval);
|
||||
|
||||
let update_future = async move {
|
||||
while interval.next().await.is_some() {
|
||||
loop {
|
||||
interval.tick().await;
|
||||
self.do_update(update_interval).await.ok();
|
||||
}
|
||||
};
|
||||
|
||||
@@ -5,8 +5,8 @@ authors = ["Sigma Prime <contact@sigmaprime.io>"]
|
||||
edition = "2018"
|
||||
|
||||
[dependencies]
|
||||
discv5 = { version = "0.1.0-beta.2", features = ["libp2p"] }
|
||||
unsigned-varint = { git = "https://github.com/sigp/unsigned-varint", branch = "dep-update", features = ["codec"] }
|
||||
discv5 = { version = "0.1.0-beta.3", features = ["libp2p"] }
|
||||
unsigned-varint = { version = "0.6.0", features = ["codec"] }
|
||||
types = { path = "../../consensus/types" }
|
||||
hashset_delay = { path = "../../common/hashset_delay" }
|
||||
eth2_ssz_types = { path = "../../consensus/ssz_types" }
|
||||
@@ -16,15 +16,16 @@ eth2_ssz = "0.1.2"
|
||||
eth2_ssz_derive = "0.1.0"
|
||||
slog = { version = "2.5.2", features = ["max_level_trace"] }
|
||||
lighthouse_version = { path = "../../common/lighthouse_version" }
|
||||
tokio = { version = "0.3.2", features = ["time", "macros"] }
|
||||
tokio = { version = "1.1.0", features = ["time", "macros"] }
|
||||
futures = "0.3.7"
|
||||
futures-io = "0.3.7"
|
||||
error-chain = "0.12.4"
|
||||
dirs = "3.0.1"
|
||||
fnv = "1.0.7"
|
||||
lazy_static = "1.4.0"
|
||||
lighthouse_metrics = { path = "../../common/lighthouse_metrics" }
|
||||
smallvec = "1.6.1"
|
||||
tokio-io-timeout = "0.5.0"
|
||||
tokio-io-timeout = "1.1.1"
|
||||
lru = "0.6.0"
|
||||
parking_lot = "0.11.0"
|
||||
sha2 = "0.9.1"
|
||||
@@ -32,7 +33,7 @@ base64 = "0.13.0"
|
||||
snap = "1.0.1"
|
||||
void = "1.0.2"
|
||||
hex = "0.4.2"
|
||||
tokio-util = { version = "0.4.0", features = ["codec", "compat", "time"] }
|
||||
tokio-util = { version = "0.6.2", features = ["codec", "compat", "time"] }
|
||||
tiny-keccak = "2.0.2"
|
||||
task_executor = { path = "../../common/task_executor" }
|
||||
rand = "0.7.3"
|
||||
@@ -41,14 +42,12 @@ regex = "1.3.9"
|
||||
strum = { version = "0.20", features = ["derive"] }
|
||||
|
||||
[dependencies.libp2p]
|
||||
#version = "0.23.0"
|
||||
git = "https://github.com/sigp/rust-libp2p"
|
||||
rev = "97000533e4710183124abde017c6c3d68287c1ae"
|
||||
version = "0.34.0"
|
||||
default-features = false
|
||||
features = ["websocket", "identify", "mplex", "yamux", "noise", "gossipsub", "dns", "tcp-tokio"]
|
||||
|
||||
[dev-dependencies]
|
||||
tokio = { version = "0.3.2", features = ["full"] }
|
||||
tokio = { version = "1.1.0", features = ["full"] }
|
||||
slog-term = "2.6.0"
|
||||
slog-async = "2.5.0"
|
||||
tempfile = "3.1.0"
|
||||
|
||||
@@ -832,7 +832,7 @@ impl<TSpec: EthSpec> Behaviour<TSpec> {
|
||||
if let Some((peer_id, reason)) = self.peers_to_dc.pop_front() {
|
||||
return Poll::Ready(NBAction::NotifyHandler {
|
||||
peer_id,
|
||||
handler: NotifyHandler::All,
|
||||
handler: NotifyHandler::Any,
|
||||
event: BehaviourHandlerIn::Shutdown(
|
||||
reason.map(|reason| (RequestId::Behaviour, RPCRequest::Goodbye(reason))),
|
||||
),
|
||||
@@ -893,7 +893,7 @@ impl<TSpec: EthSpec> Behaviour<TSpec> {
|
||||
}
|
||||
|
||||
// perform gossipsub score updates when necessary
|
||||
while let Poll::Ready(Some(_)) = self.update_gossipsub_scores.poll_next_unpin(cx) {
|
||||
while let Poll::Ready(_) = self.update_gossipsub_scores.poll_tick(cx) {
|
||||
self.peer_manager.update_gossipsub_scores(&self.gossipsub);
|
||||
}
|
||||
|
||||
|
||||
@@ -221,8 +221,9 @@ impl CombinedKeyExt for CombinedKey {
|
||||
fn from_libp2p(key: &libp2p::core::identity::Keypair) -> Result<CombinedKey, &'static str> {
|
||||
match key {
|
||||
Keypair::Secp256k1(key) => {
|
||||
let secret = discv5::enr::k256::ecdsa::SigningKey::new(&key.secret().to_bytes())
|
||||
.expect("libp2p key must be valid");
|
||||
let secret =
|
||||
discv5::enr::k256::ecdsa::SigningKey::from_bytes(&key.secret().to_bytes())
|
||||
.expect("libp2p key must be valid");
|
||||
Ok(CombinedKey::Secp256k1(secret))
|
||||
}
|
||||
Keypair::Ed25519(key) => {
|
||||
@@ -277,7 +278,7 @@ mod tests {
|
||||
fn test_secp256k1_peer_id_conversion() {
|
||||
let sk_hex = "df94a73d528434ce2309abb19c16aedb535322797dbd59c157b1e04095900f48";
|
||||
let sk_bytes = hex::decode(sk_hex).unwrap();
|
||||
let secret_key = discv5::enr::k256::ecdsa::SigningKey::new(&sk_bytes).unwrap();
|
||||
let secret_key = discv5::enr::k256::ecdsa::SigningKey::from_bytes(&sk_bytes).unwrap();
|
||||
|
||||
let libp2p_sk = libp2p::identity::secp256k1::SecretKey::from_bytes(sk_bytes).unwrap();
|
||||
let secp256k1_kp: libp2p::identity::secp256k1::Keypair = libp2p_sk.into();
|
||||
|
||||
@@ -896,7 +896,7 @@ impl<TSpec: EthSpec> Discovery<TSpec> {
|
||||
}
|
||||
EventStream::InActive => {} // ignore checking the stream
|
||||
EventStream::Present(ref mut stream) => {
|
||||
while let Ok(event) = stream.try_recv() {
|
||||
while let Poll::Ready(Some(event)) = stream.poll_recv(cx) {
|
||||
match event {
|
||||
// We filter out unwanted discv5 events here and only propagate useful results to
|
||||
// the peer manager.
|
||||
|
||||
@@ -972,7 +972,7 @@ impl<TSpec: EthSpec> Stream for PeerManager<TSpec> {
|
||||
|
||||
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
|
||||
// perform the heartbeat when necessary
|
||||
while let Poll::Ready(Some(_)) = self.heartbeat.poll_next_unpin(cx) {
|
||||
while let Poll::Ready(_) = self.heartbeat.poll_tick(cx) {
|
||||
self.heartbeat();
|
||||
}
|
||||
|
||||
@@ -1011,8 +1011,10 @@ impl<TSpec: EthSpec> Stream for PeerManager<TSpec> {
|
||||
}
|
||||
}
|
||||
|
||||
if !matches!(self.network_globals.sync_state(), SyncState::SyncingFinalized{..}|SyncState::SyncingHead{..})
|
||||
{
|
||||
if !matches!(
|
||||
self.network_globals.sync_state(),
|
||||
SyncState::SyncingFinalized { .. } | SyncState::SyncingHead { .. }
|
||||
) {
|
||||
loop {
|
||||
match self.status_peers.poll_next_unpin(cx) {
|
||||
Poll::Ready(Some(Ok(peer_id))) => {
|
||||
|
||||
@@ -156,7 +156,10 @@ impl<T: EthSpec> PeerInfo<T> {
|
||||
|
||||
/// Checks if the status is connected.
|
||||
pub fn is_connected(&self) -> bool {
|
||||
matches!(self.connection_status, PeerConnectionStatus::Connected { .. })
|
||||
matches!(
|
||||
self.connection_status,
|
||||
PeerConnectionStatus::Connected { .. }
|
||||
)
|
||||
}
|
||||
|
||||
/// Checks if the status is connected.
|
||||
|
||||
@@ -29,12 +29,20 @@ pub struct SyncInfo {
|
||||
|
||||
impl std::cmp::PartialEq for PeerSyncStatus {
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
matches!((self, other),
|
||||
(PeerSyncStatus::Synced { .. }, PeerSyncStatus::Synced { .. }) |
|
||||
(PeerSyncStatus::Advanced { .. }, PeerSyncStatus::Advanced { .. }) |
|
||||
(PeerSyncStatus::Behind { .. }, PeerSyncStatus::Behind { .. }) |
|
||||
(PeerSyncStatus::IrrelevantPeer, PeerSyncStatus::IrrelevantPeer) |
|
||||
(PeerSyncStatus::Unknown, PeerSyncStatus::Unknown))
|
||||
matches!(
|
||||
(self, other),
|
||||
(PeerSyncStatus::Synced { .. }, PeerSyncStatus::Synced { .. })
|
||||
| (
|
||||
PeerSyncStatus::Advanced { .. },
|
||||
PeerSyncStatus::Advanced { .. }
|
||||
)
|
||||
| (PeerSyncStatus::Behind { .. }, PeerSyncStatus::Behind { .. })
|
||||
| (
|
||||
PeerSyncStatus::IrrelevantPeer,
|
||||
PeerSyncStatus::IrrelevantPeer
|
||||
)
|
||||
| (PeerSyncStatus::Unknown, PeerSyncStatus::Unknown)
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -137,14 +137,20 @@ impl<TSpec: EthSpec> PeerDB<TSpec> {
|
||||
|
||||
/// If we are connected or currently dialing the peer returns true.
|
||||
pub fn is_connected_or_dialing(&self, peer_id: &PeerId) -> bool {
|
||||
matches!(self.connection_status(peer_id), Some(PeerConnectionStatus::Connected { .. })
|
||||
| Some(PeerConnectionStatus::Dialing { .. }))
|
||||
matches!(
|
||||
self.connection_status(peer_id),
|
||||
Some(PeerConnectionStatus::Connected { .. })
|
||||
| Some(PeerConnectionStatus::Dialing { .. })
|
||||
)
|
||||
}
|
||||
|
||||
/// If we are connected or in the process of disconnecting
|
||||
pub fn is_connected_or_disconnecting(&self, peer_id: &PeerId) -> bool {
|
||||
matches!(self.connection_status(peer_id), Some(PeerConnectionStatus::Connected { .. })
|
||||
| Some(PeerConnectionStatus::Disconnecting { .. }))
|
||||
matches!(
|
||||
self.connection_status(peer_id),
|
||||
Some(PeerConnectionStatus::Connected { .. })
|
||||
| Some(PeerConnectionStatus::Disconnecting { .. })
|
||||
)
|
||||
}
|
||||
|
||||
/// Returns true if the peer is synced at least to our current head.
|
||||
|
||||
@@ -7,6 +7,7 @@ use super::{RPCReceived, RPCSend};
|
||||
use crate::rpc::protocol::{InboundFramed, OutboundFramed};
|
||||
use fnv::FnvHashMap;
|
||||
use futures::prelude::*;
|
||||
use futures::{Sink, SinkExt};
|
||||
use libp2p::core::upgrade::{
|
||||
InboundUpgrade, NegotiationError, OutboundUpgrade, ProtocolError, UpgradeError,
|
||||
};
|
||||
@@ -133,7 +134,7 @@ enum HandlerState {
|
||||
///
|
||||
/// While in this state the handler rejects new requests but tries to finish existing ones.
|
||||
/// Once the timer expires, all messages are killed.
|
||||
ShuttingDown(Sleep),
|
||||
ShuttingDown(Box<Sleep>),
|
||||
/// The handler is deactivated. A goodbye has been sent and no more messages are sent or
|
||||
/// received.
|
||||
Deactivated,
|
||||
@@ -239,9 +240,9 @@ where
|
||||
self.dial_queue.push((id, req));
|
||||
}
|
||||
|
||||
self.state = HandlerState::ShuttingDown(sleep_until(
|
||||
self.state = HandlerState::ShuttingDown(Box::new(sleep_until(
|
||||
TInstant::now() + Duration::from_secs(SHUTDOWN_TIMEOUT_SECS as u64),
|
||||
));
|
||||
)));
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -9,8 +9,8 @@ use crate::rpc::{
|
||||
MaxRequestBlocks, MAX_REQUEST_BLOCKS,
|
||||
};
|
||||
use futures::future::BoxFuture;
|
||||
use futures::prelude::*;
|
||||
use futures::prelude::{AsyncRead, AsyncWrite};
|
||||
use futures::{FutureExt, SinkExt, StreamExt};
|
||||
use libp2p::core::{InboundUpgrade, OutboundUpgrade, ProtocolName, UpgradeInfo};
|
||||
use ssz::Encode;
|
||||
use ssz_types::VariableList;
|
||||
@@ -278,7 +278,7 @@ impl ProtocolName for ProtocolId {
|
||||
|
||||
pub type InboundOutput<TSocket, TSpec> = (RPCRequest<TSpec>, InboundFramed<TSocket, TSpec>);
|
||||
pub type InboundFramed<TSocket, TSpec> =
|
||||
Framed<TimeoutStream<Compat<TSocket>>, InboundCodec<TSpec>>;
|
||||
Framed<std::pin::Pin<Box<TimeoutStream<Compat<TSocket>>>>, InboundCodec<TSpec>>;
|
||||
|
||||
impl<TSocket, TSpec> InboundUpgrade<TSocket> for RPCProtocol<TSpec>
|
||||
where
|
||||
@@ -304,7 +304,7 @@ where
|
||||
let mut timed_socket = TimeoutStream::new(socket);
|
||||
timed_socket.set_read_timeout(Some(Duration::from_secs(TTFB_TIMEOUT)));
|
||||
|
||||
let socket = Framed::new(timed_socket, codec);
|
||||
let socket = Framed::new(Box::pin(timed_socket), codec);
|
||||
|
||||
// MetaData requests should be empty, return the stream
|
||||
match protocol_name {
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
use crate::rpc::{Protocol, RPCRequest};
|
||||
use fnv::FnvHashMap;
|
||||
use futures::StreamExt;
|
||||
use libp2p::PeerId;
|
||||
use std::convert::TryInto;
|
||||
use std::future::Future;
|
||||
@@ -241,7 +240,7 @@ impl Future for RPCRateLimiter {
|
||||
type Output = ();
|
||||
|
||||
fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
|
||||
while let Poll::Ready(Some(_)) = self.prune_interval.poll_next_unpin(cx) {
|
||||
while let Poll::Ready(_) = self.prune_interval.poll_tick(cx) {
|
||||
self.prune();
|
||||
}
|
||||
|
||||
|
||||
@@ -23,12 +23,16 @@ pub enum SyncState {
|
||||
|
||||
impl PartialEq for SyncState {
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
matches!((self, other),
|
||||
(SyncState::SyncingFinalized { .. }, SyncState::SyncingFinalized { .. }) |
|
||||
(SyncState::SyncingHead { .. }, SyncState::SyncingHead { .. }) |
|
||||
(SyncState::Synced, SyncState::Synced) |
|
||||
(SyncState::Stalled, SyncState::Stalled) |
|
||||
(SyncState::SyncTransition, SyncState::SyncTransition))
|
||||
matches!(
|
||||
(self, other),
|
||||
(
|
||||
SyncState::SyncingFinalized { .. },
|
||||
SyncState::SyncingFinalized { .. }
|
||||
) | (SyncState::SyncingHead { .. }, SyncState::SyncingHead { .. })
|
||||
| (SyncState::Synced, SyncState::Synced)
|
||||
| (SyncState::Stalled, SyncState::Stalled)
|
||||
| (SyncState::SyncTransition, SyncState::SyncTransition)
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -315,7 +315,7 @@ fn test_blocks_by_range_chunked_rpc_terminates_correctly() {
|
||||
// sent in the timeout
|
||||
match futures::future::select(
|
||||
Box::pin(receiver.next_event()),
|
||||
tokio::time::sleep(Duration::from_secs(1)),
|
||||
Box::pin(tokio::time::sleep(Duration::from_secs(1))),
|
||||
)
|
||||
.await
|
||||
{
|
||||
@@ -692,7 +692,7 @@ fn test_blocks_by_root_chunked_rpc_terminates_correctly() {
|
||||
// sent in the timeout
|
||||
match futures::future::select(
|
||||
Box::pin(receiver.next_event()),
|
||||
tokio::time::sleep(Duration::from_millis(1000)),
|
||||
Box::pin(tokio::time::sleep(Duration::from_secs(1))),
|
||||
)
|
||||
.await
|
||||
{
|
||||
|
||||
@@ -19,7 +19,7 @@ merkle_proof = { path = "../../consensus/merkle_proof" }
|
||||
eth2_ssz = "0.1.2"
|
||||
eth2_hashing = "0.1.0"
|
||||
tree_hash = "0.1.1"
|
||||
tokio = { version = "0.3.2", features = ["full"] }
|
||||
tokio = { version = "1.1.0", features = ["full"] }
|
||||
parking_lot = "0.11.0"
|
||||
slog = "2.5.2"
|
||||
exit-future = "0.2.0"
|
||||
|
||||
@@ -5,9 +5,11 @@ authors = ["Paul Hauner <paul@paulhauner.com>"]
|
||||
edition = "2018"
|
||||
|
||||
[dependencies]
|
||||
warp = { git = "https://github.com/sigp/warp ", branch = "lighthouse" }
|
||||
warp = "0.3.0"
|
||||
serde = { version = "1.0.116", features = ["derive"] }
|
||||
tokio = { version = "0.3.2", features = ["macros","stream","sync"] }
|
||||
tokio = { version = "1.1.0", features = ["macros","sync"] }
|
||||
tokio-stream = "0.1.2"
|
||||
tokio-util = "0.6.3"
|
||||
parking_lot = "0.11.0"
|
||||
types = { path = "../../consensus/types" }
|
||||
hex = "0.4.2"
|
||||
@@ -32,5 +34,4 @@ futures = "0.3.8"
|
||||
store = { path = "../store" }
|
||||
environment = { path = "../../lighthouse/environment" }
|
||||
tree_hash = "0.1.1"
|
||||
discv5 = { version = "0.1.0-beta.2", features = ["libp2p"] }
|
||||
tokio-compat-02 = "0.1"
|
||||
discv5 = { version = "0.1.0-beta.3" }
|
||||
|
||||
66
beacon_node/http_api/src/broadcast_stream.rs
Normal file
66
beacon_node/http_api/src/broadcast_stream.rs
Normal file
@@ -0,0 +1,66 @@
|
||||
// TODO: this should be replaced with the tokio's `BroadcastStream` once it's added to
|
||||
// tokio-stream (https://github.com/tokio-rs/tokio/pull/3384)
|
||||
|
||||
use std::fmt;
|
||||
use std::pin::Pin;
|
||||
use std::task::{Context, Poll};
|
||||
use tokio::sync::broadcast::error::RecvError;
|
||||
use tokio::sync::broadcast::Receiver;
|
||||
use tokio_stream::Stream;
|
||||
use tokio_util::sync::ReusableBoxFuture;
|
||||
|
||||
/// A wrapper around [`tokio::sync::broadcast::Receiver`] that implements [`Stream`].
|
||||
///
|
||||
/// [`tokio::sync::broadcast::Receiver`]: struct@tokio::sync::broadcast::Receiver
|
||||
/// [`Stream`]: trait@crate::Stream
|
||||
pub struct BroadcastStream<T> {
|
||||
inner: ReusableBoxFuture<(Result<T, RecvError>, Receiver<T>)>,
|
||||
}
|
||||
|
||||
/// An error returned from the inner stream of a [`BroadcastStream`].
|
||||
#[derive(Debug, PartialEq)]
|
||||
pub enum BroadcastStreamRecvError {
|
||||
/// The receiver lagged too far behind. Attempting to receive again will
|
||||
/// return the oldest message still retained by the channel.
|
||||
///
|
||||
/// Includes the number of skipped messages.
|
||||
Lagged(u64),
|
||||
}
|
||||
|
||||
async fn make_future<T: Clone>(mut rx: Receiver<T>) -> (Result<T, RecvError>, Receiver<T>) {
|
||||
let result = rx.recv().await;
|
||||
(result, rx)
|
||||
}
|
||||
|
||||
impl<T: 'static + Clone + Send> BroadcastStream<T> {
|
||||
/// Create a new `BroadcastStream`.
|
||||
pub fn new(rx: Receiver<T>) -> Self {
|
||||
Self {
|
||||
inner: ReusableBoxFuture::new(make_future(rx)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: 'static + Clone + Send> Stream for BroadcastStream<T> {
|
||||
type Item = Result<T, BroadcastStreamRecvError>;
|
||||
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
|
||||
let (result, rx) = match self.inner.poll(cx) {
|
||||
std::task::Poll::Ready(t) => t,
|
||||
std::task::Poll::Pending => return std::task::Poll::Pending,
|
||||
};
|
||||
self.inner.set(make_future(rx));
|
||||
match result {
|
||||
Ok(item) => Poll::Ready(Some(Ok(item))),
|
||||
Err(RecvError::Closed) => Poll::Ready(None),
|
||||
Err(RecvError::Lagged(n)) => {
|
||||
Poll::Ready(Some(Err(BroadcastStreamRecvError::Lagged(n))))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> fmt::Debug for BroadcastStream<T> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.debug_struct("BroadcastStream").finish()
|
||||
}
|
||||
}
|
||||
@@ -7,6 +7,7 @@
|
||||
|
||||
mod beacon_proposer_cache;
|
||||
mod block_id;
|
||||
mod broadcast_stream;
|
||||
mod metrics;
|
||||
mod state_id;
|
||||
mod validator_inclusion;
|
||||
@@ -18,7 +19,7 @@ use beacon_chain::{
|
||||
};
|
||||
use beacon_proposer_cache::BeaconProposerCache;
|
||||
use block_id::BlockId;
|
||||
use eth2::types::{self as api_types, EventKind, ValidatorId};
|
||||
use eth2::types::{self as api_types, ValidatorId};
|
||||
use eth2_libp2p::{types::SyncState, EnrExt, NetworkGlobals, PeerId, PubsubMessage};
|
||||
use lighthouse_version::version_with_platform;
|
||||
use network::NetworkMessage;
|
||||
@@ -34,19 +35,17 @@ use std::convert::TryInto;
|
||||
use std::future::Future;
|
||||
use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4};
|
||||
use std::sync::Arc;
|
||||
use tokio::stream::{StreamExt, StreamMap};
|
||||
use tokio::sync::broadcast::error::RecvError;
|
||||
use tokio::sync::mpsc::UnboundedSender;
|
||||
use tokio_stream::StreamExt;
|
||||
use types::{
|
||||
Attestation, AttestationDuty, AttesterSlashing, CloneConfig, CommitteeCache, Epoch, EthSpec,
|
||||
Hash256, ProposerSlashing, PublicKey, PublicKeyBytes, RelativeEpoch, SignedAggregateAndProof,
|
||||
SignedBeaconBlock, SignedVoluntaryExit, Slot, YamlConfig,
|
||||
};
|
||||
use warp::http::StatusCode;
|
||||
use warp::sse::ServerSentEvent;
|
||||
use warp::sse::Event;
|
||||
use warp::Reply;
|
||||
use warp::{http::Response, Filter, Stream};
|
||||
use warp_utils::reject::ServerSentEventError;
|
||||
use warp::{http::Response, Filter};
|
||||
use warp_utils::task::{blocking_json_task, blocking_task};
|
||||
|
||||
const API_PREFIX: &str = "eth";
|
||||
@@ -1610,9 +1609,9 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
.and(warp::path("duties"))
|
||||
.and(warp::path("proposer"))
|
||||
.and(warp::path::param::<Epoch>().or_else(|_| async {
|
||||
Err(warp_utils::reject::custom_bad_request(
|
||||
"Invalid epoch".to_string(),
|
||||
))
|
||||
Err(warp_utils::reject::custom_bad_request(
|
||||
"Invalid epoch".to_string(),
|
||||
))
|
||||
}))
|
||||
.and(warp::path::end())
|
||||
.and(not_while_syncing_filter.clone())
|
||||
@@ -1637,7 +1636,7 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
if epoch == current_epoch {
|
||||
let dependent_root_slot = current_epoch
|
||||
.start_slot(T::EthSpec::slots_per_epoch()) - 1;
|
||||
let dependent_root = if dependent_root_slot > chain.best_slot().map_err(warp_utils::reject::beacon_chain_error)? {
|
||||
let dependent_root = if dependent_root_slot > chain.best_slot().map_err(warp_utils::reject::beacon_chain_error)? {
|
||||
chain.head_beacon_block_root().map_err(warp_utils::reject::beacon_chain_error)?
|
||||
} else {
|
||||
chain
|
||||
@@ -1649,7 +1648,7 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
beacon_proposer_cache
|
||||
.lock()
|
||||
.get_proposers(&chain, epoch)
|
||||
.map(|duties| api_types::DutiesResponse{ data: duties, dependent_root} )
|
||||
.map(|duties| api_types::DutiesResponse { data: duties, dependent_root })
|
||||
} else {
|
||||
let state =
|
||||
StateId::slot(epoch.start_slot(T::EthSpec::slots_per_epoch()))
|
||||
@@ -1657,7 +1656,7 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
|
||||
let dependent_root_slot = state.current_epoch()
|
||||
.start_slot(T::EthSpec::slots_per_epoch()) - 1;
|
||||
let dependent_root = if dependent_root_slot > chain.best_slot().map_err(warp_utils::reject::beacon_chain_error)? {
|
||||
let dependent_root = if dependent_root_slot > chain.best_slot().map_err(warp_utils::reject::beacon_chain_error)? {
|
||||
chain.head_beacon_block_root().map_err(warp_utils::reject::beacon_chain_error)?
|
||||
} else {
|
||||
chain
|
||||
@@ -1691,8 +1690,7 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
})
|
||||
.collect::<Result<Vec<api_types::ProposerData>, _>>()
|
||||
.map(|duties| {
|
||||
|
||||
api_types::DutiesResponse{
|
||||
api_types::DutiesResponse {
|
||||
dependent_root,
|
||||
data: duties,
|
||||
}
|
||||
@@ -2053,7 +2051,7 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
"attestation_slot" => aggregate.message.aggregate.data.slot,
|
||||
);
|
||||
failures.push(api_types::Failure::new(index, format!("Verification: {:?}", e)));
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2087,7 +2085,7 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
|
||||
if !failures.is_empty() {
|
||||
Err(warp_utils::reject::indexed_bad_request("error processing aggregate and proofs".to_string(),
|
||||
failures
|
||||
failures,
|
||||
))
|
||||
} else {
|
||||
Ok(())
|
||||
@@ -2358,24 +2356,6 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
})
|
||||
});
|
||||
|
||||
fn merge_streams<T: EthSpec>(
|
||||
stream_map: StreamMap<
|
||||
String,
|
||||
impl Stream<Item = Result<EventKind<T>, RecvError>> + Unpin + Send + 'static,
|
||||
>,
|
||||
) -> impl Stream<Item = Result<impl ServerSentEvent + Send + 'static, ServerSentEventError>>
|
||||
+ Send
|
||||
+ 'static {
|
||||
// Convert messages into Server-Sent Events and return resulting stream.
|
||||
stream_map.map(move |(topic_name, msg)| match msg {
|
||||
Ok(data) => Ok((warp::sse::event(topic_name), warp::sse::json(data)).boxed()),
|
||||
Err(e) => Err(warp_utils::reject::server_sent_event_error(format!(
|
||||
"{:?}",
|
||||
e
|
||||
))),
|
||||
})
|
||||
}
|
||||
|
||||
let get_events = eth1_v1
|
||||
.and(warp::path("events"))
|
||||
.and(warp::path::end())
|
||||
@@ -2385,7 +2365,7 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
|topics: api_types::EventQuery, chain: Arc<BeaconChain<T>>| {
|
||||
blocking_task(move || {
|
||||
// for each topic subscribed spawn a new subscription
|
||||
let mut stream_map = StreamMap::with_capacity(topics.topics.0.len());
|
||||
let mut receivers = Vec::with_capacity(topics.topics.0.len());
|
||||
|
||||
if let Some(event_handler) = chain.event_handler.as_ref() {
|
||||
for topic in topics.topics.0.clone() {
|
||||
@@ -2402,7 +2382,24 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
event_handler.subscribe_finalized()
|
||||
}
|
||||
};
|
||||
stream_map.insert(topic.to_string(), Box::pin(receiver.into_stream()));
|
||||
|
||||
receivers.push(broadcast_stream::BroadcastStream::new(receiver).map(
|
||||
|msg| {
|
||||
match msg {
|
||||
Ok(data) => Event::default()
|
||||
.event(data.topic_name())
|
||||
.json_data(data)
|
||||
.map_err(|e| {
|
||||
warp_utils::reject::server_sent_event_error(
|
||||
format!("{:?}", e),
|
||||
)
|
||||
}),
|
||||
Err(e) => Err(warp_utils::reject::server_sent_event_error(
|
||||
format!("{:?}", e),
|
||||
)),
|
||||
}
|
||||
},
|
||||
));
|
||||
}
|
||||
} else {
|
||||
return Err(warp_utils::reject::custom_server_error(
|
||||
@@ -2410,11 +2407,9 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
));
|
||||
}
|
||||
|
||||
let stream = merge_streams(stream_map);
|
||||
let s = futures::stream::select_all(receivers);
|
||||
|
||||
Ok::<_, warp::Rejection>(warp::sse::reply(
|
||||
warp::sse::keep_alive().stream(stream),
|
||||
))
|
||||
Ok::<_, warp::Rejection>(warp::sse::reply(warp::sse::keep_alive().stream(s)))
|
||||
})
|
||||
},
|
||||
);
|
||||
|
||||
@@ -15,6 +15,7 @@ use eth2_libp2p::{
|
||||
Enr, EnrExt, NetworkGlobals, PeerId,
|
||||
};
|
||||
use futures::stream::{Stream, StreamExt};
|
||||
use futures::FutureExt;
|
||||
use http_api::{Config, Context};
|
||||
use network::NetworkMessage;
|
||||
use state_processing::per_slot_processing;
|
||||
@@ -25,7 +26,6 @@ use std::sync::Arc;
|
||||
use tokio::sync::mpsc;
|
||||
use tokio::sync::oneshot;
|
||||
use tokio::time::Duration;
|
||||
use tokio_compat_02::FutureExt;
|
||||
use tree_hash::TreeHash;
|
||||
use types::{
|
||||
test_utils::generate_deterministic_keypairs, AggregateSignature, BeaconState, BitList, Domain,
|
||||
@@ -933,7 +933,7 @@ impl ApiTester {
|
||||
self.client.post_beacon_blocks(next_block).await.unwrap();
|
||||
|
||||
assert!(
|
||||
self.network_rx.try_recv().is_ok(),
|
||||
self.network_rx.recv().await.is_some(),
|
||||
"valid blocks should be sent to network"
|
||||
);
|
||||
|
||||
@@ -947,7 +947,7 @@ impl ApiTester {
|
||||
assert!(self.client.post_beacon_blocks(&next_block).await.is_err());
|
||||
|
||||
assert!(
|
||||
self.network_rx.try_recv().is_ok(),
|
||||
self.network_rx.recv().await.is_some(),
|
||||
"invalid blocks should be sent to network"
|
||||
);
|
||||
|
||||
@@ -997,7 +997,7 @@ impl ApiTester {
|
||||
.unwrap();
|
||||
|
||||
assert!(
|
||||
self.network_rx.try_recv().is_ok(),
|
||||
self.network_rx.recv().await.is_some(),
|
||||
"valid attestation should be sent to network"
|
||||
);
|
||||
|
||||
@@ -1034,7 +1034,7 @@ impl ApiTester {
|
||||
}
|
||||
|
||||
assert!(
|
||||
self.network_rx.try_recv().is_ok(),
|
||||
self.network_rx.recv().await.is_some(),
|
||||
"if some attestations are valid, we should send them to the network"
|
||||
);
|
||||
|
||||
@@ -1064,7 +1064,7 @@ impl ApiTester {
|
||||
.unwrap();
|
||||
|
||||
assert!(
|
||||
self.network_rx.try_recv().is_ok(),
|
||||
self.network_rx.recv().await.is_some(),
|
||||
"valid attester slashing should be sent to network"
|
||||
);
|
||||
|
||||
@@ -1081,7 +1081,7 @@ impl ApiTester {
|
||||
.unwrap_err();
|
||||
|
||||
assert!(
|
||||
self.network_rx.try_recv().is_err(),
|
||||
self.network_rx.recv().now_or_never().is_none(),
|
||||
"invalid attester slashing should not be sent to network"
|
||||
);
|
||||
|
||||
@@ -1110,7 +1110,7 @@ impl ApiTester {
|
||||
.unwrap();
|
||||
|
||||
assert!(
|
||||
self.network_rx.try_recv().is_ok(),
|
||||
self.network_rx.recv().await.is_some(),
|
||||
"valid proposer slashing should be sent to network"
|
||||
);
|
||||
|
||||
@@ -1127,7 +1127,7 @@ impl ApiTester {
|
||||
.unwrap_err();
|
||||
|
||||
assert!(
|
||||
self.network_rx.try_recv().is_err(),
|
||||
self.network_rx.recv().now_or_never().is_none(),
|
||||
"invalid proposer slashing should not be sent to network"
|
||||
);
|
||||
|
||||
@@ -1156,7 +1156,7 @@ impl ApiTester {
|
||||
.unwrap();
|
||||
|
||||
assert!(
|
||||
self.network_rx.try_recv().is_ok(),
|
||||
self.network_rx.recv().await.is_some(),
|
||||
"valid exit should be sent to network"
|
||||
);
|
||||
|
||||
@@ -1173,7 +1173,7 @@ impl ApiTester {
|
||||
.unwrap_err();
|
||||
|
||||
assert!(
|
||||
self.network_rx.try_recv().is_err(),
|
||||
self.network_rx.recv().now_or_never().is_none(),
|
||||
"invalid exit should not be sent to network"
|
||||
);
|
||||
|
||||
@@ -1822,7 +1822,7 @@ impl ApiTester {
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
assert!(self.network_rx.try_recv().is_ok());
|
||||
assert!(self.network_rx.recv().await.is_some());
|
||||
|
||||
self
|
||||
}
|
||||
@@ -1837,7 +1837,7 @@ impl ApiTester {
|
||||
.await
|
||||
.unwrap_err();
|
||||
|
||||
assert!(self.network_rx.try_recv().is_err());
|
||||
assert!(self.network_rx.recv().now_or_never().is_none());
|
||||
|
||||
self
|
||||
}
|
||||
@@ -1856,7 +1856,7 @@ impl ApiTester {
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
self.network_rx.try_recv().unwrap();
|
||||
self.network_rx.recv().now_or_never().unwrap();
|
||||
|
||||
self
|
||||
}
|
||||
@@ -2127,83 +2127,71 @@ async fn poll_events<S: Stream<Item = Result<EventKind<T>, eth2::Error>> + Unpin
|
||||
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn get_events() {
|
||||
ApiTester::new().test_get_events().compat().await;
|
||||
ApiTester::new().test_get_events().await;
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn get_events_from_genesis() {
|
||||
ApiTester::new_from_genesis()
|
||||
.test_get_events_from_genesis()
|
||||
.compat()
|
||||
.await;
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn beacon_get() {
|
||||
async {
|
||||
ApiTester::new()
|
||||
.test_beacon_genesis()
|
||||
.await
|
||||
.test_beacon_states_root()
|
||||
.await
|
||||
.test_beacon_states_fork()
|
||||
.await
|
||||
.test_beacon_states_finality_checkpoints()
|
||||
.await
|
||||
.test_beacon_states_validators()
|
||||
.await
|
||||
.test_beacon_states_validator_balances()
|
||||
.await
|
||||
.test_beacon_states_committees()
|
||||
.await
|
||||
.test_beacon_states_validator_id()
|
||||
.await
|
||||
.test_beacon_headers_all_slots()
|
||||
.await
|
||||
.test_beacon_headers_all_parents()
|
||||
.await
|
||||
.test_beacon_headers_block_id()
|
||||
.await
|
||||
.test_beacon_blocks()
|
||||
.await
|
||||
.test_beacon_blocks_attestations()
|
||||
.await
|
||||
.test_beacon_blocks_root()
|
||||
.await
|
||||
.test_get_beacon_pool_attestations()
|
||||
.await
|
||||
.test_get_beacon_pool_attester_slashings()
|
||||
.await
|
||||
.test_get_beacon_pool_proposer_slashings()
|
||||
.await
|
||||
.test_get_beacon_pool_voluntary_exits()
|
||||
.await;
|
||||
}
|
||||
.compat()
|
||||
.await;
|
||||
ApiTester::new()
|
||||
.test_beacon_genesis()
|
||||
.await
|
||||
.test_beacon_states_root()
|
||||
.await
|
||||
.test_beacon_states_fork()
|
||||
.await
|
||||
.test_beacon_states_finality_checkpoints()
|
||||
.await
|
||||
.test_beacon_states_validators()
|
||||
.await
|
||||
.test_beacon_states_validator_balances()
|
||||
.await
|
||||
.test_beacon_states_committees()
|
||||
.await
|
||||
.test_beacon_states_validator_id()
|
||||
.await
|
||||
.test_beacon_headers_all_slots()
|
||||
.await
|
||||
.test_beacon_headers_all_parents()
|
||||
.await
|
||||
.test_beacon_headers_block_id()
|
||||
.await
|
||||
.test_beacon_blocks()
|
||||
.await
|
||||
.test_beacon_blocks_attestations()
|
||||
.await
|
||||
.test_beacon_blocks_root()
|
||||
.await
|
||||
.test_get_beacon_pool_attestations()
|
||||
.await
|
||||
.test_get_beacon_pool_attester_slashings()
|
||||
.await
|
||||
.test_get_beacon_pool_proposer_slashings()
|
||||
.await
|
||||
.test_get_beacon_pool_voluntary_exits()
|
||||
.await;
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn post_beacon_blocks_valid() {
|
||||
ApiTester::new()
|
||||
.test_post_beacon_blocks_valid()
|
||||
.compat()
|
||||
.await;
|
||||
ApiTester::new().test_post_beacon_blocks_valid().await;
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn post_beacon_blocks_invalid() {
|
||||
ApiTester::new()
|
||||
.test_post_beacon_blocks_invalid()
|
||||
.compat()
|
||||
.await;
|
||||
ApiTester::new().test_post_beacon_blocks_invalid().await;
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn beacon_pools_post_attestations_valid() {
|
||||
ApiTester::new()
|
||||
.test_post_beacon_pool_attestations_valid()
|
||||
.compat()
|
||||
.await;
|
||||
}
|
||||
|
||||
@@ -2211,7 +2199,6 @@ async fn beacon_pools_post_attestations_valid() {
|
||||
async fn beacon_pools_post_attestations_invalid() {
|
||||
ApiTester::new()
|
||||
.test_post_beacon_pool_attestations_invalid()
|
||||
.compat()
|
||||
.await;
|
||||
}
|
||||
|
||||
@@ -2219,7 +2206,6 @@ async fn beacon_pools_post_attestations_invalid() {
|
||||
async fn beacon_pools_post_attester_slashings_valid() {
|
||||
ApiTester::new()
|
||||
.test_post_beacon_pool_attester_slashings_valid()
|
||||
.compat()
|
||||
.await;
|
||||
}
|
||||
|
||||
@@ -2227,7 +2213,6 @@ async fn beacon_pools_post_attester_slashings_valid() {
|
||||
async fn beacon_pools_post_attester_slashings_invalid() {
|
||||
ApiTester::new()
|
||||
.test_post_beacon_pool_attester_slashings_invalid()
|
||||
.compat()
|
||||
.await;
|
||||
}
|
||||
|
||||
@@ -2235,7 +2220,6 @@ async fn beacon_pools_post_attester_slashings_invalid() {
|
||||
async fn beacon_pools_post_proposer_slashings_valid() {
|
||||
ApiTester::new()
|
||||
.test_post_beacon_pool_proposer_slashings_valid()
|
||||
.compat()
|
||||
.await;
|
||||
}
|
||||
|
||||
@@ -2243,7 +2227,6 @@ async fn beacon_pools_post_proposer_slashings_valid() {
|
||||
async fn beacon_pools_post_proposer_slashings_invalid() {
|
||||
ApiTester::new()
|
||||
.test_post_beacon_pool_proposer_slashings_invalid()
|
||||
.compat()
|
||||
.await;
|
||||
}
|
||||
|
||||
@@ -2251,7 +2234,6 @@ async fn beacon_pools_post_proposer_slashings_invalid() {
|
||||
async fn beacon_pools_post_voluntary_exits_valid() {
|
||||
ApiTester::new()
|
||||
.test_post_beacon_pool_voluntary_exits_valid()
|
||||
.compat()
|
||||
.await;
|
||||
}
|
||||
|
||||
@@ -2259,7 +2241,6 @@ async fn beacon_pools_post_voluntary_exits_valid() {
|
||||
async fn beacon_pools_post_voluntary_exits_invalid() {
|
||||
ApiTester::new()
|
||||
.test_post_beacon_pool_voluntary_exits_invalid()
|
||||
.compat()
|
||||
.await;
|
||||
}
|
||||
|
||||
@@ -2267,13 +2248,10 @@ async fn beacon_pools_post_voluntary_exits_invalid() {
|
||||
async fn config_get() {
|
||||
ApiTester::new()
|
||||
.test_get_config_fork_schedule()
|
||||
.compat()
|
||||
.await
|
||||
.test_get_config_spec()
|
||||
.compat()
|
||||
.await
|
||||
.test_get_config_deposit_contract()
|
||||
.compat()
|
||||
.await;
|
||||
}
|
||||
|
||||
@@ -2281,10 +2259,8 @@ async fn config_get() {
|
||||
async fn debug_get() {
|
||||
ApiTester::new()
|
||||
.test_get_debug_beacon_states()
|
||||
.compat()
|
||||
.await
|
||||
.test_get_debug_beacon_heads()
|
||||
.compat()
|
||||
.await;
|
||||
}
|
||||
|
||||
@@ -2292,34 +2268,24 @@ async fn debug_get() {
|
||||
async fn node_get() {
|
||||
ApiTester::new()
|
||||
.test_get_node_version()
|
||||
.compat()
|
||||
.await
|
||||
.test_get_node_syncing()
|
||||
.compat()
|
||||
.await
|
||||
.test_get_node_identity()
|
||||
.compat()
|
||||
.await
|
||||
.test_get_node_health()
|
||||
.compat()
|
||||
.await
|
||||
.test_get_node_peers_by_id()
|
||||
.compat()
|
||||
.await
|
||||
.test_get_node_peers()
|
||||
.compat()
|
||||
.await
|
||||
.test_get_node_peer_count()
|
||||
.compat()
|
||||
.await;
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn get_validator_duties_attester() {
|
||||
ApiTester::new()
|
||||
.test_get_validator_duties_attester()
|
||||
.compat()
|
||||
.await;
|
||||
ApiTester::new().test_get_validator_duties_attester().await;
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
@@ -2327,16 +2293,12 @@ async fn get_validator_duties_attester_with_skip_slots() {
|
||||
ApiTester::new()
|
||||
.skip_slots(E::slots_per_epoch() * 2)
|
||||
.test_get_validator_duties_attester()
|
||||
.compat()
|
||||
.await;
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn get_validator_duties_proposer() {
|
||||
ApiTester::new()
|
||||
.test_get_validator_duties_proposer()
|
||||
.compat()
|
||||
.await;
|
||||
ApiTester::new().test_get_validator_duties_proposer().await;
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
@@ -2344,13 +2306,12 @@ async fn get_validator_duties_proposer_with_skip_slots() {
|
||||
ApiTester::new()
|
||||
.skip_slots(E::slots_per_epoch() * 2)
|
||||
.test_get_validator_duties_proposer()
|
||||
.compat()
|
||||
.await;
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn block_production() {
|
||||
ApiTester::new().test_block_production().compat().await;
|
||||
ApiTester::new().test_block_production().await;
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
@@ -2358,16 +2319,12 @@ async fn block_production_with_skip_slots() {
|
||||
ApiTester::new()
|
||||
.skip_slots(E::slots_per_epoch() * 2)
|
||||
.test_block_production()
|
||||
.compat()
|
||||
.await;
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn get_validator_attestation_data() {
|
||||
ApiTester::new()
|
||||
.test_get_validator_attestation_data()
|
||||
.compat()
|
||||
.await;
|
||||
ApiTester::new().test_get_validator_attestation_data().await;
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
@@ -2375,7 +2332,6 @@ async fn get_validator_attestation_data_with_skip_slots() {
|
||||
ApiTester::new()
|
||||
.skip_slots(E::slots_per_epoch() * 2)
|
||||
.test_get_validator_attestation_data()
|
||||
.compat()
|
||||
.await;
|
||||
}
|
||||
|
||||
@@ -2383,7 +2339,6 @@ async fn get_validator_attestation_data_with_skip_slots() {
|
||||
async fn get_validator_aggregate_attestation() {
|
||||
ApiTester::new()
|
||||
.test_get_validator_aggregate_attestation()
|
||||
.compat()
|
||||
.await;
|
||||
}
|
||||
|
||||
@@ -2392,7 +2347,6 @@ async fn get_validator_aggregate_attestation_with_skip_slots() {
|
||||
ApiTester::new()
|
||||
.skip_slots(E::slots_per_epoch() * 2)
|
||||
.test_get_validator_aggregate_attestation()
|
||||
.compat()
|
||||
.await;
|
||||
}
|
||||
|
||||
@@ -2400,7 +2354,6 @@ async fn get_validator_aggregate_attestation_with_skip_slots() {
|
||||
async fn get_validator_aggregate_and_proofs_valid() {
|
||||
ApiTester::new()
|
||||
.test_get_validator_aggregate_and_proofs_valid()
|
||||
.compat()
|
||||
.await;
|
||||
}
|
||||
|
||||
@@ -2409,7 +2362,6 @@ async fn get_validator_aggregate_and_proofs_valid_with_skip_slots() {
|
||||
ApiTester::new()
|
||||
.skip_slots(E::slots_per_epoch() * 2)
|
||||
.test_get_validator_aggregate_and_proofs_valid()
|
||||
.compat()
|
||||
.await;
|
||||
}
|
||||
|
||||
@@ -2417,7 +2369,6 @@ async fn get_validator_aggregate_and_proofs_valid_with_skip_slots() {
|
||||
async fn get_validator_aggregate_and_proofs_invalid() {
|
||||
ApiTester::new()
|
||||
.test_get_validator_aggregate_and_proofs_invalid()
|
||||
.compat()
|
||||
.await;
|
||||
}
|
||||
|
||||
@@ -2426,7 +2377,6 @@ async fn get_validator_aggregate_and_proofs_invalid_with_skip_slots() {
|
||||
ApiTester::new()
|
||||
.skip_slots(E::slots_per_epoch() * 2)
|
||||
.test_get_validator_aggregate_and_proofs_invalid()
|
||||
.compat()
|
||||
.await;
|
||||
}
|
||||
|
||||
@@ -2434,7 +2384,6 @@ async fn get_validator_aggregate_and_proofs_invalid_with_skip_slots() {
|
||||
async fn get_validator_beacon_committee_subscriptions() {
|
||||
ApiTester::new()
|
||||
.test_get_validator_beacon_committee_subscriptions()
|
||||
.compat()
|
||||
.await;
|
||||
}
|
||||
|
||||
@@ -2442,33 +2391,23 @@ async fn get_validator_beacon_committee_subscriptions() {
|
||||
async fn lighthouse_endpoints() {
|
||||
ApiTester::new()
|
||||
.test_get_lighthouse_health()
|
||||
.compat()
|
||||
.await
|
||||
.test_get_lighthouse_syncing()
|
||||
.compat()
|
||||
.await
|
||||
.test_get_lighthouse_proto_array()
|
||||
.compat()
|
||||
.await
|
||||
.test_get_lighthouse_validator_inclusion()
|
||||
.compat()
|
||||
.await
|
||||
.test_get_lighthouse_validator_inclusion_global()
|
||||
.compat()
|
||||
.await
|
||||
.test_get_lighthouse_eth1_syncing()
|
||||
.compat()
|
||||
.await
|
||||
.test_get_lighthouse_eth1_block_cache()
|
||||
.compat()
|
||||
.await
|
||||
.test_get_lighthouse_eth1_deposit_cache()
|
||||
.compat()
|
||||
.await
|
||||
.test_get_lighthouse_beacon_states_ssz()
|
||||
.compat()
|
||||
.await
|
||||
.test_get_lighthouse_staking()
|
||||
.compat()
|
||||
.await;
|
||||
}
|
||||
|
||||
@@ -8,7 +8,7 @@ edition = "2018"
|
||||
|
||||
[dependencies]
|
||||
prometheus = "0.11.0"
|
||||
warp = { git = "https://github.com/sigp/warp ", branch = "lighthouse" }
|
||||
warp = "0.3.0"
|
||||
serde = { version = "1.0.116", features = ["derive"] }
|
||||
slog = "2.5.2"
|
||||
beacon_chain = { path = "../beacon_chain" }
|
||||
@@ -22,8 +22,7 @@ lighthouse_version = { path = "../../common/lighthouse_version" }
|
||||
warp_utils = { path = "../../common/warp_utils" }
|
||||
|
||||
[dev-dependencies]
|
||||
tokio = { version = "0.3.2", features = ["sync"] }
|
||||
reqwest = { version = "0.10.8", features = ["json"] }
|
||||
tokio = { version = "1.1.0", features = ["sync"] }
|
||||
reqwest = { version = "0.11.0", features = ["json"] }
|
||||
environment = { path = "../../lighthouse/environment" }
|
||||
types = { path = "../../consensus/types" }
|
||||
tokio-compat-02 = "0.1"
|
||||
|
||||
@@ -5,7 +5,6 @@ use reqwest::StatusCode;
|
||||
use std::net::Ipv4Addr;
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::oneshot;
|
||||
use tokio_compat_02::FutureExt;
|
||||
use types::MainnetEthSpec;
|
||||
|
||||
type Context = http_metrics::Context<EphemeralHarnessType<MainnetEthSpec>>;
|
||||
@@ -46,6 +45,5 @@ async fn returns_200_ok() {
|
||||
|
||||
assert_eq!(reqwest::get(&url).await.unwrap().status(), StatusCode::OK);
|
||||
}
|
||||
.compat()
|
||||
.await
|
||||
}
|
||||
|
||||
@@ -30,12 +30,13 @@ eth2_ssz_types = { path = "../../consensus/ssz_types" }
|
||||
tree_hash = "0.1.1"
|
||||
futures = "0.3.7"
|
||||
error-chain = "0.12.4"
|
||||
tokio = { version = "0.3.2", features = ["full"] }
|
||||
tokio = { version = "1.1.0", features = ["full"] }
|
||||
tokio-stream = "0.1.2"
|
||||
parking_lot = "0.11.0"
|
||||
smallvec = "1.6.1"
|
||||
rand = "0.7.3"
|
||||
fnv = "1.0.7"
|
||||
rlp = "0.4.6"
|
||||
rlp = "0.5.0"
|
||||
lazy_static = "1.4.0"
|
||||
lighthouse_metrics = { path = "../../common/lighthouse_metrics" }
|
||||
task_executor = { path = "../../common/task_executor" }
|
||||
|
||||
@@ -38,7 +38,7 @@ impl StoreItem for PersistedDht {
|
||||
}
|
||||
|
||||
fn as_store_bytes(&self) -> Vec<u8> {
|
||||
rlp::encode_list(&self.enrs)
|
||||
rlp::encode_list(&self.enrs).to_vec()
|
||||
}
|
||||
|
||||
fn from_store_bytes(bytes: &[u8]) -> Result<Self, StoreError> {
|
||||
|
||||
@@ -19,6 +19,7 @@ use processor::Processor;
|
||||
use slog::{debug, o, trace};
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::mpsc;
|
||||
use tokio_stream::wrappers::UnboundedReceiverStream;
|
||||
use types::EthSpec;
|
||||
|
||||
/// Handles messages received from the network and client and organises syncing. This
|
||||
@@ -101,7 +102,7 @@ impl<T: BeaconChainTypes> Router<T> {
|
||||
executor.spawn(
|
||||
async move {
|
||||
debug!(log, "Network message router started");
|
||||
handler_recv
|
||||
UnboundedReceiverStream::new(handler_recv)
|
||||
.for_each(move |msg| future::ready(handler.handle_message(msg)))
|
||||
.await;
|
||||
},
|
||||
|
||||
@@ -266,7 +266,7 @@ fn spawn_service<T: BeaconChainTypes>(
|
||||
info!(service.log, "Network service shutdown");
|
||||
return;
|
||||
}
|
||||
_ = service.metrics_update.next() => {
|
||||
_ = service.metrics_update.tick() => {
|
||||
// update various network metrics
|
||||
metric_update_counter +=1;
|
||||
if metric_update_counter % T::EthSpec::default_spec().seconds_per_slot == 0 {
|
||||
@@ -283,7 +283,7 @@ fn spawn_service<T: BeaconChainTypes>(
|
||||
metrics::update_sync_metrics(&service.network_globals);
|
||||
|
||||
}
|
||||
_ = service.gossipsub_parameter_update.next() => {
|
||||
_ = service.gossipsub_parameter_update.tick() => {
|
||||
if let Ok(slot) = service.beacon_chain.slot() {
|
||||
if let Some(active_validators) = service.beacon_chain.with_head(|head| {
|
||||
Ok::<_, BeaconChainError>(
|
||||
|
||||
@@ -1,14 +1,11 @@
|
||||
#![cfg(test)]
|
||||
|
||||
//TODO: Drop compat library once reqwest and other libraries update to tokio 0.3
|
||||
|
||||
use beacon_chain::StateSkipConfig;
|
||||
use node_test_rig::{
|
||||
environment::{Environment, EnvironmentBuilder},
|
||||
eth2::types::StateId,
|
||||
testing_client_config, LocalBeaconNode,
|
||||
};
|
||||
use tokio_compat_02::FutureExt;
|
||||
use types::{EthSpec, MinimalEthSpec, Slot};
|
||||
|
||||
fn env_builder() -> EnvironmentBuilder<MinimalEthSpec> {
|
||||
@@ -44,11 +41,7 @@ fn http_server_genesis_state() {
|
||||
|
||||
let api_state = env
|
||||
.runtime()
|
||||
.block_on(
|
||||
remote_node
|
||||
.get_debug_beacon_states(StateId::Slot(Slot::new(0)))
|
||||
.compat(),
|
||||
)
|
||||
.block_on(remote_node.get_debug_beacon_states(StateId::Slot(Slot::new(0))))
|
||||
.expect("should fetch state from http api")
|
||||
.unwrap()
|
||||
.data;
|
||||
|
||||
@@ -8,7 +8,7 @@ edition = "2018"
|
||||
beacon_chain = { path = "../beacon_chain" }
|
||||
types = { path = "../../consensus/types" }
|
||||
slot_clock = { path = "../../common/slot_clock" }
|
||||
tokio = { version = "0.3.2", features = ["full"] }
|
||||
tokio = { version = "1.1.0", features = ["full"] }
|
||||
slog = "2.5.2"
|
||||
parking_lot = "0.11.0"
|
||||
futures = "0.3.7"
|
||||
|
||||
@@ -3,7 +3,6 @@
|
||||
//! This service allows task execution on the beacon node for various functionality.
|
||||
|
||||
use beacon_chain::{BeaconChain, BeaconChainTypes};
|
||||
use futures::stream::StreamExt;
|
||||
use slog::info;
|
||||
use slot_clock::SlotClock;
|
||||
use std::sync::Arc;
|
||||
@@ -26,7 +25,8 @@ pub fn spawn_timer<T: BeaconChainTypes>(
|
||||
// Warning: `interval_at` panics if `seconds_per_slot` = 0.
|
||||
let mut interval = interval_at(start_instant, Duration::from_secs(seconds_per_slot));
|
||||
let timer_future = async move {
|
||||
while interval.next().await.is_some() {
|
||||
loop {
|
||||
interval.tick().await;
|
||||
beacon_chain.per_slot_task();
|
||||
}
|
||||
};
|
||||
|
||||
Reference in New Issue
Block a user