Merge branch 'master' into proto-array

This commit is contained in:
Paul Hauner
2020-01-24 17:00:32 +11:00
115 changed files with 3943 additions and 1029 deletions

View File

@@ -3,6 +3,7 @@ use crate::rpc::{RPCEvent, RPCMessage, RPC};
use crate::GossipTopic;
use crate::{error, NetworkConfig};
use crate::{Topic, TopicHash};
use enr::Enr;
use futures::prelude::*;
use libp2p::{
core::identity::Keypair,
@@ -57,7 +58,7 @@ impl<TSubstream: AsyncRead + AsyncWrite> Behaviour<TSubstream> {
net_conf: &NetworkConfig,
log: &slog::Logger,
) -> error::Result<Self> {
let local_peer_id = local_key.public().clone().into_peer_id();
let local_peer_id = local_key.public().into_peer_id();
let behaviour_log = log.new(o!());
let ping_config = PingConfig::new()
@@ -74,7 +75,7 @@ impl<TSubstream: AsyncRead + AsyncWrite> Behaviour<TSubstream> {
Ok(Behaviour {
eth2_rpc: RPC::new(log.clone()),
gossipsub: Gossipsub::new(local_peer_id.clone(), net_conf.gs_config.clone()),
gossipsub: Gossipsub::new(local_peer_id, net_conf.gs_config.clone()),
discovery: Discovery::new(local_key, net_conf, log)?,
ping: Ping::new(ping_config),
identify,
@@ -254,6 +255,16 @@ impl<TSubstream: AsyncRead + AsyncWrite> Behaviour<TSubstream> {
pub fn peer_unbanned(&mut self, peer_id: &PeerId) {
self.discovery.peer_unbanned(peer_id);
}
/// Returns an iterator over all enr entries in the DHT.
pub fn enr_entries(&mut self) -> impl Iterator<Item = &Enr> {
self.discovery.enr_entries()
}
/// Add an ENR to the routing table of the discovery mechanism.
pub fn add_enr(&mut self, enr: Enr) {
self.discovery.add_enr(enr);
}
}
/// The types of events that can be obtained from polling the behaviour.

View File

@@ -20,8 +20,9 @@ pub struct Config {
/// The TCP port that libp2p listens on.
pub libp2p_port: u16,
/// The address to broadcast to peers about which address we are listening on.
pub discovery_address: std::net::IpAddr,
/// The address to broadcast to peers about which address we are listening on. None indicates
/// that no discovery address has been set in the CLI args.
pub discovery_address: Option<std::net::IpAddr>,
/// UDP port that discovery listens on.
pub discovery_port: u16,
@@ -86,7 +87,7 @@ impl Default for Config {
network_dir,
listen_address: "127.0.0.1".parse().expect("valid ip address"),
libp2p_port: 9000,
discovery_address: "127.0.0.1".parse().expect("valid ip address"),
discovery_address: None,
discovery_port: 9000,
max_peers: 10,
secret_key_hex: None,

View File

@@ -148,6 +148,11 @@ impl<TSubstream> Discovery<TSubstream> {
self.banned_peers.remove(peer_id);
}
/// Returns an iterator over all enr entries in the DHT.
pub fn enr_entries(&mut self) -> impl Iterator<Item = &Enr> {
self.discovery.enr_entries()
}
/// Search for new peers using the underlying discovery mechanism.
fn find_peers(&mut self) {
// pick a random NodeId
@@ -310,7 +315,9 @@ fn load_enr(
// Note: Discovery should update the ENR record's IP to the external IP as seen by the
// majority of our peers.
let mut local_enr = EnrBuilder::new("v4")
.ip(config.discovery_address)
.ip(config
.discovery_address
.unwrap_or_else(|| "127.0.0.1".parse().expect("valid ip")))
.tcp(config.libp2p_port)
.udp(config.discovery_port)
.build(&local_key)
@@ -325,7 +332,8 @@ fn load_enr(
match Enr::from_str(&enr_string) {
Ok(enr) => {
if enr.node_id() == local_enr.node_id() {
if enr.ip().map(Into::into) == Some(config.discovery_address)
if (config.discovery_address.is_none()
|| enr.ip().map(Into::into) == config.discovery_address)
&& enr.tcp() == Some(config.libp2p_port)
&& enr.udp() == Some(config.discovery_port)
{

View File

@@ -145,7 +145,7 @@ where
// When terminating a stream, report the stream termination to the requesting user via
// an RPC error
let error = RPCErrorResponse::ServerError(ErrorMessage {
error_message: "Request timed out".as_bytes().to_vec(),
error_message: b"Request timed out".to_vec(),
});
// The stream termination type is irrelevant, this will terminate the
@@ -163,11 +163,16 @@ where
*self = InboundSubstreamState::ResponsePendingSend { substream, closing }
}
InboundSubstreamState::ResponseIdle(substream) => {
*self = InboundSubstreamState::ResponsePendingSend {
substream: substream.send(error),
closing: true,
};
InboundSubstreamState::ResponseIdle(mut substream) => {
// check if the stream is already closed
if let Ok(Async::Ready(None)) = substream.poll() {
*self = InboundSubstreamState::Closing(substream);
} else {
*self = InboundSubstreamState::ResponsePendingSend {
substream: substream.send(error),
closing: true,
};
}
}
InboundSubstreamState::Closing(substream) => {
// let the stream close
@@ -422,6 +427,8 @@ where
};
if self.pending_error.is_none() {
self.pending_error = Some((request_id, error));
} else {
crit!(self.log, "Couldn't add error");
}
}
@@ -452,6 +459,7 @@ where
}
ProtocolsHandlerUpgrErr::Timeout | ProtocolsHandlerUpgrErr::Timer => {
// negotiation timeout, mark the request as failed
debug!(self.log, "Active substreams before timeout"; "len" => self.outbound_substreams.len());
return Ok(Async::Ready(ProtocolsHandlerEvent::Custom(
RPCEvent::Error(
request_id,
@@ -514,7 +522,7 @@ where
// notify the user
return Ok(Async::Ready(ProtocolsHandlerEvent::Custom(
RPCEvent::Error(
stream_id.get_ref().clone(),
*stream_id.get_ref(),
RPCError::Custom("Stream timed out".into()),
),
)));
@@ -711,21 +719,18 @@ where
}
// establish outbound substreams
if !self.dial_queue.is_empty() {
if self.dial_negotiated < self.max_dial_negotiated {
self.dial_negotiated += 1;
let rpc_event = self.dial_queue.remove(0);
if let RPCEvent::Request(id, req) = rpc_event {
return Ok(Async::Ready(
ProtocolsHandlerEvent::OutboundSubstreamRequest {
protocol: SubstreamProtocol::new(req.clone()),
info: RPCEvent::Request(id, req),
},
));
}
}
} else {
if !self.dial_queue.is_empty() && self.dial_negotiated < self.max_dial_negotiated {
self.dial_negotiated += 1;
let rpc_event = self.dial_queue.remove(0);
self.dial_queue.shrink_to_fit();
if let RPCEvent::Request(id, req) = rpc_event {
return Ok(Async::Ready(
ProtocolsHandlerEvent::OutboundSubstreamRequest {
protocol: SubstreamProtocol::new(req.clone()),
info: RPCEvent::Request(id, req),
},
));
}
}
Ok(Async::NotReady)
}

View File

@@ -43,8 +43,7 @@ pub fn build_libp2p_instance(
) -> LibP2PService {
let config = build_config(port, boot_nodes, secret_key);
// launch libp2p service
let libp2p_service = LibP2PService::new(config.clone(), log.clone()).unwrap();
libp2p_service
LibP2PService::new(config, log.clone()).unwrap()
}
#[allow(dead_code)]
@@ -64,10 +63,10 @@ pub fn build_full_mesh(log: slog::Logger, n: usize, start_port: Option<u16>) ->
.map(|x| get_enr(&x).multiaddr()[1].clone())
.collect();
for i in 0..n {
for j in i..n {
for (i, node) in nodes.iter_mut().enumerate().take(n) {
for (j, multiaddr) in multiaddrs.iter().enumerate().skip(i) {
if i != j {
match libp2p::Swarm::dial_addr(&mut nodes[i].swarm, multiaddrs[j].clone()) {
match libp2p::Swarm::dial_addr(&mut node.swarm, multiaddr.clone()) {
Ok(()) => debug!(log, "Connected"),
Err(_) => error!(log, "Failed to connect"),
};

View File

@@ -62,7 +62,7 @@ fn test_gossipsub_forward() {
// Every node except the corner nodes are connected to 2 nodes.
if subscribed_count == (num_nodes * 2) - 2 {
node.swarm.publish(
&vec![Topic::new(topic.into_string())],
&[Topic::new(topic.into_string())],
pubsub_message.clone(),
);
}
@@ -96,45 +96,37 @@ fn test_gossipsub_full_mesh_publish() {
let mut received_count = 0;
tokio::run(futures::future::poll_fn(move || -> Result<_, ()> {
for node in nodes.iter_mut() {
loop {
match node.poll().unwrap() {
Async::Ready(Some(Libp2pEvent::PubsubMessage {
topics, message, ..
})) => {
assert_eq!(topics.len(), 1);
// Assert topic is the published topic
assert_eq!(
topics.first().unwrap(),
&TopicHash::from_raw(publishing_topic.clone())
);
// Assert message received is the correct one
assert_eq!(message, pubsub_message.clone());
received_count += 1;
if received_count == num_nodes - 1 {
return Ok(Async::Ready(()));
}
}
_ => break,
while let Async::Ready(Some(Libp2pEvent::PubsubMessage {
topics, message, ..
})) = node.poll().unwrap()
{
assert_eq!(topics.len(), 1);
// Assert topic is the published topic
assert_eq!(
topics.first().unwrap(),
&TopicHash::from_raw(publishing_topic.clone())
);
// Assert message received is the correct one
assert_eq!(message, pubsub_message.clone());
received_count += 1;
if received_count == num_nodes - 1 {
return Ok(Async::Ready(()));
}
}
}
loop {
match publishing_node.poll().unwrap() {
Async::Ready(Some(Libp2pEvent::PeerSubscribed(_, topic))) => {
// Received topic is one of the subscribed eth2 topics
assert!(topic.clone().into_string().starts_with("/eth2/"));
// Publish on beacon block topic
if topic == TopicHash::from_raw("/eth2/beacon_block/ssz") {
subscribed_count += 1;
if subscribed_count == num_nodes - 1 {
publishing_node.swarm.publish(
&vec![Topic::new(topic.into_string())],
pubsub_message.clone(),
);
}
}
while let Async::Ready(Some(Libp2pEvent::PeerSubscribed(_, topic))) =
publishing_node.poll().unwrap()
{
// Received topics is one of subscribed eth2 topics
assert!(topic.clone().into_string().starts_with("/eth2/"));
// Publish on beacon block topic
if topic == TopicHash::from_raw("/eth2/beacon_block/ssz") {
subscribed_count += 1;
if subscribed_count == num_nodes - 1 {
publishing_node
.swarm
.publish(&[Topic::new(topic.into_string())], pubsub_message.clone());
}
_ => break,
}
}
Ok(Async::NotReady)

View File

@@ -3,6 +3,7 @@ use eth2_libp2p::rpc::methods::*;
use eth2_libp2p::rpc::*;
use eth2_libp2p::{Libp2pEvent, RPCEvent};
use slog::{warn, Level};
use std::sync::atomic::{AtomicBool, Ordering::Relaxed};
use std::sync::{Arc, Mutex};
use std::time::Duration;
use tokio::prelude::*;
@@ -106,20 +107,19 @@ fn test_status_rpc() {
});
// execute the futures and check the result
let test_result = Arc::new(Mutex::new(false));
let test_result = Arc::new(AtomicBool::new(false));
let error_result = test_result.clone();
let thread_result = test_result.clone();
tokio::run(
sender_future
.select(receiver_future)
.timeout(Duration::from_millis(1000))
.map_err(move |_| *error_result.lock().unwrap() = false)
.map_err(move |_| error_result.store(false, Relaxed))
.map(move |result| {
*thread_result.lock().unwrap() = result.0;
()
thread_result.store(result.0, Relaxed);
}),
);
assert!(*test_result.lock().unwrap());
assert!(test_result.load(Relaxed));
}
#[test]
@@ -236,20 +236,19 @@ fn test_blocks_by_range_chunked_rpc() {
});
// execute the futures and check the result
let test_result = Arc::new(Mutex::new(false));
let test_result = Arc::new(AtomicBool::new(false));
let error_result = test_result.clone();
let thread_result = test_result.clone();
tokio::run(
sender_future
.select(receiver_future)
.timeout(Duration::from_millis(1000))
.map_err(move |_| *error_result.lock().unwrap() = false)
.map_err(move |_| error_result.store(false, Relaxed))
.map(move |result| {
*thread_result.lock().unwrap() = result.0;
()
thread_result.store(result.0, Relaxed);
}),
);
assert!(*test_result.lock().unwrap());
assert!(test_result.load(Relaxed));
}
#[test]
@@ -359,20 +358,19 @@ fn test_blocks_by_range_single_empty_rpc() {
});
// execute the futures and check the result
let test_result = Arc::new(Mutex::new(false));
let test_result = Arc::new(AtomicBool::new(false));
let error_result = test_result.clone();
let thread_result = test_result.clone();
tokio::run(
sender_future
.select(receiver_future)
.timeout(Duration::from_millis(1000))
.map_err(move |_| *error_result.lock().unwrap() = false)
.map_err(move |_| error_result.store(false, Relaxed))
.map(move |result| {
*thread_result.lock().unwrap() = result.0;
()
thread_result.store(result.0, Relaxed);
}),
);
assert!(*test_result.lock().unwrap());
assert!(test_result.load(Relaxed));
}
#[test]
@@ -486,20 +484,19 @@ fn test_blocks_by_root_chunked_rpc() {
});
// execute the futures and check the result
let test_result = Arc::new(Mutex::new(false));
let test_result = Arc::new(AtomicBool::new(false));
let error_result = test_result.clone();
let thread_result = test_result.clone();
tokio::run(
sender_future
.select(receiver_future)
.timeout(Duration::from_millis(1000))
.map_err(move |_| *error_result.lock().unwrap() = false)
.map_err(move |_| error_result.store(false, Relaxed))
.map(move |result| {
*thread_result.lock().unwrap() = result.0;
()
thread_result.store(result.0, Relaxed);
}),
);
assert!(*test_result.lock().unwrap());
assert!(test_result.load(Relaxed));
}
#[test]
@@ -558,18 +555,17 @@ fn test_goodbye_rpc() {
});
// execute the futures and check the result
let test_result = Arc::new(Mutex::new(false));
let test_result = Arc::new(AtomicBool::new(false));
let error_result = test_result.clone();
let thread_result = test_result.clone();
tokio::run(
sender_future
.select(receiver_future)
.timeout(Duration::from_millis(1000))
.map_err(move |_| *error_result.lock().unwrap() = false)
.map_err(move |_| error_result.store(false, Relaxed))
.map(move |result| {
*thread_result.lock().unwrap() = result.0;
()
thread_result.store(result.0, Relaxed);
}),
);
assert!(*test_result.lock().unwrap());
assert!(test_result.load(Relaxed));
}