lighthouse/beacon_node/network/src/service.rs
Paul Hauner f229bbba1c Eth1 Integration (#542)
* Refactor to cache Eth1Data

* Fix merge conflicts and minor refactorings

* Rename Eth1Cache to Eth1DataCache

* Refactor events subscription

* Add deposits module to interface with BeaconChain deposits

* Remove utils

* Rename to types.rs and add trait constraints to Eth1DataFetcher

* Conform to trait constraints. Make Web3DataFetcher cloneable

* Make fetcher object member of deposit and eth1_data cache and other fixes
* Fix update_cache function
* Move fetch_eth1_data to impl block
* Fix deposit tests

* Create Eth1 object for interfacing with Beacon chain
* Add `run` function for running update_cache and subscribe_deposit_logs tasks
* Add logging

* Run `cargo fmt` and make tests pass

* Convert sync functions to async

* Add timeouts to web3 functions

* Return futures from cache functions

* Add failed chaining of futures

* Working cache update

* Clean up tests and `update_cache` function

* Refactor `get_eth1_data` functions to work with future returning functions

* Refactor eth1 `run` function to work with modified `update_cache` api

* Minor changes

* Add distance parameter to `update_cache`

* Fix tests and other minor fixes

* Working integration with cache and deposits

* Add merkle_tree construction, proof generation and verification code

* Add function to construct and fetch Deposits for BeaconNode

* Add error handling

* Import ssz

* Add error handling to eth1 cache and fix minor errors

* Run rustfmt

* Fix minor bug

* Rename Eth1Error and change to Result<T>

* Change deposit fetching mechanism from notification based to poll based
* Add deposits from eth1 chain in a given range every `x` blocks
* Modify `run` function to accommodate changes
* Minor fixes

* Fix formatting

* Initial commit. web3 api working.

* Tidied up lib. Add function for fetching logs.

* Refactor with `Eth1DataFetcher` trait

* Add parsing for deposit contract logs and get_eth1_data function

* Add `get_eth1_votes` function

* Fix merge issue

* Refactor with `Config` struct. Remove `ContractConfig`

* Rename eth1_chain crate to eth1

* Rename files and read abi file using `fs::read`

* Move eth1 to lib

* Remove unnecessary mutability constraint

* Add `Web3Backend` for returning actual eth1 data

* Refactor `get_eth1_votes` to return a Result

* Delete `eth1_chain` crate

* Return `Result` from `get_deposits`

* Fix range of deposits to return to beacon chain

* Add `get_block_height_by_hash` trait function

* Add naive method for getting `previous_eth1_distance`

* Add eth1 config params to main config

* Add instructions for setting up eth1 testing environment

* Add build script to fetch deposit contract abi

* Contract ABI is part of compiled binary

* Fix minor bugs

* Move docs to lib

* Add timeout to config

* Remove print statements

* Change warn to error

* Fix typos

* Removed prints in test and get timeout value from config

* Fixed error types

* Added logging to web3_fetcher

* Refactor for modified web3 api

* Fix minor stuff

* Add build script

* Tidy, hide eth1 integration tests behind flag

* Add http crate

* Add first stages of eth1_test_rig

* Fix deposits on test rig

* Fix bug with deposit count method

* Add block hash getter to http eth1

* Clean eth1 http crate and tests

* Add script to start ganache

* Adds deposit tree to eth1-http

* Extend deposit tree tests

* Tidy tests in eth1-http

* Add more detail to get block request

* Add block cache to eth1-http

* Rename deposit tree to deposit cache

* Add initial updating to eth1-http

* Tidy updater

* Fix compile bugs in tests

* Adds an Eth1DataCache builder

* Reorg eth1-http files

* Add (failing) tests for eth1 updater

* Rename files, fix bug in eth1-http

* Ensure that ganache timestamps are increasing

* Fix bugs with getting eth1data ancestors

* Improve eth1 testing, fix bugs

* Add truncate method to block cache

* Add pruning to block cache update process

* Add tests for block pruning

* Allow for dropping an expired cache.

* Add more comments

* Add first compiling version of deposit updater

* Add common fn for getting range of required blocks

* Add passing deposit update test

* Improve tests

* Fix block pruning bug

* Add tests for running two updates at once

* Add updater services to eth1

* Add deposit collection to beacon chain

* Add incomplete builder experiments

* Add first working version of beacon chain builder

* Update test harness to new beacon chain type

* Rename builder file, tidy

* Add first working client builder

* Progress further on client builder

* Update beacon node binary to use client builder

* Ensure release tests compile

* Remove old eth1 crate

* Add first pass of new lighthouse binary

* Fix websocket server startup

* Remove old binary code from beacon_node crate

* Add first working beacon node tests

* Add genesis crate, new eth1 cache_2

* Add Service to Eth1Cache

* Refactor with general eth1 improvements

* Add passing genesis test

* Tidy, add comments

* Add more comments to eth1 service

* Add further eth1 progress

* Fix some bugs with genesis

* Fix eth1 bugs, make eth1 linking more efficient

* Shift logic in genesis service

* Add more comments to genesis service

* Add gzip, max request values, timeouts to http

* Update testnet parameters to suit goerli testnet

* Add ability to vary Fork, fix custom spec

* Be more explicit about deposit fork version

* Start adding beacon chain eth1 option

* Add more flexibility to prod client

* Further runtime refactoring

* Allow for starting from store

* Add bootstrapping to client config

* Add remote_beacon_node crate

* Update eth1 service for more configurability

* Update eth1 tests to use less runtimes

* Patch issues with tests using too many files

* Move dummy eth1 backend flag

* Ensure all tests pass

* Add ganache-cli to Dockerfile

* Use a special docker hub image for testing

* Appease clippy

* Move validator client into lighthouse binary

* Allow starting with dummy eth1 backend

* Improve logging

* Fix dummy eth1 backend from cli

* Add extra testnet command

* Ensure consistent spec in beacon node

* Update eth1 rig to work on goerli

* Tidy lcli, start adding support for yaml config

* Add incomplete YamlConfig struct

* Remove efforts at YamlConfig

* Add incomplete eth1 voting. Blocked on spec issues

* Add (untested) first pass at eth1 vote algo

* Add tests for winning vote

* Add more tests for eth1 chain

* Add more eth1 voting tests

* Added more eth1 voting testing

* Change test name

* Add more tests to eth1 chain

* Tidy eth1 generics, add more tests

* Improve comments

* Tidy beacon_node tests

* Tidy, rename JsonRpc.. to Caching..

* Tidy voting logic

* Tidy builder docs

* Add comments, tidy eth1

* Add more comments to eth1

* Fix bug with winning_vote

* Add doc comments to the `ClientBuilder`

* Remove commented-out code

* Improve `ClientBuilder` docs

* Add comments to client config

* Add decoding test for `ClientConfig`

* Remove unused `DepositSet` struct

* Tidy `block_cache`

* Remove commented out lines

* Remove unused code in `eth1` crate

* Remove old validator binary `main.rs`

* Tidy, fix tests compile error

* Add initial tests for get_deposits

* Remove dead code in eth1_test_rig

* Update TestingDepositBuilder

* Add testing for getting eth1 deposits

* Fix duplicate rand dep

* Remove dead code

* Remove accidentally-added files

* Fix comment in eth1_genesis_service

* Add .gitignore for eth1_test_rig

* Fix bug in eth1_genesis_service

* Remove dead code from eth2_config

* Fix tabs/spaces in root Cargo.toml

* Tidy eth1 crate

* Allow for re-use of eth1 service after genesis

* Update docs for new CLI

* Change README gif

* Tidy eth1 http module

* Tidy eth1 service

* Tidy environment crate

* Remove unused file

* Tidy, add comments

* Remove commented-out code

* Address majority of Michael's comments

* Address other PR comments

* Add link to issue alongside TODO
2019-11-15 14:47:51 +11:00

use crate::error;
use crate::message_handler::{HandlerMessage, MessageHandler};
use crate::NetworkConfig;
use beacon_chain::{BeaconChain, BeaconChainTypes};
use core::marker::PhantomData;
use eth2_libp2p::Service as LibP2PService;
use eth2_libp2p::Topic;
use eth2_libp2p::{Enr, Libp2pEvent, Multiaddr, PeerId, Swarm};
use eth2_libp2p::{PubsubMessage, RPCEvent};
use futures::prelude::*;
use futures::Stream;
use parking_lot::Mutex;
use slog::{debug, info, trace};
use std::sync::Arc;
use tokio::runtime::TaskExecutor;
use tokio::sync::{mpsc, oneshot};

/// Service that handles communication between internal services and the eth2_libp2p network service.
pub struct Service<T: BeaconChainTypes> {
    libp2p_service: Arc<Mutex<LibP2PService>>,
    libp2p_port: u16,
    _libp2p_exit: oneshot::Sender<()>,
    _network_send: mpsc::UnboundedSender<NetworkMessage>,
    _phantom: PhantomData<T>,
}
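
// Note: the `_`-prefixed fields are never read, but they are held deliberately:
// dropping `_libp2p_exit` signals the spawned network task to shut down, and
// `_network_send` keeps a handle on the network channel alive.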

impl<T: BeaconChainTypes + 'static> Service<T> {
    pub fn new(
        beacon_chain: Arc<BeaconChain<T>>,
        config: &NetworkConfig,
        executor: &TaskExecutor,
        network_log: slog::Logger,
    ) -> error::Result<(Arc<Self>, mpsc::UnboundedSender<NetworkMessage>)> {
        // Build the network channel.
        let (network_send, network_recv) = mpsc::unbounded_channel::<NetworkMessage>();

        // Launch the message handler thread.
        let message_handler_send = MessageHandler::spawn(
            beacon_chain,
            network_send.clone(),
            executor,
            network_log.clone(),
        )?;

        // Launch the libp2p service.
        let libp2p_service = Arc::new(Mutex::new(LibP2PService::new(
            config.clone(),
            network_log.clone(),
        )?));

        let libp2p_exit = spawn_service(
            libp2p_service.clone(),
            network_recv,
            message_handler_send,
            executor,
            network_log,
        )?;

        let network_service = Service {
            libp2p_service,
            libp2p_port: config.libp2p_port,
            _libp2p_exit: libp2p_exit,
            _network_send: network_send.clone(),
            _phantom: PhantomData,
        };

        Ok((Arc::new(network_service), network_send))
    }

    /// Returns the local ENR from the underlying Discv5 behaviour that external peers may connect
    /// to.
    pub fn local_enr(&self) -> Enr {
        self.libp2p_service
            .lock()
            .swarm
            .discovery()
            .local_enr()
            .clone()
    }

    /// Returns the local libp2p `PeerId`.
    pub fn local_peer_id(&self) -> PeerId {
        self.libp2p_service.lock().local_peer_id.clone()
    }

    /// Returns the list of `Multiaddr` that the underlying libp2p instance is listening on.
    pub fn listen_multiaddrs(&self) -> Vec<Multiaddr> {
        Swarm::listeners(&self.libp2p_service.lock().swarm)
            .cloned()
            .collect()
    }

    /// Returns the libp2p port that this node has been configured to listen on.
    pub fn listen_port(&self) -> u16 {
        self.libp2p_port
    }

    /// Returns the number of libp2p connected peers.
    pub fn connected_peers(&self) -> usize {
        self.libp2p_service.lock().swarm.connected_peers()
    }

    /// Returns the set of `PeerId` that are connected via libp2p.
    pub fn connected_peer_set(&self) -> Vec<PeerId> {
        self.libp2p_service
            .lock()
            .swarm
            .discovery()
            .connected_peer_set()
            .iter()
            .cloned()
            .collect()
    }

    /// Provides a reference to the underlying libp2p service.
    pub fn libp2p_service(&self) -> Arc<Mutex<LibP2PService>> {
        self.libp2p_service.clone()
    }
}
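
// A minimal usage sketch (illustrative only; `beacon_chain`, `config`, `executor`
// and `log` are assumed to be in scope with the types that `new` expects):
//
//     let (service, network_send) = Service::new(beacon_chain, &config, &executor, log)?;
//     let enr = service.local_enr(); // ENR that external peers may use to connect
//     let peer_count = service.connected_peers();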

fn spawn_service(
    libp2p_service: Arc<Mutex<LibP2PService>>,
    network_recv: mpsc::UnboundedReceiver<NetworkMessage>,
    message_handler_send: mpsc::UnboundedSender<HandlerMessage>,
    executor: &TaskExecutor,
    log: slog::Logger,
) -> error::Result<tokio::sync::oneshot::Sender<()>> {
    let (network_exit, exit_rx) = tokio::sync::oneshot::channel();

    // Spawn on the current executor.
    executor.spawn(
        network_service(
            libp2p_service,
            network_recv,
            message_handler_send,
            log.clone(),
        )
        // Allow for manual termination.
        .select(exit_rx.then(|_| Ok(())))
        .then(move |_| {
            info!(log.clone(), "Network service shutdown");
            Ok(())
        }),
    );

    Ok(network_exit)
}
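
// Shutdown works through the returned `Sender`: dropping it (or sending on it)
// resolves `exit_rx`, so the `select` above completes and the network future is
// torn down. An illustrative sketch (argument names are placeholders):
//
//     let exit_tx = spawn_service(libp2p_service, network_recv, handler_send, &executor, log)?;
//     // ... later:
//     drop(exit_tx); // terminates the network task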

// TODO: Potentially handle channel errors.
fn network_service(
    libp2p_service: Arc<Mutex<LibP2PService>>,
    mut network_recv: mpsc::UnboundedReceiver<NetworkMessage>,
    mut message_handler_send: mpsc::UnboundedSender<HandlerMessage>,
    log: slog::Logger,
) -> impl futures::Future<Item = (), Error = eth2_libp2p::error::Error> {
    futures::future::poll_fn(move || -> Result<_, eth2_libp2p::error::Error> {
        // If the network channel is not ready, try the swarm.
        loop {
            // Poll the network channel.
            match network_recv.poll() {
                Ok(Async::Ready(Some(message))) => match message {
                    NetworkMessage::RPC(peer_id, rpc_event) => {
                        trace!(log, "{}", rpc_event);
                        libp2p_service.lock().swarm.send_rpc(peer_id, rpc_event);
                    }
                    NetworkMessage::Propagate {
                        propagation_source,
                        message_id,
                    } => {
                        trace!(log, "Propagating gossipsub message";
                            "propagation_peer" => format!("{:?}", propagation_source),
                            "message_id" => format!("{}", message_id),
                        );
                        libp2p_service
                            .lock()
                            .swarm
                            .propagate_message(&propagation_source, message_id);
                    }
                    NetworkMessage::Publish { topics, message } => {
                        debug!(log, "Sending pubsub message"; "topics" => format!("{:?}", topics));
                        libp2p_service.lock().swarm.publish(&topics, message);
                    }
                },
                Ok(Async::NotReady) => break,
                Ok(Async::Ready(None)) => {
                    return Err(eth2_libp2p::error::Error::from("Network channel closed"));
                }
                Err(_) => {
                    return Err(eth2_libp2p::error::Error::from("Network channel error"));
                }
            }
        }

        loop {
            // Poll the swarm.
            match libp2p_service.lock().poll() {
                Ok(Async::Ready(Some(event))) => match event {
                    Libp2pEvent::RPC(peer_id, rpc_event) => {
                        trace!(log, "{}", rpc_event);
                        message_handler_send
                            .try_send(HandlerMessage::RPC(peer_id, rpc_event))
                            .map_err(|_| "Failed to send RPC to handler")?;
                    }
                    Libp2pEvent::PeerDialed(peer_id) => {
                        debug!(log, "Peer Dialed"; "PeerID" => format!("{:?}", peer_id));
                        message_handler_send
                            .try_send(HandlerMessage::PeerDialed(peer_id))
                            .map_err(|_| "Failed to send PeerDialed to handler")?;
                    }
                    Libp2pEvent::PeerDisconnected(peer_id) => {
                        debug!(log, "Peer Disconnected"; "PeerID" => format!("{:?}", peer_id));
                        message_handler_send
                            .try_send(HandlerMessage::PeerDisconnected(peer_id))
                            .map_err(|_| "Failed to send PeerDisconnected to handler")?;
                    }
                    Libp2pEvent::PubsubMessage {
                        id,
                        source,
                        message,
                        ..
                    } => {
                        message_handler_send
                            .try_send(HandlerMessage::PubsubMessage(id, source, message))
                            .map_err(|_| "Failed to send pubsub message to handler")?;
                    }
                },
                Ok(Async::Ready(None)) => unreachable!("Stream never ends"),
                Ok(Async::NotReady) => break,
                Err(_) => break,
            }
        }

        Ok(Async::NotReady)
    })
}
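
// Each wakeup of the `poll_fn` above drains both sources: first every queued
// `NetworkMessage` from the channel, then every pending `Libp2pEvent` from the
// swarm, before returning `Async::NotReady` so the task sleeps until the next
// notification.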

/// Types of messages that the network service can receive.
#[derive(Debug)]
pub enum NetworkMessage {
    /// Send an RPC message to the libp2p service.
    RPC(PeerId, RPCEvent),
    /// Publish a message to gossipsub.
    Publish {
        topics: Vec<Topic>,
        message: PubsubMessage,
    },
    /// Propagate a received gossipsub message.
    Propagate {
        propagation_source: PeerId,
        message_id: String,
    },
}
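
// Illustrative only: publishing to gossipsub through the sender returned by
// `Service::new` (`network_send`, `topics` and `message` are assumed to be in
// scope):
//
//     network_send
//         .try_send(NetworkMessage::Publish { topics, message })
//         .expect("network channel should be open");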