Move peer db writes to eth2 libp2p (#2724)

## Issue Addressed
Part of a bigger effort to make the network globals read-only. This moves all writes to the `PeerDB` to the `eth2_libp2p` crate. Limiting writes to the peer manager is a slightly more complicated issue, left for a future PR to keep things reviewable.

## Proposed Changes
- Make the peers field in the globals a private field.
- Allow mutable access to the peers field to `eth2_libp2p` for now.
- Add a new network message to update the sync state.

Co-authored-by: Age Manning <Age@AgeManning.com>
This commit is contained in:
Divma
2021-11-19 04:42:31 +00:00
parent 31386277c3
commit 53562010ec
16 changed files with 139 additions and 154 deletions

View File

@@ -786,7 +786,7 @@ pub fn update_gossip_metrics<T: EthSpec>(
let mut peer_to_client = HashMap::new();
let mut scores_per_client: HashMap<&'static str, Vec<f64>> = HashMap::new();
{
let peers = network_globals.peers.read();
let peers = network_globals.peers();
for (peer_id, _) in gossipsub.all_peers() {
let client = peers
.peer_info(peer_id)
@@ -916,8 +916,7 @@ pub fn update_sync_metrics<T: EthSpec>(network_globals: &Arc<NetworkGlobals<T>>)
// count per sync status, the number of connected peers
let mut peers_per_sync_type = FnvHashMap::default();
for sync_type in network_globals
.peers
.read()
.peers()
.connected_peers()
.map(|(_peer_id, info)| info.sync_status().as_str())
{

View File

@@ -154,7 +154,7 @@ impl<T: BeaconChainTypes> Router<T> {
/// A new RPC request has been received from the network.
fn handle_rpc_request(&mut self, peer_id: PeerId, id: PeerRequestId, request: Request) {
if !self.network_globals.peers.read().is_connected(&peer_id) {
if !self.network_globals.peers().is_connected(&peer_id) {
debug!(self.log, "Dropping request of disconnected peer"; "peer_id" => %peer_id, "request" => ?request);
return;
}

View File

@@ -17,7 +17,7 @@ use lighthouse_network::{
types::{GossipEncoding, GossipTopic},
BehaviourEvent, MessageId, NetworkGlobals, PeerId,
};
use lighthouse_network::{MessageAcceptance, Service as LibP2PService};
use lighthouse_network::{MessageAcceptance, Service as LibP2PService, SyncStatus};
use slog::{crit, debug, error, info, o, trace, warn};
use std::{net::SocketAddr, pin::Pin, sync::Arc, time::Duration};
use store::HotColdDB;
@@ -100,6 +100,10 @@ pub enum NetworkMessage<T: EthSpec> {
reason: GoodbyeReason,
source: ReportSource,
},
UpdatePeerSyncStatus {
peer_id: PeerId,
sync_status: SyncStatus,
},
}
/// Service that handles communication between internal services and the `lighthouse_network` network service.
@@ -527,6 +531,9 @@ fn spawn_service<T: BeaconChainTypes>(
);
}
}
NetworkMessage::UpdatePeerSyncStatus{peer_id, sync_status} => {
service.libp2p.swarm.behaviour_mut().update_peers_sync_status(&peer_id, sync_status);
}
}
}
// process any attestation service events

View File

@@ -213,14 +213,7 @@ impl<T: BeaconChainTypes> BackFillSync<T> {
match self.state() {
BackFillState::Syncing => {} // already syncing ignore.
BackFillState::Paused => {
if self
.network_globals
.peers
.read()
.synced_peers()
.next()
.is_some()
{
if self.network_globals.peers().synced_peers().next().is_some() {
// If there are peers to resume with, begin the resume.
debug!(self.log, "Resuming backfill sync"; "start_epoch" => self.current_start, "awaiting_batches" => self.batches.len(), "processing_target" => self.processing_target);
self.set_state(BackFillState::Syncing);
@@ -906,8 +899,7 @@ impl<T: BeaconChainTypes> BackFillSync<T> {
let new_peer = {
let mut priorized_peers = self
.network_globals
.peers
.read()
.peers()
.synced_peers()
.map(|peer| {
(
@@ -1026,8 +1018,7 @@ impl<T: BeaconChainTypes> BackFillSync<T> {
let mut rng = rand::thread_rng();
let mut idle_peers = self
.network_globals
.peers
.read()
.peers()
.synced_peers()
.filter(|peer_id| {
self.active_requests

View File

@@ -294,7 +294,7 @@ impl<T: BeaconChainTypes> SyncManager<T> {
let sync_type = remote_sync_type(&local, &remote, &self.chain);
// update the state of the peer.
let should_add = self.update_peer_sync_state(&peer_id, &local, &remote, &sync_type);
let should_add = self.update_peer_sync_state(peer_id, &local, &remote, &sync_type);
if matches!(sync_type, PeerSyncType::Advanced) && should_add {
self.range_sync
@@ -646,7 +646,7 @@ impl<T: BeaconChainTypes> SyncManager<T> {
/// connection status.
fn update_peer_sync_state(
&mut self,
peer_id: &PeerId,
peer_id: PeerId,
local_sync_info: &SyncInfo,
remote_sync_info: &SyncInfo,
sync_type: &PeerSyncType,
@@ -656,15 +656,10 @@ impl<T: BeaconChainTypes> SyncManager<T> {
let new_state = sync_type.as_sync_status(remote_sync_info);
let rpr = new_state.as_str();
// Drop the write lock
let update_sync_status = self
.network_globals
.peers
.write()
.update_sync_status(peer_id, new_state.clone());
if let Some(was_updated) = update_sync_status {
let is_connected = self.network_globals.peers.read().is_connected(peer_id);
if was_updated {
if let Some(info) = self.network_globals.peers().peer_info(&peer_id) {
let is_connected = info.is_connected();
if !info.sync_status().is_same_kind(&new_state) {
debug!(self.log, "Peer transitioned sync state"; "peer_id" => %peer_id, "new_state" => rpr,
"our_head_slot" => local_sync_info.head_slot, "out_finalized_epoch" => local_sync_info.finalized_epoch,
"their_head_slot" => remote_sync_info.head_slot, "their_finalized_epoch" => remote_sync_info.finalized_epoch,
@@ -675,6 +670,8 @@ impl<T: BeaconChainTypes> SyncManager<T> {
if new_state.is_synced() {
self.backfill_sync.fully_synced_peer_joined();
}
self.network.update_peer_sync_status(peer_id, new_state);
}
is_connected
} else {
@@ -712,7 +709,7 @@ impl<T: BeaconChainTypes> SyncManager<T> {
let head = self.chain.best_slot().unwrap_or_else(|_| Slot::new(0));
let current_slot = self.chain.slot().unwrap_or_else(|_| Slot::new(0));
let peers = self.network_globals.peers.read();
let peers = self.network_globals.peers();
if current_slot >= head
&& current_slot.sub(head) <= (SLOT_IMPORT_TOLERANCE as u64)
&& head > 0

View File

@@ -10,7 +10,9 @@ use fnv::FnvHashMap;
use lighthouse_network::rpc::{
BlocksByRangeRequest, BlocksByRootRequest, GoodbyeReason, RequestId,
};
use lighthouse_network::{Client, NetworkGlobals, PeerAction, PeerId, ReportSource, Request};
use lighthouse_network::{
Client, NetworkGlobals, PeerAction, PeerId, ReportSource, Request, SyncStatus,
};
use slog::{debug, trace, warn};
use std::sync::Arc;
use tokio::sync::mpsc;
@@ -52,12 +54,7 @@ impl<T: EthSpec> SyncNetworkContext<T> {
/// Returns the Client type of the peer if known
pub fn client_type(&self, peer_id: &PeerId) -> Client {
self.network_globals
.peers
.read()
.peer_info(peer_id)
.map(|info| info.client().clone())
.unwrap_or_default()
self.network_globals.client(peer_id)
}
pub fn status_peers<C: ToStatusMessage>(
@@ -208,10 +205,17 @@ impl<T: EthSpec> SyncNetworkContext<T> {
});
}
pub fn update_peer_sync_status(&self, peer_id: PeerId, new_status: SyncStatus) {
let _ = self.send_network_msg(NetworkMessage::UpdatePeerSyncStatus {
peer_id,
sync_status: new_status,
});
}
/// Sends an arbitrary network message.
fn send_network_msg(&mut self, msg: NetworkMessage<T>) -> Result<(), &'static str> {
self.network_send.send(msg).map_err(|_| {
debug!(self.log, "Could not send message to the network service");
fn send_network_msg(&self, msg: NetworkMessage<T>) -> Result<(), &'static str> {
self.network_send.send(msg).map_err(|msg| {
warn!(self.log, "Could not send message to the network service"; "msg" => ?msg.0);
"Network channel send Failed"
})
}