mirror of
https://github.com/sigp/lighthouse.git
synced 2026-03-16 19:32:55 +00:00
Remove total_requests_per_peer
This commit is contained in:
@@ -26,7 +26,7 @@ use logging::crit;
|
||||
use parking_lot::RwLock;
|
||||
use std::collections::{
|
||||
btree_map::{BTreeMap, Entry},
|
||||
HashMap, HashSet,
|
||||
HashSet,
|
||||
};
|
||||
use std::sync::Arc;
|
||||
use tracing::{debug, error, info, instrument, warn};
|
||||
@@ -932,8 +932,6 @@ impl<T: BeaconChainTypes> BackFillSync<T> {
|
||||
RangeRequestId::BackfillSync { batch_id },
|
||||
self.peers.clone(),
|
||||
&failed_peers,
|
||||
// Does not track total requests per peer for now
|
||||
&HashMap::new(),
|
||||
) {
|
||||
Ok(request_id) => {
|
||||
// inform the batch about the new request
|
||||
|
||||
@@ -480,21 +480,14 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
|
||||
requester: RangeRequestId,
|
||||
peers: Arc<RwLock<HashSet<PeerId>>>,
|
||||
peers_to_deprioritize: &HashSet<PeerId>,
|
||||
total_requests_per_peer: &HashMap<PeerId, usize>,
|
||||
) -> Result<Id, RpcRequestSendError> {
|
||||
let id = ComponentsByRangeRequestId {
|
||||
id: self.next_id(),
|
||||
requester,
|
||||
};
|
||||
|
||||
let req = BlockComponentsByRangeRequest::new(
|
||||
id,
|
||||
request,
|
||||
peers,
|
||||
peers_to_deprioritize,
|
||||
total_requests_per_peer,
|
||||
self,
|
||||
)?;
|
||||
let req =
|
||||
BlockComponentsByRangeRequest::new(id, request, peers, peers_to_deprioritize, self)?;
|
||||
|
||||
self.block_components_by_range_requests.insert(id, req);
|
||||
|
||||
|
||||
@@ -105,7 +105,6 @@ impl<T: BeaconChainTypes> BlockComponentsByRangeRequest<T> {
|
||||
request: BlocksByRangeRequest,
|
||||
peers: Arc<RwLock<HashSet<PeerId>>>,
|
||||
peers_to_deprioritize: &HashSet<PeerId>,
|
||||
total_requests_per_peer: &HashMap<PeerId, usize>,
|
||||
cx: &mut SyncNetworkContext<T>,
|
||||
) -> Result<Self, RpcRequestSendError> {
|
||||
// Induces a compile time panic if this doesn't hold true.
|
||||
@@ -129,19 +128,13 @@ impl<T: BeaconChainTypes> BlockComponentsByRangeRequest<T> {
|
||||
(
|
||||
// If contains -> 1 (order after), not contains -> 0 (order first)
|
||||
peers_to_deprioritize.contains(peer),
|
||||
// TODO(das): Should we use active_request_count_by_peer?
|
||||
// Prefer peers with fewer overall requests
|
||||
// active_request_count_by_peer.get(peer).copied().unwrap_or(0),
|
||||
// Prefer peers with fewer total cumulative requests, so we fetch data from a
|
||||
// diverse set of peers
|
||||
total_requests_per_peer.get(peer).copied().unwrap_or(0),
|
||||
// Random factor to break ties, otherwise the PeerID breaks ties
|
||||
rand::random::<u32>(),
|
||||
peer,
|
||||
)
|
||||
})
|
||||
.min()
|
||||
.map(|(_, _, _, peer)| *peer)
|
||||
.map(|(_, _, peer)| *peer)
|
||||
else {
|
||||
// When a peer disconnects and is removed from the SyncingChain peer set, if the set
|
||||
// reaches zero the SyncingChain is removed.
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
use crate::sync::network_context::PeerGroup;
|
||||
use beacon_chain::block_verification_types::RpcBlock;
|
||||
use itertools::Itertools;
|
||||
use lighthouse_network::rpc::methods::BlocksByRangeRequest;
|
||||
use lighthouse_network::service::api_types::Id;
|
||||
use lighthouse_network::PeerId;
|
||||
@@ -46,12 +45,6 @@ impl BatchPeers {
|
||||
pub fn column(&self, index: &ColumnIndex) -> Option<&PeerId> {
|
||||
self.column_peers.of_index(&((*index) as usize))
|
||||
}
|
||||
|
||||
pub fn iter_unique_peers(&self) -> impl Iterator<Item = &PeerId> {
|
||||
std::iter::once(&self.block_peer)
|
||||
.chain(self.column_peers.all())
|
||||
.unique()
|
||||
}
|
||||
}
|
||||
|
||||
/// Allows customisation of the above constants used in other sync methods such as BackFillSync.
|
||||
|
||||
@@ -11,7 +11,7 @@ use lighthouse_network::service::api_types::Id;
|
||||
use lighthouse_network::{PeerAction, PeerId};
|
||||
use logging::crit;
|
||||
use parking_lot::RwLock;
|
||||
use std::collections::{btree_map::Entry, BTreeMap, HashMap, HashSet};
|
||||
use std::collections::{btree_map::Entry, BTreeMap, HashSet};
|
||||
use std::sync::Arc;
|
||||
use strum::IntoStaticStr;
|
||||
use tracing::{debug, instrument, warn};
|
||||
@@ -90,15 +90,8 @@ pub struct SyncingChain<T: BeaconChainTypes> {
|
||||
|
||||
/// The peers that agree on the `target_head_slot` and `target_head_root` as a canonical chain
|
||||
/// and thus available to download this chain from.
|
||||
///
|
||||
/// Also, for each peer, tracks the total requests done as part of this SyncingChain
|
||||
/// `HashMap<peer, total_requests_per_peer>`
|
||||
peers: Arc<RwLock<HashSet<PeerId>>>,
|
||||
|
||||
/// Tracks the total requests done to each peer for this SyncingChain. Forces us to fetch data
|
||||
/// from all peers to prevent eclipse attacks
|
||||
requests_per_peer: HashMap<PeerId, usize>,
|
||||
|
||||
/// Starting epoch of the next batch that needs to be downloaded.
|
||||
to_be_downloaded: BatchId,
|
||||
|
||||
@@ -160,7 +153,6 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
|
||||
target_head_root,
|
||||
batches: BTreeMap::new(),
|
||||
peers: Arc::new(RwLock::new(HashSet::from_iter([peer_id]))),
|
||||
requests_per_peer: HashMap::from_iter([(peer_id, <_>::default())]),
|
||||
to_be_downloaded: start_epoch,
|
||||
processing_target: start_epoch,
|
||||
optimistic_start: None,
|
||||
@@ -221,7 +213,6 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
|
||||
#[instrument(parent = None,level = "info", fields(chain = self.id , service = "range_sync"), skip_all)]
|
||||
pub fn remove_peer(&mut self, peer_id: &PeerId) -> ProcessingResult {
|
||||
self.peers.write().remove(peer_id);
|
||||
self.requests_per_peer.remove(peer_id);
|
||||
|
||||
if self.peers.read().is_empty() {
|
||||
Err(RemoveChain::EmptyPeerPool)
|
||||
@@ -250,12 +241,6 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
|
||||
request_id: Id,
|
||||
blocks: Vec<RpcBlock<T::EthSpec>>,
|
||||
) -> ProcessingResult {
|
||||
// Account for one more request to this peer
|
||||
// TODO(das): this code assumes that we do a single request per peer per RpcBlock
|
||||
for peer in batch_peers.iter_unique_peers() {
|
||||
*self.requests_per_peer.entry(*peer).or_default() += 1;
|
||||
}
|
||||
|
||||
// check if we have this batch
|
||||
let batch = match self.batches.get_mut(&batch_id) {
|
||||
None => {
|
||||
@@ -873,7 +858,6 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
|
||||
peer_id: PeerId,
|
||||
) -> ProcessingResult {
|
||||
self.peers.write().insert(peer_id);
|
||||
self.requests_per_peer.insert(peer_id, <_>::default());
|
||||
self.request_batches(network)
|
||||
}
|
||||
|
||||
@@ -955,7 +939,6 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
|
||||
},
|
||||
self.peers.clone(),
|
||||
&failed_peers,
|
||||
&self.requests_per_peer,
|
||||
) {
|
||||
Ok(request_id) => {
|
||||
// inform the batch about the new request
|
||||
|
||||
Reference in New Issue
Block a user