Libp2p v0.48.0 upgrade (#3547)

## Issue Addressed

Upgrades libp2p to v0.48.0. This is the compilation of
- [x] #3495 
- [x] #3497 
- [x] #3491 
- [x] #3546 
- [x] #3553 

Co-authored-by: Age Manning <Age@AgeManning.com>
This commit is contained in:
Divma
2022-09-29 01:50:11 +00:00
parent 6779912fe4
commit b1d2510d1b
31 changed files with 1506 additions and 1614 deletions

View File

@@ -0,0 +1,101 @@
use std::sync::Arc;
use libp2p::core::connection::ConnectionId;
use types::{EthSpec, SignedBeaconBlock};
use crate::rpc::{
methods::{
BlocksByRangeRequest, BlocksByRootRequest, OldBlocksByRangeRequest, RPCCodedResponse,
RPCResponse, ResponseTermination, StatusMessage,
},
OutboundRequest, SubstreamId,
};
/// Identifier of requests sent by a peer.
///
/// The pair of the connection and the substream the request arrived on uniquely
/// identifies an inbound request.
pub type PeerRequestId = (ConnectionId, SubstreamId);

/// Identifier of a request.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum RequestId<AppReqId> {
    /// A request made by the application layer, carrying the application's own id type.
    Application(AppReqId),
    /// A request made internally by the behaviour itself (logged as "Behaviour").
    Internal,
}
/// The type of RPC requests the Behaviour informs it has received and allows for sending.
///
// NOTE: This is an application-level wrapper over the lower network level requests that can be
// sent. The main difference is the absence of the Ping, Metadata and Goodbye protocols, which don't
// leave the Behaviour. For all protocols managed by RPC see `RPCRequest`.
#[derive(Debug, Clone, PartialEq)]
pub enum Request {
    /// A Status message.
    Status(StatusMessage),
    /// A blocks by range request.
    BlocksByRange(BlocksByRangeRequest),
    /// A blocks by root request.
    BlocksByRoot(BlocksByRootRequest),
}
impl<TSpec: EthSpec> std::convert::From<Request> for OutboundRequest<TSpec> {
    /// Translate an application-level request into the outbound RPC request.
    fn from(req: Request) -> OutboundRequest<TSpec> {
        match req {
            Request::Status(status) => OutboundRequest::Status(status),
            Request::BlocksByRange(BlocksByRangeRequest { start_slot, count }) => {
                // The outbound request type still carries a `step` field; it is
                // always sent as 1 here.
                OutboundRequest::BlocksByRange(OldBlocksByRangeRequest {
                    start_slot,
                    count,
                    step: 1,
                })
            }
            Request::BlocksByRoot(request) => OutboundRequest::BlocksByRoot(request),
        }
    }
}
/// The type of RPC responses the Behaviour informs it has received, and allows for sending.
///
// NOTE: This is an application-level wrapper over the lower network level responses that can be
// sent. The main difference is the absence of Pong and Metadata, which don't leave the
// Behaviour. For all protocol responses managed by RPC see `RPCResponse` and
// `RPCCodedResponse`.
#[derive(Debug, Clone, PartialEq)]
pub enum Response<TSpec: EthSpec> {
    /// A Status message.
    Status(StatusMessage),
    /// A response to a get BLOCKS_BY_RANGE request. A None response signals the end of the batch.
    BlocksByRange(Option<Arc<SignedBeaconBlock<TSpec>>>),
    /// A response to a get BLOCKS_BY_ROOT request. A None response signals the end of the batch.
    BlocksByRoot(Option<Arc<SignedBeaconBlock<TSpec>>>),
}
impl<TSpec: EthSpec> std::convert::From<Response<TSpec>> for RPCCodedResponse<TSpec> {
    /// Map an application-level response onto the RPC wire representation:
    /// `Some` payloads become successful responses, `None` becomes the matching
    /// stream-termination marker.
    fn from(resp: Response<TSpec>) -> RPCCodedResponse<TSpec> {
        match resp {
            Response::Status(status) => RPCCodedResponse::Success(RPCResponse::Status(status)),
            Response::BlocksByRange(Some(block)) => {
                RPCCodedResponse::Success(RPCResponse::BlocksByRange(block))
            }
            Response::BlocksByRange(None) => {
                RPCCodedResponse::StreamTermination(ResponseTermination::BlocksByRange)
            }
            Response::BlocksByRoot(Some(block)) => {
                RPCCodedResponse::Success(RPCResponse::BlocksByRoot(block))
            }
            Response::BlocksByRoot(None) => {
                RPCCodedResponse::StreamTermination(ResponseTermination::BlocksByRoot)
            }
        }
    }
}
impl<AppReqId: std::fmt::Debug> slog::Value for RequestId<AppReqId> {
    /// Log application requests via their `Debug` representation and internal
    /// requests under the fixed "Behaviour" tag.
    fn serialize(
        &self,
        record: &slog::Record,
        key: slog::Key,
        serializer: &mut dyn slog::Serializer,
    ) -> slog::Result {
        match self {
            RequestId::Application(id) => {
                slog::Value::serialize(&format_args!("{:?}", id), record, key, serializer)
            }
            RequestId::Internal => slog::Value::serialize("Behaviour", record, key, serializer),
        }
    }
}

View File

@@ -0,0 +1,34 @@
use crate::discovery::Discovery;
use crate::peer_manager::PeerManager;
use crate::rpc::{ReqId, RPC};
use crate::types::SnappyTransform;
use libp2p::gossipsub::subscription_filter::{
MaxCountSubscriptionFilter, WhitelistSubscriptionFilter,
};
use libp2p::gossipsub::Gossipsub as BaseGossipsub;
use libp2p::identify::Identify;
use libp2p::swarm::NetworkBehaviour;
use libp2p::NetworkBehaviour;
use types::EthSpec;
use super::api_types::RequestId;
/// Subscription filter applied to gossipsub: a whitelist of allowed topics,
/// bounded in total count.
pub type SubscriptionFilter = MaxCountSubscriptionFilter<WhitelistSubscriptionFilter>;
/// Gossipsub instantiated with the snappy message transform and the whitelist filter.
pub type Gossipsub = BaseGossipsub<SnappyTransform, SubscriptionFilter>;

/// The composite `NetworkBehaviour`; the derive macro combines the events of
/// each sub-behaviour field below into a single behaviour.
#[derive(NetworkBehaviour)]
pub(crate) struct Behaviour<AppReqId: ReqId, TSpec: EthSpec> {
    /// The routing pub-sub mechanism for eth2.
    pub gossipsub: Gossipsub,
    /// The Eth2 RPC specified in the wire-0 protocol.
    pub eth2_rpc: RPC<RequestId<AppReqId>, TSpec>,
    /// Discv5 Discovery protocol.
    pub discovery: Discovery<TSpec>,
    /// Provides IP addresses and peer information.
    // NOTE: The id protocol is used for initial interop. This will be removed by mainnet.
    pub identify: Identify,
    /// The peer manager that keeps track of peer's reputation and status.
    pub peer_manager: PeerManager<TSpec>,
}

View File

@@ -0,0 +1,247 @@
use std::collections::hash_map::Entry;
use std::collections::HashMap;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::time::Duration;
use crate::types::GossipKind;
use crate::GossipTopic;
use tokio_util::time::delay_queue::{DelayQueue, Key};
/// Store of gossip messages that we failed to publish and will try again later. By default, all
/// messages are ignored. This behaviour can be changed using `GossipCacheBuilder::default_timeout`
/// to apply the same delay to every kind. Individual timeouts for specific kinds can be set and
/// will overwrite the default_timeout if present.
pub struct GossipCache {
    /// Expire timeouts for each topic-msg pair. The `Key`s stored in `topic_msgs`
    /// address the entries of this queue.
    expirations: DelayQueue<(GossipTopic, Vec<u8>)>,
    /// Messages cached for each topic, keyed by the raw message data.
    topic_msgs: HashMap<GossipTopic, HashMap<Vec<u8>, Key>>,
    /// Timeout for blocks.
    beacon_block: Option<Duration>,
    /// Timeout for aggregate attestations.
    aggregates: Option<Duration>,
    /// Timeout for attestations.
    attestation: Option<Duration>,
    /// Timeout for voluntary exits.
    voluntary_exit: Option<Duration>,
    /// Timeout for proposer slashings.
    proposer_slashing: Option<Duration>,
    /// Timeout for attester slashings.
    attester_slashing: Option<Duration>,
    /// Timeout for aggregated sync committee signatures.
    signed_contribution_and_proof: Option<Duration>,
    /// Timeout for sync committee messages.
    sync_committee_message: Option<Duration>,
}
/// Builder for a `GossipCache`. Each field, when set, overrides
/// `default_timeout` for its particular message kind.
#[derive(Default)]
pub struct GossipCacheBuilder {
    /// Fallback timeout applied to every kind without an explicit override.
    default_timeout: Option<Duration>,
    /// Timeout for blocks.
    beacon_block: Option<Duration>,
    /// Timeout for aggregate attestations.
    aggregates: Option<Duration>,
    /// Timeout for attestations.
    attestation: Option<Duration>,
    /// Timeout for voluntary exits.
    voluntary_exit: Option<Duration>,
    /// Timeout for proposer slashings.
    proposer_slashing: Option<Duration>,
    /// Timeout for attester slashings.
    attester_slashing: Option<Duration>,
    /// Timeout for aggregated sync committee signatures.
    signed_contribution_and_proof: Option<Duration>,
    /// Timeout for sync committee messages.
    sync_committee_message: Option<Duration>,
}
#[allow(dead_code)]
impl GossipCacheBuilder {
    /// By default all timeouts are disabled. Setting a default timeout enables it
    /// for every message kind that has no explicit timeout of its own.
    pub fn default_timeout(self, timeout: Duration) -> Self {
        Self {
            default_timeout: Some(timeout),
            ..self
        }
    }

    /// Timeout for blocks.
    pub fn beacon_block_timeout(self, timeout: Duration) -> Self {
        Self {
            beacon_block: Some(timeout),
            ..self
        }
    }

    /// Timeout for aggregate attestations.
    pub fn aggregates_timeout(self, timeout: Duration) -> Self {
        Self {
            aggregates: Some(timeout),
            ..self
        }
    }

    /// Timeout for attestations.
    pub fn attestation_timeout(self, timeout: Duration) -> Self {
        Self {
            attestation: Some(timeout),
            ..self
        }
    }

    /// Timeout for voluntary exits.
    pub fn voluntary_exit_timeout(self, timeout: Duration) -> Self {
        Self {
            voluntary_exit: Some(timeout),
            ..self
        }
    }

    /// Timeout for proposer slashings.
    pub fn proposer_slashing_timeout(self, timeout: Duration) -> Self {
        Self {
            proposer_slashing: Some(timeout),
            ..self
        }
    }

    /// Timeout for attester slashings.
    pub fn attester_slashing_timeout(self, timeout: Duration) -> Self {
        Self {
            attester_slashing: Some(timeout),
            ..self
        }
    }

    /// Timeout for aggregated sync committee signatures.
    pub fn signed_contribution_and_proof_timeout(self, timeout: Duration) -> Self {
        Self {
            signed_contribution_and_proof: Some(timeout),
            ..self
        }
    }

    /// Timeout for sync committee messages.
    pub fn sync_committee_message_timeout(self, timeout: Duration) -> Self {
        Self {
            sync_committee_message: Some(timeout),
            ..self
        }
    }

    /// Consume the builder, producing an empty `GossipCache` in which every
    /// per-kind timeout falls back to the default timeout when unset.
    pub fn build(self) -> GossipCache {
        let default = self.default_timeout;
        GossipCache {
            expirations: DelayQueue::default(),
            topic_msgs: HashMap::default(),
            beacon_block: self.beacon_block.or(default),
            aggregates: self.aggregates.or(default),
            attestation: self.attestation.or(default),
            voluntary_exit: self.voluntary_exit.or(default),
            proposer_slashing: self.proposer_slashing.or(default),
            attester_slashing: self.attester_slashing.or(default),
            signed_contribution_and_proof: self.signed_contribution_and_proof.or(default),
            sync_committee_message: self.sync_committee_message.or(default),
        }
    }
}
impl GossipCache {
    /// Get a builder of a `GossipCache`. Topic kinds for which no timeout is defined will be
    /// ignored if added in `insert`.
    pub fn builder() -> GossipCacheBuilder {
        GossipCacheBuilder::default()
    }

    /// Insert a message to be sent later. Messages whose topic kind has no
    /// configured timeout are silently dropped; re-inserting an identical
    /// (topic, data) pair resets its expiration timer instead of duplicating it.
    pub fn insert(&mut self, topic: GossipTopic, data: Vec<u8>) {
        let expire_timeout = match topic.kind() {
            GossipKind::BeaconBlock => self.beacon_block,
            GossipKind::BeaconAggregateAndProof => self.aggregates,
            GossipKind::Attestation(_) => self.attestation,
            GossipKind::VoluntaryExit => self.voluntary_exit,
            GossipKind::ProposerSlashing => self.proposer_slashing,
            GossipKind::AttesterSlashing => self.attester_slashing,
            GossipKind::SignedContributionAndProof => self.signed_contribution_and_proof,
            GossipKind::SyncCommitteeMessage(_) => self.sync_committee_message,
        };
        // No timeout configured for this kind: the message is not cached.
        let expire_timeout = match expire_timeout {
            Some(expire_timeout) => expire_timeout,
            None => return,
        };
        match self
            .topic_msgs
            .entry(topic.clone())
            .or_default()
            .entry(data.clone())
        {
            // Already cached: push back the expiration of the existing entry.
            Entry::Occupied(key) => self.expirations.reset(key.get(), expire_timeout),
            // New message: queue an expiration and remember its queue key.
            Entry::Vacant(entry) => {
                let key = self.expirations.insert((topic, data), expire_timeout);
                entry.insert(key);
            }
        }
    }

    /// Get the registered messages for this topic, removing them (and their
    /// pending expirations) from the cache.
    pub fn retrieve(&mut self, topic: &GossipTopic) -> Option<impl Iterator<Item = Vec<u8>> + '_> {
        if let Some(msgs) = self.topic_msgs.remove(topic) {
            // Cancel the pending expirations for every returned message.
            for (_, key) in msgs.iter() {
                self.expirations.remove(key);
            }
            Some(msgs.into_keys())
        } else {
            None
        }
    }
}
impl futures::stream::Stream for GossipCache {
    type Item = Result<GossipTopic, String>; // We don't care to retrieve the expired data.

    /// Yields the topic of each expired message, removing the matching entry
    /// from `topic_msgs` as it goes.
    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        match self.expirations.poll_expired(cx) {
            Poll::Ready(Some(Ok(expired))) => {
                let expected_key = expired.key();
                let (topic, data) = expired.into_inner();
                match self.topic_msgs.get_mut(&topic) {
                    Some(msgs) => {
                        let key = msgs.remove(&data);
                        // The key stored for this message must match the expired entry.
                        debug_assert_eq!(key, Some(expected_key));
                        if msgs.is_empty() {
                            // no more messages for this topic.
                            self.topic_msgs.remove(&topic);
                        }
                    }
                    None => {
                        // Invariant violation: every queued expiration should have a
                        // matching entry in `topic_msgs`. Only fatal in debug builds.
                        #[cfg(debug_assertions)]
                        panic!("Topic for registered message is not present.")
                    }
                }
                Poll::Ready(Some(Ok(topic)))
            }
            Poll::Ready(Some(Err(x))) => Poll::Ready(Some(Err(x.to_string()))),
            Poll::Ready(None) => Poll::Ready(None),
            Poll::Pending => Poll::Pending,
        }
    }
}
#[cfg(test)]
mod tests {
    use crate::types::GossipKind;

    use super::*;
    use futures::stream::StreamExt;

    /// Inserting a message with the default timeout and waiting past it should
    /// drain both the expiration queue and the per-topic message map.
    #[tokio::test]
    async fn test_stream() {
        let mut cache = GossipCache::builder()
            .default_timeout(Duration::from_millis(300))
            .build();
        let test_topic = GossipTopic::new(
            GossipKind::Attestation(1u64.into()),
            crate::types::GossipEncoding::SSZSnappy,
            [0u8; 4],
        );
        cache.insert(test_topic, vec![]);
        tokio::time::sleep(Duration::from_millis(300)).await;
        // Drive the stream until every expiration has been processed.
        while cache.next().await.is_some() {}
        assert!(cache.expirations.is_empty());
        assert!(cache.topic_msgs.is_empty());
    }
}

View File

@@ -0,0 +1,359 @@
use crate::types::{GossipEncoding, GossipKind, GossipTopic};
use crate::{error, TopicHash};
use libp2p::gossipsub::{
GossipsubConfig, IdentTopic as Topic, PeerScoreParams, PeerScoreThresholds, TopicScoreParams,
};
use std::cmp::max;
use std::collections::HashMap;
use std::marker::PhantomData;
use std::time::Duration;
use types::{ChainSpec, EnrForkId, EthSpec, Slot, SubnetId};
// Caps used when computing the maximum positive peer score (see `PeerScoreSettings::new`).
const MAX_IN_MESH_SCORE: f64 = 10.0;
const MAX_FIRST_MESSAGE_DELIVERIES_SCORE: f64 = 40.0;

// Relative weights of each topic's contribution to the overall peer score.
const BEACON_BLOCK_WEIGHT: f64 = 0.5;
const BEACON_AGGREGATE_PROOF_WEIGHT: f64 = 0.5;
const VOLUNTARY_EXIT_WEIGHT: f64 = 0.05;
const PROPOSER_SLASHING_WEIGHT: f64 = 0.05;
const ATTESTER_SLASHING_WEIGHT: f64 = 0.05;

/// The time window (seconds) that we expect messages to be forwarded to us in the mesh.
const MESH_MESSAGE_DELIVERIES_WINDOW: u64 = 2;

// Const as this is used in the peer manager to prevent gossip from disconnecting peers.
pub const GREYLIST_THRESHOLD: f64 = -16000.0;
/// Builds the peer score thresholds used for lighthouse's gossipsub instance.
pub fn lighthouse_gossip_thresholds() -> PeerScoreThresholds {
    let thresholds = PeerScoreThresholds {
        graylist_threshold: GREYLIST_THRESHOLD,
        gossip_threshold: -4000.0,
        publish_threshold: -8000.0,
        accept_px_threshold: 100.0,
        opportunistic_graft_threshold: 5.0,
    };
    thresholds
}
/// Pre-computed constants used to derive gossipsub peer-score parameters for a
/// given chain spec and gossipsub configuration.
pub struct PeerScoreSettings<TSpec: EthSpec> {
    /// Duration of a single slot.
    slot: Duration,
    /// Duration of a single epoch (`slot * slots_per_epoch`).
    epoch: Duration,
    /// Weight of each individual attestation subnet topic.
    beacon_attestation_subnet_weight: f64,
    /// Maximum achievable positive peer score summed across all topics.
    max_positive_score: f64,
    /// Interval at which scores decay.
    decay_interval: Duration,
    /// Value at which a decaying counter is considered to have reached zero.
    decay_to_zero: f64,
    /// Gossipsub target mesh size (`mesh_n`).
    mesh_n: usize,
    /// Spec value: maximum committees per slot.
    max_committees_per_slot: usize,
    /// Spec value: target committee size.
    target_committee_size: usize,
    /// Spec value: target number of aggregators per committee.
    target_aggregators_per_committee: usize,
    /// Number of attestation subnets.
    attestation_subnet_count: u64,
    /// Ties the settings to a specific `EthSpec` without storing one.
    phantom: PhantomData<TSpec>,
}
impl<TSpec: EthSpec> PeerScoreSettings<TSpec> {
    /// Derive all scoring constants from the chain spec and the gossipsub configuration.
    pub fn new(chain_spec: &ChainSpec, gs_config: &GossipsubConfig) -> PeerScoreSettings<TSpec> {
        let slot = Duration::from_secs(chain_spec.seconds_per_slot);
        // Every attestation subnet carries an equal share of the total attestation weight.
        let beacon_attestation_subnet_weight = 1.0 / chain_spec.attestation_subnet_count as f64;
        // Upper bound on the positive score a peer can accumulate across all topics.
        let max_positive_score = (MAX_IN_MESH_SCORE + MAX_FIRST_MESSAGE_DELIVERIES_SCORE)
            * (BEACON_BLOCK_WEIGHT
                + BEACON_AGGREGATE_PROOF_WEIGHT
                + beacon_attestation_subnet_weight * chain_spec.attestation_subnet_count as f64
                + VOLUNTARY_EXIT_WEIGHT
                + PROPOSER_SLASHING_WEIGHT
                + ATTESTER_SLASHING_WEIGHT);

        PeerScoreSettings {
            slot,
            epoch: slot * TSpec::slots_per_epoch() as u32,
            beacon_attestation_subnet_weight,
            max_positive_score,
            // Decay scores at least once per second, even for sub-second slots.
            decay_interval: max(Duration::from_secs(1), slot),
            decay_to_zero: 0.01,
            mesh_n: gs_config.mesh_n(),
            max_committees_per_slot: chain_spec.max_committees_per_slot,
            target_committee_size: chain_spec.target_committee_size,
            target_aggregators_per_committee: chain_spec.target_aggregators_per_committee as usize,
            attestation_subnet_count: chain_spec.attestation_subnet_count,
            phantom: PhantomData,
        }
    }

    /// Build the complete gossipsub `PeerScoreParams`: global parameters, the
    /// fixed topics, and the validator-count-dependent dynamic topics.
    pub fn get_peer_score_params(
        &self,
        active_validators: usize,
        thresholds: &PeerScoreThresholds,
        enr_fork_id: &EnrForkId,
        current_slot: Slot,
    ) -> error::Result<PeerScoreParams> {
        let mut params = PeerScoreParams {
            decay_interval: self.decay_interval,
            decay_to_zero: self.decay_to_zero,
            retain_score: self.epoch * 100,
            app_specific_weight: 1.0,
            ip_colocation_factor_threshold: 8.0, // Allow up to 8 nodes per IP
            behaviour_penalty_threshold: 6.0,
            behaviour_penalty_decay: self.score_parameter_decay(self.epoch * 10),
            ..Default::default()
        };

        // Choose the behaviour penalty weight so that the penalty counter's
        // convergence value (past the threshold) maps onto the gossip threshold.
        let target_value = Self::decay_convergence(
            params.behaviour_penalty_decay,
            10.0 / TSpec::slots_per_epoch() as f64,
        ) - params.behaviour_penalty_threshold;
        params.behaviour_penalty_weight = thresholds.gossip_threshold / target_value.powi(2);

        params.topic_score_cap = self.max_positive_score * 0.5;
        // An IP-colocation penalty can at most cancel out the capped topic score.
        params.ip_colocation_factor_weight = -params.topic_score_cap;
        params.topics = HashMap::new();

        // Topic hash for `kind` under the current fork digest.
        let get_hash = |kind: GossipKind| -> TopicHash {
            let topic: Topic =
                GossipTopic::new(kind, GossipEncoding::default(), enr_fork_id.fork_digest).into();
            topic.hash()
        };

        // First, all fixed topics.
        params.topics.insert(
            get_hash(GossipKind::VoluntaryExit),
            Self::get_topic_params(
                self,
                VOLUNTARY_EXIT_WEIGHT,
                4.0 / TSpec::slots_per_epoch() as f64,
                self.epoch * 100,
                None,
            ),
        );
        params.topics.insert(
            get_hash(GossipKind::AttesterSlashing),
            Self::get_topic_params(
                self,
                ATTESTER_SLASHING_WEIGHT,
                1.0 / 5.0 / TSpec::slots_per_epoch() as f64,
                self.epoch * 100,
                None,
            ),
        );
        params.topics.insert(
            get_hash(GossipKind::ProposerSlashing),
            Self::get_topic_params(
                self,
                PROPOSER_SLASHING_WEIGHT,
                1.0 / 5.0 / TSpec::slots_per_epoch() as f64,
                self.epoch * 100,
                None,
            ),
        );

        // Then the topics whose parameters depend on the active validator count.
        let (beacon_block_params, beacon_aggregate_proof_params, beacon_attestation_subnet_params) =
            self.get_dynamic_topic_params(active_validators, current_slot)?;

        params
            .topics
            .insert(get_hash(GossipKind::BeaconBlock), beacon_block_params);
        params.topics.insert(
            get_hash(GossipKind::BeaconAggregateAndProof),
            beacon_aggregate_proof_params,
        );
        // All attestation subnets share the same parameters.
        for i in 0..self.attestation_subnet_count {
            params.topics.insert(
                get_hash(GossipKind::Attestation(SubnetId::new(i))),
                beacon_attestation_subnet_params.clone(),
            );
        }

        Ok(params)
    }

    /// Compute topic parameters for beacon blocks, aggregate proofs and the
    /// attestation subnets, scaled by the current number of active validators.
    pub fn get_dynamic_topic_params(
        &self,
        active_validators: usize,
        current_slot: Slot,
    ) -> error::Result<(TopicScoreParams, TopicScoreParams, TopicScoreParams)> {
        let (aggregators_per_slot, committees_per_slot) =
            self.expected_aggregator_count_per_slot(active_validators)?;
        // Whether each subnet is expected to see multiple message bursts per epoch.
        let multiple_bursts_per_subnet_per_epoch = committees_per_slot as u64
            >= 2 * self.attestation_subnet_count / TSpec::slots_per_epoch();

        let beacon_block_params = Self::get_topic_params(
            self,
            BEACON_BLOCK_WEIGHT,
            1.0,
            self.epoch * 20,
            Some((TSpec::slots_per_epoch() * 5, 3.0, self.epoch, current_slot)),
        );

        let beacon_aggregate_proof_params = Self::get_topic_params(
            self,
            BEACON_AGGREGATE_PROOF_WEIGHT,
            aggregators_per_slot,
            self.epoch,
            Some((TSpec::slots_per_epoch() * 2, 4.0, self.epoch, current_slot)),
        );

        let beacon_attestation_subnet_params = Self::get_topic_params(
            self,
            self.beacon_attestation_subnet_weight,
            active_validators as f64
                / self.attestation_subnet_count as f64
                / TSpec::slots_per_epoch() as f64,
            self.epoch
                * (if multiple_bursts_per_subnet_per_epoch {
                    1
                } else {
                    4
                }),
            Some((
                TSpec::slots_per_epoch()
                    * (if multiple_bursts_per_subnet_per_epoch {
                        4
                    } else {
                        16
                    }),
                16.0,
                if multiple_bursts_per_subnet_per_epoch {
                    self.slot * (TSpec::slots_per_epoch() as u32 / 2 + 1)
                } else {
                    self.epoch * 3
                },
                current_slot,
            )),
        );

        Ok((
            beacon_block_params,
            beacon_aggregate_proof_params,
            beacon_attestation_subnet_params,
        ))
    }

    /// The number of attestation subnets these settings were built with.
    pub fn attestation_subnet_count(&self) -> u64 {
        self.attestation_subnet_count
    }

    /// Per-interval decay factor such that a counter reaches `decay_to_zero`
    /// after `decay_time` has elapsed.
    fn score_parameter_decay_with_base(
        decay_time: Duration,
        decay_interval: Duration,
        decay_to_zero: f64,
    ) -> f64 {
        // Number of decay intervals that fit in the target decay time.
        let ticks = decay_time.as_secs_f64() / decay_interval.as_secs_f64();
        decay_to_zero.powf(1.0 / ticks)
    }

    /// Steady-state value of a counter incremented at `rate` per interval while
    /// decaying by factor `decay` each interval.
    fn decay_convergence(decay: f64, rate: f64) -> f64 {
        rate / (1.0 - decay)
    }

    /// Convergence value after one additional decay step.
    fn threshold(decay: f64, rate: f64) -> f64 {
        Self::decay_convergence(decay, rate) * decay
    }

    /// Expected number of aggregate messages per slot, together with the number
    /// of committees per slot, given the active validator count.
    fn expected_aggregator_count_per_slot(
        &self,
        active_validators: usize,
    ) -> error::Result<(f64, usize)> {
        let committees_per_slot = TSpec::get_committee_count_per_slot_with(
            active_validators,
            self.max_committees_per_slot,
            self.target_committee_size,
        )
        .map_err(|e| format!("Could not get committee count from spec: {:?}", e))?;

        let committees = committees_per_slot * TSpec::slots_per_epoch() as usize;
        // Validators are split across committees of two adjacent sizes.
        let smaller_committee_size = active_validators / committees;
        let num_larger_committees = active_validators - smaller_committee_size * committees;

        let modulo_smaller = max(
            1,
            smaller_committee_size / self.target_aggregators_per_committee as usize,
        );
        let modulo_larger = max(
            1,
            (smaller_committee_size + 1) / self.target_aggregators_per_committee as usize,
        );

        // Average the expected aggregator counts of both committee sizes per slot.
        Ok((
            (((committees - num_larger_committees) * smaller_committee_size) as f64
                / modulo_smaller as f64
                + (num_larger_committees * (smaller_committee_size + 1)) as f64
                    / modulo_larger as f64)
                / TSpec::slots_per_epoch() as f64,
            committees_per_slot,
        ))
    }

    /// Shorthand for `score_parameter_decay_with_base` using this instance's
    /// decay interval and decay-to-zero value.
    fn score_parameter_decay(&self, decay_time: Duration) -> f64 {
        Self::score_parameter_decay_with_base(decay_time, self.decay_interval, self.decay_to_zero)
    }

    /// Build the `TopicScoreParams` for a single topic from its weight, the
    /// expected message rate and the decay/mesh configuration.
    fn get_topic_params(
        &self,
        topic_weight: f64,
        expected_message_rate: f64,
        first_message_decay_time: Duration,
        // decay slots (decay time in slots), cap factor, activation window, current slot
        mesh_message_info: Option<(u64, f64, Duration, Slot)>,
    ) -> TopicScoreParams {
        let mut t_params = TopicScoreParams::default();

        t_params.topic_weight = topic_weight;

        t_params.time_in_mesh_quantum = self.slot;
        t_params.time_in_mesh_cap = 3600.0 / t_params.time_in_mesh_quantum.as_secs_f64();
        t_params.time_in_mesh_weight = 10.0 / t_params.time_in_mesh_cap;

        t_params.first_message_deliveries_decay =
            self.score_parameter_decay(first_message_decay_time);
        t_params.first_message_deliveries_cap = Self::decay_convergence(
            t_params.first_message_deliveries_decay,
            2.0 * expected_message_rate / self.mesh_n as f64,
        );
        t_params.first_message_deliveries_weight = 40.0 / t_params.first_message_deliveries_cap;

        if let Some((decay_slots, cap_factor, activation_window, current_slot)) = mesh_message_info
        {
            let decay_time = self.slot * decay_slots as u32;
            t_params.mesh_message_deliveries_decay = self.score_parameter_decay(decay_time);
            t_params.mesh_message_deliveries_threshold = Self::threshold(
                t_params.mesh_message_deliveries_decay,
                expected_message_rate / 50.0,
            );
            // Clamp the deliveries cap to a minimum of 2.
            t_params.mesh_message_deliveries_cap =
                if cap_factor * t_params.mesh_message_deliveries_threshold < 2.0 {
                    2.0
                } else {
                    cap_factor * t_params.mesh_message_deliveries_threshold
                };
            t_params.mesh_message_deliveries_activation = activation_window;
            t_params.mesh_message_deliveries_window =
                Duration::from_secs(MESH_MESSAGE_DELIVERIES_WINDOW);
            t_params.mesh_failure_penalty_decay = t_params.mesh_message_deliveries_decay;
            t_params.mesh_message_deliveries_weight = -t_params.topic_weight;
            t_params.mesh_failure_penalty_weight = t_params.mesh_message_deliveries_weight;
            // Early in the chain, fewer slots than the decay window have elapsed;
            // disable mesh delivery penalties until enough history exists.
            if decay_slots >= current_slot.as_u64() {
                t_params.mesh_message_deliveries_threshold = 0.0;
                t_params.mesh_message_deliveries_weight = 0.0;
            }
        } else {
            // No mesh delivery scoring for this topic.
            t_params.mesh_message_deliveries_weight = 0.0;
            t_params.mesh_message_deliveries_threshold = 0.0;
            t_params.mesh_message_deliveries_decay = 0.0;
            t_params.mesh_message_deliveries_cap = 0.0;
            t_params.mesh_message_deliveries_window = Duration::from_secs(0);
            t_params.mesh_message_deliveries_activation = Duration::from_secs(0);
            t_params.mesh_failure_penalty_decay = 0.0;
            t_params.mesh_failure_penalty_weight = 0.0;
        }

        // A single invalid message can wipe out the maximum positive score.
        t_params.invalid_message_deliveries_weight =
            -self.max_positive_score / t_params.topic_weight;
        t_params.invalid_message_deliveries_decay = self.score_parameter_decay(self.epoch * 50);

        t_params
    }
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,288 @@
use crate::multiaddr::Protocol;
use crate::rpc::{MetaData, MetaDataV1, MetaDataV2};
use crate::types::{
error, EnrAttestationBitfield, EnrSyncCommitteeBitfield, GossipEncoding, GossipKind,
};
use crate::{GossipTopic, NetworkConfig};
use libp2p::bandwidth::{BandwidthLogging, BandwidthSinks};
use libp2p::core::{
identity::Keypair, multiaddr::Multiaddr, muxing::StreamMuxerBox, transport::Boxed,
};
use libp2p::gossipsub::subscription_filter::WhitelistSubscriptionFilter;
use libp2p::gossipsub::IdentTopic as Topic;
use libp2p::{core, noise, PeerId, Transport};
use prometheus_client::registry::Registry;
use slog::{debug, warn};
use ssz::Decode;
use ssz::Encode;
use std::collections::HashSet;
use std::fs::File;
use std::io::prelude::*;
use std::path::Path;
use std::sync::Arc;
use std::time::Duration;
use types::{ChainSpec, EnrForkId, EthSpec, ForkContext, SubnetId, SyncSubnetId};
/// The filename (under the network directory) holding the node's secp256k1 network key.
pub const NETWORK_KEY_FILENAME: &str = "key";
/// The maximum simultaneous libp2p connections per peer.
pub const MAX_CONNECTIONS_PER_PEER: u32 = 1;
/// The filename to store our local metadata.
pub const METADATA_FILENAME: &str = "metadata";

/// Parameters required to construct the network service.
pub struct Context<'a> {
    /// The network configuration (includes `network_dir` used for key/metadata persistence).
    pub config: &'a NetworkConfig,
    /// The current ENR fork id.
    pub enr_fork_id: EnrForkId,
    /// Shared fork schedule context.
    pub fork_context: Arc<ForkContext>,
    /// The chain specification.
    pub chain_spec: &'a ChainSpec,
    /// Optional metrics registry for gossipsub.
    pub gossipsub_registry: Option<&'a mut Registry>,
}

/// A fully boxed transport yielding (peer id, muxed connection) pairs.
type BoxedTransport = Boxed<(PeerId, StreamMuxerBox)>;
/// The implementation supports TCP/IP, WebSockets over TCP/IP, noise as the encryption layer, and
/// mplex as the multiplexing layer.
///
/// Returns the boxed transport together with the bandwidth sinks that count
/// bytes flowing through it.
pub fn build_transport(
    local_private_key: Keypair,
) -> std::io::Result<(BoxedTransport, Arc<BandwidthSinks>)> {
    // Tokio TCP with nodelay, resolved through the system DNS configuration.
    let tcp =
        libp2p::tcp::TokioTcpTransport::new(libp2p::tcp::GenTcpConfig::default().nodelay(true));
    let transport = libp2p::dns::TokioDnsConfig::system(tcp)?;
    #[cfg(feature = "libp2p-websocket")]
    let transport = {
        let trans_clone = transport.clone();
        transport.or_transport(libp2p::websocket::WsConfig::new(trans_clone))
    };
    // Wrap the transport so bytes in/out are counted.
    let (transport, bandwidth) = BandwidthLogging::new(transport);
    // mplex config
    let mut mplex_config = libp2p::mplex::MplexConfig::new();
    mplex_config.set_max_buffer_size(256);
    // Block (apply backpressure) rather than reset substreams when the buffer fills.
    mplex_config.set_max_buffer_behaviour(libp2p::mplex::MaxBufferBehaviour::Block);
    // yamux config
    let mut yamux_config = libp2p::yamux::YamuxConfig::default();
    yamux_config.set_window_update_mode(libp2p::yamux::WindowUpdateMode::on_read());
    // Authentication
    Ok((
        transport
            .upgrade(core::upgrade::Version::V1)
            .authenticate(generate_noise_config(&local_private_key))
            // Negotiate either yamux or mplex for stream multiplexing.
            .multiplex(core::upgrade::SelectUpgrade::new(
                yamux_config,
                mplex_config,
            ))
            .timeout(Duration::from_secs(10))
            .boxed(),
        bandwidth,
    ))
}
// Useful helper functions for debugging. Currently not used in the client.
/// Parses a hex-encoded (optionally `0x`-prefixed) secp256k1 secret key into a
/// libp2p `Keypair`. Returns an error if the input is not valid hex or not a
/// valid secret key.
#[allow(dead_code)]
fn keypair_from_hex(hex_bytes: &str) -> error::Result<Keypair> {
    // Accept both "0x…" and bare hex without allocating intermediate Strings.
    let hex_bytes = hex_bytes.strip_prefix("0x").unwrap_or(hex_bytes);
    hex::decode(hex_bytes)
        .map_err(|e| format!("Failed to parse p2p secret key bytes: {:?}", e).into())
        .and_then(keypair_from_bytes)
}
/// Builds a libp2p `Keypair` from raw secp256k1 secret-key bytes.
#[allow(dead_code)]
fn keypair_from_bytes(mut bytes: Vec<u8>) -> error::Result<Keypair> {
    match libp2p::core::identity::secp256k1::SecretKey::from_bytes(&mut bytes) {
        Ok(secret) => {
            let keypair: libp2p::core::identity::secp256k1::Keypair = secret.into();
            Ok(Keypair::Secp256k1(keypair))
        }
        Err(e) => Err(format!("Unable to parse p2p secret key: {:?}", e).into()),
    }
}
/// Loads a private key from disk. If this fails, a new key is
/// generated and is then saved to disk.
///
/// Currently only secp256k1 keys are allowed, as these are the only keys supported by discv5.
pub fn load_private_key(config: &NetworkConfig, log: &slog::Logger) -> Keypair {
    // check for key from disk
    let network_key_f = config.network_dir.join(NETWORK_KEY_FILENAME);
    if let Ok(mut network_key_file) = File::open(network_key_f.clone()) {
        let mut key_bytes: Vec<u8> = Vec::with_capacity(36);
        match network_key_file.read_to_end(&mut key_bytes) {
            Err(_) => debug!(log, "Could not read network key file"),
            Ok(_) => {
                // only accept secp256k1 keys for now
                if let Ok(secret_key) =
                    libp2p::core::identity::secp256k1::SecretKey::from_bytes(&mut key_bytes)
                {
                    let kp: libp2p::core::identity::secp256k1::Keypair = secret_key.into();
                    debug!(log, "Loaded network key from disk.");
                    return Keypair::Secp256k1(kp);
                } else {
                    debug!(log, "Network key file is not a valid secp256k1 key");
                }
            }
        }
    }
    // if a key could not be loaded from disk, generate a new one and save it
    let local_private_key = Keypair::generate_secp256k1();
    if let Keypair::Secp256k1(key) = local_private_key.clone() {
        let _ = std::fs::create_dir_all(&config.network_dir);
        // Best-effort persistence: a failure to write is logged but not fatal.
        match File::create(network_key_f.clone())
            .and_then(|mut f| f.write_all(&key.secret().to_bytes()))
        {
            Ok(_) => {
                debug!(log, "New network key generated and written to disk");
            }
            Err(e) => {
                warn!(
                    log,
                    "Could not write node key to file: {:?}. error: {}", network_key_f, e
                );
            }
        }
    }
    local_private_key
}
/// Generate authenticated XX Noise config from identity keys
fn generate_noise_config(
    identity_keypair: &Keypair,
) -> noise::NoiseAuthenticated<noise::XX, noise::X25519Spec, ()> {
    // Sign the ephemeral X25519 static keys with the libp2p identity key so
    // peers can authenticate each other during the XX handshake.
    let static_dh_keys = noise::Keypair::<noise::X25519Spec>::new()
        .into_authentic(identity_keypair)
        .expect("signing can fail only once during starting a node");
    noise::NoiseConfig::xx(static_dh_keys).into_authenticated()
}
/// For a multiaddr that ends with a peer id, this strips this suffix. Rust-libp2p
/// only supports dialing to an address without providing the peer id.
pub fn strip_peer_id(addr: &mut Multiaddr) {
    if let Some(protocol) = addr.pop() {
        // Push back anything that is not a trailing /p2p/<peer-id> component.
        if !matches!(protocol, Protocol::P2p(_)) {
            addr.push(protocol);
        }
    }
}
/// Load metadata from persisted file. Return default metadata if loading fails.
pub fn load_or_build_metadata<E: EthSpec>(
    network_dir: &std::path::Path,
    log: &slog::Logger,
) -> MetaData<E> {
    // We load a V2 metadata version by default (regardless of current fork)
    // since a V2 metadata can be converted to V1. The RPC encoder is responsible
    // for sending the correct metadata version based on the negotiated protocol version.
    let mut meta_data = MetaDataV2 {
        seq_number: 0,
        attnets: EnrAttestationBitfield::<E>::default(),
        syncnets: EnrSyncCommitteeBitfield::<E>::default(),
    };
    // Read metadata from persisted file if available
    let metadata_path = network_dir.join(METADATA_FILENAME);
    if let Ok(mut metadata_file) = File::open(metadata_path) {
        let mut metadata_ssz = Vec::new();
        if metadata_file.read_to_end(&mut metadata_ssz).is_ok() {
            // Attempt to read a MetaDataV2 version from the persisted file,
            // if that fails, read MetaDataV1
            match MetaDataV2::<E>::from_ssz_bytes(&metadata_ssz) {
                Ok(persisted_metadata) => {
                    meta_data.seq_number = persisted_metadata.seq_number;
                    // Increment seq number if persisted attnet is not default
                    if persisted_metadata.attnets != meta_data.attnets
                        || persisted_metadata.syncnets != meta_data.syncnets
                    {
                        meta_data.seq_number += 1;
                    }
                    debug!(log, "Loaded metadata from disk");
                }
                Err(_) => {
                    match MetaDataV1::<E>::from_ssz_bytes(&metadata_ssz) {
                        Ok(persisted_metadata) => {
                            let persisted_metadata = MetaData::V1(persisted_metadata);
                            // Increment seq number as the persisted metadata version is updated
                            meta_data.seq_number = *persisted_metadata.seq_number() + 1;
                            debug!(log, "Loaded metadata from disk");
                        }
                        Err(e) => {
                            // Neither version decoded; fall through with default metadata.
                            debug!(
                                log,
                                "Metadata from file could not be decoded";
                                "error" => ?e,
                            );
                        }
                    }
                }
            }
        }
    };
    // Wrap the MetaData
    let meta_data = MetaData::V2(meta_data);
    debug!(log, "Metadata sequence number"; "seq_num" => meta_data.seq_number());
    // Persist the (possibly bumped) metadata back to disk before returning it.
    save_metadata_to_disk(network_dir, meta_data.clone(), log);
    meta_data
}
/// Creates a whitelist topic filter that covers all possible topics using the given set of
/// possible fork digests.
pub(crate) fn create_whitelist_filter(
    possible_fork_digests: Vec<[u8; 4]>,
    attestation_subnet_count: u64,
    sync_committee_subnet_count: u64,
) -> WhitelistSubscriptionFilter {
    use GossipKind::*;
    // For each fork digest, whitelist every fixed topic plus every attestation
    // and sync-committee subnet topic.
    let possible_hashes: HashSet<_> = possible_fork_digests
        .into_iter()
        .flat_map(|fork_digest| {
            let fixed = vec![
                BeaconBlock,
                BeaconAggregateAndProof,
                VoluntaryExit,
                ProposerSlashing,
                AttesterSlashing,
                SignedContributionAndProof,
            ];
            fixed
                .into_iter()
                .chain((0..attestation_subnet_count).map(|id| Attestation(SubnetId::new(id))))
                .chain(
                    (0..sync_committee_subnet_count)
                        .map(|id| SyncCommitteeMessage(SyncSubnetId::new(id))),
                )
                .map(move |kind| {
                    let topic: Topic =
                        GossipTopic::new(kind, GossipEncoding::SSZSnappy, fork_digest).into();
                    topic.hash()
                })
        })
        .collect();
    WhitelistSubscriptionFilter(possible_hashes)
}
/// Persist metadata to disk
///
/// Best-effort: failures are logged as warnings but not propagated.
pub(crate) fn save_metadata_to_disk<E: EthSpec>(
    dir: &Path,
    metadata: MetaData<E>,
    log: &slog::Logger,
) {
    // Ensure the directory exists; errors surface below when the file is created.
    let _ = std::fs::create_dir_all(&dir);
    match File::create(dir.join(METADATA_FILENAME))
        .and_then(|mut f| f.write_all(&metadata.as_ssz_bytes()))
    {
        Ok(_) => {
            debug!(log, "Metadata written to disk");
        }
        Err(e) => {
            warn!(
                log,
                "Could not write metadata to disk";
                "file" => format!("{:?}{:?}", dir, METADATA_FILENAME),
                "error" => %e
            );
        }
    }
}