mirror of
https://github.com/sigp/lighthouse.git
synced 2026-05-01 03:33:47 +00:00
Rename eth2_libp2p to lighthouse_network (#2702)
## Description The `eth2_libp2p` crate was originally named and designed to incorporate a simple libp2p integration into lighthouse. Since its origins the crate's purpose has expanded dramatically. It now houses a lot more sophistication that is specific to lighthouse and no longer just a libp2p integration. As of this writing it currently houses the following high-level lighthouse-specific logic: - Lighthouse's implementation of the eth2 RPC protocol and specific encodings/decodings - Integration and handling of ENRs with respect to libp2p and eth2 - Lighthouse's discovery logic, its integration with discv5 and logic about searching and handling peers. - Lighthouse's peer manager - This is a large module handling various aspects of Lighthouse's network, such as peer scoring, handling pings and metadata, connection maintenance and recording, etc. - Lighthouse's peer database - This is a collection of information stored for each individual peer which is specific to lighthouse. We store connection state, sync state, last seen ips and scores etc. The data stored for each peer is designed for various elements of the lighthouse code base such as syncing and the http api. - Gossipsub scoring - This stores a collection of gossipsub 1.1 scoring mechanisms that are continuously analysed and updated based on the ethereum 2 networks and how Lighthouse performs on these networks. - Lighthouse specific types for managing gossipsub topics, sync status and ENR fields - Lighthouse's network HTTP API metrics - A collection of metrics for lighthouse network monitoring - Lighthouse's custom configuration of all networking protocols, RPC, gossipsub, discovery, identify and libp2p. Therefore it makes sense to rename the crate to be more akin to its current purposes, simply that it manages the majority of Lighthouse's network stack. This PR renames this crate to `lighthouse_network` Co-authored-by: Paul Hauner <paul@paulhauner.com>
This commit is contained in:
@@ -0,0 +1,359 @@
|
||||
use crate::types::{GossipEncoding, GossipKind, GossipTopic};
|
||||
use crate::{error, TopicHash};
|
||||
use libp2p::gossipsub::{
|
||||
GossipsubConfig, IdentTopic as Topic, PeerScoreParams, PeerScoreThresholds, TopicScoreParams,
|
||||
};
|
||||
use std::cmp::max;
|
||||
use std::collections::HashMap;
|
||||
use std::marker::PhantomData;
|
||||
use std::time::Duration;
|
||||
use types::{ChainSpec, EnrForkId, EthSpec, Slot, SubnetId};
|
||||
|
||||
const MAX_IN_MESH_SCORE: f64 = 10.0;
|
||||
const MAX_FIRST_MESSAGE_DELIVERIES_SCORE: f64 = 40.0;
|
||||
const BEACON_BLOCK_WEIGHT: f64 = 0.5;
|
||||
const BEACON_AGGREGATE_PROOF_WEIGHT: f64 = 0.5;
|
||||
const VOLUNTARY_EXIT_WEIGHT: f64 = 0.05;
|
||||
const PROPOSER_SLASHING_WEIGHT: f64 = 0.05;
|
||||
const ATTESTER_SLASHING_WEIGHT: f64 = 0.05;
|
||||
|
||||
/// The time window (seconds) that we expect messages to be forwarded to us in the mesh.
|
||||
const MESH_MESSAGE_DELIVERIES_WINDOW: u64 = 2;
|
||||
|
||||
// Const as this is used in the peer manager to prevent gossip from disconnecting peers.
|
||||
pub const GREYLIST_THRESHOLD: f64 = -16000.0;
|
||||
|
||||
/// Builds the peer score thresholds.
|
||||
pub fn lighthouse_gossip_thresholds() -> PeerScoreThresholds {
|
||||
PeerScoreThresholds {
|
||||
gossip_threshold: -4000.0,
|
||||
publish_threshold: -8000.0,
|
||||
graylist_threshold: GREYLIST_THRESHOLD,
|
||||
accept_px_threshold: 100.0,
|
||||
opportunistic_graft_threshold: 5.0,
|
||||
}
|
||||
}
|
||||
|
||||
pub struct PeerScoreSettings<TSpec: EthSpec> {
|
||||
slot: Duration,
|
||||
epoch: Duration,
|
||||
|
||||
beacon_attestation_subnet_weight: f64,
|
||||
max_positive_score: f64,
|
||||
|
||||
decay_interval: Duration,
|
||||
decay_to_zero: f64,
|
||||
|
||||
mesh_n: usize,
|
||||
max_committees_per_slot: usize,
|
||||
target_committee_size: usize,
|
||||
target_aggregators_per_committee: usize,
|
||||
attestation_subnet_count: u64,
|
||||
phantom: PhantomData<TSpec>,
|
||||
}
|
||||
|
||||
impl<TSpec: EthSpec> PeerScoreSettings<TSpec> {
|
||||
pub fn new(chain_spec: &ChainSpec, gs_config: &GossipsubConfig) -> PeerScoreSettings<TSpec> {
|
||||
let slot = Duration::from_secs(chain_spec.seconds_per_slot);
|
||||
let beacon_attestation_subnet_weight = 1.0 / chain_spec.attestation_subnet_count as f64;
|
||||
let max_positive_score = (MAX_IN_MESH_SCORE + MAX_FIRST_MESSAGE_DELIVERIES_SCORE)
|
||||
* (BEACON_BLOCK_WEIGHT
|
||||
+ BEACON_AGGREGATE_PROOF_WEIGHT
|
||||
+ beacon_attestation_subnet_weight * chain_spec.attestation_subnet_count as f64
|
||||
+ VOLUNTARY_EXIT_WEIGHT
|
||||
+ PROPOSER_SLASHING_WEIGHT
|
||||
+ ATTESTER_SLASHING_WEIGHT);
|
||||
|
||||
PeerScoreSettings {
|
||||
slot,
|
||||
epoch: slot * TSpec::slots_per_epoch() as u32,
|
||||
beacon_attestation_subnet_weight,
|
||||
max_positive_score,
|
||||
decay_interval: max(Duration::from_secs(1), slot),
|
||||
decay_to_zero: 0.01,
|
||||
mesh_n: gs_config.mesh_n(),
|
||||
max_committees_per_slot: chain_spec.max_committees_per_slot,
|
||||
target_committee_size: chain_spec.target_committee_size,
|
||||
target_aggregators_per_committee: chain_spec.target_aggregators_per_committee as usize,
|
||||
attestation_subnet_count: chain_spec.attestation_subnet_count,
|
||||
phantom: PhantomData,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_peer_score_params(
|
||||
&self,
|
||||
active_validators: usize,
|
||||
thresholds: &PeerScoreThresholds,
|
||||
enr_fork_id: &EnrForkId,
|
||||
current_slot: Slot,
|
||||
) -> error::Result<PeerScoreParams> {
|
||||
let mut params = PeerScoreParams {
|
||||
decay_interval: self.decay_interval,
|
||||
decay_to_zero: self.decay_to_zero,
|
||||
retain_score: self.epoch * 100,
|
||||
app_specific_weight: 1.0,
|
||||
ip_colocation_factor_threshold: 8.0, // Allow up to 8 nodes per IP
|
||||
behaviour_penalty_threshold: 6.0,
|
||||
behaviour_penalty_decay: self.score_parameter_decay(self.epoch * 10),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let target_value = Self::decay_convergence(
|
||||
params.behaviour_penalty_decay,
|
||||
10.0 / TSpec::slots_per_epoch() as f64,
|
||||
) - params.behaviour_penalty_threshold;
|
||||
params.behaviour_penalty_weight = thresholds.gossip_threshold / target_value.powi(2);
|
||||
|
||||
params.topic_score_cap = self.max_positive_score * 0.5;
|
||||
params.ip_colocation_factor_weight = -params.topic_score_cap;
|
||||
|
||||
params.topics = HashMap::new();
|
||||
|
||||
let get_hash = |kind: GossipKind| -> TopicHash {
|
||||
let topic: Topic =
|
||||
GossipTopic::new(kind, GossipEncoding::default(), enr_fork_id.fork_digest).into();
|
||||
topic.hash()
|
||||
};
|
||||
|
||||
//first all fixed topics
|
||||
params.topics.insert(
|
||||
get_hash(GossipKind::VoluntaryExit),
|
||||
Self::get_topic_params(
|
||||
self,
|
||||
VOLUNTARY_EXIT_WEIGHT,
|
||||
4.0 / TSpec::slots_per_epoch() as f64,
|
||||
self.epoch * 100,
|
||||
None,
|
||||
),
|
||||
);
|
||||
params.topics.insert(
|
||||
get_hash(GossipKind::AttesterSlashing),
|
||||
Self::get_topic_params(
|
||||
self,
|
||||
ATTESTER_SLASHING_WEIGHT,
|
||||
1.0 / 5.0 / TSpec::slots_per_epoch() as f64,
|
||||
self.epoch * 100,
|
||||
None,
|
||||
),
|
||||
);
|
||||
params.topics.insert(
|
||||
get_hash(GossipKind::ProposerSlashing),
|
||||
Self::get_topic_params(
|
||||
self,
|
||||
PROPOSER_SLASHING_WEIGHT,
|
||||
1.0 / 5.0 / TSpec::slots_per_epoch() as f64,
|
||||
self.epoch * 100,
|
||||
None,
|
||||
),
|
||||
);
|
||||
|
||||
//dynamic topics
|
||||
let (beacon_block_params, beacon_aggregate_proof_params, beacon_attestation_subnet_params) =
|
||||
self.get_dynamic_topic_params(active_validators, current_slot)?;
|
||||
|
||||
params
|
||||
.topics
|
||||
.insert(get_hash(GossipKind::BeaconBlock), beacon_block_params);
|
||||
|
||||
params.topics.insert(
|
||||
get_hash(GossipKind::BeaconAggregateAndProof),
|
||||
beacon_aggregate_proof_params,
|
||||
);
|
||||
|
||||
for i in 0..self.attestation_subnet_count {
|
||||
params.topics.insert(
|
||||
get_hash(GossipKind::Attestation(SubnetId::new(i))),
|
||||
beacon_attestation_subnet_params.clone(),
|
||||
);
|
||||
}
|
||||
|
||||
Ok(params)
|
||||
}
|
||||
|
||||
pub fn get_dynamic_topic_params(
|
||||
&self,
|
||||
active_validators: usize,
|
||||
current_slot: Slot,
|
||||
) -> error::Result<(TopicScoreParams, TopicScoreParams, TopicScoreParams)> {
|
||||
let (aggregators_per_slot, committees_per_slot) =
|
||||
self.expected_aggregator_count_per_slot(active_validators)?;
|
||||
let multiple_bursts_per_subnet_per_epoch = committees_per_slot as u64
|
||||
>= 2 * self.attestation_subnet_count / TSpec::slots_per_epoch();
|
||||
|
||||
let beacon_block_params = Self::get_topic_params(
|
||||
self,
|
||||
BEACON_BLOCK_WEIGHT,
|
||||
1.0,
|
||||
self.epoch * 20,
|
||||
Some((TSpec::slots_per_epoch() * 5, 3.0, self.epoch, current_slot)),
|
||||
);
|
||||
|
||||
let beacon_aggregate_proof_params = Self::get_topic_params(
|
||||
self,
|
||||
BEACON_AGGREGATE_PROOF_WEIGHT,
|
||||
aggregators_per_slot,
|
||||
self.epoch,
|
||||
Some((TSpec::slots_per_epoch() * 2, 4.0, self.epoch, current_slot)),
|
||||
);
|
||||
let beacon_attestation_subnet_params = Self::get_topic_params(
|
||||
self,
|
||||
self.beacon_attestation_subnet_weight,
|
||||
active_validators as f64
|
||||
/ self.attestation_subnet_count as f64
|
||||
/ TSpec::slots_per_epoch() as f64,
|
||||
self.epoch
|
||||
* (if multiple_bursts_per_subnet_per_epoch {
|
||||
1
|
||||
} else {
|
||||
4
|
||||
}),
|
||||
Some((
|
||||
TSpec::slots_per_epoch()
|
||||
* (if multiple_bursts_per_subnet_per_epoch {
|
||||
4
|
||||
} else {
|
||||
16
|
||||
}),
|
||||
16.0,
|
||||
if multiple_bursts_per_subnet_per_epoch {
|
||||
self.slot * (TSpec::slots_per_epoch() as u32 / 2 + 1)
|
||||
} else {
|
||||
self.epoch * 3
|
||||
},
|
||||
current_slot,
|
||||
)),
|
||||
);
|
||||
|
||||
Ok((
|
||||
beacon_block_params,
|
||||
beacon_aggregate_proof_params,
|
||||
beacon_attestation_subnet_params,
|
||||
))
|
||||
}
|
||||
|
||||
pub fn attestation_subnet_count(&self) -> u64 {
|
||||
self.attestation_subnet_count
|
||||
}
|
||||
|
||||
fn score_parameter_decay_with_base(
|
||||
decay_time: Duration,
|
||||
decay_interval: Duration,
|
||||
decay_to_zero: f64,
|
||||
) -> f64 {
|
||||
let ticks = decay_time.as_secs_f64() / decay_interval.as_secs_f64();
|
||||
decay_to_zero.powf(1.0 / ticks)
|
||||
}
|
||||
|
||||
fn decay_convergence(decay: f64, rate: f64) -> f64 {
|
||||
rate / (1.0 - decay)
|
||||
}
|
||||
|
||||
fn threshold(decay: f64, rate: f64) -> f64 {
|
||||
Self::decay_convergence(decay, rate) * decay
|
||||
}
|
||||
|
||||
fn expected_aggregator_count_per_slot(
|
||||
&self,
|
||||
active_validators: usize,
|
||||
) -> error::Result<(f64, usize)> {
|
||||
let committees_per_slot = TSpec::get_committee_count_per_slot_with(
|
||||
active_validators,
|
||||
self.max_committees_per_slot,
|
||||
self.target_committee_size,
|
||||
)
|
||||
.map_err(|e| format!("Could not get committee count from spec: {:?}", e))?;
|
||||
|
||||
let committees = committees_per_slot * TSpec::slots_per_epoch() as usize;
|
||||
|
||||
let smaller_committee_size = active_validators / committees;
|
||||
let num_larger_committees = active_validators - smaller_committee_size * committees;
|
||||
|
||||
let modulo_smaller = max(
|
||||
1,
|
||||
smaller_committee_size / self.target_aggregators_per_committee as usize,
|
||||
);
|
||||
let modulo_larger = max(
|
||||
1,
|
||||
(smaller_committee_size + 1) / self.target_aggregators_per_committee as usize,
|
||||
);
|
||||
|
||||
Ok((
|
||||
(((committees - num_larger_committees) * smaller_committee_size) as f64
|
||||
/ modulo_smaller as f64
|
||||
+ (num_larger_committees * (smaller_committee_size + 1)) as f64
|
||||
/ modulo_larger as f64)
|
||||
/ TSpec::slots_per_epoch() as f64,
|
||||
committees_per_slot,
|
||||
))
|
||||
}
|
||||
|
||||
fn score_parameter_decay(&self, decay_time: Duration) -> f64 {
|
||||
Self::score_parameter_decay_with_base(decay_time, self.decay_interval, self.decay_to_zero)
|
||||
}
|
||||
|
||||
fn get_topic_params(
|
||||
&self,
|
||||
topic_weight: f64,
|
||||
expected_message_rate: f64,
|
||||
first_message_decay_time: Duration,
|
||||
// decay slots (decay time in slots), cap factor, activation window, current slot
|
||||
mesh_message_info: Option<(u64, f64, Duration, Slot)>,
|
||||
) -> TopicScoreParams {
|
||||
let mut t_params = TopicScoreParams::default();
|
||||
|
||||
t_params.topic_weight = topic_weight;
|
||||
|
||||
t_params.time_in_mesh_quantum = self.slot;
|
||||
t_params.time_in_mesh_cap = 3600.0 / t_params.time_in_mesh_quantum.as_secs_f64();
|
||||
t_params.time_in_mesh_weight = 10.0 / t_params.time_in_mesh_cap;
|
||||
|
||||
t_params.first_message_deliveries_decay =
|
||||
self.score_parameter_decay(first_message_decay_time);
|
||||
t_params.first_message_deliveries_cap = Self::decay_convergence(
|
||||
t_params.first_message_deliveries_decay,
|
||||
2.0 * expected_message_rate / self.mesh_n as f64,
|
||||
);
|
||||
t_params.first_message_deliveries_weight = 40.0 / t_params.first_message_deliveries_cap;
|
||||
|
||||
if let Some((decay_slots, cap_factor, activation_window, current_slot)) = mesh_message_info
|
||||
{
|
||||
let decay_time = self.slot * decay_slots as u32;
|
||||
t_params.mesh_message_deliveries_decay = self.score_parameter_decay(decay_time);
|
||||
t_params.mesh_message_deliveries_threshold = Self::threshold(
|
||||
t_params.mesh_message_deliveries_decay,
|
||||
expected_message_rate / 50.0,
|
||||
);
|
||||
t_params.mesh_message_deliveries_cap =
|
||||
if cap_factor * t_params.mesh_message_deliveries_threshold < 2.0 {
|
||||
2.0
|
||||
} else {
|
||||
cap_factor * t_params.mesh_message_deliveries_threshold
|
||||
};
|
||||
t_params.mesh_message_deliveries_activation = activation_window;
|
||||
t_params.mesh_message_deliveries_window =
|
||||
Duration::from_secs(MESH_MESSAGE_DELIVERIES_WINDOW);
|
||||
t_params.mesh_failure_penalty_decay = t_params.mesh_message_deliveries_decay;
|
||||
t_params.mesh_message_deliveries_weight = -t_params.topic_weight;
|
||||
t_params.mesh_failure_penalty_weight = t_params.mesh_message_deliveries_weight;
|
||||
if decay_slots >= current_slot.as_u64() {
|
||||
t_params.mesh_message_deliveries_threshold = 0.0;
|
||||
t_params.mesh_message_deliveries_weight = 0.0;
|
||||
}
|
||||
} else {
|
||||
t_params.mesh_message_deliveries_weight = 0.0;
|
||||
t_params.mesh_message_deliveries_threshold = 0.0;
|
||||
t_params.mesh_message_deliveries_decay = 0.0;
|
||||
t_params.mesh_message_deliveries_cap = 0.0;
|
||||
t_params.mesh_message_deliveries_window = Duration::from_secs(0);
|
||||
t_params.mesh_message_deliveries_activation = Duration::from_secs(0);
|
||||
t_params.mesh_failure_penalty_decay = 0.0;
|
||||
t_params.mesh_failure_penalty_weight = 0.0;
|
||||
}
|
||||
|
||||
t_params.invalid_message_deliveries_weight =
|
||||
-self.max_positive_score / t_params.topic_weight;
|
||||
t_params.invalid_message_deliveries_decay = self.score_parameter_decay(self.epoch * 50);
|
||||
|
||||
t_params
|
||||
}
|
||||
}
|
||||
1220
beacon_node/lighthouse_network/src/behaviour/mod.rs
Normal file
1220
beacon_node/lighthouse_network/src/behaviour/mod.rs
Normal file
File diff suppressed because it is too large
Load Diff
254
beacon_node/lighthouse_network/src/config.rs
Normal file
254
beacon_node/lighthouse_network/src/config.rs
Normal file
@@ -0,0 +1,254 @@
|
||||
use crate::types::GossipKind;
|
||||
use crate::{Enr, PeerIdSerialized};
|
||||
use directory::{
|
||||
DEFAULT_BEACON_NODE_DIR, DEFAULT_HARDCODED_NETWORK, DEFAULT_NETWORK_DIR, DEFAULT_ROOT_DIR,
|
||||
};
|
||||
use discv5::{Discv5Config, Discv5ConfigBuilder};
|
||||
use libp2p::gossipsub::{
|
||||
FastMessageId, GossipsubConfig, GossipsubConfigBuilder, GossipsubMessage, MessageId,
|
||||
RawGossipsubMessage, ValidationMode,
|
||||
};
|
||||
use libp2p::Multiaddr;
|
||||
use serde_derive::{Deserialize, Serialize};
|
||||
use sha2::{Digest, Sha256};
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
use types::{ForkContext, ForkName};
|
||||
|
||||
/// The maximum transmit size of gossip messages in bytes.
|
||||
pub const GOSSIP_MAX_SIZE: usize = 1_048_576;
|
||||
/// This is a constant to be used in discovery. The lower bound of the gossipsub mesh.
|
||||
pub const MESH_N_LOW: usize = 6;
|
||||
|
||||
/// The cache time is set to accommodate the circulation time of an attestation.
|
||||
///
|
||||
/// The p2p spec declares that we accept attestations within the following range:
|
||||
///
|
||||
/// ```ignore
|
||||
/// ATTESTATION_PROPAGATION_SLOT_RANGE = 32
|
||||
/// attestation.data.slot + ATTESTATION_PROPAGATION_SLOT_RANGE >= current_slot >= attestation.data.slot
|
||||
/// ```
|
||||
///
|
||||
/// Therefore, we must accept attestations across a span of 33 slots (where each slot is 12
|
||||
/// seconds). We add an additional second to account for the 500ms gossip clock disparity, and
|
||||
/// another 500ms for "fudge factor".
|
||||
pub const DUPLICATE_CACHE_TIME: Duration = Duration::from_secs(33 * 12 + 1);
|
||||
|
||||
// We treat uncompressed messages as invalid and never use the INVALID_SNAPPY_DOMAIN as in the
|
||||
// specification. We leave it here for posterity.
|
||||
// const MESSAGE_DOMAIN_INVALID_SNAPPY: [u8; 4] = [0, 0, 0, 0];
|
||||
const MESSAGE_DOMAIN_VALID_SNAPPY: [u8; 4] = [1, 0, 0, 0];
|
||||
|
||||
#[derive(Clone, Debug, Serialize, Deserialize)]
|
||||
#[serde(default)]
|
||||
/// Network configuration for lighthouse.
|
||||
pub struct Config {
|
||||
/// Data directory where node's keyfile is stored
|
||||
pub network_dir: PathBuf,
|
||||
|
||||
/// IP address to listen on.
|
||||
pub listen_address: std::net::IpAddr,
|
||||
|
||||
/// The TCP port that libp2p listens on.
|
||||
pub libp2p_port: u16,
|
||||
|
||||
/// UDP port that discovery listens on.
|
||||
pub discovery_port: u16,
|
||||
|
||||
/// The address to broadcast to peers about which address we are listening on. None indicates
|
||||
/// that no discovery address has been set in the CLI args.
|
||||
pub enr_address: Option<std::net::IpAddr>,
|
||||
|
||||
/// The udp port to broadcast to peers in order to reach back for discovery.
|
||||
pub enr_udp_port: Option<u16>,
|
||||
|
||||
/// The tcp port to broadcast to peers in order to reach back for libp2p services.
|
||||
pub enr_tcp_port: Option<u16>,
|
||||
|
||||
/// Target number of connected peers.
|
||||
pub target_peers: usize,
|
||||
|
||||
/// Gossipsub configuration parameters.
|
||||
#[serde(skip)]
|
||||
pub gs_config: GossipsubConfig,
|
||||
|
||||
/// Discv5 configuration parameters.
|
||||
#[serde(skip)]
|
||||
pub discv5_config: Discv5Config,
|
||||
|
||||
/// List of nodes to initially connect to.
|
||||
pub boot_nodes_enr: Vec<Enr>,
|
||||
|
||||
/// List of nodes to initially connect to, on Multiaddr format.
|
||||
pub boot_nodes_multiaddr: Vec<Multiaddr>,
|
||||
|
||||
/// List of libp2p nodes to initially connect to.
|
||||
pub libp2p_nodes: Vec<Multiaddr>,
|
||||
|
||||
/// List of trusted libp2p nodes which are not scored.
|
||||
pub trusted_peers: Vec<PeerIdSerialized>,
|
||||
|
||||
/// Client version
|
||||
pub client_version: String,
|
||||
|
||||
/// Disables the discovery protocol from starting.
|
||||
pub disable_discovery: bool,
|
||||
|
||||
/// Attempt to construct external port mappings with UPnP.
|
||||
pub upnp_enabled: bool,
|
||||
|
||||
/// Subscribe to all subnets for the duration of the runtime.
|
||||
pub subscribe_all_subnets: bool,
|
||||
|
||||
/// Import/aggregate all attestations recieved on subscribed subnets for the duration of the
|
||||
/// runtime.
|
||||
pub import_all_attestations: bool,
|
||||
|
||||
/// Indicates if the user has set the network to be in private mode. Currently this
|
||||
/// prevents sending client identifying information over identify.
|
||||
pub private: bool,
|
||||
|
||||
/// Shutdown beacon node after sync is completed.
|
||||
pub shutdown_after_sync: bool,
|
||||
|
||||
/// List of extra topics to initially subscribe to as strings.
|
||||
pub topics: Vec<GossipKind>,
|
||||
}
|
||||
|
||||
impl Default for Config {
|
||||
/// Generate a default network configuration.
|
||||
fn default() -> Self {
|
||||
// WARNING: this directory default should be always overwritten with parameters
|
||||
// from cli for specific networks.
|
||||
let network_dir = dirs::home_dir()
|
||||
.unwrap_or_else(|| PathBuf::from("."))
|
||||
.join(DEFAULT_ROOT_DIR)
|
||||
.join(DEFAULT_HARDCODED_NETWORK)
|
||||
.join(DEFAULT_BEACON_NODE_DIR)
|
||||
.join(DEFAULT_NETWORK_DIR);
|
||||
|
||||
// Note: Using the default config here. Use `gossipsub_config` function for getting
|
||||
// Lighthouse specific configuration for gossipsub.
|
||||
let gs_config = GossipsubConfigBuilder::default()
|
||||
.build()
|
||||
.expect("valid gossipsub configuration");
|
||||
|
||||
// Discv5 Unsolicited Packet Rate Limiter
|
||||
let filter_rate_limiter = Some(
|
||||
discv5::RateLimiterBuilder::new()
|
||||
.total_n_every(10, Duration::from_secs(1)) // Allow bursts, average 10 per second
|
||||
.ip_n_every(9, Duration::from_secs(1)) // Allow bursts, average 9 per second
|
||||
.node_n_every(8, Duration::from_secs(1)) // Allow bursts, average 8 per second
|
||||
.build()
|
||||
.expect("The total rate limit has been specified"),
|
||||
);
|
||||
|
||||
// discv5 configuration
|
||||
let discv5_config = Discv5ConfigBuilder::new()
|
||||
.enable_packet_filter()
|
||||
.session_cache_capacity(5000)
|
||||
.request_timeout(Duration::from_secs(1))
|
||||
.query_peer_timeout(Duration::from_secs(2))
|
||||
.query_timeout(Duration::from_secs(30))
|
||||
.request_retries(1)
|
||||
.enr_peer_update_min(10)
|
||||
.query_parallelism(5)
|
||||
.disable_report_discovered_peers()
|
||||
.ip_limit() // limits /24 IP's in buckets.
|
||||
.incoming_bucket_limit(8) // half the bucket size
|
||||
.filter_rate_limiter(filter_rate_limiter)
|
||||
.filter_max_bans_per_ip(Some(5))
|
||||
.filter_max_nodes_per_ip(Some(10))
|
||||
.ban_duration(Some(Duration::from_secs(3600)))
|
||||
.ping_interval(Duration::from_secs(300))
|
||||
.build();
|
||||
|
||||
// NOTE: Some of these get overridden by the corresponding CLI default values.
|
||||
Config {
|
||||
network_dir,
|
||||
listen_address: "0.0.0.0".parse().expect("valid ip address"),
|
||||
libp2p_port: 9000,
|
||||
discovery_port: 9000,
|
||||
enr_address: None,
|
||||
enr_udp_port: None,
|
||||
enr_tcp_port: None,
|
||||
target_peers: 50,
|
||||
gs_config,
|
||||
discv5_config,
|
||||
boot_nodes_enr: vec![],
|
||||
boot_nodes_multiaddr: vec![],
|
||||
libp2p_nodes: vec![],
|
||||
trusted_peers: vec![],
|
||||
client_version: lighthouse_version::version_with_platform(),
|
||||
disable_discovery: false,
|
||||
upnp_enabled: true,
|
||||
private: false,
|
||||
subscribe_all_subnets: false,
|
||||
import_all_attestations: false,
|
||||
shutdown_after_sync: false,
|
||||
topics: Vec::new(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Return a Lighthouse specific `GossipsubConfig` where the `message_id_fn` depends on the current fork.
|
||||
pub fn gossipsub_config(fork_context: Arc<ForkContext>) -> GossipsubConfig {
|
||||
// The function used to generate a gossipsub message id
|
||||
// We use the first 8 bytes of SHA256(data) for content addressing
|
||||
let fast_gossip_message_id =
|
||||
|message: &RawGossipsubMessage| FastMessageId::from(&Sha256::digest(&message.data)[..8]);
|
||||
fn prefix(
|
||||
prefix: [u8; 4],
|
||||
message: &GossipsubMessage,
|
||||
fork_context: Arc<ForkContext>,
|
||||
) -> Vec<u8> {
|
||||
let topic_bytes = message.topic.as_str().as_bytes();
|
||||
match fork_context.current_fork() {
|
||||
ForkName::Altair => {
|
||||
let topic_len_bytes = topic_bytes.len().to_le_bytes();
|
||||
let mut vec = Vec::with_capacity(
|
||||
prefix.len() + topic_len_bytes.len() + topic_bytes.len() + message.data.len(),
|
||||
);
|
||||
vec.extend_from_slice(&prefix);
|
||||
vec.extend_from_slice(&topic_len_bytes);
|
||||
vec.extend_from_slice(topic_bytes);
|
||||
vec.extend_from_slice(&message.data);
|
||||
vec
|
||||
}
|
||||
ForkName::Base => {
|
||||
let mut vec = Vec::with_capacity(prefix.len() + message.data.len());
|
||||
vec.extend_from_slice(&prefix);
|
||||
vec.extend_from_slice(&message.data);
|
||||
vec
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let gossip_message_id = move |message: &GossipsubMessage| {
|
||||
MessageId::from(
|
||||
&Sha256::digest(
|
||||
prefix(MESSAGE_DOMAIN_VALID_SNAPPY, message, fork_context.clone()).as_slice(),
|
||||
)[..20],
|
||||
)
|
||||
};
|
||||
GossipsubConfigBuilder::default()
|
||||
.max_transmit_size(GOSSIP_MAX_SIZE)
|
||||
.heartbeat_interval(Duration::from_millis(700))
|
||||
.mesh_n(8)
|
||||
.mesh_n_low(MESH_N_LOW)
|
||||
.mesh_n_high(12)
|
||||
.gossip_lazy(6)
|
||||
.fanout_ttl(Duration::from_secs(60))
|
||||
.history_length(12)
|
||||
.max_messages_per_rpc(Some(500)) // Responses to IWANT can be quite large
|
||||
.history_gossip(3)
|
||||
.validate_messages() // require validation before propagation
|
||||
.validation_mode(ValidationMode::Anonymous)
|
||||
.duplicate_cache_time(DUPLICATE_CACHE_TIME)
|
||||
.message_id_fn(gossip_message_id)
|
||||
.fast_message_id_fn(fast_gossip_message_id)
|
||||
.allow_self_origin(true)
|
||||
.build()
|
||||
.expect("valid gossipsub configuration")
|
||||
}
|
||||
236
beacon_node/lighthouse_network/src/discovery/enr.rs
Normal file
236
beacon_node/lighthouse_network/src/discovery/enr.rs
Normal file
@@ -0,0 +1,236 @@
|
||||
//! Helper functions and an extension trait for Ethereum 2 ENRs.
|
||||
|
||||
pub use discv5::enr::{self, CombinedKey, EnrBuilder};
|
||||
|
||||
use super::enr_ext::CombinedKeyExt;
|
||||
use super::ENR_FILENAME;
|
||||
use crate::types::{Enr, EnrAttestationBitfield, EnrSyncCommitteeBitfield};
|
||||
use crate::NetworkConfig;
|
||||
use discv5::enr::EnrKey;
|
||||
use libp2p::core::identity::Keypair;
|
||||
use slog::{debug, warn};
|
||||
use ssz::{Decode, Encode};
|
||||
use ssz_types::BitVector;
|
||||
use std::fs::File;
|
||||
use std::io::prelude::*;
|
||||
use std::path::Path;
|
||||
use std::str::FromStr;
|
||||
use types::{EnrForkId, EthSpec};
|
||||
|
||||
/// The ENR field specifying the fork id.
|
||||
pub const ETH2_ENR_KEY: &str = "eth2";
|
||||
/// The ENR field specifying the attestation subnet bitfield.
|
||||
pub const ATTESTATION_BITFIELD_ENR_KEY: &str = "attnets";
|
||||
/// The ENR field specifying the sync committee subnet bitfield.
|
||||
pub const SYNC_COMMITTEE_BITFIELD_ENR_KEY: &str = "syncnets";
|
||||
|
||||
/// Extension trait for ENR's within Eth2.
|
||||
pub trait Eth2Enr {
|
||||
/// The attestation subnet bitfield associated with the ENR.
|
||||
fn attestation_bitfield<TSpec: EthSpec>(
|
||||
&self,
|
||||
) -> Result<EnrAttestationBitfield<TSpec>, &'static str>;
|
||||
|
||||
/// The sync committee subnet bitfield associated with the ENR.
|
||||
fn sync_committee_bitfield<TSpec: EthSpec>(
|
||||
&self,
|
||||
) -> Result<EnrSyncCommitteeBitfield<TSpec>, &'static str>;
|
||||
|
||||
fn eth2(&self) -> Result<EnrForkId, &'static str>;
|
||||
}
|
||||
|
||||
impl Eth2Enr for Enr {
|
||||
fn attestation_bitfield<TSpec: EthSpec>(
|
||||
&self,
|
||||
) -> Result<EnrAttestationBitfield<TSpec>, &'static str> {
|
||||
let bitfield_bytes = self
|
||||
.get(ATTESTATION_BITFIELD_ENR_KEY)
|
||||
.ok_or("ENR attestation bitfield non-existent")?;
|
||||
|
||||
BitVector::<TSpec::SubnetBitfieldLength>::from_ssz_bytes(bitfield_bytes)
|
||||
.map_err(|_| "Could not decode the ENR attnets bitfield")
|
||||
}
|
||||
|
||||
fn sync_committee_bitfield<TSpec: EthSpec>(
|
||||
&self,
|
||||
) -> Result<EnrSyncCommitteeBitfield<TSpec>, &'static str> {
|
||||
let bitfield_bytes = self
|
||||
.get(SYNC_COMMITTEE_BITFIELD_ENR_KEY)
|
||||
.ok_or("ENR sync committee bitfield non-existent")?;
|
||||
|
||||
BitVector::<TSpec::SyncCommitteeSubnetCount>::from_ssz_bytes(bitfield_bytes)
|
||||
.map_err(|_| "Could not decode the ENR syncnets bitfield")
|
||||
}
|
||||
|
||||
fn eth2(&self) -> Result<EnrForkId, &'static str> {
|
||||
let eth2_bytes = self.get(ETH2_ENR_KEY).ok_or("ENR has no eth2 field")?;
|
||||
|
||||
EnrForkId::from_ssz_bytes(eth2_bytes).map_err(|_| "Could not decode EnrForkId")
|
||||
}
|
||||
}
|
||||
|
||||
/// Either use the given ENR or load an ENR from file if it exists and matches the current NodeId
|
||||
/// and sequence number.
|
||||
/// If an ENR exists, with the same NodeId, this function checks to see if the loaded ENR from
|
||||
/// disk is suitable to use, otherwise we increment the given ENR's sequence number.
|
||||
pub fn use_or_load_enr(
|
||||
enr_key: &CombinedKey,
|
||||
local_enr: &mut Enr,
|
||||
config: &NetworkConfig,
|
||||
log: &slog::Logger,
|
||||
) -> Result<(), String> {
|
||||
let enr_f = config.network_dir.join(ENR_FILENAME);
|
||||
if let Ok(mut enr_file) = File::open(enr_f.clone()) {
|
||||
let mut enr_string = String::new();
|
||||
match enr_file.read_to_string(&mut enr_string) {
|
||||
Err(_) => debug!(log, "Could not read ENR from file"),
|
||||
Ok(_) => {
|
||||
match Enr::from_str(&enr_string) {
|
||||
Ok(disk_enr) => {
|
||||
// if the same node id, then we may need to update our sequence number
|
||||
if local_enr.node_id() == disk_enr.node_id() {
|
||||
if compare_enr(local_enr, &disk_enr) {
|
||||
debug!(log, "ENR loaded from disk"; "file" => ?enr_f);
|
||||
// the stored ENR has the same configuration, use it
|
||||
*local_enr = disk_enr;
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
// same node id, different configuration - update the sequence number
|
||||
// Note: local_enr is generated with default(0) attnets value,
|
||||
// so a non default value in persisted enr will also update sequence number.
|
||||
let new_seq_no = disk_enr.seq().checked_add(1).ok_or("ENR sequence number on file is too large. Remove it to generate a new NodeId")?;
|
||||
local_enr.set_seq(new_seq_no, enr_key).map_err(|e| {
|
||||
format!("Could not update ENR sequence number: {:?}", e)
|
||||
})?;
|
||||
debug!(log, "ENR sequence number increased"; "seq" => new_seq_no);
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
warn!(log, "ENR from file could not be decoded"; "error" => ?e);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
save_enr_to_disk(&config.network_dir, local_enr, log);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Loads an ENR from file if it exists and matches the current NodeId and sequence number. If none
|
||||
/// exists, generates a new one.
|
||||
///
|
||||
/// If an ENR exists, with the same NodeId, this function checks to see if the loaded ENR from
|
||||
/// disk is suitable to use, otherwise we increment our newly generated ENR's sequence number.
|
||||
pub fn build_or_load_enr<T: EthSpec>(
|
||||
local_key: Keypair,
|
||||
config: &NetworkConfig,
|
||||
enr_fork_id: EnrForkId,
|
||||
log: &slog::Logger,
|
||||
) -> Result<Enr, String> {
|
||||
// Build the local ENR.
|
||||
// Note: Discovery should update the ENR record's IP to the external IP as seen by the
|
||||
// majority of our peers, if the CLI doesn't expressly forbid it.
|
||||
let enr_key = CombinedKey::from_libp2p(&local_key)?;
|
||||
let mut local_enr = build_enr::<T>(&enr_key, config, enr_fork_id)?;
|
||||
|
||||
use_or_load_enr(&enr_key, &mut local_enr, config, log)?;
|
||||
Ok(local_enr)
|
||||
}
|
||||
|
||||
pub fn create_enr_builder_from_config<T: EnrKey>(
|
||||
config: &NetworkConfig,
|
||||
enable_tcp: bool,
|
||||
) -> EnrBuilder<T> {
|
||||
let mut builder = EnrBuilder::new("v4");
|
||||
if let Some(enr_address) = config.enr_address {
|
||||
builder.ip(enr_address);
|
||||
}
|
||||
if let Some(udp_port) = config.enr_udp_port {
|
||||
builder.udp(udp_port);
|
||||
}
|
||||
// we always give it our listening tcp port
|
||||
if enable_tcp {
|
||||
let tcp_port = config.enr_tcp_port.unwrap_or(config.libp2p_port);
|
||||
builder.tcp(tcp_port);
|
||||
}
|
||||
builder
|
||||
}
|
||||
|
||||
/// Builds a lighthouse ENR given a `NetworkConfig`.
pub fn build_enr<T: EthSpec>(
    enr_key: &CombinedKey,
    config: &NetworkConfig,
    enr_fork_id: EnrForkId,
) -> Result<Enr, String> {
    let mut builder = create_enr_builder_from_config(config, true);

    // set the `eth2` field on our ENR
    builder.add_value(ETH2_ENR_KEY, &enr_fork_id.as_ssz_bytes());

    // set the "attnets" field on our ENR
    // Initialised empty; the bitfield is updated as we subscribe to attestation subnets.
    let bitfield = BitVector::<T::SubnetBitfieldLength>::new();

    builder.add_value(ATTESTATION_BITFIELD_ENR_KEY, &bitfield.as_ssz_bytes());

    // set the "syncnets" field on our ENR
    // Also initialised empty, for sync committee subnets.
    let bitfield = BitVector::<T::SyncCommitteeSubnetCount>::new();

    builder.add_value(SYNC_COMMITTEE_BITFIELD_ENR_KEY, &bitfield.as_ssz_bytes());

    builder
        .build(enr_key)
        .map_err(|e| format!("Could not build Local ENR: {:?}", e))
}
|
||||
|
||||
/// Defines the conditions under which we use the locally built ENR or the one stored on disk.
/// If this function returns true, we use the `disk_enr`.
fn compare_enr(local_enr: &Enr, disk_enr: &Enr) -> bool {
    // take preference over disk_enr address if one is not specified
    (local_enr.ip().is_none() || local_enr.ip() == disk_enr.ip())
    // tcp ports must match
    && local_enr.tcp() == disk_enr.tcp()
    // must match on the same fork
    && local_enr.get(ETH2_ENR_KEY) == disk_enr.get(ETH2_ENR_KEY)
    // take preference over disk udp port if one is not specified
    && (local_enr.udp().is_none() || local_enr.udp() == disk_enr.udp())
    // we need the ATTESTATION_BITFIELD_ENR_KEY and SYNC_COMMITTEE_BITFIELD_ENR_KEY key to match,
    // otherwise we use a new ENR. This will likely only be true for non-validating nodes.
    && local_enr.get(ATTESTATION_BITFIELD_ENR_KEY) == disk_enr.get(ATTESTATION_BITFIELD_ENR_KEY)
    && local_enr.get(SYNC_COMMITTEE_BITFIELD_ENR_KEY) == disk_enr.get(SYNC_COMMITTEE_BITFIELD_ENR_KEY)
}
|
||||
|
||||
/// Loads enr from the given directory
|
||||
pub fn load_enr_from_disk(dir: &Path) -> Result<Enr, String> {
|
||||
let enr_f = dir.join(ENR_FILENAME);
|
||||
let mut enr_file =
|
||||
File::open(enr_f).map_err(|e| format!("Failed to open enr file: {:?}", e))?;
|
||||
let mut enr_string = String::new();
|
||||
match enr_file.read_to_string(&mut enr_string) {
|
||||
Err(_) => Err("Could not read ENR from file".to_string()),
|
||||
Ok(_) => match Enr::from_str(&enr_string) {
|
||||
Ok(disk_enr) => Ok(disk_enr),
|
||||
Err(e) => Err(format!("ENR from file could not be decoded: {:?}", e)),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
/// Saves an ENR to disk
|
||||
pub fn save_enr_to_disk(dir: &Path, enr: &Enr, log: &slog::Logger) {
|
||||
let _ = std::fs::create_dir_all(dir);
|
||||
match File::create(dir.join(Path::new(ENR_FILENAME)))
|
||||
.and_then(|mut f| f.write_all(enr.to_base64().as_bytes()))
|
||||
{
|
||||
Ok(_) => {
|
||||
debug!(log, "ENR written to disk");
|
||||
}
|
||||
Err(e) => {
|
||||
warn!(
|
||||
log,
|
||||
"Could not write ENR to file"; "file" => format!("{:?}{:?}",dir, ENR_FILENAME), "error" => %e
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
314
beacon_node/lighthouse_network/src/discovery/enr_ext.rs
Normal file
314
beacon_node/lighthouse_network/src/discovery/enr_ext.rs
Normal file
@@ -0,0 +1,314 @@
|
||||
//! ENR extension trait to support libp2p integration.
|
||||
use crate::{Enr, Multiaddr, PeerId};
|
||||
use discv5::enr::{CombinedKey, CombinedPublicKey};
|
||||
use libp2p::core::{identity::Keypair, identity::PublicKey, multiaddr::Protocol};
|
||||
use tiny_keccak::{Hasher, Keccak};
|
||||
|
||||
/// Extend ENR for libp2p types.
pub trait EnrExt {
    /// The libp2p `PeerId` for the record.
    fn peer_id(&self) -> PeerId;

    /// Returns a list of multiaddrs if the ENR has an `ip` and either a `tcp` or `udp` key **or** an `ip6` and either a `tcp6` or `udp6`.
    /// The vector remains empty if these fields are not defined.
    fn multiaddr(&self) -> Vec<Multiaddr>;

    /// Returns a list of multiaddrs with the `PeerId` prepended.
    fn multiaddr_p2p(&self) -> Vec<Multiaddr>;

    /// Returns any multiaddrs that contain the TCP protocol with the `PeerId` prepended.
    fn multiaddr_p2p_tcp(&self) -> Vec<Multiaddr>;

    /// Returns any multiaddrs that contain the UDP protocol with the `PeerId` prepended.
    fn multiaddr_p2p_udp(&self) -> Vec<Multiaddr>;

    /// Returns any multiaddrs that contain the TCP protocol (no `PeerId` prepended).
    fn multiaddr_tcp(&self) -> Vec<Multiaddr>;
}
|
||||
|
||||
/// Extend ENR CombinedPublicKey for libp2p types.
pub trait CombinedKeyPublicExt {
    /// Converts the publickey into a peer id, without consuming the key.
    fn as_peer_id(&self) -> PeerId;
}
|
||||
|
||||
/// Extend ENR CombinedKey for conversion to libp2p keys.
pub trait CombinedKeyExt {
    /// Converts a libp2p key into an ENR combined key.
    ///
    /// Errors for key types discv5 cannot represent (anything other than secp256k1/ed25519).
    fn from_libp2p(key: &libp2p::core::identity::Keypair) -> Result<CombinedKey, &'static str>;
}
|
||||
|
||||
impl EnrExt for Enr {
    /// The libp2p `PeerId` for the record.
    fn peer_id(&self) -> PeerId {
        self.public_key().as_peer_id()
    }

    /// Returns a list of multiaddrs if the ENR has an `ip` and either a `tcp` or `udp` key **or** an `ip6` and either a `tcp6` or `udp6`.
    /// The vector remains empty if these fields are not defined.
    fn multiaddr(&self) -> Vec<Multiaddr> {
        let mut multiaddrs: Vec<Multiaddr> = Vec::new();
        // IPv4 addresses: UDP entries are pushed before TCP entries.
        if let Some(ip) = self.ip() {
            if let Some(udp) = self.udp() {
                let mut multiaddr: Multiaddr = ip.into();
                multiaddr.push(Protocol::Udp(udp));
                multiaddrs.push(multiaddr);
            }

            if let Some(tcp) = self.tcp() {
                let mut multiaddr: Multiaddr = ip.into();
                multiaddr.push(Protocol::Tcp(tcp));
                multiaddrs.push(multiaddr);
            }
        }
        // IPv6 addresses follow, in the same UDP-then-TCP order.
        if let Some(ip6) = self.ip6() {
            if let Some(udp6) = self.udp6() {
                let mut multiaddr: Multiaddr = ip6.into();
                multiaddr.push(Protocol::Udp(udp6));
                multiaddrs.push(multiaddr);
            }

            if let Some(tcp6) = self.tcp6() {
                let mut multiaddr: Multiaddr = ip6.into();
                multiaddr.push(Protocol::Tcp(tcp6));
                multiaddrs.push(multiaddr);
            }
        }
        multiaddrs
    }

    /// Returns a list of multiaddrs if the ENR has an `ip` and either a `tcp` or `udp` key **or** an `ip6` and either a `tcp6` or `udp6`.
    /// The vector remains empty if these fields are not defined.
    ///
    /// This also prepends the `PeerId` into each multiaddr with the `P2p` protocol.
    fn multiaddr_p2p(&self) -> Vec<Multiaddr> {
        let peer_id = self.peer_id();
        let mut multiaddrs: Vec<Multiaddr> = Vec::new();
        if let Some(ip) = self.ip() {
            if let Some(udp) = self.udp() {
                let mut multiaddr: Multiaddr = ip.into();
                multiaddr.push(Protocol::Udp(udp));
                multiaddr.push(Protocol::P2p(peer_id.into()));
                multiaddrs.push(multiaddr);
            }

            if let Some(tcp) = self.tcp() {
                let mut multiaddr: Multiaddr = ip.into();
                multiaddr.push(Protocol::Tcp(tcp));
                multiaddr.push(Protocol::P2p(peer_id.into()));
                multiaddrs.push(multiaddr);
            }
        }
        if let Some(ip6) = self.ip6() {
            if let Some(udp6) = self.udp6() {
                let mut multiaddr: Multiaddr = ip6.into();
                multiaddr.push(Protocol::Udp(udp6));
                multiaddr.push(Protocol::P2p(peer_id.into()));
                multiaddrs.push(multiaddr);
            }

            if let Some(tcp6) = self.tcp6() {
                let mut multiaddr: Multiaddr = ip6.into();
                multiaddr.push(Protocol::Tcp(tcp6));
                multiaddr.push(Protocol::P2p(peer_id.into()));
                multiaddrs.push(multiaddr);
            }
        }
        multiaddrs
    }

    /// Returns a list of multiaddrs if the ENR has an `ip` and a `tcp` key **or** an `ip6` and a `tcp6`.
    /// The vector remains empty if these fields are not defined.
    ///
    /// This also prepends the `PeerId` into each multiaddr with the `P2p` protocol.
    fn multiaddr_p2p_tcp(&self) -> Vec<Multiaddr> {
        let peer_id = self.peer_id();
        let mut multiaddrs: Vec<Multiaddr> = Vec::new();
        if let Some(ip) = self.ip() {
            if let Some(tcp) = self.tcp() {
                let mut multiaddr: Multiaddr = ip.into();
                multiaddr.push(Protocol::Tcp(tcp));
                multiaddr.push(Protocol::P2p(peer_id.into()));
                multiaddrs.push(multiaddr);
            }
        }
        if let Some(ip6) = self.ip6() {
            if let Some(tcp6) = self.tcp6() {
                let mut multiaddr: Multiaddr = ip6.into();
                multiaddr.push(Protocol::Tcp(tcp6));
                multiaddr.push(Protocol::P2p(peer_id.into()));
                multiaddrs.push(multiaddr);
            }
        }
        multiaddrs
    }

    /// Returns a list of multiaddrs if the ENR has an `ip` and a `udp` key **or** an `ip6` and a `udp6`.
    /// The vector remains empty if these fields are not defined.
    ///
    /// This also prepends the `PeerId` into each multiaddr with the `P2p` protocol.
    fn multiaddr_p2p_udp(&self) -> Vec<Multiaddr> {
        let peer_id = self.peer_id();
        let mut multiaddrs: Vec<Multiaddr> = Vec::new();
        if let Some(ip) = self.ip() {
            if let Some(udp) = self.udp() {
                let mut multiaddr: Multiaddr = ip.into();
                multiaddr.push(Protocol::Udp(udp));
                multiaddr.push(Protocol::P2p(peer_id.into()));
                multiaddrs.push(multiaddr);
            }
        }
        if let Some(ip6) = self.ip6() {
            if let Some(udp6) = self.udp6() {
                let mut multiaddr: Multiaddr = ip6.into();
                multiaddr.push(Protocol::Udp(udp6));
                multiaddr.push(Protocol::P2p(peer_id.into()));
                multiaddrs.push(multiaddr);
            }
        }
        multiaddrs
    }

    /// Returns a list of multiaddrs if the ENR has an `ip` and a `tcp` key **or** an `ip6` and a `tcp6`.
    /// The vector remains empty if these fields are not defined.
    fn multiaddr_tcp(&self) -> Vec<Multiaddr> {
        let mut multiaddrs: Vec<Multiaddr> = Vec::new();
        if let Some(ip) = self.ip() {
            if let Some(tcp) = self.tcp() {
                let mut multiaddr: Multiaddr = ip.into();
                multiaddr.push(Protocol::Tcp(tcp));
                multiaddrs.push(multiaddr);
            }
        }
        if let Some(ip6) = self.ip6() {
            if let Some(tcp6) = self.tcp6() {
                let mut multiaddr: Multiaddr = ip6.into();
                multiaddr.push(Protocol::Tcp(tcp6));
                multiaddrs.push(multiaddr);
            }
        }
        multiaddrs
    }
}
|
||||
|
||||
impl CombinedKeyPublicExt for CombinedPublicKey {
    /// Converts the publickey into a peer id, without consuming the key.
    ///
    /// This is only available with the `libp2p` feature flag.
    fn as_peer_id(&self) -> PeerId {
        match self {
            Self::Secp256k1(pk) => {
                // Re-encode the discv5 key into libp2p's compressed secp256k1 representation.
                let pk_bytes = pk.to_bytes();
                let libp2p_pk = libp2p::core::PublicKey::Secp256k1(
                    libp2p::core::identity::secp256k1::PublicKey::decode(&pk_bytes)
                        .expect("valid public key"),
                );
                PeerId::from_public_key(&libp2p_pk)
            }
            Self::Ed25519(pk) => {
                // Ed25519 keys share the same 32-byte encoding in both libraries.
                let pk_bytes = pk.to_bytes();
                let libp2p_pk = libp2p::core::PublicKey::Ed25519(
                    libp2p::core::identity::ed25519::PublicKey::decode(&pk_bytes)
                        .expect("valid public key"),
                );
                PeerId::from_public_key(&libp2p_pk)
            }
        }
    }
}
|
||||
|
||||
impl CombinedKeyExt for CombinedKey {
    /// Converts a libp2p keypair into a discv5 `CombinedKey`.
    fn from_libp2p(key: &libp2p::core::identity::Keypair) -> Result<CombinedKey, &'static str> {
        match key {
            Keypair::Secp256k1(key) => {
                let secret =
                    discv5::enr::k256::ecdsa::SigningKey::from_bytes(&key.secret().to_bytes())
                        .expect("libp2p key must be valid");
                Ok(CombinedKey::Secp256k1(secret))
            }
            Keypair::Ed25519(key) => {
                // libp2p's ed25519 `encode()` yields 64 bytes (secret || public); the
                // first 32 are the secret key that dalek expects.
                let ed_keypair =
                    discv5::enr::ed25519_dalek::SecretKey::from_bytes(&key.encode()[..32])
                        .expect("libp2p key must be valid");
                Ok(CombinedKey::from(ed_keypair))
            }
            _ => Err("ENR: Unsupported libp2p key type"),
        }
    }
}
|
||||
|
||||
// helper function to convert a peer_id to a node_id. This is only possible for secp256k1/ed25519 libp2p
|
||||
// peer_ids
|
||||
pub fn peer_id_to_node_id(peer_id: &PeerId) -> Result<discv5::enr::NodeId, String> {
|
||||
// A libp2p peer id byte representation should be 2 length bytes + 4 protobuf bytes + compressed pk bytes
|
||||
// if generated from a PublicKey with Identity multihash.
|
||||
let pk_bytes = &peer_id.to_bytes()[2..];
|
||||
|
||||
match PublicKey::from_protobuf_encoding(pk_bytes).map_err(|e| {
|
||||
format!(
|
||||
" Cannot parse libp2p public key public key from peer id: {}",
|
||||
e
|
||||
)
|
||||
})? {
|
||||
PublicKey::Secp256k1(pk) => {
|
||||
let uncompressed_key_bytes = &pk.encode_uncompressed()[1..];
|
||||
let mut output = [0_u8; 32];
|
||||
let mut hasher = Keccak::v256();
|
||||
hasher.update(uncompressed_key_bytes);
|
||||
hasher.finalize(&mut output);
|
||||
Ok(discv5::enr::NodeId::parse(&output).expect("Must be correct length"))
|
||||
}
|
||||
PublicKey::Ed25519(pk) => {
|
||||
let uncompressed_key_bytes = pk.encode();
|
||||
let mut output = [0_u8; 32];
|
||||
let mut hasher = Keccak::v256();
|
||||
hasher.update(&uncompressed_key_bytes);
|
||||
hasher.finalize(&mut output);
|
||||
Ok(discv5::enr::NodeId::parse(&output).expect("Must be correct length"))
|
||||
}
|
||||
_ => Err("Unsupported public key".into()),
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    /// Deriving a node id from a secp256k1 peer id must match the node id discv5 derives
    /// from the same underlying secret key.
    #[test]
    fn test_secp256k1_peer_id_conversion() {
        let sk_hex = "df94a73d528434ce2309abb19c16aedb535322797dbd59c157b1e04095900f48";
        let sk_bytes = hex::decode(sk_hex).unwrap();
        let secret_key = discv5::enr::k256::ecdsa::SigningKey::from_bytes(&sk_bytes).unwrap();

        let libp2p_sk = libp2p::identity::secp256k1::SecretKey::from_bytes(sk_bytes).unwrap();
        let secp256k1_kp: libp2p::identity::secp256k1::Keypair = libp2p_sk.into();
        let libp2p_kp = Keypair::Secp256k1(secp256k1_kp);
        let peer_id = libp2p_kp.public().to_peer_id();

        let enr = discv5::enr::EnrBuilder::new("v4")
            .build(&secret_key)
            .unwrap();
        let node_id = peer_id_to_node_id(&peer_id).unwrap();

        assert_eq!(enr.node_id(), node_id);
    }

    /// Same round-trip check as above, for an ed25519 key.
    #[test]
    fn test_ed25519_peer_conversion() {
        let sk_hex = "4dea8a5072119927e9d243a7d953f2f4bc95b70f110978e2f9bc7a9000e4b261";
        let sk_bytes = hex::decode(sk_hex).unwrap();
        let secret = discv5::enr::ed25519_dalek::SecretKey::from_bytes(&sk_bytes).unwrap();
        let public = discv5::enr::ed25519_dalek::PublicKey::from(&secret);
        let keypair = discv5::enr::ed25519_dalek::Keypair { secret, public };

        let libp2p_sk = libp2p::identity::ed25519::SecretKey::from_bytes(sk_bytes).unwrap();
        let ed25519_kp: libp2p::identity::ed25519::Keypair = libp2p_sk.into();
        let libp2p_kp = Keypair::Ed25519(ed25519_kp);
        let peer_id = libp2p_kp.public().to_peer_id();

        let enr = discv5::enr::EnrBuilder::new("v4").build(&keypair).unwrap();
        let node_id = peer_id_to_node_id(&peer_id).unwrap();

        assert_eq!(enr.node_id(), node_id);
    }
}
|
||||
1192
beacon_node/lighthouse_network/src/discovery/mod.rs
Normal file
1192
beacon_node/lighthouse_network/src/discovery/mod.rs
Normal file
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,47 @@
|
||||
///! The subnet predicate used for searching for a particular subnet.
|
||||
use super::*;
|
||||
use crate::types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield};
|
||||
use slog::trace;
|
||||
use std::ops::Deref;
|
||||
|
||||
/// Returns the predicate for a given subnet.
///
/// The returned closure is handed to discovery queries and returns `true` when the candidate
/// ENR advertises membership of at least one of the requested subnets.
pub fn subnet_predicate<TSpec>(
    subnets: Vec<Subnet>,
    log: &slog::Logger,
) -> impl Fn(&Enr) -> bool + Send
where
    TSpec: EthSpec,
{
    // Clone so the closure can outlive the caller's logger borrow.
    let log_clone = log.clone();

    move |enr: &Enr| {
        // An ENR without a decodable attnets field is rejected outright.
        let attestation_bitfield: EnrAttestationBitfield<TSpec> =
            match enr.attestation_bitfield::<TSpec>() {
                Ok(b) => b,
                Err(_e) => return false,
            };

        // Pre-fork/fork-boundary enrs may not contain a syncnets field.
        // Don't return early here
        let sync_committee_bitfield: Result<EnrSyncCommitteeBitfield<TSpec>, _> =
            enr.sync_committee_bitfield::<TSpec>();

        // Match if any requested subnet bit is set in the corresponding bitfield.
        let predicate = subnets.iter().any(|subnet| match subnet {
            Subnet::Attestation(s) => attestation_bitfield
                .get(*s.deref() as usize)
                .unwrap_or(false),
            Subnet::SyncCommittee(s) => sync_committee_bitfield
                .as_ref()
                .map_or(false, |b| b.get(*s.deref() as usize).unwrap_or(false)),
        });

        if !predicate {
            trace!(
                log_clone,
                "Peer found but not on any of the desired subnets";
                "peer_id" => %enr.peer_id()
            );
        }
        predicate
    }
}
|
||||
82
beacon_node/lighthouse_network/src/lib.rs
Normal file
82
beacon_node/lighthouse_network/src/lib.rs
Normal file
@@ -0,0 +1,82 @@
|
||||
/// This crate contains the main link for lighthouse to rust-libp2p. It therefore re-exports
|
||||
/// all required libp2p functionality.
|
||||
///
|
||||
/// This crate builds and manages the libp2p services required by the beacon node.
|
||||
#[macro_use]
|
||||
extern crate lazy_static;
|
||||
|
||||
pub mod behaviour;
|
||||
mod config;
|
||||
|
||||
#[allow(clippy::mutable_key_type)] // PeerId in hashmaps are no longer permitted by clippy
|
||||
pub mod discovery;
|
||||
mod metrics;
|
||||
mod peer_manager;
|
||||
pub mod rpc;
|
||||
mod service;
|
||||
pub mod types;
|
||||
|
||||
use serde::{de, Deserialize, Deserializer, Serialize, Serializer};
|
||||
use std::str::FromStr;
|
||||
|
||||
/// Wrapper over a libp2p `PeerId` which implements `Serialize` and `Deserialize`
/// (as the base58 string form of the peer id).
#[derive(Clone, Debug)]
pub struct PeerIdSerialized(libp2p::PeerId);
|
||||
|
||||
impl From<PeerIdSerialized> for PeerId {
    /// Unwraps the inner `PeerId`.
    fn from(peer_id: PeerIdSerialized) -> Self {
        peer_id.0
    }
}
|
||||
|
||||
impl FromStr for PeerIdSerialized {
|
||||
type Err = String;
|
||||
|
||||
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
||||
Ok(Self(
|
||||
PeerId::from_str(s).map_err(|e| format!("Invalid peer id: {}", e))?,
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
impl Serialize for PeerIdSerialized {
    /// Serializes the peer id as its base58 string representation.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        serializer.serialize_str(&self.0.to_string())
    }
}
|
||||
|
||||
impl<'de> Deserialize<'de> for PeerIdSerialized {
    /// Deserializes a peer id from its base58 string representation.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        let s: String = Deserialize::deserialize(deserializer)?;
        Ok(Self(PeerId::from_str(&s).map_err(|e| {
            de::Error::custom(format!("Failed to deserialise peer id: {:?}", e))
        })?))
    }
}
|
||||
|
||||
pub use crate::types::{
|
||||
error, Enr, EnrSyncCommitteeBitfield, GossipTopic, NetworkGlobals, PubsubMessage, Subnet,
|
||||
SubnetDiscovery,
|
||||
};
|
||||
pub use behaviour::{BehaviourEvent, Gossipsub, PeerRequestId, Request, Response};
|
||||
pub use config::Config as NetworkConfig;
|
||||
pub use discovery::{CombinedKeyExt, EnrExt, Eth2Enr};
|
||||
pub use discv5;
|
||||
pub use libp2p::bandwidth::BandwidthSinks;
|
||||
pub use libp2p::gossipsub::{MessageAcceptance, MessageId, Topic, TopicHash};
|
||||
pub use libp2p::{core::ConnectedPoint, PeerId, Swarm};
|
||||
pub use libp2p::{multiaddr, Multiaddr};
|
||||
pub use metrics::scrape_discovery_metrics;
|
||||
pub use peer_manager::{
|
||||
peerdb::client::Client,
|
||||
peerdb::score::{PeerAction, ReportSource},
|
||||
peerdb::PeerDB,
|
||||
ConnectionDirection, PeerConnectionStatus, PeerInfo, PeerManager, SyncInfo, SyncStatus,
|
||||
};
|
||||
pub use service::{load_private_key, Libp2pEvent, Service, NETWORK_KEY_FILENAME};
|
||||
94
beacon_node/lighthouse_network/src/metrics.rs
Normal file
94
beacon_node/lighthouse_network/src/metrics.rs
Normal file
@@ -0,0 +1,94 @@
|
||||
pub use lighthouse_metrics::*;
|
||||
|
||||
// Prometheus metrics for the networking stack: libp2p connection counts, discovery
// activity, and per-client gossipsub/RPC statistics.
lazy_static! {
    pub static ref ADDRESS_UPDATE_COUNT: Result<IntCounter> = try_create_int_counter(
        "libp2p_address_update_total",
        "Count of libp2p socked updated events (when our view of our IP address has changed)"
    );
    pub static ref PEERS_CONNECTED: Result<IntGauge> = try_create_int_gauge(
        "libp2p_peer_connected_peers_total",
        "Count of libp2p peers currently connected"
    );
    pub static ref PEERS_CONNECTED_INTEROP: Result<IntGauge> =
        try_create_int_gauge("libp2p_peers", "Count of libp2p peers currently connected");
    pub static ref PEER_CONNECT_EVENT_COUNT: Result<IntCounter> = try_create_int_counter(
        "libp2p_peer_connect_event_total",
        "Count of libp2p peer connect events (not the current number of connected peers)"
    );
    pub static ref PEER_DISCONNECT_EVENT_COUNT: Result<IntCounter> = try_create_int_counter(
        "libp2p_peer_disconnect_event_total",
        "Count of libp2p peer disconnect events"
    );
    pub static ref DISCOVERY_QUEUE: Result<IntGauge> = try_create_int_gauge(
        "discovery_queue_size",
        "The number of discovery queries awaiting execution"
    );
    pub static ref DISCOVERY_REQS: Result<Gauge> = try_create_float_gauge(
        "discovery_requests",
        "The number of unsolicited discovery requests per second"
    );
    pub static ref DISCOVERY_SESSIONS: Result<IntGauge> = try_create_int_gauge(
        "discovery_sessions",
        "The number of active discovery sessions with peers"
    );
    pub static ref DISCOVERY_REQS_IP: Result<GaugeVec> = try_create_float_gauge_vec(
        "discovery_reqs_per_ip",
        "Unsolicited discovery requests per ip per second",
        &["Addresses"]
    );
    pub static ref PEERS_PER_CLIENT: Result<IntGaugeVec> = try_create_int_gauge_vec(
        "libp2p_peers_per_client",
        "The connected peers via client implementation",
        &["Client"]
    );
    pub static ref FAILED_ATTESTATION_PUBLISHES_PER_SUBNET: Result<IntGaugeVec> =
        try_create_int_gauge_vec(
            "gossipsub_failed_attestation_publishes_per_subnet",
            "Failed attestation publishes per subnet",
            &["subnet"]
        );
    pub static ref FAILED_PUBLISHES_PER_MAIN_TOPIC: Result<IntGaugeVec> = try_create_int_gauge_vec(
        "gossipsub_failed_publishes_per_main_topic",
        "Failed gossip publishes",
        &["topic_hash"]
    );
    pub static ref TOTAL_RPC_ERRORS_PER_CLIENT: Result<IntCounterVec> = try_create_int_counter_vec(
        "libp2p_rpc_errors_per_client",
        "RPC errors per client",
        &["client", "rpc_error", "direction"]
    );
    pub static ref PEER_ACTION_EVENTS_PER_CLIENT: Result<IntCounterVec> =
        try_create_int_counter_vec(
            "libp2p_peer_actions_per_client",
            "Score reports per client",
            &["client", "action", "source"]
        );
    pub static ref GOSSIP_UNACCEPTED_MESSAGES_PER_CLIENT: Result<IntCounterVec> =
        try_create_int_counter_vec(
            "gossipsub_unaccepted_messages_per_client",
            "Gossipsub messages that we did not accept, per client",
            &["client", "validation_result"]
        );
}
|
||||
|
||||
/// Pulls the current discv5 metrics and publishes them to the Prometheus gauges above.
///
/// Called on each metrics scrape; the per-ip gauge vec is reset each time so stale IPs drop out.
pub fn scrape_discovery_metrics() {
    let metrics = discv5::metrics::Metrics::from(discv5::Discv5::raw_metrics());

    set_float_gauge(&DISCOVERY_REQS, metrics.unsolicited_requests_per_second);

    set_gauge(&DISCOVERY_SESSIONS, metrics.active_sessions as i64);

    // Reset then repopulate the per-ip request-rate gauge from the latest snapshot.
    let process_gauge_vec = |gauge: &Result<GaugeVec>, metrics: discv5::metrics::Metrics| {
        if let Ok(gauge_vec) = gauge {
            gauge_vec.reset();
            for (ip, value) in metrics.requests_per_ip_per_second.iter() {
                if let Ok(metric) = gauge_vec.get_metric_with_label_values(&[&format!("{:?}", ip)])
                {
                    metric.set(*value);
                }
            }
        }
    };

    process_gauge_vec(&DISCOVERY_REQS_IP, metrics);
}
|
||||
1323
beacon_node/lighthouse_network/src/peer_manager/mod.rs
Normal file
1323
beacon_node/lighthouse_network/src/peer_manager/mod.rs
Normal file
File diff suppressed because it is too large
Load Diff
1842
beacon_node/lighthouse_network/src/peer_manager/peerdb.rs
Normal file
1842
beacon_node/lighthouse_network/src/peer_manager/peerdb.rs
Normal file
File diff suppressed because it is too large
Load Diff
201
beacon_node/lighthouse_network/src/peer_manager/peerdb/client.rs
Normal file
201
beacon_node/lighthouse_network/src/peer_manager/peerdb/client.rs
Normal file
@@ -0,0 +1,201 @@
|
||||
//! Known Ethereum 2.0 clients and their fingerprints.
|
||||
//!
|
||||
//! Currently using identify to fingerprint.
|
||||
|
||||
use libp2p::identify::IdentifyInfo;
|
||||
use serde::Serialize;
|
||||
use strum::{AsRefStr, AsStaticStr};
|
||||
|
||||
/// Various client and protocol information related to a node.
#[derive(Clone, Debug, Serialize)]
pub struct Client {
    /// The client's name (Ex: lighthouse, prism, nimbus, etc)
    pub kind: ClientKind,
    /// The client's version.
    pub version: String,
    /// The OS version of the client.
    pub os_version: String,
    /// The libp2p protocol version.
    pub protocol_version: String,
    /// Identify agent string
    pub agent_string: Option<String>,
}
|
||||
|
||||
/// The Ethereum consensus client implementations we can fingerprint via identify.
#[derive(Clone, Debug, Serialize, PartialEq, AsRefStr, AsStaticStr)]
pub enum ClientKind {
    /// A lighthouse node (the best kind).
    Lighthouse,
    /// A Nimbus node.
    Nimbus,
    /// A Teku node.
    Teku,
    /// A Prysm node.
    Prysm,
    /// A lodestar node.
    Lodestar,
    /// An unknown client.
    Unknown,
}
|
||||
|
||||
impl Default for Client {
|
||||
fn default() -> Self {
|
||||
Client {
|
||||
kind: ClientKind::Unknown,
|
||||
version: "unknown".into(),
|
||||
os_version: "unknown".into(),
|
||||
protocol_version: "unknown".into(),
|
||||
agent_string: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Client {
    /// Builds a `Client` from `IdentifyInfo`.
    ///
    /// The kind/version/os are fingerprinted from the agent string; the raw agent string is
    /// retained for display of unknown clients.
    pub fn from_identify_info(info: &IdentifyInfo) -> Self {
        let (kind, version, os_version) = client_from_agent_version(&info.agent_version);

        Client {
            kind,
            version,
            os_version,
            protocol_version: info.protocol_version.clone(),
            agent_string: Some(info.agent_version.clone()),
        }
    }
}
|
||||
|
||||
impl std::fmt::Display for Client {
    /// Human-readable client summary. Lodestar has no OS information; unknown clients fall
    /// back to the raw agent string when one was recorded.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self.kind {
            ClientKind::Lighthouse => write!(
                f,
                "Lighthouse: version: {}, os_version: {}",
                self.version, self.os_version
            ),
            ClientKind::Teku => write!(
                f,
                "Teku: version: {}, os_version: {}",
                self.version, self.os_version
            ),
            ClientKind::Nimbus => write!(
                f,
                "Nimbus: version: {}, os_version: {}",
                self.version, self.os_version
            ),
            ClientKind::Prysm => write!(
                f,
                "Prysm: version: {}, os_version: {}",
                self.version, self.os_version
            ),
            ClientKind::Lodestar => write!(f, "Lodestar: version: {}", self.version),
            ClientKind::Unknown => {
                if let Some(agent_string) = &self.agent_string {
                    write!(f, "Unknown: {}", agent_string)
                } else {
                    write!(f, "Unknown")
                }
            }
        }
    }
}
|
||||
|
||||
impl std::fmt::Display for ClientKind {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
f.write_str(self.as_ref())
|
||||
}
|
||||
}
|
||||
|
||||
// helper function to identify clients from their agent_version. Returns the client
|
||||
// kind and it's associated version and the OS kind.
|
||||
fn client_from_agent_version(agent_version: &str) -> (ClientKind, String, String) {
|
||||
let mut agent_split = agent_version.split('/');
|
||||
match agent_split.next() {
|
||||
Some("Lighthouse") => {
|
||||
let kind = ClientKind::Lighthouse;
|
||||
let mut version = String::from("unknown");
|
||||
let mut os_version = version.clone();
|
||||
if let Some(agent_version) = agent_split.next() {
|
||||
version = agent_version.into();
|
||||
if let Some(agent_os_version) = agent_split.next() {
|
||||
os_version = agent_os_version.into();
|
||||
}
|
||||
}
|
||||
(kind, version, os_version)
|
||||
}
|
||||
Some("teku") => {
|
||||
let kind = ClientKind::Teku;
|
||||
let mut version = String::from("unknown");
|
||||
let mut os_version = version.clone();
|
||||
if agent_split.next().is_some() {
|
||||
if let Some(agent_version) = agent_split.next() {
|
||||
version = agent_version.into();
|
||||
if let Some(agent_os_version) = agent_split.next() {
|
||||
os_version = agent_os_version.into();
|
||||
}
|
||||
}
|
||||
}
|
||||
(kind, version, os_version)
|
||||
}
|
||||
Some("github.com") => {
|
||||
let kind = ClientKind::Prysm;
|
||||
let unknown = String::from("unknown");
|
||||
(kind, unknown.clone(), unknown)
|
||||
}
|
||||
Some("Prysm") => {
|
||||
let kind = ClientKind::Prysm;
|
||||
let mut version = String::from("unknown");
|
||||
let mut os_version = version.clone();
|
||||
if agent_split.next().is_some() {
|
||||
if let Some(agent_version) = agent_split.next() {
|
||||
version = agent_version.into();
|
||||
if let Some(agent_os_version) = agent_split.next() {
|
||||
os_version = agent_os_version.into();
|
||||
}
|
||||
}
|
||||
}
|
||||
(kind, version, os_version)
|
||||
}
|
||||
Some("nimbus") => {
|
||||
let kind = ClientKind::Nimbus;
|
||||
let mut version = String::from("unknown");
|
||||
let mut os_version = version.clone();
|
||||
if agent_split.next().is_some() {
|
||||
if let Some(agent_version) = agent_split.next() {
|
||||
version = agent_version.into();
|
||||
if let Some(agent_os_version) = agent_split.next() {
|
||||
os_version = agent_os_version.into();
|
||||
}
|
||||
}
|
||||
}
|
||||
(kind, version, os_version)
|
||||
}
|
||||
Some("nim-libp2p") => {
|
||||
let kind = ClientKind::Nimbus;
|
||||
let mut version = String::from("unknown");
|
||||
let mut os_version = version.clone();
|
||||
if let Some(agent_version) = agent_split.next() {
|
||||
version = agent_version.into();
|
||||
if let Some(agent_os_version) = agent_split.next() {
|
||||
os_version = agent_os_version.into();
|
||||
}
|
||||
}
|
||||
(kind, version, os_version)
|
||||
}
|
||||
Some("js-libp2p") => {
|
||||
let kind = ClientKind::Lodestar;
|
||||
let mut version = String::from("unknown");
|
||||
let mut os_version = version.clone();
|
||||
if let Some(agent_version) = agent_split.next() {
|
||||
version = agent_version.into();
|
||||
if let Some(agent_os_version) = agent_split.next() {
|
||||
os_version = agent_os_version.into();
|
||||
}
|
||||
}
|
||||
(kind, version, os_version)
|
||||
}
|
||||
_ => {
|
||||
let unknown = String::from("unknown");
|
||||
(ClientKind::Unknown, unknown.clone(), unknown)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,505 @@
|
||||
use super::client::Client;
|
||||
use super::score::{PeerAction, Score, ScoreState};
|
||||
use super::sync_status::SyncStatus;
|
||||
use crate::Multiaddr;
|
||||
use crate::{rpc::MetaData, types::Subnet};
|
||||
use discv5::Enr;
|
||||
use serde::{
|
||||
ser::{SerializeStruct, Serializer},
|
||||
Serialize,
|
||||
};
|
||||
use std::collections::HashSet;
|
||||
use std::net::{IpAddr, SocketAddr};
|
||||
use std::time::Instant;
|
||||
use strum::AsRefStr;
|
||||
use types::EthSpec;
|
||||
use PeerConnectionStatus::*;
|
||||
|
||||
/// Information about a given connected peer.
#[derive(Clone, Debug, Serialize)]
#[serde(bound = "T: EthSpec")]
pub struct PeerInfo<T: EthSpec> {
    /// The connection status of the peer.
    /// (Underscore prefix suggests this field is currently unused — confirm.)
    _status: PeerStatus,
    /// The peers reputation
    score: Score,
    /// Client managing this peer
    client: Client,
    /// Connection status of this peer
    connection_status: PeerConnectionStatus,
    /// The known listening addresses of this peer. This is given by identify and can be arbitrary
    /// (including local IPs).
    listening_addresses: Vec<Multiaddr>,
    /// This is addresses we have physically seen and this is what we use for banning/un-banning
    /// peers.
    seen_addresses: HashSet<SocketAddr>,
    /// The current syncing state of the peer. The state may be determined after it's initial
    /// connection.
    sync_status: SyncStatus,
    /// The metadata of the peer (including the attnets/syncnets bitfields). This may be
    /// determined after it's initial connection.
    meta_data: Option<MetaData<T>>,
    /// Subnets the peer is connected to (as observed via gossipsub subscriptions).
    subnets: HashSet<Subnet>,
    /// The time we would like to retain this peer. After this time, the peer is no longer
    /// necessary.
    #[serde(skip)]
    min_ttl: Option<Instant>,
    /// Is the peer a trusted peer.
    is_trusted: bool,
    /// Direction of the first connection of the last (or current) connected session with this peer.
    /// None if this peer was never connected.
    connection_direction: Option<ConnectionDirection>,
    /// The enr of the peer, if known.
    enr: Option<Enr>,
}
|
||||
|
||||
impl<TSpec: EthSpec> Default for PeerInfo<TSpec> {
|
||||
fn default() -> PeerInfo<TSpec> {
|
||||
PeerInfo {
|
||||
_status: Default::default(),
|
||||
score: Score::default(),
|
||||
client: Client::default(),
|
||||
connection_status: Default::default(),
|
||||
listening_addresses: Vec::new(),
|
||||
seen_addresses: HashSet::new(),
|
||||
subnets: HashSet::new(),
|
||||
sync_status: SyncStatus::Unknown,
|
||||
meta_data: None,
|
||||
min_ttl: None,
|
||||
is_trusted: false,
|
||||
connection_direction: None,
|
||||
enr: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: EthSpec> PeerInfo<T> {
    /// Return a PeerInfo struct for a trusted peer.
    /// Trusted peers start with (and keep) the maximum score.
    pub fn trusted_peer_info() -> Self {
        PeerInfo {
            score: Score::max_score(),
            is_trusted: true,
            ..Default::default()
        }
    }

    /// Returns if the peer is subscribed to a given `Subnet` from the metadata attnets/syncnets field.
    /// Returns `false` when no metadata is known for the peer.
    pub fn on_subnet_metadata(&self, subnet: &Subnet) -> bool {
        if let Some(meta_data) = &self.meta_data {
            match subnet {
                Subnet::Attestation(id) => {
                    return meta_data.attnets().get(**id as usize).unwrap_or(false)
                }
                Subnet::SyncCommittee(id) => {
                    // syncnets is optional (fork-dependent), hence the extra map_or.
                    return meta_data
                        .syncnets()
                        .map_or(false, |s| s.get(**id as usize).unwrap_or(false))
                }
            }
        }
        false
    }

    /// Obtains the client of the peer.
    pub fn client(&self) -> &Client {
        &self.client
    }

    /// Returns the listening addresses of the Peer.
    pub fn listening_addresses(&self) -> &Vec<Multiaddr> {
        &self.listening_addresses
    }

    /// Returns the connection direction for the peer.
    pub fn connection_direction(&self) -> Option<&ConnectionDirection> {
        self.connection_direction.as_ref()
    }

    /// Returns the sync status of the peer.
    pub fn sync_status(&self) -> &SyncStatus {
        &self.sync_status
    }

    /// Returns the metadata for the peer if currently known.
    pub fn meta_data(&self) -> Option<&MetaData<T>> {
        self.meta_data.as_ref()
    }

    /// Returns whether the peer is a trusted peer or not.
    pub fn is_trusted(&self) -> bool {
        self.is_trusted
    }

    /// The time a peer is expected to be useful until for an attached validator. If this is set to
    /// None, the peer is not required for any upcoming duty.
    pub fn min_ttl(&self) -> Option<&Instant> {
        self.min_ttl.as_ref()
    }

    /// The ENR of the peer if it is known.
    pub fn enr(&self) -> Option<&Enr> {
        self.enr.as_ref()
    }

    /// Returns if the peer is subscribed to a given `Subnet` from the gossipsub subscriptions.
    pub fn on_subnet_gossipsub(&self, subnet: &Subnet) -> bool {
        self.subnets.contains(subnet)
    }

    /// Returns the seen addresses of the peer.
    pub fn seen_addresses(&self) -> impl Iterator<Item = &SocketAddr> + '_ {
        self.seen_addresses.iter()
    }

    /// Returns a list of seen IP addresses for the peer.
    pub fn seen_ip_addresses(&self) -> impl Iterator<Item = IpAddr> + '_ {
        self.seen_addresses
            .iter()
            .map(|socket_addr| socket_addr.ip())
    }

    /// Returns the connection status of the peer.
    pub fn connection_status(&self) -> &PeerConnectionStatus {
        &self.connection_status
    }

    /// Reports if this peer has some future validator duty in which case it is valuable to keep it.
    pub fn has_future_duty(&self) -> bool {
        self.min_ttl.map_or(false, |i| i >= Instant::now())
    }

    /// Returns score of the peer.
    pub fn score(&self) -> &Score {
        &self.score
    }

    /// Returns the state of the peer based on the score.
    pub(crate) fn score_state(&self) -> ScoreState {
        self.score.state()
    }

    /// Returns true if the gossipsub score is sufficient.
    pub fn is_good_gossipsub_peer(&self) -> bool {
        self.score.is_good_gossipsub_peer()
    }

    /* Peer connection status API */

    /// Checks if the status is connected.
    pub fn is_connected(&self) -> bool {
        matches!(
            self.connection_status,
            PeerConnectionStatus::Connected { .. }
        )
    }

    /// Checks if the status is dialing.
    pub fn is_dialing(&self) -> bool {
        matches!(self.connection_status, PeerConnectionStatus::Dialing { .. })
    }

    /// The peer is either connected or in the process of being dialed.
    pub fn is_connected_or_dialing(&self) -> bool {
        self.is_connected() || self.is_dialing()
    }

    /// Checks if the connection status is banned. This can lag behind the score state
    /// temporarily.
    pub fn is_banned(&self) -> bool {
        matches!(self.connection_status, PeerConnectionStatus::Banned { .. })
    }

    /// Checks if the peer's score is banned.
    pub fn score_is_banned(&self) -> bool {
        matches!(self.score.state(), ScoreState::Banned)
    }

    /// Checks if the status is disconnected.
    pub fn is_disconnected(&self) -> bool {
        matches!(self.connection_status, Disconnected { .. })
    }

    /// Checks if the peer is outbound-only (no ingoing connections, at least one outgoing).
    pub fn is_outbound_only(&self) -> bool {
        matches!(self.connection_status, Connected {n_in, n_out} if n_in == 0 && n_out > 0)
    }

    /// Returns the number of (ingoing, outgoing) connections with this peer.
    /// A peer that is not `Connected` reports (0, 0).
    pub fn connections(&self) -> (u8, u8) {
        match self.connection_status {
            Connected { n_in, n_out } => (n_in, n_out),
            _ => (0, 0),
        }
    }

    /* Mutable Functions */

    /// Updates the sync status. Returns true if the status was changed.
    // VISIBILITY: Both the peer manager and the network sync are able to update the sync state of a peer
    pub fn update_sync_status(&mut self, sync_status: SyncStatus) -> bool {
        self.sync_status.update(sync_status)
    }

    /// Sets the client of the peer.
    // VISIBILITY: The peer manager is able to set the client
    pub(in crate::peer_manager) fn set_client(&mut self, client: Client) {
        self.client = client
    }

    /// Replaces the current listening addresses with those specified, returning the current
    /// listening addresses.
    // VISIBILITY: The peer manager is able to set the listening addresses
    pub(in crate::peer_manager) fn set_listening_addresses(
        &mut self,
        listening_addresses: Vec<Multiaddr>,
    ) -> Vec<Multiaddr> {
        std::mem::replace(&mut self.listening_addresses, listening_addresses)
    }

    /// Sets an explicit value for the meta data.
    // VISIBILITY: The peer manager is able to adjust the meta_data
    pub(in crate::peer_manager) fn set_meta_data(&mut self, meta_data: MetaData<T>) {
        self.meta_data = Some(meta_data)
    }

    /// Sets the connection status of the peer.
    pub(super) fn set_connection_status(&mut self, connection_status: PeerConnectionStatus) {
        self.connection_status = connection_status
    }

    /// Sets the ENR of the peer if one is known.
    pub(super) fn set_enr(&mut self, enr: Enr) {
        self.enr = Some(enr)
    }

    /// Sets the time that the peer is expected to be needed until for an attached validator duty.
    pub(super) fn set_min_ttl(&mut self, min_ttl: Instant) {
        self.min_ttl = Some(min_ttl)
    }

    /// Adds a known subnet for the peer.
    pub(super) fn insert_subnet(&mut self, subnet: Subnet) {
        self.subnets.insert(subnet);
    }

    /// Removes a subnet from the peer.
    pub(super) fn remove_subnet(&mut self, subnet: &Subnet) {
        self.subnets.remove(subnet);
    }

    /// Removes all subnets from the peer.
    pub(super) fn clear_subnets(&mut self) {
        self.subnets.clear()
    }

    /// Applies decay rates to a non-trusted peer's score.
    /// Trusted peers keep their (maximum) score untouched.
    pub(super) fn score_update(&mut self) {
        if !self.is_trusted {
            self.score.update()
        }
    }

    /// Apply peer action to a non-trusted peer's score.
    // VISIBILITY: The peer manager is able to modify the score of a peer.
    pub(in crate::peer_manager) fn apply_peer_action_to_score(&mut self, peer_action: PeerAction) {
        if !self.is_trusted {
            self.score.apply_peer_action(peer_action)
        }
    }

    /// Updates the gossipsub score with a new score. Optionally ignore the gossipsub score.
    pub(super) fn update_gossipsub_score(&mut self, new_score: f64, ignore: bool) {
        self.score.update_gossipsub_score(new_score, ignore);
    }

    #[cfg(test)]
    /// Resets the peers score.
    pub fn reset_score(&mut self) {
        self.score.test_reset();
    }

    /// Modifies the status to Dialing
    /// Returns an error if the current state is unexpected.
    pub(super) fn dialing_peer(&mut self) -> Result<(), &'static str> {
        // Only Disconnected, Banned or Unknown peers may transition to Dialing.
        match &mut self.connection_status {
            Connected { .. } => return Err("Dialing connected peer"),
            Dialing { .. } => return Err("Dialing an already dialing peer"),
            Disconnecting { .. } => return Err("Dialing a disconnecting peer"),
            Disconnected { .. } | Banned { .. } | Unknown => {}
        }
        self.connection_status = Dialing {
            since: Instant::now(),
        };
        Ok(())
    }

    /// Modifies the status to Connected and increases the number of ingoing
    /// connections by one
    pub(super) fn connect_ingoing(&mut self, seen_address: Option<SocketAddr>) {
        match &mut self.connection_status {
            Connected { n_in, .. } => *n_in += 1,
            Disconnected { .. }
            | Banned { .. }
            | Dialing { .. }
            | Disconnecting { .. }
            | Unknown => {
                // First connection of the session: record its direction.
                self.connection_status = Connected { n_in: 1, n_out: 0 };
                self.connection_direction = Some(ConnectionDirection::Incoming);
            }
        }

        if let Some(socket_addr) = seen_address {
            self.seen_addresses.insert(socket_addr);
        }
    }

    /// Modifies the status to Connected and increases the number of outgoing
    /// connections by one
    pub(super) fn connect_outgoing(&mut self, seen_address: Option<SocketAddr>) {
        match &mut self.connection_status {
            Connected { n_out, .. } => *n_out += 1,
            Disconnected { .. }
            | Banned { .. }
            | Dialing { .. }
            | Disconnecting { .. }
            | Unknown => {
                // First connection of the session: record its direction.
                self.connection_status = Connected { n_in: 0, n_out: 1 };
                self.connection_direction = Some(ConnectionDirection::Outgoing);
            }
        }
        if let Some(ip_addr) = seen_address {
            self.seen_addresses.insert(ip_addr);
        }
    }

    #[cfg(test)]
    /// Add an f64 to a non-trusted peer's score abiding by the limits.
    pub fn add_to_score(&mut self, score: f64) {
        if !self.is_trusted {
            self.score.test_add(score)
        }
    }

    #[cfg(test)]
    pub fn set_gossipsub_score(&mut self, score: f64) {
        self.score.set_gossipsub_score(score);
    }
}
|
||||
|
||||
#[derive(Clone, Debug, Serialize)]
/// The current health status of the peer.
pub enum PeerStatus {
    /// The peer is healthy.
    Healthy,
    /// The peer is clogged. It has not been responding to requests on time.
    /// (Underscore prefix suggests this variant is not yet constructed — confirm.)
    _Clogged,
}
|
||||
|
||||
impl Default for PeerStatus {
|
||||
fn default() -> Self {
|
||||
PeerStatus::Healthy
|
||||
}
|
||||
}
|
||||
|
||||
/// Connection Direction of connection.
#[derive(Debug, Clone, Serialize, AsRefStr)]
#[strum(serialize_all = "snake_case")]
pub enum ConnectionDirection {
    /// The connection was established by a peer dialing us.
    Incoming,
    /// The connection was established by us dialing a peer.
    Outgoing,
}
|
||||
|
||||
/// Connection Status of the peer.
#[derive(Debug, Clone)]
pub enum PeerConnectionStatus {
    /// The peer is connected.
    Connected {
        /// number of ingoing connections.
        n_in: u8,
        /// number of outgoing connections.
        n_out: u8,
    },
    /// The peer is being disconnected.
    Disconnecting {
        /// After the disconnection the peer will be considered banned.
        to_ban: bool,
    },
    /// The peer has disconnected.
    Disconnected {
        /// last time the peer was connected or discovered.
        since: Instant,
    },
    /// The peer has been banned and is disconnected.
    Banned {
        /// moment when the peer was banned.
        since: Instant,
    },
    /// We are currently dialing this peer.
    Dialing {
        /// time since we last communicated with the peer.
        since: Instant,
    },
    /// The connection status has not been specified.
    Unknown,
}
|
||||
|
||||
/// Serialization for http requests.
impl Serialize for PeerConnectionStatus {
    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
        // NOTE(review): the declared field count (6) does not match the number
        // of fields actually serialized (4, or 5 for `Disconnected`). This is
        // harmless for self-describing formats such as JSON, but strict
        // serializers may reject it — confirm the intended count.
        let mut s = serializer.serialize_struct("connection_status", 6)?;
        match self {
            Connected { n_in, n_out } => {
                s.serialize_field("status", "connected")?;
                s.serialize_field("connections_in", n_in)?;
                s.serialize_field("connections_out", n_out)?;
                s.serialize_field("last_seen", &0)?;
                s.end()
            }
            Disconnecting { .. } => {
                s.serialize_field("status", "disconnecting")?;
                s.serialize_field("connections_in", &0)?;
                s.serialize_field("connections_out", &0)?;
                s.serialize_field("last_seen", &0)?;
                s.end()
            }
            Disconnected { since } => {
                s.serialize_field("status", "disconnected")?;
                s.serialize_field("connections_in", &0)?;
                s.serialize_field("connections_out", &0)?;
                s.serialize_field("last_seen", &since.elapsed().as_secs())?;
                // NOTE(review): `banned_ips` is emitted only for this variant
                // (always empty here) and not for `Banned` — looks asymmetric;
                // confirm whether consumers rely on it.
                s.serialize_field("banned_ips", &Vec::<IpAddr>::new())?;
                s.end()
            }
            Banned { since } => {
                s.serialize_field("status", "banned")?;
                s.serialize_field("connections_in", &0)?;
                s.serialize_field("connections_out", &0)?;
                s.serialize_field("last_seen", &since.elapsed().as_secs())?;
                s.end()
            }
            Dialing { since } => {
                s.serialize_field("status", "dialing")?;
                s.serialize_field("connections_in", &0)?;
                s.serialize_field("connections_out", &0)?;
                s.serialize_field("last_seen", &since.elapsed().as_secs())?;
                s.end()
            }
            Unknown => {
                s.serialize_field("status", "unknown")?;
                s.serialize_field("connections_in", &0)?;
                s.serialize_field("connections_out", &0)?;
                s.serialize_field("last_seen", &0)?;
                s.end()
            }
        }
    }
}
|
||||
|
||||
impl Default for PeerConnectionStatus {
|
||||
fn default() -> Self {
|
||||
PeerConnectionStatus::Unknown
|
||||
}
|
||||
}
|
||||
425
beacon_node/lighthouse_network/src/peer_manager/peerdb/score.rs
Normal file
425
beacon_node/lighthouse_network/src/peer_manager/peerdb/score.rs
Normal file
@@ -0,0 +1,425 @@
|
||||
//! This contains the scoring logic for peers.
|
||||
//!
|
||||
//! A peer's score is a rational number in the range [-100, 100].
|
||||
//!
|
||||
//! As the logic develops this documentation will advance.
|
||||
//!
|
||||
//! The scoring algorithms are currently experimental.
|
||||
use crate::behaviour::gossipsub_scoring_parameters::GREYLIST_THRESHOLD as GOSSIPSUB_GREYLIST_THRESHOLD;
|
||||
use serde::Serialize;
|
||||
use std::time::Instant;
|
||||
use strum::AsRefStr;
|
||||
use tokio::time::Duration;
|
||||
|
||||
// Precomputed exponential-decay rate: -ln(2) / half-life, used as the
// exponent rate so that e^(HALFLIFE_DECAY * t) halves a score every
// SCORE_HALFLIFE seconds.
lazy_static! {
    static ref HALFLIFE_DECAY: f64 = -(2.0f64.ln()) / SCORE_HALFLIFE;
}

/// The default score for new peers.
pub(crate) const DEFAULT_SCORE: f64 = 0.0;
/// The minimum reputation before a peer is disconnected.
const MIN_SCORE_BEFORE_DISCONNECT: f64 = -20.0;
/// The minimum reputation before a peer is banned.
const MIN_SCORE_BEFORE_BAN: f64 = -50.0;
/// If a peer has a lighthouse score below this constant all other score parts will get ignored and
/// the peer will get banned regardless of the other parts.
const MIN_LIGHTHOUSE_SCORE_BEFORE_BAN: f64 = -60.0;
/// The maximum score a peer can obtain.
const MAX_SCORE: f64 = 100.0;
/// The minimum score a peer can obtain.
const MIN_SCORE: f64 = -100.0;
/// The halflife of a peer's score. I.e the number of seconds it takes for the score to decay to half its value.
const SCORE_HALFLIFE: f64 = 600.0;
/// The number of seconds we ban a peer for before their score begins to decay.
const BANNED_BEFORE_DECAY: Duration = Duration::from_secs(12 * 3600); // 12 hours

/// We weight negative gossipsub scores in such a way that they never result in a disconnect by
/// themselves. This "solves" the problem of non-decaying gossipsub scores for disconnected peers.
const GOSSIPSUB_NEGATIVE_SCORE_WEIGHT: f64 =
    (MIN_SCORE_BEFORE_DISCONNECT + 1.0) / GOSSIPSUB_GREYLIST_THRESHOLD;
/// Positive gossipsub scores are weighted identically to negative ones.
const GOSSIPSUB_POSITIVE_SCORE_WEIGHT: f64 = GOSSIPSUB_NEGATIVE_SCORE_WEIGHT;
|
||||
|
||||
/// A collection of actions a peer can perform which will adjust its score.
/// Each variant has an associated score change.
// To easily assess the behaviour of scores changes the number of variants should stay low, and
// somewhat generic.
#[derive(Debug, Clone, Copy, AsRefStr)]
#[strum(serialize_all = "snake_case")]
pub enum PeerAction {
    /// We should not communicate more with this peer.
    /// This action will cause the peer to get banned.
    Fatal,
    /// This peer's action is not malicious but will not be tolerated. A few occurrences will cause
    /// the peer to get kicked.
    /// NOTE: ~5 occurrences will get the peer banned
    LowToleranceError,
    /// An error occurred with this peer but it is not necessarily malicious.
    /// We have some tolerance for these actions: several occurrences are needed for a peer to get
    /// kicked.
    /// NOTE: ~10 occurrences will get the peer banned
    MidToleranceError,
    /// An error occurred with this peer but it is not necessarily malicious.
    /// We have high tolerance for these actions: many occurrences are needed for a peer to get
    /// kicked.
    /// NOTE: ~50 occurrences will get the peer banned
    HighToleranceError,
}
|
||||
|
||||
/// Service reporting a `PeerAction` for a peer.
#[derive(Debug)]
pub enum ReportSource {
    /// Reported by the gossipsub service.
    Gossipsub,
    /// Reported by the RPC layer.
    RPC,
    /// Reported by the message processor.
    Processor,
    /// Reported by the sync service.
    SyncService,
    /// Reported by the peer manager itself.
    PeerManager,
}
|
||||
|
||||
impl From<ReportSource> for &'static str {
|
||||
fn from(report_source: ReportSource) -> &'static str {
|
||||
match report_source {
|
||||
ReportSource::Gossipsub => "gossipsub",
|
||||
ReportSource::RPC => "rpc_error",
|
||||
ReportSource::Processor => "processor",
|
||||
ReportSource::SyncService => "sync",
|
||||
ReportSource::PeerManager => "peer_manager",
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Display for PeerAction {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
PeerAction::Fatal => write!(f, "Fatal"),
|
||||
PeerAction::LowToleranceError => write!(f, "Low Tolerance Error"),
|
||||
PeerAction::MidToleranceError => write!(f, "Mid Tolerance Error"),
|
||||
PeerAction::HighToleranceError => write!(f, "High Tolerance Error"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// The expected state of the peer given the peer's score.
#[derive(Debug, PartialEq, Clone, Copy)]
pub(crate) enum ScoreState {
    /// We are content with the peers performance. We permit connections and messages.
    Healthy,
    /// The peer should be disconnected. We allow re-connections if the peer is persistent.
    Disconnected,
    /// The peer is banned. We disallow new connections until its score has decayed into a
    /// tolerable threshold.
    Banned,
}
|
||||
|
||||
impl std::fmt::Display for ScoreState {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
ScoreState::Healthy => write!(f, "Healthy"),
|
||||
ScoreState::Banned => write!(f, "Banned"),
|
||||
ScoreState::Disconnected => write!(f, "Disconnected"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A peer's score (perceived potential usefulness).
///
/// This simplistic version consists of a global score per peer which decays to 0 over time. The
/// decay rate applies equally to positive and negative scores.
#[derive(PartialEq, Clone, Debug, Serialize)]
pub struct RealScore {
    /// The lighthouse-application component of the score.
    // NOTE: In the future we may separate this into sub-scores involving the RPC, Gossipsub and
    // lighthouse.
    lighthouse_score: f64,
    /// The gossipsub component of the score, supplied by the gossipsub scoring machinery.
    gossipsub_score: f64,
    /// We ignore the negative gossipsub scores of some peers to allow decaying without
    /// disconnecting.
    ignore_negative_gossipsub_score: bool,
    /// The cached aggregate score, recomputed from the components above.
    score: f64,
    /// The time the score was last updated to perform time-based adjustments such as score-decay.
    #[serde(skip)]
    last_updated: Instant,
}
|
||||
|
||||
impl Default for RealScore {
|
||||
fn default() -> Self {
|
||||
RealScore {
|
||||
lighthouse_score: DEFAULT_SCORE,
|
||||
gossipsub_score: DEFAULT_SCORE,
|
||||
score: DEFAULT_SCORE,
|
||||
last_updated: Instant::now(),
|
||||
ignore_negative_gossipsub_score: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl RealScore {
    /// Recomputes the cached aggregate `score` from the lighthouse and
    /// gossipsub components.
    fn recompute_score(&mut self) {
        self.score = self.lighthouse_score;
        if self.lighthouse_score <= MIN_LIGHTHOUSE_SCORE_BEFORE_BAN {
            // The lighthouse score alone warrants a ban:
            // ignore all other scores, i.e. do nothing here
        } else if self.gossipsub_score >= 0.0 {
            self.score += self.gossipsub_score * GOSSIPSUB_POSITIVE_SCORE_WEIGHT;
        } else if !self.ignore_negative_gossipsub_score {
            self.score += self.gossipsub_score * GOSSIPSUB_NEGATIVE_SCORE_WEIGHT;
        }
    }

    /// Access to the cached aggregate score.
    fn score(&self) -> f64 {
        self.score
    }

    /// Modifies the score based on a peer's action.
    pub fn apply_peer_action(&mut self, peer_action: PeerAction) {
        match peer_action {
            PeerAction::Fatal => self.set_lighthouse_score(MIN_SCORE), // The worst possible score
            PeerAction::LowToleranceError => self.add(-10.0),
            PeerAction::MidToleranceError => self.add(-5.0),
            PeerAction::HighToleranceError => self.add(-1.0),
        }
    }

    /// Sets the lighthouse score directly and refreshes the aggregate score/state.
    fn set_lighthouse_score(&mut self, new_score: f64) {
        self.lighthouse_score = new_score;
        self.update_state();
    }

    /// Add an f64 to the score abiding by the limits.
    fn add(&mut self, score: f64) {
        let mut new_score = self.lighthouse_score + score;
        if new_score > MAX_SCORE {
            new_score = MAX_SCORE;
        }
        if new_score < MIN_SCORE {
            new_score = MIN_SCORE;
        }

        self.set_lighthouse_score(new_score);
    }

    /// Recomputes the aggregate score and, on a transition into the banned
    /// range, freezes decay by pushing `last_updated` into the future.
    fn update_state(&mut self) {
        let was_not_banned = self.score > MIN_SCORE_BEFORE_BAN;
        self.recompute_score();
        if was_not_banned && self.score <= MIN_SCORE_BEFORE_BAN {
            // we ban this peer for at least BANNED_BEFORE_DECAY seconds
            self.last_updated += BANNED_BEFORE_DECAY;
        }
    }

    /// Add an f64 to the score abiding by the limits.
    #[cfg(test)]
    pub fn test_add(&mut self, score: f64) {
        self.add(score);
    }

    #[cfg(test)]
    // reset the score
    pub fn test_reset(&mut self) {
        self.set_lighthouse_score(0f64);
    }

    // Set the gossipsub_score to a specific f64.
    // Used in testing to induce score status changes during a heartbeat.
    #[cfg(test)]
    pub fn set_gossipsub_score(&mut self, score: f64) {
        self.gossipsub_score = score;
    }

    /// Applies time-based logic such as decay rates to the score.
    /// This function should be called periodically.
    pub fn update(&mut self) {
        self.update_at(Instant::now())
    }

    /// Applies time-based logic such as decay rates to the score with the given now value.
    /// This private sub function is mainly used for testing.
    fn update_at(&mut self, now: Instant) {
        // Decay the current score
        // Using exponential decay based on a constant half life.

        // It is important that we use here `checked_duration_since` instead of elapsed, since
        // we set last_updated to the future when banning peers. Therefore `checked_duration_since`
        // will return None in this case and the score does not get decayed.
        if let Some(secs_since_update) = now
            .checked_duration_since(self.last_updated)
            .map(|d| d.as_secs())
        {
            // e^(-ln(2)/HL*t)
            let decay_factor = (*HALFLIFE_DECAY * secs_since_update as f64).exp();
            self.lighthouse_score *= decay_factor;
            self.last_updated = now;
            self.update_state();
        }
    }

    /// Updates the gossipsub component of the score.
    pub fn update_gossipsub_score(&mut self, new_score: f64, ignore: bool) {
        // we only update gossipsub if last_updated is in the past which means either the peer is
        // not banned or the BANNED_BEFORE_DECAY time is over.
        if self.last_updated <= Instant::now() {
            self.gossipsub_score = new_score;
            self.ignore_negative_gossipsub_score = ignore;
            self.update_state();
        }
    }

    /// True if the gossipsub component of the score is non-negative.
    pub fn is_good_gossipsub_peer(&self) -> bool {
        self.gossipsub_score >= 0.0
    }
}
|
||||
|
||||
/// A peer's score: either the immutable maximum (trusted peers) or a real,
/// mutable, decaying score.
#[derive(PartialEq, Clone, Debug, Serialize)]
pub enum Score {
    /// The maximum possible score; used for trusted peers and never modified.
    Max,
    /// A regular score that responds to peer actions and decays over time.
    Real(RealScore),
}
|
||||
|
||||
impl Default for Score {
|
||||
fn default() -> Self {
|
||||
Self::Real(RealScore::default())
|
||||
}
|
||||
}
|
||||
|
||||
/// Forwards a mutating `RealScore` method through `Score`: `Score::Real`
/// delegates to the inner score, while `Score::Max` (trusted peers) silently
/// ignores the call so a trusted peer's score can never be modified.
macro_rules! apply {
    ( $method:ident $(, $param_name: ident: $param_type: ty)*) => {
        impl Score {
            pub fn $method(
                &mut self, $($param_name: $param_type, )*
            ) {
                if let Self::Real(score) = self {
                    score.$method($($param_name, )*);
                }
            }
        }
    };
}

apply!(apply_peer_action, peer_action: PeerAction);
apply!(update);
apply!(update_gossipsub_score, new_score: f64, ignore: bool);
#[cfg(test)]
apply!(test_add, score: f64);
#[cfg(test)]
apply!(test_reset);
#[cfg(test)]
apply!(set_gossipsub_score, score: f64);
|
||||
|
||||
impl Score {
|
||||
pub fn score(&self) -> f64 {
|
||||
match self {
|
||||
Self::Max => f64::INFINITY,
|
||||
Self::Real(score) => score.score(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn max_score() -> Self {
|
||||
Self::Max
|
||||
}
|
||||
|
||||
/// Returns the expected state of the peer given it's score.
|
||||
pub(crate) fn state(&self) -> ScoreState {
|
||||
match self.score() {
|
||||
x if x <= MIN_SCORE_BEFORE_BAN => ScoreState::Banned,
|
||||
x if x <= MIN_SCORE_BEFORE_DISCONNECT => ScoreState::Disconnected,
|
||||
_ => ScoreState::Healthy,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn is_good_gossipsub_peer(&self) -> bool {
|
||||
match self {
|
||||
Self::Max => true,
|
||||
Self::Real(score) => score.is_good_gossipsub_peer(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// `Score` wraps floats, so `Eq`/`Ord` are asserted rather than derived.
// NOTE(review): this assumes score values are never NaN — confirm upstream.
impl Eq for Score {}

impl PartialOrd for Score {
    fn partial_cmp(&self, other: &Score) -> Option<std::cmp::Ordering> {
        self.score().partial_cmp(&other.score())
    }
}

impl Ord for Score {
    fn cmp(&self, other: &Score) -> std::cmp::Ordering {
        // Falls back to `Equal` if the float comparison is undefined (NaN).
        self.partial_cmp(other).unwrap_or(std::cmp::Ordering::Equal)
    }
}
|
||||
|
||||
impl std::fmt::Display for Score {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "{:.2}", self.score())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    #[allow(clippy::float_cmp)]
    fn test_reputation_change() {
        let mut score = Score::default();

        // a 0 change does not change the reputation
        let change = 0.0;
        score.test_add(change);
        assert_eq!(score.score(), DEFAULT_SCORE);

        // underflowing change is capped
        let mut score = Score::default();
        let change = MIN_SCORE - 50.0;
        score.test_add(change);
        assert_eq!(score.score(), MIN_SCORE);

        // overflowing change is capped
        let mut score = Score::default();
        let change = MAX_SCORE + 50.0;
        score.test_add(change);
        assert_eq!(score.score(), MAX_SCORE);

        // Score adjusts
        let mut score = Score::default();
        let change = 1.32;
        score.test_add(change);
        assert_eq!(score.score(), DEFAULT_SCORE + change);
    }

    #[test]
    #[allow(clippy::float_cmp)]
    fn test_ban_time() {
        let mut score = RealScore::default();
        let now = Instant::now();

        // Drop the score exactly to the ban threshold.
        let change = MIN_SCORE_BEFORE_BAN;
        score.test_add(change);
        assert_eq!(score.score(), MIN_SCORE_BEFORE_BAN);

        // While within BANNED_BEFORE_DECAY the score must not decay.
        score.update_at(now + BANNED_BEFORE_DECAY);
        assert_eq!(score.score(), MIN_SCORE_BEFORE_BAN);

        // Once the freeze period is over, decay pulls the score back up.
        score.update_at(now + BANNED_BEFORE_DECAY + Duration::from_secs(1));
        assert!(score.score() > MIN_SCORE_BEFORE_BAN);
    }

    #[test]
    fn test_very_negative_gossipsub_score() {
        let mut score = Score::default();
        score.update_gossipsub_score(GOSSIPSUB_GREYLIST_THRESHOLD, false);
        assert!(!score.is_good_gossipsub_peer());
        assert!(score.score() < 0.0);
        // The gossipsub weighting alone cannot push a peer out of Healthy.
        assert_eq!(score.state(), ScoreState::Healthy);
        score.test_add(-1.0001);
        assert_eq!(score.state(), ScoreState::Disconnected);
    }

    #[test]
    #[allow(clippy::float_cmp)]
    fn test_ignored_gossipsub_score() {
        let mut score = Score::default();
        score.update_gossipsub_score(GOSSIPSUB_GREYLIST_THRESHOLD, true);
        assert!(!score.is_good_gossipsub_peer());
        // With `ignore == true` the negative gossipsub score is excluded from
        // the aggregate score.
        assert_eq!(score.score(), 0.0);
    }
}
|
||||
@@ -0,0 +1,84 @@
|
||||
//! Handles individual sync status for peers.
|
||||
|
||||
use serde::Serialize;
|
||||
use types::{Epoch, Hash256, Slot};
|
||||
|
||||
#[derive(Clone, Debug, Serialize)]
|
||||
/// The current sync status of the peer.
|
||||
pub enum SyncStatus {
|
||||
/// At the current state as our node or ahead of us.
|
||||
Synced { info: SyncInfo },
|
||||
/// The peer has greater knowledge about the canonical chain than we do.
|
||||
Advanced { info: SyncInfo },
|
||||
/// Is behind our current head and not useful for block downloads.
|
||||
Behind { info: SyncInfo },
|
||||
/// This peer is in an incompatible network.
|
||||
IrrelevantPeer,
|
||||
/// Not currently known as a STATUS handshake has not occurred.
|
||||
Unknown,
|
||||
}
|
||||
|
||||
/// A relevant peer's sync information.
|
||||
#[derive(Clone, Debug, Serialize)]
|
||||
pub struct SyncInfo {
|
||||
pub head_slot: Slot,
|
||||
pub head_root: Hash256,
|
||||
pub finalized_epoch: Epoch,
|
||||
pub finalized_root: Hash256,
|
||||
}
|
||||
|
||||
impl std::cmp::PartialEq for SyncStatus {
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
matches!(
|
||||
(self, other),
|
||||
(SyncStatus::Synced { .. }, SyncStatus::Synced { .. })
|
||||
| (SyncStatus::Advanced { .. }, SyncStatus::Advanced { .. })
|
||||
| (SyncStatus::Behind { .. }, SyncStatus::Behind { .. })
|
||||
| (SyncStatus::IrrelevantPeer, SyncStatus::IrrelevantPeer)
|
||||
| (SyncStatus::Unknown, SyncStatus::Unknown)
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl SyncStatus {
|
||||
/// Returns true if the peer has advanced knowledge of the chain.
|
||||
pub fn is_advanced(&self) -> bool {
|
||||
matches!(self, SyncStatus::Advanced { .. })
|
||||
}
|
||||
|
||||
/// Returns true if the peer is up to date with the current chain.
|
||||
pub fn is_synced(&self) -> bool {
|
||||
matches!(self, SyncStatus::Synced { .. })
|
||||
}
|
||||
|
||||
/// Returns true if the peer is behind the current chain.
|
||||
pub fn is_behind(&self) -> bool {
|
||||
matches!(self, SyncStatus::Behind { .. })
|
||||
}
|
||||
|
||||
/// Updates the peer's sync status, returning whether the status transitioned.
|
||||
///
|
||||
/// E.g. returns `true` if the state changed from `Synced` to `Advanced`, but not if
|
||||
/// the status remained `Synced` with different `SyncInfo` within.
|
||||
pub fn update(&mut self, new_state: SyncStatus) -> bool {
|
||||
let changed_status = *self != new_state;
|
||||
*self = new_state;
|
||||
changed_status
|
||||
}
|
||||
|
||||
pub fn as_str(&self) -> &'static str {
|
||||
match self {
|
||||
SyncStatus::Advanced { .. } => "Advanced",
|
||||
SyncStatus::Behind { .. } => "Behind",
|
||||
SyncStatus::Synced { .. } => "Synced",
|
||||
SyncStatus::Unknown => "Unknown",
|
||||
SyncStatus::IrrelevantPeer => "Irrelevant",
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Display for SyncStatus {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
f.write_str(self.as_str())
|
||||
}
|
||||
}
|
||||
295
beacon_node/lighthouse_network/src/rpc/codec/base.rs
Normal file
295
beacon_node/lighthouse_network/src/rpc/codec/base.rs
Normal file
@@ -0,0 +1,295 @@
|
||||
//! This handles the various supported encoding mechanism for the Eth 2.0 RPC.
|
||||
|
||||
use crate::rpc::methods::ErrorType;
|
||||
use crate::rpc::{InboundRequest, OutboundRequest, RPCCodedResponse, RPCResponse};
|
||||
use libp2p::bytes::BufMut;
|
||||
use libp2p::bytes::BytesMut;
|
||||
use std::marker::PhantomData;
|
||||
use tokio_util::codec::{Decoder, Encoder};
|
||||
use types::EthSpec;
|
||||
|
||||
pub trait OutboundCodec<TItem>: Encoder<TItem> + Decoder {
|
||||
type CodecErrorType;
|
||||
|
||||
fn decode_error(
|
||||
&mut self,
|
||||
src: &mut BytesMut,
|
||||
) -> Result<Option<Self::CodecErrorType>, <Self as Decoder>::Error>;
|
||||
}
|
||||
|
||||
/* Global Inbound Codec */
|
||||
// This deals with Decoding RPC Requests from other peers and encoding our responses
|
||||
|
||||
pub struct BaseInboundCodec<TCodec, TSpec>
|
||||
where
|
||||
TCodec: Encoder<RPCCodedResponse<TSpec>> + Decoder,
|
||||
TSpec: EthSpec,
|
||||
{
|
||||
/// Inner codec for handling various encodings
|
||||
inner: TCodec,
|
||||
phantom: PhantomData<TSpec>,
|
||||
}
|
||||
|
||||
impl<TCodec, TSpec> BaseInboundCodec<TCodec, TSpec>
|
||||
where
|
||||
TCodec: Encoder<RPCCodedResponse<TSpec>> + Decoder,
|
||||
TSpec: EthSpec,
|
||||
{
|
||||
pub fn new(codec: TCodec) -> Self {
|
||||
BaseInboundCodec {
|
||||
inner: codec,
|
||||
phantom: PhantomData,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* Global Outbound Codec */
|
||||
// This deals with Decoding RPC Responses from other peers and encoding our requests
|
||||
pub struct BaseOutboundCodec<TOutboundCodec, TSpec>
|
||||
where
|
||||
TOutboundCodec: OutboundCodec<OutboundRequest<TSpec>>,
|
||||
TSpec: EthSpec,
|
||||
{
|
||||
/// Inner codec for handling various encodings.
|
||||
inner: TOutboundCodec,
|
||||
/// Keeps track of the current response code for a chunk.
|
||||
current_response_code: Option<u8>,
|
||||
phantom: PhantomData<TSpec>,
|
||||
}
|
||||
|
||||
impl<TOutboundCodec, TSpec> BaseOutboundCodec<TOutboundCodec, TSpec>
|
||||
where
|
||||
TSpec: EthSpec,
|
||||
TOutboundCodec: OutboundCodec<OutboundRequest<TSpec>>,
|
||||
{
|
||||
pub fn new(codec: TOutboundCodec) -> Self {
|
||||
BaseOutboundCodec {
|
||||
inner: codec,
|
||||
current_response_code: None,
|
||||
phantom: PhantomData,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* Implementation of the Encoding/Decoding for the global codecs */
|
||||
|
||||
/* Base Inbound Codec */
|
||||
|
||||
// This Encodes RPC Responses sent to external peers
|
||||
impl<TCodec, TSpec> Encoder<RPCCodedResponse<TSpec>> for BaseInboundCodec<TCodec, TSpec>
|
||||
where
|
||||
TSpec: EthSpec,
|
||||
TCodec: Decoder + Encoder<RPCCodedResponse<TSpec>>,
|
||||
{
|
||||
type Error = <TCodec as Encoder<RPCCodedResponse<TSpec>>>::Error;
|
||||
|
||||
fn encode(
|
||||
&mut self,
|
||||
item: RPCCodedResponse<TSpec>,
|
||||
dst: &mut BytesMut,
|
||||
) -> Result<(), Self::Error> {
|
||||
dst.clear();
|
||||
dst.reserve(1);
|
||||
dst.put_u8(
|
||||
item.as_u8()
|
||||
.expect("Should never encode a stream termination"),
|
||||
);
|
||||
self.inner.encode(item, dst)
|
||||
}
|
||||
}
|
||||
|
||||
// This Decodes RPC Requests from external peers
|
||||
impl<TCodec, TSpec> Decoder for BaseInboundCodec<TCodec, TSpec>
|
||||
where
|
||||
TSpec: EthSpec,
|
||||
TCodec: Encoder<RPCCodedResponse<TSpec>> + Decoder<Item = InboundRequest<TSpec>>,
|
||||
{
|
||||
type Item = InboundRequest<TSpec>;
|
||||
type Error = <TCodec as Decoder>::Error;
|
||||
|
||||
fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
|
||||
self.inner.decode(src)
|
||||
}
|
||||
}
|
||||
|
||||
/* Base Outbound Codec */
|
||||
|
||||
// This Encodes RPC Requests sent to external peers
|
||||
impl<TCodec, TSpec> Encoder<OutboundRequest<TSpec>> for BaseOutboundCodec<TCodec, TSpec>
|
||||
where
|
||||
TSpec: EthSpec,
|
||||
TCodec: OutboundCodec<OutboundRequest<TSpec>> + Encoder<OutboundRequest<TSpec>>,
|
||||
{
|
||||
type Error = <TCodec as Encoder<OutboundRequest<TSpec>>>::Error;
|
||||
|
||||
fn encode(
|
||||
&mut self,
|
||||
item: OutboundRequest<TSpec>,
|
||||
dst: &mut BytesMut,
|
||||
) -> Result<(), Self::Error> {
|
||||
self.inner.encode(item, dst)
|
||||
}
|
||||
}
|
||||
|
||||
// This decodes RPC Responses received from external peers
|
||||
impl<TCodec, TSpec> Decoder for BaseOutboundCodec<TCodec, TSpec>
|
||||
where
|
||||
TSpec: EthSpec,
|
||||
TCodec: OutboundCodec<OutboundRequest<TSpec>, CodecErrorType = ErrorType>
|
||||
+ Decoder<Item = RPCResponse<TSpec>>,
|
||||
{
|
||||
type Item = RPCCodedResponse<TSpec>;
|
||||
type Error = <TCodec as Decoder>::Error;
|
||||
|
||||
fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
|
||||
// if we have only received the response code, wait for more bytes
|
||||
if src.len() <= 1 {
|
||||
return Ok(None);
|
||||
}
|
||||
// using the response code determine which kind of payload needs to be decoded.
|
||||
let response_code = self.current_response_code.unwrap_or_else(|| {
|
||||
let resp_code = src.split_to(1)[0];
|
||||
self.current_response_code = Some(resp_code);
|
||||
resp_code
|
||||
});
|
||||
|
||||
let inner_result = {
|
||||
if RPCCodedResponse::<TSpec>::is_response(response_code) {
|
||||
// decode an actual response and mutates the buffer if enough bytes have been read
|
||||
// returning the result.
|
||||
self.inner
|
||||
.decode(src)
|
||||
.map(|r| r.map(RPCCodedResponse::Success))
|
||||
} else {
|
||||
// decode an error
|
||||
self.inner
|
||||
.decode_error(src)
|
||||
.map(|r| r.map(|resp| RPCCodedResponse::from_error(response_code, resp)))
|
||||
}
|
||||
};
|
||||
// if the inner decoder was capable of decoding a chunk, we need to reset the current
|
||||
// response code for the next chunk
|
||||
if let Ok(Some(_)) = inner_result {
|
||||
self.current_response_code = None;
|
||||
}
|
||||
// return the result
|
||||
inner_result
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::super::ssz_snappy::*;
|
||||
use super::*;
|
||||
use crate::rpc::protocol::*;
|
||||
|
||||
use std::sync::Arc;
|
||||
use types::{ForkContext, Hash256};
|
||||
use unsigned_varint::codec::Uvi;
|
||||
|
||||
type Spec = types::MainnetEthSpec;
|
||||
|
||||
fn fork_context() -> ForkContext {
|
||||
ForkContext::new::<Spec>(types::Slot::new(0), Hash256::zero(), &Spec::default_spec())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_decode_status_message() {
|
||||
let message = hex::decode("0054ff060000734e615070590032000006e71e7b54989925efd6c9cbcb8ceb9b5f71216f5137282bf6a1e3b50f64e42d6c7fb347abe07eb0db8200000005029e2800").unwrap();
|
||||
let mut buf = BytesMut::new();
|
||||
buf.extend_from_slice(&message);
|
||||
|
||||
let snappy_protocol_id =
|
||||
ProtocolId::new(Protocol::Status, Version::V1, Encoding::SSZSnappy);
|
||||
|
||||
let fork_context = Arc::new(fork_context());
|
||||
let mut snappy_outbound_codec =
|
||||
SSZSnappyOutboundCodec::<Spec>::new(snappy_protocol_id, 1_048_576, fork_context);
|
||||
|
||||
// remove response code
|
||||
let mut snappy_buf = buf.clone();
|
||||
let _ = snappy_buf.split_to(1);
|
||||
|
||||
// decode message just as snappy message
|
||||
let _snappy_decoded_message = snappy_outbound_codec.decode(&mut snappy_buf).unwrap();
|
||||
|
||||
// build codecs for entire chunk
|
||||
let mut snappy_base_outbound_codec = BaseOutboundCodec::new(snappy_outbound_codec);
|
||||
|
||||
// decode message as ssz snappy chunk
|
||||
let _snappy_decoded_chunk = snappy_base_outbound_codec.decode(&mut buf).unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_invalid_length_prefix() {
|
||||
let mut uvi_codec: Uvi<u128> = Uvi::default();
|
||||
let mut dst = BytesMut::with_capacity(1024);
|
||||
|
||||
// Smallest > 10 byte varint
|
||||
let len: u128 = 2u128.pow(70);
|
||||
|
||||
// Insert length-prefix
|
||||
uvi_codec.encode(len, &mut dst).unwrap();
|
||||
|
||||
let snappy_protocol_id =
|
||||
ProtocolId::new(Protocol::Status, Version::V1, Encoding::SSZSnappy);
|
||||
|
||||
let fork_context = Arc::new(fork_context());
|
||||
let mut snappy_outbound_codec =
|
||||
SSZSnappyOutboundCodec::<Spec>::new(snappy_protocol_id, 1_048_576, fork_context);
|
||||
|
||||
let snappy_decoded_message = snappy_outbound_codec.decode(&mut dst).unwrap_err();
|
||||
|
||||
assert_eq!(
|
||||
snappy_decoded_message,
|
||||
RPCError::IoError("input bytes exceed maximum".to_string()),
|
||||
"length-prefix of > 10 bytes is invalid"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_length_limits() {
|
||||
fn encode_len(len: usize) -> BytesMut {
|
||||
let mut uvi_codec: Uvi<usize> = Uvi::default();
|
||||
let mut dst = BytesMut::with_capacity(1024);
|
||||
uvi_codec.encode(len, &mut dst).unwrap();
|
||||
dst
|
||||
}
|
||||
|
||||
let protocol_id =
|
||||
ProtocolId::new(Protocol::BlocksByRange, Version::V1, Encoding::SSZSnappy);
|
||||
|
||||
// Response limits
|
||||
let limit = protocol_id.rpc_response_limits::<Spec>();
|
||||
let mut max = encode_len(limit.max + 1);
|
||||
let fork_context = Arc::new(fork_context());
|
||||
let mut codec = SSZSnappyOutboundCodec::<Spec>::new(
|
||||
protocol_id.clone(),
|
||||
1_048_576,
|
||||
fork_context.clone(),
|
||||
);
|
||||
assert_eq!(codec.decode(&mut max).unwrap_err(), RPCError::InvalidData);
|
||||
|
||||
let mut min = encode_len(limit.min - 1);
|
||||
let mut codec = SSZSnappyOutboundCodec::<Spec>::new(
|
||||
protocol_id.clone(),
|
||||
1_048_576,
|
||||
fork_context.clone(),
|
||||
);
|
||||
assert_eq!(codec.decode(&mut min).unwrap_err(), RPCError::InvalidData);
|
||||
|
||||
// Request limits
|
||||
let limit = protocol_id.rpc_request_limits();
|
||||
let mut max = encode_len(limit.max + 1);
|
||||
let mut codec = SSZSnappyOutboundCodec::<Spec>::new(
|
||||
protocol_id.clone(),
|
||||
1_048_576,
|
||||
fork_context.clone(),
|
||||
);
|
||||
assert_eq!(codec.decode(&mut max).unwrap_err(), RPCError::InvalidData);
|
||||
|
||||
let mut min = encode_len(limit.min - 1);
|
||||
let mut codec = SSZSnappyOutboundCodec::<Spec>::new(protocol_id, 1_048_576, fork_context);
|
||||
assert_eq!(codec.decode(&mut min).unwrap_err(), RPCError::InvalidData);
|
||||
}
|
||||
}
|
||||
65
beacon_node/lighthouse_network/src/rpc/codec/mod.rs
Normal file
65
beacon_node/lighthouse_network/src/rpc/codec/mod.rs
Normal file
@@ -0,0 +1,65 @@
|
||||
pub(crate) mod base;
|
||||
pub(crate) mod ssz_snappy;
|
||||
|
||||
use self::base::{BaseInboundCodec, BaseOutboundCodec};
|
||||
use self::ssz_snappy::{SSZSnappyInboundCodec, SSZSnappyOutboundCodec};
|
||||
use crate::rpc::protocol::RPCError;
|
||||
use crate::rpc::{InboundRequest, OutboundRequest, RPCCodedResponse};
|
||||
use libp2p::bytes::BytesMut;
|
||||
use tokio_util::codec::{Decoder, Encoder};
|
||||
use types::EthSpec;
|
||||
|
||||
// Known types of codecs
|
||||
pub enum InboundCodec<TSpec: EthSpec> {
|
||||
SSZSnappy(BaseInboundCodec<SSZSnappyInboundCodec<TSpec>, TSpec>),
|
||||
}
|
||||
|
||||
pub enum OutboundCodec<TSpec: EthSpec> {
|
||||
SSZSnappy(BaseOutboundCodec<SSZSnappyOutboundCodec<TSpec>, TSpec>),
|
||||
}
|
||||
|
||||
impl<T: EthSpec> Encoder<RPCCodedResponse<T>> for InboundCodec<T> {
|
||||
type Error = RPCError;
|
||||
|
||||
fn encode(&mut self, item: RPCCodedResponse<T>, dst: &mut BytesMut) -> Result<(), Self::Error> {
|
||||
match self {
|
||||
InboundCodec::SSZSnappy(codec) => codec.encode(item, dst),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<TSpec: EthSpec> Decoder for InboundCodec<TSpec> {
|
||||
type Item = InboundRequest<TSpec>;
|
||||
type Error = RPCError;
|
||||
|
||||
fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
|
||||
match self {
|
||||
InboundCodec::SSZSnappy(codec) => codec.decode(src),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<TSpec: EthSpec> Encoder<OutboundRequest<TSpec>> for OutboundCodec<TSpec> {
|
||||
type Error = RPCError;
|
||||
|
||||
fn encode(
|
||||
&mut self,
|
||||
item: OutboundRequest<TSpec>,
|
||||
dst: &mut BytesMut,
|
||||
) -> Result<(), Self::Error> {
|
||||
match self {
|
||||
OutboundCodec::SSZSnappy(codec) => codec.encode(item, dst),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: EthSpec> Decoder for OutboundCodec<T> {
|
||||
type Item = RPCCodedResponse<T>;
|
||||
type Error = RPCError;
|
||||
|
||||
fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
|
||||
match self {
|
||||
OutboundCodec::SSZSnappy(codec) => codec.decode(src),
|
||||
}
|
||||
}
|
||||
}
|
||||
1115
beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs
Normal file
1115
beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs
Normal file
File diff suppressed because it is too large
Load Diff
963
beacon_node/lighthouse_network/src/rpc/handler.rs
Normal file
963
beacon_node/lighthouse_network/src/rpc/handler.rs
Normal file
@@ -0,0 +1,963 @@
|
||||
#![allow(clippy::type_complexity)]
|
||||
#![allow(clippy::cognitive_complexity)]
|
||||
|
||||
use super::methods::{
|
||||
GoodbyeReason, RPCCodedResponse, RPCResponseErrorCode, RequestId, ResponseTermination,
|
||||
};
|
||||
use super::outbound::OutboundRequestContainer;
|
||||
use super::protocol::{InboundRequest, Protocol, RPCError, RPCProtocol};
|
||||
use super::{RPCReceived, RPCSend};
|
||||
use crate::rpc::outbound::{OutboundFramed, OutboundRequest};
|
||||
use crate::rpc::protocol::InboundFramed;
|
||||
use fnv::FnvHashMap;
|
||||
use futures::prelude::*;
|
||||
use futures::{Sink, SinkExt};
|
||||
use libp2p::core::upgrade::{
|
||||
InboundUpgrade, NegotiationError, OutboundUpgrade, ProtocolError, UpgradeError,
|
||||
};
|
||||
use libp2p::swarm::protocols_handler::{
|
||||
KeepAlive, ProtocolsHandler, ProtocolsHandlerEvent, ProtocolsHandlerUpgrErr, SubstreamProtocol,
|
||||
};
|
||||
use libp2p::swarm::NegotiatedSubstream;
|
||||
use slog::{crit, debug, trace, warn};
|
||||
use smallvec::SmallVec;
|
||||
use std::{
|
||||
collections::hash_map::Entry,
|
||||
pin::Pin,
|
||||
sync::Arc,
|
||||
task::{Context, Poll},
|
||||
time::Duration,
|
||||
};
|
||||
use tokio::time::{sleep_until, Instant as TInstant, Sleep};
|
||||
use tokio_util::time::{delay_queue, DelayQueue};
|
||||
use types::{EthSpec, ForkContext};
|
||||
|
||||
/// The time (in seconds) before a substream that is awaiting a response from the user times out.
|
||||
pub const RESPONSE_TIMEOUT: u64 = 10;
|
||||
|
||||
/// The number of times to retry an outbound upgrade in the case of IO errors.
|
||||
const IO_ERROR_RETRIES: u8 = 3;
|
||||
|
||||
/// Maximum time given to the handler to perform shutdown operations.
|
||||
const SHUTDOWN_TIMEOUT_SECS: u8 = 15;
|
||||
|
||||
/// Identifier of inbound and outbound substreams from the handler's perspective.
|
||||
#[derive(Debug, Clone, Copy, Hash, Eq, PartialEq)]
|
||||
pub struct SubstreamId(usize);
|
||||
|
||||
type InboundSubstream<TSpec> = InboundFramed<NegotiatedSubstream, TSpec>;
|
||||
|
||||
/// Output of the future handling the send of responses to a peer's request.
|
||||
type InboundProcessingOutput<TSpec> = (
|
||||
InboundSubstream<TSpec>, /* substream */
|
||||
Vec<RPCError>, /* Errors sending messages if any */
|
||||
bool, /* whether to remove the stream afterwards */
|
||||
u64, /* Chunks remaining to be sent after this processing finishes */
|
||||
);
|
||||
|
||||
/// Events the handler emits to the behaviour.
|
||||
type HandlerEvent<T> = Result<RPCReceived<T>, HandlerErr>;
|
||||
|
||||
/// An error encountered by the handler.
|
||||
#[derive(Debug)]
|
||||
pub enum HandlerErr {
|
||||
/// An error occurred for this peer's request. This can occur during protocol negotiation,
|
||||
/// message passing, or if the handler identifies that we are sending an error response to the peer.
|
||||
Inbound {
|
||||
/// Id of the peer's request for which an error occurred.
|
||||
id: SubstreamId,
|
||||
/// Information of the negotiated protocol.
|
||||
proto: Protocol,
|
||||
/// The error that occurred.
|
||||
error: RPCError,
|
||||
},
|
||||
/// An error occurred for this request. Such error can occur during protocol negotiation,
|
||||
/// message passing, or if we successfully received a response from the peer, but this response
|
||||
/// indicates an error.
|
||||
Outbound {
|
||||
/// Application-given Id of the request for which an error occurred.
|
||||
id: RequestId,
|
||||
/// Information of the protocol.
|
||||
proto: Protocol,
|
||||
/// The error that occurred.
|
||||
error: RPCError,
|
||||
},
|
||||
}
|
||||
|
||||
/// Implementation of `ProtocolsHandler` for the RPC protocol.
|
||||
pub struct RPCHandler<TSpec>
|
||||
where
|
||||
TSpec: EthSpec,
|
||||
{
|
||||
/// The upgrade for inbound substreams.
|
||||
listen_protocol: SubstreamProtocol<RPCProtocol<TSpec>, ()>,
|
||||
|
||||
/// Queue of events to produce in `poll()`.
|
||||
events_out: SmallVec<[HandlerEvent<TSpec>; 4]>,
|
||||
|
||||
/// Queue of outbound substreams to open.
|
||||
dial_queue: SmallVec<[(RequestId, OutboundRequest<TSpec>); 4]>,
|
||||
|
||||
/// Current number of concurrent outbound substreams being opened.
|
||||
dial_negotiated: u32,
|
||||
|
||||
/// Current inbound substreams awaiting processing.
|
||||
inbound_substreams: FnvHashMap<SubstreamId, InboundInfo<TSpec>>,
|
||||
|
||||
/// Inbound substream `DelayQueue` which keeps track of when an inbound substream will timeout.
|
||||
inbound_substreams_delay: DelayQueue<SubstreamId>,
|
||||
|
||||
/// Map of outbound substreams that need to be driven to completion.
|
||||
outbound_substreams: FnvHashMap<SubstreamId, OutboundInfo<TSpec>>,
|
||||
|
||||
/// Inbound substream `DelayQueue` which keeps track of when an inbound substream will timeout.
|
||||
outbound_substreams_delay: DelayQueue<SubstreamId>,
|
||||
|
||||
/// Sequential ID for waiting substreams. For inbound substreams, this is also the inbound request ID.
|
||||
current_inbound_substream_id: SubstreamId,
|
||||
|
||||
/// Sequential ID for outbound substreams.
|
||||
current_outbound_substream_id: SubstreamId,
|
||||
|
||||
/// Maximum number of concurrent outbound substreams being opened. Value is never modified.
|
||||
max_dial_negotiated: u32,
|
||||
|
||||
/// State of the handler.
|
||||
state: HandlerState,
|
||||
|
||||
/// Try to negotiate the outbound upgrade a few times if there is an IO error before reporting the request as failed.
|
||||
/// This keeps track of the number of attempts.
|
||||
outbound_io_error_retries: u8,
|
||||
|
||||
/// Fork specific info.
|
||||
fork_context: Arc<ForkContext>,
|
||||
|
||||
/// Logger for handling RPC streams
|
||||
log: slog::Logger,
|
||||
}
|
||||
|
||||
enum HandlerState {
|
||||
/// The handler is active. All messages are sent and received.
|
||||
Active,
|
||||
/// The handler is shutting_down.
|
||||
///
|
||||
/// While in this state the handler rejects new requests but tries to finish existing ones.
|
||||
/// Once the timer expires, all messages are killed.
|
||||
ShuttingDown(Box<Sleep>),
|
||||
/// The handler is deactivated. A goodbye has been sent and no more messages are sent or
|
||||
/// received.
|
||||
Deactivated,
|
||||
}
|
||||
|
||||
/// Contains the information the handler keeps on established inbound substreams.
|
||||
struct InboundInfo<TSpec: EthSpec> {
|
||||
/// State of the substream.
|
||||
state: InboundState<TSpec>,
|
||||
/// Responses queued for sending.
|
||||
pending_items: Vec<RPCCodedResponse<TSpec>>,
|
||||
/// Protocol of the original request we received from the peer.
|
||||
protocol: Protocol,
|
||||
/// Responses that the peer is still expecting from us.
|
||||
remaining_chunks: u64,
|
||||
/// Key to keep track of the substream's timeout via `self.inbound_substreams_delay`.
|
||||
delay_key: Option<delay_queue::Key>,
|
||||
}
|
||||
|
||||
/// Contains the information the handler keeps on established outbound substreams.
|
||||
struct OutboundInfo<TSpec: EthSpec> {
|
||||
/// State of the substream.
|
||||
state: OutboundSubstreamState<TSpec>,
|
||||
/// Key to keep track of the substream's timeout via `self.outbound_substreams_delay`.
|
||||
delay_key: delay_queue::Key,
|
||||
/// Info over the protocol this substream is handling.
|
||||
proto: Protocol,
|
||||
/// Number of chunks to be seen from the peer's response.
|
||||
remaining_chunks: Option<u64>,
|
||||
/// `RequestId` as given by the application that sent the request.
|
||||
req_id: RequestId,
|
||||
}
|
||||
|
||||
/// State of an inbound substream connection.
|
||||
enum InboundState<TSpec: EthSpec> {
|
||||
/// The underlying substream is not being used.
|
||||
Idle(InboundSubstream<TSpec>),
|
||||
/// The underlying substream is processing responses.
|
||||
Busy(Pin<Box<dyn Future<Output = InboundProcessingOutput<TSpec>> + Send>>),
|
||||
/// Temporary state during processing
|
||||
Poisoned,
|
||||
}
|
||||
|
||||
/// State of an outbound substream. Either waiting for a response, or in the process of sending.
|
||||
pub enum OutboundSubstreamState<TSpec: EthSpec> {
|
||||
/// A request has been sent, and we are awaiting a response. This future is driven in the
|
||||
/// handler because GOODBYE requests can be handled and responses dropped instantly.
|
||||
RequestPendingResponse {
|
||||
/// The framed negotiated substream.
|
||||
substream: Box<OutboundFramed<NegotiatedSubstream, TSpec>>,
|
||||
/// Keeps track of the actual request sent.
|
||||
request: OutboundRequest<TSpec>,
|
||||
},
|
||||
/// Closing an outbound substream>
|
||||
Closing(Box<OutboundFramed<NegotiatedSubstream, TSpec>>),
|
||||
/// Temporary state during processing
|
||||
Poisoned,
|
||||
}
|
||||
|
||||
impl<TSpec> RPCHandler<TSpec>
|
||||
where
|
||||
TSpec: EthSpec,
|
||||
{
|
||||
pub fn new(
|
||||
listen_protocol: SubstreamProtocol<RPCProtocol<TSpec>, ()>,
|
||||
fork_context: Arc<ForkContext>,
|
||||
log: &slog::Logger,
|
||||
) -> Self {
|
||||
RPCHandler {
|
||||
listen_protocol,
|
||||
events_out: SmallVec::new(),
|
||||
dial_queue: SmallVec::new(),
|
||||
dial_negotiated: 0,
|
||||
inbound_substreams: FnvHashMap::default(),
|
||||
outbound_substreams: FnvHashMap::default(),
|
||||
inbound_substreams_delay: DelayQueue::new(),
|
||||
outbound_substreams_delay: DelayQueue::new(),
|
||||
current_inbound_substream_id: SubstreamId(0),
|
||||
current_outbound_substream_id: SubstreamId(0),
|
||||
state: HandlerState::Active,
|
||||
max_dial_negotiated: 8,
|
||||
outbound_io_error_retries: 0,
|
||||
fork_context,
|
||||
log: log.clone(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Initiates the handler's shutdown process, sending an optional Goodbye message to the
|
||||
/// peer.
|
||||
fn shutdown(&mut self, goodbye_reason: Option<GoodbyeReason>) {
|
||||
if matches!(self.state, HandlerState::Active) {
|
||||
if !self.dial_queue.is_empty() {
|
||||
debug!(self.log, "Starting handler shutdown"; "unsent_queued_requests" => self.dial_queue.len());
|
||||
}
|
||||
// We now drive to completion communications already dialed/established
|
||||
while let Some((id, req)) = self.dial_queue.pop() {
|
||||
self.events_out.push(Err(HandlerErr::Outbound {
|
||||
error: RPCError::HandlerRejected,
|
||||
proto: req.protocol(),
|
||||
id,
|
||||
}));
|
||||
}
|
||||
|
||||
// Queue our goodbye message.
|
||||
if let Some(reason) = goodbye_reason {
|
||||
self.dial_queue
|
||||
.push((RequestId::Router, OutboundRequest::Goodbye(reason)));
|
||||
}
|
||||
|
||||
self.state = HandlerState::ShuttingDown(Box::new(sleep_until(
|
||||
TInstant::now() + Duration::from_secs(SHUTDOWN_TIMEOUT_SECS as u64),
|
||||
)));
|
||||
}
|
||||
}
|
||||
|
||||
/// Opens an outbound substream with a request.
|
||||
fn send_request(&mut self, id: RequestId, req: OutboundRequest<TSpec>) {
|
||||
match self.state {
|
||||
HandlerState::Active => {
|
||||
self.dial_queue.push((id, req));
|
||||
}
|
||||
_ => self.events_out.push(Err(HandlerErr::Outbound {
|
||||
error: RPCError::HandlerRejected,
|
||||
proto: req.protocol(),
|
||||
id,
|
||||
})),
|
||||
}
|
||||
}
|
||||
|
||||
/// Sends a response to a peer's request.
|
||||
// NOTE: If the substream has closed due to inactivity, or the substream is in the
|
||||
// wrong state a response will fail silently.
|
||||
fn send_response(&mut self, inbound_id: SubstreamId, response: RPCCodedResponse<TSpec>) {
|
||||
// check if the stream matching the response still exists
|
||||
let inbound_info = if let Some(info) = self.inbound_substreams.get_mut(&inbound_id) {
|
||||
info
|
||||
} else {
|
||||
if !matches!(response, RPCCodedResponse::StreamTermination(..)) {
|
||||
// the stream is closed after sending the expected number of responses
|
||||
trace!(self.log, "Inbound stream has expired, response not sent";
|
||||
"response" => %response, "id" => inbound_id);
|
||||
}
|
||||
return;
|
||||
};
|
||||
|
||||
// If the response we are sending is an error, report back for handling
|
||||
if let RPCCodedResponse::Error(ref code, ref reason) = response {
|
||||
self.events_out.push(Err(HandlerErr::Inbound {
|
||||
error: RPCError::ErrorResponse(*code, reason.to_string()),
|
||||
proto: inbound_info.protocol,
|
||||
id: inbound_id,
|
||||
}));
|
||||
}
|
||||
|
||||
if matches!(self.state, HandlerState::Deactivated) {
|
||||
// we no longer send responses after the handler is deactivated
|
||||
debug!(self.log, "Response not sent. Deactivated handler";
|
||||
"response" => %response, "id" => inbound_id);
|
||||
return;
|
||||
}
|
||||
inbound_info.pending_items.push(response);
|
||||
}
|
||||
}
|
||||
|
||||
impl<TSpec> ProtocolsHandler for RPCHandler<TSpec>
|
||||
where
|
||||
TSpec: EthSpec,
|
||||
{
|
||||
type InEvent = RPCSend<TSpec>;
|
||||
type OutEvent = HandlerEvent<TSpec>;
|
||||
type Error = RPCError;
|
||||
type InboundProtocol = RPCProtocol<TSpec>;
|
||||
type OutboundProtocol = OutboundRequestContainer<TSpec>;
|
||||
type OutboundOpenInfo = (RequestId, OutboundRequest<TSpec>); // Keep track of the id and the request
|
||||
type InboundOpenInfo = ();
|
||||
|
||||
fn listen_protocol(&self) -> SubstreamProtocol<Self::InboundProtocol, ()> {
|
||||
self.listen_protocol.clone()
|
||||
}
|
||||
|
||||
fn inject_fully_negotiated_inbound(
|
||||
&mut self,
|
||||
substream: <Self::InboundProtocol as InboundUpgrade<NegotiatedSubstream>>::Output,
|
||||
_info: Self::InboundOpenInfo,
|
||||
) {
|
||||
// only accept new peer requests when active
|
||||
if !matches!(self.state, HandlerState::Active) {
|
||||
return;
|
||||
}
|
||||
|
||||
let (req, substream) = substream;
|
||||
let expected_responses = req.expected_responses();
|
||||
|
||||
// store requests that expect responses
|
||||
if expected_responses > 0 {
|
||||
// Store the stream and tag the output.
|
||||
let delay_key = self.inbound_substreams_delay.insert(
|
||||
self.current_inbound_substream_id,
|
||||
Duration::from_secs(RESPONSE_TIMEOUT),
|
||||
);
|
||||
let awaiting_stream = InboundState::Idle(substream);
|
||||
self.inbound_substreams.insert(
|
||||
self.current_inbound_substream_id,
|
||||
InboundInfo {
|
||||
state: awaiting_stream,
|
||||
pending_items: vec![],
|
||||
delay_key: Some(delay_key),
|
||||
protocol: req.protocol(),
|
||||
remaining_chunks: expected_responses,
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
// If we received a goodbye, shutdown the connection.
|
||||
if let InboundRequest::Goodbye(_) = req {
|
||||
self.shutdown(None);
|
||||
}
|
||||
|
||||
self.events_out.push(Ok(RPCReceived::Request(
|
||||
self.current_inbound_substream_id,
|
||||
req,
|
||||
)));
|
||||
self.current_inbound_substream_id.0 += 1;
|
||||
}
|
||||
|
||||
/// Called when an outbound substream we requested has fully negotiated.
///
/// Decrements the in-flight dial counter and, if the request expects
/// responses, registers the substream (with a response timeout) under a fresh
/// outbound id so `poll` can drive it.
fn inject_fully_negotiated_outbound(
    &mut self,
    out: <Self::OutboundProtocol as OutboundUpgrade<NegotiatedSubstream>>::Output,
    request_info: Self::OutboundOpenInfo,
) {
    self.dial_negotiated -= 1;
    let (id, request) = request_info;
    let proto = request.protocol();

    // accept outbound connections only if the handler is not deactivated
    if matches!(self.state, HandlerState::Deactivated) {
        self.events_out.push(Err(HandlerErr::Outbound {
            error: RPCError::HandlerRejected,
            proto,
            id,
        }));
    }
    // NOTE(review): when deactivated the rejection error is pushed but
    // execution falls through and the substream is still registered below —
    // confirm this is intentional (presumably `poll` closes it via the
    // deactivated path) rather than an early-return that was missed.

    // add the stream to substreams if we expect a response, otherwise drop the stream.
    let expected_responses = request.expected_responses();
    if expected_responses > 0 {
        // new outbound request. Store the stream and tag the output.
        let delay_key = self.outbound_substreams_delay.insert(
            self.current_outbound_substream_id,
            Duration::from_secs(RESPONSE_TIMEOUT),
        );
        let awaiting_stream = OutboundSubstreamState::RequestPendingResponse {
            substream: Box::new(out),
            request,
        };
        // Chunk accounting is only enforced for multi-response requests;
        // single-response requests keep `None` here.
        let expected_responses = if expected_responses > 1 {
            // Currently enforced only for multiple responses
            Some(expected_responses)
        } else {
            None
        };
        if self
            .outbound_substreams
            .insert(
                self.current_outbound_substream_id,
                OutboundInfo {
                    state: awaiting_stream,
                    delay_key,
                    proto,
                    remaining_chunks: expected_responses,
                    req_id: id,
                },
            )
            .is_some()
        {
            // Overwriting an entry would mean the id counter wrapped or was
            // reused — a coding error worth shouting about.
            crit!(self.log, "Duplicate outbound substream id"; "id" => self.current_outbound_substream_id);
        }
        self.current_outbound_substream_id.0 += 1;
    }
}
|
||||
|
||||
fn inject_event(&mut self, rpc_event: Self::InEvent) {
|
||||
match rpc_event {
|
||||
RPCSend::Request(id, req) => self.send_request(id, req),
|
||||
RPCSend::Response(inbound_id, response) => self.send_response(inbound_id, response),
|
||||
RPCSend::Shutdown(reason) => self.shutdown(Some(reason)),
|
||||
}
|
||||
}
|
||||
|
||||
/// Called when negotiating an outbound substream failed.
///
/// Transient I/O errors are retried up to `IO_ERROR_RETRIES` times by
/// re-queuing the request; any other failure is mapped to an `RPCError` and
/// reported upward against the original request id.
fn inject_dial_upgrade_error(
    &mut self,
    request_info: Self::OutboundOpenInfo,
    error: ProtocolsHandlerUpgrErr<
        <Self::OutboundProtocol as OutboundUpgrade<NegotiatedSubstream>>::Error,
    >,
) {
    let (id, req) = request_info;
    // Retry on raw I/O errors only — these are plausibly transient.
    if let ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(RPCError::IoError(_))) = error {
        self.outbound_io_error_retries += 1;
        if self.outbound_io_error_retries < IO_ERROR_RETRIES {
            self.send_request(id, req);
            return;
        }
    }

    // This dialing is now considered failed
    self.dial_negotiated -= 1;

    // Reset the retry counter for the next request.
    self.outbound_io_error_retries = 0;
    // map the error
    let error = match error {
        ProtocolsHandlerUpgrErr::Timer => RPCError::InternalError("Timer failed"),
        ProtocolsHandlerUpgrErr::Timeout => RPCError::NegotiationTimeout,
        // Errors our own upgrade produced pass through unchanged.
        ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(e)) => e,
        ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(NegotiationError::Failed)) => {
            RPCError::UnsupportedProtocol
        }
        ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(
            NegotiationError::ProtocolError(e),
        )) => match e {
            ProtocolError::IoError(io_err) => RPCError::IoError(io_err.to_string()),
            ProtocolError::InvalidProtocol => {
                RPCError::InternalError("Protocol was deemed invalid")
            }
            ProtocolError::InvalidMessage | ProtocolError::TooManyProtocols => {
                // Peer is sending invalid data during the negotiation phase, not
                // participating in the protocol
                RPCError::InvalidData
            }
        },
    };
    self.events_out.push(Err(HandlerErr::Outbound {
        error,
        proto: req.protocol(),
        id,
    }));
}
|
||||
|
||||
fn connection_keep_alive(&self) -> KeepAlive {
|
||||
// Check that we don't have outbound items pending for dialing, nor dialing, nor
|
||||
// established. Also check that there are no established inbound substreams.
|
||||
// Errors and events need to be reported back, so check those too.
|
||||
let should_shutdown = match self.state {
|
||||
HandlerState::ShuttingDown(_) => {
|
||||
self.dial_queue.is_empty()
|
||||
&& self.outbound_substreams.is_empty()
|
||||
&& self.inbound_substreams.is_empty()
|
||||
&& self.events_out.is_empty()
|
||||
&& self.dial_negotiated == 0
|
||||
}
|
||||
HandlerState::Deactivated => {
|
||||
// Regardless of events, the timeout has expired. Force the disconnect.
|
||||
true
|
||||
}
|
||||
_ => false,
|
||||
};
|
||||
if should_shutdown {
|
||||
KeepAlive::No
|
||||
} else {
|
||||
KeepAlive::Yes
|
||||
}
|
||||
}
|
||||
|
||||
/// Drives the handler's state machine.
///
/// In order: flushes queued events, enforces the shutdown timer, expires
/// inbound/outbound substream timeouts, advances every inbound and outbound
/// substream, opens new outbound substreams from the dial queue, and finally
/// closes the connection once a shutdown has fully drained.
fn poll(
    &mut self,
    cx: &mut Context<'_>,
) -> Poll<
    ProtocolsHandlerEvent<
        Self::OutboundProtocol,
        Self::OutboundOpenInfo,
        Self::OutEvent,
        Self::Error,
    >,
> {
    // return any events that need to be reported
    if !self.events_out.is_empty() {
        return Poll::Ready(ProtocolsHandlerEvent::Custom(self.events_out.remove(0)));
    } else {
        // Reclaim capacity once the event queue has drained.
        self.events_out.shrink_to_fit();
    }

    // Check if we are shutting down, and if the timer ran out
    if let HandlerState::ShuttingDown(delay) = &self.state {
        if delay.is_elapsed() {
            self.state = HandlerState::Deactivated;
            debug!(self.log, "Handler deactivated");
            return Poll::Ready(ProtocolsHandlerEvent::Close(RPCError::InternalError(
                "Shutdown timeout",
            )));
        }
    }

    // purge expired inbound substreams and send an error
    loop {
        match self.inbound_substreams_delay.poll_expired(cx) {
            Poll::Ready(Some(Ok(inbound_id))) => {
                // handle a stream timeout for various states
                if let Some(info) = self.inbound_substreams.get_mut(inbound_id.get_ref()) {
                    // the delay has been removed
                    info.delay_key = None;
                    self.events_out.push(Err(HandlerErr::Inbound {
                        error: RPCError::StreamTimeout,
                        proto: info.protocol,
                        id: *inbound_id.get_ref(),
                    }));

                    if info.pending_items.last().map(|l| l.close_after()) == Some(false) {
                        // if the last chunk does not close the stream, append an error
                        info.pending_items.push(RPCCodedResponse::Error(
                            RPCResponseErrorCode::ServerError,
                            "Request timed out".into(),
                        ));
                    }
                }
            }
            Poll::Ready(Some(Err(e))) => {
                warn!(self.log, "Inbound substream poll failed"; "error" => ?e);
                // drops the peer if we cannot read the delay queue
                return Poll::Ready(ProtocolsHandlerEvent::Close(RPCError::InternalError(
                    "Could not poll inbound stream timer",
                )));
            }
            Poll::Pending | Poll::Ready(None) => break,
        }
    }

    // purge expired outbound substreams
    loop {
        match self.outbound_substreams_delay.poll_expired(cx) {
            Poll::Ready(Some(Ok(outbound_id))) => {
                if let Some(OutboundInfo { proto, req_id, .. }) =
                    self.outbound_substreams.remove(outbound_id.get_ref())
                {
                    let outbound_err = HandlerErr::Outbound {
                        id: req_id,
                        proto,
                        error: RPCError::StreamTimeout,
                    };
                    // notify the user
                    return Poll::Ready(ProtocolsHandlerEvent::Custom(Err(outbound_err)));
                } else {
                    // The delay queue and the substream map disagree — a
                    // bookkeeping bug, not a peer fault.
                    crit!(self.log, "timed out substream not in the books"; "stream_id" => outbound_id.get_ref());
                }
            }
            Poll::Ready(Some(Err(e))) => {
                warn!(self.log, "Outbound substream poll failed"; "error" => ?e);
                return Poll::Ready(ProtocolsHandlerEvent::Close(RPCError::InternalError(
                    "Could not poll outbound stream timer",
                )));
            }
            Poll::Pending | Poll::Ready(None) => break,
        }
    }

    // when deactivated, close all streams
    let deactivated = matches!(self.state, HandlerState::Deactivated);

    // drive inbound streams that need to be processed
    let mut substreams_to_remove = Vec::new(); // Closed substreams that need to be removed
    for (id, info) in self.inbound_substreams.iter_mut() {
        loop {
            // Take ownership of the state by swapping in a poison marker; every
            // arm below must write a real state back before breaking.
            match std::mem::replace(&mut info.state, InboundState::Poisoned) {
                InboundState::Idle(substream) if !deactivated => {
                    if !info.pending_items.is_empty() {
                        // Queue all pending chunks into one sending future.
                        let to_send = std::mem::take(&mut info.pending_items);
                        let fut = process_inbound_substream(
                            substream,
                            info.remaining_chunks,
                            to_send,
                        )
                        .boxed();
                        info.state = InboundState::Busy(Box::pin(fut));
                    } else {
                        info.state = InboundState::Idle(substream);
                        break;
                    }
                }
                InboundState::Idle(mut substream) => {
                    // handler is deactivated, close the stream and mark it for removal
                    match substream.close().poll_unpin(cx) {
                        // if we can't close right now, put the substream back and try again later
                        Poll::Pending => info.state = InboundState::Idle(substream),
                        Poll::Ready(res) => {
                            // The substream closed, we remove it
                            substreams_to_remove.push(*id);
                            if let Some(ref delay_key) = info.delay_key {
                                self.inbound_substreams_delay.remove(delay_key);
                            }
                            if let Err(error) = res {
                                self.events_out.push(Err(HandlerErr::Inbound {
                                    error,
                                    proto: info.protocol,
                                    id: *id,
                                }));
                            }
                            if info.pending_items.last().map(|l| l.close_after()) == Some(false)
                            {
                                // if the request was still active, report back to cancel it
                                self.events_out.push(Err(HandlerErr::Inbound {
                                    error: RPCError::HandlerRejected,
                                    proto: info.protocol,
                                    id: *id,
                                }));
                            }
                        }
                    }
                    break;
                }
                InboundState::Busy(mut fut) => {
                    // first check if sending finished
                    match fut.poll_unpin(cx) {
                        Poll::Ready((substream, errors, remove, new_remaining_chunks)) => {
                            info.remaining_chunks = new_remaining_chunks;
                            // report any error
                            for error in errors {
                                self.events_out.push(Err(HandlerErr::Inbound {
                                    error,
                                    proto: info.protocol,
                                    id: *id,
                                }))
                            }
                            if remove {
                                substreams_to_remove.push(*id);
                                if let Some(ref delay_key) = info.delay_key {
                                    self.inbound_substreams_delay.remove(delay_key);
                                }
                                break;
                            } else {
                                // If we are not removing this substream, we reset the timer.
                                // Each chunk is allowed RESPONSE_TIMEOUT to be sent.
                                if let Some(ref delay_key) = info.delay_key {
                                    self.inbound_substreams_delay.reset(
                                        delay_key,
                                        Duration::from_secs(RESPONSE_TIMEOUT),
                                    );
                                }
                            }

                            // The stream may be currently idle. Attempt to process more
                            // elements

                            if !deactivated && !info.pending_items.is_empty() {
                                let to_send = std::mem::take(&mut info.pending_items);
                                let fut = process_inbound_substream(
                                    substream,
                                    info.remaining_chunks,
                                    to_send,
                                )
                                .boxed();
                                info.state = InboundState::Busy(Box::pin(fut));
                            } else {
                                info.state = InboundState::Idle(substream);
                                break;
                            }
                        }
                        Poll::Pending => {
                            info.state = InboundState::Busy(fut);
                            break;
                        }
                    };
                }
                InboundState::Poisoned => unreachable!("Poisoned inbound substream"),
            }
        }
    }

    // remove closed substreams
    for inbound_id in substreams_to_remove {
        self.inbound_substreams.remove(&inbound_id);
    }

    // drive outbound streams that need to be processed
    // (ids are collected first because the map is mutated while iterating)
    for outbound_id in self.outbound_substreams.keys().copied().collect::<Vec<_>>() {
        // get the state and mark it as poisoned
        let (mut entry, state) = match self.outbound_substreams.entry(outbound_id) {
            Entry::Occupied(mut entry) => {
                let state = std::mem::replace(
                    &mut entry.get_mut().state,
                    OutboundSubstreamState::Poisoned,
                );
                (entry, state)
            }
            // Every collected id came from the map and nothing removed it yet.
            Entry::Vacant(_) => unreachable!(),
        };

        match state {
            OutboundSubstreamState::RequestPendingResponse {
                substream,
                request: _,
            } if deactivated => {
                // the handler is deactivated. Close the stream
                entry.get_mut().state = OutboundSubstreamState::Closing(substream);
                self.events_out.push(Err(HandlerErr::Outbound {
                    error: RPCError::HandlerRejected,
                    proto: entry.get().proto,
                    id: entry.get().req_id,
                }))
            }
            OutboundSubstreamState::RequestPendingResponse {
                mut substream,
                request,
            } => match substream.poll_next_unpin(cx) {
                Poll::Ready(Some(Ok(response))) => {
                    if request.expected_responses() > 1 && !response.close_after() {
                        let substream_entry = entry.get_mut();
                        let delay_key = &substream_entry.delay_key;
                        // chunks left after this one
                        // (`None` — single-response bookkeeping — counts as none left)
                        let remaining_chunks = substream_entry
                            .remaining_chunks
                            .map(|count| count.saturating_sub(1))
                            .unwrap_or_else(|| 0);
                        if remaining_chunks == 0 {
                            // this is the last expected message, close the stream as all expected chunks have been received
                            substream_entry.state = OutboundSubstreamState::Closing(substream);
                        } else {
                            // If the response chunk was expected update the remaining number of chunks expected and reset the Timeout
                            substream_entry.state =
                                OutboundSubstreamState::RequestPendingResponse {
                                    substream,
                                    request,
                                };
                            substream_entry.remaining_chunks = Some(remaining_chunks);
                            self.outbound_substreams_delay
                                .reset(delay_key, Duration::from_secs(RESPONSE_TIMEOUT));
                        }
                    } else {
                        // either this is a single response request or this response closes the
                        // stream
                        entry.get_mut().state = OutboundSubstreamState::Closing(substream);
                    }

                    // Check what type of response we got and report it accordingly
                    let id = entry.get().req_id;
                    let proto = entry.get().proto;

                    let received = match response {
                        RPCCodedResponse::StreamTermination(t) => {
                            Ok(RPCReceived::EndOfStream(id, t))
                        }
                        RPCCodedResponse::Success(resp) => Ok(RPCReceived::Response(id, resp)),
                        RPCCodedResponse::Error(ref code, ref r) => Err(HandlerErr::Outbound {
                            id,
                            proto,
                            error: RPCError::ErrorResponse(*code, r.to_string()),
                        }),
                    };

                    return Poll::Ready(ProtocolsHandlerEvent::Custom(received));
                }
                Poll::Ready(None) => {
                    // stream closed
                    // if we expected multiple streams send a stream termination,
                    // else report the stream terminating only.
                    //trace!(self.log, "RPC Response - stream closed by remote");
                    // drop the stream
                    let delay_key = &entry.get().delay_key;
                    let request_id = entry.get().req_id;
                    self.outbound_substreams_delay.remove(delay_key);
                    entry.remove_entry();
                    // notify the application error
                    if request.expected_responses() > 1 {
                        // return an end of stream result
                        return Poll::Ready(ProtocolsHandlerEvent::Custom(Ok(
                            RPCReceived::EndOfStream(request_id, request.stream_termination()),
                        )));
                    }

                    // else we return an error, stream should not have closed early.
                    let outbound_err = HandlerErr::Outbound {
                        id: request_id,
                        proto: request.protocol(),
                        error: RPCError::IncompleteStream,
                    };
                    return Poll::Ready(ProtocolsHandlerEvent::Custom(Err(outbound_err)));
                }
                Poll::Pending => {
                    // Nothing arrived; restore the state taken above.
                    entry.get_mut().state =
                        OutboundSubstreamState::RequestPendingResponse { substream, request }
                }
                Poll::Ready(Some(Err(e))) => {
                    // drop the stream
                    let delay_key = &entry.get().delay_key;
                    self.outbound_substreams_delay.remove(delay_key);
                    let outbound_err = HandlerErr::Outbound {
                        id: entry.get().req_id,
                        proto: entry.get().proto,
                        error: e,
                    };
                    entry.remove_entry();
                    return Poll::Ready(ProtocolsHandlerEvent::Custom(Err(outbound_err)));
                }
            },
            OutboundSubstreamState::Closing(mut substream) => {
                match Sink::poll_close(Pin::new(&mut substream), cx) {
                    Poll::Ready(_) => {
                        // drop the stream and its corresponding timeout
                        let delay_key = &entry.get().delay_key;
                        let protocol = entry.get().proto;
                        let request_id = entry.get().req_id;
                        self.outbound_substreams_delay.remove(delay_key);
                        entry.remove_entry();

                        // report the stream termination to the user
                        //
                        // Streams can be terminated here if a responder tries to
                        // continue sending responses beyond what we would expect. Here
                        // we simply terminate the stream and report a stream
                        // termination to the application
                        let termination = match protocol {
                            Protocol::BlocksByRange => Some(ResponseTermination::BlocksByRange),
                            Protocol::BlocksByRoot => Some(ResponseTermination::BlocksByRoot),
                            _ => None, // all other protocols are do not have multiple responses and we do not inform the user, we simply drop the stream.
                        };

                        if let Some(termination) = termination {
                            return Poll::Ready(ProtocolsHandlerEvent::Custom(Ok(
                                RPCReceived::EndOfStream(request_id, termination),
                            )));
                        }
                    }
                    Poll::Pending => {
                        entry.get_mut().state = OutboundSubstreamState::Closing(substream);
                    }
                }
            }
            OutboundSubstreamState::Poisoned => {
                crit!(self.log, "Poisoned outbound substream");
                unreachable!("Coding Error: Outbound substream is poisoned")
            }
        }
    }

    // establish outbound substreams
    if !self.dial_queue.is_empty() && self.dial_negotiated < self.max_dial_negotiated {
        self.dial_negotiated += 1;
        let (id, req) = self.dial_queue.remove(0);
        self.dial_queue.shrink_to_fit();
        return Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest {
            protocol: SubstreamProtocol::new(
                OutboundRequestContainer {
                    req: req.clone(),
                    fork_context: self.fork_context.clone(),
                },
                (),
            )
            .map_info(|()| (id, req)),
        });
    }

    // Check if we have completed sending a goodbye, disconnect.
    if let HandlerState::ShuttingDown(_) = self.state {
        if self.dial_queue.is_empty()
            && self.outbound_substreams.is_empty()
            && self.inbound_substreams.is_empty()
            && self.events_out.is_empty()
            && self.dial_negotiated == 0
        {
            return Poll::Ready(ProtocolsHandlerEvent::Close(RPCError::Disconnected));
        }
    }

    Poll::Pending
}
|
||||
}
|
||||
|
||||
impl slog::Value for SubstreamId {
    /// Logs a `SubstreamId` as its inner numeric counter value.
    fn serialize(
        &self,
        record: &slog::Record,
        key: slog::Key,
        serializer: &mut dyn slog::Serializer,
    ) -> slog::Result {
        // Fully-qualified call: delegate to the inner integer's own
        // `slog::Value` implementation (avoids method-resolution ambiguity).
        slog::Value::serialize(&self.0, record, key, serializer)
    }
}
|
||||
|
||||
/// Sends the queued items to the peer.
|
||||
async fn process_inbound_substream<TSpec: EthSpec>(
|
||||
mut substream: InboundSubstream<TSpec>,
|
||||
mut remaining_chunks: u64,
|
||||
pending_items: Vec<RPCCodedResponse<TSpec>>,
|
||||
) -> InboundProcessingOutput<TSpec> {
|
||||
let mut errors = Vec::new();
|
||||
let mut substream_closed = false;
|
||||
|
||||
for item in pending_items {
|
||||
if !substream_closed {
|
||||
if matches!(item, RPCCodedResponse::StreamTermination(_)) {
|
||||
substream.close().await.unwrap_or_else(|e| errors.push(e));
|
||||
substream_closed = true;
|
||||
} else {
|
||||
remaining_chunks = remaining_chunks.saturating_sub(1);
|
||||
// chunks that are not stream terminations get sent, and the stream is closed if
|
||||
// the response is an error
|
||||
let is_error = matches!(item, RPCCodedResponse::Error(..));
|
||||
|
||||
substream
|
||||
.send(item)
|
||||
.await
|
||||
.unwrap_or_else(|e| errors.push(e));
|
||||
|
||||
if remaining_chunks == 0 || is_error {
|
||||
substream.close().await.unwrap_or_else(|e| errors.push(e));
|
||||
substream_closed = true;
|
||||
}
|
||||
}
|
||||
} else if matches!(item, RPCCodedResponse::StreamTermination(_)) {
|
||||
// The sender closed the stream before us, ignore this.
|
||||
} else {
|
||||
// we have more items after a closed substream, report those as errors
|
||||
errors.push(RPCError::InternalError(
|
||||
"Sending responses to closed inbound substream",
|
||||
));
|
||||
}
|
||||
}
|
||||
(substream, errors, substream_closed, remaining_chunks)
|
||||
}
|
||||
449
beacon_node/lighthouse_network/src/rpc/methods.rs
Normal file
449
beacon_node/lighthouse_network/src/rpc/methods.rs
Normal file
@@ -0,0 +1,449 @@
|
||||
//! Available RPC methods types and ids.
|
||||
|
||||
use crate::types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield};
|
||||
use regex::bytes::Regex;
|
||||
use serde::Serialize;
|
||||
use ssz_derive::{Decode, Encode};
|
||||
use ssz_types::{
|
||||
typenum::{U1024, U256},
|
||||
VariableList,
|
||||
};
|
||||
use std::ops::Deref;
|
||||
use strum::AsStaticStr;
|
||||
use superstruct::superstruct;
|
||||
use types::{Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot};
|
||||
|
||||
/// Maximum number of blocks in a single request.
pub type MaxRequestBlocks = U1024;
// Runtime mirror of `MaxRequestBlocks`; must stay in sync with the typenum above.
pub const MAX_REQUEST_BLOCKS: u64 = 1024;

/// Maximum length of error message.
pub type MaxErrorLen = U256;
// Runtime mirror of `MaxErrorLen`; must stay in sync with the typenum above.
pub const MAX_ERROR_LEN: u64 = 256;
|
||||
|
||||
/// Wrapper over SSZ List to represent error message in rpc responses.
///
/// The payload is raw bytes, capped at `MaxErrorLen` by the list type.
#[derive(Debug, Clone)]
pub struct ErrorType(pub VariableList<u8, MaxErrorLen>);
|
||||
|
||||
impl From<String> for ErrorType {
|
||||
fn from(s: String) -> Self {
|
||||
Self(VariableList::from(s.as_bytes().to_vec()))
|
||||
}
|
||||
}
|
||||
|
||||
impl From<&str> for ErrorType {
|
||||
fn from(s: &str) -> Self {
|
||||
Self(VariableList::from(s.as_bytes().to_vec()))
|
||||
}
|
||||
}
|
||||
|
||||
// Expose the inner list's methods (len, iteration, slicing) directly on
// `ErrorType`.
impl Deref for ErrorType {
    type Target = VariableList<u8, MaxErrorLen>;
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
|
||||
|
||||
impl ToString for ErrorType {
|
||||
fn to_string(&self) -> String {
|
||||
#[allow(clippy::invalid_regex)]
|
||||
let re = Regex::new("\\p{C}").expect("Regex is valid");
|
||||
String::from_utf8_lossy(&re.replace_all(self.0.deref(), &b""[..])).to_string()
|
||||
}
|
||||
}
|
||||
|
||||
/* Request/Response data structures for RPC methods */
|
||||
|
||||
/* Requests */
|
||||
|
||||
/// Identifier of a request.
///
// NOTE: The handler stores the `RequestId` to inform back of responses and errors, but it's execution
// is independent of the contents on this type.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum RequestId {
    // NOTE(review): variant semantics inferred from names only — presumably
    // `Router` / `Behaviour` tag requests issued by those components and
    // `Sync(usize)` carries a sync-manager request index; confirm at callers.
    Router,
    Sync(usize),
    Behaviour,
}
|
||||
|
||||
/// The STATUS request/response handshake message.
///
/// SSZ `Encode`/`Decode` are derived, so this struct maps directly onto the
/// wire representation.
#[derive(Encode, Decode, Clone, Debug, PartialEq)]
pub struct StatusMessage {
    /// The fork version of the chain we are broadcasting.
    pub fork_digest: [u8; 4],

    /// Latest finalized root.
    pub finalized_root: Hash256,

    /// Latest finalized epoch.
    pub finalized_epoch: Epoch,

    /// The latest block root.
    pub head_root: Hash256,

    /// The slot associated with the latest block root.
    pub head_slot: Slot,
}
|
||||
|
||||
/// The PING request/response message.
///
/// Used both ways: the same structure carries the PING payload and the PONG
/// reply.
#[derive(Encode, Decode, Clone, Debug, PartialEq)]
pub struct Ping {
    /// The metadata sequence number.
    pub data: u64,
}
|
||||
|
||||
/// The METADATA response structure.
///
/// `superstruct` generates `MetaDataV1` (pre-Altair, no sync committees) and
/// `MetaDataV2` variants plus this `MetaData` enum over both; the transparent
/// SSZ enum behaviour encodes whichever variant is held without a selector.
#[superstruct(
    variants(V1, V2),
    variant_attributes(
        derive(Encode, Decode, Clone, Debug, PartialEq, Serialize),
        serde(bound = "T: EthSpec", deny_unknown_fields),
    )
)]
#[derive(Clone, Debug, PartialEq, Serialize, Encode)]
#[serde(bound = "T: EthSpec")]
#[ssz(enum_behaviour = "transparent")]
pub struct MetaData<T: EthSpec> {
    /// A sequential counter indicating when data gets modified.
    pub seq_number: u64,
    /// The persistent attestation subnet bitfield.
    pub attnets: EnrAttestationBitfield<T>,
    /// The persistent sync committee bitfield.
    // Only present in the V2 (Altair-onwards) variant.
    #[superstruct(only(V2))]
    pub syncnets: EnrSyncCommitteeBitfield<T>,
}
|
||||
|
||||
/// The reason given for a `Goodbye` message.
///
/// Note: any unknown `u64::into(n)` will resolve to `Goodbye::Unknown` for any unknown `n`,
/// however `GoodbyeReason::Unknown.into()` will go into `0_u64`. Therefore de-serializing then
/// re-serializing may not return the same bytes.
///
/// The discriminants below are the wire values; `From<u64>` must stay in sync
/// with them.
#[derive(Debug, Clone, PartialEq)]
pub enum GoodbyeReason {
    /// This node has shutdown.
    ClientShutdown = 1,

    /// Incompatible networks.
    IrrelevantNetwork = 2,

    /// Error/fault in the RPC.
    Fault = 3,

    /// Teku uses this code for not being able to verify a network.
    UnableToVerifyNetwork = 128,

    /// The node has too many connected peers.
    TooManyPeers = 129,

    /// Scored poorly.
    BadScore = 250,

    /// The peer is banned
    Banned = 251,

    /// The IP address the peer is using is banned.
    BannedIP = 252,

    /// Unknown reason.
    Unknown = 0,
}
|
||||
|
||||
impl From<u64> for GoodbyeReason {
    /// Decodes a wire value into a goodbye reason.
    ///
    /// Values without a dedicated variant fall back to
    /// `GoodbyeReason::Unknown`, so decoding never fails.
    fn from(code: u64) -> GoodbyeReason {
        match code {
            1 => Self::ClientShutdown,
            2 => Self::IrrelevantNetwork,
            3 => Self::Fault,
            128 => Self::UnableToVerifyNetwork,
            129 => Self::TooManyPeers,
            250 => Self::BadScore,
            251 => Self::Banned,
            252 => Self::BannedIP,
            _ => Self::Unknown,
        }
    }
}
|
||||
|
||||
impl From<GoodbyeReason> for u64 {
    /// Encodes a goodbye reason as its wire value (the enum discriminant).
    fn from(reason: GoodbyeReason) -> u64 {
        reason as u64
    }
}
|
||||
|
||||
// Goodbye reasons are serialized exactly like their `u64` wire value, so all
// sizing questions are delegated to `u64`.
impl ssz::Encode for GoodbyeReason {
    fn is_ssz_fixed_len() -> bool {
        <u64 as ssz::Encode>::is_ssz_fixed_len()
    }

    fn ssz_fixed_len() -> usize {
        <u64 as ssz::Encode>::ssz_fixed_len()
    }

    fn ssz_bytes_len(&self) -> usize {
        // Every variant encodes to the same length as any `u64`.
        0_u64.ssz_bytes_len()
    }

    fn ssz_append(&self, buf: &mut Vec<u8>) {
        // `From<GoodbyeReason> for u64` consumes its input, hence the clone.
        let conv: u64 = self.clone().into();
        conv.ssz_append(buf)
    }
}
|
||||
|
||||
// Decoding mirrors encoding: read a `u64` and map it through `From<u64>`.
// Unknown wire values become `GoodbyeReason::Unknown` (see the enum docs for
// the round-trip caveat).
impl ssz::Decode for GoodbyeReason {
    fn is_ssz_fixed_len() -> bool {
        <u64 as ssz::Decode>::is_ssz_fixed_len()
    }

    fn ssz_fixed_len() -> usize {
        <u64 as ssz::Decode>::ssz_fixed_len()
    }

    fn from_ssz_bytes(bytes: &[u8]) -> Result<Self, ssz::DecodeError> {
        u64::from_ssz_bytes(bytes).map(|n| n.into())
    }
}
|
||||
|
||||
/// Request a number of beacon block roots from a peer.
#[derive(Encode, Decode, Clone, Debug, PartialEq)]
pub struct BlocksByRangeRequest {
    /// The starting slot to request blocks.
    pub start_slot: u64,

    /// The number of blocks from the start slot.
    // Bounded elsewhere by `MAX_REQUEST_BLOCKS`.
    pub count: u64,

    /// The step increment to receive blocks.
    ///
    /// A value of 1 returns every block.
    /// A value of 2 returns every second block.
    /// A value of 3 returns every third block and so on.
    pub step: u64,
}
|
||||
|
||||
/// Request a number of beacon block bodies from a peer.
#[derive(Clone, Debug, PartialEq)]
pub struct BlocksByRootRequest {
    /// The list of beacon block bodies being requested.
    // Capped at `MaxRequestBlocks` roots by the list type.
    pub block_roots: VariableList<Hash256, MaxRequestBlocks>,
}
|
||||
|
||||
/* RPC Handling and Grouping */
|
||||
// Collection of enums and structs used by the Codecs to encode/decode RPC messages
|
||||
|
||||
/// A successful response payload, one variant per RPC method.
#[derive(Debug, Clone, PartialEq)]
pub enum RPCResponse<T: EthSpec> {
    /// A response to a STATUS handshake request.
    Status(StatusMessage),

    /// A response to a get BLOCKS_BY_RANGE request. A None response signifies the end of the
    /// batch.
    BlocksByRange(Box<SignedBeaconBlock<T>>),

    /// A response to a get BLOCKS_BY_ROOT request.
    BlocksByRoot(Box<SignedBeaconBlock<T>>),

    /// A PONG response to a PING request.
    Pong(Ping),

    /// A response to a META_DATA request.
    MetaData(MetaData<T>),
}
|
||||
|
||||
/// Indicates which response is being terminated by a stream termination response.
///
/// Only multi-chunk methods have terminations; single-response methods end
/// implicitly.
#[derive(Debug, Clone)]
pub enum ResponseTermination {
    /// Blocks by range stream termination.
    BlocksByRange,

    /// Blocks by root stream termination.
    BlocksByRoot,
}
|
||||
|
||||
/// The structured response containing a result/code indicating success or failure
/// and the contents of the response
#[derive(Debug, Clone)]
pub enum RPCCodedResponse<T: EthSpec> {
    /// The response is a successful.
    Success(RPCResponse<T>),

    /// The responder reported an error, with its code and a message payload.
    Error(RPCResponseErrorCode, ErrorType),

    /// Received a stream termination indicating which response is being terminated.
    StreamTermination(ResponseTermination),
}
|
||||
|
||||
/// The code assigned to an erroneous `RPCResponse`.
///
/// Wire byte values are assigned in `as_u8` and parsed in
/// `RPCCodedResponse::from_error`.
#[derive(Debug, Clone, Copy, PartialEq, AsStaticStr)]
#[strum(serialize_all = "snake_case")]
pub enum RPCResponseErrorCode {
    /// The responder throttled this request.
    RateLimited,
    /// The request was malformed or out of bounds.
    InvalidRequest,
    /// The responder hit an internal error.
    ServerError,
    /// Error spec'd to indicate that a peer does not have blocks on a requested range.
    ResourceUnavailable,
    /// Any wire value without a dedicated variant.
    Unknown,
}
|
||||
|
||||
impl<T: EthSpec> RPCCodedResponse<T> {
|
||||
/// Used to encode the response in the codec.
|
||||
pub fn as_u8(&self) -> Option<u8> {
|
||||
match self {
|
||||
RPCCodedResponse::Success(_) => Some(0),
|
||||
RPCCodedResponse::Error(code, _) => Some(code.as_u8()),
|
||||
RPCCodedResponse::StreamTermination(_) => None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Tells the codec whether to decode as an RPCResponse or an error.
|
||||
pub fn is_response(response_code: u8) -> bool {
|
||||
matches!(response_code, 0)
|
||||
}
|
||||
|
||||
/// Builds an RPCCodedResponse from a response code and an ErrorMessage
|
||||
pub fn from_error(response_code: u8, err: ErrorType) -> Self {
|
||||
let code = match response_code {
|
||||
1 => RPCResponseErrorCode::InvalidRequest,
|
||||
2 => RPCResponseErrorCode::ServerError,
|
||||
3 => RPCResponseErrorCode::ResourceUnavailable,
|
||||
139 => RPCResponseErrorCode::RateLimited,
|
||||
_ => RPCResponseErrorCode::Unknown,
|
||||
};
|
||||
RPCCodedResponse::Error(code, err)
|
||||
}
|
||||
|
||||
/// Specifies which response allows for multiple chunks for the stream handler.
|
||||
pub fn multiple_responses(&self) -> bool {
|
||||
match self {
|
||||
RPCCodedResponse::Success(resp) => match resp {
|
||||
RPCResponse::Status(_) => false,
|
||||
RPCResponse::BlocksByRange(_) => true,
|
||||
RPCResponse::BlocksByRoot(_) => true,
|
||||
RPCResponse::Pong(_) => false,
|
||||
RPCResponse::MetaData(_) => false,
|
||||
},
|
||||
RPCCodedResponse::Error(_, _) => true,
|
||||
// Stream terminations are part of responses that have chunks
|
||||
RPCCodedResponse::StreamTermination(_) => true,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns true if this response always terminates the stream.
|
||||
pub fn close_after(&self) -> bool {
|
||||
!matches!(self, RPCCodedResponse::Success(_))
|
||||
}
|
||||
}
|
||||
|
||||
impl RPCResponseErrorCode {
|
||||
fn as_u8(&self) -> u8 {
|
||||
match self {
|
||||
RPCResponseErrorCode::InvalidRequest => 1,
|
||||
RPCResponseErrorCode::ServerError => 2,
|
||||
RPCResponseErrorCode::ResourceUnavailable => 3,
|
||||
RPCResponseErrorCode::Unknown => 255,
|
||||
RPCResponseErrorCode::RateLimited => 139,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Display for RPCResponseErrorCode {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
let repr = match self {
|
||||
RPCResponseErrorCode::InvalidRequest => "The request was invalid",
|
||||
RPCResponseErrorCode::ResourceUnavailable => "Resource unavailable",
|
||||
RPCResponseErrorCode::ServerError => "Server error occurred",
|
||||
RPCResponseErrorCode::Unknown => "Unknown error occurred",
|
||||
RPCResponseErrorCode::RateLimited => "Rate limited",
|
||||
};
|
||||
f.write_str(repr)
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Display for StatusMessage {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "Status Message: Fork Digest: {:?}, Finalized Root: {}, Finalized Epoch: {}, Head Root: {}, Head Slot: {}", self.fork_digest, self.finalized_root, self.finalized_epoch, self.head_root, self.head_slot)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: EthSpec> std::fmt::Display for RPCResponse<T> {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
RPCResponse::Status(status) => write!(f, "{}", status),
|
||||
RPCResponse::BlocksByRange(block) => {
|
||||
write!(f, "BlocksByRange: Block slot: {}", block.slot())
|
||||
}
|
||||
RPCResponse::BlocksByRoot(block) => {
|
||||
write!(f, "BlocksByRoot: Block slot: {}", block.slot())
|
||||
}
|
||||
RPCResponse::Pong(ping) => write!(f, "Pong: {}", ping.data),
|
||||
RPCResponse::MetaData(metadata) => write!(f, "Metadata: {}", metadata.seq_number()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: EthSpec> std::fmt::Display for RPCCodedResponse<T> {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
RPCCodedResponse::Success(res) => write!(f, "{}", res),
|
||||
RPCCodedResponse::Error(code, err) => write!(f, "{}: {}", code, err.to_string()),
|
||||
RPCCodedResponse::StreamTermination(_) => write!(f, "Stream Termination"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Display for GoodbyeReason {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
GoodbyeReason::ClientShutdown => write!(f, "Client Shutdown"),
|
||||
GoodbyeReason::IrrelevantNetwork => write!(f, "Irrelevant Network"),
|
||||
GoodbyeReason::Fault => write!(f, "Fault"),
|
||||
GoodbyeReason::UnableToVerifyNetwork => write!(f, "Unable to verify network"),
|
||||
GoodbyeReason::TooManyPeers => write!(f, "Too many peers"),
|
||||
GoodbyeReason::BadScore => write!(f, "Bad Score"),
|
||||
GoodbyeReason::Banned => write!(f, "Banned"),
|
||||
GoodbyeReason::BannedIP => write!(f, "BannedIP"),
|
||||
GoodbyeReason::Unknown => write!(f, "Unknown Reason"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Display for BlocksByRangeRequest {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(
|
||||
f,
|
||||
"Start Slot: {}, Count: {}, Step: {}",
|
||||
self.start_slot, self.count, self.step
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl slog::KV for StatusMessage {
|
||||
fn serialize(
|
||||
&self,
|
||||
record: &slog::Record,
|
||||
serializer: &mut dyn slog::Serializer,
|
||||
) -> slog::Result {
|
||||
use slog::Value;
|
||||
serializer.emit_arguments("fork_digest", &format_args!("{:?}", self.fork_digest))?;
|
||||
Value::serialize(&self.finalized_epoch, record, "finalized_epoch", serializer)?;
|
||||
serializer.emit_arguments("finalized_root", &format_args!("{}", self.finalized_root))?;
|
||||
Value::serialize(&self.head_slot, record, "head_slot", serializer)?;
|
||||
serializer.emit_arguments("head_root", &format_args!("{}", self.head_root))?;
|
||||
slog::Result::Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl slog::Value for RequestId {
|
||||
fn serialize(
|
||||
&self,
|
||||
record: &slog::Record,
|
||||
key: slog::Key,
|
||||
serializer: &mut dyn slog::Serializer,
|
||||
) -> slog::Result {
|
||||
match self {
|
||||
RequestId::Behaviour => slog::Value::serialize("Behaviour", record, key, serializer),
|
||||
RequestId::Router => slog::Value::serialize("Router", record, key, serializer),
|
||||
RequestId::Sync(ref id) => slog::Value::serialize(id, record, key, serializer),
|
||||
}
|
||||
}
|
||||
}
|
||||
313
beacon_node/lighthouse_network/src/rpc/mod.rs
Normal file
313
beacon_node/lighthouse_network/src/rpc/mod.rs
Normal file
@@ -0,0 +1,313 @@
|
||||
//! The Ethereum 2.0 Wire Protocol
|
||||
//!
|
||||
//! This protocol is a purpose built Ethereum 2.0 libp2p protocol. It's role is to facilitate
|
||||
//! direct peer-to-peer communication primarily for sending/receiving chain information for
|
||||
//! syncing.
|
||||
|
||||
use futures::future::FutureExt;
|
||||
use handler::RPCHandler;
|
||||
use libp2p::core::{connection::ConnectionId, ConnectedPoint};
|
||||
use libp2p::swarm::{
|
||||
protocols_handler::ProtocolsHandler, NetworkBehaviour, NetworkBehaviourAction, NotifyHandler,
|
||||
PollParameters, SubstreamProtocol,
|
||||
};
|
||||
use libp2p::{Multiaddr, PeerId};
|
||||
use rate_limiter::{RPCRateLimiter as RateLimiter, RPCRateLimiterBuilder, RateLimitedErr};
|
||||
use slog::{crit, debug, o};
|
||||
use std::marker::PhantomData;
|
||||
use std::sync::Arc;
|
||||
use std::task::{Context, Poll};
|
||||
use std::time::Duration;
|
||||
use types::{EthSpec, ForkContext};
|
||||
|
||||
pub(crate) use handler::HandlerErr;
|
||||
pub(crate) use methods::{MetaData, MetaDataV1, MetaDataV2, Ping, RPCCodedResponse, RPCResponse};
|
||||
pub(crate) use protocol::{InboundRequest, RPCProtocol};
|
||||
|
||||
pub use handler::SubstreamId;
|
||||
pub use methods::{
|
||||
BlocksByRangeRequest, BlocksByRootRequest, GoodbyeReason, MaxRequestBlocks,
|
||||
RPCResponseErrorCode, RequestId, ResponseTermination, StatusMessage, MAX_REQUEST_BLOCKS,
|
||||
};
|
||||
pub(crate) use outbound::OutboundRequest;
|
||||
pub use protocol::{Protocol, RPCError};
|
||||
|
||||
pub(crate) mod codec;
|
||||
mod handler;
|
||||
pub mod methods;
|
||||
mod outbound;
|
||||
mod protocol;
|
||||
mod rate_limiter;
|
||||
|
||||
/// RPC events sent from Lighthouse.
|
||||
#[derive(Debug, Clone)]
|
||||
pub enum RPCSend<TSpec: EthSpec> {
|
||||
/// A request sent from Lighthouse.
|
||||
///
|
||||
/// The `RequestId` is given by the application making the request. These
|
||||
/// go over *outbound* connections.
|
||||
Request(RequestId, OutboundRequest<TSpec>),
|
||||
/// A response sent from Lighthouse.
|
||||
///
|
||||
/// The `SubstreamId` must correspond to the RPC-given ID of the original request received from the
|
||||
/// peer. The second parameter is a single chunk of a response. These go over *inbound*
|
||||
/// connections.
|
||||
Response(SubstreamId, RPCCodedResponse<TSpec>),
|
||||
/// Lighthouse has requested to terminate the connection with a goodbye message.
|
||||
Shutdown(GoodbyeReason),
|
||||
}
|
||||
|
||||
/// RPC events received from outside Lighthouse.
|
||||
#[derive(Debug, Clone)]
|
||||
pub enum RPCReceived<T: EthSpec> {
|
||||
/// A request received from the outside.
|
||||
///
|
||||
/// The `SubstreamId` is given by the `RPCHandler` as it identifies this request with the
|
||||
/// *inbound* substream over which it is managed.
|
||||
Request(SubstreamId, InboundRequest<T>),
|
||||
/// A response received from the outside.
|
||||
///
|
||||
/// The `RequestId` corresponds to the application given ID of the original request sent to the
|
||||
/// peer. The second parameter is a single chunk of a response. These go over *outbound*
|
||||
/// connections.
|
||||
Response(RequestId, RPCResponse<T>),
|
||||
/// Marks a request as completed
|
||||
EndOfStream(RequestId, ResponseTermination),
|
||||
}
|
||||
|
||||
impl<T: EthSpec> std::fmt::Display for RPCSend<T> {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
RPCSend::Request(id, req) => write!(f, "RPC Request(id: {:?}, {})", id, req),
|
||||
RPCSend::Response(id, res) => write!(f, "RPC Response(id: {:?}, {})", id, res),
|
||||
RPCSend::Shutdown(reason) => write!(f, "Sending Goodbye: {}", reason),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Messages sent to the user from the RPC protocol.
|
||||
pub struct RPCMessage<TSpec: EthSpec> {
|
||||
/// The peer that sent the message.
|
||||
pub peer_id: PeerId,
|
||||
/// Handler managing this message.
|
||||
pub conn_id: ConnectionId,
|
||||
/// The message that was sent.
|
||||
pub event: <RPCHandler<TSpec> as ProtocolsHandler>::OutEvent,
|
||||
}
|
||||
|
||||
/// Implements the libp2p `NetworkBehaviour` trait and therefore manages network-level
|
||||
/// logic.
|
||||
pub struct RPC<TSpec: EthSpec> {
|
||||
/// Rate limiter
|
||||
limiter: RateLimiter,
|
||||
/// Queue of events to be processed.
|
||||
events: Vec<NetworkBehaviourAction<RPCSend<TSpec>, RPCMessage<TSpec>>>,
|
||||
fork_context: Arc<ForkContext>,
|
||||
/// Slog logger for RPC behaviour.
|
||||
log: slog::Logger,
|
||||
}
|
||||
|
||||
impl<TSpec: EthSpec> RPC<TSpec> {
|
||||
pub fn new(fork_context: Arc<ForkContext>, log: slog::Logger) -> Self {
|
||||
let log = log.new(o!("service" => "libp2p_rpc"));
|
||||
let limiter = RPCRateLimiterBuilder::new()
|
||||
.n_every(Protocol::MetaData, 2, Duration::from_secs(5))
|
||||
.n_every(Protocol::Ping, 2, Duration::from_secs(10))
|
||||
.n_every(Protocol::Status, 5, Duration::from_secs(15))
|
||||
.one_every(Protocol::Goodbye, Duration::from_secs(10))
|
||||
.n_every(
|
||||
Protocol::BlocksByRange,
|
||||
methods::MAX_REQUEST_BLOCKS,
|
||||
Duration::from_secs(10),
|
||||
)
|
||||
.n_every(Protocol::BlocksByRoot, 128, Duration::from_secs(10))
|
||||
.build()
|
||||
.expect("Configuration parameters are valid");
|
||||
RPC {
|
||||
limiter,
|
||||
events: Vec::new(),
|
||||
fork_context,
|
||||
log,
|
||||
}
|
||||
}
|
||||
|
||||
/// Sends an RPC response.
|
||||
///
|
||||
/// The peer must be connected for this to succeed.
|
||||
pub fn send_response(
|
||||
&mut self,
|
||||
peer_id: PeerId,
|
||||
id: (ConnectionId, SubstreamId),
|
||||
event: RPCCodedResponse<TSpec>,
|
||||
) {
|
||||
self.events.push(NetworkBehaviourAction::NotifyHandler {
|
||||
peer_id,
|
||||
handler: NotifyHandler::One(id.0),
|
||||
event: RPCSend::Response(id.1, event),
|
||||
});
|
||||
}
|
||||
|
||||
/// Submits an RPC request.
|
||||
///
|
||||
/// The peer must be connected for this to succeed.
|
||||
pub fn send_request(
|
||||
&mut self,
|
||||
peer_id: PeerId,
|
||||
request_id: RequestId,
|
||||
event: OutboundRequest<TSpec>,
|
||||
) {
|
||||
self.events.push(NetworkBehaviourAction::NotifyHandler {
|
||||
peer_id,
|
||||
handler: NotifyHandler::Any,
|
||||
event: RPCSend::Request(request_id, event),
|
||||
});
|
||||
}
|
||||
|
||||
/// Lighthouse wishes to disconnect from this peer by sending a Goodbye message. This
|
||||
/// gracefully terminates the RPC behaviour with a goodbye message.
|
||||
pub fn shutdown(&mut self, peer_id: PeerId, reason: GoodbyeReason) {
|
||||
self.events.push(NetworkBehaviourAction::NotifyHandler {
|
||||
peer_id,
|
||||
handler: NotifyHandler::Any,
|
||||
event: RPCSend::Shutdown(reason),
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
impl<TSpec> NetworkBehaviour for RPC<TSpec>
|
||||
where
|
||||
TSpec: EthSpec,
|
||||
{
|
||||
type ProtocolsHandler = RPCHandler<TSpec>;
|
||||
type OutEvent = RPCMessage<TSpec>;
|
||||
|
||||
fn new_handler(&mut self) -> Self::ProtocolsHandler {
|
||||
RPCHandler::new(
|
||||
SubstreamProtocol::new(
|
||||
RPCProtocol {
|
||||
fork_context: self.fork_context.clone(),
|
||||
phantom: PhantomData,
|
||||
},
|
||||
(),
|
||||
),
|
||||
self.fork_context.clone(),
|
||||
&self.log,
|
||||
)
|
||||
}
|
||||
|
||||
// handled by discovery
|
||||
fn addresses_of_peer(&mut self, _peer_id: &PeerId) -> Vec<Multiaddr> {
|
||||
Vec::new()
|
||||
}
|
||||
|
||||
// Use connection established/closed instead of these currently
|
||||
fn inject_connected(&mut self, peer_id: &PeerId) {
|
||||
// find the peer's meta-data
|
||||
debug!(self.log, "Requesting new peer's metadata"; "peer_id" => %peer_id);
|
||||
let rpc_event =
|
||||
RPCSend::Request(RequestId::Behaviour, OutboundRequest::MetaData(PhantomData));
|
||||
self.events.push(NetworkBehaviourAction::NotifyHandler {
|
||||
peer_id: *peer_id,
|
||||
handler: NotifyHandler::Any,
|
||||
event: rpc_event,
|
||||
});
|
||||
}
|
||||
|
||||
fn inject_disconnected(&mut self, _peer_id: &PeerId) {}
|
||||
|
||||
fn inject_connection_established(
|
||||
&mut self,
|
||||
_peer_id: &PeerId,
|
||||
_: &ConnectionId,
|
||||
_connected_point: &ConnectedPoint,
|
||||
) {
|
||||
}
|
||||
|
||||
fn inject_connection_closed(
|
||||
&mut self,
|
||||
_peer_id: &PeerId,
|
||||
_: &ConnectionId,
|
||||
_connected_point: &ConnectedPoint,
|
||||
) {
|
||||
}
|
||||
|
||||
fn inject_event(
|
||||
&mut self,
|
||||
peer_id: PeerId,
|
||||
conn_id: ConnectionId,
|
||||
event: <Self::ProtocolsHandler as ProtocolsHandler>::OutEvent,
|
||||
) {
|
||||
if let Ok(RPCReceived::Request(ref id, ref req)) = event {
|
||||
// check if the request is conformant to the quota
|
||||
match self.limiter.allows(&peer_id, req) {
|
||||
Ok(()) => {
|
||||
// send the event to the user
|
||||
self.events
|
||||
.push(NetworkBehaviourAction::GenerateEvent(RPCMessage {
|
||||
peer_id,
|
||||
conn_id,
|
||||
event,
|
||||
}))
|
||||
}
|
||||
Err(RateLimitedErr::TooLarge) => {
|
||||
// we set the batch sizes, so this is a coding/config err for most protocols
|
||||
let protocol = req.protocol();
|
||||
if matches!(protocol, Protocol::BlocksByRange) {
|
||||
debug!(self.log, "Blocks by range request will never be processed"; "request" => %req);
|
||||
} else {
|
||||
crit!(self.log, "Request size too large to ever be processed"; "protocol" => %protocol);
|
||||
}
|
||||
// send an error code to the peer.
|
||||
// the handler upon receiving the error code will send it back to the behaviour
|
||||
self.send_response(
|
||||
peer_id,
|
||||
(conn_id, *id),
|
||||
RPCCodedResponse::Error(
|
||||
RPCResponseErrorCode::RateLimited,
|
||||
"Rate limited. Request too large".into(),
|
||||
),
|
||||
);
|
||||
}
|
||||
Err(RateLimitedErr::TooSoon(wait_time)) => {
|
||||
debug!(self.log, "Request exceeds the rate limit";
|
||||
"request" => %req, "peer_id" => %peer_id, "wait_time_ms" => wait_time.as_millis());
|
||||
// send an error code to the peer.
|
||||
// the handler upon receiving the error code will send it back to the behaviour
|
||||
self.send_response(
|
||||
peer_id,
|
||||
(conn_id, *id),
|
||||
RPCCodedResponse::Error(
|
||||
RPCResponseErrorCode::RateLimited,
|
||||
format!("Wait {:?}", wait_time).into(),
|
||||
),
|
||||
);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
self.events
|
||||
.push(NetworkBehaviourAction::GenerateEvent(RPCMessage {
|
||||
peer_id,
|
||||
conn_id,
|
||||
event,
|
||||
}));
|
||||
}
|
||||
}
|
||||
|
||||
fn poll(
|
||||
&mut self,
|
||||
cx: &mut Context,
|
||||
_: &mut impl PollParameters,
|
||||
) -> Poll<
|
||||
NetworkBehaviourAction<
|
||||
<Self::ProtocolsHandler as ProtocolsHandler>::InEvent,
|
||||
Self::OutEvent,
|
||||
>,
|
||||
> {
|
||||
// let the rate limiter prune
|
||||
let _ = self.limiter.poll_unpin(cx);
|
||||
if !self.events.is_empty() {
|
||||
return Poll::Ready(self.events.remove(0));
|
||||
}
|
||||
Poll::Pending
|
||||
}
|
||||
}
|
||||
182
beacon_node/lighthouse_network/src/rpc/outbound.rs
Normal file
182
beacon_node/lighthouse_network/src/rpc/outbound.rs
Normal file
@@ -0,0 +1,182 @@
|
||||
use std::marker::PhantomData;
|
||||
|
||||
use super::methods::*;
|
||||
use super::protocol::Protocol;
|
||||
use super::protocol::ProtocolId;
|
||||
use super::RPCError;
|
||||
use crate::rpc::protocol::Encoding;
|
||||
use crate::rpc::protocol::Version;
|
||||
use crate::rpc::{
|
||||
codec::{base::BaseOutboundCodec, ssz_snappy::SSZSnappyOutboundCodec, OutboundCodec},
|
||||
methods::ResponseTermination,
|
||||
};
|
||||
use futures::future::BoxFuture;
|
||||
use futures::prelude::{AsyncRead, AsyncWrite};
|
||||
use futures::{FutureExt, SinkExt};
|
||||
use libp2p::core::{OutboundUpgrade, UpgradeInfo};
|
||||
use std::sync::Arc;
|
||||
use tokio_util::{
|
||||
codec::Framed,
|
||||
compat::{Compat, FuturesAsyncReadCompatExt},
|
||||
};
|
||||
use types::{EthSpec, ForkContext};
|
||||
/* Outbound request */
|
||||
|
||||
// Combines all the RPC requests into a single enum to implement `UpgradeInfo` and
|
||||
// `OutboundUpgrade`
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct OutboundRequestContainer<TSpec: EthSpec> {
|
||||
pub req: OutboundRequest<TSpec>,
|
||||
pub fork_context: Arc<ForkContext>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq)]
|
||||
pub enum OutboundRequest<TSpec: EthSpec> {
|
||||
Status(StatusMessage),
|
||||
Goodbye(GoodbyeReason),
|
||||
BlocksByRange(BlocksByRangeRequest),
|
||||
BlocksByRoot(BlocksByRootRequest),
|
||||
Ping(Ping),
|
||||
MetaData(PhantomData<TSpec>),
|
||||
}
|
||||
|
||||
impl<TSpec: EthSpec> UpgradeInfo for OutboundRequestContainer<TSpec> {
|
||||
type Info = ProtocolId;
|
||||
type InfoIter = Vec<Self::Info>;
|
||||
|
||||
// add further protocols as we support more encodings/versions
|
||||
fn protocol_info(&self) -> Self::InfoIter {
|
||||
self.req.supported_protocols()
|
||||
}
|
||||
}
|
||||
|
||||
/// Implements the encoding per supported protocol for `RPCRequest`.
|
||||
impl<TSpec: EthSpec> OutboundRequest<TSpec> {
|
||||
pub fn supported_protocols(&self) -> Vec<ProtocolId> {
|
||||
match self {
|
||||
// add more protocols when versions/encodings are supported
|
||||
OutboundRequest::Status(_) => vec![ProtocolId::new(
|
||||
Protocol::Status,
|
||||
Version::V1,
|
||||
Encoding::SSZSnappy,
|
||||
)],
|
||||
OutboundRequest::Goodbye(_) => vec![ProtocolId::new(
|
||||
Protocol::Goodbye,
|
||||
Version::V1,
|
||||
Encoding::SSZSnappy,
|
||||
)],
|
||||
OutboundRequest::BlocksByRange(_) => vec![
|
||||
ProtocolId::new(Protocol::BlocksByRange, Version::V2, Encoding::SSZSnappy),
|
||||
ProtocolId::new(Protocol::BlocksByRange, Version::V1, Encoding::SSZSnappy),
|
||||
],
|
||||
OutboundRequest::BlocksByRoot(_) => vec![
|
||||
ProtocolId::new(Protocol::BlocksByRoot, Version::V2, Encoding::SSZSnappy),
|
||||
ProtocolId::new(Protocol::BlocksByRoot, Version::V1, Encoding::SSZSnappy),
|
||||
],
|
||||
OutboundRequest::Ping(_) => vec![ProtocolId::new(
|
||||
Protocol::Ping,
|
||||
Version::V1,
|
||||
Encoding::SSZSnappy,
|
||||
)],
|
||||
OutboundRequest::MetaData(_) => vec![
|
||||
ProtocolId::new(Protocol::MetaData, Version::V2, Encoding::SSZSnappy),
|
||||
ProtocolId::new(Protocol::MetaData, Version::V1, Encoding::SSZSnappy),
|
||||
],
|
||||
}
|
||||
}
|
||||
|
||||
/* These functions are used in the handler for stream management */
|
||||
|
||||
/// Number of responses expected for this request.
|
||||
pub fn expected_responses(&self) -> u64 {
|
||||
match self {
|
||||
OutboundRequest::Status(_) => 1,
|
||||
OutboundRequest::Goodbye(_) => 0,
|
||||
OutboundRequest::BlocksByRange(req) => req.count,
|
||||
OutboundRequest::BlocksByRoot(req) => req.block_roots.len() as u64,
|
||||
OutboundRequest::Ping(_) => 1,
|
||||
OutboundRequest::MetaData(_) => 1,
|
||||
}
|
||||
}
|
||||
|
||||
/// Gives the corresponding `Protocol` to this request.
|
||||
pub fn protocol(&self) -> Protocol {
|
||||
match self {
|
||||
OutboundRequest::Status(_) => Protocol::Status,
|
||||
OutboundRequest::Goodbye(_) => Protocol::Goodbye,
|
||||
OutboundRequest::BlocksByRange(_) => Protocol::BlocksByRange,
|
||||
OutboundRequest::BlocksByRoot(_) => Protocol::BlocksByRoot,
|
||||
OutboundRequest::Ping(_) => Protocol::Ping,
|
||||
OutboundRequest::MetaData(_) => Protocol::MetaData,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the `ResponseTermination` type associated with the request if a stream gets
|
||||
/// terminated.
|
||||
pub fn stream_termination(&self) -> ResponseTermination {
|
||||
match self {
|
||||
// this only gets called after `multiple_responses()` returns true. Therefore, only
|
||||
// variants that have `multiple_responses()` can have values.
|
||||
OutboundRequest::BlocksByRange(_) => ResponseTermination::BlocksByRange,
|
||||
OutboundRequest::BlocksByRoot(_) => ResponseTermination::BlocksByRoot,
|
||||
OutboundRequest::Status(_) => unreachable!(),
|
||||
OutboundRequest::Goodbye(_) => unreachable!(),
|
||||
OutboundRequest::Ping(_) => unreachable!(),
|
||||
OutboundRequest::MetaData(_) => unreachable!(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* RPC Response type - used for outbound upgrades */
|
||||
|
||||
/* Outbound upgrades */
|
||||
|
||||
pub type OutboundFramed<TSocket, TSpec> = Framed<Compat<TSocket>, OutboundCodec<TSpec>>;
|
||||
|
||||
impl<TSocket, TSpec> OutboundUpgrade<TSocket> for OutboundRequestContainer<TSpec>
|
||||
where
|
||||
TSpec: EthSpec + Send + 'static,
|
||||
TSocket: AsyncRead + AsyncWrite + Unpin + Send + 'static,
|
||||
{
|
||||
type Output = OutboundFramed<TSocket, TSpec>;
|
||||
type Error = RPCError;
|
||||
type Future = BoxFuture<'static, Result<Self::Output, Self::Error>>;
|
||||
|
||||
fn upgrade_outbound(self, socket: TSocket, protocol: Self::Info) -> Self::Future {
|
||||
// convert to a tokio compatible socket
|
||||
let socket = socket.compat();
|
||||
let codec = match protocol.encoding {
|
||||
Encoding::SSZSnappy => {
|
||||
let ssz_snappy_codec = BaseOutboundCodec::new(SSZSnappyOutboundCodec::new(
|
||||
protocol,
|
||||
usize::max_value(),
|
||||
self.fork_context.clone(),
|
||||
));
|
||||
OutboundCodec::SSZSnappy(ssz_snappy_codec)
|
||||
}
|
||||
};
|
||||
|
||||
let mut socket = Framed::new(socket, codec);
|
||||
|
||||
async {
|
||||
socket.send(self.req).await?;
|
||||
socket.close().await?;
|
||||
Ok(socket)
|
||||
}
|
||||
.boxed()
|
||||
}
|
||||
}
|
||||
|
||||
impl<TSpec: EthSpec> std::fmt::Display for OutboundRequest<TSpec> {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
OutboundRequest::Status(status) => write!(f, "Status Message: {}", status),
|
||||
OutboundRequest::Goodbye(reason) => write!(f, "Goodbye: {}", reason),
|
||||
OutboundRequest::BlocksByRange(req) => write!(f, "Blocks by range: {}", req),
|
||||
OutboundRequest::BlocksByRoot(req) => write!(f, "Blocks by root: {:?}", req),
|
||||
OutboundRequest::Ping(ping) => write!(f, "Ping: {}", ping.data),
|
||||
OutboundRequest::MetaData(_) => write!(f, "MetaData request"),
|
||||
}
|
||||
}
|
||||
}
|
||||
593
beacon_node/lighthouse_network/src/rpc/protocol.rs
Normal file
593
beacon_node/lighthouse_network/src/rpc/protocol.rs
Normal file
@@ -0,0 +1,593 @@
|
||||
use super::methods::*;
|
||||
use crate::rpc::{
|
||||
codec::{base::BaseInboundCodec, ssz_snappy::SSZSnappyInboundCodec, InboundCodec},
|
||||
methods::{MaxErrorLen, ResponseTermination, MAX_ERROR_LEN},
|
||||
MaxRequestBlocks, MAX_REQUEST_BLOCKS,
|
||||
};
|
||||
use futures::future::BoxFuture;
|
||||
use futures::prelude::{AsyncRead, AsyncWrite};
|
||||
use futures::{FutureExt, StreamExt};
|
||||
use libp2p::core::{InboundUpgrade, ProtocolName, UpgradeInfo};
|
||||
use ssz::Encode;
|
||||
use ssz_types::VariableList;
|
||||
use std::io;
|
||||
use std::marker::PhantomData;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
use strum::{AsStaticRef, AsStaticStr};
|
||||
use tokio_io_timeout::TimeoutStream;
|
||||
use tokio_util::{
|
||||
codec::Framed,
|
||||
compat::{Compat, FuturesAsyncReadCompatExt},
|
||||
};
|
||||
use types::{
|
||||
BeaconBlock, BeaconBlockAltair, BeaconBlockBase, EthSpec, ForkContext, Hash256, MainnetEthSpec,
|
||||
Signature, SignedBeaconBlock,
|
||||
};
|
||||
|
||||
lazy_static! {
    // Note: Hardcoding the `EthSpec` type for `SignedBeaconBlock` as min/max values is
    // same across different `EthSpec` implementations.

    /// SSZ length of the smallest possible base (phase0) signed beacon block (empty body).
    pub static ref SIGNED_BEACON_BLOCK_BASE_MIN: usize = SignedBeaconBlock::<MainnetEthSpec>::from_block(
        BeaconBlock::Base(BeaconBlockBase::<MainnetEthSpec>::empty(&MainnetEthSpec::default_spec())),
        Signature::empty(),
    )
    .as_ssz_bytes()
    .len();
    /// SSZ length of the largest possible base (phase0) signed beacon block (full body).
    pub static ref SIGNED_BEACON_BLOCK_BASE_MAX: usize = SignedBeaconBlock::<MainnetEthSpec>::from_block(
        BeaconBlock::Base(BeaconBlockBase::full(&MainnetEthSpec::default_spec())),
        Signature::empty(),
    )
    .as_ssz_bytes()
    .len();

    /// SSZ length of the smallest possible Altair signed beacon block (empty body).
    pub static ref SIGNED_BEACON_BLOCK_ALTAIR_MIN: usize = SignedBeaconBlock::<MainnetEthSpec>::from_block(
        BeaconBlock::Altair(BeaconBlockAltair::<MainnetEthSpec>::empty(&MainnetEthSpec::default_spec())),
        Signature::empty(),
    )
    .as_ssz_bytes()
    .len();
    /// SSZ length of the largest possible Altair signed beacon block (full body).
    pub static ref SIGNED_BEACON_BLOCK_ALTAIR_MAX: usize = SignedBeaconBlock::<MainnetEthSpec>::from_block(
        BeaconBlock::Altair(BeaconBlockAltair::full(&MainnetEthSpec::default_spec())),
        Signature::empty(),
    )
    .as_ssz_bytes()
    .len();

    /// SSZ length of an empty BlocksByRoot request (no roots).
    pub static ref BLOCKS_BY_ROOT_REQUEST_MIN: usize =
        VariableList::<Hash256, MaxRequestBlocks>::from(Vec::<Hash256>::new())
            .as_ssz_bytes()
            .len();
    /// SSZ length of a maximally full BlocksByRoot request.
    pub static ref BLOCKS_BY_ROOT_REQUEST_MAX: usize =
        VariableList::<Hash256, MaxRequestBlocks>::from(vec![
            Hash256::zero();
            MAX_REQUEST_BLOCKS
                as usize
        ])
        .as_ssz_bytes()
        .len();

    /// SSZ length of an empty error message payload.
    pub static ref ERROR_TYPE_MIN: usize =
        VariableList::<u8, MaxErrorLen>::from(Vec::<u8>::new())
            .as_ssz_bytes()
            .len();
    /// SSZ length of a maximally long error message payload.
    pub static ref ERROR_TYPE_MAX: usize =
        VariableList::<u8, MaxErrorLen>::from(vec![
            0u8;
            MAX_ERROR_LEN
                as usize
        ])
        .as_ssz_bytes()
        .len();
}
|
||||
|
||||
/// The maximum bytes that can be sent across the RPC.
const MAX_RPC_SIZE: usize = 1_048_576; // 1 MiB
/// The protocol prefix for every RPC protocol id.
const PROTOCOL_PREFIX: &str = "/eth2/beacon_chain/req";
/// Time allowed, in seconds, for the first byte of a request to arrive before we time
/// out (Time To First Byte).
const TTFB_TIMEOUT: u64 = 5;
/// The number of seconds to wait for the first bytes of a request once a protocol has been
/// established before the stream is terminated.
const REQUEST_TIMEOUT: u64 = 15;
|
||||
|
||||
/// Protocol names to be used.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Protocol {
    /// The Status protocol.
    Status,
    /// The Goodbye protocol.
    Goodbye,
    /// The `BlocksByRange` protocol.
    BlocksByRange,
    /// The `BlocksByRoot` protocol.
    BlocksByRoot,
    /// The `Ping` protocol.
    Ping,
    /// The `MetaData` protocol.
    MetaData,
}
|
||||
|
||||
/// RPC versions.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Version {
    /// Version 1 of RPC.
    V1,
    /// Version 2 of RPC.
    V2,
}
|
||||
|
||||
/// RPC encodings supported.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Encoding {
    /// SSZ serialization compressed with snappy.
    SSZSnappy,
}
|
||||
|
||||
impl std::fmt::Display for Protocol {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
let repr = match self {
|
||||
Protocol::Status => "status",
|
||||
Protocol::Goodbye => "goodbye",
|
||||
Protocol::BlocksByRange => "beacon_blocks_by_range",
|
||||
Protocol::BlocksByRoot => "beacon_blocks_by_root",
|
||||
Protocol::Ping => "ping",
|
||||
Protocol::MetaData => "metadata",
|
||||
};
|
||||
f.write_str(repr)
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Display for Encoding {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
let repr = match self {
|
||||
Encoding::SSZSnappy => "ssz_snappy",
|
||||
};
|
||||
f.write_str(repr)
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Display for Version {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
let repr = match self {
|
||||
Version::V1 => "1",
|
||||
Version::V2 => "2",
|
||||
};
|
||||
f.write_str(repr)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct RPCProtocol<TSpec: EthSpec> {
|
||||
pub fork_context: Arc<ForkContext>,
|
||||
pub phantom: PhantomData<TSpec>,
|
||||
}
|
||||
|
||||
impl<TSpec: EthSpec> UpgradeInfo for RPCProtocol<TSpec> {
|
||||
type Info = ProtocolId;
|
||||
type InfoIter = Vec<Self::Info>;
|
||||
|
||||
/// The list of supported RPC protocols for Lighthouse.
|
||||
fn protocol_info(&self) -> Self::InfoIter {
|
||||
vec![
|
||||
ProtocolId::new(Protocol::Status, Version::V1, Encoding::SSZSnappy),
|
||||
ProtocolId::new(Protocol::Goodbye, Version::V1, Encoding::SSZSnappy),
|
||||
// V2 variants have higher preference then V1
|
||||
ProtocolId::new(Protocol::BlocksByRange, Version::V2, Encoding::SSZSnappy),
|
||||
ProtocolId::new(Protocol::BlocksByRange, Version::V1, Encoding::SSZSnappy),
|
||||
ProtocolId::new(Protocol::BlocksByRoot, Version::V2, Encoding::SSZSnappy),
|
||||
ProtocolId::new(Protocol::BlocksByRoot, Version::V1, Encoding::SSZSnappy),
|
||||
ProtocolId::new(Protocol::Ping, Version::V1, Encoding::SSZSnappy),
|
||||
ProtocolId::new(Protocol::MetaData, Version::V2, Encoding::SSZSnappy),
|
||||
ProtocolId::new(Protocol::MetaData, Version::V1, Encoding::SSZSnappy),
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
/// Represents the ssz length bounds for RPC messages.
#[derive(Debug, PartialEq)]
pub struct RpcLimits {
    pub min: usize,
    pub max: usize,
}

impl RpcLimits {
    /// Creates a new inclusive `[min, max]` length bound.
    pub fn new(min: usize, max: usize) -> Self {
        Self { min, max }
    }

    /// Returns true if the given length is out of bounds, false otherwise.
    pub fn is_out_of_bounds(&self, length: usize) -> bool {
        // Inside the inclusive range means in-bounds.
        !(self.min..=self.max).contains(&length)
    }
}
|
||||
|
||||
/// Tracks the types in a protocol id.
#[derive(Clone, Debug)]
pub struct ProtocolId {
    /// The RPC message type/name.
    pub message_name: Protocol,

    /// The version of the RPC.
    pub version: Version,

    /// The encoding of the RPC.
    pub encoding: Encoding,

    /// The protocol id that is formed from the above fields.
    // Kept private: pre-computed once in `ProtocolId::new` and exposed to
    // libp2p via the `ProtocolName` impl.
    protocol_id: String,
}

impl ProtocolId {
    /// Returns min and max size for messages of given protocol id requests.
    pub fn rpc_request_limits(&self) -> RpcLimits {
        match self.message_name {
            // Fixed-size SSZ requests: min == max == the type's fixed length.
            Protocol::Status => RpcLimits::new(
                <StatusMessage as Encode>::ssz_fixed_len(),
                <StatusMessage as Encode>::ssz_fixed_len(),
            ),
            Protocol::Goodbye => RpcLimits::new(
                <GoodbyeReason as Encode>::ssz_fixed_len(),
                <GoodbyeReason as Encode>::ssz_fixed_len(),
            ),
            Protocol::BlocksByRange => RpcLimits::new(
                <BlocksByRangeRequest as Encode>::ssz_fixed_len(),
                <BlocksByRangeRequest as Encode>::ssz_fixed_len(),
            ),
            // Variable-length request (list of roots): bounds come from
            // pre-computed static values.
            Protocol::BlocksByRoot => {
                RpcLimits::new(*BLOCKS_BY_ROOT_REQUEST_MIN, *BLOCKS_BY_ROOT_REQUEST_MAX)
            }
            Protocol::Ping => RpcLimits::new(
                <Ping as Encode>::ssz_fixed_len(),
                <Ping as Encode>::ssz_fixed_len(),
            ),
            Protocol::MetaData => RpcLimits::new(0, 0), // Metadata requests are empty
        }
    }

    /// Returns min and max size for messages of given protocol id responses.
    pub fn rpc_response_limits<T: EthSpec>(&self) -> RpcLimits {
        match self.message_name {
            Protocol::Status => RpcLimits::new(
                <StatusMessage as Encode>::ssz_fixed_len(),
                <StatusMessage as Encode>::ssz_fixed_len(),
            ),
            Protocol::Goodbye => RpcLimits::new(0, 0), // Goodbye request has no response
            // Block responses may be either base or altair variants, so the
            // bounds must span the min/max of both fork types.
            Protocol::BlocksByRange => RpcLimits::new(
                std::cmp::min(
                    *SIGNED_BEACON_BLOCK_ALTAIR_MIN,
                    *SIGNED_BEACON_BLOCK_BASE_MIN,
                ),
                std::cmp::max(
                    *SIGNED_BEACON_BLOCK_ALTAIR_MAX,
                    *SIGNED_BEACON_BLOCK_BASE_MAX,
                ),
            ),
            Protocol::BlocksByRoot => RpcLimits::new(
                std::cmp::min(
                    *SIGNED_BEACON_BLOCK_ALTAIR_MIN,
                    *SIGNED_BEACON_BLOCK_BASE_MIN,
                ),
                std::cmp::max(
                    *SIGNED_BEACON_BLOCK_ALTAIR_MAX,
                    *SIGNED_BEACON_BLOCK_BASE_MAX,
                ),
            ),

            Protocol::Ping => RpcLimits::new(
                <Ping as Encode>::ssz_fixed_len(),
                <Ping as Encode>::ssz_fixed_len(),
            ),
            // Metadata responses: min bound uses the V1 type, max bound the V2 type.
            Protocol::MetaData => RpcLimits::new(
                <MetaDataV1<T> as Encode>::ssz_fixed_len(),
                <MetaDataV2<T> as Encode>::ssz_fixed_len(),
            ),
        }
    }

    /// Returns `true` if the given `ProtocolId` should expect `context_bytes` in the
    /// beginning of the stream, else returns `false`.
    pub fn has_context_bytes(&self) -> bool {
        // Only V2 block responses carry context bytes; every other
        // protocol/version combination does not.
        if self.version == Version::V2 {
            match self.message_name {
                Protocol::BlocksByRange | Protocol::BlocksByRoot => return true,
                _ => return false,
            }
        }
        false
    }
}
|
||||
|
||||
/// An RPC protocol ID.
|
||||
impl ProtocolId {
|
||||
pub fn new(message_name: Protocol, version: Version, encoding: Encoding) -> Self {
|
||||
let protocol_id = format!(
|
||||
"{}/{}/{}/{}",
|
||||
PROTOCOL_PREFIX, message_name, version, encoding
|
||||
);
|
||||
|
||||
ProtocolId {
|
||||
message_name,
|
||||
version,
|
||||
encoding,
|
||||
protocol_id,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl ProtocolName for ProtocolId {
    /// Exposes the pre-computed protocol id string as the libp2p protocol name
    /// advertised during stream negotiation.
    fn protocol_name(&self) -> &[u8] {
        self.protocol_id.as_bytes()
    }
}
|
||||
|
||||
/* Inbound upgrade */

// The inbound protocol reads the request, decodes it and returns the stream to the protocol
// handler to respond to once ready.

/// The decoded request paired with the framed stream on which the response is sent.
pub type InboundOutput<TSocket, TSpec> = (InboundRequest<TSpec>, InboundFramed<TSocket, TSpec>);
/// An inbound substream wrapped with a read timeout and framed with the inbound codec.
pub type InboundFramed<TSocket, TSpec> =
    Framed<std::pin::Pin<Box<TimeoutStream<Compat<TSocket>>>>, InboundCodec<TSpec>>;
|
||||
|
||||
impl<TSocket, TSpec> InboundUpgrade<TSocket> for RPCProtocol<TSpec>
where
    TSocket: AsyncRead + AsyncWrite + Unpin + Send + 'static,
    TSpec: EthSpec,
{
    type Output = InboundOutput<TSocket, TSpec>;
    type Error = RPCError;
    type Future = BoxFuture<'static, Result<Self::Output, Self::Error>>;

    /// Upgrades an inbound substream: decodes a single request (MetaData is a
    /// special case with an empty body) and returns the framed stream so the
    /// handler can send the response later.
    fn upgrade_inbound(self, socket: TSocket, protocol: ProtocolId) -> Self::Future {
        async move {
            let protocol_name = protocol.message_name;
            // convert the socket to tokio compatible socket
            let socket = socket.compat();
            // Select the codec for the negotiated encoding (currently only ssz_snappy).
            let codec = match protocol.encoding {
                Encoding::SSZSnappy => {
                    let ssz_snappy_codec = BaseInboundCodec::new(SSZSnappyInboundCodec::new(
                        protocol,
                        MAX_RPC_SIZE,
                        self.fork_context.clone(),
                    ));
                    InboundCodec::SSZSnappy(ssz_snappy_codec)
                }
            };
            // Enforce a time-to-first-byte read timeout so an idle peer cannot
            // hold the substream open indefinitely.
            let mut timed_socket = TimeoutStream::new(socket);
            timed_socket.set_read_timeout(Some(Duration::from_secs(TTFB_TIMEOUT)));

            let socket = Framed::new(Box::pin(timed_socket), codec);

            // MetaData requests should be empty, return the stream
            match protocol_name {
                Protocol::MetaData => Ok((InboundRequest::MetaData(PhantomData), socket)),
                _ => {
                    // Bound the wait for the complete request with an overall timeout.
                    match tokio::time::timeout(
                        Duration::from_secs(REQUEST_TIMEOUT),
                        socket.into_future(),
                    )
                    .await
                    {
                        // The timeout elapsed before a full request arrived.
                        Err(e) => Err(RPCError::from(e)),
                        // A request was decoded; hand it back with the stream.
                        Ok((Some(Ok(request)), stream)) => Ok((request, stream)),
                        // The codec reported a decoding error.
                        Ok((Some(Err(e)), _)) => Err(e),
                        // The stream closed before any request was received.
                        Ok((None, _)) => Err(RPCError::IncompleteStream),
                    }
                }
            }
        }
        .boxed()
    }
}
|
||||
|
||||
/// A decoded request received on an inbound substream; one variant per RPC protocol.
#[derive(Debug, Clone, PartialEq)]
pub enum InboundRequest<TSpec: EthSpec> {
    /// A status handshake message.
    Status(StatusMessage),
    /// A goodbye message carrying the disconnect reason.
    Goodbye(GoodbyeReason),
    /// A request for a contiguous range of blocks.
    BlocksByRange(BlocksByRangeRequest),
    /// A request for specific blocks identified by root.
    BlocksByRoot(BlocksByRootRequest),
    /// A ping message.
    Ping(Ping),
    /// A metadata request; carries no payload, only the spec marker type.
    MetaData(PhantomData<TSpec>),
}

impl<TSpec: EthSpec> UpgradeInfo for InboundRequest<TSpec> {
    type Info = ProtocolId;
    type InfoIter = Vec<Self::Info>;

    // add further protocols as we support more encodings/versions
    /// The protocol ids offered for this request during negotiation.
    fn protocol_info(&self) -> Self::InfoIter {
        self.supported_protocols()
    }
}
|
||||
|
||||
/// Implements the encoding per supported protocol for `RPCRequest`.
impl<TSpec: EthSpec> InboundRequest<TSpec> {
    /// Lists the protocol ids (name/version/encoding) this request can be
    /// negotiated over. Ordering is significant: earlier entries are preferred.
    pub fn supported_protocols(&self) -> Vec<ProtocolId> {
        match self {
            // add more protocols when versions/encodings are supported
            InboundRequest::Status(_) => vec![ProtocolId::new(
                Protocol::Status,
                Version::V1,
                Encoding::SSZSnappy,
            )],
            InboundRequest::Goodbye(_) => vec![ProtocolId::new(
                Protocol::Goodbye,
                Version::V1,
                Encoding::SSZSnappy,
            )],
            InboundRequest::BlocksByRange(_) => vec![
                // V2 has higher preference when negotiating a stream
                ProtocolId::new(Protocol::BlocksByRange, Version::V2, Encoding::SSZSnappy),
                ProtocolId::new(Protocol::BlocksByRange, Version::V1, Encoding::SSZSnappy),
            ],
            InboundRequest::BlocksByRoot(_) => vec![
                // V2 has higher preference when negotiating a stream
                ProtocolId::new(Protocol::BlocksByRoot, Version::V2, Encoding::SSZSnappy),
                ProtocolId::new(Protocol::BlocksByRoot, Version::V1, Encoding::SSZSnappy),
            ],
            InboundRequest::Ping(_) => vec![ProtocolId::new(
                Protocol::Ping,
                Version::V1,
                Encoding::SSZSnappy,
            )],
            InboundRequest::MetaData(_) => vec![
                // V2 metadata is preferred over V1.
                ProtocolId::new(Protocol::MetaData, Version::V2, Encoding::SSZSnappy),
                ProtocolId::new(Protocol::MetaData, Version::V1, Encoding::SSZSnappy),
            ],
        }
    }

    /* These functions are used in the handler for stream management */

    /// Number of responses expected for this request.
    pub fn expected_responses(&self) -> u64 {
        match self {
            InboundRequest::Status(_) => 1,
            // Goodbye has no response.
            InboundRequest::Goodbye(_) => 0,
            // One response per block requested.
            InboundRequest::BlocksByRange(req) => req.count,
            InboundRequest::BlocksByRoot(req) => req.block_roots.len() as u64,
            InboundRequest::Ping(_) => 1,
            InboundRequest::MetaData(_) => 1,
        }
    }

    /// Gives the corresponding `Protocol` to this request.
    pub fn protocol(&self) -> Protocol {
        match self {
            InboundRequest::Status(_) => Protocol::Status,
            InboundRequest::Goodbye(_) => Protocol::Goodbye,
            InboundRequest::BlocksByRange(_) => Protocol::BlocksByRange,
            InboundRequest::BlocksByRoot(_) => Protocol::BlocksByRoot,
            InboundRequest::Ping(_) => Protocol::Ping,
            InboundRequest::MetaData(_) => Protocol::MetaData,
        }
    }

    /// Returns the `ResponseTermination` type associated with the request if a stream gets
    /// terminated.
    pub fn stream_termination(&self) -> ResponseTermination {
        match self {
            // this only gets called after `multiple_responses()` returns true. Therefore, only
            // variants that have `multiple_responses()` can have values.
            InboundRequest::BlocksByRange(_) => ResponseTermination::BlocksByRange,
            InboundRequest::BlocksByRoot(_) => ResponseTermination::BlocksByRoot,
            // Single/zero-response requests never have a stream termination.
            InboundRequest::Status(_) => unreachable!(),
            InboundRequest::Goodbye(_) => unreachable!(),
            InboundRequest::Ping(_) => unreachable!(),
            InboundRequest::MetaData(_) => unreachable!(),
        }
    }
}
|
||||
|
||||
/// Error in RPC Encoding/Decoding.
// `AsStaticStr` derives a snake_case static name per variant, used as a metrics label.
#[derive(Debug, Clone, PartialEq, AsStaticStr)]
#[strum(serialize_all = "snake_case")]
pub enum RPCError {
    /// Error when decoding the raw buffer from ssz.
    // NOTE: in the future a ssz::DecodeError should map to an InvalidData error
    #[strum(serialize = "decode_error")]
    SSZDecodeError(ssz::DecodeError),
    /// IO Error.
    IoError(String),
    /// The peer returned a valid response but the response indicated an error.
    ErrorResponse(RPCResponseErrorCode, String),
    /// Timed out waiting for a response.
    StreamTimeout,
    /// Peer does not support the protocol.
    UnsupportedProtocol,
    /// Stream ended unexpectedly.
    IncompleteStream,
    /// Peer sent invalid data.
    InvalidData,
    /// An error occurred due to internal reasons. Ex: timer failure.
    InternalError(&'static str),
    /// Negotiation with this peer timed out.
    NegotiationTimeout,
    /// Handler rejected this request.
    HandlerRejected,
    /// We have intentionally disconnected.
    Disconnected,
}

/// Conversion from an ssz decoding failure.
impl From<ssz::DecodeError> for RPCError {
    #[inline]
    fn from(err: ssz::DecodeError) -> Self {
        RPCError::SSZDecodeError(err)
    }
}

/// An elapsed tokio timeout maps to a stream timeout.
impl From<tokio::time::error::Elapsed> for RPCError {
    fn from(_: tokio::time::error::Elapsed) -> Self {
        RPCError::StreamTimeout
    }
}

/// IO errors are stored by their string representation.
impl From<io::Error> for RPCError {
    fn from(err: io::Error) -> Self {
        RPCError::IoError(err.to_string())
    }
}
|
||||
|
||||
// Error trait is required for `ProtocolsHandler`
impl std::fmt::Display for RPCError {
    /// Human-readable form of the error; the message strings are part of the
    /// observable behaviour (logs) and must not be changed casually.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match *self {
            RPCError::SSZDecodeError(ref err) => write!(f, "Error while decoding ssz: {:?}", err),
            RPCError::InvalidData => write!(f, "Peer sent unexpected data"),
            RPCError::IoError(ref err) => write!(f, "IO Error: {}", err),
            RPCError::ErrorResponse(ref code, ref reason) => write!(
                f,
                "RPC response was an error: {} with reason: {}",
                code, reason
            ),
            RPCError::StreamTimeout => write!(f, "Stream Timeout"),
            RPCError::UnsupportedProtocol => write!(f, "Peer does not support the protocol"),
            RPCError::IncompleteStream => write!(f, "Stream ended unexpectedly"),
            RPCError::InternalError(ref err) => write!(f, "Internal error: {}", err),
            RPCError::NegotiationTimeout => write!(f, "Negotiation timeout"),
            RPCError::HandlerRejected => write!(f, "Handler rejected the request"),
            RPCError::Disconnected => write!(f, "Gracefully Disconnected"),
        }
    }
}
|
||||
|
||||
impl std::error::Error for RPCError {
    /// No variant currently exposes an underlying error source. The variants
    /// are enumerated exhaustively (no catch-all) so a new variant forces a
    /// conscious decision here.
    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
        match *self {
            // NOTE: this does have a source
            RPCError::SSZDecodeError(_) => None,
            RPCError::IoError(_) => None,
            RPCError::StreamTimeout => None,
            RPCError::UnsupportedProtocol => None,
            RPCError::IncompleteStream => None,
            RPCError::InvalidData => None,
            RPCError::InternalError(_) => None,
            RPCError::ErrorResponse(_, _) => None,
            RPCError::NegotiationTimeout => None,
            RPCError::HandlerRejected => None,
            RPCError::Disconnected => None,
        }
    }
}
|
||||
|
||||
impl<TSpec: EthSpec> std::fmt::Display for InboundRequest<TSpec> {
    /// Human-readable form of the request, used in logs.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            InboundRequest::Status(status) => write!(f, "Status Message: {}", status),
            InboundRequest::Goodbye(reason) => write!(f, "Goodbye: {}", reason),
            InboundRequest::BlocksByRange(req) => write!(f, "Blocks by range: {}", req),
            InboundRequest::BlocksByRoot(req) => write!(f, "Blocks by root: {:?}", req),
            InboundRequest::Ping(ping) => write!(f, "Ping: {}", ping.data),
            InboundRequest::MetaData(_) => write!(f, "MetaData request"),
        }
    }
}
|
||||
|
||||
impl RPCError {
|
||||
/// Get a `str` representation of the error.
|
||||
/// Used for metrics.
|
||||
pub fn as_static_str(&self) -> &'static str {
|
||||
match self {
|
||||
RPCError::ErrorResponse(ref code, ..) => code.as_static(),
|
||||
e => e.as_static(),
|
||||
}
|
||||
}
|
||||
}
|
||||
399
beacon_node/lighthouse_network/src/rpc/rate_limiter.rs
Normal file
399
beacon_node/lighthouse_network/src/rpc/rate_limiter.rs
Normal file
@@ -0,0 +1,399 @@
|
||||
use crate::rpc::{InboundRequest, Protocol};
|
||||
use fnv::FnvHashMap;
|
||||
use libp2p::PeerId;
|
||||
use std::convert::TryInto;
|
||||
use std::future::Future;
|
||||
use std::hash::Hash;
|
||||
use std::pin::Pin;
|
||||
use std::task::{Context, Poll};
|
||||
use std::time::{Duration, Instant};
|
||||
use tokio::time::Interval;
|
||||
use types::EthSpec;
|
||||
|
||||
/// Nanoseconds since a given time.
// Maintained as u64 to reduce footprint.
// NOTE: this also implies that the rate limiter will manage checking if a batch is allowed for at
// most <init time> + u64::MAX nanosecs, ~500 years. So it is realistic to assume this is fine.
type Nanosecs = u64;

/// User-friendly rate limiting parameters of the GCRA.
///
/// A quota of `max_tokens` tokens every `replenish_all_every` units of time means that:
/// 1. One token is replenished every `replenish_all_every`/`max_tokens` units of time.
/// 2. Instantaneous bursts (batches) of up to `max_tokens` tokens are allowed.
///
/// The above implies that if `max_tokens` is greater than 1, the perceived rate may be higher (but
/// bounded) than the defined rate when instantaneous bursts occur. For instance, for a rate of
/// 4T/2s a first burst of 4T is allowed with subsequent requests of 1T every 0.5s forever,
/// producing a perceived rate over the window of the first 2s of 8T. However, subsequent sliding
/// windows of 2s keep the limit.
///
/// In this scenario using the same rate as above, the sender is always maxing out their tokens,
/// except at seconds 1.5, 3, 3.5 and 4.
///
/// ```ignore
///            x
///     used   x
///   tokens   x           x           x
///     at a   x  x  x     x  x        x  x
///    given   +--+--+--o--+--+--o--o--o--> seconds
///     time   |  |  |  |  |  |  |  |  |
///            0     1     2     3     4
///
///            4  1  1  1  2  1  1  2  3 <= available tokens when the batch is received
/// ```
///
/// For a sender to request a batch of `n`T, they would need to wait at least
/// n*`replenish_all_every`/`max_tokens` units of time since their last request.
///
/// To produce hard limits, set `max_tokens` to 1.
pub struct Quota {
    /// How often are `max_tokens` fully replenished.
    replenish_all_every: Duration,
    /// Token limit. This translates to how large an instantaneous batch of
    /// tokens can be.
    max_tokens: u64,
}
|
||||
|
||||
/// Manages rate limiting of requests per peer, with differentiated rates per protocol.
pub struct RPCRateLimiter {
    /// Interval to prune peers for which their timer ran out.
    prune_interval: Interval,
    /// Creation time of the rate limiter; all bucket times are relative to this.
    init_time: Instant,
    /// Goodbye rate limiter.
    goodbye_rl: Limiter<PeerId>,
    /// Ping rate limiter.
    ping_rl: Limiter<PeerId>,
    /// MetaData rate limiter.
    metadata_rl: Limiter<PeerId>,
    /// Status rate limiter.
    status_rl: Limiter<PeerId>,
    /// BlocksByRange rate limiter.
    bbrange_rl: Limiter<PeerId>,
    /// BlocksByRoot rate limiter.
    bbroots_rl: Limiter<PeerId>,
}

/// Error type for non conformant requests.
pub enum RateLimitedErr {
    /// Required tokens for this request exceed the maximum (can never be served).
    TooLarge,
    /// Request does not fit in the quota. Gives the earliest time the request could be accepted.
    TooSoon(Duration),
}

/// User-friendly builder of a `RPCRateLimiter`. Every protocol must be given a
/// quota before `build` succeeds.
#[derive(Default)]
pub struct RPCRateLimiterBuilder {
    /// Quota for the Goodbye protocol.
    goodbye_quota: Option<Quota>,
    /// Quota for the Ping protocol.
    ping_quota: Option<Quota>,
    /// Quota for the MetaData protocol.
    metadata_quota: Option<Quota>,
    /// Quota for the Status protocol.
    status_quota: Option<Quota>,
    /// Quota for the BlocksByRange protocol.
    bbrange_quota: Option<Quota>,
    /// Quota for the BlocksByRoot protocol.
    bbroots_quota: Option<Quota>,
}
|
||||
|
||||
impl RPCRateLimiterBuilder {
|
||||
/// Get an empty `RPCRateLimiterBuilder`.
|
||||
pub fn new() -> Self {
|
||||
Default::default()
|
||||
}
|
||||
|
||||
/// Set a quota for a protocol.
|
||||
fn set_quota(mut self, protocol: Protocol, quota: Quota) -> Self {
|
||||
let q = Some(quota);
|
||||
match protocol {
|
||||
Protocol::Ping => self.ping_quota = q,
|
||||
Protocol::Status => self.status_quota = q,
|
||||
Protocol::MetaData => self.metadata_quota = q,
|
||||
Protocol::Goodbye => self.goodbye_quota = q,
|
||||
Protocol::BlocksByRange => self.bbrange_quota = q,
|
||||
Protocol::BlocksByRoot => self.bbroots_quota = q,
|
||||
}
|
||||
self
|
||||
}
|
||||
|
||||
/// Allow one token every `time_period` to be used for this `protocol`.
|
||||
/// This produces a hard limit.
|
||||
pub fn one_every(self, protocol: Protocol, time_period: Duration) -> Self {
|
||||
self.set_quota(
|
||||
protocol,
|
||||
Quota {
|
||||
replenish_all_every: time_period,
|
||||
max_tokens: 1,
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
/// Allow `n` tokens to be use used every `time_period` for this `protocol`.
|
||||
pub fn n_every(self, protocol: Protocol, n: u64, time_period: Duration) -> Self {
|
||||
self.set_quota(
|
||||
protocol,
|
||||
Quota {
|
||||
max_tokens: n,
|
||||
replenish_all_every: time_period,
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
pub fn build(self) -> Result<RPCRateLimiter, &'static str> {
|
||||
// get our quotas
|
||||
let ping_quota = self.ping_quota.ok_or("Ping quota not specified")?;
|
||||
let metadata_quota = self.metadata_quota.ok_or("MetaData quota not specified")?;
|
||||
let status_quota = self.status_quota.ok_or("Status quota not specified")?;
|
||||
let goodbye_quota = self.goodbye_quota.ok_or("Goodbye quota not specified")?;
|
||||
let bbroots_quota = self
|
||||
.bbroots_quota
|
||||
.ok_or("BlocksByRoot quota not specified")?;
|
||||
let bbrange_quota = self
|
||||
.bbrange_quota
|
||||
.ok_or("BlocksByRange quota not specified")?;
|
||||
|
||||
// create the rate limiters
|
||||
let ping_rl = Limiter::from_quota(ping_quota)?;
|
||||
let metadata_rl = Limiter::from_quota(metadata_quota)?;
|
||||
let status_rl = Limiter::from_quota(status_quota)?;
|
||||
let goodbye_rl = Limiter::from_quota(goodbye_quota)?;
|
||||
let bbroots_rl = Limiter::from_quota(bbroots_quota)?;
|
||||
let bbrange_rl = Limiter::from_quota(bbrange_quota)?;
|
||||
|
||||
// check for peers to prune every 30 seconds, starting in 30 seconds
|
||||
let prune_every = tokio::time::Duration::from_secs(30);
|
||||
let prune_start = tokio::time::Instant::now() + prune_every;
|
||||
let prune_interval = tokio::time::interval_at(prune_start, prune_every);
|
||||
Ok(RPCRateLimiter {
|
||||
prune_interval,
|
||||
ping_rl,
|
||||
metadata_rl,
|
||||
status_rl,
|
||||
goodbye_rl,
|
||||
bbroots_rl,
|
||||
bbrange_rl,
|
||||
init_time: Instant::now(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl RPCRateLimiter {
    /// Checks whether `request` from `peer_id` is allowed under the quota of
    /// its protocol, charging one token per expected response (minimum 1).
    pub fn allows<T: EthSpec>(
        &mut self,
        peer_id: &PeerId,
        request: &InboundRequest<T>,
    ) -> Result<(), RateLimitedErr> {
        let time_since_start = self.init_time.elapsed();
        // Even zero-response requests (e.g. Goodbye) cost at least one token.
        let mut tokens = request.expected_responses().max(1);

        // Increase the rate limit for blocks by range requests with large step counts.
        // We count the tokens as a quadratic increase with step size.
        // Using (step_size/5)^2 + 1 as penalty factor allows step sizes of 1-4 to have no penalty
        // but step sizes higher than this add a quadratic penalty.
        // Penalties go:
        // Step size | Penalty Factor
        //     1     |   1
        //     2     |   1
        //     3     |   1
        //     4     |   1
        //     5     |   2
        //     6     |   2
        //     7     |   2
        //     8     |   3
        //     9     |   4
        //    10     |   5

        if let InboundRequest::BlocksByRange(bbr_req) = request {
            let penalty_factor = (bbr_req.step as f64 / 5.0).powi(2) as u64 + 1;
            tokens *= penalty_factor;
        }

        let check =
            |limiter: &mut Limiter<PeerId>| limiter.allows(time_since_start, peer_id, tokens);
        // Dispatch to the per-protocol limiter.
        let limiter = match request.protocol() {
            Protocol::Ping => &mut self.ping_rl,
            Protocol::Status => &mut self.status_rl,
            Protocol::MetaData => &mut self.metadata_rl,
            Protocol::Goodbye => &mut self.goodbye_rl,
            Protocol::BlocksByRange => &mut self.bbrange_rl,
            Protocol::BlocksByRoot => &mut self.bbroots_rl,
        };
        check(limiter)
    }

    /// Drops per-peer state in every limiter for peers whose buckets have
    /// fully replenished.
    pub fn prune(&mut self) {
        let time_since_start = self.init_time.elapsed();
        self.ping_rl.prune(time_since_start);
        self.status_rl.prune(time_since_start);
        self.metadata_rl.prune(time_since_start);
        self.goodbye_rl.prune(time_since_start);
        self.bbrange_rl.prune(time_since_start);
        self.bbroots_rl.prune(time_since_start);
    }
}
|
||||
|
||||
impl Future for RPCRateLimiter {
    type Output = ();

    /// Never resolves. Each poll drains the prune interval ticks, evicting
    /// replenished peers on every tick.
    fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
        while self.prune_interval.poll_tick(cx).is_ready() {
            self.prune();
        }

        Poll::Pending
    }
}
|
||||
|
||||
/// Per key rate limiter using the token bucket / leaky bucket as a meter rate limiting algorithm,
/// with the GCRA implementation.
pub struct Limiter<Key: Hash + Eq + Clone> {
    /// After how long is the bucket considered full via replenishing 1T every `t`.
    tau: Nanosecs,
    /// How often is 1T replenished (the GCRA emission interval).
    t: Nanosecs,
    /// Time when the bucket will be full for each peer. TAT (theoretical arrival time) from GCRA.
    tat_per_key: FnvHashMap<Key, Nanosecs>,
}
|
||||
|
||||
impl<Key: Hash + Eq + Clone> Limiter<Key> {
    /// Builds a limiter from a `Quota`.
    ///
    /// # Errors
    /// Rejects quotas with zero tokens or a zero replenish period (both would
    /// make the GCRA arithmetic meaningless), and periods too long to fit the
    /// internal nanosecond representation.
    pub fn from_quota(quota: Quota) -> Result<Self, &'static str> {
        if quota.max_tokens == 0 {
            return Err("Max number of tokens should be positive");
        }
        let tau = quota.replenish_all_every.as_nanos();
        if tau == 0 {
            return Err("Replenish time must be positive");
        }
        // Emission interval: time to replenish a single token.
        let t = (tau / quota.max_tokens as u128)
            .try_into()
            .map_err(|_| "total replenish time is too long")?;
        let tau = tau
            .try_into()
            .map_err(|_| "total replenish time is too long")?;
        Ok(Limiter {
            tau,
            t,
            tat_per_key: FnvHashMap::default(),
        })
    }

    /// GCRA conformance check: decides whether a batch of `tokens` from `key`
    /// is allowed at `time_since_start`, and on success advances the key's
    /// theoretical arrival time (TAT).
    pub fn allows(
        &mut self,
        time_since_start: Duration,
        key: &Key,
        tokens: u64,
    ) -> Result<(), RateLimitedErr> {
        let time_since_start = time_since_start.as_nanos() as u64;
        let tau = self.tau;
        let t = self.t;
        // how long does it take to replenish these tokens
        let additional_time = t * tokens;
        if additional_time > tau {
            // the time required to process this amount of tokens is longer than the time that
            // makes the bucket full. So, this batch can _never_ be processed
            return Err(RateLimitedErr::TooLarge);
        }
        // If the key is new, we consider their bucket full (which means, their request will be
        // allowed)
        let tat = self
            .tat_per_key
            .entry(key.clone())
            .or_insert(time_since_start);
        // check how soon could the request be made
        let earliest_time = (*tat + additional_time).saturating_sub(tau);
        // earliest_time is in the future
        if time_since_start < earliest_time {
            Err(RateLimitedErr::TooSoon(Duration::from_nanos(
                /* time they need to wait, i.e. how soon were they */
                earliest_time - time_since_start,
            )))
        } else {
            // calculate the new TAT
            *tat = time_since_start.max(*tat) + additional_time;
            Ok(())
        }
    }

    /// Removes keys for which their bucket is full by `time_limit`
    pub fn prune(&mut self, time_limit: Duration) {
        let lim = &mut (time_limit.as_nanos() as u64);
        // remove those for which tat < lim
        self.tat_per_key.retain(|_k, tat| tat >= lim)
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use crate::rpc::rate_limiter::{Limiter, Quota};
    use std::time::Duration;

    // Burst behaviour: a full 4-token batch drains the bucket; single tokens
    // become available again every 0.5s.
    #[test]
    fn it_works_a() {
        let mut limiter = Limiter::from_quota(Quota {
            replenish_all_every: Duration::from_secs(2),
            max_tokens: 4,
        })
        .unwrap();
        let key = 10;
        //        x
        //  used  x
        // tokens x  x
        //        x  x  x  x
        //        +--+--+--+----> seconds
        //        |  |  |  |  |
        //        0     1     2

        assert!(limiter
            .allows(Duration::from_secs_f32(0.0), &key, 4)
            .is_ok());
        // Pruning at 0.1s must not reset the key's state (its bucket is not full).
        limiter.prune(Duration::from_secs_f32(0.1));
        assert!(limiter
            .allows(Duration::from_secs_f32(0.1), &key, 1)
            .is_err());
        assert!(limiter
            .allows(Duration::from_secs_f32(0.5), &key, 1)
            .is_ok());
        assert!(limiter
            .allows(Duration::from_secs_f32(1.0), &key, 1)
            .is_ok());
        assert!(limiter
            .allows(Duration::from_secs_f32(1.4), &key, 1)
            .is_err());
        assert!(limiter
            .allows(Duration::from_secs_f32(2.0), &key, 2)
            .is_ok());
    }

    // Steady single-token usage: the burst capacity allows four quick 1T
    // requests, then a fifth is rejected until a token replenishes.
    #[test]
    fn it_works_b() {
        let mut limiter = Limiter::from_quota(Quota {
            replenish_all_every: Duration::from_secs(2),
            max_tokens: 4,
        })
        .unwrap();
        let key = 10;
        // if we limit to 4T per 2s, check that 4 requests worth 1 token can be sent before the
        // first half second, when one token will be available again. Check also that before
        // regaining a token, another request is rejected

        assert!(limiter
            .allows(Duration::from_secs_f32(0.0), &key, 1)
            .is_ok());
        assert!(limiter
            .allows(Duration::from_secs_f32(0.1), &key, 1)
            .is_ok());
        assert!(limiter
            .allows(Duration::from_secs_f32(0.2), &key, 1)
            .is_ok());
        assert!(limiter
            .allows(Duration::from_secs_f32(0.3), &key, 1)
            .is_ok());
        assert!(limiter
            .allows(Duration::from_secs_f32(0.4), &key, 1)
            .is_err());
    }
}
|
||||
608
beacon_node/lighthouse_network/src/service.rs
Normal file
608
beacon_node/lighthouse_network/src/service.rs
Normal file
@@ -0,0 +1,608 @@
|
||||
use crate::behaviour::{
|
||||
save_metadata_to_disk, Behaviour, BehaviourEvent, PeerRequestId, Request, Response,
|
||||
};
|
||||
use crate::discovery::enr;
|
||||
use crate::multiaddr::Protocol;
|
||||
use crate::rpc::{
|
||||
GoodbyeReason, MetaData, MetaDataV1, MetaDataV2, RPCResponseErrorCode, RequestId,
|
||||
};
|
||||
use crate::types::{error, EnrAttestationBitfield, EnrSyncCommitteeBitfield, GossipKind};
|
||||
use crate::EnrExt;
|
||||
use crate::{NetworkConfig, NetworkGlobals, PeerAction, ReportSource};
|
||||
use futures::prelude::*;
|
||||
use libp2p::core::{
|
||||
connection::ConnectionLimits, identity::Keypair, multiaddr::Multiaddr, muxing::StreamMuxerBox,
|
||||
transport::Boxed,
|
||||
};
|
||||
use libp2p::{
|
||||
bandwidth::{BandwidthLogging, BandwidthSinks},
|
||||
core, noise,
|
||||
swarm::{SwarmBuilder, SwarmEvent},
|
||||
PeerId, Swarm, Transport,
|
||||
};
|
||||
use slog::{crit, debug, info, o, trace, warn, Logger};
|
||||
use ssz::Decode;
|
||||
use std::fs::File;
|
||||
use std::io::prelude::*;
|
||||
use std::pin::Pin;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
use types::{ChainSpec, EnrForkId, EthSpec, ForkContext};
|
||||
|
||||
use crate::peer_manager::{MIN_OUTBOUND_ONLY_FACTOR, PEER_EXCESS_FACTOR, PRIORITY_PEER_EXCESS};
|
||||
|
||||
/// The filename under which the node's libp2p network key is persisted.
pub const NETWORK_KEY_FILENAME: &str = "key";
/// The maximum simultaneous libp2p connections per peer.
const MAX_CONNECTIONS_PER_PEER: u32 = 1;
/// The filename to store our local metadata.
pub const METADATA_FILENAME: &str = "metadata";

/// The types of events than can be obtained from polling the libp2p service.
///
/// This is a subset of the events that a libp2p swarm emits.
#[derive(Debug)]
pub enum Libp2pEvent<TSpec: EthSpec> {
    /// A behaviour event
    Behaviour(BehaviourEvent<TSpec>),
    /// A new listening address has been established.
    NewListenAddr(Multiaddr),
    /// We reached zero listening addresses.
    ZeroListeners,
}

/// The configuration and state of the libp2p components for the beacon node.
pub struct Service<TSpec: EthSpec> {
    /// The libp2p Swarm handler.
    pub swarm: Swarm<Behaviour<TSpec>>,
    /// The bandwidth logger for the underlying libp2p transport.
    pub bandwidth: Arc<BandwidthSinks>,
    /// This node's PeerId.
    pub local_peer_id: PeerId,
    /// The libp2p logger handle.
    pub log: Logger,
}
|
||||
|
||||
impl<TSpec: EthSpec> Service<TSpec> {
|
||||
pub async fn new(
|
||||
executor: task_executor::TaskExecutor,
|
||||
config: &NetworkConfig,
|
||||
enr_fork_id: EnrForkId,
|
||||
log: &Logger,
|
||||
fork_context: Arc<ForkContext>,
|
||||
chain_spec: &ChainSpec,
|
||||
) -> error::Result<(Arc<NetworkGlobals<TSpec>>, Self)> {
|
||||
let log = log.new(o!("service"=> "libp2p"));
|
||||
trace!(log, "Libp2p Service starting");
|
||||
|
||||
// initialise the node's ID
|
||||
let local_keypair = load_private_key(config, &log);
|
||||
|
||||
// Create an ENR or load from disk if appropriate
|
||||
let enr =
|
||||
enr::build_or_load_enr::<TSpec>(local_keypair.clone(), config, enr_fork_id, &log)?;
|
||||
|
||||
let local_peer_id = enr.peer_id();
|
||||
|
||||
let meta_data = load_or_build_metadata(&config.network_dir, &log);
|
||||
|
||||
// set up a collection of variables accessible outside of the network crate
|
||||
let network_globals = Arc::new(NetworkGlobals::new(
|
||||
enr.clone(),
|
||||
config.libp2p_port,
|
||||
config.discovery_port,
|
||||
meta_data,
|
||||
config
|
||||
.trusted_peers
|
||||
.iter()
|
||||
.map(|x| PeerId::from(x.clone()))
|
||||
.collect(),
|
||||
&log,
|
||||
));
|
||||
|
||||
info!(log, "Libp2p Service"; "peer_id" => %enr.peer_id());
|
||||
let discovery_string = if config.disable_discovery {
|
||||
"None".into()
|
||||
} else {
|
||||
config.discovery_port.to_string()
|
||||
};
|
||||
debug!(log, "Attempting to open listening ports"; "address" => ?config.listen_address, "tcp_port" => config.libp2p_port, "udp_port" => discovery_string);
|
||||
|
||||
let (mut swarm, bandwidth) = {
|
||||
// Set up the transport - tcp/ws with noise and mplex
|
||||
let (transport, bandwidth) = build_transport(local_keypair.clone())
|
||||
.map_err(|e| format!("Failed to build transport: {:?}", e))?;
|
||||
|
||||
// Lighthouse network behaviour
|
||||
let behaviour = Behaviour::new(
|
||||
&local_keypair,
|
||||
config.clone(),
|
||||
network_globals.clone(),
|
||||
&log,
|
||||
fork_context,
|
||||
chain_spec,
|
||||
)
|
||||
.await?;
|
||||
|
||||
// use the executor for libp2p
|
||||
struct Executor(task_executor::TaskExecutor);
|
||||
impl libp2p::core::Executor for Executor {
|
||||
fn exec(&self, f: Pin<Box<dyn Future<Output = ()> + Send>>) {
|
||||
self.0.spawn(f, "libp2p");
|
||||
}
|
||||
}
|
||||
|
||||
// sets up the libp2p connection limits
|
||||
let limits = ConnectionLimits::default()
|
||||
.with_max_pending_incoming(Some(5))
|
||||
.with_max_pending_outgoing(Some(16))
|
||||
.with_max_established_incoming(Some(
|
||||
(config.target_peers as f32
|
||||
* (1.0 + PEER_EXCESS_FACTOR - MIN_OUTBOUND_ONLY_FACTOR))
|
||||
.ceil() as u32,
|
||||
))
|
||||
.with_max_established_outgoing(Some(
|
||||
(config.target_peers as f32 * (1.0 + PEER_EXCESS_FACTOR)).ceil() as u32,
|
||||
))
|
||||
.with_max_established(Some(
|
||||
(config.target_peers as f32 * (1.0 + PEER_EXCESS_FACTOR + PRIORITY_PEER_EXCESS))
|
||||
.ceil() as u32,
|
||||
))
|
||||
.with_max_established_per_peer(Some(MAX_CONNECTIONS_PER_PEER));
|
||||
|
||||
(
|
||||
SwarmBuilder::new(transport, behaviour, local_peer_id)
|
||||
.notify_handler_buffer_size(std::num::NonZeroUsize::new(7).expect("Not zero"))
|
||||
.connection_event_buffer_size(64)
|
||||
.connection_limits(limits)
|
||||
.executor(Box::new(Executor(executor)))
|
||||
.build(),
|
||||
bandwidth,
|
||||
)
|
||||
};
|
||||
|
||||
// listen on the specified address
|
||||
let listen_multiaddr = {
|
||||
let mut m = Multiaddr::from(config.listen_address);
|
||||
m.push(Protocol::Tcp(config.libp2p_port));
|
||||
m
|
||||
};
|
||||
|
||||
match Swarm::listen_on(&mut swarm, listen_multiaddr.clone()) {
|
||||
Ok(_) => {
|
||||
let mut log_address = listen_multiaddr;
|
||||
log_address.push(Protocol::P2p(local_peer_id.into()));
|
||||
info!(log, "Listening established"; "address" => %log_address);
|
||||
}
|
||||
Err(err) => {
|
||||
crit!(
|
||||
log,
|
||||
"Unable to listen on libp2p address";
|
||||
"error" => ?err,
|
||||
"listen_multiaddr" => %listen_multiaddr,
|
||||
);
|
||||
return Err("Libp2p was unable to listen on the given listen address.".into());
|
||||
}
|
||||
};
|
||||
|
||||
// helper closure for dialing peers
|
||||
let mut dial_addr = |mut multiaddr: Multiaddr| {
|
||||
// strip the p2p protocol if it exists
|
||||
strip_peer_id(&mut multiaddr);
|
||||
match Swarm::dial_addr(&mut swarm, multiaddr.clone()) {
|
||||
Ok(()) => debug!(log, "Dialing libp2p peer"; "address" => %multiaddr),
|
||||
Err(err) => debug!(
|
||||
log,
|
||||
"Could not connect to peer"; "address" => %multiaddr, "error" => ?err
|
||||
),
|
||||
};
|
||||
};
|
||||
|
||||
// attempt to connect to user-input libp2p nodes
|
||||
for multiaddr in &config.libp2p_nodes {
|
||||
dial_addr(multiaddr.clone());
|
||||
}
|
||||
|
||||
// attempt to connect to any specified boot-nodes
|
||||
let mut boot_nodes = config.boot_nodes_enr.clone();
|
||||
boot_nodes.dedup();
|
||||
|
||||
for bootnode_enr in boot_nodes {
|
||||
for multiaddr in &bootnode_enr.multiaddr() {
|
||||
// ignore udp multiaddr if it exists
|
||||
let components = multiaddr.iter().collect::<Vec<_>>();
|
||||
if let Protocol::Udp(_) = components[1] {
|
||||
continue;
|
||||
}
|
||||
|
||||
if !network_globals
|
||||
.peers
|
||||
.read()
|
||||
.is_connected_or_dialing(&bootnode_enr.peer_id())
|
||||
{
|
||||
dial_addr(multiaddr.clone());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for multiaddr in &config.boot_nodes_multiaddr {
|
||||
// check TCP support for dialing
|
||||
if multiaddr
|
||||
.iter()
|
||||
.any(|proto| matches!(proto, Protocol::Tcp(_)))
|
||||
{
|
||||
dial_addr(multiaddr.clone());
|
||||
}
|
||||
}
|
||||
|
||||
let mut subscribed_topics: Vec<GossipKind> = vec![];
|
||||
|
||||
for topic_kind in &config.topics {
|
||||
if swarm.behaviour_mut().subscribe_kind(topic_kind.clone()) {
|
||||
subscribed_topics.push(topic_kind.clone());
|
||||
} else {
|
||||
warn!(log, "Could not subscribe to topic"; "topic" => %topic_kind);
|
||||
}
|
||||
}
|
||||
|
||||
if !subscribed_topics.is_empty() {
|
||||
info!(log, "Subscribed to topics"; "topics" => ?subscribed_topics);
|
||||
}
|
||||
|
||||
let service = Service {
|
||||
swarm,
|
||||
bandwidth,
|
||||
local_peer_id,
|
||||
log,
|
||||
};
|
||||
|
||||
Ok((network_globals, service))
|
||||
}
|
||||
|
||||
/// Sends a request to a peer, with a given Id.
|
||||
pub fn send_request(&mut self, peer_id: PeerId, request_id: RequestId, request: Request) {
|
||||
self.swarm
|
||||
.behaviour_mut()
|
||||
.send_request(peer_id, request_id, request);
|
||||
}
|
||||
|
||||
/// Informs the peer that their request failed.
|
||||
pub fn respond_with_error(
|
||||
&mut self,
|
||||
peer_id: PeerId,
|
||||
id: PeerRequestId,
|
||||
error: RPCResponseErrorCode,
|
||||
reason: String,
|
||||
) {
|
||||
self.swarm
|
||||
.behaviour_mut()
|
||||
.send_error_reponse(peer_id, id, error, reason);
|
||||
}
|
||||
|
||||
/// Report a peer's action.
|
||||
pub fn report_peer(&mut self, peer_id: &PeerId, action: PeerAction, source: ReportSource) {
|
||||
self.swarm
|
||||
.behaviour_mut()
|
||||
.peer_manager_mut()
|
||||
.report_peer(peer_id, action, source, None);
|
||||
}
|
||||
|
||||
/// Disconnect and ban a peer, providing a reason.
|
||||
pub fn goodbye_peer(&mut self, peer_id: &PeerId, reason: GoodbyeReason, source: ReportSource) {
|
||||
self.swarm
|
||||
.behaviour_mut()
|
||||
.goodbye_peer(peer_id, reason, source);
|
||||
}
|
||||
|
||||
/// Sends a response to a peer's request.
|
||||
pub fn send_response(&mut self, peer_id: PeerId, id: PeerRequestId, response: Response<TSpec>) {
|
||||
self.swarm
|
||||
.behaviour_mut()
|
||||
.send_successful_response(peer_id, id, response);
|
||||
}
|
||||
|
||||
/// Polls the swarm until an event worth surfacing to the caller occurs.
///
/// Swarm-level connection events are handled inline (fed to the peer
/// manager, or used to (un)ban peers); only `Behaviour`, `NewListenAddr`
/// and `ZeroListeners` events cause this function to return.
pub async fn next_event(&mut self) -> Libp2pEvent<TSpec> {
    loop {
        match self.swarm.select_next_some().await {
            SwarmEvent::Behaviour(behaviour) => {
                // Handle banning here: mirror behaviour-level ban decisions
                // into the swarm so it refuses connections at transport level.
                match &behaviour {
                    BehaviourEvent::PeerBanned(peer_id) => {
                        self.swarm.ban_peer_id(*peer_id);
                    }
                    BehaviourEvent::PeerUnbanned(peer_id) => {
                        self.swarm.unban_peer_id(*peer_id);
                    }
                    _ => {}
                }
                return Libp2pEvent::Behaviour(behaviour);
            }
            SwarmEvent::ConnectionEstablished {
                peer_id,
                endpoint,
                num_established,
            } => {
                // Inform the peer manager.
                // We require the ENR to inject into the peer db, if it exists.
                let enr = self
                    .swarm
                    .behaviour_mut()
                    .discovery_mut()
                    .enr_of_peer(&peer_id);
                self.swarm
                    .behaviour_mut()
                    .peer_manager_mut()
                    .inject_connection_established(peer_id, endpoint, num_established, enr);
            }
            SwarmEvent::ConnectionClosed {
                peer_id,
                cause: _,
                endpoint,
                num_established,
            } => {
                // Inform the peer manager.
                self.swarm
                    .behaviour_mut()
                    .peer_manager_mut()
                    .inject_connection_closed(peer_id, endpoint, num_established);
            }
            SwarmEvent::NewListenAddr { address, .. } => {
                return Libp2pEvent::NewListenAddr(address)
            }
            SwarmEvent::IncomingConnection {
                local_addr,
                send_back_addr,
            } => {
                trace!(self.log, "Incoming connection"; "our_addr" => %local_addr, "from" => %send_back_addr)
            }
            SwarmEvent::IncomingConnectionError {
                local_addr,
                send_back_addr,
                error,
            } => {
                debug!(self.log, "Failed incoming connection"; "our_addr" => %local_addr, "from" => %send_back_addr, "error" => %error);
            }
            SwarmEvent::BannedPeer { peer_id, .. } => {
                debug!(self.log, "Banned peer connection rejected"; "peer_id" => %peer_id);
            }
            SwarmEvent::UnreachableAddr {
                peer_id,
                address,
                error,
                attempts_remaining,
            } => {
                debug!(self.log, "Failed to dial address"; "peer_id" => %peer_id, "address" => %address, "error" => %error, "attempts_remaining" => attempts_remaining);
                // Let the peer manager update the peer's connection status.
                self.swarm
                    .behaviour_mut()
                    .peer_manager_mut()
                    .inject_dial_failure(&peer_id);
            }
            SwarmEvent::UnknownPeerUnreachableAddr { address, error } => {
                debug!(self.log, "Peer not known at dialed address"; "address" => %address, "error" => %error);
            }
            SwarmEvent::ExpiredListenAddr { address, .. } => {
                debug!(self.log, "Listen address expired"; "address" => %address)
            }
            SwarmEvent::ListenerClosed {
                addresses, reason, ..
            } => {
                crit!(self.log, "Listener closed"; "addresses" => ?addresses, "reason" => ?reason);
                // With no listeners left the node is unreachable; surface it.
                if Swarm::listeners(&self.swarm).count() == 0 {
                    return Libp2pEvent::ZeroListeners;
                }
            }
            SwarmEvent::ListenerError { error, .. } => {
                // this is non fatal, but we still check
                warn!(self.log, "Listener error"; "error" => ?error);
                if Swarm::listeners(&self.swarm).count() == 0 {
                    return Libp2pEvent::ZeroListeners;
                }
            }
            SwarmEvent::Dialing(peer_id) => {
                // We require the ENR to inject into the peer db, if it exists.
                let enr = self
                    .swarm
                    .behaviour_mut()
                    .discovery_mut()
                    .enr_of_peer(&peer_id);
                self.swarm
                    .behaviour_mut()
                    .peer_manager_mut()
                    .inject_dialing(&peer_id, enr);
            }
        }
    }
}
|
||||
}
|
||||
|
||||
// A type-erased transport yielding (peer id, muxed connection) pairs.
type BoxedTransport = Boxed<(PeerId, StreamMuxerBox)>;

/// The implementation supports TCP/IP, WebSockets over TCP/IP, noise as the encryption layer, and
/// mplex as the multiplexing layer.
///
/// Returns the boxed transport together with a handle to its bandwidth
/// counters (wrapped in by `BandwidthLogging`).
fn build_transport(
    local_private_key: Keypair,
) -> std::io::Result<(BoxedTransport, Arc<BandwidthSinks>)> {
    // Base transport: TCP (tokio) with Nagle's algorithm disabled, plus
    // system DNS resolution.
    let tcp = libp2p::tcp::TokioTcpConfig::new().nodelay(true);
    let transport = libp2p::dns::TokioDnsConfig::system(tcp)?;
    // Optionally layer WebSockets on top of the TCP/DNS transport.
    #[cfg(feature = "libp2p-websocket")]
    let transport = {
        let trans_clone = transport.clone();
        transport.or_transport(libp2p::websocket::WsConfig::new(trans_clone))
    };

    // Wrap the transport so all traffic is metered.
    let (transport, bandwidth) = BandwidthLogging::new(transport);

    // mplex config
    let mut mplex_config = libp2p::mplex::MplexConfig::new();
    mplex_config.set_max_buffer_size(256);
    mplex_config.set_max_buffer_behaviour(libp2p::mplex::MaxBufferBehaviour::Block);

    // yamux config
    let mut yamux_config = libp2p::yamux::YamuxConfig::default();
    yamux_config.set_window_update_mode(libp2p::yamux::WindowUpdateMode::on_read());

    // Authentication
    Ok((
        transport
            .upgrade(core::upgrade::Version::V1)
            .authenticate(generate_noise_config(&local_private_key))
            // Prefer yamux; fall back to mplex if the remote does not
            // support it.
            .multiplex(core::upgrade::SelectUpgrade::new(
                yamux_config,
                mplex_config,
            ))
            .timeout(Duration::from_secs(10))
            .boxed(),
        bandwidth,
    ))
}
|
||||
|
||||
// Useful helper functions for debugging. Currently not used in the client.
|
||||
#[allow(dead_code)]
|
||||
fn keypair_from_hex(hex_bytes: &str) -> error::Result<Keypair> {
|
||||
let hex_bytes = if let Some(stripped) = hex_bytes.strip_prefix("0x") {
|
||||
stripped.to_string()
|
||||
} else {
|
||||
hex_bytes.to_string()
|
||||
};
|
||||
|
||||
hex::decode(&hex_bytes)
|
||||
.map_err(|e| format!("Failed to parse p2p secret key bytes: {:?}", e).into())
|
||||
.and_then(keypair_from_bytes)
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
fn keypair_from_bytes(mut bytes: Vec<u8>) -> error::Result<Keypair> {
|
||||
libp2p::core::identity::secp256k1::SecretKey::from_bytes(&mut bytes)
|
||||
.map(|secret| {
|
||||
let keypair: libp2p::core::identity::secp256k1::Keypair = secret.into();
|
||||
Keypair::Secp256k1(keypair)
|
||||
})
|
||||
.map_err(|e| format!("Unable to parse p2p secret key: {:?}", e).into())
|
||||
}
|
||||
|
||||
/// Loads a private key from disk. If this fails, a new key is
/// generated and is then saved to disk.
///
/// Currently only secp256k1 keys are allowed, as these are the only keys supported by discv5.
pub fn load_private_key(config: &NetworkConfig, log: &slog::Logger) -> Keypair {
    // check for key from disk
    let network_key_f = config.network_dir.join(NETWORK_KEY_FILENAME);
    if let Ok(mut network_key_file) = File::open(network_key_f.clone()) {
        let mut key_bytes: Vec<u8> = Vec::with_capacity(36);
        match network_key_file.read_to_end(&mut key_bytes) {
            Err(_) => debug!(log, "Could not read network key file"),
            Ok(_) => {
                // only accept secp256k1 keys for now
                if let Ok(secret_key) =
                    libp2p::core::identity::secp256k1::SecretKey::from_bytes(&mut key_bytes)
                {
                    let kp: libp2p::core::identity::secp256k1::Keypair = secret_key.into();
                    debug!(log, "Loaded network key from disk.");
                    return Keypair::Secp256k1(kp);
                } else {
                    debug!(log, "Network key file is not a valid secp256k1 key");
                }
            }
        }
    }

    // if a key could not be loaded from disk, generate a new one and save it
    let local_private_key = Keypair::generate_secp256k1();
    if let Keypair::Secp256k1(key) = local_private_key.clone() {
        // Best effort: failing to persist the key is logged but not fatal —
        // the node simply gets a fresh identity on the next restart.
        let _ = std::fs::create_dir_all(&config.network_dir);
        match File::create(network_key_f.clone())
            .and_then(|mut f| f.write_all(&key.secret().to_bytes()))
        {
            Ok(_) => {
                debug!(log, "New network key generated and written to disk");
            }
            Err(e) => {
                warn!(
                    log,
                    "Could not write node key to file: {:?}. error: {}", network_key_f, e
                );
            }
        }
    }
    local_private_key
}
|
||||
|
||||
/// Generate authenticated XX Noise config from identity keys
|
||||
fn generate_noise_config(
|
||||
identity_keypair: &Keypair,
|
||||
) -> noise::NoiseAuthenticated<noise::XX, noise::X25519Spec, ()> {
|
||||
let static_dh_keys = noise::Keypair::<noise::X25519Spec>::new()
|
||||
.into_authentic(identity_keypair)
|
||||
.expect("signing can fail only once during starting a node");
|
||||
noise::NoiseConfig::xx(static_dh_keys).into_authenticated()
|
||||
}
|
||||
|
||||
/// For a multiaddr that ends with a peer id, this strips this suffix. Rust-libp2p
|
||||
/// only supports dialing to an address without providing the peer id.
|
||||
fn strip_peer_id(addr: &mut Multiaddr) {
|
||||
let last = addr.pop();
|
||||
match last {
|
||||
Some(Protocol::P2p(_)) => {}
|
||||
Some(other) => addr.push(other),
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
|
||||
/// Load metadata from persisted file. Return default metadata if loading fails.
///
/// The sequence number is carried over from the persisted metadata (and
/// bumped where the persisted subnets differ from the defaults, or when
/// upgrading from a V1 file) so peers see a monotonically advancing value.
fn load_or_build_metadata<E: EthSpec>(
    network_dir: &std::path::Path,
    log: &slog::Logger,
) -> MetaData<E> {
    // We load a V2 metadata version by default (regardless of current fork)
    // since a V2 metadata can be converted to V1. The RPC encoder is responsible
    // for sending the correct metadata version based on the negotiated protocol version.
    let mut meta_data = MetaDataV2 {
        seq_number: 0,
        attnets: EnrAttestationBitfield::<E>::default(),
        syncnets: EnrSyncCommitteeBitfield::<E>::default(),
    };
    // Read metadata from persisted file if available
    let metadata_path = network_dir.join(METADATA_FILENAME);
    if let Ok(mut metadata_file) = File::open(metadata_path) {
        let mut metadata_ssz = Vec::new();
        if metadata_file.read_to_end(&mut metadata_ssz).is_ok() {
            // Attempt to read a MetaDataV2 version from the persisted file,
            // if that fails, read MetaDataV1
            match MetaDataV2::<E>::from_ssz_bytes(&metadata_ssz) {
                Ok(persisted_metadata) => {
                    meta_data.seq_number = persisted_metadata.seq_number;
                    // Increment seq number if persisted attnet is not default
                    if persisted_metadata.attnets != meta_data.attnets
                        || persisted_metadata.syncnets != meta_data.syncnets
                    {
                        meta_data.seq_number += 1;
                    }
                    debug!(log, "Loaded metadata from disk");
                }
                Err(_) => {
                    // Fall back to the legacy V1 encoding.
                    match MetaDataV1::<E>::from_ssz_bytes(&metadata_ssz) {
                        Ok(persisted_metadata) => {
                            let persisted_metadata = MetaData::V1(persisted_metadata);
                            // Increment seq number as the persisted metadata version is updated
                            meta_data.seq_number = *persisted_metadata.seq_number() + 1;
                            debug!(log, "Loaded metadata from disk");
                        }
                        Err(e) => {
                            // Undecodable file: keep the fresh default metadata.
                            debug!(
                                log,
                                "Metadata from file could not be decoded";
                                "error" => ?e,
                            );
                        }
                    }
                }
            }
        }
    };

    // Wrap the MetaData
    let meta_data = MetaData::V2(meta_data);

    debug!(log, "Metadata sequence number"; "seq_num" => meta_data.seq_number());
    // Persist (possibly updated) metadata so the seq number survives restarts.
    save_metadata_to_disk(network_dir, meta_data.clone(), log);
    meta_data
}
|
||||
5
beacon_node/lighthouse_network/src/types/error.rs
Normal file
5
beacon_node/lighthouse_network/src/types/error.rs
Normal file
@@ -0,0 +1,5 @@
|
||||
// Generates the error types used throughout this crate.

use error_chain::error_chain;

// An empty `error_chain!` invocation generates the standard `Error`,
// `ErrorKind`, `Result` and `ResultExt` items with no custom variants.
error_chain! {}
|
||||
130
beacon_node/lighthouse_network/src/types/globals.rs
Normal file
130
beacon_node/lighthouse_network/src/types/globals.rs
Normal file
@@ -0,0 +1,130 @@
|
||||
//! A collection of variables that are accessible outside of the network thread itself.
|
||||
use crate::peer_manager::peerdb::PeerDB;
|
||||
use crate::rpc::MetaData;
|
||||
use crate::types::{BackFillState, SyncState};
|
||||
use crate::Client;
|
||||
use crate::EnrExt;
|
||||
use crate::{Enr, GossipTopic, Multiaddr, PeerId};
|
||||
use parking_lot::RwLock;
|
||||
use std::collections::HashSet;
|
||||
use std::sync::atomic::{AtomicU16, Ordering};
|
||||
use types::EthSpec;
|
||||
|
||||
/// Network state shared between the network thread and the rest of the
/// client; all fields are wrapped in locks/atomics for concurrent access.
pub struct NetworkGlobals<TSpec: EthSpec> {
    /// The current local ENR.
    pub local_enr: RwLock<Enr>,
    /// The local peer_id.
    pub peer_id: RwLock<PeerId>,
    /// Listening multiaddrs.
    pub listen_multiaddrs: RwLock<Vec<Multiaddr>>,
    /// The TCP port that the libp2p service is listening on
    pub listen_port_tcp: AtomicU16,
    /// The UDP port that the discovery service is listening on
    pub listen_port_udp: AtomicU16,
    /// The collection of known peers.
    pub peers: RwLock<PeerDB<TSpec>>,
    /// The local meta data of our node.
    pub local_metadata: RwLock<MetaData<TSpec>>,
    /// The current gossipsub topic subscriptions.
    pub gossipsub_subscriptions: RwLock<HashSet<GossipTopic>>,
    /// The current sync status of the node.
    pub sync_state: RwLock<SyncState>,
    /// The current state of the backfill sync.
    pub backfill_state: RwLock<BackFillState>,
}
|
||||
|
||||
impl<TSpec: EthSpec> NetworkGlobals<TSpec> {
    /// Creates a new `NetworkGlobals` from the node's ENR, configured ports,
    /// local metadata and any trusted peers. Sync state starts as `Stalled`
    /// and backfill as `NotRequired`; listen addresses start empty.
    pub fn new(
        enr: Enr,
        tcp_port: u16,
        udp_port: u16,
        local_metadata: MetaData<TSpec>,
        trusted_peers: Vec<PeerId>,
        log: &slog::Logger,
    ) -> Self {
        NetworkGlobals {
            local_enr: RwLock::new(enr.clone()),
            peer_id: RwLock::new(enr.peer_id()),
            listen_multiaddrs: RwLock::new(Vec::new()),
            listen_port_tcp: AtomicU16::new(tcp_port),
            listen_port_udp: AtomicU16::new(udp_port),
            local_metadata: RwLock::new(local_metadata),
            peers: RwLock::new(PeerDB::new(trusted_peers, log)),
            gossipsub_subscriptions: RwLock::new(HashSet::new()),
            sync_state: RwLock::new(SyncState::Stalled),
            backfill_state: RwLock::new(BackFillState::NotRequired),
        }
    }

    /// Returns the local ENR from the underlying Discv5 behaviour that external peers may connect
    /// to.
    pub fn local_enr(&self) -> Enr {
        self.local_enr.read().clone()
    }

    /// Returns the local libp2p PeerID.
    pub fn local_peer_id(&self) -> PeerId {
        *self.peer_id.read()
    }

    /// Returns the list of `Multiaddr` that the underlying libp2p instance is listening on.
    pub fn listen_multiaddrs(&self) -> Vec<Multiaddr> {
        self.listen_multiaddrs.read().clone()
    }

    /// Returns the libp2p TCP port that this node has been configured to listen on.
    pub fn listen_port_tcp(&self) -> u16 {
        self.listen_port_tcp.load(Ordering::Relaxed)
    }

    /// Returns the UDP discovery port that this node has been configured to listen on.
    pub fn listen_port_udp(&self) -> u16 {
        self.listen_port_udp.load(Ordering::Relaxed)
    }

    /// Returns the number of libp2p connected peers.
    pub fn connected_peers(&self) -> usize {
        self.peers.read().connected_peer_ids().count()
    }

    /// Returns the number of libp2p connected peers with outbound-only connections.
    pub fn connected_outbound_only_peers(&self) -> usize {
        self.peers.read().connected_outbound_only_peers().count()
    }

    /// Returns the number of libp2p peers that are either connected or being dialed.
    pub fn connected_or_dialing_peers(&self) -> usize {
        self.peers.read().connected_or_dialing_peers().count()
    }

    /// Returns `true` if the node is currently syncing.
    pub fn is_syncing(&self) -> bool {
        self.sync_state.read().is_syncing()
    }

    /// Returns the current sync state of the node.
    pub fn sync_state(&self) -> SyncState {
        self.sync_state.read().clone()
    }

    /// Returns the current backfill state.
    pub fn backfill_state(&self) -> BackFillState {
        self.backfill_state.read().clone()
    }

    /// Returns a `Client` type if one is known for the `PeerId`.
    pub fn client(&self, peer_id: &PeerId) -> Client {
        self.peers
            .read()
            .peer_info(peer_id)
            .map(|info| info.client().clone())
            .unwrap_or_default()
    }

    /// Updates the syncing state of the node.
    ///
    /// The old state is returned
    pub fn set_sync_state(&self, new_state: SyncState) -> SyncState {
        std::mem::replace(&mut *self.sync_state.write(), new_state)
    }
}
|
||||
19
beacon_node/lighthouse_network/src/types/mod.rs
Normal file
19
beacon_node/lighthouse_network/src/types/mod.rs
Normal file
@@ -0,0 +1,19 @@
|
||||
pub mod error;
mod globals;
mod pubsub;
mod subnet;
mod sync_state;
mod topics;

use types::{BitVector, EthSpec};

/// Bitfield of attestation subnets advertised in the ENR.
pub type EnrAttestationBitfield<T> = BitVector<<T as EthSpec>::SubnetBitfieldLength>;
/// Bitfield of sync-committee subnets advertised in the ENR.
pub type EnrSyncCommitteeBitfield<T> = BitVector<<T as EthSpec>::SyncCommitteeSubnetCount>;

/// The ENR type used throughout the crate (discv5's combined-key ENR).
pub type Enr = discv5::enr::Enr<discv5::enr::CombinedKey>;

// Public re-exports so consumers use `crate::types::*` rather than submodules.
pub use globals::NetworkGlobals;
pub use pubsub::{PubsubMessage, SnappyTransform};
pub use subnet::{Subnet, SubnetDiscovery};
pub use sync_state::{BackFillState, SyncState};
pub use topics::{subnet_from_topic_hash, GossipEncoding, GossipKind, GossipTopic, CORE_TOPICS};
|
||||
260
beacon_node/lighthouse_network/src/types/pubsub.rs
Normal file
260
beacon_node/lighthouse_network/src/types/pubsub.rs
Normal file
@@ -0,0 +1,260 @@
|
||||
//! Handles the encoding and decoding of pubsub messages.
|
||||
|
||||
use crate::types::{GossipEncoding, GossipKind, GossipTopic};
|
||||
use crate::TopicHash;
|
||||
use libp2p::gossipsub::{DataTransform, GossipsubMessage, RawGossipsubMessage};
|
||||
use snap::raw::{decompress_len, Decoder, Encoder};
|
||||
use ssz::{Decode, Encode};
|
||||
use std::boxed::Box;
|
||||
use std::io::{Error, ErrorKind};
|
||||
use types::{
|
||||
Attestation, AttesterSlashing, EthSpec, ForkContext, ForkName, ProposerSlashing,
|
||||
SignedAggregateAndProof, SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase,
|
||||
SignedContributionAndProof, SignedVoluntaryExit, SubnetId, SyncCommitteeMessage, SyncSubnetId,
|
||||
};
|
||||
|
||||
/// The decoded form of a gossipsub message, one variant per gossip topic kind.
#[derive(Debug, Clone, PartialEq)]
pub enum PubsubMessage<T: EthSpec> {
    /// Gossipsub message providing notification of a new block.
    BeaconBlock(Box<SignedBeaconBlock<T>>),
    /// Gossipsub message providing notification of an aggregate attestation and associated proof.
    AggregateAndProofAttestation(Box<SignedAggregateAndProof<T>>),
    /// Gossipsub message providing notification of a raw un-aggregated attestation with its shard id.
    Attestation(Box<(SubnetId, Attestation<T>)>),
    /// Gossipsub message providing notification of a voluntary exit.
    VoluntaryExit(Box<SignedVoluntaryExit>),
    /// Gossipsub message providing notification of a new proposer slashing.
    ProposerSlashing(Box<ProposerSlashing>),
    /// Gossipsub message providing notification of a new attester slashing.
    AttesterSlashing(Box<AttesterSlashing<T>>),
    /// Gossipsub message providing notification of partially aggregated sync committee signatures.
    SignedContributionAndProof(Box<SignedContributionAndProof<T>>),
    /// Gossipsub message providing notification of unaggregated sync committee signatures with its subnet id.
    SyncCommitteeMessage(Box<(SyncSubnetId, SyncCommitteeMessage)>),
}
|
||||
|
||||
// Implements the `DataTransform` trait of gossipsub to employ snappy compression
pub struct SnappyTransform {
    /// Sets the maximum size we allow gossipsub messages to decompress to.
    max_size_per_message: usize,
}
|
||||
|
||||
impl SnappyTransform {
|
||||
pub fn new(max_size_per_message: usize) -> Self {
|
||||
SnappyTransform {
|
||||
max_size_per_message,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl DataTransform for SnappyTransform {
    // Provides the snappy decompression from RawGossipsubMessages
    fn inbound_transform(
        &self,
        raw_message: RawGossipsubMessage,
    ) -> Result<GossipsubMessage, std::io::Error> {
        // Check the claimed decompressed length first: `decompress_len` reads
        // only the snappy header, so oversized payloads are rejected before
        // any decompression work is done.
        let len = decompress_len(&raw_message.data)?;
        if len > self.max_size_per_message {
            return Err(Error::new(
                ErrorKind::InvalidData,
                "ssz_snappy decoded data > GOSSIP_MAX_SIZE",
            ));
        }

        let mut decoder = Decoder::new();
        let decompressed_data = decoder.decompress_vec(&raw_message.data)?;

        // Build the GossipsubMessage struct
        Ok(GossipsubMessage {
            source: raw_message.source,
            data: decompressed_data,
            sequence_number: raw_message.sequence_number,
            topic: raw_message.topic,
        })
    }

    /// Provides the snappy compression logic to gossipsub.
    fn outbound_transform(
        &self,
        _topic: &TopicHash,
        data: Vec<u8>,
    ) -> Result<Vec<u8>, std::io::Error> {
        // Currently we are not employing topic-based compression. Everything is expected to be
        // snappy compressed.
        if data.len() > self.max_size_per_message {
            return Err(Error::new(
                ErrorKind::InvalidData,
                "ssz_snappy Encoded data > GOSSIP_MAX_SIZE",
            ));
        }
        let mut encoder = Encoder::new();
        encoder.compress_vec(&data).map_err(Into::into)
    }
}
|
||||
|
||||
impl<T: EthSpec> PubsubMessage<T> {
|
||||
/// Returns the topics that each pubsub message will be sent across, given a supported
|
||||
/// gossipsub encoding and fork version.
|
||||
pub fn topics(&self, encoding: GossipEncoding, fork_version: [u8; 4]) -> Vec<GossipTopic> {
|
||||
vec![GossipTopic::new(self.kind(), encoding, fork_version)]
|
||||
}
|
||||
|
||||
/// Returns the kind of gossipsub topic associated with the message.
|
||||
pub fn kind(&self) -> GossipKind {
|
||||
match self {
|
||||
PubsubMessage::BeaconBlock(_) => GossipKind::BeaconBlock,
|
||||
PubsubMessage::AggregateAndProofAttestation(_) => GossipKind::BeaconAggregateAndProof,
|
||||
PubsubMessage::Attestation(attestation_data) => {
|
||||
GossipKind::Attestation(attestation_data.0)
|
||||
}
|
||||
PubsubMessage::VoluntaryExit(_) => GossipKind::VoluntaryExit,
|
||||
PubsubMessage::ProposerSlashing(_) => GossipKind::ProposerSlashing,
|
||||
PubsubMessage::AttesterSlashing(_) => GossipKind::AttesterSlashing,
|
||||
PubsubMessage::SignedContributionAndProof(_) => GossipKind::SignedContributionAndProof,
|
||||
PubsubMessage::SyncCommitteeMessage(data) => GossipKind::SyncCommitteeMessage(data.0),
|
||||
}
|
||||
}
|
||||
|
||||
/// This decodes `data` into a `PubsubMessage` given a topic.
|
||||
/* Note: This is assuming we are not hashing topics. If we choose to hash topics, these will
|
||||
* need to be modified.
|
||||
*/
|
||||
pub fn decode(
|
||||
topic: &TopicHash,
|
||||
data: &[u8],
|
||||
fork_context: &ForkContext,
|
||||
) -> Result<Self, String> {
|
||||
match GossipTopic::decode(topic.as_str()) {
|
||||
Err(_) => Err(format!("Unknown gossipsub topic: {:?}", topic)),
|
||||
Ok(gossip_topic) => {
|
||||
// All topics are currently expected to be compressed and decompressed with snappy.
|
||||
// This is done in the `SnappyTransform` struct.
|
||||
// Therefore compression has already been handled for us by the time we are
|
||||
// decoding the objects here.
|
||||
|
||||
// the ssz decoders
|
||||
match gossip_topic.kind() {
|
||||
GossipKind::BeaconAggregateAndProof => {
|
||||
let agg_and_proof = SignedAggregateAndProof::from_ssz_bytes(data)
|
||||
.map_err(|e| format!("{:?}", e))?;
|
||||
Ok(PubsubMessage::AggregateAndProofAttestation(Box::new(
|
||||
agg_and_proof,
|
||||
)))
|
||||
}
|
||||
GossipKind::Attestation(subnet_id) => {
|
||||
let attestation =
|
||||
Attestation::from_ssz_bytes(data).map_err(|e| format!("{:?}", e))?;
|
||||
Ok(PubsubMessage::Attestation(Box::new((
|
||||
*subnet_id,
|
||||
attestation,
|
||||
))))
|
||||
}
|
||||
GossipKind::BeaconBlock => {
|
||||
let beacon_block =
|
||||
match fork_context.from_context_bytes(gossip_topic.fork_digest) {
|
||||
Some(ForkName::Base) => SignedBeaconBlock::<T>::Base(
|
||||
SignedBeaconBlockBase::from_ssz_bytes(data)
|
||||
.map_err(|e| format!("{:?}", e))?,
|
||||
),
|
||||
Some(ForkName::Altair) => SignedBeaconBlock::<T>::Altair(
|
||||
SignedBeaconBlockAltair::from_ssz_bytes(data)
|
||||
.map_err(|e| format!("{:?}", e))?,
|
||||
),
|
||||
None => {
|
||||
return Err(format!(
|
||||
"Unknown gossipsub fork digest: {:?}",
|
||||
gossip_topic.fork_digest
|
||||
))
|
||||
}
|
||||
};
|
||||
Ok(PubsubMessage::BeaconBlock(Box::new(beacon_block)))
|
||||
}
|
||||
GossipKind::VoluntaryExit => {
|
||||
let voluntary_exit = SignedVoluntaryExit::from_ssz_bytes(data)
|
||||
.map_err(|e| format!("{:?}", e))?;
|
||||
Ok(PubsubMessage::VoluntaryExit(Box::new(voluntary_exit)))
|
||||
}
|
||||
GossipKind::ProposerSlashing => {
|
||||
let proposer_slashing = ProposerSlashing::from_ssz_bytes(data)
|
||||
.map_err(|e| format!("{:?}", e))?;
|
||||
Ok(PubsubMessage::ProposerSlashing(Box::new(proposer_slashing)))
|
||||
}
|
||||
GossipKind::AttesterSlashing => {
|
||||
let attester_slashing = AttesterSlashing::from_ssz_bytes(data)
|
||||
.map_err(|e| format!("{:?}", e))?;
|
||||
Ok(PubsubMessage::AttesterSlashing(Box::new(attester_slashing)))
|
||||
}
|
||||
GossipKind::SignedContributionAndProof => {
|
||||
let sync_aggregate = SignedContributionAndProof::from_ssz_bytes(data)
|
||||
.map_err(|e| format!("{:?}", e))?;
|
||||
Ok(PubsubMessage::SignedContributionAndProof(Box::new(
|
||||
sync_aggregate,
|
||||
)))
|
||||
}
|
||||
GossipKind::SyncCommitteeMessage(subnet_id) => {
|
||||
let sync_committee = SyncCommitteeMessage::from_ssz_bytes(data)
|
||||
.map_err(|e| format!("{:?}", e))?;
|
||||
Ok(PubsubMessage::SyncCommitteeMessage(Box::new((
|
||||
*subnet_id,
|
||||
sync_committee,
|
||||
))))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
    /// Encodes a `PubsubMessage` based on the topic encodings. The first known encoding is used.
    /// If no encoding is known, an error is returned.
    pub fn encode(&self, _encoding: GossipEncoding) -> Vec<u8> {
        // Currently do not employ encoding strategies based on the topic. All messages are ssz
        // encoded.
        // Also note, that the compression is handled by the `SnappyTransform` struct. Gossipsub will compress the
        // messages for us.
        match &self {
            PubsubMessage::BeaconBlock(data) => data.as_ssz_bytes(),
            PubsubMessage::AggregateAndProofAttestation(data) => data.as_ssz_bytes(),
            PubsubMessage::VoluntaryExit(data) => data.as_ssz_bytes(),
            PubsubMessage::ProposerSlashing(data) => data.as_ssz_bytes(),
            PubsubMessage::AttesterSlashing(data) => data.as_ssz_bytes(),
            // Subnet-scoped messages are stored as (subnet_id, message) tuples; only the
            // message itself (`.1`) is SSZ-encoded — the subnet id is carried by the topic.
            PubsubMessage::Attestation(data) => data.1.as_ssz_bytes(),
            PubsubMessage::SignedContributionAndProof(data) => data.as_ssz_bytes(),
            PubsubMessage::SyncCommitteeMessage(data) => data.1.as_ssz_bytes(),
        }
    }
|
||||
}
|
||||
|
||||
impl<T: EthSpec> std::fmt::Display for PubsubMessage<T> {
    /// Human-readable summary of the message for logging; this is not a wire format.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            PubsubMessage::BeaconBlock(block) => write!(
                f,
                "Beacon Block: slot: {}, proposer_index: {}",
                block.slot(),
                block.message().proposer_index()
            ),
            PubsubMessage::AggregateAndProofAttestation(att) => write!(
                f,
                "Aggregate and Proof: slot: {}, index: {}, aggregator_index: {}",
                att.message.aggregate.data.slot,
                att.message.aggregate.data.index,
                att.message.aggregator_index,
            ),
            // `data` is a (SubnetId, Attestation) tuple; `.0` derefs to the raw subnet index.
            PubsubMessage::Attestation(data) => write!(
                f,
                "Attestation: subnet_id: {}, attestation_slot: {}, attestation_index: {}",
                *data.0, data.1.data.slot, data.1.data.index,
            ),
            PubsubMessage::VoluntaryExit(_data) => write!(f, "Voluntary Exit"),
            PubsubMessage::ProposerSlashing(_data) => write!(f, "Proposer Slashing"),
            PubsubMessage::AttesterSlashing(_data) => write!(f, "Attester Slashing"),
            PubsubMessage::SignedContributionAndProof(_) => {
                write!(f, "Signed Contribution and Proof")
            }
            PubsubMessage::SyncCommitteeMessage(data) => {
                write!(f, "Sync committee message: subnet_id: {}", *data.0)
            }
        }
    }
}
|
||||
28
beacon_node/lighthouse_network/src/types/subnet.rs
Normal file
28
beacon_node/lighthouse_network/src/types/subnet.rs
Normal file
@@ -0,0 +1,28 @@
|
||||
use serde::Serialize;
|
||||
use std::time::Instant;
|
||||
use types::{SubnetId, SyncSubnetId};
|
||||
|
||||
/// Represents a subnet on an attestation or sync committee `SubnetId`.
///
/// Used for subscribing to the appropriate gossipsub subnets and for marking the
/// appropriate metadata bitfields.
#[derive(Debug, Clone, Copy, Serialize, PartialEq, Eq, Hash)]
pub enum Subnet {
    /// Represents a gossipsub attestation subnet and the metadata `attnets` field.
    Attestation(SubnetId),
    /// Represents a gossipsub sync committee subnet and the metadata `syncnets` field.
    SyncCommittee(SyncSubnetId),
}
|
||||
|
||||
/// A subnet to discover peers on along with the instant after which it's no longer useful.
#[derive(Debug, Clone)]
pub struct SubnetDiscovery {
    /// The subnet to search for peers on.
    pub subnet: Subnet,
    /// Instant after which the discovery request is no longer useful; `None` means
    /// the request does not expire.
    pub min_ttl: Option<Instant>,
}
|
||||
|
||||
impl PartialEq for SubnetDiscovery {
    /// Two discovery requests are equal when they target the same subnet; the
    /// `min_ttl` deadline is deliberately ignored in the comparison.
    fn eq(&self, other: &SubnetDiscovery) -> bool {
        self.subnet == other.subnet
    }
}
|
||||
96
beacon_node/lighthouse_network/src/types/sync_state.rs
Normal file
96
beacon_node/lighthouse_network/src/types/sync_state.rs
Normal file
@@ -0,0 +1,96 @@
|
||||
use serde::{Deserialize, Serialize};
|
||||
use types::Slot;
|
||||
|
||||
/// The current state of the node.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub enum SyncState {
    /// The node is performing a long-range (batch) sync over a finalized chain.
    /// In this state, parent lookups are disabled.
    SyncingFinalized { start_slot: Slot, target_slot: Slot },
    /// The node is performing a long-range (batch) sync over one or many head chains.
    /// In this state parent lookups are disabled.
    SyncingHead { start_slot: Slot, target_slot: Slot },
    /// The node is undertaking a backfill sync. This occurs when a user has specified a trusted
    /// state. The node first syncs "forward" by downloading blocks up to the current head as
    /// specified by its peers. Once completed, the node enters this sync state and attempts to
    /// download all required historical blocks to complete its chain.
    BackFillSyncing { completed: usize, remaining: usize },
    /// The node has completed syncing a finalized chain and is in the process of re-evaluating
    /// which sync state to progress to.
    SyncTransition,
    /// The node is up to date with all known peers and is connected to at least one
    /// fully synced peer. In this state, parent lookups are enabled.
    Synced,
    /// No useful peers are connected. Long-range syncs cannot proceed and we have no useful
    /// peers to download parents for. More peers need to be connected before we can proceed.
    Stalled,
}
|
||||
|
||||
#[derive(PartialEq, Debug, Clone, Serialize, Deserialize)]
/// The state of the backfill sync.
pub enum BackFillState {
    /// The sync is partially completed and currently paused.
    Paused,
    /// We are currently backfilling.
    Syncing,
    /// A backfill sync has completed.
    Completed,
    /// A backfill sync is not required.
    NotRequired,
    /// Too many failed attempts at backfilling. Consider it failed.
    Failed,
}
|
||||
|
||||
impl PartialEq for SyncState {
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
matches!(
|
||||
(self, other),
|
||||
(
|
||||
SyncState::SyncingFinalized { .. },
|
||||
SyncState::SyncingFinalized { .. }
|
||||
) | (SyncState::SyncingHead { .. }, SyncState::SyncingHead { .. })
|
||||
| (SyncState::Synced, SyncState::Synced)
|
||||
| (SyncState::Stalled, SyncState::Stalled)
|
||||
| (SyncState::SyncTransition, SyncState::SyncTransition)
|
||||
| (
|
||||
SyncState::BackFillSyncing { .. },
|
||||
SyncState::BackFillSyncing { .. }
|
||||
)
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl SyncState {
|
||||
/// Returns a boolean indicating the node is currently performing a long-range sync.
|
||||
pub fn is_syncing(&self) -> bool {
|
||||
match self {
|
||||
SyncState::SyncingFinalized { .. } => true,
|
||||
SyncState::SyncingHead { .. } => true,
|
||||
SyncState::SyncTransition => true,
|
||||
// Backfill doesn't effect any logic, we consider this state, not syncing.
|
||||
SyncState::BackFillSyncing { .. } => false,
|
||||
SyncState::Synced => false,
|
||||
SyncState::Stalled => false,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns true if the node is synced.
|
||||
///
|
||||
/// NOTE: We consider the node synced if it is fetching old historical blocks.
|
||||
pub fn is_synced(&self) -> bool {
|
||||
matches!(self, SyncState::Synced | SyncState::BackFillSyncing { .. })
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Display for SyncState {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
SyncState::SyncingFinalized { .. } => write!(f, "Syncing Finalized Chain"),
|
||||
SyncState::SyncingHead { .. } => write!(f, "Syncing Head Chain"),
|
||||
SyncState::Synced { .. } => write!(f, "Synced"),
|
||||
SyncState::Stalled { .. } => write!(f, "Stalled"),
|
||||
SyncState::SyncTransition => write!(f, "Evaluating known peers"),
|
||||
SyncState::BackFillSyncing { .. } => write!(f, "Syncing Historical Blocks"),
|
||||
}
|
||||
}
|
||||
}
|
||||
395
beacon_node/lighthouse_network/src/types/topics.rs
Normal file
395
beacon_node/lighthouse_network/src/types/topics.rs
Normal file
@@ -0,0 +1,395 @@
|
||||
use libp2p::gossipsub::{IdentTopic as Topic, TopicHash};
|
||||
use serde_derive::{Deserialize, Serialize};
|
||||
use strum::AsRefStr;
|
||||
use types::{SubnetId, SyncSubnetId};
|
||||
|
||||
use crate::Subnet;
|
||||
|
||||
/// The gossipsub topic names.
// These constants form a topic name of the form /TOPIC_PREFIX/TOPIC/ENCODING_POSTFIX
// For example /eth2/beacon_block/ssz
pub const TOPIC_PREFIX: &str = "eth2";
pub const SSZ_SNAPPY_ENCODING_POSTFIX: &str = "ssz_snappy";
pub const BEACON_BLOCK_TOPIC: &str = "beacon_block";
pub const BEACON_AGGREGATE_AND_PROOF_TOPIC: &str = "beacon_aggregate_and_proof";
pub const BEACON_ATTESTATION_PREFIX: &str = "beacon_attestation_";
pub const VOLUNTARY_EXIT_TOPIC: &str = "voluntary_exit";
pub const PROPOSER_SLASHING_TOPIC: &str = "proposer_slashing";
pub const ATTESTER_SLASHING_TOPIC: &str = "attester_slashing";
pub const SIGNED_CONTRIBUTION_AND_PROOF_TOPIC: &str = "sync_committee_contribution_and_proof";
pub const SYNC_COMMITTEE_PREFIX_TOPIC: &str = "sync_committee_";

/// The non-subnet gossip kinds; the two subnet-scoped kinds
/// (`Attestation`, `SyncCommitteeMessage`) are intentionally absent.
pub const CORE_TOPICS: [GossipKind; 6] = [
    GossipKind::BeaconBlock,
    GossipKind::BeaconAggregateAndProof,
    GossipKind::VoluntaryExit,
    GossipKind::ProposerSlashing,
    GossipKind::AttesterSlashing,
    GossipKind::SignedContributionAndProof,
];
|
||||
|
||||
/// A gossipsub topic which encapsulates the type of messages that should be sent and received over
/// the pubsub protocol and the way the messages should be encoded.
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq, Hash)]
pub struct GossipTopic {
    /// The encoding of the topic.
    encoding: GossipEncoding,
    /// The fork digest of the topic,
    pub fork_digest: [u8; 4],
    /// The kind of topic.
    kind: GossipKind,
}
|
||||
|
||||
/// Enum that brings these topics into the rust type system.
// NOTE: There is intentionally no unknown type here. We only allow known gossipsub topics.
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq, Hash, AsRefStr)]
#[strum(serialize_all = "snake_case")]
pub enum GossipKind {
    /// Topic for publishing beacon blocks.
    BeaconBlock,
    /// Topic for publishing aggregate attestations and proofs.
    BeaconAggregateAndProof,
    /// Topic for publishing raw attestations on a particular subnet.
    // The strum serialization omits the subnet index; `Display` (below) appends it.
    #[strum(serialize = "beacon_attestation")]
    Attestation(SubnetId),
    /// Topic for publishing voluntary exits.
    VoluntaryExit,
    /// Topic for publishing block proposer slashings.
    ProposerSlashing,
    /// Topic for publishing attester slashings.
    AttesterSlashing,
    /// Topic for publishing partially aggregated sync committee signatures.
    SignedContributionAndProof,
    /// Topic for publishing unaggregated sync committee signatures on a particular subnet.
    // As above, the strum string is the bare prefix without the subnet index.
    #[strum(serialize = "sync_committee")]
    SyncCommitteeMessage(SyncSubnetId),
}
|
||||
|
||||
impl std::fmt::Display for GossipKind {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
GossipKind::Attestation(subnet_id) => write!(f, "beacon_attestation_{}", **subnet_id),
|
||||
GossipKind::SyncCommitteeMessage(subnet_id) => {
|
||||
write!(f, "sync_committee_{}", **subnet_id)
|
||||
}
|
||||
x => f.write_str(x.as_ref()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// The known encoding types for gossipsub messages.
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq, Hash)]
pub enum GossipEncoding {
    /// Messages are encoded with SSZSnappy.
    SSZSnappy,
}
|
||||
|
||||
impl Default for GossipEncoding {
|
||||
fn default() -> Self {
|
||||
GossipEncoding::SSZSnappy
|
||||
}
|
||||
}
|
||||
|
||||
impl GossipTopic {
|
||||
pub fn new(kind: GossipKind, encoding: GossipEncoding, fork_digest: [u8; 4]) -> Self {
|
||||
GossipTopic {
|
||||
encoding,
|
||||
fork_digest,
|
||||
kind,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the encoding type for the gossipsub topic.
|
||||
pub fn encoding(&self) -> &GossipEncoding {
|
||||
&self.encoding
|
||||
}
|
||||
|
||||
/// Returns a mutable reference to the fork digest of the gossipsub topic.
|
||||
pub fn digest(&mut self) -> &mut [u8; 4] {
|
||||
&mut self.fork_digest
|
||||
}
|
||||
|
||||
/// Returns the kind of message expected on the gossipsub topic.
|
||||
pub fn kind(&self) -> &GossipKind {
|
||||
&self.kind
|
||||
}
|
||||
|
||||
pub fn decode(topic: &str) -> Result<Self, String> {
|
||||
let topic_parts: Vec<&str> = topic.split('/').collect();
|
||||
if topic_parts.len() == 5 && topic_parts[1] == TOPIC_PREFIX {
|
||||
let digest_bytes = hex::decode(topic_parts[2])
|
||||
.map_err(|e| format!("Could not decode fork_digest hex: {}", e))?;
|
||||
|
||||
if digest_bytes.len() != 4 {
|
||||
return Err(format!(
|
||||
"Invalid gossipsub fork digest size: {}",
|
||||
digest_bytes.len()
|
||||
));
|
||||
}
|
||||
|
||||
let mut fork_digest = [0; 4];
|
||||
fork_digest.copy_from_slice(&digest_bytes);
|
||||
|
||||
let encoding = match topic_parts[4] {
|
||||
SSZ_SNAPPY_ENCODING_POSTFIX => GossipEncoding::SSZSnappy,
|
||||
_ => return Err(format!("Unknown encoding: {}", topic)),
|
||||
};
|
||||
let kind = match topic_parts[3] {
|
||||
BEACON_BLOCK_TOPIC => GossipKind::BeaconBlock,
|
||||
BEACON_AGGREGATE_AND_PROOF_TOPIC => GossipKind::BeaconAggregateAndProof,
|
||||
SIGNED_CONTRIBUTION_AND_PROOF_TOPIC => GossipKind::SignedContributionAndProof,
|
||||
VOLUNTARY_EXIT_TOPIC => GossipKind::VoluntaryExit,
|
||||
PROPOSER_SLASHING_TOPIC => GossipKind::ProposerSlashing,
|
||||
ATTESTER_SLASHING_TOPIC => GossipKind::AttesterSlashing,
|
||||
topic => match committee_topic_index(topic) {
|
||||
Some(subnet) => match subnet {
|
||||
Subnet::Attestation(s) => GossipKind::Attestation(s),
|
||||
Subnet::SyncCommittee(s) => GossipKind::SyncCommitteeMessage(s),
|
||||
},
|
||||
None => return Err(format!("Unknown topic: {}", topic)),
|
||||
},
|
||||
};
|
||||
|
||||
return Ok(GossipTopic {
|
||||
encoding,
|
||||
fork_digest,
|
||||
kind,
|
||||
});
|
||||
}
|
||||
|
||||
Err(format!("Unknown topic: {}", topic))
|
||||
}
|
||||
}
|
||||
|
||||
impl From<GossipTopic> for Topic {
    /// Converts a `GossipTopic` into a libp2p `IdentTopic`.
    // NOTE(review): `Topic::new` presumably accepts the topic via its `Into<String>`
    // conversion (the `From<GossipTopic> for String` impl below) — confirm against the
    // libp2p gossipsub API.
    fn from(topic: GossipTopic) -> Topic {
        Topic::new(topic)
    }
}
|
||||
|
||||
impl From<GossipTopic> for String {
    /// Returns the canonical string form of the topic:
    /// `/eth2/<fork_digest_hex>/<kind>/<encoding>`.
    ///
    /// Delegates to the `Display` impl so the two representations cannot drift apart —
    /// this body previously duplicated the formatting logic verbatim.
    fn from(topic: GossipTopic) -> String {
        topic.to_string()
    }
}
|
||||
|
||||
impl std::fmt::Display for GossipTopic {
    /// Formats the topic as `/eth2/<fork_digest_hex>/<kind>/<encoding>` — the canonical
    /// on-the-wire topic name.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let encoding = match self.encoding {
            GossipEncoding::SSZSnappy => SSZ_SNAPPY_ENCODING_POSTFIX,
        };

        // Subnet-scoped kinds append their subnet index to the topic prefix.
        let kind = match self.kind {
            GossipKind::BeaconBlock => BEACON_BLOCK_TOPIC.into(),
            GossipKind::BeaconAggregateAndProof => BEACON_AGGREGATE_AND_PROOF_TOPIC.into(),
            GossipKind::VoluntaryExit => VOLUNTARY_EXIT_TOPIC.into(),
            GossipKind::ProposerSlashing => PROPOSER_SLASHING_TOPIC.into(),
            GossipKind::AttesterSlashing => ATTESTER_SLASHING_TOPIC.into(),
            GossipKind::Attestation(index) => format!("{}{}", BEACON_ATTESTATION_PREFIX, *index,),
            GossipKind::SignedContributionAndProof => SIGNED_CONTRIBUTION_AND_PROOF_TOPIC.into(),
            GossipKind::SyncCommitteeMessage(index) => {
                format!("{}{}", SYNC_COMMITTEE_PREFIX_TOPIC, *index)
            }
        };
        write!(
            f,
            "/{}/{}/{}/{}",
            TOPIC_PREFIX,
            hex::encode(self.fork_digest),
            kind,
            encoding
        )
    }
}
|
||||
|
||||
impl From<Subnet> for GossipKind {
|
||||
fn from(subnet_id: Subnet) -> Self {
|
||||
match subnet_id {
|
||||
Subnet::Attestation(s) => GossipKind::Attestation(s),
|
||||
Subnet::SyncCommittee(s) => GossipKind::SyncCommitteeMessage(s),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// helper functions
|
||||
|
||||
/// Get subnet id from an attestation subnet topic hash.
|
||||
pub fn subnet_from_topic_hash(topic_hash: &TopicHash) -> Option<Subnet> {
|
||||
let gossip_topic = GossipTopic::decode(topic_hash.as_str()).ok()?;
|
||||
match gossip_topic.kind() {
|
||||
GossipKind::Attestation(subnet_id) => Some(Subnet::Attestation(*subnet_id)),
|
||||
GossipKind::SyncCommitteeMessage(subnet_id) => Some(Subnet::SyncCommittee(*subnet_id)),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
// Determines if a string is an attestation or sync committee topic.
|
||||
fn committee_topic_index(topic: &str) -> Option<Subnet> {
|
||||
if topic.starts_with(BEACON_ATTESTATION_PREFIX) {
|
||||
return Some(Subnet::Attestation(SubnetId::new(
|
||||
topic
|
||||
.trim_start_matches(BEACON_ATTESTATION_PREFIX)
|
||||
.parse::<u64>()
|
||||
.ok()?,
|
||||
)));
|
||||
} else if topic.starts_with(SYNC_COMMITTEE_PREFIX_TOPIC) {
|
||||
return Some(Subnet::SyncCommittee(SyncSubnetId::new(
|
||||
topic
|
||||
.trim_start_matches(SYNC_COMMITTEE_PREFIX_TOPIC)
|
||||
.parse::<u64>()
|
||||
.ok()?,
|
||||
)));
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::GossipKind::*;
    use super::*;

    // A syntactically valid 4-byte fork digest, hex-encoded.
    const GOOD_FORK_DIGEST: &str = "e1925f3b";
    // Wrong network prefix (must be "eth2").
    const BAD_PREFIX: &str = "tezos";
    // 5 bytes — fork digests must decode to exactly 4 bytes.
    const BAD_FORK_DIGEST: &str = "e1925f3b4b";
    // Unknown encoding postfix (only "ssz_snappy" is supported).
    const BAD_ENCODING: &str = "rlp";
    // Unknown topic kind.
    const BAD_KIND: &str = "blocks";

    // Builds one valid topic string for every (encoding, kind) combination.
    fn topics() -> Vec<String> {
        let mut topics = Vec::new();
        let fork_digest: [u8; 4] = [1, 2, 3, 4];
        for encoding in [GossipEncoding::SSZSnappy].iter() {
            for kind in [
                BeaconBlock,
                BeaconAggregateAndProof,
                SignedContributionAndProof,
                Attestation(SubnetId::new(42)),
                SyncCommitteeMessage(SyncSubnetId::new(42)),
                VoluntaryExit,
                ProposerSlashing,
                AttesterSlashing,
            ]
            .iter()
            {
                topics.push(GossipTopic::new(kind.clone(), encoding.clone(), fork_digest).into());
            }
        }
        topics
    }

    // Assembles a raw "/prefix/fork_digest/kind/encoding" topic string.
    fn create_topic(prefix: &str, fork_digest: &str, kind: &str, encoding: &str) -> String {
        format!("/{}/{}/{}/{}", prefix, fork_digest, kind, encoding)
    }

    // Every well-formed topic round-trips through `decode`.
    #[test]
    fn test_decode() {
        for topic in topics().iter() {
            assert!(GossipTopic::decode(topic.as_str()).is_ok());
        }
    }

    // Each malformed component (prefix, digest, kind, encoding, part count) is rejected.
    #[test]
    fn test_decode_malicious() {
        let bad_prefix_str = create_topic(
            BAD_PREFIX,
            GOOD_FORK_DIGEST,
            BEACON_BLOCK_TOPIC,
            SSZ_SNAPPY_ENCODING_POSTFIX,
        );
        assert!(GossipTopic::decode(bad_prefix_str.as_str()).is_err());

        let bad_digest_str = create_topic(
            TOPIC_PREFIX,
            BAD_FORK_DIGEST,
            BEACON_BLOCK_TOPIC,
            SSZ_SNAPPY_ENCODING_POSTFIX,
        );
        assert!(GossipTopic::decode(bad_digest_str.as_str()).is_err());

        let bad_kind_str = create_topic(
            TOPIC_PREFIX,
            GOOD_FORK_DIGEST,
            BAD_KIND,
            SSZ_SNAPPY_ENCODING_POSTFIX,
        );
        assert!(GossipTopic::decode(bad_kind_str.as_str()).is_err());

        let bad_encoding_str = create_topic(
            TOPIC_PREFIX,
            GOOD_FORK_DIGEST,
            BEACON_BLOCK_TOPIC,
            BAD_ENCODING,
        );
        assert!(GossipTopic::decode(bad_encoding_str.as_str()).is_err());

        // Extra parts
        assert!(
            GossipTopic::decode("/eth2/e1925f3b/beacon_block/ssz_snappy/yolo").is_err(),
            "should have exactly 5 parts"
        );
        // Empty string
        assert!(GossipTopic::decode("").is_err());
        // Empty parts
        assert!(GossipTopic::decode("////").is_err());
    }

    // Only subnet-scoped topics yield a `Subnet`; core topics yield `None`.
    #[test]
    fn test_subnet_from_topic_hash() {
        let topic_hash = TopicHash::from_raw("/eth2/e1925f3b/beacon_block/ssz_snappy");
        assert!(subnet_from_topic_hash(&topic_hash).is_none());

        let topic_hash = TopicHash::from_raw("/eth2/e1925f3b/beacon_attestation_42/ssz_snappy");
        assert_eq!(
            subnet_from_topic_hash(&topic_hash),
            Some(Subnet::Attestation(SubnetId::new(42)))
        );

        let topic_hash = TopicHash::from_raw("/eth2/e1925f3b/sync_committee_42/ssz_snappy");
        assert_eq!(
            subnet_from_topic_hash(&topic_hash),
            Some(Subnet::SyncCommittee(SyncSubnetId::new(42)))
        );
    }

    // The strum-derived `as_ref` strings omit subnet indices (unlike `Display`).
    #[test]
    fn test_as_str_ref() {
        assert_eq!("beacon_block", BeaconBlock.as_ref());
        assert_eq!(
            "beacon_aggregate_and_proof",
            BeaconAggregateAndProof.as_ref()
        );
        assert_eq!(
            "beacon_attestation",
            Attestation(SubnetId::new(42)).as_ref()
        );

        assert_eq!(
            "sync_committee",
            SyncCommitteeMessage(SyncSubnetId::new(42)).as_ref()
        );
        assert_eq!("voluntary_exit", VoluntaryExit.as_ref());
        assert_eq!("proposer_slashing", ProposerSlashing.as_ref());
        assert_eq!("attester_slashing", AttesterSlashing.as_ref());
    }
}
|
||||
Reference in New Issue
Block a user