Rename eth2_libp2p to lighthouse_network (#2702)

## Description

The `eth2_libp2p` crate was originally named and designed to incorporate a simple libp2p integration into Lighthouse. Since then, the crate's purpose has expanded dramatically. It now houses a great deal of sophistication that is specific to Lighthouse and is no longer just a libp2p integration.

As of this writing, it houses the following high-level Lighthouse-specific logic:
- Lighthouse's implementation of the eth2 RPC protocol and specific encodings/decodings
- Integration and handling of ENRs with respect to libp2p and eth2
- Lighthouse's discovery logic, its integration with discv5 and the logic for searching for and handling peers.
- Lighthouse's peer manager - This is a large module handling various aspects of Lighthouse's network, such as peer scoring, handling pings and metadata, connection maintenance and recording, etc.
- Lighthouse's peer database - This is a collection of information stored for each individual peer that is specific to Lighthouse. We store connection state, sync state, last-seen IPs, scores, etc. The data stored for each peer is designed for various elements of the Lighthouse codebase, such as syncing and the HTTP API.
- Gossipsub scoring - This stores a collection of gossipsub 1.1 scoring mechanisms that are continuously analysed and updated based on the Ethereum 2.0 networks and how Lighthouse performs on them.
- Lighthouse-specific types for managing gossipsub topics, sync status and ENR fields
- Lighthouse's network HTTP API metrics - A collection of metrics for Lighthouse network monitoring
- Lighthouse's custom configuration of all networking protocols: RPC, gossipsub, discovery, identify and libp2p.

Therefore it makes sense to rename the crate to better reflect its current purpose: it manages the majority of Lighthouse's network stack. This PR renames the crate to `lighthouse_network`.
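For downstream code the rename is mechanical: every `use eth2_libp2p::...` path becomes `use lighthouse_network::...`, with the item paths otherwise unchanged. A minimal sketch of the change, assuming the crate's `rpc` module remains publicly exported as it was in `eth2_libp2p`:

```rust
// Before this PR (downstream crate):
// use eth2_libp2p::rpc::{GoodbyeReason, StatusMessage};

// After this PR, only the crate name changes:
use lighthouse_network::rpc::{GoodbyeReason, StatusMessage};

fn describe(status: &StatusMessage, reason: &GoodbyeReason) {
    // Both types implement `Display` in the RPC methods module included in
    // this diff, so they can be logged directly.
    println!("peer status: {}", status);
    println!("goodbye reason: {}", reason);
}
```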

Co-authored-by: Paul Hauner <paul@paulhauner.com>
Age Manning
2021-10-19 00:30:39 +00:00
parent 06e310c4eb
commit df40700ddd
94 changed files with 157 additions and 150 deletions


@@ -0,0 +1,295 @@
//! This handles the various supported encoding mechanisms for the Eth 2.0 RPC.
use crate::rpc::methods::ErrorType;
use crate::rpc::{InboundRequest, OutboundRequest, RPCCodedResponse, RPCResponse};
use libp2p::bytes::BufMut;
use libp2p::bytes::BytesMut;
use std::marker::PhantomData;
use tokio_util::codec::{Decoder, Encoder};
use types::EthSpec;
pub trait OutboundCodec<TItem>: Encoder<TItem> + Decoder {
type CodecErrorType;
fn decode_error(
&mut self,
src: &mut BytesMut,
) -> Result<Option<Self::CodecErrorType>, <Self as Decoder>::Error>;
}
/* Global Inbound Codec */
// This deals with Decoding RPC Requests from other peers and encoding our responses
pub struct BaseInboundCodec<TCodec, TSpec>
where
TCodec: Encoder<RPCCodedResponse<TSpec>> + Decoder,
TSpec: EthSpec,
{
/// Inner codec for handling various encodings
inner: TCodec,
phantom: PhantomData<TSpec>,
}
impl<TCodec, TSpec> BaseInboundCodec<TCodec, TSpec>
where
TCodec: Encoder<RPCCodedResponse<TSpec>> + Decoder,
TSpec: EthSpec,
{
pub fn new(codec: TCodec) -> Self {
BaseInboundCodec {
inner: codec,
phantom: PhantomData,
}
}
}
/* Global Outbound Codec */
// This deals with Decoding RPC Responses from other peers and encoding our requests
pub struct BaseOutboundCodec<TOutboundCodec, TSpec>
where
TOutboundCodec: OutboundCodec<OutboundRequest<TSpec>>,
TSpec: EthSpec,
{
/// Inner codec for handling various encodings.
inner: TOutboundCodec,
/// Keeps track of the current response code for a chunk.
current_response_code: Option<u8>,
phantom: PhantomData<TSpec>,
}
impl<TOutboundCodec, TSpec> BaseOutboundCodec<TOutboundCodec, TSpec>
where
TSpec: EthSpec,
TOutboundCodec: OutboundCodec<OutboundRequest<TSpec>>,
{
pub fn new(codec: TOutboundCodec) -> Self {
BaseOutboundCodec {
inner: codec,
current_response_code: None,
phantom: PhantomData,
}
}
}
/* Implementation of the Encoding/Decoding for the global codecs */
/* Base Inbound Codec */
// This Encodes RPC Responses sent to external peers
impl<TCodec, TSpec> Encoder<RPCCodedResponse<TSpec>> for BaseInboundCodec<TCodec, TSpec>
where
TSpec: EthSpec,
TCodec: Decoder + Encoder<RPCCodedResponse<TSpec>>,
{
type Error = <TCodec as Encoder<RPCCodedResponse<TSpec>>>::Error;
fn encode(
&mut self,
item: RPCCodedResponse<TSpec>,
dst: &mut BytesMut,
) -> Result<(), Self::Error> {
dst.clear();
dst.reserve(1);
dst.put_u8(
item.as_u8()
.expect("Should never encode a stream termination"),
);
self.inner.encode(item, dst)
}
}
// This Decodes RPC Requests from external peers
impl<TCodec, TSpec> Decoder for BaseInboundCodec<TCodec, TSpec>
where
TSpec: EthSpec,
TCodec: Encoder<RPCCodedResponse<TSpec>> + Decoder<Item = InboundRequest<TSpec>>,
{
type Item = InboundRequest<TSpec>;
type Error = <TCodec as Decoder>::Error;
fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
self.inner.decode(src)
}
}
/* Base Outbound Codec */
// This Encodes RPC Requests sent to external peers
impl<TCodec, TSpec> Encoder<OutboundRequest<TSpec>> for BaseOutboundCodec<TCodec, TSpec>
where
TSpec: EthSpec,
TCodec: OutboundCodec<OutboundRequest<TSpec>> + Encoder<OutboundRequest<TSpec>>,
{
type Error = <TCodec as Encoder<OutboundRequest<TSpec>>>::Error;
fn encode(
&mut self,
item: OutboundRequest<TSpec>,
dst: &mut BytesMut,
) -> Result<(), Self::Error> {
self.inner.encode(item, dst)
}
}
// This decodes RPC Responses received from external peers
impl<TCodec, TSpec> Decoder for BaseOutboundCodec<TCodec, TSpec>
where
TSpec: EthSpec,
TCodec: OutboundCodec<OutboundRequest<TSpec>, CodecErrorType = ErrorType>
+ Decoder<Item = RPCResponse<TSpec>>,
{
type Item = RPCCodedResponse<TSpec>;
type Error = <TCodec as Decoder>::Error;
fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
// if we have only received the response code, wait for more bytes
if src.len() <= 1 {
return Ok(None);
}
// using the response code determine which kind of payload needs to be decoded.
let response_code = self.current_response_code.unwrap_or_else(|| {
let resp_code = src.split_to(1)[0];
self.current_response_code = Some(resp_code);
resp_code
});
let inner_result = {
if RPCCodedResponse::<TSpec>::is_response(response_code) {
// decode an actual response, mutating the buffer if enough bytes have been read,
// and return the result.
self.inner
.decode(src)
.map(|r| r.map(RPCCodedResponse::Success))
} else {
// decode an error
self.inner
.decode_error(src)
.map(|r| r.map(|resp| RPCCodedResponse::from_error(response_code, resp)))
}
};
// if the inner decoder was capable of decoding a chunk, we need to reset the current
// response code for the next chunk
if let Ok(Some(_)) = inner_result {
self.current_response_code = None;
}
// return the result
inner_result
}
}
#[cfg(test)]
mod tests {
use super::super::ssz_snappy::*;
use super::*;
use crate::rpc::protocol::*;
use std::sync::Arc;
use types::{ForkContext, Hash256};
use unsigned_varint::codec::Uvi;
type Spec = types::MainnetEthSpec;
fn fork_context() -> ForkContext {
ForkContext::new::<Spec>(types::Slot::new(0), Hash256::zero(), &Spec::default_spec())
}
#[test]
fn test_decode_status_message() {
let message = hex::decode("0054ff060000734e615070590032000006e71e7b54989925efd6c9cbcb8ceb9b5f71216f5137282bf6a1e3b50f64e42d6c7fb347abe07eb0db8200000005029e2800").unwrap();
let mut buf = BytesMut::new();
buf.extend_from_slice(&message);
let snappy_protocol_id =
ProtocolId::new(Protocol::Status, Version::V1, Encoding::SSZSnappy);
let fork_context = Arc::new(fork_context());
let mut snappy_outbound_codec =
SSZSnappyOutboundCodec::<Spec>::new(snappy_protocol_id, 1_048_576, fork_context);
// remove response code
let mut snappy_buf = buf.clone();
let _ = snappy_buf.split_to(1);
// decode message just as snappy message
let _snappy_decoded_message = snappy_outbound_codec.decode(&mut snappy_buf).unwrap();
// build codecs for entire chunk
let mut snappy_base_outbound_codec = BaseOutboundCodec::new(snappy_outbound_codec);
// decode message as ssz snappy chunk
let _snappy_decoded_chunk = snappy_base_outbound_codec.decode(&mut buf).unwrap();
}
#[test]
fn test_invalid_length_prefix() {
let mut uvi_codec: Uvi<u128> = Uvi::default();
let mut dst = BytesMut::with_capacity(1024);
// Smallest > 10 byte varint
let len: u128 = 2u128.pow(70);
// Insert length-prefix
uvi_codec.encode(len, &mut dst).unwrap();
let snappy_protocol_id =
ProtocolId::new(Protocol::Status, Version::V1, Encoding::SSZSnappy);
let fork_context = Arc::new(fork_context());
let mut snappy_outbound_codec =
SSZSnappyOutboundCodec::<Spec>::new(snappy_protocol_id, 1_048_576, fork_context);
let snappy_decoded_message = snappy_outbound_codec.decode(&mut dst).unwrap_err();
assert_eq!(
snappy_decoded_message,
RPCError::IoError("input bytes exceed maximum".to_string()),
"length-prefix of > 10 bytes is invalid"
);
}
#[test]
fn test_length_limits() {
fn encode_len(len: usize) -> BytesMut {
let mut uvi_codec: Uvi<usize> = Uvi::default();
let mut dst = BytesMut::with_capacity(1024);
uvi_codec.encode(len, &mut dst).unwrap();
dst
}
let protocol_id =
ProtocolId::new(Protocol::BlocksByRange, Version::V1, Encoding::SSZSnappy);
// Response limits
let limit = protocol_id.rpc_response_limits::<Spec>();
let mut max = encode_len(limit.max + 1);
let fork_context = Arc::new(fork_context());
let mut codec = SSZSnappyOutboundCodec::<Spec>::new(
protocol_id.clone(),
1_048_576,
fork_context.clone(),
);
assert_eq!(codec.decode(&mut max).unwrap_err(), RPCError::InvalidData);
let mut min = encode_len(limit.min - 1);
let mut codec = SSZSnappyOutboundCodec::<Spec>::new(
protocol_id.clone(),
1_048_576,
fork_context.clone(),
);
assert_eq!(codec.decode(&mut min).unwrap_err(), RPCError::InvalidData);
// Request limits
let limit = protocol_id.rpc_request_limits();
let mut max = encode_len(limit.max + 1);
let mut codec = SSZSnappyOutboundCodec::<Spec>::new(
protocol_id.clone(),
1_048_576,
fork_context.clone(),
);
assert_eq!(codec.decode(&mut max).unwrap_err(), RPCError::InvalidData);
let mut min = encode_len(limit.min - 1);
let mut codec = SSZSnappyOutboundCodec::<Spec>::new(protocol_id, 1_048_576, fork_context);
assert_eq!(codec.decode(&mut min).unwrap_err(), RPCError::InvalidData);
}
}


@@ -0,0 +1,65 @@
pub(crate) mod base;
pub(crate) mod ssz_snappy;
use self::base::{BaseInboundCodec, BaseOutboundCodec};
use self::ssz_snappy::{SSZSnappyInboundCodec, SSZSnappyOutboundCodec};
use crate::rpc::protocol::RPCError;
use crate::rpc::{InboundRequest, OutboundRequest, RPCCodedResponse};
use libp2p::bytes::BytesMut;
use tokio_util::codec::{Decoder, Encoder};
use types::EthSpec;
// Known types of codecs
pub enum InboundCodec<TSpec: EthSpec> {
SSZSnappy(BaseInboundCodec<SSZSnappyInboundCodec<TSpec>, TSpec>),
}
pub enum OutboundCodec<TSpec: EthSpec> {
SSZSnappy(BaseOutboundCodec<SSZSnappyOutboundCodec<TSpec>, TSpec>),
}
impl<T: EthSpec> Encoder<RPCCodedResponse<T>> for InboundCodec<T> {
type Error = RPCError;
fn encode(&mut self, item: RPCCodedResponse<T>, dst: &mut BytesMut) -> Result<(), Self::Error> {
match self {
InboundCodec::SSZSnappy(codec) => codec.encode(item, dst),
}
}
}
impl<TSpec: EthSpec> Decoder for InboundCodec<TSpec> {
type Item = InboundRequest<TSpec>;
type Error = RPCError;
fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
match self {
InboundCodec::SSZSnappy(codec) => codec.decode(src),
}
}
}
impl<TSpec: EthSpec> Encoder<OutboundRequest<TSpec>> for OutboundCodec<TSpec> {
type Error = RPCError;
fn encode(
&mut self,
item: OutboundRequest<TSpec>,
dst: &mut BytesMut,
) -> Result<(), Self::Error> {
match self {
OutboundCodec::SSZSnappy(codec) => codec.encode(item, dst),
}
}
}
impl<T: EthSpec> Decoder for OutboundCodec<T> {
type Item = RPCCodedResponse<T>;
type Error = RPCError;
fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
match self {
OutboundCodec::SSZSnappy(codec) => codec.decode(src),
}
}
}

File diff suppressed because it is too large.


@@ -0,0 +1,963 @@
#![allow(clippy::type_complexity)]
#![allow(clippy::cognitive_complexity)]
use super::methods::{
GoodbyeReason, RPCCodedResponse, RPCResponseErrorCode, RequestId, ResponseTermination,
};
use super::outbound::OutboundRequestContainer;
use super::protocol::{InboundRequest, Protocol, RPCError, RPCProtocol};
use super::{RPCReceived, RPCSend};
use crate::rpc::outbound::{OutboundFramed, OutboundRequest};
use crate::rpc::protocol::InboundFramed;
use fnv::FnvHashMap;
use futures::prelude::*;
use futures::{Sink, SinkExt};
use libp2p::core::upgrade::{
InboundUpgrade, NegotiationError, OutboundUpgrade, ProtocolError, UpgradeError,
};
use libp2p::swarm::protocols_handler::{
KeepAlive, ProtocolsHandler, ProtocolsHandlerEvent, ProtocolsHandlerUpgrErr, SubstreamProtocol,
};
use libp2p::swarm::NegotiatedSubstream;
use slog::{crit, debug, trace, warn};
use smallvec::SmallVec;
use std::{
collections::hash_map::Entry,
pin::Pin,
sync::Arc,
task::{Context, Poll},
time::Duration,
};
use tokio::time::{sleep_until, Instant as TInstant, Sleep};
use tokio_util::time::{delay_queue, DelayQueue};
use types::{EthSpec, ForkContext};
/// The time (in seconds) before a substream that is awaiting a response from the user times out.
pub const RESPONSE_TIMEOUT: u64 = 10;
/// The number of times to retry an outbound upgrade in the case of IO errors.
const IO_ERROR_RETRIES: u8 = 3;
/// Maximum time given to the handler to perform shutdown operations.
const SHUTDOWN_TIMEOUT_SECS: u8 = 15;
/// Identifier of inbound and outbound substreams from the handler's perspective.
#[derive(Debug, Clone, Copy, Hash, Eq, PartialEq)]
pub struct SubstreamId(usize);
type InboundSubstream<TSpec> = InboundFramed<NegotiatedSubstream, TSpec>;
/// Output of the future handling the sending of responses to a peer's request.
type InboundProcessingOutput<TSpec> = (
InboundSubstream<TSpec>, /* substream */
Vec<RPCError>, /* Errors sending messages if any */
bool, /* whether to remove the stream afterwards */
u64, /* Chunks remaining to be sent after this processing finishes */
);
/// Events the handler emits to the behaviour.
type HandlerEvent<T> = Result<RPCReceived<T>, HandlerErr>;
/// An error encountered by the handler.
#[derive(Debug)]
pub enum HandlerErr {
/// An error occurred for this peer's request. This can occur during protocol negotiation,
/// message passing, or if the handler identifies that we are sending an error response to the peer.
Inbound {
/// Id of the peer's request for which an error occurred.
id: SubstreamId,
/// Information of the negotiated protocol.
proto: Protocol,
/// The error that occurred.
error: RPCError,
},
/// An error occurred for this request. Such error can occur during protocol negotiation,
/// message passing, or if we successfully received a response from the peer, but this response
/// indicates an error.
Outbound {
/// Application-given Id of the request for which an error occurred.
id: RequestId,
/// Information of the protocol.
proto: Protocol,
/// The error that occurred.
error: RPCError,
},
}
/// Implementation of `ProtocolsHandler` for the RPC protocol.
pub struct RPCHandler<TSpec>
where
TSpec: EthSpec,
{
/// The upgrade for inbound substreams.
listen_protocol: SubstreamProtocol<RPCProtocol<TSpec>, ()>,
/// Queue of events to produce in `poll()`.
events_out: SmallVec<[HandlerEvent<TSpec>; 4]>,
/// Queue of outbound substreams to open.
dial_queue: SmallVec<[(RequestId, OutboundRequest<TSpec>); 4]>,
/// Current number of concurrent outbound substreams being opened.
dial_negotiated: u32,
/// Current inbound substreams awaiting processing.
inbound_substreams: FnvHashMap<SubstreamId, InboundInfo<TSpec>>,
/// Inbound substream `DelayQueue` which keeps track of when an inbound substream will time out.
inbound_substreams_delay: DelayQueue<SubstreamId>,
/// Map of outbound substreams that need to be driven to completion.
outbound_substreams: FnvHashMap<SubstreamId, OutboundInfo<TSpec>>,
/// Outbound substream `DelayQueue` which keeps track of when an outbound substream will time out.
outbound_substreams_delay: DelayQueue<SubstreamId>,
/// Sequential ID for waiting substreams. For inbound substreams, this is also the inbound request ID.
current_inbound_substream_id: SubstreamId,
/// Sequential ID for outbound substreams.
current_outbound_substream_id: SubstreamId,
/// Maximum number of concurrent outbound substreams being opened. Value is never modified.
max_dial_negotiated: u32,
/// State of the handler.
state: HandlerState,
/// Try to negotiate the outbound upgrade a few times if there is an IO error before reporting the request as failed.
/// This keeps track of the number of attempts.
outbound_io_error_retries: u8,
/// Fork specific info.
fork_context: Arc<ForkContext>,
/// Logger for handling RPC streams
log: slog::Logger,
}
enum HandlerState {
/// The handler is active. All messages are sent and received.
Active,
/// The handler is shutting down.
///
/// While in this state the handler rejects new requests but tries to finish existing ones.
/// Once the timer expires, all messages are killed.
ShuttingDown(Box<Sleep>),
/// The handler is deactivated. A goodbye has been sent and no more messages are sent or
/// received.
Deactivated,
}
/// Contains the information the handler keeps on established inbound substreams.
struct InboundInfo<TSpec: EthSpec> {
/// State of the substream.
state: InboundState<TSpec>,
/// Responses queued for sending.
pending_items: Vec<RPCCodedResponse<TSpec>>,
/// Protocol of the original request we received from the peer.
protocol: Protocol,
/// Responses that the peer is still expecting from us.
remaining_chunks: u64,
/// Key to keep track of the substream's timeout via `self.inbound_substreams_delay`.
delay_key: Option<delay_queue::Key>,
}
/// Contains the information the handler keeps on established outbound substreams.
struct OutboundInfo<TSpec: EthSpec> {
/// State of the substream.
state: OutboundSubstreamState<TSpec>,
/// Key to keep track of the substream's timeout via `self.outbound_substreams_delay`.
delay_key: delay_queue::Key,
/// Info over the protocol this substream is handling.
proto: Protocol,
/// Number of chunks to be seen from the peer's response.
remaining_chunks: Option<u64>,
/// `RequestId` as given by the application that sent the request.
req_id: RequestId,
}
/// State of an inbound substream connection.
enum InboundState<TSpec: EthSpec> {
/// The underlying substream is not being used.
Idle(InboundSubstream<TSpec>),
/// The underlying substream is processing responses.
Busy(Pin<Box<dyn Future<Output = InboundProcessingOutput<TSpec>> + Send>>),
/// Temporary state during processing
Poisoned,
}
/// State of an outbound substream. Either waiting for a response, or in the process of sending.
pub enum OutboundSubstreamState<TSpec: EthSpec> {
/// A request has been sent, and we are awaiting a response. This future is driven in the
/// handler because GOODBYE requests can be handled and responses dropped instantly.
RequestPendingResponse {
/// The framed negotiated substream.
substream: Box<OutboundFramed<NegotiatedSubstream, TSpec>>,
/// Keeps track of the actual request sent.
request: OutboundRequest<TSpec>,
},
/// Closing an outbound substream.
Closing(Box<OutboundFramed<NegotiatedSubstream, TSpec>>),
/// Temporary state during processing
Poisoned,
}
impl<TSpec> RPCHandler<TSpec>
where
TSpec: EthSpec,
{
pub fn new(
listen_protocol: SubstreamProtocol<RPCProtocol<TSpec>, ()>,
fork_context: Arc<ForkContext>,
log: &slog::Logger,
) -> Self {
RPCHandler {
listen_protocol,
events_out: SmallVec::new(),
dial_queue: SmallVec::new(),
dial_negotiated: 0,
inbound_substreams: FnvHashMap::default(),
outbound_substreams: FnvHashMap::default(),
inbound_substreams_delay: DelayQueue::new(),
outbound_substreams_delay: DelayQueue::new(),
current_inbound_substream_id: SubstreamId(0),
current_outbound_substream_id: SubstreamId(0),
state: HandlerState::Active,
max_dial_negotiated: 8,
outbound_io_error_retries: 0,
fork_context,
log: log.clone(),
}
}
/// Initiates the handler's shutdown process, sending an optional Goodbye message to the
/// peer.
fn shutdown(&mut self, goodbye_reason: Option<GoodbyeReason>) {
if matches!(self.state, HandlerState::Active) {
if !self.dial_queue.is_empty() {
debug!(self.log, "Starting handler shutdown"; "unsent_queued_requests" => self.dial_queue.len());
}
// We now drive to completion communications already dialed/established
while let Some((id, req)) = self.dial_queue.pop() {
self.events_out.push(Err(HandlerErr::Outbound {
error: RPCError::HandlerRejected,
proto: req.protocol(),
id,
}));
}
// Queue our goodbye message.
if let Some(reason) = goodbye_reason {
self.dial_queue
.push((RequestId::Router, OutboundRequest::Goodbye(reason)));
}
self.state = HandlerState::ShuttingDown(Box::new(sleep_until(
TInstant::now() + Duration::from_secs(SHUTDOWN_TIMEOUT_SECS as u64),
)));
}
}
/// Opens an outbound substream with a request.
fn send_request(&mut self, id: RequestId, req: OutboundRequest<TSpec>) {
match self.state {
HandlerState::Active => {
self.dial_queue.push((id, req));
}
_ => self.events_out.push(Err(HandlerErr::Outbound {
error: RPCError::HandlerRejected,
proto: req.protocol(),
id,
})),
}
}
/// Sends a response to a peer's request.
// NOTE: If the substream has closed due to inactivity, or the substream is in the
// wrong state, a response will fail silently.
fn send_response(&mut self, inbound_id: SubstreamId, response: RPCCodedResponse<TSpec>) {
// check if the stream matching the response still exists
let inbound_info = if let Some(info) = self.inbound_substreams.get_mut(&inbound_id) {
info
} else {
if !matches!(response, RPCCodedResponse::StreamTermination(..)) {
// the stream is closed after sending the expected number of responses
trace!(self.log, "Inbound stream has expired, response not sent";
"response" => %response, "id" => inbound_id);
}
return;
};
// If the response we are sending is an error, report back for handling
if let RPCCodedResponse::Error(ref code, ref reason) = response {
self.events_out.push(Err(HandlerErr::Inbound {
error: RPCError::ErrorResponse(*code, reason.to_string()),
proto: inbound_info.protocol,
id: inbound_id,
}));
}
if matches!(self.state, HandlerState::Deactivated) {
// we no longer send responses after the handler is deactivated
debug!(self.log, "Response not sent. Deactivated handler";
"response" => %response, "id" => inbound_id);
return;
}
inbound_info.pending_items.push(response);
}
}
impl<TSpec> ProtocolsHandler for RPCHandler<TSpec>
where
TSpec: EthSpec,
{
type InEvent = RPCSend<TSpec>;
type OutEvent = HandlerEvent<TSpec>;
type Error = RPCError;
type InboundProtocol = RPCProtocol<TSpec>;
type OutboundProtocol = OutboundRequestContainer<TSpec>;
type OutboundOpenInfo = (RequestId, OutboundRequest<TSpec>); // Keep track of the id and the request
type InboundOpenInfo = ();
fn listen_protocol(&self) -> SubstreamProtocol<Self::InboundProtocol, ()> {
self.listen_protocol.clone()
}
fn inject_fully_negotiated_inbound(
&mut self,
substream: <Self::InboundProtocol as InboundUpgrade<NegotiatedSubstream>>::Output,
_info: Self::InboundOpenInfo,
) {
// only accept new peer requests when active
if !matches!(self.state, HandlerState::Active) {
return;
}
let (req, substream) = substream;
let expected_responses = req.expected_responses();
// store requests that expect responses
if expected_responses > 0 {
// Store the stream and tag the output.
let delay_key = self.inbound_substreams_delay.insert(
self.current_inbound_substream_id,
Duration::from_secs(RESPONSE_TIMEOUT),
);
let awaiting_stream = InboundState::Idle(substream);
self.inbound_substreams.insert(
self.current_inbound_substream_id,
InboundInfo {
state: awaiting_stream,
pending_items: vec![],
delay_key: Some(delay_key),
protocol: req.protocol(),
remaining_chunks: expected_responses,
},
);
}
// If we received a goodbye, shutdown the connection.
if let InboundRequest::Goodbye(_) = req {
self.shutdown(None);
}
self.events_out.push(Ok(RPCReceived::Request(
self.current_inbound_substream_id,
req,
)));
self.current_inbound_substream_id.0 += 1;
}
fn inject_fully_negotiated_outbound(
&mut self,
out: <Self::OutboundProtocol as OutboundUpgrade<NegotiatedSubstream>>::Output,
request_info: Self::OutboundOpenInfo,
) {
self.dial_negotiated -= 1;
let (id, request) = request_info;
let proto = request.protocol();
// accept outbound connections only if the handler is not deactivated
if matches!(self.state, HandlerState::Deactivated) {
self.events_out.push(Err(HandlerErr::Outbound {
error: RPCError::HandlerRejected,
proto,
id,
}));
}
// add the stream to substreams if we expect a response, otherwise drop the stream.
let expected_responses = request.expected_responses();
if expected_responses > 0 {
// new outbound request. Store the stream and tag the output.
let delay_key = self.outbound_substreams_delay.insert(
self.current_outbound_substream_id,
Duration::from_secs(RESPONSE_TIMEOUT),
);
let awaiting_stream = OutboundSubstreamState::RequestPendingResponse {
substream: Box::new(out),
request,
};
let expected_responses = if expected_responses > 1 {
// Currently enforced only for multiple responses
Some(expected_responses)
} else {
None
};
if self
.outbound_substreams
.insert(
self.current_outbound_substream_id,
OutboundInfo {
state: awaiting_stream,
delay_key,
proto,
remaining_chunks: expected_responses,
req_id: id,
},
)
.is_some()
{
crit!(self.log, "Duplicate outbound substream id"; "id" => self.current_outbound_substream_id);
}
self.current_outbound_substream_id.0 += 1;
}
}
fn inject_event(&mut self, rpc_event: Self::InEvent) {
match rpc_event {
RPCSend::Request(id, req) => self.send_request(id, req),
RPCSend::Response(inbound_id, response) => self.send_response(inbound_id, response),
RPCSend::Shutdown(reason) => self.shutdown(Some(reason)),
}
}
fn inject_dial_upgrade_error(
&mut self,
request_info: Self::OutboundOpenInfo,
error: ProtocolsHandlerUpgrErr<
<Self::OutboundProtocol as OutboundUpgrade<NegotiatedSubstream>>::Error,
>,
) {
let (id, req) = request_info;
if let ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(RPCError::IoError(_))) = error {
self.outbound_io_error_retries += 1;
if self.outbound_io_error_retries < IO_ERROR_RETRIES {
self.send_request(id, req);
return;
}
}
// This dialing is now considered failed
self.dial_negotiated -= 1;
self.outbound_io_error_retries = 0;
// map the error
let error = match error {
ProtocolsHandlerUpgrErr::Timer => RPCError::InternalError("Timer failed"),
ProtocolsHandlerUpgrErr::Timeout => RPCError::NegotiationTimeout,
ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(e)) => e,
ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(NegotiationError::Failed)) => {
RPCError::UnsupportedProtocol
}
ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(
NegotiationError::ProtocolError(e),
)) => match e {
ProtocolError::IoError(io_err) => RPCError::IoError(io_err.to_string()),
ProtocolError::InvalidProtocol => {
RPCError::InternalError("Protocol was deemed invalid")
}
ProtocolError::InvalidMessage | ProtocolError::TooManyProtocols => {
// Peer is sending invalid data during the negotiation phase, not
// participating in the protocol
RPCError::InvalidData
}
},
};
self.events_out.push(Err(HandlerErr::Outbound {
error,
proto: req.protocol(),
id,
}));
}
fn connection_keep_alive(&self) -> KeepAlive {
// Check that we don't have outbound items pending for dialing, nor dialing, nor
// established. Also check that there are no established inbound substreams.
// Errors and events need to be reported back, so check those too.
let should_shutdown = match self.state {
HandlerState::ShuttingDown(_) => {
self.dial_queue.is_empty()
&& self.outbound_substreams.is_empty()
&& self.inbound_substreams.is_empty()
&& self.events_out.is_empty()
&& self.dial_negotiated == 0
}
HandlerState::Deactivated => {
// Regardless of events, the timeout has expired. Force the disconnect.
true
}
_ => false,
};
if should_shutdown {
KeepAlive::No
} else {
KeepAlive::Yes
}
}
fn poll(
&mut self,
cx: &mut Context<'_>,
) -> Poll<
ProtocolsHandlerEvent<
Self::OutboundProtocol,
Self::OutboundOpenInfo,
Self::OutEvent,
Self::Error,
>,
> {
// return any events that need to be reported
if !self.events_out.is_empty() {
return Poll::Ready(ProtocolsHandlerEvent::Custom(self.events_out.remove(0)));
} else {
self.events_out.shrink_to_fit();
}
// Check if we are shutting down, and if the timer ran out
if let HandlerState::ShuttingDown(delay) = &self.state {
if delay.is_elapsed() {
self.state = HandlerState::Deactivated;
debug!(self.log, "Handler deactivated");
return Poll::Ready(ProtocolsHandlerEvent::Close(RPCError::InternalError(
"Shutdown timeout",
)));
}
}
// purge expired inbound substreams and send an error
loop {
match self.inbound_substreams_delay.poll_expired(cx) {
Poll::Ready(Some(Ok(inbound_id))) => {
// handle a stream timeout for various states
if let Some(info) = self.inbound_substreams.get_mut(inbound_id.get_ref()) {
// the delay has been removed
info.delay_key = None;
self.events_out.push(Err(HandlerErr::Inbound {
error: RPCError::StreamTimeout,
proto: info.protocol,
id: *inbound_id.get_ref(),
}));
if info.pending_items.last().map(|l| l.close_after()) == Some(false) {
// if the last chunk does not close the stream, append an error
info.pending_items.push(RPCCodedResponse::Error(
RPCResponseErrorCode::ServerError,
"Request timed out".into(),
));
}
}
}
Poll::Ready(Some(Err(e))) => {
warn!(self.log, "Inbound substream poll failed"; "error" => ?e);
// drops the peer if we cannot read the delay queue
return Poll::Ready(ProtocolsHandlerEvent::Close(RPCError::InternalError(
"Could not poll inbound stream timer",
)));
}
Poll::Pending | Poll::Ready(None) => break,
}
}
// purge expired outbound substreams
loop {
match self.outbound_substreams_delay.poll_expired(cx) {
Poll::Ready(Some(Ok(outbound_id))) => {
if let Some(OutboundInfo { proto, req_id, .. }) =
self.outbound_substreams.remove(outbound_id.get_ref())
{
let outbound_err = HandlerErr::Outbound {
id: req_id,
proto,
error: RPCError::StreamTimeout,
};
// notify the user
return Poll::Ready(ProtocolsHandlerEvent::Custom(Err(outbound_err)));
} else {
crit!(self.log, "timed out substream not in the books"; "stream_id" => outbound_id.get_ref());
}
}
Poll::Ready(Some(Err(e))) => {
warn!(self.log, "Outbound substream poll failed"; "error" => ?e);
return Poll::Ready(ProtocolsHandlerEvent::Close(RPCError::InternalError(
"Could not poll outbound stream timer",
)));
}
Poll::Pending | Poll::Ready(None) => break,
}
}
// when deactivated, close all streams
let deactivated = matches!(self.state, HandlerState::Deactivated);
// drive inbound streams that need to be processed
let mut substreams_to_remove = Vec::new(); // Closed substreams that need to be removed
for (id, info) in self.inbound_substreams.iter_mut() {
loop {
match std::mem::replace(&mut info.state, InboundState::Poisoned) {
InboundState::Idle(substream) if !deactivated => {
if !info.pending_items.is_empty() {
let to_send = std::mem::take(&mut info.pending_items);
let fut = process_inbound_substream(
substream,
info.remaining_chunks,
to_send,
)
.boxed();
info.state = InboundState::Busy(Box::pin(fut));
} else {
info.state = InboundState::Idle(substream);
break;
}
}
InboundState::Idle(mut substream) => {
// handler is deactivated, close the stream and mark it for removal
match substream.close().poll_unpin(cx) {
// if we can't close right now, put the substream back and try again later
Poll::Pending => info.state = InboundState::Idle(substream),
Poll::Ready(res) => {
// The substream closed, we remove it
substreams_to_remove.push(*id);
if let Some(ref delay_key) = info.delay_key {
self.inbound_substreams_delay.remove(delay_key);
}
if let Err(error) = res {
self.events_out.push(Err(HandlerErr::Inbound {
error,
proto: info.protocol,
id: *id,
}));
}
if info.pending_items.last().map(|l| l.close_after()) == Some(false)
{
// if the request was still active, report back to cancel it
self.events_out.push(Err(HandlerErr::Inbound {
error: RPCError::HandlerRejected,
proto: info.protocol,
id: *id,
}));
}
}
}
break;
}
InboundState::Busy(mut fut) => {
// first check if sending finished
match fut.poll_unpin(cx) {
Poll::Ready((substream, errors, remove, new_remaining_chunks)) => {
info.remaining_chunks = new_remaining_chunks;
// report any error
for error in errors {
self.events_out.push(Err(HandlerErr::Inbound {
error,
proto: info.protocol,
id: *id,
}))
}
if remove {
substreams_to_remove.push(*id);
if let Some(ref delay_key) = info.delay_key {
self.inbound_substreams_delay.remove(delay_key);
}
break;
} else {
// If we are not removing this substream, we reset the timer.
// Each chunk is allowed RESPONSE_TIMEOUT to be sent.
if let Some(ref delay_key) = info.delay_key {
self.inbound_substreams_delay.reset(
delay_key,
Duration::from_secs(RESPONSE_TIMEOUT),
);
}
}
// The stream may be currently idle. Attempt to process more
// elements
if !deactivated && !info.pending_items.is_empty() {
let to_send = std::mem::take(&mut info.pending_items);
let fut = process_inbound_substream(
substream,
info.remaining_chunks,
to_send,
)
.boxed();
info.state = InboundState::Busy(Box::pin(fut));
} else {
info.state = InboundState::Idle(substream);
break;
}
}
Poll::Pending => {
info.state = InboundState::Busy(fut);
break;
}
};
}
InboundState::Poisoned => unreachable!("Poisoned inbound substream"),
}
}
}
// remove closed substreams
for inbound_id in substreams_to_remove {
self.inbound_substreams.remove(&inbound_id);
}
// drive outbound streams that need to be processed
for outbound_id in self.outbound_substreams.keys().copied().collect::<Vec<_>>() {
// get the state and mark it as poisoned
let (mut entry, state) = match self.outbound_substreams.entry(outbound_id) {
Entry::Occupied(mut entry) => {
let state = std::mem::replace(
&mut entry.get_mut().state,
OutboundSubstreamState::Poisoned,
);
(entry, state)
}
Entry::Vacant(_) => unreachable!(),
};
match state {
OutboundSubstreamState::RequestPendingResponse {
substream,
request: _,
} if deactivated => {
// the handler is deactivated. Close the stream
entry.get_mut().state = OutboundSubstreamState::Closing(substream);
self.events_out.push(Err(HandlerErr::Outbound {
error: RPCError::HandlerRejected,
proto: entry.get().proto,
id: entry.get().req_id,
}))
}
OutboundSubstreamState::RequestPendingResponse {
mut substream,
request,
} => match substream.poll_next_unpin(cx) {
Poll::Ready(Some(Ok(response))) => {
if request.expected_responses() > 1 && !response.close_after() {
let substream_entry = entry.get_mut();
let delay_key = &substream_entry.delay_key;
// chunks left after this one
let remaining_chunks = substream_entry
.remaining_chunks
.map(|count| count.saturating_sub(1))
.unwrap_or_else(|| 0);
if remaining_chunks == 0 {
// this is the last expected message, close the stream as all expected chunks have been received
substream_entry.state = OutboundSubstreamState::Closing(substream);
} else {
// If the response chunk was expected update the remaining number of chunks expected and reset the Timeout
substream_entry.state =
OutboundSubstreamState::RequestPendingResponse {
substream,
request,
};
substream_entry.remaining_chunks = Some(remaining_chunks);
self.outbound_substreams_delay
.reset(delay_key, Duration::from_secs(RESPONSE_TIMEOUT));
}
} else {
// either this is a single response request or this response closes the
// stream
entry.get_mut().state = OutboundSubstreamState::Closing(substream);
}
// Check what type of response we got and report it accordingly
let id = entry.get().req_id;
let proto = entry.get().proto;
let received = match response {
RPCCodedResponse::StreamTermination(t) => {
Ok(RPCReceived::EndOfStream(id, t))
}
RPCCodedResponse::Success(resp) => Ok(RPCReceived::Response(id, resp)),
RPCCodedResponse::Error(ref code, ref r) => Err(HandlerErr::Outbound {
id,
proto,
error: RPCError::ErrorResponse(*code, r.to_string()),
}),
};
return Poll::Ready(ProtocolsHandlerEvent::Custom(received));
}
Poll::Ready(None) => {
// stream closed
// if we expected multiple streams send a stream termination,
// else report the stream terminating only.
//trace!(self.log, "RPC Response - stream closed by remote");
// drop the stream
let delay_key = &entry.get().delay_key;
let request_id = entry.get().req_id;
self.outbound_substreams_delay.remove(delay_key);
entry.remove_entry();
// notify the application error
if request.expected_responses() > 1 {
// return an end of stream result
return Poll::Ready(ProtocolsHandlerEvent::Custom(Ok(
RPCReceived::EndOfStream(request_id, request.stream_termination()),
)));
}
// else we return an error, stream should not have closed early.
let outbound_err = HandlerErr::Outbound {
id: request_id,
proto: request.protocol(),
error: RPCError::IncompleteStream,
};
return Poll::Ready(ProtocolsHandlerEvent::Custom(Err(outbound_err)));
}
Poll::Pending => {
entry.get_mut().state =
OutboundSubstreamState::RequestPendingResponse { substream, request }
}
Poll::Ready(Some(Err(e))) => {
// drop the stream
let delay_key = &entry.get().delay_key;
self.outbound_substreams_delay.remove(delay_key);
let outbound_err = HandlerErr::Outbound {
id: entry.get().req_id,
proto: entry.get().proto,
error: e,
};
entry.remove_entry();
return Poll::Ready(ProtocolsHandlerEvent::Custom(Err(outbound_err)));
}
},
OutboundSubstreamState::Closing(mut substream) => {
match Sink::poll_close(Pin::new(&mut substream), cx) {
Poll::Ready(_) => {
// drop the stream and its corresponding timeout
let delay_key = &entry.get().delay_key;
let protocol = entry.get().proto;
let request_id = entry.get().req_id;
self.outbound_substreams_delay.remove(delay_key);
entry.remove_entry();
// report the stream termination to the user
//
// Streams can be terminated here if a responder tries to
// continue sending responses beyond what we would expect. Here
// we simply terminate the stream and report a stream
// termination to the application
let termination = match protocol {
Protocol::BlocksByRange => Some(ResponseTermination::BlocksByRange),
Protocol::BlocksByRoot => Some(ResponseTermination::BlocksByRoot),
_ => None, // all other protocols do not have multiple responses and we do not inform the user; we simply drop the stream.
};
if let Some(termination) = termination {
return Poll::Ready(ProtocolsHandlerEvent::Custom(Ok(
RPCReceived::EndOfStream(request_id, termination),
)));
}
}
Poll::Pending => {
entry.get_mut().state = OutboundSubstreamState::Closing(substream);
}
}
}
OutboundSubstreamState::Poisoned => {
crit!(self.log, "Poisoned outbound substream");
unreachable!("Coding Error: Outbound substream is poisoned")
}
}
}
// establish outbound substreams
if !self.dial_queue.is_empty() && self.dial_negotiated < self.max_dial_negotiated {
self.dial_negotiated += 1;
let (id, req) = self.dial_queue.remove(0);
self.dial_queue.shrink_to_fit();
return Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest {
protocol: SubstreamProtocol::new(
OutboundRequestContainer {
req: req.clone(),
fork_context: self.fork_context.clone(),
},
(),
)
.map_info(|()| (id, req)),
});
}
// Check if we have completed sending a goodbye, disconnect.
if let HandlerState::ShuttingDown(_) = self.state {
if self.dial_queue.is_empty()
&& self.outbound_substreams.is_empty()
&& self.inbound_substreams.is_empty()
&& self.events_out.is_empty()
&& self.dial_negotiated == 0
{
return Poll::Ready(ProtocolsHandlerEvent::Close(RPCError::Disconnected));
}
}
Poll::Pending
}
}
impl slog::Value for SubstreamId {
fn serialize(
&self,
record: &slog::Record,
key: slog::Key,
serializer: &mut dyn slog::Serializer,
) -> slog::Result {
slog::Value::serialize(&self.0, record, key, serializer)
}
}
/// Sends the queued items to the peer.
async fn process_inbound_substream<TSpec: EthSpec>(
mut substream: InboundSubstream<TSpec>,
mut remaining_chunks: u64,
pending_items: Vec<RPCCodedResponse<TSpec>>,
) -> InboundProcessingOutput<TSpec> {
let mut errors = Vec::new();
let mut substream_closed = false;
for item in pending_items {
if !substream_closed {
if matches!(item, RPCCodedResponse::StreamTermination(_)) {
substream.close().await.unwrap_or_else(|e| errors.push(e));
substream_closed = true;
} else {
remaining_chunks = remaining_chunks.saturating_sub(1);
// chunks that are not stream terminations get sent, and the stream is closed if
// the response is an error
let is_error = matches!(item, RPCCodedResponse::Error(..));
substream
.send(item)
.await
.unwrap_or_else(|e| errors.push(e));
if remaining_chunks == 0 || is_error {
substream.close().await.unwrap_or_else(|e| errors.push(e));
substream_closed = true;
}
}
} else if matches!(item, RPCCodedResponse::StreamTermination(_)) {
// The sender closed the stream before us, ignore this.
} else {
// we have more items after a closed substream, report those as errors
errors.push(RPCError::InternalError(
"Sending responses to closed inbound substream",
));
}
}
(substream, errors, substream_closed, remaining_chunks)
}


@@ -0,0 +1,449 @@
//! Available RPC methods types and ids.
use crate::types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield};
use regex::bytes::Regex;
use serde::Serialize;
use ssz_derive::{Decode, Encode};
use ssz_types::{
typenum::{U1024, U256},
VariableList,
};
use std::ops::Deref;
use strum::AsStaticStr;
use superstruct::superstruct;
use types::{Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot};
/// Maximum number of blocks in a single request.
pub type MaxRequestBlocks = U1024;
pub const MAX_REQUEST_BLOCKS: u64 = 1024;
/// Maximum length of error message.
pub type MaxErrorLen = U256;
pub const MAX_ERROR_LEN: u64 = 256;
/// Wrapper over SSZ List to represent error message in rpc responses.
#[derive(Debug, Clone)]
pub struct ErrorType(pub VariableList<u8, MaxErrorLen>);
impl From<String> for ErrorType {
fn from(s: String) -> Self {
Self(VariableList::from(s.as_bytes().to_vec()))
}
}
impl From<&str> for ErrorType {
fn from(s: &str) -> Self {
Self(VariableList::from(s.as_bytes().to_vec()))
}
}
impl Deref for ErrorType {
type Target = VariableList<u8, MaxErrorLen>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl ToString for ErrorType {
fn to_string(&self) -> String {
#[allow(clippy::invalid_regex)]
let re = Regex::new("\\p{C}").expect("Regex is valid");
String::from_utf8_lossy(&re.replace_all(self.0.deref(), &b""[..])).to_string()
}
}
/* Request/Response data structures for RPC methods */
/* Requests */
/// Identifier of a request.
///
// NOTE: The handler stores the `RequestId` to inform back of responses and errors, but its execution
// is independent of the contents of this type.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum RequestId {
Router,
Sync(usize),
Behaviour,
}
/// The STATUS request/response handshake message.
#[derive(Encode, Decode, Clone, Debug, PartialEq)]
pub struct StatusMessage {
/// The fork version of the chain we are broadcasting.
pub fork_digest: [u8; 4],
/// Latest finalized root.
pub finalized_root: Hash256,
/// Latest finalized epoch.
pub finalized_epoch: Epoch,
/// The latest block root.
pub head_root: Hash256,
/// The slot associated with the latest block root.
pub head_slot: Slot,
}
/// The PING request/response message.
#[derive(Encode, Decode, Clone, Debug, PartialEq)]
pub struct Ping {
/// The metadata sequence number.
pub data: u64,
}
/// The METADATA response structure.
#[superstruct(
variants(V1, V2),
variant_attributes(
derive(Encode, Decode, Clone, Debug, PartialEq, Serialize),
serde(bound = "T: EthSpec", deny_unknown_fields),
)
)]
#[derive(Clone, Debug, PartialEq, Serialize, Encode)]
#[serde(bound = "T: EthSpec")]
#[ssz(enum_behaviour = "transparent")]
pub struct MetaData<T: EthSpec> {
/// A sequential counter indicating when data gets modified.
pub seq_number: u64,
/// The persistent attestation subnet bitfield.
pub attnets: EnrAttestationBitfield<T>,
/// The persistent sync committee bitfield.
#[superstruct(only(V2))]
pub syncnets: EnrSyncCommitteeBitfield<T>,
}
/// The reason given for a `Goodbye` message.
///
/// Note: `u64::into(n)` will resolve to `GoodbyeReason::Unknown` for any unknown `n`,
/// however `GoodbyeReason::Unknown.into()` will go into `0_u64`. Therefore de-serializing then
/// re-serializing may not return the same bytes.
#[derive(Debug, Clone, PartialEq)]
pub enum GoodbyeReason {
/// This node has shutdown.
ClientShutdown = 1,
/// Incompatible networks.
IrrelevantNetwork = 2,
/// Error/fault in the RPC.
Fault = 3,
/// Teku uses this code for not being able to verify a network.
UnableToVerifyNetwork = 128,
/// The node has too many connected peers.
TooManyPeers = 129,
/// Scored poorly.
BadScore = 250,
/// The peer is banned
Banned = 251,
/// The IP address the peer is using is banned.
BannedIP = 252,
/// Unknown reason.
Unknown = 0,
}
impl From<u64> for GoodbyeReason {
fn from(id: u64) -> GoodbyeReason {
match id {
1 => GoodbyeReason::ClientShutdown,
2 => GoodbyeReason::IrrelevantNetwork,
3 => GoodbyeReason::Fault,
128 => GoodbyeReason::UnableToVerifyNetwork,
129 => GoodbyeReason::TooManyPeers,
250 => GoodbyeReason::BadScore,
251 => GoodbyeReason::Banned,
252 => GoodbyeReason::BannedIP,
_ => GoodbyeReason::Unknown,
}
}
}
impl From<GoodbyeReason> for u64 {
fn from(reason: GoodbyeReason) -> u64 {
reason as u64
}
}
impl ssz::Encode for GoodbyeReason {
fn is_ssz_fixed_len() -> bool {
<u64 as ssz::Encode>::is_ssz_fixed_len()
}
fn ssz_fixed_len() -> usize {
<u64 as ssz::Encode>::ssz_fixed_len()
}
fn ssz_bytes_len(&self) -> usize {
0_u64.ssz_bytes_len()
}
fn ssz_append(&self, buf: &mut Vec<u8>) {
let conv: u64 = self.clone().into();
conv.ssz_append(buf)
}
}
impl ssz::Decode for GoodbyeReason {
fn is_ssz_fixed_len() -> bool {
<u64 as ssz::Decode>::is_ssz_fixed_len()
}
fn ssz_fixed_len() -> usize {
<u64 as ssz::Decode>::ssz_fixed_len()
}
fn from_ssz_bytes(bytes: &[u8]) -> Result<Self, ssz::DecodeError> {
u64::from_ssz_bytes(bytes).map(|n| n.into())
}
}
/// Request a number of beacon block roots from a peer.
#[derive(Encode, Decode, Clone, Debug, PartialEq)]
pub struct BlocksByRangeRequest {
/// The starting slot to request blocks.
pub start_slot: u64,
/// The number of blocks from the start slot.
pub count: u64,
/// The step increment to receive blocks.
///
/// A value of 1 returns every block.
/// A value of 2 returns every second block.
/// A value of 3 returns every third block and so on.
pub step: u64,
}
/// Request a number of beacon block bodies from a peer.
#[derive(Clone, Debug, PartialEq)]
pub struct BlocksByRootRequest {
/// The list of beacon block bodies being requested.
pub block_roots: VariableList<Hash256, MaxRequestBlocks>,
}
/* RPC Handling and Grouping */
// Collection of enums and structs used by the Codecs to encode/decode RPC messages
#[derive(Debug, Clone, PartialEq)]
pub enum RPCResponse<T: EthSpec> {
/// A HELLO message.
Status(StatusMessage),
/// A response to a get BLOCKS_BY_RANGE request. A None response signifies the end of the
/// batch.
BlocksByRange(Box<SignedBeaconBlock<T>>),
/// A response to a get BLOCKS_BY_ROOT request.
BlocksByRoot(Box<SignedBeaconBlock<T>>),
/// A PONG response to a PING request.
Pong(Ping),
/// A response to a META_DATA request.
MetaData(MetaData<T>),
}
/// Indicates which response is being terminated by a stream termination response.
#[derive(Debug, Clone)]
pub enum ResponseTermination {
/// Blocks by range stream termination.
BlocksByRange,
/// Blocks by root stream termination.
BlocksByRoot,
}
/// The structured response containing a result/code indicating success or failure
/// and the contents of the response
#[derive(Debug, Clone)]
pub enum RPCCodedResponse<T: EthSpec> {
/// The response is successful.
Success(RPCResponse<T>),
Error(RPCResponseErrorCode, ErrorType),
/// Received a stream termination indicating which response is being terminated.
StreamTermination(ResponseTermination),
}
/// The code assigned to an erroneous `RPCResponse`.
#[derive(Debug, Clone, Copy, PartialEq, AsStaticStr)]
#[strum(serialize_all = "snake_case")]
pub enum RPCResponseErrorCode {
RateLimited,
InvalidRequest,
ServerError,
/// Error spec'd to indicate that a peer does not have blocks on a requested range.
ResourceUnavailable,
Unknown,
}
impl<T: EthSpec> RPCCodedResponse<T> {
/// Used to encode the response in the codec.
pub fn as_u8(&self) -> Option<u8> {
match self {
RPCCodedResponse::Success(_) => Some(0),
RPCCodedResponse::Error(code, _) => Some(code.as_u8()),
RPCCodedResponse::StreamTermination(_) => None,
}
}
/// Tells the codec whether to decode as an RPCResponse or an error.
pub fn is_response(response_code: u8) -> bool {
matches!(response_code, 0)
}
/// Builds an RPCCodedResponse from a response code and an ErrorMessage
pub fn from_error(response_code: u8, err: ErrorType) -> Self {
let code = match response_code {
1 => RPCResponseErrorCode::InvalidRequest,
2 => RPCResponseErrorCode::ServerError,
3 => RPCResponseErrorCode::ResourceUnavailable,
139 => RPCResponseErrorCode::RateLimited,
_ => RPCResponseErrorCode::Unknown,
};
RPCCodedResponse::Error(code, err)
}
/// Specifies which response allows for multiple chunks for the stream handler.
pub fn multiple_responses(&self) -> bool {
match self {
RPCCodedResponse::Success(resp) => match resp {
RPCResponse::Status(_) => false,
RPCResponse::BlocksByRange(_) => true,
RPCResponse::BlocksByRoot(_) => true,
RPCResponse::Pong(_) => false,
RPCResponse::MetaData(_) => false,
},
RPCCodedResponse::Error(_, _) => true,
// Stream terminations are part of responses that have chunks
RPCCodedResponse::StreamTermination(_) => true,
}
}
/// Returns true if this response always terminates the stream.
pub fn close_after(&self) -> bool {
!matches!(self, RPCCodedResponse::Success(_))
}
}
impl RPCResponseErrorCode {
fn as_u8(&self) -> u8 {
match self {
RPCResponseErrorCode::InvalidRequest => 1,
RPCResponseErrorCode::ServerError => 2,
RPCResponseErrorCode::ResourceUnavailable => 3,
RPCResponseErrorCode::Unknown => 255,
RPCResponseErrorCode::RateLimited => 139,
}
}
}
impl std::fmt::Display for RPCResponseErrorCode {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let repr = match self {
RPCResponseErrorCode::InvalidRequest => "The request was invalid",
RPCResponseErrorCode::ResourceUnavailable => "Resource unavailable",
RPCResponseErrorCode::ServerError => "Server error occurred",
RPCResponseErrorCode::Unknown => "Unknown error occurred",
RPCResponseErrorCode::RateLimited => "Rate limited",
};
f.write_str(repr)
}
}
impl std::fmt::Display for StatusMessage {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "Status Message: Fork Digest: {:?}, Finalized Root: {}, Finalized Epoch: {}, Head Root: {}, Head Slot: {}", self.fork_digest, self.finalized_root, self.finalized_epoch, self.head_root, self.head_slot)
}
}
impl<T: EthSpec> std::fmt::Display for RPCResponse<T> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
RPCResponse::Status(status) => write!(f, "{}", status),
RPCResponse::BlocksByRange(block) => {
write!(f, "BlocksByRange: Block slot: {}", block.slot())
}
RPCResponse::BlocksByRoot(block) => {
write!(f, "BlocksByRoot: Block slot: {}", block.slot())
}
RPCResponse::Pong(ping) => write!(f, "Pong: {}", ping.data),
RPCResponse::MetaData(metadata) => write!(f, "Metadata: {}", metadata.seq_number()),
}
}
}
impl<T: EthSpec> std::fmt::Display for RPCCodedResponse<T> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
RPCCodedResponse::Success(res) => write!(f, "{}", res),
RPCCodedResponse::Error(code, err) => write!(f, "{}: {}", code, err.to_string()),
RPCCodedResponse::StreamTermination(_) => write!(f, "Stream Termination"),
}
}
}
impl std::fmt::Display for GoodbyeReason {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
GoodbyeReason::ClientShutdown => write!(f, "Client Shutdown"),
GoodbyeReason::IrrelevantNetwork => write!(f, "Irrelevant Network"),
GoodbyeReason::Fault => write!(f, "Fault"),
GoodbyeReason::UnableToVerifyNetwork => write!(f, "Unable to verify network"),
GoodbyeReason::TooManyPeers => write!(f, "Too many peers"),
GoodbyeReason::BadScore => write!(f, "Bad Score"),
GoodbyeReason::Banned => write!(f, "Banned"),
GoodbyeReason::BannedIP => write!(f, "BannedIP"),
GoodbyeReason::Unknown => write!(f, "Unknown Reason"),
}
}
}
impl std::fmt::Display for BlocksByRangeRequest {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(
f,
"Start Slot: {}, Count: {}, Step: {}",
self.start_slot, self.count, self.step
)
}
}
impl slog::KV for StatusMessage {
fn serialize(
&self,
record: &slog::Record,
serializer: &mut dyn slog::Serializer,
) -> slog::Result {
use slog::Value;
serializer.emit_arguments("fork_digest", &format_args!("{:?}", self.fork_digest))?;
Value::serialize(&self.finalized_epoch, record, "finalized_epoch", serializer)?;
serializer.emit_arguments("finalized_root", &format_args!("{}", self.finalized_root))?;
Value::serialize(&self.head_slot, record, "head_slot", serializer)?;
serializer.emit_arguments("head_root", &format_args!("{}", self.head_root))?;
slog::Result::Ok(())
}
}
impl slog::Value for RequestId {
fn serialize(
&self,
record: &slog::Record,
key: slog::Key,
serializer: &mut dyn slog::Serializer,
) -> slog::Result {
match self {
RequestId::Behaviour => slog::Value::serialize("Behaviour", record, key, serializer),
RequestId::Router => slog::Value::serialize("Router", record, key, serializer),
RequestId::Sync(ref id) => slog::Value::serialize(id, record, key, serializer),
}
}
}


@@ -0,0 +1,313 @@
//! The Ethereum 2.0 Wire Protocol
//!
//! This protocol is a purpose-built Ethereum 2.0 libp2p protocol. Its role is to facilitate
//! direct peer-to-peer communication primarily for sending/receiving chain information for
//! syncing.
use futures::future::FutureExt;
use handler::RPCHandler;
use libp2p::core::{connection::ConnectionId, ConnectedPoint};
use libp2p::swarm::{
protocols_handler::ProtocolsHandler, NetworkBehaviour, NetworkBehaviourAction, NotifyHandler,
PollParameters, SubstreamProtocol,
};
use libp2p::{Multiaddr, PeerId};
use rate_limiter::{RPCRateLimiter as RateLimiter, RPCRateLimiterBuilder, RateLimitedErr};
use slog::{crit, debug, o};
use std::marker::PhantomData;
use std::sync::Arc;
use std::task::{Context, Poll};
use std::time::Duration;
use types::{EthSpec, ForkContext};
pub(crate) use handler::HandlerErr;
pub(crate) use methods::{MetaData, MetaDataV1, MetaDataV2, Ping, RPCCodedResponse, RPCResponse};
pub(crate) use protocol::{InboundRequest, RPCProtocol};
pub use handler::SubstreamId;
pub use methods::{
BlocksByRangeRequest, BlocksByRootRequest, GoodbyeReason, MaxRequestBlocks,
RPCResponseErrorCode, RequestId, ResponseTermination, StatusMessage, MAX_REQUEST_BLOCKS,
};
pub(crate) use outbound::OutboundRequest;
pub use protocol::{Protocol, RPCError};
pub(crate) mod codec;
mod handler;
pub mod methods;
mod outbound;
mod protocol;
mod rate_limiter;
/// RPC events sent from Lighthouse.
#[derive(Debug, Clone)]
pub enum RPCSend<TSpec: EthSpec> {
/// A request sent from Lighthouse.
///
/// The `RequestId` is given by the application making the request. These
/// go over *outbound* connections.
Request(RequestId, OutboundRequest<TSpec>),
/// A response sent from Lighthouse.
///
/// The `SubstreamId` must correspond to the RPC-given ID of the original request received from the
/// peer. The second parameter is a single chunk of a response. These go over *inbound*
/// connections.
Response(SubstreamId, RPCCodedResponse<TSpec>),
/// Lighthouse has requested to terminate the connection with a goodbye message.
Shutdown(GoodbyeReason),
}
/// RPC events received from outside Lighthouse.
#[derive(Debug, Clone)]
pub enum RPCReceived<T: EthSpec> {
/// A request received from the outside.
///
/// The `SubstreamId` is given by the `RPCHandler` as it identifies this request with the
/// *inbound* substream over which it is managed.
Request(SubstreamId, InboundRequest<T>),
/// A response received from the outside.
///
/// The `RequestId` corresponds to the application given ID of the original request sent to the
/// peer. The second parameter is a single chunk of a response. These go over *outbound*
/// connections.
Response(RequestId, RPCResponse<T>),
/// Marks a request as completed
EndOfStream(RequestId, ResponseTermination),
}
impl<T: EthSpec> std::fmt::Display for RPCSend<T> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
RPCSend::Request(id, req) => write!(f, "RPC Request(id: {:?}, {})", id, req),
RPCSend::Response(id, res) => write!(f, "RPC Response(id: {:?}, {})", id, res),
RPCSend::Shutdown(reason) => write!(f, "Sending Goodbye: {}", reason),
}
}
}
/// Messages sent to the user from the RPC protocol.
pub struct RPCMessage<TSpec: EthSpec> {
/// The peer that sent the message.
pub peer_id: PeerId,
/// Handler managing this message.
pub conn_id: ConnectionId,
/// The message that was sent.
pub event: <RPCHandler<TSpec> as ProtocolsHandler>::OutEvent,
}
/// Implements the libp2p `NetworkBehaviour` trait and therefore manages network-level
/// logic.
pub struct RPC<TSpec: EthSpec> {
/// Rate limiter
limiter: RateLimiter,
/// Queue of events to be processed.
events: Vec<NetworkBehaviourAction<RPCSend<TSpec>, RPCMessage<TSpec>>>,
fork_context: Arc<ForkContext>,
/// Slog logger for RPC behaviour.
log: slog::Logger,
}
impl<TSpec: EthSpec> RPC<TSpec> {
pub fn new(fork_context: Arc<ForkContext>, log: slog::Logger) -> Self {
let log = log.new(o!("service" => "libp2p_rpc"));
let limiter = RPCRateLimiterBuilder::new()
.n_every(Protocol::MetaData, 2, Duration::from_secs(5))
.n_every(Protocol::Ping, 2, Duration::from_secs(10))
.n_every(Protocol::Status, 5, Duration::from_secs(15))
.one_every(Protocol::Goodbye, Duration::from_secs(10))
.n_every(
Protocol::BlocksByRange,
methods::MAX_REQUEST_BLOCKS,
Duration::from_secs(10),
)
.n_every(Protocol::BlocksByRoot, 128, Duration::from_secs(10))
.build()
.expect("Configuration parameters are valid");
RPC {
limiter,
events: Vec::new(),
fork_context,
log,
}
}
/// Sends an RPC response.
///
/// The peer must be connected for this to succeed.
pub fn send_response(
&mut self,
peer_id: PeerId,
id: (ConnectionId, SubstreamId),
event: RPCCodedResponse<TSpec>,
) {
self.events.push(NetworkBehaviourAction::NotifyHandler {
peer_id,
handler: NotifyHandler::One(id.0),
event: RPCSend::Response(id.1, event),
});
}
/// Submits an RPC request.
///
/// The peer must be connected for this to succeed.
pub fn send_request(
&mut self,
peer_id: PeerId,
request_id: RequestId,
event: OutboundRequest<TSpec>,
) {
self.events.push(NetworkBehaviourAction::NotifyHandler {
peer_id,
handler: NotifyHandler::Any,
event: RPCSend::Request(request_id, event),
});
}
/// Lighthouse wishes to disconnect from this peer by sending a Goodbye message. This
/// gracefully terminates the RPC behaviour with a goodbye message.
pub fn shutdown(&mut self, peer_id: PeerId, reason: GoodbyeReason) {
self.events.push(NetworkBehaviourAction::NotifyHandler {
peer_id,
handler: NotifyHandler::Any,
event: RPCSend::Shutdown(reason),
});
}
}
impl<TSpec> NetworkBehaviour for RPC<TSpec>
where
TSpec: EthSpec,
{
type ProtocolsHandler = RPCHandler<TSpec>;
type OutEvent = RPCMessage<TSpec>;
fn new_handler(&mut self) -> Self::ProtocolsHandler {
RPCHandler::new(
SubstreamProtocol::new(
RPCProtocol {
fork_context: self.fork_context.clone(),
phantom: PhantomData,
},
(),
),
self.fork_context.clone(),
&self.log,
)
}
// handled by discovery
fn addresses_of_peer(&mut self, _peer_id: &PeerId) -> Vec<Multiaddr> {
Vec::new()
}
// Use connection established/closed instead of these currently
fn inject_connected(&mut self, peer_id: &PeerId) {
// find the peer's meta-data
debug!(self.log, "Requesting new peer's metadata"; "peer_id" => %peer_id);
let rpc_event =
RPCSend::Request(RequestId::Behaviour, OutboundRequest::MetaData(PhantomData));
self.events.push(NetworkBehaviourAction::NotifyHandler {
peer_id: *peer_id,
handler: NotifyHandler::Any,
event: rpc_event,
});
}
fn inject_disconnected(&mut self, _peer_id: &PeerId) {}
fn inject_connection_established(
&mut self,
_peer_id: &PeerId,
_: &ConnectionId,
_connected_point: &ConnectedPoint,
) {
}
fn inject_connection_closed(
&mut self,
_peer_id: &PeerId,
_: &ConnectionId,
_connected_point: &ConnectedPoint,
) {
}
fn inject_event(
&mut self,
peer_id: PeerId,
conn_id: ConnectionId,
event: <Self::ProtocolsHandler as ProtocolsHandler>::OutEvent,
) {
if let Ok(RPCReceived::Request(ref id, ref req)) = event {
// check if the request is conformant to the quota
match self.limiter.allows(&peer_id, req) {
Ok(()) => {
// send the event to the user
self.events
.push(NetworkBehaviourAction::GenerateEvent(RPCMessage {
peer_id,
conn_id,
event,
}))
}
Err(RateLimitedErr::TooLarge) => {
// we set the batch sizes, so this is a coding/config err for most protocols
let protocol = req.protocol();
if matches!(protocol, Protocol::BlocksByRange) {
debug!(self.log, "Blocks by range request will never be processed"; "request" => %req);
} else {
crit!(self.log, "Request size too large to ever be processed"; "protocol" => %protocol);
}
// send an error code to the peer.
// the handler upon receiving the error code will send it back to the behaviour
self.send_response(
peer_id,
(conn_id, *id),
RPCCodedResponse::Error(
RPCResponseErrorCode::RateLimited,
"Rate limited. Request too large".into(),
),
);
}
Err(RateLimitedErr::TooSoon(wait_time)) => {
debug!(self.log, "Request exceeds the rate limit";
"request" => %req, "peer_id" => %peer_id, "wait_time_ms" => wait_time.as_millis());
// send an error code to the peer.
// the handler upon receiving the error code will send it back to the behaviour
self.send_response(
peer_id,
(conn_id, *id),
RPCCodedResponse::Error(
RPCResponseErrorCode::RateLimited,
format!("Wait {:?}", wait_time).into(),
),
);
}
}
} else {
self.events
.push(NetworkBehaviourAction::GenerateEvent(RPCMessage {
peer_id,
conn_id,
event,
}));
}
}
fn poll(
&mut self,
cx: &mut Context,
_: &mut impl PollParameters,
) -> Poll<
NetworkBehaviourAction<
<Self::ProtocolsHandler as ProtocolsHandler>::InEvent,
Self::OutEvent,
>,
> {
// let the rate limiter prune
let _ = self.limiter.poll_unpin(cx);
if !self.events.is_empty() {
return Poll::Ready(self.events.remove(0));
}
Poll::Pending
}
}
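The two event enums above use separate identifier spaces: requests we send carry an application-chosen `RequestId`, while requests we receive are tracked by the handler-assigned `SubstreamId` that any response must echo back. A minimal sketch (illustrative only, not part of the diff, and assuming the `RPCReceived` variants defined above are in scope within this crate) of how a consumer distinguishes them:

```rust
// Sketch: classify events surfaced by the RPC behaviour.
fn classify<T: EthSpec>(event: &RPCReceived<T>) -> &'static str {
    match event {
        // Inbound requests are keyed by the handler-assigned `SubstreamId`; a response
        // is sent by passing the same id back to `RPC::send_response`.
        RPCReceived::Request(_substream_id, _request) => "inbound request",
        // Response chunks are keyed by the `RequestId` the application supplied to
        // `RPC::send_request`.
        RPCReceived::Response(_request_id, _chunk) => "response chunk",
        // Multi-chunk streams (e.g. BlocksByRange) finish with an explicit terminator.
        RPCReceived::EndOfStream(_request_id, _termination) => "end of stream",
    }
}
```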


@@ -0,0 +1,182 @@
use std::marker::PhantomData;
use super::methods::*;
use super::protocol::Protocol;
use super::protocol::ProtocolId;
use super::RPCError;
use crate::rpc::protocol::Encoding;
use crate::rpc::protocol::Version;
use crate::rpc::{
codec::{base::BaseOutboundCodec, ssz_snappy::SSZSnappyOutboundCodec, OutboundCodec},
methods::ResponseTermination,
};
use futures::future::BoxFuture;
use futures::prelude::{AsyncRead, AsyncWrite};
use futures::{FutureExt, SinkExt};
use libp2p::core::{OutboundUpgrade, UpgradeInfo};
use std::sync::Arc;
use tokio_util::{
codec::Framed,
compat::{Compat, FuturesAsyncReadCompatExt},
};
use types::{EthSpec, ForkContext};
/* Outbound request */
// Combines all the RPC requests into a single enum to implement `UpgradeInfo` and
// `OutboundUpgrade`
#[derive(Debug, Clone)]
pub struct OutboundRequestContainer<TSpec: EthSpec> {
pub req: OutboundRequest<TSpec>,
pub fork_context: Arc<ForkContext>,
}
#[derive(Debug, Clone, PartialEq)]
pub enum OutboundRequest<TSpec: EthSpec> {
Status(StatusMessage),
Goodbye(GoodbyeReason),
BlocksByRange(BlocksByRangeRequest),
BlocksByRoot(BlocksByRootRequest),
Ping(Ping),
MetaData(PhantomData<TSpec>),
}
impl<TSpec: EthSpec> UpgradeInfo for OutboundRequestContainer<TSpec> {
type Info = ProtocolId;
type InfoIter = Vec<Self::Info>;
// add further protocols as we support more encodings/versions
fn protocol_info(&self) -> Self::InfoIter {
self.req.supported_protocols()
}
}
/// Implements the encoding per supported protocol for `OutboundRequest`.
impl<TSpec: EthSpec> OutboundRequest<TSpec> {
pub fn supported_protocols(&self) -> Vec<ProtocolId> {
match self {
// add more protocols when versions/encodings are supported
OutboundRequest::Status(_) => vec![ProtocolId::new(
Protocol::Status,
Version::V1,
Encoding::SSZSnappy,
)],
OutboundRequest::Goodbye(_) => vec![ProtocolId::new(
Protocol::Goodbye,
Version::V1,
Encoding::SSZSnappy,
)],
OutboundRequest::BlocksByRange(_) => vec![
ProtocolId::new(Protocol::BlocksByRange, Version::V2, Encoding::SSZSnappy),
ProtocolId::new(Protocol::BlocksByRange, Version::V1, Encoding::SSZSnappy),
],
OutboundRequest::BlocksByRoot(_) => vec![
ProtocolId::new(Protocol::BlocksByRoot, Version::V2, Encoding::SSZSnappy),
ProtocolId::new(Protocol::BlocksByRoot, Version::V1, Encoding::SSZSnappy),
],
OutboundRequest::Ping(_) => vec![ProtocolId::new(
Protocol::Ping,
Version::V1,
Encoding::SSZSnappy,
)],
OutboundRequest::MetaData(_) => vec![
ProtocolId::new(Protocol::MetaData, Version::V2, Encoding::SSZSnappy),
ProtocolId::new(Protocol::MetaData, Version::V1, Encoding::SSZSnappy),
],
}
}
/* These functions are used in the handler for stream management */
/// Number of responses expected for this request.
pub fn expected_responses(&self) -> u64 {
match self {
OutboundRequest::Status(_) => 1,
OutboundRequest::Goodbye(_) => 0,
OutboundRequest::BlocksByRange(req) => req.count,
OutboundRequest::BlocksByRoot(req) => req.block_roots.len() as u64,
OutboundRequest::Ping(_) => 1,
OutboundRequest::MetaData(_) => 1,
}
}
/// Gives the corresponding `Protocol` to this request.
pub fn protocol(&self) -> Protocol {
match self {
OutboundRequest::Status(_) => Protocol::Status,
OutboundRequest::Goodbye(_) => Protocol::Goodbye,
OutboundRequest::BlocksByRange(_) => Protocol::BlocksByRange,
OutboundRequest::BlocksByRoot(_) => Protocol::BlocksByRoot,
OutboundRequest::Ping(_) => Protocol::Ping,
OutboundRequest::MetaData(_) => Protocol::MetaData,
}
}
/// Returns the `ResponseTermination` type associated with the request if a stream gets
/// terminated.
pub fn stream_termination(&self) -> ResponseTermination {
match self {
// This only gets called after `multiple_responses()` returns true. Therefore, only
// variants with multiple responses are reachable here.
OutboundRequest::BlocksByRange(_) => ResponseTermination::BlocksByRange,
OutboundRequest::BlocksByRoot(_) => ResponseTermination::BlocksByRoot,
OutboundRequest::Status(_) => unreachable!(),
OutboundRequest::Goodbye(_) => unreachable!(),
OutboundRequest::Ping(_) => unreachable!(),
OutboundRequest::MetaData(_) => unreachable!(),
}
}
}
/* RPC Response type - used for outbound upgrades */
/* Outbound upgrades */
pub type OutboundFramed<TSocket, TSpec> = Framed<Compat<TSocket>, OutboundCodec<TSpec>>;
impl<TSocket, TSpec> OutboundUpgrade<TSocket> for OutboundRequestContainer<TSpec>
where
TSpec: EthSpec + Send + 'static,
TSocket: AsyncRead + AsyncWrite + Unpin + Send + 'static,
{
type Output = OutboundFramed<TSocket, TSpec>;
type Error = RPCError;
type Future = BoxFuture<'static, Result<Self::Output, Self::Error>>;
fn upgrade_outbound(self, socket: TSocket, protocol: Self::Info) -> Self::Future {
// convert to a tokio compatible socket
let socket = socket.compat();
let codec = match protocol.encoding {
Encoding::SSZSnappy => {
let ssz_snappy_codec = BaseOutboundCodec::new(SSZSnappyOutboundCodec::new(
protocol,
usize::max_value(),
self.fork_context.clone(),
));
OutboundCodec::SSZSnappy(ssz_snappy_codec)
}
};
let mut socket = Framed::new(socket, codec);
async {
socket.send(self.req).await?;
socket.close().await?;
Ok(socket)
}
.boxed()
}
}
impl<TSpec: EthSpec> std::fmt::Display for OutboundRequest<TSpec> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
OutboundRequest::Status(status) => write!(f, "Status Message: {}", status),
OutboundRequest::Goodbye(reason) => write!(f, "Goodbye: {}", reason),
OutboundRequest::BlocksByRange(req) => write!(f, "Blocks by range: {}", req),
OutboundRequest::BlocksByRoot(req) => write!(f, "Blocks by root: {:?}", req),
OutboundRequest::Ping(ping) => write!(f, "Ping: {}", ping.data),
OutboundRequest::MetaData(_) => write!(f, "MetaData request"),
}
}
}


@@ -0,0 +1,593 @@
use super::methods::*;
use crate::rpc::{
codec::{base::BaseInboundCodec, ssz_snappy::SSZSnappyInboundCodec, InboundCodec},
methods::{MaxErrorLen, ResponseTermination, MAX_ERROR_LEN},
MaxRequestBlocks, MAX_REQUEST_BLOCKS,
};
use futures::future::BoxFuture;
use futures::prelude::{AsyncRead, AsyncWrite};
use futures::{FutureExt, StreamExt};
use libp2p::core::{InboundUpgrade, ProtocolName, UpgradeInfo};
use ssz::Encode;
use ssz_types::VariableList;
use std::io;
use std::marker::PhantomData;
use std::sync::Arc;
use std::time::Duration;
use strum::{AsStaticRef, AsStaticStr};
use tokio_io_timeout::TimeoutStream;
use tokio_util::{
codec::Framed,
compat::{Compat, FuturesAsyncReadCompatExt},
};
use types::{
BeaconBlock, BeaconBlockAltair, BeaconBlockBase, EthSpec, ForkContext, Hash256, MainnetEthSpec,
Signature, SignedBeaconBlock,
};
lazy_static! {
// Note: Hardcoding the `EthSpec` type for `SignedBeaconBlock` as the min/max values are the
// same across different `EthSpec` implementations.
pub static ref SIGNED_BEACON_BLOCK_BASE_MIN: usize = SignedBeaconBlock::<MainnetEthSpec>::from_block(
BeaconBlock::Base(BeaconBlockBase::<MainnetEthSpec>::empty(&MainnetEthSpec::default_spec())),
Signature::empty(),
)
.as_ssz_bytes()
.len();
pub static ref SIGNED_BEACON_BLOCK_BASE_MAX: usize = SignedBeaconBlock::<MainnetEthSpec>::from_block(
BeaconBlock::Base(BeaconBlockBase::full(&MainnetEthSpec::default_spec())),
Signature::empty(),
)
.as_ssz_bytes()
.len();
pub static ref SIGNED_BEACON_BLOCK_ALTAIR_MIN: usize = SignedBeaconBlock::<MainnetEthSpec>::from_block(
BeaconBlock::Altair(BeaconBlockAltair::<MainnetEthSpec>::empty(&MainnetEthSpec::default_spec())),
Signature::empty(),
)
.as_ssz_bytes()
.len();
pub static ref SIGNED_BEACON_BLOCK_ALTAIR_MAX: usize = SignedBeaconBlock::<MainnetEthSpec>::from_block(
BeaconBlock::Altair(BeaconBlockAltair::full(&MainnetEthSpec::default_spec())),
Signature::empty(),
)
.as_ssz_bytes()
.len();
pub static ref BLOCKS_BY_ROOT_REQUEST_MIN: usize =
VariableList::<Hash256, MaxRequestBlocks>::from(Vec::<Hash256>::new())
.as_ssz_bytes()
.len();
pub static ref BLOCKS_BY_ROOT_REQUEST_MAX: usize =
VariableList::<Hash256, MaxRequestBlocks>::from(vec![
Hash256::zero();
MAX_REQUEST_BLOCKS
as usize
])
.as_ssz_bytes()
.len();
pub static ref ERROR_TYPE_MIN: usize =
VariableList::<u8, MaxErrorLen>::from(Vec::<u8>::new())
.as_ssz_bytes()
.len();
pub static ref ERROR_TYPE_MAX: usize =
VariableList::<u8, MaxErrorLen>::from(vec![
0u8;
MAX_ERROR_LEN
as usize
])
.as_ssz_bytes()
.len();
}
/// The maximum bytes that can be sent across the RPC.
const MAX_RPC_SIZE: usize = 1_048_576; // 1M
/// The protocol prefix of the RPC protocol id.
const PROTOCOL_PREFIX: &str = "/eth2/beacon_chain/req";
/// Time allowed for the first byte of a request to arrive before we time out (Time To First Byte).
const TTFB_TIMEOUT: u64 = 5;
/// The number of seconds to wait for the first bytes of a request once a protocol has been
/// established before the stream is terminated.
const REQUEST_TIMEOUT: u64 = 15;
/// Protocol names to be used.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Protocol {
/// The Status protocol name.
Status,
/// The Goodbye protocol name.
Goodbye,
/// The `BlocksByRange` protocol name.
BlocksByRange,
/// The `BlocksByRoot` protocol name.
BlocksByRoot,
/// The `Ping` protocol name.
Ping,
/// The `MetaData` protocol name.
MetaData,
}
/// RPC Versions
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Version {
/// Version 1 of RPC
V1,
/// Version 2 of RPC
V2,
}
/// RPC Encodings supported.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Encoding {
SSZSnappy,
}
impl std::fmt::Display for Protocol {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let repr = match self {
Protocol::Status => "status",
Protocol::Goodbye => "goodbye",
Protocol::BlocksByRange => "beacon_blocks_by_range",
Protocol::BlocksByRoot => "beacon_blocks_by_root",
Protocol::Ping => "ping",
Protocol::MetaData => "metadata",
};
f.write_str(repr)
}
}
impl std::fmt::Display for Encoding {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let repr = match self {
Encoding::SSZSnappy => "ssz_snappy",
};
f.write_str(repr)
}
}
impl std::fmt::Display for Version {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let repr = match self {
Version::V1 => "1",
Version::V2 => "2",
};
f.write_str(repr)
}
}
#[derive(Debug, Clone)]
pub struct RPCProtocol<TSpec: EthSpec> {
pub fork_context: Arc<ForkContext>,
pub phantom: PhantomData<TSpec>,
}
impl<TSpec: EthSpec> UpgradeInfo for RPCProtocol<TSpec> {
type Info = ProtocolId;
type InfoIter = Vec<Self::Info>;
/// The list of supported RPC protocols for Lighthouse.
fn protocol_info(&self) -> Self::InfoIter {
vec![
ProtocolId::new(Protocol::Status, Version::V1, Encoding::SSZSnappy),
ProtocolId::new(Protocol::Goodbye, Version::V1, Encoding::SSZSnappy),
// V2 variants have higher preference than V1
ProtocolId::new(Protocol::BlocksByRange, Version::V2, Encoding::SSZSnappy),
ProtocolId::new(Protocol::BlocksByRange, Version::V1, Encoding::SSZSnappy),
ProtocolId::new(Protocol::BlocksByRoot, Version::V2, Encoding::SSZSnappy),
ProtocolId::new(Protocol::BlocksByRoot, Version::V1, Encoding::SSZSnappy),
ProtocolId::new(Protocol::Ping, Version::V1, Encoding::SSZSnappy),
ProtocolId::new(Protocol::MetaData, Version::V2, Encoding::SSZSnappy),
ProtocolId::new(Protocol::MetaData, Version::V1, Encoding::SSZSnappy),
]
}
}
/// Represents the ssz length bounds for RPC messages.
#[derive(Debug, PartialEq)]
pub struct RpcLimits {
pub min: usize,
pub max: usize,
}
impl RpcLimits {
pub fn new(min: usize, max: usize) -> Self {
Self { min, max }
}
/// Returns true if the given length is out of bounds, false otherwise.
pub fn is_out_of_bounds(&self, length: usize) -> bool {
length > self.max || length < self.min
}
}
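`RpcLimits` expresses inclusive bounds: a length is acceptable when `min <= length <= max`, and anything outside that range is rejected before decoding. A standalone sketch (not part of the diff) of the same check:

```rust
// Standalone sketch mirroring `RpcLimits::is_out_of_bounds` above.
struct RpcLimits { min: usize, max: usize }

impl RpcLimits {
    fn is_out_of_bounds(&self, length: usize) -> bool {
        length > self.max || length < self.min
    }
}

fn main() {
    let limits = RpcLimits { min: 1, max: 10 };
    assert!(limits.is_out_of_bounds(0));   // below the minimum
    assert!(!limits.is_out_of_bounds(1));  // bounds are inclusive
    assert!(!limits.is_out_of_bounds(10)); // bounds are inclusive
    assert!(limits.is_out_of_bounds(11));  // above the maximum
}
```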
/// Tracks the types in a protocol id.
#[derive(Clone, Debug)]
pub struct ProtocolId {
/// The RPC message type/name.
pub message_name: Protocol,
/// The version of the RPC.
pub version: Version,
/// The encoding of the RPC.
pub encoding: Encoding,
/// The protocol id that is formed from the above fields.
protocol_id: String,
}
impl ProtocolId {
/// Returns the min and max sizes for requests of the given protocol id.
pub fn rpc_request_limits(&self) -> RpcLimits {
match self.message_name {
Protocol::Status => RpcLimits::new(
<StatusMessage as Encode>::ssz_fixed_len(),
<StatusMessage as Encode>::ssz_fixed_len(),
),
Protocol::Goodbye => RpcLimits::new(
<GoodbyeReason as Encode>::ssz_fixed_len(),
<GoodbyeReason as Encode>::ssz_fixed_len(),
),
Protocol::BlocksByRange => RpcLimits::new(
<BlocksByRangeRequest as Encode>::ssz_fixed_len(),
<BlocksByRangeRequest as Encode>::ssz_fixed_len(),
),
Protocol::BlocksByRoot => {
RpcLimits::new(*BLOCKS_BY_ROOT_REQUEST_MIN, *BLOCKS_BY_ROOT_REQUEST_MAX)
}
Protocol::Ping => RpcLimits::new(
<Ping as Encode>::ssz_fixed_len(),
<Ping as Encode>::ssz_fixed_len(),
),
Protocol::MetaData => RpcLimits::new(0, 0), // Metadata requests are empty
}
}
/// Returns the min and max sizes for responses of the given protocol id.
pub fn rpc_response_limits<T: EthSpec>(&self) -> RpcLimits {
match self.message_name {
Protocol::Status => RpcLimits::new(
<StatusMessage as Encode>::ssz_fixed_len(),
<StatusMessage as Encode>::ssz_fixed_len(),
),
Protocol::Goodbye => RpcLimits::new(0, 0), // Goodbye request has no response
Protocol::BlocksByRange => RpcLimits::new(
std::cmp::min(
*SIGNED_BEACON_BLOCK_ALTAIR_MIN,
*SIGNED_BEACON_BLOCK_BASE_MIN,
),
std::cmp::max(
*SIGNED_BEACON_BLOCK_ALTAIR_MAX,
*SIGNED_BEACON_BLOCK_BASE_MAX,
),
),
Protocol::BlocksByRoot => RpcLimits::new(
std::cmp::min(
*SIGNED_BEACON_BLOCK_ALTAIR_MIN,
*SIGNED_BEACON_BLOCK_BASE_MIN,
),
std::cmp::max(
*SIGNED_BEACON_BLOCK_ALTAIR_MAX,
*SIGNED_BEACON_BLOCK_BASE_MAX,
),
),
Protocol::Ping => RpcLimits::new(
<Ping as Encode>::ssz_fixed_len(),
<Ping as Encode>::ssz_fixed_len(),
),
Protocol::MetaData => RpcLimits::new(
<MetaDataV1<T> as Encode>::ssz_fixed_len(),
<MetaDataV2<T> as Encode>::ssz_fixed_len(),
),
}
}
/// Returns `true` if the given `ProtocolId` should expect `context_bytes` in the
/// beginning of the stream, else returns `false`.
pub fn has_context_bytes(&self) -> bool {
if self.version == Version::V2 {
match self.message_name {
Protocol::BlocksByRange | Protocol::BlocksByRoot => return true,
_ => return false,
}
}
false
}
}
/// An RPC protocol ID.
impl ProtocolId {
pub fn new(message_name: Protocol, version: Version, encoding: Encoding) -> Self {
let protocol_id = format!(
"{}/{}/{}/{}",
PROTOCOL_PREFIX, message_name, version, encoding
);
ProtocolId {
message_name,
version,
encoding,
protocol_id,
}
}
}
impl ProtocolName for ProtocolId {
fn protocol_name(&self) -> &[u8] {
self.protocol_id.as_bytes()
}
}
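Putting `ProtocolId::new` and the `Display` impls together, each supported protocol advertises a string of the form `{prefix}/{message_name}/{version}/{encoding}`; this is the byte string that `ProtocolName::protocol_name` hands to multistream-select. A standalone sketch (not part of the diff) showing the resulting id for BlocksByRange V2:

```rust
// Standalone sketch reproducing the protocol-id string construction above.
fn main() {
    let prefix = "/eth2/beacon_chain/req";       // PROTOCOL_PREFIX
    let message_name = "beacon_blocks_by_range"; // Display for Protocol::BlocksByRange
    let version = "2";                            // Display for Version::V2
    let encoding = "ssz_snappy";                  // Display for Encoding::SSZSnappy
    let protocol_id = format!("{}/{}/{}/{}", prefix, message_name, version, encoding);
    assert_eq!(
        protocol_id,
        "/eth2/beacon_chain/req/beacon_blocks_by_range/2/ssz_snappy"
    );
}
```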
/* Inbound upgrade */
// The inbound protocol reads the request, decodes it and returns the stream to the protocol
// handler to respond to once ready.
pub type InboundOutput<TSocket, TSpec> = (InboundRequest<TSpec>, InboundFramed<TSocket, TSpec>);
pub type InboundFramed<TSocket, TSpec> =
Framed<std::pin::Pin<Box<TimeoutStream<Compat<TSocket>>>>, InboundCodec<TSpec>>;
impl<TSocket, TSpec> InboundUpgrade<TSocket> for RPCProtocol<TSpec>
where
TSocket: AsyncRead + AsyncWrite + Unpin + Send + 'static,
TSpec: EthSpec,
{
type Output = InboundOutput<TSocket, TSpec>;
type Error = RPCError;
type Future = BoxFuture<'static, Result<Self::Output, Self::Error>>;
fn upgrade_inbound(self, socket: TSocket, protocol: ProtocolId) -> Self::Future {
async move {
let protocol_name = protocol.message_name;
// convert the socket to tokio compatible socket
let socket = socket.compat();
let codec = match protocol.encoding {
Encoding::SSZSnappy => {
let ssz_snappy_codec = BaseInboundCodec::new(SSZSnappyInboundCodec::new(
protocol,
MAX_RPC_SIZE,
self.fork_context.clone(),
));
InboundCodec::SSZSnappy(ssz_snappy_codec)
}
};
let mut timed_socket = TimeoutStream::new(socket);
timed_socket.set_read_timeout(Some(Duration::from_secs(TTFB_TIMEOUT)));
let socket = Framed::new(Box::pin(timed_socket), codec);
// MetaData requests should be empty, return the stream
match protocol_name {
Protocol::MetaData => Ok((InboundRequest::MetaData(PhantomData), socket)),
_ => {
match tokio::time::timeout(
Duration::from_secs(REQUEST_TIMEOUT),
socket.into_future(),
)
.await
{
Err(e) => Err(RPCError::from(e)),
Ok((Some(Ok(request)), stream)) => Ok((request, stream)),
Ok((Some(Err(e)), _)) => Err(e),
Ok((None, _)) => Err(RPCError::IncompleteStream),
}
}
}
}
.boxed()
}
}
#[derive(Debug, Clone, PartialEq)]
pub enum InboundRequest<TSpec: EthSpec> {
Status(StatusMessage),
Goodbye(GoodbyeReason),
BlocksByRange(BlocksByRangeRequest),
BlocksByRoot(BlocksByRootRequest),
Ping(Ping),
MetaData(PhantomData<TSpec>),
}
impl<TSpec: EthSpec> UpgradeInfo for InboundRequest<TSpec> {
type Info = ProtocolId;
type InfoIter = Vec<Self::Info>;
// add further protocols as we support more encodings/versions
fn protocol_info(&self) -> Self::InfoIter {
self.supported_protocols()
}
}
/// Implements the encoding per supported protocol for `InboundRequest`.
impl<TSpec: EthSpec> InboundRequest<TSpec> {
pub fn supported_protocols(&self) -> Vec<ProtocolId> {
match self {
// add more protocols when versions/encodings are supported
InboundRequest::Status(_) => vec![ProtocolId::new(
Protocol::Status,
Version::V1,
Encoding::SSZSnappy,
)],
InboundRequest::Goodbye(_) => vec![ProtocolId::new(
Protocol::Goodbye,
Version::V1,
Encoding::SSZSnappy,
)],
InboundRequest::BlocksByRange(_) => vec![
// V2 has higher preference when negotiating a stream
ProtocolId::new(Protocol::BlocksByRange, Version::V2, Encoding::SSZSnappy),
ProtocolId::new(Protocol::BlocksByRange, Version::V1, Encoding::SSZSnappy),
],
InboundRequest::BlocksByRoot(_) => vec![
// V2 has higher preference when negotiating a stream
ProtocolId::new(Protocol::BlocksByRoot, Version::V2, Encoding::SSZSnappy),
ProtocolId::new(Protocol::BlocksByRoot, Version::V1, Encoding::SSZSnappy),
],
InboundRequest::Ping(_) => vec![ProtocolId::new(
Protocol::Ping,
Version::V1,
Encoding::SSZSnappy,
)],
InboundRequest::MetaData(_) => vec![
ProtocolId::new(Protocol::MetaData, Version::V2, Encoding::SSZSnappy),
ProtocolId::new(Protocol::MetaData, Version::V1, Encoding::SSZSnappy),
],
}
}
/* These functions are used in the handler for stream management */
/// Number of responses expected for this request.
pub fn expected_responses(&self) -> u64 {
match self {
InboundRequest::Status(_) => 1,
InboundRequest::Goodbye(_) => 0,
InboundRequest::BlocksByRange(req) => req.count,
InboundRequest::BlocksByRoot(req) => req.block_roots.len() as u64,
InboundRequest::Ping(_) => 1,
InboundRequest::MetaData(_) => 1,
}
}
/// Gives the corresponding `Protocol` to this request.
pub fn protocol(&self) -> Protocol {
match self {
InboundRequest::Status(_) => Protocol::Status,
InboundRequest::Goodbye(_) => Protocol::Goodbye,
InboundRequest::BlocksByRange(_) => Protocol::BlocksByRange,
InboundRequest::BlocksByRoot(_) => Protocol::BlocksByRoot,
InboundRequest::Ping(_) => Protocol::Ping,
InboundRequest::MetaData(_) => Protocol::MetaData,
}
}
/// Returns the `ResponseTermination` type associated with the request if a stream gets
/// terminated.
pub fn stream_termination(&self) -> ResponseTermination {
match self {
// This only gets called after `multiple_responses()` returns true. Therefore, only
// variants with multiple responses are reachable here.
InboundRequest::BlocksByRange(_) => ResponseTermination::BlocksByRange,
InboundRequest::BlocksByRoot(_) => ResponseTermination::BlocksByRoot,
InboundRequest::Status(_) => unreachable!(),
InboundRequest::Goodbye(_) => unreachable!(),
InboundRequest::Ping(_) => unreachable!(),
InboundRequest::MetaData(_) => unreachable!(),
}
}
}
/// Error in RPC Encoding/Decoding.
#[derive(Debug, Clone, PartialEq, AsStaticStr)]
#[strum(serialize_all = "snake_case")]
pub enum RPCError {
/// Error when decoding the raw buffer from ssz.
// NOTE: in the future a ssz::DecodeError should map to an InvalidData error
#[strum(serialize = "decode_error")]
SSZDecodeError(ssz::DecodeError),
/// IO Error.
IoError(String),
/// The peer returned a valid response but the response indicated an error.
ErrorResponse(RPCResponseErrorCode, String),
/// Timed out waiting for a response.
StreamTimeout,
/// Peer does not support the protocol.
UnsupportedProtocol,
/// Stream ended unexpectedly.
IncompleteStream,
/// Peer sent invalid data.
InvalidData,
/// An error occurred due to internal reasons. Ex: timer failure.
InternalError(&'static str),
/// Negotiation with this peer timed out.
NegotiationTimeout,
/// Handler rejected this request.
HandlerRejected,
/// We have intentionally disconnected.
Disconnected,
}
impl From<ssz::DecodeError> for RPCError {
#[inline]
fn from(err: ssz::DecodeError) -> Self {
RPCError::SSZDecodeError(err)
}
}
impl From<tokio::time::error::Elapsed> for RPCError {
fn from(_: tokio::time::error::Elapsed) -> Self {
RPCError::StreamTimeout
}
}
impl From<io::Error> for RPCError {
fn from(err: io::Error) -> Self {
RPCError::IoError(err.to_string())
}
}
// Error trait is required for `ProtocolsHandler`
impl std::fmt::Display for RPCError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match *self {
RPCError::SSZDecodeError(ref err) => write!(f, "Error while decoding ssz: {:?}", err),
RPCError::InvalidData => write!(f, "Peer sent unexpected data"),
RPCError::IoError(ref err) => write!(f, "IO Error: {}", err),
RPCError::ErrorResponse(ref code, ref reason) => write!(
f,
"RPC response was an error: {} with reason: {}",
code, reason
),
RPCError::StreamTimeout => write!(f, "Stream Timeout"),
RPCError::UnsupportedProtocol => write!(f, "Peer does not support the protocol"),
RPCError::IncompleteStream => write!(f, "Stream ended unexpectedly"),
RPCError::InternalError(ref err) => write!(f, "Internal error: {}", err),
RPCError::NegotiationTimeout => write!(f, "Negotiation timeout"),
RPCError::HandlerRejected => write!(f, "Handler rejected the request"),
RPCError::Disconnected => write!(f, "Gracefully Disconnected"),
}
}
}
impl std::error::Error for RPCError {
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
match *self {
// NOTE: this does have a source
RPCError::SSZDecodeError(_) => None,
RPCError::IoError(_) => None,
RPCError::StreamTimeout => None,
RPCError::UnsupportedProtocol => None,
RPCError::IncompleteStream => None,
RPCError::InvalidData => None,
RPCError::InternalError(_) => None,
RPCError::ErrorResponse(_, _) => None,
RPCError::NegotiationTimeout => None,
RPCError::HandlerRejected => None,
RPCError::Disconnected => None,
}
}
}
impl<TSpec: EthSpec> std::fmt::Display for InboundRequest<TSpec> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
InboundRequest::Status(status) => write!(f, "Status Message: {}", status),
InboundRequest::Goodbye(reason) => write!(f, "Goodbye: {}", reason),
InboundRequest::BlocksByRange(req) => write!(f, "Blocks by range: {}", req),
InboundRequest::BlocksByRoot(req) => write!(f, "Blocks by root: {:?}", req),
InboundRequest::Ping(ping) => write!(f, "Ping: {}", ping.data),
InboundRequest::MetaData(_) => write!(f, "MetaData request"),
}
}
}
impl RPCError {
/// Get a `str` representation of the error.
/// Used for metrics.
pub fn as_static_str(&self) -> &'static str {
match self {
RPCError::ErrorResponse(ref code, ..) => code.as_static(),
e => e.as_static(),
}
}
}


@@ -0,0 +1,399 @@
use crate::rpc::{InboundRequest, Protocol};
use fnv::FnvHashMap;
use libp2p::PeerId;
use std::convert::TryInto;
use std::future::Future;
use std::hash::Hash;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::time::{Duration, Instant};
use tokio::time::Interval;
use types::EthSpec;
/// Nanoseconds since a given time.
// Maintained as u64 to reduce footprint
// NOTE: this also implies that the rate limiter will manage checking if a batch is allowed for at
// most <init time> + u64::MAX nanosecs, ~500 years. So it is realistic to assume this is fine.
type Nanosecs = u64;
/// User-friendly rate limiting parameters of the GCRA.
///
/// A quota of `max_tokens` tokens every `replenish_all_every` units of time means that:
/// 1. One token is replenished every `replenish_all_every`/`max_tokens` units of time.
/// 2. Instantaneous bursts (batches) of up to `max_tokens` tokens are allowed.
///
/// The above implies that if `max_tokens` is greater than 1, the perceived rate may be higher (but
/// bounded) than the defined rate when instantaneous bursts occur. For instance, for a rate of
/// 4T/2s a first burst of 4T is allowed with subsequent requests of 1T every 0.5s forever,
/// producing a perceived rate over the window of the first 2s of 8T. However, subsequent sliding
/// windows of 2s keep the limit.
///
/// In this scenario using the same rate as above, the sender is always maxing out their tokens,
/// except at seconds 1.5, 3, 3.5 and 4
///
/// ```ignore
/// x
/// used x
/// tokens x x x
/// at a x x x x x x
/// given +--+--+--o--+--+--o--o--o--> seconds
/// time | | | | | | | | |
/// 0 1 2 3 4
///
/// 4 1 1 1 2 1 1 2 3 <= available tokens when the batch is received
/// ```
///
/// For a sender to request a batch of `n`T, they would need to wait at least
/// n*`replenish_all_every`/`max_tokens` units of time since their last request.
///
/// To produce hard limits, set `max_tokens` to 1.
pub struct Quota {
/// How often are `max_tokens` fully replenished.
replenish_all_every: Duration,
/// Token limit. This translates to how large an instantaneous batch of
/// tokens can be.
max_tokens: u64,
}
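As described in the doc comment above, a quota of `max_tokens` every `replenish_all_every` replenishes one token every `replenish_all_every / max_tokens`, so a batch of `n` tokens needs roughly `n` such intervals since the previous request. A standalone sketch (not part of the diff) of that arithmetic for the 4T/2s example:

```rust
use std::time::Duration;

// Standalone sketch of the quota arithmetic: 4 tokens replenished every 2 s means one
// token every 0.5 s, so a batch of 3 tokens needs at least 1.5 s of replenishment.
fn main() {
    let replenish_all_every = Duration::from_secs(2);
    let max_tokens: u32 = 4;

    let per_token = replenish_all_every / max_tokens;
    assert_eq!(per_token, Duration::from_millis(500));

    let batch: u32 = 3;
    assert_eq!(per_token * batch, Duration::from_millis(1500));
}
```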
/// Manages rate limiting of requests per peer, with differentiated rates per protocol.
pub struct RPCRateLimiter {
/// Interval to prune peers for which their timer ran out.
prune_interval: Interval,
/// Creation time of the rate limiter.
init_time: Instant,
/// Goodbye rate limiter.
goodbye_rl: Limiter<PeerId>,
/// Ping rate limiter.
ping_rl: Limiter<PeerId>,
/// MetaData rate limiter.
metadata_rl: Limiter<PeerId>,
/// Status rate limiter.
status_rl: Limiter<PeerId>,
/// BlocksByRange rate limiter.
bbrange_rl: Limiter<PeerId>,
/// BlocksByRoot rate limiter.
bbroots_rl: Limiter<PeerId>,
}
/// Error type for non-conformant requests.
pub enum RateLimitedErr {
/// Required tokens for this request exceed the maximum
TooLarge,
/// Request does not fit in the quota. Gives the earliest time the request could be accepted.
TooSoon(Duration),
}
/// User-friendly builder of a `RPCRateLimiter`
#[derive(Default)]
pub struct RPCRateLimiterBuilder {
/// Quota for the Goodbye protocol.
goodbye_quota: Option<Quota>,
/// Quota for the Ping protocol.
ping_quota: Option<Quota>,
/// Quota for the MetaData protocol.
metadata_quota: Option<Quota>,
/// Quota for the Status protocol.
status_quota: Option<Quota>,
/// Quota for the BlocksByRange protocol.
bbrange_quota: Option<Quota>,
/// Quota for the BlocksByRoot protocol.
bbroots_quota: Option<Quota>,
}
impl RPCRateLimiterBuilder {
/// Get an empty `RPCRateLimiterBuilder`.
pub fn new() -> Self {
Default::default()
}
/// Set a quota for a protocol.
fn set_quota(mut self, protocol: Protocol, quota: Quota) -> Self {
let q = Some(quota);
match protocol {
Protocol::Ping => self.ping_quota = q,
Protocol::Status => self.status_quota = q,
Protocol::MetaData => self.metadata_quota = q,
Protocol::Goodbye => self.goodbye_quota = q,
Protocol::BlocksByRange => self.bbrange_quota = q,
Protocol::BlocksByRoot => self.bbroots_quota = q,
}
self
}
/// Allow one token every `time_period` to be used for this `protocol`.
/// This produces a hard limit.
pub fn one_every(self, protocol: Protocol, time_period: Duration) -> Self {
self.set_quota(
protocol,
Quota {
replenish_all_every: time_period,
max_tokens: 1,
},
)
}
/// Allow `n` tokens to be used every `time_period` for this `protocol`.
pub fn n_every(self, protocol: Protocol, n: u64, time_period: Duration) -> Self {
self.set_quota(
protocol,
Quota {
max_tokens: n,
replenish_all_every: time_period,
},
)
}
pub fn build(self) -> Result<RPCRateLimiter, &'static str> {
// get our quotas
let ping_quota = self.ping_quota.ok_or("Ping quota not specified")?;
let metadata_quota = self.metadata_quota.ok_or("MetaData quota not specified")?;
let status_quota = self.status_quota.ok_or("Status quota not specified")?;
let goodbye_quota = self.goodbye_quota.ok_or("Goodbye quota not specified")?;
let bbroots_quota = self
.bbroots_quota
.ok_or("BlocksByRoot quota not specified")?;
let bbrange_quota = self
.bbrange_quota
.ok_or("BlocksByRange quota not specified")?;
// create the rate limiters
let ping_rl = Limiter::from_quota(ping_quota)?;
let metadata_rl = Limiter::from_quota(metadata_quota)?;
let status_rl = Limiter::from_quota(status_quota)?;
let goodbye_rl = Limiter::from_quota(goodbye_quota)?;
let bbroots_rl = Limiter::from_quota(bbroots_quota)?;
let bbrange_rl = Limiter::from_quota(bbrange_quota)?;
// check for peers to prune every 30 seconds, starting in 30 seconds
let prune_every = tokio::time::Duration::from_secs(30);
let prune_start = tokio::time::Instant::now() + prune_every;
let prune_interval = tokio::time::interval_at(prune_start, prune_every);
Ok(RPCRateLimiter {
prune_interval,
ping_rl,
metadata_rl,
status_rl,
goodbye_rl,
bbroots_rl,
bbrange_rl,
init_time: Instant::now(),
})
}
}
impl RPCRateLimiter {
pub fn allows<T: EthSpec>(
&mut self,
peer_id: &PeerId,
request: &InboundRequest<T>,
) -> Result<(), RateLimitedErr> {
let time_since_start = self.init_time.elapsed();
let mut tokens = request.expected_responses().max(1);
// Increase the rate-limit cost of BlocksByRange requests with large step counts.
// We scale the token count quadratically with the step size:
// using (step_size/5)^2 + 1 as the penalty factor leaves step sizes 1-4 unpenalised,
// while larger step sizes incur a quadratic penalty.
// Penalties go:
// Step size | Penalty Factor
// 1 | 1
// 2 | 1
// 3 | 1
// 4 | 1
// 5 | 2
// 6 | 2
// 7 | 2
// 8 | 3
// 9 | 4
// 10 | 5
if let InboundRequest::BlocksByRange(bbr_req) = request {
let penalty_factor = (bbr_req.step as f64 / 5.0).powi(2) as u64 + 1;
tokens *= penalty_factor;
}
let check =
|limiter: &mut Limiter<PeerId>| limiter.allows(time_since_start, peer_id, tokens);
let limiter = match request.protocol() {
Protocol::Ping => &mut self.ping_rl,
Protocol::Status => &mut self.status_rl,
Protocol::MetaData => &mut self.metadata_rl,
Protocol::Goodbye => &mut self.goodbye_rl,
Protocol::BlocksByRange => &mut self.bbrange_rl,
Protocol::BlocksByRoot => &mut self.bbroots_rl,
};
check(limiter)
}
pub fn prune(&mut self) {
let time_since_start = self.init_time.elapsed();
self.ping_rl.prune(time_since_start);
self.status_rl.prune(time_since_start);
self.metadata_rl.prune(time_since_start);
self.goodbye_rl.prune(time_since_start);
self.bbrange_rl.prune(time_since_start);
self.bbroots_rl.prune(time_since_start);
}
}
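The BlocksByRange step-size penalty in `allows` is pure arithmetic, so it can be checked in isolation. A standalone sketch (not part of the diff) reproducing the table from the comment above:

```rust
// Standalone sketch of the quadratic step-size penalty used in `RPCRateLimiter::allows`.
fn penalty_factor(step: u64) -> u64 {
    (step as f64 / 5.0).powi(2) as u64 + 1
}

fn main() {
    // Step sizes 1-4 carry no penalty; larger steps grow quadratically, matching the
    // table in the comment above.
    assert_eq!(penalty_factor(1), 1);
    assert_eq!(penalty_factor(4), 1);
    assert_eq!(penalty_factor(5), 2);
    assert_eq!(penalty_factor(8), 3);
    assert_eq!(penalty_factor(10), 5);
}
```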
impl Future for RPCRateLimiter {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
while self.prune_interval.poll_tick(cx).is_ready() {
self.prune();
}
Poll::Pending
}
}
/// Per-key rate limiter using the token bucket / leaky-bucket-as-a-meter rate-limiting
/// algorithm, implemented via GCRA (Generic Cell Rate Algorithm).
pub struct Limiter<Key: Hash + Eq + Clone> {
/// After how long is the bucket considered full via replenishing 1T every `t`.
tau: Nanosecs,
/// How often is 1T replenished.
t: Nanosecs,
/// Time when the bucket will be full for each peer. TAT (theoretical arrival time) from GCRA.
tat_per_key: FnvHashMap<Key, Nanosecs>,
}
impl<Key: Hash + Eq + Clone> Limiter<Key> {
pub fn from_quota(quota: Quota) -> Result<Self, &'static str> {
if quota.max_tokens == 0 {
return Err("Max number of tokens should be positive");
}
let tau = quota.replenish_all_every.as_nanos();
if tau == 0 {
return Err("Replenish time must be positive");
}
let t = (tau / quota.max_tokens as u128)
.try_into()
.map_err(|_| "total replenish time is too long")?;
let tau = tau
.try_into()
.map_err(|_| "total replenish time is too long")?;
Ok(Limiter {
tau,
t,
tat_per_key: FnvHashMap::default(),
})
}
pub fn allows(
&mut self,
time_since_start: Duration,
key: &Key,
tokens: u64,
) -> Result<(), RateLimitedErr> {
let time_since_start = time_since_start.as_nanos() as u64;
let tau = self.tau;
let t = self.t;
// how long does it take to replenish these tokens
let additional_time = t * tokens;
if additional_time > tau {
// the time required to process this amount of tokens is longer than the time that
// makes the bucket full. So, this batch can _never_ be processed
return Err(RateLimitedErr::TooLarge);
}
// If the key is new, we consider their bucket full (which means, their request will be
// allowed)
let tat = self
.tat_per_key
.entry(key.clone())
.or_insert(time_since_start);
// check how soon could the request be made
let earliest_time = (*tat + additional_time).saturating_sub(tau);
// earliest_time is in the future
if time_since_start < earliest_time {
Err(RateLimitedErr::TooSoon(Duration::from_nanos(
/* how long they need to wait, i.e. how early they were */
earliest_time - time_since_start,
)))
} else {
// calculate the new TAT
*tat = time_since_start.max(*tat) + additional_time;
Ok(())
}
}
/// Removes keys whose bucket is already full by `time_limit`.
pub fn prune(&mut self, time_limit: Duration) {
let lim = &mut (time_limit.as_nanos() as u64);
// remove those for which tat < lim
self.tat_per_key.retain(|_k, tat| tat >= lim)
}
}
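The GCRA update in `allows` boils down to a couple of saturating additions on nanosecond counters; the numbers below trace the first two requests of the `it_works_a` test that follows. This is a standalone sketch (not part of the diff) of the same arithmetic:

```rust
// Standalone sketch of the GCRA bookkeeping in `Limiter::allows` for a 4T/2s quota.
// All times are nanoseconds since the limiter was created.
fn main() {
    let tau: u64 = 2_000_000_000; // bucket is full after 2 s
    let t: u64 = tau / 4;         // one token replenished every 0.5 s
    let mut tat: u64 = 0;         // new keys start with a full bucket

    // At 0 s: a batch of 4 tokens exactly drains the bucket and is allowed.
    let (now, tokens) = (0u64, 4u64);
    let earliest = (tat + t * tokens).saturating_sub(tau);
    assert!(now >= earliest);
    tat = now.max(tat) + t * tokens; // tat is now 2 s

    // At 0.1 s: one more token is too soon; it only becomes allowed at 0.5 s.
    let (now, tokens) = (100_000_000u64, 1u64);
    let earliest = (tat + t * tokens).saturating_sub(tau);
    assert_eq!(earliest, 500_000_000);
    assert!(now < earliest); // rejected with TooSoon(0.4 s)
}
```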
#[cfg(test)]
mod tests {
use crate::rpc::rate_limiter::{Limiter, Quota};
use std::time::Duration;
#[test]
fn it_works_a() {
let mut limiter = Limiter::from_quota(Quota {
replenish_all_every: Duration::from_secs(2),
max_tokens: 4,
})
.unwrap();
let key = 10;
// x
// used x
// tokens x x
// x x x x
// +--+--+--+--+----> seconds
// | | | | |
// 0 1 2
assert!(limiter
.allows(Duration::from_secs_f32(0.0), &key, 4)
.is_ok());
limiter.prune(Duration::from_secs_f32(0.1));
assert!(limiter
.allows(Duration::from_secs_f32(0.1), &key, 1)
.is_err());
assert!(limiter
.allows(Duration::from_secs_f32(0.5), &key, 1)
.is_ok());
assert!(limiter
.allows(Duration::from_secs_f32(1.0), &key, 1)
.is_ok());
assert!(limiter
.allows(Duration::from_secs_f32(1.4), &key, 1)
.is_err());
assert!(limiter
.allows(Duration::from_secs_f32(2.0), &key, 2)
.is_ok());
}
#[test]
fn it_works_b() {
let mut limiter = Limiter::from_quota(Quota {
replenish_all_every: Duration::from_secs(2),
max_tokens: 4,
})
.unwrap();
let key = 10;
// if we limit to 4T per 2s, check that 4 requests worth 1 token can be sent before the
// first half second, when one token will be available again. Check also that before
// regaining a token, another request is rejected
assert!(limiter
.allows(Duration::from_secs_f32(0.0), &key, 1)
.is_ok());
assert!(limiter
.allows(Duration::from_secs_f32(0.1), &key, 1)
.is_ok());
assert!(limiter
.allows(Duration::from_secs_f32(0.2), &key, 1)
.is_ok());
assert!(limiter
.allows(Duration::from_secs_f32(0.3), &key, 1)
.is_ok());
assert!(limiter
.allows(Duration::from_secs_f32(0.4), &key, 1)
.is_err());
}
}