mirror of
https://github.com/sigp/lighthouse.git
synced 2026-03-09 03:31:45 +00:00
I've been working at updating another library to the latest Lighthouse and got very confused with RPC request Ids. There were types that had fields called `request_id` and `id`, and these could interchangeably have types `PeerRequestId`, `rpc::RequestId`, `AppRequestId`, `api_types::RequestId` or even `Request.id`. I couldn't keep track of which Id was linked to what and what each type meant. So this PR mainly does a few things: - Changes the field naming to match the actual type. So any field that has an `AppRequestId` will be named `app_request_id` rather than `id` or `request_id`, for example. - I simplified the types. I removed the two different `RequestId` types (one in `lighthouse_network`, the other in the rpc) and grouped them into one. It has one downside though: I had to add a few unreachable lines of code in the beacon processor, which the extra type would prevent, but I feel like it might be worth it. Happy to add an extra type to avoid those few lines. - I also removed the concept of `PeerRequestId`, which sometimes went alongside a `request_id`. There were times where we had a `PeerRequest` and a `Request` being returned, both of which contain a `RequestId`, so we had redundant information. I've simplified the logic by removing `PeerRequestId` and made a `ResponseId`. I think if you look at the code changes, it simplifies things a bit and removes the redundant extra info. I think with this PR it is a little bit easier to reason about what is going on with all these RPC Ids. NOTE: I did this with the help of AI, so it probably should be checked.
535 lines
19 KiB
Rust
535 lines
19 KiB
Rust
//! The Ethereum 2.0 Wire Protocol
|
|
//!
|
|
//! This protocol is a purpose-built Ethereum 2.0 libp2p protocol. Its role is to facilitate
|
|
//! direct peer-to-peer communication primarily for sending/receiving chain information for
|
|
//! syncing.
|
|
|
|
use futures::future::FutureExt;
|
|
use handler::RPCHandler;
|
|
use libp2p::core::transport::PortUse;
|
|
use libp2p::swarm::{
|
|
handler::ConnectionHandler, CloseConnection, ConnectionId, NetworkBehaviour, NotifyHandler,
|
|
ToSwarm,
|
|
};
|
|
use libp2p::swarm::{ConnectionClosed, FromSwarm, SubstreamProtocol, THandlerInEvent};
|
|
use libp2p::PeerId;
|
|
use logging::crit;
|
|
use rate_limiter::{RPCRateLimiter as RateLimiter, RateLimitedErr};
|
|
use std::marker::PhantomData;
|
|
use std::sync::Arc;
|
|
use std::task::{Context, Poll};
|
|
use std::time::Duration;
|
|
use tracing::{debug, instrument, trace};
|
|
use types::{EthSpec, ForkContext};
|
|
|
|
pub(crate) use handler::{HandlerErr, HandlerEvent};
|
|
pub(crate) use methods::{
|
|
MetaData, MetaDataV1, MetaDataV2, MetaDataV3, Ping, RpcResponse, RpcSuccessResponse,
|
|
};
|
|
pub use protocol::RequestType;
|
|
|
|
pub use handler::SubstreamId;
|
|
pub use methods::{
|
|
BlocksByRangeRequest, BlocksByRootRequest, GoodbyeReason, LightClientBootstrapRequest,
|
|
ResponseTermination, RpcErrorResponse, StatusMessage,
|
|
};
|
|
pub use protocol::{max_rpc_size, Protocol, RPCError};
|
|
|
|
use self::config::{InboundRateLimiterConfig, OutboundRateLimiterConfig};
|
|
use self::protocol::RPCProtocol;
|
|
use self::self_limiter::SelfRateLimiter;
|
|
|
|
pub(crate) mod codec;
|
|
pub mod config;
|
|
mod handler;
|
|
pub mod methods;
|
|
mod outbound;
|
|
mod protocol;
|
|
mod rate_limiter;
|
|
mod self_limiter;
|
|
|
|
/// Composite trait for a request id.
///
/// Any `Send + 'static + Debug + Copy + Clone` type qualifies via the blanket
/// impl below, so applications can use whatever id type suits them.
pub trait ReqId: Send + 'static + std::fmt::Debug + Copy + Clone {}

// Blanket implementation: every type satisfying the bounds is a `ReqId`.
impl<T> ReqId for T where T: Send + 'static + std::fmt::Debug + Copy + Clone {}
|
|
|
|
/// RPC events sent from Lighthouse.
///
/// These are delivered to connection handlers via `NotifyHandler` (see the
/// `events` queue in `RPC`).
#[derive(Debug, Clone)]
pub enum RPCSend<Id, E: EthSpec> {
    /// A request sent from Lighthouse.
    ///
    /// The `Id` is given by the application making the request. These
    /// go over *outbound* connections.
    Request(Id, RequestType<E>),
    /// A response sent from Lighthouse.
    ///
    /// The `SubstreamId` must correspond to the RPC-given ID of the original request received from the
    /// peer. The second parameter is a single chunk of a response. These go over *inbound*
    /// connections.
    Response(SubstreamId, RpcResponse<E>),
    /// Lighthouse has requested to terminate the connection with a goodbye message.
    Shutdown(Id, GoodbyeReason),
}
|
|
|
|
/// RPC events received from outside Lighthouse.
#[derive(Debug, Clone)]
pub enum RPCReceived<Id, E: EthSpec> {
    /// A request received from the outside.
    ///
    /// The `InboundRequestId` identifies the connection and the *inbound*
    /// substream over which the `RPCHandler` manages this request; it is the
    /// handle the application must pass back to `RPC::send_response`.
    Request(InboundRequestId, RequestType<E>),
    /// A response received from the outside.
    ///
    /// The `Id` corresponds to the application given ID of the original request sent to the
    /// peer. The second parameter is a single chunk of a response. These go over *outbound*
    /// connections.
    Response(Id, RpcSuccessResponse<E>),
    /// Marks a request as completed.
    EndOfStream(Id, ResponseTermination),
}
|
|
|
|
/// An identifier for the inbound requests received via Rpc.
///
/// Pairs the connection with the substream the request arrived on — exactly
/// the routing information `RPC::send_response` needs to answer the peer.
#[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)]
pub struct InboundRequestId {
    /// The connection ID of the peer that sent the request.
    connection_id: ConnectionId,
    /// The ID of the substream that sent the request.
    substream_id: SubstreamId,
}
|
|
|
|
impl InboundRequestId {
|
|
/// Creates an _unchecked_ [`InboundRequestId`].
|
|
///
|
|
/// [`Rpc`] enforces that [`InboundRequestId`]s are unique and not reused.
|
|
/// This constructor does not, hence the _unchecked_.
|
|
///
|
|
/// It is primarily meant for allowing manual tests.
|
|
pub fn new_unchecked(connection_id: usize, substream_id: usize) -> Self {
|
|
Self {
|
|
connection_id: ConnectionId::new_unchecked(connection_id),
|
|
substream_id: SubstreamId::new(substream_id),
|
|
}
|
|
}
|
|
}
|
|
|
|
impl<E: EthSpec, Id: std::fmt::Debug> std::fmt::Display for RPCSend<Id, E> {
    /// Renders the outgoing RPC event in a human-readable form for logging.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Self::Request(id, req) => write!(f, "RPC Request(id: {:?}, {})", id, req),
            Self::Response(id, res) => write!(f, "RPC Response(id: {:?}, {})", id, res),
            Self::Shutdown(_id, reason) => write!(f, "Sending Goodbye: {}", reason),
        }
    }
}
|
|
|
|
/// Messages sent to the user from the RPC protocol.
#[derive(Debug)]
pub struct RPCMessage<Id, E: EthSpec> {
    /// The peer that sent the message.
    pub peer_id: PeerId,
    /// The ID of the connection (and therefore of the handler) that produced
    /// this message.
    pub connection_id: ConnectionId,
    /// The message that was sent: either a successfully received RPC event or
    /// a handler error.
    pub message: Result<RPCReceived<Id, E>, HandlerErr<Id>>,
}
|
|
|
|
/// Shorthand for the actions this behaviour emits to the swarm: either an
/// `RPCMessage` for the application (`GenerateEvent`) or an `RPCSend` for a
/// connection handler (`NotifyHandler`).
type BehaviourAction<Id, E> = ToSwarm<RPCMessage<Id, E>, RPCSend<Id, E>>;
|
|
|
|
/// Networking constant values used to configure the RPC protocol and its
/// connection handlers.
pub struct NetworkParams {
    /// Maximum chunk size; used with the fork context to compute the maximum
    /// RPC message size (see `max_rpc_size`).
    pub max_chunk_size: usize,
    /// Time-to-first-byte timeout, forwarded to `RPCProtocol`.
    pub ttfb_timeout: Duration,
    /// Response timeout, forwarded to each `RPCHandler`.
    pub resp_timeout: Duration,
}
|
|
|
|
/// Implements the libp2p `NetworkBehaviour` trait and therefore manages network-level
/// logic.
pub struct RPC<Id: ReqId, E: EthSpec> {
    /// Rate limiter for inbound requests; `None` disables inbound rate limiting.
    limiter: Option<RateLimiter>,
    /// Rate limiter for our own requests; `None` disables outbound self-limiting.
    self_limiter: Option<SelfRateLimiter<Id, E>>,
    /// Queue of events to be processed.
    events: Vec<BehaviourAction<Id, E>>,
    /// Shared fork context, cloned into protocols, handlers and rate limiters.
    fork_context: Arc<ForkContext>,
    /// Flag forwarded to `RPCProtocol` enabling the light-client server protocols.
    enable_light_client_server: bool,
    /// Networking constant values
    network_params: NetworkParams,
    /// A sequential counter indicating when data gets modified. Sent as the
    /// payload of outgoing Ping and Pong messages.
    seq_number: u64,
}
|
|
|
|
impl<Id: ReqId, E: EthSpec> RPC<Id, E> {
    /// Creates a new RPC behaviour.
    ///
    /// Rate limiting (inbound and outbound) is only enabled when the
    /// corresponding config is `Some`. Invalid limiter configurations panic via
    /// `expect`, since they are considered programmer/configuration errors.
    #[instrument(parent = None,
        level = "trace",
        fields(service = "libp2p_rpc"),
        name = "libp2p_rpc",
        skip_all
    )]
    pub fn new(
        fork_context: Arc<ForkContext>,
        enable_light_client_server: bool,
        inbound_rate_limiter_config: Option<InboundRateLimiterConfig>,
        outbound_rate_limiter_config: Option<OutboundRateLimiterConfig>,
        network_params: NetworkParams,
        seq_number: u64,
    ) -> Self {
        let inbound_limiter = inbound_rate_limiter_config.map(|config| {
            debug!(?config, "Using inbound rate limiting params");
            RateLimiter::new_with_config(config.0, fork_context.clone())
                .expect("Inbound limiter configuration parameters are valid")
        });

        let self_limiter = outbound_rate_limiter_config.map(|config| {
            SelfRateLimiter::new(config, fork_context.clone())
                .expect("Configuration parameters are valid")
        });

        RPC {
            limiter: inbound_limiter,
            self_limiter,
            events: Vec::new(),
            fork_context,
            enable_light_client_server,
            network_params,
            seq_number,
        }
    }

    /// Sends an RPC response.
    ///
    /// The peer must be connected for this to succeed.
    #[instrument(parent = None,
        level = "trace",
        fields(service = "libp2p_rpc"),
        name = "libp2p_rpc",
        skip_all
    )]
    pub fn send_response(
        &mut self,
        peer_id: PeerId,
        request_id: InboundRequestId,
        response: RpcResponse<E>,
    ) {
        // Route the response back over the exact connection and substream the
        // original request arrived on.
        self.events.push(ToSwarm::NotifyHandler {
            peer_id,
            handler: NotifyHandler::One(request_id.connection_id),
            event: RPCSend::Response(request_id.substream_id, response),
        });
    }

    /// Submits an RPC request.
    ///
    /// The peer must be connected for this to succeed.
    #[instrument(parent = None,
        level = "trace",
        fields(service = "libp2p_rpc"),
        name = "libp2p_rpc",
        skip_all
    )]
    pub fn send_request(&mut self, peer_id: PeerId, request_id: Id, req: RequestType<E>) {
        // When self rate-limiting is enabled, the limiter decides whether the
        // request may go out now; otherwise the request is sent directly.
        let event = if let Some(self_limiter) = self.self_limiter.as_mut() {
            match self_limiter.allows(peer_id, request_id, req) {
                Ok(event) => event,
                Err(_e) => {
                    // Request is logged and queued internally in the self rate limiter.
                    return;
                }
            }
        } else {
            RPCSend::Request(request_id, req)
        };

        self.events.push(BehaviourAction::NotifyHandler {
            peer_id,
            handler: NotifyHandler::Any,
            event,
        });
    }

    /// Lighthouse wishes to disconnect from this peer by sending a Goodbye message. This
    /// gracefully terminates the RPC behaviour with a goodbye message.
    #[instrument(parent = None,
        level = "trace",
        fields(service = "libp2p_rpc"),
        name = "libp2p_rpc",
        skip_all
    )]
    pub fn shutdown(&mut self, peer_id: PeerId, id: Id, reason: GoodbyeReason) {
        self.events.push(ToSwarm::NotifyHandler {
            peer_id,
            handler: NotifyHandler::Any,
            event: RPCSend::Shutdown(id, reason),
        });
    }

    /// Updates the sequence number carried by subsequent Ping/Pong payloads.
    #[instrument(parent = None,
        level = "trace",
        fields(service = "libp2p_rpc"),
        name = "libp2p_rpc",
        skip_all
    )]
    pub fn update_seq_number(&mut self, seq_number: u64) {
        self.seq_number = seq_number
    }

    /// Send a Ping request (carrying the current `seq_number`) to the
    /// destination `PeerId`. Delegated to `send_request`, so it is subject to
    /// the same self rate-limiting.
    #[instrument(parent = None,
        level = "trace",
        fields(service = "libp2p_rpc"),
        name = "libp2p_rpc",
        skip_all
    )]
    pub fn ping(&mut self, peer_id: PeerId, id: Id) {
        let ping = Ping {
            data: self.seq_number,
        };
        trace!(%peer_id, "Sending Ping");
        self.send_request(peer_id, id, RequestType::Ping(ping));
    }
}
|
|
|
|
impl<Id, E> NetworkBehaviour for RPC<Id, E>
where
    E: EthSpec,
    Id: ReqId,
{
    type ConnectionHandler = RPCHandler<Id, E>;
    type ToSwarm = RPCMessage<Id, E>;

    // NOTE(review): inbound and outbound connection setup below are identical.
    // A trait impl cannot host extra helper items, but a free helper function
    // (or inherent method) could deduplicate them — consider for a follow-up.
    fn handle_established_inbound_connection(
        &mut self,
        connection_id: ConnectionId,
        peer_id: PeerId,
        _local_addr: &libp2p::Multiaddr,
        _remote_addr: &libp2p::Multiaddr,
    ) -> Result<libp2p::swarm::THandler<Self>, libp2p::swarm::ConnectionDenied> {
        // Each established connection gets its own handler, configured from
        // the shared fork context and network parameters.
        let protocol = SubstreamProtocol::new(
            RPCProtocol {
                fork_context: self.fork_context.clone(),
                max_rpc_size: max_rpc_size(&self.fork_context, self.network_params.max_chunk_size),
                enable_light_client_server: self.enable_light_client_server,
                phantom: PhantomData,
                ttfb_timeout: self.network_params.ttfb_timeout,
            },
            (),
        );

        let handler = RPCHandler::new(
            protocol,
            self.fork_context.clone(),
            self.network_params.resp_timeout,
            peer_id,
            connection_id,
        );

        Ok(handler)
    }

    fn handle_established_outbound_connection(
        &mut self,
        connection_id: ConnectionId,
        peer_id: PeerId,
        _addr: &libp2p::Multiaddr,
        _role_override: libp2p::core::Endpoint,
        _port_use: PortUse,
    ) -> Result<libp2p::swarm::THandler<Self>, libp2p::swarm::ConnectionDenied> {
        // Identical configuration to the inbound case above.
        let protocol = SubstreamProtocol::new(
            RPCProtocol {
                fork_context: self.fork_context.clone(),
                max_rpc_size: max_rpc_size(&self.fork_context, self.network_params.max_chunk_size),
                enable_light_client_server: self.enable_light_client_server,
                phantom: PhantomData,
                ttfb_timeout: self.network_params.ttfb_timeout,
            },
            (),
        );

        let handler = RPCHandler::new(
            protocol,
            self.fork_context.clone(),
            self.network_params.resp_timeout,
            peer_id,
            connection_id,
        );

        Ok(handler)
    }

    fn on_swarm_event(&mut self, event: FromSwarm) {
        // NOTE: FromSwarm is a non exhaustive enum so updates should be based on release notes more
        // than compiler feedback
        // The self rate limiter holds on to requests and attempts to process them within our rate
        // limits. If a peer disconnects whilst we are self-rate limiting, we want to terminate any
        // pending requests and return an error response to the application.

        if let FromSwarm::ConnectionClosed(ConnectionClosed {
            peer_id,
            remaining_established,
            connection_id,
            ..
        }) = event
        {
            // If there are still connections remaining, do nothing.
            if remaining_established > 0 {
                return;
            }
            // Get a list of pending requests from the self rate limiter and
            // fail each one with a `Disconnected` error.
            if let Some(limiter) = self.self_limiter.as_mut() {
                for (id, proto) in limiter.peer_disconnected(peer_id) {
                    let error_msg = ToSwarm::GenerateEvent(RPCMessage {
                        peer_id,
                        connection_id,
                        message: Err(HandlerErr::Outbound {
                            id,
                            proto,
                            error: RPCError::Disconnected,
                        }),
                    });
                    self.events.push(error_msg);
                }
            }

            // Replace the pending Requests to the disconnected peer
            // with reports of failed requests. The events are rewritten
            // in place so queue ordering is preserved.
            self.events.iter_mut().for_each(|event| match &event {
                ToSwarm::NotifyHandler {
                    peer_id: p,
                    event: RPCSend::Request(request_id, req),
                    ..
                } if *p == peer_id => {
                    *event = ToSwarm::GenerateEvent(RPCMessage {
                        peer_id,
                        connection_id,
                        message: Err(HandlerErr::Outbound {
                            id: *request_id,
                            proto: req.versioned_protocol().protocol(),
                            error: RPCError::Disconnected,
                        }),
                    });
                }
                _ => {}
            });
        }
    }

    fn on_connection_handler_event(
        &mut self,
        peer_id: PeerId,
        connection_id: ConnectionId,
        event: <Self::ConnectionHandler as ConnectionHandler>::ToBehaviour,
    ) {
        match event {
            // Inbound request: apply rate limiting (if configured), answer
            // Pings directly, then surface the request to the application.
            HandlerEvent::Ok(RPCReceived::Request(request_id, request_type)) => {
                if let Some(limiter) = self.limiter.as_mut() {
                    // check if the request is conformant to the quota
                    match limiter.allows(&peer_id, &request_type) {
                        Err(RateLimitedErr::TooLarge) => {
                            // we set the batch sizes, so this is a coding/config err for most protocols
                            let protocol = request_type.versioned_protocol().protocol();
                            if matches!(
                                protocol,
                                Protocol::BlocksByRange
                                    | Protocol::BlobsByRange
                                    | Protocol::DataColumnsByRange
                                    | Protocol::BlocksByRoot
                                    | Protocol::BlobsByRoot
                                    | Protocol::DataColumnsByRoot
                            ) {
                                debug!(request = %request_type, %protocol, "Request too large to process");
                            } else {
                                // Other protocols shouldn't be sending large messages, we should flag the peer kind
                                crit!(%protocol, "Request size too large to ever be processed");
                            }
                            // send an error code to the peer.
                            // the handler upon receiving the error code will send it back to the behaviour
                            self.send_response(
                                peer_id,
                                request_id,
                                RpcResponse::Error(
                                    RpcErrorResponse::RateLimited,
                                    "Rate limited. Request too large".into(),
                                ),
                            );
                            return;
                        }
                        Err(RateLimitedErr::TooSoon(wait_time)) => {
                            debug!(request = %request_type, %peer_id, wait_time_ms = wait_time.as_millis(), "Request exceeds the rate limit");
                            // send an error code to the peer.
                            // the handler upon receiving the error code will send it back to the behaviour
                            self.send_response(
                                peer_id,
                                request_id,
                                RpcResponse::Error(
                                    RpcErrorResponse::RateLimited,
                                    format!("Wait {:?}", wait_time).into(),
                                ),
                            );
                            return;
                        }
                        // No rate limiting, continue.
                        Ok(()) => {}
                    }
                }

                // If we received a Ping, we queue a Pong response.
                if let RequestType::Ping(_) = request_type {
                    trace!(connection_id = %connection_id, %peer_id, "Received Ping, queueing Pong");
                    self.send_response(
                        peer_id,
                        request_id,
                        RpcResponse::Success(RpcSuccessResponse::Pong(Ping {
                            data: self.seq_number,
                        })),
                    );
                }

                // The request is still forwarded to the application (even for
                // Pings, which were already answered above).
                self.events.push(ToSwarm::GenerateEvent(RPCMessage {
                    peer_id,
                    connection_id,
                    message: Ok(RPCReceived::Request(request_id, request_type)),
                }));
            }
            // Responses and end-of-stream markers pass straight through.
            HandlerEvent::Ok(rpc) => {
                self.events.push(ToSwarm::GenerateEvent(RPCMessage {
                    peer_id,
                    connection_id,
                    message: Ok(rpc),
                }));
            }
            // Handler errors are surfaced to the application unchanged.
            HandlerEvent::Err(err) => {
                self.events.push(ToSwarm::GenerateEvent(RPCMessage {
                    peer_id,
                    connection_id,
                    message: Err(err),
                }));
            }
            HandlerEvent::Close(_) => {
                // Handle the close event here.
                self.events.push(ToSwarm::CloseConnection {
                    peer_id,
                    connection: CloseConnection::All,
                });
            }
        }
    }

    fn poll(&mut self, cx: &mut Context) -> Poll<ToSwarm<Self::ToSwarm, THandlerInEvent<Self>>> {
        // let the rate limiter prune.
        if let Some(limiter) = self.limiter.as_mut() {
            let _ = limiter.poll_unpin(cx);
        }

        // Drain any requests the self limiter has released.
        if let Some(self_limiter) = self.self_limiter.as_mut() {
            if let Poll::Ready(event) = self_limiter.poll_ready(cx) {
                self.events.push(event)
            }
        }

        // Emit queued events in FIFO order, one per poll.
        if !self.events.is_empty() {
            return Poll::Ready(self.events.remove(0));
        }

        Poll::Pending
    }
}
|