Add range sync machinery on sync side

Pawan Dhananjay
2026-03-30 17:00:29 -07:00
parent 0f996ddbe8
commit 4ca10e95be
12 changed files with 538 additions and 72 deletions

View File

@@ -619,6 +619,15 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
return;
};
// TODO(gloas): Implement Gloas chain segment processing.
// Gloas blocks carry separate envelopes and need a different import path.
if downloaded_blocks
.iter()
.any(|b| matches!(b, RangeSyncBlock::Gloas { .. }))
{
todo!("Gloas chain segment processing");
}
let start_slot = downloaded_blocks.first().map(|b| b.slot().as_u64());
let end_slot = downloaded_blocks.last().map(|b| b.slot().as_u64());
let sent_blocks = downloaded_blocks.len();
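The guard above matches on a Gloas-specific variant of `RangeSyncBlock`. A rough sketch of the shape this diff implies (the real definition lives in `beacon_chain::block_verification_types` and may differ; the non-Gloas variant name here is illustrative):

use beacon_chain::payload_envelope_verification::AvailableEnvelope;
use std::sync::Arc;
use types::{EthSpec, SignedBeaconBlock};

// Sketch only, inferred from `RangeSyncBlock::Gloas { .. }` and
// `RangeSyncBlock::new_gloas(block, available_envelope)` used in this diff.
pub enum RangeSyncBlock<E: EthSpec> {
    /// Pre-Gloas blocks keep the existing chain segment import path.
    Block(Arc<SignedBeaconBlock<E>>),
    /// Gloas blocks carry an optional available envelope alongside the block.
    Gloas {
        block: Arc<SignedBeaconBlock<E>>,
        envelope: Option<Box<AvailableEnvelope<E>>>,
    },
}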

View File

@@ -333,10 +333,8 @@ impl<T: BeaconChainTypes> Router<T> {
Response::PayloadEnvelopesByRoot(envelope) => {
self.on_payload_envelopes_by_root_response(peer_id, app_request_id, envelope);
}
- // TODO(EIP-7732): implement outgoing payload envelopes by range responses once
- // range sync requests them.
- Response::PayloadEnvelopesByRange(_) => {
- unreachable!()
+ Response::PayloadEnvelopesByRange(envelope) => {
+ self.on_payload_envelopes_by_range_response(peer_id, app_request_id, envelope);
}
// Light client responses should not be received
Response::LightClientBootstrap(_)
@@ -834,6 +832,29 @@ impl<T: BeaconChainTypes> Router<T> {
}
}
pub fn on_payload_envelopes_by_range_response(
&mut self,
peer_id: PeerId,
app_request_id: AppRequestId,
envelope: Option<Arc<SignedExecutionPayloadEnvelope<T::EthSpec>>>,
) {
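// A `None` envelope marks the end of the response stream, per the usual
// RPC stream-termination convention for by-range responses.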
trace!(
%peer_id,
"Received PayloadEnvelopesByRange Response"
);
if let AppRequestId::Sync(sync_request_id) = app_request_id {
self.send_to_sync(SyncMessage::RpcPayloadEnvelope {
peer_id,
sync_request_id,
envelope,
seen_timestamp: timestamp_now(),
});
} else {
crit!("All payload envelopes by range responses should belong to sync");
}
}
fn handle_beacon_processor_send_result(
&mut self,
result: Result<(), crate::network_beacon_processor::Error<T::EthSpec>>,

View File

@@ -33,6 +33,7 @@ pub type BatchId = Epoch;
#[strum(serialize_all = "snake_case")]
pub enum ByRangeRequestType {
BlocksAndColumns,
BlocksAndEnvelopesAndColumns,
BlocksAndBlobs,
Blocks,
Columns(HashSet<u64>),
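`BlocksAndEnvelopesAndColumns` marks batches in Gloas epochs, where blocks must be coupled with their payload envelopes as well as custody columns. A simplified sketch of the selection this commit wires into `SyncNetworkContext` (the helper name is illustrative, and the `blobs_required_for_epoch` branch is assumed from the existing pre-Gloas logic):

use beacon_chain::{BeaconChainTypes, data_availability_checker::DataAvailabilityChecker};
use types::Epoch;

// Illustrative only: mirrors the epoch-based selection added further down,
// where the envelope check takes precedence over columns and blobs.
fn batch_type_for_epoch<T: BeaconChainTypes>(
    da_checker: &DataAvailabilityChecker<T>,
    epoch: Epoch,
) -> ByRangeRequestType {
    if da_checker.envelopes_required_for_epoch(epoch) {
        // Gloas+: blocks, payload envelopes, and custody columns.
        ByRangeRequestType::BlocksAndEnvelopesAndColumns
    } else if da_checker.data_columns_required_for_epoch(epoch) {
        ByRangeRequestType::BlocksAndColumns
    } else if da_checker.blobs_required_for_epoch(epoch) {
        // Assumed pre-Gloas blob branch (Deneb through Fulu).
        ByRangeRequestType::BlocksAndBlobs
    } else {
        ByRangeRequestType::Blocks
    }
}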

View File

@@ -4,11 +4,13 @@ use beacon_chain::{
data_availability_checker::DataAvailabilityChecker,
data_column_verification::CustodyDataColumn,
get_block_root,
payload_envelope_verification::AvailableEnvelope,
};
use lighthouse_network::{
PeerId,
service::api_types::{
BlobsByRangeRequestId, BlocksByRangeRequestId, DataColumnsByRangeRequestId,
PayloadEnvelopesByRangeRequestId,
},
};
use ssz_types::RuntimeVariableList;
@@ -16,7 +18,7 @@ use std::{collections::HashMap, sync::Arc};
use tracing::{Span, debug};
use types::{
BlobSidecar, ChainSpec, ColumnIndex, DataColumnSidecar, DataColumnSidecarList, EthSpec,
- Hash256, SignedBeaconBlock,
+ Hash256, SignedBeaconBlock, SignedExecutionPayloadEnvelope,
};
use crate::sync::network_context::MAX_COLUMN_RETRIES;
@@ -35,6 +37,13 @@ use crate::sync::network_context::MAX_COLUMN_RETRIES;
pub struct RangeBlockComponentsRequest<E: EthSpec> {
/// Blocks we have received that are awaiting their corresponding sidecars.
blocks_request: ByRangeRequest<BlocksByRangeRequestId, Vec<Arc<SignedBeaconBlock<E>>>>,
/// Payload envelopes (Gloas+). None for pre-Gloas forks.
payloads_request: Option<
ByRangeRequest<
PayloadEnvelopesByRangeRequestId,
Vec<Arc<SignedExecutionPayloadEnvelope<E>>>,
>,
>,
/// Sidecars we have received that are awaiting their corresponding blocks.
block_data_request: RangeBlockDataRequest<E>,
/// Span to track the range request and all children range requests.
@@ -88,6 +97,7 @@ impl<E: EthSpec> RangeBlockComponentsRequest<E> {
Vec<(DataColumnsByRangeRequestId, Vec<ColumnIndex>)>,
Vec<ColumnIndex>,
)>,
payloads_req_id: Option<PayloadEnvelopesByRangeRequestId>,
request_span: Span,
) -> Self {
let block_data_request = if let Some(blobs_req_id) = blobs_req_id {
@@ -109,6 +119,7 @@ impl<E: EthSpec> RangeBlockComponentsRequest<E> {
Self {
blocks_request: ByRangeRequest::Active(blocks_req_id),
payloads_request: payloads_req_id.map(ByRangeRequest::Active),
block_data_request,
request_span,
}
@@ -191,6 +202,18 @@ impl<E: EthSpec> RangeBlockComponentsRequest<E> {
}
}
/// Adds received payload envelopes to the request.
pub fn add_payload_envelopes(
&mut self,
req_id: PayloadEnvelopesByRangeRequestId,
envelopes: Vec<Arc<SignedExecutionPayloadEnvelope<E>>>,
) -> Result<(), String> {
match &mut self.payloads_request {
Some(req) => req.finish(req_id, envelopes),
None => Err("received payload envelopes but none expected".to_owned()),
}
}
/// Attempts to construct RPC blocks from all received components.
///
/// Returns `None` if not all expected requests have completed.
@@ -208,6 +231,13 @@ impl<E: EthSpec> RangeBlockComponentsRequest<E> {
return None;
};
// If payloads are expected, they must also be complete before we can produce responses.
if let Some(payloads_req) = &self.payloads_request
&& payloads_req.to_finished().is_none()
{
return None;
}
// Increment the attempt once this function returns the response or errors
match &mut self.block_data_request {
RangeBlockDataRequest::NoData => Some(Self::responses_with_blobs(
@@ -254,15 +284,29 @@ impl<E: EthSpec> RangeBlockComponentsRequest<E> {
}
}
- let resp = Self::responses_with_custody_columns(
- blocks.to_vec(),
- data_columns,
- column_to_peer_id,
- expected_custody_columns,
- *attempt,
- da_checker,
- spec,
- );
+ // Gloas path: if payloads are present, produce Gloas blocks
+ let resp = if let Some(payloads_req) = &self.payloads_request {
+ let payloads = payloads_req.to_finished().expect("checked above").to_vec();
+ Self::responses_gloas(
+ blocks.to_vec(),
+ payloads,
+ data_columns,
+ column_to_peer_id,
+ expected_custody_columns,
+ *attempt,
+ spec,
+ )
+ } else {
+ Self::responses_with_custody_columns(
+ blocks.to_vec(),
+ data_columns,
+ column_to_peer_id,
+ expected_custody_columns,
+ *attempt,
+ da_checker,
+ spec,
+ )
+ };
if let Err(CouplingError::DataColumnPeerFailure {
error: _,
@@ -460,6 +504,136 @@ impl<E: EthSpec> RangeBlockComponentsRequest<E> {
Ok(range_sync_blocks)
}
/// Couples blocks with payload envelopes and custody columns for Gloas.
/// In Gloas, columns are associated with the envelope (not the block directly).
fn responses_gloas(
blocks: Vec<Arc<SignedBeaconBlock<E>>>,
payloads: Vec<Arc<SignedExecutionPayloadEnvelope<E>>>,
data_columns: DataColumnSidecarList<E>,
column_to_peer: HashMap<u64, PeerId>,
expects_custody_columns: &[ColumnIndex],
attempt: usize,
spec: Arc<ChainSpec>,
) -> Result<Vec<RangeSyncBlock<E>>, CouplingError> {
// Group data columns by block_root
let mut data_columns_by_block =
HashMap::<Hash256, HashMap<ColumnIndex, Arc<DataColumnSidecar<E>>>>::new();
for column in data_columns {
let block_root = column.block_root();
let index = *column.index();
if data_columns_by_block
.entry(block_root)
.or_default()
.insert(index, column)
.is_some()
{
debug!(?block_root, ?index, "Repeated column for block_root");
}
}
let mut range_sync_blocks = Vec::with_capacity(blocks.len());
let mut payload_iter = payloads.into_iter().peekable();
let exceeded_retries = attempt >= MAX_COLUMN_RETRIES;
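// Pairing assumption: `blocks` and `payloads` are both slot-ordered (as
// by-range RPC responses are) with at most one envelope per slot, so a
// single peekable pass lines each envelope up with its block.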
for block in blocks {
// Match payload envelope to block by slot
let mut envelope_for_block = None;
if payload_iter
.peek()
.map(|e| e.message.slot == block.slot())
.unwrap_or(false)
{
envelope_for_block = payload_iter.next();
}
let block_root = get_block_root(&block);
let available_envelope = if block.num_expected_blobs() > 0 {
// Block has blobs, so the envelope and its columns are required
let envelope = envelope_for_block.ok_or_else(|| {
CouplingError::InternalError(format!(
"Missing payload envelope for block {block_root:?} with blobs"
))
})?;
let Some(mut data_columns_by_index) = data_columns_by_block.remove(&block_root)
else {
let responsible_peers = column_to_peer.iter().map(|c| (*c.0, *c.1)).collect();
return Err(CouplingError::DataColumnPeerFailure {
error: format!("No columns for block {block_root:?} with data"),
faulty_peers: responsible_peers,
exceeded_retries,
});
};
let mut custody_columns = vec![];
let mut naughty_peers = vec![];
for index in expects_custody_columns {
if let Some(data_column) = data_columns_by_index.remove(index) {
custody_columns.push(data_column);
} else {
let Some(responsible_peer) = column_to_peer.get(index) else {
return Err(CouplingError::InternalError(format!(
"Internal error, no request made for column {index}"
)));
};
naughty_peers.push((*index, *responsible_peer));
}
}
if !naughty_peers.is_empty() {
return Err(CouplingError::DataColumnPeerFailure {
error: format!(
"Peers did not return column for block_root {block_root:?} {naughty_peers:?}"
),
faulty_peers: naughty_peers,
exceeded_retries,
});
}
Some(Box::new(AvailableEnvelope::new(
envelope.block_hash(),
envelope,
custody_columns,
None,
spec.clone(),
)))
} else {
envelope_for_block.map(|envelope| {
Box::new(AvailableEnvelope::new(
envelope.block_hash(),
envelope,
vec![],
None,
spec.clone(),
))
})
};
range_sync_blocks.push(RangeSyncBlock::new_gloas(block, available_envelope));
}
// Log any remaining unmatched payloads
if payload_iter.next().is_some() {
let remaining = payload_iter.count() + 1;
debug!(
remaining,
"Received payload envelopes that don't pair with blocks"
);
}
// Log remaining unmatched columns
if !data_columns_by_block.is_empty() {
let remaining_roots = data_columns_by_block.keys().collect::<Vec<_>>();
debug!(
?remaining_roots,
"Not all columns consumed for Gloas blocks"
);
}
Ok(range_sync_blocks)
}
}
impl<I: PartialEq + std::fmt::Display, T> ByRangeRequest<I, T> {
@@ -560,7 +734,7 @@ mod tests {
let blocks_req_id = blocks_id(components_id());
let mut info =
- RangeBlockComponentsRequest::<E>::new(blocks_req_id, None, None, Span::none());
+ RangeBlockComponentsRequest::<E>::new(blocks_req_id, None, None, None, Span::none());
// Send blocks and complete terminate response
info.add_blocks(blocks_req_id, blocks).unwrap();
@@ -591,6 +765,7 @@ mod tests {
blocks_req_id,
Some(blobs_req_id),
None,
None,
Span::none(),
);
@@ -650,6 +825,7 @@ mod tests {
blocks_req_id,
None,
Some((columns_req_id.clone(), expects_custody_columns.clone())),
None,
Span::none(),
);
// Send blocks and complete terminate response
@@ -726,6 +902,7 @@ mod tests {
blocks_req_id,
None,
Some((columns_req_id.clone(), expected_sampling_columns.clone())),
None,
Span::none(),
);
@@ -818,6 +995,7 @@ mod tests {
blocks_req_id,
None,
Some((columns_req_id.clone(), expected_sampling_columns.clone())),
None,
Span::none(),
);
@@ -915,6 +1093,7 @@ mod tests {
blocks_req_id,
None,
Some((columns_req_id.clone(), expected_sampling_columns.clone())),
None,
Span::none(),
);
@@ -1030,6 +1209,7 @@ mod tests {
blocks_req_id,
None,
Some((columns_req_id.clone(), expected_sampling_columns.clone())),
None,
Span::none(),
);

View File

@@ -60,7 +60,8 @@ use lighthouse_network::service::api_types::{
BlobsByRangeRequestId, BlocksByRangeRequestId, ComponentsByRangeRequestId,
CustodyBackFillBatchRequestId, CustodyBackfillBatchId, CustodyRequester,
DataColumnsByRangeRequestId, DataColumnsByRangeRequester, DataColumnsByRootRequestId,
- DataColumnsByRootRequester, Id, SingleLookupReqId, SyncRequestId,
+ DataColumnsByRootRequester, Id, PayloadEnvelopesByRangeRequestId, SingleLookupReqId,
+ SyncRequestId,
};
use lighthouse_network::types::{NetworkGlobals, SyncState};
use lighthouse_network::{PeerAction, PeerId};
@@ -522,6 +523,8 @@ impl<T: BeaconChainTypes> SyncManager<T> {
SyncRequestId::SinglePayloadEnvelope { id } => {
self.on_single_envelope_response(id, peer_id, RpcEvent::RPCError(error))
}
SyncRequestId::PayloadEnvelopesByRange(req_id) => self
.on_payload_envelopes_by_range_response(req_id, peer_id, RpcEvent::RPCError(error)),
}
}
@@ -1262,8 +1265,15 @@ impl<T: BeaconChainTypes> SyncManager<T> {
peer_id,
RpcEvent::from_chunk(envelope, seen_timestamp),
),
SyncRequestId::PayloadEnvelopesByRange(req_id) => {
self.on_payload_envelopes_by_range_response(
req_id,
peer_id,
RpcEvent::from_chunk(envelope, seen_timestamp),
);
}
_ => {
crit!(%peer_id, "bad request id for payload envelope");
crit!(%peer_id, "bad request id for payload_envelope");
}
}
}
@@ -1302,6 +1312,24 @@ impl<T: BeaconChainTypes> SyncManager<T> {
}
}
fn on_payload_envelopes_by_range_response(
&mut self,
id: PayloadEnvelopesByRangeRequestId,
peer_id: PeerId,
envelope: RpcEvent<Arc<SignedExecutionPayloadEnvelope<T::EthSpec>>>,
) {
if let Some(resp) = self
.network
.on_payload_envelopes_by_range_response(id, peer_id, envelope)
{
self.on_range_components_response(
id.parent_request_id,
peer_id,
RangeBlockComponent::PayloadEnvelope(id, resp),
);
}
}
fn on_single_blob_response(
&mut self,
id: SingleLookupReqId,

View File

@@ -22,14 +22,17 @@ use beacon_chain::block_verification_types::{AsBlock, RangeSyncBlock};
use beacon_chain::{BeaconChain, BeaconChainTypes, BlockProcessStatus, EngineState};
use custody::CustodyRequestResult;
use fnv::FnvHashMap;
- use lighthouse_network::rpc::methods::{BlobsByRangeRequest, DataColumnsByRangeRequest};
+ use lighthouse_network::rpc::methods::{
+ BlobsByRangeRequest, DataColumnsByRangeRequest, PayloadEnvelopesByRangeRequest,
+ };
use lighthouse_network::rpc::{BlocksByRangeRequest, GoodbyeReason, RPCError, RequestType};
pub use lighthouse_network::service::api_types::RangeRequestId;
use lighthouse_network::service::api_types::{
AppRequestId, BlobsByRangeRequestId, BlocksByRangeRequestId, ComponentsByRangeRequestId,
CustodyBackFillBatchRequestId, CustodyBackfillBatchId, CustodyId, CustodyRequester,
DataColumnsByRangeRequestId, DataColumnsByRangeRequester, DataColumnsByRootRequestId,
- DataColumnsByRootRequester, Id, SingleLookupReqId, SyncRequestId,
+ DataColumnsByRootRequester, Id, PayloadEnvelopesByRangeRequestId, SingleLookupReqId,
+ SyncRequestId,
};
use lighthouse_network::{Client, NetworkGlobals, PeerAction, PeerId, ReportSource};
use parking_lot::RwLock;
@@ -37,7 +40,8 @@ pub use requests::LookupVerifyError;
use requests::{
ActiveRequests, BlobsByRangeRequestItems, BlobsByRootRequestItems, BlocksByRangeRequestItems,
BlocksByRootRequestItems, DataColumnsByRangeRequestItems, DataColumnsByRootRequestItems,
- PayloadEnvelopesByRootRequestItems, PayloadEnvelopesByRootSingleRequest,
+ PayloadEnvelopesByRangeRequestItems, PayloadEnvelopesByRootRequestItems,
+ PayloadEnvelopesByRootSingleRequest,
};
#[cfg(test)]
use slot_clock::SlotClock;
@@ -217,6 +221,11 @@ pub struct SyncNetworkContext<T: BeaconChainTypes> {
/// A mapping of active PayloadEnvelopesByRoot requests
payload_envelopes_by_root_requests:
ActiveRequests<SingleLookupReqId, PayloadEnvelopesByRootRequestItems<T::EthSpec>>,
/// A mapping of active PayloadEnvelopesByRange requests
payload_envelopes_by_range_requests: ActiveRequests<
PayloadEnvelopesByRangeRequestId,
PayloadEnvelopesByRangeRequestItems<T::EthSpec>,
>,
/// Mapping of active custody column requests for a block root
custody_by_root_requests: FnvHashMap<CustodyRequester, ActiveCustodyRequest<T>>,
@@ -254,6 +263,10 @@ pub enum RangeBlockComponent<E: EthSpec> {
DataColumnsByRangeRequestId,
RpcResponseResult<Vec<Arc<DataColumnSidecar<E>>>>,
),
PayloadEnvelope(
PayloadEnvelopesByRangeRequestId,
RpcResponseResult<Vec<Arc<SignedExecutionPayloadEnvelope<E>>>>,
),
}
#[cfg(test)]
@@ -303,6 +316,7 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
blobs_by_range_requests: ActiveRequests::new("blobs_by_range"),
data_columns_by_range_requests: ActiveRequests::new("data_columns_by_range"),
payload_envelopes_by_root_requests: ActiveRequests::new("payload_envelopes_by_root"),
payload_envelopes_by_range_requests: ActiveRequests::new("payload_envelopes_by_range"),
custody_by_root_requests: <_>::default(),
components_by_range_requests: FnvHashMap::default(),
custody_backfill_data_column_batch_requests: FnvHashMap::default(),
@@ -332,6 +346,7 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
blobs_by_range_requests,
data_columns_by_range_requests,
payload_envelopes_by_root_requests,
payload_envelopes_by_range_requests,
// custody_by_root_requests is a meta request of data_columns_by_root_requests
custody_by_root_requests: _,
// components_by_range_requests is a meta request of various _by_range requests
@@ -371,6 +386,10 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
.active_requests_of_peer(peer_id)
.into_iter()
.map(|id| SyncRequestId::SinglePayloadEnvelope { id: *id });
let payload_envelope_by_range_ids = payload_envelopes_by_range_requests
.active_requests_of_peer(peer_id)
.into_iter()
.map(|req_id| SyncRequestId::PayloadEnvelopesByRange(*req_id));
blocks_by_root_ids
.chain(blobs_by_root_ids)
.chain(data_column_by_root_ids)
@@ -378,6 +397,7 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
.chain(blobs_by_range_ids)
.chain(data_column_by_range_ids)
.chain(envelope_by_root_ids)
.chain(payload_envelope_by_range_ids)
.collect()
}
@@ -435,6 +455,7 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
blobs_by_range_requests,
data_columns_by_range_requests,
payload_envelopes_by_root_requests,
payload_envelopes_by_range_requests,
// custody_by_root_requests is a meta request of data_columns_by_root_requests
custody_by_root_requests: _,
// components_by_range_requests is a meta request of various _by_range requests
@@ -458,6 +479,7 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
.chain(blobs_by_range_requests.iter_request_peers())
.chain(data_columns_by_range_requests.iter_request_peers())
.chain(payload_envelopes_by_root_requests.iter_request_peers())
.chain(payload_envelopes_by_range_requests.iter_request_peers())
{
*active_request_count_by_peer.entry(peer_id).or_default() += 1;
}
@@ -590,24 +612,26 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
};
// Attempt to find all required custody peers before sending any request or creating an ID
- let columns_by_range_peers_to_request =
- if matches!(batch_type, ByRangeRequestType::BlocksAndColumns) {
- let epoch = Slot::new(*request.start_slot()).epoch(T::EthSpec::slots_per_epoch());
- let column_indexes = self
- .chain
- .sampling_columns_for_epoch(epoch)
- .iter()
- .cloned()
- .collect();
- Some(self.select_columns_by_range_peers_to_request(
- &column_indexes,
- column_peers,
- active_request_count_by_peer,
- peers_to_deprioritize,
- )?)
- } else {
- None
- };
+ let columns_by_range_peers_to_request = if matches!(
+ batch_type,
+ ByRangeRequestType::BlocksAndColumns | ByRangeRequestType::BlocksAndEnvelopesAndColumns
+ ) {
+ let epoch = Slot::new(*request.start_slot()).epoch(T::EthSpec::slots_per_epoch());
+ let column_indexes = self
+ .chain
+ .sampling_columns_for_epoch(epoch)
+ .iter()
+ .cloned()
+ .collect();
+ Some(self.select_columns_by_range_peers_to_request(
+ &column_indexes,
+ column_peers,
+ active_request_count_by_peer,
+ peers_to_deprioritize,
+ )?)
+ } else {
+ None
+ };
// Create the overall components_by_range request ID before its individual components
let id = ComponentsByRangeRequestId {
@@ -672,6 +696,28 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
.transpose()?;
let epoch = Slot::new(*request.start_slot()).epoch(T::EthSpec::slots_per_epoch());
// Send envelope request for Gloas epochs
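// The envelopes are requested from the same peer that serves the blocks
// for this batch (`block_peer` below).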
let payloads_req_id =
if matches!(batch_type, ByRangeRequestType::BlocksAndEnvelopesAndColumns) {
Some(self.send_payload_envelopes_by_range_request(
block_peer,
PayloadEnvelopesByRangeRequest {
start_slot: *request.start_slot(),
count: *request.count(),
},
id,
new_range_request_span!(
self,
"outgoing_envelopes_by_range",
range_request_span.clone(),
block_peer
),
)?)
} else {
None
};
let info = RangeBlockComponentsRequest::new(
blocks_req_id,
blobs_req_id,
@@ -681,6 +727,7 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
self.chain.sampling_columns_for_epoch(epoch).to_vec(),
)
}),
payloads_req_id,
range_request_span,
);
self.components_by_range_requests.insert(id, info);
@@ -783,6 +830,17 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
})
})
}
RangeBlockComponent::PayloadEnvelope(req_id, resp) => {
resp.and_then(|(envelopes, _)| {
request
.add_payload_envelopes(req_id, envelopes)
.map_err(|e| {
RpcResponseError::BlockComponentCouplingError(
CouplingError::InternalError(e),
)
})
})
}
}
} {
entry.remove();
@@ -1352,6 +1410,57 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
Ok((id, requested_columns))
}
fn send_payload_envelopes_by_range_request(
&mut self,
peer_id: PeerId,
request: PayloadEnvelopesByRangeRequest,
parent_request_id: ComponentsByRangeRequestId,
request_span: Span,
) -> Result<PayloadEnvelopesByRangeRequestId, RpcRequestSendError> {
let id = PayloadEnvelopesByRangeRequestId {
id: self.next_id(),
parent_request_id,
};
self.send_network_msg(NetworkMessage::SendRequest {
peer_id,
request: RequestType::PayloadEnvelopesByRange(request.clone()),
app_request_id: AppRequestId::Sync(SyncRequestId::PayloadEnvelopesByRange(id)),
})
.map_err(|_| RpcRequestSendError::InternalError("network send error".to_owned()))?;
debug!(
method = "PayloadEnvelopesByRange",
slots = request.count,
epoch = %Slot::new(request.start_slot).epoch(T::EthSpec::slots_per_epoch()),
peer = %peer_id,
%id,
"Sync RPC request sent"
);
self.payload_envelopes_by_range_requests.insert(
id,
peer_id,
false,
PayloadEnvelopesByRangeRequestItems::new(request),
request_span,
);
Ok(id)
}
#[allow(clippy::type_complexity)]
pub(crate) fn on_payload_envelopes_by_range_response(
&mut self,
id: PayloadEnvelopesByRangeRequestId,
peer_id: PeerId,
rpc_event: RpcEvent<Arc<SignedExecutionPayloadEnvelope<T::EthSpec>>>,
) -> Option<RpcResponseResult<Vec<Arc<SignedExecutionPayloadEnvelope<T::EthSpec>>>>> {
let resp = self
.payload_envelopes_by_range_requests
.on_response(id, rpc_event);
self.on_rpc_response_result(resp, peer_id)
}
pub fn is_execution_engine_online(&self) -> bool {
self.execution_engine_state == EngineState::Online
}
@@ -1433,6 +1542,12 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
);
if self
.chain
.data_availability_checker
.envelopes_required_for_epoch(epoch)
{
ByRangeRequestType::BlocksAndEnvelopesAndColumns
} else if self
.chain
.data_availability_checker
.data_columns_required_for_epoch(epoch)
@@ -1900,6 +2015,10 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
"data_columns_by_range",
self.data_columns_by_range_requests.len(),
),
(
"payload_envelopes_by_range",
self.payload_envelopes_by_range_requests.len(),
),
("custody_by_root", self.custody_by_root_requests.len()),
(
"components_by_range",

View File

@@ -19,6 +19,7 @@ pub use data_columns_by_root::{
pub use payload_envelopes_by_root::{
PayloadEnvelopesByRootRequestItems, PayloadEnvelopesByRootSingleRequest,
};
pub use payload_envelopes_by_range::PayloadEnvelopesByRangeRequestItems;
use crate::metrics;
@@ -31,6 +32,7 @@ mod blocks_by_root;
mod data_columns_by_range;
mod data_columns_by_root;
mod payload_envelopes_by_root;
mod payload_envelopes_by_range;
#[derive(Debug, PartialEq, Eq, IntoStaticStr)]
pub enum LookupVerifyError {

View File

@@ -0,0 +1,42 @@
use super::{ActiveRequestItems, LookupVerifyError};
use lighthouse_network::rpc::methods::PayloadEnvelopesByRangeRequest;
use std::sync::Arc;
use types::{EthSpec, SignedExecutionPayloadEnvelope};
/// Accumulates results of a payload_envelopes_by_range request. Only returns items after
/// receiving the stream termination.
pub struct PayloadEnvelopesByRangeRequestItems<E: EthSpec> {
request: PayloadEnvelopesByRangeRequest,
items: Vec<Arc<SignedExecutionPayloadEnvelope<E>>>,
}
impl<E: EthSpec> PayloadEnvelopesByRangeRequestItems<E> {
pub fn new(request: PayloadEnvelopesByRangeRequest) -> Self {
Self {
request,
items: vec![],
}
}
}
impl<E: EthSpec> ActiveRequestItems for PayloadEnvelopesByRangeRequestItems<E> {
type Item = Arc<SignedExecutionPayloadEnvelope<E>>;
fn add(&mut self, envelope: Self::Item) -> Result<bool, LookupVerifyError> {
let slot = envelope.slot();
if slot < self.request.start_slot || slot >= self.request.start_slot + self.request.count {
return Err(LookupVerifyError::UnrequestedSlot(slot));
}
if self.items.iter().any(|existing| existing.slot() == slot) {
return Err(LookupVerifyError::DuplicatedData(slot, 0));
}
self.items.push(envelope);
Ok(false)
}
fn consume(&mut self) -> Vec<Self::Item> {
std::mem::take(&mut self.items)
}
}
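For illustration, a hypothetical driver showing how the accumulator is meant to be fed; in the real flow `ActiveRequests::on_response` adds each chunk and consumes the items once the peer sends the stream termination:

use lighthouse_network::rpc::methods::PayloadEnvelopesByRangeRequest;
use std::sync::Arc;
use types::{EthSpec, SignedExecutionPayloadEnvelope};

// Hypothetical usage sketch, not part of the diff.
fn collect_range_envelopes<E: EthSpec>(
    chunks: Vec<Arc<SignedExecutionPayloadEnvelope<E>>>,
) -> Result<Vec<Arc<SignedExecutionPayloadEnvelope<E>>>, LookupVerifyError> {
    let request = PayloadEnvelopesByRangeRequest { start_slot: 96, count: 32 };
    let mut items = PayloadEnvelopesByRangeRequestItems::<E>::new(request);
    for envelope in chunks {
        // `add` always returns Ok(false): by-range requests complete only on
        // stream termination, after the slot-range and duplicate checks above.
        items.add(envelope)?;
    }
    Ok(items.consume())
}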