Add individual by_range sync requests (#6497)

Part of
- https://github.com/sigp/lighthouse/issues/6258

To address PeerDAS sync issues we need to make individual by_range requests within a batch retriable. We should adopt the same pattern for lookup sync, where each request (block/blobs/columns) is tracked individually within a "meta" request that groups them all and handles retry logic.


  - Building on https://github.com/sigp/lighthouse/pull/6398

The second step is to add individual request accumulators for `blocks_by_range`, `blobs_by_range`, and `data_columns_by_range`. This will allow each request to progress independently and be retried separately.

Most of the logic is just piping, excuse the large diff. This PR does not change the logic of how requests are handled or retried. This will be done in a future PR changing the logic of `RangeBlockComponentsRequest`.

### Before

- Sync manager receives block with `SyncRequestId::RangeBlockAndBlobs`
- Insert block into `SyncNetworkContext::range_block_components_requests`
- (If received stream terminators of all requests)
- Return `Vec<RpcBlock>`, and insert into `range_sync`

### Now

- Sync manager receives block with `SyncRequestId::RangeBlockAndBlobs`
- Insert block into `SyncNetworkContext::blocks_by_range_requests`
- (If received stream terminator of this request)
- Return `Vec<SignedBlock>`, and insert into `SyncNetworkContext::components_by_range_requests`
- (If received a result for all requests)
- Return `Vec<RpcBlock>`, and insert into `range_sync`
This commit is contained in:
Lion - dapplion
2025-02-05 04:08:28 -03:00
committed by GitHub
parent 7bfdb33729
commit 2193f6a4d4
15 changed files with 776 additions and 502 deletions

View File

@@ -4,10 +4,13 @@ use beacon_chain::validator_monitor::timestamp_now;
use fnv::FnvHashMap;
use lighthouse_network::PeerId;
use strum::IntoStaticStr;
use types::Hash256;
use types::{Hash256, Slot};
pub use blobs_by_range::BlobsByRangeRequestItems;
pub use blobs_by_root::{BlobsByRootRequestItems, BlobsByRootSingleBlockRequest};
pub use blocks_by_range::BlocksByRangeRequestItems;
pub use blocks_by_root::{BlocksByRootRequestItems, BlocksByRootSingleRequest};
pub use data_columns_by_range::DataColumnsByRangeRequestItems;
pub use data_columns_by_root::{
DataColumnsByRootRequestItems, DataColumnsByRootSingleBlockRequest,
};
@@ -16,8 +19,11 @@ use crate::metrics;
use super::{RpcEvent, RpcResponseResult};
mod blobs_by_range;
mod blobs_by_root;
mod blocks_by_range;
mod blocks_by_root;
mod data_columns_by_range;
mod data_columns_by_root;
#[derive(Debug, PartialEq, Eq, IntoStaticStr)]
@@ -26,8 +32,9 @@ pub enum LookupVerifyError {
TooManyResponses,
UnrequestedBlockRoot(Hash256),
UnrequestedIndex(u64),
UnrequestedSlot(Slot),
InvalidInclusionProof,
DuplicateData,
DuplicatedData(Slot, u64),
InternalError(String),
}

View File

@@ -0,0 +1,56 @@
use super::{ActiveRequestItems, LookupVerifyError};
use lighthouse_network::rpc::methods::BlobsByRangeRequest;
use std::sync::Arc;
use types::{BlobSidecar, EthSpec};
/// Accumulates results of a blobs_by_range request. Only returns items after receiving the
/// stream termination.
pub struct BlobsByRangeRequestItems<E: EthSpec> {
    /// The originating by_range request; used to validate the slot window of received blobs.
    request: BlobsByRangeRequest,
    /// Blob sidecars accumulated so far; only handed out via `consume` after stream termination.
    items: Vec<Arc<BlobSidecar<E>>>,
    /// Exclusive upper bound on a valid blob index within a single block.
    max_blobs_per_block: u64,
}
impl<E: EthSpec> BlobsByRangeRequestItems<E> {
    /// Creates an empty accumulator for the given `blobs_by_range` request.
    ///
    /// `max_blobs_per_block` bounds the sidecar index a peer may legitimately send.
    pub fn new(request: BlobsByRangeRequest, max_blobs_per_block: u64) -> Self {
        let items = Vec::new();
        Self {
            max_blobs_per_block,
            request,
            items,
        }
    }
}
impl<E: EthSpec> ActiveRequestItems for BlobsByRangeRequestItems<E> {
    type Item = Arc<BlobSidecar<E>>;

    /// Verifies a received blob sidecar against the request and appends it.
    ///
    /// Always returns `Ok(false)`: completion is signalled by the stream
    /// terminator, since it is rare for every block to carry the maximum
    /// number of blobs and counting would rarely short-circuit.
    fn add(&mut self, blob: Self::Item) -> Result<bool, LookupVerifyError> {
        let slot = blob.slot();
        let start = self.request.start_slot;
        // The blob must fall inside the requested slot window.
        if slot < start || slot >= start + self.request.count {
            return Err(LookupVerifyError::UnrequestedSlot(slot));
        }
        // The index must be possible given the per-block blob limit.
        if blob.index >= self.max_blobs_per_block {
            return Err(LookupVerifyError::UnrequestedIndex(blob.index));
        }
        // The sidecar must prove inclusion in its block body.
        if !blob.verify_blob_sidecar_inclusion_proof() {
            return Err(LookupVerifyError::InvalidInclusionProof);
        }
        // Each (slot, index) pair may only be delivered once.
        let is_duplicate = self
            .items
            .iter()
            .any(|prior| prior.slot() == slot && prior.index == blob.index);
        if is_duplicate {
            return Err(LookupVerifyError::DuplicatedData(slot, blob.index));
        }
        self.items.push(blob);
        // Skip check if blobs are ready as it's rare that all blocks have max blobs
        Ok(false)
    }

    /// Drains and returns all accumulated blobs, leaving the buffer empty.
    fn consume(&mut self) -> Vec<Self::Item> {
        std::mem::take(&mut self.items)
    }
}

View File

@@ -57,7 +57,7 @@ impl<E: EthSpec> ActiveRequestItems for BlobsByRootRequestItems<E> {
return Err(LookupVerifyError::UnrequestedIndex(blob.index));
}
if self.items.iter().any(|b| b.index == blob.index) {
return Err(LookupVerifyError::DuplicateData);
return Err(LookupVerifyError::DuplicatedData(blob.slot(), blob.index));
}
self.items.push(blob);

View File

@@ -0,0 +1,48 @@
use super::{ActiveRequestItems, LookupVerifyError};
use lighthouse_network::rpc::BlocksByRangeRequest;
use std::sync::Arc;
use types::{EthSpec, SignedBeaconBlock};
/// Accumulates results of a blocks_by_range request. Only returns items after receiving the
/// stream termination.
pub struct BlocksByRangeRequestItems<E: EthSpec> {
    /// The originating by_range request; used to validate the slot window of received blocks.
    request: BlocksByRangeRequest,
    /// Blocks accumulated so far; only handed out via `consume` after stream termination.
    items: Vec<Arc<SignedBeaconBlock<E>>>,
}
impl<E: EthSpec> BlocksByRangeRequestItems<E> {
    /// Creates an empty accumulator for the given `blocks_by_range` request.
    pub fn new(request: BlocksByRangeRequest) -> Self {
        let items = Vec::new();
        Self { request, items }
    }
}
impl<E: EthSpec> ActiveRequestItems for BlocksByRangeRequestItems<E> {
    type Item = Arc<SignedBeaconBlock<E>>;

    /// Verifies a received block against the request and appends it.
    ///
    /// Returns `Ok(true)` once `count` blocks have been accumulated, i.e. the
    /// best case where no slot in the requested window was skipped; otherwise
    /// the stream terminator signals completion.
    fn add(&mut self, block: Self::Item) -> Result<bool, LookupVerifyError> {
        let slot = block.slot();
        let start = *self.request.start_slot();
        let count = *self.request.count();
        // The block must fall inside the requested slot window.
        if slot.as_u64() < start || slot.as_u64() >= start + count {
            return Err(LookupVerifyError::UnrequestedSlot(slot));
        }
        // At most one block can exist per slot.
        if self.items.iter().any(|prior| prior.slot() == slot) {
            // DuplicatedData is a common error for all components, default index to 0
            return Err(LookupVerifyError::DuplicatedData(slot, 0));
        }
        self.items.push(block);
        Ok(self.items.len() >= count as usize)
    }

    /// Drains and returns all accumulated blocks, leaving the buffer empty.
    fn consume(&mut self) -> Vec<Self::Item> {
        std::mem::take(&mut self.items)
    }
}

View File

@@ -0,0 +1,54 @@
use super::{ActiveRequestItems, LookupVerifyError};
use lighthouse_network::rpc::methods::DataColumnsByRangeRequest;
use std::sync::Arc;
use types::{DataColumnSidecar, EthSpec};
/// Accumulates results of a data_columns_by_range request. Only returns items after receiving the
/// stream termination.
pub struct DataColumnsByRangeRequestItems<E: EthSpec> {
    /// The originating by_range request; used to validate the slot window and
    /// column indices of received sidecars.
    request: DataColumnsByRangeRequest,
    /// Data column sidecars accumulated so far; only handed out via `consume`
    /// after stream termination.
    items: Vec<Arc<DataColumnSidecar<E>>>,
}
impl<E: EthSpec> DataColumnsByRangeRequestItems<E> {
    /// Creates an empty accumulator for the given `data_columns_by_range` request.
    pub fn new(request: DataColumnsByRangeRequest) -> Self {
        let items = Vec::new();
        Self { request, items }
    }
}
impl<E: EthSpec> ActiveRequestItems for DataColumnsByRangeRequestItems<E> {
    type Item = Arc<DataColumnSidecar<E>>;

    /// Verifies a received data column sidecar against the request and appends it.
    ///
    /// Returns `Ok(true)` once every (slot, column) pair of the request has
    /// been accumulated, i.e. the best case where no slot in the window was
    /// skipped; otherwise the stream terminator signals completion.
    fn add(&mut self, data_column: Self::Item) -> Result<bool, LookupVerifyError> {
        let slot = data_column.slot();
        let index = data_column.index;
        let start = self.request.start_slot;
        // The sidecar must fall inside the requested slot window.
        if slot < start || slot >= start + self.request.count {
            return Err(LookupVerifyError::UnrequestedSlot(slot));
        }
        // Only the explicitly requested column indices are acceptable.
        if !self.request.columns.contains(&index) {
            return Err(LookupVerifyError::UnrequestedIndex(index));
        }
        // The sidecar must prove inclusion in its block body.
        if !data_column.verify_inclusion_proof() {
            return Err(LookupVerifyError::InvalidInclusionProof);
        }
        // Each (slot, column) pair may only be delivered once.
        let is_duplicate = self
            .items
            .iter()
            .any(|prior| prior.slot() == slot && prior.index == index);
        if is_duplicate {
            return Err(LookupVerifyError::DuplicatedData(slot, index));
        }
        self.items.push(data_column);
        let expected_total = self.request.count as usize * self.request.columns.len();
        Ok(self.items.len() >= expected_total)
    }

    /// Drains and returns all accumulated sidecars, leaving the buffer empty.
    fn consume(&mut self) -> Vec<Self::Item> {
        std::mem::take(&mut self.items)
    }
}

View File

@@ -57,7 +57,10 @@ impl<E: EthSpec> ActiveRequestItems for DataColumnsByRootRequestItems<E> {
return Err(LookupVerifyError::UnrequestedIndex(data_column.index));
}
if self.items.iter().any(|d| d.index == data_column.index) {
return Err(LookupVerifyError::DuplicateData);
return Err(LookupVerifyError::DuplicatedData(
data_column.slot(),
data_column.index,
));
}
self.items.push(data_column);