Deal with RPC blobs in groups per block in the DA checker. Don't cache missing blob IDs in the DA checker.

This commit is contained in:
realbigsean
2023-04-04 10:29:07 -04:00
parent 22694d1c89
commit 6f12df37cf
8 changed files with 247 additions and 79 deletions

View File

@@ -334,7 +334,7 @@ pub fn verify_kzg_for_blob<T: EthSpec>(
/// Note: This function should be preferred over calling `verify_kzg_for_blob` /// Note: This function should be preferred over calling `verify_kzg_for_blob`
/// in a loop since this function kzg verifies a list of blobs more efficiently. /// in a loop since this function kzg verifies a list of blobs more efficiently.
pub fn verify_kzg_for_blob_list<T: EthSpec>( pub fn verify_kzg_for_blob_list<T: EthSpec>(
blob_list: BlobSidecarList<T>, blob_list: Vec<Arc<BlobSidecar<T>>>,
kzg: &Kzg, kzg: &Kzg,
) -> Result<KzgVerifiedBlobList<T>, AvailabilityCheckError> { ) -> Result<KzgVerifiedBlobList<T>, AvailabilityCheckError> {
let (blobs, (commitments, proofs)): (Vec<_>, (Vec<_>, Vec<_>)) = blob_list let (blobs, (commitments, proofs)): (Vec<_>, (Vec<_>, Vec<_>)) = blob_list

View File

@@ -781,21 +781,17 @@ impl<E: EthSpec> AvailabilityPendingExecutedBlock<E> {
} }
pub fn get_all_blob_ids(&self) -> Vec<BlobIdentifier> { pub fn get_all_blob_ids(&self) -> Vec<BlobIdentifier> {
self.get_filtered_blob_ids(|_| true) let block_root = self.import_data.block_root;
self.block
.get_filtered_blob_ids(Some(block_root), |_, _| true)
} }
pub fn get_filtered_blob_ids(&self, filter: impl Fn(u64) -> bool) -> Vec<BlobIdentifier> { pub fn get_filtered_blob_ids(
let num_blobs_expected = self.num_blobs_expected(); &self,
let mut blob_ids = Vec::with_capacity(num_blobs_expected); filter: impl Fn(usize, Hash256) -> bool,
for i in 0..num_blobs_expected as u64 { ) -> Vec<BlobIdentifier> {
if filter(i) { self.block
blob_ids.push(BlobIdentifier { .get_filtered_blob_ids(Some(self.import_data.block_root), filter)
block_root: self.import_data.block_root,
index: i,
});
}
}
blob_ids
} }
} }

View File

@@ -12,6 +12,7 @@ use ssz_types::{Error, FixedVector, VariableList};
use state_processing::per_block_processing::deneb::deneb::verify_kzg_commitments_against_transactions; use state_processing::per_block_processing::deneb::deneb::verify_kzg_commitments_against_transactions;
use std::collections::hash_map::{Entry, OccupiedEntry}; use std::collections::hash_map::{Entry, OccupiedEntry};
use std::collections::HashMap; use std::collections::HashMap;
use std::ops::Index;
use std::sync::Arc; use std::sync::Arc;
use types::beacon_block_body::KzgCommitments; use types::beacon_block_body::KzgCommitments;
use types::blob_sidecar::{BlobIdentifier, BlobSidecar}; use types::blob_sidecar::{BlobIdentifier, BlobSidecar};
@@ -39,6 +40,10 @@ pub enum AvailabilityCheckError {
}, },
Pending, Pending,
IncorrectFork, IncorrectFork,
BlockBlobRootMismatch {
block_root: Hash256,
blob_block_root: Hash256,
},
} }
impl From<ssz_types::Error> for AvailabilityCheckError { impl From<ssz_types::Error> for AvailabilityCheckError {
@@ -68,30 +73,28 @@ struct ReceivedComponents<T: EthSpec> {
/// We use a `BTreeMap` here to maintain the order of `BlobSidecar`s based on index. /// We use a `BTreeMap` here to maintain the order of `BlobSidecar`s based on index.
verified_blobs: FixedVector<Option<KzgVerifiedBlob<T>>, T::MaxBlobsPerBlock>, verified_blobs: FixedVector<Option<KzgVerifiedBlob<T>>, T::MaxBlobsPerBlock>,
executed_block: Option<AvailabilityPendingExecutedBlock<T>>, executed_block: Option<AvailabilityPendingExecutedBlock<T>>,
missing_blob_ids: Vec<BlobIdentifier>,
} }
impl<T: EthSpec> ReceivedComponents<T> { impl<T: EthSpec> ReceivedComponents<T> {
fn new_from_blob(blob: KzgVerifiedBlob<T>) -> Self { fn new_from_blobs(blobs: &[KzgVerifiedBlob<T>]) -> Self {
let mut verified_blobs = FixedVector::<_, _>::default(); let mut verified_blobs = FixedVector::<_, _>::default();
// TODO: verify that we've already ensured the blob index < T::MaxBlobsPerBlock for blob in blobs {
if let Some(mut_maybe_blob) = verified_blobs.get_mut(blob.blob_index() as usize) { // TODO: verify that we've already ensured the blob index < T::MaxBlobsPerBlock
*mut_maybe_blob = Some(blob); if let Some(mut_maybe_blob) = verified_blobs.get_mut(blob.blob_index() as usize) {
*mut_maybe_blob = Some(blob.clone());
}
} }
Self { Self {
verified_blobs, verified_blobs,
executed_block: None, executed_block: None,
missing_blob_ids: vec![],
} }
} }
fn new_from_block(block: AvailabilityPendingExecutedBlock<T>) -> Self { fn new_from_block(block: AvailabilityPendingExecutedBlock<T>) -> Self {
let missing_blob_ids = block.get_all_blob_ids();
Self { Self {
verified_blobs: <_>::default(), verified_blobs: <_>::default(),
executed_block: Some(block), executed_block: Some(block),
missing_blob_ids,
} }
} }
@@ -155,20 +158,19 @@ impl<T: EthSpec, S: SlotClock> DataAvailabilityChecker<T, S> {
.map(|kzg_verified_blob| kzg_verified_blob.clone_blob()) .map(|kzg_verified_blob| kzg_verified_blob.clone_blob())
} }
pub fn put_rpc_blob( pub fn put_rpc_blobs(
&self, &self,
blob: Arc<BlobSidecar<T>>, block_root: Hash256,
blobs: Vec<Arc<BlobSidecar<T>>>,
) -> Result<Availability<T>, AvailabilityCheckError> { ) -> Result<Availability<T>, AvailabilityCheckError> {
// Verify the KZG commitment. // Verify the KZG commitment.
let kzg_verified_blob = if let Some(kzg) = self.kzg.as_ref() { let kzg_verified_blobs = if let Some(kzg) = self.kzg.as_ref() {
verify_kzg_for_blob(blob, kzg)? verify_kzg_for_blob_list(blobs, kzg)?
} else { } else {
return Err(AvailabilityCheckError::KzgNotInitialized); return Err(AvailabilityCheckError::KzgNotInitialized);
}; };
self.put_kzg_verified_blob(kzg_verified_blob, |blob_id, missing_blob_ids| { self.put_kzg_verified_blobs(block_root, &kzg_verified_blobs)
missing_blob_ids.contains(&blob_id)
})
} }
/// This first validates the KZG commitments included in the blob sidecar. /// This first validates the KZG commitments included in the blob sidecar.
@@ -188,32 +190,47 @@ impl<T: EthSpec, S: SlotClock> DataAvailabilityChecker<T, S> {
return Err(AvailabilityCheckError::KzgNotInitialized); return Err(AvailabilityCheckError::KzgNotInitialized);
}; };
let availability = match self self.put_kzg_verified_blobs(kzg_verified_blob.block_root(), &[kzg_verified_blob])
.availability_cache }
.write()
.entry(kzg_verified_blob.block_root()) fn put_kzg_verified_blobs(
{ &self,
block_root: Hash256,
kzg_verified_blobs: &[KzgVerifiedBlob<T>],
) -> Result<Availability<T>, AvailabilityCheckError> {
for blob in kzg_verified_blobs {
let blob_block_root = blob.block_root();
if blob_block_root != block_root {
return Err(AvailabilityCheckError::BlockBlobRootMismatch {
block_root,
blob_block_root,
});
}
}
let availability = match self.availability_cache.write().entry(block_root) {
Entry::Occupied(mut occupied_entry) => { Entry::Occupied(mut occupied_entry) => {
// All blobs reaching this cache should be gossip verified and gossip verification // All blobs reaching this cache should be gossip verified and gossip verification
// should filter duplicates, as well as validate indices. // should filter duplicates, as well as validate indices.
let received_components = occupied_entry.get_mut(); let received_components = occupied_entry.get_mut();
if let Some(maybe_verified_blob) = received_components for kzg_verified_blob in kzg_verified_blobs {
.verified_blobs if let Some(maybe_verified_blob) = received_components
.get_mut(kzg_verified_blob.blob_index() as usize) .verified_blobs
{ .get_mut(kzg_verified_blob.blob_index() as usize)
*maybe_verified_blob = Some(kzg_verified_blob) {
*maybe_verified_blob = Some(kzg_verified_blob.clone())
}
} }
if let Some(executed_block) = received_components.executed_block.take() { if let Some(executed_block) = received_components.executed_block.take() {
self.check_block_availability_maybe_cache(occupied_entry, executed_block)? self.check_block_availability_maybe_cache(occupied_entry, executed_block)?
} else { } else {
Availability::PendingBlock(blob.block_root) Availability::PendingBlock(block_root)
} }
} }
Entry::Vacant(vacant_entry) => { Entry::Vacant(vacant_entry) => {
let block_root = kzg_verified_blob.block_root(); vacant_entry.insert(ReceivedComponents::new_from_blobs(kzg_verified_blobs));
vacant_entry.insert(ReceivedComponents::new_from_blob(kzg_verified_blob));
Availability::PendingBlock(block_root) Availability::PendingBlock(block_root)
} }
}; };
@@ -287,7 +304,7 @@ impl<T: EthSpec, S: SlotClock> DataAvailabilityChecker<T, S> {
} else { } else {
let received_components = occupied_entry.get_mut(); let received_components = occupied_entry.get_mut();
let missing_blob_ids = executed_block.get_filtered_blob_ids(|index| { let missing_blob_ids = executed_block.get_filtered_blob_ids(|index, _| {
received_components received_components
.verified_blobs .verified_blobs
.get(index as usize) .get(index as usize)
@@ -314,7 +331,7 @@ impl<T: EthSpec, S: SlotClock> DataAvailabilityChecker<T, S> {
.kzg .kzg
.as_ref() .as_ref()
.ok_or(AvailabilityCheckError::KzgNotInitialized)?; .ok_or(AvailabilityCheckError::KzgNotInitialized)?;
let verified_blobs = verify_kzg_for_blob_list(VariableList::new(blob_list)?, kzg)?; let verified_blobs = verify_kzg_for_blob_list(blob_list, kzg)?;
Ok(MaybeAvailableBlock::Available( Ok(MaybeAvailableBlock::Available(
self.check_availability_with_blobs(block, verified_blobs)?, self.check_availability_with_blobs(block, verified_blobs)?,
@@ -323,6 +340,40 @@ impl<T: EthSpec, S: SlotClock> DataAvailabilityChecker<T, S> {
} }
} }
/// For a given block wrapper, find the missing blobs. Useful for parent unknown blocks.
/// Because these don't otherwise hit the data availability caches.
pub fn get_missing_blob_ids(
&self,
block: BlockWrapper<T>,
block_root: Option<Hash256>,
) -> Result<Vec<BlobIdentifier>, AvailabilityCheckError> {
let (block, blobs) = block.deconstruct();
let maybe_available = self.check_availability_without_blobs(block)?;
let blob_ids = match &maybe_available {
MaybeAvailableBlock::Available(_) => {
vec![]
}
MaybeAvailableBlock::AvailabilityPending(pending_block) => {
if let Some(blobs) = blobs {
pending_block.get_filtered_blob_ids(block_root, |index_usize, block_root| {
let index = index_usize as u64;
let blob_in_wrapper = blobs
.get(index_usize)
.map(|blob| blob.index == index)
.unwrap_or(false);
let blob_in_cache = self
.get_blob(&BlobIdentifier { block_root, index })
.is_some();
!blob_in_wrapper && !blob_in_cache
})
} else {
pending_block.get_all_blob_ids(block_root)
}
}
};
Ok(blob_ids)
}
/// Checks if a block is available, returning an error if the block is not immediately available. /// Checks if a block is available, returning an error if the block is not immediately available.
/// Does not access the gossip cache. /// Does not access the gossip cache.
pub fn try_check_availability( pub fn try_check_availability(
@@ -495,6 +546,36 @@ pub struct AvailabilityPendingBlock<E: EthSpec> {
block: Arc<SignedBeaconBlock<E>>, block: Arc<SignedBeaconBlock<E>>,
} }
impl<E: EthSpec> AvailabilityPendingBlock<E> {
pub fn num_blobs_expected(&self) -> usize {
self.kzg_commitments()
.map_or(0, |commitments| commitments.len())
}
pub fn get_all_blob_ids(&self, block_root: Option<Hash256>) -> Vec<BlobIdentifier> {
self.get_filtered_blob_ids(block_root, |_, _| true)
}
pub fn get_filtered_blob_ids(
&self,
block_root: Option<Hash256>,
filter: impl Fn(usize, Hash256) -> bool,
) -> Vec<BlobIdentifier> {
let block_root = block_root.unwrap_or_else(|| self.as_block().canonical_root());
let num_blobs_expected = self.num_blobs_expected();
let mut blob_ids = Vec::with_capacity(num_blobs_expected);
for i in 0..num_blobs_expected {
if filter(i, block_root) {
blob_ids.push(BlobIdentifier {
block_root,
index: i as u64,
});
}
}
blob_ids
}
}
impl<E: EthSpec> AvailabilityPendingBlock<E> { impl<E: EthSpec> AvailabilityPendingBlock<E> {
pub fn to_block(self) -> Arc<SignedBeaconBlock<E>> { pub fn to_block(self) -> Arc<SignedBeaconBlock<E>> {
self.block self.block

View File

@@ -625,15 +625,17 @@ impl<T: BeaconChainTypes> WorkEvent<T> {
} }
} }
pub fn rpc_blob( pub fn rpc_blobs(
blob: Arc<BlobSidecar<T::EthSpec>>, block_root: Hash256,
blobs: Vec<Arc<BlobSidecar<T::EthSpec>>>,
seen_timestamp: Duration, seen_timestamp: Duration,
process_type: BlockProcessType, process_type: BlockProcessType,
) -> Self { ) -> Self {
Self { Self {
drop_during_sync: false, drop_during_sync: false,
work: Work::RpcBlob { work: Work::RpcBlobs {
block: blob, block_root,
blobs,
seen_timestamp, seen_timestamp,
process_type, process_type,
}, },
@@ -936,8 +938,9 @@ pub enum Work<T: BeaconChainTypes> {
process_type: BlockProcessType, process_type: BlockProcessType,
should_process: bool, should_process: bool,
}, },
RpcBlob { RpcBlobs {
block: Arc<BlobSidecar<T::EthSpec>>, block_root: Hash256,
blobs: Vec<Arc<BlobSidecar<T::EthSpec>>>,
seen_timestamp: Duration, seen_timestamp: Duration,
process_type: BlockProcessType, process_type: BlockProcessType,
}, },
@@ -1000,7 +1003,7 @@ impl<T: BeaconChainTypes> Work<T> {
Work::GossipLightClientFinalityUpdate { .. } => GOSSIP_LIGHT_CLIENT_FINALITY_UPDATE, Work::GossipLightClientFinalityUpdate { .. } => GOSSIP_LIGHT_CLIENT_FINALITY_UPDATE,
Work::GossipLightClientOptimisticUpdate { .. } => GOSSIP_LIGHT_CLIENT_OPTIMISTIC_UPDATE, Work::GossipLightClientOptimisticUpdate { .. } => GOSSIP_LIGHT_CLIENT_OPTIMISTIC_UPDATE,
Work::RpcBlock { .. } => RPC_BLOCK, Work::RpcBlock { .. } => RPC_BLOCK,
Work::RpcBlob { .. } => RPC_BLOB, Work::RpcBlobs { .. } => RPC_BLOB,
Work::ChainSegment { .. } => CHAIN_SEGMENT, Work::ChainSegment { .. } => CHAIN_SEGMENT,
Work::Status { .. } => STATUS_PROCESSING, Work::Status { .. } => STATUS_PROCESSING,
Work::BlocksByRangeRequest { .. } => BLOCKS_BY_RANGE_REQUEST, Work::BlocksByRangeRequest { .. } => BLOCKS_BY_RANGE_REQUEST,
@@ -1513,7 +1516,7 @@ impl<T: BeaconChainTypes> BeaconProcessor<T> {
optimistic_update_queue.push(work, work_id, &self.log) optimistic_update_queue.push(work, work_id, &self.log)
} }
Work::RpcBlock { .. } => rpc_block_queue.push(work, work_id, &self.log), Work::RpcBlock { .. } => rpc_block_queue.push(work, work_id, &self.log),
Work::RpcBlob { .. } => rpc_blob_queue.push(work, work_id, &self.log), Work::RpcBlobs { .. } => rpc_blob_queue.push(work, work_id, &self.log),
Work::ChainSegment { ref process_id, .. } => match process_id { Work::ChainSegment { ref process_id, .. } => match process_id {
ChainSegmentProcessId::RangeBatchId { .. } ChainSegmentProcessId::RangeBatchId { .. }
| ChainSegmentProcessId::ParentLookup { .. } => { | ChainSegmentProcessId::ParentLookup { .. } => {
@@ -1936,12 +1939,14 @@ impl<T: BeaconChainTypes> BeaconProcessor<T> {
duplicate_cache, duplicate_cache,
should_process, should_process,
)), )),
Work::RpcBlob { Work::RpcBlobs {
block, block_root,
blobs,
seen_timestamp, seen_timestamp,
process_type, process_type,
} => task_spawner.spawn_async(worker.process_rpc_blob( } => task_spawner.spawn_async(worker.process_rpc_blobs(
block, block_root,
blobs,
seen_timestamp, seen_timestamp,
process_type, process_type,
)), )),

View File

@@ -136,16 +136,21 @@ impl<T: BeaconChainTypes> Worker<T> {
drop(handle); drop(handle);
} }
pub async fn process_rpc_blob( pub async fn process_rpc_blobs(
self, self,
blob: Arc<BlobSidecar<T::EthSpec>>, block_root: Hash256,
blobs: Vec<Arc<BlobSidecar<T::EthSpec>>>,
seen_timestamp: Duration, seen_timestamp: Duration,
process_type: BlockProcessType, process_type: BlockProcessType,
) { ) {
let result = self let result = self
.chain .chain
.check_availability_and_maybe_import( .check_availability_and_maybe_import(
|chain| chain.data_availability_checker.put_rpc_blob(blob), |chain| {
chain
.data_availability_checker
.put_rpc_blobs(block_root, blobs)
},
CountUnrealized::True, CountUnrealized::True,
) )
.await; .await;

View File

@@ -1,5 +1,5 @@
use std::collections::hash_map::Entry; use std::collections::hash_map::Entry;
use std::collections::HashMap; use std::collections::{HashMap, HashSet};
use std::sync::Arc; use std::sync::Arc;
use std::thread::sleep; use std::thread::sleep;
use std::time::Duration; use std::time::Duration;
@@ -19,6 +19,7 @@ use types::{BlobSidecar, SignedBeaconBlock};
use crate::beacon_processor::{ChainSegmentProcessId, WorkEvent}; use crate::beacon_processor::{ChainSegmentProcessId, WorkEvent};
use crate::metrics; use crate::metrics;
use crate::sync::block_lookups::single_block_lookup::SingleBlobRequest;
use self::parent_lookup::PARENT_FAIL_TOLERANCE; use self::parent_lookup::PARENT_FAIL_TOLERANCE;
use self::{ use self::{
@@ -59,6 +60,8 @@ pub(crate) struct BlockLookups<T: BeaconChainTypes> {
/// The flag allows us to determine if the peer returned data or sent us nothing. /// The flag allows us to determine if the peer returned data or sent us nothing.
single_block_lookups: FnvHashMap<Id, SingleBlockRequest<SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS>>, single_block_lookups: FnvHashMap<Id, SingleBlockRequest<SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS>>,
single_blob_lookups: FnvHashMap<Id, SingleBlobRequest<SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS>>,
/// The logger for the import manager. /// The logger for the import manager.
log: Logger, log: Logger,
} }
@@ -72,6 +75,7 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
FAILED_CHAINS_CACHE_EXPIRY_SECONDS, FAILED_CHAINS_CACHE_EXPIRY_SECONDS,
)), )),
single_block_lookups: Default::default(), single_block_lookups: Default::default(),
single_blob_lookups: Default::default(),
log, log,
} }
} }
@@ -137,6 +141,56 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
cx: &mut SyncNetworkContext<T>, cx: &mut SyncNetworkContext<T>,
) { ) {
todo!() todo!()
//
// let hash = Hash256::zero();
//
// // Do not re-request a blob that is already being requested
// if self
// .single_blob_lookups
// .values_mut()
// .any(|single_block_request| single_block_request.add_peer(&hash, &peer_id))
// {
// return;
// }
//
// if self.parent_lookups.iter_mut().any(|parent_req| {
// parent_req.add_peer(&hash, &peer_id) || parent_req.contains_block(&hash)
// }) {
// // If the block was already downloaded, or is being downloaded in this moment, do not
// // request it.
// return;
// }
//
// if self
// .processing_parent_lookups
// .values()
// .any(|(hashes, _last_parent_request)| hashes.contains(&hash))
// {
// // we are already processing this block, ignore it.
// return;
// }
//
// debug!(
// self.log,
// "Searching for block";
// "peer_id" => %peer_id,
// "block" => %hash
// );
//
// let mut single_block_request = SingleBlobRequest::new(hash, peer_id);
//
// let (peer_id, request) = single_block_request
// .request_block()
// .expect("none of the possible failure cases apply for a newly created block lookup");
// if let Ok(request_id) = cx.single_block_lookup_request(peer_id, request) {
// self.single_blob_lookups
// .insert(request_id, single_block_request);
//
// metrics::set_gauge(
// &metrics::SYNC_SINGLE_BLOB_LOOKUPS,
// self.single_blob_lookups.len() as i64,
// );
} }
pub fn search_block_delayed( pub fn search_block_delayed(
@@ -171,6 +225,13 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
peer_id: PeerId, peer_id: PeerId,
cx: &mut SyncNetworkContext<T>, cx: &mut SyncNetworkContext<T>,
) { ) {
//
// let missing_ids = cx.chain.data_availability_checker.get_missing_blob_ids(block, Some(root));
// // TODO(sean) how do we handle this erroring?
// if let Ok(missing_ids) = missing_ids {
// self.search_blobs(missing_ids, peer_id, cx);
// }
let parent_root = block.parent_root(); let parent_root = block.parent_root();
// If this block or it's parent is part of a known failed chain, ignore it. // If this block or it's parent is part of a known failed chain, ignore it.
if self.failed_chains.contains(&parent_root) || self.failed_chains.contains(&block_root) { if self.failed_chains.contains(&parent_root) || self.failed_chains.contains(&block_root) {
@@ -497,19 +558,17 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
}; };
match result { match result {
BlockProcessResult::Ok(status) => { BlockProcessResult::Ok(status) => match status {
match status { AvailabilityProcessingStatus::Imported(hash) => {
AvailabilityProcessingStatus::Imported(hash) => { trace!(self.log, "Single block processing succeeded"; "block" => %root);
trace!(self.log, "Single block processing succeeded"; "block" => %root);
}
AvailabilityProcessingStatus::PendingBlobs(blobs) => {
// trigger?
}
AvailabilityProcessingStatus::PendingBlock(hash) => {
// logic error
}
} }
} AvailabilityProcessingStatus::PendingBlobs(blobs_ids) => {
self.search_blobs(blobs_ids, peer_id, cx);
}
AvailabilityProcessingStatus::PendingBlock(hash) => {
warn!(self.log, "Block processed but returned PendingBlock"; "block" => %hash);
}
},
BlockProcessResult::Ignored => { BlockProcessResult::Ignored => {
// Beacon processor signalled to ignore the block processing result. // Beacon processor signalled to ignore the block processing result.
// This implies that the cpu is overloaded. Drop the request. // This implies that the cpu is overloaded. Drop the request.
@@ -620,13 +679,18 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
BlockProcessResult::Ok(AvailabilityProcessingStatus::PendingBlock(_)) => { BlockProcessResult::Ok(AvailabilityProcessingStatus::PendingBlock(_)) => {
// doesn't make sense // doesn't make sense
} }
BlockProcessResult::Ok(AvailabilityProcessingStatus::PendingBlobs(blobs)) => { BlockProcessResult::Ok(AvailabilityProcessingStatus::PendingBlobs(blobs_ids)) => {
// trigger self.search_blobs(blobs_ids, peer_id, cx);
} }
BlockProcessResult::Err(BlockError::ParentUnknown(block)) => { BlockProcessResult::Err(BlockError::ParentUnknown(block)) => {
// need to keep looking for parents // TODO(sean) how do we handle this erroring?
// add the block back to the queue and continue the search let missing_ids = cx
.chain
.data_availability_checker
.get_missing_blob_ids(block.clone(), None)
.unwrap_or_default();
parent_lookup.add_block(block); parent_lookup.add_block(block);
self.search_blobs(missing_ids, peer_id, cx);
self.request_parent(parent_lookup, cx); self.request_parent(parent_lookup, cx);
} }
BlockProcessResult::Ok(AvailabilityProcessingStatus::Imported(_)) BlockProcessResult::Ok(AvailabilityProcessingStatus::Imported(_))

View File

@@ -29,7 +29,23 @@ pub struct SingleBlockRequest<const MAX_ATTEMPTS: u8> {
failed_processing: u8, failed_processing: u8,
/// How many times have we attempted to download this block. /// How many times have we attempted to download this block.
failed_downloading: u8, failed_downloading: u8,
missing_blobs: Vec<BlobIdentifier>, }
#[derive(PartialEq, Eq)]
pub struct SingleBlobRequest<const MAX_ATTEMPTS: u8> {
/// The hash of the requested block.
pub hash: Hash256,
pub blob_ids: Vec<BlobIdentifier>,
/// State of this request.
pub state: State,
/// Peers that should have this block.
pub available_peers: HashSet<PeerId>,
/// Peers from which we have requested this block.
pub used_peers: HashSet<PeerId>,
/// How many times have we attempted to process this block.
failed_processing: u8,
/// How many times have we attempted to download this block.
failed_downloading: u8,
} }
#[derive(Debug, PartialEq, Eq)] #[derive(Debug, PartialEq, Eq)]
@@ -65,7 +81,6 @@ impl<const MAX_ATTEMPTS: u8> SingleBlockRequest<MAX_ATTEMPTS> {
used_peers: HashSet::default(), used_peers: HashSet::default(),
failed_processing: 0, failed_processing: 0,
failed_downloading: 0, failed_downloading: 0,
missing_blobs: vec![],
} }
} }

View File

@@ -64,7 +64,7 @@ pub struct SyncNetworkContext<T: BeaconChainTypes> {
/// Channel to send work to the beacon processor. /// Channel to send work to the beacon processor.
beacon_processor_send: mpsc::Sender<WorkEvent<T>>, beacon_processor_send: mpsc::Sender<WorkEvent<T>>,
chain: Arc<BeaconChain<T>>, pub chain: Arc<BeaconChain<T>>,
/// Logger for the `SyncNetworkContext`. /// Logger for the `SyncNetworkContext`.
log: slog::Logger, log: slog::Logger,
@@ -411,6 +411,8 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
} }
} }
// TODO(sean) add single blob lookup + parent lookup request methods
/// Sends a blocks by root request for a single block lookup. /// Sends a blocks by root request for a single block lookup.
pub fn single_block_lookup_request( pub fn single_block_lookup_request(
&mut self, &mut self,