a lot more reprocessing work

This commit is contained in:
realbigsean
2023-03-31 09:09:56 -04:00
parent b78a6e8d1f
commit 8403402620
14 changed files with 365 additions and 151 deletions

View File

@@ -1,11 +1,12 @@
use std::collections::hash_map::Entry;
use std::collections::HashMap;
use std::sync::Arc;
use std::thread::sleep;
use std::time::Duration;
use beacon_chain::blob_verification::AsBlock;
use beacon_chain::blob_verification::BlockWrapper;
use beacon_chain::{BeaconChainTypes, BlockError};
use beacon_chain::{AvailabilityProcessingStatus, BeaconChainTypes, BlockError};
use fnv::FnvHashMap;
use lighthouse_network::rpc::{RPCError, RPCResponseErrorCode};
use lighthouse_network::{PeerAction, PeerId};
@@ -14,6 +15,7 @@ use slog::{debug, error, trace, warn, Logger};
use smallvec::SmallVec;
use store::Hash256;
use types::blob_sidecar::BlobIdentifier;
use types::{BlobSidecar, SignedBeaconBlock};
use crate::beacon_processor::{ChainSegmentProcessId, WorkEvent};
use crate::metrics;
@@ -36,7 +38,7 @@ mod single_block_lookup;
#[cfg(test)]
mod tests;
pub type RootBlockTuple<T> = (Hash256, BlockWrapper<T>);
pub type RootBlockTuple<T> = (Hash256, Arc<SignedBeaconBlock<T>>);
const FAILED_CHAINS_CACHE_EXPIRY_SECONDS: u64 = 60;
const SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS: u8 = 3;
@@ -145,6 +147,7 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
cx: &mut SyncNetworkContext<T>,
) {
//TODO(sean) handle delay
//TODO(sean) cannot use peer id here cause it assumes it has the block, this is from gossip so not true
self.search_block(hash, peer_id, cx);
}
@@ -206,7 +209,7 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
&mut self,
id: Id,
peer_id: PeerId,
block: Option<BlockWrapper<T::EthSpec>>,
block: Option<Arc<SignedBeaconBlock<T::EthSpec>>>,
seen_timestamp: Duration,
cx: &mut SyncNetworkContext<T>,
) {
@@ -271,7 +274,7 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
&mut self,
id: Id,
peer_id: PeerId,
block: Option<BlockWrapper<T::EthSpec>>,
block: Option<Arc<SignedBeaconBlock<T::EthSpec>>>,
seen_timestamp: Duration,
cx: &mut SyncNetworkContext<T>,
) {
@@ -349,6 +352,28 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
);
}
/// Handles a blobs-by-root RPC response for a single-block lookup.
///
/// NOTE(review): unimplemented stub — `todo!()` panics if this path is
/// reached at runtime; presumably filled in by a follow-up commit.
/// The parameter is named `block` but carries a `BlobSidecar` —
/// it should likely be renamed `blob`; confirm against later revisions.
pub fn single_lookup_blob_response(
&mut self,
id: Id,
peer_id: PeerId,
block: Option<Arc<BlobSidecar<T::EthSpec>>>,
seen_timestamp: Duration,
cx: &mut SyncNetworkContext<T>,
) {
todo!()
}
/// Handles a blobs-by-root RPC response for a parent-chain lookup.
///
/// NOTE(review): unimplemented stub — `todo!()` panics if invoked;
/// presumably mirrors `parent_lookup_response` for blobs once filled in.
/// Parameter `block` carries a `BlobSidecar`, not a block — naming looks
/// like a copy-paste artifact; confirm intended rename to `blob`.
pub fn parent_lookup_blob_response(
&mut self,
id: Id,
peer_id: PeerId,
block: Option<Arc<BlobSidecar<T::EthSpec>>>,
seen_timestamp: Duration,
cx: &mut SyncNetworkContext<T>,
) {
todo!()
}
/* Error responses */
#[allow(clippy::needless_collect)] // false positive
@@ -472,11 +497,18 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
};
match result {
BlockProcessResult::Ok => {
trace!(self.log, "Single block processing succeeded"; "block" => %root);
}
BlockProcessResult::MissingBlobs(blobs) => {
todo!()
BlockProcessResult::Ok(status) => {
match status {
AvailabilityProcessingStatus::Imported(hash) => {
trace!(self.log, "Single block processing succeeded"; "block" => %root);
}
AvailabilityProcessingStatus::PendingBlobs(blobs) => {
// trigger?
}
AvailabilityProcessingStatus::PendingBlock(hash) => {
// logic error
}
}
}
BlockProcessResult::Ignored => {
// Beacon processor signalled to ignore the block processing result.
@@ -558,11 +590,18 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
};
match &result {
BlockProcessResult::Ok => {
trace!(self.log, "Parent block processing succeeded"; &parent_lookup)
}
BlockProcessResult::MissingBlobs(blobs) => {
todo!()
BlockProcessResult::Ok(status) => {
match status {
AvailabilityProcessingStatus::Imported(hash) => {
trace!(self.log, "Parent block processing succeeded"; &parent_lookup)
}
AvailabilityProcessingStatus::PendingBlobs(blobs) => {
// trigger?
}
AvailabilityProcessingStatus::PendingBlock(hash) => {
// logic error
}
}
}
BlockProcessResult::Err(e) => {
trace!(self.log, "Parent block processing failed"; &parent_lookup, "error" => %e)
@@ -578,8 +617,11 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
}
match result {
BlockProcessResult::MissingBlobs(blobs) => {
todo!()
BlockProcessResult::Ok(AvailabilityProcessingStatus::PendingBlock(_)) => {
// doesn't make sense
}
BlockProcessResult::Ok(AvailabilityProcessingStatus::PendingBlobs(blobs)) => {
// trigger
}
BlockProcessResult::Err(BlockError::ParentUnknown(block)) => {
// need to keep looking for parents
@@ -587,7 +629,7 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
parent_lookup.add_block(block);
self.request_parent(parent_lookup, cx);
}
BlockProcessResult::Ok
BlockProcessResult::Ok(AvailabilityProcessingStatus::Imported(_))
| BlockProcessResult::Err(BlockError::BlockIsAlreadyKnown { .. }) => {
// Check if the beacon processor is available
let beacon_processor_send = match cx.processor_channel_if_enabled() {
@@ -666,6 +708,24 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
);
}
/// Callback for the beacon processor's result of processing a blob that
/// belongs to a single-block lookup identified by `id`.
///
/// NOTE(review): unimplemented stub — `todo!()` panics if reached.
/// Expected to mirror `single_block_processed`'s handling of
/// `BlockProcessResult` (Ok / Err / Ignored) — confirm in follow-up.
pub fn single_blob_processed(
&mut self,
id: Id,
result: BlockProcessResult<T::EthSpec>,
cx: &mut SyncNetworkContext<T>,
) {
todo!()
}
/// Callback for the beacon processor's result of processing a blob that
/// belongs to the parent lookup rooted at `chain_hash`.
///
/// NOTE(review): unimplemented stub — `todo!()` panics if reached.
/// Expected to mirror `parent_block_processed` once implemented —
/// confirm against later revisions of this file.
pub fn parent_blob_processed(
&mut self,
chain_hash: Hash256,
result: BlockProcessResult<T::EthSpec>,
cx: &mut SyncNetworkContext<T>,
) {
todo!()
}
pub fn parent_chain_processed(
&mut self,
chain_hash: Hash256,
@@ -709,7 +769,7 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
fn send_block_for_processing(
&mut self,
block_root: Hash256,
block: BlockWrapper<T::EthSpec>,
block: Arc<SignedBeaconBlock<T::EthSpec>>,
duration: Duration,
process_type: BlockProcessType,
cx: &mut SyncNetworkContext<T>,

View File

@@ -7,8 +7,10 @@ use beacon_chain::blob_verification::AsBlock;
use beacon_chain::blob_verification::BlockWrapper;
use beacon_chain::BeaconChainTypes;
use lighthouse_network::PeerId;
use std::sync::Arc;
use store::Hash256;
use strum::IntoStaticStr;
use types::{BlobSidecar, SignedBeaconBlock};
use super::single_block_lookup::{self, SingleBlockRequest};
@@ -25,6 +27,7 @@ pub(crate) struct ParentLookup<T: BeaconChainTypes> {
chain_hash: Hash256,
/// The blocks that have currently been downloaded.
downloaded_blocks: Vec<RootBlockTuple<T::EthSpec>>,
downloaded_blobs: Vec<Option<Vec<Arc<BlobSidecar<T::EthSpec>>>>>,
/// Request of the last parent.
current_parent_request: SingleBlockRequest<PARENT_FAIL_TOLERANCE>,
/// Id of the last parent request.
@@ -59,12 +62,18 @@ impl<T: BeaconChainTypes> ParentLookup<T> {
.any(|(root, _d_block)| root == block_root)
}
pub fn new(block_root: Hash256, block: BlockWrapper<T::EthSpec>, peer_id: PeerId) -> Self {
pub fn new(
block_root: Hash256,
block_wrapper: BlockWrapper<T::EthSpec>,
peer_id: PeerId,
) -> Self {
let (block, blobs) = block_wrapper.deconstruct();
let current_parent_request = SingleBlockRequest::new(block.parent_root(), peer_id);
Self {
chain_hash: block_root,
downloaded_blocks: vec![(block_root, block)],
downloaded_blobs: vec![blobs],
current_parent_request,
current_parent_request_id: None,
}
@@ -94,10 +103,12 @@ impl<T: BeaconChainTypes> ParentLookup<T> {
self.current_parent_request.check_peer_disconnected(peer_id)
}
pub fn add_block(&mut self, block: BlockWrapper<T::EthSpec>) {
let next_parent = block.parent_root();
pub fn add_block(&mut self, block_wrapper: BlockWrapper<T::EthSpec>) {
let next_parent = block_wrapper.parent_root();
let current_root = self.current_parent_request.hash;
let (block, blobs) = block_wrapper.deconstruct();
self.downloaded_blocks.push((current_root, block));
self.downloaded_blobs.push(blobs);
self.current_parent_request.hash = next_parent;
self.current_parent_request.state = single_block_lookup::State::AwaitingDownload;
self.current_parent_request_id = None;
@@ -120,14 +131,23 @@ impl<T: BeaconChainTypes> ParentLookup<T> {
let ParentLookup {
chain_hash,
downloaded_blocks,
downloaded_blobs,
current_parent_request,
current_parent_request_id: _,
} = self;
let block_count = downloaded_blocks.len();
let mut blocks = Vec::with_capacity(block_count);
let mut hashes = Vec::with_capacity(block_count);
for (hash, block) in downloaded_blocks {
blocks.push(block);
for ((hash, block), blobs) in downloaded_blocks
.into_iter()
.zip(downloaded_blobs.into_iter())
{
let wrapped_block = if let Some(blobs) = blobs {
BlockWrapper::BlockAndBlobs(block, blobs)
} else {
BlockWrapper::Block(block)
};
blocks.push(wrapped_block);
hashes.push(hash);
}
(chain_hash, blocks, hashes, current_parent_request)
@@ -152,7 +172,7 @@ impl<T: BeaconChainTypes> ParentLookup<T> {
/// the processing result of the block.
pub fn verify_block(
&mut self,
block: Option<BlockWrapper<T::EthSpec>>,
block: Option<Arc<SignedBeaconBlock<T::EthSpec>>>,
failed_chains: &mut lru_cache::LRUTimeCache<Hash256>,
) -> Result<Option<RootBlockTuple<T::EthSpec>>, VerifyError> {
let root_and_block = self.current_parent_request.verify_block(block)?;

View File

@@ -6,10 +6,15 @@ use lighthouse_network::{rpc::BlocksByRootRequest, PeerId};
use rand::seq::IteratorRandom;
use ssz_types::VariableList;
use std::collections::HashSet;
use std::sync::Arc;
use store::{EthSpec, Hash256};
use strum::IntoStaticStr;
use types::blob_sidecar::BlobIdentifier;
use types::SignedBeaconBlock;
/// Object representing a single block lookup request.
///
//previously assumed we would have a single block. Now we may have the block but not the blobs
#[derive(PartialEq, Eq)]
pub struct SingleBlockRequest<const MAX_ATTEMPTS: u8> {
/// The hash of the requested block.
@@ -24,6 +29,7 @@ pub struct SingleBlockRequest<const MAX_ATTEMPTS: u8> {
failed_processing: u8,
/// How many times have we attempted to download this block.
failed_downloading: u8,
missing_blobs: Vec<BlobIdentifier>,
}
#[derive(Debug, PartialEq, Eq)]
@@ -59,6 +65,7 @@ impl<const MAX_ATTEMPTS: u8> SingleBlockRequest<MAX_ATTEMPTS> {
used_peers: HashSet::default(),
failed_processing: 0,
failed_downloading: 0,
missing_blobs: vec![],
}
}
@@ -105,7 +112,7 @@ impl<const MAX_ATTEMPTS: u8> SingleBlockRequest<MAX_ATTEMPTS> {
/// Returns the block for processing if the response is what we expected.
pub fn verify_block<T: EthSpec>(
&mut self,
block: Option<BlockWrapper<T>>,
block: Option<Arc<SignedBeaconBlock<T>>>,
) -> Result<Option<RootBlockTuple<T>>, VerifyError> {
match self.state {
State::AwaitingDownload => {
@@ -116,7 +123,7 @@ impl<const MAX_ATTEMPTS: u8> SingleBlockRequest<MAX_ATTEMPTS> {
Some(block) => {
// Compute the block root using this specific function so that we can get timing
// metrics.
let block_root = get_block_root(block.as_block());
let block_root = get_block_root(&block);
if block_root != self.hash {
// return an error and drop the block
// NOTE: we take this is as a download failure to prevent counting the