Fix PeerDAS sync scoring (#7352)

* Remove request tracking inside syncing chains

* Prioritize by range peers in network context

* Prioritize custody peers for columns by range

* Explicit error handling of the no peers error case

* Remove good_peers_on_sampling_subnets

* Count AwaitingDownload towards the buffer limit

* Retry syncing chains in AwaitingDownload state

* Use same peer prioritization for lookups

* Review PR

* Address TODOs

* Revert changes to peer erroring in range sync

* Revert metrics changes

* Update comment

* Pass peers_to_deprioritize to select_columns_by_range_peers_to_request

* more idiomatic

* Idiomatic while

* Add note about infinite loop

* Use while let

* Fix wrong custody column count for lookup blocks

* Remove impl

* Remove stale comment

* Fix build errors.

* Or default

* Review PR

* BatchPeerGroup

* Match block and blob signatures

* Explicit match statement to BlockError in range sync

* Remove todo in BatchPeerGroup

* Remove participating peers from backfill sync

* Remove MissingAllCustodyColumns error

* Merge fixes

* Clean up PR

* Consistent naming of batch_peers

* Address multiple review comments

* Better errors for das

* Penalize column peers once

* Restore fn

* Fix error enum

* Removed MismatchedPublicKeyLen

* Revert testing changes

* Change BlockAndCustodyColumns enum variant

* Revert type change in import_historical_block_batch

* Drop pubkey cache

* Don't collect Vec

* Classify errors

* Remove ReconstructColumnsError

* More detailed UnrequestedSlot error

* Lint test

* Fix slot conversion

* Reduce penalty for missing blobs

* Revert changes in peer selection

* Lint tests

* Rename block matching functions

* Reorder block matching in historical blocks

* Fix order of block matching

* Add store tests

* Filter blockchain in assert_correct_historical_block_chain

* Also filter before KZG checks

* Lint tests

* Fix lint

* Fix fulu err assertion

* Check point is not at infinity

* Fix ws sync test

* Revert dropping filter fn

---------

Co-authored-by: Jimmy Chen <jchen.tc@gmail.com>
Co-authored-by: Jimmy Chen <jimmy@sigmaprime.io>
Co-authored-by: Pawan Dhananjay <pawandhananjay@gmail.com>
This commit is contained in:
Lion - dapplion
2025-05-21 08:06:42 -05:00
committed by GitHub
parent f06d1d0346
commit b014675b7a
27 changed files with 1103 additions and 654 deletions

View File

@@ -18,6 +18,7 @@ use crate::sync::range_sync::{
};
use beacon_chain::block_verification_types::RpcBlock;
use beacon_chain::{BeaconChain, BeaconChainTypes};
use itertools::Itertools;
use lighthouse_network::service::api_types::Id;
use lighthouse_network::types::{BackFillState, NetworkGlobals};
use lighthouse_network::{PeerAction, PeerId};
@@ -30,6 +31,8 @@ use std::sync::Arc;
use tracing::{debug, error, info, instrument, warn};
use types::{Epoch, EthSpec};
use super::range_sync::BatchPeers;
/// Blocks are downloaded in batches from peers. This constant specifies how many epochs worth of
/// blocks per batch are requested _at most_. A batch may request fewer blocks to account for
/// already requested slots. There is a timeout for each batch request. If this value is too high,
@@ -128,12 +131,6 @@ pub struct BackFillSync<T: BeaconChainTypes> {
/// Batches validated by this chain.
validated_batches: u64,
/// We keep track of peers that are participating in the backfill sync. Unlike RangeSync,
/// BackFillSync uses all synced peers to download the chain from. If BackFillSync fails, we don't
/// want to penalize all our synced peers, so we use this variable to keep track of peers that
/// have participated and only penalize these peers if backfill sync fails.
participating_peers: HashSet<PeerId>,
/// When a backfill sync fails, we keep track of whether a new fully synced peer has joined.
/// This signifies that we are able to attempt to restart a failed chain.
restart_failed_sync: bool,
@@ -181,7 +178,6 @@ impl<T: BeaconChainTypes> BackFillSync<T> {
network_globals,
current_processing_batch: None,
validated_batches: 0,
participating_peers: HashSet::new(),
restart_failed_sync: false,
beacon_chain,
};
@@ -302,25 +298,6 @@ impl<T: BeaconChainTypes> BackFillSync<T> {
}
}
/// A peer has disconnected.
/// If the peer has active batches, those are considered failed and re-requested.
#[instrument(parent = None,
level = "info",
fields(service = "backfill_sync"),
name = "backfill_sync",
skip_all
)]
#[must_use = "A failure here indicates the backfill sync has failed and the global sync state should be updated"]
pub fn peer_disconnected(&mut self, peer_id: &PeerId) -> Result<(), BackFillError> {
if matches!(self.state(), BackFillState::Failed) {
return Ok(());
}
// Remove the peer from the participation list
self.participating_peers.remove(peer_id);
Ok(())
}
/// An RPC error has occurred.
///
/// If the batch exists it is re-requested.
@@ -378,7 +355,7 @@ impl<T: BeaconChainTypes> BackFillSync<T> {
&mut self,
network: &mut SyncNetworkContext<T>,
batch_id: BatchId,
peer_id: &PeerId,
batch_peers: BatchPeers,
request_id: Id,
blocks: Vec<RpcBlock<T::EthSpec>>,
) -> Result<ProcessResult, BackFillError> {
@@ -399,7 +376,7 @@ impl<T: BeaconChainTypes> BackFillSync<T> {
return Ok(ProcessResult::Successful);
}
match batch.download_completed(blocks, *peer_id) {
match batch.download_completed(blocks, batch_peers) {
Ok(received) => {
let awaiting_batches =
self.processing_target.saturating_sub(batch_id) / BACKFILL_EPOCHS_PER_BATCH;
@@ -440,7 +417,6 @@ impl<T: BeaconChainTypes> BackFillSync<T> {
self.set_state(BackFillState::Failed);
// Remove all batches and active requests and participating peers.
self.batches.clear();
self.participating_peers.clear();
self.restart_failed_sync = false;
// Reset all downloading and processing targets
@@ -573,7 +549,7 @@ impl<T: BeaconChainTypes> BackFillSync<T> {
}
};
let Some(peer) = batch.processing_peer() else {
let Some(batch_peers) = batch.processing_peers() else {
self.fail_sync(BackFillError::BatchInvalidState(
batch_id,
String::from("Peer does not exist"),
@@ -585,8 +561,6 @@ impl<T: BeaconChainTypes> BackFillSync<T> {
?result,
%batch,
batch_epoch = %batch_id,
%peer,
client = %network.client_type(peer),
"Backfill batch processed"
);
@@ -628,31 +602,52 @@ impl<T: BeaconChainTypes> BackFillSync<T> {
}
BatchProcessResult::FaultyFailure {
imported_blocks,
penalty,
peer_action,
error,
} => {
// TODO(sync): De-dup between back and forwards sync
if let Some(penalty) = peer_action.block_peer {
// Penalize the peer appropriately.
network.report_peer(batch_peers.block(), penalty, "faulty_batch");
}
// Penalize each peer only once. Currently a peer_action does not mix different
// PeerAction levels.
for (peer, penalty) in peer_action
.column_peer
.iter()
.filter_map(|(column_index, penalty)| {
batch_peers
.column(column_index)
.map(|peer| (*peer, *penalty))
})
.unique()
{
network.report_peer(peer, penalty, "faulty_batch_column");
}
match batch.processing_completed(BatchProcessingResult::FaultyFailure) {
Err(e) => {
// Batch was in the wrong state
self.fail_sync(BackFillError::BatchInvalidState(batch_id, e.0))
.map(|_| ProcessResult::Successful)
}
Ok(BatchOperationOutcome::Failed { blacklist: _ }) => {
// check that we have not exceeded the re-process retry counter
// If a batch has exceeded the invalid batch lookup attempts limit, it means
// that it is likely all peers are sending invalid batches
// repeatedly and are either malicious or faulty. We stop the backfill sync and
// report all synced peers that have participated.
Ok(BatchOperationOutcome::Failed { .. }) => {
// When backfill syncing post-PeerDAS we can't attribute fault to previous
// peers if a batch fails to process too many times. We have strict peer
// scoring for faulty errors, so participating peers that sent invalid
// data are already downscored.
//
// Because backfill sync deals with historical data that we can assert
// to be correct, once we import a batch that contains at least one
// block we are sure we got the right data. There's no need to penalize
// all participating peers in backfill sync if a batch fails
warn!(
score_adjustment = %penalty,
batch_epoch = %batch_id,
"Backfill batch failed to download. Penalizing peers"
error,
"Backfill sync failed after attempting to process batch too many times"
);
for peer in self.participating_peers.drain() {
// TODO(das): `participating_peers` only includes block peers. Should we
// penalize the custody column peers too?
network.report_peer(peer, *penalty, "backfill_batch_failed");
}
self.fail_sync(BackFillError::BatchProcessingFailed(batch_id))
.map(|_| ProcessResult::Successful)
}
@@ -781,37 +776,38 @@ impl<T: BeaconChainTypes> BackFillSync<T> {
// The validated batch has been re-processed
if attempt.hash != processed_attempt.hash {
// The re-downloaded version was different.
if processed_attempt.peer_id != attempt.peer_id {
// TODO(das): should penalize other peers?
let valid_attempt_peer = processed_attempt.block_peer();
let bad_attempt_peer = attempt.block_peer();
if valid_attempt_peer != bad_attempt_peer {
// A different peer sent the correct batch, the previous peer did not
// We negatively score the original peer.
let action = PeerAction::LowToleranceError;
debug!(
batch_epoch = ?id,
score_adjustment = %action,
original_peer = %attempt.peer_id,
new_peer = %processed_attempt.peer_id,
batch_epoch = %id, score_adjustment = %action,
original_peer = %bad_attempt_peer, new_peer = %valid_attempt_peer,
"Re-processed batch validated. Scoring original peer"
);
network.report_peer(
attempt.peer_id,
bad_attempt_peer,
action,
"backfill_reprocessed_original_peer",
"batch_reprocessed_original_peer",
);
} else {
// The same peer corrected its previous mistake. There was an error, so we
// negatively score the original peer.
let action = PeerAction::MidToleranceError;
debug!(
batch_epoch = ?id,
batch_epoch = %id,
score_adjustment = %action,
original_peer = %attempt.peer_id,
new_peer = %processed_attempt.peer_id,
original_peer = %bad_attempt_peer,
new_peer = %valid_attempt_peer,
"Re-processed batch validated by the same peer"
);
network.report_peer(
attempt.peer_id,
bad_attempt_peer,
action,
"backfill_reprocessed_same_peer",
"batch_reprocessed_same_peer",
);
}
}
@@ -926,10 +922,9 @@ impl<T: BeaconChainTypes> BackFillSync<T> {
.cloned()
.collect::<HashSet<_>>();
let (request, is_blob_batch) = batch.to_blocks_by_range_request();
let failed_peers = batch.failed_peers();
let request = batch.to_blocks_by_range_request();
let failed_peers = batch.failed_block_peers();
match network.block_components_by_range_request(
is_blob_batch,
request,
RangeRequestId::BackfillSync { batch_id },
&synced_peers,
@@ -1089,12 +1084,7 @@ impl<T: BeaconChainTypes> BackFillSync<T> {
self.include_next_batch(network)
}
Entry::Vacant(entry) => {
let batch_type = network.batch_type(batch_id);
entry.insert(BatchInfo::new(
&batch_id,
BACKFILL_EPOCHS_PER_BATCH,
batch_type,
));
entry.insert(BatchInfo::new(&batch_id, BACKFILL_EPOCHS_PER_BATCH));
if self.would_complete(batch_id) {
self.last_batch_downloaded = true;
}

View File

@@ -1,15 +1,23 @@
use beacon_chain::{
block_verification_types::RpcBlock, data_column_verification::CustodyDataColumn, get_block_root,
};
use lighthouse_network::service::api_types::{
BlobsByRangeRequestId, BlocksByRangeRequestId, DataColumnsByRangeRequestId,
use lighthouse_network::{
service::api_types::{
BlobsByRangeRequestId, BlocksByRangeRequestId, DataColumnsByRangeRequestId,
},
PeerId,
};
use std::{
collections::{HashMap, HashSet},
sync::Arc,
};
use std::{collections::HashMap, sync::Arc};
use types::{
BlobSidecar, ChainSpec, ColumnIndex, DataColumnSidecar, DataColumnSidecarList, EthSpec,
Hash256, RuntimeVariableList, SignedBeaconBlock,
Hash256, RuntimeVariableList, SignedBeaconBlock, Slot,
};
use super::range_sync::BatchPeers;
pub struct RangeBlockComponentsRequest<E: EthSpec> {
/// Blocks we have received awaiting for their corresponding sidecar.
blocks_request: ByRangeRequest<BlocksByRangeRequestId, Vec<Arc<SignedBeaconBlock<E>>>>,
@@ -19,18 +27,21 @@ pub struct RangeBlockComponentsRequest<E: EthSpec> {
enum ByRangeRequest<I: PartialEq + std::fmt::Display, T> {
Active(I),
Complete(T),
Complete(T, PeerId),
}
enum RangeBlockDataRequest<E: EthSpec> {
/// All pre-deneb blocks
NoData,
/// All post-Deneb blocks, regardless of if they have data or not
Blobs(ByRangeRequest<BlobsByRangeRequestId, Vec<Arc<BlobSidecar<E>>>>),
/// All post-Fulu blocks, regardless of if they have data or not
DataColumns {
requests: HashMap<
DataColumnsByRangeRequestId,
ByRangeRequest<DataColumnsByRangeRequestId, DataColumnSidecarList<E>>,
>,
expected_custody_columns: Vec<ColumnIndex>,
expected_column_to_peer: HashMap<ColumnIndex, PeerId>,
},
}
@@ -38,17 +49,20 @@ impl<E: EthSpec> RangeBlockComponentsRequest<E> {
pub fn new(
blocks_req_id: BlocksByRangeRequestId,
blobs_req_id: Option<BlobsByRangeRequestId>,
data_columns: Option<(Vec<DataColumnsByRangeRequestId>, Vec<ColumnIndex>)>,
data_columns: Option<(
Vec<DataColumnsByRangeRequestId>,
HashMap<ColumnIndex, PeerId>,
)>,
) -> Self {
let block_data_request = if let Some(blobs_req_id) = blobs_req_id {
RangeBlockDataRequest::Blobs(ByRangeRequest::Active(blobs_req_id))
} else if let Some((requests, expected_custody_columns)) = data_columns {
} else if let Some((requests, expected_column_to_peer)) = data_columns {
RangeBlockDataRequest::DataColumns {
requests: requests
.into_iter()
.map(|id| (id, ByRangeRequest::Active(id)))
.collect(),
expected_custody_columns,
expected_column_to_peer,
}
} else {
RangeBlockDataRequest::NoData
@@ -64,18 +78,20 @@ impl<E: EthSpec> RangeBlockComponentsRequest<E> {
&mut self,
req_id: BlocksByRangeRequestId,
blocks: Vec<Arc<SignedBeaconBlock<E>>>,
peer_id: PeerId,
) -> Result<(), String> {
self.blocks_request.finish(req_id, blocks)
self.blocks_request.finish(req_id, blocks, peer_id)
}
pub fn add_blobs(
&mut self,
req_id: BlobsByRangeRequestId,
blobs: Vec<Arc<BlobSidecar<E>>>,
peer_id: PeerId,
) -> Result<(), String> {
match &mut self.block_data_request {
RangeBlockDataRequest::NoData => Err("received blobs but expected no data".to_owned()),
RangeBlockDataRequest::Blobs(ref mut req) => req.finish(req_id, blobs),
RangeBlockDataRequest::Blobs(ref mut req) => req.finish(req_id, blobs, peer_id),
RangeBlockDataRequest::DataColumns { .. } => {
Err("received blobs but expected data columns".to_owned())
}
@@ -86,6 +102,7 @@ impl<E: EthSpec> RangeBlockComponentsRequest<E> {
&mut self,
req_id: DataColumnsByRangeRequestId,
columns: Vec<Arc<DataColumnSidecar<E>>>,
peer_id: PeerId,
) -> Result<(), String> {
match &mut self.block_data_request {
RangeBlockDataRequest::NoData => {
@@ -100,48 +117,60 @@ impl<E: EthSpec> RangeBlockComponentsRequest<E> {
let req = requests
.get_mut(&req_id)
.ok_or(format!("unknown data columns by range req_id {req_id}"))?;
req.finish(req_id, columns)
req.finish(req_id, columns, peer_id)
}
}
}
pub fn responses(&self, spec: &ChainSpec) -> Option<Result<Vec<RpcBlock<E>>, String>> {
let Some(blocks) = self.blocks_request.to_finished() else {
/// If all internal requests are complete returns a Vec of coupled RpcBlocks
#[allow(clippy::type_complexity)]
pub fn responses(
&self,
spec: &ChainSpec,
) -> Option<Result<(Vec<RpcBlock<E>>, BatchPeers), String>> {
let Some((blocks, &block_peer)) = self.blocks_request.to_finished() else {
return None;
};
match &self.block_data_request {
RangeBlockDataRequest::NoData => {
Some(Self::responses_with_blobs(blocks.to_vec(), vec![], spec))
}
RangeBlockDataRequest::NoData => Some(
Self::responses_with_blobs(blocks.to_vec(), vec![], spec)
.map(|blocks| (blocks, BatchPeers::new_from_block_peer(block_peer))),
),
RangeBlockDataRequest::Blobs(request) => {
let Some(blobs) = request.to_finished() else {
let Some((blobs, _blob_peer)) = request.to_finished() else {
return None;
};
Some(Self::responses_with_blobs(
blocks.to_vec(),
blobs.to_vec(),
spec,
))
Some(
Self::responses_with_blobs(blocks.to_vec(), blobs.to_vec(), spec)
.map(|blocks| (blocks, BatchPeers::new_from_block_peer(block_peer))),
)
}
RangeBlockDataRequest::DataColumns {
requests,
expected_custody_columns,
expected_column_to_peer,
} => {
let mut data_columns = vec![];
let mut column_peers = HashMap::new();
for req in requests.values() {
let Some(data) = req.to_finished() else {
let Some((resp_columns, column_peer)) = req.to_finished() else {
return None;
};
data_columns.extend(data.clone())
data_columns.extend(resp_columns.clone());
for column in resp_columns {
column_peers.insert(column.index, *column_peer);
}
}
Some(Self::responses_with_custody_columns(
blocks.to_vec(),
data_columns,
expected_custody_columns,
spec,
))
Some(
Self::responses_with_custody_columns(
blocks.to_vec(),
data_columns,
expected_column_to_peer.clone(),
spec,
)
.map(|blocks| (blocks, BatchPeers::new(block_peer, column_peers))),
)
}
}
}
@@ -199,106 +228,98 @@ impl<E: EthSpec> RangeBlockComponentsRequest<E> {
fn responses_with_custody_columns(
blocks: Vec<Arc<SignedBeaconBlock<E>>>,
data_columns: DataColumnSidecarList<E>,
expects_custody_columns: &[ColumnIndex],
expected_custody_columns: HashMap<ColumnIndex, PeerId>,
spec: &ChainSpec,
) -> Result<Vec<RpcBlock<E>>, String> {
// Group data columns by block_root and index
let mut data_columns_by_block =
HashMap::<Hash256, HashMap<ColumnIndex, Arc<DataColumnSidecar<E>>>>::new();
let mut custody_columns_by_block = HashMap::<Hash256, Vec<CustodyDataColumn<E>>>::new();
let mut block_roots_by_slot = HashMap::<Slot, HashSet<Hash256>>::new();
let expected_custody_indices = expected_custody_columns.keys().cloned().collect::<Vec<_>>();
for column in data_columns {
let block_root = column.block_root();
let index = column.index;
if data_columns_by_block
.entry(block_root)
block_roots_by_slot
.entry(column.slot())
.or_default()
.insert(index, column)
.is_some()
{
.insert(block_root);
// Sanity check before casting to `CustodyDataColumn`. But this should never happen
if !expected_custody_columns.contains_key(&index) {
return Err(format!(
"Repeated column block_root {block_root:?} index {index}"
"Received column not in expected custody indices {index}"
));
}
custody_columns_by_block
.entry(block_root)
.or_default()
.push(CustodyDataColumn::from_asserted_custody(column));
}
// Now iterate all blocks ensuring that the block roots of each block and data column match,
// plus we have columns for our custody requirements
let mut rpc_blocks = Vec::with_capacity(blocks.len());
let rpc_blocks = blocks
.into_iter()
.map(|block| {
let block_root = get_block_root(&block);
block_roots_by_slot
.entry(block.slot())
.or_default()
.insert(block_root);
for block in blocks {
let block_root = get_block_root(&block);
rpc_blocks.push(if block.num_expected_blobs() > 0 {
let Some(mut data_columns_by_index) = data_columns_by_block.remove(&block_root)
else {
// This PR ignores the fix from https://github.com/sigp/lighthouse/pull/5675
// which allows blobs to not match blocks.
// TODO(das): on the initial version of PeerDAS the beacon chain does not check
// rpc custody requirements and dropping this check can allow the block to have
// an inconsistent DB.
return Err(format!("No columns for block {block_root:?} with data"));
};
let mut custody_columns = vec![];
for index in expects_custody_columns {
let Some(data_column) = data_columns_by_index.remove(index) else {
return Err(format!("No column for block {block_root:?} index {index}"));
};
// Safe to convert to `CustodyDataColumn`: we have asserted that the index of
// this column is in the set of `expects_custody_columns` and with the expected
// block root, so for the expected epoch of this batch.
custody_columns.push(CustodyDataColumn::from_asserted_custody(data_column));
}
// Assert that there are no columns left
if !data_columns_by_index.is_empty() {
let remaining_indices = data_columns_by_index.keys().collect::<Vec<_>>();
return Err(format!(
"Not all columns consumed for block {block_root:?}: {remaining_indices:?}"
));
}
let custody_columns = custody_columns_by_block
.remove(&block_root)
.unwrap_or_default();
RpcBlock::new_with_custody_columns(
Some(block_root),
block,
custody_columns,
expects_custody_columns.len(),
expected_custody_indices.clone(),
spec,
)
.map_err(|e| format!("{e:?}"))?
} else {
// Block has no data, expects zero columns
RpcBlock::new_without_blobs(Some(block_root), block, 0)
});
}
.map_err(|e| format!("{e:?}"))
})
.collect::<Result<Vec<_>, _>>()?;
// Assert that there are no columns left for other blocks
if !data_columns_by_block.is_empty() {
let remaining_roots = data_columns_by_block.keys().collect::<Vec<_>>();
if !custody_columns_by_block.is_empty() {
let remaining_roots = custody_columns_by_block.keys().collect::<Vec<_>>();
return Err(format!("Not all columns consumed: {remaining_roots:?}"));
}
for (_slot, block_roots) in block_roots_by_slot {
if block_roots.len() > 1 {
// TODO: Some peer(s) are faulty or malicious. This batch will fail processing but
// we want to send it to the process to better attribute fault. Maybe warn log for
// now and track it in a metric?
}
}
Ok(rpc_blocks)
}
}
impl<I: PartialEq + std::fmt::Display, T> ByRangeRequest<I, T> {
fn finish(&mut self, id: I, data: T) -> Result<(), String> {
fn finish(&mut self, id: I, data: T, peer_id: PeerId) -> Result<(), String> {
match self {
Self::Active(expected_id) => {
if expected_id != &id {
return Err(format!("unexpected req_id expected {expected_id} got {id}"));
}
*self = Self::Complete(data);
*self = Self::Complete(data, peer_id);
Ok(())
}
Self::Complete(_) => Err("request already complete".to_owned()),
Self::Complete(_, _) => Err("request already complete".to_owned()),
}
}
fn to_finished(&self) -> Option<&T> {
fn to_finished(&self) -> Option<(&T, &PeerId)> {
match self {
Self::Active(_) => None,
Self::Complete(data) => Some(data),
Self::Complete(data, peer_id) => Some((data, peer_id)),
}
}
}
@@ -309,12 +330,15 @@ mod tests {
use beacon_chain::test_utils::{
generate_rand_block_and_blobs, generate_rand_block_and_data_columns, test_spec, NumBlobs,
};
use lighthouse_network::service::api_types::{
BlobsByRangeRequestId, BlocksByRangeRequestId, ComponentsByRangeRequestId,
DataColumnsByRangeRequestId, Id, RangeRequestId,
use lighthouse_network::{
service::api_types::{
BlobsByRangeRequestId, BlocksByRangeRequestId, ComponentsByRangeRequestId,
DataColumnsByRangeRequestId, Id, RangeRequestId,
},
PeerId,
};
use rand::SeedableRng;
use std::sync::Arc;
use std::{collections::HashMap, sync::Arc};
use types::{test_utils::XorShiftRng, Epoch, ForkName, MinimalEthSpec as E, SignedBeaconBlock};
fn components_id() -> ComponentsByRangeRequestId {
@@ -359,6 +383,7 @@ mod tests {
#[test]
fn no_blobs_into_responses() {
let spec = test_spec::<E>();
let peer = PeerId::random();
let mut rng = XorShiftRng::from_seed([42; 16]);
let blocks = (0..4)
.map(|_| {
@@ -372,7 +397,7 @@ mod tests {
let mut info = RangeBlockComponentsRequest::<E>::new(blocks_req_id, None, None);
// Send blocks and complete terminate response
info.add_blocks(blocks_req_id, blocks).unwrap();
info.add_blocks(blocks_req_id, blocks, peer).unwrap();
// Assert response is finished and RpcBlocks can be constructed
info.responses(&test_spec::<E>()).unwrap().unwrap();
@@ -381,6 +406,7 @@ mod tests {
#[test]
fn empty_blobs_into_responses() {
let spec = test_spec::<E>();
let peer = PeerId::random();
let mut rng = XorShiftRng::from_seed([42; 16]);
let blocks = (0..4)
.map(|_| {
@@ -403,9 +429,9 @@ mod tests {
RangeBlockComponentsRequest::<E>::new(blocks_req_id, Some(blobs_req_id), None);
// Send blocks and complete terminate response
info.add_blocks(blocks_req_id, blocks).unwrap();
info.add_blocks(blocks_req_id, blocks, peer).unwrap();
// Expect no blobs returned
info.add_blobs(blobs_req_id, vec![]).unwrap();
info.add_blobs(blobs_req_id, vec![], peer).unwrap();
// Assert response is finished and RpcBlocks can be constructed, even if blobs weren't returned.
// This makes sure we don't expect blobs here when they have expired. Checking this logic should
@@ -416,7 +442,8 @@ mod tests {
#[test]
fn rpc_block_with_custody_columns() {
let spec = test_spec::<E>();
let expects_custody_columns = vec![1, 2, 3, 4];
let peer = PeerId::random();
let expects_custody_columns = [1, 2, 3, 4];
let mut rng = XorShiftRng::from_seed([42; 16]);
let blocks = (0..4)
.map(|_| {
@@ -436,15 +463,22 @@ mod tests {
.enumerate()
.map(|(i, _)| columns_id(i as Id, components_id))
.collect::<Vec<_>>();
let column_to_peer = expects_custody_columns
.iter()
.map(|index| (*index, peer))
.collect::<HashMap<_, _>>();
let mut info = RangeBlockComponentsRequest::<E>::new(
blocks_req_id,
None,
Some((columns_req_id.clone(), expects_custody_columns.clone())),
Some((columns_req_id.clone(), column_to_peer)),
);
// Send blocks and complete terminate response
info.add_blocks(
blocks_req_id,
blocks.iter().map(|b| b.0.clone().into()).collect(),
peer,
)
.unwrap();
// Assert response is not finished
@@ -458,6 +492,7 @@ mod tests {
.iter()
.flat_map(|b| b.1.iter().filter(|d| d.index == column_index).cloned())
.collect(),
peer,
)
.unwrap();
@@ -476,12 +511,13 @@ mod tests {
#[test]
fn rpc_block_with_custody_columns_batched() {
let spec = test_spec::<E>();
let peer = PeerId::random();
let batched_column_requests = [vec![1_u64, 2], vec![3, 4]];
let expects_custody_columns = batched_column_requests
.iter()
.flatten()
.cloned()
.collect::<Vec<_>>();
.map(|index| (*index, peer))
.collect::<HashMap<_, _>>();
let custody_column_request_ids =
(0..batched_column_requests.len() as u32).collect::<Vec<_>>();
let num_of_data_column_requests = custody_column_request_ids.len();
@@ -516,6 +552,7 @@ mod tests {
info.add_blocks(
blocks_req_id,
blocks.iter().map(|b| b.0.clone().into()).collect(),
peer,
)
.unwrap();
// Assert response is not finished
@@ -533,6 +570,7 @@ mod tests {
.cloned()
})
.collect::<Vec<_>>(),
peer,
)
.unwrap();

View File

@@ -41,7 +41,9 @@ use super::network_context::{
use super::peer_sampling::{Sampling, SamplingConfig, SamplingResult};
use super::peer_sync_info::{remote_sync_type, PeerSyncType};
use super::range_sync::{RangeSync, RangeSyncType, EPOCHS_PER_BATCH};
use crate::network_beacon_processor::{ChainSegmentProcessId, NetworkBeaconProcessor};
use crate::network_beacon_processor::{
ChainSegmentProcessId, NetworkBeaconProcessor, PeerGroupAction,
};
use crate::service::NetworkMessage;
use crate::status::ToStatusMessage;
use crate::sync::block_lookups::{
@@ -61,8 +63,8 @@ use lighthouse_network::service::api_types::{
SamplingId, SamplingRequester, SingleLookupReqId, SyncRequestId,
};
use lighthouse_network::types::{NetworkGlobals, SyncState};
use lighthouse_network::PeerId;
use lighthouse_network::SyncInfo;
use lighthouse_network::{PeerAction, PeerId};
use logging::crit;
use lru_cache::LRUTimeCache;
use std::ops::Sub;
@@ -218,7 +220,8 @@ pub enum BatchProcessResult {
/// The batch processing failed. It carries whether the processing imported any block.
FaultyFailure {
imported_blocks: usize,
penalty: PeerAction,
peer_action: PeerGroupAction,
error: String,
},
NonFaultyFailure,
}
@@ -528,7 +531,6 @@ impl<T: BeaconChainTypes> SyncManager<T> {
// Remove peer from all data structures
self.range_sync.peer_disconnect(&mut self.network, peer_id);
let _ = self.backfill_sync.peer_disconnected(peer_id);
self.block_lookups.peer_disconnected(peer_id);
// Regardless of the outcome, we update the sync status.
@@ -1271,17 +1273,18 @@ impl<T: BeaconChainTypes> SyncManager<T> {
peer_id: PeerId,
range_block_component: RangeBlockComponent<T::EthSpec>,
) {
if let Some(resp) = self
.network
.range_block_component_response(range_request_id, range_block_component)
{
if let Some(resp) = self.network.range_block_component_response(
range_request_id,
peer_id,
range_block_component,
) {
match resp {
Ok(blocks) => {
Ok((blocks, batch_peers)) => {
match range_request_id.requester {
RangeRequestId::RangeSync { chain_id, batch_id } => {
self.range_sync.blocks_by_range_response(
&mut self.network,
peer_id,
batch_peers,
chain_id,
batch_id,
range_request_id.id,
@@ -1293,7 +1296,7 @@ impl<T: BeaconChainTypes> SyncManager<T> {
match self.backfill_sync.on_block_response(
&mut self.network,
batch_id,
&peer_id,
batch_peers,
range_request_id.id,
blocks,
) {

View File

@@ -5,7 +5,7 @@ use self::custody::{ActiveCustodyRequest, Error as CustodyRequestError};
pub use self::requests::{BlocksByRootSingleRequest, DataColumnsByRootSingleBlockRequest};
use super::block_sidecar_coupling::RangeBlockComponentsRequest;
use super::manager::BlockProcessType;
use super::range_sync::ByRangeRequestType;
use super::range_sync::{BatchPeers, ByRangeRequestType};
use super::SyncMessage;
use crate::metrics;
use crate::network_beacon_processor::NetworkBeaconProcessor;
@@ -443,12 +443,14 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
/// A blocks by range request sent by the range sync algorithm
pub fn block_components_by_range_request(
&mut self,
batch_type: ByRangeRequestType,
request: BlocksByRangeRequest,
requester: RangeRequestId,
peers: &HashSet<PeerId>,
peers_to_deprioritize: &HashSet<PeerId>,
) -> Result<Id, RpcRequestSendError> {
let batch_epoch = Slot::new(*request.start_slot()).epoch(T::EthSpec::slots_per_epoch());
let batch_type = self.batch_type(batch_epoch);
let active_request_count_by_peer = self.active_request_count_by_peer();
let Some(block_peer) = peers
@@ -510,7 +512,12 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
let data_column_requests = columns_by_range_peers_to_request
.map(|columns_by_range_peers_to_request| {
columns_by_range_peers_to_request
let column_to_peer_map = columns_by_range_peers_to_request
.iter()
.flat_map(|(peer_id, columns)| columns.iter().map(|column| (*column, *peer_id)))
.collect::<HashMap<ColumnIndex, PeerId>>();
let requests = columns_by_range_peers_to_request
.into_iter()
.map(|(peer_id, columns)| {
self.send_data_columns_by_range_request(
@@ -523,25 +530,14 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
id,
)
})
.collect::<Result<Vec<_>, _>>()
.collect::<Result<Vec<_>, _>>()?;
Ok((requests, column_to_peer_map))
})
.transpose()?;
let info = RangeBlockComponentsRequest::new(
blocks_req_id,
blobs_req_id,
data_column_requests.map(|data_column_requests| {
(
data_column_requests,
self.network_globals()
.sampling_columns
.clone()
.iter()
.copied()
.collect(),
)
}),
);
let info =
RangeBlockComponentsRequest::new(blocks_req_id, blobs_req_id, data_column_requests);
self.components_by_range_requests.insert(id, info);
Ok(id.id)
@@ -602,13 +598,16 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
Ok(columns_to_request_by_peer)
}
/// Received a blocks by range or blobs by range response for a request that couples blocks '
/// and blobs.
/// Received a _by_range response for a request that couples blocks and its data
///
/// `peer_id` is the peer that served this individual RPC _by_range response.
#[allow(clippy::type_complexity)]
pub fn range_block_component_response(
&mut self,
id: ComponentsByRangeRequestId,
peer_id: PeerId,
range_block_component: RangeBlockComponent<T::EthSpec>,
) -> Option<Result<Vec<RpcBlock<T::EthSpec>>, RpcResponseError>> {
) -> Option<Result<(Vec<RpcBlock<T::EthSpec>>, BatchPeers), RpcResponseError>> {
let Entry::Occupied(mut entry) = self.components_by_range_requests.entry(id) else {
metrics::inc_counter_vec(&metrics::SYNC_UNKNOWN_NETWORK_REQUESTS, &["range_blocks"]);
return None;
@@ -619,18 +618,18 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
match range_block_component {
RangeBlockComponent::Block(req_id, resp) => resp.and_then(|(blocks, _)| {
request
.add_blocks(req_id, blocks)
.add_blocks(req_id, blocks, peer_id)
.map_err(RpcResponseError::BlockComponentCouplingError)
}),
RangeBlockComponent::Blob(req_id, resp) => resp.and_then(|(blobs, _)| {
request
.add_blobs(req_id, blobs)
.add_blobs(req_id, blobs, peer_id)
.map_err(RpcResponseError::BlockComponentCouplingError)
}),
RangeBlockComponent::CustodyColumns(req_id, resp) => {
resp.and_then(|(custody_columns, _)| {
request
.add_custody_columns(req_id, custody_columns)
.add_custody_columns(req_id, custody_columns, peer_id)
.map_err(RpcResponseError::BlockComponentCouplingError)
})
}
@@ -1154,7 +1153,7 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
);
let _enter = span.enter();
debug!(%peer_id, %action, %msg, "Sync reporting peer");
debug!(%peer_id, %action, %msg, client = %self.client_type(&peer_id), "Sync reporting peer");
self.network_send
.send(NetworkMessage::ReportPeer {
peer_id,
@@ -1215,7 +1214,7 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
/// Check whether a batch for this epoch (and only this epoch) should request just blocks or
/// blocks and blobs.
pub fn batch_type(&self, epoch: types::Epoch) -> ByRangeRequestType {
fn batch_type(&self, epoch: types::Epoch) -> ByRangeRequestType {
// Induces a compile time panic if this doesn't hold true.
#[allow(clippy::assertions_on_constants)]
const _: () = assert!(

View File

@@ -28,11 +28,17 @@ mod data_columns_by_root;
#[derive(Debug, PartialEq, Eq, IntoStaticStr)]
pub enum LookupVerifyError {
NotEnoughResponsesReturned { actual: usize },
NotEnoughResponsesReturned {
actual: usize,
},
TooManyResponses,
UnrequestedBlockRoot(Hash256),
UnrequestedIndex(u64),
UnrequestedSlot(Slot),
UnrequestedSlot {
slot: Slot,
start_slot: Slot,
end_slot: Slot,
},
InvalidInclusionProof,
DuplicatedData(Slot, u64),
InternalError(String),

View File

@@ -1,7 +1,7 @@
use super::{ActiveRequestItems, LookupVerifyError};
use lighthouse_network::rpc::methods::BlobsByRangeRequest;
use std::sync::Arc;
use types::{BlobSidecar, EthSpec};
use types::{BlobSidecar, EthSpec, Slot};
/// Accumulates results of a blobs_by_range request. Only returns items after receiving the
/// stream termination.
@@ -25,10 +25,15 @@ impl<E: EthSpec> ActiveRequestItems for BlobsByRangeRequestItems<E> {
type Item = Arc<BlobSidecar<E>>;
fn add(&mut self, blob: Self::Item) -> Result<bool, LookupVerifyError> {
if blob.slot() < self.request.start_slot
|| blob.slot() >= self.request.start_slot + self.request.count
{
return Err(LookupVerifyError::UnrequestedSlot(blob.slot()));
let start_slot = Slot::new(self.request.start_slot);
let end_slot = start_slot + Slot::new(self.request.count);
if blob.slot() < start_slot || blob.slot() >= end_slot {
return Err(LookupVerifyError::UnrequestedSlot {
slot: blob.slot(),
start_slot,
end_slot,
});
}
if blob.index >= self.max_blobs_per_block {
return Err(LookupVerifyError::UnrequestedIndex(blob.index));

View File

@@ -1,7 +1,7 @@
use super::{ActiveRequestItems, LookupVerifyError};
use lighthouse_network::rpc::BlocksByRangeRequest;
use std::sync::Arc;
use types::{EthSpec, SignedBeaconBlock};
use types::{EthSpec, SignedBeaconBlock, Slot};
/// Accumulates results of a blocks_by_range request. Only returns items after receiving the
/// stream termination.
@@ -23,10 +23,15 @@ impl<E: EthSpec> ActiveRequestItems for BlocksByRangeRequestItems<E> {
type Item = Arc<SignedBeaconBlock<E>>;
fn add(&mut self, block: Self::Item) -> Result<bool, LookupVerifyError> {
if block.slot().as_u64() < *self.request.start_slot()
|| block.slot().as_u64() >= self.request.start_slot() + self.request.count()
{
return Err(LookupVerifyError::UnrequestedSlot(block.slot()));
let start_slot = Slot::new(*self.request.start_slot());
let end_slot = start_slot + Slot::new(*self.request.count());
if block.slot() < start_slot || block.slot() >= end_slot {
return Err(LookupVerifyError::UnrequestedSlot {
slot: block.slot(),
start_slot,
end_slot,
});
}
if self
.items

View File

@@ -1,7 +1,7 @@
use super::{ActiveRequestItems, LookupVerifyError};
use lighthouse_network::rpc::methods::DataColumnsByRangeRequest;
use std::sync::Arc;
use types::{DataColumnSidecar, EthSpec};
use types::{DataColumnSidecar, EthSpec, Slot};
/// Accumulates results of a data_columns_by_range request. Only returns items after receiving the
/// stream termination.
@@ -23,10 +23,15 @@ impl<E: EthSpec> ActiveRequestItems for DataColumnsByRangeRequestItems<E> {
type Item = Arc<DataColumnSidecar<E>>;
fn add(&mut self, data_column: Self::Item) -> Result<bool, LookupVerifyError> {
if data_column.slot() < self.request.start_slot
|| data_column.slot() >= self.request.start_slot + self.request.count
{
return Err(LookupVerifyError::UnrequestedSlot(data_column.slot()));
let start_slot = Slot::new(self.request.start_slot);
let end_slot = start_slot + Slot::new(self.request.count);
if data_column.slot() < start_slot || data_column.slot() >= end_slot {
return Err(LookupVerifyError::UnrequestedSlot {
slot: data_column.slot(),
start_slot,
end_slot,
});
}
if !self.request.columns.contains(&data_column.index) {
return Err(LookupVerifyError::UnrequestedIndex(data_column.index));

View File

@@ -2,13 +2,13 @@ use beacon_chain::block_verification_types::RpcBlock;
use lighthouse_network::rpc::methods::BlocksByRangeRequest;
use lighthouse_network::service::api_types::Id;
use lighthouse_network::PeerId;
use std::collections::HashSet;
use std::collections::{HashMap, HashSet};
use std::fmt;
use std::hash::{Hash, Hasher};
use std::ops::Sub;
use std::time::{Duration, Instant};
use strum::Display;
use types::{Epoch, EthSpec, Slot};
use types::{ColumnIndex, Epoch, EthSpec, Slot};
/// The number of times to retry a batch before it is considered failed.
const MAX_BATCH_DOWNLOAD_ATTEMPTS: u8 = 5;
@@ -26,6 +26,35 @@ pub enum ByRangeRequestType {
Blocks,
}
/// Tracks which peers served the contents of a single batch: one peer for the
/// blocks, plus a per-column map of the peers that served each custody data
/// column (empty for pre-PeerDAS / blocks-only batches).
#[derive(Clone, Debug)]
pub struct BatchPeers {
    /// Peer that served the blocks of this batch.
    block_peer: PeerId,
    /// Peer that served each custody data column, keyed by column index.
    column_peers: HashMap<ColumnIndex, PeerId>,
}

impl BatchPeers {
    /// Builds a `BatchPeers` for a batch served by a single block peer, with no
    /// custody column peers recorded.
    pub fn new_from_block_peer(block_peer: PeerId) -> Self {
        Self::new(block_peer, HashMap::default())
    }

    /// Builds a `BatchPeers` from the block peer and the peers that served each
    /// custody column.
    pub fn new(block_peer: PeerId, column_peers: HashMap<ColumnIndex, PeerId>) -> Self {
        Self {
            block_peer,
            column_peers,
        }
    }

    /// Returns the peer that served the blocks of this batch.
    pub fn block(&self) -> PeerId {
        self.block_peer
    }

    /// Returns the peer that served custody column `index`, if one is recorded.
    pub fn column(&self, index: &ColumnIndex) -> Option<&PeerId> {
        self.column_peers.get(index)
    }
}
/// Allows customisation of the above constants used in other sync methods such as BackFillSync.
pub trait BatchConfig {
/// The maximum batch download attempts.
@@ -110,8 +139,6 @@ pub struct BatchInfo<E: EthSpec, B: BatchConfig = RangeSyncBatchConfig> {
failed_download_attempts: Vec<Option<PeerId>>,
/// State of the batch.
state: BatchState<E>,
/// Whether this batch contains all blocks or all blocks and blobs.
batch_type: ByRangeRequestType,
/// Pin the generic
marker: std::marker::PhantomData<B>,
}
@@ -134,7 +161,7 @@ pub enum BatchState<E: EthSpec> {
/// The batch is being downloaded.
Downloading(Id),
/// The batch has been completely downloaded and is ready for processing.
AwaitingProcessing(PeerId, Vec<RpcBlock<E>>, Instant),
AwaitingProcessing(BatchPeers, Vec<RpcBlock<E>>, Instant),
/// The batch is being processed.
Processing(Attempt),
/// The batch was successfully processed and is waiting to be validated.
@@ -171,7 +198,7 @@ impl<E: EthSpec, B: BatchConfig> BatchInfo<E, B> {
/// fork boundary will be of mixed type (all blocks and one last blockblob), and I don't want to
/// deal with this for now.
/// This means finalization might be slower in deneb
pub fn new(start_epoch: &Epoch, num_of_epochs: u64, batch_type: ByRangeRequestType) -> Self {
pub fn new(start_epoch: &Epoch, num_of_epochs: u64) -> Self {
let start_slot = start_epoch.start_slot(E::slots_per_epoch());
let end_slot = start_slot + num_of_epochs * E::slots_per_epoch();
BatchInfo {
@@ -181,20 +208,22 @@ impl<E: EthSpec, B: BatchConfig> BatchInfo<E, B> {
failed_download_attempts: Vec::new(),
non_faulty_processing_attempts: 0,
state: BatchState::AwaitingDownload,
batch_type,
marker: std::marker::PhantomData,
}
}
/// Gives a list of peers from which this batch has had a failed download or processing
/// attempt.
pub fn failed_peers(&self) -> HashSet<PeerId> {
///
/// TODO(das): Returns only block peers to keep the mainnet path equivalent. The failed peers
/// mechanism is broken for PeerDAS and will be fixed with https://github.com/sigp/lighthouse/issues/6258
pub fn failed_block_peers(&self) -> HashSet<PeerId> {
let mut peers = HashSet::with_capacity(
self.failed_processing_attempts.len() + self.failed_download_attempts.len(),
);
for attempt in &self.failed_processing_attempts {
peers.insert(attempt.peer_id);
peers.insert(attempt.peers.block());
}
for peer in self.failed_download_attempts.iter().flatten() {
@@ -212,13 +241,13 @@ impl<E: EthSpec, B: BatchConfig> BatchInfo<E, B> {
false
}
/// Returns the peer that is currently responsible for progressing the state of the batch.
pub fn processing_peer(&self) -> Option<&PeerId> {
/// Returns the peers that provided this batch's downloaded contents
pub fn processing_peers(&self) -> Option<&BatchPeers> {
match &self.state {
BatchState::AwaitingDownload | BatchState::Failed | BatchState::Downloading(..) => None,
BatchState::AwaitingProcessing(peer_id, _, _)
| BatchState::Processing(Attempt { peer_id, .. })
| BatchState::AwaitingValidation(Attempt { peer_id, .. }) => Some(peer_id),
BatchState::AwaitingProcessing(peers, _, _)
| BatchState::Processing(Attempt { peers, .. })
| BatchState::AwaitingValidation(Attempt { peers, .. }) => Some(peers),
BatchState::Poisoned => unreachable!("Poisoned batch"),
}
}
@@ -237,13 +266,10 @@ impl<E: EthSpec, B: BatchConfig> BatchInfo<E, B> {
}
/// Returns a BlocksByRange request associated with the batch.
pub fn to_blocks_by_range_request(&self) -> (BlocksByRangeRequest, ByRangeRequestType) {
(
BlocksByRangeRequest::new(
self.start_slot.into(),
self.end_slot.sub(self.start_slot).into(),
),
self.batch_type,
pub fn to_blocks_by_range_request(&self) -> BlocksByRangeRequest {
BlocksByRangeRequest::new(
self.start_slot.into(),
self.end_slot.sub(self.start_slot).into(),
)
}
@@ -275,12 +301,12 @@ impl<E: EthSpec, B: BatchConfig> BatchInfo<E, B> {
pub fn download_completed(
&mut self,
blocks: Vec<RpcBlock<E>>,
peer: PeerId,
batch_peers: BatchPeers,
) -> Result<usize /* Received blocks */, WrongState> {
match self.state.poison() {
BatchState::Downloading(_) => {
BatchState::Downloading(_request_id) => {
let received = blocks.len();
self.state = BatchState::AwaitingProcessing(peer, blocks, Instant::now());
self.state = BatchState::AwaitingProcessing(batch_peers, blocks, Instant::now());
Ok(received)
}
BatchState::Poisoned => unreachable!("Poisoned batch"),
@@ -305,10 +331,9 @@ impl<E: EthSpec, B: BatchConfig> BatchInfo<E, B> {
peer: Option<PeerId>,
) -> Result<BatchOperationOutcome, WrongState> {
match self.state.poison() {
BatchState::Downloading(_) => {
BatchState::Downloading(_request_id) => {
// register the attempt and check if the batch can be tried again
self.failed_download_attempts.push(peer);
self.state = if self.failed_download_attempts.len()
>= B::max_batch_download_attempts() as usize
{
@@ -349,8 +374,8 @@ impl<E: EthSpec, B: BatchConfig> BatchInfo<E, B> {
pub fn start_processing(&mut self) -> Result<(Vec<RpcBlock<E>>, Duration), WrongState> {
match self.state.poison() {
BatchState::AwaitingProcessing(peer, blocks, start_instant) => {
self.state = BatchState::Processing(Attempt::new::<B, E>(peer, &blocks));
BatchState::AwaitingProcessing(peers, blocks, start_instant) => {
self.state = BatchState::Processing(Attempt::new::<B, E>(peers, &blocks));
Ok((blocks, start_instant.elapsed()))
}
BatchState::Poisoned => unreachable!("Poisoned batch"),
@@ -438,39 +463,41 @@ impl<E: EthSpec, B: BatchConfig> BatchInfo<E, B> {
}
}
/// Represents a peer's attempt and providing the result for this batch.
/// Represents a batch attempt awaiting validation
///
/// Invalid attempts will downscore a peer.
#[derive(PartialEq, Debug)]
/// Invalid attempts will downscore its peers
#[derive(Debug)]
pub struct Attempt {
/// The peer that made the attempt.
pub peer_id: PeerId,
/// The peers that served this batch contents
peers: BatchPeers,
/// The hash of the blocks of the attempt.
pub hash: u64,
}
impl Attempt {
fn new<B: BatchConfig, E: EthSpec>(peer_id: PeerId, blocks: &[RpcBlock<E>]) -> Self {
fn new<B: BatchConfig, E: EthSpec>(peers: BatchPeers, blocks: &[RpcBlock<E>]) -> Self {
let hash = B::batch_attempt_hash(blocks);
Attempt { peer_id, hash }
Attempt { peers, hash }
}
pub fn block_peer(&self) -> PeerId {
self.peers.block()
}
}
impl<E: EthSpec> std::fmt::Debug for BatchState<E> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
BatchState::Processing(Attempt {
ref peer_id,
hash: _,
}) => write!(f, "Processing({})", peer_id),
BatchState::AwaitingValidation(Attempt {
ref peer_id,
hash: _,
}) => write!(f, "AwaitingValidation({})", peer_id),
BatchState::Processing(Attempt { ref peers, hash: _ }) => {
write!(f, "Processing({})", peers.block())
}
BatchState::AwaitingValidation(Attempt { ref peers, hash: _ }) => {
write!(f, "AwaitingValidation({})", peers.block())
}
BatchState::AwaitingDownload => f.write_str("AwaitingDownload"),
BatchState::Failed => f.write_str("Failed"),
BatchState::AwaitingProcessing(ref peer, ref blocks, _) => {
write!(f, "AwaitingProcessing({}, {} blocks)", peer, blocks.len())
BatchState::AwaitingProcessing(_, ref blocks, _) => {
write!(f, "AwaitingProcessing({} blocks)", blocks.len())
}
BatchState::Downloading(request_id) => {
write!(f, "Downloading({})", request_id)

View File

@@ -1,4 +1,4 @@
use super::batch::{BatchInfo, BatchProcessingResult, BatchState};
use super::batch::{BatchInfo, BatchPeers, BatchProcessingResult, BatchState};
use super::RangeSyncType;
use crate::metrics;
use crate::network_beacon_processor::ChainSegmentProcessId;
@@ -6,6 +6,7 @@ use crate::sync::network_context::{RangeRequestId, RpcRequestSendError, RpcRespo
use crate::sync::{network_context::SyncNetworkContext, BatchOperationOutcome, BatchProcessResult};
use beacon_chain::block_verification_types::RpcBlock;
use beacon_chain::BeaconChainTypes;
use itertools::Itertools;
use lighthouse_network::service::api_types::Id;
use lighthouse_network::{PeerAction, PeerId};
use logging::crit;
@@ -216,7 +217,7 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
&mut self,
network: &mut SyncNetworkContext<T>,
batch_id: BatchId,
peer_id: &PeerId,
batch_peers: BatchPeers,
request_id: Id,
blocks: Vec<RpcBlock<T::EthSpec>>,
) -> ProcessingResult {
@@ -244,8 +245,7 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
// A stream termination has been sent. This batch has ended. Process a completed batch.
// Remove the request from the peer's active batches
// TODO(das): should use peer group here https://github.com/sigp/lighthouse/issues/6258
let received = batch.download_completed(blocks, *peer_id)?;
let received = batch.download_completed(blocks, batch_peers)?;
let awaiting_batches = batch_id
.saturating_sub(self.optimistic_start.unwrap_or(self.processing_target))
/ EPOCHS_PER_BATCH;
@@ -447,7 +447,7 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
}
};
let peer = batch.processing_peer().cloned().ok_or_else(|| {
let batch_peers = batch.processing_peers().ok_or_else(|| {
RemoveChain::WrongBatchState(format!(
"Processing target is in wrong state: {:?}",
batch.state(),
@@ -458,7 +458,6 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
debug!(
result = ?result,
batch_epoch = %batch_id,
client = %network.client_type(&peer),
batch_state = ?batch_state,
?batch,
"Batch processing result"
@@ -521,10 +520,30 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
}
BatchProcessResult::FaultyFailure {
imported_blocks,
penalty,
peer_action,
// TODO(sync): propagate error in logs
error: _,
} => {
// Penalize the peer appropriately.
network.report_peer(peer, *penalty, "faulty_batch");
// TODO(sync): De-dup between back and forwards sync
if let Some(penalty) = peer_action.block_peer {
// Penalize the peer appropriately.
network.report_peer(batch_peers.block(), penalty, "faulty_batch");
}
// Penalize each peer only once. Currently a peer_action does not mix different
// PeerAction levels.
for (peer, penalty) in peer_action
.column_peer
.iter()
.filter_map(|(column_index, penalty)| {
batch_peers
.column(column_index)
.map(|peer| (*peer, *penalty))
})
.unique()
{
network.report_peer(peer, penalty, "faulty_batch_column");
}
// Check if this batch is allowed to continue
match batch.processing_completed(BatchProcessingResult::FaultyFailure)? {
@@ -540,6 +559,11 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
self.handle_invalid_batch(network, batch_id)
}
BatchOperationOutcome::Failed { blacklist } => {
// TODO(das): what peer action should we apply to the rest of
// peers? Say a batch repeatedly fails because a custody peer is not
// sending us its custody columns
let penalty = PeerAction::LowToleranceError;
// Check that we have not exceeded the re-process retry counter,
// If a batch has exceeded the invalid batch lookup attempts limit, it means
// that it is likely all peers in this chain are sending invalid batches
@@ -554,7 +578,7 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
);
for peer in self.peers.drain() {
network.report_peer(peer, *penalty, "faulty_chain");
network.report_peer(peer, penalty, "faulty_chain");
}
Err(RemoveChain::ChainFailed {
blacklist,
@@ -633,17 +657,20 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
// The validated batch has been re-processed
if attempt.hash != processed_attempt.hash {
// The re-downloaded version was different
if processed_attempt.peer_id != attempt.peer_id {
// TODO(das): should penalize other peers?
let valid_attempt_peer = processed_attempt.block_peer();
let bad_attempt_peer = attempt.block_peer();
if valid_attempt_peer != bad_attempt_peer {
// A different peer sent the correct batch, the previous peer did not
// We negatively score the original peer.
let action = PeerAction::LowToleranceError;
debug!(
batch_epoch = %id, score_adjustment = %action,
original_peer = %attempt.peer_id, new_peer = %processed_attempt.peer_id,
original_peer = %bad_attempt_peer, new_peer = %valid_attempt_peer,
"Re-processed batch validated. Scoring original peer"
);
network.report_peer(
attempt.peer_id,
bad_attempt_peer,
action,
"batch_reprocessed_original_peer",
);
@@ -654,12 +681,12 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
debug!(
batch_epoch = %id,
score_adjustment = %action,
original_peer = %attempt.peer_id,
new_peer = %processed_attempt.peer_id,
original_peer = %bad_attempt_peer,
new_peer = %valid_attempt_peer,
"Re-processed batch validated by the same peer"
);
network.report_peer(
attempt.peer_id,
bad_attempt_peer,
action,
"batch_reprocessed_same_peer",
);
@@ -888,8 +915,8 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
) -> ProcessingResult {
let batch_state = self.visualize_batch_state();
if let Some(batch) = self.batches.get_mut(&batch_id) {
let (request, batch_type) = batch.to_blocks_by_range_request();
let failed_peers = batch.failed_peers();
let request = batch.to_blocks_by_range_request();
let failed_peers = batch.failed_block_peers();
// TODO(das): we should request only from peers that are part of this SyncingChain.
// However, then we hit the NoPeer error frequently which causes the batch to fail and
@@ -903,7 +930,6 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
.collect::<HashSet<_>>();
match network.block_components_by_range_request(
batch_type,
request,
RangeRequestId::RangeSync {
chain_id: self.id,
@@ -999,8 +1025,7 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
}
if let Entry::Vacant(entry) = self.batches.entry(epoch) {
let batch_type = network.batch_type(epoch);
let optimistic_batch = BatchInfo::new(&epoch, EPOCHS_PER_BATCH, batch_type);
let optimistic_batch = BatchInfo::new(&epoch, EPOCHS_PER_BATCH);
entry.insert(optimistic_batch);
self.send_batch(network, epoch)?;
}
@@ -1101,8 +1126,7 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
self.include_next_batch(network)
}
Entry::Vacant(entry) => {
let batch_type = network.batch_type(next_batch_id);
entry.insert(BatchInfo::new(&next_batch_id, EPOCHS_PER_BATCH, batch_type));
entry.insert(BatchInfo::new(&next_batch_id, EPOCHS_PER_BATCH));
self.to_be_downloaded += EPOCHS_PER_BATCH;
Some(next_batch_id)
}

View File

@@ -8,7 +8,7 @@ mod range;
mod sync_type;
pub use batch::{
BatchConfig, BatchInfo, BatchOperationOutcome, BatchProcessingResult, BatchState,
BatchConfig, BatchInfo, BatchOperationOutcome, BatchPeers, BatchProcessingResult, BatchState,
ByRangeRequestType,
};
pub use chain::{BatchId, ChainId, EPOCHS_PER_BATCH};

View File

@@ -42,6 +42,7 @@
use super::chain::{BatchId, ChainId, RemoveChain, SyncingChain};
use super::chain_collection::{ChainCollection, SyncChainStatus};
use super::sync_type::RangeSyncType;
use super::BatchPeers;
use crate::metrics;
use crate::status::ToStatusMessage;
use crate::sync::network_context::{RpcResponseError, SyncNetworkContext};
@@ -227,7 +228,7 @@ where
pub fn blocks_by_range_response(
&mut self,
network: &mut SyncNetworkContext<T>,
peer_id: PeerId,
batch_peers: BatchPeers,
chain_id: ChainId,
batch_id: BatchId,
request_id: Id,
@@ -235,7 +236,7 @@ where
) {
// check if this chunk removes the chain
match self.chains.call_by_id(chain_id, |chain| {
chain.on_block_response(network, batch_id, &peer_id, request_id, blocks)
chain.on_block_response(network, batch_id, batch_peers, request_id, blocks)
}) {
Ok((removed_chain, sync_type)) => {
if let Some((removed_chain, remove_reason)) = removed_chain {

View File

@@ -449,12 +449,13 @@ fn build_rpc_block(
RpcBlock::new(None, block, Some(blobs.clone())).unwrap()
}
Some(DataSidecars::DataColumns(columns)) => {
// TODO(das): Assumes CGC = max value. Change if we want to do more complex tests
let expected_custody_indices = columns.iter().map(|d| d.index()).collect::<Vec<_>>();
RpcBlock::new_with_custody_columns(
None,
block,
columns.clone(),
// TODO(das): Assumes CGC = max value. Change if we want to do more complex tests
columns.len(),
expected_custody_indices,
spec,
)
.unwrap()