Miscellaneous fixes to syncing, RPC, and responding to peers' sync-related requests (#3827)

- There was a bug in responding to blob range requests where we would incorrectly label the first slot of an epoch as non-skipped when it was in fact skipped. This bug did not exist in the code for responding to block range requests, because there the logic error was mitigated by defensive coding elsewhere.
- There was a bug where a block received during range sync without a corresponding blob (and vice versa) was incorrectly interpreted as a stream termination.
- RPC size limit fixes.
- Our blob cache was deadlocking, so I removed use of it for now.
- Because of our change in finalized sync batch size from 2 to 1, and our transition to using exact epoch boundaries for batches (rather than one slot past the epoch boundary), finalized sync now needs to target 2 epochs + 1 slot past our peer's finalized slot in order to finalize the chain locally.
- Use fork context bytes in RPC methods on both the server and client side, so each side knows which fork's types to use for encoding and decoding (a sketch of the idea follows this list).
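
A minimal sketch of the fork-context-bytes idea from the last item above. This is not Lighthouse's actual RPC codec and the helper names are illustrative: the sender prefixes each response chunk with the 4-byte fork digest, and the receiver splits the prefix off to select the matching per-fork decoder before touching the payload.

```rust
// Illustrative only: not Lighthouse's actual RPC codec.
type ForkDigest = [u8; 4];

/// Sender side: prefix the SSZ-snappy payload with the 4-byte fork digest.
fn encode_chunk(fork_digest: ForkDigest, payload: &[u8]) -> Vec<u8> {
    let mut out = Vec::with_capacity(4 + payload.len());
    out.extend_from_slice(&fork_digest);
    out.extend_from_slice(payload);
    out
}

/// Receiver side: split off the digest so the matching per-fork decoder
/// can be chosen; returns None if the chunk is too short.
fn decode_chunk(bytes: &[u8]) -> Option<(ForkDigest, &[u8])> {
    let digest: ForkDigest = bytes.get(..4)?.try_into().ok()?;
    Some((digest, &bytes[4..]))
}
```
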
realbigsean
2022-12-21 15:50:51 -05:00
committed by GitHub
parent cc420caaa5
commit 33d01a7911
6 changed files with 78 additions and 20 deletions


@@ -433,7 +433,17 @@ impl<T: BeaconChainTypes> Worker<T> {
         };
         // Pick out the required blocks, ignoring skip-slots.
-        let mut last_block_root = None;
+        let mut last_block_root = req
+            .start_slot
+            .checked_sub(1)
+            .map(|prev_slot| {
+                self.chain
+                    .block_root_at_slot(Slot::new(prev_slot), WhenSlotSkipped::Prev)
+            })
+            .transpose()
+            .ok()
+            .flatten()
+            .flatten();
         let maybe_block_roots = process_results(forwards_block_root_iter, |iter| {
             iter.take_while(|(_, slot)| slot.as_u64() < req.start_slot.saturating_add(req.count))
                 // map skip slots to None
@@ -602,7 +612,17 @@ impl<T: BeaconChainTypes> Worker<T> {
         };
         // Pick out the required blocks, ignoring skip-slots.
-        let mut last_block_root = None;
+        let mut last_block_root = req
+            .start_slot
+            .checked_sub(1)
+            .map(|prev_slot| {
+                self.chain
+                    .block_root_at_slot(Slot::new(prev_slot), WhenSlotSkipped::Prev)
+            })
+            .transpose()
+            .ok()
+            .flatten()
+            .flatten();
         let maybe_block_roots = process_results(forwards_block_root_iter, |iter| {
             iter.take_while(|(_, slot)| slot.as_u64() < req.start_slot.saturating_add(req.count))
                 // map skip slots to None
@@ -669,7 +689,7 @@ impl<T: BeaconChainTypes> Worker<T> {
             self.log,
             "BlobsByRange Response processed";
             "peer" => %peer_id,
-            "msg" => "Failed to return all requested blocks",
+            "msg" => "Failed to return all requested blobs",
             "start_slot" => req.start_slot,
             "current_slot" => current_slot,
             "requested" => req.count,


@@ -314,14 +314,24 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
                 let (chain_id, batch_id, info) = entry.get_mut();
                 let chain_id = chain_id.clone();
                 let batch_id = batch_id.clone();
+                let stream_terminator = maybe_block.is_none();
                 info.add_block_response(maybe_block);
-                let maybe_block = info.pop_response().map(|block_sidecar_pair| {
+                let maybe_block_wrapped = info.pop_response().map(|block_sidecar_pair| {
                     BlockWrapper::BlockAndBlob { block_sidecar_pair }
                 });
+                if stream_terminator && !info.is_finished() {
+                    return None;
+                }
+                if !stream_terminator && maybe_block_wrapped.is_none() {
+                    return None;
+                }
                 if info.is_finished() {
                     entry.remove();
                 }
-                Some((chain_id, batch_id, maybe_block))
+                Some((chain_id, batch_id, maybe_block_wrapped))
             }
             Entry::Vacant(_) => None,
         }
@@ -356,13 +366,24 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
                 let (chain_id, batch_id, info) = entry.get_mut();
                 let chain_id = chain_id.clone();
                 let batch_id = batch_id.clone();
+                let stream_terminator = maybe_sidecar.is_none();
                 info.add_sidecar_response(maybe_sidecar);
                 let maybe_block = info
                     .pop_response()
                     .map(|block_sidecar_pair| BlockWrapper::BlockAndBlob { block_sidecar_pair });
+                if stream_terminator && !info.is_finished() {
+                    return None;
+                }
+                if !stream_terminator && maybe_block.is_none() {
+                    return None;
+                }
                 if info.is_finished() {
                     entry.remove();
                 }
                 Some((chain_id, batch_id, maybe_block))
             }
             Entry::Vacant(_) => None,
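
Both hunks above apply the same rule and read symmetrically: a chunk arriving on one stream before its counterpart must yield "keep waiting" rather than a result, and a stream terminator only completes the request once pairing is finished. Below is a simplified model of that pairing logic with hypothetical names; the real accumulator is the `info` value stored in the entry, not this struct.

```rust
use std::collections::VecDeque;

// Hypothetical, simplified model of pairing coupled block/blob streams;
// not the real sync accumulator.
struct CoupledStreams<B, S> {
    blocks: VecDeque<B>,
    sidecars: VecDeque<S>,
    blocks_done: bool,
    sidecars_done: bool,
}

enum PollResult<B, S> {
    Pair(B, S), // a matched block/blob pair is ready for the batch
    Wait,       // one half arrived without its counterpart: not a termination
    Finished,   // both streams terminated with nothing left unpaired
}

impl<B, S> CoupledStreams<B, S> {
    fn new() -> Self {
        Self {
            blocks: VecDeque::new(),
            sidecars: VecDeque::new(),
            blocks_done: false,
            sidecars_done: false,
        }
    }

    fn on_block(&mut self, maybe_block: Option<B>) -> PollResult<B, S> {
        match maybe_block {
            Some(block) => self.blocks.push_back(block),
            None => self.blocks_done = true, // RPC stream terminator
        }
        self.poll()
    }

    fn on_sidecar(&mut self, maybe_sidecar: Option<S>) -> PollResult<B, S> {
        match maybe_sidecar {
            Some(sidecar) => self.sidecars.push_back(sidecar),
            None => self.sidecars_done = true,
        }
        self.poll()
    }

    fn poll(&mut self) -> PollResult<B, S> {
        // Emit a pair only when both halves are present.
        if !self.blocks.is_empty() && !self.sidecars.is_empty() {
            let block = self.blocks.pop_front().expect("checked non-empty");
            let sidecar = self.sidecars.pop_front().expect("checked non-empty");
            return PollResult::Pair(block, sidecar);
        }
        if self.blocks_done && self.sidecars_done {
            PollResult::Finished
        } else {
            // The pre-fix code fell through to "terminated" here, which is
            // exactly the misinterpretation described in the commit message.
            PollResult::Wait
        }
    }
}
```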


@@ -137,10 +137,16 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
         let id = SyncingChain::<T>::id(&target_head_root, &target_head_slot);
+        let target_slot = if is_finalized_segment {
+            target_head_slot + (2 * T::EthSpec::slots_per_epoch()) + 1
+        } else {
+            target_head_slot
+        };
         SyncingChain {
             id,
             start_epoch,
-            target_head_slot,
+            target_head_slot: target_slot,
             target_head_root,
             batches: BTreeMap::new(),
             peers,
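
For a concrete sense of the new finalized-sync target, here is a worked example assuming mainnet's 32 slots per epoch; the numbers are illustrative, not taken from the diff.

```rust
// Worked example of the finalized-sync target from the hunk above,
// assuming 32 slots per epoch (mainnet). Numbers are illustrative.
fn finalized_sync_target(peer_finalized_slot: u64, slots_per_epoch: u64) -> u64 {
    peer_finalized_slot + (2 * slots_per_epoch) + 1
}

fn main() {
    let slots_per_epoch = 32;
    let peer_finalized_slot = 6_400; // first slot of epoch 200
    // 6400 + 64 + 1 = 6465, i.e. one slot into epoch 202.
    assert_eq!(
        finalized_sync_target(peer_finalized_slot, slots_per_epoch),
        6_465
    );
}
```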