Mirror of https://github.com/sigp/lighthouse.git, synced 2026-03-22 22:34:45 +00:00
Blob syncing (#24)
* add a rt is_blob_batch
* use the mixed type everywhere
* glue
* more glue
* minor fixes
* fix range tests
* filling in the gaps
* more filling in the gaps
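The "mixed type" mentioned above is the pair of wrappers this change threads through range sync: `BlockTy` for a single downloaded block, with or without its blobs, and `BatchTy` for a completed batch handed to the beacon processor. The definitions below are a minimal sketch inferred from how the variants are matched in the work-event dispatch further down in the diff; the exact variant shapes and the `BlobsSidecar` pairing are assumptions, not the definitions from the tree.

use std::sync::Arc;
use types::{BlobsSidecar, EthSpec, SignedBeaconBlock};

/// Sketch: a single block received during range sync, either alone or paired
/// with the blobs sidecar for its slot (post-EIP-4844 epochs). Variant shapes
/// are illustrative assumptions.
pub enum BlockTy<T: EthSpec> {
    Block {
        block: Arc<SignedBeaconBlock<T>>,
    },
    BlockAndBlob {
        block: Arc<SignedBeaconBlock<T>>,
        blobs: Arc<BlobsSidecar<T>>,
    },
}

/// Sketch: a completed batch. The two variants map onto the two work events
/// used in the diff below (chain_segment vs blob_chain_segment).
pub enum BatchTy<T: EthSpec> {
    Blocks(Vec<Arc<SignedBeaconBlock<T>>>),
    BlocksAndBlobs(Vec<(Arc<SignedBeaconBlock<T>>, Arc<BlobsSidecar<T>>)>),
}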
@@ -1,5 +1,7 @@
 use super::batch::{BatchInfo, BatchProcessingResult, BatchState};
+use super::BatchTy;
 use crate::beacon_processor::{ChainSegmentProcessId, WorkEvent as BeaconWorkEvent};
+use crate::sync::manager::BlockTy;
 use crate::sync::{
     manager::Id, network_context::SyncNetworkContext, BatchOperationOutcome, BatchProcessResult,
 };
@@ -10,8 +12,7 @@ use rand::seq::SliceRandom;
 use slog::{crit, debug, o, warn};
 use std::collections::{btree_map::Entry, BTreeMap, HashSet};
 use std::hash::{Hash, Hasher};
-use std::sync::Arc;
-use types::{Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot};
+use types::{Epoch, EthSpec, Hash256, Slot};
 
 /// Blocks are downloaded in batches from peers. This constant specifies how many epochs worth of
 /// blocks per batch are requested _at most_. A batch may request less blocks to account for
@@ -19,7 +20,7 @@ use types::{Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot};
 /// we will negatively report peers with poor bandwidth. This can be set arbitrarily high, in which
 /// case the responder will fill the response up to the max request size, assuming they have the
 /// bandwidth to do so.
-pub const EPOCHS_PER_BATCH: u64 = 2;
+pub const EPOCHS_PER_BATCH: u64 = 1;
 
 /// The maximum number of batches to queue before requesting more.
 const BATCH_BUFFER_SIZE: u8 = 5;
@@ -225,7 +226,7 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
         batch_id: BatchId,
         peer_id: &PeerId,
         request_id: Id,
-        beacon_block: Option<Arc<SignedBeaconBlock<T::EthSpec>>>,
+        beacon_block: Option<BlockTy<T::EthSpec>>,
     ) -> ProcessingResult {
         // check if we have this batch
         let batch = match self.batches.get_mut(&batch_id) {
@@ -326,9 +327,14 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
         let process_id = ChainSegmentProcessId::RangeBatchId(self.id, batch_id, count_unrealized);
         self.current_processing_batch = Some(batch_id);
 
-        if let Err(e) =
-            beacon_processor_send.try_send(BeaconWorkEvent::chain_segment(process_id, blocks))
-        {
+        let work_event = match blocks {
+            BatchTy::Blocks(blocks) => BeaconWorkEvent::chain_segment(process_id, blocks),
+            BatchTy::BlocksAndBlobs(blocks_and_blobs) => {
+                BeaconWorkEvent::blob_chain_segment(process_id, blocks_and_blobs)
+            }
+        };
+
+        if let Err(e) = beacon_processor_send.try_send(work_event) {
             crit!(self.log, "Failed to send chain segment to processor."; "msg" => "process_batch",
                 "error" => %e, "batch" => self.processing_target);
             // This is unlikely to happen but it would stall syncing since the batch now has no
@@ -897,8 +903,8 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
         peer: PeerId,
     ) -> ProcessingResult {
         if let Some(batch) = self.batches.get_mut(&batch_id) {
-            let request = batch.to_blocks_by_range_request();
-            match network.blocks_by_range_request(peer, request, self.id, batch_id) {
+            let (request, batch_type) = batch.to_blocks_by_range_request();
+            match network.blocks_by_range_request(peer, batch_type, request, self.id, batch_id) {
                 Ok(request_id) => {
                     // inform the batch about the new request
                     batch.start_downloading_from_peer(peer, request_id)?;
@@ -1002,7 +1008,8 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
         if let Some(epoch) = self.optimistic_start {
             if let Entry::Vacant(entry) = self.batches.entry(epoch) {
                 if let Some(peer) = idle_peers.pop() {
-                    let optimistic_batch = BatchInfo::new(&epoch, EPOCHS_PER_BATCH);
+                    let batch_type = network.batch_type(epoch);
+                    let optimistic_batch = BatchInfo::new(&epoch, EPOCHS_PER_BATCH, batch_type);
                     entry.insert(optimistic_batch);
                     self.send_batch(network, epoch, peer)?;
                 }
@@ -1011,7 +1018,7 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
         }
 
         while let Some(peer) = idle_peers.pop() {
-            if let Some(batch_id) = self.include_next_batch() {
+            if let Some(batch_id) = self.include_next_batch(network) {
                 // send the batch
                 self.send_batch(network, batch_id, peer)?;
             } else {
@@ -1025,7 +1032,7 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
 
     /// Creates the next required batch from the chain. If there are no more batches required,
     /// `false` is returned.
-    fn include_next_batch(&mut self) -> Option<BatchId> {
+    fn include_next_batch(&mut self, network: &mut SyncNetworkContext<T>) -> Option<BatchId> {
         // don't request batches beyond the target head slot
         if self
             .to_be_downloaded
@@ -1059,10 +1066,11 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
             Entry::Occupied(_) => {
                 // this batch doesn't need downloading, let this same function decide the next batch
                 self.to_be_downloaded += EPOCHS_PER_BATCH;
-                self.include_next_batch()
+                self.include_next_batch(network)
             }
             Entry::Vacant(entry) => {
-                entry.insert(BatchInfo::new(&batch_id, EPOCHS_PER_BATCH));
+                let batch_type = network.batch_type(batch_id);
+                entry.insert(BatchInfo::new(&batch_id, EPOCHS_PER_BATCH, batch_type));
                 self.to_be_downloaded += EPOCHS_PER_BATCH;
                 Some(batch_id)
             }
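Throughout the change, every place that used to build a `BatchInfo` or a blocks-by-range request now also asks the network context what kind of batch an epoch needs via `network.batch_type(epoch)`. A plausible reading of that decision, sketched below, is that epochs at or past the EIP-4844 fork expect blocks and blobs together while earlier epochs expect blocks only; the `ExpectedBatchTy` name, the free-function form, and the way the fork epoch is supplied are assumptions for illustration, not the code from this commit.

use types::Epoch;

/// Sketch: the kind of data a batch for a given epoch is expected to carry.
/// The name is illustrative.
pub enum ExpectedBatchTy {
    OnlyBlock,
    OnlyBlockBlobs,
}

/// Sketch of the decision behind network.batch_type(epoch): before the
/// EIP-4844 fork only blocks are requested; from the fork epoch onward the
/// batch is expected to carry blocks and blobs. The fork epoch is passed in
/// directly here; in the real code it would come from the chain spec.
pub fn batch_type(eip4844_fork_epoch: Option<Epoch>, epoch: Epoch) -> ExpectedBatchTy {
    match eip4844_fork_epoch {
        Some(fork_epoch) if epoch >= fork_epoch => ExpectedBatchTy::OnlyBlockBlobs,
        _ => ExpectedBatchTy::OnlyBlock,
    }
}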