Improved syncing compilation issues

Age Manning
2019-08-24 01:09:29 +10:00
parent c259d6c006
commit b078385362
4 changed files with 622 additions and 530 deletions

@@ -1,129 +1,164 @@
use super::simple_sync::{PeerSyncInfo, FUTURE_SLOT_TOLERANCE};
use beacon_chain::{BeaconChain, BeaconChainTypes, BlockProcessingOutcome};
use eth2_libp2p::rpc::methods::*;
use eth2_libp2p::rpc::RequestId;
use eth2_libp2p::PeerId;
use slog::{debug, info, trace, warn, Logger};
use std::collections::{HashMap, HashSet};
use std::ops::{Add, Sub};
use std::sync::Arc;
use types::{BeaconBlock, EthSpec, Hash256, Slot};

const MAX_BLOCKS_PER_REQUEST: u64 = 10;

/// The number of slots that we can import blocks ahead of us, before going into full Sync mode.
const SLOT_IMPORT_TOLERANCE: usize = 10;
const PARENT_FAIL_TOLERANCE: usize = 3;
const PARENT_DEPTH_TOLERANCE: usize = SLOT_IMPORT_TOLERANCE * 2;

#[derive(PartialEq)]
enum BlockRequestsState {
    QueuedForward,
    QueuedBackward,
    Pending(RequestId),
    Complete,
    Failed,
}

struct BlockRequests<T: EthSpec> {
    target_head_slot: Slot,
    target_head_root: Hash256,
    downloaded_blocks: Vec<BeaconBlock<T>>,
    state: BlockRequestsState,
}
struct ParentRequests<T: EthSpec> {
    downloaded_blocks: Vec<BeaconBlock<T>>,
    failed_attempts: usize,
    last_submitted_peer: PeerId, // to downvote the submitting peer.
    state: BlockRequestsState,
}
impl<T: EthSpec> BlockRequests<T> {
    // Gets the start slot for the next batch: one past the last downloaded block
    // when syncing forward, or one batch width before the earliest known block
    // when syncing backward.
    fn next_start_slot(&self) -> Option<Slot> {
        if !self.downloaded_blocks.is_empty() {
            match self.state {
                BlockRequestsState::QueuedForward => {
                    let last_element_index = self.downloaded_blocks.len() - 1;
                    Some(self.downloaded_blocks[last_element_index].slot.add(1))
                }
                BlockRequestsState::QueuedBackward => {
                    let earliest_known_slot = self.downloaded_blocks[0].slot;
                    Some(earliest_known_slot.add(1).sub(MAX_BLOCKS_PER_REQUEST))
                }
                _ => {
                    // pending/complete/failed
                    None
                }
            }
        } else {
            None
        }
    }
}
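
// A minimal standalone sketch of the batch arithmetic above (illustration only;
// plain u64 slots rather than the `types::Slot` wrapper): forward batches resume
// one past the last downloaded slot, while backward batches step back one full
// request width from the earliest known slot.
#[cfg(test)]
mod batch_arithmetic_sketch {
    const MAX_BLOCKS_PER_REQUEST: u64 = 10;

    #[test]
    fn next_batch_boundaries() {
        // forward sync: having downloaded slots 40..=49, the next batch starts at 50
        let last_downloaded_slot: u64 = 49;
        assert_eq!(last_downloaded_slot + 1, 50);

        // backward sync: with slot 40 the earliest known, the next batch starts at 31
        // and covers 31..=40, deliberately re-downloading the earliest known block
        let earliest_known_slot: u64 = 40;
        assert_eq!(earliest_known_slot + 1 - MAX_BLOCKS_PER_REQUEST, 31);
    }
}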
#[derive(PartialEq, Debug, Clone)]
enum ManagerState {
    Syncing,
    Regular,
    Stalled,
}

pub(crate) enum ImportManagerOutcome {
    Idle,
    RequestBlocks {
        peer_id: PeerId,
        request_id: RequestId,
        request: BeaconBlocksRequest,
    },
    /// Updates information with peer via requesting another HELLO handshake.
    Hello(PeerId),
    RecentRequest(PeerId, RecentBeaconBlocksRequest),
    DownvotePeer(PeerId),
}
pub struct ImportManager<T: BeaconChainTypes> {
    /// A reference to the underlying beacon chain.
    chain: Arc<BeaconChain<T>>,
    state: ManagerState,
    import_queue: HashMap<PeerId, BlockRequests<T::EthSpec>>,
    parent_queue: Vec<ParentRequests<T::EthSpec>>,
    full_peers: HashSet<PeerId>,
    current_req_id: usize,
    log: Logger,
}
impl<T: BeaconChainTypes> ImportManager<T> {
    pub fn new(beacon_chain: Arc<BeaconChain<T>>, log: &slog::Logger) -> Self {
        ImportManager {
            chain: beacon_chain.clone(),
            state: ManagerState::Regular,
            import_queue: HashMap::new(),
            parent_queue: Vec::new(),
            full_peers: HashSet::new(),
            current_req_id: 0,
            log: log.clone(),
        }
    }
    pub fn add_peer(&mut self, peer_id: PeerId, remote: PeerSyncInfo) {
        // TODO: Improve comments.
        // Initially try to download blocks from our current head, then search backwards all the
        // way to our finalized epoch until we match on a chain. This has to be done sequentially
        // to find the next slot to start each batch from.
        let local = PeerSyncInfo::from(&self.chain);

        // If a peer is within SLOT_IMPORT_TOLERANCE from our head slot, ignore a batch sync
        if remote.head_slot.sub(local.head_slot).as_usize() < SLOT_IMPORT_TOLERANCE {
            trace!(self.log, "Ignoring full sync with peer";
                "peer" => format!("{:?}", peer_id),
                "peer_head_slot" => remote.head_slot,
                "local_head_slot" => local.head_slot,
            );
            // remove the peer from the queue if it exists
            self.import_queue.remove(&peer_id);
            return;
        }

        if let Some(block_requests) = self.import_queue.get_mut(&peer_id) {
            // update the target head slot
            if remote.head_slot > block_requests.target_head_slot {
                block_requests.target_head_slot = remote.head_slot;
            }
        } else {
            let block_requests = BlockRequests {
                // this should be larger than the current head; it is checked in the SyncManager
                // before add_peer is called
                target_head_slot: remote.head_slot,
                target_head_root: remote.head_root,
                downloaded_blocks: Vec::new(),
                state: BlockRequestsState::QueuedForward,
            };
            self.import_queue.insert(peer_id, block_requests);
        }
    }
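
    // A hedged sketch of the gate above (illustration only, not part of this
    // commit's API): with plain u64 slots, a peer fewer than SLOT_IMPORT_TOLERANCE
    // slots ahead is left to regular block lookups, while anything further
    // triggers a batched sync. Assumes `Slot` subtraction saturates at zero,
    // as the wrapped arithmetic in `types` does.
    #[allow(dead_code)]
    fn needs_batch_sync_sketch(local_head_slot: u64, remote_head_slot: u64) -> bool {
        remote_head_slot.saturating_sub(local_head_slot) >= SLOT_IMPORT_TOLERANCE as u64
    }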
    pub fn beacon_blocks_response(
        &mut self,
        peer_id: PeerId,
        request_id: RequestId,
        mut blocks: Vec<BeaconBlock<T::EthSpec>>,
    ) {
        // find the request
        let block_requests = match self
            .import_queue
            .get_mut(&peer_id)
            .filter(|r| r.state == BlockRequestsState::Pending(request_id))
        {
            Some(req) => req,
            _ => {
                // No pending request, invalid request_id or coding error
                warn!(self.log, "BeaconBlocks response unknown"; "request_id" => request_id);
                return;
@@ -142,100 +177,115 @@ impl ImportManager {
        if blocks.is_empty() {
            warn!(self.log, "BeaconBlocks response was empty"; "request_id" => request_id);
            block_requests.state = BlockRequestsState::Failed;
            return;
        }

        // Add the newly downloaded blocks to the current list of downloaded blocks. This also
        // determines if we are syncing forward or backward.
        let syncing_forwards = {
            if block_requests.downloaded_blocks.is_empty() {
                block_requests.downloaded_blocks.append(&mut blocks);
                true
            } else if block_requests.downloaded_blocks[0].slot < blocks[0].slot {
                // syncing forwards
                // verify the peer hasn't sent overlapping blocks - ensuring the strictly
                // increasing blocks in a batch will be verified during the processing
                if block_requests.next_start_slot() > Some(blocks[0].slot) {
                    warn!(self.log, "BeaconBlocks response returned duplicate blocks"; "request_id" => request_id, "response_initial_slot" => blocks[0].slot, "requested_initial_slot" => block_requests.next_start_slot());
                    block_requests.state = BlockRequestsState::Failed;
                    return;
                }
                block_requests.downloaded_blocks.append(&mut blocks);
                true
            } else {
                false
            }
        };

        // Determine if more blocks need to be downloaded. There are a few cases:
        // - We have downloaded a batch from our head_slot, which has not reached the remote's
        //   head (target head). Therefore we need to download another sequential batch.
        // - The latest batch includes blocks greater than or equal to the target_head slot,
        //   which means we have caught up to their head. We then check to see if the first
        //   block downloaded matches our head. If so, we are on the same chain and can process
        //   the blocks. If not, we need to sync back further until we are on the same chain, so
        //   we request more blocks.
        // - We are syncing backwards (from our head slot) and need to check if we are on the same
        //   chain. If so, process the blocks; if not, request more blocks all the way up to
        //   our last finalized slot.
        if syncing_forwards {
            // does the batch contain the target_head_slot
            let last_element_index = block_requests.downloaded_blocks.len() - 1;
            if block_requests.downloaded_blocks[last_element_index].slot
                >= block_requests.target_head_slot
            {
                // if the batch is on our chain, this is complete and we can then process.
                // Otherwise start backwards syncing until we reach a common chain.
                let earliest_slot = block_requests.downloaded_blocks[0].slot;
                //TODO: Decide which is faster. Reading block from db and comparing or calculating
                //the hash tree root and comparing.
                if Some(block_requests.downloaded_blocks[0].canonical_root())
                    == root_at_slot(self.chain.clone(), earliest_slot)
                {
                    block_requests.state = BlockRequestsState::Complete;
                    return;
                }

                // not on the same chain, request blocks backwards
                let state = &self.chain.head().beacon_state;
                let local_finalized_slot = state
                    .finalized_checkpoint
                    .epoch
                    .start_slot(T::EthSpec::slots_per_epoch());

                // check that the request hasn't failed by having no common chain
                if local_finalized_slot >= block_requests.downloaded_blocks[0].slot {
                    warn!(self.log, "Peer returned an unknown chain."; "request_id" => request_id);
                    block_requests.state = BlockRequestsState::Failed;
                    return;
                }

                // Start a backwards sync by requesting earlier blocks.
                // There can be duplication in downloaded blocks here if there are a large number
                // of skip slots. In all cases we at least re-download the earliest known block.
                // It is unlikely that a backwards sync is required, so we accept this duplication
                // for now.
                block_requests.state = BlockRequestsState::QueuedBackward;
            } else {
                // batch doesn't contain the head slot, request the next batch
                block_requests.state = BlockRequestsState::QueuedForward;
            }
        } else {
            // syncing backwards
            // if the batch is on our chain, this is complete and we can then process.
            // Otherwise continue backwards
            let earliest_slot = block_requests.downloaded_blocks[0].slot;
            if Some(block_requests.downloaded_blocks[0].canonical_root())
                == root_at_slot(self.chain.clone(), earliest_slot)
            {
                block_requests.state = BlockRequestsState::Complete;
                return;
            }
            block_requests.state = BlockRequestsState::QueuedBackward;
        }
    }
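
    // A standalone sketch of the forward-sync decision above (illustration only;
    // simplified to plain values, not this module's API): given the latest
    // downloaded slot, the peer's target head and whether the earliest downloaded
    // block matched our chain, name the state the batch moves to.
    #[allow(dead_code)]
    fn forward_sync_next_state_sketch(
        latest_slot: u64,
        target_head_slot: u64,
        earliest_block_on_our_chain: bool,
    ) -> &'static str {
        if latest_slot >= target_head_slot {
            if earliest_block_on_our_chain {
                "Complete" // common chain found: hand the range to process_blocks
            } else {
                "QueuedBackward" // caught up, but on a different chain: search backwards
            }
        } else {
            "QueuedForward" // target head not reached: request the next batch
        }
    }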
    pub fn recent_blocks_response(
        &mut self,
        peer_id: PeerId,
        request_id: RequestId,
        blocks: Vec<BeaconBlock<T::EthSpec>>,
    ) {
        // find the request
        let parent_request = match self
            .parent_queue
            .iter_mut()
            .find(|request| request.state == BlockRequestsState::Pending(request_id))
        {
            Some(req) => req,
            None => {
                // No pending request, invalid request_id or coding error
                warn!(self.log, "RecentBeaconBlocks response unknown"; "request_id" => request_id);
@@ -245,8 +295,8 @@ impl ImportManager {
        // if an empty response is given, the peer didn't have the requested block, try again
        if blocks.is_empty() {
            parent_request.failed_attempts += 1;
            parent_request.state = BlockRequestsState::QueuedForward;
            parent_request.last_submitted_peer = peer_id;
            return;
        }
@@ -256,29 +306,27 @@ impl ImportManager {
        if blocks.len() != 1 {
            //TODO: Potentially downvote the peer
            debug!(self.log, "Peer sent more than 1 parent. Ignoring";
                "peer_id" => format!("{:?}", peer_id),
                "no_parents" => blocks.len()
            );
            return;
        }

        // queue for processing
        parent_request.state = BlockRequestsState::Complete;
    }
    pub fn inject_error(_peer_id: PeerId, _id: RequestId) {
        //TODO: Remove block state from pending
    }

    pub fn peer_disconnect(&mut self, peer_id: &PeerId) {
        self.import_queue.remove(peer_id);
        self.full_peers.remove(peer_id);
        self.update_state();
    }

    pub fn add_full_peer(&mut self, peer_id: PeerId) {
        debug!(
            self.log, "Fully synced peer added";
            "peer" => format!("{:?}", peer_id),
@@ -287,32 +335,36 @@ impl ImportManager {
        self.update_state();
    }

    pub fn add_unknown_block(&mut self, block: BeaconBlock<T::EthSpec>, peer_id: PeerId) {
        // if we are not in regular sync mode, ignore this block
        if let ManagerState::Regular = self.state {
            return;
        }

        // make sure this block is not already being searched for
        // TODO: Potentially store a hashset of blocks for O(1) lookups
        for parent_req in self.parent_queue.iter() {
            if let Some(_) = parent_req
                .downloaded_blocks
                .iter()
                .find(|d_block| d_block == &&block)
            {
                // we are already searching for this block, ignore it
                return;
            }
        }

        let req = ParentRequests {
            downloaded_blocks: vec![block],
            failed_attempts: 0,
            last_submitted_peer: peer_id,
            state: BlockRequestsState::QueuedBackward,
        };

        self.parent_queue.push(req);
    }
    pub fn poll(&mut self) -> ImportManagerOutcome {
        loop {
            // update the state of the manager
            self.update_state();
@@ -336,304 +388,340 @@ impl ImportManager {
            let (re_run, outcome) = self.process_complete_parent_requests();
            if let Some(outcome) = outcome {
                return outcome;
            } else if !re_run {
                break;
            }
        }

        return ImportManagerOutcome::Idle;
    }
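
    // A hedged usage sketch (illustration only; the closures stand in for a
    // network handle that is not part of this diff): a caller drains `poll()`
    // until Idle, translating each outcome into an outbound action.
    #[allow(dead_code)]
    fn drive_sketch(
        &mut self,
        mut send_blocks_request: impl FnMut(PeerId, RequestId, BeaconBlocksRequest),
        mut send_hello: impl FnMut(PeerId),
        mut send_recent_request: impl FnMut(PeerId, RecentBeaconBlocksRequest),
        mut downvote: impl FnMut(PeerId),
    ) {
        loop {
            match self.poll() {
                ImportManagerOutcome::Idle => break,
                ImportManagerOutcome::RequestBlocks {
                    peer_id,
                    request_id,
                    request,
                } => send_blocks_request(peer_id, request_id, request),
                ImportManagerOutcome::Hello(peer_id) => send_hello(peer_id),
                ImportManagerOutcome::RecentRequest(peer_id, request) => {
                    send_recent_request(peer_id, request)
                }
                ImportManagerOutcome::DownvotePeer(peer_id) => downvote(peer_id),
            }
        }
    }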
    fn update_state(&mut self) {
        let previous_state = self.state.clone();
        self.state = {
            if !self.import_queue.is_empty() {
                ManagerState::Syncing
            } else if !self.full_peers.is_empty() {
                ManagerState::Regular
            } else {
                ManagerState::Stalled
            }
        };
        if self.state != previous_state {
            info!(self.log, "Syncing state updated";
                "old_state" => format!("{:?}", previous_state),
                "new_state" => format!("{:?}", self.state),
            );
        }
    }
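
    // Sketch of the precedence encoded above (illustration only): any active
    // batch request forces Syncing; otherwise a known fully-synced peer gives
    // Regular; with neither, the manager is Stalled.
    #[allow(dead_code)]
    fn state_precedence_sketch(has_batches: bool, has_full_peers: bool) -> ManagerState {
        if has_batches {
            ManagerState::Syncing
        } else if has_full_peers {
            ManagerState::Regular
        } else {
            ManagerState::Stalled
        }
    }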
    fn process_potential_block_requests(&mut self) -> Option<ImportManagerOutcome> {
        // check if an outbound request is required
        // Managing a fixed number of outbound requests is maintained at the RPC protocol libp2p
        // layer and not needed here.
        // If any in queued state we submit a request.

        // remove any failed batches
        let debug_log = &self.log;
        self.import_queue.retain(|peer_id, block_request| {
            if let BlockRequestsState::Failed = block_request.state {
                debug!(debug_log, "Block import from peer failed";
                    "peer_id" => format!("{:?}", peer_id),
                    "downloaded_blocks" => block_request.downloaded_blocks.len()
                );
                false
            } else {
                true
            }
        });

        // process queued block requests
        for (peer_id, block_requests) in self.import_queue.iter_mut().find(|(_peer_id, req)| {
            req.state == BlockRequestsState::QueuedForward
                || req.state == BlockRequestsState::QueuedBackward
        }) {
            let request_id = self.current_req_id;
            block_requests.state = BlockRequestsState::Pending(request_id);
            self.current_req_id += 1;

            let request = BeaconBlocksRequest {
                head_block_root: block_requests.target_head_root,
                start_slot: block_requests
                    .next_start_slot()
                    .unwrap_or_else(|| self.chain.best_slot())
                    .as_u64(),
                count: MAX_BLOCKS_PER_REQUEST,
                step: 0,
            };
            return Some(ImportManagerOutcome::RequestBlocks {
                peer_id: peer_id.clone(),
                request,
                request_id,
            });
        }

        None
    }
    fn process_complete_batches(&mut self) -> Option<ImportManagerOutcome> {
        let completed_batches = self
            .import_queue
            .iter()
            .filter(|(_peer, block_requests)| block_requests.state == BlockRequestsState::Complete)
            .map(|(peer, _)| peer)
            .cloned()
            .collect::<Vec<PeerId>>();
        for peer_id in completed_batches {
            let block_requests = self.import_queue.remove(&peer_id).expect("key exists");
            match self.process_blocks(block_requests.downloaded_blocks.clone()) {
                Ok(()) => {
                    //TODO: Verify it's impossible to have empty downloaded_blocks
                    let last_element = block_requests.downloaded_blocks.len() - 1;
                    debug!(self.log, "Blocks processed successfully";
                        "peer" => format!("{:?}", peer_id),
                        "start_slot" => block_requests.downloaded_blocks[0].slot,
                        "end_slot" => block_requests.downloaded_blocks[last_element].slot,
                        "no_blocks" => last_element + 1,
                    );
                    // Re-HELLO to ensure we are up to the latest head
                    return Some(ImportManagerOutcome::Hello(peer_id));
                }
                Err(e) => {
                    let last_element = block_requests.downloaded_blocks.len() - 1;
                    warn!(self.log, "Block processing failed";
                        "peer" => format!("{:?}", peer_id),
                        "start_slot" => block_requests.downloaded_blocks[0].slot,
                        "end_slot" => block_requests.downloaded_blocks[last_element].slot,
                        "no_blocks" => last_element + 1,
                        "error" => format!("{:?}", e),
                    );
                    return Some(ImportManagerOutcome::DownvotePeer(peer_id));
                }
            }
        }
        None
    }
    fn process_parent_requests(&mut self) -> Option<ImportManagerOutcome> {
        // remove any failed requests
        let debug_log = &self.log;
        self.parent_queue.retain(|parent_request| {
            if parent_request.state == BlockRequestsState::Failed {
                debug!(debug_log, "Parent import failed";
                    "block" => format!("{:?}", parent_request.downloaded_blocks[0].canonical_root()),
                    "ancestors_found" => parent_request.downloaded_blocks.len()
                );
                false
            } else {
                true
            }
        });

        // check to make sure there are peers to search for the parent from
        if self.full_peers.is_empty() {
            return None;
        }

        // check if parents need to be searched for
        for parent_request in self.parent_queue.iter_mut() {
            if parent_request.failed_attempts >= PARENT_FAIL_TOLERANCE {
                parent_request.state = BlockRequestsState::Failed;
                continue;
            } else if parent_request.state == BlockRequestsState::QueuedForward {
                parent_request.state = BlockRequestsState::Pending(self.current_req_id);
                self.current_req_id += 1;
                let last_element_index = parent_request.downloaded_blocks.len() - 1;
                let parent_hash = parent_request.downloaded_blocks[last_element_index].parent_root;
                let req = RecentBeaconBlocksRequest {
                    block_roots: vec![parent_hash],
                };

                // select a random fully synced peer to attempt to download the parent block
                let peer_id = self.full_peers.iter().next().expect("List is not empty");

                return Some(ImportManagerOutcome::RecentRequest(peer_id.clone(), req));
            }
        }

        None
    }
    fn process_complete_parent_requests(&mut self) -> (bool, Option<ImportManagerOutcome>) {
        // flag to determine if there is more process to drive or if the manager can be switched to
        // an idle state
        let mut re_run = false;

        // Find any parent_requests ready to be processed
        for completed_request in self
            .parent_queue
            .iter_mut()
            .filter(|req| req.state == BlockRequestsState::Complete)
        {
            // verify the last added block is the parent of the last requested block
            let last_index = completed_request.downloaded_blocks.len() - 1;
            let expected_hash = completed_request.downloaded_blocks[last_index].parent_root;
            // Note: the length must be greater than 1 so this cannot panic.
            let block_hash = completed_request.downloaded_blocks[last_index - 1].canonical_root();
            if block_hash != expected_hash {
                // remove the head block
                let _ = completed_request.downloaded_blocks.pop();
                completed_request.state = BlockRequestsState::QueuedForward;
                //TODO: Potentially downvote the peer
                let peer = completed_request.last_submitted_peer.clone();
                debug!(self.log, "Peer sent invalid parent. Ignoring";
                    "peer_id" => format!("{:?}", peer),
                    "received_block" => format!("{}", block_hash),
                    "expected_parent" => format!("{}", expected_hash),
                );
                return (true, Some(ImportManagerOutcome::DownvotePeer(peer)));
            }

            // try and process the list of blocks up to the requested block
            while !completed_request.downloaded_blocks.is_empty() {
                let block = completed_request
                    .downloaded_blocks
                    .pop()
                    .expect("Block must exist");
                match self.chain.process_block(block.clone()) {
                    Ok(BlockProcessingOutcome::ParentUnknown { parent: _ }) => {
                        // need to keep looking for parents
                        completed_request.downloaded_blocks.push(block);
                        completed_request.state = BlockRequestsState::QueuedForward;
                        re_run = true;
                        break;
                    }
                    Ok(BlockProcessingOutcome::Processed { block_root: _ }) => {}
                    Ok(outcome) => {
                        // it's a future slot or an invalid block, remove it and try again
                        completed_request.failed_attempts += 1;
                        trace!(
                            self.log, "Invalid parent block";
                            "outcome" => format!("{:?}", outcome),
                            "peer" => format!("{:?}", completed_request.last_submitted_peer),
                        );
                        completed_request.state = BlockRequestsState::QueuedForward;
                        re_run = true;
                        return (
                            re_run,
                            Some(ImportManagerOutcome::DownvotePeer(
                                completed_request.last_submitted_peer.clone(),
                            )),
                        );
                    }
                    Err(e) => {
                        completed_request.failed_attempts += 1;
                        warn!(
                            self.log, "Parent processing error";
                            "error" => format!("{:?}", e)
                        );
                        completed_request.state = BlockRequestsState::QueuedForward;
                        re_run = true;
                        return (
                            re_run,
                            Some(ImportManagerOutcome::DownvotePeer(
                                completed_request.last_submitted_peer.clone(),
                            )),
                        );
                    }
                }
            }
        }

        // remove any fully completed and processed parent chains
        self.parent_queue.retain(|req| {
            if req.state == BlockRequestsState::Complete {
                false
            } else {
                true
            }
        });

        (re_run, None)
    }
    fn process_blocks(&mut self, blocks: Vec<BeaconBlock<T::EthSpec>>) -> Result<(), String> {
        for block in blocks {
            let processing_result = self.chain.process_block(block.clone());

            if let Ok(outcome) = processing_result {
                match outcome {
                    BlockProcessingOutcome::Processed { block_root } => {
                        // The block was valid and we processed it successfully.
                        trace!(
                            self.log, "Imported block from network";
                            "slot" => block.slot,
                            "block_root" => format!("{}", block_root),
                        );
                    }
                    BlockProcessingOutcome::ParentUnknown { parent } => {
                        // blocks should be sequential and all parents should exist
                        trace!(
                            self.log, "ParentBlockUnknown";
                            "parent_root" => format!("{}", parent),
                            "baby_block_slot" => block.slot,
                        );
                        return Err(format!(
                            "Block at slot {} has an unknown parent.",
                            block.slot
                        ));
                    }
                    BlockProcessingOutcome::FutureSlot {
                        present_slot,
                        block_slot,
                    } => {
                        if present_slot + FUTURE_SLOT_TOLERANCE >= block_slot {
                            // The block is too far in the future, drop it.
                            trace!(
                                self.log, "FutureBlock";
                                "msg" => "block for future slot rejected, check your time",
                                "present_slot" => present_slot,
                                "block_slot" => block_slot,
                                "FUTURE_SLOT_TOLERANCE" => FUTURE_SLOT_TOLERANCE,
                            );
                            return Err(format!(
                                "Block at slot {} is too far in the future",
                                block.slot
                            ));
                        } else {
                            // The block is in the future, but not too far.
                            trace!(
                                self.log, "QueuedFutureBlock";
                                "msg" => "queuing future block, check your time",
                                "present_slot" => present_slot,
                                "block_slot" => block_slot,
                                "FUTURE_SLOT_TOLERANCE" => FUTURE_SLOT_TOLERANCE,
                            );
                        }
                    }
                    _ => {
                        trace!(
                            self.log, "InvalidBlock";
                            "msg" => "peer sent invalid block",
                            "outcome" => format!("{:?}", outcome),
                        );
                        return Err(format!("Invalid block at slot {}", block.slot));
                    }
                }
            } else {
                trace!(
                    self.log, "BlockProcessingFailure";
                    "msg" => "unexpected condition in processing block.",
                    "outcome" => format!("{:?}", processing_result)
                );
                return Err(format!(
                    "Unexpected block processing error: {:?}",
                    processing_result
                ));
            }
        }
        Ok(())
    }
}
fn root_at_slot<T: BeaconChainTypes>(
    chain: Arc<BeaconChain<T>>,
    target_slot: Slot,
) -> Option<Hash256> {
    chain
        .rev_iter_block_roots()
        .find(|(_root, slot)| *slot == target_slot)
        .map(|(root, _slot)| root)
}
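
// A hedged sketch of the chain-membership check that beacon_blocks_response
// performs with this helper (illustration only, not part of this commit's API):
// a downloaded range is on our chain when the root our database stores at its
// earliest slot equals that block's canonical root.
#[allow(dead_code)]
fn on_our_chain_sketch<T: BeaconChainTypes>(
    chain: Arc<BeaconChain<T>>,
    earliest_block: &BeaconBlock<T::EthSpec>,
) -> bool {
    Some(earliest_block.canonical_root()) == root_at_slot(chain, earliest_block.slot)
}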