Fix clippy warnings (#813)

* Clippy account manager

* Clippy account_manager

* Clippy beacon_node/beacon_chain

* Clippy beacon_node/client

* Clippy beacon_node/eth1

* Clippy beacon_node/eth2-libp2p

* Clippy beacon_node/genesis

* Clippy beacon_node/network

* Clippy beacon_node/rest_api

* Clippy beacon_node/src

* Clippy beacon_node/store

* Clippy eth2/lmd_ghost

* Clippy eth2/operation_pool

* Clippy eth2/state_processing

* Clippy eth2/types

* Clippy eth2/utils/bls

* Clippy eth2/utils/cached_tree_hash

* Clippy eth2/utils/deposit_contract

* Clippy eth2/utils/eth2_interop_keypairs

* Clippy eth2/utils/eth2_testnet_config

* Clippy eth2/utils/lighthouse_metrics

* Clippy eth2/utils/ssz

* Clippy eth2/utils/ssz_types

* Clippy eth2/utils/tree_hash_derive

* Clippy lcli

* Clippy tests/beacon_chain_sim

* Clippy validator_client

* Cargo fmt
This commit is contained in:
pscott
2020-01-21 11:38:56 +04:00
committed by Age Manning
parent 1abb964652
commit 7396cd2cab
78 changed files with 387 additions and 416 deletions

View File

@@ -229,7 +229,7 @@ impl<T: BeaconChainTypes> MessageHandler<T> {
.on_block_gossip(peer_id.clone(), block);
// TODO: Apply more sophisticated validation and decoding logic
if should_forward_on {
self.propagate_message(id, peer_id.clone());
self.propagate_message(id, peer_id);
}
}
Err(e) => {

View File

@@ -203,7 +203,7 @@ impl<T: BeaconChainTypes> MessageProcessor<T> {
);
self.network
.disconnect(peer_id.clone(), GoodbyeReason::IrrelevantNetwork);
.disconnect(peer_id, GoodbyeReason::IrrelevantNetwork);
} else if remote.head_slot
> self.chain.slot().unwrap_or_else(|_| Slot::from(0u64)) + FUTURE_SLOT_TOLERANCE
{
@@ -219,7 +219,7 @@ impl<T: BeaconChainTypes> MessageProcessor<T> {
"reason" => "different system clocks or genesis time"
);
self.network
.disconnect(peer_id.clone(), GoodbyeReason::IrrelevantNetwork);
.disconnect(peer_id, GoodbyeReason::IrrelevantNetwork);
} else if remote.finalized_epoch <= local.finalized_epoch
&& remote.finalized_root != Hash256::zero()
&& local.finalized_root != Hash256::zero()
@@ -239,7 +239,7 @@ impl<T: BeaconChainTypes> MessageProcessor<T> {
"reason" => "different finalized chain"
);
self.network
.disconnect(peer_id.clone(), GoodbyeReason::IrrelevantNetwork);
.disconnect(peer_id, GoodbyeReason::IrrelevantNetwork);
} else if remote.finalized_epoch < local.finalized_epoch {
// The node has a lower finalized epoch, their chain is not useful to us. There are two
// cases where a node can have a lower finalized epoch:
@@ -512,7 +512,7 @@ impl<T: BeaconChainTypes> MessageProcessor<T> {
// Inform the sync manager to find parents for this block
trace!(self.log, "Block with unknown parent received";
"peer_id" => format!("{:?}",peer_id));
self.send_to_sync(SyncMessage::UnknownBlock(peer_id, Box::new(block.clone())));
self.send_to_sync(SyncMessage::UnknownBlock(peer_id, Box::new(block)));
SHOULD_FORWARD_GOSSIP_BLOCK
}
BlockProcessingOutcome::FutureSlot {

View File

@@ -263,7 +263,7 @@ fn network_service(
id,
source,
message,
topics: _,
..
} => {
message_handler_send
.try_send(HandlerMessage::PubsubMessage(id, source, message))

View File

@@ -66,7 +66,7 @@ impl SyncNetworkContext {
"count" => request.count,
"peer" => format!("{:?}", peer_id)
);
self.send_rpc_request(peer_id.clone(), RPCRequest::BlocksByRange(request))
self.send_rpc_request(peer_id, RPCRequest::BlocksByRange(request))
}
pub fn blocks_by_root_request(
@@ -81,7 +81,7 @@ impl SyncNetworkContext {
"count" => request.block_roots.len(),
"peer" => format!("{:?}", peer_id)
);
self.send_rpc_request(peer_id.clone(), RPCRequest::BlocksByRoot(request))
self.send_rpc_request(peer_id, RPCRequest::BlocksByRoot(request))
}
pub fn downvote_peer(&mut self, peer_id: PeerId) {
@@ -91,7 +91,7 @@ impl SyncNetworkContext {
"peer" => format!("{:?}", peer_id)
);
// TODO: Implement reputation
self.disconnect(peer_id.clone(), GoodbyeReason::Fault);
self.disconnect(peer_id, GoodbyeReason::Fault);
}
fn disconnect(&mut self, peer_id: PeerId, reason: GoodbyeReason) {

View File

@@ -62,16 +62,16 @@ impl<T: EthSpec> PendingBatches<T> {
let peer_request = batch.current_peer.clone();
self.peer_requests
.entry(peer_request)
.or_insert_with(|| HashSet::new())
.or_insert_with(HashSet::new)
.insert(request_id);
self.batches.insert(request_id, batch)
}
pub fn remove(&mut self, request_id: &RequestId) -> Option<Batch<T>> {
if let Some(batch) = self.batches.remove(request_id) {
pub fn remove(&mut self, request_id: RequestId) -> Option<Batch<T>> {
if let Some(batch) = self.batches.remove(&request_id) {
if let Entry::Occupied(mut entry) = self.peer_requests.entry(batch.current_peer.clone())
{
entry.get_mut().remove(request_id);
entry.get_mut().remove(&request_id);
if entry.get().is_empty() {
entry.remove();
@@ -85,8 +85,8 @@ impl<T: EthSpec> PendingBatches<T> {
/// Adds a block to the batches if the request id exists. Returns None if there is no batch
/// matching the request id.
pub fn add_block(&mut self, request_id: &RequestId, block: BeaconBlock<T>) -> Option<()> {
let batch = self.batches.get_mut(request_id)?;
pub fn add_block(&mut self, request_id: RequestId, block: BeaconBlock<T>) -> Option<()> {
let batch = self.batches.get_mut(&request_id)?;
batch.downloaded_blocks.push(block);
Some(())
}
@@ -101,7 +101,7 @@ impl<T: EthSpec> PendingBatches<T> {
pub fn remove_batch_by_peer(&mut self, peer_id: &PeerId) -> Option<Batch<T>> {
let request_ids = self.peer_requests.get(peer_id)?;
let request_id = request_ids.iter().next()?.clone();
self.remove(&request_id)
let request_id = *request_ids.iter().next()?;
self.remove(request_id)
}
}

View File

@@ -144,11 +144,11 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
) -> Option<ProcessingResult> {
if let Some(block) = beacon_block {
// This is not a stream termination, simply add the block to the request
self.pending_batches.add_block(&request_id, block.clone())?;
return Some(ProcessingResult::KeepChain);
self.pending_batches.add_block(request_id, block.clone())?;
Some(ProcessingResult::KeepChain)
} else {
// A stream termination has been sent. This batch has ended. Process a completed batch.
let batch = self.pending_batches.remove(&request_id)?;
let batch = self.pending_batches.remove(request_id)?;
Some(self.process_completed_batch(chain.clone(), network, batch, log))
}
}
@@ -433,7 +433,7 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
return true;
}
}
return false;
false
}
/// Returns a peer if there exists a peer which does not currently have a pending request.
@@ -500,10 +500,10 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
&mut self,
network: &mut SyncNetworkContext,
peer_id: &PeerId,
request_id: &RequestId,
request_id: RequestId,
log: &slog::Logger,
) -> Option<ProcessingResult> {
if let Some(batch) = self.pending_batches.remove(&request_id) {
if let Some(batch) = self.pending_batches.remove(request_id) {
warn!(log, "Batch failed. RPC Error"; "id" => batch.id, "retries" => batch.retries, "peer" => format!("{:?}", peer_id));
Some(self.failed_batch(network, batch, log))

View File

@@ -188,7 +188,7 @@ impl<T: BeaconChainTypes> RangeSync<T> {
debug!(self.log, "Adding peer to the existing head chain peer pool"; "head_root" => format!("{}",remote.head_root), "head_slot" => remote.head_slot, "peer_id" => format!("{:?}", peer_id));
// add the peer to the head's pool
chain.add_peer(network, peer_id.clone(), &self.log);
chain.add_peer(network, peer_id, &self.log);
} else {
// There are no other head chains that match this peer's status, create a new one, and
let start_slot = std::cmp::min(local_info.head_slot, remote_finalized_slot);
@@ -305,29 +305,28 @@ impl<T: BeaconChainTypes> RangeSync<T> {
/// retries. In this case, we need to remove the chain and re-status all the peers.
fn remove_peer(&mut self, network: &mut SyncNetworkContext, peer_id: &PeerId) {
let log_ref = &self.log;
match self.chains.head_finalized_request(|chain| {
if chain.peer_pool.remove(peer_id) {
// this chain contained the peer
while let Some(batch) = chain.pending_batches.remove_batch_by_peer(peer_id) {
if let ProcessingResult::RemoveChain =
chain.failed_batch(network, batch, log_ref)
{
// a single batch failed, remove the chain
return Some(ProcessingResult::RemoveChain);
if let Some((index, ProcessingResult::RemoveChain)) =
self.chains.head_finalized_request(|chain| {
if chain.peer_pool.remove(peer_id) {
// this chain contained the peer
while let Some(batch) = chain.pending_batches.remove_batch_by_peer(peer_id) {
if let ProcessingResult::RemoveChain =
chain.failed_batch(network, batch, log_ref)
{
// a single batch failed, remove the chain
return Some(ProcessingResult::RemoveChain);
}
}
// peer removed from chain, no batch failed
Some(ProcessingResult::KeepChain)
} else {
None
}
// peer removed from chain, no batch failed
Some(ProcessingResult::KeepChain)
} else {
None
}
}) {
Some((index, ProcessingResult::RemoveChain)) => {
// the chain needed to be removed
debug!(self.log, "Chain being removed due to failed batch");
self.chains.remove_chain(network, index, &self.log);
}
_ => {} // chain didn't need to be removed, ignore
})
{
// the chain needed to be removed
debug!(self.log, "Chain being removed due to failed batch");
self.chains.remove_chain(network, index, &self.log);
}
}
@@ -344,7 +343,7 @@ impl<T: BeaconChainTypes> RangeSync<T> {
// check that this request is pending
let log_ref = &self.log;
match self.chains.head_finalized_request(|chain| {
chain.inject_error(network, &peer_id, &request_id, log_ref)
chain.inject_error(network, &peer_id, request_id, log_ref)
}) {
Some((_, ProcessingResult::KeepChain)) => {} // error handled chain persists
Some((index, ProcessingResult::RemoveChain)) => {