Apply clippy lints to beacon node

This commit is contained in:
Age Manning
2019-11-28 14:06:46 +11:00
parent 2bbac2ed18
commit 6f2fc7560a
11 changed files with 42 additions and 52 deletions

View File

@@ -290,11 +290,11 @@ fn eth1_block_hash_at_start_of_voting_period<T: EthSpec, S: Store>(
let slot = (state.slot / period) * period;
let prev_state_root = state
.get_state_root(slot)
.map_err(|e| Error::UnableToGetPreviousStateRoot(e))?;
.map_err(Error::UnableToGetPreviousStateRoot)?;
store
.get_state::<T>(&prev_state_root, Some(slot))
.map_err(|e| Error::StoreError(e))?
.map_err(Error::StoreError)?
.map(|state| state.eth1_data.block_hash)
.ok_or_else(|| Error::PreviousStateNotInDB)
}

View File

@@ -145,7 +145,7 @@ impl<T: BeaconChainTypes> ForkChoice<T> {
// Fast-forward the state to the start slot of the epoch where it was justified.
for _ in block.slot.as_u64()..block_justified_slot.as_u64() {
per_slot_processing(&mut state, &chain.spec)
.map_err(|e| BeaconChainError::SlotProcessingError(e))?
.map_err(BeaconChainError::SlotProcessingError)?
}
(state, block_root, block_justified_slot)

View File

@@ -210,7 +210,7 @@ impl<TSubstream: AsyncRead + AsyncWrite> Behaviour<TSubstream> {
/// Publishes a message on the pubsub (gossipsub) behaviour.
pub fn publish(&mut self, topics: &[Topic], message: PubsubMessage) {
let message_data = message.to_data();
let message_data = message.into_data();
for topic in topics {
self.gossipsub.publish(topic, message_data.clone());
}
@@ -295,7 +295,7 @@ impl PubsubMessage {
* Also note that a message can be associated with many topics. As soon as one of the topics is
* known we match. If none of the topics are known we return an unknown state.
*/
fn from_topics(topics: &Vec<TopicHash>, data: Vec<u8>) -> Self {
fn from_topics(topics: &[TopicHash], data: Vec<u8>) -> Self {
for topic in topics {
// compare the prefix and postfix, then match on the topic
let topic_parts: Vec<&str> = topic.as_str().split('/').collect();
@@ -316,7 +316,7 @@ impl PubsubMessage {
PubsubMessage::Unknown(data)
}
fn to_data(self) -> Vec<u8> {
fn into_data(self) -> Vec<u8> {
match self {
PubsubMessage::Block(data)
| PubsubMessage::Attestation(data)

View File

@@ -174,7 +174,6 @@ where
}
/// Opens an outbound substream with a request.
#[inline]
pub fn send_request(&mut self, rpc_event: RPCEvent) {
self.keep_alive = KeepAlive::Yes;
@@ -194,12 +193,10 @@ where
type OutboundProtocol = RPCRequest;
type OutboundOpenInfo = RPCEvent; // Keep track of the id and the request
#[inline]
fn listen_protocol(&self) -> SubstreamProtocol<Self::InboundProtocol> {
self.listen_protocol.clone()
}
#[inline]
fn inject_fully_negotiated_inbound(
&mut self,
out: <RPCProtocol as InboundUpgrade<TSubstream>>::Output,
@@ -226,7 +223,6 @@ where
self.current_substream_id += 1;
}
#[inline]
fn inject_fully_negotiated_outbound(
&mut self,
out: <RPCRequest as OutboundUpgrade<TSubstream>>::Output,
@@ -272,7 +268,6 @@ where
// Note: If the substream has closed due to inactivity, or the substream is in the
// wrong state a response will fail silently.
#[inline]
fn inject_event(&mut self, rpc_event: Self::InEvent) {
match rpc_event {
RPCEvent::Request(_, _) => self.send_request(rpc_event),
@@ -347,7 +342,6 @@ where
}
}
#[inline]
fn inject_dial_upgrade_error(
&mut self,
_: Self::OutboundOpenInfo,
@@ -360,7 +354,6 @@ where
}
}
#[inline]
fn connection_keep_alive(&self) -> KeepAlive {
self.keep_alive
}
@@ -412,7 +405,7 @@ where
// Drain all queued items until all messages have been processed for this stream
// TODO Improve this code logic
let mut new_items_to_send = true;
while new_items_to_send == true {
while new_items_to_send {
new_items_to_send = false;
match self.inbound_substreams.entry(request_id) {
Entry::Occupied(mut entry) => {
@@ -514,7 +507,7 @@ where
entry.get_mut().0 =
OutboundSubstreamState::RequestPendingResponse {
substream,
request: request,
request,
};
let delay_key = &entry.get().1;
self.outbound_substreams_delay

View File

@@ -22,8 +22,11 @@ use std::time::Duration;
use tokio::io::{AsyncRead, AsyncWrite};
pub(crate) mod codec;
#[allow(clippy::type_complexity)]
#[allow(clippy::cognitive_complexity)]
mod handler;
pub mod methods;
#[allow(clippy::type_complexity)]
mod protocol;
/// The return type used in the behaviour and the resultant event from the protocols handler.
@@ -77,7 +80,7 @@ impl<TSubstream> RPC<TSubstream> {
RPC {
events: Vec::new(),
marker: PhantomData,
log: log,
log,
}
}

View File

@@ -133,14 +133,7 @@ impl Service {
topics.push(topic_builder(ATTESTER_SLASHING_TOPIC));
// Add any topics specified by the user
topics.append(
&mut config
.topics
.iter()
.cloned()
.map(|s| Topic::new(s))
.collect(),
);
topics.append(&mut config.topics.iter().cloned().map(Topic::new).collect());
let mut subscribed_topics = vec![];
for topic in topics {

View File

@@ -1,5 +1,6 @@
/// This crate provides the network server for Lighthouse.
pub mod error;
#[allow(clippy::unit_arg)]
pub mod message_handler;
pub mod service;
pub mod sync;

View File

@@ -190,7 +190,7 @@ fn network_service(
} else {
trace!(log, "Propagating gossipsub message";
"propagation_peer" => format!("{:?}", propagation_source),
"message_id" => format!("{}", message_id),
"message_id" => message_id.to_string(),
);
libp2p_service
.lock()

View File

@@ -107,18 +107,18 @@ pub enum SyncMessage<T: EthSpec> {
BlocksByRangeResponse {
peer_id: PeerId,
request_id: RequestId,
beacon_block: Option<BeaconBlock<T>>,
beacon_block: Option<Box<BeaconBlock<T>>>,
},
/// A `BlocksByRoot` response has been received.
BlocksByRootResponse {
peer_id: PeerId,
request_id: RequestId,
beacon_block: Option<BeaconBlock<T>>,
beacon_block: Option<Box<BeaconBlock<T>>>,
},
/// A block with an unknown parent has been received.
UnknownBlock(PeerId, BeaconBlock<T>),
UnknownBlock(PeerId, Box<BeaconBlock<T>>),
/// A peer has sent an object that references a block that is unknown. This triggers the
/// manager to attempt to find the block matching the unknown hash.
@@ -520,7 +520,6 @@ impl<T: BeaconChainTypes> SyncManager<T> {
parent_request.failed_attempts += 1;
parent_request.state = BlockRequestsState::Queued;
parent_request.last_submitted_peer = peer_id;
return;
}
}
}
@@ -549,7 +548,7 @@ impl<T: BeaconChainTypes> SyncManager<T> {
BlockProcessingOutcome::Processed { block_root } => {
info!(self.log, "Processed block"; "block" => format!("{}", block_root));
}
BlockProcessingOutcome::ParentUnknown { parent: _ } => {
BlockProcessingOutcome::ParentUnknown { .. } => {
// We don't know of the block's parent, begin a parent lookup search
self.add_unknown_block(peer_id, block);
}
@@ -580,10 +579,10 @@ impl<T: BeaconChainTypes> SyncManager<T> {
// make sure this block is not already being searched for
// TODO: Potentially store a hashset of blocks for O(1) lookups
for parent_req in self.parent_queue.iter() {
if let Some(_) = parent_req
if parent_req
.downloaded_blocks
.iter()
.find(|d_block| d_block == &&block)
.any(|d_block| d_block == &block)
{
// we are already searching for this block, ignore it
return;
@@ -915,14 +914,14 @@ impl<T: BeaconChainTypes> SyncManager<T> {
// check if the chain exists
if let Some(chain) = self.chain.upgrade() {
match chain.process_block(block.clone()) {
Ok(BlockProcessingOutcome::ParentUnknown { parent: _ }) => {
Ok(BlockProcessingOutcome::ParentUnknown { .. }) => {
// need to keep looking for parents
completed_request.downloaded_blocks.push(block);
completed_request.state = BlockRequestsState::Queued;
re_run_poll = true;
break;
}
Ok(BlockProcessingOutcome::Processed { block_root: _ })
Ok(BlockProcessingOutcome::Processed { .. })
| Ok(BlockProcessingOutcome::BlockIsAlreadyKnown { .. }) => {}
Ok(outcome) => {
// it's a future slot or an invalid block, remove it and try again
@@ -965,13 +964,8 @@ impl<T: BeaconChainTypes> SyncManager<T> {
}
// remove any fully processed parent chains
self.parent_queue.retain(|req| {
if req.state == BlockRequestsState::ReadyToProcess {
false
} else {
true
}
});
self.parent_queue
.retain(|req| !(req.state == BlockRequestsState::ReadyToProcess));
re_run_poll
}
}
@@ -1172,17 +1166,21 @@ impl<T: BeaconChainTypes> Future for SyncManager<T> {
request_id,
beacon_block,
} => {
self.blocks_by_range_response(peer_id, request_id, beacon_block);
self.blocks_by_range_response(
peer_id,
request_id,
beacon_block.map(|b| *b),
);
}
SyncMessage::BlocksByRootResponse {
peer_id,
request_id,
beacon_block,
} => {
self.blocks_by_root_response(peer_id, request_id, beacon_block);
self.blocks_by_root_response(peer_id, request_id, beacon_block.map(|b| *b));
}
SyncMessage::UnknownBlock(peer_id, block) => {
self.add_unknown_block(peer_id, block);
self.add_unknown_block(peer_id, *block);
}
SyncMessage::UnknownBlockHash(peer_id, block_hash) => {
self.search_for_block(peer_id, block_hash);
@@ -1228,7 +1226,7 @@ impl<T: BeaconChainTypes> Future for SyncManager<T> {
}
// Shutdown the thread if the chain has terminated
if let None = self.chain.upgrade() {
if self.chain.upgrade().is_none() {
return Ok(Async::Ready(()));
}
@@ -1240,6 +1238,6 @@ impl<T: BeaconChainTypes> Future for SyncManager<T> {
// update the state of the manager
self.update_state();
return Ok(Async::NotReady);
Ok(Async::NotReady)
}
}

View File

@@ -396,6 +396,7 @@ impl<T: BeaconChainTypes> MessageProcessor<T> {
request_id: RequestId,
beacon_block: Option<BeaconBlock<T::EthSpec>>,
) {
let beacon_block = beacon_block.map(Box::new);
trace!(
self.log,
"Received BlocksByRange Response";
@@ -416,6 +417,7 @@ impl<T: BeaconChainTypes> MessageProcessor<T> {
request_id: RequestId,
beacon_block: Option<BeaconBlock<T::EthSpec>>,
) {
let beacon_block = beacon_block.map(Box::new);
trace!(
self.log,
"Received BlocksByRoot Response";
@@ -442,11 +444,11 @@ impl<T: BeaconChainTypes> MessageProcessor<T> {
"peer_id" => format!("{:?}",peer_id));
SHOULD_FORWARD_GOSSIP_BLOCK
}
BlockProcessingOutcome::ParentUnknown { parent: _ } => {
BlockProcessingOutcome::ParentUnknown { .. } => {
// Inform the sync manager to find parents for this block
trace!(self.log, "Block with unknown parent received";
"peer_id" => format!("{:?}",peer_id));
self.send_to_sync(SyncMessage::UnknownBlock(peer_id, block.clone()));
self.send_to_sync(SyncMessage::UnknownBlock(peer_id, Box::new(block.clone())));
SHOULD_FORWARD_GOSSIP_BLOCK
}
BlockProcessingOutcome::FutureSlot {

View File

@@ -110,9 +110,9 @@ fn process_testnet_subcommand(
if let Some(propagation_percentage_string) = cli_args.value_of("random-propagation") {
let percentage = propagation_percentage_string
.parse::<u8>()
.map_err(|_| format!("Unable to parse the propagation percentage"))?;
.map_err(|_| "Unable to parse the propagation percentage".to_string())?;
if percentage > 100 {
return Err(format!("Propagation percentage greater than 100"));
return Err("Propagation percentage greater than 100".to_string());
}
builder.client_config.network.propagation_percentage = Some(percentage);
}
@@ -255,7 +255,7 @@ fn process_testnet_subcommand(
client_config.eth1.deposit_contract_address =
"0x802dF6aAaCe28B2EEb1656bb18dF430dDC42cc2e".to_string();
client_config.eth1.deposit_contract_deploy_block = 1487270;
client_config.eth1.deposit_contract_deploy_block = 1_487_270;
client_config.eth1.follow_distance = 16;
client_config.dummy_eth1_backend = false;
@@ -608,7 +608,7 @@ impl ConfigBuilder {
/// The supplied `cli_args` should be the base-level `clap` cli_args (i.e., not a subcommand
/// cli_args).
pub fn build(mut self, cli_args: &ArgMatches) -> Result<Config> {
self.client_config.apply_cli_args(cli_args, &mut self.log)?;
self.client_config.apply_cli_args(cli_args, &self.log)?;
if let Some(bump) = cli_args.value_of("port-bump") {
let bump = bump