mirror of
https://github.com/sigp/lighthouse.git
synced 2026-03-06 18:21:45 +00:00
rust 1.53.0 updates (#2411)
## Issue Addressed `make lint` failing on rust 1.53.0. ## Proposed Changes 1.53.0 updates ## Additional Info I haven't figured out why yet, but we are now hitting the recursion limit in a few crates, so I had to add `#![recursion_limit = "256"]` in a few places. Co-authored-by: realbigsean <seananderson33@gmail.com> Co-authored-by: Michael Sproul <michael@sigmaprime.io>
This commit is contained in:
@@ -559,11 +559,10 @@ impl<T: BeaconChainTypes> AttestationService<T> {
|
||||
return;
|
||||
}
|
||||
// If there are no unsubscription events for `subnet_id`, we unsubscribe immediately.
|
||||
if self
|
||||
if !self
|
||||
.unsubscriptions
|
||||
.keys()
|
||||
.find(|s| s.subnet_id == subnet_id)
|
||||
.is_none()
|
||||
.any(|s| s.subnet_id == subnet_id)
|
||||
{
|
||||
// we are not at capacity, unsubscribe from the current subnet.
|
||||
debug!(self.log, "Unsubscribing from random subnet"; "subnet_id" => *subnet_id);
|
||||
@@ -601,11 +600,10 @@ impl<T: BeaconChainTypes> AttestationService<T> {
|
||||
|
||||
for subnet_id in to_remove_subnets {
|
||||
// If there are no unsubscription events for `subnet_id`, we unsubscribe immediately.
|
||||
if self
|
||||
if !self
|
||||
.unsubscriptions
|
||||
.keys()
|
||||
.find(|s| s.subnet_id == *subnet_id)
|
||||
.is_none()
|
||||
.any(|s| s.subnet_id == *subnet_id)
|
||||
{
|
||||
self.events
|
||||
.push_back(AttServiceMessage::Unsubscribe(*subnet_id));
|
||||
|
||||
@@ -147,10 +147,10 @@ async fn get_events<S: Stream<Item = AttServiceMessage> + Unpin>(
|
||||
};
|
||||
|
||||
tokio::select! {
|
||||
_ = collect_stream_fut => {return events}
|
||||
_ = collect_stream_fut => {events}
|
||||
_ = tokio::time::sleep(
|
||||
Duration::from_millis(SLOT_DURATION_MILLIS) * num_slots_before_timeout,
|
||||
) => { return events; }
|
||||
) => { events }
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -83,7 +83,7 @@ pub fn construct_upnp_mappings<T: EthSpec>(
|
||||
"tcp",
|
||||
&log,
|
||||
).and_then(|_| {
|
||||
let external_socket = external_ip.as_ref().map(|ip| SocketAddr::new(ip.clone().into(), config.tcp_port)).map_err(|_| ());
|
||||
let external_socket = external_ip.as_ref().map(|ip| SocketAddr::new((*ip).into(), config.tcp_port)).map_err(|_| ());
|
||||
info!(log, "UPnP TCP route established"; "external_socket" => format!("{}:{}", external_socket.as_ref().map(|ip| ip.to_string()).unwrap_or_else(|_| "".into()), config.tcp_port));
|
||||
external_socket
|
||||
}).ok();
|
||||
|
||||
@@ -933,10 +933,10 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
|
||||
// check if we have the batch for our optimistic start. If not, request it first.
|
||||
// We wait for this batch before requesting any other batches.
|
||||
if let Some(epoch) = self.optimistic_start {
|
||||
if !self.batches.contains_key(&epoch) {
|
||||
if let Entry::Vacant(entry) = self.batches.entry(epoch) {
|
||||
if let Some(peer) = idle_peers.pop() {
|
||||
let optimistic_batch = BatchInfo::new(&epoch, EPOCHS_PER_BATCH);
|
||||
self.batches.insert(epoch, optimistic_batch);
|
||||
entry.insert(optimistic_batch);
|
||||
self.send_batch(network, epoch, peer)?;
|
||||
}
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user