mirror of
https://github.com/sigp/lighthouse.git
synced 2026-03-03 00:31:50 +00:00
* Add logging on shutdown
* Replace tokio::spawn with handle.spawn
* Upgrade tokio
* Add a task executor
* Beacon chain tasks use task executor
* Validator client tasks use task executor
* Rename runtime_handle to executor
* Add duration histograms; minor fixes
* Cleanup
* Fix logs
* Fix tests
* Remove random file
* Get enr dependency instead of libp2p
* Address some review comments
* Libp2p takes a TaskExecutor
* Ugly fix libp2p tests
* Move TaskExecutor to own file
* Upgrade Dockerfile rust version
* Minor fixes
* Revert "Ugly fix libp2p tests"
This reverts commit 58d4bb690f.
* Pretty fix libp2p tests
* Add spawn_without_exit; change Counter to Gauge
* Tidy
* Move log from RuntimeContext to TaskExecutor
* Fix errors
* Replace histogram with int_gauge for async tasks
* Fix todo
* Fix memory leak in test by exiting all spawned tasks at the end
87 lines · 3.4 KiB · Rust
use crate::{is_synced::is_synced, ProductionValidatorClient};
|
|
use futures::StreamExt;
|
|
use slog::{error, info};
|
|
use slot_clock::SlotClock;
|
|
use tokio::time::{interval_at, Duration, Instant};
|
|
use types::EthSpec;
|
|
|
|
/// Spawns a notifier service which periodically logs information about the node.
|
|
pub fn spawn_notifier<T: EthSpec>(client: &ProductionValidatorClient<T>) -> Result<(), String> {
|
|
let context = client.context.service_context("notifier".into());
|
|
let executor = context.executor.clone();
|
|
let duties_service = client.duties_service.clone();
|
|
let allow_unsynced_beacon_node = client.config.allow_unsynced_beacon_node;
|
|
|
|
let slot_duration = Duration::from_millis(context.eth2_config.spec.milliseconds_per_slot);
|
|
let duration_to_next_slot = duties_service
|
|
.slot_clock
|
|
.duration_to_next_slot()
|
|
.ok_or_else(|| "slot_notifier unable to determine time to next slot")?;
|
|
|
|
// Run the notifier half way through each slot.
|
|
let start_instant = Instant::now() + duration_to_next_slot + (slot_duration / 2);
|
|
let mut interval = interval_at(start_instant, slot_duration);
|
|
|
|
let interval_fut = async move {
|
|
let log = context.log();
|
|
|
|
while interval.next().await.is_some() {
|
|
if !is_synced(
|
|
&duties_service.beacon_node,
|
|
&duties_service.slot_clock,
|
|
Some(&log),
|
|
)
|
|
.await
|
|
&& !allow_unsynced_beacon_node
|
|
{
|
|
continue;
|
|
}
|
|
|
|
if let Some(slot) = duties_service.slot_clock.now() {
|
|
let epoch = slot.epoch(T::slots_per_epoch());
|
|
|
|
let total_validators = duties_service.total_validator_count();
|
|
let proposing_validators = duties_service.proposer_count(epoch);
|
|
let attesting_validators = duties_service.attester_count(epoch);
|
|
|
|
if total_validators == 0 {
|
|
error!(log, "No validators present")
|
|
} else if total_validators == attesting_validators {
|
|
info!(
|
|
log,
|
|
"All validators active";
|
|
"proposers" => proposing_validators,
|
|
"active_validators" => attesting_validators,
|
|
"total_validators" => total_validators,
|
|
"epoch" => format!("{}", epoch),
|
|
"slot" => format!("{}", slot),
|
|
);
|
|
} else if attesting_validators > 0 {
|
|
info!(
|
|
log,
|
|
"Some validators active";
|
|
"proposers" => proposing_validators,
|
|
"active_validators" => attesting_validators,
|
|
"total_validators" => total_validators,
|
|
"epoch" => format!("{}", epoch),
|
|
"slot" => format!("{}", slot),
|
|
);
|
|
} else {
|
|
info!(
|
|
log,
|
|
"Awaiting activation";
|
|
"validators" => total_validators,
|
|
"epoch" => format!("{}", epoch),
|
|
"slot" => format!("{}", slot),
|
|
);
|
|
}
|
|
} else {
|
|
error!(log, "Unable to read slot clock");
|
|
}
|
|
}
|
|
};
|
|
|
|
executor.spawn(interval_fut, "validator_notifier");
|
|
Ok(())
|
|
}
|