mirror of
https://github.com/sigp/lighthouse.git
synced 2026-04-19 13:58:28 +00:00
Monitoring service api (#2251)
## Issue Addressed N/A ## Proposed Changes Adds a client side api for collecting system and process metrics and pushing it to a monitoring service.
This commit is contained in:
@@ -76,11 +76,147 @@ pub struct ValidatorInclusionData {
|
||||
}
|
||||
|
||||
#[cfg(target_os = "linux")]
|
||||
use {procinfo::pid, psutil::process::Process};
|
||||
use {
|
||||
procinfo::pid, psutil::cpu::os::linux::CpuTimesExt,
|
||||
psutil::memory::os::linux::VirtualMemoryExt, psutil::process::Process,
|
||||
};
|
||||
|
||||
/// Reports on the health of the Lighthouse instance.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Health {
    /// System-wide metrics (memory, load, cpu, disk, network).
    #[serde(flatten)]
    pub system: SystemHealth,
    /// Metrics scoped to this particular process (pid, rss, cpu seconds).
    #[serde(flatten)]
    pub process: ProcessHealth,
}
|
||||
|
||||
/// System related health.
///
/// Populated from the `psutil` crate; only observable on Linux
/// (see [`SystemHealth::observe`]).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SystemHealth {
    /// Total virtual memory on the system
    pub sys_virt_mem_total: u64,
    /// Total virtual memory available for new processes.
    pub sys_virt_mem_available: u64,
    /// Total virtual memory used on the system.
    pub sys_virt_mem_used: u64,
    /// Total virtual memory not used on the system.
    pub sys_virt_mem_free: u64,
    /// Percentage of virtual memory used on the system.
    pub sys_virt_mem_percent: f32,
    /// Total cached virtual memory on the system.
    pub sys_virt_mem_cached: u64,
    /// Total buffered virtual memory on the system.
    pub sys_virt_mem_buffers: u64,

    /// System load average over 1 minute.
    pub sys_loadavg_1: f64,
    /// System load average over 5 minutes.
    pub sys_loadavg_5: f64,
    /// System load average over 15 minutes.
    pub sys_loadavg_15: f64,

    /// Total cpu cores.
    pub cpu_cores: u64,
    /// Total cpu threads.
    pub cpu_threads: u64,

    /// Total time spent in kernel mode.
    pub system_seconds_total: u64,
    /// Total time spent in user mode.
    pub user_seconds_total: u64,
    /// Total time spent in waiting for io.
    pub iowait_seconds_total: u64,
    /// Total idle cpu time.
    pub idle_seconds_total: u64,
    /// Total cpu time.
    pub cpu_time_total: u64,

    /// Total capacity of disk.
    pub disk_node_bytes_total: u64,
    /// Free space in disk.
    pub disk_node_bytes_free: u64,
    /// Number of disk reads.
    pub disk_node_reads_total: u64,
    /// Number of disk writes.
    pub disk_node_writes_total: u64,

    /// Total bytes received over all network interfaces.
    pub network_node_bytes_total_received: u64,
    /// Total bytes sent over all network interfaces.
    pub network_node_bytes_total_transmit: u64,

    /// Boot time, as seconds since the unix epoch.
    pub misc_node_boot_ts_seconds: u64,
    /// OS name, as reported by `std::env::consts::OS`.
    pub misc_os: String,
}
|
||||
|
||||
impl SystemHealth {
    /// Observing system health is only implemented for Linux; all other
    /// targets return a descriptive error.
    #[cfg(not(target_os = "linux"))]
    pub fn observe() -> Result<Self, String> {
        Err("Health is only available on Linux".into())
    }

    /// Takes a snapshot of system-wide metrics via `psutil`.
    ///
    /// Any probe failure (memory, loadavg, cpu, disk usage/counters,
    /// network counters, boot time) aborts the whole observation with
    /// an error string.
    #[cfg(target_os = "linux")]
    pub fn observe() -> Result<Self, String> {
        let vm = psutil::memory::virtual_memory()
            .map_err(|e| format!("Unable to get virtual memory: {:?}", e))?;
        let loadavg =
            psutil::host::loadavg().map_err(|e| format!("Unable to get loadavg: {:?}", e))?;

        let cpu =
            psutil::cpu::cpu_times().map_err(|e| format!("Unable to get cpu times: {:?}", e))?;

        // Disk usage is sampled for the root mount only.
        let disk_usage = psutil::disk::disk_usage("/")
            .map_err(|e| format!("Unable to disk usage info: {:?}", e))?;

        let disk = psutil::disk::DiskIoCountersCollector::default()
            .disk_io_counters()
            .map_err(|e| format!("Unable to get disk counters: {:?}", e))?;

        let net = psutil::network::NetIoCountersCollector::default()
            .net_io_counters()
            .map_err(|e| format!("Unable to get network io counters: {:?}", e))?;

        // Boot time is converted to seconds since the unix epoch.
        let boot_time = psutil::host::boot_time()
            .map_err(|e| format!("Unable to get system boot time: {:?}", e))?
            .duration_since(std::time::UNIX_EPOCH)
            .map_err(|e| format!("Boot time is lower than unix epoch: {}", e))?
            .as_secs();

        Ok(Self {
            sys_virt_mem_total: vm.total(),
            sys_virt_mem_available: vm.available(),
            sys_virt_mem_used: vm.used(),
            sys_virt_mem_free: vm.free(),
            sys_virt_mem_cached: vm.cached(),
            sys_virt_mem_buffers: vm.buffers(),
            sys_virt_mem_percent: vm.percent(),
            sys_loadavg_1: loadavg.one,
            sys_loadavg_5: loadavg.five,
            sys_loadavg_15: loadavg.fifteen,
            cpu_cores: psutil::cpu::cpu_count_physical(),
            cpu_threads: psutil::cpu::cpu_count(),
            // Durations are truncated to whole seconds.
            system_seconds_total: cpu.system().as_secs(),
            cpu_time_total: cpu.total().as_secs(),
            user_seconds_total: cpu.user().as_secs(),
            iowait_seconds_total: cpu.iowait().as_secs(),
            idle_seconds_total: cpu.idle().as_secs(),
            disk_node_bytes_total: disk_usage.total(),
            disk_node_bytes_free: disk_usage.free(),
            disk_node_reads_total: disk.read_count(),
            disk_node_writes_total: disk.write_count(),
            network_node_bytes_total_received: net.bytes_recv(),
            network_node_bytes_total_transmit: net.bytes_sent(),
            misc_node_boot_ts_seconds: boot_time,
            misc_os: std::env::consts::OS.to_string(),
        })
    }
}
|
||||
|
||||
/// Process specific health
|
||||
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
|
||||
pub struct ProcessHealth {
|
||||
/// The pid of this process.
|
||||
pub pid: u32,
|
||||
/// The number of threads used by this pid.
|
||||
@@ -89,25 +225,11 @@ pub struct Health {
|
||||
pub pid_mem_resident_set_size: u64,
|
||||
/// The total virtual memory used by this pid.
|
||||
pub pid_mem_virtual_memory_size: u64,
|
||||
/// Total virtual memory on the system
|
||||
pub sys_virt_mem_total: u64,
|
||||
/// Total virtual memory available for new processes.
|
||||
pub sys_virt_mem_available: u64,
|
||||
/// Total virtual memory used on the system
|
||||
pub sys_virt_mem_used: u64,
|
||||
/// Total virtual memory not used on the system
|
||||
pub sys_virt_mem_free: u64,
|
||||
/// Percentage of virtual memory used on the system
|
||||
pub sys_virt_mem_percent: f32,
|
||||
/// System load average over 1 minute.
|
||||
pub sys_loadavg_1: f64,
|
||||
/// System load average over 5 minutes.
|
||||
pub sys_loadavg_5: f64,
|
||||
/// System load average over 15 minutes.
|
||||
pub sys_loadavg_15: f64,
|
||||
/// Number of cpu seconds consumed by this pid.
|
||||
pub pid_process_seconds_total: u64,
|
||||
}
|
||||
|
||||
impl Health {
|
||||
impl ProcessHealth {
|
||||
#[cfg(not(target_os = "linux"))]
|
||||
pub fn observe() -> Result<Self, String> {
|
||||
Err("Health is only available on Linux".into())
|
||||
@@ -123,25 +245,33 @@ impl Health {
|
||||
.map_err(|e| format!("Unable to get process memory info: {:?}", e))?;
|
||||
|
||||
let stat = pid::stat_self().map_err(|e| format!("Unable to get stat: {:?}", e))?;
|
||||
|
||||
let vm = psutil::memory::virtual_memory()
|
||||
.map_err(|e| format!("Unable to get virtual memory: {:?}", e))?;
|
||||
let loadavg =
|
||||
psutil::host::loadavg().map_err(|e| format!("Unable to get loadavg: {:?}", e))?;
|
||||
let process_times = process
|
||||
.cpu_times()
|
||||
.map_err(|e| format!("Unable to get process cpu times : {:?}", e))?;
|
||||
|
||||
Ok(Self {
|
||||
pid: process.pid(),
|
||||
pid_num_threads: stat.num_threads,
|
||||
pid_mem_resident_set_size: process_mem.rss(),
|
||||
pid_mem_virtual_memory_size: process_mem.vms(),
|
||||
sys_virt_mem_total: vm.total(),
|
||||
sys_virt_mem_available: vm.available(),
|
||||
sys_virt_mem_used: vm.used(),
|
||||
sys_virt_mem_free: vm.free(),
|
||||
sys_virt_mem_percent: vm.percent(),
|
||||
sys_loadavg_1: loadavg.one,
|
||||
sys_loadavg_5: loadavg.five,
|
||||
sys_loadavg_15: loadavg.fifteen,
|
||||
pid_process_seconds_total: process_times.busy().as_secs()
|
||||
+ process_times.children_system().as_secs()
|
||||
+ process_times.children_system().as_secs(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl Health {
    /// Observing health is only implemented for Linux; all other targets
    /// return a descriptive error.
    #[cfg(not(target_os = "linux"))]
    pub fn observe() -> Result<Self, String> {
        Err("Health is only available on Linux".into())
    }

    /// Combines a process-level snapshot with a system-level snapshot.
    ///
    /// Note: the process probe runs first; if it fails the system probe
    /// is never attempted.
    #[cfg(target_os = "linux")]
    pub fn observe() -> Result<Self, String> {
        Ok(Self {
            process: ProcessHealth::observe()?,
            system: SystemHealth::observe()?,
        })
    }
}
|
||||
|
||||
@@ -59,6 +59,7 @@ use std::time::Duration;
|
||||
|
||||
use prometheus::core::{Atomic, GenericGauge, GenericGaugeVec};
|
||||
pub use prometheus::{
|
||||
proto::{Metric, MetricFamily, MetricType},
|
||||
Encoder, Gauge, GaugeVec, Histogram, HistogramVec, IntCounter, IntCounterVec, IntGauge,
|
||||
IntGaugeVec, Result, TextEncoder,
|
||||
};
|
||||
|
||||
24
common/monitoring_api/Cargo.toml
Normal file
24
common/monitoring_api/Cargo.toml
Normal file
@@ -0,0 +1,24 @@
|
||||
[package]
name = "monitoring_api"
version = "0.1.0"
authors = ["pawan <pawandhananjay@gmail.com>"]
edition = "2018"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
# HTTP client used to POST the collected metrics to the remote endpoint.
reqwest = { version = "0.11.0", features = ["json","stream"] }
futures = "0.3.7"
task_executor = { path = "../task_executor" }
tokio = "1.1.0"
# Provides the ProcessHealth/SystemHealth observation types.
eth2 = {path = "../eth2"}
serde_json = "1.0.58"
serde = "1.0.116"
serde_derive = "1.0.116"
lighthouse_version = { path = "../lighthouse_version"}
lighthouse_metrics = { path = "../lighthouse_metrics" }
slog = "2.5.2"
# Needed to scrape beacon database size metrics.
store = { path = "../../beacon_node/store" }
lazy_static = "1.4.0"
# Used to extract the semver portion of the version string.
regex = "1"
sensitive_url = { path = "../sensitive_url" }
|
||||
193
common/monitoring_api/src/gather.rs
Normal file
193
common/monitoring_api/src/gather.rs
Normal file
@@ -0,0 +1,193 @@
|
||||
use super::types::{BeaconProcessMetrics, ValidatorProcessMetrics};
|
||||
use lazy_static::lazy_static;
|
||||
use lighthouse_metrics::{MetricFamily, MetricType};
|
||||
use serde_json::json;
|
||||
use std::collections::HashMap;
|
||||
use std::path::Path;
|
||||
|
||||
/// Represents a metric that needs to be fetched from lighthouse metrics registry
/// and sent to the remote monitoring service.
#[derive(Debug, Clone)]
pub struct JsonMetric {
    /// Name of the metric as used in Lighthouse metrics.
    lighthouse_metric_name: &'static str,
    /// Json key for the metric that we send to the remote monitoring endpoint.
    json_output_key: &'static str,
    /// Type of the json value to be sent to the remote monitoring endpoint
    ty: JsonType,
}
|
||||
|
||||
impl JsonMetric {
|
||||
const fn new(
|
||||
lighthouse_metric_name: &'static str,
|
||||
json_output_key: &'static str,
|
||||
ty: JsonType,
|
||||
) -> Self {
|
||||
Self {
|
||||
lighthouse_metric_name,
|
||||
json_output_key,
|
||||
ty,
|
||||
}
|
||||
}
|
||||
|
||||
/// Return a json value given given the metric type.
|
||||
fn get_typed_value(&self, value: i64) -> serde_json::Value {
|
||||
match self.ty {
|
||||
JsonType::Integer => json!(value),
|
||||
JsonType::Boolean => {
|
||||
if value > 0 {
|
||||
json!(true)
|
||||
} else {
|
||||
json!(false)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// The required metrics for the beacon and validator processes.
///
/// Each entry maps a Prometheus metric name from the Lighthouse registry to
/// the JSON key (and type) expected by the remote monitoring service.
const BEACON_PROCESS_METRICS: &[JsonMetric] = &[
    JsonMetric::new(
        "sync_eth1_fallback_configured",
        "sync_eth1_fallback_configured",
        JsonType::Boolean,
    ),
    JsonMetric::new(
        "sync_eth1_fallback_connected",
        "sync_eth1_fallback_connected",
        JsonType::Boolean,
    ),
    JsonMetric::new(
        "sync_eth1_connected",
        "sync_eth1_connected",
        JsonType::Boolean,
    ),
    JsonMetric::new(
        "store_disk_db_size",
        "disk_beaconchain_bytes_total",
        JsonType::Integer,
    ),
    JsonMetric::new(
        "libp2p_peer_connected_peers_total",
        "network_peers_connected",
        JsonType::Integer,
    ),
    JsonMetric::new(
        "libp2p_outbound_bytes",
        "network_libp2p_bytes_total_transmit",
        JsonType::Integer,
    ),
    JsonMetric::new(
        "libp2p_inbound_bytes",
        "network_libp2p_bytes_total_receive",
        JsonType::Integer,
    ),
    JsonMetric::new(
        "notifier_head_slot",
        "sync_beacon_head_slot",
        JsonType::Integer,
    ),
    JsonMetric::new("sync_eth2_synced", "sync_eth2_synced", JsonType::Boolean),
];
|
||||
|
||||
/// The required metrics for the validator process (see `BEACON_PROCESS_METRICS`
/// for the mapping convention).
const VALIDATOR_PROCESS_METRICS: &[JsonMetric] = &[
    JsonMetric::new(
        "vc_validators_enabled_count",
        "validator_active",
        JsonType::Integer,
    ),
    JsonMetric::new(
        "vc_validators_total_count",
        "validator_total",
        JsonType::Integer,
    ),
    JsonMetric::new(
        "sync_eth2_fallback_configured",
        "sync_eth2_fallback_configured",
        JsonType::Boolean,
    ),
    JsonMetric::new(
        "sync_eth2_fallback_connected",
        "sync_eth2_fallback_connected",
        JsonType::Boolean,
    ),
];
|
||||
|
||||
/// Represents the type for the JSON output.
#[derive(Debug, Clone)]
pub enum JsonType {
    /// Emitted as a JSON number.
    Integer,
    /// Emitted as a JSON bool (`value > 0`).
    Boolean,
}
|
||||
|
||||
lazy_static! {
    /// HashMap representing the `BEACON_PROCESS_METRICS`, keyed by lighthouse metric name.
    pub static ref BEACON_METRICS_MAP: HashMap<String, JsonMetric> = BEACON_PROCESS_METRICS
        .iter()
        .map(|metric| (metric.lighthouse_metric_name.to_string(), metric.clone()))
        .collect();
    /// HashMap representing the `VALIDATOR_PROCESS_METRICS`, keyed by lighthouse metric name.
    pub static ref VALIDATOR_METRICS_MAP: HashMap<String,JsonMetric> =
        VALIDATOR_PROCESS_METRICS
            .iter()
            .map(|metric| (metric.lighthouse_metric_name.to_string(), metric.clone()))
            .collect();
}
|
||||
|
||||
/// Returns the value from a Counter/Gauge `MetricType` assuming that it has no associated labels
|
||||
/// else it returns `None`.
|
||||
fn get_value(mf: &MetricFamily) -> Option<i64> {
|
||||
let metric = mf.get_metric().first()?;
|
||||
match mf.get_field_type() {
|
||||
MetricType::COUNTER => Some(metric.get_counter().get_value() as i64),
|
||||
MetricType::GAUGE => Some(metric.get_gauge().get_value() as i64),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Collects all metrics and returns a `serde_json::Value` object with the required metrics
|
||||
/// from the metrics hashmap.
|
||||
pub fn gather_metrics(metrics_map: &HashMap<String, JsonMetric>) -> Option<serde_json::Value> {
|
||||
let metric_families = lighthouse_metrics::gather();
|
||||
let mut res = serde_json::Map::with_capacity(metrics_map.len());
|
||||
for mf in metric_families.iter() {
|
||||
let metric_name = mf.get_name();
|
||||
if metrics_map.contains_key(metric_name) {
|
||||
let value = get_value(&mf).unwrap_or_default();
|
||||
let metric = metrics_map.get(metric_name)?;
|
||||
let value = metric.get_typed_value(value);
|
||||
let _ = res.insert(metric.json_output_key.to_string(), value);
|
||||
};
|
||||
}
|
||||
Some(serde_json::Value::Object(res))
|
||||
}
|
||||
|
||||
/// Gathers and returns the lighthouse beacon metrics.
///
/// `db_path`/`freezer_db_path` are needed so the store can refresh its
/// database-size gauges before the registry is scraped.
pub fn gather_beacon_metrics(
    db_path: &Path,
    freezer_db_path: &Path,
) -> Result<BeaconProcessMetrics, String> {
    // Update db size metrics
    store::metrics::scrape_for_metrics(db_path, freezer_db_path);

    let beacon_metrics = gather_metrics(&BEACON_METRICS_MAP)
        .ok_or_else(|| "Failed to gather beacon metrics".to_string())?;
    // Process-level stats (pid, rss, cpu seconds) common to all process types.
    let process = eth2::lighthouse::ProcessHealth::observe()?.into();

    Ok(BeaconProcessMetrics {
        beacon: beacon_metrics,
        common: process,
    })
}
|
||||
|
||||
/// Gathers and returns the lighthouse validator metrics.
pub fn gather_validator_metrics() -> Result<ValidatorProcessMetrics, String> {
    let validator_metrics = gather_metrics(&VALIDATOR_METRICS_MAP)
        .ok_or_else(|| "Failed to gather validator metrics".to_string())?;

    // Process-level stats (pid, rss, cpu seconds) common to all process types.
    let process = eth2::lighthouse::ProcessHealth::observe()?.into();
    Ok(ValidatorProcessMetrics {
        validator: validator_metrics,
        common: process,
    })
}
|
||||
208
common/monitoring_api/src/lib.rs
Normal file
208
common/monitoring_api/src/lib.rs
Normal file
@@ -0,0 +1,208 @@
|
||||
mod gather;
|
||||
mod types;
|
||||
use std::{path::PathBuf, time::Duration};
|
||||
|
||||
use eth2::lighthouse::SystemHealth;
|
||||
use gather::{gather_beacon_metrics, gather_validator_metrics};
|
||||
use reqwest::{IntoUrl, Response};
|
||||
pub use reqwest::{StatusCode, Url};
|
||||
use sensitive_url::SensitiveUrl;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use slog::{debug, error, info};
|
||||
use task_executor::TaskExecutor;
|
||||
use tokio::time::{interval_at, Instant};
|
||||
use types::*;
|
||||
|
||||
pub use types::ProcessType;
|
||||
|
||||
/// Duration (in seconds) after which we collect and send metrics to remote endpoint.
pub const UPDATE_DURATION: u64 = 60;
/// Timeout (in seconds) for HTTP requests.
pub const TIMEOUT_DURATION: u64 = 5;
|
||||
|
||||
#[derive(Debug)]
pub enum Error {
    /// The `reqwest` client raised an error.
    Reqwest(reqwest::Error),
    /// The supplied URL is badly formatted. It should look something like `http://127.0.0.1:5052`.
    InvalidUrl(SensitiveUrl),
    /// Observing system-wide health failed.
    SystemMetricsFailed(String),
    /// Gathering beacon node metrics failed.
    BeaconMetricsFailed(String),
    /// Gathering validator client metrics failed.
    ValidatorMetricsFailed(String),
    /// The server returned an error message where the body was able to be parsed.
    ServerMessage(ErrorMessage),
    /// The server returned an error message where the body was unable to be parsed.
    StatusCode(StatusCode),
}
|
||||
|
||||
impl std::fmt::Display for Error {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match &self {
|
||||
Error::Reqwest(e) => write!(f, "Reqwest error: {}", e),
|
||||
// Print the debug value
|
||||
e => write!(f, "{:?}", e),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// User-facing configuration for the monitoring service client.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct Config {
    /// Endpoint to which metrics are POSTed.
    pub monitoring_endpoint: String,
    /// Path for the hot database required for fetching beacon db size metrics.
    /// Note: not relevant for validator and system metrics.
    pub db_path: Option<PathBuf>,
    /// Path for the cold database required for fetching beacon db size metrics.
    /// Note: not relevant for validator and system metrics.
    pub freezer_db_path: Option<PathBuf>,
}
|
||||
|
||||
/// Client that periodically gathers process/system metrics and pushes
/// them to a remote monitoring endpoint.
#[derive(Clone)]
pub struct MonitoringHttpClient {
    client: reqwest::Client,
    /// Path to the hot database. Required for getting db size metrics
    db_path: Option<PathBuf>,
    /// Path to the freezer database.
    freezer_db_path: Option<PathBuf>,
    /// Remote endpoint; wrapped in `SensitiveUrl` to avoid leaking it in logs.
    monitoring_endpoint: SensitiveUrl,
    log: slog::Logger,
}
|
||||
|
||||
impl MonitoringHttpClient {
|
||||
pub fn new(config: &Config, log: slog::Logger) -> Result<Self, String> {
|
||||
Ok(Self {
|
||||
client: reqwest::Client::new(),
|
||||
db_path: config.db_path.clone(),
|
||||
freezer_db_path: config.freezer_db_path.clone(),
|
||||
monitoring_endpoint: SensitiveUrl::parse(&config.monitoring_endpoint)
|
||||
.map_err(|e| format!("Invalid monitoring endpoint: {:?}", e))?,
|
||||
log,
|
||||
})
|
||||
}
|
||||
|
||||
/// Perform a HTTP POST request.
|
||||
async fn post<T: Serialize, U: IntoUrl>(&self, url: U, body: &T) -> Result<(), Error> {
|
||||
let response = self
|
||||
.client
|
||||
.post(url)
|
||||
.json(body)
|
||||
.timeout(Duration::from_secs(TIMEOUT_DURATION))
|
||||
.send()
|
||||
.await
|
||||
.map_err(Error::Reqwest)?;
|
||||
ok_or_error(response).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Creates a task which periodically sends the provided process metrics
|
||||
/// to the configured remote endpoint.
|
||||
pub fn auto_update(self, executor: TaskExecutor, processes: Vec<ProcessType>) {
|
||||
let mut interval = interval_at(
|
||||
// Have some initial delay for the metrics to get initialized
|
||||
Instant::now() + Duration::from_secs(25),
|
||||
Duration::from_secs(UPDATE_DURATION),
|
||||
);
|
||||
|
||||
info!(self.log, "Starting monitoring api"; "endpoint" => %self.monitoring_endpoint);
|
||||
|
||||
let update_future = async move {
|
||||
loop {
|
||||
interval.tick().await;
|
||||
match self.send_metrics(&processes).await {
|
||||
Ok(()) => {
|
||||
debug!(self.log, "Metrics sent to remote server"; "endpoint" => %self.monitoring_endpoint);
|
||||
}
|
||||
Err(e) => {
|
||||
error!(self.log, "Failed to send metrics to remote endpoint"; "error" => %e)
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
executor.spawn(update_future, "monitoring_api");
|
||||
}
|
||||
|
||||
/// Gets beacon metrics and updates the metrics struct
|
||||
pub fn get_beacon_metrics(&self) -> Result<MonitoringMetrics, Error> {
|
||||
let db_path = self.db_path.as_ref().ok_or_else(|| {
|
||||
Error::BeaconMetricsFailed("Beacon metrics require db path".to_string())
|
||||
})?;
|
||||
|
||||
let freezer_db_path = self.db_path.as_ref().ok_or_else(|| {
|
||||
Error::BeaconMetricsFailed("Beacon metrics require freezer db path".to_string())
|
||||
})?;
|
||||
let metrics = gather_beacon_metrics(&db_path, &freezer_db_path)
|
||||
.map_err(Error::BeaconMetricsFailed)?;
|
||||
Ok(MonitoringMetrics {
|
||||
metadata: Metadata::new(ProcessType::BeaconNode),
|
||||
process_metrics: Process::Beacon(metrics),
|
||||
})
|
||||
}
|
||||
|
||||
/// Gets validator process metrics by querying the validator metrics endpoint
|
||||
pub fn get_validator_metrics(&self) -> Result<MonitoringMetrics, Error> {
|
||||
let metrics = gather_validator_metrics().map_err(Error::BeaconMetricsFailed)?;
|
||||
Ok(MonitoringMetrics {
|
||||
metadata: Metadata::new(ProcessType::Validator),
|
||||
process_metrics: Process::Validator(metrics),
|
||||
})
|
||||
}
|
||||
|
||||
/// Gets system metrics by observing capturing the SystemHealth metrics.
|
||||
pub fn get_system_metrics(&self) -> Result<MonitoringMetrics, Error> {
|
||||
let system_health = SystemHealth::observe().map_err(Error::SystemMetricsFailed)?;
|
||||
Ok(MonitoringMetrics {
|
||||
metadata: Metadata::new(ProcessType::System),
|
||||
process_metrics: Process::System(system_health.into()),
|
||||
})
|
||||
}
|
||||
|
||||
/// Return metric based on process type.
|
||||
pub async fn get_metrics(
|
||||
&self,
|
||||
process_type: &ProcessType,
|
||||
) -> Result<MonitoringMetrics, Error> {
|
||||
match process_type {
|
||||
ProcessType::BeaconNode => self.get_beacon_metrics(),
|
||||
ProcessType::System => self.get_system_metrics(),
|
||||
ProcessType::Validator => self.get_validator_metrics(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Send metrics to the remote endpoint
|
||||
pub async fn send_metrics(&self, processes: &[ProcessType]) -> Result<(), Error> {
|
||||
let mut metrics = Vec::new();
|
||||
for process in processes {
|
||||
match self.get_metrics(process).await {
|
||||
Err(e) => error!(
|
||||
self.log,
|
||||
"Failed to get metrics";
|
||||
"process_type" => ?process,
|
||||
"error" => %e
|
||||
),
|
||||
Ok(metric) => metrics.push(metric),
|
||||
}
|
||||
}
|
||||
info!(
|
||||
self.log,
|
||||
"Sending metrics to remote endpoint";
|
||||
"endpoint" => %self.monitoring_endpoint
|
||||
);
|
||||
self.post(self.monitoring_endpoint.full.clone(), &metrics)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns `Ok(response)` if the response is a `200 OK` response. Otherwise, creates an
|
||||
/// appropriate error message.
|
||||
async fn ok_or_error(response: Response) -> Result<Response, Error> {
|
||||
let status = response.status();
|
||||
|
||||
if status == StatusCode::OK {
|
||||
Ok(response)
|
||||
} else if let Ok(message) = response.json().await {
|
||||
Err(Error::ServerMessage(message))
|
||||
} else {
|
||||
Err(Error::StatusCode(status))
|
||||
}
|
||||
}
|
||||
177
common/monitoring_api/src/types.rs
Normal file
177
common/monitoring_api/src/types.rs
Normal file
@@ -0,0 +1,177 @@
|
||||
use std::time::{SystemTime, UNIX_EPOCH};
|
||||
|
||||
use eth2::lighthouse::{ProcessHealth, SystemHealth};
|
||||
use serde_derive::{Deserialize, Serialize};
|
||||
|
||||
/// Version of the metrics payload format sent to the monitoring service.
pub const VERSION: u64 = 1;
/// Client name reported in each payload.
pub const CLIENT_NAME: &str = "lighthouse";
||||
|
||||
/// An API error serializable to JSON.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct ErrorMessage {
    /// HTTP-style status code.
    pub code: u16,
    /// Human-readable description of the error.
    pub message: String,
    /// Optional stacktraces; empty when the server omits them.
    #[serde(default)]
    pub stacktraces: Vec<String>,
}
|
||||
|
||||
/// A single payload entry: metadata plus the metrics for one process type.
/// Both halves are flattened into one JSON object.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct MonitoringMetrics {
    #[serde(flatten)]
    pub metadata: Metadata,
    #[serde(flatten)]
    pub process_metrics: Process,
}
|
||||
|
||||
/// The kind of process a payload describes.
/// Serialized in lowercase (e.g. "beaconnode", "validator", "system").
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum ProcessType {
    BeaconNode,
    Validator,
    System,
}
|
||||
|
||||
/// Payload metadata: format version, capture time and process type.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct Metadata {
    // Always `VERSION`.
    version: u64,
    // Milliseconds since the unix epoch.
    timestamp: u128,
    process: ProcessType,
}
|
||||
|
||||
impl Metadata {
|
||||
pub fn new(process: ProcessType) -> Self {
|
||||
Self {
|
||||
version: VERSION,
|
||||
timestamp: SystemTime::now()
|
||||
.duration_since(UNIX_EPOCH)
|
||||
.expect("time should be greater than unix epoch")
|
||||
.as_millis(),
|
||||
process,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Per-process-type metrics. `untagged` so the JSON is just the inner
/// object with no discriminant field.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(untagged)]
pub enum Process {
    Beacon(BeaconProcessMetrics),
    System(SystemMetrics),
    Validator(ValidatorProcessMetrics),
}
|
||||
|
||||
/// Common metrics for all processes.
#[derive(Debug, Default, Clone, PartialEq, Serialize, Deserialize)]
pub struct ProcessMetrics {
    // Cpu seconds consumed by the process.
    cpu_process_seconds_total: u64,
    // Resident set size, in bytes.
    memory_process_bytes: u64,

    // Always `CLIENT_NAME` / derived from `lighthouse_version`.
    client_name: String,
    client_version: String,
    // Always 0; Lighthouse has no build numbers (see `client_build`).
    client_build: u64,
}
|
||||
|
||||
impl From<ProcessHealth> for ProcessMetrics {
    /// Converts an observed process health snapshot into the remote
    /// monitoring payload shape, attaching client identity fields.
    fn from(health: ProcessHealth) -> Self {
        Self {
            cpu_process_seconds_total: health.pid_process_seconds_total,
            memory_process_bytes: health.pid_mem_resident_set_size,
            client_name: CLIENT_NAME.to_string(),
            client_version: client_version().unwrap_or_default(),
            client_build: client_build(),
        }
    }
}
|
||||
|
||||
/// Metrics related to the system.
///
/// Field names follow the remote monitoring service's expected JSON keys.
#[derive(Debug, Default, Clone, PartialEq, Serialize, Deserialize)]
pub struct SystemMetrics {
    cpu_cores: u64,
    cpu_threads: u64,
    cpu_node_system_seconds_total: u64,
    cpu_node_user_seconds_total: u64,
    cpu_node_iowait_seconds_total: u64,
    cpu_node_idle_seconds_total: u64,

    memory_node_bytes_total: u64,
    memory_node_bytes_free: u64,
    memory_node_bytes_cached: u64,
    memory_node_bytes_buffers: u64,

    disk_node_bytes_total: u64,
    disk_node_bytes_free: u64,

    // Always 0 for now (see `From<SystemHealth>`).
    disk_node_io_seconds: u64,
    disk_node_reads_total: u64,
    disk_node_writes_total: u64,

    network_node_bytes_total_receive: u64,
    network_node_bytes_total_transmit: u64,

    misc_node_boot_ts_seconds: u64,
    // Truncated 3-letter OS name, e.g. "lin".
    misc_os: String,
}
|
||||
|
||||
impl From<SystemHealth> for SystemMetrics {
    /// Maps an observed system health snapshot onto the remote service's
    /// field names.
    fn from(health: SystemHealth) -> Self {
        // Export format uses 3 letter os names
        let misc_os = health.misc_os.get(0..3).unwrap_or("unk").to_string();
        Self {
            cpu_cores: health.cpu_cores,
            cpu_threads: health.cpu_threads,
            // NOTE(review): maps `cpu_time_total` (all cpu time) rather than
            // `system_seconds_total` (kernel time) — confirm this matches
            // the remote service's definition of this key.
            cpu_node_system_seconds_total: health.cpu_time_total,
            cpu_node_user_seconds_total: health.user_seconds_total,
            cpu_node_iowait_seconds_total: health.iowait_seconds_total,
            cpu_node_idle_seconds_total: health.idle_seconds_total,

            memory_node_bytes_total: health.sys_virt_mem_total,
            memory_node_bytes_free: health.sys_virt_mem_free,
            memory_node_bytes_cached: health.sys_virt_mem_cached,
            memory_node_bytes_buffers: health.sys_virt_mem_buffers,

            disk_node_bytes_total: health.disk_node_bytes_total,
            disk_node_bytes_free: health.disk_node_bytes_free,

            // Unavailable for now
            disk_node_io_seconds: 0,
            disk_node_reads_total: health.disk_node_reads_total,
            disk_node_writes_total: health.disk_node_writes_total,

            network_node_bytes_total_receive: health.network_node_bytes_total_received,
            network_node_bytes_total_transmit: health.network_node_bytes_total_transmit,

            misc_node_boot_ts_seconds: health.misc_node_boot_ts_seconds,
            misc_os,
        }
    }
}
|
||||
|
||||
/// All beacon process metrics.
/// Both halves are flattened into a single JSON object.
#[derive(Debug, Default, Clone, PartialEq, Serialize, Deserialize)]
pub struct BeaconProcessMetrics {
    #[serde(flatten)]
    pub common: ProcessMetrics,
    /// Beacon-specific metrics gathered from the Prometheus registry.
    #[serde(flatten)]
    pub beacon: serde_json::Value,
}
|
||||
|
||||
/// All validator process metrics.
/// Both halves are flattened into a single JSON object.
#[derive(Debug, Default, Clone, PartialEq, Serialize, Deserialize)]
pub struct ValidatorProcessMetrics {
    #[serde(flatten)]
    pub common: ProcessMetrics,
    /// Validator-specific metrics gathered from the Prometheus registry.
    #[serde(flatten)]
    pub validator: serde_json::Value,
}
|
||||
|
||||
/// Returns the client version
|
||||
fn client_version() -> Option<String> {
|
||||
let re = regex::Regex::new(r"\d+\.\d+\.\d+").expect("Regex is valid");
|
||||
re.find(lighthouse_version::VERSION)
|
||||
.map(|m| m.as_str().to_string())
|
||||
}
|
||||
|
||||
/// Returns the client build.
/// Note: Lighthouse does not support build numbers, this is effectively a null-value.
fn client_build() -> u64 {
    0
}
|
||||
@@ -1,4 +1,4 @@
|
||||
use eth2::lighthouse::Health;
|
||||
use eth2::lighthouse::{ProcessHealth, SystemHealth};
|
||||
use lighthouse_metrics::*;
|
||||
|
||||
lazy_static::lazy_static! {
|
||||
@@ -14,6 +14,10 @@ lazy_static::lazy_static! {
|
||||
"process_virtual_memory_bytes",
|
||||
"Virtual memory used by the current process"
|
||||
);
|
||||
pub static ref PROCESS_SECONDS: Result<IntGauge> = try_create_int_gauge(
|
||||
"process_cpu_seconds_total",
|
||||
"Total cpu time taken by the current process"
|
||||
);
|
||||
pub static ref SYSTEM_VIRT_MEM_TOTAL: Result<IntGauge> =
|
||||
try_create_int_gauge("system_virt_mem_total_bytes", "Total system virtual memory");
|
||||
pub static ref SYSTEM_VIRT_MEM_AVAILABLE: Result<IntGauge> = try_create_int_gauge(
|
||||
@@ -24,6 +28,10 @@ lazy_static::lazy_static! {
|
||||
try_create_int_gauge("system_virt_mem_used_bytes", "Used system virtual memory");
|
||||
pub static ref SYSTEM_VIRT_MEM_FREE: Result<IntGauge> =
|
||||
try_create_int_gauge("system_virt_mem_free_bytes", "Free system virtual memory");
|
||||
pub static ref SYSTEM_VIRT_MEM_CACHED: Result<IntGauge> =
|
||||
try_create_int_gauge("system_virt_mem_cached_bytes", "Used system virtual memory");
|
||||
pub static ref SYSTEM_VIRT_MEM_BUFFERS: Result<IntGauge> =
|
||||
try_create_int_gauge("system_virt_mem_buffer_bytes", "Free system virtual memory");
|
||||
pub static ref SYSTEM_VIRT_MEM_PERCENTAGE: Result<Gauge> = try_create_float_gauge(
|
||||
"system_virt_mem_percentage",
|
||||
"Percentage of used virtual memory"
|
||||
@@ -34,15 +42,62 @@ lazy_static::lazy_static! {
|
||||
try_create_float_gauge("system_loadavg_5", "Loadavg over 5 minutes");
|
||||
pub static ref SYSTEM_LOADAVG_15: Result<Gauge> =
|
||||
try_create_float_gauge("system_loadavg_15", "Loadavg over 15 minutes");
|
||||
|
||||
pub static ref CPU_CORES: Result<IntGauge> =
|
||||
try_create_int_gauge("cpu_cores", "Number of physical cpu cores");
|
||||
pub static ref CPU_THREADS: Result<IntGauge> =
|
||||
try_create_int_gauge("cpu_threads", "Number of logical cpu cores");
|
||||
|
||||
pub static ref CPU_SYSTEM_SECONDS_TOTAL: Result<IntGauge> =
|
||||
try_create_int_gauge("cpu_system_seconds_total", "Total time spent in kernel mode");
|
||||
pub static ref CPU_USER_SECONDS_TOTAL: Result<IntGauge> =
|
||||
try_create_int_gauge("cpu_user_seconds_total", "Total time spent in user mode");
|
||||
pub static ref CPU_IOWAIT_SECONDS_TOTAL: Result<IntGauge> =
|
||||
try_create_int_gauge("cpu_iowait_seconds_total", "Total time spent waiting for io");
|
||||
pub static ref CPU_IDLE_SECONDS_TOTAL: Result<IntGauge> =
|
||||
try_create_int_gauge("cpu_idle_seconds_total", "Total time spent idle");
|
||||
|
||||
pub static ref DISK_BYTES_TOTAL: Result<IntGauge> =
|
||||
try_create_int_gauge("disk_node_bytes_total", "Total capacity of disk");
|
||||
|
||||
pub static ref DISK_BYTES_FREE: Result<IntGauge> =
|
||||
try_create_int_gauge("disk_node_bytes_free", "Free space in disk");
|
||||
|
||||
pub static ref DISK_READS: Result<IntGauge> =
|
||||
try_create_int_gauge("disk_node_reads_total", "Number of disk reads");
|
||||
|
||||
pub static ref DISK_WRITES: Result<IntGauge> =
|
||||
try_create_int_gauge("disk_node_writes_total", "Number of disk writes");
|
||||
|
||||
pub static ref NETWORK_BYTES_RECEIVED: Result<IntGauge> =
|
||||
try_create_int_gauge("network_node_bytes_total_received", "Total bytes received over all network interfaces");
|
||||
pub static ref NETWORK_BYTES_SENT: Result<IntGauge> =
|
||||
try_create_int_gauge("network_node_bytes_total_transmit", "Total bytes sent over all network interfaces");
|
||||
|
||||
pub static ref BOOT_TIME: Result<IntGauge> =
|
||||
try_create_int_gauge("misc_node_boot_ts_seconds", "Boot time as unix epoch timestamp");
|
||||
}
|
||||
|
||||
pub fn scrape_health_metrics() {
|
||||
scrape_process_health_metrics();
|
||||
scrape_system_health_metrics();
|
||||
}
|
||||
|
||||
pub fn scrape_process_health_metrics() {
|
||||
// This will silently fail if we are unable to observe the health. This is desired behaviour
|
||||
// since we don't support `Health` for all platforms.
|
||||
if let Ok(health) = Health::observe() {
|
||||
if let Ok(health) = ProcessHealth::observe() {
|
||||
set_gauge(&PROCESS_NUM_THREADS, health.pid_num_threads as i64);
|
||||
set_gauge(&PROCESS_RES_MEM, health.pid_mem_resident_set_size as i64);
|
||||
set_gauge(&PROCESS_VIRT_MEM, health.pid_mem_virtual_memory_size as i64);
|
||||
set_gauge(&PROCESS_SECONDS, health.pid_process_seconds_total as i64);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn scrape_system_health_metrics() {
|
||||
// This will silently fail if we are unable to observe the health. This is desired behaviour
|
||||
// since we don't support `Health` for all platforms.
|
||||
if let Ok(health) = SystemHealth::observe() {
|
||||
set_gauge(&SYSTEM_VIRT_MEM_TOTAL, health.sys_virt_mem_total as i64);
|
||||
set_gauge(
|
||||
&SYSTEM_VIRT_MEM_AVAILABLE,
|
||||
@@ -57,5 +112,34 @@ pub fn scrape_health_metrics() {
|
||||
set_float_gauge(&SYSTEM_LOADAVG_1, health.sys_loadavg_1);
|
||||
set_float_gauge(&SYSTEM_LOADAVG_5, health.sys_loadavg_5);
|
||||
set_float_gauge(&SYSTEM_LOADAVG_15, health.sys_loadavg_15);
|
||||
|
||||
set_gauge(&CPU_CORES, health.cpu_cores as i64);
|
||||
set_gauge(&CPU_THREADS, health.cpu_threads as i64);
|
||||
|
||||
set_gauge(
|
||||
&CPU_SYSTEM_SECONDS_TOTAL,
|
||||
health.system_seconds_total as i64,
|
||||
);
|
||||
set_gauge(&CPU_USER_SECONDS_TOTAL, health.user_seconds_total as i64);
|
||||
set_gauge(
|
||||
&CPU_IOWAIT_SECONDS_TOTAL,
|
||||
health.iowait_seconds_total as i64,
|
||||
);
|
||||
set_gauge(&CPU_IDLE_SECONDS_TOTAL, health.idle_seconds_total as i64);
|
||||
|
||||
set_gauge(&DISK_BYTES_TOTAL, health.disk_node_bytes_total as i64);
|
||||
|
||||
set_gauge(&DISK_BYTES_FREE, health.disk_node_bytes_free as i64);
|
||||
set_gauge(&DISK_READS, health.disk_node_reads_total as i64);
|
||||
set_gauge(&DISK_WRITES, health.disk_node_writes_total as i64);
|
||||
|
||||
set_gauge(
|
||||
&NETWORK_BYTES_RECEIVED,
|
||||
health.network_node_bytes_total_received as i64,
|
||||
);
|
||||
set_gauge(
|
||||
&NETWORK_BYTES_SENT,
|
||||
health.network_node_bytes_total_transmit as i64,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user