mirror of
https://github.com/sigp/lighthouse.git
synced 2026-03-14 02:12:33 +00:00
Implement VC API (#1657)
## Issue Addressed
NA
## Proposed Changes
- Implements a HTTP API for the validator client.
- Creates EIP-2335 keystores with an empty `description` field, instead of a missing `description` field. Adds option to set name.
- Be more graceful with setups without any validators (yet)
- Remove an error log when there are no validators.
- Create the `validator` dir if it doesn't exist.
- Allow building a `ValidatorDir` without a withdrawal keystore (required for the API method where we only post a voting keystore).
- Add optional `description` field to `validator_definitions.yml`
## TODO
- [x] Signature header, as per https://github.com/sigp/lighthouse/issues/1269#issuecomment-649879855
- [x] Return validator descriptions
- [x] Return deposit data
- [x] Respect the mnemonic offset
- [x] Check that mnemonic can derive returned keys
- [x] Be strict about non-localhost
- [x] Allow graceful start without any validators (+ create validator dir)
- [x] Docs final pass
- [x] Swap to EIP-2335 description field.
- [x] Fix Zerioze TODO in VC api types.
- [x] Zeroize secp256k1 key
## Endpoints
- [x] `GET /lighthouse/version`
- [x] `GET /lighthouse/health`
- [x] `GET /lighthouse/validators`
- [x] `POST /lighthouse/validators/hd`
- [x] `POST /lighthouse/validators/keystore`
- [x] `PATCH /lighthouse/validators/:validator_pubkey`
- [ ] ~~`POST /lighthouse/validators/:validator_pubkey/exit/:epoch`~~ Future works
## Additional Info
TBC
This commit is contained in:
@@ -10,6 +10,8 @@ path = "src/lib.rs"
|
||||
|
||||
[dev-dependencies]
|
||||
tokio = { version = "0.2.21", features = ["time", "rt-threaded", "macros"] }
|
||||
tempfile = "3.1.0"
|
||||
deposit_contract = { path = "../common/deposit_contract" }
|
||||
|
||||
[dependencies]
|
||||
eth2_ssz = "0.1.2"
|
||||
@@ -47,3 +49,11 @@ validator_dir = { path = "../common/validator_dir" }
|
||||
clap_utils = { path = "../common/clap_utils" }
|
||||
eth2_keystore = { path = "../crypto/eth2_keystore" }
|
||||
account_utils = { path = "../common/account_utils" }
|
||||
lighthouse_version = { path = "../common/lighthouse_version" }
|
||||
warp_utils = { path = "../common/warp_utils" }
|
||||
warp = "0.2.5"
|
||||
hyper = "0.13.5"
|
||||
serde_utils = { path = "../consensus/serde_utils" }
|
||||
libsecp256k1 = "0.3.5"
|
||||
ring = "0.16.12"
|
||||
rand = "0.7.3"
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
use crate::config::DEFAULT_HTTP_SERVER;
|
||||
use crate::config::DEFAULT_BEACON_NODE;
|
||||
use clap::{App, Arg};
|
||||
|
||||
pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
|
||||
@@ -8,13 +8,22 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
|
||||
"When connected to a beacon node, performs the duties of a staked \
|
||||
validator (e.g., proposing blocks and attestations).",
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("beacon-node")
|
||||
.long("beacon-node")
|
||||
.value_name("NETWORK_ADDRESS")
|
||||
.help("Address to a beacon node HTTP API")
|
||||
.default_value(&DEFAULT_BEACON_NODE)
|
||||
.takes_value(true),
|
||||
)
|
||||
// This argument is deprecated, use `--beacon-node` instead.
|
||||
.arg(
|
||||
Arg::with_name("server")
|
||||
.long("server")
|
||||
.value_name("NETWORK_ADDRESS")
|
||||
.help("Address to connect to BeaconNode.")
|
||||
.default_value(&DEFAULT_HTTP_SERVER)
|
||||
.takes_value(true),
|
||||
.help("Deprecated. Use --beacon-node.")
|
||||
.takes_value(true)
|
||||
.conflicts_with("beacon-node"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("validators-dir")
|
||||
@@ -97,4 +106,40 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
|
||||
.value_name("GRAFFITI")
|
||||
.takes_value(true)
|
||||
)
|
||||
/* REST API related arguments */
|
||||
.arg(
|
||||
Arg::with_name("http")
|
||||
.long("http")
|
||||
.help("Enable the RESTful HTTP API server. Disabled by default.")
|
||||
.takes_value(false),
|
||||
)
|
||||
/*
|
||||
* Note: there is purposefully no `--http-address` flag provided.
|
||||
*
|
||||
* The HTTP server is **not** encrypted (i.e., not HTTPS) and therefore it is unsafe to
|
||||
* publish on a public network.
|
||||
*
|
||||
* We restrict the user to `127.0.0.1` and they must provide some other transport-layer
|
||||
* encryption (e.g., SSH tunnels).
|
||||
*/
|
||||
.arg(
|
||||
Arg::with_name("http-port")
|
||||
.long("http-port")
|
||||
.value_name("PORT")
|
||||
.help("Set the listen TCP port for the RESTful HTTP API server. This server does **not** \
|
||||
provide encryption and is completely unsuitable to expose to a public network. \
|
||||
We do not provide a --http-address flag and restrict the user to listening on \
|
||||
127.0.0.1. For access via the Internet, apply a transport-layer security like \
|
||||
a HTTPS reverse-proxy or SSH tunnelling.")
|
||||
.default_value("5062")
|
||||
.takes_value(true),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("http-allow-origin")
|
||||
.long("http-allow-origin")
|
||||
.value_name("ORIGIN")
|
||||
.help("Set the value of the Access-Control-Allow-Origin response HTTP header. Use * to allow any origin (not recommended in production)")
|
||||
.default_value("")
|
||||
.takes_value(true),
|
||||
)
|
||||
}
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
use crate::http_api;
|
||||
use clap::ArgMatches;
|
||||
use clap_utils::{parse_optional, parse_required};
|
||||
use directory::{
|
||||
@@ -6,10 +7,12 @@ use directory::{
|
||||
};
|
||||
use eth2::types::Graffiti;
|
||||
use serde_derive::{Deserialize, Serialize};
|
||||
use slog::{warn, Logger};
|
||||
use std::fs;
|
||||
use std::path::PathBuf;
|
||||
use types::GRAFFITI_BYTES_LEN;
|
||||
|
||||
pub const DEFAULT_HTTP_SERVER: &str = "http://localhost:5052/";
|
||||
pub const DEFAULT_BEACON_NODE: &str = "http://localhost:5052/";
|
||||
|
||||
/// Stores the core configuration for this validator instance.
|
||||
#[derive(Clone, Serialize, Deserialize)]
|
||||
@@ -21,7 +24,7 @@ pub struct Config {
|
||||
/// The http endpoint of the beacon node API.
|
||||
///
|
||||
/// Should be similar to `http://localhost:8080`
|
||||
pub http_server: String,
|
||||
pub beacon_node: String,
|
||||
/// If true, the validator client will still poll for duties and produce blocks even if the
|
||||
/// beacon node is not synced at startup.
|
||||
pub allow_unsynced_beacon_node: bool,
|
||||
@@ -33,6 +36,8 @@ pub struct Config {
|
||||
pub strict_slashing_protection: bool,
|
||||
/// Graffiti to be inserted everytime we create a block.
|
||||
pub graffiti: Option<Graffiti>,
|
||||
/// Configuration for the HTTP REST API.
|
||||
pub http_api: http_api::Config,
|
||||
}
|
||||
|
||||
impl Default for Config {
|
||||
@@ -49,12 +54,13 @@ impl Default for Config {
|
||||
Self {
|
||||
validator_dir,
|
||||
secrets_dir,
|
||||
http_server: DEFAULT_HTTP_SERVER.to_string(),
|
||||
beacon_node: DEFAULT_BEACON_NODE.to_string(),
|
||||
allow_unsynced_beacon_node: false,
|
||||
delete_lockfiles: false,
|
||||
disable_auto_discover: false,
|
||||
strict_slashing_protection: false,
|
||||
graffiti: None,
|
||||
http_api: <_>::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -62,7 +68,7 @@ impl Default for Config {
|
||||
impl Config {
|
||||
/// Returns a `Default` implementation of `Self` with some parameters modified by the supplied
|
||||
/// `cli_args`.
|
||||
pub fn from_cli(cli_args: &ArgMatches) -> Result<Config, String> {
|
||||
pub fn from_cli(cli_args: &ArgMatches, log: &Logger) -> Result<Config, String> {
|
||||
let mut config = Config::default();
|
||||
|
||||
let default_root_dir = dirs::home_dir()
|
||||
@@ -95,14 +101,22 @@ impl Config {
|
||||
});
|
||||
|
||||
if !config.validator_dir.exists() {
|
||||
return Err(format!(
|
||||
"The directory for validator data does not exist: {:?}",
|
||||
config.validator_dir
|
||||
));
|
||||
fs::create_dir_all(&config.validator_dir)
|
||||
.map_err(|e| format!("Failed to create {:?}: {:?}", config.validator_dir, e))?;
|
||||
}
|
||||
|
||||
if let Some(beacon_node) = parse_optional(cli_args, "beacon-node")? {
|
||||
config.beacon_node = beacon_node;
|
||||
}
|
||||
|
||||
// To be deprecated.
|
||||
if let Some(server) = parse_optional(cli_args, "server")? {
|
||||
config.http_server = server;
|
||||
warn!(
|
||||
log,
|
||||
"The --server flag is deprecated";
|
||||
"msg" => "please use --beacon-node instead"
|
||||
);
|
||||
config.beacon_node = server;
|
||||
}
|
||||
|
||||
config.allow_unsynced_beacon_node = cli_args.is_present("allow-unsynced");
|
||||
@@ -129,6 +143,29 @@ impl Config {
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Http API server
|
||||
*/
|
||||
|
||||
if cli_args.is_present("http") {
|
||||
config.http_api.enabled = true;
|
||||
}
|
||||
|
||||
if let Some(port) = cli_args.value_of("http-port") {
|
||||
config.http_api.listen_port = port
|
||||
.parse::<u16>()
|
||||
.map_err(|_| "http-port is not a valid u16.")?;
|
||||
}
|
||||
|
||||
if let Some(allow_origin) = cli_args.value_of("http-allow-origin") {
|
||||
// Pre-validate the config value to give feedback to the user on node startup, instead of
|
||||
// as late as when the first API response is produced.
|
||||
hyper::header::HeaderValue::from_str(allow_origin)
|
||||
.map_err(|_| "Invalid allow-origin value")?;
|
||||
|
||||
config.http_api.allow_origin = Some(allow_origin.to_string());
|
||||
}
|
||||
|
||||
Ok(config)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2,31 +2,32 @@ use environment::RuntimeContext;
|
||||
use eth2::{types::StateId, BeaconNodeHttpClient};
|
||||
use futures::StreamExt;
|
||||
use parking_lot::RwLock;
|
||||
use slog::Logger;
|
||||
use slog::{debug, trace};
|
||||
use slot_clock::SlotClock;
|
||||
use std::ops::Deref;
|
||||
use std::sync::Arc;
|
||||
use tokio::time::{interval_at, Duration, Instant};
|
||||
use types::{ChainSpec, EthSpec, Fork};
|
||||
use types::{EthSpec, Fork};
|
||||
|
||||
/// Delay this period of time after the slot starts. This allows the node to process the new slot.
|
||||
const TIME_DELAY_FROM_SLOT: Duration = Duration::from_millis(80);
|
||||
|
||||
/// Builds a `ForkService`.
|
||||
pub struct ForkServiceBuilder<T, E: EthSpec> {
|
||||
pub struct ForkServiceBuilder<T> {
|
||||
fork: Option<Fork>,
|
||||
slot_clock: Option<T>,
|
||||
beacon_node: Option<BeaconNodeHttpClient>,
|
||||
context: Option<RuntimeContext<E>>,
|
||||
log: Option<Logger>,
|
||||
}
|
||||
|
||||
impl<T: SlotClock + 'static, E: EthSpec> ForkServiceBuilder<T, E> {
|
||||
impl<T: SlotClock + 'static> ForkServiceBuilder<T> {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
fork: None,
|
||||
slot_clock: None,
|
||||
beacon_node: None,
|
||||
context: None,
|
||||
log: None,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -40,12 +41,12 @@ impl<T: SlotClock + 'static, E: EthSpec> ForkServiceBuilder<T, E> {
|
||||
self
|
||||
}
|
||||
|
||||
pub fn runtime_context(mut self, context: RuntimeContext<E>) -> Self {
|
||||
self.context = Some(context);
|
||||
pub fn log(mut self, log: Logger) -> Self {
|
||||
self.log = Some(log);
|
||||
self
|
||||
}
|
||||
|
||||
pub fn build(self) -> Result<ForkService<T, E>, String> {
|
||||
pub fn build(self) -> Result<ForkService<T>, String> {
|
||||
Ok(ForkService {
|
||||
inner: Arc::new(Inner {
|
||||
fork: RwLock::new(self.fork),
|
||||
@@ -55,28 +56,48 @@ impl<T: SlotClock + 'static, E: EthSpec> ForkServiceBuilder<T, E> {
|
||||
beacon_node: self
|
||||
.beacon_node
|
||||
.ok_or_else(|| "Cannot build ForkService without beacon_node")?,
|
||||
context: self
|
||||
.context
|
||||
.ok_or_else(|| "Cannot build ForkService without runtime_context")?,
|
||||
log: self
|
||||
.log
|
||||
.ok_or_else(|| "Cannot build ForkService without logger")?
|
||||
.clone(),
|
||||
}),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
#[allow(dead_code)]
|
||||
impl ForkServiceBuilder<slot_clock::TestingSlotClock> {
|
||||
pub fn testing_only(log: Logger) -> Self {
|
||||
Self {
|
||||
fork: Some(types::Fork::default()),
|
||||
slot_clock: Some(slot_clock::TestingSlotClock::new(
|
||||
types::Slot::new(0),
|
||||
std::time::Duration::from_secs(42),
|
||||
std::time::Duration::from_secs(42),
|
||||
)),
|
||||
beacon_node: Some(eth2::BeaconNodeHttpClient::new(
|
||||
eth2::Url::parse("http://127.0.0.1").unwrap(),
|
||||
)),
|
||||
log: Some(log),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Helper to minimise `Arc` usage.
|
||||
pub struct Inner<T, E: EthSpec> {
|
||||
pub struct Inner<T> {
|
||||
fork: RwLock<Option<Fork>>,
|
||||
beacon_node: BeaconNodeHttpClient,
|
||||
context: RuntimeContext<E>,
|
||||
log: Logger,
|
||||
slot_clock: T,
|
||||
}
|
||||
|
||||
/// Attempts to download the `Fork` struct from the beacon node at the start of each epoch.
|
||||
pub struct ForkService<T, E: EthSpec> {
|
||||
inner: Arc<Inner<T, E>>,
|
||||
pub struct ForkService<T> {
|
||||
inner: Arc<Inner<T>>,
|
||||
}
|
||||
|
||||
impl<T, E: EthSpec> Clone for ForkService<T, E> {
|
||||
impl<T> Clone for ForkService<T> {
|
||||
fn clone(&self) -> Self {
|
||||
Self {
|
||||
inner: self.inner.clone(),
|
||||
@@ -84,22 +105,27 @@ impl<T, E: EthSpec> Clone for ForkService<T, E> {
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, E: EthSpec> Deref for ForkService<T, E> {
|
||||
type Target = Inner<T, E>;
|
||||
impl<T> Deref for ForkService<T> {
|
||||
type Target = Inner<T>;
|
||||
|
||||
fn deref(&self) -> &Self::Target {
|
||||
self.inner.deref()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: SlotClock + 'static, E: EthSpec> ForkService<T, E> {
|
||||
impl<T: SlotClock + 'static> ForkService<T> {
|
||||
/// Returns the last fork downloaded from the beacon node, if any.
|
||||
pub fn fork(&self) -> Option<Fork> {
|
||||
*self.fork.read()
|
||||
}
|
||||
|
||||
/// Starts the service that periodically polls for the `Fork`.
|
||||
pub fn start_update_service(self, spec: &ChainSpec) -> Result<(), String> {
|
||||
pub fn start_update_service<E: EthSpec>(
|
||||
self,
|
||||
context: &RuntimeContext<E>,
|
||||
) -> Result<(), String> {
|
||||
let spec = &context.eth2_config.spec;
|
||||
|
||||
let duration_to_next_epoch = self
|
||||
.slot_clock
|
||||
.duration_to_next_epoch(E::slots_per_epoch())
|
||||
@@ -115,13 +141,12 @@ impl<T: SlotClock + 'static, E: EthSpec> ForkService<T, E> {
|
||||
};
|
||||
|
||||
// Run an immediate update before starting the updater service.
|
||||
self.inner
|
||||
.context
|
||||
context
|
||||
.executor
|
||||
.runtime_handle()
|
||||
.spawn(self.clone().do_update());
|
||||
|
||||
let executor = self.inner.context.executor.clone();
|
||||
let executor = context.executor.clone();
|
||||
|
||||
let interval_fut = async move {
|
||||
while interval.next().await.is_some() {
|
||||
@@ -136,8 +161,6 @@ impl<T: SlotClock + 'static, E: EthSpec> ForkService<T, E> {
|
||||
|
||||
/// Attempts to download the `Fork` from the server.
|
||||
async fn do_update(self) -> Result<(), ()> {
|
||||
let log = self.context.log();
|
||||
|
||||
let fork = self
|
||||
.inner
|
||||
.beacon_node
|
||||
@@ -145,14 +168,14 @@ impl<T: SlotClock + 'static, E: EthSpec> ForkService<T, E> {
|
||||
.await
|
||||
.map_err(|e| {
|
||||
trace!(
|
||||
log,
|
||||
self.log,
|
||||
"Fork update failed";
|
||||
"error" => format!("Error retrieving fork: {:?}", e)
|
||||
)
|
||||
})?
|
||||
.ok_or_else(|| {
|
||||
trace!(
|
||||
log,
|
||||
self.log,
|
||||
"Fork update failed";
|
||||
"error" => "The beacon head fork is unknown"
|
||||
)
|
||||
@@ -163,7 +186,7 @@ impl<T: SlotClock + 'static, E: EthSpec> ForkService<T, E> {
|
||||
*(self.fork.write()) = Some(fork);
|
||||
}
|
||||
|
||||
debug!(log, "Fork update success");
|
||||
debug!(self.log, "Fork update success");
|
||||
|
||||
// Returning an error will stop the interval. This is not desired, a single failure
|
||||
// should not stop all future attempts.
|
||||
|
||||
184
validator_client/src/http_api/api_secret.rs
Normal file
184
validator_client/src/http_api/api_secret.rs
Normal file
@@ -0,0 +1,184 @@
|
||||
use eth2::lighthouse_vc::{PK_LEN, SECRET_PREFIX as PK_PREFIX};
|
||||
use rand::thread_rng;
|
||||
use ring::digest::{digest, SHA256};
|
||||
use secp256k1::{Message, PublicKey, SecretKey};
|
||||
use std::fs;
|
||||
use std::path::Path;
|
||||
use warp::Filter;
|
||||
|
||||
/// The name of the file which stores the secret key.
|
||||
///
|
||||
/// It is purposefully opaque to prevent users confusing it with the "secret" that they need to
|
||||
/// share with API consumers (which is actually the public key).
|
||||
pub const SK_FILENAME: &str = ".secp-sk";
|
||||
|
||||
/// Length of the raw secret key, in bytes.
|
||||
pub const SK_LEN: usize = 32;
|
||||
|
||||
/// The name of the file which stores the public key.
|
||||
///
|
||||
/// For users, this public key is a "secret" that can be shared with API consumers to provide them
|
||||
/// access to the API. We avoid calling it a "public" key to users, since they should not post this
|
||||
/// value in a public forum.
|
||||
pub const PK_FILENAME: &str = "api-token.txt";
|
||||
|
||||
/// Contains a `secp256k1` keypair that is saved-to/loaded-from disk on instantiation. The keypair
|
||||
/// is used for authorization/authentication for requests/responses on the HTTP API.
|
||||
///
|
||||
/// Provides convenience functions to ultimately provide:
|
||||
///
|
||||
/// - A signature across outgoing HTTP responses, applied to the `Signature` header.
|
||||
/// - Verification of proof-of-knowledge of the public key in `self` for incoming HTTP requests,
|
||||
/// via the `Authorization` header.
|
||||
///
|
||||
/// The aforementioned scheme was first defined here:
|
||||
///
|
||||
/// https://github.com/sigp/lighthouse/issues/1269#issuecomment-649879855
|
||||
pub struct ApiSecret {
|
||||
pk: PublicKey,
|
||||
sk: SecretKey,
|
||||
}
|
||||
|
||||
impl ApiSecret {
|
||||
/// If both the secret and public keys are already on-disk, parse them and ensure they're both
|
||||
/// from the same keypair.
|
||||
///
|
||||
/// The provided `dir` is a directory containing two files, `SK_FILENAME` and `PK_FILENAME`.
|
||||
///
|
||||
/// If either the secret or public key files are missing on disk, create a new keypair and
|
||||
/// write it to disk (over-writing any existing files).
|
||||
pub fn create_or_open<P: AsRef<Path>>(dir: P) -> Result<Self, String> {
|
||||
let sk_path = dir.as_ref().join(SK_FILENAME);
|
||||
let pk_path = dir.as_ref().join(PK_FILENAME);
|
||||
|
||||
if !(sk_path.exists() && pk_path.exists()) {
|
||||
let sk = SecretKey::random(&mut thread_rng());
|
||||
let pk = PublicKey::from_secret_key(&sk);
|
||||
|
||||
fs::write(
|
||||
&sk_path,
|
||||
serde_utils::hex::encode(&sk.serialize()).as_bytes(),
|
||||
)
|
||||
.map_err(|e| e.to_string())?;
|
||||
fs::write(
|
||||
&pk_path,
|
||||
format!(
|
||||
"{}{}",
|
||||
PK_PREFIX,
|
||||
serde_utils::hex::encode(&pk.serialize_compressed()[..])
|
||||
)
|
||||
.as_bytes(),
|
||||
)
|
||||
.map_err(|e| e.to_string())?;
|
||||
}
|
||||
|
||||
let sk = fs::read(&sk_path)
|
||||
.map_err(|e| format!("cannot read {}: {}", SK_FILENAME, e))
|
||||
.and_then(|bytes| {
|
||||
serde_utils::hex::decode(&String::from_utf8_lossy(&bytes))
|
||||
.map_err(|_| format!("{} should be 0x-prefixed hex", PK_FILENAME))
|
||||
})
|
||||
.and_then(|bytes| {
|
||||
if bytes.len() == SK_LEN {
|
||||
let mut array = [0; SK_LEN];
|
||||
array.copy_from_slice(&bytes);
|
||||
SecretKey::parse(&array).map_err(|e| format!("invalid {}: {}", SK_FILENAME, e))
|
||||
} else {
|
||||
Err(format!(
|
||||
"{} expected {} bytes not {}",
|
||||
SK_FILENAME,
|
||||
SK_LEN,
|
||||
bytes.len()
|
||||
))
|
||||
}
|
||||
})?;
|
||||
|
||||
let pk = fs::read(&pk_path)
|
||||
.map_err(|e| format!("cannot read {}: {}", PK_FILENAME, e))
|
||||
.and_then(|bytes| {
|
||||
let hex =
|
||||
String::from_utf8(bytes).map_err(|_| format!("{} is not utf8", SK_FILENAME))?;
|
||||
if hex.starts_with(PK_PREFIX) {
|
||||
serde_utils::hex::decode(&hex[PK_PREFIX.len()..])
|
||||
.map_err(|_| format!("{} should be 0x-prefixed hex", SK_FILENAME))
|
||||
} else {
|
||||
Err(format!("unable to parse {}", SK_FILENAME))
|
||||
}
|
||||
})
|
||||
.and_then(|bytes| {
|
||||
if bytes.len() == PK_LEN {
|
||||
let mut array = [0; PK_LEN];
|
||||
array.copy_from_slice(&bytes);
|
||||
PublicKey::parse_compressed(&array)
|
||||
.map_err(|e| format!("invalid {}: {}", PK_FILENAME, e))
|
||||
} else {
|
||||
Err(format!(
|
||||
"{} expected {} bytes not {}",
|
||||
PK_FILENAME,
|
||||
PK_LEN,
|
||||
bytes.len()
|
||||
))
|
||||
}
|
||||
})?;
|
||||
|
||||
// Ensure that the keys loaded from disk are indeed a pair.
|
||||
if PublicKey::from_secret_key(&sk) != pk {
|
||||
fs::remove_file(&sk_path)
|
||||
.map_err(|e| format!("unable to remove {}: {}", SK_FILENAME, e))?;
|
||||
fs::remove_file(&pk_path)
|
||||
.map_err(|e| format!("unable to remove {}: {}", PK_FILENAME, e))?;
|
||||
return Err(format!(
|
||||
"{:?} does not match {:?} and the files have been deleted. Please try again.",
|
||||
sk_path, pk_path
|
||||
));
|
||||
}
|
||||
|
||||
Ok(Self { sk, pk })
|
||||
}
|
||||
|
||||
/// Returns the public key of `self` as a 0x-prefixed hex string.
|
||||
fn pubkey_string(&self) -> String {
|
||||
serde_utils::hex::encode(&self.pk.serialize_compressed()[..])
|
||||
}
|
||||
|
||||
/// Returns the API token.
|
||||
pub fn api_token(&self) -> String {
|
||||
format!("{}{}", PK_PREFIX, self.pubkey_string())
|
||||
}
|
||||
|
||||
/// Returns the value of the `Authorization` header which is used for verifying incoming HTTP
|
||||
/// requests.
|
||||
fn auth_header_value(&self) -> String {
|
||||
format!("Basic {}", self.api_token())
|
||||
}
|
||||
|
||||
/// Returns a `warp` header which filters out request that have a missing or inaccurate
|
||||
/// `Authorization` header.
|
||||
pub fn authorization_header_filter(&self) -> warp::filters::BoxedFilter<()> {
|
||||
let expected = self.auth_header_value();
|
||||
warp::any()
|
||||
.map(move || expected.clone())
|
||||
.and(warp::filters::header::header("Authorization"))
|
||||
.and_then(move |expected: String, header: String| async move {
|
||||
if header == expected {
|
||||
Ok(())
|
||||
} else {
|
||||
Err(warp_utils::reject::invalid_auth(header))
|
||||
}
|
||||
})
|
||||
.untuple_one()
|
||||
.boxed()
|
||||
}
|
||||
|
||||
/// Returns a closure which produces a signature over some bytes using the secret key in
|
||||
/// `self`. The signature is a 32-byte hash formatted as a 0x-prefixed string.
|
||||
pub fn signer(&self) -> impl Fn(&[u8]) -> String + Clone {
|
||||
let sk = self.sk.clone();
|
||||
move |input: &[u8]| -> String {
|
||||
let message =
|
||||
Message::parse_slice(digest(&SHA256, input).as_ref()).expect("sha256 is 32 bytes");
|
||||
let (signature, _) = secp256k1::sign(&message, &sk);
|
||||
serde_utils::hex::encode(signature.serialize_der().as_ref())
|
||||
}
|
||||
}
|
||||
}
|
||||
151
validator_client/src/http_api/create_validator.rs
Normal file
151
validator_client/src/http_api/create_validator.rs
Normal file
@@ -0,0 +1,151 @@
|
||||
use crate::ValidatorStore;
|
||||
use account_utils::{
|
||||
eth2_wallet::{bip39::Mnemonic, WalletBuilder},
|
||||
random_mnemonic, random_password, ZeroizeString,
|
||||
};
|
||||
use eth2::lighthouse_vc::types::{self as api_types};
|
||||
use slot_clock::SlotClock;
|
||||
use std::path::Path;
|
||||
use types::ChainSpec;
|
||||
use types::EthSpec;
|
||||
use validator_dir::Builder as ValidatorDirBuilder;
|
||||
|
||||
/// Create some validator EIP-2335 keystores and store them on disk. Then, enroll the validators in
|
||||
/// this validator client.
|
||||
///
|
||||
/// Returns the list of created validators and the mnemonic used to derive them via EIP-2334.
|
||||
///
|
||||
/// ## Detail
|
||||
///
|
||||
/// If `mnemonic_opt` is not supplied it will be randomly generated and returned in the response.
|
||||
///
|
||||
/// If `key_derivation_path_offset` is supplied then the EIP-2334 validator index will start at
|
||||
/// this point.
|
||||
pub fn create_validators<P: AsRef<Path>, T: 'static + SlotClock, E: EthSpec>(
|
||||
mnemonic_opt: Option<Mnemonic>,
|
||||
key_derivation_path_offset: Option<u32>,
|
||||
validator_requests: &[api_types::ValidatorRequest],
|
||||
validator_dir: P,
|
||||
validator_store: &ValidatorStore<T, E>,
|
||||
spec: &ChainSpec,
|
||||
) -> Result<(Vec<api_types::CreatedValidator>, Mnemonic), warp::Rejection> {
|
||||
let mnemonic = mnemonic_opt.unwrap_or_else(random_mnemonic);
|
||||
|
||||
let wallet_password = random_password();
|
||||
let mut wallet =
|
||||
WalletBuilder::from_mnemonic(&mnemonic, wallet_password.as_bytes(), String::new())
|
||||
.and_then(|builder| builder.build())
|
||||
.map_err(|e| {
|
||||
warp_utils::reject::custom_server_error(format!(
|
||||
"unable to create EIP-2386 wallet: {:?}",
|
||||
e
|
||||
))
|
||||
})?;
|
||||
|
||||
if let Some(nextaccount) = key_derivation_path_offset {
|
||||
wallet.set_nextaccount(nextaccount).map_err(|()| {
|
||||
warp_utils::reject::custom_server_error("unable to set wallet nextaccount".to_string())
|
||||
})?;
|
||||
}
|
||||
|
||||
let mut validators = Vec::with_capacity(validator_requests.len());
|
||||
|
||||
for request in validator_requests {
|
||||
let voting_password = random_password();
|
||||
let withdrawal_password = random_password();
|
||||
let voting_password_string = ZeroizeString::from(
|
||||
String::from_utf8(voting_password.as_bytes().to_vec()).map_err(|e| {
|
||||
warp_utils::reject::custom_server_error(format!(
|
||||
"locally generated password is not utf8: {:?}",
|
||||
e
|
||||
))
|
||||
})?,
|
||||
);
|
||||
|
||||
let mut keystores = wallet
|
||||
.next_validator(
|
||||
wallet_password.as_bytes(),
|
||||
voting_password.as_bytes(),
|
||||
withdrawal_password.as_bytes(),
|
||||
)
|
||||
.map_err(|e| {
|
||||
warp_utils::reject::custom_server_error(format!(
|
||||
"unable to create validator keys: {:?}",
|
||||
e
|
||||
))
|
||||
})?;
|
||||
|
||||
keystores
|
||||
.voting
|
||||
.set_description(request.description.clone());
|
||||
keystores
|
||||
.withdrawal
|
||||
.set_description(request.description.clone());
|
||||
|
||||
let voting_pubkey = format!("0x{}", keystores.voting.pubkey())
|
||||
.parse()
|
||||
.map_err(|e| {
|
||||
warp_utils::reject::custom_server_error(format!(
|
||||
"created invalid public key: {:?}",
|
||||
e
|
||||
))
|
||||
})?;
|
||||
|
||||
let validator_dir = ValidatorDirBuilder::new(validator_dir.as_ref().into())
|
||||
.voting_keystore(keystores.voting, voting_password.as_bytes())
|
||||
.withdrawal_keystore(keystores.withdrawal, withdrawal_password.as_bytes())
|
||||
.create_eth1_tx_data(request.deposit_gwei, &spec)
|
||||
.store_withdrawal_keystore(false)
|
||||
.build()
|
||||
.map_err(|e| {
|
||||
warp_utils::reject::custom_server_error(format!(
|
||||
"failed to build validator directory: {:?}",
|
||||
e
|
||||
))
|
||||
})?;
|
||||
|
||||
let eth1_deposit_data = validator_dir
|
||||
.eth1_deposit_data()
|
||||
.map_err(|e| {
|
||||
warp_utils::reject::custom_server_error(format!(
|
||||
"failed to read local deposit data: {:?}",
|
||||
e
|
||||
))
|
||||
})?
|
||||
.ok_or_else(|| {
|
||||
warp_utils::reject::custom_server_error(
|
||||
"failed to create local deposit data: {:?}".to_string(),
|
||||
)
|
||||
})?;
|
||||
|
||||
if eth1_deposit_data.deposit_data.amount != request.deposit_gwei {
|
||||
return Err(warp_utils::reject::custom_server_error(format!(
|
||||
"invalid deposit_gwei {}, expected {}",
|
||||
eth1_deposit_data.deposit_data.amount, request.deposit_gwei
|
||||
)));
|
||||
}
|
||||
|
||||
tokio::runtime::Handle::current()
|
||||
.block_on(validator_store.add_validator_keystore(
|
||||
validator_dir.voting_keystore_path(),
|
||||
voting_password_string,
|
||||
request.enable,
|
||||
))
|
||||
.map_err(|e| {
|
||||
warp_utils::reject::custom_server_error(format!(
|
||||
"failed to initialize validator: {:?}",
|
||||
e
|
||||
))
|
||||
})?;
|
||||
|
||||
validators.push(api_types::CreatedValidator {
|
||||
enabled: request.enable,
|
||||
description: request.description.clone(),
|
||||
voting_pubkey,
|
||||
eth1_deposit_tx_data: serde_utils::hex::encode(ð1_deposit_data.rlp),
|
||||
deposit_gwei: request.deposit_gwei,
|
||||
});
|
||||
}
|
||||
|
||||
Ok((validators, mnemonic))
|
||||
}
|
||||
488
validator_client/src/http_api/mod.rs
Normal file
488
validator_client/src/http_api/mod.rs
Normal file
@@ -0,0 +1,488 @@
|
||||
mod api_secret;
|
||||
mod create_validator;
|
||||
mod tests;
|
||||
|
||||
use crate::ValidatorStore;
|
||||
use account_utils::mnemonic_from_phrase;
|
||||
use create_validator::create_validators;
|
||||
use eth2::lighthouse_vc::types::{self as api_types, PublicKey, PublicKeyBytes};
|
||||
use lighthouse_version::version_with_platform;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use slog::{crit, info, Logger};
|
||||
use slot_clock::SlotClock;
|
||||
use std::future::Future;
|
||||
use std::marker::PhantomData;
|
||||
use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4};
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
use types::{ChainSpec, EthSpec, YamlConfig};
|
||||
use validator_dir::Builder as ValidatorDirBuilder;
|
||||
use warp::{
|
||||
http::{
|
||||
header::{HeaderValue, CONTENT_TYPE},
|
||||
response::Response,
|
||||
StatusCode,
|
||||
},
|
||||
Filter,
|
||||
};
|
||||
|
||||
pub use api_secret::ApiSecret;
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum Error {
|
||||
Warp(warp::Error),
|
||||
Other(String),
|
||||
}
|
||||
|
||||
impl From<warp::Error> for Error {
|
||||
fn from(e: warp::Error) -> Self {
|
||||
Error::Warp(e)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<String> for Error {
|
||||
fn from(e: String) -> Self {
|
||||
Error::Other(e)
|
||||
}
|
||||
}
|
||||
|
||||
/// A wrapper around all the items required to spawn the HTTP server.
|
||||
///
|
||||
/// The server will gracefully handle the case where any fields are `None`.
|
||||
pub struct Context<T: Clone, E: EthSpec> {
|
||||
pub api_secret: ApiSecret,
|
||||
pub validator_store: Option<ValidatorStore<T, E>>,
|
||||
pub validator_dir: Option<PathBuf>,
|
||||
pub spec: ChainSpec,
|
||||
pub config: Config,
|
||||
pub log: Logger,
|
||||
pub _phantom: PhantomData<E>,
|
||||
}
|
||||
|
||||
/// Configuration for the HTTP server.
|
||||
#[derive(PartialEq, Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct Config {
|
||||
pub enabled: bool,
|
||||
pub listen_addr: Ipv4Addr,
|
||||
pub listen_port: u16,
|
||||
pub allow_origin: Option<String>,
|
||||
}
|
||||
|
||||
impl Default for Config {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
enabled: false,
|
||||
listen_addr: Ipv4Addr::new(127, 0, 0, 1),
|
||||
listen_port: 5062,
|
||||
allow_origin: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates a server that will serve requests using information from `ctx`.
|
||||
///
|
||||
/// The server will shut down gracefully when the `shutdown` future resolves.
|
||||
///
|
||||
/// ## Returns
|
||||
///
|
||||
/// This function will bind the server to the provided address and then return a tuple of:
|
||||
///
|
||||
/// - `SocketAddr`: the address that the HTTP server will listen on.
|
||||
/// - `Future`: the actual server future that will need to be awaited.
|
||||
///
|
||||
/// ## Errors
|
||||
///
|
||||
/// Returns an error if the server is unable to bind or there is another error during
|
||||
/// configuration.
|
||||
pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>(
|
||||
ctx: Arc<Context<T, E>>,
|
||||
shutdown: impl Future<Output = ()> + Send + Sync + 'static,
|
||||
) -> Result<(SocketAddr, impl Future<Output = ()>), Error> {
|
||||
let config = &ctx.config;
|
||||
let log = ctx.log.clone();
|
||||
let allow_origin = config.allow_origin.clone();
|
||||
|
||||
// Sanity check.
|
||||
if !config.enabled {
|
||||
crit!(log, "Cannot start disabled metrics HTTP server");
|
||||
return Err(Error::Other(
|
||||
"A disabled metrics server should not be started".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
let authorization_header_filter = ctx.api_secret.authorization_header_filter();
|
||||
let api_token = ctx.api_secret.api_token();
|
||||
let signer = ctx.api_secret.signer();
|
||||
let signer = warp::any().map(move || signer.clone());
|
||||
|
||||
let inner_validator_store = ctx.validator_store.clone();
|
||||
let validator_store_filter = warp::any()
|
||||
.map(move || inner_validator_store.clone())
|
||||
.and_then(|validator_store: Option<_>| async move {
|
||||
validator_store.ok_or_else(|| {
|
||||
warp_utils::reject::custom_not_found(
|
||||
"validator store is not initialized.".to_string(),
|
||||
)
|
||||
})
|
||||
});
|
||||
|
||||
let inner_validator_dir = ctx.validator_dir.clone();
|
||||
let validator_dir_filter = warp::any()
|
||||
.map(move || inner_validator_dir.clone())
|
||||
.and_then(|validator_dir: Option<_>| async move {
|
||||
validator_dir.ok_or_else(|| {
|
||||
warp_utils::reject::custom_not_found(
|
||||
"validator_dir directory is not initialized.".to_string(),
|
||||
)
|
||||
})
|
||||
});
|
||||
|
||||
let inner_spec = Arc::new(ctx.spec.clone());
|
||||
let spec_filter = warp::any().map(move || inner_spec.clone());
|
||||
|
||||
// GET lighthouse/version
|
||||
let get_node_version = warp::path("lighthouse")
|
||||
.and(warp::path("version"))
|
||||
.and(warp::path::end())
|
||||
.and(signer.clone())
|
||||
.and_then(|signer| {
|
||||
blocking_signed_json_task(signer, move || {
|
||||
Ok(api_types::GenericResponse::from(api_types::VersionData {
|
||||
version: version_with_platform(),
|
||||
}))
|
||||
})
|
||||
});
|
||||
|
||||
// GET lighthouse/health
|
||||
let get_lighthouse_health = warp::path("lighthouse")
|
||||
.and(warp::path("health"))
|
||||
.and(warp::path::end())
|
||||
.and(signer.clone())
|
||||
.and_then(|signer| {
|
||||
blocking_signed_json_task(signer, move || {
|
||||
eth2::lighthouse::Health::observe()
|
||||
.map(api_types::GenericResponse::from)
|
||||
.map_err(warp_utils::reject::custom_bad_request)
|
||||
})
|
||||
});
|
||||
|
||||
// GET lighthouse/spec
|
||||
let get_lighthouse_spec = warp::path("lighthouse")
|
||||
.and(warp::path("spec"))
|
||||
.and(warp::path::end())
|
||||
.and(spec_filter.clone())
|
||||
.and(signer.clone())
|
||||
.and_then(|spec: Arc<_>, signer| {
|
||||
blocking_signed_json_task(signer, move || {
|
||||
Ok(api_types::GenericResponse::from(
|
||||
YamlConfig::from_spec::<E>(&spec),
|
||||
))
|
||||
})
|
||||
});
|
||||
|
||||
// GET lighthouse/validators
|
||||
let get_lighthouse_validators = warp::path("lighthouse")
|
||||
.and(warp::path("validators"))
|
||||
.and(warp::path::end())
|
||||
.and(validator_store_filter.clone())
|
||||
.and(signer.clone())
|
||||
.and_then(|validator_store: ValidatorStore<T, E>, signer| {
|
||||
blocking_signed_json_task(signer, move || {
|
||||
let validators = validator_store
|
||||
.initialized_validators()
|
||||
.read()
|
||||
.validator_definitions()
|
||||
.iter()
|
||||
.map(|def| api_types::ValidatorData {
|
||||
enabled: def.enabled,
|
||||
description: def.description.clone(),
|
||||
voting_pubkey: PublicKeyBytes::from(&def.voting_public_key),
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
Ok(api_types::GenericResponse::from(validators))
|
||||
})
|
||||
});
|
||||
|
||||
// GET lighthouse/validators/{validator_pubkey}
|
||||
let get_lighthouse_validators_pubkey = warp::path("lighthouse")
|
||||
.and(warp::path("validators"))
|
||||
.and(warp::path::param::<PublicKey>())
|
||||
.and(warp::path::end())
|
||||
.and(validator_store_filter.clone())
|
||||
.and(signer.clone())
|
||||
.and_then(
|
||||
|validator_pubkey: PublicKey, validator_store: ValidatorStore<T, E>, signer| {
|
||||
blocking_signed_json_task(signer, move || {
|
||||
let validator = validator_store
|
||||
.initialized_validators()
|
||||
.read()
|
||||
.validator_definitions()
|
||||
.iter()
|
||||
.find(|def| def.voting_public_key == validator_pubkey)
|
||||
.map(|def| api_types::ValidatorData {
|
||||
enabled: def.enabled,
|
||||
description: def.description.clone(),
|
||||
voting_pubkey: PublicKeyBytes::from(&def.voting_public_key),
|
||||
})
|
||||
.ok_or_else(|| {
|
||||
warp_utils::reject::custom_not_found(format!(
|
||||
"no validator for {:?}",
|
||||
validator_pubkey
|
||||
))
|
||||
})?;
|
||||
|
||||
Ok(api_types::GenericResponse::from(validator))
|
||||
})
|
||||
},
|
||||
);
|
||||
|
||||
// POST lighthouse/validators/
|
||||
let post_validators = warp::path("lighthouse")
|
||||
.and(warp::path("validators"))
|
||||
.and(warp::path::end())
|
||||
.and(warp::body::json())
|
||||
.and(validator_dir_filter.clone())
|
||||
.and(validator_store_filter.clone())
|
||||
.and(spec_filter.clone())
|
||||
.and(signer.clone())
|
||||
.and_then(
|
||||
|body: Vec<api_types::ValidatorRequest>,
|
||||
validator_dir: PathBuf,
|
||||
validator_store: ValidatorStore<T, E>,
|
||||
spec: Arc<ChainSpec>,
|
||||
signer| {
|
||||
blocking_signed_json_task(signer, move || {
|
||||
let (validators, mnemonic) = create_validators(
|
||||
None,
|
||||
None,
|
||||
&body,
|
||||
&validator_dir,
|
||||
&validator_store,
|
||||
&spec,
|
||||
)?;
|
||||
let response = api_types::PostValidatorsResponseData {
|
||||
mnemonic: mnemonic.into_phrase().into(),
|
||||
validators,
|
||||
};
|
||||
Ok(api_types::GenericResponse::from(response))
|
||||
})
|
||||
},
|
||||
);
|
||||
|
||||
// POST lighthouse/validators/mnemonic
|
||||
let post_validators_mnemonic = warp::path("lighthouse")
|
||||
.and(warp::path("validators"))
|
||||
.and(warp::path("mnemonic"))
|
||||
.and(warp::path::end())
|
||||
.and(warp::body::json())
|
||||
.and(validator_dir_filter.clone())
|
||||
.and(validator_store_filter.clone())
|
||||
.and(spec_filter)
|
||||
.and(signer.clone())
|
||||
.and_then(
|
||||
|body: api_types::CreateValidatorsMnemonicRequest,
|
||||
validator_dir: PathBuf,
|
||||
validator_store: ValidatorStore<T, E>,
|
||||
spec: Arc<ChainSpec>,
|
||||
signer| {
|
||||
blocking_signed_json_task(signer, move || {
|
||||
let mnemonic = mnemonic_from_phrase(body.mnemonic.as_str()).map_err(|e| {
|
||||
warp_utils::reject::custom_bad_request(format!("invalid mnemonic: {:?}", e))
|
||||
})?;
|
||||
let (validators, _mnemonic) = create_validators(
|
||||
Some(mnemonic),
|
||||
Some(body.key_derivation_path_offset),
|
||||
&body.validators,
|
||||
&validator_dir,
|
||||
&validator_store,
|
||||
&spec,
|
||||
)?;
|
||||
Ok(api_types::GenericResponse::from(validators))
|
||||
})
|
||||
},
|
||||
);
|
||||
|
||||
// POST lighthouse/validators/keystore
|
||||
let post_validators_keystore = warp::path("lighthouse")
|
||||
.and(warp::path("validators"))
|
||||
.and(warp::path("keystore"))
|
||||
.and(warp::path::end())
|
||||
.and(warp::body::json())
|
||||
.and(validator_dir_filter)
|
||||
.and(validator_store_filter.clone())
|
||||
.and(signer.clone())
|
||||
.and_then(
|
||||
|body: api_types::KeystoreValidatorsPostRequest,
|
||||
validator_dir: PathBuf,
|
||||
validator_store: ValidatorStore<T, E>,
|
||||
signer| {
|
||||
blocking_signed_json_task(signer, move || {
|
||||
// Check to ensure the password is correct.
|
||||
let keypair = body
|
||||
.keystore
|
||||
.decrypt_keypair(body.password.as_ref())
|
||||
.map_err(|e| {
|
||||
warp_utils::reject::custom_bad_request(format!(
|
||||
"invalid keystore: {:?}",
|
||||
e
|
||||
))
|
||||
})?;
|
||||
|
||||
let validator_dir = ValidatorDirBuilder::new(validator_dir.clone())
|
||||
.voting_keystore(body.keystore.clone(), body.password.as_ref())
|
||||
.store_withdrawal_keystore(false)
|
||||
.build()
|
||||
.map_err(|e| {
|
||||
warp_utils::reject::custom_server_error(format!(
|
||||
"failed to build validator directory: {:?}",
|
||||
e
|
||||
))
|
||||
})?;
|
||||
|
||||
let voting_password = body.password.clone();
|
||||
|
||||
let validator_def = tokio::runtime::Handle::current()
|
||||
.block_on(validator_store.add_validator_keystore(
|
||||
validator_dir.voting_keystore_path(),
|
||||
voting_password,
|
||||
body.enable,
|
||||
))
|
||||
.map_err(|e| {
|
||||
warp_utils::reject::custom_server_error(format!(
|
||||
"failed to initialize validator: {:?}",
|
||||
e
|
||||
))
|
||||
})?;
|
||||
|
||||
Ok(api_types::GenericResponse::from(api_types::ValidatorData {
|
||||
enabled: body.enable,
|
||||
description: validator_def.description,
|
||||
voting_pubkey: keypair.pk.into(),
|
||||
}))
|
||||
})
|
||||
},
|
||||
);
|
||||
|
||||
// PATCH lighthouse/validators/{validator_pubkey}
|
||||
let patch_validators = warp::path("lighthouse")
|
||||
.and(warp::path("validators"))
|
||||
.and(warp::path::param::<PublicKey>())
|
||||
.and(warp::path::end())
|
||||
.and(warp::body::json())
|
||||
.and(validator_store_filter)
|
||||
.and(signer)
|
||||
.and_then(
|
||||
|validator_pubkey: PublicKey,
|
||||
body: api_types::ValidatorPatchRequest,
|
||||
validator_store: ValidatorStore<T, E>,
|
||||
signer| {
|
||||
blocking_signed_json_task(signer, move || {
|
||||
let initialized_validators_rw_lock = validator_store.initialized_validators();
|
||||
let mut initialized_validators = initialized_validators_rw_lock.write();
|
||||
|
||||
match initialized_validators.is_enabled(&validator_pubkey) {
|
||||
None => Err(warp_utils::reject::custom_not_found(format!(
|
||||
"no validator for {:?}",
|
||||
validator_pubkey
|
||||
))),
|
||||
Some(enabled) if enabled == body.enabled => Ok(()),
|
||||
Some(_) => {
|
||||
tokio::runtime::Handle::current()
|
||||
.block_on(
|
||||
initialized_validators
|
||||
.set_validator_status(&validator_pubkey, body.enabled),
|
||||
)
|
||||
.map_err(|e| {
|
||||
warp_utils::reject::custom_server_error(format!(
|
||||
"unable to set validator status: {:?}",
|
||||
e
|
||||
))
|
||||
})?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
})
|
||||
},
|
||||
);
|
||||
|
||||
let routes = warp::any()
|
||||
.and(authorization_header_filter)
|
||||
.and(
|
||||
warp::get().and(
|
||||
get_node_version
|
||||
.or(get_lighthouse_health)
|
||||
.or(get_lighthouse_spec)
|
||||
.or(get_lighthouse_validators)
|
||||
.or(get_lighthouse_validators_pubkey),
|
||||
),
|
||||
)
|
||||
.or(warp::post().and(
|
||||
post_validators
|
||||
.or(post_validators_keystore)
|
||||
.or(post_validators_mnemonic),
|
||||
))
|
||||
.or(warp::patch().and(patch_validators))
|
||||
// Maps errors into HTTP responses.
|
||||
.recover(warp_utils::reject::handle_rejection)
|
||||
// Add a `Server` header.
|
||||
.map(|reply| warp::reply::with_header(reply, "Server", &version_with_platform()))
|
||||
// Maybe add some CORS headers.
|
||||
.map(move |reply| warp_utils::reply::maybe_cors(reply, allow_origin.as_ref()));
|
||||
|
||||
let (listening_socket, server) = warp::serve(routes).try_bind_with_graceful_shutdown(
|
||||
SocketAddrV4::new(config.listen_addr, config.listen_port),
|
||||
async {
|
||||
shutdown.await;
|
||||
},
|
||||
)?;
|
||||
|
||||
info!(
|
||||
log,
|
||||
"HTTP API started";
|
||||
"listen_address" => listening_socket.to_string(),
|
||||
"api_token" => api_token,
|
||||
);
|
||||
|
||||
Ok((listening_socket, server))
|
||||
}
|
||||
|
||||
/// Executes `func` in blocking tokio task (i.e., where long-running tasks are permitted).
|
||||
/// JSON-encodes the return value of `func`, using the `signer` function to produce a signature of
|
||||
/// those bytes.
|
||||
pub async fn blocking_signed_json_task<S, F, T>(
|
||||
signer: S,
|
||||
func: F,
|
||||
) -> Result<impl warp::Reply, warp::Rejection>
|
||||
where
|
||||
S: Fn(&[u8]) -> String,
|
||||
F: Fn() -> Result<T, warp::Rejection>,
|
||||
T: Serialize,
|
||||
{
|
||||
warp_utils::task::blocking_task(func)
|
||||
.await
|
||||
.map(|func_output| {
|
||||
let mut response = match serde_json::to_vec(&func_output) {
|
||||
Ok(body) => {
|
||||
let mut res = Response::new(body);
|
||||
res.headers_mut()
|
||||
.insert(CONTENT_TYPE, HeaderValue::from_static("application/json"));
|
||||
res
|
||||
}
|
||||
Err(_) => Response::builder()
|
||||
.status(StatusCode::INTERNAL_SERVER_ERROR)
|
||||
.body(vec![])
|
||||
.expect("can produce simple response from static values"),
|
||||
};
|
||||
|
||||
let body: &Vec<u8> = response.body();
|
||||
let signature = signer(body);
|
||||
let header_value =
|
||||
HeaderValue::from_str(&signature).expect("hash can be encoded as header");
|
||||
|
||||
response.headers_mut().append("Signature", header_value);
|
||||
|
||||
response
|
||||
})
|
||||
}
|
||||
527
validator_client/src/http_api/tests.rs
Normal file
527
validator_client/src/http_api/tests.rs
Normal file
@@ -0,0 +1,527 @@
|
||||
#![cfg(test)]
|
||||
#![cfg(not(debug_assertions))]
|
||||
|
||||
use crate::{
|
||||
http_api::{ApiSecret, Config as HttpConfig, Context},
|
||||
Config, ForkServiceBuilder, InitializedValidators, ValidatorDefinitions, ValidatorStore,
|
||||
};
|
||||
use account_utils::{
|
||||
eth2_wallet::WalletBuilder, mnemonic_from_phrase, random_mnemonic, random_password,
|
||||
ZeroizeString,
|
||||
};
|
||||
use deposit_contract::decode_eth1_tx_data;
|
||||
use environment::null_logger;
|
||||
use eth2::{
|
||||
lighthouse_vc::{http_client::ValidatorClientHttpClient, types::*},
|
||||
Url,
|
||||
};
|
||||
use eth2_keystore::KeystoreBuilder;
|
||||
use parking_lot::RwLock;
|
||||
use slot_clock::TestingSlotClock;
|
||||
use std::marker::PhantomData;
|
||||
use std::net::Ipv4Addr;
|
||||
use std::sync::Arc;
|
||||
use tempfile::{tempdir, TempDir};
|
||||
use tokio::sync::oneshot;
|
||||
|
||||
// Password used for all wallets/keystores generated during these tests.
const PASSWORD_BYTES: &[u8] = &[42, 13, 37];

type E = MainnetEthSpec;
|
||||
|
||||
/// Test rig: a running VC HTTP API server plus a client connected to it.
struct ApiTester {
    /// Client pointed at the server, authenticated with the server's API token.
    client: ValidatorClientHttpClient,
    /// Direct handle to the validators behind the server, for state assertions.
    initialized_validators: Arc<RwLock<InitializedValidators>>,
    /// Base URL of the running server.
    url: Url,
    // Held so the server keeps running; dropping the sender resolves its shutdown future.
    _server_shutdown: oneshot::Sender<()>,
    // Held so the temporary validator directory outlives the test.
    _validator_dir: TempDir,
}
|
||||
|
||||
impl ApiTester {
    /// Builds a fresh rig: empty validator definitions in a temp dir, a new API secret, the
    /// HTTP server spawned on an ephemeral port, and a client using the server's token.
    pub async fn new() -> Self {
        let log = null_logger().unwrap();

        let validator_dir = tempdir().unwrap();
        let secrets_dir = tempdir().unwrap();

        let validator_defs = ValidatorDefinitions::open_or_create(validator_dir.path()).unwrap();

        let initialized_validators = InitializedValidators::from_definitions(
            validator_defs,
            validator_dir.path().into(),
            false,
            log.clone(),
        )
        .await
        .unwrap();

        let api_secret = ApiSecret::create_or_open(validator_dir.path()).unwrap();
        let api_pubkey = api_secret.api_token();

        let mut config = Config::default();
        config.validator_dir = validator_dir.path().into();
        config.secrets_dir = secrets_dir.path().into();

        let fork_service = ForkServiceBuilder::testing_only(log.clone())
            .build()
            .unwrap();

        let validator_store: ValidatorStore<TestingSlotClock, E> = ValidatorStore::new(
            initialized_validators,
            &config,
            Hash256::repeat_byte(42),
            E::default_spec(),
            fork_service.clone(),
            log.clone(),
        )
        .unwrap();

        let initialized_validators = validator_store.initialized_validators();

        let context: Arc<Context<TestingSlotClock, E>> = Arc::new(Context {
            api_secret,
            validator_dir: Some(validator_dir.path().into()),
            validator_store: Some(validator_store),
            spec: E::default_spec(),
            config: HttpConfig {
                enabled: true,
                listen_addr: Ipv4Addr::new(127, 0, 0, 1),
                // Port 0: let the OS assign a free port; read it back from `listening_socket`.
                listen_port: 0,
                allow_origin: None,
            },
            log,
            _phantom: PhantomData,
        });
        let ctx = context.clone();
        let (shutdown_tx, shutdown_rx) = oneshot::channel();
        let server_shutdown = async {
            // It's not really interesting why this triggered, just that it happened.
            let _ = shutdown_rx.await;
        };
        let (listening_socket, server) = super::serve(ctx, server_shutdown).unwrap();

        tokio::spawn(async { server.await });

        let url = Url::parse(&format!(
            "http://{}:{}",
            listening_socket.ip(),
            listening_socket.port()
        ))
        .unwrap();

        let client = ValidatorClientHttpClient::new(url.clone(), api_pubkey).unwrap();

        Self {
            initialized_validators,
            _validator_dir: validator_dir,
            client,
            url,
            _server_shutdown: shutdown_tx,
        }
    }

    /// Replaces the client with one that uses a token from a *different* API secret, so
    /// subsequent requests should be rejected by the server.
    pub fn invalidate_api_token(mut self) -> Self {
        let tmp = tempdir().unwrap();
        let api_secret = ApiSecret::create_or_open(tmp.path()).unwrap();
        let invalid_pubkey = api_secret.api_token();

        self.client = ValidatorClientHttpClient::new(self.url.clone(), invalid_pubkey).unwrap();
        self
    }

    /// Asserts that the version endpoint fails (used after `invalidate_api_token`).
    pub async fn test_get_lighthouse_version_invalid(self) -> Self {
        self.client.get_lighthouse_version().await.unwrap_err();
        self
    }

    /// Asserts that the spec endpoint returns the default spec used to build the rig.
    pub async fn test_get_lighthouse_spec(self) -> Self {
        let result = self.client.get_lighthouse_spec().await.unwrap().data;

        let expected = YamlConfig::from_spec::<E>(&E::default_spec());

        assert_eq!(result, expected);

        self
    }

    /// Asserts that the version endpoint returns this build's version string.
    pub async fn test_get_lighthouse_version(self) -> Self {
        let result = self.client.get_lighthouse_version().await.unwrap().data;

        let expected = VersionData {
            version: lighthouse_version::version_with_platform(),
        };

        assert_eq!(result, expected);

        self
    }

    // Health observation succeeds on Linux only; elsewhere the endpoint is expected to error.
    #[cfg(target_os = "linux")]
    pub async fn test_get_lighthouse_health(self) -> Self {
        self.client.get_lighthouse_health().await.unwrap();

        self
    }

    #[cfg(not(target_os = "linux"))]
    pub async fn test_get_lighthouse_health(self) -> Self {
        self.client.get_lighthouse_health().await.unwrap_err();

        self
    }

    /// Total number of known validators (enabled or not), read directly from the store.
    pub fn vals_total(&self) -> usize {
        self.initialized_validators.read().num_total()
    }

    /// Number of enabled validators, read directly from the store.
    pub fn vals_enabled(&self) -> usize {
        self.initialized_validators.read().num_enabled()
    }

    pub fn assert_enabled_validators_count(self, count: usize) -> Self {
        assert_eq!(self.vals_enabled(), count);
        self
    }

    pub fn assert_validators_count(self, count: usize) -> Self {
        assert_eq!(self.vals_total(), count);
        self
    }

    /// Creates HD validators per the scenario (either via the mnemonic endpoint or the plain
    /// endpoint), then checks server-side counts/listings and regenerates every key locally
    /// from the mnemonic to verify the server's response and deposit data.
    pub async fn create_hd_validators(self, s: HdValidatorScenario) -> Self {
        let initial_vals = self.vals_total();
        let initial_enabled_vals = self.vals_enabled();

        let validators = (0..s.count)
            .map(|i| ValidatorRequest {
                enable: !s.disabled.contains(&i),
                description: format!("boi #{}", i),
                deposit_gwei: E::default_spec().max_effective_balance,
            })
            .collect::<Vec<_>>();

        let (response, mnemonic) = if s.specify_mnemonic {
            let mnemonic = ZeroizeString::from(random_mnemonic().phrase().to_string());
            let request = CreateValidatorsMnemonicRequest {
                mnemonic: mnemonic.clone(),
                key_derivation_path_offset: s.key_derivation_path_offset,
                validators: validators.clone(),
            };
            let response = self
                .client
                .post_lighthouse_validators_mnemonic(&request)
                .await
                .unwrap()
                .data;

            (response, mnemonic)
        } else {
            // The plain endpoint generates its own mnemonic, so an offset cannot apply.
            assert_eq!(
                s.key_derivation_path_offset, 0,
                "cannot use a derivation offset without specifying a mnemonic"
            );
            let response = self
                .client
                .post_lighthouse_validators(validators.clone())
                .await
                .unwrap()
                .data;
            (response.validators.clone(), response.mnemonic.clone())
        };

        assert_eq!(response.len(), s.count);
        assert_eq!(self.vals_total(), initial_vals + s.count);
        assert_eq!(
            self.vals_enabled(),
            initial_enabled_vals + s.count - s.disabled.len()
        );

        let server_vals = self.client.get_lighthouse_validators().await.unwrap().data;

        assert_eq!(server_vals.len(), self.vals_total());

        // Ensure the server lists all of these newly created validators.
        for validator in &response {
            assert!(server_vals
                .iter()
                .any(|server_val| server_val.voting_pubkey == validator.voting_pubkey));
        }

        /*
         * Verify that we can regenerate all the keys from the mnemonic.
         */

        let mnemonic = mnemonic_from_phrase(mnemonic.as_str()).unwrap();
        let mut wallet = WalletBuilder::from_mnemonic(&mnemonic, PASSWORD_BYTES, "".to_string())
            .unwrap()
            .build()
            .unwrap();

        // Skip past any accounts the offset told the server to skip.
        wallet
            .set_nextaccount(s.key_derivation_path_offset)
            .unwrap();

        for i in 0..s.count {
            let keypairs = wallet
                .next_validator(PASSWORD_BYTES, PASSWORD_BYTES, PASSWORD_BYTES)
                .unwrap();
            let voting_keypair = keypairs.voting.decrypt_keypair(PASSWORD_BYTES).unwrap();

            assert_eq!(
                response[i].voting_pubkey,
                voting_keypair.pk.clone().into(),
                "the locally generated voting pk should match the server response"
            );

            let withdrawal_keypair = keypairs.withdrawal.decrypt_keypair(PASSWORD_BYTES).unwrap();

            let deposit_bytes =
                serde_utils::hex::decode(&response[i].eth1_deposit_tx_data).unwrap();

            let (deposit_data, _) =
                decode_eth1_tx_data(&deposit_bytes, E::default_spec().max_effective_balance)
                    .unwrap();

            assert_eq!(
                deposit_data.pubkey,
                voting_keypair.pk.clone().into(),
                "the locally generated voting pk should match the deposit data"
            );

            assert_eq!(
                deposit_data.withdrawal_credentials,
                Hash256::from_slice(&bls::get_withdrawal_credentials(
                    &withdrawal_keypair.pk,
                    E::default_spec().bls_withdrawal_prefix_byte
                )),
                "the locally generated withdrawal creds should match the deposit data"
            );

            assert_eq!(
                deposit_data.signature,
                deposit_data.create_signature(&voting_keypair.sk, &E::default_spec()),
                "the locally-generated deposit sig should create the same deposit sig"
            );
        }

        self
    }

    /// Imports a locally-built keystore via the keystore endpoint. With a wrong password the
    /// request must fail and no validator is added; otherwise the new validator must appear
    /// with the requested enabled state.
    pub async fn create_keystore_validators(self, s: KeystoreValidatorScenario) -> Self {
        let initial_vals = self.vals_total();
        let initial_enabled_vals = self.vals_enabled();

        let password = random_password();
        let keypair = Keypair::random();
        let keystore = KeystoreBuilder::new(&keypair, password.as_bytes(), String::new())
            .unwrap()
            .build()
            .unwrap();

        if !s.correct_password {
            // Submit with a *different* random password; the server must reject it.
            let request = KeystoreValidatorsPostRequest {
                enable: s.enabled,
                password: String::from_utf8(random_password().as_ref().to_vec())
                    .unwrap()
                    .into(),
                keystore,
            };

            self.client
                .post_lighthouse_validators_keystore(&request)
                .await
                .unwrap_err();

            return self;
        }

        let request = KeystoreValidatorsPostRequest {
            enable: s.enabled,
            password: String::from_utf8(password.as_ref().to_vec())
                .unwrap()
                .into(),
            keystore,
        };

        let response = self
            .client
            .post_lighthouse_validators_keystore(&request)
            .await
            .unwrap()
            .data;

        let num_enabled = s.enabled as usize;

        assert_eq!(self.vals_total(), initial_vals + 1);
        assert_eq!(self.vals_enabled(), initial_enabled_vals + num_enabled);

        let server_vals = self.client.get_lighthouse_validators().await.unwrap().data;

        assert_eq!(server_vals.len(), self.vals_total());

        assert_eq!(response.voting_pubkey, keypair.pk.into());
        assert_eq!(response.enabled, s.enabled);

        self
    }

    /// PATCHes the `index`-th listed validator to `enabled`, then verifies the change via the
    /// store, the list endpoint and the single-validator endpoint.
    pub async fn set_validator_enabled(self, index: usize, enabled: bool) -> Self {
        let validator = &self.client.get_lighthouse_validators().await.unwrap().data[index];

        self.client
            .patch_lighthouse_validators(&validator.voting_pubkey, enabled)
            .await
            .unwrap();

        assert_eq!(
            self.initialized_validators
                .read()
                .is_enabled(&validator.voting_pubkey.decompress().unwrap())
                .unwrap(),
            enabled
        );

        assert!(self
            .client
            .get_lighthouse_validators()
            .await
            .unwrap()
            .data
            .into_iter()
            .find(|v| v.voting_pubkey == validator.voting_pubkey)
            .map(|v| v.enabled == enabled)
            .unwrap());

        // Check the server via an individual request.
        assert_eq!(
            self.client
                .get_lighthouse_validators_pubkey(&validator.voting_pubkey)
                .await
                .unwrap()
                .unwrap()
                .data
                .enabled,
            enabled
        );

        self
    }
}
|
||||
|
||||
/// Parameters for an HD validator creation scenario (see `ApiTester::create_hd_validators`).
struct HdValidatorScenario {
    /// How many validators to request.
    count: usize,
    /// If `true`, use the `validators/mnemonic` endpoint with a locally-generated mnemonic;
    /// otherwise use the plain `validators` endpoint.
    specify_mnemonic: bool,
    /// Key derivation offset; must be 0 unless `specify_mnemonic` is `true`.
    key_derivation_path_offset: u32,
    /// Indices (into the created batch) of validators to create as disabled.
    disabled: Vec<usize>,
}
|
||||
|
||||
/// Parameters for a keystore import scenario (see `ApiTester::create_keystore_validators`).
struct KeystoreValidatorScenario {
    /// Whether the imported validator should be enabled.
    enabled: bool,
    /// If `false`, a wrong password is submitted and the request must fail.
    correct_password: bool,
}
|
||||
|
||||
// A client using a token from a different API secret must be rejected.
#[tokio::test(core_threads = 2)]
async fn invalid_pubkey() {
    ApiTester::new()
        .await
        .invalidate_api_token()
        .test_get_lighthouse_version_invalid()
        .await;
}
|
||||
|
||||
// Exercises the read-only endpoints: version, health and spec.
#[tokio::test(core_threads = 2)]
async fn simple_getters() {
    ApiTester::new()
        .await
        .test_get_lighthouse_version()
        .await
        .test_get_lighthouse_health()
        .await
        .test_get_lighthouse_spec()
        .await;
}
|
||||
|
||||
// HD creation: with an explicit mnemonic, without one (including a disabled validator), and
// an empty batch with a non-zero derivation offset. Counts are checked after each step.
#[tokio::test(core_threads = 2)]
async fn hd_validator_creation() {
    ApiTester::new()
        .await
        .assert_enabled_validators_count(0)
        .assert_validators_count(0)
        .create_hd_validators(HdValidatorScenario {
            count: 2,
            specify_mnemonic: true,
            key_derivation_path_offset: 0,
            disabled: vec![],
        })
        .await
        .assert_enabled_validators_count(2)
        .assert_validators_count(2)
        .create_hd_validators(HdValidatorScenario {
            count: 1,
            specify_mnemonic: false,
            key_derivation_path_offset: 0,
            disabled: vec![0],
        })
        .await
        .assert_enabled_validators_count(2)
        .assert_validators_count(3)
        .create_hd_validators(HdValidatorScenario {
            count: 0,
            specify_mnemonic: true,
            key_derivation_path_offset: 4,
            disabled: vec![],
        })
        .await
        .assert_enabled_validators_count(2)
        .assert_validators_count(3);
}
|
||||
|
||||
// Creates two validators, disables the first via PATCH, then re-enables it.
#[tokio::test(core_threads = 2)]
async fn validator_enabling() {
    ApiTester::new()
        .await
        .create_hd_validators(HdValidatorScenario {
            count: 2,
            specify_mnemonic: false,
            key_derivation_path_offset: 0,
            disabled: vec![],
        })
        .await
        .assert_enabled_validators_count(2)
        .assert_validators_count(2)
        .set_validator_enabled(0, false)
        .await
        .assert_enabled_validators_count(1)
        .assert_validators_count(2)
        .set_validator_enabled(0, true)
        .await
        .assert_enabled_validators_count(2)
        .assert_validators_count(2);
}
|
||||
|
||||
// Keystore import: a good import, a rejected wrong-password import (counts unchanged), and
// an import created in the disabled state.
#[tokio::test(core_threads = 2)]
async fn keystore_validator_creation() {
    ApiTester::new()
        .await
        .assert_enabled_validators_count(0)
        .assert_validators_count(0)
        .create_keystore_validators(KeystoreValidatorScenario {
            correct_password: true,
            enabled: true,
        })
        .await
        .assert_enabled_validators_count(1)
        .assert_validators_count(1)
        .create_keystore_validators(KeystoreValidatorScenario {
            correct_password: false,
            enabled: true,
        })
        .await
        .assert_enabled_validators_count(1)
        .assert_validators_count(1)
        .create_keystore_validators(KeystoreValidatorScenario {
            correct_password: true,
            enabled: false,
        })
        .await
        .assert_enabled_validators_count(1)
        .assert_validators_count(2);
}
|
||||
@@ -56,6 +56,8 @@ pub enum Error {
|
||||
TokioJoin(tokio::task::JoinError),
|
||||
/// There was a filesystem error when deleting a lockfile.
|
||||
UnableToDeleteLockfile(io::Error),
|
||||
/// Cannot initialize the same validator twice.
|
||||
DuplicatePublicKey,
|
||||
}
|
||||
|
||||
/// A method used by a validator to sign messages.
|
||||
@@ -322,6 +324,42 @@ impl InitializedValidators {
|
||||
.map(|v| v.voting_keypair())
|
||||
}
|
||||
|
||||
    /// Add a validator definition to `self`, overwriting the on-disk representation of `self`.
    ///
    /// Returns `Err(Error::DuplicatePublicKey)` if a definition with the same voting public
    /// key already exists; in that case nothing is changed. On success, `update_validators`
    /// is run and the definitions are saved to `self.validators_dir`.
    pub async fn add_definition(&mut self, def: ValidatorDefinition) -> Result<(), Error> {
        if self
            .definitions
            .as_slice()
            .iter()
            .any(|existing| existing.voting_public_key == def.voting_public_key)
        {
            return Err(Error::DuplicatePublicKey);
        }

        self.definitions.push(def);

        // Initialize the new validator before persisting, so the in-memory and on-disk
        // states stay in step.
        self.update_validators().await?;

        self.definitions
            .save(&self.validators_dir)
            .map_err(Error::UnableToSaveDefinitions)?;

        Ok(())
    }
|
||||
|
||||
    /// Returns a slice of all defined validators (regardless of their enabled state).
    pub fn validator_definitions(&self) -> &[ValidatorDefinition] {
        self.definitions.as_slice()
    }
|
||||
|
||||
    /// Indicates if the `voting_public_key` exists in self and if it is enabled.
    ///
    /// Returns `None` when no definition matches the key, otherwise `Some(enabled)`.
    pub fn is_enabled(&self, voting_public_key: &PublicKey) -> Option<bool> {
        self.definitions
            .as_slice()
            .iter()
            .find(|def| def.voting_public_key == *voting_public_key)
            .map(|def| def.enabled)
    }
|
||||
|
||||
/// Sets the `InitializedValidator` and `ValidatorDefinition` `enabled` values.
|
||||
///
|
||||
/// ## Notes
|
||||
|
||||
@@ -10,6 +10,8 @@ mod notifier;
|
||||
mod validator_duty;
|
||||
mod validator_store;
|
||||
|
||||
pub mod http_api;
|
||||
|
||||
pub use cli::cli_app;
|
||||
pub use config::Config;
|
||||
|
||||
@@ -22,11 +24,14 @@ use environment::RuntimeContext;
|
||||
use eth2::{reqwest::ClientBuilder, BeaconNodeHttpClient, StatusCode, Url};
|
||||
use fork_service::{ForkService, ForkServiceBuilder};
|
||||
use futures::channel::mpsc;
|
||||
use http_api::ApiSecret;
|
||||
use initialized_validators::InitializedValidators;
|
||||
use notifier::spawn_notifier;
|
||||
use slog::{error, info, Logger};
|
||||
use slot_clock::SlotClock;
|
||||
use slot_clock::SystemTimeSlotClock;
|
||||
use std::marker::PhantomData;
|
||||
use std::net::SocketAddr;
|
||||
use std::sync::Arc;
|
||||
use std::time::{SystemTime, UNIX_EPOCH};
|
||||
use tokio::time::{delay_for, Duration};
|
||||
@@ -42,9 +47,11 @@ const HTTP_TIMEOUT: Duration = Duration::from_secs(12);
|
||||
pub struct ProductionValidatorClient<T: EthSpec> {
|
||||
context: RuntimeContext<T>,
|
||||
duties_service: DutiesService<SystemTimeSlotClock, T>,
|
||||
fork_service: ForkService<SystemTimeSlotClock, T>,
|
||||
fork_service: ForkService<SystemTimeSlotClock>,
|
||||
block_service: BlockService<SystemTimeSlotClock, T>,
|
||||
attestation_service: AttestationService<SystemTimeSlotClock, T>,
|
||||
validator_store: ValidatorStore<SystemTimeSlotClock, T>,
|
||||
http_api_listen_addr: Option<SocketAddr>,
|
||||
config: Config,
|
||||
}
|
||||
|
||||
@@ -55,7 +62,7 @@ impl<T: EthSpec> ProductionValidatorClient<T> {
|
||||
context: RuntimeContext<T>,
|
||||
cli_args: &ArgMatches<'_>,
|
||||
) -> Result<Self, String> {
|
||||
let config = Config::from_cli(&cli_args)
|
||||
let config = Config::from_cli(&cli_args, context.log())
|
||||
.map_err(|e| format!("Unable to initialize config: {}", e))?;
|
||||
Self::new(context, config).await
|
||||
}
|
||||
@@ -68,7 +75,7 @@ impl<T: EthSpec> ProductionValidatorClient<T> {
|
||||
info!(
|
||||
log,
|
||||
"Starting validator client";
|
||||
"beacon_node" => &config.http_server,
|
||||
"beacon_node" => &config.beacon_node,
|
||||
"validator_dir" => format!("{:?}", config.validator_dir),
|
||||
);
|
||||
|
||||
@@ -106,7 +113,7 @@ impl<T: EthSpec> ProductionValidatorClient<T> {
|
||||
);
|
||||
|
||||
let beacon_node_url: Url = config
|
||||
.http_server
|
||||
.beacon_node
|
||||
.parse()
|
||||
.map_err(|e| format!("Unable to parse beacon node URL: {:?}", e))?;
|
||||
let beacon_node_http_client = ClientBuilder::new()
|
||||
@@ -144,7 +151,7 @@ impl<T: EthSpec> ProductionValidatorClient<T> {
|
||||
let fork_service = ForkServiceBuilder::new()
|
||||
.slot_clock(slot_clock.clone())
|
||||
.beacon_node(beacon_node.clone())
|
||||
.runtime_context(context.service_context("fork".into()))
|
||||
.log(log.clone())
|
||||
.build()?;
|
||||
|
||||
let validator_store: ValidatorStore<SystemTimeSlotClock, T> = ValidatorStore::new(
|
||||
@@ -183,7 +190,7 @@ impl<T: EthSpec> ProductionValidatorClient<T> {
|
||||
let attestation_service = AttestationServiceBuilder::new()
|
||||
.duties_service(duties_service.clone())
|
||||
.slot_clock(slot_clock)
|
||||
.validator_store(validator_store)
|
||||
.validator_store(validator_store.clone())
|
||||
.beacon_node(beacon_node)
|
||||
.runtime_context(context.service_context("attestation".into()))
|
||||
.build()?;
|
||||
@@ -194,7 +201,9 @@ impl<T: EthSpec> ProductionValidatorClient<T> {
|
||||
fork_service,
|
||||
block_service,
|
||||
attestation_service,
|
||||
validator_store,
|
||||
config,
|
||||
http_api_listen_addr: None,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -204,6 +213,7 @@ impl<T: EthSpec> ProductionValidatorClient<T> {
|
||||
// whole epoch!
|
||||
let channel_capacity = T::slots_per_epoch() as usize;
|
||||
let (block_service_tx, block_service_rx) = mpsc::channel(channel_capacity);
|
||||
let log = self.context.log();
|
||||
|
||||
self.duties_service
|
||||
.clone()
|
||||
@@ -215,7 +225,7 @@ impl<T: EthSpec> ProductionValidatorClient<T> {
|
||||
|
||||
self.fork_service
|
||||
.clone()
|
||||
.start_update_service(&self.context.eth2_config.spec)
|
||||
.start_update_service(&self.context)
|
||||
.map_err(|e| format!("Unable to start fork service: {}", e))?;
|
||||
|
||||
self.block_service
|
||||
@@ -230,6 +240,35 @@ impl<T: EthSpec> ProductionValidatorClient<T> {
|
||||
|
||||
spawn_notifier(self).map_err(|e| format!("Failed to start notifier: {}", e))?;
|
||||
|
||||
let api_secret = ApiSecret::create_or_open(&self.config.validator_dir)?;
|
||||
|
||||
self.http_api_listen_addr = if self.config.http_api.enabled {
|
||||
let ctx: Arc<http_api::Context<SystemTimeSlotClock, T>> = Arc::new(http_api::Context {
|
||||
api_secret,
|
||||
validator_store: Some(self.validator_store.clone()),
|
||||
validator_dir: Some(self.config.validator_dir.clone()),
|
||||
spec: self.context.eth2_config.spec.clone(),
|
||||
config: self.config.http_api.clone(),
|
||||
log: log.clone(),
|
||||
_phantom: PhantomData,
|
||||
});
|
||||
|
||||
let exit = self.context.executor.exit();
|
||||
|
||||
let (listen_addr, server) = http_api::serve(ctx, exit)
|
||||
.map_err(|e| format!("Unable to start HTTP API server: {:?}", e))?;
|
||||
|
||||
self.context
|
||||
.clone()
|
||||
.executor
|
||||
.spawn_without_exit(async move { server.await }, "http-api");
|
||||
|
||||
Some(listen_addr)
|
||||
} else {
|
||||
info!(log, "HTTP API server is disabled");
|
||||
None
|
||||
};
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -45,7 +45,12 @@ pub fn spawn_notifier<T: EthSpec>(client: &ProductionValidatorClient<T>) -> Resu
|
||||
let attesting_validators = duties_service.attester_count(epoch);
|
||||
|
||||
if total_validators == 0 {
|
||||
error!(log, "No validators present")
|
||||
info!(
|
||||
log,
|
||||
"No validators present";
|
||||
"msg" => "see `lighthouse account validator create --help` \
|
||||
or the HTTP API documentation"
|
||||
)
|
||||
} else if total_validators == attesting_validators {
|
||||
info!(
|
||||
log,
|
||||
|
||||
@@ -1,11 +1,13 @@
|
||||
use crate::{
|
||||
config::Config, fork_service::ForkService, initialized_validators::InitializedValidators,
|
||||
};
|
||||
use account_utils::{validator_definitions::ValidatorDefinition, ZeroizeString};
|
||||
use parking_lot::RwLock;
|
||||
use slashing_protection::{NotSafe, Safe, SlashingDatabase, SLASHING_PROTECTION_FILENAME};
|
||||
use slog::{crit, error, warn, Logger};
|
||||
use slot_clock::SlotClock;
|
||||
use std::marker::PhantomData;
|
||||
use std::path::Path;
|
||||
use std::sync::Arc;
|
||||
use tempdir::TempDir;
|
||||
use types::{
|
||||
@@ -47,7 +49,7 @@ pub struct ValidatorStore<T, E: EthSpec> {
|
||||
spec: Arc<ChainSpec>,
|
||||
log: Logger,
|
||||
temp_dir: Option<Arc<TempDir>>,
|
||||
fork_service: ForkService<T, E>,
|
||||
fork_service: ForkService<T>,
|
||||
_phantom: PhantomData<E>,
|
||||
}
|
||||
|
||||
@@ -57,7 +59,7 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore<T, E> {
|
||||
config: &Config,
|
||||
genesis_validators_root: Hash256,
|
||||
spec: ChainSpec,
|
||||
fork_service: ForkService<T, E>,
|
||||
fork_service: ForkService<T>,
|
||||
log: Logger,
|
||||
) -> Result<Self, String> {
|
||||
let slashing_db_path = config.validator_dir.join(SLASHING_PROTECTION_FILENAME);
|
||||
@@ -91,6 +93,43 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore<T, E> {
|
||||
})
|
||||
}
|
||||
|
||||
pub fn initialized_validators(&self) -> Arc<RwLock<InitializedValidators>> {
|
||||
self.validators.clone()
|
||||
}
|
||||
|
||||
/// Insert a new validator to `self`, where the validator is represented by an EIP-2335
|
||||
/// keystore on the filesystem.
|
||||
///
|
||||
/// This function includes:
|
||||
///
|
||||
/// - Add the validator definition to the YAML file, saving it to the filesystem.
|
||||
/// - Enable validator with the slashing protection database.
|
||||
/// - If `enable == true`, start performing duties for the validator.
|
||||
pub async fn add_validator_keystore<P: AsRef<Path>>(
|
||||
&self,
|
||||
voting_keystore_path: P,
|
||||
password: ZeroizeString,
|
||||
enable: bool,
|
||||
) -> Result<ValidatorDefinition, String> {
|
||||
let mut validator_def =
|
||||
ValidatorDefinition::new_keystore_with_password(voting_keystore_path, Some(password))
|
||||
.map_err(|e| format!("failed to create validator definitions: {:?}", e))?;
|
||||
|
||||
self.slashing_protection
|
||||
.register_validator(&validator_def.voting_public_key)
|
||||
.map_err(|e| format!("failed to register validator: {:?}", e))?;
|
||||
|
||||
validator_def.enabled = enable;
|
||||
|
||||
self.validators
|
||||
.write()
|
||||
.add_definition(validator_def.clone())
|
||||
.await
|
||||
.map_err(|e| format!("Unable to add definition: {:?}", e))?;
|
||||
|
||||
Ok(validator_def)
|
||||
}
|
||||
|
||||
/// Register all known validators with the slashing protection database.
|
||||
///
|
||||
/// Registration is required to protect against a lost or missing slashing database,
|
||||
|
||||
Reference in New Issue
Block a user