mirror of
https://github.com/sigp/lighthouse.git
synced 2026-03-15 02:42:38 +00:00
Implement VC API (#1657)
## Issue Addressed
NA
## Proposed Changes
- Implements a HTTP API for the validator client.
- Creates EIP-2335 keystores with an empty `description` field, instead of a missing `description` field. Adds option to set name.
- Be more graceful with setups without any validators (yet)
- Remove an error log when there are no validators.
- Create the `validator` dir if it doesn't exist.
- Allow building a `ValidatorDir` without a withdrawal keystore (required for the API method where we only post a voting keystore).
- Add optional `description` field to `validator_definitions.yml`
## TODO
- [x] Signature header, as per https://github.com/sigp/lighthouse/issues/1269#issuecomment-649879855
- [x] Return validator descriptions
- [x] Return deposit data
- [x] Respect the mnemonic offset
- [x] Check that mnemonic can derive returned keys
- [x] Be strict about non-localhost
- [x] Allow graceful start without any validators (+ create validator dir)
- [x] Docs final pass
- [x] Swap to EIP-2335 description field.
- [x] Fix Zeroize TODO in VC API types.
- [x] Zeroize secp256k1 key
## Endpoints
- [x] `GET /lighthouse/version`
- [x] `GET /lighthouse/health`
- [x] `GET /lighthouse/validators`
- [x] `POST /lighthouse/validators/hd`
- [x] `POST /lighthouse/validators/keystore`
- [x] `PATCH /lighthouse/validators/:validator_pubkey`
- [ ] ~~`POST /lighthouse/validators/:validator_pubkey/exit/:epoch`~~ Future works
## Additional Info
TBC
This commit is contained in:
184
validator_client/src/http_api/api_secret.rs
Normal file
184
validator_client/src/http_api/api_secret.rs
Normal file
@@ -0,0 +1,184 @@
|
||||
use eth2::lighthouse_vc::{PK_LEN, SECRET_PREFIX as PK_PREFIX};
|
||||
use rand::thread_rng;
|
||||
use ring::digest::{digest, SHA256};
|
||||
use secp256k1::{Message, PublicKey, SecretKey};
|
||||
use std::fs;
|
||||
use std::path::Path;
|
||||
use warp::Filter;
|
||||
|
||||
/// The name of the file which stores the secret key.
///
/// It is purposefully opaque to prevent users confusing it with the "secret" that they need to
/// share with API consumers (which is actually the public key).
pub const SK_FILENAME: &str = ".secp-sk";

/// Length of the raw secret key, in bytes.
pub const SK_LEN: usize = 32;

/// The name of the file which stores the public key.
///
/// For users, this public key is a "secret" that can be shared with API consumers to provide them
/// access to the API. We avoid calling it a "public" key to users, since they should not post this
/// value in a public forum.
pub const PK_FILENAME: &str = "api-token.txt";
|
||||
|
||||
/// Contains a `secp256k1` keypair that is saved-to/loaded-from disk on instantiation. The keypair
/// is used for authorization/authentication for requests/responses on the HTTP API.
///
/// Provides convenience functions to ultimately provide:
///
/// - A signature across outgoing HTTP responses, applied to the `Signature` header.
/// - Verification of proof-of-knowledge of the public key in `self` for incoming HTTP requests,
///   via the `Authorization` header.
///
/// The aforementioned scheme was first defined here:
///
/// https://github.com/sigp/lighthouse/issues/1269#issuecomment-649879855
pub struct ApiSecret {
    // Public key: shared with users as the "API token"; never described to users as "public".
    pk: PublicKey,
    // Secret key: stays on this host only; used to sign outgoing responses.
    sk: SecretKey,
}
|
||||
|
||||
impl ApiSecret {
|
||||
/// If both the secret and public keys are already on-disk, parse them and ensure they're both
|
||||
/// from the same keypair.
|
||||
///
|
||||
/// The provided `dir` is a directory containing two files, `SK_FILENAME` and `PK_FILENAME`.
|
||||
///
|
||||
/// If either the secret or public key files are missing on disk, create a new keypair and
|
||||
/// write it to disk (over-writing any existing files).
|
||||
pub fn create_or_open<P: AsRef<Path>>(dir: P) -> Result<Self, String> {
|
||||
let sk_path = dir.as_ref().join(SK_FILENAME);
|
||||
let pk_path = dir.as_ref().join(PK_FILENAME);
|
||||
|
||||
if !(sk_path.exists() && pk_path.exists()) {
|
||||
let sk = SecretKey::random(&mut thread_rng());
|
||||
let pk = PublicKey::from_secret_key(&sk);
|
||||
|
||||
fs::write(
|
||||
&sk_path,
|
||||
serde_utils::hex::encode(&sk.serialize()).as_bytes(),
|
||||
)
|
||||
.map_err(|e| e.to_string())?;
|
||||
fs::write(
|
||||
&pk_path,
|
||||
format!(
|
||||
"{}{}",
|
||||
PK_PREFIX,
|
||||
serde_utils::hex::encode(&pk.serialize_compressed()[..])
|
||||
)
|
||||
.as_bytes(),
|
||||
)
|
||||
.map_err(|e| e.to_string())?;
|
||||
}
|
||||
|
||||
let sk = fs::read(&sk_path)
|
||||
.map_err(|e| format!("cannot read {}: {}", SK_FILENAME, e))
|
||||
.and_then(|bytes| {
|
||||
serde_utils::hex::decode(&String::from_utf8_lossy(&bytes))
|
||||
.map_err(|_| format!("{} should be 0x-prefixed hex", PK_FILENAME))
|
||||
})
|
||||
.and_then(|bytes| {
|
||||
if bytes.len() == SK_LEN {
|
||||
let mut array = [0; SK_LEN];
|
||||
array.copy_from_slice(&bytes);
|
||||
SecretKey::parse(&array).map_err(|e| format!("invalid {}: {}", SK_FILENAME, e))
|
||||
} else {
|
||||
Err(format!(
|
||||
"{} expected {} bytes not {}",
|
||||
SK_FILENAME,
|
||||
SK_LEN,
|
||||
bytes.len()
|
||||
))
|
||||
}
|
||||
})?;
|
||||
|
||||
let pk = fs::read(&pk_path)
|
||||
.map_err(|e| format!("cannot read {}: {}", PK_FILENAME, e))
|
||||
.and_then(|bytes| {
|
||||
let hex =
|
||||
String::from_utf8(bytes).map_err(|_| format!("{} is not utf8", SK_FILENAME))?;
|
||||
if hex.starts_with(PK_PREFIX) {
|
||||
serde_utils::hex::decode(&hex[PK_PREFIX.len()..])
|
||||
.map_err(|_| format!("{} should be 0x-prefixed hex", SK_FILENAME))
|
||||
} else {
|
||||
Err(format!("unable to parse {}", SK_FILENAME))
|
||||
}
|
||||
})
|
||||
.and_then(|bytes| {
|
||||
if bytes.len() == PK_LEN {
|
||||
let mut array = [0; PK_LEN];
|
||||
array.copy_from_slice(&bytes);
|
||||
PublicKey::parse_compressed(&array)
|
||||
.map_err(|e| format!("invalid {}: {}", PK_FILENAME, e))
|
||||
} else {
|
||||
Err(format!(
|
||||
"{} expected {} bytes not {}",
|
||||
PK_FILENAME,
|
||||
PK_LEN,
|
||||
bytes.len()
|
||||
))
|
||||
}
|
||||
})?;
|
||||
|
||||
// Ensure that the keys loaded from disk are indeed a pair.
|
||||
if PublicKey::from_secret_key(&sk) != pk {
|
||||
fs::remove_file(&sk_path)
|
||||
.map_err(|e| format!("unable to remove {}: {}", SK_FILENAME, e))?;
|
||||
fs::remove_file(&pk_path)
|
||||
.map_err(|e| format!("unable to remove {}: {}", PK_FILENAME, e))?;
|
||||
return Err(format!(
|
||||
"{:?} does not match {:?} and the files have been deleted. Please try again.",
|
||||
sk_path, pk_path
|
||||
));
|
||||
}
|
||||
|
||||
Ok(Self { sk, pk })
|
||||
}
|
||||
|
||||
/// Returns the public key of `self` as a 0x-prefixed hex string.
|
||||
fn pubkey_string(&self) -> String {
|
||||
serde_utils::hex::encode(&self.pk.serialize_compressed()[..])
|
||||
}
|
||||
|
||||
/// Returns the API token.
|
||||
pub fn api_token(&self) -> String {
|
||||
format!("{}{}", PK_PREFIX, self.pubkey_string())
|
||||
}
|
||||
|
||||
/// Returns the value of the `Authorization` header which is used for verifying incoming HTTP
|
||||
/// requests.
|
||||
fn auth_header_value(&self) -> String {
|
||||
format!("Basic {}", self.api_token())
|
||||
}
|
||||
|
||||
/// Returns a `warp` header which filters out request that have a missing or inaccurate
|
||||
/// `Authorization` header.
|
||||
pub fn authorization_header_filter(&self) -> warp::filters::BoxedFilter<()> {
|
||||
let expected = self.auth_header_value();
|
||||
warp::any()
|
||||
.map(move || expected.clone())
|
||||
.and(warp::filters::header::header("Authorization"))
|
||||
.and_then(move |expected: String, header: String| async move {
|
||||
if header == expected {
|
||||
Ok(())
|
||||
} else {
|
||||
Err(warp_utils::reject::invalid_auth(header))
|
||||
}
|
||||
})
|
||||
.untuple_one()
|
||||
.boxed()
|
||||
}
|
||||
|
||||
/// Returns a closure which produces a signature over some bytes using the secret key in
|
||||
/// `self`. The signature is a 32-byte hash formatted as a 0x-prefixed string.
|
||||
pub fn signer(&self) -> impl Fn(&[u8]) -> String + Clone {
|
||||
let sk = self.sk.clone();
|
||||
move |input: &[u8]| -> String {
|
||||
let message =
|
||||
Message::parse_slice(digest(&SHA256, input).as_ref()).expect("sha256 is 32 bytes");
|
||||
let (signature, _) = secp256k1::sign(&message, &sk);
|
||||
serde_utils::hex::encode(signature.serialize_der().as_ref())
|
||||
}
|
||||
}
|
||||
}
|
||||
151
validator_client/src/http_api/create_validator.rs
Normal file
151
validator_client/src/http_api/create_validator.rs
Normal file
@@ -0,0 +1,151 @@
|
||||
use crate::ValidatorStore;
|
||||
use account_utils::{
|
||||
eth2_wallet::{bip39::Mnemonic, WalletBuilder},
|
||||
random_mnemonic, random_password, ZeroizeString,
|
||||
};
|
||||
use eth2::lighthouse_vc::types::{self as api_types};
|
||||
use slot_clock::SlotClock;
|
||||
use std::path::Path;
|
||||
use types::ChainSpec;
|
||||
use types::EthSpec;
|
||||
use validator_dir::Builder as ValidatorDirBuilder;
|
||||
|
||||
/// Create some validator EIP-2335 keystores and store them on disk. Then, enroll the validators in
|
||||
/// this validator client.
|
||||
///
|
||||
/// Returns the list of created validators and the mnemonic used to derive them via EIP-2334.
|
||||
///
|
||||
/// ## Detail
|
||||
///
|
||||
/// If `mnemonic_opt` is not supplied it will be randomly generated and returned in the response.
|
||||
///
|
||||
/// If `key_derivation_path_offset` is supplied then the EIP-2334 validator index will start at
|
||||
/// this point.
|
||||
pub fn create_validators<P: AsRef<Path>, T: 'static + SlotClock, E: EthSpec>(
|
||||
mnemonic_opt: Option<Mnemonic>,
|
||||
key_derivation_path_offset: Option<u32>,
|
||||
validator_requests: &[api_types::ValidatorRequest],
|
||||
validator_dir: P,
|
||||
validator_store: &ValidatorStore<T, E>,
|
||||
spec: &ChainSpec,
|
||||
) -> Result<(Vec<api_types::CreatedValidator>, Mnemonic), warp::Rejection> {
|
||||
let mnemonic = mnemonic_opt.unwrap_or_else(random_mnemonic);
|
||||
|
||||
let wallet_password = random_password();
|
||||
let mut wallet =
|
||||
WalletBuilder::from_mnemonic(&mnemonic, wallet_password.as_bytes(), String::new())
|
||||
.and_then(|builder| builder.build())
|
||||
.map_err(|e| {
|
||||
warp_utils::reject::custom_server_error(format!(
|
||||
"unable to create EIP-2386 wallet: {:?}",
|
||||
e
|
||||
))
|
||||
})?;
|
||||
|
||||
if let Some(nextaccount) = key_derivation_path_offset {
|
||||
wallet.set_nextaccount(nextaccount).map_err(|()| {
|
||||
warp_utils::reject::custom_server_error("unable to set wallet nextaccount".to_string())
|
||||
})?;
|
||||
}
|
||||
|
||||
let mut validators = Vec::with_capacity(validator_requests.len());
|
||||
|
||||
for request in validator_requests {
|
||||
let voting_password = random_password();
|
||||
let withdrawal_password = random_password();
|
||||
let voting_password_string = ZeroizeString::from(
|
||||
String::from_utf8(voting_password.as_bytes().to_vec()).map_err(|e| {
|
||||
warp_utils::reject::custom_server_error(format!(
|
||||
"locally generated password is not utf8: {:?}",
|
||||
e
|
||||
))
|
||||
})?,
|
||||
);
|
||||
|
||||
let mut keystores = wallet
|
||||
.next_validator(
|
||||
wallet_password.as_bytes(),
|
||||
voting_password.as_bytes(),
|
||||
withdrawal_password.as_bytes(),
|
||||
)
|
||||
.map_err(|e| {
|
||||
warp_utils::reject::custom_server_error(format!(
|
||||
"unable to create validator keys: {:?}",
|
||||
e
|
||||
))
|
||||
})?;
|
||||
|
||||
keystores
|
||||
.voting
|
||||
.set_description(request.description.clone());
|
||||
keystores
|
||||
.withdrawal
|
||||
.set_description(request.description.clone());
|
||||
|
||||
let voting_pubkey = format!("0x{}", keystores.voting.pubkey())
|
||||
.parse()
|
||||
.map_err(|e| {
|
||||
warp_utils::reject::custom_server_error(format!(
|
||||
"created invalid public key: {:?}",
|
||||
e
|
||||
))
|
||||
})?;
|
||||
|
||||
let validator_dir = ValidatorDirBuilder::new(validator_dir.as_ref().into())
|
||||
.voting_keystore(keystores.voting, voting_password.as_bytes())
|
||||
.withdrawal_keystore(keystores.withdrawal, withdrawal_password.as_bytes())
|
||||
.create_eth1_tx_data(request.deposit_gwei, &spec)
|
||||
.store_withdrawal_keystore(false)
|
||||
.build()
|
||||
.map_err(|e| {
|
||||
warp_utils::reject::custom_server_error(format!(
|
||||
"failed to build validator directory: {:?}",
|
||||
e
|
||||
))
|
||||
})?;
|
||||
|
||||
let eth1_deposit_data = validator_dir
|
||||
.eth1_deposit_data()
|
||||
.map_err(|e| {
|
||||
warp_utils::reject::custom_server_error(format!(
|
||||
"failed to read local deposit data: {:?}",
|
||||
e
|
||||
))
|
||||
})?
|
||||
.ok_or_else(|| {
|
||||
warp_utils::reject::custom_server_error(
|
||||
"failed to create local deposit data: {:?}".to_string(),
|
||||
)
|
||||
})?;
|
||||
|
||||
if eth1_deposit_data.deposit_data.amount != request.deposit_gwei {
|
||||
return Err(warp_utils::reject::custom_server_error(format!(
|
||||
"invalid deposit_gwei {}, expected {}",
|
||||
eth1_deposit_data.deposit_data.amount, request.deposit_gwei
|
||||
)));
|
||||
}
|
||||
|
||||
tokio::runtime::Handle::current()
|
||||
.block_on(validator_store.add_validator_keystore(
|
||||
validator_dir.voting_keystore_path(),
|
||||
voting_password_string,
|
||||
request.enable,
|
||||
))
|
||||
.map_err(|e| {
|
||||
warp_utils::reject::custom_server_error(format!(
|
||||
"failed to initialize validator: {:?}",
|
||||
e
|
||||
))
|
||||
})?;
|
||||
|
||||
validators.push(api_types::CreatedValidator {
|
||||
enabled: request.enable,
|
||||
description: request.description.clone(),
|
||||
voting_pubkey,
|
||||
eth1_deposit_tx_data: serde_utils::hex::encode(ð1_deposit_data.rlp),
|
||||
deposit_gwei: request.deposit_gwei,
|
||||
});
|
||||
}
|
||||
|
||||
Ok((validators, mnemonic))
|
||||
}
|
||||
488
validator_client/src/http_api/mod.rs
Normal file
488
validator_client/src/http_api/mod.rs
Normal file
@@ -0,0 +1,488 @@
|
||||
mod api_secret;
|
||||
mod create_validator;
|
||||
mod tests;
|
||||
|
||||
use crate::ValidatorStore;
|
||||
use account_utils::mnemonic_from_phrase;
|
||||
use create_validator::create_validators;
|
||||
use eth2::lighthouse_vc::types::{self as api_types, PublicKey, PublicKeyBytes};
|
||||
use lighthouse_version::version_with_platform;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use slog::{crit, info, Logger};
|
||||
use slot_clock::SlotClock;
|
||||
use std::future::Future;
|
||||
use std::marker::PhantomData;
|
||||
use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4};
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
use types::{ChainSpec, EthSpec, YamlConfig};
|
||||
use validator_dir::Builder as ValidatorDirBuilder;
|
||||
use warp::{
|
||||
http::{
|
||||
header::{HeaderValue, CONTENT_TYPE},
|
||||
response::Response,
|
||||
StatusCode,
|
||||
},
|
||||
Filter,
|
||||
};
|
||||
|
||||
pub use api_secret::ApiSecret;
|
||||
|
||||
/// Errors that can occur whilst starting or running the HTTP API server.
#[derive(Debug)]
pub enum Error {
    /// The underlying `warp` server failed (e.g., unable to bind the listen address).
    Warp(warp::Error),
    /// Catch-all for other configuration or startup failures.
    Other(String),
}
|
||||
|
||||
// Allows `?` to convert raw `warp` errors into our `Error` type.
impl From<warp::Error> for Error {
    fn from(e: warp::Error) -> Self {
        Error::Warp(e)
    }
}

// Allows `?` to convert plain `String` error messages into our `Error` type.
impl From<String> for Error {
    fn from(e: String) -> Self {
        Error::Other(e)
    }
}
|
||||
|
||||
/// A wrapper around all the items required to spawn the HTTP server.
///
/// The server will gracefully handle the case where any fields are `None`.
pub struct Context<T: Clone, E: EthSpec> {
    // Keypair used to sign outgoing responses and authenticate incoming requests.
    pub api_secret: ApiSecret,
    // Access to the validators known to this client; `None` if not yet initialized.
    pub validator_store: Option<ValidatorStore<T, E>>,
    // Directory where newly-created validator keystores are written; `None` if absent.
    pub validator_dir: Option<PathBuf>,
    pub spec: ChainSpec,
    pub config: Config,
    pub log: Logger,
    pub _phantom: PhantomData<E>,
}
|
||||
|
||||
/// Configuration for the HTTP server.
#[derive(PartialEq, Debug, Clone, Serialize, Deserialize)]
pub struct Config {
    // Whether the HTTP API server should be started at all.
    pub enabled: bool,
    // IPv4 address the server binds to.
    pub listen_addr: Ipv4Addr,
    // TCP port the server binds to (0 lets the OS choose, as used in tests).
    pub listen_port: u16,
    // Optional CORS origin; `None` sends no CORS headers.
    pub allow_origin: Option<String>,
}
|
||||
|
||||
impl Default for Config {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
enabled: false,
|
||||
listen_addr: Ipv4Addr::new(127, 0, 0, 1),
|
||||
listen_port: 5062,
|
||||
allow_origin: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates a server that will serve requests using information from `ctx`.
|
||||
///
|
||||
/// The server will shut down gracefully when the `shutdown` future resolves.
|
||||
///
|
||||
/// ## Returns
|
||||
///
|
||||
/// This function will bind the server to the provided address and then return a tuple of:
|
||||
///
|
||||
/// - `SocketAddr`: the address that the HTTP server will listen on.
|
||||
/// - `Future`: the actual server future that will need to be awaited.
|
||||
///
|
||||
/// ## Errors
|
||||
///
|
||||
/// Returns an error if the server is unable to bind or there is another error during
|
||||
/// configuration.
|
||||
pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>(
|
||||
ctx: Arc<Context<T, E>>,
|
||||
shutdown: impl Future<Output = ()> + Send + Sync + 'static,
|
||||
) -> Result<(SocketAddr, impl Future<Output = ()>), Error> {
|
||||
let config = &ctx.config;
|
||||
let log = ctx.log.clone();
|
||||
let allow_origin = config.allow_origin.clone();
|
||||
|
||||
// Sanity check.
|
||||
if !config.enabled {
|
||||
crit!(log, "Cannot start disabled metrics HTTP server");
|
||||
return Err(Error::Other(
|
||||
"A disabled metrics server should not be started".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
let authorization_header_filter = ctx.api_secret.authorization_header_filter();
|
||||
let api_token = ctx.api_secret.api_token();
|
||||
let signer = ctx.api_secret.signer();
|
||||
let signer = warp::any().map(move || signer.clone());
|
||||
|
||||
let inner_validator_store = ctx.validator_store.clone();
|
||||
let validator_store_filter = warp::any()
|
||||
.map(move || inner_validator_store.clone())
|
||||
.and_then(|validator_store: Option<_>| async move {
|
||||
validator_store.ok_or_else(|| {
|
||||
warp_utils::reject::custom_not_found(
|
||||
"validator store is not initialized.".to_string(),
|
||||
)
|
||||
})
|
||||
});
|
||||
|
||||
let inner_validator_dir = ctx.validator_dir.clone();
|
||||
let validator_dir_filter = warp::any()
|
||||
.map(move || inner_validator_dir.clone())
|
||||
.and_then(|validator_dir: Option<_>| async move {
|
||||
validator_dir.ok_or_else(|| {
|
||||
warp_utils::reject::custom_not_found(
|
||||
"validator_dir directory is not initialized.".to_string(),
|
||||
)
|
||||
})
|
||||
});
|
||||
|
||||
let inner_spec = Arc::new(ctx.spec.clone());
|
||||
let spec_filter = warp::any().map(move || inner_spec.clone());
|
||||
|
||||
// GET lighthouse/version
|
||||
let get_node_version = warp::path("lighthouse")
|
||||
.and(warp::path("version"))
|
||||
.and(warp::path::end())
|
||||
.and(signer.clone())
|
||||
.and_then(|signer| {
|
||||
blocking_signed_json_task(signer, move || {
|
||||
Ok(api_types::GenericResponse::from(api_types::VersionData {
|
||||
version: version_with_platform(),
|
||||
}))
|
||||
})
|
||||
});
|
||||
|
||||
// GET lighthouse/health
|
||||
let get_lighthouse_health = warp::path("lighthouse")
|
||||
.and(warp::path("health"))
|
||||
.and(warp::path::end())
|
||||
.and(signer.clone())
|
||||
.and_then(|signer| {
|
||||
blocking_signed_json_task(signer, move || {
|
||||
eth2::lighthouse::Health::observe()
|
||||
.map(api_types::GenericResponse::from)
|
||||
.map_err(warp_utils::reject::custom_bad_request)
|
||||
})
|
||||
});
|
||||
|
||||
// GET lighthouse/spec
|
||||
let get_lighthouse_spec = warp::path("lighthouse")
|
||||
.and(warp::path("spec"))
|
||||
.and(warp::path::end())
|
||||
.and(spec_filter.clone())
|
||||
.and(signer.clone())
|
||||
.and_then(|spec: Arc<_>, signer| {
|
||||
blocking_signed_json_task(signer, move || {
|
||||
Ok(api_types::GenericResponse::from(
|
||||
YamlConfig::from_spec::<E>(&spec),
|
||||
))
|
||||
})
|
||||
});
|
||||
|
||||
// GET lighthouse/validators
|
||||
let get_lighthouse_validators = warp::path("lighthouse")
|
||||
.and(warp::path("validators"))
|
||||
.and(warp::path::end())
|
||||
.and(validator_store_filter.clone())
|
||||
.and(signer.clone())
|
||||
.and_then(|validator_store: ValidatorStore<T, E>, signer| {
|
||||
blocking_signed_json_task(signer, move || {
|
||||
let validators = validator_store
|
||||
.initialized_validators()
|
||||
.read()
|
||||
.validator_definitions()
|
||||
.iter()
|
||||
.map(|def| api_types::ValidatorData {
|
||||
enabled: def.enabled,
|
||||
description: def.description.clone(),
|
||||
voting_pubkey: PublicKeyBytes::from(&def.voting_public_key),
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
Ok(api_types::GenericResponse::from(validators))
|
||||
})
|
||||
});
|
||||
|
||||
// GET lighthouse/validators/{validator_pubkey}
|
||||
let get_lighthouse_validators_pubkey = warp::path("lighthouse")
|
||||
.and(warp::path("validators"))
|
||||
.and(warp::path::param::<PublicKey>())
|
||||
.and(warp::path::end())
|
||||
.and(validator_store_filter.clone())
|
||||
.and(signer.clone())
|
||||
.and_then(
|
||||
|validator_pubkey: PublicKey, validator_store: ValidatorStore<T, E>, signer| {
|
||||
blocking_signed_json_task(signer, move || {
|
||||
let validator = validator_store
|
||||
.initialized_validators()
|
||||
.read()
|
||||
.validator_definitions()
|
||||
.iter()
|
||||
.find(|def| def.voting_public_key == validator_pubkey)
|
||||
.map(|def| api_types::ValidatorData {
|
||||
enabled: def.enabled,
|
||||
description: def.description.clone(),
|
||||
voting_pubkey: PublicKeyBytes::from(&def.voting_public_key),
|
||||
})
|
||||
.ok_or_else(|| {
|
||||
warp_utils::reject::custom_not_found(format!(
|
||||
"no validator for {:?}",
|
||||
validator_pubkey
|
||||
))
|
||||
})?;
|
||||
|
||||
Ok(api_types::GenericResponse::from(validator))
|
||||
})
|
||||
},
|
||||
);
|
||||
|
||||
// POST lighthouse/validators/
|
||||
let post_validators = warp::path("lighthouse")
|
||||
.and(warp::path("validators"))
|
||||
.and(warp::path::end())
|
||||
.and(warp::body::json())
|
||||
.and(validator_dir_filter.clone())
|
||||
.and(validator_store_filter.clone())
|
||||
.and(spec_filter.clone())
|
||||
.and(signer.clone())
|
||||
.and_then(
|
||||
|body: Vec<api_types::ValidatorRequest>,
|
||||
validator_dir: PathBuf,
|
||||
validator_store: ValidatorStore<T, E>,
|
||||
spec: Arc<ChainSpec>,
|
||||
signer| {
|
||||
blocking_signed_json_task(signer, move || {
|
||||
let (validators, mnemonic) = create_validators(
|
||||
None,
|
||||
None,
|
||||
&body,
|
||||
&validator_dir,
|
||||
&validator_store,
|
||||
&spec,
|
||||
)?;
|
||||
let response = api_types::PostValidatorsResponseData {
|
||||
mnemonic: mnemonic.into_phrase().into(),
|
||||
validators,
|
||||
};
|
||||
Ok(api_types::GenericResponse::from(response))
|
||||
})
|
||||
},
|
||||
);
|
||||
|
||||
// POST lighthouse/validators/mnemonic
|
||||
let post_validators_mnemonic = warp::path("lighthouse")
|
||||
.and(warp::path("validators"))
|
||||
.and(warp::path("mnemonic"))
|
||||
.and(warp::path::end())
|
||||
.and(warp::body::json())
|
||||
.and(validator_dir_filter.clone())
|
||||
.and(validator_store_filter.clone())
|
||||
.and(spec_filter)
|
||||
.and(signer.clone())
|
||||
.and_then(
|
||||
|body: api_types::CreateValidatorsMnemonicRequest,
|
||||
validator_dir: PathBuf,
|
||||
validator_store: ValidatorStore<T, E>,
|
||||
spec: Arc<ChainSpec>,
|
||||
signer| {
|
||||
blocking_signed_json_task(signer, move || {
|
||||
let mnemonic = mnemonic_from_phrase(body.mnemonic.as_str()).map_err(|e| {
|
||||
warp_utils::reject::custom_bad_request(format!("invalid mnemonic: {:?}", e))
|
||||
})?;
|
||||
let (validators, _mnemonic) = create_validators(
|
||||
Some(mnemonic),
|
||||
Some(body.key_derivation_path_offset),
|
||||
&body.validators,
|
||||
&validator_dir,
|
||||
&validator_store,
|
||||
&spec,
|
||||
)?;
|
||||
Ok(api_types::GenericResponse::from(validators))
|
||||
})
|
||||
},
|
||||
);
|
||||
|
||||
// POST lighthouse/validators/keystore
|
||||
let post_validators_keystore = warp::path("lighthouse")
|
||||
.and(warp::path("validators"))
|
||||
.and(warp::path("keystore"))
|
||||
.and(warp::path::end())
|
||||
.and(warp::body::json())
|
||||
.and(validator_dir_filter)
|
||||
.and(validator_store_filter.clone())
|
||||
.and(signer.clone())
|
||||
.and_then(
|
||||
|body: api_types::KeystoreValidatorsPostRequest,
|
||||
validator_dir: PathBuf,
|
||||
validator_store: ValidatorStore<T, E>,
|
||||
signer| {
|
||||
blocking_signed_json_task(signer, move || {
|
||||
// Check to ensure the password is correct.
|
||||
let keypair = body
|
||||
.keystore
|
||||
.decrypt_keypair(body.password.as_ref())
|
||||
.map_err(|e| {
|
||||
warp_utils::reject::custom_bad_request(format!(
|
||||
"invalid keystore: {:?}",
|
||||
e
|
||||
))
|
||||
})?;
|
||||
|
||||
let validator_dir = ValidatorDirBuilder::new(validator_dir.clone())
|
||||
.voting_keystore(body.keystore.clone(), body.password.as_ref())
|
||||
.store_withdrawal_keystore(false)
|
||||
.build()
|
||||
.map_err(|e| {
|
||||
warp_utils::reject::custom_server_error(format!(
|
||||
"failed to build validator directory: {:?}",
|
||||
e
|
||||
))
|
||||
})?;
|
||||
|
||||
let voting_password = body.password.clone();
|
||||
|
||||
let validator_def = tokio::runtime::Handle::current()
|
||||
.block_on(validator_store.add_validator_keystore(
|
||||
validator_dir.voting_keystore_path(),
|
||||
voting_password,
|
||||
body.enable,
|
||||
))
|
||||
.map_err(|e| {
|
||||
warp_utils::reject::custom_server_error(format!(
|
||||
"failed to initialize validator: {:?}",
|
||||
e
|
||||
))
|
||||
})?;
|
||||
|
||||
Ok(api_types::GenericResponse::from(api_types::ValidatorData {
|
||||
enabled: body.enable,
|
||||
description: validator_def.description,
|
||||
voting_pubkey: keypair.pk.into(),
|
||||
}))
|
||||
})
|
||||
},
|
||||
);
|
||||
|
||||
// PATCH lighthouse/validators/{validator_pubkey}
|
||||
let patch_validators = warp::path("lighthouse")
|
||||
.and(warp::path("validators"))
|
||||
.and(warp::path::param::<PublicKey>())
|
||||
.and(warp::path::end())
|
||||
.and(warp::body::json())
|
||||
.and(validator_store_filter)
|
||||
.and(signer)
|
||||
.and_then(
|
||||
|validator_pubkey: PublicKey,
|
||||
body: api_types::ValidatorPatchRequest,
|
||||
validator_store: ValidatorStore<T, E>,
|
||||
signer| {
|
||||
blocking_signed_json_task(signer, move || {
|
||||
let initialized_validators_rw_lock = validator_store.initialized_validators();
|
||||
let mut initialized_validators = initialized_validators_rw_lock.write();
|
||||
|
||||
match initialized_validators.is_enabled(&validator_pubkey) {
|
||||
None => Err(warp_utils::reject::custom_not_found(format!(
|
||||
"no validator for {:?}",
|
||||
validator_pubkey
|
||||
))),
|
||||
Some(enabled) if enabled == body.enabled => Ok(()),
|
||||
Some(_) => {
|
||||
tokio::runtime::Handle::current()
|
||||
.block_on(
|
||||
initialized_validators
|
||||
.set_validator_status(&validator_pubkey, body.enabled),
|
||||
)
|
||||
.map_err(|e| {
|
||||
warp_utils::reject::custom_server_error(format!(
|
||||
"unable to set validator status: {:?}",
|
||||
e
|
||||
))
|
||||
})?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
})
|
||||
},
|
||||
);
|
||||
|
||||
let routes = warp::any()
|
||||
.and(authorization_header_filter)
|
||||
.and(
|
||||
warp::get().and(
|
||||
get_node_version
|
||||
.or(get_lighthouse_health)
|
||||
.or(get_lighthouse_spec)
|
||||
.or(get_lighthouse_validators)
|
||||
.or(get_lighthouse_validators_pubkey),
|
||||
),
|
||||
)
|
||||
.or(warp::post().and(
|
||||
post_validators
|
||||
.or(post_validators_keystore)
|
||||
.or(post_validators_mnemonic),
|
||||
))
|
||||
.or(warp::patch().and(patch_validators))
|
||||
// Maps errors into HTTP responses.
|
||||
.recover(warp_utils::reject::handle_rejection)
|
||||
// Add a `Server` header.
|
||||
.map(|reply| warp::reply::with_header(reply, "Server", &version_with_platform()))
|
||||
// Maybe add some CORS headers.
|
||||
.map(move |reply| warp_utils::reply::maybe_cors(reply, allow_origin.as_ref()));
|
||||
|
||||
let (listening_socket, server) = warp::serve(routes).try_bind_with_graceful_shutdown(
|
||||
SocketAddrV4::new(config.listen_addr, config.listen_port),
|
||||
async {
|
||||
shutdown.await;
|
||||
},
|
||||
)?;
|
||||
|
||||
info!(
|
||||
log,
|
||||
"HTTP API started";
|
||||
"listen_address" => listening_socket.to_string(),
|
||||
"api_token" => api_token,
|
||||
);
|
||||
|
||||
Ok((listening_socket, server))
|
||||
}
|
||||
|
||||
/// Executes `func` in blocking tokio task (i.e., where long-running tasks are permitted).
|
||||
/// JSON-encodes the return value of `func`, using the `signer` function to produce a signature of
|
||||
/// those bytes.
|
||||
pub async fn blocking_signed_json_task<S, F, T>(
|
||||
signer: S,
|
||||
func: F,
|
||||
) -> Result<impl warp::Reply, warp::Rejection>
|
||||
where
|
||||
S: Fn(&[u8]) -> String,
|
||||
F: Fn() -> Result<T, warp::Rejection>,
|
||||
T: Serialize,
|
||||
{
|
||||
warp_utils::task::blocking_task(func)
|
||||
.await
|
||||
.map(|func_output| {
|
||||
let mut response = match serde_json::to_vec(&func_output) {
|
||||
Ok(body) => {
|
||||
let mut res = Response::new(body);
|
||||
res.headers_mut()
|
||||
.insert(CONTENT_TYPE, HeaderValue::from_static("application/json"));
|
||||
res
|
||||
}
|
||||
Err(_) => Response::builder()
|
||||
.status(StatusCode::INTERNAL_SERVER_ERROR)
|
||||
.body(vec![])
|
||||
.expect("can produce simple response from static values"),
|
||||
};
|
||||
|
||||
let body: &Vec<u8> = response.body();
|
||||
let signature = signer(body);
|
||||
let header_value =
|
||||
HeaderValue::from_str(&signature).expect("hash can be encoded as header");
|
||||
|
||||
response.headers_mut().append("Signature", header_value);
|
||||
|
||||
response
|
||||
})
|
||||
}
|
||||
527
validator_client/src/http_api/tests.rs
Normal file
527
validator_client/src/http_api/tests.rs
Normal file
@@ -0,0 +1,527 @@
|
||||
#![cfg(test)]
|
||||
#![cfg(not(debug_assertions))]
|
||||
|
||||
use crate::{
|
||||
http_api::{ApiSecret, Config as HttpConfig, Context},
|
||||
Config, ForkServiceBuilder, InitializedValidators, ValidatorDefinitions, ValidatorStore,
|
||||
};
|
||||
use account_utils::{
|
||||
eth2_wallet::WalletBuilder, mnemonic_from_phrase, random_mnemonic, random_password,
|
||||
ZeroizeString,
|
||||
};
|
||||
use deposit_contract::decode_eth1_tx_data;
|
||||
use environment::null_logger;
|
||||
use eth2::{
|
||||
lighthouse_vc::{http_client::ValidatorClientHttpClient, types::*},
|
||||
Url,
|
||||
};
|
||||
use eth2_keystore::KeystoreBuilder;
|
||||
use parking_lot::RwLock;
|
||||
use slot_clock::TestingSlotClock;
|
||||
use std::marker::PhantomData;
|
||||
use std::net::Ipv4Addr;
|
||||
use std::sync::Arc;
|
||||
use tempfile::{tempdir, TempDir};
|
||||
use tokio::sync::oneshot;
|
||||
|
||||
// Arbitrary password bytes used when building test keystores.
const PASSWORD_BYTES: &[u8] = &[42, 13, 37];

// All tests run against the mainnet spec.
type E = MainnetEthSpec;
|
||||
|
||||
/// Test harness: a running VC HTTP API server plus an authenticated client pointed at it.
struct ApiTester {
    // Authenticated HTTP client for the server under test.
    client: ValidatorClientHttpClient,
    // Direct handle to the server's validator collection, for asserting on internal state.
    initialized_validators: Arc<RwLock<InitializedValidators>>,
    url: Url,
    // Kept alive so the server keeps running; dropping the sender triggers shutdown.
    _server_shutdown: oneshot::Sender<()>,
    // Kept alive so the temporary validator directory outlives the test.
    _validator_dir: TempDir,
}
|
||||
|
||||
impl ApiTester {
    // Build the harness: open empty validator definitions in a fresh temp dir,
    // create an API secret, start the HTTP server on 127.0.0.1 with an
    // OS-assigned port, then connect a client using the server's API token.
    pub async fn new() -> Self {
        let log = null_logger().unwrap();

        // Temporary directories; `validator_dir` is kept alive via the returned
        // struct (`_validator_dir`).
        let validator_dir = tempdir().unwrap();
        let secrets_dir = tempdir().unwrap();

        // Starts with zero validators; tests add them via the API.
        let validator_defs = ValidatorDefinitions::open_or_create(validator_dir.path()).unwrap();

        let initialized_validators = InitializedValidators::from_definitions(
            validator_defs,
            validator_dir.path().into(),
            false,
            log.clone(),
        )
        .await
        .unwrap();

        // The API secret supplies the token the client must present on each request.
        let api_secret = ApiSecret::create_or_open(validator_dir.path()).unwrap();
        let api_pubkey = api_secret.api_token();

        let mut config = Config::default();
        config.validator_dir = validator_dir.path().into();
        config.secrets_dir = secrets_dir.path().into();

        let fork_service = ForkServiceBuilder::testing_only(log.clone())
            .build()
            .unwrap();

        let validator_store: ValidatorStore<TestingSlotClock, E> = ValidatorStore::new(
            initialized_validators,
            &config,
            Hash256::repeat_byte(42),
            E::default_spec(),
            fork_service.clone(),
            log.clone(),
        )
        .unwrap();

        // Re-obtain the validators handle now owned by the store, so tests can
        // inspect server-side state directly.
        let initialized_validators = validator_store.initialized_validators();

        let context: Arc<Context<TestingSlotClock, E>> = Arc::new(Context {
            api_secret,
            validator_dir: Some(validator_dir.path().into()),
            validator_store: Some(validator_store),
            spec: E::default_spec(),
            config: HttpConfig {
                enabled: true,
                listen_addr: Ipv4Addr::new(127, 0, 0, 1),
                // Port 0 lets the OS choose a free port; the real address is read
                // back from `listening_socket` below.
                listen_port: 0,
                allow_origin: None,
            },
            log,
            _phantom: PhantomData,
        });
        let ctx = context.clone();
        // Dropping the sender (held in the returned struct) resolves this future
        // and shuts the server down.
        let (shutdown_tx, shutdown_rx) = oneshot::channel();
        let server_shutdown = async {
            // It's not really interesting why this triggered, just that it happened.
            let _ = shutdown_rx.await;
        };
        let (listening_socket, server) = super::serve(ctx, server_shutdown).unwrap();

        tokio::spawn(async { server.await });

        let url = Url::parse(&format!(
            "http://{}:{}",
            listening_socket.ip(),
            listening_socket.port()
        ))
        .unwrap();

        let client = ValidatorClientHttpClient::new(url.clone(), api_pubkey).unwrap();

        Self {
            initialized_validators,
            _validator_dir: validator_dir,
            client,
            url,
            _server_shutdown: shutdown_tx,
        }
    }

    // Replace the client with one using a token from a *different* `ApiSecret`,
    // so subsequent requests should be rejected by the server.
    pub fn invalidate_api_token(mut self) -> Self {
        let tmp = tempdir().unwrap();
        let api_secret = ApiSecret::create_or_open(tmp.path()).unwrap();
        let invalid_pubkey = api_secret.api_token();

        self.client = ValidatorClientHttpClient::new(self.url.clone(), invalid_pubkey).unwrap();
        self
    }

    // Expect the version endpoint to fail (used after `invalidate_api_token`).
    pub async fn test_get_lighthouse_version_invalid(self) -> Self {
        self.client.get_lighthouse_version().await.unwrap_err();
        self
    }

    // The spec endpoint must return the default spec for `E`.
    pub async fn test_get_lighthouse_spec(self) -> Self {
        let result = self.client.get_lighthouse_spec().await.unwrap().data;

        let expected = YamlConfig::from_spec::<E>(&E::default_spec());

        assert_eq!(result, expected);

        self
    }

    // The version endpoint must report this build's version string.
    pub async fn test_get_lighthouse_version(self) -> Self {
        let result = self.client.get_lighthouse_version().await.unwrap().data;

        let expected = VersionData {
            version: lighthouse_version::version_with_platform(),
        };

        assert_eq!(result, expected);

        self
    }

    // The health endpoint is expected to succeed on Linux...
    #[cfg(target_os = "linux")]
    pub async fn test_get_lighthouse_health(self) -> Self {
        self.client.get_lighthouse_health().await.unwrap();

        self
    }

    // ...and to return an error on every other platform.
    #[cfg(not(target_os = "linux"))]
    pub async fn test_get_lighthouse_health(self) -> Self {
        self.client.get_lighthouse_health().await.unwrap_err();

        self
    }

    // Total number of known validators (enabled and disabled), read from
    // server-side state rather than via HTTP.
    pub fn vals_total(&self) -> usize {
        self.initialized_validators.read().num_total()
    }

    // Number of enabled validators, read from server-side state.
    pub fn vals_enabled(&self) -> usize {
        self.initialized_validators.read().num_enabled()
    }

    pub fn assert_enabled_validators_count(self, count: usize) -> Self {
        assert_eq!(self.vals_enabled(), count);
        self
    }

    pub fn assert_validators_count(self, count: usize) -> Self {
        assert_eq!(self.vals_total(), count);
        self
    }

    // Create `s.count` HD (mnemonic-derived) validators via the API and verify:
    // - validator counts (total/enabled) changed as expected,
    // - the server lists each newly created validator,
    // - keys and deposit data can be regenerated locally from the mnemonic.
    pub async fn create_hd_validators(self, s: HdValidatorScenario) -> Self {
        let initial_vals = self.vals_total();
        let initial_enabled_vals = self.vals_enabled();

        let validators = (0..s.count)
            .map(|i| ValidatorRequest {
                enable: !s.disabled.contains(&i),
                description: format!("boi #{}", i),
                deposit_gwei: E::default_spec().max_effective_balance,
            })
            .collect::<Vec<_>>();

        let (response, mnemonic) = if s.specify_mnemonic {
            // Caller supplies the mnemonic (and optionally a derivation offset).
            let mnemonic = ZeroizeString::from(random_mnemonic().phrase().to_string());
            let request = CreateValidatorsMnemonicRequest {
                mnemonic: mnemonic.clone(),
                key_derivation_path_offset: s.key_derivation_path_offset,
                validators: validators.clone(),
            };
            let response = self
                .client
                .post_lighthouse_validators_mnemonic(&request)
                .await
                .unwrap()
                .data;

            (response, mnemonic)
        } else {
            // The server generates the mnemonic; a derivation offset is
            // meaningless in this case.
            assert_eq!(
                s.key_derivation_path_offset, 0,
                "cannot use a derivation offset without specifying a mnemonic"
            );
            let response = self
                .client
                .post_lighthouse_validators(validators.clone())
                .await
                .unwrap()
                .data;
            (response.validators.clone(), response.mnemonic.clone())
        };

        assert_eq!(response.len(), s.count);
        assert_eq!(self.vals_total(), initial_vals + s.count);
        assert_eq!(
            self.vals_enabled(),
            initial_enabled_vals + s.count - s.disabled.len()
        );

        let server_vals = self.client.get_lighthouse_validators().await.unwrap().data;

        assert_eq!(server_vals.len(), self.vals_total());

        // Ensure the server lists all of these newly created validators.
        for validator in &response {
            assert!(server_vals
                .iter()
                .any(|server_val| server_val.voting_pubkey == validator.voting_pubkey));
        }

        /*
         * Verify that we can regenerate all the keys from the mnemonic.
         */

        // The wallet password only protects the locally-built keystores; the
        // derived pubkeys depend solely on the mnemonic and derivation path.
        let mnemonic = mnemonic_from_phrase(mnemonic.as_str()).unwrap();
        let mut wallet = WalletBuilder::from_mnemonic(&mnemonic, PASSWORD_BYTES, "".to_string())
            .unwrap()
            .build()
            .unwrap();

        // Skip past any accounts consumed by the requested derivation offset.
        wallet
            .set_nextaccount(s.key_derivation_path_offset)
            .unwrap();

        for i in 0..s.count {
            let keypairs = wallet
                .next_validator(PASSWORD_BYTES, PASSWORD_BYTES, PASSWORD_BYTES)
                .unwrap();
            let voting_keypair = keypairs.voting.decrypt_keypair(PASSWORD_BYTES).unwrap();

            assert_eq!(
                response[i].voting_pubkey,
                voting_keypair.pk.clone().into(),
                "the locally generated voting pk should match the server response"
            );

            let withdrawal_keypair = keypairs.withdrawal.decrypt_keypair(PASSWORD_BYTES).unwrap();

            let deposit_bytes =
                serde_utils::hex::decode(&response[i].eth1_deposit_tx_data).unwrap();

            let (deposit_data, _) =
                decode_eth1_tx_data(&deposit_bytes, E::default_spec().max_effective_balance)
                    .unwrap();

            assert_eq!(
                deposit_data.pubkey,
                voting_keypair.pk.clone().into(),
                "the locally generated voting pk should match the deposit data"
            );

            assert_eq!(
                deposit_data.withdrawal_credentials,
                Hash256::from_slice(&bls::get_withdrawal_credentials(
                    &withdrawal_keypair.pk,
                    E::default_spec().bls_withdrawal_prefix_byte
                )),
                "the locally generated withdrawal creds should match the deposit data"
            );

            assert_eq!(
                deposit_data.signature,
                deposit_data.create_signature(&voting_keypair.sk, &E::default_spec()),
                "the locally-generated deposit sig should create the same deposit sig"
            );
        }

        self
    }

    // Import a single keystore validator via the API. When `s.correct_password`
    // is false, the request must be rejected and no validator added.
    pub async fn create_keystore_validators(self, s: KeystoreValidatorScenario) -> Self {
        let initial_vals = self.vals_total();
        let initial_enabled_vals = self.vals_enabled();

        let password = random_password();
        let keypair = Keypair::random();
        let keystore = KeystoreBuilder::new(&keypair, password.as_bytes(), String::new())
            .unwrap()
            .build()
            .unwrap();

        if !s.correct_password {
            // Submit with a *different* random password; the server should fail
            // to decrypt the keystore and reject the request.
            let request = KeystoreValidatorsPostRequest {
                enable: s.enabled,
                password: String::from_utf8(random_password().as_ref().to_vec())
                    .unwrap()
                    .into(),
                keystore,
            };

            self.client
                .post_lighthouse_validators_keystore(&request)
                .await
                .unwrap_err();

            return self;
        }

        let request = KeystoreValidatorsPostRequest {
            enable: s.enabled,
            password: String::from_utf8(password.as_ref().to_vec())
                .unwrap()
                .into(),
            keystore,
        };

        let response = self
            .client
            .post_lighthouse_validators_keystore(&request)
            .await
            .unwrap()
            .data;

        let num_enabled = s.enabled as usize;

        assert_eq!(self.vals_total(), initial_vals + 1);
        assert_eq!(self.vals_enabled(), initial_enabled_vals + num_enabled);

        let server_vals = self.client.get_lighthouse_validators().await.unwrap().data;

        assert_eq!(server_vals.len(), self.vals_total());

        assert_eq!(response.voting_pubkey, keypair.pk.into());
        assert_eq!(response.enabled, s.enabled);

        self
    }

    // PATCH the `index`th listed validator to `enabled` and verify the change
    // via internal state, the list endpoint and the single-pubkey endpoint.
    pub async fn set_validator_enabled(self, index: usize, enabled: bool) -> Self {
        let validator = &self.client.get_lighthouse_validators().await.unwrap().data[index];

        self.client
            .patch_lighthouse_validators(&validator.voting_pubkey, enabled)
            .await
            .unwrap();

        // Check server-side state directly.
        assert_eq!(
            self.initialized_validators
                .read()
                .is_enabled(&validator.voting_pubkey.decompress().unwrap())
                .unwrap(),
            enabled
        );

        // Check the server via the list request.
        assert!(self
            .client
            .get_lighthouse_validators()
            .await
            .unwrap()
            .data
            .into_iter()
            .find(|v| v.voting_pubkey == validator.voting_pubkey)
            .map(|v| v.enabled == enabled)
            .unwrap());

        // Check the server via an individual request.
        assert_eq!(
            self.client
                .get_lighthouse_validators_pubkey(&validator.voting_pubkey)
                .await
                .unwrap()
                .unwrap()
                .data
                .enabled,
            enabled
        );

        self
    }
}
|
||||
|
||||
// Parameters for a single `create_hd_validators` call.
struct HdValidatorScenario {
    // How many validators to create.
    count: usize,
    // If true the client supplies the mnemonic, otherwise the server generates one.
    specify_mnemonic: bool,
    // Derivation-path offset; only meaningful when `specify_mnemonic` is true
    // (asserted to be 0 otherwise).
    key_derivation_path_offset: u32,
    // Indices (into the created batch) of validators to create as disabled.
    disabled: Vec<usize>,
}
|
||||
|
||||
// Parameters for a single `create_keystore_validators` call.
struct KeystoreValidatorScenario {
    // Whether the imported validator should be enabled.
    enabled: bool,
    // If false, submit a wrong password and expect the import to be rejected.
    correct_password: bool,
}
|
||||
|
||||
#[tokio::test(core_threads = 2)]
|
||||
async fn invalid_pubkey() {
|
||||
ApiTester::new()
|
||||
.await
|
||||
.invalidate_api_token()
|
||||
.test_get_lighthouse_version_invalid()
|
||||
.await;
|
||||
}
|
||||
|
||||
#[tokio::test(core_threads = 2)]
|
||||
async fn simple_getters() {
|
||||
ApiTester::new()
|
||||
.await
|
||||
.test_get_lighthouse_version()
|
||||
.await
|
||||
.test_get_lighthouse_health()
|
||||
.await
|
||||
.test_get_lighthouse_spec()
|
||||
.await;
|
||||
}
|
||||
|
||||
// Exercises HD validator creation: with a client-supplied mnemonic, with a
// server-generated mnemonic (one validator starting disabled), and a
// zero-validator request with a derivation offset.
#[tokio::test(core_threads = 2)]
async fn hd_validator_creation() {
    ApiTester::new()
        .await
        .assert_enabled_validators_count(0)
        .assert_validators_count(0)
        // Two enabled validators from a caller-supplied mnemonic.
        .create_hd_validators(HdValidatorScenario {
            count: 2,
            specify_mnemonic: true,
            key_derivation_path_offset: 0,
            disabled: vec![],
        })
        .await
        .assert_enabled_validators_count(2)
        .assert_validators_count(2)
        // One disabled validator from a server-generated mnemonic: total grows
        // to 3 but the enabled count stays at 2.
        .create_hd_validators(HdValidatorScenario {
            count: 1,
            specify_mnemonic: false,
            key_derivation_path_offset: 0,
            disabled: vec![0],
        })
        .await
        .assert_enabled_validators_count(2)
        .assert_validators_count(3)
        // Zero validators requested (with an offset): counts must not change.
        .create_hd_validators(HdValidatorScenario {
            count: 0,
            specify_mnemonic: true,
            key_derivation_path_offset: 4,
            disabled: vec![],
        })
        .await
        .assert_enabled_validators_count(2)
        .assert_validators_count(3);
}
|
||||
|
||||
#[tokio::test(core_threads = 2)]
|
||||
async fn validator_enabling() {
|
||||
ApiTester::new()
|
||||
.await
|
||||
.create_hd_validators(HdValidatorScenario {
|
||||
count: 2,
|
||||
specify_mnemonic: false,
|
||||
key_derivation_path_offset: 0,
|
||||
disabled: vec![],
|
||||
})
|
||||
.await
|
||||
.assert_enabled_validators_count(2)
|
||||
.assert_validators_count(2)
|
||||
.set_validator_enabled(0, false)
|
||||
.await
|
||||
.assert_enabled_validators_count(1)
|
||||
.assert_validators_count(2)
|
||||
.set_validator_enabled(0, true)
|
||||
.await
|
||||
.assert_enabled_validators_count(2)
|
||||
.assert_validators_count(2);
|
||||
}
|
||||
|
||||
// Exercises keystore import: a successful enabled import, a rejected import
// with the wrong password (counts unchanged), and a successful disabled import.
#[tokio::test(core_threads = 2)]
async fn keystore_validator_creation() {
    ApiTester::new()
        .await
        .assert_enabled_validators_count(0)
        .assert_validators_count(0)
        .create_keystore_validators(KeystoreValidatorScenario {
            correct_password: true,
            enabled: true,
        })
        .await
        .assert_enabled_validators_count(1)
        .assert_validators_count(1)
        // Wrong password: request rejected, counts unchanged.
        .create_keystore_validators(KeystoreValidatorScenario {
            correct_password: false,
            enabled: true,
        })
        .await
        .assert_enabled_validators_count(1)
        .assert_validators_count(1)
        // Disabled import: total increments, enabled count does not.
        .create_keystore_validators(KeystoreValidatorScenario {
            correct_password: true,
            enabled: false,
        })
        .await
        .assert_enabled_validators_count(1)
        .assert_validators_count(2);
}
|
||||
Reference in New Issue
Block a user