Implement standard keystore API (#2736)

## Issue Addressed

Implements the standard key manager API from https://ethereum.github.io/keymanager-APIs/, formerly https://github.com/ethereum/beacon-APIs/pull/151
Related to https://github.com/sigp/lighthouse/issues/2557

## Proposed Changes

- [x] Add all of the new endpoints from the standard API: GET, POST and DELETE.
- [x] Add a `validators.enabled` column to the slashing protection database to support atomic disable + export.
- [x] Add tests for all the common sequential accesses of the API
- [x] Add tests for interactions with remote signer validators
- [x] Add end-to-end tests for migration of validators from one VC to another
- [x] Implement the authentication scheme from the standard (token bearer auth)

## Additional Info

The `enabled` column in the validators SQL database is necessary to prevent a race condition when exporting slashing protection data. Without the slashing protection database having a way of knowing that a key has been disabled, a concurrent request to sign a message could insert a new record into the database. The `delete_concurrent_with_signing` test exercises this code path, and was indeed failing before the `enabled` column was added.

The validator client authentication has been modified from basic auth to bearer auth, with basic auth preserved for backwards compatibility.
This commit is contained in:
Michael Sproul
2022-01-30 23:22:04 +00:00
parent ee000d5219
commit e961ff60b4
32 changed files with 2284 additions and 127 deletions

View File

@@ -162,25 +162,32 @@ impl ApiSecret {
}
/// Returns the path for the API token file
pub fn api_token_path(&self) -> &PathBuf {
&self.pk_path
pub fn api_token_path(&self) -> PathBuf {
self.pk_path.clone()
}
/// Returns the value of the `Authorization` header which is used for verifying incoming HTTP
/// requests.
fn auth_header_value(&self) -> String {
format!("Basic {}", self.api_token())
/// Returns the values of the `Authorization` header which indicate a valid incoming HTTP
/// request.
///
/// For backwards-compatibility we accept the token in a basic authentication style, but this is
/// technically invalid according to RFC 7617 because the token is not a base64-encoded username
/// and password. As such, bearer authentication should be preferred.
fn auth_header_values(&self) -> Vec<String> {
vec![
format!("Basic {}", self.api_token()),
format!("Bearer {}", self.api_token()),
]
}
/// Returns a `warp` header which filters out request that have a missing or inaccurate
/// `Authorization` header.
pub fn authorization_header_filter(&self) -> warp::filters::BoxedFilter<()> {
let expected = self.auth_header_value();
let expected = self.auth_header_values();
warp::any()
.map(move || expected.clone())
.and(warp::filters::header::header("Authorization"))
.and_then(move |expected: String, header: String| async move {
if header == expected {
.and_then(move |expected: Vec<String>, header: String| async move {
if expected.contains(&header) {
Ok(())
} else {
Err(warp_utils::reject::invalid_auth(header))

View File

@@ -0,0 +1,290 @@
//! Implementation of the standard keystore management API.
use crate::{signing_method::SigningMethod, InitializedValidators, ValidatorStore};
use account_utils::ZeroizeString;
use eth2::lighthouse_vc::std_types::{
DeleteKeystoreStatus, DeleteKeystoresRequest, DeleteKeystoresResponse, ImportKeystoreStatus,
ImportKeystoresRequest, ImportKeystoresResponse, InterchangeJsonStr, KeystoreJsonStr,
ListKeystoresResponse, SingleKeystoreResponse, Status,
};
use eth2_keystore::Keystore;
use slog::{info, warn, Logger};
use slot_clock::SlotClock;
use std::path::PathBuf;
use std::sync::Arc;
use std::sync::Weak;
use tokio::runtime::Runtime;
use types::{EthSpec, PublicKeyBytes};
use validator_dir::Builder as ValidatorDirBuilder;
use warp::Rejection;
use warp_utils::reject::{custom_bad_request, custom_server_error};
/// Build the response for `GET /eth/v1/keystores`, listing every *enabled* validator.
///
/// Local keystores report their derivation path (if any); remote (Web3Signer)
/// validators are reported as `readonly`.
pub fn list<T: SlotClock + 'static, E: EthSpec>(
    validator_store: Arc<ValidatorStore<T, E>>,
) -> ListKeystoresResponse {
    let initialized_validators_rwlock = validator_store.initialized_validators();
    let initialized_validators = initialized_validators_rwlock.read();

    let mut keystores = Vec::new();
    for def in initialized_validators.validator_definitions() {
        // Disabled validators are intentionally omitted from the listing.
        if !def.enabled {
            continue;
        }
        let validating_pubkey = def.voting_public_key.compress();

        // Distinguish local keystores (derivation path, writable) from remote
        // signers (no path, read-only). Unknown signing methods yield neither.
        let (derivation_path, readonly) =
            match initialized_validators.signing_method(&validating_pubkey) {
                Some(signing_method) => match *signing_method {
                    SigningMethod::LocalKeystore {
                        ref voting_keystore,
                        ..
                    } => (voting_keystore.path(), None),
                    SigningMethod::Web3Signer { .. } => (None, Some(true)),
                },
                None => (None, None),
            };

        keystores.push(SingleKeystoreResponse {
            validating_pubkey,
            derivation_path,
            readonly,
        });
    }

    ListKeystoresResponse { data: keystores }
}
/// Handle `POST /eth/v1/keystores`: import keystores (with optional slashing protection
/// data) into the validator client.
///
/// Returns one `Status` per keystore, in request order. Per-key failures are reported in
/// the statuses rather than failing the whole request; only a malformed request (mismatched
/// keystore/password counts) yields an HTTP error.
pub fn import<T: SlotClock + 'static, E: EthSpec>(
    request: ImportKeystoresRequest,
    validator_dir: PathBuf,
    validator_store: Arc<ValidatorStore<T, E>>,
    runtime: Weak<Runtime>,
    log: Logger,
) -> Result<ImportKeystoresResponse, Rejection> {
    // Check request validity. This is the only case in which we should return a 4xx code.
    if request.keystores.len() != request.passwords.len() {
        return Err(custom_bad_request(format!(
            "mismatched numbers of keystores ({}) and passwords ({})",
            request.keystores.len(),
            request.passwords.len(),
        )));
    }

    info!(
        log,
        "Importing keystores via standard HTTP API";
        "count" => request.keystores.len(),
    );

    // Import slashing protection data before keystores, so that new keystores don't start signing
    // without it. Do not return early on failure, propagate the failure to each key.
    let slashing_protection_status =
        if let Some(InterchangeJsonStr(slashing_protection)) = request.slashing_protection {
            // Warn for missing slashing protection.
            for KeystoreJsonStr(ref keystore) in &request.keystores {
                if let Some(public_key) = keystore.public_key() {
                    let pubkey_bytes = public_key.compress();
                    if !slashing_protection
                        .data
                        .iter()
                        .any(|data| data.pubkey == pubkey_bytes)
                    {
                        warn!(
                            log,
                            "Slashing protection data not provided";
                            "public_key" => ?public_key,
                        );
                    }
                }
            }

            validator_store.import_slashing_protection(slashing_protection)
        } else {
            warn!(log, "No slashing protection data provided with keystores");
            Ok(())
        };

    // Import each keystore. Some keystores may fail to be imported, so we record a status for each.
    let mut statuses = Vec::with_capacity(request.keystores.len());

    for (KeystoreJsonStr(keystore), password) in request
        .keystores
        .into_iter()
        .zip(request.passwords.into_iter())
    {
        // Capture the pubkey string up-front; `keystore` is moved by the import below.
        let pubkey_str = keystore.pubkey().to_string();

        let status = if let Err(e) = &slashing_protection_status {
            // Slashing protection import failed, do not attempt to import the key. Record an
            // error status.
            Status::error(
                ImportKeystoreStatus::Error,
                format!("slashing protection import failed: {:?}", e),
            )
        } else if let Some(runtime) = runtime.upgrade() {
            // Import the keystore.
            match import_single_keystore(
                keystore,
                password,
                validator_dir.clone(),
                &validator_store,
                runtime,
            ) {
                Ok(status) => Status::ok(status),
                Err(e) => {
                    warn!(
                        log,
                        "Error importing keystore, skipped";
                        "pubkey" => pubkey_str,
                        "error" => ?e,
                    );
                    Status::error(ImportKeystoreStatus::Error, e)
                }
            }
        } else {
            // The runtime has been dropped: the validator client is shutting down.
            Status::error(
                ImportKeystoreStatus::Error,
                "validator client shutdown".into(),
            )
        };
        statuses.push(status);
    }

    Ok(ImportKeystoresResponse { data: statuses })
}
/// Import a single keystore, writing it into a new directory under `validator_dir_path`
/// and registering it with the validator store.
///
/// Returns `Duplicate` if an *enabled* local validator with the same pubkey already exists,
/// and errors if the pubkey belongs to a remote signer validator or the password is wrong.
fn import_single_keystore<T: SlotClock + 'static, E: EthSpec>(
    keystore: Keystore,
    password: ZeroizeString,
    validator_dir_path: PathBuf,
    validator_store: &ValidatorStore<T, E>,
    runtime: Arc<Runtime>,
) -> Result<ImportKeystoreStatus, String> {
    // Check if the validator key already exists, erroring if it is a remote signer validator.
    let pubkey = keystore
        .public_key()
        .ok_or_else(|| format!("invalid pubkey: {}", keystore.pubkey()))?;
    if let Some(def) = validator_store
        .initialized_validators()
        .read()
        .validator_definitions()
        .iter()
        .find(|def| def.voting_public_key == pubkey)
    {
        if !def.signing_definition.is_local_keystore() {
            return Err("cannot import duplicate of existing remote signer validator".into());
        } else if def.enabled {
            return Ok(ImportKeystoreStatus::Duplicate);
        }
        // NOTE: a matching *disabled* local keystore falls through and is re-imported.
    }

    // Check that the password is correct.
    // In future we should re-structure to avoid the double decryption here. It's not as simple
    // as removing this check because `add_validator_keystore` will break if provided with an
    // invalid validator definition (`update_validators` will get stuck trying to decrypt with the
    // wrong password indefinitely).
    keystore
        .decrypt_keypair(password.as_ref())
        .map_err(|e| format!("incorrect password: {:?}", e))?;

    // Persist the keystore to disk (no withdrawal keystore is stored).
    let validator_dir = ValidatorDirBuilder::new(validator_dir_path)
        .voting_keystore(keystore, password.as_ref())
        .store_withdrawal_keystore(false)
        .build()
        .map_err(|e| format!("failed to build validator directory: {:?}", e))?;

    // Drop validator dir so that `add_validator_keystore` can re-lock the keystore.
    let voting_keystore_path = validator_dir.voting_keystore_path();
    drop(validator_dir);

    runtime
        .block_on(validator_store.add_validator_keystore(
            voting_keystore_path,
            password,
            true,
            None,
        ))
        .map_err(|e| format!("failed to initialize validator: {:?}", e))?;

    Ok(ImportKeystoreStatus::Imported)
}
/// Handle `DELETE /eth/v1/keystores`: disable and delete the given validators and export
/// their slashing protection data.
///
/// The write lock on the initialized validators is held for the duration of the deletions
/// so that no concurrent signing can race with the disable + export.
pub fn delete<T: SlotClock + 'static, E: EthSpec>(
    request: DeleteKeystoresRequest,
    validator_store: Arc<ValidatorStore<T, E>>,
    runtime: Weak<Runtime>,
    log: Logger,
) -> Result<DeleteKeystoresResponse, Rejection> {
    // Remove from initialized validators.
    let initialized_validators_rwlock = validator_store.initialized_validators();
    let mut initialized_validators = initialized_validators_rwlock.write();

    // One status per requested pubkey, in request order; per-key failures become
    // `Error` statuses rather than failing the whole request.
    let mut statuses = request
        .pubkeys
        .iter()
        .map(|pubkey_bytes| {
            match delete_single_keystore(pubkey_bytes, &mut initialized_validators, runtime.clone())
            {
                Ok(status) => Status::ok(status),
                Err(error) => {
                    warn!(
                        log,
                        "Error deleting keystore";
                        "pubkey" => ?pubkey_bytes,
                        "error" => ?error,
                    );
                    Status::error(DeleteKeystoreStatus::Error, error)
                }
            }
        })
        .collect::<Vec<_>>();

    // Use `update_validators` to update the key cache. It is safe to let the key cache get a bit out
    // of date as it resets when it can't be decrypted. We update it just a single time to avoid
    // continually resetting it after each key deletion.
    if let Some(runtime) = runtime.upgrade() {
        runtime
            .block_on(initialized_validators.update_validators())
            .map_err(|e| custom_server_error(format!("unable to update key cache: {:?}", e)))?;
    }

    // Export the slashing protection data.
    let slashing_protection = validator_store
        .export_slashing_protection_for_keys(&request.pubkeys)
        .map_err(|e| {
            custom_server_error(format!("error exporting slashing protection: {:?}", e))
        })?;

    // Update statuses based on availability of slashing protection data: a key that was
    // `NotFound` but has slashing protection history was previously active on this client.
    for (pubkey, status) in request.pubkeys.iter().zip(statuses.iter_mut()) {
        if status.status == DeleteKeystoreStatus::NotFound
            && slashing_protection
                .data
                .iter()
                .any(|interchange_data| interchange_data.pubkey == *pubkey)
        {
            status.status = DeleteKeystoreStatus::NotActive;
        }
    }

    Ok(DeleteKeystoresResponse {
        data: statuses,
        slashing_protection,
    })
}
/// Disable and delete a single validator identified by `pubkey_bytes`.
///
/// Fails if the pubkey cannot be decompressed, if the deletion itself errors, or if the
/// runtime has already been dropped (validator client shutdown).
fn delete_single_keystore(
    pubkey_bytes: &PublicKeyBytes,
    initialized_validators: &mut InitializedValidators,
    runtime: Weak<Runtime>,
) -> Result<DeleteKeystoreStatus, String> {
    // Guard: the runtime is gone once the validator client starts shutting down.
    let runtime = runtime
        .upgrade()
        .ok_or_else(|| String::from("validator client shutdown"))?;

    let pubkey = pubkey_bytes
        .decompress()
        .map_err(|e| format!("invalid pubkey, {:?}: {:?}", pubkey_bytes, e))?;

    runtime
        .block_on(initialized_validators.delete_definition_and_keystore(&pubkey))
        .map_err(|e| format!("unable to disable and delete: {:?}", e))
}

View File

@@ -1,14 +1,18 @@
mod api_secret;
mod create_validator;
mod keystores;
mod tests;
use crate::ValidatorStore;
use account_utils::mnemonic_from_phrase;
use create_validator::{create_validators_mnemonic, create_validators_web3signer};
use eth2::lighthouse_vc::types::{self as api_types, PublicKey, PublicKeyBytes};
use eth2::lighthouse_vc::{
std_types::AuthResponse,
types::{self as api_types, PublicKey, PublicKeyBytes},
};
use lighthouse_version::version_with_platform;
use serde::{Deserialize, Serialize};
use slog::{crit, info, Logger};
use slog::{crit, info, warn, Logger};
use slot_clock::SlotClock;
use std::future::Future;
use std::marker::PhantomData;
@@ -106,7 +110,7 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>(
// Configure CORS.
let cors_builder = {
let builder = warp::cors()
.allow_methods(vec!["GET", "POST", "PATCH"])
.allow_methods(vec!["GET", "POST", "PATCH", "DELETE"])
.allow_headers(vec!["Content-Type", "Authorization"]);
warp_utils::cors::set_builder_origins(
@@ -125,7 +129,20 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>(
}
let authorization_header_filter = ctx.api_secret.authorization_header_filter();
let api_token_path = ctx.api_secret.api_token_path();
let mut api_token_path = ctx.api_secret.api_token_path();
// Attempt to convert the path to an absolute path, but don't error if it fails.
match api_token_path.canonicalize() {
Ok(abs_path) => api_token_path = abs_path,
Err(e) => {
warn!(
log,
"Error canonicalizing token path";
"error" => ?e,
);
}
};
let signer = ctx.api_secret.signer();
let signer = warp::any().map(move || signer.clone());
@@ -154,9 +171,15 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>(
})
});
let inner_ctx = ctx.clone();
let log_filter = warp::any().map(move || inner_ctx.log.clone());
let inner_spec = Arc::new(ctx.spec.clone());
let spec_filter = warp::any().map(move || inner_spec.clone());
let api_token_path_inner = api_token_path.clone();
let api_token_path_filter = warp::any().map(move || api_token_path_inner.clone());
// GET lighthouse/version
let get_node_version = warp::path("lighthouse")
.and(warp::path("version"))
@@ -348,7 +371,7 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>(
.and(warp::path("keystore"))
.and(warp::path::end())
.and(warp::body::json())
.and(validator_dir_filter)
.and(validator_dir_filter.clone())
.and(validator_store_filter.clone())
.and(signer.clone())
.and(runtime_filter.clone())
@@ -451,9 +474,9 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>(
.and(warp::path::param::<PublicKey>())
.and(warp::path::end())
.and(warp::body::json())
.and(validator_store_filter)
.and(signer)
.and(runtime_filter)
.and(validator_store_filter.clone())
.and(signer.clone())
.and(runtime_filter.clone())
.and_then(
|validator_pubkey: PublicKey,
body: api_types::ValidatorPatchRequest,
@@ -495,6 +518,60 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>(
},
);
// GET /lighthouse/auth
let get_auth = warp::path("lighthouse").and(warp::path("auth").and(warp::path::end()));
let get_auth = get_auth
.and(signer.clone())
.and(api_token_path_filter)
.and_then(|signer, token_path: PathBuf| {
blocking_signed_json_task(signer, move || {
Ok(AuthResponse {
token_path: token_path.display().to_string(),
})
})
});
// Standard key-manager endpoints.
let eth_v1 = warp::path("eth").and(warp::path("v1"));
let std_keystores = eth_v1.and(warp::path("keystores")).and(warp::path::end());
// GET /eth/v1/keystores
let get_std_keystores = std_keystores
.and(signer.clone())
.and(validator_store_filter.clone())
.and_then(|signer, validator_store: Arc<ValidatorStore<T, E>>| {
blocking_signed_json_task(signer, move || Ok(keystores::list(validator_store)))
});
// POST /eth/v1/keystores
let post_std_keystores = std_keystores
.and(warp::body::json())
.and(signer.clone())
.and(validator_dir_filter)
.and(validator_store_filter.clone())
.and(runtime_filter.clone())
.and(log_filter.clone())
.and_then(
|request, signer, validator_dir, validator_store, runtime, log| {
blocking_signed_json_task(signer, move || {
keystores::import(request, validator_dir, validator_store, runtime, log)
})
},
);
// DELETE /eth/v1/keystores
let delete_std_keystores = std_keystores
.and(warp::body::json())
.and(signer)
.and(validator_store_filter)
.and(runtime_filter)
.and(log_filter)
.and_then(|request, signer, validator_store, runtime, log| {
blocking_signed_json_task(signer, move || {
keystores::delete(request, validator_store, runtime, log)
})
});
let routes = warp::any()
.and(authorization_header_filter)
// Note: it is critical that the `authorization_header_filter` is applied to all routes.
@@ -508,16 +585,21 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>(
.or(get_lighthouse_health)
.or(get_lighthouse_spec)
.or(get_lighthouse_validators)
.or(get_lighthouse_validators_pubkey),
.or(get_lighthouse_validators_pubkey)
.or(get_std_keystores),
)
.or(warp::post().and(
post_validators
.or(post_validators_keystore)
.or(post_validators_mnemonic)
.or(post_validators_web3signer),
.or(post_validators_web3signer)
.or(post_std_keystores),
))
.or(warp::patch().and(patch_validators)),
.or(warp::patch().and(patch_validators))
.or(warp::delete().and(delete_std_keystores)),
)
// The auth route is the only route that is allowed to be accessed without the API token.
.or(warp::get().and(get_auth))
// Maps errors into HTTP responses.
.recover(warp_utils::reject::handle_rejection)
// Add a `Server` header.
@@ -550,7 +632,7 @@ pub async fn blocking_signed_json_task<S, F, T>(
) -> Result<impl warp::Reply, warp::Rejection>
where
S: Fn(&[u8]) -> String,
F: Fn() -> Result<T, warp::Rejection> + Send + 'static,
F: FnOnce() -> Result<T, warp::Rejection> + Send + 'static,
T: Serialize + Send + 'static,
{
warp_utils::task::blocking_task(func)

View File

@@ -1,6 +1,8 @@
#![cfg(test)]
#![cfg(not(debug_assertions))]
mod keystores;
use crate::doppelganger_service::DoppelgangerService;
use crate::{
http_api::{ApiSecret, Config as HttpConfig, Context},
@@ -9,16 +11,16 @@ use crate::{
};
use account_utils::{
eth2_wallet::WalletBuilder, mnemonic_from_phrase, random_mnemonic, random_password,
ZeroizeString,
random_password_string, ZeroizeString,
};
use deposit_contract::decode_eth1_tx_data;
use environment::null_logger;
use eth2::{
lighthouse_vc::{http_client::ValidatorClientHttpClient, types::*},
types::ErrorMessage as ApiErrorMessage,
Error as ApiError,
};
use eth2_keystore::KeystoreBuilder;
use logging::test_logger;
use parking_lot::RwLock;
use sensitive_url::SensitiveUrl;
use slashing_protection::{SlashingDatabase, SLASHING_PROTECTION_FILENAME};
@@ -40,6 +42,7 @@ type E = MainnetEthSpec;
struct ApiTester {
client: ValidatorClientHttpClient,
initialized_validators: Arc<RwLock<InitializedValidators>>,
validator_store: Arc<ValidatorStore<TestingSlotClock, E>>,
url: SensitiveUrl,
_server_shutdown: oneshot::Sender<()>,
_validator_dir: TempDir,
@@ -58,7 +61,7 @@ fn build_runtime() -> Arc<Runtime> {
impl ApiTester {
pub async fn new(runtime: std::sync::Weak<Runtime>) -> Self {
let log = null_logger().unwrap();
let log = test_logger();
let validator_dir = tempdir().unwrap();
let secrets_dir = tempdir().unwrap();
@@ -92,7 +95,7 @@ impl ApiTester {
let (shutdown_tx, _) = futures::channel::mpsc::channel(1);
let executor = TaskExecutor::new(runtime.clone(), exit, log.clone(), shutdown_tx);
let validator_store = ValidatorStore::<_, E>::new(
let validator_store = Arc::new(ValidatorStore::<_, E>::new(
initialized_validators,
slashing_protection,
Hash256::repeat_byte(42),
@@ -101,7 +104,7 @@ impl ApiTester {
slot_clock,
executor,
log.clone(),
);
));
validator_store
.register_all_in_doppelganger_protection_if_enabled()
@@ -113,7 +116,7 @@ impl ApiTester {
runtime,
api_secret,
validator_dir: Some(validator_dir.path().into()),
validator_store: Some(Arc::new(validator_store)),
validator_store: Some(validator_store.clone()),
spec: E::default_spec(),
config: HttpConfig {
enabled: true,
@@ -144,11 +147,12 @@ impl ApiTester {
let client = ValidatorClientHttpClient::new(url.clone(), api_pubkey).unwrap();
Self {
initialized_validators,
_validator_dir: validator_dir,
client,
initialized_validators,
validator_store,
url,
_server_shutdown: shutdown_tx,
_validator_dir: validator_dir,
_runtime_shutdown: runtime_shutdown,
}
}
@@ -456,7 +460,7 @@ impl ApiTester {
self.client
.post_lighthouse_validators_web3signer(&request)
.await
.unwrap_err();
.unwrap();
assert_eq!(self.vals_total(), initial_vals + s.count);
if s.enabled {
@@ -608,6 +612,34 @@ fn routes_with_invalid_auth() {
.await
})
.await
.test_with_invalid_auth(|client| async move { client.get_keystores().await })
.await
.test_with_invalid_auth(|client| async move {
let password = random_password_string();
let keypair = Keypair::random();
let keystore = KeystoreBuilder::new(&keypair, password.as_ref(), String::new())
.unwrap()
.build()
.map(KeystoreJsonStr)
.unwrap();
client
.post_keystores(&ImportKeystoresRequest {
keystores: vec![keystore],
passwords: vec![password],
slashing_protection: None,
})
.await
})
.await
.test_with_invalid_auth(|client| async move {
let keypair = Keypair::random();
client
.delete_keystores(&DeleteKeystoresRequest {
pubkeys: vec![keypair.pk.compress()],
})
.await
})
.await
});
}

View File

@@ -0,0 +1,977 @@
use super::*;
use account_utils::random_password_string;
use eth2::lighthouse_vc::{
http_client::ValidatorClientHttpClient as HttpClient,
std_types::{KeystoreJsonStr as Keystore, *},
types::Web3SignerValidatorRequest,
};
// use eth2_keystore::Keystore;
use itertools::Itertools;
use rand::{rngs::SmallRng, Rng, SeedableRng};
use slashing_protection::interchange::{Interchange, InterchangeMetadata};
use std::collections::HashMap;
use std::path::Path;
/// Build a fresh random keystore encrypted under `password`.
fn new_keystore(password: ZeroizeString) -> Keystore {
    let keypair = Keypair::random();
    let keystore = KeystoreBuilder::new(&keypair, password.as_ref(), String::new())
        .unwrap()
        .build()
        .unwrap();
    Keystore(keystore)
}
/// A web3signer URL that is expected to be unreachable in tests.
fn web3_signer_url() -> String {
    String::from("http://localhost:1/this-url-hopefully-doesnt-exist")
}
/// Generate a random keypair together with a web3signer request for its public key.
fn new_web3signer_validator() -> (Keypair, Web3SignerValidatorRequest) {
    let keypair = Keypair::random();
    let request = web3signer_validator_with_pubkey(keypair.pk.clone());
    (keypair, request)
}
/// Build an enabled web3signer validator request for `pubkey`, pointing at the
/// (unreachable) test signer URL with default options.
fn web3signer_validator_with_pubkey(pubkey: PublicKey) -> Web3SignerValidatorRequest {
    Web3SignerValidatorRequest {
        enable: true,
        description: String::new(),
        graffiti: None,
        voting_public_key: pubkey,
        url: web3_signer_url(),
        root_certificate_path: None,
        request_timeout_ms: None,
    }
}
/// Spin up a runtime and a single `ApiTester`, then drive the async test body `f`.
fn run_test<F, V>(f: F)
where
    F: FnOnce(ApiTester) -> V,
    V: Future<Output = ()>,
{
    let runtime = build_runtime();
    // Hand the tester a weak handle so the runtime can be torn down cleanly.
    let weak_runtime = Arc::downgrade(&runtime);
    runtime.block_on(async move {
        let tester = ApiTester::new(weak_runtime).await;
        f(tester).await;
    });
}
/// Like `run_test`, but constructs two independent validator clients for tests that
/// exercise migration between VCs.
fn run_dual_vc_test<F, V>(f: F)
where
    F: FnOnce(ApiTester, ApiTester) -> V,
    V: Future<Output = ()>,
{
    let runtime = build_runtime();
    let weak_runtime = Arc::downgrade(&runtime);
    runtime.block_on(async move {
        let tester1 = ApiTester::new(weak_runtime.clone()).await;
        let tester2 = ApiTester::new(weak_runtime).await;
        f(tester1, tester2).await;
    });
}
/// Extract the compressed voting pubkey from a keystore (panics on an invalid keystore).
fn keystore_pubkey(keystore: &Keystore) -> PublicKeyBytes {
    let public_key = keystore.0.public_key().unwrap();
    public_key.compress()
}
/// Yield `count` clones of `status` — the building block for the `all_*` expectations.
fn all_with_status<T: Clone>(count: usize, status: T) -> impl Iterator<Item = T> {
    (0..count).map(move |_| status.clone())
}
// Convenience constructors for the expected-status iterators consumed by the
// `check_import_response`/`check_delete_response` helpers.

/// Expect `count` keys to have been freshly imported.
fn all_imported(count: usize) -> impl Iterator<Item = ImportKeystoreStatus> {
    all_with_status(count, ImportKeystoreStatus::Imported)
}

/// Expect `count` keys to be duplicates of already-imported keys.
fn all_duplicate(count: usize) -> impl Iterator<Item = ImportKeystoreStatus> {
    all_with_status(count, ImportKeystoreStatus::Duplicate)
}

/// Expect `count` imports to have failed.
fn all_import_error(count: usize) -> impl Iterator<Item = ImportKeystoreStatus> {
    all_with_status(count, ImportKeystoreStatus::Error)
}

/// Expect `count` keys to have been deleted.
fn all_deleted(count: usize) -> impl Iterator<Item = DeleteKeystoreStatus> {
    all_with_status(count, DeleteKeystoreStatus::Deleted)
}

/// Expect `count` keys to be known (via slashing protection) but not active.
fn all_not_active(count: usize) -> impl Iterator<Item = DeleteKeystoreStatus> {
    all_with_status(count, DeleteKeystoreStatus::NotActive)
}

/// Expect `count` keys to be entirely unknown to the validator client.
fn all_not_found(count: usize) -> impl Iterator<Item = DeleteKeystoreStatus> {
    all_with_status(count, DeleteKeystoreStatus::NotFound)
}

/// Expect `count` deletions to have failed.
fn all_delete_error(count: usize) -> impl Iterator<Item = DeleteKeystoreStatus> {
    all_with_status(count, DeleteKeystoreStatus::Error)
}
/// Assert that a `GET /eth/v1/keystores` response matches `expected_keystores`
/// exactly, element-for-element (lengths must agree — `zip_eq` panics otherwise).
fn check_get_response<'a>(
    response: &ListKeystoresResponse,
    expected_keystores: impl IntoIterator<Item = &'a Keystore>,
) {
    for (actual, expected) in response.data.iter().zip_eq(expected_keystores) {
        assert_eq!(actual.validating_pubkey, keystore_pubkey(expected));
        assert_eq!(actual.derivation_path, expected.path());
        // Local keystores must not be flagged read-only.
        assert!(matches!(actual.readonly, None | Some(false)));
    }
}
/// Assert that each import status in `response` equals the corresponding expectation
/// (lengths must agree — `zip_eq` panics otherwise), printing the server's message on failure.
fn check_import_response(
    response: &ImportKeystoresResponse,
    expected_statuses: impl IntoIterator<Item = ImportKeystoreStatus>,
) {
    for (actual, expected) in response.data.iter().zip_eq(expected_statuses) {
        assert_eq!(expected, actual.status, "message: {:?}", actual.message);
    }
}
/// Assert that each delete status in `response` equals the corresponding expectation
/// (lengths must agree — `zip_eq` panics otherwise), printing the server's message on failure.
fn check_delete_response<'a>(
    response: &DeleteKeystoresResponse,
    expected_statuses: impl IntoIterator<Item = DeleteKeystoreStatus>,
) {
    for (actual, expected) in response.data.iter().zip_eq(expected_statuses) {
        assert_eq!(actual.status, expected, "message: {:?}", actual.message);
    }
}
/// `GET /lighthouse/auth` must be reachable *without* an auth token and must return the
/// path to the token file, whose contents match the token the client was created with.
#[test]
fn get_auth_no_token() {
    run_test(|mut tester| async move {
        tester.client.send_authorization_header(false);
        let auth_response = tester.client.get_auth().await.unwrap();

        // Load the file from the returned path.
        let token_path = Path::new(&auth_response.token_path);
        let token = HttpClient::load_api_token_from_file(token_path).unwrap();

        // The token should match the one that the client was originally initialised with.
        assert!(tester.client.api_token() == Some(&token));
    })
}

/// A fresh validator client has no keystores: `GET /eth/v1/keystores` returns an empty list.
#[test]
fn get_empty_keystores() {
    run_test(|tester| async move {
        let res = tester.client.get_keystores().await.unwrap();
        assert_eq!(res, ListKeystoresResponse { data: vec![] });
    })
}
/// Importing fresh keystores should succeed for every key and make them all visible via GET.
#[test]
fn import_new_keystores() {
    run_test(|tester| async move {
        let password = random_password_string();
        let keystores = (0..3)
            .map(|_| new_keystore(password.clone()))
            .collect::<Vec<_>>();

        let import_res = tester
            .client
            .post_keystores(&ImportKeystoresRequest {
                keystores: keystores.clone(),
                passwords: vec![password.clone(); keystores.len()],
                slashing_protection: None,
            })
            .await
            .unwrap();

        // All keystores should be imported.
        check_import_response(&import_res, all_imported(keystores.len()));

        // Check that GET lists all the imported keystores.
        let get_res = tester.client.get_keystores().await.unwrap();
        check_get_response(&get_res, &keystores);
    })
}

/// Re-importing an identical set of keystores should mark every key `Duplicate` while the
/// originally imported keys remain listed.
#[test]
fn import_only_duplicate_keystores() {
    run_test(|tester| async move {
        let password = random_password_string();
        let keystores = (0..3)
            .map(|_| new_keystore(password.clone()))
            .collect::<Vec<_>>();
        let req = ImportKeystoresRequest {
            keystores: keystores.clone(),
            passwords: vec![password.clone(); keystores.len()],
            slashing_protection: None,
        };

        // All keystores should be imported on first import.
        let import_res = tester.client.post_keystores(&req).await.unwrap();
        check_import_response(&import_res, all_imported(keystores.len()));

        // No keystores should be imported on repeat import.
        let import_res = tester.client.post_keystores(&req).await.unwrap();
        check_import_response(&import_res, all_duplicate(keystores.len()));

        // Check that GET lists all the imported keystores.
        let get_res = tester.client.get_keystores().await.unwrap();
        check_get_response(&get_res, &keystores);
    })
}
/// Importing a superset of previously imported keystores should report `Duplicate` for the
/// keys that already exist and `Imported` for the new ones.
#[test]
fn import_some_duplicate_keystores() {
    run_test(|tester| async move {
        let password = random_password_string();
        let num_keystores = 5;
        let keystores_all = (0..num_keystores)
            .map(|_| new_keystore(password.clone()))
            .collect::<Vec<_>>();

        // Import even numbered keystores first.
        let keystores1 = keystores_all
            .iter()
            .enumerate()
            .filter_map(|(i, keystore)| {
                if i % 2 == 0 {
                    Some(keystore.clone())
                } else {
                    None
                }
            })
            .collect::<Vec<_>>();

        let req1 = ImportKeystoresRequest {
            keystores: keystores1.clone(),
            passwords: vec![password.clone(); keystores1.len()],
            slashing_protection: None,
        };

        // Second request covers all keys, so the even-indexed ones are duplicates.
        let req2 = ImportKeystoresRequest {
            keystores: keystores_all.clone(),
            passwords: vec![password.clone(); keystores_all.len()],
            slashing_protection: None,
        };

        let import_res = tester.client.post_keystores(&req1).await.unwrap();
        check_import_response(&import_res, all_imported(keystores1.len()));

        // Check partial import.
        let expected = (0..num_keystores).map(|i| {
            if i % 2 == 0 {
                ImportKeystoreStatus::Duplicate
            } else {
                ImportKeystoreStatus::Imported
            }
        });
        let import_res = tester.client.post_keystores(&req2).await.unwrap();
        check_import_response(&import_res, expected);
    })
}

/// A mismatch between the number of keystores and the number of passwords is a malformed
/// request and must be rejected outright with HTTP 400.
#[test]
fn import_wrong_number_of_passwords() {
    run_test(|tester| async move {
        let password = random_password_string();
        let keystores = (0..3)
            .map(|_| new_keystore(password.clone()))
            .collect::<Vec<_>>();

        let err = tester
            .client
            .post_keystores(&ImportKeystoresRequest {
                keystores: keystores.clone(),
                passwords: vec![password.clone()],
                slashing_protection: None,
            })
            .await
            .unwrap_err();
        assert_eq!(err.status().unwrap(), 400);
    })
}
/// `GET /eth/v1/keystores` must list both local keystores (with derivation paths) and
/// remote web3signer validators (marked `readonly: Some(true)`).
#[test]
fn get_web3_signer_keystores() {
    run_test(|tester| async move {
        let num_local = 3;
        let num_remote = 2;

        // Add some local validators.
        let password = random_password_string();
        let keystores = (0..num_local)
            .map(|_| new_keystore(password.clone()))
            .collect::<Vec<_>>();

        let import_res = tester
            .client
            .post_keystores(&ImportKeystoresRequest {
                keystores: keystores.clone(),
                passwords: vec![password.clone(); keystores.len()],
                slashing_protection: None,
            })
            .await
            .unwrap();

        // All keystores should be imported.
        check_import_response(&import_res, all_imported(keystores.len()));

        // Add some web3signer validators.
        let remote_vals = (0..num_remote)
            .map(|_| new_web3signer_validator().1)
            .collect::<Vec<_>>();

        tester
            .client
            .post_lighthouse_validators_web3signer(&remote_vals)
            .await
            .unwrap();

        // Check that both local and remote validators are returned.
        let get_res = tester.client.get_keystores().await.unwrap();

        let expected_responses = keystores
            .iter()
            .map(|local_keystore| SingleKeystoreResponse {
                validating_pubkey: keystore_pubkey(local_keystore),
                derivation_path: local_keystore.path(),
                readonly: None,
            })
            .chain(remote_vals.iter().map(|remote_val| SingleKeystoreResponse {
                validating_pubkey: remote_val.voting_public_key.compress(),
                derivation_path: None,
                readonly: Some(true),
            }))
            .collect::<Vec<_>>();

        // Order is not guaranteed, so check membership rather than equality.
        for response in expected_responses {
            assert!(get_res.data.contains(&response), "{:?}", response);
        }
    })
}
/// Keys registered as web3signer validators must be protected from the standard API:
/// importing the same key locally errors, deleting it errors, and it remains listed as
/// read-only — even after the web3signer validator has been disabled.
#[test]
fn import_and_delete_conflicting_web3_signer_keystores() {
    run_test(|tester| async move {
        let num_keystores = 3;

        // Create some keystores to be used as both web3signer keystores and local keystores.
        let password = random_password_string();
        let keystores = (0..num_keystores)
            .map(|_| new_keystore(password.clone()))
            .collect::<Vec<_>>();
        let pubkeys = keystores.iter().map(keystore_pubkey).collect::<Vec<_>>();

        // Add the validators as web3signer validators.
        let remote_vals = pubkeys
            .iter()
            .map(|pubkey| web3signer_validator_with_pubkey(pubkey.decompress().unwrap()))
            .collect::<Vec<_>>();

        tester
            .client
            .post_lighthouse_validators_web3signer(&remote_vals)
            .await
            .unwrap();

        // Attempt to import the same validators as local validators, which should error.
        let import_req = ImportKeystoresRequest {
            keystores: keystores.clone(),
            passwords: vec![password.clone(); keystores.len()],
            slashing_protection: None,
        };
        let import_res = tester.client.post_keystores(&import_req).await.unwrap();
        check_import_response(&import_res, all_import_error(keystores.len()));

        // Attempt to delete the web3signer validators, which should fail.
        let delete_req = DeleteKeystoresRequest {
            pubkeys: pubkeys.clone(),
        };
        let delete_res = tester.client.delete_keystores(&delete_req).await.unwrap();
        check_delete_response(&delete_res, all_delete_error(keystores.len()));

        // Get should still list all the validators as `readonly`.
        let get_res = tester.client.get_keystores().await.unwrap();
        for (ks, pubkey) in get_res.data.iter().zip_eq(&pubkeys) {
            assert_eq!(ks.validating_pubkey, *pubkey);
            assert_eq!(ks.derivation_path, None);
            assert_eq!(ks.readonly, Some(true));
        }

        // Disabling the web3signer validators should *still* prevent them from being
        // overwritten.
        for pubkey in &pubkeys {
            tester
                .client
                .patch_lighthouse_validators(pubkey, false)
                .await
                .unwrap();
        }

        let import_res = tester.client.post_keystores(&import_req).await.unwrap();
        check_import_response(&import_res, all_import_error(keystores.len()));

        let delete_res = tester.client.delete_keystores(&delete_req).await.unwrap();
        check_delete_response(&delete_res, all_delete_error(keystores.len()));
    })
}
#[test]
fn import_keystores_wrong_password() {
    run_test(|tester| async move {
        let num_keystores = 4;

        // Generate keystores, remembering each one's real password.
        let (keystores, correct_passwords): (Vec<_>, Vec<_>) = (0..num_keystores)
            .map(|_| {
                let password = random_password_string();
                (new_keystore(password.clone()), password)
            })
            .unzip();

        // Even-indexed keystores get a bogus password; odd-indexed get the real one.
        let incorrect_passwords: Vec<_> = (0..num_keystores)
            .map(|i| match i % 2 {
                0 => random_password_string(),
                _ => correct_passwords[i].clone(),
            })
            .collect();

        // First import: the bogus passwords should error, the real ones succeed.
        let import_res = tester
            .client
            .post_keystores(&ImportKeystoresRequest {
                keystores: keystores.clone(),
                passwords: incorrect_passwords.clone(),
                slashing_protection: None,
            })
            .await
            .unwrap();
        let expected_statuses = (0..num_keystores).map(|i| match i % 2 {
            0 => ImportKeystoreStatus::Error,
            _ => ImportKeystoreStatus::Imported,
        });
        check_import_response(&import_res, expected_statuses);

        // Second import with all-correct passwords: previously failed keys are
        // imported, previously imported keys register as duplicates.
        let correct_import_req = ImportKeystoresRequest {
            keystores: keystores.clone(),
            passwords: correct_passwords.clone(),
            slashing_protection: None,
        };
        let import_res = tester
            .client
            .post_keystores(&correct_import_req)
            .await
            .unwrap();
        let expected_statuses = (0..num_keystores).map(|i| match i % 2 {
            0 => ImportKeystoreStatus::Imported,
            _ => ImportKeystoreStatus::Duplicate,
        });
        check_import_response(&import_res, expected_statuses);

        // Third import: every key is now a duplicate.
        let import_res = tester
            .client
            .post_keystores(&correct_import_req)
            .await
            .unwrap();
        check_import_response(
            &import_res,
            (0..num_keystores).map(|_| ImportKeystoreStatus::Duplicate),
        );
    });
}
/// Importing keystores alongside invalid slashing protection data should fail
/// for every keystore, and none of them should subsequently be listed.
#[test]
fn import_invalid_slashing_protection() {
    run_test(|tester| async move {
        let password = random_password_string();
        let keystores = (0..3)
            .map(|_| new_keystore(password.clone()))
            .collect::<Vec<_>>();

        // Invalid slashing protection data with mismatched version and mismatched GVR.
        let slashing_protection = Interchange {
            metadata: InterchangeMetadata {
                // Version 0 is not a valid interchange format version.
                interchange_format_version: 0,
                // The zero root will not match the real genesis validators root.
                genesis_validators_root: Hash256::zero(),
            },
            data: vec![],
        };

        let import_res = tester
            .client
            .post_keystores(&ImportKeystoresRequest {
                keystores: keystores.clone(),
                passwords: vec![password.clone(); keystores.len()],
                slashing_protection: Some(InterchangeJsonStr(slashing_protection)),
            })
            .await
            .unwrap();

        // All keystores should fail to import due to the invalid slashing protection data.
        check_import_response(&import_res, all_import_error(keystores.len()));

        // Check that GET lists none of the failed keystores.
        let get_res = tester.client.get_keystores().await.unwrap();
        check_get_response(&get_res, &[]);
    })
}
/// Return the indices `0..count` as a vector.
fn all_indices(count: usize) -> Vec<usize> {
    let mut indices = Vec::with_capacity(count);
    for i in 0..count {
        indices.push(i);
    }
    indices
}
/// Migrate all validators and all of their slashing protection data to a second VC.
///
/// Re-signing the attestations that were already signed on the first VC must be
/// blocked by the imported slashing protection data (hence `false` for each).
#[test]
fn migrate_all_with_slashing_protection() {
    let n = 3;
    generic_migration_test(
        n,
        // Attestations signed on the first VC.
        vec![
            (0, make_attestation(1, 2)),
            (1, make_attestation(2, 3)),
            (2, make_attestation(1, 2)),
        ],
        // Delete, export slashing protection for, and import every validator.
        all_indices(n),
        all_indices(n),
        all_indices(n),
        // The same attestations must be rejected on the second VC.
        vec![
            (0, make_attestation(1, 2), false),
            (1, make_attestation(2, 3), false),
            (2, make_attestation(1, 2), false),
        ],
    );
}
/// Migrate a subset of validators (0 and 1) along with their slashing protection data.
///
/// On the second VC, repeats of already-signed attestations must fail while
/// attestations at fresh epochs must succeed.
#[test]
fn migrate_some_with_slashing_protection() {
    let n = 3;
    generic_migration_test(
        n,
        // Attestations signed on the first VC.
        vec![
            (0, make_attestation(1, 2)),
            (1, make_attestation(2, 3)),
            (2, make_attestation(1, 2)),
        ],
        // Delete validators 0 and 1, transferring their slashing protection data.
        vec![0, 1],
        vec![0, 1],
        vec![0, 1],
        vec![
            // Repeats of prior attestations are blocked.
            (0, make_attestation(1, 2), false),
            (1, make_attestation(2, 3), false),
            // Attestations at later epochs are allowed.
            (0, make_attestation(2, 3), true),
            (1, make_attestation(3, 4), true),
        ],
    );
}
/// Migrate validators 0 and 1 but only transfer slashing protection data for validator 0.
///
/// Without its slashing protection data on the second VC, validator 1 is able to
/// re-sign an attestation it already signed on the first VC.
#[test]
fn migrate_some_missing_slashing_protection() {
    let n = 3;
    generic_migration_test(
        n,
        // Attestations signed on the first VC.
        vec![
            (0, make_attestation(1, 2)),
            (1, make_attestation(2, 3)),
            (2, make_attestation(1, 2)),
        ],
        // Delete 0 and 1, but export slashing protection only for 0.
        vec![0, 1],
        vec![0],
        vec![0, 1],
        vec![
            // Validator 0 is protected: the repeat attestation fails.
            (0, make_attestation(1, 2), false),
            // Validator 1 has no slashing protection on VC2, so its repeat succeeds.
            (1, make_attestation(2, 3), true),
            // A fresh attestation for validator 0 succeeds.
            (0, make_attestation(2, 3), true),
        ],
    );
}
/// Migrate with slashing protection data for more validators than are imported.
///
/// Validator 2's slashing protection is transferred even though its keystore is
/// not, so it cannot sign anything on the second VC.
#[test]
fn migrate_some_extra_slashing_protection() {
    let n = 3;
    generic_migration_test(
        n,
        // Attestations signed on the first VC.
        vec![
            (0, make_attestation(1, 2)),
            (1, make_attestation(2, 3)),
            (2, make_attestation(1, 2)),
        ],
        // Delete all validators and export slashing protection for all of them...
        all_indices(n),
        all_indices(n),
        // ...but only import keystores for validators 0 and 1.
        vec![0, 1],
        vec![
            // Repeats for the imported validators are blocked.
            (0, make_attestation(1, 2), false),
            (1, make_attestation(2, 3), false),
            // Fresh attestations for the imported validators succeed.
            (0, make_attestation(2, 3), true),
            (1, make_attestation(3, 4), true),
            // Validator 2 was never imported, so its signing fails.
            (2, make_attestation(2, 3), false),
        ],
    );
}
/// Run a test that creates some validators on one VC, and then migrates them to a second VC.
///
/// All indices given are in the range 0..`num_validators`. They are *not* validator indices in the
/// ordinary sense.
///
/// Parameters:
///
/// - `num_validators`: the total number of validators to create
/// - `first_vc_attestations`: attestations to sign on the first VC as `(validator_idx, att)`
/// - `delete_indices`: validators to delete from the first VC
/// - `slashing_protection_indices`: validators to transfer slashing protection data for. It should
/// be a subset of `delete_indices` or the test will panic.
/// - `import_indices`: validators to transfer. It needn't be a subset of `delete_indices`.
/// - `second_vc_attestations`: attestations to sign on the second VC after the transfer. The bool
/// indicates whether the signing should be successful.
fn generic_migration_test(
    num_validators: usize,
    first_vc_attestations: Vec<(usize, Attestation<E>)>,
    delete_indices: Vec<usize>,
    slashing_protection_indices: Vec<usize>,
    import_indices: Vec<usize>,
    second_vc_attestations: Vec<(usize, Attestation<E>, bool)>,
) {
    run_dual_vc_test(move |tester1, tester2| async move {
        // Create the validators on VC1.
        let (keystores, passwords): (Vec<_>, Vec<_>) = (0..num_validators)
            .map(|_| {
                let password = random_password_string();
                (new_keystore(password.clone()), password)
            })
            .unzip();
        let import_res = tester1
            .client
            .post_keystores(&ImportKeystoresRequest {
                keystores: keystores.clone(),
                passwords: passwords.clone(),
                slashing_protection: None,
            })
            .await
            .unwrap();
        check_import_response(&import_res, all_imported(keystores.len()));

        // Sign attestations on VC1 so that slashing protection records accumulate.
        for (validator_index, mut attestation) in first_vc_attestations {
            let public_key = keystore_pubkey(&keystores[validator_index]);
            let current_epoch = attestation.data.target.epoch;
            tester1
                .validator_store
                .sign_attestation(public_key, 0, &mut attestation, current_epoch)
                .await
                .unwrap();
        }

        // Delete the selected keys from VC1.
        let delete_res = tester1
            .client
            .delete_keystores(&DeleteKeystoresRequest {
                pubkeys: delete_indices
                    .iter()
                    .copied()
                    .map(|i| keystore_pubkey(&keystores[i]))
                    .collect(),
            })
            .await
            .unwrap();
        check_delete_response(&delete_res, all_deleted(delete_indices.len()));

        // Check that slashing protection data was returned for all selected validators.
        assert_eq!(
            delete_res.slashing_protection.data.len(),
            delete_indices.len()
        );
        for &i in &delete_indices {
            assert!(delete_res
                .slashing_protection
                .data
                .iter()
                .any(|interchange_data| interchange_data.pubkey == keystore_pubkey(&keystores[i])));
        }

        // Filter slashing protection according to `slashing_protection_indices`:
        // only the selected validators' records are carried over to VC2.
        let mut slashing_protection = delete_res.slashing_protection;
        let data = std::mem::take(&mut slashing_protection.data);
        for &i in &slashing_protection_indices {
            let pubkey = keystore_pubkey(&keystores[i]);
            slashing_protection.data.push(
                data.iter()
                    .find(|interchange_data| interchange_data.pubkey == pubkey)
                    .expect("slashing protection indices should be subset of deleted")
                    .clone(),
            );
        }
        assert_eq!(
            slashing_protection.data.len(),
            slashing_protection_indices.len()
        );

        // Import into the 2nd VC using the slashing protection data.
        let import_res = tester2
            .client
            .post_keystores(&ImportKeystoresRequest {
                keystores: import_indices
                    .iter()
                    .copied()
                    .map(|i| keystores[i].clone())
                    .collect(),
                passwords: import_indices
                    .iter()
                    .copied()
                    .map(|i| passwords[i].clone())
                    .collect(),
                slashing_protection: Some(InterchangeJsonStr(slashing_protection)),
            })
            .await
            .unwrap();
        check_import_response(&import_res, all_imported(import_indices.len()));

        // Sign attestations on the second VC, asserting that each one succeeds or
        // fails according to its `should_succeed` flag.
        for (validator_index, mut attestation, should_succeed) in second_vc_attestations {
            let public_key = keystore_pubkey(&keystores[validator_index]);
            let current_epoch = attestation.data.target.epoch;
            match tester2
                .validator_store
                .sign_attestation(public_key, 0, &mut attestation, current_epoch)
                .await
            {
                Ok(()) => assert!(should_succeed),
                Err(e) => assert!(!should_succeed, "{:?}", e),
            }
        }
    });
}
#[test]
fn delete_keystores_twice() {
    run_test(|tester| async move {
        // Create and import a couple of keystores sharing one password.
        let password = random_password_string();
        let keystores: Vec<_> = (0..2).map(|_| new_keystore(password.clone())).collect();
        let import_req = ImportKeystoresRequest {
            keystores: keystores.clone(),
            passwords: vec![password.clone(); keystores.len()],
            slashing_protection: None,
        };
        let import_res = tester.client.post_keystores(&import_req).await.unwrap();
        check_import_response(&import_res, all_imported(keystores.len()));

        // First delete: every keystore is removed.
        let delete_req = DeleteKeystoresRequest {
            pubkeys: keystores.iter().map(keystore_pubkey).collect(),
        };
        let delete_res = tester.client.delete_keystores(&delete_req).await.unwrap();
        check_delete_response(&delete_res, all_deleted(keystores.len()));

        // Second delete: the keys are known but no longer active.
        let delete_res = tester.client.delete_keystores(&delete_req).await.unwrap();
        check_delete_response(&delete_res, all_not_active(keystores.len()));
    })
}
#[test]
fn delete_nonexistent_keystores() {
    run_test(|tester| async move {
        // Generate keystores but never import them.
        let password = random_password_string();
        let keystores: Vec<_> = (0..2).map(|_| new_keystore(password.clone())).collect();

        // Deleting keys the VC has never seen should report "not found" for each.
        let delete_req = DeleteKeystoresRequest {
            pubkeys: keystores.iter().map(keystore_pubkey).collect(),
        };
        let delete_res = tester.client.delete_keystores(&delete_req).await.unwrap();
        check_delete_response(&delete_res, all_not_found(keystores.len()));
    })
}
/// Build a minimal attestation voting for `source_epoch -> target_epoch`.
///
/// The checkpoint roots are derived deterministically from the epochs, so two
/// calls with the same `(source, target)` pair produce identical attestation data.
fn make_attestation(source_epoch: u64, target_epoch: u64) -> Attestation<E> {
    Attestation {
        // Empty aggregation bits sized to the maximum committee size.
        aggregation_bits: BitList::with_capacity(
            <E as EthSpec>::MaxValidatorsPerCommittee::to_usize(),
        )
        .unwrap(),
        data: AttestationData {
            source: Checkpoint {
                epoch: Epoch::new(source_epoch),
                root: Hash256::from_low_u64_le(source_epoch),
            },
            target: Checkpoint {
                epoch: Epoch::new(target_epoch),
                root: Hash256::from_low_u64_le(target_epoch),
            },
            // Slot, index and beacon block root are left at their defaults.
            ..AttestationData::default()
        },
        signature: AggregateSignature::empty(),
    }
}
/// Stress test: delete validators from several tasks while other tasks are
/// concurrently signing attestations with the same keys.
///
/// Asserts that deletion never returns an `Error` status, and that a validator's
/// exported slashing protection data never changes after its first export — i.e.
/// no signature can be produced once a key has been disabled for export.
#[test]
fn delete_concurrent_with_signing() {
    let runtime = build_runtime();
    let num_keys = 8;
    let num_signing_threads = 8;
    let num_attestations = 100;
    let num_delete_threads = 8;
    let num_delete_attempts = 100;
    // Probability that any given key is included in a single delete request.
    let delete_prob = 0.01;
    assert!(
        num_keys % num_signing_threads == 0,
        "num_keys should be divisible by num threads for simplicity"
    );
    let weak_runtime = Arc::downgrade(&runtime);
    runtime.block_on(async {
        let tester = ApiTester::new(weak_runtime).await;
        // Generate a lot of keys and import them.
        let password = random_password_string();
        let keystores = (0..num_keys)
            .map(|_| new_keystore(password.clone()))
            .collect::<Vec<_>>();
        let all_pubkeys = keystores.iter().map(keystore_pubkey).collect::<Vec<_>>();
        let import_res = tester
            .client
            .post_keystores(&ImportKeystoresRequest {
                keystores: keystores.clone(),
                passwords: vec![password.clone(); keystores.len()],
                slashing_protection: None,
            })
            .await
            .unwrap();
        check_import_response(&import_res, all_imported(keystores.len()));
        // Start several threads signing attestations at sequential epochs.
        // Each task owns a disjoint slice of the keys. Signing results are
        // deliberately ignored (`let _`): a key may be deleted at any moment
        // by one of the delete tasks, making failures expected.
        let mut join_handles = vec![];
        for thread_index in 0..num_signing_threads {
            let keys_per_thread = num_keys / num_signing_threads;
            let validator_store = tester.validator_store.clone();
            let thread_pubkeys = all_pubkeys
                [thread_index * keys_per_thread..(thread_index + 1) * keys_per_thread]
                .to_vec();
            let handle = runtime.spawn(async move {
                for j in 0..num_attestations {
                    let mut att = make_attestation(j, j + 1);
                    for (_validator_id, public_key) in thread_pubkeys.iter().enumerate() {
                        let _ = validator_store
                            .sign_attestation(*public_key, 0, &mut att, Epoch::new(j + 1))
                            .await;
                    }
                }
            });
            join_handles.push(handle);
        }
        // Concurrently, delete each validator one at a time. Store the slashing protection
        // data so we can ensure it doesn't change after a key is exported.
        let mut delete_handles = vec![];
        for _ in 0..num_delete_threads {
            let client = tester.client.clone();
            let all_pubkeys = all_pubkeys.clone();
            let handle = runtime.spawn(async move {
                let mut rng = SmallRng::from_entropy();
                let mut slashing_protection = vec![];
                for _ in 0..num_delete_attempts {
                    // Pick a random subset of keys to delete on this attempt.
                    let to_delete = all_pubkeys
                        .iter()
                        .filter(|_| rng.gen_bool(delete_prob))
                        .copied()
                        .collect::<Vec<_>>();
                    if !to_delete.is_empty() {
                        let delete_res = client
                            .delete_keystores(&DeleteKeystoresRequest { pubkeys: to_delete })
                            .await
                            .unwrap();
                        // Deleted/NotActive/NotFound are all acceptable outcomes
                        // of the race, but an outright error never is.
                        for status in delete_res.data.iter() {
                            assert_ne!(status.status, DeleteKeystoreStatus::Error);
                        }
                        slashing_protection.push(delete_res.slashing_protection);
                    }
                }
                slashing_protection
            });
            delete_handles.push(handle);
        }
        // Collect slashing protection.
        // Once a validator's data has been exported it must be identical on every
        // subsequent export — proving no signing occurred after the first delete.
        let mut slashing_protection_map = HashMap::new();
        let collected_slashing_protection = futures::future::join_all(delete_handles).await;
        for interchange in collected_slashing_protection
            .into_iter()
            .map(Result::unwrap)
            .flatten()
        {
            for validator_data in interchange.data {
                slashing_protection_map
                    .entry(validator_data.pubkey)
                    .and_modify(|existing| {
                        assert_eq!(
                            *existing, validator_data,
                            "slashing protection data changed after first export"
                        )
                    })
                    .or_insert(validator_data);
            }
        }
        futures::future::join_all(join_handles).await
    });
}
#[test]
fn delete_then_reimport() {
    run_test(|tester| async move {
        // Create two keystores sharing one password.
        let password = random_password_string();
        let keystores: Vec<_> = (0..2).map(|_| new_keystore(password.clone())).collect();

        // 1. Import both keystores.
        let import_req = ImportKeystoresRequest {
            keystores: keystores.clone(),
            passwords: vec![password.clone(); keystores.len()],
            slashing_protection: None,
        };
        let import_res = tester.client.post_keystores(&import_req).await.unwrap();
        check_import_response(&import_res, all_imported(keystores.len()));

        // 2. Delete both keystores.
        let delete_res = tester
            .client
            .delete_keystores(&DeleteKeystoresRequest {
                pubkeys: keystores.iter().map(keystore_pubkey).collect(),
            })
            .await
            .unwrap();
        check_delete_response(&delete_res, all_deleted(keystores.len()));

        // 3. Re-import: deletion must not block a fresh import of the same keys.
        let import_res = tester.client.post_keystores(&import_req).await.unwrap();
        check_import_response(&import_res, all_imported(keystores.len()));
    })
}

View File

@@ -14,19 +14,22 @@ use account_utils::{
},
ZeroizeString,
};
use eth2::lighthouse_vc::std_types::DeleteKeystoreStatus;
use eth2_keystore::Keystore;
use lighthouse_metrics::set_gauge;
use lockfile::{Lockfile, LockfileError};
use parking_lot::{MappedMutexGuard, Mutex, MutexGuard};
use reqwest::{Certificate, Client, Error as ReqwestError};
use slog::{debug, error, info, warn, Logger};
use std::collections::{HashMap, HashSet};
use std::fs::File;
use std::fs::{self, File};
use std::io::{self, Read};
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::time::Duration;
use types::{Graffiti, Keypair, PublicKey, PublicKeyBytes};
use url::{ParseError, Url};
use validator_dir::Builder as ValidatorDirBuilder;
use crate::key_cache;
use crate::key_cache::KeyCache;
@@ -67,6 +70,10 @@ pub enum Error {
UnableToSaveDefinitions(validator_definitions::Error),
/// It is not legal to try and initialize a disabled validator definition.
UnableToInitializeDisabledValidator,
/// There was an error while deleting a keystore file.
UnableToDeleteKeystore(PathBuf, io::Error),
/// There was an error while deleting a validator dir.
UnableToDeleteValidatorDir(PathBuf, io::Error),
/// There was an error reading from stdin.
UnableToReadPasswordFromUser(String),
/// There was an error running a tokio async task.
@@ -83,6 +90,8 @@ pub enum Error {
InvalidWeb3SignerRootCertificateFile(io::Error),
InvalidWeb3SignerRootCertificate(ReqwestError),
UnableToBuildWeb3SignerClient(ReqwestError),
/// Unable to apply an action to a validator because it is using a remote signer.
InvalidActionOnRemoteValidator,
}
impl From<LockfileError> for Error {
@@ -101,12 +110,15 @@ pub struct InitializedValidator {
impl InitializedValidator {
/// Return a reference to this validator's lockfile if it has one.
pub fn keystore_lockfile(&self) -> Option<&Lockfile> {
pub fn keystore_lockfile(&self) -> Option<MappedMutexGuard<Lockfile>> {
match self.signing_method.as_ref() {
SigningMethod::LocalKeystore {
ref voting_keystore_lockfile,
..
} => Some(voting_keystore_lockfile),
} => MutexGuard::try_map(voting_keystore_lockfile.lock(), |option_lockfile| {
option_lockfile.as_mut()
})
.ok(),
// Web3Signer validators do not have any lockfiles.
SigningMethod::Web3Signer { .. } => None,
}
@@ -213,7 +225,7 @@ impl InitializedValidator {
let lockfile_path = get_lockfile_path(&voting_keystore_path)
.ok_or_else(|| Error::BadVotingKeystorePath(voting_keystore_path.clone()))?;
let voting_keystore_lockfile = Lockfile::new(lockfile_path)?;
let voting_keystore_lockfile = Mutex::new(Some(Lockfile::new(lockfile_path)?));
SigningMethod::LocalKeystore {
voting_keystore_path,
@@ -381,6 +393,25 @@ impl InitializedValidators {
.map(|v| v.signing_method.clone())
}
/// Add a validator definition to `self`, replacing any disabled definition with the same
/// voting public key.
///
/// The on-disk representation of the validator definitions & the key cache will both be
/// updated.
pub async fn add_definition_replace_disabled(
&mut self,
def: ValidatorDefinition,
) -> Result<(), Error> {
// Drop any disabled definitions with the same public key.
let delete_def = |existing_def: &ValidatorDefinition| {
!existing_def.enabled && existing_def.voting_public_key == def.voting_public_key
};
self.definitions.retain(|def| !delete_def(def));
// Add the definition.
self.add_definition(def).await
}
/// Add a validator definition to `self`, overwriting the on-disk representation of `self`.
pub async fn add_definition(&mut self, def: ValidatorDefinition) -> Result<(), Error> {
if self
@@ -403,6 +434,91 @@ impl InitializedValidators {
Ok(())
}
/// Delete the validator definition and keystore for `pubkey`.
///
/// The delete is carried out in stages so that the filesystem is never left in an inconsistent
/// state, even in case of errors or crashes.
pub async fn delete_definition_and_keystore(
&mut self,
pubkey: &PublicKey,
) -> Result<DeleteKeystoreStatus, Error> {
// 1. Disable the validator definition.
//
// We disable before removing so that in case of a crash the auto-discovery mechanism
// won't re-activate the keystore.
if let Some(def) = self
.definitions
.as_mut_slice()
.iter_mut()
.find(|def| &def.voting_public_key == pubkey)
{
if def.signing_definition.is_local_keystore() {
def.enabled = false;
self.definitions
.save(&self.validators_dir)
.map_err(Error::UnableToSaveDefinitions)?;
} else {
return Err(Error::InvalidActionOnRemoteValidator);
}
} else {
return Ok(DeleteKeystoreStatus::NotFound);
}
// 2. Delete from `self.validators`, which holds the signing method.
// Delete the keystore files.
if let Some(initialized_validator) = self.validators.remove(&pubkey.compress()) {
if let SigningMethod::LocalKeystore {
ref voting_keystore_path,
ref voting_keystore_lockfile,
ref voting_keystore,
..
} = *initialized_validator.signing_method
{
// Drop the lock file so that it may be deleted. This is particularly important on
// Windows where the lockfile will fail to be deleted if it is still open.
drop(voting_keystore_lockfile.lock().take());
self.delete_keystore_or_validator_dir(voting_keystore_path, voting_keystore)?;
}
}
// 3. Delete from validator definitions entirely.
self.definitions
.retain(|def| &def.voting_public_key != pubkey);
self.definitions
.save(&self.validators_dir)
.map_err(Error::UnableToSaveDefinitions)?;
Ok(DeleteKeystoreStatus::Deleted)
}
/// Attempt to delete the voting keystore file, or its entire validator directory.
///
/// Some parts of the VC assume the existence of a validator based on the existence of a
/// directory in the validators dir named like a public key.
fn delete_keystore_or_validator_dir(
&self,
voting_keystore_path: &Path,
voting_keystore: &Keystore,
) -> Result<(), Error> {
// If the parent directory is a `ValidatorDir` within `self.validators_dir`, then
// delete the entire directory so that it may be recreated if the keystore is
// re-imported.
if let Some(validator_dir) = voting_keystore_path.parent() {
if validator_dir
== ValidatorDirBuilder::get_dir_path(&self.validators_dir, voting_keystore)
{
fs::remove_dir_all(validator_dir)
.map_err(|e| Error::UnableToDeleteValidatorDir(validator_dir.into(), e))?;
return Ok(());
}
}
// Otherwise just delete the keystore file.
fs::remove_file(voting_keystore_path)
.map_err(|e| Error::UnableToDeleteKeystore(voting_keystore_path.into(), e))?;
Ok(())
}
/// Returns a slice of all defined validators (regardless of their enabled state).
pub fn validator_definitions(&self) -> &[ValidatorDefinition] {
self.definitions.as_slice()
@@ -456,17 +572,24 @@ impl InitializedValidators {
/// Tries to decrypt the key cache.
///
/// Returns `Ok(true)` if decryption was successful, `Ok(false)` if it couldn't get decrypted
/// and an error if a needed password couldn't get extracted.
/// Returns the decrypted cache if decryption was successful, or an error if a required password
/// wasn't provided and couldn't be read interactively.
///
/// In the case that the cache contains UUIDs for unknown validator definitions then it cannot
/// be decrypted and will be replaced by a new empty cache.
///
/// The mutable `key_stores` argument will be used to accelerate decyption by bypassing
/// filesystem accesses for keystores that are already known. In the case that a keystore
/// from the validator definitions is not yet in this map, it will be loaded from disk and
/// inserted into the map.
async fn decrypt_key_cache(
&self,
mut cache: KeyCache,
key_stores: &mut HashMap<PathBuf, Keystore>,
) -> Result<KeyCache, Error> {
//read relevant key_stores
// Read relevant key stores from the filesystem.
let mut definitions_map = HashMap::new();
for def in self.definitions.as_slice() {
for def in self.definitions.as_slice().iter().filter(|def| def.enabled) {
match &def.signing_definition {
SigningDefinition::LocalKeystore {
voting_keystore_path,
@@ -487,10 +610,11 @@ impl InitializedValidators {
//check if all paths are in the definitions_map
for uuid in cache.uuids() {
if !definitions_map.contains_key(uuid) {
warn!(
debug!(
self.log,
"Unknown uuid in cache";
"uuid" => format!("{}", uuid)
"Resetting the key cache";
"keystore_uuid" => %uuid,
"reason" => "impossible to decrypt due to missing keystore",
);
return Ok(KeyCache::new());
}
@@ -547,7 +671,7 @@ impl InitializedValidators {
/// A validator is considered "already known" and skipped if the public key is already known.
/// I.e., if there are two different definitions with the same public key then the second will
/// be ignored.
async fn update_validators(&mut self) -> Result<(), Error> {
pub(crate) async fn update_validators(&mut self) -> Result<(), Error> {
//use key cache if available
let mut key_stores = HashMap::new();

View File

@@ -6,6 +6,7 @@
use crate::http_metrics::metrics;
use eth2_keystore::Keystore;
use lockfile::Lockfile;
use parking_lot::Mutex;
use reqwest::Client;
use std::path::PathBuf;
use std::sync::Arc;
@@ -75,7 +76,7 @@ pub enum SigningMethod {
/// A validator that is defined by an EIP-2335 keystore on the local filesystem.
LocalKeystore {
voting_keystore_path: PathBuf,
voting_keystore_lockfile: Lockfile,
voting_keystore_lockfile: Mutex<Option<Lockfile>>,
voting_keystore: Keystore,
voting_keypair: Arc<Keypair>,
},

View File

@@ -6,7 +6,9 @@ use crate::{
};
use account_utils::{validator_definitions::ValidatorDefinition, ZeroizeString};
use parking_lot::{Mutex, RwLock};
use slashing_protection::{NotSafe, Safe, SlashingDatabase};
use slashing_protection::{
interchange::Interchange, InterchangeError, NotSafe, Safe, SlashingDatabase,
};
use slog::{crit, error, info, warn, Logger};
use slot_clock::SlotClock;
use std::iter::FromIterator;
@@ -183,7 +185,7 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore<T, E> {
self.validators
.write()
.add_definition(validator_def.clone())
.add_definition_replace_disabled(validator_def.clone())
.await
.map_err(|e| format!("Unable to add definition: {:?}", e))?;
@@ -693,6 +695,48 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore<T, E> {
Ok(SignedContributionAndProof { message, signature })
}
pub fn import_slashing_protection(
&self,
interchange: Interchange,
) -> Result<(), InterchangeError> {
self.slashing_protection
.import_interchange_info(interchange, self.genesis_validators_root)?;
Ok(())
}
/// Export slashing protection data while also disabling the given keys in the database.
///
/// If any key is unknown to the slashing protection database it will be silently omitted
/// from the result. It is the caller's responsibility to check whether all keys provided
/// had data returned for them.
pub fn export_slashing_protection_for_keys(
&self,
pubkeys: &[PublicKeyBytes],
) -> Result<Interchange, InterchangeError> {
self.slashing_protection.with_transaction(|txn| {
let known_pubkeys = pubkeys
.iter()
.filter_map(|pubkey| {
let validator_id = self
.slashing_protection
.get_validator_id_ignoring_status(txn, pubkey)
.ok()?;
Some(
self.slashing_protection
.update_validator_status(txn, validator_id, false)
.map(|()| *pubkey),
)
})
.collect::<Result<Vec<PublicKeyBytes>, _>>()?;
self.slashing_protection.export_interchange_info_in_txn(
self.genesis_validators_root,
Some(&known_pubkeys),
txn,
)
})
}
/// Prune the slashing protection database so that it remains performant.
///
/// This function will only do actual pruning periodically, so it should usually be