From e98a0155bd0b75814a73d03321ae5207e4794e62 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 24 Aug 2022 15:54:52 +1000 Subject: [PATCH] Progress with `move` implementation --- validator_manager/src/lib.rs | 2 +- validator_manager/src/validators/common.rs | 126 ++++++++++++++- .../src/validators/create_validators.rs | 20 +-- .../src/validators/import_validators.rs | 144 ++++++------------ .../src/validators/move_validators.rs | 115 +++++++++++++- 5 files changed, 286 insertions(+), 121 deletions(-) diff --git a/validator_manager/src/lib.rs b/validator_manager/src/lib.rs index 4b52b8edb3..0571cd7f4c 100644 --- a/validator_manager/src/lib.rs +++ b/validator_manager/src/lib.rs @@ -4,7 +4,7 @@ use environment::Environment; use serde::Serialize; use std::path::PathBuf; use types::EthSpec; -use validators::create_validators::write_to_json_file; +use validators::common::write_to_json_file; pub mod validators; diff --git a/validator_manager/src/validators/common.rs b/validator_manager/src/validators/common.rs index 9e3d279c02..9bd4358af9 100644 --- a/validator_manager/src/validators/common.rs +++ b/validator_manager/src/validators/common.rs @@ -1,7 +1,11 @@ use account_utils::ZeroizeString; use eth2::lighthouse_vc::std_types::{InterchangeJsonStr, KeystoreJsonStr}; use eth2::{ - lighthouse_vc::{http_client::ValidatorClientHttpClient, std_types::SingleKeystoreResponse}, + lighthouse_vc::{ + http_client::ValidatorClientHttpClient, + std_types::{ImportKeystoresRequest, SingleKeystoreResponse}, + types::UpdateFeeRecipientRequest, + }, SensitiveUrl, }; use serde::{Deserialize, Serialize}; @@ -10,6 +14,8 @@ use std::path::{Path, PathBuf}; use tree_hash::TreeHash; use types::*; +pub const IGNORE_DUPLICATES_FLAG: &str = "ignore-duplicates"; + /// When the `ethereum/staking-deposit-cli` tool generates deposit data JSON, it adds a /// `deposit_cli_version` to protect the web-based "Launchpad" tool against a breaking change that /// was introduced in 
`ethereum/staking-deposit-cli`. Lighthouse don't really have a version that it @@ -19,6 +25,16 @@ use types::*; /// 2. Weird enough to identify Lighthouse. const LIGHTHOUSE_DEPOSIT_CLI_VERSION: &str = "20.18.20"; +#[derive(Debug)] +pub enum UploadError { + InvalidPublicKey, + DuplicateValidator(PublicKeyBytes), + FailedToListKeys(eth2::Error), + KeyUploadFailed(eth2::Error), + FeeRecipientUpdateFailed(eth2::Error), + PatchValidatorFailed(eth2::Error), +} + #[derive(Serialize, Deserialize)] pub struct ValidatorSpecification { pub voting_keystore: KeystoreJsonStr, @@ -30,6 +46,97 @@ pub struct ValidatorSpecification { pub enabled: Option, } +impl ValidatorSpecification { + /// Upload the validator to a validator client via HTTP. + pub async fn upload( + self, + http_client: &ValidatorClientHttpClient, + ignore_duplicates: bool, + ) -> Result<(), UploadError> { + let ValidatorSpecification { + voting_keystore, + voting_keystore_password, + slashing_protection, + fee_recipient, + gas_limit, + builder_proposals, + enabled, + } = self; + + let voting_public_key = voting_keystore + .public_key() + .ok_or(UploadError::InvalidPublicKey)? + .into(); + + let request = ImportKeystoresRequest { + keystores: vec![voting_keystore], + passwords: vec![voting_keystore_password], + slashing_protection, + }; + + // Check to see if this validator already exists on the remote validator. 
+        match http_client.get_keystores().await {
+            Ok(response) => {
+                if response
+                    .data
+                    .iter()
+                    .find(|validator| validator.validating_pubkey == voting_public_key)
+                    .is_some()
+                {
+                    if ignore_duplicates {
+                        eprintln!(
+                            "Duplicate validators are ignored, ignoring {:?} which exists \
+                            on the destination validator client",
+                            voting_public_key
+                        );
+                    } else {
+                        return Err(UploadError::DuplicateValidator(voting_public_key));
+                    }
+                }
+            }
+            Err(e) => {
+                return Err(UploadError::FailedToListKeys(e));
+            }
+        };
+
+        if let Err(e) = http_client.post_keystores(&request).await {
+            // Return here *without* writing the deposit JSON file. This might help prevent
+            // users from submitting duplicate deposits or deposits for validators that weren't
+            // initialized on a VC.
+            //
+            // Next time the user runs with the --ignore-duplicates flag there should be a new,
+            // complete deposit JSON file created.
+            return Err(UploadError::KeyUploadFailed(e));
+        }
+
+        if let Some(fee_recipient) = fee_recipient {
+            http_client
+                .post_fee_recipient(
+                    &voting_public_key,
+                    &UpdateFeeRecipientRequest {
+                        ethaddress: fee_recipient,
+                    },
+                )
+                .await
+                .map_err(UploadError::FeeRecipientUpdateFailed)?;
+        }
+
+        if gas_limit.is_some() || builder_proposals.is_some() || enabled.is_some() {
+            http_client
+                .patch_lighthouse_validators(
+                    &voting_public_key,
+                    enabled,
+                    gas_limit,
+                    builder_proposals,
+                )
+                .await
+                .map_err(UploadError::PatchValidatorFailed)?;
+        }
+
+        Ok(())
+    }
+}
+
 #[derive(Serialize, Deserialize)]
 pub struct CreateSpec {
     pub mnemonic: String,
@@ -225,3 +332,20 @@ pub async fn vc_http_client>(
 
     Ok((http_client, remote_keystores))
 }
+
+/// Write some object to a file as JSON.
+///
+/// The file must be created new, it must not already exist. 
+pub fn write_to_json_file, S: Serialize>( + path: P, + contents: &S, +) -> Result<(), String> { + eprintln!("Writing {:?}", path.as_ref()); + let mut file = fs::OpenOptions::new() + .write(true) + .create_new(true) + .open(&path) + .map_err(|e| format!("Failed to open {:?}: {:?}", path.as_ref(), e))?; + serde_json::to_writer(&mut file, contents) + .map_err(|e| format!("Failed to write JSON to {:?}: {:?}", path.as_ref(), e)) +} diff --git a/validator_manager/src/validators/create_validators.rs b/validator_manager/src/validators/create_validators.rs index e2a0ec8f7a..8c5d43d57a 100644 --- a/validator_manager/src/validators/create_validators.rs +++ b/validator_manager/src/validators/create_validators.rs @@ -10,7 +10,7 @@ use eth2::{ use eth2_wallet::WalletBuilder; use serde::{Deserialize, Serialize}; use std::fs; -use std::path::{Path, PathBuf}; +use std::path::PathBuf; use std::time::Duration; use types::*; @@ -505,28 +505,12 @@ async fn run<'a, T: EthSpec>(config: CreateConfig, spec: &ChainSpec) -> Result<( Ok(()) } -/// Write some object to a file as JSON. -/// -/// The file must be created new, it must not already exist. 
-pub fn write_to_json_file, S: Serialize>( - path: P, - contents: &S, -) -> Result<(), String> { - eprintln!("Writing {:?}", path.as_ref()); - let mut file = fs::OpenOptions::new() - .write(true) - .create_new(true) - .open(&path) - .map_err(|e| format!("Failed to open {:?}: {:?}", path.as_ref(), e))?; - serde_json::to_writer(&mut file, contents) - .map_err(|e| format!("Failed to write JSON to {:?}: {:?}", path.as_ref(), e)) -} - #[cfg(test)] pub mod tests { use super::*; use eth2_network_config::Eth2NetworkConfig; use regex::Regex; + use std::path::Path; use std::str::FromStr; use tempfile::{tempdir, TempDir}; use tree_hash::TreeHash; diff --git a/validator_manager/src/validators/import_validators.rs b/validator_manager/src/validators/import_validators.rs index 4d2c2a6530..0247aa695b 100644 --- a/validator_manager/src/validators/import_validators.rs +++ b/validator_manager/src/validators/import_validators.rs @@ -1,10 +1,7 @@ use super::common::*; use crate::DumpConfig; use clap::{App, Arg, ArgMatches}; -use eth2::{ - lighthouse_vc::{std_types::ImportKeystoresRequest, types::UpdateFeeRecipientRequest}, - SensitiveUrl, -}; +use eth2::SensitiveUrl; use serde::{Deserialize, Serialize}; use std::fs; use std::path::PathBuf; @@ -13,7 +10,6 @@ pub const CMD: &str = "import"; pub const VALIDATORS_FILE_FLAG: &str = "validators-file"; pub const VALIDATOR_CLIENT_URL_FLAG: &str = "validator-client-url"; pub const VALIDATOR_CLIENT_TOKEN_FLAG: &str = "validator-client-token"; -pub const IGNORE_DUPLICATES_FLAG: &str = "ignore-duplicates"; pub const DETECTED_DUPLICATE_MESSAGE: &str = "Duplicate validator detected!"; @@ -137,105 +133,57 @@ async fn run<'a>(config: ImportConfig) -> Result<(), String> { ); for (i, validator) in validators.into_iter().enumerate() { - let ValidatorSpecification { - voting_keystore, - voting_keystore_password, - slashing_protection, - fee_recipient, - gas_limit, - builder_proposals, - enabled, - } = validator; - - let voting_public_key = voting_keystore 
- .public_key() - .ok_or_else(|| format!("Validator keystore at index {} is missing a public key", i))? - .into(); - - let request = ImportKeystoresRequest { - keystores: vec![voting_keystore], - passwords: vec![voting_keystore_password], - slashing_protection, - }; - - // Check to see if this validator already exists on the remote validator. - match http_client.get_keystores().await { - Ok(response) => { - if response - .data - .iter() - .find(|validator| validator.validating_pubkey == voting_public_key) - .is_some() - { - if ignore_duplicates { - eprintln!( - "Duplicate validators are ignored, ignoring {:?} which exists \ - on the validator client and in {:?}", - voting_public_key, validators_file_path - ); - } else { - eprintln!( - "{} {:?} exists on the remote validator client and in {:?}", - DETECTED_DUPLICATE_MESSAGE, voting_public_key, validators_file_path - ); - return Err(DETECTED_DUPLICATE_MESSAGE.to_string()); - } - } + match validator.upload(&http_client, ignore_duplicates).await { + Ok(()) => eprintln!("Uploaded keystore {} of {} to the VC", i + 1, count), + e @ Err(UploadError::InvalidPublicKey) => { + eprintln!("Validator {} has an invalid public key", i); + return Err(format!("{:?}", e)); } - Err(e) => { + ref e @ Err(UploadError::DuplicateValidator(voting_public_key)) => { eprintln!( - "Failed to list keystores during batch {}. Some keys may have been imported whilst \ - others may not have been imported. A potential solution is to use the \ - --{} flag, however care should be taken to ensure that there are no \ - duplicate deposits submitted.", - i, IGNORE_DUPLICATES_FLAG + "Duplicate validator {:?} already exists on the destination validator client. \ + This may indicate that some validators are running in two places at once, which \ + can lead to slashing. 
If you are certain that there is no risk, add the --{} flag.",
+                    voting_public_key, IGNORE_DUPLICATES_FLAG
                 );
-                return Err(format!("Failed to list keys: {:?}", e));
+                return Err(format!("{:?}", e));
             }
-        };
-
-        if let Err(e) = http_client.post_keystores(&request).await {
-            eprintln!(
-                "Failed to upload batch {}. Some keys may have been imported whilst \
-                others may not have been imported. A potential solution is to use the \
-                --{} flag, however care should be taken to ensure that there are no \
+            Err(UploadError::FailedToListKeys(e)) => {
+                eprintln!(
+                    "Failed to list keystores. Some keys may have been imported whilst \
+                    others may not have been imported. A potential solution is to run this command again \
+                    using the --{} flag, however care should be taken to ensure that there are no \
                     duplicate deposits submitted.",
-                i, IGNORE_DUPLICATES_FLAG
-            );
-            // Return here *without* writing the deposit JSON file. This might help prevent
-            // users from submitting duplicate deposits or deposits for validators that weren't
-            // initialized on a VC.
-            //
-            // Next the the user runs with the --ignore-duplicates flag there should be a new,
-            // complete deposit JSON file created.
-            return Err(format!("Key upload failed: {:?}", e));
+                    IGNORE_DUPLICATES_FLAG
+                );
+                return Err(format!("{:?}", e));
+            }
+            Err(UploadError::KeyUploadFailed(e)) => {
+                eprintln!(
+                    "Failed to upload keystore. Some keys may have been imported whilst \
+                    others may not have been imported. A potential solution is to run this command again \
+                    using the --{} flag, however care should be taken to ensure that there are no \
+                    duplicate deposits submitted.",
+                    IGNORE_DUPLICATES_FLAG
+                );
+                return Err(format!("{:?}", e));
+            }
+            Err(UploadError::FeeRecipientUpdateFailed(e)) => {
+                eprintln!(
+                    "Failed to set fee recipient for validator {}. This value may need \
+                    to be set manually. Continuing with other validators. 
Error was {:?}",
+                    i, e
+                );
+            }
+            Err(UploadError::PatchValidatorFailed(e)) => {
+                eprintln!(
+                    "Failed to set some values on validator {} (e.g., builder, enabled or gas limit). \
+                    These values may need to be set manually. Continuing with other validators. \
+                    Error was {:?}",
+                    i, e
+                );
+            }
         }
-
-        if let Some(fee_recipient) = fee_recipient {
-            http_client
-                .post_fee_recipient(
-                    &voting_public_key,
-                    &UpdateFeeRecipientRequest {
-                        ethaddress: fee_recipient,
-                    },
-                )
-                .await
-                .map_err(|e| format!("Failed to update fee recipient on VC: {:?}", e))?;
-        }
-
-        if gas_limit.is_some() || builder_proposals.is_some() || enabled.is_some() {
-            http_client
-                .patch_lighthouse_validators(
-                    &voting_public_key,
-                    enabled,
-                    gas_limit,
-                    builder_proposals,
-                )
-                .await
-                .map_err(|e| format!("Failed to update lighthouse validator on VC: {:?}", e))?;
-        }
-
-        eprintln!("Uploaded keystore {} of {} to the VC", i + 1, count);
     }
 
     Ok(())
diff --git a/validator_manager/src/validators/move_validators.rs b/validator_manager/src/validators/move_validators.rs
index 4ac5256245..9070dea31f 100644
--- a/validator_manager/src/validators/move_validators.rs
+++ b/validator_manager/src/validators/move_validators.rs
@@ -11,11 +11,12 @@ use eth2::{
 use serde::{Deserialize, Serialize};
 use std::collections::{HashMap, HashSet};
 use std::fs;
-use std::path::PathBuf;
+use std::path::{Path, PathBuf};
 use std::str::FromStr;
 use types::{Address, PublicKeyBytes};
 
 pub const MOVE_DIR_NAME: &str = "lighthouse-validator-move";
+pub const VALIDATOR_SPECIFICATION_FILE: &str = "validator-specification.json";
 
 pub const CMD: &str = "move";
 pub const WORKING_DIRECTORY_FLAG: &str = "working-directory";
@@ -244,7 +245,7 @@ async fn run<'a>(config: MoveConfig) -> Result<(), String> {
     let (src_http_client, src_keystores) =
         vc_http_client(src_vc_url.clone(), &src_vc_token_path).await?;
 
-    let (dest_http_client, dest_keystores) =
+    let (dest_http_client, _dest_keystores) =
         vc_http_client(dest_vc_url.clone(), 
&dest_vc_token_path).await?;
 
     let pubkeys_to_move = match validators {
@@ -275,7 +276,8 @@ async fn run<'a>(config: MoveConfig) -> Result<(), String> {
         .map(|k| (k.validating_pubkey, k))
         .collect();
 
-    for pubkey_to_move in pubkeys_to_move {
+    let count = pubkeys_to_move.len();
+    for (i, &pubkey_to_move) in pubkeys_to_move.iter().enumerate() {
         // Skip read-only validators rather than exiting. This makes it a bit easier to use the
         // "all" flag.
         if src_keystores_map
@@ -412,11 +414,118 @@ async fn run<'a>(config: MoveConfig) -> Result<(), String> {
             builder_proposals: Some(builder_proposals),
             enabled: Some(true),
         };
+
+        let validator_specification_path =
+            working_directory_path.join(VALIDATOR_SPECIFICATION_FILE);
+        if let Err(e) = write_to_json_file(&validator_specification_path, &validator_specification)
+        {
+            eprintln!(
+                "Validator {:?} was removed from the source validator but it could not be \
+                saved to disk locally in the case of an upload failure. The application will \
+                continue since it may be possible to upload the validator successfully, \
+                however recovery options are limited. Write failed with {:?}",
+                pubkey_to_move, e
+            );
+        }
+
+        // We might as well just ignore validators that already exist on the destination machine,
+        // there doesn't appear to be much harm just adding them again.
+        let ignore_duplicates = true;
+
+        match validator_specification
+            .upload(&dest_http_client, ignore_duplicates)
+            .await
+        {
+            Ok(()) => eprintln!(
+                "Uploaded keystore {} of {} to the destination VC",
+                i + 1,
+                count
+            ),
+            e @ Err(UploadError::InvalidPublicKey) => {
+                eprintln!("Validator {} has an invalid public key", i);
+                return Err(format!("{:?}", e));
+            }
+            Err(UploadError::DuplicateValidator(_)) => {
+                return Err(format!(
+                    "Duplicate validator detected when duplicates are ignored"
+                ));
+            }
+            Err(UploadError::FailedToListKeys(e)) => {
+                eprintln!(
+                    "Failed to list keystores. 
Some keys may have been moved whilst \
+                    others may not.",
+                );
+                eprint_recovery_advice(
+                    &working_directory_path,
+                    &validator_specification_path,
+                    &dest_vc_url,
+                    &dest_vc_token_path,
+                );
+                return Err(format!("{:?}", e));
+            }
+            Err(UploadError::KeyUploadFailed(e)) => {
+                eprintln!(
+                    "Failed to upload keystore. Some keys may have been moved whilst \
+                    others may not.",
+                );
+                eprint_recovery_advice(
+                    &working_directory_path,
+                    &validator_specification_path,
+                    &dest_vc_url,
+                    &dest_vc_token_path,
+                );
+                return Err(format!("{:?}", e));
+            }
+            Err(UploadError::FeeRecipientUpdateFailed(e)) => {
+                eprintln!(
+                    "Failed to set fee recipient for validator {}. This value may need \
+                    to be set manually. Continuing with other validators. Error was {:?}",
+                    i, e
+                );
+            }
+            Err(UploadError::PatchValidatorFailed(e)) => {
+                eprintln!(
+                    "Failed to set some values on validator {} (e.g., builder, enabled or gas limit). \
+                    These values may need to be set manually. Continuing with other validators. \
+                    Error was {:?}",
+                    i, e
+                );
+            }
+        }
     }
 
     Ok(())
 }
 
+pub fn eprint_recovery_advice>(
+    working_directory_path: P,
+    validator_file: P,
+    dest_vc_url: &SensitiveUrl,
+    dest_vc_token_path: P,
+) {
+    use crate::validators::import_validators::{
+        CMD, VALIDATORS_FILE_FLAG, VALIDATOR_CLIENT_TOKEN_FLAG, VALIDATOR_CLIENT_URL_FLAG,
+    };
+
+    eprintln!(
+        "It may be possible to recover this validator by running the following command: \n\n\
+        lighthouse {} {} {} --{} {:?} --{} {} --{} {:?} \n\n\
+        The {:?} directory contains a backup of the validator that was unable to be uploaded. \
+        That backup contains the unencrypted validator secret key and should not be shared with \
+        anyone. 
If the recovery command (above) succeeds, it is safe to remove that directory.", + crate::CMD, + crate::validators::CMD, + CMD, + VALIDATORS_FILE_FLAG, + validator_file.as_ref().as_os_str(), + VALIDATOR_CLIENT_URL_FLAG, + dest_vc_url.full, + VALIDATOR_CLIENT_TOKEN_FLAG, + dest_vc_token_path.as_ref().as_os_str(), + working_directory_path.as_ref().as_os_str(), + ) +} + /* #[cfg(test)] mod test {