diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 476b3cf179..75063ee2e0 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -210,9 +210,9 @@ jobs: ## Testing Checklist (DELETE ME) - - [ ] Run on synced Prater Sigma Prime nodes. + - [ ] Run on synced Holesky Sigma Prime nodes. - [ ] Run on synced Canary (mainnet) Sigma Prime nodes. - - [ ] Resync a Prater node. + - [ ] Resync a Holesky node. - [ ] Resync a mainnet node. ## Release Checklist (DELETE ME) diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index 928833cbef..b1239705fd 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -324,7 +324,6 @@ jobs: run: | make - name: Install lcli - if: env.SELF_HOSTED_RUNNERS == 'false' run: make install-lcli - name: Run the doppelganger protection failure test script run: | diff --git a/Cargo.lock b/Cargo.lock index 15c2e1280c..f2ac7c91b4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -18,7 +18,7 @@ version = "0.3.5" dependencies = [ "account_utils", "bls", - "clap", + "clap 4.5.4", "clap_utils", "directory", "environment", @@ -30,10 +30,7 @@ dependencies = [ "filesystem", "safe_arith", "sensitive_url", - "serde", - "serde_json", "slashing_protection", - "slog", "slot_clock", "tempfile", "tokio", @@ -284,12 +281,51 @@ dependencies = [ ] [[package]] -name = "ansi_term" -version = "0.12.1" +name = "anstream" +version = "0.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2" +checksum = "96b09b5178381e0874812a9b157f7fe84982617e48f71f4e3235482775e5b540" dependencies = [ - "winapi", + "anstyle", + "anstyle-parse", + "anstyle-query", + "anstyle-wincon", + "colorchoice", + "utf8parse", +] + +[[package]] +name = "anstyle" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8901269c6307e8d93993578286ac0edf7f195079ffff5ebdeea6a59ffb7e36bc" + +[[package]] +name = "anstyle-parse" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c75ac65da39e5fe5ab759307499ddad880d724eed2f6ce5b5e8a26f4f387928c" +dependencies = [ + "utf8parse", +] + +[[package]] +name = "anstyle-query" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e28923312444cdd728e4738b3f9c9cac739500909bb3d3c94b43551b16517648" +dependencies = [ + "windows-sys 0.52.0", +] + +[[package]] +name = "anstyle-wincon" +version = "3.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1cd54b81ec8d6180e24654d0b371ad22fc3dd083b6ff8ba325b72e00c87660a7" +dependencies = [ + "anstyle", + "windows-sys 0.52.0", ] [[package]] @@ -815,26 +851,22 @@ name = "beacon_node" version = "5.1.3" dependencies = [ "beacon_chain", - "clap", + "clap 4.5.4", "clap_utils", "client", "directory", "dirs", "environment", "eth2_config", - "eth2_network_config", "execution_layer", - "futures", "genesis", "hex", "http_api", "hyper 1.3.1", "lighthouse_network", - "lighthouse_version", "monitoring_api", "node_test_rig", "sensitive_url", - "serde", "serde_json", "slasher", "slog", @@ -849,11 +881,8 @@ dependencies = [ name = "beacon_processor" version = "0.1.0" dependencies = [ - "derivative", - "ethereum_ssz", "fnv", "futures", - "hex", "itertools", "lazy_static", "lighthouse_metrics", @@ -1028,7 +1057,7 @@ name = "boot_node" version = "5.1.3" dependencies = [ "beacon_node", - "clap", + "clap 4.5.4", 
"clap_utils", "eth2_network_config", "ethereum_ssz", @@ -1037,12 +1066,9 @@ dependencies = [ "log", "logging", "serde", - "serde_json", - "serde_yaml", "slog", "slog-async", "slog-scope", - "slog-stdlog", "slog-term", "tokio", "types", @@ -1072,7 +1098,6 @@ dependencies = [ "reqwest", "sensitive_url", "serde", - "serde_json", ] [[package]] @@ -1294,20 +1319,44 @@ version = "2.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c" dependencies = [ - "ansi_term", - "atty", "bitflags 1.3.2", - "strsim 0.8.0", "textwrap", "unicode-width", - "vec_map", ] +[[package]] +name = "clap" +version = "4.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90bc066a67923782aa8515dbaea16946c5bcc5addbd668bb80af688e53e548a0" +dependencies = [ + "clap_builder", +] + +[[package]] +name = "clap_builder" +version = "4.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae129e2e766ae0ec03484e609954119f123cc1fe650337e155d03b022f24f7b4" +dependencies = [ + "anstream", + "anstyle", + "clap_lex", + "strsim 0.11.0", + "terminal_size", +] + +[[package]] +name = "clap_lex" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "98cc8fbded0c607b7ba9dd60cd98df59af97e84d24e49c8557331cfc26d301ce" + [[package]] name = "clap_utils" version = "0.1.0" dependencies = [ - "clap", + "clap 4.5.4", "dirs", "eth2_network_config", "ethereum-types 0.14.1", @@ -1341,12 +1390,9 @@ dependencies = [ "lazy_static", "lighthouse_metrics", "lighthouse_network", - "logging", "monitoring_api", "network", - "num_cpus", "operation_pool", - "parking_lot 0.12.2", "sensitive_url", "serde", "serde_yaml", @@ -1360,7 +1406,6 @@ dependencies = [ "time", "timer", "tokio", - "tree_hash", "types", ] @@ -1373,6 +1418,12 @@ dependencies = [ "cc", ] +[[package]] +name = "colorchoice" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" + [[package]] name = "compare_fields" version = "0.2.0" @@ -1480,7 +1531,7 @@ checksum = "b01d6de93b2b6c65e17c634a26653a29d107b3c98c607c765bf38d041531cd8f" dependencies = [ "atty", "cast", - "clap", + "clap 2.34.0", "criterion-plot", "csv", "itertools", @@ -1782,16 +1833,13 @@ version = "0.1.0" dependencies = [ "beacon_chain", "beacon_node", - "clap", + "clap 4.5.4", "clap_utils", "environment", "hex", - "logging", "slog", - "sloggers", "store", "strum", - "tempfile", "types", ] @@ -1975,7 +2023,7 @@ dependencies = [ name = "directory" version = "0.1.0" dependencies = [ - "clap", + "clap 4.5.4", "clap_utils", "eth2_network_config", ] @@ -2132,13 +2180,11 @@ version = "0.2.0" dependencies = [ "beacon_chain", "bls", - "cached_tree_hash", "compare_fields", "compare_fields_derive", "derivative", "eth2_network_config", "ethereum-types 0.14.1", - "ethereum_serde_utils", "ethereum_ssz", "ethereum_ssz_derive", "execution_layer", @@ -2154,7 +2200,6 @@ dependencies = [ "serde_yaml", "snap", "state_processing", - "store", "swap_or_not_shuffle", "tree_hash", "tree_hash_derive", @@ -2336,15 +2381,12 @@ dependencies = [ "ethereum_ssz_derive", "execution_layer", "futures", - "hex", "lazy_static", "lighthouse_metrics", "merkle_proof", "parking_lot 0.12.2", - "reqwest", "sensitive_url", "serde", - "serde_json", "serde_yaml", "slog", "sloggers", @@ -2386,7 +2428,6 @@ dependencies = [ "libsecp256k1", "lighthouse_network", "mediatype", - 
"mime", "pretty_reqwest_error", "procfs", "proto_array", @@ -2400,7 +2441,6 @@ dependencies = [ "ssz_types", "store", "tokio", - "tree_hash", "types", ] @@ -2472,7 +2512,6 @@ dependencies = [ "pretty_reqwest_error", "reqwest", "sensitive_url", - "serde_json", "serde_yaml", "sha2 0.9.9", "slog", @@ -2802,7 +2841,6 @@ version = "0.1.0" dependencies = [ "async-channel", "deposit_contract", - "environment", "ethers-core", "ethers-providers", "execution_layer", @@ -2827,7 +2865,6 @@ dependencies = [ "alloy-consensus", "alloy-rlp", "arc-swap", - "async-trait", "builder_client", "bytes", "environment", @@ -2837,7 +2874,6 @@ dependencies = [ "ethereum_ssz", "ethers-core", "fork_choice", - "futures", "hash-db", "hash256-std-hasher", "hex", @@ -3340,7 +3376,6 @@ dependencies = [ "futures-timer", "getrandom", "hex_fmt", - "instant", "libp2p", "prometheus-client", "quick-protobuf", @@ -3350,9 +3385,9 @@ dependencies = [ "regex", "serde", "sha2 0.10.8", - "smallvec", "tracing", "void", + "web-time", ] [[package]] @@ -4287,10 +4322,9 @@ dependencies = [ "account_utils", "beacon_chain", "bls", - "clap", + "clap 4.5.4", "clap_utils", "deposit_contract", - "directory", "env_logger 0.9.3", "environment", "eth1_test_rig", @@ -4302,7 +4336,6 @@ dependencies = [ "execution_layer", "genesis", "hex", - "int_to_bytes", "lighthouse_network", "lighthouse_version", "log", @@ -4866,7 +4899,7 @@ dependencies = [ "beacon_processor", "bls", "boot_node", - "clap", + "clap 4.5.4", "clap_utils", "database_manager", "directory", @@ -4889,10 +4922,8 @@ dependencies = [ "slasher", "slashing_protection", "slog", - "sloggers", "task_executor", "tempfile", - "tracing-subscriber", "types", "unused_port", "validator_client", @@ -4904,7 +4935,6 @@ dependencies = [ name = "lighthouse_metrics" version = "0.2.0" dependencies = [ - "lazy_static", "prometheus", ] @@ -4913,8 +4943,6 @@ name = "lighthouse_network" version = "0.2.0" dependencies = [ "async-channel", - "base64 0.21.7", - "byteorder", "bytes", "delay_map", "directory", @@ -4926,12 +4954,8 @@ dependencies = [ "ethereum_ssz_derive", "fnv", "futures", - "futures-ticker", - "getrandom", "gossipsub", "hex", - "hex_fmt", - "instant", "lazy_static", "libp2p", "libp2p-mplex", @@ -4961,9 +4985,6 @@ dependencies = [ "tokio", "tokio-io-timeout", "tokio-util 0.6.10", - "tracing", - "tree_hash", - "tree_hash_derive", "types", "unsigned-varint 0.6.0", "unused_port", @@ -5053,7 +5074,6 @@ dependencies = [ "serde", "serde_json", "slog", - "slog-async", "slog-term", "sloggers", "take_mut", @@ -5487,10 +5507,8 @@ dependencies = [ "beacon_processor", "delay_map", "derivative", - "environment", "error-chain", "eth2", - "ethereum-types 0.14.1", "ethereum_ssz", "execution_layer", "fnv", @@ -5504,10 +5522,8 @@ dependencies = [ "lighthouse_metrics", "lighthouse_network", "logging", - "lru", "lru_cache", "matches", - "num_cpus", "operation_pool", "parking_lot 0.12.2", "rand", @@ -5524,7 +5540,6 @@ dependencies = [ "task_executor", "tokio", "tokio-stream", - "tokio-util 0.6.10", "types", ] @@ -7586,11 +7601,9 @@ dependencies = [ name = "simulator" version = "0.2.0" dependencies = [ - "clap", + "clap 4.5.4", "env_logger 0.9.3", - "eth1", "eth2_network_config", - "ethereum-types 0.14.1", "execution_layer", "futures", "node_test_rig", @@ -7598,7 +7611,6 @@ dependencies = [ "rayon", "sensitive_url", "serde_json", - "ssz_types", "tokio", "types", ] @@ -7642,7 +7654,6 @@ dependencies = [ "safe_arith", "serde", "slog", - "sloggers", "strum", "tempfile", "tree_hash", @@ -7952,7 +7963,6 @@ name = "store" 
version = "0.2.0" dependencies = [ "beacon_chain", - "bls", "db-key", "directory", "ethereum_ssz", @@ -7961,14 +7971,11 @@ dependencies = [ "lazy_static", "leveldb", "lighthouse_metrics", - "logging", "lru", "parking_lot 0.12.2", - "safe_arith", "serde", "slog", "sloggers", - "smallvec", "state_processing", "strum", "tempfile", @@ -7986,18 +7993,18 @@ dependencies = [ "unicode-normalization", ] -[[package]] -name = "strsim" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" - [[package]] name = "strsim" version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" +[[package]] +name = "strsim" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ee073c9e4cd00e28217186dbe12796d692868f432bf2e97ee73bed0c56dfa01" + [[package]] name = "strum" version = "0.24.1" @@ -8137,7 +8144,6 @@ dependencies = [ "lighthouse_network", "parking_lot 0.12.2", "serde", - "serde_json", "sysinfo", "types", ] @@ -8213,6 +8219,16 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "terminal_size" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "21bebf2b7c9e0a515f6e0f8c51dc0f8e4696391e6f1ff30379559f8365fb0df7" +dependencies = [ + "rustix 0.38.34", + "windows-sys 0.48.0", +] + [[package]] name = "test-test_logger" version = "0.1.0" @@ -8811,7 +8827,6 @@ dependencies = [ "smallvec", "ssz_types", "state_processing", - "strum", "superstruct", "swap_or_not_shuffle", "tempfile", @@ -8978,6 +8993,12 @@ dependencies = [ "percent-encoding", ] +[[package]] +name = "utf8parse" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" + [[package]] name = "uuid" version = "0.8.2" @@ -8995,7 +9016,7 @@ dependencies = [ "account_utils", "bincode", "bls", - "clap", + "clap 4.5.4", "clap_utils", "deposit_contract", "directory", @@ -9067,12 +9088,10 @@ name = "validator_manager" version = "0.1.0" dependencies = [ "account_utils", - "bls", - "clap", + "clap 4.5.4", "clap_utils", "environment", "eth2", - "eth2_keystore", "eth2_network_config", "eth2_wallet", "ethereum_serde_utils", @@ -9308,13 +9327,12 @@ dependencies = [ "beacon_chain", "beacon_node", "bls", - "byteorder", - "clap", + "clap 4.5.4", + "clap_utils", "diesel", "diesel_migrations", "env_logger 0.9.3", "eth2", - "hex", "http_api", "hyper 1.3.1", "log", @@ -9345,6 +9363,16 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "web-time" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + [[package]] name = "web3signer_tests" version = "0.1.0" diff --git a/Cargo.toml b/Cargo.toml index 09e2b7c434..5fe9812bc6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -102,10 +102,10 @@ bincode = "1" bitvec = "1" byteorder = "1" bytes = "1" +clap = { version = "4.5.4", features = ["cargo", "wrap_help"] } # Turn off c-kzg's default features which include `blst/portable`. We can turn on blst's portable # feature ourselves when desired. 
-c-kzg = { version = "1", default-features = false } -clap = "2" +c-kzg = { version = "1", default-features = false } compare_fields_derive = { path = "common/compare_fields_derive" } criterion = "0.3" delay_map = "0.3" diff --git a/account_manager/Cargo.toml b/account_manager/Cargo.toml index 0fab7b31fe..7f2fa05a88 100644 --- a/account_manager/Cargo.toml +++ b/account_manager/Cargo.toml @@ -27,9 +27,6 @@ safe_arith = { workspace = true } slot_clock = { workspace = true } filesystem = { workspace = true } sensitive_url = { workspace = true } -serde = { workspace = true } -serde_json = { workspace = true } -slog = { workspace = true } [dev-dependencies] tempfile = { workspace = true } diff --git a/account_manager/src/lib.rs b/account_manager/src/lib.rs index ce7e8a42c2..f1160fff9c 100644 --- a/account_manager/src/lib.rs +++ b/account_manager/src/lib.rs @@ -2,8 +2,11 @@ mod common; pub mod validator; pub mod wallet; -use clap::App; +use clap::Arg; +use clap::ArgAction; use clap::ArgMatches; +use clap::Command; +use clap_utils::FLAG_HEADER; use environment::Environment; use types::EthSpec; @@ -13,25 +16,36 @@ pub const VALIDATOR_DIR_FLAG: &str = "validator-dir"; pub const VALIDATOR_DIR_FLAG_ALIAS: &str = "validators-dir"; pub const WALLETS_DIR_FLAG: &str = "wallets-dir"; -pub fn cli_app<'a, 'b>() -> App<'a, 'b> { - App::new(CMD) - .visible_aliases(&["a", "am", "account", CMD]) +pub fn cli_app() -> Command { + Command::new(CMD) + .visible_aliases(["a", "am", "account", CMD]) .about("Utilities for generating and managing Ethereum 2.0 accounts.") + .display_order(0) + .arg( + Arg::new("help") + .long("help") + .short('h') + .help("Prints help information") + .action(ArgAction::HelpLong) + .display_order(0) + .help_heading(FLAG_HEADER), + ) .subcommand(wallet::cli_app()) .subcommand(validator::cli_app()) } /// Run the account manager, returning an error if the operation did not succeed. -pub fn run(matches: &ArgMatches<'_>, env: Environment) -> Result<(), String> { +pub fn run(matches: &ArgMatches, env: Environment) -> Result<(), String> { match matches.subcommand() { - (wallet::CMD, Some(matches)) => wallet::cli_run(matches)?, - (validator::CMD, Some(matches)) => validator::cli_run(matches, env)?, - (unknown, _) => { + Some((wallet::CMD, matches)) => wallet::cli_run(matches)?, + Some((validator::CMD, matches)) => validator::cli_run(matches, env)?, + Some((unknown, _)) => { return Err(format!( "{} is not a valid {} command. 
See --help.", unknown, CMD )); } + _ => return Err("No subcommand provided, see --help for options".to_string()), } Ok(()) diff --git a/account_manager/src/validator/create.rs b/account_manager/src/validator/create.rs index 93b041c61c..cfe4d8e94a 100644 --- a/account_manager/src/validator/create.rs +++ b/account_manager/src/validator/create.rs @@ -4,7 +4,8 @@ use crate::{SECRETS_DIR_FLAG, WALLETS_DIR_FLAG}; use account_utils::{ random_password, read_password_from_user, strip_off_newlines, validator_definitions, PlainText, }; -use clap::{App, Arg, ArgMatches}; +use clap::{Arg, ArgAction, ArgMatches, Command}; +use clap_utils::FLAG_HEADER; use directory::{ ensure_dir_exists, parse_path_or_default_with_flag, DEFAULT_SECRET_DIR, DEFAULT_WALLET_DIR, }; @@ -26,73 +27,83 @@ pub const COUNT_FLAG: &str = "count"; pub const AT_MOST_FLAG: &str = "at-most"; pub const WALLET_PASSWORD_PROMPT: &str = "Enter your wallet's password:"; -pub fn cli_app<'a, 'b>() -> App<'a, 'b> { - App::new(CMD) +pub fn cli_app() -> Command { + Command::new(CMD) .about( "Creates new validators from an existing EIP-2386 wallet using the EIP-2333 HD key \ derivation scheme.", ) .arg( - Arg::with_name(WALLET_NAME_FLAG) + Arg::new(WALLET_NAME_FLAG) .long(WALLET_NAME_FLAG) .value_name("WALLET_NAME") .help("Use the wallet identified by this name") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name(WALLET_PASSWORD_FLAG) + Arg::new(WALLET_PASSWORD_FLAG) .long(WALLET_PASSWORD_FLAG) .value_name("WALLET_PASSWORD_PATH") .help("A path to a file containing the password which will unlock the wallet.") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name(WALLETS_DIR_FLAG) + Arg::new(WALLETS_DIR_FLAG) .long(WALLETS_DIR_FLAG) .value_name(WALLETS_DIR_FLAG) .help("A path containing Eth2 EIP-2386 wallets. Defaults to ~/.lighthouse/{network}/wallets") - .takes_value(true) - .conflicts_with("datadir"), + .action(ArgAction::Set) + .conflicts_with("datadir") + .display_order(0) ) .arg( - Arg::with_name(SECRETS_DIR_FLAG) + Arg::new(SECRETS_DIR_FLAG) .long(SECRETS_DIR_FLAG) .value_name("SECRETS_DIR") .help( "The path where the validator keystore passwords will be stored. \ Defaults to ~/.lighthouse/{network}/secrets", ) - .takes_value(true), + .conflicts_with("datadir") + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name(DEPOSIT_GWEI_FLAG) + Arg::new(DEPOSIT_GWEI_FLAG) .long(DEPOSIT_GWEI_FLAG) .value_name("DEPOSIT_GWEI") .help( "The GWEI value of the deposit amount. Defaults to the minimum amount \ required for an active validator (MAX_EFFECTIVE_BALANCE)", ) - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name(STORE_WITHDRAW_FLAG) + Arg::new(STORE_WITHDRAW_FLAG) .long(STORE_WITHDRAW_FLAG) .help( "If present, the withdrawal keystore will be stored alongside the voting \ keypair. 
It is generally recommended to *not* store the withdrawal key and \ instead generate them from the wallet seed when required.", - ), + ) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name(COUNT_FLAG) + Arg::new(COUNT_FLAG) .long(COUNT_FLAG) .value_name("VALIDATOR_COUNT") .help("The number of validators to create, regardless of how many already exist") .conflicts_with("at-most") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name(AT_MOST_FLAG) + Arg::new(AT_MOST_FLAG) .long(AT_MOST_FLAG) .value_name("AT_MOST_VALIDATORS") .help( @@ -100,14 +111,18 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { reach the given count. Never deletes an existing validator.", ) .conflicts_with("count") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name(STDIN_INPUTS_FLAG) - .takes_value(false) - .hidden(cfg!(windows)) + Arg::new(STDIN_INPUTS_FLAG) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .hide(cfg!(windows)) .long(STDIN_INPUTS_FLAG) - .help("If present, read all user inputs from stdin instead of tty."), + .help("If present, read all user inputs from stdin instead of tty.") + .display_order(0) + .action(ArgAction::SetTrue) ) } @@ -119,15 +134,15 @@ pub fn cli_run( let spec = env.core_context().eth2_config.spec; let name: Option = clap_utils::parse_optional(matches, WALLET_NAME_FLAG)?; - let stdin_inputs = cfg!(windows) || matches.is_present(STDIN_INPUTS_FLAG); + let stdin_inputs = cfg!(windows) || matches.get_flag(STDIN_INPUTS_FLAG); - let wallet_base_dir = if matches.value_of("datadir").is_some() { + let wallet_base_dir = if matches.get_one::("datadir").is_some() { let path: PathBuf = clap_utils::parse_required(matches, "datadir")?; path.join(DEFAULT_WALLET_DIR) } else { parse_path_or_default_with_flag(matches, WALLETS_DIR_FLAG, DEFAULT_WALLET_DIR)? }; - let secrets_dir = if matches.value_of("datadir").is_some() { + let secrets_dir = if matches.get_one::("datadir").is_some() { let path: PathBuf = clap_utils::parse_required(matches, "datadir")?; path.join(DEFAULT_SECRET_DIR) } else { @@ -144,7 +159,7 @@ pub fn cli_run( return Err(format!( "No wallet directory at {:?}. 
Use the `lighthouse --network {} {} {} {}` command to create a wallet", wallet_base_dir, - matches.value_of("network").unwrap_or(""), + matches.get_one::("network").unwrap_or(&String::from("")), crate::CMD, crate::wallet::CMD, crate::wallet::create::CMD @@ -245,7 +260,7 @@ pub fn cli_run( .voting_keystore(keystores.voting, voting_password.as_bytes()) .withdrawal_keystore(keystores.withdrawal, withdrawal_password.as_bytes()) .create_eth1_tx_data(deposit_gwei, &spec) - .store_withdrawal_keystore(matches.is_present(STORE_WITHDRAW_FLAG)) + .store_withdrawal_keystore(matches.get_flag(STORE_WITHDRAW_FLAG)) .build() .map_err(|e| format!("Unable to build validator directory: {:?}", e))?; diff --git a/account_manager/src/validator/exit.rs b/account_manager/src/validator/exit.rs index bc9e0ee1dd..277d2ae8ec 100644 --- a/account_manager/src/validator/exit.rs +++ b/account_manager/src/validator/exit.rs @@ -1,6 +1,7 @@ use crate::wallet::create::STDIN_INPUTS_FLAG; use bls::{Keypair, PublicKey}; -use clap::{App, Arg, ArgMatches}; +use clap::{Arg, ArgAction, ArgMatches, Command}; +use clap_utils::FLAG_HEADER; use environment::Environment; use eth2::{ types::{GenesisData, StateId, ValidatorData, ValidatorId, ValidatorStatus}, @@ -28,48 +29,59 @@ pub const DEFAULT_BEACON_NODE: &str = "http://localhost:5052/"; pub const CONFIRMATION_PHRASE: &str = "Exit my validator"; pub const WEBSITE_URL: &str = "https://lighthouse-book.sigmaprime.io/voluntary-exit.html"; -pub fn cli_app<'a, 'b>() -> App<'a, 'b> { - App::new("exit") +pub fn cli_app() -> Command { + Command::new("exit") .about("Submits a VoluntaryExit to the beacon chain for a given validator keystore.") .arg( - Arg::with_name(KEYSTORE_FLAG) + Arg::new(KEYSTORE_FLAG) .long(KEYSTORE_FLAG) .value_name("KEYSTORE_PATH") .help("The path to the EIP-2335 voting keystore for the validator") - .takes_value(true) - .required(true), + .action(ArgAction::Set) + .required(true) + .display_order(0) ) .arg( - Arg::with_name(PASSWORD_FILE_FLAG) + Arg::new(PASSWORD_FILE_FLAG) .long(PASSWORD_FILE_FLAG) .value_name("PASSWORD_FILE_PATH") .help("The path to the password file which unlocks the validator voting keystore") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name(BEACON_SERVER_FLAG) + Arg::new(BEACON_SERVER_FLAG) .long(BEACON_SERVER_FLAG) .value_name("NETWORK_ADDRESS") .help("Address to a beacon node HTTP API") .default_value(DEFAULT_BEACON_NODE) - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name(NO_WAIT) + Arg::new(NO_WAIT) .long(NO_WAIT) .help("Exits after publishing the voluntary exit without waiting for confirmation that the exit was included in the beacon chain") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name(NO_CONFIRMATION) + Arg::new(NO_CONFIRMATION) .long(NO_CONFIRMATION) .help("Exits without prompting for confirmation that you understand the implications of a voluntary exit. 
This should be used with caution") + .display_order(0) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) ) .arg( - Arg::with_name(STDIN_INPUTS_FLAG) - .takes_value(false) - .hidden(cfg!(windows)) + Arg::new(STDIN_INPUTS_FLAG) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .hide(cfg!(windows)) .long(STDIN_INPUTS_FLAG) - .help("If present, read all user inputs from stdin instead of tty."), + .help("If present, read all user inputs from stdin instead of tty.") + .display_order(0) ) } @@ -78,9 +90,9 @@ pub fn cli_run(matches: &ArgMatches, env: Environment) -> Result< let password_file_path: Option = clap_utils::parse_optional(matches, PASSWORD_FILE_FLAG)?; - let stdin_inputs = cfg!(windows) || matches.is_present(STDIN_INPUTS_FLAG); - let no_wait = matches.is_present(NO_WAIT); - let no_confirmation = matches.is_present(NO_CONFIRMATION); + let stdin_inputs = cfg!(windows) || matches.get_flag(STDIN_INPUTS_FLAG); + let no_wait = matches.get_flag(NO_WAIT); + let no_confirmation = matches.get_flag(NO_CONFIRMATION); let spec = env.eth2_config().spec.clone(); let server_url: String = clap_utils::parse_required(matches, BEACON_SERVER_FLAG)?; diff --git a/account_manager/src/validator/import.rs b/account_manager/src/validator/import.rs index bf000385f3..a7c72679f7 100644 --- a/account_manager/src/validator/import.rs +++ b/account_manager/src/validator/import.rs @@ -9,7 +9,8 @@ use account_utils::{ }, ZeroizeString, }; -use clap::{App, Arg, ArgMatches}; +use clap::{Arg, ArgAction, ArgMatches, Command}; +use clap_utils::FLAG_HEADER; use slashing_protection::{SlashingDatabase, SLASHING_PROTECTION_FILENAME}; use std::fs; use std::path::PathBuf; @@ -25,8 +26,8 @@ pub const PASSWORD_PROMPT: &str = "Enter the keystore password, or press enter t pub const KEYSTORE_REUSE_WARNING: &str = "DO NOT USE THE ORIGINAL KEYSTORES TO VALIDATE WITH \ ANOTHER CLIENT, OR YOU WILL GET SLASHED."; -pub fn cli_app<'a, 'b>() -> App<'a, 'b> { - App::new(CMD) +pub fn cli_app() -> Command { + Command::new(CMD) .about( "Imports one or more EIP-2335 passwords into a Lighthouse VC directory, \ requesting passwords interactively. 
The directory flag provides a convenient \ @@ -34,16 +35,17 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { Python utility.", ) .arg( - Arg::with_name(KEYSTORE_FLAG) + Arg::new(KEYSTORE_FLAG) .long(KEYSTORE_FLAG) .value_name("KEYSTORE_PATH") .help("Path to a single keystore to be imported.") .conflicts_with(DIR_FLAG) - .required_unless(DIR_FLAG) - .takes_value(true), + .required_unless_present(DIR_FLAG) + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(DIR_FLAG) + Arg::new(DIR_FLAG) .long(DIR_FLAG) .value_name("KEYSTORES_DIRECTORY") .help( @@ -53,23 +55,29 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { has the '.json' extension will be attempted to be imported.", ) .conflicts_with(KEYSTORE_FLAG) - .required_unless(KEYSTORE_FLAG) - .takes_value(true), + .required_unless_present(KEYSTORE_FLAG) + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(STDIN_INPUTS_FLAG) - .takes_value(false) - .hidden(cfg!(windows)) + Arg::new(STDIN_INPUTS_FLAG) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .hide(cfg!(windows)) .long(STDIN_INPUTS_FLAG) - .help("If present, read all user inputs from stdin instead of tty."), + .help("If present, read all user inputs from stdin instead of tty.") + .display_order(0), ) .arg( - Arg::with_name(REUSE_PASSWORD_FLAG) + Arg::new(REUSE_PASSWORD_FLAG) .long(REUSE_PASSWORD_FLAG) - .help("If present, the same password will be used for all imported keystores."), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .help("If present, the same password will be used for all imported keystores.") + .display_order(0), ) .arg( - Arg::with_name(PASSWORD_FLAG) + Arg::new(PASSWORD_FLAG) .long(PASSWORD_FLAG) .value_name("KEYSTORE_PASSWORD_PATH") .requires(REUSE_PASSWORD_FLAG) @@ -79,15 +87,16 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { The password will be copied to the `validator_definitions.yml` file, so after \ import we strongly recommend you delete the file at KEYSTORE_PASSWORD_PATH.", ) - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) } pub fn cli_run(matches: &ArgMatches, validator_dir: PathBuf) -> Result<(), String> { let keystore: Option = clap_utils::parse_optional(matches, KEYSTORE_FLAG)?; let keystores_dir: Option = clap_utils::parse_optional(matches, DIR_FLAG)?; - let stdin_inputs = cfg!(windows) || matches.is_present(STDIN_INPUTS_FLAG); - let reuse_password = matches.is_present(REUSE_PASSWORD_FLAG); + let stdin_inputs = cfg!(windows) || matches.get_flag(STDIN_INPUTS_FLAG); + let reuse_password = matches.get_flag(REUSE_PASSWORD_FLAG); let keystore_password_path: Option = clap_utils::parse_optional(matches, PASSWORD_FLAG)?; diff --git a/account_manager/src/validator/list.rs b/account_manager/src/validator/list.rs index 3385728369..d082a49590 100644 --- a/account_manager/src/validator/list.rs +++ b/account_manager/src/validator/list.rs @@ -1,11 +1,11 @@ use account_utils::validator_definitions::ValidatorDefinitions; -use clap::App; +use clap::Command; use std::path::PathBuf; pub const CMD: &str = "list"; -pub fn cli_app<'a, 'b>() -> App<'a, 'b> { - App::new(CMD).about("Lists the public keys of all validators.") +pub fn cli_app() -> Command { + Command::new(CMD).about("Lists the public keys of all validators.") } pub fn cli_run(validator_dir: PathBuf) -> Result<(), String> { diff --git a/account_manager/src/validator/mod.rs b/account_manager/src/validator/mod.rs index af977dcf03..6616bb0c45 100644 --- a/account_manager/src/validator/mod.rs +++ b/account_manager/src/validator/mod.rs @@ -7,7 +7,8 @@ 
pub mod recover; pub mod slashing_protection; use crate::{VALIDATOR_DIR_FLAG, VALIDATOR_DIR_FLAG_ALIAS}; -use clap::{App, Arg, ArgMatches}; +use clap::{Arg, ArgAction, ArgMatches, Command}; +use clap_utils::FLAG_HEADER; use directory::{parse_path_or_default_with_flag, DEFAULT_VALIDATOR_DIR}; use environment::Environment; use std::path::PathBuf; @@ -15,11 +16,21 @@ use types::EthSpec; pub const CMD: &str = "validator"; -pub fn cli_app<'a, 'b>() -> App<'a, 'b> { - App::new(CMD) +pub fn cli_app() -> Command { + Command::new(CMD) + .display_order(0) .about("Provides commands for managing Eth2 validators.") .arg( - Arg::with_name(VALIDATOR_DIR_FLAG) + Arg::new("help") + .long("help") + .short('h') + .help("Prints help information") + .action(ArgAction::HelpLong) + .display_order(0) + .help_heading(FLAG_HEADER), + ) + .arg( + Arg::new(VALIDATOR_DIR_FLAG) .long(VALIDATOR_DIR_FLAG) .alias(VALIDATOR_DIR_FLAG_ALIAS) .value_name("VALIDATOR_DIRECTORY") @@ -27,7 +38,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { "The path to search for validator directories. \ Defaults to ~/.lighthouse/{network}/validators", ) - .takes_value(true) + .action(ArgAction::Set) .conflicts_with("datadir"), ) .subcommand(create::cli_app()) @@ -40,7 +51,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { } pub fn cli_run(matches: &ArgMatches, env: Environment) -> Result<(), String> { - let validator_base_dir = if matches.value_of("datadir").is_some() { + let validator_base_dir = if matches.get_one::("datadir").is_some() { let path: PathBuf = clap_utils::parse_required(matches, "datadir")?; path.join(DEFAULT_VALIDATOR_DIR) } else { @@ -49,18 +60,19 @@ pub fn cli_run(matches: &ArgMatches, env: Environment) -> Result< eprintln!("validator-dir path: {:?}", validator_base_dir); match matches.subcommand() { - (create::CMD, Some(matches)) => create::cli_run::(matches, env, validator_base_dir), - (modify::CMD, Some(matches)) => modify::cli_run(matches, validator_base_dir), - (import::CMD, Some(matches)) => import::cli_run(matches, validator_base_dir), - (list::CMD, Some(_)) => list::cli_run(validator_base_dir), - (recover::CMD, Some(matches)) => recover::cli_run(matches, validator_base_dir), - (slashing_protection::CMD, Some(matches)) => { + Some((create::CMD, matches)) => create::cli_run::(matches, env, validator_base_dir), + Some((modify::CMD, matches)) => modify::cli_run(matches, validator_base_dir), + Some((import::CMD, matches)) => import::cli_run(matches, validator_base_dir), + Some((list::CMD, _)) => list::cli_run(validator_base_dir), + Some((recover::CMD, matches)) => recover::cli_run(matches, validator_base_dir), + Some((slashing_protection::CMD, matches)) => { slashing_protection::cli_run(matches, env, validator_base_dir) } - (exit::CMD, Some(matches)) => exit::cli_run(matches, env), - (unknown, _) => Err(format!( + Some((exit::CMD, matches)) => exit::cli_run(matches, env), + Some((unknown, _)) => Err(format!( "{} does not have a {} command. See --help", CMD, unknown )), + _ => Err(format!("No command provided for {}. 
See --help", CMD)), } } diff --git a/account_manager/src/validator/modify.rs b/account_manager/src/validator/modify.rs index bd4ae4d8f4..571cd28bf5 100644 --- a/account_manager/src/validator/modify.rs +++ b/account_manager/src/validator/modify.rs @@ -1,6 +1,7 @@ use account_utils::validator_definitions::ValidatorDefinitions; use bls::PublicKey; -use clap::{App, Arg, ArgMatches}; +use clap::{Arg, ArgAction, ArgMatches, Command}; +use clap_utils::FLAG_HEADER; use std::{collections::HashSet, path::PathBuf}; pub const CMD: &str = "modify"; @@ -10,43 +11,50 @@ pub const DISABLE: &str = "disable"; pub const PUBKEY_FLAG: &str = "pubkey"; pub const ALL: &str = "all"; -pub fn cli_app<'a, 'b>() -> App<'a, 'b> { - App::new(CMD) +pub fn cli_app() -> Command { + Command::new(CMD) .about("Modify validator status in validator_definitions.yml.") + .display_order(0) .subcommand( - App::new(ENABLE) + Command::new(ENABLE) .about("Enable validator(s) in validator_definitions.yml.") .arg( - Arg::with_name(PUBKEY_FLAG) + Arg::new(PUBKEY_FLAG) .long(PUBKEY_FLAG) .value_name("PUBKEY") .help("Validator pubkey to enable") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(ALL) + Arg::new(ALL) .long(ALL) .help("Enable all validators in the validator directory") - .takes_value(false) - .conflicts_with(PUBKEY_FLAG), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .conflicts_with(PUBKEY_FLAG) + .display_order(0), ), ) .subcommand( - App::new(DISABLE) + Command::new(DISABLE) .about("Disable validator(s) in validator_definitions.yml.") .arg( - Arg::with_name(PUBKEY_FLAG) + Arg::new(PUBKEY_FLAG) .long(PUBKEY_FLAG) .value_name("PUBKEY") .help("Validator pubkey to disable") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(ALL) + Arg::new(ALL) .long(ALL) .help("Disable all validators in the validator directory") - .takes_value(false) - .conflicts_with(PUBKEY_FLAG), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .conflicts_with(PUBKEY_FLAG) + .display_order(0), ), ) } @@ -55,14 +63,15 @@ pub fn cli_run(matches: &ArgMatches, validator_dir: PathBuf) -> Result<(), Strin // `true` implies we are setting `validator_definition.enabled = true` and // vice versa. let (enabled, sub_matches) = match matches.subcommand() { - (ENABLE, Some(sub_matches)) => (true, sub_matches), - (DISABLE, Some(sub_matches)) => (false, sub_matches), - (unknown, _) => { + Some((ENABLE, sub_matches)) => (true, sub_matches), + Some((DISABLE, sub_matches)) => (false, sub_matches), + Some((unknown, _)) => { return Err(format!( "{} does not have a {} command. See --help", CMD, unknown )) } + _ => return Err(format!("No command provided for {}. 
See --help", CMD)), }; let mut defs = ValidatorDefinitions::open(&validator_dir).map_err(|e| { format!( @@ -70,7 +79,7 @@ pub fn cli_run(matches: &ArgMatches, validator_dir: PathBuf) -> Result<(), Strin validator_dir, e ) })?; - let pubkeys_to_modify = if sub_matches.is_present(ALL) { + let pubkeys_to_modify = if sub_matches.get_flag(ALL) { defs.as_slice() .iter() .map(|def| def.voting_public_key.clone()) diff --git a/account_manager/src/validator/recover.rs b/account_manager/src/validator/recover.rs index 33d3b18926..4677db18df 100644 --- a/account_manager/src/validator/recover.rs +++ b/account_manager/src/validator/recover.rs @@ -4,7 +4,8 @@ use crate::wallet::create::STDIN_INPUTS_FLAG; use crate::SECRETS_DIR_FLAG; use account_utils::eth2_keystore::{keypair_from_secret, Keystore, KeystoreBuilder}; use account_utils::{random_password, read_mnemonic_from_cli}; -use clap::{App, Arg, ArgMatches}; +use clap::{Arg, ArgAction, ArgMatches, Command}; +use clap_utils::FLAG_HEADER; use directory::ensure_dir_exists; use directory::{parse_path_or_default_with_flag, DEFAULT_SECRET_DIR}; use eth2_wallet::bip39::Seed; @@ -15,70 +16,79 @@ pub const CMD: &str = "recover"; pub const FIRST_INDEX_FLAG: &str = "first-index"; pub const MNEMONIC_FLAG: &str = "mnemonic-path"; -pub fn cli_app<'a, 'b>() -> App<'a, 'b> { - App::new(CMD) +pub fn cli_app() -> Command { + Command::new(CMD) .about( "Recovers validator private keys given a BIP-39 mnemonic phrase. \ If you did not specify a `--first-index` or count `--count`, by default this will \ only recover the keys associated with the validator at index 0 for an HD wallet \ in accordance with the EIP-2333 spec.") .arg( - Arg::with_name(FIRST_INDEX_FLAG) + Arg::new(FIRST_INDEX_FLAG) .long(FIRST_INDEX_FLAG) .value_name("FIRST_INDEX") .help("The first of consecutive key indexes you wish to recover.") - .takes_value(true) + .action(ArgAction::Set) .required(false) - .default_value("0"), + .default_value("0") + .display_order(0) ) .arg( - Arg::with_name(COUNT_FLAG) + Arg::new(COUNT_FLAG) .long(COUNT_FLAG) .value_name("COUNT") .help("The number of validator keys you wish to recover. Counted consecutively from the provided `--first_index`.") - .takes_value(true) + .action(ArgAction::Set) .required(false) - .default_value("1"), + .default_value("1") + .display_order(0) ) .arg( - Arg::with_name(MNEMONIC_FLAG) + Arg::new(MNEMONIC_FLAG) .long(MNEMONIC_FLAG) .value_name("MNEMONIC_PATH") .help( "If present, the mnemonic will be read in from this file.", ) - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name(SECRETS_DIR_FLAG) + Arg::new(SECRETS_DIR_FLAG) .long(SECRETS_DIR_FLAG) .value_name("SECRETS_DIR") .help( "The path where the validator keystore passwords will be stored. \ Defaults to ~/.lighthouse/{network}/secrets", ) - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name(STORE_WITHDRAW_FLAG) + Arg::new(STORE_WITHDRAW_FLAG) .long(STORE_WITHDRAW_FLAG) .help( "If present, the withdrawal keystore will be stored alongside the voting \ keypair. 
It is generally recommended to *not* store the withdrawal key and \ instead generate them from the wallet seed when required.", - ), + ) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name(STDIN_INPUTS_FLAG) - .takes_value(false) - .hidden(cfg!(windows)) + Arg::new(STDIN_INPUTS_FLAG) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .hide(cfg!(windows)) .long(STDIN_INPUTS_FLAG) - .help("If present, read all user inputs from stdin instead of tty."), + .help("If present, read all user inputs from stdin instead of tty.") + .display_order(0) ) } pub fn cli_run(matches: &ArgMatches, validator_dir: PathBuf) -> Result<(), String> { - let secrets_dir = if matches.value_of("datadir").is_some() { + let secrets_dir = if matches.get_one::("datadir").is_some() { let path: PathBuf = clap_utils::parse_required(matches, "datadir")?; path.join(DEFAULT_SECRET_DIR) } else { @@ -87,7 +97,7 @@ pub fn cli_run(matches: &ArgMatches, validator_dir: PathBuf) -> Result<(), Strin let first_index: u32 = clap_utils::parse_required(matches, FIRST_INDEX_FLAG)?; let count: u32 = clap_utils::parse_required(matches, COUNT_FLAG)?; let mnemonic_path: Option = clap_utils::parse_optional(matches, MNEMONIC_FLAG)?; - let stdin_inputs = cfg!(windows) || matches.is_present(STDIN_INPUTS_FLAG); + let stdin_inputs = cfg!(windows) || matches.get_flag(STDIN_INPUTS_FLAG); eprintln!("secrets-dir path: {:?}", secrets_dir); @@ -131,7 +141,7 @@ pub fn cli_run(matches: &ArgMatches, validator_dir: PathBuf) -> Result<(), Strin .password_dir(secrets_dir.clone()) .voting_keystore(keystores.voting, voting_password.as_bytes()) .withdrawal_keystore(keystores.withdrawal, withdrawal_password.as_bytes()) - .store_withdrawal_keystore(matches.is_present(STORE_WITHDRAW_FLAG)) + .store_withdrawal_keystore(matches.get_flag(STORE_WITHDRAW_FLAG)) .build() .map_err(|e| format!("Unable to build validator directory: {:?}", e))?; diff --git a/account_manager/src/validator/slashing_protection.rs b/account_manager/src/validator/slashing_protection.rs index ff2eeb9cbf..bcd860a484 100644 --- a/account_manager/src/validator/slashing_protection.rs +++ b/account_manager/src/validator/slashing_protection.rs @@ -1,4 +1,4 @@ -use clap::{App, Arg, ArgMatches}; +use clap::{Arg, ArgAction, ArgMatches, Command}; use environment::Environment; use slashing_protection::{ interchange::Interchange, InterchangeError, InterchangeImportOutcome, SlashingDatabase, @@ -18,43 +18,47 @@ pub const EXPORT_FILE_ARG: &str = "EXPORT-FILE"; pub const PUBKEYS_FLAG: &str = "pubkeys"; -pub fn cli_app<'a, 'b>() -> App<'a, 'b> { - App::new(CMD) +pub fn cli_app() -> Command { + Command::new(CMD) .about("Import or export slashing protection data to or from another client") + .display_order(0) .subcommand( - App::new(IMPORT_CMD) + Command::new(IMPORT_CMD) .about("Import an interchange file") .arg( - Arg::with_name(IMPORT_FILE_ARG) - .takes_value(true) + Arg::new(IMPORT_FILE_ARG) + .action(ArgAction::Set) .value_name("FILE") + .display_order(0) .help("The slashing protection interchange file to import (.json)"), ) ) .subcommand( - App::new(EXPORT_CMD) + Command::new(EXPORT_CMD) .about("Export an interchange file") .arg( - Arg::with_name(EXPORT_FILE_ARG) - .takes_value(true) + Arg::new(EXPORT_FILE_ARG) + .action(ArgAction::Set) .value_name("FILE") - .help("The filename to export the interchange file to"), + .help("The filename to export the interchange file to") + .display_order(0) ) .arg( - Arg::with_name(PUBKEYS_FLAG) + 
Arg::new(PUBKEYS_FLAG) .long(PUBKEYS_FLAG) - .takes_value(true) + .action(ArgAction::Set) .value_name("PUBKEYS") .help( "List of public keys to export history for. Keys should be 0x-prefixed, \ comma-separated. All known keys will be exported if omitted", - ), + ) + .display_order(0) ) ) } pub fn cli_run( - matches: &ArgMatches<'_>, + matches: &ArgMatches, env: Environment, validator_base_dir: PathBuf, ) -> Result<(), String> { @@ -68,7 +72,7 @@ pub fn cli_run( .ok_or_else(|| "Unable to get genesis state, has genesis occurred?".to_string())?; match matches.subcommand() { - (IMPORT_CMD, Some(matches)) => { + Some((IMPORT_CMD, matches)) => { let import_filename: PathBuf = clap_utils::parse_required(matches, IMPORT_FILE_ARG)?; let import_file = File::open(&import_filename).map_err(|e| { format!( @@ -168,7 +172,7 @@ pub fn cli_run( Ok(()) } - (EXPORT_CMD, Some(matches)) => { + Some((EXPORT_CMD, matches)) => { let export_filename: PathBuf = clap_utils::parse_required(matches, EXPORT_FILE_ARG)?; let selected_pubkeys = if let Some(pubkeys) = @@ -215,7 +219,7 @@ pub fn cli_run( Ok(()) } - ("", _) => Err("No subcommand provided, see --help for options".to_string()), - (command, _) => Err(format!("No such subcommand `{}`", command)), + Some((command, _)) => Err(format!("No such subcommand `{}`", command)), + _ => Err("No subcommand provided, see --help for options".to_string()), } } diff --git a/account_manager/src/wallet/create.rs b/account_manager/src/wallet/create.rs index accee11b5a..12aa5d3801 100644 --- a/account_manager/src/wallet/create.rs +++ b/account_manager/src/wallet/create.rs @@ -3,7 +3,7 @@ use crate::WALLETS_DIR_FLAG; use account_utils::{ is_password_sufficiently_complex, random_password, read_password_from_user, strip_off_newlines, }; -use clap::{App, Arg, ArgMatches}; +use clap::{Arg, ArgAction, ArgMatches, Command}; use eth2_wallet::{ bip39::{Language, Mnemonic, MnemonicType}, PlainText, @@ -33,21 +33,22 @@ pub const NEW_WALLET_PASSWORD_PROMPT: &str = "Enter a password for your new wallet that is at least 12 characters long:"; pub const RETYPE_PASSWORD_PROMPT: &str = "Please re-enter your wallet's new password:"; -pub fn cli_app<'a, 'b>() -> App<'a, 'b> { - App::new(CMD) +pub fn cli_app() -> Command { + Command::new(CMD) .about("Creates a new HD (hierarchical-deterministic) EIP-2386 wallet.") .arg( - Arg::with_name(NAME_FLAG) + Arg::new(NAME_FLAG) .long(NAME_FLAG) .value_name("WALLET_NAME") .help( "The wallet will be created with this name. It is not allowed to \ create two wallets with the same name for the same --base-dir.", ) - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name(PASSWORD_FLAG) + Arg::new(PASSWORD_FLAG) .long(PASSWORD_FLAG) .value_name("WALLET_PASSWORD_PATH") .help( @@ -56,49 +57,65 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { saved at that path. To avoid confusion, if the file does not already \ exist it must include a '.pass' suffix.", ) - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name(TYPE_FLAG) + Arg::new(TYPE_FLAG) .long(TYPE_FLAG) .value_name("WALLET_TYPE") .help( "The type of wallet to create. 
Only HD (hierarchical-deterministic) \ wallets are supported presently..", ) - .takes_value(true) - .possible_values(&[HD_TYPE]) - .default_value(HD_TYPE), + .action(ArgAction::Set) + .value_parser([HD_TYPE]) + .default_value(HD_TYPE) + .display_order(0) ) .arg( - Arg::with_name(MNEMONIC_FLAG) + Arg::new(MNEMONIC_FLAG) .long(MNEMONIC_FLAG) .value_name("MNEMONIC_PATH") .help( "If present, the mnemonic will be saved to this file. DO NOT SHARE THE MNEMONIC.", ) - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name(STDIN_INPUTS_FLAG) - .takes_value(false) - .hidden(cfg!(windows)) + Arg::new(STDIN_INPUTS_FLAG) + .action(ArgAction::SetTrue) + .hide(cfg!(windows)) .long(STDIN_INPUTS_FLAG) - .help("If present, read all user inputs from stdin instead of tty."), + .help("If present, read all user inputs from stdin instead of tty.") + .display_order(0) ) .arg( - Arg::with_name(MNEMONIC_LENGTH_FLAG) + Arg::new(MNEMONIC_LENGTH_FLAG) .long(MNEMONIC_LENGTH_FLAG) .value_name("MNEMONIC_LENGTH") .help("The number of words to use for the mnemonic phrase.") - .takes_value(true) - .validator(|len| { - match len.parse::().ok().and_then(|words| MnemonicType::for_word_count(words).ok()) { - Some(_) => Ok(()), - None => Err(format!("Mnemonic length must be one of {}", MNEMONIC_TYPES.iter().map(|t| t.word_count().to_string()).collect::>().join(", "))), - } + .action(ArgAction::Set) + .value_parser(|len: &str| { + match len + .parse::() + .ok() + .and_then(|words| MnemonicType::for_word_count(words).ok()) + { + Some(_) => Ok(len.to_string()), + None => Err(format!( + "Mnemonic length must be one of {}", + MNEMONIC_TYPES + .iter() + .map(|t| t.word_count().to_string()) + .collect::>() + .join(", ") + )), + } }) - .default_value("24"), + .default_value("24") + .display_order(0) ) } @@ -153,7 +170,7 @@ pub fn create_wallet_from_mnemonic( let name: Option = clap_utils::parse_optional(matches, NAME_FLAG)?; let wallet_password_path: Option = clap_utils::parse_optional(matches, PASSWORD_FLAG)?; let type_field: String = clap_utils::parse_required(matches, TYPE_FLAG)?; - let stdin_inputs = cfg!(windows) || matches.is_present(STDIN_INPUTS_FLAG); + let stdin_inputs = cfg!(windows) || matches.get_flag(STDIN_INPUTS_FLAG); let wallet_type = match type_field.as_ref() { HD_TYPE => WalletType::Hd, unknown => return Err(format!("--{} {} is not supported", TYPE_FLAG, unknown)), diff --git a/account_manager/src/wallet/list.rs b/account_manager/src/wallet/list.rs index 9190de3915..a551ffae12 100644 --- a/account_manager/src/wallet/list.rs +++ b/account_manager/src/wallet/list.rs @@ -1,12 +1,12 @@ use crate::WALLETS_DIR_FLAG; -use clap::App; +use clap::Command; use eth2_wallet_manager::WalletManager; use std::path::PathBuf; pub const CMD: &str = "list"; -pub fn cli_app<'a, 'b>() -> App<'a, 'b> { - App::new(CMD).about("Lists the names of all wallets.") +pub fn cli_app() -> Command { + Command::new(CMD).about("Lists the names of all wallets.") } pub fn cli_run(wallet_base_dir: PathBuf) -> Result<(), String> { diff --git a/account_manager/src/wallet/mod.rs b/account_manager/src/wallet/mod.rs index dfadebf57f..59f5f36252 100644 --- a/account_manager/src/wallet/mod.rs +++ b/account_manager/src/wallet/mod.rs @@ -3,21 +3,32 @@ pub mod list; pub mod recover; use crate::WALLETS_DIR_FLAG; -use clap::{App, Arg, ArgMatches}; +use clap::{Arg, ArgAction, ArgMatches, Command}; +use clap_utils::FLAG_HEADER; use directory::{ensure_dir_exists, parse_path_or_default_with_flag, DEFAULT_WALLET_DIR}; use 
std::path::PathBuf; pub const CMD: &str = "wallet"; -pub fn cli_app<'a, 'b>() -> App<'a, 'b> { - App::new(CMD) +pub fn cli_app() -> Command { + Command::new(CMD) .about("Manage wallets, from which validator keys can be derived.") + .display_order(0) .arg( - Arg::with_name(WALLETS_DIR_FLAG) + Arg::new("help") + .long("help") + .short('h') + .help("Prints help information") + .action(ArgAction::HelpLong) + .display_order(0) + .help_heading(FLAG_HEADER) + ) + .arg( + Arg::new(WALLETS_DIR_FLAG) .long(WALLETS_DIR_FLAG) .value_name("WALLETS_DIRECTORY") .help("A path containing Eth2 EIP-2386 wallets. Defaults to ~/.lighthouse/{network}/wallets") - .takes_value(true) + .action(ArgAction::Set) .conflicts_with("datadir"), ) .subcommand(create::cli_app()) @@ -26,7 +37,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { } pub fn cli_run(matches: &ArgMatches) -> Result<(), String> { - let wallet_base_dir = if matches.value_of("datadir").is_some() { + let wallet_base_dir = if matches.get_one::("datadir").is_some() { let path: PathBuf = clap_utils::parse_required(matches, "datadir")?; path.join(DEFAULT_WALLET_DIR) } else { @@ -37,12 +48,13 @@ pub fn cli_run(matches: &ArgMatches) -> Result<(), String> { eprintln!("wallet-dir path: {:?}", wallet_base_dir); match matches.subcommand() { - (create::CMD, Some(matches)) => create::cli_run(matches, wallet_base_dir), - (list::CMD, Some(_)) => list::cli_run(wallet_base_dir), - (recover::CMD, Some(matches)) => recover::cli_run(matches, wallet_base_dir), - (unknown, _) => Err(format!( + Some((create::CMD, matches)) => create::cli_run(matches, wallet_base_dir), + Some((list::CMD, _)) => list::cli_run(wallet_base_dir), + Some((recover::CMD, matches)) => recover::cli_run(matches, wallet_base_dir), + Some((unknown, _)) => Err(format!( "{} does not have a {} command. See --help", CMD, unknown )), + _ => Err("No subcommand provided, see --help for options".to_string()), } } diff --git a/account_manager/src/wallet/recover.rs b/account_manager/src/wallet/recover.rs index 6e047aca8d..b9641f1152 100644 --- a/account_manager/src/wallet/recover.rs +++ b/account_manager/src/wallet/recover.rs @@ -1,27 +1,28 @@ use crate::wallet::create::{create_wallet_from_mnemonic, STDIN_INPUTS_FLAG}; use crate::wallet::create::{HD_TYPE, NAME_FLAG, PASSWORD_FLAG, TYPE_FLAG}; use account_utils::read_mnemonic_from_cli; -use clap::{App, Arg, ArgMatches}; +use clap::{Arg, ArgAction, ArgMatches, Command}; use std::path::PathBuf; pub const CMD: &str = "recover"; pub const MNEMONIC_FLAG: &str = "mnemonic-path"; -pub fn cli_app<'a, 'b>() -> App<'a, 'b> { - App::new(CMD) +pub fn cli_app() -> Command { + Command::new(CMD) .about("Recovers an EIP-2386 wallet from a given a BIP-39 mnemonic phrase.") .arg( - Arg::with_name(NAME_FLAG) + Arg::new(NAME_FLAG) .long(NAME_FLAG) .value_name("WALLET_NAME") .help( "The wallet will be created with this name. It is not allowed to \ create two wallets with the same name for the same --base-dir.", ) - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(PASSWORD_FLAG) + Arg::new(PASSWORD_FLAG) .long(PASSWORD_FLAG) .value_name("PASSWORD_FILE_PATH") .help( @@ -31,39 +32,43 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { saved at that path. 
To avoid confusion, if the file does not already \ exist it must include a '.pass' suffix.", ) - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(MNEMONIC_FLAG) + Arg::new(MNEMONIC_FLAG) .long(MNEMONIC_FLAG) .value_name("MNEMONIC_PATH") .help("If present, the mnemonic will be read in from this file.") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(TYPE_FLAG) + Arg::new(TYPE_FLAG) .long(TYPE_FLAG) .value_name("WALLET_TYPE") .help( "The type of wallet to create. Only HD (hierarchical-deterministic) \ wallets are supported presently..", ) - .takes_value(true) - .possible_values(&[HD_TYPE]) - .default_value(HD_TYPE), + .action(ArgAction::Set) + .value_parser([HD_TYPE]) + .default_value(HD_TYPE) + .display_order(0), ) .arg( - Arg::with_name(STDIN_INPUTS_FLAG) - .takes_value(false) - .hidden(cfg!(windows)) + Arg::new(STDIN_INPUTS_FLAG) + .action(ArgAction::SetTrue) + .hide(cfg!(windows)) .long(STDIN_INPUTS_FLAG) - .help("If present, read all user inputs from stdin instead of tty."), + .help("If present, read all user inputs from stdin instead of tty.") + .display_order(0), ) } pub fn cli_run(matches: &ArgMatches, wallet_base_dir: PathBuf) -> Result<(), String> { let mnemonic_path: Option = clap_utils::parse_optional(matches, MNEMONIC_FLAG)?; - let stdin_inputs = cfg!(windows) || matches.is_present(STDIN_INPUTS_FLAG); + let stdin_inputs = cfg!(windows) || matches.get_flag(STDIN_INPUTS_FLAG); eprintln!(); eprintln!("WARNING: KEY RECOVERY CAN LEAD TO DUPLICATING VALIDATORS KEYS, WHICH CAN LEAD TO SLASHING."); diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 7cc6e2b6ae..e2f6c681c1 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -29,18 +29,14 @@ clap = { workspace = true } slog = { workspace = true } dirs = { workspace = true } directory = { workspace = true } -futures = { workspace = true } environment = { workspace = true } task_executor = { workspace = true } genesis = { workspace = true } -eth2_network_config = { workspace = true } execution_layer = { workspace = true } lighthouse_network = { workspace = true } -serde = { workspace = true } serde_json = { workspace = true } clap_utils = { workspace = true } hyper = { workspace = true } -lighthouse_version = { workspace = true } hex = { workspace = true } slasher = { workspace = true } monitoring_api = { workspace = true } diff --git a/beacon_node/beacon_chain/src/attestation_verification.rs b/beacon_node/beacon_chain/src/attestation_verification.rs index 9c44c5529b..932374d486 100644 --- a/beacon_node/beacon_chain/src/attestation_verification.rs +++ b/beacon_node/beacon_chain/src/attestation_verification.rs @@ -45,7 +45,10 @@ use proto_array::Block as ProtoBlock; use slog::debug; use slot_clock::SlotClock; use state_processing::{ - common::{attesting_indices_base, attesting_indices_electra}, + common::{ + attesting_indices_base, + attesting_indices_electra::{self, get_committee_indices}, + }, per_block_processing::errors::{AttestationValidationError, BlockOperationError}, signature_sets::{ indexed_attestation_signature_set_from_pubkeys, @@ -55,10 +58,11 @@ use state_processing::{ use std::borrow::Cow; use strum::AsRefStr; use tree_hash::TreeHash; +use tree_hash_derive::TreeHash; use types::{ - Attestation, AttestationRef, BeaconCommittee, BeaconStateError::NoCommitteeFound, ChainSpec, - CommitteeIndex, Epoch, EthSpec, ForkName, Hash256, IndexedAttestation, SelectionProof, - SignedAggregateAndProof, Slot, SubnetId, + 
Attestation, AttestationData, AttestationRef, BeaconCommittee, BeaconStateError,
+    BeaconStateError::NoCommitteeFound, ChainSpec, CommitteeIndex, Epoch, EthSpec, ForkName,
+    Hash256, IndexedAttestation, SelectionProof, SignedAggregateAndProof, Slot, SubnetId,
 };

 pub use batch::{batch_verify_aggregated_attestations, batch_verify_unaggregated_attestations};
@@ -139,6 +143,12 @@ pub enum Error {
     ///
     /// The peer has sent an invalid message.
     ValidatorIndexTooHigh(usize),
+    /// The committee index is not set to zero after Electra.
+    ///
+    /// ## Peer scoring
+    ///
+    /// The peer has sent an invalid message.
+    CommitteeIndexNonZero(usize),
     /// The `attestation.data.beacon_block_root` block is unknown.
     ///
     /// ## Peer scoring
     ///
@@ -187,6 +197,12 @@ pub enum Error {
     ///
     /// The peer has sent an invalid message.
     NotExactlyOneAggregationBitSet(usize),
+    /// The attestation doesn't have exactly one committee bit set.
+    ///
+    /// ## Peer scoring
+    ///
+    /// The peer has sent an invalid message.
+    NotExactlyOneCommitteeBitSet(usize),
     /// We have already observed an attestation for the `validator_index` and refuse to process
     /// another.
     ///
@@ -248,9 +264,30 @@ pub enum Error {
     BeaconChainError(BeaconChainError),
 }

+// TODO(electra): the error conversion changes here are to get a test case to pass;
+// this could easily be cleaned up.
 impl From<BeaconChainError> for Error {
     fn from(e: BeaconChainError) -> Self {
-        Error::BeaconChainError(e)
+        match &e {
+            BeaconChainError::BeaconStateError(beacon_state_error) => {
+                if let BeaconStateError::AggregatorNotInCommittee { aggregator_index } =
+                    beacon_state_error
+                {
+                    Self::AggregatorNotInCommittee {
+                        aggregator_index: *aggregator_index,
+                    }
+                } else if let BeaconStateError::InvalidSelectionProof { aggregator_index } =
+                    beacon_state_error
+                {
+                    Self::InvalidSelectionProof {
+                        aggregator_index: *aggregator_index,
+                    }
+                } else {
+                    Error::BeaconChainError(e)
+                }
+            }
+            _ => Error::BeaconChainError(e),
+        }
     }
 }

@@ -265,10 +302,17 @@ enum CheckAttestationSignature {
 /// `IndexedAttestation` can be derived.
 ///
 /// These attestations have *not* undergone signature verification.
+/// The `observed_attestation_key_root` is the hashed value of an `ObservedAttestationKey`.
 struct IndexedAggregatedAttestation<'a, T: BeaconChainTypes> {
     signed_aggregate: &'a SignedAggregateAndProof<T::EthSpec>,
     indexed_attestation: IndexedAttestation<T::EthSpec>,
-    attestation_data_root: Hash256,
+    observed_attestation_key_root: Hash256,
+}
+
+#[derive(TreeHash)]
+pub struct ObservedAttestationKey {
+    pub committee_index: u64,
+    pub attestation_data: AttestationData,
 }

 /// Wraps a `Attestation` that has been verified up until the point that an `IndexedAttestation` can
@@ -466,18 +510,27 @@ impl<'a, T: BeaconChainTypes> IndexedAggregatedAttestation<'a, T> {
             });
         }

-        // Ensure the valid aggregated attestation has not already been seen locally.
-        let attestation_data = attestation.data();
-        let attestation_data_root = attestation_data.tree_hash_root();
+        let observed_attestation_key_root = ObservedAttestationKey {
+            committee_index: attestation
+                .committee_index()
+                .ok_or(Error::NotExactlyOneCommitteeBitSet(0))?,
+            attestation_data: attestation.data().clone(),
+        }
+        .tree_hash_root();
+
+        // [New in Electra:EIP7549]
+        verify_committee_index(attestation, &chain.spec)?;

         if chain
             .observed_attestations
             .write()
-            .is_known_subset(attestation, attestation_data_root)
+            .is_known_subset(attestation, observed_attestation_key_root)
             .map_err(|e| Error::BeaconChainError(e.into()))?
{ metrics::inc_counter(&metrics::AGGREGATED_ATTESTATION_SUBSETS); - return Err(Error::AttestationSupersetKnown(attestation_data_root)); + return Err(Error::AttestationSupersetKnown( + observed_attestation_key_root, + )); } let aggregator_index = signed_aggregate.message().aggregator_index(); @@ -523,7 +576,7 @@ impl<'a, T: BeaconChainTypes> IndexedAggregatedAttestation<'a, T> { if attestation.is_aggregation_bits_zero() { Err(Error::EmptyAggregationBitfield) } else { - Ok(attestation_data_root) + Ok(observed_attestation_key_root) } } @@ -533,10 +586,8 @@ impl<'a, T: BeaconChainTypes> IndexedAggregatedAttestation<'a, T> { chain: &BeaconChain, ) -> Result> { use AttestationSlashInfo::*; - - let attestation = signed_aggregate.message().aggregate(); - let aggregator_index = signed_aggregate.message().aggregator_index(); - let attestation_data_root = match Self::verify_early_checks(signed_aggregate, chain) { + let observed_attestation_key_root = match Self::verify_early_checks(signed_aggregate, chain) + { Ok(root) => root, Err(e) => { return Err(SignatureNotChecked( @@ -545,11 +596,12 @@ impl<'a, T: BeaconChainTypes> IndexedAggregatedAttestation<'a, T> { )) } }; - let get_indexed_attestation_with_committee = |(committees, _): (Vec, CommitteesPerSlot)| { - match attestation { - AttestationRef::Base(att) => { + match signed_aggregate { + SignedAggregateAndProof::Base(signed_aggregate) => { + let att = &signed_aggregate.message.aggregate; + let aggregator_index = signed_aggregate.message.aggregator_index; let committee = committees .iter() .filter(|&committee| committee.index == att.data.index) @@ -559,13 +611,13 @@ impl<'a, T: BeaconChainTypes> IndexedAggregatedAttestation<'a, T> { index: att.data.index, })?; + // TODO(electra): + // Note: this clones the signature which is known to be a relatively slow operation. + // + // Future optimizations should remove this clone. if let Some(committee) = committee { - // TODO(electra): - // Note: this clones the signature which is known to be a relatively slow operation. - // - // Future optimizations should remove this clone. 
let selection_proof = SelectionProof::from( - signed_aggregate.message().selection_proof().clone(), + signed_aggregate.message.selection_proof.clone(), ); if !selection_proof @@ -579,6 +631,7 @@ impl<'a, T: BeaconChainTypes> IndexedAggregatedAttestation<'a, T> { if !committee.committee.contains(&(aggregator_index as usize)) { return Err(Error::AggregatorNotInCommittee { aggregator_index }); } + attesting_indices_base::get_indexed_attestation( committee.committee, att, @@ -591,13 +644,18 @@ impl<'a, T: BeaconChainTypes> IndexedAggregatedAttestation<'a, T> { }) } } - AttestationRef::Electra(att) => { - attesting_indices_electra::get_indexed_attestation(&committees, att) - .map_err(|e| BeaconChainError::from(e).into()) + SignedAggregateAndProof::Electra(signed_aggregate) => { + attesting_indices_electra::get_indexed_attestation_from_signed_aggregate( + &committees, + signed_aggregate, + &chain.spec, + ) + .map_err(|e| BeaconChainError::from(e).into()) } } }; + let attestation = signed_aggregate.message().aggregate(); let indexed_attestation = match map_attestation_committees( chain, attestation, @@ -611,11 +669,10 @@ impl<'a, T: BeaconChainTypes> IndexedAggregatedAttestation<'a, T> { )) } }; - Ok(IndexedAggregatedAttestation { signed_aggregate, indexed_attestation, - attestation_data_root, + observed_attestation_key_root, }) } } @@ -624,7 +681,7 @@ impl<'a, T: BeaconChainTypes> VerifiedAggregatedAttestation<'a, T> { /// Run the checks that happen after the indexed attestation and signature have been checked. fn verify_late_checks( signed_aggregate: &SignedAggregateAndProof, - attestation_data_root: Hash256, + observed_attestation_key_root: Hash256, chain: &BeaconChain, ) -> Result<(), Error> { let attestation = signed_aggregate.message().aggregate(); @@ -637,11 +694,13 @@ impl<'a, T: BeaconChainTypes> VerifiedAggregatedAttestation<'a, T> { if let ObserveOutcome::Subset = chain .observed_attestations .write() - .observe_item(attestation, Some(attestation_data_root)) + .observe_item(attestation, Some(observed_attestation_key_root)) .map_err(|e| Error::BeaconChainError(e.into()))? { metrics::inc_counter(&metrics::AGGREGATED_ATTESTATION_SUBSETS); - return Err(Error::AttestationSupersetKnown(attestation_data_root)); + return Err(Error::AttestationSupersetKnown( + observed_attestation_key_root, + )); } // Observe the aggregator so we don't process another aggregate from them. @@ -701,7 +760,7 @@ impl<'a, T: BeaconChainTypes> VerifiedAggregatedAttestation<'a, T> { let IndexedAggregatedAttestation { signed_aggregate, indexed_attestation, - attestation_data_root, + observed_attestation_key_root, } = signed_aggregate; match check_signature { @@ -725,7 +784,9 @@ impl<'a, T: BeaconChainTypes> VerifiedAggregatedAttestation<'a, T> { CheckAttestationSignature::No => (), }; - if let Err(e) = Self::verify_late_checks(signed_aggregate, attestation_data_root, chain) { + if let Err(e) = + Self::verify_late_checks(signed_aggregate, observed_attestation_key_root, chain) + { return Err(SignatureValid(indexed_attestation, e)); } @@ -775,6 +836,9 @@ impl<'a, T: BeaconChainTypes> IndexedUnaggregatedAttestation<'a, T> { return Err(Error::NotExactlyOneAggregationBitSet(num_aggregation_bits)); } + // [New in Electra:EIP7549] + verify_committee_index(attestation, &chain.spec)?; + // Attestations must be for a known block. If the block is unknown, we simply drop the // attestation and do not delay consideration for later. 
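Editor's note (illustrative sketch, not part of the upstream diff): the `[New in Electra:EIP7549]` early checks above delegate to `verify_committee_index`, which is defined later in this file. The rule it enforces reduces to the following self-contained sketch, with a plain `&[bool]` and `u64` standing in for the real `BitVector` and `AttestationData` types:

// Post-Electra an attestation must flag exactly one committee in its
// `committee_bits`, and the legacy `attestation.data.index` field must be zero.
fn check_electra_committee_rule(committee_bits: &[bool], data_index: u64) -> Result<(), String> {
    let set_bits = committee_bits.iter().filter(|bit| **bit).count();
    if set_bits != 1 {
        // Corresponds to Error::NotExactlyOneCommitteeBitSet(set_bits).
        return Err(format!("expected exactly one committee bit, got {set_bits}"));
    }
    if data_index != 0 {
        // Corresponds to Error::CommitteeIndexNonZero(data_index as usize).
        return Err(format!("committee index must be zero, got {data_index}"));
    }
    Ok(())
}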
// @@ -797,7 +861,7 @@ impl<'a, T: BeaconChainTypes> IndexedUnaggregatedAttestation<'a, T> { chain: &BeaconChain, ) -> Result<(u64, SubnetId), Error> { let expected_subnet_id = SubnetId::compute_subnet_for_attestation::( - &attestation, + attestation, committees_per_slot, &chain.spec, ) @@ -1101,14 +1165,13 @@ pub fn verify_propagation_slot_range( let current_fork = spec.fork_name_at_slot::(slot_clock.now().ok_or(BeaconChainError::UnableToReadSlot)?); - let earliest_permissible_slot = match current_fork { - ForkName::Base | ForkName::Altair | ForkName::Bellatrix | ForkName::Capella => { - one_epoch_prior - } - // EIP-7045 - ForkName::Deneb | ForkName::Electra => one_epoch_prior + let earliest_permissible_slot = if current_fork < ForkName::Deneb { + one_epoch_prior + // EIP-7045 + } else { + one_epoch_prior .epoch(E::slots_per_epoch()) - .start_slot(E::slots_per_epoch()), + .start_slot(E::slots_per_epoch()) }; if attestation_slot < earliest_permissible_slot { @@ -1147,7 +1210,6 @@ pub fn verify_attestation_signature( &chain.spec, ) .map_err(BeaconChainError::SignatureSetError)?; - metrics::stop_timer(signature_setup_timer); let _signature_verification_timer = @@ -1273,6 +1335,35 @@ pub fn verify_signed_aggregate_signatures( Ok(verify_signature_sets(signature_sets.iter())) } +/// Verify that the `attestation` committee index is properly set for the attestation's fork. +/// This function will only apply verification post-Electra. +pub fn verify_committee_index( + attestation: AttestationRef, + spec: &ChainSpec, +) -> Result<(), Error> { + if spec.fork_name_at_slot::(attestation.data().slot) >= ForkName::Electra { + // Check to ensure that the attestation is for a single committee. + let num_committee_bits = get_committee_indices::( + attestation + .committee_bits() + .map_err(|e| Error::BeaconChainError(e.into()))?, + ); + if num_committee_bits.len() != 1 { + return Err(Error::NotExactlyOneCommitteeBitSet( + num_committee_bits.len(), + )); + } + + // Ensure the attestation index is set to zero post Electra. + if attestation.data().index != 0 { + return Err(Error::CommitteeIndexNonZero( + attestation.data().index as usize, + )); + } + } + Ok(()) +} + /// Assists in readability. type CommitteesPerSlot = u64; @@ -1309,7 +1400,7 @@ pub fn obtain_indexed_attestation_and_committees_per_slot( attesting_indices_electra::get_indexed_attestation(&committees, att) .map(|attestation| (attestation, committees_per_slot)) .map_err(|e| { - let index = att.committee_index(); + let index = att.committee_index().unwrap_or(0); if e == BlockOperationError::BeaconStateError(NoCommitteeFound(index)) { Error::NoCommitteeForSlotAndIndex { slot: att.data.slot, @@ -1324,16 +1415,14 @@ pub fn obtain_indexed_attestation_and_committees_per_slot( }) } -// TODO(electra) update comments below to reflect logic changes -// i.e. this now runs the map_fn on a list of committees for the slot of the provided attestation /// Runs the `map_fn` with the committee and committee count per slot for the given `attestation`. /// -/// This function exists in this odd "map" pattern because efficiently obtaining the committees for -/// an attestation can be complex. It might involve reading straight from the +/// This function exists in this odd "map" pattern because efficiently obtaining the committees for +/// an attestation's slot can be complex. It might involve reading straight from the /// `beacon_chain.shuffling_cache` or it might involve reading it from a state from the DB.
Due to /// the complexities of `RwLock`s on the shuffling cache, a simple `Cow` isn't suitable here. /// -/// If the committee for `attestation` isn't found in the `shuffling_cache`, we will read a state +/// If the committees for an `attestation`'s slot aren't found in the `shuffling_cache`, we will read a state /// from disk and then update the `shuffling_cache`. fn map_attestation_committees( chain: &BeaconChain, @@ -1373,7 +1462,7 @@ where .unwrap_or_else(|_| { Err(Error::NoCommitteeForSlotAndIndex { slot: attestation.data().slot, - index: attestation.committee_index(), + index: attestation.committee_index().unwrap_or(0), }) })) }) diff --git a/beacon_node/beacon_chain/src/attestation_verification/batch.rs b/beacon_node/beacon_chain/src/attestation_verification/batch.rs index 1ec752ff5c..07fad1bd4a 100644 --- a/beacon_node/beacon_chain/src/attestation_verification/batch.rs +++ b/beacon_node/beacon_chain/src/attestation_verification/batch.rs @@ -66,7 +66,6 @@ where .ok_or(BeaconChainError::ValidatorPubkeyCacheLockTimeout)?; let mut signature_sets = Vec::with_capacity(num_indexed * 3); - // Iterate, flattening to get only the `Ok` values. for indexed in indexing_results.iter().flatten() { let signed_aggregate = &indexed.signed_aggregate; diff --git a/beacon_node/beacon_chain/src/beacon_block_streamer.rs b/beacon_node/beacon_chain/src/beacon_block_streamer.rs index 0c92b7c1f6..f0a68b6be5 100644 --- a/beacon_node/beacon_chain/src/beacon_block_streamer.rs +++ b/beacon_node/beacon_chain/src/beacon_block_streamer.rs @@ -1,4 +1,4 @@ -use crate::{metrics, BeaconChain, BeaconChainError, BeaconChainTypes}; +use crate::{metrics, BeaconChain, BeaconChainError, BeaconChainTypes, BlockProcessStatus}; use execution_layer::{ExecutionLayer, ExecutionPayloadBodyV1}; use slog::{crit, debug, error, Logger}; use std::collections::HashMap; @@ -410,15 +410,14 @@ impl BeaconBlockStreamer { fn check_caches(&self, root: Hash256) -> Option>> { if self.check_caches == CheckCaches::Yes { - self.beacon_chain - .reqresp_pre_import_cache - .read() - .get(&root) - .map(|block| { + match self.beacon_chain.get_block_process_status(&root) { + BlockProcessStatus::Unknown => None, + BlockProcessStatus::NotValidated(block) + | BlockProcessStatus::ExecutionValidated(block) => { metrics::inc_counter(&metrics::BEACON_REQRESP_PRE_IMPORT_CACHE_HITS); - block.clone() - }) - .or(self.beacon_chain.early_attester_cache.get_block(root)) + Some(block) + } + } } else { None } diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index a0578e853e..a0c2ab3f23 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -85,7 +85,9 @@ use futures::channel::mpsc::Sender; use itertools::process_results; use itertools::Itertools; use kzg::Kzg; -use operation_pool::{AttestationRef, OperationPool, PersistedOperationPool, ReceivedPreCapella}; +use operation_pool::{ + CompactAttestationRef, OperationPool, PersistedOperationPool, ReceivedPreCapella, +}; use parking_lot::{Mutex, RwLock}; use proto_array::{DoNotReOrg, ProposerHeadError}; use safe_arith::SafeArith; @@ -338,6 +340,20 @@ struct PartialBeaconBlock { bls_to_execution_changes: Vec, } +pub enum BlockProcessStatus { + /// Block is not in any pre-import cache. Block may be in the database or in the fork choice. + Unknown, + /// Block is currently processing but not yet validated. + NotValidated(Arc>), + /// Block is fully valid, but not yet imported.
It's cached in the da_checker while awaiting + /// missing block components. + ExecutionValidated(Arc>), +} + +pub struct BeaconChainMetrics { + pub reqresp_pre_import_cache_len: usize, +} + pub type LightClientProducerEvent = (Hash256, Slot, SyncAggregate); pub type BeaconForkChoice = ForkChoice< @@ -1238,6 +1254,27 @@ impl BeaconChain { Ok(self.store.get_blinded_block(block_root)?) } + /// Return the status of a block as it progresses through the various caches of the beacon + /// chain. Used by sync to learn the status of a block and prevent repeated downloads / + /// processing attempts. + pub fn get_block_process_status(&self, block_root: &Hash256) -> BlockProcessStatus { + if let Some(block) = self + .data_availability_checker + .get_execution_valid_block(block_root) + { + return BlockProcessStatus::ExecutionValidated(block); + } + + if let Some(block) = self.reqresp_pre_import_cache.read().get(block_root) { + // A block is on the `reqresp_pre_import_cache` but NOT in the + // `data_availability_checker` only if it is actively processing. We can expect a future + // event with the result of processing + return BlockProcessStatus::NotValidated(block.clone()); + } + + BlockProcessStatus::Unknown + } + /// Returns the state at the given root, if any. /// /// ## Errors @@ -1609,6 +1646,21 @@ impl BeaconChain { Ok((duties, dependent_root, execution_status)) } + pub fn get_aggregated_attestation( + &self, + attestation: AttestationRef, + ) -> Result>, Error> { + match attestation { + AttestationRef::Base(att) => self.get_aggregated_attestation_base(&att.data), + AttestationRef::Electra(att) => self.get_aggregated_attestation_electra( + att.data.slot, + &att.data.tree_hash_root(), + att.committee_index() + .ok_or(Error::AttestationCommitteeIndexNotSet)?, + ), + } + } + /// Returns an aggregated `Attestation`, if any, that has a matching `attestation.data`. /// /// The attestation will be obtained from `self.naive_aggregation_pool`. 
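Editor's note (illustrative sketch, not part of the upstream diff): `get_block_process_status` is meant to let sync decide how much further work a block root still needs. A minimal consumer could look like the following, assuming the crate's `BeaconChain`, `BeaconChainTypes`, `Hash256` and the `BlockProcessStatus` enum added above are in scope; the comments stand in for the hypothetical reactions:

fn on_block_reference<T: BeaconChainTypes>(chain: &BeaconChain<T>, block_root: Hash256) {
    match chain.get_block_process_status(&block_root) {
        // Not in any pre-import cache: the block may need to be downloaded.
        BlockProcessStatus::Unknown => { /* schedule a block request to peers */ }
        // Already processing: a processing-result event will follow, so a
        // repeated download or processing attempt would be redundant.
        BlockProcessStatus::NotValidated(_block) => {}
        // Execution-valid but parked in the da_checker: only the missing
        // blob components still need to be fetched.
        BlockProcessStatus::ExecutionValidated(_block) => { /* request missing blobs */ }
    }
}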
@@ -2185,7 +2237,7 @@ impl BeaconChain { self.log, "Stored unaggregated attestation"; "outcome" => ?outcome, - "index" => attestation.data().index, + "index" => attestation.committee_index(), "slot" => attestation.data().slot.as_u64(), ), Err(NaiveAggregationError::SlotTooLow { @@ -2204,7 +2256,7 @@ impl BeaconChain { self.log, "Failed to store unaggregated attestation"; "error" => ?e, - "index" => attestation.data().index, + "index" => attestation.committee_index(), "slot" => attestation.data().slot.as_u64(), ); return Err(Error::from(e).into()); @@ -2329,7 +2381,7 @@ impl BeaconChain { pub fn filter_op_pool_attestation( &self, filter_cache: &mut HashMap<(Hash256, Epoch), bool>, - att: &AttestationRef, + att: &CompactAttestationRef, state: &BeaconState, ) -> bool { *filter_cache @@ -2601,6 +2653,14 @@ impl BeaconChain { bls_to_execution_change: SigVerifiedOp, received_pre_capella: ReceivedPreCapella, ) -> bool { + if let Some(event_handler) = self.event_handler.as_ref() { + if event_handler.has_bls_to_execution_change_subscribers() { + event_handler.register(EventKind::BlsToExecutionChange(Box::new( + bls_to_execution_change.clone().into_inner(), + ))); + } + } + if self.eth1_chain.is_some() { self.op_pool .insert_bls_to_execution_change(bls_to_execution_change, received_pre_capella) @@ -4951,11 +5011,11 @@ impl BeaconChain { initialize_epoch_cache(&mut state, &self.spec)?; let mut prev_filter_cache = HashMap::new(); - let prev_attestation_filter = |att: &AttestationRef| { + let prev_attestation_filter = |att: &CompactAttestationRef| { self.filter_op_pool_attestation(&mut prev_filter_cache, att, &state) }; let mut curr_filter_cache = HashMap::new(); - let curr_attestation_filter = |att: &AttestationRef| { + let curr_attestation_filter = |att: &CompactAttestationRef| { self.filter_op_pool_attestation(&mut curr_filter_cache, att, &state) }; @@ -5131,50 +5191,6 @@ impl BeaconChain { }, ); - // TODO(electra): figure out what should *actually* be done here when we have attestations / attester_slashings of the wrong type - match &state { - BeaconState::Base(_) - | BeaconState::Altair(_) - | BeaconState::Bellatrix(_) - | BeaconState::Capella(_) - | BeaconState::Deneb(_) => { - if !attestations_electra.is_empty() { - error!( - self.log, - "Tried to produce block with attestations of the wrong type"; - "slot" => slot, - "attestations" => attestations_electra.len(), - ); - } - if !attester_slashings_electra.is_empty() { - error!( - self.log, - "Tried to produce block with attester slashings of the wrong type"; - "slot" => slot, - "attester_slashings" => attester_slashings_electra.len(), - ); - } - } - BeaconState::Electra(_) => { - if !attestations_base.is_empty() { - error!( - self.log, - "Tried to produce block with attestations of the wrong type"; - "slot" => slot, - "attestations" => attestations_base.len(), - ); - } - if !attester_slashings_base.is_empty() { - error!( - self.log, - "Tried to produce block with attester slashings of the wrong type"; - "slot" => slot, - "attester_slashings" => attester_slashings_base.len(), - ); - } - } - }; - let (inner_block, maybe_blobs_and_proofs, execution_payload_value) = match &state { BeaconState::Base(_) => ( BeaconBlock::Base(BeaconBlockBase { @@ -6504,9 +6520,8 @@ impl BeaconChain { /// account the current slot when accounting for skips. pub fn is_healthy(&self, parent_root: &Hash256) -> Result { let cached_head = self.canonical_head.cached_head(); - // Check if the merge has been finalized. 
- if let Some(finalized_hash) = cached_head.forkchoice_update_parameters().finalized_hash { - if ExecutionBlockHash::zero() == finalized_hash { + if let Some(head_hash) = cached_head.forkchoice_update_parameters().head_hash { + if ExecutionBlockHash::zero() == head_hash { return Ok(ChainHealth::PreMerge); } } else { @@ -6749,6 +6764,12 @@ impl BeaconChain { ForkName::Base => Err(Error::UnsupportedFork), } } + + pub fn metrics(&self) -> BeaconChainMetrics { + BeaconChainMetrics { + reqresp_pre_import_cache_len: self.reqresp_pre_import_cache.read().len(), + } + } } impl Drop for BeaconChain { diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs index a981d31e55..57f718f62d 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -86,17 +86,12 @@ impl DataAvailabilityChecker { /// Checks if the block root is currently in the availability cache awaiting import because /// of missing components. - pub fn has_execution_valid_block(&self, block_root: &Hash256) -> bool { + pub fn get_execution_valid_block( + &self, + block_root: &Hash256, + ) -> Option>> { self.availability_cache - .has_execution_valid_block(block_root) - } - - /// Return the required blobs `block_root` expects if the block is currently in the cache. - pub fn num_expected_blobs(&self, block_root: &Hash256) -> Option { - self.availability_cache - .peek_pending_components(block_root, |components| { - components.and_then(|components| components.num_expected_blobs()) - }) + .get_execution_valid_block(block_root) } /// Return the set of imported blob indexes for `block_root`. Returns None if there is no block diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index 2e3c4aac55..e350181c86 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -44,7 +44,7 @@ use ssz_types::{FixedVector, VariableList}; use std::num::NonZeroUsize; use std::{collections::HashSet, sync::Arc}; use types::blob_sidecar::BlobIdentifier; -use types::{BlobSidecar, ChainSpec, Epoch, EthSpec, Hash256}; +use types::{BlobSidecar, ChainSpec, Epoch, EthSpec, Hash256, SignedBeaconBlock}; /// This represents the components of a partially available block /// @@ -544,12 +544,19 @@ impl OverflowLRUCache { } /// Returns true if the block root is known, without altering the LRU ordering - pub fn has_execution_valid_block(&self, block_root: &Hash256) -> bool { - if let Some(pending_components) = self.critical.read().peek_pending_components(block_root) { - pending_components.executed_block.is_some() - } else { - false - } + pub fn get_execution_valid_block( + &self, + block_root: &Hash256, + ) -> Option>> { + self.critical + .read() + .peek_pending_components(block_root) + .and_then(|pending_components| { + pending_components + .executed_block + .as_ref() + .map(|block| block.block_cloned()) + }) } /// Fetch a blob from the cache without affecting the LRU ordering diff --git a/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs index f8a243bd9e..9775d54c02 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs +++
b/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs @@ -37,6 +37,10 @@ impl DietAvailabilityPendingExecutedBlock { &self.block } + pub fn block_cloned(&self) -> Arc> { + self.block.clone() + } + pub fn num_blobs_expected(&self) -> usize { self.block .message() diff --git a/beacon_node/beacon_chain/src/early_attester_cache.rs b/beacon_node/beacon_chain/src/early_attester_cache.rs index 8ed4e5db40..936f4de3ee 100644 --- a/beacon_node/beacon_chain/src/early_attester_cache.rs +++ b/beacon_node/beacon_chain/src/early_attester_cache.rs @@ -123,7 +123,6 @@ impl EarlyAttesterCache { item.committee_lengths .get_committee_length::(request_slot, request_index, spec)?; - // TODO(electra) make fork-agnostic let attestation = if spec.fork_name_at_slot::(request_slot) >= ForkName::Electra { let mut committee_bits = BitVector::default(); if committee_len > 0 { diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 340f1f9f79..3d61d4f32d 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -226,6 +226,8 @@ pub enum BeaconChainError { LightClientError(LightClientError), UnsupportedFork, MilhouseError(MilhouseError), + AttestationError(AttestationError), + AttestationCommitteeIndexNotSet, } easy_from_to!(SlotProcessingError, BeaconChainError); @@ -256,6 +258,7 @@ easy_from_to!(AvailabilityCheckError, BeaconChainError); easy_from_to!(EpochCacheError, BeaconChainError); easy_from_to!(LightClientError, BeaconChainError); easy_from_to!(MilhouseError, BeaconChainError); +easy_from_to!(AttestationError, BeaconChainError); #[derive(Debug)] pub enum BlockProductionError { diff --git a/beacon_node/beacon_chain/src/events.rs b/beacon_node/beacon_chain/src/events.rs index 8700675a66..5f91fe5d0c 100644 --- a/beacon_node/beacon_chain/src/events.rs +++ b/beacon_node/beacon_chain/src/events.rs @@ -22,6 +22,7 @@ pub struct ServerSentEventHandler { block_reward_tx: Sender>, proposer_slashing_tx: Sender>, attester_slashing_tx: Sender>, + bls_to_execution_change_tx: Sender>, log: Logger, } @@ -49,6 +50,7 @@ impl ServerSentEventHandler { let (block_reward_tx, _) = broadcast::channel(capacity); let (proposer_slashing_tx, _) = broadcast::channel(capacity); let (attester_slashing_tx, _) = broadcast::channel(capacity); + let (bls_to_execution_change_tx, _) = broadcast::channel(capacity); Self { attestation_tx, @@ -66,6 +68,7 @@ impl ServerSentEventHandler { block_reward_tx, proposer_slashing_tx, attester_slashing_tx, + bls_to_execution_change_tx, log, } } @@ -140,6 +143,10 @@ impl ServerSentEventHandler { .attester_slashing_tx .send(kind) .map(|count| log_count("attester slashing", count)), + EventKind::BlsToExecutionChange(_) => self + .bls_to_execution_change_tx + .send(kind) + .map(|count| log_count("bls to execution change", count)), }; if let Err(SendError(event)) = result { trace!(self.log, "No receivers registered to listen for event"; "event" => ?event); @@ -206,6 +213,10 @@ impl ServerSentEventHandler { self.proposer_slashing_tx.subscribe() } + pub fn subscribe_bls_to_execution_change(&self) -> Receiver> { + self.bls_to_execution_change_tx.subscribe() + } + pub fn has_attestation_subscribers(&self) -> bool { self.attestation_tx.receiver_count() > 0 } @@ -257,4 +268,8 @@ impl ServerSentEventHandler { pub fn has_attester_slashing_subscribers(&self) -> bool { self.attester_slashing_tx.receiver_count() > 0 } + + pub fn has_bls_to_execution_change_subscribers(&self) -> bool { + 
self.bls_to_execution_change_tx.receiver_count() > 0 + } } diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 221bb8b292..f419429e09 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -62,9 +62,10 @@ pub mod validator_pubkey_cache; pub use self::beacon_chain::{ AttestationProcessingOutcome, AvailabilityProcessingStatus, BeaconBlockResponse, - BeaconBlockResponseWrapper, BeaconChain, BeaconChainTypes, BeaconStore, ChainSegmentResult, - ForkChoiceError, LightClientProducerEvent, OverrideForkchoiceUpdate, ProduceBlockVerification, - StateSkipConfig, WhenSlotSkipped, INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, + BeaconBlockResponseWrapper, BeaconChain, BeaconChainTypes, BeaconStore, BlockProcessStatus, + ChainSegmentResult, ForkChoiceError, LightClientProducerEvent, OverrideForkchoiceUpdate, + ProduceBlockVerification, StateSkipConfig, WhenSlotSkipped, + INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, }; pub use self::beacon_snapshot::BeaconSnapshot; diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index 3b2453c311..4ceaf675ce 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -1192,6 +1192,7 @@ pub fn scrape_for_metrics(beacon_chain: &BeaconChain) { } let attestation_stats = beacon_chain.op_pool.attestation_stats(); + let chain_metrics = beacon_chain.metrics(); set_gauge_by_usize( &BLOCK_PROCESSING_SNAPSHOT_CACHE_SIZE, @@ -1200,7 +1201,7 @@ pub fn scrape_for_metrics(beacon_chain: &BeaconChain) { set_gauge_by_usize( &BEACON_REQRESP_PRE_IMPORT_CACHE_SIZE, - beacon_chain.reqresp_pre_import_cache.read().len(), + chain_metrics.reqresp_pre_import_cache_len, ); let da_checker_metrics = beacon_chain.data_availability_checker.metrics(); diff --git a/beacon_node/beacon_chain/src/naive_aggregation_pool.rs b/beacon_node/beacon_chain/src/naive_aggregation_pool.rs index a1c736cd0e..b16ced789f 100644 --- a/beacon_node/beacon_chain/src/naive_aggregation_pool.rs +++ b/beacon_node/beacon_chain/src/naive_aggregation_pool.rs @@ -261,9 +261,9 @@ impl AggregateMap for AggregatedAttestationMap { }; let attestation_key = AttestationKey::from_attestation_ref(a)?; - let attestation_data_root = attestation_key.tree_hash_root(); + let attestation_key_root = attestation_key.tree_hash_root(); - if let Some(existing_attestation) = self.map.get_mut(&attestation_data_root) { + if let Some(existing_attestation) = self.map.get_mut(&attestation_key_root) { if existing_attestation .get_aggregation_bit(aggregation_bit) .map_err(|_| Error::InconsistentBitfieldLengths)? 
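Editor's note (illustrative sketch, not part of the upstream diff): as with `ObservedAttestationKey` in attestation_verification.rs, the pool above is now keyed by the tree hash of a (committee index, attestation data) pair, because post-Electra the committee index no longer lives in `AttestationData.index`. A simplified stand-in for that keying scheme, assuming the `tree_hash`/`tree_hash_derive` crates and their `H256` impls as used throughout these files:

use ethereum_types::H256;
use tree_hash::TreeHash;
use tree_hash_derive::TreeHash;

// Folding the committee index into the key keeps attestations that share an
// `AttestationData` but belong to different committees from colliding in the map.
#[derive(TreeHash)]
struct ExampleAttestationKey {
    committee_index: u64,
    attestation_data_root: H256, // stand-in for the full `AttestationData`
}

fn map_key(committee_index: u64, attestation_data_root: H256) -> H256 {
    ExampleAttestationKey { committee_index, attestation_data_root }.tree_hash_root()
}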
@@ -285,7 +285,7 @@ impl AggregateMap for AggregatedAttestationMap { } self.map - .insert(attestation_data_root, a.clone_as_attestation()); + .insert(attestation_key_root, a.clone_as_attestation()); Ok(InsertOutcome::NewItemInserted { committee_index: aggregation_bit, }) diff --git a/beacon_node/beacon_chain/src/schema_change.rs b/beacon_node/beacon_chain/src/schema_change.rs index 63eb72c43a..06d189a8c0 100644 --- a/beacon_node/beacon_chain/src/schema_change.rs +++ b/beacon_node/beacon_chain/src/schema_change.rs @@ -2,6 +2,7 @@ mod migration_schema_v17; mod migration_schema_v18; mod migration_schema_v19; +mod migration_schema_v20; use crate::beacon_chain::BeaconChainTypes; use crate::types::ChainSpec; @@ -78,6 +79,14 @@ pub fn migrate_schema( let ops = migration_schema_v19::downgrade_from_v19::(db.clone(), log)?; db.store_schema_version_atomically(to, ops) } + (SchemaVersion(19), SchemaVersion(20)) => { + let ops = migration_schema_v20::upgrade_to_v20::(db.clone(), log)?; + db.store_schema_version_atomically(to, ops) + } + (SchemaVersion(20), SchemaVersion(19)) => { + let ops = migration_schema_v20::downgrade_from_v20::(db.clone(), log)?; + db.store_schema_version_atomically(to, ops) + } // Anything else is an error. (_, _) => Err(HotColdDBError::UnsupportedSchemaVersion { target_version: to, diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v20.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v20.rs new file mode 100644 index 0000000000..737fcd0a93 --- /dev/null +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v20.rs @@ -0,0 +1,103 @@ +use crate::beacon_chain::{BeaconChainTypes, OP_POOL_DB_KEY}; +use operation_pool::{ + PersistedOperationPool, PersistedOperationPoolV15, PersistedOperationPoolV20, +}; +use slog::{debug, info, Logger}; +use std::sync::Arc; +use store::{Error, HotColdDB, KeyValueStoreOp, StoreItem}; +use types::Attestation; + +pub fn upgrade_to_v20( + db: Arc>, + log: Logger, +) -> Result, Error> { + // Load a V15 op pool and transform it to V20. + let Some(PersistedOperationPoolV15:: { + attestations_v15, + sync_contributions, + attester_slashings_v15, + proposer_slashings, + voluntary_exits, + bls_to_execution_changes, + capella_bls_change_broadcast_indices, + }) = db.get_item(&OP_POOL_DB_KEY)? + else { + debug!(log, "Nothing to do, no operation pool stored"); + return Ok(vec![]); + }; + + let attestations = attestations_v15 + .into_iter() + .map(|(attestation, indices)| (Attestation::Base(attestation).into(), indices)) + .collect(); + + let attester_slashings = attester_slashings_v15 + .into_iter() + .map(|slashing| slashing.into()) + .collect(); + + let v20 = PersistedOperationPool::V20(PersistedOperationPoolV20 { + attestations, + sync_contributions, + attester_slashings, + proposer_slashings, + voluntary_exits, + bls_to_execution_changes, + capella_bls_change_broadcast_indices, + }); + Ok(vec![v20.as_kv_store_op(OP_POOL_DB_KEY)]) +} + +pub fn downgrade_from_v20( + db: Arc>, + log: Logger, +) -> Result, Error> { + // Load a V20 op pool and transform it to V15. + let Some(PersistedOperationPoolV20:: { + attestations, + sync_contributions, + attester_slashings, + proposer_slashings, + voluntary_exits, + bls_to_execution_changes, + capella_bls_change_broadcast_indices, + }) = db.get_item(&OP_POOL_DB_KEY)? 
+ else { + debug!(log, "Nothing to do, no operation pool stored"); + return Ok(vec![]); + }; + + let attestations_v15 = attestations + .into_iter() + .filter_map(|(attestation, indices)| { + if let Attestation::Base(attestation) = attestation.into() { + Some((attestation, indices)) + } else { + info!(log, "Dropping attestation during downgrade"; "reason" => "not a base attestation"); + None + } + }) + .collect(); + + let attester_slashings_v15 = attester_slashings + .into_iter() + .filter_map(|slashing| match slashing.try_into() { + Ok(slashing) => Some(slashing), + Err(_) => { + info!(log, "Dropping attester slashing during downgrade"; "reason" => "not a base attester slashing"); + None + } + }) + .collect(); + + let v15 = PersistedOperationPool::V15(PersistedOperationPoolV15 { + attestations_v15, + sync_contributions, + attester_slashings_v15, + proposer_slashings, + voluntary_exits, + bls_to_execution_changes, + capella_bls_change_broadcast_indices, + }); + Ok(vec![v15.as_kv_store_op(OP_POOL_DB_KEY)]) +} diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 09e9009381..34ad6d4324 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -686,6 +686,7 @@ where .set_builder_url( SensitiveUrl::parse(format!("http://127.0.0.1:{port}").as_str()).unwrap(), None, + None, ) .unwrap(); @@ -1169,7 +1170,7 @@ where }; let subnet_id = SubnetId::compute_subnet_for_attestation::( - &attestation.to_ref(), + attestation.to_ref(), committee_count, &self.chain.spec, ) @@ -1343,7 +1344,10 @@ where // If there are any attestations in this committee, create an aggregate. if let Some((attestation, _)) = committee_attestations.first() { let bc = state - .get_beacon_committee(attestation.data().slot, attestation.data().index) + .get_beacon_committee( + attestation.data().slot, + attestation.committee_index().unwrap(), + ) .unwrap(); // Find an aggregator if one exists. Return `None` if there are no @@ -1370,21 +1374,42 @@ where }) .copied()?; + let fork_name = self.spec.fork_name_at_slot::(slot); + + let aggregate = if fork_name >= ForkName::Electra { + self.chain + .get_aggregated_attestation_electra( + slot, + &attestation.data().tree_hash_root(), + bc.index, + ) + .unwrap() + .unwrap_or_else(|| { + committee_attestations.iter().skip(1).fold( + attestation.clone(), + |mut agg, (att, _)| { + agg.aggregate(att.to_ref()); + agg + }, + ) + }) + } else { + self.chain + .get_aggregated_attestation_base(attestation.data()) + .unwrap() + .unwrap_or_else(|| { + committee_attestations.iter().skip(1).fold( + attestation.clone(), + |mut agg, (att, _)| { + agg.aggregate(att.to_ref()); + agg + }, + ) + }) + }; + // If the chain is able to produce an aggregate, use that. Otherwise, build an // aggregate locally. 
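Editor's note (illustrative sketch, not part of the upstream diff): both fork branches above share the same local fallback, a fold of the remaining single-bit attestations into the first one. Isolated here for clarity, using the `aggregate`/`to_ref` methods the diff itself relies on:

// Local fallback used when the naive aggregation pool has no matching
// aggregate: merge every remaining attestation's bits and signature into the first.
fn aggregate_locally<E: EthSpec>(first: &Attestation<E>, rest: &[Attestation<E>]) -> Attestation<E> {
    rest.iter().fold(first.clone(), |mut agg, att| {
        agg.aggregate(att.to_ref());
        agg
    })
}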
- let aggregate = self - .chain - .get_aggregated_attestation_base(attestation.data()) - .unwrap() - .unwrap_or_else(|| { - committee_attestations.iter().skip(1).fold( - attestation.clone(), - |mut agg, (att, _)| { - agg.aggregate(att.to_ref()); - agg - }, - ) - }); let signed_aggregate = SignedAggregateAndProof::from_aggregate( aggregator_index as u64, @@ -1514,54 +1539,101 @@ where ) -> AttesterSlashing { let fork = self.chain.canonical_head.cached_head().head_fork(); - // TODO(electra): consider making this test fork-agnostic - let mut attestation_1 = IndexedAttestationBase { - attesting_indices: VariableList::new(validator_indices).unwrap(), - data: AttestationData { - slot: Slot::new(0), - index: 0, - beacon_block_root: Hash256::zero(), - target: Checkpoint { - root: Hash256::zero(), - epoch: target1.unwrap_or(fork.epoch), + let fork_name = self.spec.fork_name_at_slot::(Slot::new(0)); + + let mut attestation_1 = if fork_name >= ForkName::Electra { + IndexedAttestation::Electra(IndexedAttestationElectra { + attesting_indices: VariableList::new(validator_indices).unwrap(), + data: AttestationData { + slot: Slot::new(0), + index: 0, + beacon_block_root: Hash256::zero(), + target: Checkpoint { + root: Hash256::zero(), + epoch: target1.unwrap_or(fork.epoch), + }, + source: Checkpoint { + root: Hash256::zero(), + epoch: source1.unwrap_or(Epoch::new(0)), + }, }, - source: Checkpoint { - root: Hash256::zero(), - epoch: source1.unwrap_or(Epoch::new(0)), + signature: AggregateSignature::infinity(), + }) + } else { + IndexedAttestation::Base(IndexedAttestationBase { + attesting_indices: VariableList::new(validator_indices).unwrap(), + data: AttestationData { + slot: Slot::new(0), + index: 0, + beacon_block_root: Hash256::zero(), + target: Checkpoint { + root: Hash256::zero(), + epoch: target1.unwrap_or(fork.epoch), + }, + source: Checkpoint { + root: Hash256::zero(), + epoch: source1.unwrap_or(Epoch::new(0)), + }, }, - }, - signature: AggregateSignature::infinity(), + signature: AggregateSignature::infinity(), + }) }; let mut attestation_2 = attestation_1.clone(); - attestation_2.data.index += 1; - attestation_2.data.source.epoch = source2.unwrap_or(Epoch::new(0)); - attestation_2.data.target.epoch = target2.unwrap_or(fork.epoch); + attestation_2.data_mut().index += 1; + attestation_2.data_mut().source.epoch = source2.unwrap_or(Epoch::new(0)); + attestation_2.data_mut().target.epoch = target2.unwrap_or(fork.epoch); for attestation in &mut [&mut attestation_1, &mut attestation_2] { - // TODO(electra) we could explore iter mut here - for i in attestation.attesting_indices.iter() { - let sk = &self.validator_keypairs[*i as usize].sk; + match attestation { + IndexedAttestation::Base(attestation) => { + for i in attestation.attesting_indices.iter() { + let sk = &self.validator_keypairs[*i as usize].sk; - let genesis_validators_root = self.chain.genesis_validators_root; + let genesis_validators_root = self.chain.genesis_validators_root; - let domain = self.chain.spec.get_domain( - attestation.data.target.epoch, - Domain::BeaconAttester, - &fork, - genesis_validators_root, - ); - let message = attestation.data.signing_root(domain); + let domain = self.chain.spec.get_domain( + attestation.data.target.epoch, + Domain::BeaconAttester, + &fork, + genesis_validators_root, + ); + let message = attestation.data.signing_root(domain); - attestation.signature.add_assign(&sk.sign(message)); + attestation.signature.add_assign(&sk.sign(message)); + } + } + IndexedAttestation::Electra(attestation) => { + for i in 
attestation.attesting_indices.iter() { + let sk = &self.validator_keypairs[*i as usize].sk; + + let genesis_validators_root = self.chain.genesis_validators_root; + + let domain = self.chain.spec.get_domain( + attestation.data.target.epoch, + Domain::BeaconAttester, + &fork, + genesis_validators_root, + ); + let message = attestation.data.signing_root(domain); + + attestation.signature.add_assign(&sk.sign(message)); + } + } } } - // TODO(electra): fix this test - AttesterSlashing::Base(AttesterSlashingBase { - attestation_1, - attestation_2, - }) + if fork_name >= ForkName::Electra { + AttesterSlashing::Electra(AttesterSlashingElectra { + attestation_1: attestation_1.as_electra().unwrap().clone(), + attestation_2: attestation_2.as_electra().unwrap().clone(), + }) + } else { + AttesterSlashing::Base(AttesterSlashingBase { + attestation_1: attestation_1.as_base().unwrap().clone(), + attestation_2: attestation_2.as_base().unwrap().clone(), + }) + } } pub fn make_attester_slashing_different_indices( @@ -1569,6 +1641,8 @@ where validator_indices_1: Vec, validator_indices_2: Vec, ) -> AttesterSlashing { + let fork_name = self.spec.fork_name_at_slot::(Slot::new(0)); + let data = AttestationData { slot: Slot::new(0), index: 0, @@ -1583,45 +1657,95 @@ where }, }; - // TODO(electra): make this test fork-agnostic - let mut attestation_1 = IndexedAttestationBase { - attesting_indices: VariableList::new(validator_indices_1).unwrap(), - data: data.clone(), - signature: AggregateSignature::infinity(), + let (mut attestation_1, mut attestation_2) = if fork_name >= ForkName::Electra { + let attestation_1 = IndexedAttestationElectra { + attesting_indices: VariableList::new(validator_indices_1).unwrap(), + data: data.clone(), + signature: AggregateSignature::infinity(), + }; + + let attestation_2 = IndexedAttestationElectra { + attesting_indices: VariableList::new(validator_indices_2).unwrap(), + data, + signature: AggregateSignature::infinity(), + }; + + ( + IndexedAttestation::Electra(attestation_1), + IndexedAttestation::Electra(attestation_2), + ) + } else { + let attestation_1 = IndexedAttestationBase { + attesting_indices: VariableList::new(validator_indices_1).unwrap(), + data: data.clone(), + signature: AggregateSignature::infinity(), + }; + + let attestation_2 = IndexedAttestationBase { + attesting_indices: VariableList::new(validator_indices_2).unwrap(), + data, + signature: AggregateSignature::infinity(), + }; + + ( + IndexedAttestation::Base(attestation_1), + IndexedAttestation::Base(attestation_2), + ) }; - let mut attestation_2 = IndexedAttestationBase { - attesting_indices: VariableList::new(validator_indices_2).unwrap(), - data, - signature: AggregateSignature::infinity(), - }; - - attestation_2.data.index += 1; + attestation_2.data_mut().index += 1; let fork = self.chain.canonical_head.cached_head().head_fork(); for attestation in &mut [&mut attestation_1, &mut attestation_2] { - for i in attestation.attesting_indices.iter() { - let sk = &self.validator_keypairs[*i as usize].sk; + match attestation { + IndexedAttestation::Base(attestation) => { + for i in attestation.attesting_indices.iter() { + let sk = &self.validator_keypairs[*i as usize].sk; - let genesis_validators_root = self.chain.genesis_validators_root; + let genesis_validators_root = self.chain.genesis_validators_root; - let domain = self.chain.spec.get_domain( - attestation.data.target.epoch, - Domain::BeaconAttester, - &fork, - genesis_validators_root, - ); - let message = attestation.data.signing_root(domain); + let domain = 
self.chain.spec.get_domain( + attestation.data.target.epoch, + Domain::BeaconAttester, + &fork, + genesis_validators_root, + ); + let message = attestation.data.signing_root(domain); - attestation.signature.add_assign(&sk.sign(message)); + attestation.signature.add_assign(&sk.sign(message)); + } + } + IndexedAttestation::Electra(attestation) => { + for i in attestation.attesting_indices.iter() { + let sk = &self.validator_keypairs[*i as usize].sk; + + let genesis_validators_root = self.chain.genesis_validators_root; + + let domain = self.chain.spec.get_domain( + attestation.data.target.epoch, + Domain::BeaconAttester, + &fork, + genesis_validators_root, + ); + let message = attestation.data.signing_root(domain); + + attestation.signature.add_assign(&sk.sign(message)); + } + } } } - // TODO(electra): fix this test - AttesterSlashing::Base(AttesterSlashingBase { - attestation_1, - attestation_2, - }) + if fork_name >= ForkName::Electra { + AttesterSlashing::Electra(AttesterSlashingElectra { + attestation_1: attestation_1.as_electra().unwrap().clone(), + attestation_2: attestation_2.as_electra().unwrap().clone(), + }) + } else { + AttesterSlashing::Base(AttesterSlashingBase { + attestation_1: attestation_1.as_base().unwrap().clone(), + attestation_2: attestation_2.as_base().unwrap().clone(), + }) + } } pub fn make_proposer_slashing(&self, validator_index: u64) -> ProposerSlashing { @@ -2413,6 +2537,7 @@ where AttestationStrategy::AllValidators => self.get_all_validators(), AttestationStrategy::SomeValidators(vals) => vals, }; + let state_root = state.update_tree_hash_cache().unwrap(); let (_, _, last_produced_block_hash, _) = self .add_attested_blocks_at_slots_with_sync( @@ -2539,6 +2664,7 @@ pub fn generate_rand_block_and_blobs( rng: &mut impl Rng, ) -> (SignedBeaconBlock>, Vec>) { let inner = map_fork_name!(fork_name, BeaconBlock, <_>::random_for_test(rng)); + let mut block = SignedBeaconBlock::from_block(inner, types::Signature::random_for_test(rng)); let mut blob_sidecars = vec![]; @@ -2575,7 +2701,6 @@ pub fn generate_rand_block_and_blobs( }; let (bundle, transactions) = execution_layer::test_utils::generate_blobs::(num_blobs).unwrap(); - payload.execution_payload.transactions = <_>::default(); for tx in Vec::from(transactions) { payload.execution_payload.transactions.push(tx).unwrap(); diff --git a/beacon_node/beacon_chain/tests/attestation_verification.rs b/beacon_node/beacon_chain/tests/attestation_verification.rs index e91e8c77a3..63740c4736 100644 --- a/beacon_node/beacon_chain/tests/attestation_verification.rs +++ b/beacon_node/beacon_chain/tests/attestation_verification.rs @@ -2,6 +2,7 @@ use beacon_chain::attestation_verification::{ batch_verify_aggregated_attestations, batch_verify_unaggregated_attestations, Error, + ObservedAttestationKey, }; use beacon_chain::test_utils::{MakeAttestationOptions, HARNESS_GENESIS_TIME}; use beacon_chain::{ @@ -128,7 +129,12 @@ fn get_valid_unaggregated_attestation( let validator_committee_index = 0; let validator_index = *head .beacon_state - .get_beacon_committee(current_slot, valid_attestation.data().index) + .get_beacon_committee( + current_slot, + valid_attestation + .committee_index() + .expect("should get committee index"), + ) .expect("should get committees") .committee .get(validator_committee_index) @@ -146,8 +152,8 @@ fn get_valid_unaggregated_attestation( ) .expect("should sign attestation"); - let subnet_id = SubnetId::compute_subnet_for_attestation::( - &valid_attestation.to_ref(), + let subnet_id = 
SubnetId::compute_subnet_for_attestation::( + valid_attestation.to_ref(), head.beacon_state .get_committee_count_at_slot(current_slot) .expect("should get committee count"), @@ -173,7 +179,12 @@ fn get_valid_aggregated_attestation( let current_slot = chain.slot().expect("should get slot"); let committee = state - .get_beacon_committee(current_slot, aggregate.data().index) + .get_beacon_committee( + current_slot, + aggregate + .committee_index() + .expect("should get committee index"), + ) .expect("should get committees"); let committee_len = committee.committee.len(); @@ -216,7 +227,7 @@ fn get_valid_aggregated_attestation( /// attestation. fn get_non_aggregator( chain: &BeaconChain, - aggregate: &AttestationRef, + aggregate: AttestationRef, ) -> (usize, SecretKey) { let head = chain.head_snapshot(); let state = &head.beacon_state; @@ -224,7 +235,12 @@ fn get_non_aggregator( // TODO(electra) make fork-agnostic let committee = state - .get_beacon_committee(current_slot, aggregate.data().index) + .get_beacon_committee( + current_slot, + aggregate + .committee_index() + .expect("should get committee index"), + ) .expect("should get committees"); let committee_len = committee.committee.len(); @@ -371,7 +387,7 @@ impl GossipTester { pub fn non_aggregator(&self) -> (usize, SecretKey) { get_non_aggregator( &self.harness.chain, - &self.valid_aggregate.message().aggregate(), + self.valid_aggregate.message().aggregate(), ) } @@ -429,6 +445,7 @@ impl GossipTester { vec![&self.invalid_aggregate, &aggregate].into_iter(), ) .unwrap(); + assert_eq!(results.len(), 2); let batch_err = results.pop().unwrap().err().expect(&format!( "{} should error during batch_verify_aggregated_attestations_for_gossip", @@ -662,7 +679,7 @@ async fn aggregated_gossip_verification() { .chain .head_snapshot() .beacon_state - .get_beacon_committee(tester.slot(), a.message().aggregate().data().index) + .get_beacon_committee(tester.slot(), a.message().aggregate().committee_index().expect("should get committee index")) .expect("should get committees") .committee .len(); @@ -778,12 +795,7 @@ async fn aggregated_gossip_verification() { // However, the following error is triggered first: AttnError::AggregatorNotInCommittee { aggregator_index - } | - // unless were working with electra attestations - // in which case this error is triggered instead: - AttnError::AggregatorPubkeyUnknown( - aggregator_index - ) + } if aggregator_index == VALIDATOR_COUNT as u64 )) }, @@ -792,7 +804,7 @@ async fn aggregated_gossip_verification() { * The following test ensures: * * aggregate_and_proof.selection_proof selects the validator as an aggregator for the slot -- - * i.e. is_aggregator(state, aggregate.data.slot, aggregate.data.index, + * i.e. is_aggregator(state, aggregate.data.slot, aggregate.committee_index(), * aggregate_and_proof.selection_proof) returns True. 
*/ .inspect_aggregate_err( @@ -812,6 +824,7 @@ async fn aggregated_gossip_verification() { }, |tester, err| { let (val_index, _) = tester.non_aggregator(); + assert!(matches!( err, AttnError::InvalidSelectionProof { @@ -838,7 +851,10 @@ async fn aggregated_gossip_verification() { assert!(matches!( err, AttnError::AttestationSupersetKnown(hash) - if hash == tester.valid_aggregate.message().aggregate().data().tree_hash_root() + if hash == ObservedAttestationKey { + committee_index: tester.valid_aggregate.message().aggregate().expect("should get committee index"), + attestation_data: tester.valid_aggregate.message().aggregate().data().clone(), + }.tree_hash_root() )) }, ) diff --git a/beacon_node/beacon_chain/tests/block_verification.rs b/beacon_node/beacon_chain/tests/block_verification.rs index 261e15ba92..0fe406a017 100644 --- a/beacon_node/beacon_chain/tests/block_verification.rs +++ b/beacon_node/beacon_chain/tests/block_verification.rs @@ -666,27 +666,58 @@ async fn invalid_signature_attester_slashing() { for &block_index in BLOCK_INDICES { let harness = get_invalid_sigs_harness(&chain_segment).await; let mut snapshots = chain_segment.clone(); - let indexed_attestation = IndexedAttestationBase { - attesting_indices: vec![0].into(), - data: AttestationData { - slot: Slot::new(0), - index: 0, - beacon_block_root: Hash256::zero(), - source: Checkpoint { - epoch: Epoch::new(0), - root: Hash256::zero(), + let fork_name = harness.chain.spec.fork_name_at_slot::(Slot::new(0)); + + let attester_slashing = if fork_name >= ForkName::Electra { + let indexed_attestation = IndexedAttestationElectra { + attesting_indices: vec![0].into(), + data: AttestationData { + slot: Slot::new(0), + index: 0, + beacon_block_root: Hash256::zero(), + source: Checkpoint { + epoch: Epoch::new(0), + root: Hash256::zero(), + }, + target: Checkpoint { + epoch: Epoch::new(0), + root: Hash256::zero(), + }, }, - target: Checkpoint { - epoch: Epoch::new(0), - root: Hash256::zero(), + signature: junk_aggregate_signature(), + }; + let attester_slashing = AttesterSlashingElectra { + attestation_1: indexed_attestation.clone(), + attestation_2: indexed_attestation, + }; + + AttesterSlashing::Electra(attester_slashing) + } else { + let indexed_attestation = IndexedAttestationBase { + attesting_indices: vec![0].into(), + data: AttestationData { + slot: Slot::new(0), + index: 0, + beacon_block_root: Hash256::zero(), + source: Checkpoint { + epoch: Epoch::new(0), + root: Hash256::zero(), + }, + target: Checkpoint { + epoch: Epoch::new(0), + root: Hash256::zero(), + }, }, - }, - signature: junk_aggregate_signature(), - }; - let attester_slashing = AttesterSlashingBase { - attestation_1: indexed_attestation.clone(), - attestation_2: indexed_attestation, + signature: junk_aggregate_signature(), + }; + let attester_slashing = AttesterSlashingBase { + attestation_1: indexed_attestation.clone(), + attestation_2: indexed_attestation, + }; + + AttesterSlashing::Base(attester_slashing) }; + let (mut block, signature) = snapshots[block_index] .beacon_block .as_ref() @@ -695,31 +726,33 @@ async fn invalid_signature_attester_slashing() { match &mut block.body_mut() { BeaconBlockBodyRefMut::Base(ref mut blk) => { blk.attester_slashings - .push(attester_slashing) + .push(attester_slashing.as_base().unwrap().clone()) .expect("should update attester slashing"); } BeaconBlockBodyRefMut::Altair(ref mut blk) => { blk.attester_slashings - .push(attester_slashing) + .push(attester_slashing.as_base().unwrap().clone()) .expect("should update attester 
slashing"); } BeaconBlockBodyRefMut::Bellatrix(ref mut blk) => { blk.attester_slashings - .push(attester_slashing) + .push(attester_slashing.as_base().unwrap().clone()) .expect("should update attester slashing"); } BeaconBlockBodyRefMut::Capella(ref mut blk) => { blk.attester_slashings - .push(attester_slashing) + .push(attester_slashing.as_base().unwrap().clone()) .expect("should update attester slashing"); } BeaconBlockBodyRefMut::Deneb(ref mut blk) => { blk.attester_slashings - .push(attester_slashing) + .push(attester_slashing.as_base().unwrap().clone()) .expect("should update attester slashing"); } - BeaconBlockBodyRefMut::Electra(_) => { - panic!("electra test not implemented!"); + BeaconBlockBodyRefMut::Electra(ref mut blk) => { + blk.attester_slashings + .push(attester_slashing.as_electra().unwrap().clone()) + .expect("should update attester slashing"); } } snapshots[block_index].beacon_block = diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index bc03767ce5..4dc7d20e22 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -25,7 +25,6 @@ use std::collections::HashMap; use std::sync::Arc; use std::time::Duration; use task_executor::ShutdownReason; -use tree_hash::TreeHash; use types::*; const VALIDATOR_COUNT: usize = 32; @@ -1224,13 +1223,13 @@ async fn attesting_to_optimistic_head() { let get_aggregated = || { rig.harness .chain - .get_aggregated_attestation(attestation.data()) + .get_aggregated_attestation(attestation.to_ref()) }; let get_aggregated_by_slot_and_root = || { rig.harness .chain - .get_aggregated_attestation_base(attestation.data()) + .get_aggregated_attestation(attestation.to_ref()) }; /* diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 769d178dbd..ef87390930 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -1014,6 +1014,7 @@ async fn multiple_attestations_per_block() { .await; let head = harness.chain.head_snapshot(); + let committees_per_slot = head .beacon_state .get_committee_count_at_slot(head.beacon_state.slot()) @@ -1022,15 +1023,29 @@ async fn multiple_attestations_per_block() { for snapshot in harness.chain.chain_dump().unwrap() { let slot = snapshot.beacon_block.slot(); - assert_eq!( - snapshot - .beacon_block - .as_ref() - .message() - .body() - .attestations_len() as u64, - if slot <= 1 { 0 } else { committees_per_slot } - ); + let fork_name = harness.chain.spec.fork_name_at_slot::(slot); + + if fork_name >= ForkName::Electra { + assert_eq!( + snapshot + .beacon_block + .as_ref() + .message() + .body() + .attestations_len() as u64, + if slot <= 1 { 0 } else { 1 } + ); + } else { + assert_eq!( + snapshot + .beacon_block + .as_ref() + .message() + .body() + .attestations_len() as u64, + if slot <= 1 { 0 } else { committees_per_slot } + ); + } } } diff --git a/beacon_node/beacon_chain/tests/sync_committee_verification.rs b/beacon_node/beacon_chain/tests/sync_committee_verification.rs index 0e4745ff6b..242ed55847 100644 --- a/beacon_node/beacon_chain/tests/sync_committee_verification.rs +++ b/beacon_node/beacon_chain/tests/sync_committee_verification.rs @@ -318,7 +318,6 @@ async fn aggregated_gossip_verification() { * The contribution_and_proof.selection_proof is a valid signature of the `SyncAggregatorSelectionData` * derived from the contribution by the validator with index 
`contribution_and_proof.aggregator_index`. */ - assert_invalid!( "aggregate with bad selection proof signature", { @@ -354,7 +353,6 @@ async fn aggregated_gossip_verification() { * derived from the participation info in `aggregation_bits` for the subcommittee specified by * the `contribution.subcommittee_index`. */ - assert_invalid!( "aggregate with bad aggregate signature", { @@ -450,6 +448,7 @@ async fn aggregated_gossip_verification() { root: contribution.beacon_block_root, subcommittee_index: contribution.subcommittee_index, }; + assert_invalid!( "aggregate that has already been seen", valid_aggregate.clone(), diff --git a/beacon_node/beacon_processor/Cargo.toml b/beacon_node/beacon_processor/Cargo.toml index 6c49a28ec8..3373dd1c72 100644 --- a/beacon_node/beacon_processor/Cargo.toml +++ b/beacon_node/beacon_processor/Cargo.toml @@ -15,10 +15,7 @@ strum = { workspace = true } task_executor = { workspace = true } slot_clock = { workspace = true } lighthouse_network = { workspace = true } -hex = { workspace = true } -derivative = { workspace = true } types = { workspace = true } -ethereum_ssz = { workspace = true } lazy_static = { workspace = true } lighthouse_metrics = { workspace = true } parking_lot = { workspace = true } diff --git a/beacon_node/builder_client/Cargo.toml b/beacon_node/builder_client/Cargo.toml index 21b9b84133..c3658f45c7 100644 --- a/beacon_node/builder_client/Cargo.toml +++ b/beacon_node/builder_client/Cargo.toml @@ -9,5 +9,4 @@ reqwest = { workspace = true } sensitive_url = { workspace = true } eth2 = { workspace = true } serde = { workspace = true } -serde_json = { workspace = true } lighthouse_version = { workspace = true } diff --git a/beacon_node/builder_client/src/lib.rs b/beacon_node/builder_client/src/lib.rs index 2b373292f3..91ee00a65f 100644 --- a/beacon_node/builder_client/src/lib.rs +++ b/beacon_node/builder_client/src/lib.rs @@ -29,10 +29,13 @@ pub struct Timeouts { get_builder_status: Duration, } -impl Default for Timeouts { - fn default() -> Self { +impl Timeouts { + fn new(get_header_timeout: Option) -> Self { + let get_header = + get_header_timeout.unwrap_or(Duration::from_millis(DEFAULT_GET_HEADER_TIMEOUT_MILLIS)); + Self { - get_header: Duration::from_millis(DEFAULT_GET_HEADER_TIMEOUT_MILLIS), + get_header, post_validators: Duration::from_millis(DEFAULT_TIMEOUT_MILLIS), post_blinded_blocks: Duration::from_millis(DEFAULT_TIMEOUT_MILLIS), get_builder_status: Duration::from_millis(DEFAULT_TIMEOUT_MILLIS), @@ -49,13 +52,17 @@ pub struct BuilderHttpClient { } impl BuilderHttpClient { - pub fn new(server: SensitiveUrl, user_agent: Option) -> Result { + pub fn new( + server: SensitiveUrl, + user_agent: Option, + builder_header_timeout: Option, + ) -> Result { let user_agent = user_agent.unwrap_or(DEFAULT_USER_AGENT.to_string()); let client = reqwest::Client::builder().user_agent(&user_agent).build()?; Ok(Self { client, server, - timeouts: Timeouts::default(), + timeouts: Timeouts::new(builder_header_timeout), user_agent, }) } diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index 16c4a947a6..4ac035d17b 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -16,8 +16,6 @@ store = { workspace = true } network = { workspace = true } timer = { path = "../timer" } lighthouse_network = { workspace = true } -logging = { workspace = true } -parking_lot = { workspace = true } types = { workspace = true } eth2_config = { workspace = true } slot_clock = { workspace = true } @@ -44,6 +42,4 @@ slasher_service = { 
path = "../../slasher/service" } monitoring_api = { workspace = true } execution_layer = { workspace = true } beacon_processor = { workspace = true } -num_cpus = { workspace = true } ethereum_ssz = { workspace = true } -tree_hash = { workspace = true } diff --git a/beacon_node/eth1/Cargo.toml b/beacon_node/eth1/Cargo.toml index 2f716cd19b..2ffca4a571 100644 --- a/beacon_node/eth1/Cargo.toml +++ b/beacon_node/eth1/Cargo.toml @@ -11,12 +11,9 @@ sloggers = { workspace = true } environment = { workspace = true } [dependencies] -reqwest = { workspace = true } execution_layer = { workspace = true } futures = { workspace = true } -serde_json = { workspace = true } serde = { workspace = true } -hex = { workspace = true } types = { workspace = true } merkle_proof = { workspace = true } ethereum_ssz = { workspace = true } diff --git a/beacon_node/eth1/src/service.rs b/beacon_node/eth1/src/service.rs index 31082394ba..d68a8b6f28 100644 --- a/beacon_node/eth1/src/service.rs +++ b/beacon_node/eth1/src/service.rs @@ -23,7 +23,7 @@ use tokio::time::{interval_at, Duration, Instant}; use types::{ChainSpec, DepositTreeSnapshot, Eth1Data, EthSpec, Unsigned}; /// Indicates the default eth1 chain id we use for the deposit contract. -pub const DEFAULT_CHAIN_ID: Eth1Id = Eth1Id::Goerli; +pub const DEFAULT_CHAIN_ID: Eth1Id = Eth1Id::Mainnet; /// Indicates the default eth1 endpoint. pub const DEFAULT_ETH1_ENDPOINT: &str = "http://localhost:8545"; @@ -266,7 +266,7 @@ pub struct Config { pub endpoint: Eth1Endpoint, /// The address the `BlockCache` and `DepositCache` should assume is the canonical deposit contract. pub deposit_contract_address: String, - /// The eth1 chain id where the deposit contract is deployed (Goerli/Mainnet). + /// The eth1 chain id where the deposit contract is deployed (Holesky/Mainnet). pub chain_id: Eth1Id, /// Defines the first block that the `DepositCache` will start searching for deposit logs. /// @@ -450,11 +450,6 @@ impl Service { /// Returns the follow distance that has been shortened to accommodate for differences in the /// spacing between blocks. - /// - /// ## Notes - /// - /// This is useful since the spec declares `SECONDS_PER_ETH1_BLOCK` to be `14`, whilst it is - /// actually `15` on Goerli. pub fn cache_follow_distance(&self) -> u64 { self.config().cache_follow_distance() } diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml index 28cd16e4ef..ff147ad3b4 100644 --- a/beacon_node/execution_layer/Cargo.toml +++ b/beacon_node/execution_layer/Cargo.toml @@ -8,9 +8,7 @@ edition = { workspace = true } [dependencies] types = { workspace = true } tokio = { workspace = true } -async-trait = "0.1.51" slog = { workspace = true } -futures = { workspace = true } sensitive_url = { workspace = true } reqwest = { workspace = true } ethereum_serde_utils = { workspace = true } diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index aea87dd7d4..3f0b2ff602 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -242,7 +242,6 @@ pub mod deposit_methods { /// Represents an eth1 chain/network id. 
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] pub enum Eth1Id { - Goerli, Mainnet, Custom(u64), } @@ -266,7 +265,6 @@ pub mod deposit_methods { fn into(self) -> u64 { match self { Eth1Id::Mainnet => 1, - Eth1Id::Goerli => 5, Eth1Id::Custom(id) => id, } } @@ -277,7 +275,6 @@ pub mod deposit_methods { let into = |x: Eth1Id| -> u64 { x.into() }; match id { id if id == into(Eth1Id::Mainnet) => Eth1Id::Mainnet, - id if id == into(Eth1Id::Goerli) => Eth1Id::Goerli, id => Eth1Id::Custom(id), } } diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 15a1c19462..6e674b220e 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -370,6 +370,9 @@ pub struct Config { pub execution_endpoint: Option, /// Endpoint urls for services providing the builder api. pub builder_url: Option, + /// The timeout value used when making a request to fetch a block header + /// from the builder api. + pub builder_header_timeout: Option, /// User agent to send with requests to the builder API. pub builder_user_agent: Option, /// JWT secret for the above endpoint running the engine api. @@ -400,6 +403,7 @@ impl ExecutionLayer { execution_endpoint: url, builder_url, builder_user_agent, + builder_header_timeout, secret_file, suggested_fee_recipient, jwt_id, @@ -469,7 +473,7 @@ impl ExecutionLayer { }; if let Some(builder_url) = builder_url { - el.set_builder_url(builder_url, builder_user_agent)?; + el.set_builder_url(builder_url, builder_user_agent, builder_header_timeout)?; } Ok(el) @@ -491,9 +495,14 @@ impl ExecutionLayer { &self, builder_url: SensitiveUrl, builder_user_agent: Option, + builder_header_timeout: Option, ) -> Result<(), Error> { - let builder_client = BuilderHttpClient::new(builder_url.clone(), builder_user_agent) - .map_err(Error::Builder)?; + let builder_client = BuilderHttpClient::new( + builder_url.clone(), + builder_user_agent, + builder_header_timeout, + ) + .map_err(Error::Builder)?; info!( self.log(), "Using external block builder"; diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index e992f45849..7db0889efd 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -3386,7 +3386,7 @@ pub fn serve( "error" => format!("{:?}", e), "request_index" => index, "aggregator_index" => aggregate.message().aggregator_index(), - "attestation_index" => aggregate.message().aggregate().data().index, + "attestation_index" => aggregate.message().aggregate().committee_index(), "attestation_slot" => aggregate.message().aggregate().data().slot, ); failures.push(api_types::Failure::new(index, format!("Verification: {:?}", e))); @@ -3407,7 +3407,7 @@ pub fn serve( "error" => format!("{:?}", e), "request_index" => index, "aggregator_index" => verified_aggregate.aggregate().message().aggregator_index(), - "attestation_index" => verified_aggregate.attestation().data().index, + "attestation_index" => verified_aggregate.attestation().committee_index(), "attestation_slot" => verified_aggregate.attestation().data().slot, ); failures.push(api_types::Failure::new(index, format!("Fork choice: {:?}", e))); @@ -4386,6 +4386,9 @@ pub fn serve( api_types::EventTopic::ProposerSlashing => { event_handler.subscribe_proposer_slashing() } + api_types::EventTopic::BlsToExecutionChange => { + event_handler.subscribe_bls_to_execution_change() + } }; receivers.push( diff --git a/beacon_node/http_api/src/publish_attestations.rs b/beacon_node/http_api/src/publish_attestations.rs index 
541ba8b787..0065476532 100644 --- a/beacon_node/http_api/src/publish_attestations.rs +++ b/beacon_node/http_api/src/publish_attestations.rs @@ -141,7 +141,7 @@ pub async fn publish_attestations( // move the `attestations` vec into the blocking task, so this small overhead is unavoidable. let attestation_metadata = attestations .iter() - .map(|att| (att.data().slot, att.data().index)) + .map(|att| (att.data().slot, att.committee_index())) .collect::>(); // Gossip validate and publish attestations that can be immediately processed. diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 5e22a496a6..c637fb6aa8 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -70,6 +70,7 @@ struct ApiTester { attester_slashing: AttesterSlashing, proposer_slashing: ProposerSlashing, voluntary_exit: SignedVoluntaryExit, + bls_to_execution_change: SignedBlsToExecutionChange, network_rx: NetworkReceivers, local_enr: Enr, external_peer_id: PeerId, @@ -128,6 +129,7 @@ impl ApiTester { }) .logger(logging::test_logger()) .deterministic_keypairs(VALIDATOR_COUNT) + .deterministic_withdrawal_keypairs(VALIDATOR_COUNT) .fresh_ephemeral_store() .mock_execution_layer_with_config() .build(); @@ -223,6 +225,7 @@ impl ApiTester { let attester_slashing = harness.make_attester_slashing(vec![0, 1]); let proposer_slashing = harness.make_proposer_slashing(2); let voluntary_exit = harness.make_voluntary_exit(3, harness.chain.epoch().unwrap()); + let bls_to_execution_change = harness.make_bls_to_execution_change(4, Address::zero()); let chain = harness.chain.clone(); @@ -289,6 +292,7 @@ impl ApiTester { attester_slashing, proposer_slashing, voluntary_exit, + bls_to_execution_change, network_rx, local_enr, external_peer_id, @@ -301,6 +305,7 @@ impl ApiTester { BeaconChainHarness::builder(MainnetEthSpec) .default_spec() .deterministic_keypairs(VALIDATOR_COUNT) + .deterministic_withdrawal_keypairs(VALIDATOR_COUNT) .fresh_ephemeral_store() .build(), ); @@ -336,6 +341,7 @@ impl ApiTester { let attester_slashing = harness.make_attester_slashing(vec![0, 1]); let proposer_slashing = harness.make_proposer_slashing(2); let voluntary_exit = harness.make_voluntary_exit(3, harness.chain.epoch().unwrap()); + let bls_to_execution_change = harness.make_bls_to_execution_change(4, Address::zero()); let chain = harness.chain.clone(); @@ -373,6 +379,7 @@ impl ApiTester { attester_slashing, proposer_slashing, voluntary_exit, + bls_to_execution_change, network_rx, local_enr, external_peer_id, @@ -5261,6 +5268,7 @@ impl ApiTester { EventTopic::FinalizedCheckpoint, EventTopic::AttesterSlashing, EventTopic::ProposerSlashing, + EventTopic::BlsToExecutionChange, ]; let mut events_future = self .client @@ -5303,6 +5311,20 @@ impl ApiTester { &[EventKind::VoluntaryExit(self.voluntary_exit.clone())] ); + // Produce a BLS to execution change event + self.client + .post_beacon_pool_bls_to_execution_changes(&[self.bls_to_execution_change.clone()]) + .await + .unwrap(); + + let bls_events = poll_events(&mut events_future, 1, Duration::from_millis(10000)).await; + assert_eq!( + bls_events.as_slice(), + &[EventKind::BlsToExecutionChange(Box::new( + self.bls_to_execution_change.clone() + ))] + ); + // Submit the next block, which is on an epoch boundary, so this will produce a finalized // checkpoint event, head event, and block event let block_root = self.next_block.signed_block().canonical_root(); diff --git a/beacon_node/lighthouse_network/Cargo.toml 
b/beacon_node/lighthouse_network/Cargo.toml index 1617c0bd6c..b318bd4fb3 100644 --- a/beacon_node/lighthouse_network/Cargo.toml +++ b/beacon_node/lighthouse_network/Cargo.toml @@ -13,8 +13,6 @@ types = { workspace = true } serde = { workspace = true } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } -tree_hash = { workspace = true } -tree_hash_derive = { workspace = true } slog = { workspace = true } lighthouse_version = { workspace = true } tokio = { workspace = true } @@ -43,18 +41,11 @@ superstruct = { workspace = true } prometheus-client = "0.22.0" unused_port = { workspace = true } delay_map = { workspace = true } -tracing = { workspace = true } -byteorder = { workspace = true } bytes = { workspace = true } either = { workspace = true } # Local dependencies -futures-ticker = "0.0.3" -getrandom = "0.2.11" -hex_fmt = "0.3.0" -instant = "0.1.12" void = "1.0.2" -base64 = "0.21.5" libp2p-mplex = "0.41" [dependencies.libp2p] @@ -72,4 +63,3 @@ async-channel = { workspace = true } [features] libp2p-websocket = [] - diff --git a/beacon_node/lighthouse_network/gossipsub/Cargo.toml b/beacon_node/lighthouse_network/gossipsub/Cargo.toml index 871955c059..d8fa445e63 100644 --- a/beacon_node/lighthouse_network/gossipsub/Cargo.toml +++ b/beacon_node/lighthouse_network/gossipsub/Cargo.toml @@ -10,7 +10,7 @@ keywords = ["peer-to-peer", "libp2p", "networking"] categories = ["network-programming", "asynchronous"] [features] -wasm-bindgen = ["getrandom/js", "instant/wasm-bindgen"] +wasm-bindgen = ["getrandom/js"] [dependencies] async-channel = { workspace = true } @@ -25,7 +25,6 @@ futures-ticker = "0.0.3" futures-timer = "3.0.2" getrandom = "0.2.12" hex_fmt = "0.3.0" -instant = "0.1.12" libp2p = { version = "0.53", default-features = false } quick-protobuf = "0.8" quick-protobuf-codec = "0.3" @@ -33,11 +32,10 @@ rand = "0.8" regex = "1.10.3" serde = { version = "1", optional = true, features = ["derive"] } sha2 = "0.10.8" -smallvec = "1.13.1" tracing = "0.1.37" void = "1.0.2" - prometheus-client = "0.22.0" +web-time = "1.1.0" [dev-dependencies] quickcheck = { workspace = true } diff --git a/beacon_node/lighthouse_network/gossipsub/src/backoff.rs b/beacon_node/lighthouse_network/gossipsub/src/backoff.rs index 2567a3691e..f83a24baaf 100644 --- a/beacon_node/lighthouse_network/gossipsub/src/backoff.rs +++ b/beacon_node/lighthouse_network/gossipsub/src/backoff.rs @@ -20,13 +20,13 @@ //! Data structure for efficiently storing known back-off's when pruning peers. 
use crate::topic::TopicHash; -use instant::Instant; use libp2p::identity::PeerId; use std::collections::{ hash_map::{Entry, HashMap}, HashSet, }; use std::time::Duration; +use web_time::Instant; #[derive(Copy, Clone)] struct HeartbeatIndex(usize); diff --git a/beacon_node/lighthouse_network/gossipsub/src/behaviour.rs b/beacon_node/lighthouse_network/gossipsub/src/behaviour.rs index ce0437342e..ccebb4e267 100644 --- a/beacon_node/lighthouse_network/gossipsub/src/behaviour.rs +++ b/beacon_node/lighthouse_network/gossipsub/src/behaviour.rs @@ -34,7 +34,6 @@ use futures_ticker::Ticker; use prometheus_client::registry::Registry; use rand::{seq::SliceRandom, thread_rng}; -use instant::Instant; use libp2p::core::{multiaddr::Protocol::Ip4, multiaddr::Protocol::Ip6, Endpoint, Multiaddr}; use libp2p::identity::Keypair; use libp2p::identity::PeerId; @@ -44,6 +43,7 @@ use libp2p::swarm::{ ConnectionDenied, ConnectionId, NetworkBehaviour, NotifyHandler, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }; +use web_time::{Instant, SystemTime}; use super::gossip_promises::GossipPromises; use super::handler::{Handler, HandlerEvent, HandlerIn}; @@ -67,7 +67,6 @@ use super::{ types::RpcOut, }; use super::{PublishError, SubscriptionError, TopicScoreParams, ValidationError}; -use instant::SystemTime; use quick_protobuf::{MessageWrite, Writer}; use std::{cmp::Ordering::Equal, fmt::Debug}; diff --git a/beacon_node/lighthouse_network/gossipsub/src/gossip_promises.rs b/beacon_node/lighthouse_network/gossipsub/src/gossip_promises.rs index 43ca178556..2bfb20595a 100644 --- a/beacon_node/lighthouse_network/gossipsub/src/gossip_promises.rs +++ b/beacon_node/lighthouse_network/gossipsub/src/gossip_promises.rs @@ -21,9 +21,9 @@ use super::peer_score::RejectReason; use super::MessageId; use super::ValidationError; -use instant::Instant; use libp2p::identity::PeerId; use std::collections::HashMap; +use web_time::Instant; /// Tracks recently sent `IWANT` messages and checks if peers respond to them. #[derive(Default)] diff --git a/beacon_node/lighthouse_network/gossipsub/src/handler.rs b/beacon_node/lighthouse_network/gossipsub/src/handler.rs index 298570955f..359bf8da42 100644 --- a/beacon_node/lighthouse_network/gossipsub/src/handler.rs +++ b/beacon_node/lighthouse_network/gossipsub/src/handler.rs @@ -26,7 +26,6 @@ use asynchronous_codec::Framed; use futures::future::Either; use futures::prelude::*; use futures::StreamExt; -use instant::Instant; use libp2p::core::upgrade::DeniedUpgrade; use libp2p::swarm::handler::{ ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, DialUpgradeError, @@ -37,6 +36,7 @@ use std::{ pin::Pin, task::{Context, Poll}, }; +use web_time::Instant; /// The event emitted by the Handler. This informs the behaviour of various events created /// by the handler. 
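The gossipsub crate swaps the `instant` crate for `web-time` in this and the surrounding hunks. As a rough sketch of why the change is mechanical (the helper below is illustrative, not part of the diff): on native targets `web_time` re-exports the `std::time` types, so `Instant`, `SystemTime`, and `Duration` behave identically, while on `wasm32-unknown-unknown` they are backed by the browser's clocks, which `std::time::Instant` cannot provide.

```rust
use std::time::Duration;
// Drop-in replacement for `instant::Instant` / `std::time::Instant`.
use web_time::Instant;

/// Measure how long a closure takes; the same code compiles for native
/// and wasm targets because `web_time` mirrors the `std::time` API.
fn elapsed_of<F: FnOnce()>(f: F) -> Duration {
    let start = Instant::now();
    f();
    start.elapsed()
}

fn main() {
    let d = elapsed_of(|| std::thread::sleep(Duration::from_millis(5)));
    assert!(d >= Duration::from_millis(5));
}
```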
diff --git a/beacon_node/lighthouse_network/gossipsub/src/peer_score.rs b/beacon_node/lighthouse_network/gossipsub/src/peer_score.rs index 4d609434f1..fa02f06f69 100644 --- a/beacon_node/lighthouse_network/gossipsub/src/peer_score.rs +++ b/beacon_node/lighthouse_network/gossipsub/src/peer_score.rs @@ -24,11 +24,11 @@ use super::metrics::{Metrics, Penalty}; use super::time_cache::TimeCache; use super::{MessageId, TopicHash}; -use instant::Instant; use libp2p::identity::PeerId; use std::collections::{hash_map, HashMap, HashSet}; use std::net::IpAddr; use std::time::Duration; +use web_time::Instant; mod params; use super::ValidationError; diff --git a/beacon_node/lighthouse_network/gossipsub/src/time_cache.rs b/beacon_node/lighthouse_network/gossipsub/src/time_cache.rs index 89fd4afee0..a3e5c01ac4 100644 --- a/beacon_node/lighthouse_network/gossipsub/src/time_cache.rs +++ b/beacon_node/lighthouse_network/gossipsub/src/time_cache.rs @@ -21,13 +21,13 @@ //! This implements a time-based LRU cache for checking gossipsub message duplicates. use fnv::FnvHashMap; -use instant::Instant; use std::collections::hash_map::{ self, Entry::{Occupied, Vacant}, }; use std::collections::VecDeque; use std::time::Duration; +use web_time::Instant; struct ExpiringElement { /// The element that expires diff --git a/beacon_node/lighthouse_network/gossipsub/src/types.rs b/beacon_node/lighthouse_network/gossipsub/src/types.rs index 712698b42a..84bdfb786f 100644 --- a/beacon_node/lighthouse_network/gossipsub/src/types.rs +++ b/beacon_node/lighthouse_network/gossipsub/src/types.rs @@ -25,7 +25,6 @@ use async_channel::{Receiver, Sender}; use futures::stream::Peekable; use futures::{Future, Stream, StreamExt}; use futures_timer::Delay; -use instant::Duration; use libp2p::identity::PeerId; use libp2p::swarm::ConnectionId; use prometheus_client::encoding::EncodeLabelValue; @@ -36,6 +35,7 @@ use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; use std::task::{Context, Poll}; use std::{fmt, pin::Pin}; +use web_time::Duration; use crate::rpc_proto::proto; #[cfg(feature = "serde")] diff --git a/beacon_node/lighthouse_network/src/types/pubsub.rs b/beacon_node/lighthouse_network/src/types/pubsub.rs index 8945c23a54..7be68f879f 100644 --- a/beacon_node/lighthouse_network/src/types/pubsub.rs +++ b/beacon_node/lighthouse_network/src/types/pubsub.rs @@ -7,13 +7,14 @@ use ssz::{Decode, Encode}; use std::io::{Error, ErrorKind}; use std::sync::Arc; use types::{ - Attestation, AttesterSlashing, AttesterSlashingBase, AttesterSlashingElectra, BlobSidecar, - EthSpec, ForkContext, ForkName, LightClientFinalityUpdate, LightClientOptimisticUpdate, - ProposerSlashing, SignedAggregateAndProof, SignedAggregateAndProofBase, - SignedAggregateAndProofElectra, SignedBeaconBlock, SignedBeaconBlockAltair, - SignedBeaconBlockBase, SignedBeaconBlockBellatrix, SignedBeaconBlockCapella, - SignedBeaconBlockDeneb, SignedBeaconBlockElectra, SignedBlsToExecutionChange, - SignedContributionAndProof, SignedVoluntaryExit, SubnetId, SyncCommitteeMessage, SyncSubnetId, + Attestation, AttestationBase, AttestationElectra, AttesterSlashing, AttesterSlashingBase, + AttesterSlashingElectra, BlobSidecar, EthSpec, ForkContext, ForkName, + LightClientFinalityUpdate, LightClientOptimisticUpdate, ProposerSlashing, + SignedAggregateAndProof, SignedAggregateAndProofBase, SignedAggregateAndProofElectra, + SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockBellatrix, + SignedBeaconBlockCapella, SignedBeaconBlockDeneb, 
SignedBeaconBlockElectra, + SignedBlsToExecutionChange, SignedContributionAndProof, SignedVoluntaryExit, SubnetId, + SyncCommitteeMessage, SyncSubnetId, }; #[derive(Debug, Clone, PartialEq)] @@ -182,7 +183,26 @@ impl PubsubMessage { } GossipKind::Attestation(subnet_id) => { let attestation = - Attestation::from_ssz_bytes(data).map_err(|e| format!("{:?}", e))?; + match fork_context.from_context_bytes(gossip_topic.fork_digest) { + Some(ForkName::Base) + | Some(ForkName::Altair) + | Some(ForkName::Bellatrix) + | Some(ForkName::Capella) + | Some(ForkName::Deneb) => Attestation::Base( + AttestationBase::from_ssz_bytes(data) + .map_err(|e| format!("{:?}", e))?, + ), + Some(ForkName::Electra) => Attestation::Electra( + AttestationElectra::from_ssz_bytes(data) + .map_err(|e| format!("{:?}", e))?, + ), + None => { + return Err(format!( + "Unknown gossipsub fork digest: {:?}", + gossip_topic.fork_digest + )) + } + }; Ok(PubsubMessage::Attestation(Box::new(( *subnet_id, attestation, @@ -381,17 +401,17 @@ impl std::fmt::Display for PubsubMessage { ), PubsubMessage::AggregateAndProofAttestation(att) => write!( f, - "Aggregate and Proof: slot: {}, index: {}, aggregator_index: {}", + "Aggregate and Proof: slot: {}, index: {:?}, aggregator_index: {}", att.message().aggregate().data().slot, - att.message().aggregate().data().index, + att.message().aggregate().committee_index(), att.message().aggregator_index(), ), PubsubMessage::Attestation(data) => write!( f, - "Attestation: subnet_id: {}, attestation_slot: {}, attestation_index: {}", + "Attestation: subnet_id: {}, attestation_slot: {}, attestation_index: {:?}", *data.0, data.1.data().slot, - data.1.data().index, + data.1.committee_index(), ), PubsubMessage::VoluntaryExit(_data) => write!(f, "Voluntary Exit"), PubsubMessage::ProposerSlashing(_data) => write!(f, "Proposer Slashing"), diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index 406015360e..0ad7f53ee7 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -39,19 +39,14 @@ logging = { workspace = true } task_executor = { workspace = true } igd-next = "0.14" itertools = { workspace = true } -num_cpus = { workspace = true } lru_cache = { workspace = true } -lru = { workspace = true } strum = { workspace = true } -tokio-util = { workspace = true } derivative = { workspace = true } delay_map = { workspace = true } -ethereum-types = { workspace = true } operation_pool = { workspace = true } execution_layer = { workspace = true } beacon_processor = { workspace = true } parking_lot = { workspace = true } -environment = { workspace = true } [features] # NOTE: This can be run via cargo build --bin lighthouse --features network/disable-backfill diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index ad1c8867e2..d78091b313 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -758,7 +758,9 @@ impl NetworkBeaconProcessor { let blob_slot = verified_blob.slot(); let blob_index = verified_blob.id().index; - match self.chain.process_gossip_blob(verified_blob).await { + let result = self.chain.process_gossip_blob(verified_blob).await; + + match &result { Ok(AvailabilityProcessingStatus::Imported(block_root)) => { // Note: Reusing block imported metric here metrics::inc_counter(&metrics::BEACON_PROCESSOR_GOSSIP_BLOCK_IMPORTED_TOTAL); @@ -807,6 +809,16 @@ impl 
NetworkBeaconProcessor { ); } } + + // If a block is in the da_checker, sync may be awaiting an event for when the block is + // finally imported. A block can become imported after processing either a block or a blob. + // If importing a block results in `Imported`, notify sync. Do not notify of blob errors. + if matches!(result, Ok(AvailabilityProcessingStatus::Imported(_))) { + self.send_sync_message(SyncMessage::GossipBlockProcessResult { + block_root, + imported: true, + }); + } } /// Process the beacon block received from the gossip network and: @@ -2049,6 +2061,27 @@ impl NetworkBeaconProcessor { "attn_val_index_too_high", ); } + AttnError::CommitteeIndexNonZero(index) => { + /* + * The committee index of the attestation is not zero, which is invalid after Electra. + * + * The peer has published an invalid consensus message. + */ + debug!( + self.log, + "Committee index non zero"; + "peer_id" => %peer_id, + "block" => ?beacon_block_root, + "type" => ?attestation_type, + "committee_index" => index, + ); + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "attn_comm_index_non_zero", + ); + } AttnError::UnknownHeadBlock { beacon_block_root } => { trace!( self.log, @@ -2204,6 +2237,19 @@ impl NetworkBeaconProcessor { "attn_too_many_agg_bits", ); } + AttnError::NotExactlyOneCommitteeBitSet(_) => { + /* + * The attestation does not have exactly one committee bit set. + * + * The peer has published an invalid consensus message. + */ + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "attn_too_many_comm_bits", + ); + } AttnError::AttestsToFutureBlock { .. } => { /* * The beacon_block_root is from a higher slot than the attestation. diff --git a/beacon_node/network/src/subnet_service/attestation_subnets.rs b/beacon_node/network/src/subnet_service/attestation_subnets.rs index afe9815d67..830c43cbb1 100644 --- a/beacon_node/network/src/subnet_service/attestation_subnets.rs +++ b/beacon_node/network/src/subnet_service/attestation_subnets.rs @@ -29,6 +29,10 @@ pub(crate) const MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD: u64 = 2; /// Currently a whole slot ahead. const ADVANCE_SUBSCRIBE_SLOT_FRACTION: u32 = 1; +/// The number of slots after an aggregator duty at which we remove the entry from the +/// `aggregate_validators_on_subnet` delay map. +const UNSUBSCRIBE_AFTER_AGGREGATOR_DUTY: u32 = 2; + #[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)] pub(crate) enum SubscriptionKind { /// Long lived subscriptions.
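The `UNSUBSCRIBE_AFTER_AGGREGATOR_DUTY` constant added above feeds the retention window computed in the next hunk. A minimal sketch of that arithmetic, with illustrative mainnet-style numbers (12-second slots); the function name here is invented:

```rust
use std::time::Duration;

const UNSUBSCRIBE_AFTER_AGGREGATOR_DUTY: u32 = 2;

/// Keep the `aggregate_validators_on_subnet` entry until two slots after
/// the duty slot, so aggregates arriving shortly after the duty are still
/// recognised.
fn time_to_unsubscribe(time_to_duty_slot: Duration, slot_duration: Duration) -> Duration {
    time_to_duty_slot + slot_duration * UNSUBSCRIBE_AFTER_AGGREGATOR_DUTY
}

fn main() {
    // A duty 6s away with 12s slots keeps the entry alive for 30s.
    let d = time_to_unsubscribe(Duration::from_secs(6), Duration::from_secs(12));
    assert_eq!(d, Duration::from_secs(30));
}
```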
@@ -462,23 +466,27 @@ impl AttestationService { ) -> Result<(), &'static str> { let slot_duration = self.beacon_chain.slot_clock.slot_duration(); - // Calculate how long before we need to subscribe to the subnet. - let time_to_subscription_start = { - // The short time we schedule the subscription before it's actually required. This - // ensures we are subscribed on time, and allows consecutive subscriptions to the same - // subnet to overlap, reducing subnet churn. - let advance_subscription_duration = slot_duration / ADVANCE_SUBSCRIBE_SLOT_FRACTION; - // The time to the required slot. - let time_to_subscription_slot = self - .beacon_chain - .slot_clock - .duration_to_slot(slot) - .unwrap_or_default(); // If this is a past slot we will just get a 0 duration. - time_to_subscription_slot.saturating_sub(advance_subscription_duration) - }; + // The short time we schedule the subscription before it's actually required. This + // ensures we are subscribed on time, and allows consecutive subscriptions to the same + // subnet to overlap, reducing subnet churn. + let advance_subscription_duration = slot_duration / ADVANCE_SUBSCRIBE_SLOT_FRACTION; + // The time to the required slot. + let time_to_subscription_slot = self + .beacon_chain + .slot_clock + .duration_to_slot(slot) + .unwrap_or_default(); // If this is a past slot we will just get a 0 duration. + // Calculate how long before we need to subscribe to the subnet. + let time_to_subscription_start = + time_to_subscription_slot.saturating_sub(advance_subscription_duration); + + // The time after a duty slot at which the entry is no longer needed in the + // `aggregate_validators_on_subnet` delay map. + let time_to_unsubscribe = + time_to_subscription_slot + UNSUBSCRIBE_AFTER_AGGREGATOR_DUTY * slot_duration; if let Some(tracked_vals) = self.aggregate_validators_on_subnet.as_mut() { - tracked_vals.insert(ExactSubnet { subnet_id, slot }); + tracked_vals.insert_at(ExactSubnet { subnet_id, slot }, time_to_unsubscribe); } // If the subscription should be done in the future, schedule it. Otherwise subscribe diff --git a/beacon_node/network/src/sync/block_lookups/mod.rs b/beacon_node/network/src/sync/block_lookups/mod.rs index 94645197c9..f685b7e59d 100644 --- a/beacon_node/network/src/sync/block_lookups/mod.rs +++ b/beacon_node/network/src/sync/block_lookups/mod.rs @@ -39,7 +39,11 @@ pub const SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS: u8 = 4; /// Maximum time we allow a lookup to exist before assuming it is stuck and will never make /// progress. Assume the worse case processing time per block component set * times max depth. /// 15 * 2 * 32 = 16 minutes. -const LOOKUP_MAX_DURATION_SECS: usize = 15 * PARENT_DEPTH_TOLERANCE; +const LOOKUP_MAX_DURATION_STUCK_SECS: u64 = 15 * PARENT_DEPTH_TOLERANCE as u64; +/// The most common case of a child lookup without peers is receiving block components before the +/// attestation deadline while the node is lagging behind. Peers start attesting for the child +/// block at most 4 seconds into the slot, at which point the lookup should gain peers. +const LOOKUP_MAX_DURATION_NO_PEERS_SECS: u64 = 10; pub enum BlockComponent { Block(DownloadResult>>), @@ -81,6 +85,11 @@ pub struct BlockLookups { log: Logger, } +#[cfg(test)] +/// Tuple of `SingleLookupId`, requested block root, awaiting parent block root (if any), +/// and list of peers that claim to have imported this set of block components.
+pub(crate) type BlockLookupSummary = (Id, Hash256, Option<Hash256>, Vec<PeerId>); + impl BlockLookups { pub fn new(log: Logger) -> Self { Self { @@ -103,10 +112,17 @@ } #[cfg(test)] - pub(crate) fn active_single_lookups(&self) -> Vec<(Id, Hash256, Option<Hash256>)> { + pub(crate) fn active_single_lookups(&self) -> Vec<BlockLookupSummary> { self.single_block_lookups .iter() - .map(|(id, e)| (*id, e.block_root(), e.awaiting_parent())) + .map(|(id, l)| { + ( + *id, + l.block_root(), + l.awaiting_parent(), + l.all_peers().copied().collect(), + ) + }) .collect() } @@ -240,17 +256,11 @@ } // Do not re-request a block that is already being requested - if let Some((_, lookup)) = self + if let Some((&lookup_id, lookup)) = self .single_block_lookups .iter_mut() .find(|(_id, lookup)| lookup.is_for_block(block_root)) { - for peer in peers { - if lookup.add_peer(*peer) { - debug!(self.log, "Adding peer to existing single block lookup"; "block_root" => ?block_root, "peer" => ?peer); - } - } - if let Some(block_component) = block_component { let component_type = block_component.get_type(); let imported = lookup.add_child_components(block_component); @@ -258,6 +268,11 @@ debug!(self.log, "Lookup child component ignored"; "block_root" => ?block_root, "type" => component_type); } } + + if let Err(e) = self.add_peers_to_lookup_and_ancestors(lookup_id, peers) { + warn!(self.log, "Error adding peers to ancestor lookup"; "error" => ?e); + } + return true; } @@ -625,7 +640,11 @@ /// dropped. pub fn drop_lookup_and_children(&mut self, dropped_id: SingleLookupId) { if let Some(dropped_lookup) = self.single_block_lookups.remove(&dropped_id) { - debug!(self.log, "Dropping child lookup"; "id" => ?dropped_id, "block_root" => ?dropped_lookup.block_root()); + debug!(self.log, "Dropping lookup"; + "id" => ?dropped_id, + "block_root" => ?dropped_lookup.block_root(), + "awaiting_parent" => ?dropped_lookup.awaiting_parent(), + ); let child_lookups = self .single_block_lookups @@ -663,6 +682,9 @@ } false } + // If the lookup is unknown, do not log the request error. No need to drop child lookups or + // update metrics because the lookup does not exist. + Err(LookupRequestError::UnknownLookup) => false, Err(error) => { debug!(self.log, "Dropping lookup on request error"; "id" => id, "source" => source, "error" => ?error); metrics::inc_counter_vec(&metrics::SYNC_LOOKUP_DROPPED, &[error.into()]); @@ -689,6 +711,53 @@ ); } + /// Perform prune operations on lookups at a regular interval + pub fn prune_lookups(&mut self) { + self.drop_lookups_without_peers(); + self.drop_stuck_lookups(); + }
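`prune_lookups` runs two passes with different predicates. A condensed, self-contained model of the policy (stand-in `Lookup` type; the thresholds mirror `LOOKUP_MAX_DURATION_NO_PEERS_SECS` and `LOOKUP_MAX_DURATION_STUCK_SECS`):

```rust
use std::time::{Duration, Instant};

const NO_PEERS_TTL: Duration = Duration::from_secs(10); // LOOKUP_MAX_DURATION_NO_PEERS_SECS
const STUCK_TTL: Duration = Duration::from_secs(15 * 32); // LOOKUP_MAX_DURATION_STUCK_SECS

struct Lookup {
    created: Instant,
    peer_count: usize,
    awaiting_event: bool,
}

impl Lookup {
    fn should_prune(&self) -> bool {
        let age = self.created.elapsed();
        // Rule 1: peerless lookups are dropped only once they are old enough
        // AND not waiting on a download or processing event.
        let peerless = self.peer_count == 0 && !self.awaiting_event && age > NO_PEERS_TTL;
        // Rule 2: anything alive past the stuck deadline is dropped, whatever
        // state it is in, as a safety net against event-delivery bugs.
        peerless || age > STUCK_TTL
    }
}
```

The awaiting-event guard is what closes the race described in the doc comment that follows: a peerless lookup whose block is mid-processing is left alone.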
+ + /// Lookups without peers are allowed to exist for some time. See this common race condition: + /// + /// 1. Receive unknown block parent event + /// 2. Create child lookup with zero peers + /// 3. Parent is processed, before receiving any attestation for the child block + /// 4. Child lookup is attempted to make progress but has no peers + /// 5. We receive an attestation for the child block and add a peer to the child block lookup + /// + /// On step 4 we could drop the lookup because we attempt to issue a request with no peers + /// available. This has two issues: + /// - We may drop the lookup while some other block component is processing, triggering an + /// unknown lookup error. This can potentially cause unrelated child lookups to also be + /// dropped when calling `drop_lookup_and_children`. + /// - We lose all progress of the lookup, and have to re-download components that we may + /// already have cached. + /// + /// Instead, there is no downside to keeping lookups with no peers around for some time. If we + /// regularly prune them, it should not be a memory concern (TODO: maybe yes!). + fn drop_lookups_without_peers(&mut self) { + for (lookup_id, block_root) in self + .single_block_lookups + .values() + .filter(|lookup| { + // Do not drop lookups that are awaiting events, to prevent inconsistencies. If a + // lookup gets stuck, it will be eventually pruned by `drop_stuck_lookups` + lookup.has_no_peers() + && lookup.elapsed_since_created() + > Duration::from_secs(LOOKUP_MAX_DURATION_NO_PEERS_SECS) + && !lookup.is_awaiting_event() + }) + .map(|lookup| (lookup.id, lookup.block_root())) + .collect::<Vec<_>>() + { + debug!(self.log, "Dropping lookup with no peers"; + "id" => lookup_id, + "block_root" => ?block_root + ); + self.drop_lookup_and_children(lookup_id); + } + } + /// Safety mechanism to unstuck lookup sync. Lookup sync is purely event driven and depends on /// external components to feed it events to make progress. If there is a bug in network, in /// beacon processor, or here internally: lookups can get stuck forever. A stuck lookup can @@ -702,10 +771,10 @@ /// /// - One single clear warn level log per stuck incident /// - If the original bug is sporadic, it reduces the time a node is stuck from forever to 15 min - pub fn drop_stuck_lookups(&mut self) { + fn drop_stuck_lookups(&mut self) { // While loop to find and drop all disjoint trees of potentially stuck lookups. while let Some(stuck_lookup) = self.single_block_lookups.values().find(|lookup| { - lookup.elapsed_since_created() > Duration::from_secs(LOOKUP_MAX_DURATION_SECS as u64) + lookup.elapsed_since_created() > Duration::from_secs(LOOKUP_MAX_DURATION_STUCK_SECS) }) { let ancestor_stuck_lookup = match self.find_oldest_ancestor_lookup(stuck_lookup) { Ok(lookup) => lookup, @@ -739,9 +808,9 @@ /// Recursively find the oldest ancestor lookup of another lookup fn find_oldest_ancestor_lookup<'a>( &'a self, - stuck_lookup: &'a SingleBlockLookup, + lookup: &'a SingleBlockLookup, ) -> Result<&'a SingleBlockLookup, String> { - if let Some(awaiting_parent) = stuck_lookup.awaiting_parent() { + if let Some(awaiting_parent) = lookup.awaiting_parent() { if let Some(lookup) = self .single_block_lookups .values() @@ -754,7 +823,50 @@ )) } } else { - Ok(stuck_lookup) + Ok(lookup) + } + }
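A stand-in model (invented types) of how `find_oldest_ancestor_lookup` walks the `awaiting_parent` chain, written iteratively with a depth cap in place of recursion:

```rust
use std::collections::HashMap;

struct Lookup {
    block_root: u64, // stand-in for Hash256
    awaiting_parent: Option<u64>,
}

fn oldest_ancestor<'a>(
    lookups: &'a HashMap<u32, Lookup>,
    start: &'a Lookup,
) -> Result<&'a Lookup, String> {
    let mut current = start;
    // Cap iterations so a corrupted parent chain cannot loop forever.
    for _ in 0..=lookups.len() {
        let Some(parent_root) = current.awaiting_parent else {
            // No parent pending: this is the oldest ancestor.
            return Ok(current);
        };
        current = lookups
            .values()
            .find(|l| l.block_root == parent_root)
            .ok_or(format!("lookup references unknown parent {parent_root}"))?;
    }
    Err("cycle in lookup parent chain".to_string())
}
```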
+ + /// Adds peers to a lookup and its ancestors recursively. + /// Note: Takes a `lookup_id` as argument to allow recursion on mutable lookups, without having + /// to duplicate the code to add peers to a lookup + fn add_peers_to_lookup_and_ancestors( + &mut self, + lookup_id: SingleLookupId, + peers: &[PeerId], + ) -> Result<(), String> { + let lookup = self + .single_block_lookups + .get_mut(&lookup_id) + .ok_or(format!("Unknown lookup for id {lookup_id}"))?; + + for peer in peers { + if lookup.add_peer(*peer) { + debug!(self.log, "Adding peer to existing single block lookup"; + "block_root" => ?lookup.block_root(), + "peer" => ?peer + ); + } + } + + // We may choose to attempt to continue a lookup here. It is possible that a lookup had zero + // peers and after adding this set of peers it can make progress again. Note that this + // recursive function iterates from child to parent, so continuing the child first is weird. + // However, we choose to not attempt to continue the lookup for simplicity. It's not + // strictly required, just an optimization for a rare corner case. + + if let Some(parent_root) = lookup.awaiting_parent() { + if let Some((&parent_id, _)) = self + .single_block_lookups + .iter() + .find(|(_, l)| l.block_root() == parent_root) + { + self.add_peers_to_lookup_and_ancestors(parent_id, peers) + } else { + Err(format!("Lookup references unknown parent {parent_root:?}")) + } + } else { + Ok(()) } } } diff --git a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs index f587a98254..13efd36ab7 100644 --- a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs +++ b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs @@ -137,9 +137,17 @@ impl SingleBlockLookup { self.block_root() == block_root } - /// Get all unique peers that claim to have imported this set of block components - pub fn all_peers(&self) -> impl Iterator<Item = &PeerId> + '_ { - self.peers.iter() + /// Returns true if both the block and blob components have been processed. + pub fn both_components_processed(&self) -> bool { + self.block_request_state.state.is_processed() + && self.blob_request_state.state.is_processed() + } + + /// Returns true if this request is expecting some event to make progress + pub fn is_awaiting_event(&self) -> bool { + self.awaiting_parent.is_some() + || self.block_request_state.state.is_awaiting_event() + || self.blob_request_state.state.is_awaiting_event() } /// Makes progress on all requests of this lookup. Any error is not recoverable and must result @@ -189,13 +197,9 @@ } let Some(peer_id) = self.use_rand_available_peer() else { - if awaiting_parent { - // Allow lookups awaiting for a parent to have zero peers. If when the parent - // resolve they still have zero peers the lookup will fail gracefully. - return Ok(()); - } else { - return Err(LookupRequestError::NoPeers); - } + // Allow the lookup to have no peers and do nothing in that case. If the lookup + // still has no peers after some time, it will be dropped. + return Ok(()); }; let request = R::request_state_mut(self); @@ -207,7 +211,12 @@ request.get_state_mut().on_completed_request()? } // Sync will receive a future event to make progress on the request, do nothing now - LookupRequestResult::Pending => return Ok(()), + LookupRequestResult::Pending(reason) => { + request + .get_state_mut() + .update_awaiting_download_status(reason); + return Ok(()); + } } // Otherwise, attempt to progress awaiting processing @@ -226,18 +235,17 @@ Ok(()) } + /// Get all unique peers that claim to have imported this set of block components + pub fn all_peers(&self) -> impl Iterator<Item = &PeerId> + '_ { + self.peers.iter() + } + /// Add peer to all request states. The peer must be able to serve this request. /// Returns true if the peer was newly inserted into some request state. pub fn add_peer(&mut self, peer_id: PeerId) -> bool { self.peers.insert(peer_id) } - /// Returns true if the block has already been downloaded. - pub fn both_components_processed(&self) -> bool { - self.block_request_state.state.is_processed() - && self.blob_request_state.state.is_processed() - } - /// Remove peer from available peers. Return true if there are no more available peers and all /// requests are not expecting any future event (AwaitingDownload).
pub fn remove_peer(&mut self, peer_id: &PeerId) -> bool { @@ -301,7 +309,7 @@ pub struct DownloadResult { #[derive(PartialEq, Eq, IntoStaticStr)] pub enum State { - AwaitingDownload, + AwaitingDownload(&'static str), Downloading(ReqId), AwaitingProcess(DownloadResult), /// Request is processing, sent by lookup sync @@ -325,7 +333,7 @@ pub struct SingleLookupRequestState { impl SingleLookupRequestState { pub fn new() -> Self { Self { - state: State::AwaitingDownload, + state: State::AwaitingDownload("not started"), failed_processing: 0, failed_downloading: 0, } @@ -333,7 +341,7 @@ impl SingleLookupRequestState { pub fn is_awaiting_download(&self) -> bool { match self.state { - State::AwaitingDownload => true, + State::AwaitingDownload { .. } => true, State::Downloading { .. } | State::AwaitingProcess { .. } | State::Processing { .. } @@ -343,7 +351,7 @@ impl SingleLookupRequestState { pub fn is_processed(&self) -> bool { match self.state { - State::AwaitingDownload + State::AwaitingDownload { .. } | State::Downloading { .. } | State::AwaitingProcess { .. } | State::Processing { .. } => false, @@ -351,9 +359,27 @@ impl SingleLookupRequestState { } } + /// Returns true if we can expect some future event to progress this block component request + /// specifically. + pub fn is_awaiting_event(&self) -> bool { + match self.state { + // No event will progress this request specifically, but the request may be put on hold + // due to some external event + State::AwaitingDownload { .. } => false, + // Network will emit a download success / error event + State::Downloading { .. } => true, + // Not awaiting any external event + State::AwaitingProcess { .. } => false, + // Beacon processor will emit a processing result event + State::Processing { .. } => true, + // Request complete, no future event left + State::Processed { .. } => false, + } + } + pub fn peek_downloaded_data(&self) -> Option<&T> { match &self.state { - State::AwaitingDownload => None, + State::AwaitingDownload { .. } => None, State::Downloading { .. } => None, State::AwaitingProcess(result) => Some(&result.value), State::Processing(result) => Some(&result.value), @@ -364,7 +390,7 @@ impl SingleLookupRequestState { /// Switch to `AwaitingProcessing` if the request is in `AwaitingDownload` state, otherwise /// ignore. pub fn insert_verified_response(&mut self, result: DownloadResult) -> bool { - if let State::AwaitingDownload = &self.state { + if let State::AwaitingDownload { .. } = &self.state { self.state = State::AwaitingProcess(result); true } else { @@ -372,10 +398,18 @@ impl SingleLookupRequestState { } } + /// Append metadata on why this request is in AwaitingDownload status. Very helpful to debug + /// stuck lookups. Not fallible as it's purely informational. + pub fn update_awaiting_download_status(&mut self, new_status: &'static str) { + if let State::AwaitingDownload(status) = &mut self.state { + *status = new_status + } + } + /// Switch to `Downloading` if the request is in `AwaitingDownload` state, otherwise returns None. pub fn on_download_start(&mut self, req_id: ReqId) -> Result<(), LookupRequestError> { match &self.state { - State::AwaitingDownload => { + State::AwaitingDownload { .. 
} => { self.state = State::Downloading(req_id); Ok(()) } @@ -397,7 +431,7 @@ impl SingleLookupRequestState { }); } self.failed_downloading = self.failed_downloading.saturating_add(1); - self.state = State::AwaitingDownload; + self.state = State::AwaitingDownload("not started"); Ok(()) } other => Err(LookupRequestError::BadState(format!( @@ -461,7 +495,7 @@ impl SingleLookupRequestState { State::Processing(result) => { let peer_id = result.peer_id; self.failed_processing = self.failed_processing.saturating_add(1); - self.state = State::AwaitingDownload; + self.state = State::AwaitingDownload("not started"); Ok(peer_id) } other => Err(LookupRequestError::BadState(format!( @@ -485,7 +519,7 @@ impl SingleLookupRequestState { /// Mark a request as complete without any download or processing pub fn on_completed_request(&mut self) -> Result<(), LookupRequestError> { match &self.state { - State::AwaitingDownload => { + State::AwaitingDownload { .. } => { self.state = State::Processed; Ok(()) } @@ -517,7 +551,7 @@ impl std::fmt::Display for State { impl std::fmt::Debug for State { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { - Self::AwaitingDownload { .. } => write!(f, "AwaitingDownload"), + Self::AwaitingDownload(status) => write!(f, "AwaitingDownload({:?})", status), Self::Downloading(req_id) => write!(f, "Downloading({:?})", req_id), Self::AwaitingProcess(d) => write!(f, "AwaitingProcess({:?})", d.peer_id), Self::Processing(d) => write!(f, "Processing({:?})", d.peer_id), diff --git a/beacon_node/network/src/sync/block_lookups/tests.rs b/beacon_node/network/src/sync/block_lookups/tests.rs index 5a85e57f63..a607151bde 100644 --- a/beacon_node/network/src/sync/block_lookups/tests.rs +++ b/beacon_node/network/src/sync/block_lookups/tests.rs @@ -210,7 +210,7 @@ impl TestRig { self.sync_manager.handle_message(sync_message); } - fn active_single_lookups(&self) -> Vec<(Id, Hash256, Option)> { + fn active_single_lookups(&self) -> Vec { self.sync_manager.active_single_lookups() } @@ -252,6 +252,21 @@ impl TestRig { } } + fn assert_lookup_peers(&self, block_root: Hash256, mut expected_peers: Vec) { + let mut lookup = self + .sync_manager + .active_single_lookups() + .into_iter() + .find(|l| l.1 == block_root) + .unwrap_or_else(|| panic!("no lookup for {block_root}")); + lookup.3.sort(); + expected_peers.sort(); + assert_eq!( + lookup.3, expected_peers, + "unexpected peers on lookup {block_root}" + ); + } + fn insert_failed_chain(&mut self, block_root: Hash256) { self.sync_manager.insert_failed_chain(block_root); } @@ -270,7 +285,7 @@ impl TestRig { fn find_single_lookup_for(&self, block_root: Hash256) -> Id { self.active_single_lookups() .iter() - .find(|(_, b, _)| b == &block_root) + .find(|l| l.1 == block_root) .unwrap_or_else(|| panic!("no single block lookup found for {block_root}")) .0 } @@ -1305,6 +1320,26 @@ fn test_lookup_disconnection_peer_left() { rig.assert_single_lookups_count(1); } +#[test] +fn test_lookup_add_peers_to_parent() { + let mut r = TestRig::test_setup(); + let peer_id_1 = r.new_connected_peer(); + let peer_id_2 = r.new_connected_peer(); + let blocks = r.rand_blockchain(5); + let last_block_root = blocks.last().unwrap().canonical_root(); + // Create a chain of lookups + for block in &blocks { + r.trigger_unknown_parent_block(peer_id_1, block.clone()); + } + r.trigger_unknown_block_from_attestation(last_block_root, peer_id_2); + for block in blocks.iter().take(blocks.len() - 1) { + // Parent has the original unknown parent event peer + new peer + 
r.assert_lookup_peers(block.canonical_root(), vec![peer_id_1, peer_id_2]); + } + // Child lookup only has the unknown attestation peer + r.assert_lookup_peers(last_block_root, vec![peer_id_2]); +} + #[test] fn test_skip_creating_failed_parent_lookup() { let mut rig = TestRig::test_setup(); @@ -1452,13 +1487,16 @@ fn block_in_processing_cache_becomes_invalid() { let peer_id = r.new_connected_peer(); r.insert_block_to_processing_cache(block.clone().into()); r.trigger_unknown_block_from_attestation(block_root, peer_id); + // Should trigger blob request + let id = r.expect_blob_lookup_request(block_root); // Should not trigger block request r.expect_empty_network(); // Simulate invalid block, removing it from processing cache r.simulate_block_gossip_processing_becomes_invalid(block_root); // Should download block, then issue blobs request r.complete_lookup_block_download(block); - let id = r.expect_blob_lookup_request(block_root); + // Should not trigger block or blob request + r.expect_empty_network(); r.complete_lookup_block_import_valid(block_root, false); // Resolve blob and expect lookup completed r.complete_single_lookup_blob_lookup_valid(id, peer_id, blobs, true); @@ -1475,11 +1513,14 @@ fn block_in_processing_cache_becomes_valid_imported() { let peer_id = r.new_connected_peer(); r.insert_block_to_processing_cache(block.clone().into()); r.trigger_unknown_block_from_attestation(block_root, peer_id); + // Should trigger blob request + let id = r.expect_blob_lookup_request(block_root); // Should not trigger block request r.expect_empty_network(); // Resolve the block from processing step r.simulate_block_gossip_processing_becomes_valid_missing_components(block.into()); - let id = r.expect_blob_lookup_request(block_root); + // Should not trigger block or blob request + r.expect_empty_network(); // Resolve blob and expect lookup completed r.complete_single_lookup_blob_lookup_valid(id, peer_id, blobs, true); r.expect_no_active_lookups(); } diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 1162f63de0..4c1a1e6b67 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -56,6 +56,7 @@ use lighthouse_network::rpc::RPCError; use lighthouse_network::types::{NetworkGlobals, SyncState}; use lighthouse_network::SyncInfo; use lighthouse_network::{PeerAction, PeerId}; +use lru_cache::LRUTimeCache; use slog::{crit, debug, error, info, o, trace, warn, Logger}; use std::ops::Sub; use std::sync::Arc; @@ -72,6 +73,11 @@ use types::{BlobSidecar, EthSpec, Hash256, SignedBeaconBlock, Slot}; /// blocks for. pub const SLOT_IMPORT_TOLERANCE: usize = 32; +/// Suppress duplicated `UnknownBlockHashFromAttestation` events for some duration of time. In +/// practice peers are likely to send the same root during a single slot. 30 seconds is a rather +/// arbitrary number that covers a full slot, but allows recovery if sync gets stuck for a few slots. +const NOTIFIED_UNKNOWN_ROOT_EXPIRY_SECONDS: u64 = 30; + pub type Id = u32; #[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)] @@ -199,6 +205,10 @@ pub struct SyncManager { backfill_sync: BackFillSync, block_lookups: BlockLookups, + /// Debounce duplicated `UnknownBlockHashFromAttestation` events for the same (root, peer) + /// tuple. A peer may forward us thousands of attestations, each one triggering an individual + /// event. Only one event is useful; the rest generate log noise and waste cycles. + notified_unknown_roots: LRUTimeCache<(PeerId, Hash256)>, /// The logger for the import manager. log: Logger,
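A self-contained model of the 30-second dedup gate the next hunk wires in; `TimeCache` below is an invented stand-in for `lru_cache::LRUTimeCache`, whose exact eviction semantics may differ:

```rust
use std::collections::HashMap;
use std::hash::Hash;
use std::time::{Duration, Instant};

struct TimeCache<K> {
    ttl: Duration,
    seen: HashMap<K, Instant>,
}

impl<K: Hash + Eq> TimeCache<K> {
    fn new(ttl: Duration) -> Self {
        Self { ttl, seen: HashMap::new() }
    }

    /// Returns true only the first time `key` is seen within `ttl`.
    fn insert_if_new(&mut self, key: K) -> bool {
        let now = Instant::now();
        // Evict expired entries before checking membership.
        self.seen.retain(|_, t| now.duration_since(*t) < self.ttl);
        self.seen.insert(key, now).is_none()
    }
}

fn main() {
    let mut cache = TimeCache::new(Duration::from_secs(30));
    assert!(cache.insert_if_new(("peer-a", [0u8; 32])));
    assert!(!cache.insert_if_new(("peer-a", [0u8; 32]))); // duplicate suppressed
    assert!(cache.insert_if_new(("peer-b", [0u8; 32]))); // distinct (peer, root) passes
}
```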
@@ -262,12 +272,15 @@ impl SyncManager { log.new(o!("service" => "backfill_sync")), ), block_lookups: BlockLookups::new(log.new(o!("service"=> "lookup_sync"))), + notified_unknown_roots: LRUTimeCache::new(Duration::from_secs( + NOTIFIED_UNKNOWN_ROOT_EXPIRY_SECONDS, + )), log: log.clone(), } } #[cfg(test)] - pub(crate) fn active_single_lookups(&self) -> Vec<(Id, Hash256, Option<Hash256>)> { + pub(crate) fn active_single_lookups(&self) -> Vec<BlockLookupSummary> { self.block_lookups.active_single_lookups() } @@ -547,9 +560,10 @@ futures::stream::iter(ee_responsiveness_watch.await).flatten() }; - // LOOKUP_MAX_DURATION_SECS is 60 seconds. Logging every 30 seconds allows enough timely - visbility while being sparse and not increasing the debug log volume in a noticeable way - let mut interval = tokio::time::interval(Duration::from_secs(30)); + // min(LOOKUP_MAX_DURATION_*) is 15 seconds. The cost of calling prune_lookups more often is + // one iteration over the single lookups HashMap. This map is supposed to be very small + // (< 10 entries) unless there is a bug. + let mut prune_lookups_interval = tokio::time::interval(Duration::from_secs(15)); // process any inbound messages loop { @@ -560,8 +574,8 @@ Some(engine_state) = check_ee_stream.next(), if check_ee => { self.handle_new_execution_engine_state(engine_state); } - _ = interval.tick() => { - self.block_lookups.drop_stuck_lookups(); + _ = prune_lookups_interval.tick() => { + self.block_lookups.prune_lookups(); } } } @@ -622,7 +636,11 @@ ); } SyncMessage::UnknownBlockHashFromAttestation(peer_id, block_root) => { - self.handle_unknown_block_root(peer_id, block_root); + if !self.notified_unknown_roots.contains(&(peer_id, block_root)) { + self.notified_unknown_roots.insert((peer_id, block_root)); + debug!(self.log, "Received unknown block hash message"; "block_root" => ?block_root, "peer" => ?peer_id); + self.handle_unknown_block_root(peer_id, block_root); + } } SyncMessage::Disconnect(peer_id) => { debug!(self.log, "Received disconnected message"; "peer_id" => %peer_id); diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index fa1f50cee0..f3f82ee011 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -12,7 +12,7 @@ use crate::status::ToStatusMessage; use crate::sync::block_lookups::SingleLookupId; use crate::sync::manager::{BlockProcessType, SingleLookupReqId}; use beacon_chain::block_verification_types::RpcBlock; -use beacon_chain::{BeaconChain, BeaconChainTypes, EngineState}; +use beacon_chain::{BeaconChain, BeaconChainTypes, BlockProcessStatus, EngineState}; use fnv::FnvHashMap; use lighthouse_network::rpc::methods::BlobsByRangeRequest; use lighthouse_network::rpc::{BlocksByRangeRequest, GoodbyeReason, RPCError}; @@ -105,7 +105,7 @@ pub enum LookupRequestResult { /// that makes progress on the request. For example: request is processing from a different /// source (i.e. block received from gossip) and sync MUST receive an event with that processing /// result. - Pending, + Pending(&'static str), } /// Wraps a Network channel to employ various RPC related network functionality for the Sync manager. This includes management of a global RPC request Id.
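The `&'static str` payload added to `Pending` above exists purely for debugging: the lookup stores it in its `AwaitingDownload` state so stuck-lookup logs say why a request is on hold. A simplified sketch of that plumbing (stand-in enums, not the real types):

```rust
#[derive(Debug)]
enum State {
    AwaitingDownload(&'static str),
    Downloading(u32),
}

enum LookupRequestResult {
    RequestSent(u32),
    NoRequestNeeded,
    Pending(&'static str),
}

fn continue_request(state: &mut State, result: LookupRequestResult) {
    match result {
        LookupRequestResult::RequestSent(req_id) => *state = State::Downloading(req_id),
        LookupRequestResult::NoRequestNeeded => {}
        // Record *why* the request is on hold; informational only.
        LookupRequestResult::Pending(reason) => {
            if let State::AwaitingDownload(status) = state {
                *status = reason;
            }
        }
    }
}

fn main() {
    let mut state = State::AwaitingDownload("not started");
    continue_request(&mut state, LookupRequestResult::Pending("block in processing cache"));
    assert_eq!(
        format!("{state:?}"),
        r#"AwaitingDownload("block in processing cache")"#
    );
}
```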
@@ -337,26 +337,19 @@ impl SyncNetworkContext { peer_id: PeerId, block_root: Hash256, ) -> Result { - // da_checker includes block that are execution verified, but are missing components - if self - .chain - .data_availability_checker - .has_execution_valid_block(&block_root) - { - return Ok(LookupRequestResult::NoRequestNeeded); - } - - // reqresp_pre_import_cache includes blocks that may not be yet execution verified - if self - .chain - .reqresp_pre_import_cache - .read() - .contains_key(&block_root) - { - // A block is on the `reqresp_pre_import_cache` but NOT in the - // `data_availability_checker` only if it is actively processing. We can expect a future - // event with the result of processing + match self.chain.get_block_process_status(&block_root) { + // Unknown block, continue request to download + BlockProcessStatus::Unknown => {} + // Block is known and currently processing; expect a future event with the result of + // processing. + BlockProcessStatus::NotValidated { .. } => { + return Ok(LookupRequestResult::Pending("block in processing cache")) + } + // Block is fully validated. If it's not yet imported it's waiting for missing block + // components. Consider this request completed and do nothing. + BlockProcessStatus::ExecutionValidated { .. } => { + return Ok(LookupRequestResult::NoRequestNeeded) + } } let req_id = self.next_id(); @@ -401,16 +394,21 @@ downloaded_block_expected_blobs: Option<usize>, ) -> Result { let Some(expected_blobs) = downloaded_block_expected_blobs.or_else(|| { - self.chain - .data_availability_checker - .num_expected_blobs(&block_root) + // If the block is already being processed or fully validated, retrieve how many blobs + // it expects. Consider any stage of the block. If the block root has been validated, we + // can assert that this is the correct value of `blob_kzg_commitments_count`. + match self.chain.get_block_process_status(&block_root) { + BlockProcessStatus::Unknown => None, + BlockProcessStatus::NotValidated(block) + | BlockProcessStatus::ExecutionValidated(block) => Some(block.num_expected_blobs()), + } }) else { // Wait to download the block before downloading blobs. Then we can be sure that the // block has data, so there's no need to do "blind" requests for all possible blobs and // latter handle the case where if the peer sent no blobs, penalize. // - if `downloaded_block_expected_blobs` is Some = block is downloading or processing. // - if `num_expected_blobs` returns Some = block is processed. - return Ok(LookupRequestResult::Pending); + return Ok(LookupRequestResult::Pending("waiting for block download")); }; let imported_blob_indexes = self diff --git a/beacon_node/operation_pool/src/attestation.rs b/beacon_node/operation_pool/src/attestation.rs index 91fd00a397..c6ed6eb7f6 100644 --- a/beacon_node/operation_pool/src/attestation.rs +++ b/beacon_node/operation_pool/src/attestation.rs @@ -1,4 +1,4 @@ -use crate::attestation_storage::{AttestationRef, CompactIndexedAttestation}; +use crate::attestation_storage::{CompactAttestationRef, CompactIndexedAttestation}; use crate::max_cover::MaxCover; use crate::reward_cache::RewardCache; use state_processing::common::{ @@ -14,14 +14,14 @@ use types::{ #[derive(Debug, Clone)] pub struct AttMaxCover<'a, E: EthSpec> { /// Underlying attestation. - pub att: AttestationRef<'a, E>, + pub att: CompactAttestationRef<'a, E>, /// Mapping of validator indices and their rewards.
pub fresh_validators_rewards: HashMap, } impl<'a, E: EthSpec> AttMaxCover<'a, E> { pub fn new( - att: AttestationRef<'a, E>, + att: CompactAttestationRef<'a, E>, state: &BeaconState, reward_cache: &'a RewardCache, total_active_balance: u64, @@ -36,7 +36,7 @@ impl<'a, E: EthSpec> AttMaxCover<'a, E> { /// Initialise an attestation cover object for base/phase0 hard fork. pub fn new_for_base( - att: AttestationRef<'a, E>, + att: CompactAttestationRef<'a, E>, state: &BeaconState, base_state: &BeaconStateBase, total_active_balance: u64, @@ -69,7 +69,7 @@ impl<'a, E: EthSpec> AttMaxCover<'a, E> { /// Initialise an attestation cover object for Altair or later. pub fn new_for_altair_deneb( - att: AttestationRef<'a, E>, + att: CompactAttestationRef<'a, E>, state: &BeaconState, reward_cache: &'a RewardCache, spec: &ChainSpec, @@ -119,14 +119,14 @@ impl<'a, E: EthSpec> AttMaxCover<'a, E> { impl<'a, E: EthSpec> MaxCover for AttMaxCover<'a, E> { type Object = Attestation; - type Intermediate = AttestationRef<'a, E>; + type Intermediate = CompactAttestationRef<'a, E>; type Set = HashMap; - fn intermediate(&self) -> &AttestationRef<'a, E> { + fn intermediate(&self) -> &CompactAttestationRef<'a, E> { &self.att } - fn convert_to_object(att_ref: &AttestationRef<'a, E>) -> Attestation { + fn convert_to_object(att_ref: &CompactAttestationRef<'a, E>) -> Attestation { att_ref.clone_as_attestation() } @@ -153,7 +153,7 @@ impl<'a, E: EthSpec> MaxCover for AttMaxCover<'a, E> { /// executing the `retain` when the `committee_bits` of the two attestations intersect. fn update_covering_set( &mut self, - best_att: &AttestationRef<'a, E>, + best_att: &CompactAttestationRef<'a, E>, covered_validators: &HashMap, ) { if self.att.data.slot == best_att.data.slot && self.att.data.index == best_att.data.index { @@ -177,7 +177,7 @@ impl<'a, E: EthSpec> MaxCover for AttMaxCover<'a, E> { /// /// This isn't optimal, but with the Altair fork this code is obsolete and not worth upgrading. 
pub fn earliest_attestation_validators( - attestation: &AttestationRef, + attestation: &CompactAttestationRef, state: &BeaconState, base_state: &BeaconStateBase, ) -> BitList { diff --git a/beacon_node/operation_pool/src/attestation_storage.rs b/beacon_node/operation_pool/src/attestation_storage.rs index f06da2afb1..43b1c3abbb 100644 --- a/beacon_node/operation_pool/src/attestation_storage.rs +++ b/beacon_node/operation_pool/src/attestation_storage.rs @@ -41,9 +41,8 @@ pub struct SplitAttestation { pub indexed: CompactIndexedAttestation, } -// TODO(electra): rename this type #[derive(Debug, Clone)] -pub struct AttestationRef<'a, E: EthSpec> { +pub struct CompactAttestationRef<'a, E: EthSpec> { pub checkpoint: &'a CheckpointKey, pub data: &'a CompactAttestationData, pub indexed: &'a CompactIndexedAttestation, @@ -97,8 +96,8 @@ impl SplitAttestation { } } - pub fn as_ref(&self) -> AttestationRef { - AttestationRef { + pub fn as_ref(&self) -> CompactAttestationRef { + CompactAttestationRef { checkpoint: &self.checkpoint, data: &self.data, indexed: &self.indexed, @@ -106,7 +105,7 @@ impl SplitAttestation { } } -impl<'a, E: EthSpec> AttestationRef<'a, E> { +impl<'a, E: EthSpec> CompactAttestationRef<'a, E> { pub fn attestation_data(&self) -> AttestationData { AttestationData { slot: self.data.slot, @@ -171,7 +170,7 @@ impl CompactIndexedAttestation { } } - pub fn aggregate(&mut self, other: &Self) { + pub fn aggregate(&mut self, other: &Self) -> Option<()> { match (self, other) { (CompactIndexedAttestation::Base(this), CompactIndexedAttestation::Base(other)) => { this.aggregate(other) @@ -181,7 +180,7 @@ impl CompactIndexedAttestation { CompactIndexedAttestation::Electra(other), ) => this.aggregate_same_committee(other), // TODO(electra) is a mix of electra and base compact indexed attestations an edge case we need to deal with? - _ => (), + _ => None, } } } @@ -193,7 +192,7 @@ impl CompactIndexedAttestationBase { .is_zero() } - pub fn aggregate(&mut self, other: &Self) { + pub fn aggregate(&mut self, other: &Self) -> Option<()> { self.attesting_indices = self .attesting_indices .drain(..) @@ -202,6 +201,8 @@ impl CompactIndexedAttestationBase { .collect(); self.aggregation_bits = self.aggregation_bits.union(&other.aggregation_bits); self.signature.add_assign_aggregate(&other.signature); + + Some(()) } } @@ -215,9 +216,11 @@ impl CompactIndexedAttestationElectra { .is_zero() } - pub fn aggregate_same_committee(&mut self, other: &Self) { + pub fn aggregate_same_committee(&mut self, other: &Self) -> Option<()> { // TODO(electra): remove assert in favour of Result - assert_eq!(self.committee_bits, other.committee_bits); + if self.committee_bits != other.committee_bits { + return None; + } self.aggregation_bits = self.aggregation_bits.union(&other.aggregation_bits); self.attesting_indices = self .attesting_indices @@ -226,34 +229,48 @@ impl CompactIndexedAttestationElectra { .dedup() .collect(); self.signature.add_assign_aggregate(&other.signature); + Some(()) } - pub fn aggregate_with_disjoint_committees(&mut self, other: &Self) { - // TODO(electra): remove asserts or use Result - assert!(self + pub fn aggregate_with_disjoint_committees(&mut self, other: &Self) -> Option<()> { + if !self .committee_bits .intersection(&other.committee_bits) - .is_zero(),); + .is_zero() + { + return None; + } // The attestation being aggregated in must only have 1 committee bit set. 
- assert_eq!(other.committee_bits.num_set_bits(), 1); + if other.committee_bits.num_set_bits() != 1 { + return None; + } + // Check we are aggregating in increasing committee index order (so we can append // aggregation bits). - assert!(self.committee_bits.highest_set_bit() < other.committee_bits.highest_set_bit()); + if self.committee_bits.highest_set_bit() >= other.committee_bits.highest_set_bit() { + return None; + } self.committee_bits = self.committee_bits.union(&other.committee_bits); - self.aggregation_bits = - bitlist_extend(&self.aggregation_bits, &other.aggregation_bits).unwrap(); - self.attesting_indices = self - .attesting_indices - .drain(..) - .merge(other.attesting_indices.iter().copied()) - .dedup() - .collect(); - self.signature.add_assign_aggregate(&other.signature); + if let Some(agg_bits) = bitlist_extend(&self.aggregation_bits, &other.aggregation_bits) { + self.aggregation_bits = agg_bits; + + self.attesting_indices = self + .attesting_indices + .drain(..) + .merge(other.attesting_indices.iter().copied()) + .dedup() + .collect(); + self.signature.add_assign_aggregate(&other.signature); + + return Some(()); + } + + None } - pub fn committee_index(&self) -> u64 { - *self.get_committee_indices().first().unwrap_or(&0u64) + pub fn committee_index(&self) -> Option { + self.get_committee_indices().first().copied() } pub fn get_committee_indices(&self) -> Vec { @@ -350,27 +367,28 @@ impl AttestationMap { continue; } }; - let committee_index = electra_attestation.committee_index(); - if let Some(existing_attestation) = - best_attestations_by_committee.get_mut(&committee_index) - { - // Search for the best (most aggregation bits) attestation for this committee - // index. - if electra_attestation.aggregation_bits.num_set_bits() - > existing_attestation.aggregation_bits.num_set_bits() + if let Some(committee_index) = electra_attestation.committee_index() { + if let Some(existing_attestation) = + best_attestations_by_committee.get_mut(&committee_index) { - // New attestation is better than the previously known one for this - // committee. Replace it. - std::mem::swap(existing_attestation, &mut electra_attestation); + // Search for the best (most aggregation bits) attestation for this committee + // index. + if electra_attestation.aggregation_bits.num_set_bits() + > existing_attestation.aggregation_bits.num_set_bits() + { + // New attestation is better than the previously known one for this + // committee. Replace it. + std::mem::swap(existing_attestation, &mut electra_attestation); + } + // Put the inferior attestation into the list of aggregated attestations + // without performing any cross-committee aggregation. + aggregated_attestations + .push(CompactIndexedAttestation::Electra(electra_attestation)); + } else { + // First attestation seen for this committee. Place it in the map + // provisionally. + best_attestations_by_committee.insert(committee_index, electra_attestation); } - // Put the inferior attestation into the list of aggregated attestations - // without performing any cross-committee aggregation. - aggregated_attestations - .push(CompactIndexedAttestation::Electra(electra_attestation)); - } else { - // First attestation seen for this committee. Place it in the map - // provisionally. 
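// A compact, runnable model of the fallible aggregation above, using a u64
// bitmask where the real code uses a `BitVector` of committee bits. The type
// and field names here are illustrative stand-ins, but the three guards map
// one-to-one onto the `None` returns that replaced the old asserts.
#[derive(Debug)]
struct DisjointAgg {
    committee_bits: u64,
    attesting_indices: Vec<u64>,
}

impl DisjointAgg {
    fn aggregate_with_disjoint_committees(&mut self, other: &Self) -> Option<()> {
        let highest_bit = |bits: u64| u64::BITS - bits.leading_zeros();
        // Guard 1: committees must be disjoint.
        if self.committee_bits & other.committee_bits != 0 {
            return None;
        }
        // Guard 2: the incoming attestation must cover exactly one committee.
        if other.committee_bits.count_ones() != 1 {
            return None;
        }
        // Guard 3: aggregate in increasing committee order, so aggregation
        // bits can simply be appended.
        if highest_bit(self.committee_bits) >= highest_bit(other.committee_bits) {
            return None;
        }
        self.committee_bits |= other.committee_bits;
        self.attesting_indices.extend(&other.attesting_indices);
        self.attesting_indices.sort_unstable();
        self.attesting_indices.dedup();
        Some(())
    }
}

fn main() {
    let mut agg = DisjointAgg { committee_bits: 0b001, attesting_indices: vec![7, 9] };
    let next = DisjointAgg { committee_bits: 0b010, attesting_indices: vec![11] };
    assert!(agg.aggregate_with_disjoint_committees(&next).is_some());
    // Re-aggregating the same committee now fails instead of panicking.
    assert!(agg.aggregate_with_disjoint_committees(&next).is_none());
}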
- best_attestations_by_committee.insert(committee_index, electra_attestation); } } @@ -399,7 +417,7 @@ impl AttestationMap { pub fn get_attestations<'a>( &'a self, checkpoint_key: &'a CheckpointKey, - ) -> impl Iterator> + 'a { + ) -> impl Iterator> + 'a { self.checkpoint_map .get(checkpoint_key) .into_iter() @@ -407,7 +425,7 @@ impl AttestationMap { } /// Iterate all attestations in the map. - pub fn iter(&self) -> impl Iterator> { + pub fn iter(&self) -> impl Iterator> { self.checkpoint_map .iter() .flat_map(|(checkpoint_key, attestation_map)| attestation_map.iter(checkpoint_key)) @@ -438,9 +456,9 @@ impl AttestationDataMap { pub fn iter<'a>( &'a self, checkpoint_key: &'a CheckpointKey, - ) -> impl Iterator> + 'a { + ) -> impl Iterator> + 'a { self.attestations.iter().flat_map(|(data, vec_indexed)| { - vec_indexed.iter().map(|indexed| AttestationRef { + vec_indexed.iter().map(|indexed| CompactAttestationRef { checkpoint: checkpoint_key, data, indexed, diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index daddbf7665..c7659651da 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -11,11 +11,10 @@ mod sync_aggregate_id; pub use crate::bls_to_execution_changes::ReceivedPreCapella; pub use attestation::{earliest_attestation_validators, AttMaxCover}; -pub use attestation_storage::{AttestationRef, SplitAttestation}; +pub use attestation_storage::{CompactAttestationRef, SplitAttestation}; pub use max_cover::MaxCover; pub use persistence::{ - PersistedOperationPool, PersistedOperationPoolV12, PersistedOperationPoolV14, - PersistedOperationPoolV15, PersistedOperationPoolV5, + PersistedOperationPool, PersistedOperationPoolV15, PersistedOperationPoolV20, }; pub use reward_cache::RewardCache; use state_processing::epoch_cache::is_epoch_cache_initialized; @@ -228,7 +227,7 @@ impl OperationPool { state: &'a BeaconState, reward_cache: &'a RewardCache, total_active_balance: u64, - validity_filter: impl FnMut(&AttestationRef<'a, E>) -> bool + Send, + validity_filter: impl FnMut(&CompactAttestationRef<'a, E>) -> bool + Send, spec: &'a ChainSpec, ) -> impl Iterator> + Send { all_attestations @@ -252,8 +251,8 @@ impl OperationPool { pub fn get_attestations( &self, state: &BeaconState, - prev_epoch_validity_filter: impl for<'a> FnMut(&AttestationRef<'a, E>) -> bool + Send, - curr_epoch_validity_filter: impl for<'a> FnMut(&AttestationRef<'a, E>) -> bool + Send, + prev_epoch_validity_filter: impl for<'a> FnMut(&CompactAttestationRef<'a, E>) -> bool + Send, + curr_epoch_validity_filter: impl for<'a> FnMut(&CompactAttestationRef<'a, E>) -> bool + Send, spec: &ChainSpec, ) -> Result>, OpPoolError> { let fork_name = state.fork_name_unchecked(); @@ -1282,9 +1281,7 @@ mod release_tests { for att in &best_attestations { match fork_name { ForkName::Electra => { - // TODO(electra) some attestations only have 2 or 3 agg bits set - // others have 5 - assert!(att.num_set_aggregation_bits() >= 2); + assert!(att.num_set_aggregation_bits() >= small_step_size); } _ => { assert!(att.num_set_aggregation_bits() >= big_step_size); diff --git a/beacon_node/operation_pool/src/persistence.rs b/beacon_node/operation_pool/src/persistence.rs index 6fd8a6cc3c..99cb1aafbc 100644 --- a/beacon_node/operation_pool/src/persistence.rs +++ b/beacon_node/operation_pool/src/persistence.rs @@ -1,4 +1,3 @@ -use crate::attestation_id::AttestationId; use crate::attestation_storage::AttestationMap; use crate::bls_to_execution_changes::{BlsToExecutionChanges, 
ReceivedPreCapella}; use crate::sync_aggregate_id::SyncAggregateId; @@ -12,6 +11,7 @@ use state_processing::SigVerifiedOp; use std::collections::HashSet; use std::mem; use store::{DBColumn, Error as StoreError, StoreItem}; +use types::attestation::AttestationOnDisk; use types::*; type PersistedSyncContributions = Vec<(SyncAggregateId, Vec>)>; @@ -21,7 +21,7 @@ type PersistedSyncContributions = Vec<(SyncAggregateId, Vec = Vec<(SyncAggregateId, Vec { - /// [DEPRECATED] Mapping from attestation ID to attestation mappings. - #[superstruct(only(V5))] - pub attestations_v5: Vec<(AttestationId, Vec>)>, + #[superstruct(only(V15))] + pub attestations_v15: Vec<(AttestationBase, Vec)>, /// Attestations and their attesting indices. - #[superstruct(only(V12, V14, V15))] - pub attestations: Vec<(Attestation, Vec)>, + #[superstruct(only(V20))] + pub attestations: Vec<(AttestationOnDisk, Vec)>, /// Mapping from sync contribution ID to sync contributions and aggregate. pub sync_contributions: PersistedSyncContributions, - /// TODO(electra): we've made a DB change here!!! + #[superstruct(only(V15))] + pub attester_slashings_v15: Vec, E>>, /// Attester slashings. - #[superstruct(only(V12, V14, V15))] + #[superstruct(only(V20))] pub attester_slashings: Vec, E>>, - /// [DEPRECATED] Proposer slashings. - #[superstruct(only(V5))] - pub proposer_slashings_v5: Vec, /// Proposer slashings with fork information. - #[superstruct(only(V12, V14, V15))] pub proposer_slashings: Vec>, - /// [DEPRECATED] Voluntary exits. - #[superstruct(only(V5))] - pub voluntary_exits_v5: Vec, /// Voluntary exits with fork information. - #[superstruct(only(V12, V14, V15))] pub voluntary_exits: Vec>, /// BLS to Execution Changes - #[superstruct(only(V14, V15))] pub bls_to_execution_changes: Vec>, /// Validator indices with BLS to Execution Changes to be broadcast at the /// Capella fork. - #[superstruct(only(V15))] pub capella_bls_change_broadcast_indices: Vec, } @@ -73,7 +63,7 @@ impl PersistedOperationPool { .iter() .map(|att| { ( - att.clone_as_attestation(), + AttestationOnDisk::from(att.clone_as_attestation()), att.indexed.attesting_indices().clone(), ) }) @@ -121,7 +111,7 @@ impl PersistedOperationPool { .copied() .collect(); - PersistedOperationPool::V15(PersistedOperationPoolV15 { + PersistedOperationPool::V20(PersistedOperationPoolV20 { attestations, sync_contributions, attester_slashings, @@ -134,56 +124,86 @@ impl PersistedOperationPool { /// Reconstruct an `OperationPool`. pub fn into_operation_pool(mut self) -> Result, OpPoolError> { - let attester_slashings = RwLock::new(self.attester_slashings()?.iter().cloned().collect()); + let attester_slashings = match &self { + PersistedOperationPool::V15(pool_v15) => RwLock::new( + pool_v15 + .attester_slashings_v15 + .iter() + .map(|slashing| slashing.clone().into()) + .collect(), + ), + PersistedOperationPool::V20(pool_v20) => { + RwLock::new(pool_v20.attester_slashings.iter().cloned().collect()) + } + }; + let proposer_slashings = RwLock::new( - self.proposer_slashings()? + self.proposer_slashings() .iter() .cloned() .map(|slashing| (slashing.as_inner().proposer_index(), slashing)) .collect(), ); let voluntary_exits = RwLock::new( - self.voluntary_exits()? 
+ self.voluntary_exits() .iter() .cloned() .map(|exit| (exit.as_inner().message.validator_index, exit)) .collect(), ); let sync_contributions = RwLock::new(self.sync_contributions().iter().cloned().collect()); - let attestations = match self { - PersistedOperationPool::V5(_) | PersistedOperationPool::V12(_) => { - return Err(OpPoolError::IncorrectOpPoolVariant) - } - PersistedOperationPool::V14(_) | PersistedOperationPool::V15(_) => { + let attestations = match &self { + PersistedOperationPool::V15(pool_v15) => { let mut map = AttestationMap::default(); - for (att, attesting_indices) in self.attestations()?.clone() { + for (att, attesting_indices) in + pool_v15 + .attestations_v15 + .iter() + .map(|(att, attesting_indices)| { + (Attestation::Base(att.clone()), attesting_indices.clone()) + }) + { + map.insert(att, attesting_indices); + } + RwLock::new(map) + } + PersistedOperationPool::V20(pool_v20) => { + let mut map = AttestationMap::default(); + for (att, attesting_indices) in + pool_v20 + .attestations + .iter() + .map(|(att, attesting_indices)| { + ( + AttestationRef::from(att.to_ref()).clone_as_attestation(), + attesting_indices.clone(), + ) + }) + { map.insert(att, attesting_indices); } RwLock::new(map) } }; + let mut bls_to_execution_changes = BlsToExecutionChanges::default(); - if let Ok(persisted_changes) = self.bls_to_execution_changes_mut() { - let persisted_changes = mem::take(persisted_changes); + let persisted_changes = mem::take(self.bls_to_execution_changes_mut()); + let broadcast_indices: HashSet<_> = + mem::take(self.capella_bls_change_broadcast_indices_mut()) + .into_iter() + .collect(); - let broadcast_indices = - if let Ok(indices) = self.capella_bls_change_broadcast_indices_mut() { - mem::take(indices).into_iter().collect() - } else { - HashSet::new() - }; - - for bls_to_execution_change in persisted_changes { - let received_pre_capella = if broadcast_indices - .contains(&bls_to_execution_change.as_inner().message.validator_index) - { - ReceivedPreCapella::Yes - } else { - ReceivedPreCapella::No - }; - bls_to_execution_changes.insert(bls_to_execution_change, received_pre_capella); - } + for bls_to_execution_change in persisted_changes { + let received_pre_capella = if broadcast_indices + .contains(&bls_to_execution_change.as_inner().message.validator_index) + { + ReceivedPreCapella::Yes + } else { + ReceivedPreCapella::No + }; + bls_to_execution_changes.insert(bls_to_execution_change, received_pre_capella); } + let op_pool = OperationPool { attestations, sync_contributions, @@ -198,48 +218,6 @@ impl PersistedOperationPool { } } -impl StoreItem for PersistedOperationPoolV5 { - fn db_column() -> DBColumn { - DBColumn::OpPool - } - - fn as_store_bytes(&self) -> Vec { - self.as_ssz_bytes() - } - - fn from_store_bytes(bytes: &[u8]) -> Result { - PersistedOperationPoolV5::from_ssz_bytes(bytes).map_err(Into::into) - } -} - -impl StoreItem for PersistedOperationPoolV12 { - fn db_column() -> DBColumn { - DBColumn::OpPool - } - - fn as_store_bytes(&self) -> Vec { - self.as_ssz_bytes() - } - - fn from_store_bytes(bytes: &[u8]) -> Result { - PersistedOperationPoolV12::from_ssz_bytes(bytes).map_err(Into::into) - } -} - -impl StoreItem for PersistedOperationPoolV14 { - fn db_column() -> DBColumn { - DBColumn::OpPool - } - - fn as_store_bytes(&self) -> Vec { - self.as_ssz_bytes() - } - - fn from_store_bytes(bytes: &[u8]) -> Result { - PersistedOperationPoolV14::from_ssz_bytes(bytes).map_err(Into::into) - } -} - impl StoreItem for PersistedOperationPoolV15 { fn db_column() -> 
DBColumn { DBColumn::OpPool @@ -254,6 +232,20 @@ impl StoreItem for PersistedOperationPoolV15 { } } +impl StoreItem for PersistedOperationPoolV20 { + fn db_column() -> DBColumn { + DBColumn::OpPool + } + + fn as_store_bytes(&self) -> Vec { + self.as_ssz_bytes() + } + + fn from_store_bytes(bytes: &[u8]) -> Result { + PersistedOperationPoolV20::from_ssz_bytes(bytes).map_err(Into::into) + } +} + /// Deserialization for `PersistedOperationPool` defaults to `PersistedOperationPool::V12`. impl StoreItem for PersistedOperationPool { fn db_column() -> DBColumn { diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 81c2196b75..dbfda2d530 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -1,12 +1,16 @@ -use clap::{App, Arg, ArgGroup}; +use std::time::Duration; + +use clap::{builder::ArgPredicate, crate_version, Arg, ArgAction, ArgGroup, Command}; +use clap_utils::{get_color_style, FLAG_HEADER}; use strum::VariantNames; -pub fn cli_app<'a, 'b>() -> App<'a, 'b> { - App::new("beacon_node") - .visible_aliases(&["b", "bn", "beacon"]) +pub fn cli_app() -> Command { + Command::new("beacon_node") + .display_order(0) + .visible_aliases(["b", "bn", "beacon"]) .version(crate_version!()) .author("Sigma Prime ") - .setting(clap::AppSettings::ColoredHelp) + .styles(get_color_style()) .about("The primary component which connects to the Ethereum 2.0 P2P network and \ downloads, verifies and stores blocks. Provides a HTTP API for querying \ the beacon chain and publishing messages to the network.") @@ -14,68 +18,91 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { * Configuration directory locations. */ .arg( - Arg::with_name("network-dir") + Arg::new("help") + .long("help") + .short('h') + .help("Prints help information") + .action(ArgAction::HelpLong) + .display_order(0) + .help_heading(FLAG_HEADER) + ) + .arg( + Arg::new("network-dir") .long("network-dir") .value_name("DIR") .help("Data directory for network keys. Defaults to network/ inside the beacon node \ dir.") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("freezer-dir") + Arg::new("freezer-dir") .long("freezer-dir") .value_name("DIR") .help("Data directory for the freezer database.") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("blobs-dir") + Arg::new("blobs-dir") .long("blobs-dir") .value_name("DIR") .help("Data directory for the blobs database.") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) /* * Network parameters. */ .arg( - Arg::with_name("subscribe-all-subnets") + Arg::new("subscribe-all-subnets") .long("subscribe-all-subnets") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .help("Subscribe to all subnets regardless of validator count. \ This will also advertise the beacon node as being long-lived subscribed to all subnets.") - .takes_value(false), + .display_order(0) ) .arg( - Arg::with_name("import-all-attestations") + Arg::new("import-all-attestations") .long("import-all-attestations") .help("Import and aggregate all attestations, regardless of validator subscriptions. 
\ This will only import attestations from already-subscribed subnets, use with \ --subscribe-all-subnets to ensure all attestations are received for import.") - .takes_value(false), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .global(true) + .display_order(0) ) .arg( - Arg::with_name("disable-packet-filter") + Arg::new("disable-packet-filter") .long("disable-packet-filter") .help("Disables the discovery packet filter. Useful for testing in smaller networks") - .takes_value(false), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("shutdown-after-sync") + Arg::new("shutdown-after-sync") .long("shutdown-after-sync") .help("Shutdown beacon node as soon as sync is completed. Backfill sync will \ not be performed before shutdown.") - .takes_value(false), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("zero-ports") + Arg::new("zero-ports") .long("zero-ports") - .short("z") + .short('z') .help("Sets all listening TCP/UDP ports to 0, allowing the OS to choose some \ arbitrary free ports.") - .takes_value(false), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("listen-address") + Arg::new("listen-address") .long("listen-address") .value_name("ADDRESS") .help("The address lighthouse will listen for UDP and TCP connections. To listen \ @@ -86,13 +113,13 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { - --listen-address '0.0.0.0' --listen-address '::' will listen over both \ IPv4 and IPv6. The order of the given addresses is not relevant. However, \ multiple IPv4, or multiple IPv6 addresses will not be accepted.") - .multiple(true) - .max_values(2) + .action(ArgAction::Append) + .num_args(0..=2) .default_value("0.0.0.0") - .takes_value(true) + .display_order(0) ) .arg( - Arg::with_name("port") + Arg::new("port") .long("port") .value_name("PORT") .help("The TCP/UDP ports to listen on. There are two UDP ports. \ @@ -100,134 +127,153 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { --discovery-port flag and the quic port can be modified by the --quic-port flag. If listening over both IPv4 and IPv6 the --port flag \ will apply to the IPv4 address and --port6 to the IPv6 address.") .default_value("9000") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("port6") + Arg::new("port6") .long("port6") .value_name("PORT") .help("The TCP/UDP ports to listen on over IPv6 when listening over both IPv4 and \ IPv6. Defaults to 9090 when required. The Quic UDP port will be set to this value + 1.") .default_value("9090") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("discovery-port") + Arg::new("discovery-port") .long("discovery-port") .value_name("PORT") .help("The UDP port that discovery will listen on. Defaults to `port`") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("quic-port") + Arg::new("quic-port") .long("quic-port") .value_name("PORT") .help("The UDP port that quic will listen on. Defaults to `port` + 1") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("discovery-port6") + Arg::new("discovery-port6") .long("discovery-port6") .value_name("PORT") .help("The UDP port that discovery will listen on over IPv6 if listening over \ both IPv4 and IPv6. 
Defaults to `port6`") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("quic-port6") + Arg::new("quic-port6") .long("quic-port6") .value_name("PORT") .help("The UDP port that quic will listen on over IPv6 if listening over \ both IPv4 and IPv6. Defaults to `port6` + 1") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("target-peers") + Arg::new("target-peers") .long("target-peers") .help("The target number of peers.") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("boot-nodes") + Arg::new("boot-nodes") .long("boot-nodes") .allow_hyphen_values(true) .value_name("ENR/MULTIADDR LIST") .help("One or more comma-delimited base64-encoded ENR's to bootstrap the p2p network. Multiaddr is also supported.") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("network-load") + Arg::new("network-load") .long("network-load") .value_name("INTEGER") .help("Lighthouse's network can be tuned for bandwidth/performance. Setting this to a high value, will increase the bandwidth lighthouse uses, increasing the likelihood of redundant information in exchange for faster communication. This can increase profit of validators marginally by receiving messages faster on the network. Lower values decrease bandwidth usage, but makes communication slower which can lead to validator performance reduction. Values are in the range [1,5].") .default_value("3") - .set(clap::ArgSettings::Hidden) - .takes_value(true), + .hide(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("disable-upnp") + Arg::new("disable-upnp") .long("disable-upnp") .help("Disables UPnP support. Setting this will prevent Lighthouse from attempting to automatically establish external port mappings.") - .takes_value(false), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("private") + Arg::new("private") .long("private") .help("Prevents sending various client identification information.") - .takes_value(false), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("enr-udp-port") + Arg::new("enr-udp-port") .long("enr-udp-port") .value_name("PORT") .help("The UDP4 port of the local ENR. Set this only if you are sure other nodes \ can connect to your local node on this port over IPv4.") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("enr-quic-port") + Arg::new("enr-quic-port") .long("enr-quic-port") .value_name("PORT") .help("The quic UDP4 port that will be set on the local ENR. Set this only if you are sure other nodes \ can connect to your local node on this port over IPv4.") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("enr-udp6-port") + Arg::new("enr-udp6-port") .long("enr-udp6-port") .value_name("PORT") .help("The UDP6 port of the local ENR. Set this only if you are sure other nodes \ can connect to your local node on this port over IPv6.") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("enr-quic6-port") + Arg::new("enr-quic6-port") .long("enr-quic6-port") .value_name("PORT") .help("The quic UDP6 port that will be set on the local ENR. 
Set this only if you are sure other nodes \ can connect to your local node on this port over IPv6.") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("enr-tcp-port") + Arg::new("enr-tcp-port") .long("enr-tcp-port") .value_name("PORT") .help("The TCP4 port of the local ENR. Set this only if you are sure other nodes \ can connect to your local node on this port over IPv4. The --port flag is \ used if this is not set.") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("enr-tcp6-port") + Arg::new("enr-tcp6-port") .long("enr-tcp6-port") .value_name("PORT") .help("The TCP6 port of the local ENR. Set this only if you are sure other nodes \ can connect to your local node on this port over IPv6. The --port6 flag is \ used if this is not set.") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("enr-address") + Arg::new("enr-address") .long("enr-address") .value_name("ADDRESS") .help("The IP address/ DNS address to broadcast to other peers on how to reach \ @@ -236,76 +282,110 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { discovery. Set this only if you are sure other nodes can connect to your \ local node on this address. This will update the `ip4` or `ip6` ENR fields \ accordingly. To update both, set this flag twice with the different values.") - .multiple(true) - .max_values(2) - .takes_value(true), + .action(ArgAction::Append) + .num_args(1..=2) + .display_order(0) ) .arg( - Arg::with_name("enr-match") - .short("e") + Arg::new("enr-match") + .short('e') .long("enr-match") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .help("Sets the local ENR IP address and port to match those set for lighthouse. \ Specifically, the IP address will be the value of --listen-address and the \ UDP port will be --discovery-port.") + .display_order(0) ) .arg( - Arg::with_name("disable-enr-auto-update") - .short("x") + Arg::new("disable-enr-auto-update") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .long("disable-enr-auto-update") .help("Discovery automatically updates the node's local ENR with an external IP address and port as seen by other peers on the network. \ - This disables this feature, fixing the ENR's IP/PORT to those specified on boot."), + This disables this feature, fixing the ENR's IP/PORT to those specified on boot.") + .display_order(0) ) .arg( - Arg::with_name("libp2p-addresses") + Arg::new("libp2p-addresses") .long("libp2p-addresses") .value_name("MULTIADDR") .help("One or more comma-delimited multiaddrs to manually connect to a libp2p peer \ without an ENR.") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) // NOTE: This is hidden because it is primarily a developer feature for testnets and // debugging. We remove it from the list to avoid clutter. .arg( - Arg::with_name("disable-discovery") + Arg::new("disable-discovery") .long("disable-discovery") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .help("Disables the discv5 discovery protocol. The node will not search for new peers or participate in the discovery protocol.") - .hidden(true) + .hide(true) + .display_order(0) ) .arg( - Arg::with_name("disable-quic") + Arg::new("disable-quic") .long("disable-quic") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .help("Disables the quic transport.
The node will rely solely on the TCP transport for libp2p connections.") + .display_order(0) ) .arg( - Arg::with_name("disable-peer-scoring") + Arg::new("disable-peer-scoring") .long("disable-peer-scoring") .help("Disables peer scoring in lighthouse. WARNING: This is a dev-only flag, only meant to be used in local testing scenarios. \ Using this flag on a real network may cause your node to become eclipsed and see a different view of the network") - .takes_value(false) - .hidden(true), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .hide(true) + .display_order(0) ) .arg( - Arg::with_name("trusted-peers") + Arg::new("trusted-peers") .long("trusted-peers") .value_name("TRUSTED_PEERS") .help("One or more comma-delimited trusted peer ids which always have the highest score according to the peer scoring system.") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("genesis-backfill") + Arg::new("genesis-backfill") .long("genesis-backfill") .help("Attempts to download blocks all the way back to genesis when checkpoint syncing.") - .takes_value(false), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("enable-private-discovery") + Arg::new("enable-private-discovery") .long("enable-private-discovery") .help("Lighthouse by default does not discover private IP addresses. Set this flag to enable connection attempts to local addresses.") - .takes_value(false), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("self-limiter") + Arg::new("self-limiter") .long("self-limiter") + .help( + "Enables the outbound rate limiter (requests made by this node). \ + Use the self-limiter-protocols flag to set per protocol configurations. \ + If the self rate limiter is enabled and a protocol is not \ + present in the configuration, the quotas used for the inbound rate limiter will be \ + used." + ) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) + ) + .arg( + Arg::new("self-limiter-protocols") + .long("self-limiter-protocols") .help( "Enables the outbound rate limiter (requests made by this node).\ \ @@ -315,69 +395,89 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { present in the configuration, the quotas used for the inbound rate limiter will be \ used." ) - .min_values(0) - .hidden(true) + .action(ArgAction::Append) + .value_delimiter(';') + .requires("self-limiter") + .display_order(0) ) .arg( - Arg::with_name("proposer-only") + Arg::new("proposer-only") .long("proposer-only") .help("Sets this beacon node to be a block proposer only node. \ This will run the beacon node in a minimal configuration that is sufficient for block publishing only. This flag should be used \ for a beacon node being referenced by a validator client using the --proposer-node flag. This configuration is for enabling more secure setups.") - .takes_value(false), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("inbound-rate-limiter") - .long("inbound-rate-limiter") + Arg::new("disable-inbound-rate-limiter") + .long("disable-inbound-rate-limiter") + .help( + "Disables the inbound rate limiter (requests received by this node)."
+ ) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) + ) + .arg( + Arg::new("inbound-rate-limiter-protocols") + .long("inbound-rate-limiter-protocols") .help( "Configures the inbound rate limiter (requests received by this node).\ \ Rate limit quotas per protocol can be set in the form of \ :/. To set quotas for multiple protocols, \ - separate them by ';'. If the inbound rate limiter is enabled and a protocol is not \ - present in the configuration, the default quotas will be used. \ + separate them by ';'. \ \ - This is enabled by default, using default quotas. To disable rate limiting pass \ - `disabled` to this option instead." + This is enabled by default, using default quotas. To disable rate limiting use \ + the disable-inbound-rate-limiter flag instead." ) - .takes_value(true) - .hidden(true) + .action(ArgAction::Set) + .conflicts_with("disable-inbound-rate-limiter") + .display_order(0) ) .arg( - Arg::with_name("disable-backfill-rate-limiting") + Arg::new("disable-backfill-rate-limiting") .long("disable-backfill-rate-limiting") .help("Disable the backfill sync rate-limiting. This allows users to just sync the entire chain as fast \ as possible; however, it can result in resource contention which degrades staking performance. Stakers \ should generally choose to avoid this flag since backfill sync is not required for staking.") - .takes_value(false), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) /* REST API related arguments */ .arg( - Arg::with_name("http") + Arg::new("http") .long("http") .help("Enable the RESTful HTTP API server. Disabled by default.") - .takes_value(false), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("http-address") + Arg::new("http-address") .long("http-address") .requires("enable_http") .value_name("ADDRESS") .help("Set the listen address for the RESTful HTTP API server.") - .default_value_if("enable_http", None, "127.0.0.1") - .takes_value(true), + .default_value_if("enable_http", ArgPredicate::IsPresent, "127.0.0.1") + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("http-port") + Arg::new("http-port") .long("http-port") .requires("enable_http") .value_name("PORT") .help("Set the listen TCP port for the RESTful HTTP API server.") - .default_value_if("enable_http", None, "5052") - .takes_value(true), + .default_value_if("enable_http", ArgPredicate::IsPresent, "5052") + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("http-allow-origin") + Arg::new("http-allow-origin") .long("http-allow-origin") .requires("enable_http") .value_name("ORIGIN") @@ -385,71 +485,82 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { Use * to allow any origin (not recommended in production). \ If no value is supplied, the CORS allowed origin is set to the listen \ address of this server (e.g., http://localhost:5052).") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("http-spec-fork") + Arg::new("http-spec-fork") .long("http-spec-fork") .requires("enable_http") .value_name("FORK") .help("This flag is deprecated and has no effect.") - .takes_value(true) - .hidden(true) + .hide(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("http-enable-tls") + Arg::new("http-enable-tls") .long("http-enable-tls") .help("Serves the RESTful HTTP API server over TLS.
This feature is currently \ experimental.") - .takes_value(false) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .requires("http-tls-cert") .requires("http-tls-key") + .display_order(0) ) .arg( - Arg::with_name("http-tls-cert") + Arg::new("http-tls-cert") .long("http-tls-cert") .requires("enable_http") .help("The path of the certificate to be used when serving the HTTP API server \ over TLS.") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("http-tls-key") + Arg::new("http-tls-key") .long("http-tls-key") .requires("enable_http") .help("The path of the private key to be used when serving the HTTP API server \ over TLS. Must not be password-protected.") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("http-allow-sync-stalled") + Arg::new("http-allow-sync-stalled") .long("http-allow-sync-stalled") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .requires("enable_http") .help("This flag is deprecated and has no effect.") - .hidden(true) + .hide(true) + .display_order(0) ) .arg( - Arg::with_name("http-sse-capacity-multiplier") + Arg::new("http-sse-capacity-multiplier") .long("http-sse-capacity-multiplier") .requires("enable_http") - .takes_value(true) - .default_value_if("enable_http", None, "1") + .action(ArgAction::Set) + .default_value_if("enable_http", ArgPredicate::IsPresent, "1") .value_name("N") .help("Multiplier to apply to the length of HTTP server-sent-event (SSE) channels. \ Increasing this value can prevent messages from being dropped.") + .display_order(0) ) .arg( - Arg::with_name("http-duplicate-block-status") + Arg::new("http-duplicate-block-status") .long("http-duplicate-block-status") .requires("enable_http") - .takes_value(true) - .default_value_if("enable_http", None, "202") + .action(ArgAction::Set) + .default_value_if("enable_http", ArgPredicate::IsPresent, "202") .value_name("STATUS_CODE") .help("Status code to send when a block that is already known is POSTed to the \ HTTP API.") + .display_order(0) ) .arg( - Arg::with_name("http-enable-beacon-processor") + Arg::new("http-enable-beacon-processor") .long("http-enable-beacon-processor") .requires("enable_http") .value_name("BOOLEAN") @@ -457,36 +568,41 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { DoS protection. When set to \"true\", HTTP API requests will be queued and scheduled \ alongside other tasks. When set to \"false\", HTTP API responses will be executed \ immediately.") - .takes_value(true) - .default_value_if("enable_http", None, "true") + .action(ArgAction::Set) + .display_order(0) + .default_value_if("enable_http", ArgPredicate::IsPresent, "true") ) /* Prometheus metrics HTTP server related arguments */ .arg( - Arg::with_name("metrics") + Arg::new("metrics") .long("metrics") .help("Enable the Prometheus metrics HTTP server. 
Disabled by default.") - .takes_value(false), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("metrics-address") + Arg::new("metrics-address") .long("metrics-address") .value_name("ADDRESS") .requires("metrics") .help("Set the listen address for the Prometheus metrics HTTP server.") - .default_value_if("metrics", None, "127.0.0.1") - .takes_value(true), + .default_value_if("metrics", ArgPredicate::IsPresent, "127.0.0.1") + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("metrics-port") + Arg::new("metrics-port") .long("metrics-port") .requires("metrics") .value_name("PORT") .help("Set the listen TCP port for the Prometheus metrics HTTP server.") - .default_value_if("metrics", None, "5054") - .takes_value(true), + .default_value_if("metrics", ArgPredicate::IsPresent, "5054") + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("metrics-allow-origin") + Arg::new("metrics-allow-origin") .long("metrics-allow-origin") .value_name("ORIGIN") .requires("metrics") @@ -494,15 +610,17 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { Use * to allow any origin (not recommended in production). \ If no value is supplied, the CORS allowed origin is set to the listen \ address of this server (e.g., http://localhost:5054).") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("shuffling-cache-size") + Arg::new("shuffling-cache-size") .long("shuffling-cache-size") .help("Some HTTP API requests can be optimised by caching the shufflings at each epoch. \ This flag allows the user to set the shuffling cache size in epochs. \ Shufflings are dependent on validator count and setting this value to a large number can consume a large amount of memory.") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) /* @@ -510,7 +628,7 @@ */ .arg( - Arg::with_name("monitoring-endpoint") + Arg::new("monitoring-endpoint") .long("monitoring-endpoint") .value_name("ADDRESS") .help("Enables the monitoring service for sending system metrics to a remote endpoint. \ This can be used to monitor your setup on certain services (e.g. beaconcha.in). \ This flag sets the endpoint where the beacon node metrics will be sent. \ Note: This will send information to a remote server which may identify and associate your \ validators, IP address and other personal information. Always use a HTTPS connection \ and never provide an untrusted URL.") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("monitoring-endpoint-period") + Arg::new("monitoring-endpoint-period") .long("monitoring-endpoint-period") .value_name("SECONDS") .help("Defines how many seconds to wait between each message sent to \ the monitoring-endpoint. Default: 60s") .requires("monitoring-endpoint") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) /* @@ -536,122 +656,143 @@ */ .arg( - Arg::with_name("staking") + Arg::new("staking") .long("staking") .help("Standard option for a staking beacon node. This will enable the HTTP server \ on localhost:5052 and import deposit logs from the execution node. This is \ equivalent to `--http` on merge-ready networks, or `--http --eth1` pre-merge") - .takes_value(false) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) /* * Eth1 Integration */ .arg( - Arg::with_name("eth1") + Arg::new("eth1") .long("eth1") .help("If present, the node will connect to an eth1 node.
This is required for \ block production; you must use this flag if you wish to serve a validator.") - .takes_value(false), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("dummy-eth1") + Arg::new("dummy-eth1") .long("dummy-eth1") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .conflicts_with("eth1") .help("If present, uses an eth1 backend that generates static dummy data.\ Identical to the method used at the 2019 Canada interop.") + .display_order(0) ) .arg( - Arg::with_name("eth1-purge-cache") + Arg::new("eth1-purge-cache") .long("eth1-purge-cache") .value_name("PURGE-CACHE") .help("Purges the eth1 block and deposit caches") - .takes_value(false) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("eth1-blocks-per-log-query") + Arg::new("eth1-blocks-per-log-query") .long("eth1-blocks-per-log-query") .value_name("BLOCKS") .help("Specifies the number of blocks that a deposit log query should span. \ This will reduce the size of responses from the Eth1 endpoint.") .default_value("1000") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("eth1-cache-follow-distance") + Arg::new("eth1-cache-follow-distance") .long("eth1-cache-follow-distance") .value_name("BLOCKS") .help("Specifies the distance between the Eth1 chain head and the last block which \ should be imported into the cache. Setting this value lower can help \ compensate for irregular Proof-of-Work block times, but setting it too low \ can make the node vulnerable to re-orgs.") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("slots-per-restore-point") + Arg::new("slots-per-restore-point") .long("slots-per-restore-point") .value_name("SLOT_COUNT") .help("Specifies how often a freezer DB restore point should be stored. \ Cannot be changed after initialization. \ [default: 8192 (mainnet) or 64 (minimal)]") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("epochs-per-migration") + Arg::new("epochs-per-migration") .long("epochs-per-migration") .value_name("N") .help("The number of epochs to wait between running the migration of data from the \ hot DB to the cold DB.
Less frequent runs can be useful for minimizing disk \ writes") .default_value("1") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("block-cache-size") + Arg::new("block-cache-size") .long("block-cache-size") .value_name("SIZE") - .help("Specifies how many blocks the database should cache in memory [default: 5]") - .takes_value(true) + .help("Specifies how many blocks the database should cache in memory") + .default_value("5") + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("historic-state-cache-size") + Arg::new("historic-state-cache-size") .long("historic-state-cache-size") .value_name("SIZE") - .help("Specifies how many states from the freezer database should cache in memory [default: 1]") - .takes_value(true) + .help("Specifies how many states from the freezer database should cache in memory") + .default_value("1") + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("state-cache-size") + Arg::new("state-cache-size") .long("state-cache-size") .value_name("STATE_CACHE_SIZE") - .help("Specifies the size of the state cache [default: 128]") - .takes_value(true) + .help("Specifies the size of the state cache") + .default_value("128") + .action(ArgAction::Set) + .display_order(0) ) /* * Execution Layer Integration */ .arg( - Arg::with_name("execution-endpoint") + Arg::new("execution-endpoint") .long("execution-endpoint") .value_name("EXECUTION-ENDPOINT") .alias("execution-endpoints") .help("Server endpoint for an execution layer JWT-authenticated HTTP \ JSON-RPC connection. Uses the same endpoint to populate the \ deposit cache.") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("execution-jwt") + Arg::new("execution-jwt") .long("execution-jwt") .value_name("EXECUTION-JWT") .alias("jwt-secrets") .help("File path which contains the hex-encoded JWT secret for the \ execution endpoint provided in the --execution-endpoint flag.") .requires("execution-endpoint") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("execution-jwt-secret-key") + Arg::new("execution-jwt-secret-key") .long("execution-jwt-secret-key") .value_name("EXECUTION-JWT-SECRET-KEY") .alias("jwt-secret-key") @@ -659,10 +800,11 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { execution endpoint provided in the --execution-endpoint flag.") .requires("execution-endpoint") .conflicts_with("execution-jwt") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("execution-jwt-id") + Arg::new("execution-jwt-id") .long("execution-jwt-id") .value_name("EXECUTION-JWT-ID") .alias("jwt-id") @@ -670,10 +812,11 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { during JWT authentication. It corresponds to the 'id' field in the JWT claims object.\ Set to empty by default") .requires("execution-jwt") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("execution-jwt-version") + Arg::new("execution-jwt-version") .long("execution-jwt-version") .value_name("EXECUTION-JWT-VERSION") .alias("jwt-version") @@ -681,119 +824,162 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { during JWT authentication. 
It corresponds to the 'clv' field in the JWT claims object.\ Set to empty by default") .requires("execution-jwt") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("suggested-fee-recipient") + Arg::new("suggested-fee-recipient") .long("suggested-fee-recipient") .value_name("SUGGESTED-FEE-RECIPIENT") .help("Emergency fallback fee recipient for use in case the validator client does \ not have one configured. You should set this flag on the validator \ client instead of (or in addition to) setting it here.") .requires("execution-endpoint") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("builder") + Arg::new("builder") .long("builder") .alias("payload-builder") .alias("payload-builders") .help("The URL of a service compatible with the MEV-boost API.") .requires("execution-endpoint") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("execution-timeout-multiplier") + Arg::new("execution-timeout-multiplier") .long("execution-timeout-multiplier") .value_name("NUM") .help("Unsigned integer to multiply the default execution timeouts by.") .default_value("1") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) + ) + .arg( + Arg::new("builder-header-timeout") + .long("builder-header-timeout") + .value_name("MILLISECONDS") + .help("Defines a timeout value (in milliseconds) to use when \ + fetching a block header from the builder API.") + .default_value("1000") + .value_parser(|timeout: &str| { + match timeout + .parse::() + .ok() + .map(Duration::from_millis) + { + Some(val) => { + if val > Duration::from_secs(3) { + return Err("builder-header-timeout cannot exceed 3000ms") + } + Ok(timeout.to_string()) + }, + None => Err("builder-header-timeout must be a number"), + } + }) + .requires("builder") + .action(ArgAction::Set) + .display_order(0) ) /* Deneb settings */ .arg( - Arg::with_name("trusted-setup-file-override") + Arg::new("trusted-setup-file-override") .long("trusted-setup-file-override") .value_name("FILE") .help("Path to a json file containing the trusted setup params. \ NOTE: This will override the trusted setup that is generated \ from the mainnet kzg ceremony. Use with caution") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) /* * Database purging and compaction. */ .arg( - Arg::with_name("purge-db") + Arg::new("purge-db") .long("purge-db") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .help("If present, the chain database will be deleted. Use with caution.") + .display_order(0) ) .arg( - Arg::with_name("compact-db") + Arg::new("compact-db") .long("compact-db") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .help("If present, apply compaction to the database on start-up. Use with caution. \ It is generally not recommended unless auto-compaction is disabled.") + .display_order(0) ) .arg( - Arg::with_name("auto-compact-db") + Arg::new("auto-compact-db") .long("auto-compact-db") .help("Enable or disable automatic compaction of the database on finalization.") - .takes_value(true) + .action(ArgAction::Set) .default_value("true") + .display_order(0) ) .arg( - Arg::with_name("prune-payloads") + Arg::new("prune-payloads") .long("prune-payloads") .help("Prune execution payloads from Lighthouse's database. 
This saves space but \ imposes load on the execution client, as payloads need to be \ reconstructed and sent to syncing peers.") - .takes_value(true) + .action(ArgAction::Set) .default_value("true") + .display_order(0) ) .arg( - Arg::with_name("prune-blobs") + Arg::new("prune-blobs") .long("prune-blobs") .value_name("BOOLEAN") .help("Prune blobs from Lighthouse's database when they are older than the \ data availability boundary relative to the current epoch.") - .takes_value(true) + .action(ArgAction::Set) .default_value("true") + .display_order(0) ) .arg( - Arg::with_name("epochs-per-blob-prune") + Arg::new("epochs-per-blob-prune") .long("epochs-per-blob-prune") .value_name("EPOCHS") .help("The epoch interval with which to prune blobs from Lighthouse's \ database when they are older than the data availability boundary \ relative to the current epoch.") - .takes_value(true) + .action(ArgAction::Set) .default_value("1") + .display_order(0) ) .arg( - Arg::with_name("blob-prune-margin-epochs") + Arg::new("blob-prune-margin-epochs") .long("blob-prune-margin-epochs") .value_name("EPOCHS") .help("The margin for blob pruning in epochs. The oldest blobs are pruned \ up until data_availability_boundary - blob_prune_margin_epochs.") - .takes_value(true) + .action(ArgAction::Set) .default_value("0") + .display_order(0) ) /* * Misc. */ .arg( - Arg::with_name("graffiti") + Arg::new("graffiti") .long("graffiti") .help( "Specify your custom graffiti to be included in blocks. \ Defaults to the current version and commit, truncated to fit in 32 bytes. " ) .value_name("GRAFFITI") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("max-skip-slots") + Arg::new("max-skip-slots") .long("max-skip-slots") .help( "Refuse to skip more than this many slots when processing an attestation. \ @@ -801,43 +987,48 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { but could also cause unnecessary consensus failures, so is disabled by default." ) .value_name("NUM_SLOTS") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) /* * Slasher. */ .arg( - Arg::with_name("slasher") + Arg::new("slasher") .long("slasher") .help( "Run a slasher alongside the beacon node. It is currently only recommended for \ expert users because of the immaturity of the slasher UX and the extra \ resources required." ) - .takes_value(false) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("slasher-dir") + Arg::new("slasher-dir") .long("slasher-dir") .help( "Set the slasher's database directory." ) .value_name("PATH") - .takes_value(true) .requires("slasher") + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("slasher-update-period") + Arg::new("slasher-update-period") .long("slasher-update-period") .help( "Configure how often the slasher runs batch processing." ) .value_name("SECONDS") .requires("slasher") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("slasher-slot-offset") + Arg::new("slasher-slot-offset") .long("slasher-slot-offset") .help( "Set the delay from the start of the slot at which the slasher should ingest \ @@ -846,10 +1037,11 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { ) .value_name("SECONDS") .requires("slasher") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("slasher-history-length") + Arg::new("slasher-history-length") .long("slasher-history-length") .help( "Configure how many epochs of history the slasher keeps.
Immutable after \ @@ -857,65 +1049,72 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { ) .value_name("EPOCHS") .requires("slasher") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("slasher-max-db-size") + Arg::new("slasher-max-db-size") .long("slasher-max-db-size") .help( "Maximum size of the MDBX database used by the slasher." ) .value_name("GIGABYTES") .requires("slasher") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("slasher-att-cache-size") + Arg::new("slasher-att-cache-size") .long("slasher-att-cache-size") .help("Set the maximum number of attestation roots for the slasher to cache") .value_name("COUNT") .requires("slasher") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("slasher-chunk-size") + Arg::new("slasher-chunk-size") .long("slasher-chunk-size") .help( "Number of epochs per validator per chunk stored on disk." ) .value_name("EPOCHS") .requires("slasher") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("slasher-validator-chunk-size") + Arg::new("slasher-validator-chunk-size") .long("slasher-validator-chunk-size") .help( "Number of validators per chunk stored on disk." ) .value_name("NUM_VALIDATORS") .requires("slasher") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("slasher-broadcast") + Arg::new("slasher-broadcast") .long("slasher-broadcast") .help("Broadcast slashings found by the slasher to the rest of the network \ [Enabled by default].") - .takes_value(true) + .action(ArgAction::Set) .default_value("true") + .display_order(0) ) .arg( - Arg::with_name("slasher-backend") + Arg::new("slasher-backend") .long("slasher-backend") .value_name("DATABASE") .help("Set the database backend to be used by the slasher.") - .takes_value(true) - .possible_values(slasher::DatabaseBackend::VARIANTS) + .action(ArgAction::Set) + .value_parser(slasher::DatabaseBackend::VARIANTS.to_vec()) .requires("slasher") + .display_order(0) ) .arg( - Arg::with_name("wss-checkpoint") + Arg::new("wss-checkpoint") .long("wss-checkpoint") .help( "Specify a weak subjectivity checkpoint in `block_root:epoch` format to verify \ @@ -924,94 +1123,109 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { state use --checkpoint-sync-url." ) .value_name("WSS_CHECKPOINT") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("checkpoint-state") + Arg::new("checkpoint-state") .long("checkpoint-state") .help("Set a checkpoint state to start syncing from. Must be aligned and match \ --checkpoint-block. Using --checkpoint-sync-url instead is recommended.") .value_name("STATE_SSZ") - .takes_value(true) + .action(ArgAction::Set) .requires("checkpoint-block") + .display_order(0) ) .arg( - Arg::with_name("checkpoint-block") + Arg::new("checkpoint-block") .long("checkpoint-block") .help("Set a checkpoint block to start syncing from. Must be aligned and match \ --checkpoint-state. Using --checkpoint-sync-url instead is recommended.") .value_name("BLOCK_SSZ") - .takes_value(true) + .action(ArgAction::Set) .requires("checkpoint-state") + .display_order(0) ) .arg( - Arg::with_name("checkpoint-blobs") + Arg::new("checkpoint-blobs") .long("checkpoint-blobs") .help("Set the checkpoint blobs to start syncing from. Must be aligned and match \ --checkpoint-block. 
Using --checkpoint-sync-url instead is recommended.") .value_name("BLOBS_SSZ") - .takes_value(true) + .action(ArgAction::Set) .requires("checkpoint-block") + .display_order(0) ) .arg( - Arg::with_name("checkpoint-sync-url") + Arg::new("checkpoint-sync-url") .long("checkpoint-sync-url") .help("Set the remote beacon node HTTP endpoint to use for checkpoint sync.") .value_name("BEACON_NODE") - .takes_value(true) + .action(ArgAction::Set) .conflicts_with("checkpoint-state") + .display_order(0) ) .arg( - Arg::with_name("checkpoint-sync-url-timeout") + Arg::new("checkpoint-sync-url-timeout") .long("checkpoint-sync-url-timeout") .help("Set the timeout for checkpoint sync calls to remote beacon node HTTP endpoint.") .value_name("SECONDS") - .takes_value(true) + .action(ArgAction::Set) .default_value("180") + .display_order(0) ) .arg( - Arg::with_name("allow-insecure-genesis-sync") + Arg::new("allow-insecure-genesis-sync") .long("allow-insecure-genesis-sync") .help("Enable syncing from genesis, which is generally insecure and incompatible with data availability checks. \ Checkpoint syncing is the preferred method for syncing a node. \ Only use this flag when testing. DO NOT use on mainnet!") .conflicts_with("checkpoint-sync-url") .conflicts_with("checkpoint-state") - .takes_value(false) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("reconstruct-historic-states") + Arg::new("reconstruct-historic-states") .long("reconstruct-historic-states") .help("After a checkpoint sync, reconstruct historic states in the database. This requires syncing all the way back to genesis.") - .takes_value(false) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("validator-monitor-auto") + Arg::new("validator-monitor-auto") .long("validator-monitor-auto") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .help("Enables the automatic detection and monitoring of validators connected to the \ HTTP API and using the subnet subscription endpoint. This generally has the \ effect of providing additional logging and metrics for locally controlled \ validators.") + .display_order(0) ) .arg( - Arg::with_name("validator-monitor-pubkeys") + Arg::new("validator-monitor-pubkeys") .long("validator-monitor-pubkeys") .help("A comma-separated list of 0x-prefixed validator public keys. \ These validators will receive special monitoring and additional \ logging.") .value_name("PUBKEYS") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("validator-monitor-file") + Arg::new("validator-monitor-file") .long("validator-monitor-file") .help("As per --validator-monitor-pubkeys, but the comma-separated list is \ contained within a file at the given path.") .value_name("PATH") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("validator-monitor-individual-tracking-threshold") + Arg::new("validator-monitor-individual-tracking-threshold") .long("validator-monitor-individual-tracking-threshold") .help("Once the validator monitor reaches this number of local validators \ it will stop collecting per-validator Prometheus metrics and issuing \ @@ -1019,59 +1233,73 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { This avoids infeasibly high cardinality in the Prometheus database and \ high log volume when using many validators. 
Defaults to 64.") .value_name("INTEGER") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("disable-lock-timeouts") + Arg::new("disable-lock-timeouts") .long("disable-lock-timeouts") .help("Disable the timeouts applied to some internal locks by default. This can \ lead to less spurious failures on slow hardware but is considered \ experimental as it may obscure performance issues.") - .takes_value(false) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("disable-proposer-reorgs") + Arg::new("disable-proposer-reorgs") .long("disable-proposer-reorgs") .help("Do not attempt to reorg late blocks from other validators when proposing.") - .takes_value(false) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("proposer-reorg-threshold") + Arg::new("proposer-reorg-threshold") .long("proposer-reorg-threshold") + .action(ArgAction::Set) .value_name("PERCENT") .help("Percentage of head vote weight below which to attempt a proposer reorg. \ Default: 20%") .conflicts_with("disable-proposer-reorgs") + .display_order(0) ) .arg( - Arg::with_name("proposer-reorg-parent-threshold") + Arg::new("proposer-reorg-parent-threshold") .long("proposer-reorg-parent-threshold") .value_name("PERCENT") .help("Percentage of parent vote weight above which to attempt a proposer reorg. \ Default: 160%") .conflicts_with("disable-proposer-reorgs") + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("proposer-reorg-epochs-since-finalization") + Arg::new("proposer-reorg-epochs-since-finalization") .long("proposer-reorg-epochs-since-finalization") + .action(ArgAction::Set) .value_name("EPOCHS") .help("Maximum number of epochs since finalization at which proposer reorgs are \ allowed. Default: 2") .conflicts_with("disable-proposer-reorgs") + .display_order(0) ) .arg( - Arg::with_name("proposer-reorg-cutoff") + Arg::new("proposer-reorg-cutoff") .long("proposer-reorg-cutoff") .value_name("MILLISECONDS") + .action(ArgAction::Set) .help("Maximum delay after the start of the slot at which to propose a reorging \ block. Lower values can prevent failed reorgs by ensuring the block has \ ample time to propagate and be processed by the network. The default is \ 1/12th of a slot (1 second on mainnet)") .conflicts_with("disable-proposer-reorgs") + .display_order(0) ) .arg( - Arg::with_name("proposer-reorg-disallowed-offsets") + Arg::new("proposer-reorg-disallowed-offsets") .long("proposer-reorg-disallowed-offsets") + .action(ArgAction::Set) .value_name("N1,N2,...") .help("Comma-separated list of integer offsets which can be used to avoid \ proposing reorging blocks at certain slots. An offset of N means that \ @@ -1080,66 +1308,75 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { avoided. Any offsets supplied with this flag will impose additional \ restrictions.") .conflicts_with("disable-proposer-reorgs") + .display_order(0) ) .arg( - Arg::with_name("prepare-payload-lookahead") + Arg::new("prepare-payload-lookahead") .long("prepare-payload-lookahead") .value_name("MILLISECONDS") .help("The time before the start of a proposal slot at which payload attributes \ should be sent. Low values are useful for execution nodes which don't \ improve their payload after the first call, and high values are useful \ for ensuring the EL is given ample notice. 
Default: 1/3 of a slot.") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("always-prepare-payload") + Arg::new("always-prepare-payload") .long("always-prepare-payload") .help("Send payload attributes with every fork choice update. This is intended for \ use by block builders, relays and developers. You should set a fee \ recipient on this BN and also consider adjusting the \ --prepare-payload-lookahead flag.") - .takes_value(false) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("fork-choice-before-proposal-timeout") + Arg::new("fork-choice-before-proposal-timeout") .long("fork-choice-before-proposal-timeout") .help("Set the maximum number of milliseconds to wait for fork choice before \ proposing a block. You can prevent waiting at all by setting the timeout \ to 0, however you risk proposing atop the wrong parent block.") .default_value("250") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("paranoid-block-proposal") + Arg::new("paranoid-block-proposal") .long("paranoid-block-proposal") .help("Paranoid enough to be reading the source? Nice. This flag reverts some \ block proposal optimisations and forces the node to check every attestation \ it includes super thoroughly. This may be useful in an emergency, but not \ otherwise.") - .hidden(true) - .takes_value(false) + .hide(true) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("builder-fallback-skips") + Arg::new("builder-fallback-skips") .long("builder-fallback-skips") .help("If this node is proposing a block and has seen this number of skip slots \ on the canonical chain in a row, it will NOT query any connected builders, \ and will use the local execution engine for payload construction.") .default_value("3") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("builder-fallback-skips-per-epoch") + Arg::new("builder-fallback-skips-per-epoch") .long("builder-fallback-skips-per-epoch") .help("If this node is proposing a block and has seen this number of skip slots \ on the canonical chain in the past `SLOTS_PER_EPOCH`, it will NOT query \ any connected builders, and will use the local execution engine for \ payload construction.") .default_value("8") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("builder-fallback-epochs-since-finalization") + Arg::new("builder-fallback-epochs-since-finalization") .long("builder-fallback-epochs-since-finalization") .help("If this node is proposing a block and the chain has not finalized within \ this number of epochs, it will NOT query any connected builders, \ @@ -1149,152 +1386,180 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { if there are skip slots at the start of an epoch, right before this node \ is set to propose.") .default_value("3") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) )
This means the builder \ API will always be used for payload construction, regardless of recent chain \ conditions.") - .takes_value(false) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("builder-profit-threshold") + Arg::new("builder-profit-threshold") .long("builder-profit-threshold") .value_name("WEI_VALUE") .help("This flag is deprecated and has no effect.") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("builder-user-agent") + Arg::new("builder-user-agent") .long("builder-user-agent") .value_name("STRING") .help("The HTTP user agent to send alongside requests to the builder URL. The \ default is Lighthouse's version string.") .requires("builder") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("reset-payload-statuses") + Arg::new("reset-payload-statuses") .long("reset-payload-statuses") .help("When present, Lighthouse will forget the payload statuses of any \ already-imported blocks. This can assist in the recovery from a consensus \ failure caused by the execution layer.") - .takes_value(false) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("disable-deposit-contract-sync") + Arg::new("disable-deposit-contract-sync") .long("disable-deposit-contract-sync") .help("Explicitly disables syncing of deposit logs from the execution node. \ This overrides any previous option that depends on it. \ Useful if you intend to run a non-validating beacon node.") - .takes_value(false) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("disable-optimistic-finalized-sync") + Arg::new("disable-optimistic-finalized-sync") .long("disable-optimistic-finalized-sync") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .help("Force Lighthouse to verify every execution block hash with the execution \ client during finalized sync. By default block hashes will be checked in \ Lighthouse and only passed to the EL if initial verification fails.") + .display_order(0) ) .arg( - Arg::with_name("light-client-server") + Arg::new("light-client-server") .long("light-client-server") .help("Act as a full node supporting light clients on the p2p network \ [experimental]") - .takes_value(false) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("gui") + Arg::new("gui") .long("gui") .help("Enable the graphical user interface and all its requirements. \ This enables --http and --validator-monitor-auto and enables SSE logging.") - .takes_value(false) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("always-prefer-builder-payload") + Arg::new("always-prefer-builder-payload") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .long("always-prefer-builder-payload") .help("This flag is deprecated and has no effect.") + .display_order(0) ) .arg( - Arg::with_name("invalid-gossip-verified-blocks-path") + Arg::new("invalid-gossip-verified-blocks-path") + .action(ArgAction::Set) .long("invalid-gossip-verified-blocks-path") .value_name("PATH") .help("If a block succeeds gossip validation whilst failing full validation, store \ the block SSZ as a file at this path. This feature is only recommended for \ developers. 
This directory is not pruned, users should be careful to avoid \ filling up their disks.") + .display_order(0) ) .arg( - Arg::with_name("progressive-balances") + Arg::new("progressive-balances") .long("progressive-balances") .value_name("MODE") .help("Deprecated. This optimisation is now the default and cannot be disabled.") - .takes_value(true) - .possible_values(&["fast", "disabled", "checked", "strict"]) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("beacon-processor-max-workers") + Arg::new("beacon-processor-max-workers") .long("beacon-processor-max-workers") .value_name("INTEGER") .help("Specifies the maximum concurrent tasks for the task scheduler. Increasing \ this value may increase resource consumption. Reducing the value \ may result in decreased resource usage and diminished performance. The \ default value is the number of logical CPU cores on the host.") - .hidden(true) - .takes_value(true) + .hide(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("beacon-processor-work-queue-len") + Arg::new("beacon-processor-work-queue-len") .long("beacon-processor-work-queue-len") .value_name("INTEGER") .help("Specifies the length of the inbound event queue. \ Higher values may prevent messages from being dropped while lower values \ may help protect the node from becoming overwhelmed.") .default_value("16384") - .hidden(true) - .takes_value(true) + .hide(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("beacon-processor-reprocess-queue-len") + Arg::new("beacon-processor-reprocess-queue-len") .long("beacon-processor-reprocess-queue-len") .value_name("INTEGER") .help("Specifies the length of the queue for messages requiring delayed processing. \ Higher values may prevent messages from being dropped while lower values \ may help protect the node from becoming overwhelmed.") - .hidden(true) + .hide(true) .default_value("12288") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("beacon-processor-attestation-batch-size") + Arg::new("beacon-processor-attestation-batch-size") .long("beacon-processor-attestation-batch-size") .value_name("INTEGER") .help("Specifies the number of gossip attestations in a signature verification batch. \ Higher values may reduce CPU usage in a healthy network whilst lower values may \ increase CPU usage in an unhealthy or hostile network.") - .hidden(true) + .hide(true) .default_value("64") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("beacon-processor-aggregate-batch-size") + Arg::new("beacon-processor-aggregate-batch-size") .long("beacon-processor-aggregate-batch-size") .value_name("INTEGER") .help("Specifies the number of gossip aggregate attestations in a signature \ verification batch. 
\ Higher values may reduce CPU usage in a healthy network while lower values may \ increase CPU usage in an unhealthy or hostile network.") - .hidden(true) + .hide(true) .default_value("64") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("disable-duplicate-warn-logs") + Arg::new("disable-duplicate-warn-logs") .long("disable-duplicate-warn-logs") .help("This flag is deprecated and has no effect.") - .takes_value(false) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) - .group(ArgGroup::with_name("enable_http").args(&["http", "gui", "staking"]).multiple(true)) + .group(ArgGroup::new("enable_http").args(["http", "gui", "staking"]).multiple(true)) } diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 9a1d7df124..35fad0718c 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -5,9 +5,9 @@ use beacon_chain::chain_config::{ }; use beacon_chain::graffiti_calculator::GraffitiOrigin; use beacon_chain::TrustedSetup; -use clap::ArgMatches; +use clap::{parser::ValueSource, ArgMatches, Id}; use clap_utils::flags::DISABLE_MALLOC_TUNING_FLAG; -use clap_utils::parse_required; +use clap_utils::{parse_flag, parse_required}; use client::{ClientConfig, ClientGenesis}; use directory::{DEFAULT_BEACON_NODE_DIR, DEFAULT_NETWORK_DIR, DEFAULT_ROOT_DIR}; use environment::RuntimeContext; @@ -50,7 +50,7 @@ pub fn get_config( client_config.set_data_dir(get_data_dir(cli_args)); // If necessary, remove any existing database and configuration - if client_config.data_dir().exists() && cli_args.is_present("purge-db") { + if client_config.data_dir().exists() && cli_args.get_flag("purge-db") { // Remove the chain_db. let chain_db = client_config.get_db_path(); if chain_db.exists() { @@ -96,7 +96,7 @@ pub fn get_config( * Note: the config values set here can be overwritten by other more specific cli params */ - if cli_args.is_present("staking") { + if cli_args.get_flag("staking") { client_config.http_api.enabled = true; client_config.sync_eth1_chain = true; } @@ -105,22 +105,22 @@ pub fn get_config( * Http API server */ - if cli_args.is_present("enable_http") { + if cli_args.get_one::<Id>("enable_http").is_some() { client_config.http_api.enabled = true; - if let Some(address) = cli_args.value_of("http-address") { + if let Some(address) = cli_args.get_one::<String>("http-address") { client_config.http_api.listen_addr = address .parse::<IpAddr>() .map_err(|_| "http-address is not a valid IP address.")?; } - if let Some(port) = cli_args.value_of("http-port") { + if let Some(port) = cli_args.get_one::<String>("http-port") { client_config.http_api.listen_port = port .parse::<u16>() .map_err(|_| "http-port is not a valid u16.")?; } - if let Some(allow_origin) = cli_args.value_of("http-allow-origin") { + if let Some(allow_origin) = cli_args.get_one::<String>("http-allow-origin") { // Pre-validate the config value to give feedback to the user on node startup, instead of // as late as when the first API response is produced.
hyper::header::HeaderValue::from_str(allow_origin) @@ -129,7 +129,7 @@ pub fn get_config( client_config.http_api.allow_origin = Some(allow_origin.to_string()); } - if cli_args.is_present("http-spec-fork") { + if cli_args.get_one::<String>("http-spec-fork").is_some() { warn!( log, "Ignoring --http-spec-fork"; @@ -137,22 +137,22 @@ pub fn get_config( ); } - if cli_args.is_present("http-enable-tls") { + if cli_args.get_flag("http-enable-tls") { client_config.http_api.tls_config = Some(TlsConfig { cert: cli_args - .value_of("http-tls-cert") + .get_one::<String>("http-tls-cert") .ok_or("--http-tls-cert was not provided.")? .parse::<PathBuf>() .map_err(|_| "http-tls-cert is not a valid path name.")?, key: cli_args - .value_of("http-tls-key") + .get_one::<String>("http-tls-key") .ok_or("--http-tls-key was not provided.")? .parse::<PathBuf>() .map_err(|_| "http-tls-key is not a valid path name.")?, }); } - if cli_args.is_present("http-allow-sync-stalled") { + if cli_args.get_flag("http-allow-sync-stalled") { warn!( log, "Ignoring --http-allow-sync-stalled"; @@ -170,10 +170,10 @@ pub fn get_config( parse_required(cli_args, "http-duplicate-block-status")?; client_config.http_api.enable_light_client_server = - cli_args.is_present("light-client-server"); + cli_args.get_flag("light-client-server"); } - if cli_args.is_present("light-client-server") { + if cli_args.get_flag("light-client-server") { client_config.chain.enable_light_client_server = true; } @@ -185,23 +185,23 @@ pub fn get_config( * Prometheus metrics HTTP server */ - if cli_args.is_present("metrics") { + if cli_args.get_flag("metrics") { client_config.http_metrics.enabled = true; } - if let Some(address) = cli_args.value_of("metrics-address") { + if let Some(address) = cli_args.get_one::<String>("metrics-address") { client_config.http_metrics.listen_addr = address .parse::<IpAddr>() .map_err(|_| "metrics-address is not a valid IP address.")?; } - if let Some(port) = cli_args.value_of("metrics-port") { + if let Some(port) = cli_args.get_one::<String>("metrics-port") { client_config.http_metrics.listen_port = port .parse::<u16>() .map_err(|_| "metrics-port is not a valid u16.")?; } - if let Some(allow_origin) = cli_args.value_of("metrics-allow-origin") { + if let Some(allow_origin) = cli_args.get_one::<String>("metrics-allow-origin") { // Pre-validate the config value to give feedback to the user on node startup, instead of // as late as when the first API response is produced. hyper::header::HeaderValue::from_str(allow_origin) @@ -213,7 +213,7 @@ pub fn get_config( /* * Explorer metrics */ - if let Some(monitoring_endpoint) = cli_args.value_of("monitoring-endpoint") { + if let Some(monitoring_endpoint) = cli_args.get_one::<String>("monitoring-endpoint") { let update_period_secs = clap_utils::parse_optional(cli_args, "monitoring-endpoint-period")?; @@ -227,7 +227,7 @@ pub fn get_config( // Log a warning indicating an open HTTP server if it wasn't specified explicitly // (e.g. using the --staking flag). - if cli_args.is_present("staking") { + if cli_args.get_flag("staking") { warn!( log, "Running HTTP server on port {}", client_config.http_api.listen_port @@ -235,7 +235,7 @@ pub fn get_config( } // Do not scrape for malloc metrics if we've disabled tuning malloc as it may cause panics. - if cli_args.is_present(DISABLE_MALLOC_TUNING_FLAG) { + if cli_args.get_flag(DISABLE_MALLOC_TUNING_FLAG) { client_config.http_metrics.allocator_metrics_enabled = false; } @@ -246,24 +246,24 @@ pub fn get_config( // When present, use an eth1 backend that generates deterministic junk.
// // Useful for running testnets without the overhead of a deposit contract. - if cli_args.is_present("dummy-eth1") { + if cli_args.get_flag("dummy-eth1") { client_config.dummy_eth1_backend = true; } // When present, attempt to sync to an eth1 node. // // Required for block production. - if cli_args.is_present("eth1") { + if cli_args.get_flag("eth1") { client_config.sync_eth1_chain = true; } - if let Some(val) = cli_args.value_of("eth1-blocks-per-log-query") { + if let Some(val) = cli_args.get_one::<String>("eth1-blocks-per-log-query") { client_config.eth1.blocks_per_log_query = val .parse() .map_err(|_| "eth1-blocks-per-log-query is not a valid integer".to_string())?; } - if cli_args.is_present("eth1-purge-cache") { + if cli_args.get_flag("eth1-purge-cache") { client_config.eth1.purge_cache = true; } @@ -273,7 +273,7 @@ pub fn get_config( client_config.eth1.cache_follow_distance = Some(follow_distance); } - if let Some(endpoints) = cli_args.value_of("execution-endpoint") { + if let Some(endpoints) = cli_args.get_one::<String>("execution-endpoint") { let mut el_config = execution_layer::Config::default(); // Always follow the deposit contract when there is an execution endpoint. @@ -296,13 +296,14 @@ pub fn get_config( let secret_file: PathBuf; // Parse a single JWT secret from a given file_path, logging warnings if multiple are supplied. - if let Some(secret_files) = cli_args.value_of("execution-jwt") { + if let Some(secret_files) = cli_args.get_one::<String>("execution-jwt") { secret_file = parse_only_one_value(secret_files, PathBuf::from_str, "--execution-jwt", log)?; // Check if the JWT secret key is passed directly via cli flag and persist it to the default // file location. - } else if let Some(jwt_secret_key) = cli_args.value_of("execution-jwt-secret-key") { + } else if let Some(jwt_secret_key) = cli_args.get_one::<String>("execution-jwt-secret-key") + { use std::fs::File; use std::io::Write; secret_file = client_config.data_dir().join(DEFAULT_JWT_FILE); @@ -321,23 +322,27 @@ pub fn get_config( } // Parse and set the payload builder, if any. - if let Some(endpoint) = cli_args.value_of("builder") { + if let Some(endpoint) = cli_args.get_one::<String>("builder") { let payload_builder = parse_only_one_value(endpoint, SensitiveUrl::parse, "--builder", log)?; el_config.builder_url = Some(payload_builder); el_config.builder_user_agent = clap_utils::parse_optional(cli_args, "builder-user-agent")?; + + el_config.builder_header_timeout = + clap_utils::parse_optional(cli_args, "builder-header-timeout")?
+ .map(Duration::from_millis); } - if cli_args.is_present("builder-profit-threshold") { + if parse_flag(cli_args, "builder-profit-threshold") { warn!( log, "Ignoring --builder-profit-threshold"; "info" => "this flag is deprecated and will be removed" ); } - if cli_args.is_present("always-prefer-builder-payload") { + if cli_args.get_flag("always-prefer-builder-payload") { warn!( log, "Ignoring --always-prefer-builder-payload"; @@ -380,7 +385,8 @@ pub fn get_config( .map_err(|e| format!("Unable to read trusted setup file: {}", e))?; // Override default trusted setup file if required - if let Some(trusted_setup_file_path) = cli_args.value_of("trusted-setup-file-override") { + if let Some(trusted_setup_file_path) = cli_args.get_one::<String>("trusted-setup-file-override") + { let file = std::fs::File::open(trusted_setup_file_path) .map_err(|e| format!("Failed to open trusted setup file: {}", e))?; let trusted_setup: TrustedSetup = serde_json::from_reader(file) @@ -388,11 +394,11 @@ pub fn get_config( client_config.trusted_setup = Some(trusted_setup); } - if let Some(freezer_dir) = cli_args.value_of("freezer-dir") { + if let Some(freezer_dir) = cli_args.get_one::<String>("freezer-dir") { client_config.freezer_db_path = Some(PathBuf::from(freezer_dir)); } - if let Some(blobs_db_dir) = cli_args.value_of("blobs-dir") { + if let Some(blobs_db_dir) = cli_args.get_one::<String>("blobs-dir") { client_config.blobs_db_path = Some(PathBuf::from(blobs_db_dir)); } @@ -400,24 +406,27 @@ pub fn get_config( client_config.store.slots_per_restore_point = sprp; client_config.store.slots_per_restore_point_set_explicitly = sprp_explicit; - if let Some(block_cache_size) = cli_args.value_of("block-cache-size") { + if let Some(block_cache_size) = cli_args.get_one::<String>("block-cache-size") { client_config.store.block_cache_size = block_cache_size .parse() .map_err(|_| "block-cache-size is not a valid integer".to_string())?; } - if let Some(cache_size) = clap_utils::parse_optional(cli_args, "state-cache-size")? { - client_config.store.state_cache_size = cache_size; + if let Some(cache_size) = cli_args.get_one::<String>("state-cache-size") { + client_config.store.state_cache_size = cache_size + .parse() + .map_err(|_| "state-cache-size is not a valid integer".to_string())?; } - if let Some(historic_state_cache_size) = cli_args.value_of("historic-state-cache-size") { + if let Some(historic_state_cache_size) = cli_args.get_one::<String>("historic-state-cache-size") + { client_config.store.historic_state_cache_size = historic_state_cache_size .parse() .map_err(|_| "historic-state-cache-size is not a valid integer".to_string())?; } - client_config.store.compact_on_init = cli_args.is_present("compact-db"); - if let Some(compact_on_prune) = cli_args.value_of("auto-compact-db") { + client_config.store.compact_on_init = cli_args.get_flag("compact-db"); + if let Some(compact_on_prune) = cli_args.get_one::<String>("auto-compact-db") { client_config.store.compact_on_prune = compact_on_prune .parse() .map_err(|_| "auto-compact-db takes a boolean".to_string())?; @@ -458,7 +467,7 @@ pub fn get_config( * from lighthouse. * Discovery address is set to localhost by default.
*/ - if cli_args.is_present("zero-ports") { + if cli_args.get_flag("zero-ports") { client_config.http_api.listen_port = 0; client_config.http_metrics.listen_port = 0; } @@ -524,14 +533,14 @@ pub fn get_config( None }; - client_config.allow_insecure_genesis_sync = cli_args.is_present("allow-insecure-genesis-sync"); + client_config.allow_insecure_genesis_sync = cli_args.get_flag("allow-insecure-genesis-sync"); client_config.genesis = if eth2_network_config.genesis_state_is_known() { // Set up weak subjectivity sync, or start from the hardcoded genesis state. if let (Some(initial_state_path), Some(initial_block_path), opt_initial_blobs_path) = ( - cli_args.value_of("checkpoint-state"), - cli_args.value_of("checkpoint-block"), - cli_args.value_of("checkpoint-blobs"), + cli_args.get_one::<String>("checkpoint-state"), + cli_args.get_one::<String>("checkpoint-block"), + cli_args.get_one::<String>("checkpoint-blobs"), ) { let read = |path: &str| { use std::fs::File; @@ -547,14 +556,14 @@ pub fn get_config( let anchor_state_bytes = read(initial_state_path)?; let anchor_block_bytes = read(initial_block_path)?; - let anchor_blobs_bytes = opt_initial_blobs_path.map(read).transpose()?; + let anchor_blobs_bytes = opt_initial_blobs_path.map(|s| read(s)).transpose()?; ClientGenesis::WeakSubjSszBytes { anchor_state_bytes, anchor_block_bytes, anchor_blobs_bytes, } - } else if let Some(remote_bn_url) = cli_args.value_of("checkpoint-sync-url") { + } else if let Some(remote_bn_url) = cli_args.get_one::<String>("checkpoint-sync-url") { let url = SensitiveUrl::parse(remote_bn_url) .map_err(|e| format!("Invalid checkpoint sync URL: {:?}", e))?; @@ -563,7 +572,7 @@ pub fn get_config( ClientGenesis::GenesisState } } else { - if cli_args.is_present("checkpoint-state") || cli_args.is_present("checkpoint-sync-url") { + if parse_flag(cli_args, "checkpoint-state") || parse_flag(cli_args, "checkpoint-sync-url") { return Err( "Checkpoint sync is not available for this network as no genesis state is known" .to_string(), @@ -572,14 +581,14 @@ pub fn get_config( ClientGenesis::DepositContract }; - if cli_args.is_present("reconstruct-historic-states") { + if cli_args.get_flag("reconstruct-historic-states") { client_config.chain.reconstruct_historic_states = true; client_config.chain.genesis_backfill = true; } - let beacon_graffiti = if let Some(graffiti) = cli_args.value_of("graffiti") { + let beacon_graffiti = if let Some(graffiti) = cli_args.get_one::<String>("graffiti") { GraffitiOrigin::UserSpecified(GraffitiString::from_str(graffiti)?.into()) - } else if cli_args.is_present("private") { + } else if cli_args.get_flag("private") { // When 'private' flag is present, use a zero-initialized bytes array.
GraffitiOrigin::UserSpecified(GraffitiString::empty().into()) } else { @@ -588,7 +597,7 @@ pub fn get_config( }; client_config.beacon_graffiti = beacon_graffiti; - if let Some(wss_checkpoint) = cli_args.value_of("wss-checkpoint") { + if let Some(wss_checkpoint) = cli_args.get_one::<String>("wss-checkpoint") { let mut split = wss_checkpoint.split(':'); let root_str = split .next() @@ -623,8 +632,8 @@ pub fn get_config( client_config.chain.weak_subjectivity_checkpoint = Some(Checkpoint { epoch, root }) } - if let Some(max_skip_slots) = cli_args.value_of("max-skip-slots") { - client_config.chain.import_max_skip_slots = match max_skip_slots { + if let Some(max_skip_slots) = cli_args.get_one::<String>("max-skip-slots") { + client_config.chain.import_max_skip_slots = match max_skip_slots.as_str() { "none" => None, n => Some( n.parse() @@ -638,8 +647,8 @@ pub fn get_config( spec.gossip_max_size as usize, ); - if cli_args.is_present("slasher") { - let slasher_dir = if let Some(slasher_dir) = cli_args.value_of("slasher-dir") { + if cli_args.get_flag("slasher") { + let slasher_dir = if let Some(slasher_dir) = cli_args.get_one::<String>("slasher-dir") { PathBuf::from(slasher_dir) } else { client_config.data_dir().join("slasher_db") @@ -704,11 +713,11 @@ pub fn get_config( client_config.slasher = Some(slasher_config); } - if cli_args.is_present("validator-monitor-auto") { + if cli_args.get_flag("validator-monitor-auto") { client_config.validator_monitor.auto_register = true; } - if let Some(pubkeys) = cli_args.value_of("validator-monitor-pubkeys") { + if let Some(pubkeys) = cli_args.get_one::<String>("validator-monitor-pubkeys") { let pubkeys = pubkeys .split(',') .map(PublicKeyBytes::from_str) @@ -720,7 +729,7 @@ pub fn get_config( .extend_from_slice(&pubkeys); } - if let Some(path) = cli_args.value_of("validator-monitor-file") { + if let Some(path) = cli_args.get_one::<String>("validator-monitor-file") { let string = fs::read(path) .map_err(|e| format!("Unable to read --validator-monitor-file: {}", e)) .and_then(|bytes| { @@ -747,11 +756,11 @@ pub fn get_config( .individual_tracking_threshold = count; } - if cli_args.is_present("disable-lock-timeouts") { + if cli_args.get_flag("disable-lock-timeouts") { client_config.chain.enable_lock_timeouts = false; } - if cli_args.is_present("disable-proposer-reorgs") { + if cli_args.get_flag("disable-proposer-reorgs") { client_config.chain.re_org_head_threshold = None; client_config.chain.re_org_parent_threshold = None; } else { @@ -789,7 +798,7 @@ pub fn get_config( } // Note: This overrides any previous flags that enable this option. - if cli_args.is_present("disable-deposit-contract-sync") { + if cli_args.get_flag("disable-deposit-contract-sync") { client_config.sync_eth1_chain = false; } @@ -801,7 +810,7 @@ pub fn get_config( / DEFAULT_PREPARE_PAYLOAD_LOOKAHEAD_FACTOR }); - client_config.chain.always_prepare_payload = cli_args.is_present("always-prepare-payload"); + client_config.chain.always_prepare_payload = cli_args.get_flag("always-prepare-payload"); if let Some(timeout) = clap_utils::parse_optional(cli_args, "fork-choice-before-proposal-timeout")?
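Taken together, the `config.rs` hunks above and below apply one mechanical translation: clap v2's `is_present` becomes `get_flag` for arguments declared with `ArgAction::SetTrue`, while `value_of`/`values_of` become `get_one::<String>`/`get_many::<String>` for value-taking arguments. The following is a minimal, self-contained sketch of that pattern; the flag names and defaults are illustrative only, not Lighthouse's actual CLI wiring, and it assumes clap 4.5 with the default `String` value parser.

```rust
use clap::{Arg, ArgAction, ArgMatches, Command};

// Illustrative command mirroring the `.action(ArgAction::SetTrue)` /
// `.action(ArgAction::Set)` split used throughout this diff.
fn cli() -> Command {
    Command::new("example")
        .arg(
            Arg::new("purge-db")
                .long("purge-db")
                .action(ArgAction::SetTrue), // boolean flag: read with `get_flag`
        )
        .arg(
            Arg::new("http-port")
                .long("http-port")
                .value_name("PORT")
                .action(ArgAction::Set) // takes a value: read with `get_one`
                .default_value("5052"),
        )
}

fn read(matches: &ArgMatches) -> Result<(bool, u16), String> {
    // clap v2: matches.is_present("purge-db")
    let purge_db = matches.get_flag("purge-db");
    // clap v2: matches.value_of("http-port") returned Option<&str>;
    // clap v4's get_one::<String> returns Option<&String> instead.
    let port = matches
        .get_one::<String>("http-port")
        .expect("--http-port has a default value")
        .parse::<u16>()
        .map_err(|_| "http-port is not a valid u16".to_string())?;
    Ok((purge_db, port))
}

fn main() {
    let matches = cli().get_matches();
    match read(&matches) {
        Ok((purge_db, port)) => println!("purge-db: {purge_db}, port: {port}"),
        Err(e) => eprintln!("{e}"),
    }
}
```

Note that `get_flag` panics if the argument was not declared with `ArgAction::SetTrue`/`SetFalse`, which is why deprecated value-taking flags such as `--progressive-balances` are probed with `get_one::<String>(..).is_some()` rather than `get_flag` elsewhere in this diff.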
@@ -809,10 +818,9 @@ pub fn get_config( client_config.chain.fork_choice_before_proposal_timeout_ms = timeout; } - client_config.chain.always_reset_payload_statuses = - cli_args.is_present("reset-payload-statuses"); + client_config.chain.always_reset_payload_statuses = cli_args.get_flag("reset-payload-statuses"); - client_config.chain.paranoid_block_proposal = cli_args.is_present("paranoid-block-proposal"); + client_config.chain.paranoid_block_proposal = cli_args.get_flag("paranoid-block-proposal"); /* * Builder fallback configs. @@ -826,32 +834,32 @@ pub fn get_config( .builder_fallback_epochs_since_finalization = clap_utils::parse_required(cli_args, "builder-fallback-epochs-since-finalization")?; client_config.chain.builder_fallback_disable_checks = - cli_args.is_present("builder-fallback-disable-checks"); + cli_args.get_flag("builder-fallback-disable-checks"); // Graphical user interface config. - if cli_args.is_present("gui") { + if cli_args.get_flag("gui") { client_config.http_api.enabled = true; client_config.validator_monitor.auto_register = true; } // Optimistic finalized sync. client_config.chain.optimistic_finalized_sync = - !cli_args.is_present("disable-optimistic-finalized-sync"); + !cli_args.get_flag("disable-optimistic-finalized-sync"); - if cli_args.is_present("genesis-backfill") { + if cli_args.get_flag("genesis-backfill") { client_config.chain.genesis_backfill = true; } // Backfill sync rate-limiting client_config.beacon_processor.enable_backfill_rate_limiting = - !cli_args.is_present("disable-backfill-rate-limiting"); + !cli_args.get_flag("disable-backfill-rate-limiting"); if let Some(path) = clap_utils::parse_optional(cli_args, "invalid-gossip-verified-blocks-path")? { client_config.network.invalid_block_storage = Some(path); } - if cli_args.is_present("progressive-balances") { + if cli_args.get_one::<String>("progressive-balances").is_some() { warn!( log, "Progressive balances mode is deprecated"; @@ -890,10 +898,9 @@ pub fn parse_listening_addresses( log: &Logger, ) -> Result<ListenAddress, String> { let listen_addresses_str = cli_args - .values_of("listen-address") + .get_many::<String>("listen-address") .expect("--listen_addresses has a default value"); - - let use_zero_ports = cli_args.is_present("zero-ports"); + let use_zero_ports = parse_flag(cli_args, "zero-ports"); // parse the possible ips let mut maybe_ipv4 = None; @@ -927,28 +934,28 @@ // parse the possible tcp ports let port = cli_args - .value_of("port") + .get_one::<String>("port") .expect("--port has a default value") .parse::<u16>() .map_err(|parse_error| format!("Failed to parse --port as an integer: {parse_error}"))?; let port6 = cli_args - .value_of("port6") - .map(str::parse::<u16>) + .get_one::<String>("port6") + .map(|s| str::parse::<u16>(s)) .transpose() .map_err(|parse_error| format!("Failed to parse --port6 as an integer: {parse_error}"))? .unwrap_or(9090); // parse the possible discovery ports. let maybe_disc_port = cli_args - .value_of("discovery-port") - .map(str::parse::<u16>) + .get_one::<String>("discovery-port") + .map(|s| str::parse::<u16>(s)) .transpose() .map_err(|parse_error| { format!("Failed to parse --discovery-port as an integer: {parse_error}") })?; let maybe_disc6_port = cli_args - .value_of("discovery-port6") - .map(str::parse::<u16>) + .get_one::<String>("discovery-port6") + .map(|s| str::parse::<u16>(s)) .transpose() .map_err(|parse_error| { format!("Failed to parse --discovery-port6 as an integer: {parse_error}") })?; // parse the possible quic port.
let maybe_quic_port = cli_args - .value_of("quic-port") - .map(str::parse::<u16>) + .get_one::<String>("quic-port") + .map(|s| str::parse::<u16>(s)) .transpose() .map_err(|parse_error| { format!("Failed to parse --quic-port as an integer: {parse_error}") @@ -965,8 +972,8 @@ // parse the possible quic port. let maybe_quic6_port = cli_args - .value_of("quic-port6") - .map(str::parse::<u16>) + .get_one::<String>("quic-port6") + .map(|s| str::parse::<u16>(s)) .transpose() .map_err(|parse_error| { format!("Failed to parse --quic6-port as an integer: {parse_error}") @@ -980,10 +987,10 @@ } (None, Some(ipv6)) => { // A single ipv6 address was provided. Set the ports - - if cli_args.is_present("port6") { - warn!(log, "When listening only over IPv6, use the --port flag. The value of --port6 will be ignored.") + if cli_args.value_source("port6") == Some(ValueSource::CommandLine) { + warn!(log, "When listening only over IPv6, use the --port flag. The value of --port6 will be ignored."); } + // use zero ports if required. If not, use the given port. let tcp_port = use_zero_ports .then(unused_port::unused_tcp6_port) @@ -1117,41 +1124,41 @@ pub fn set_network_config( log: &Logger, ) -> Result<(), String> { // If a network dir has been specified, override the `datadir` definition. - if let Some(dir) = cli_args.value_of("network-dir") { + if let Some(dir) = cli_args.get_one::<String>("network-dir") { config.network_dir = PathBuf::from(dir); } else { config.network_dir = data_dir.join(DEFAULT_NETWORK_DIR); }; - if cli_args.is_present("subscribe-all-subnets") { + if parse_flag(cli_args, "subscribe-all-subnets") { config.subscribe_all_subnets = true; } - if cli_args.is_present("import-all-attestations") { + if parse_flag(cli_args, "import-all-attestations") { config.import_all_attestations = true; } - if cli_args.is_present("shutdown-after-sync") { + if parse_flag(cli_args, "shutdown-after-sync") { config.shutdown_after_sync = true; } config.set_listening_addr(parse_listening_addresses(cli_args, log)?); // A custom target-peers command will overwrite the --proposer-only default.
- if let Some(target_peers_str) = cli_args.value_of("target-peers") { + if let Some(target_peers_str) = cli_args.get_one::<String>("target-peers") { config.target_peers = target_peers_str .parse::<usize>() .map_err(|_| format!("Invalid number of target peers: {}", target_peers_str))?; } - if let Some(value) = cli_args.value_of("network-load") { + if let Some(value) = cli_args.get_one::<String>("network-load") { let network_load = value .parse::<u8>() .map_err(|_| format!("Invalid integer: {}", value))?; config.network_load = network_load; } - if let Some(boot_enr_str) = cli_args.value_of("boot-nodes") { + if let Some(boot_enr_str) = cli_args.get_one::<String>("boot-nodes") { let mut enrs: Vec<Enr> = vec![]; let mut multiaddrs: Vec<Multiaddr> = vec![]; for addr in boot_enr_str.split(',') { @@ -1176,7 +1183,7 @@ pub fn set_network_config( config.boot_nodes_multiaddr = multiaddrs; } - if let Some(libp2p_addresses_str) = cli_args.value_of("libp2p-addresses") { + if let Some(libp2p_addresses_str) = cli_args.get_one::<String>("libp2p-addresses") { config.libp2p_nodes = libp2p_addresses_str .split(',') .map(|multiaddr| { @@ -1187,11 +1194,11 @@ pub fn set_network_config( .collect::<Result<Vec<Multiaddr>, _>>()?; } - if cli_args.is_present("disable-peer-scoring") { + if parse_flag(cli_args, "disable-peer-scoring") { config.disable_peer_scoring = true; } - if let Some(trusted_peers_str) = cli_args.value_of("trusted-peers") { + if let Some(trusted_peers_str) = cli_args.get_one::<String>("trusted-peers") { config.trusted_peers = trusted_peers_str .split(',') .map(|peer_id| { @@ -1205,7 +1212,7 @@ pub fn set_network_config( } } - if let Some(enr_udp_port_str) = cli_args.value_of("enr-udp-port") { + if let Some(enr_udp_port_str) = cli_args.get_one::<String>("enr-udp-port") { config.enr_udp4_port = Some( enr_udp_port_str .parse::() @@ -1213,7 +1220,7 @@ ); } - if let Some(enr_quic_port_str) = cli_args.value_of("enr-quic-port") { + if let Some(enr_quic_port_str) = cli_args.get_one::<String>("enr-quic-port") { config.enr_quic4_port = Some( enr_quic_port_str .parse::() @@ -1221,7 +1228,7 @@ ); } - if let Some(enr_tcp_port_str) = cli_args.value_of("enr-tcp-port") { + if let Some(enr_tcp_port_str) = cli_args.get_one::<String>("enr-tcp-port") { config.enr_tcp4_port = Some( enr_tcp_port_str .parse::() @@ -1229,7 +1236,7 @@ ); } - if let Some(enr_udp_port_str) = cli_args.value_of("enr-udp6-port") { + if let Some(enr_udp_port_str) = cli_args.get_one::<String>("enr-udp6-port") { config.enr_udp6_port = Some( enr_udp_port_str .parse::() @@ -1237,7 +1244,7 @@ ); } - if let Some(enr_quic_port_str) = cli_args.value_of("enr-quic6-port") { + if let Some(enr_quic_port_str) = cli_args.get_one::<String>("enr-quic6-port") { config.enr_quic6_port = Some( enr_quic_port_str .parse::() @@ -1245,7 +1252,7 @@ ); } - if let Some(enr_tcp_port_str) = cli_args.value_of("enr-tcp6-port") { + if let Some(enr_tcp_port_str) = cli_args.get_one::<String>("enr-tcp6-port") { config.enr_tcp6_port = Some( enr_tcp_port_str .parse::() @@ -1253,7 +1260,7 @@ ); } - if cli_args.is_present("enr-match") { + if parse_flag(cli_args, "enr-match") { // Match the IP and UDP port in the ENR.
if let Some(ipv4_addr) = config.listen_addrs().v4().cloned() { @@ -1291,7 +1298,7 @@ pub fn set_network_config( } } - if let Some(enr_addresses) = cli_args.values_of("enr-address") { + if let Some(enr_addresses) = cli_args.get_many::<String>("enr-address") { let mut enr_ip4 = None; let mut enr_ip6 = None; let mut resolved_enr_ip4 = None; @@ -1369,79 +1376,78 @@ pub fn set_network_config( } } - if cli_args.is_present("disable-enr-auto-update") { + if parse_flag(cli_args, "disable-enr-auto-update") { config.discv5_config.enr_update = false; } - if cli_args.is_present("disable-packet-filter") { + if parse_flag(cli_args, "disable-packet-filter") { warn!(log, "Discv5 packet filter is disabled"); config.discv5_config.enable_packet_filter = false; } - if cli_args.is_present("disable-discovery") { + if parse_flag(cli_args, "disable-discovery") { config.disable_discovery = true; warn!(log, "Discovery is disabled. New peers will not be found"); } - if cli_args.is_present("disable-quic") { + if parse_flag(cli_args, "disable-quic") { config.disable_quic_support = true; } - if cli_args.is_present("disable-upnp") { + if parse_flag(cli_args, "disable-upnp") { config.upnp_enabled = false; } - if cli_args.is_present("private") { + if parse_flag(cli_args, "private") { config.private = true; } - if cli_args.is_present("metrics") { + if parse_flag(cli_args, "metrics") { config.metrics_enabled = true; } - if cli_args.is_present("enable-private-discovery") { + if parse_flag(cli_args, "enable-private-discovery") { config.discv5_config.table_filter = |_| true; } // Light client server config. - config.enable_light_client_server = cli_args.is_present("light-client-server"); + config.enable_light_client_server = parse_flag(cli_args, "light-client-server"); - // The self limiter is disabled by default. - // This flag can be used both with or without a value. Try to parse it first with a value, if - // no value is defined but the flag is present, use the default params. - config.outbound_rate_limiter_config = clap_utils::parse_optional(cli_args, "self-limiter")?; - if cli_args.is_present("self-limiter") && config.outbound_rate_limiter_config.is_none() { - config.outbound_rate_limiter_config = Some(Default::default()); + // The self limiter is disabled by default. If the `self-limiter` flag is provided + // without the `self-limiter-protocols` flag, the default params will be used. + if parse_flag(cli_args, "self-limiter") { + config.outbound_rate_limiter_config = + if let Some(protocols) = cli_args.get_one::<String>("self-limiter-protocols") { + Some(protocols.parse()?) + } else { + Some(Default::default()) + }; } // Proposer-only mode overrides a number of previous configuration parameters. // Specifically, we avoid subscribing to long-lived subnets and wish to maintain a minimal set // of peers. - if cli_args.is_present("proposer-only") { + if parse_flag(cli_args, "proposer-only") { config.subscribe_all_subnets = false; - if cli_args.value_of("target-peers").is_none() { + if cli_args.get_one::<String>("target-peers").is_none() { // If a custom value is not set, change the default to 15 config.target_peers = 15; } config.proposer_only = true; warn!(log, "Proposer-only mode enabled"; "info"=> "Do not connect a validator client to this node unless via the --proposer-nodes flag"); } - // The inbound rate limiter is enabled by default unless `disabled` is passed to the - // `inbound-rate-limiter` flag. Any other value should be parsed as a configuration string.
- config.inbound_rate_limiter_config = match cli_args.value_of("inbound-rate-limiter") { - None => { - // Enabled by default, with default values + // The inbound rate limiter is enabled by default unless `disabled` via the + // `disable-inbound-rate-limiter` flag. + config.inbound_rate_limiter_config = if parse_flag(cli_args, "disable-inbound-rate-limiter") { + None + } else { + // Use the default unless values are provided via the `inbound-rate-limiter-protocols` + if let Some(protocols) = cli_args.get_one::<String>("inbound-rate-limiter-protocols") { + Some(protocols.parse()?) + } else { Some(Default::default()) } - Some("disabled") => { - // Explicitly disabled - None - } - Some(config_str) => { - // Enabled with a custom configuration - Some(config_str.parse()?) - } }; Ok(()) } @@ -1454,7 +1460,7 @@ pub fn get_data_dir(cli_args: &ArgMatches) -> PathBuf { // directory and the testnet name onto it. cli_args - .value_of("datadir") + .get_one::<String>("datadir") .map(|path| PathBuf::from(path).join(DEFAULT_BEACON_NODE_DIR)) .or_else(|| { dirs::home_dir().map(|home| { diff --git a/beacon_node/src/lib.rs b/beacon_node/src/lib.rs index ee782c650e..4ca084c316 100644 --- a/beacon_node/src/lib.rs +++ b/beacon_node/src/lib.rs @@ -1,4 +1,3 @@ -#[macro_use] extern crate clap; mod cli; @@ -44,7 +43,7 @@ impl ProductionBeaconNode { /// configurations hosted remotely. pub async fn new_from_cli( context: RuntimeContext<E>, - matches: ArgMatches<'static>, + matches: ArgMatches, ) -> Result<Self, String> { let client_config = get_config::<E>(&matches, &context)?; Self::new(context, client_config).await diff --git a/beacon_node/store/Cargo.toml b/beacon_node/store/Cargo.toml index b782267007..7bf1ef76be 100644 --- a/beacon_node/store/Cargo.toml +++ b/beacon_node/store/Cargo.toml @@ -25,7 +25,3 @@ lru = { workspace = true } sloggers = { workspace = true } directory = { workspace = true } strum = { workspace = true } -safe_arith = { workspace = true } -bls = { workspace = true } -smallvec = { workspace = true } -logging = { workspace = true } diff --git a/beacon_node/store/src/metadata.rs b/beacon_node/store/src/metadata.rs index 1675051bd8..116926ad3f 100644 --- a/beacon_node/store/src/metadata.rs +++ b/beacon_node/store/src/metadata.rs @@ -4,7 +4,7 @@ use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use types::{Checkpoint, Hash256, Slot}; -pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(19); +pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(20); // All the keys that get stored under the `BeaconMeta` column. // diff --git a/book/src/api-lighthouse.md b/book/src/api-lighthouse.md index 6ef00578f7..b63505c490 100644 --- a/book/src/api-lighthouse.md +++ b/book/src/api-lighthouse.md @@ -686,7 +686,7 @@ The first few lines of the response would look like: "slot": "1", "parent_slot": "0", "proposer_index": 93, - "graffiti": "EF #vm-eth2-raw-iron-prater-101" + "graffiti": "EF #vm-eth2-raw-iron-101" }, "attestation_rewards": { "total": 637260, diff --git a/book/src/api-vc-auth-header.md b/book/src/api-vc-auth-header.md index ca0cc098d9..f2f9caf46b 100644 --- a/book/src/api-vc-auth-header.md +++ b/book/src/api-vc-auth-header.md @@ -31,7 +31,7 @@ When starting the validator client it will output a log message containing the p
```text -Sep 28 19:17:52.615 INFO HTTP API started api_token_file: "$HOME/prater/validators/api-token.txt", listen_address: 127.0.0.1:5062 +Sep 28 19:17:52.615 INFO HTTP API started api_token_file: "$HOME/holesky/validators/api-token.txt", listen_address: 127.0.0.1:5062 ``` The _path_ to the API token may also be fetched from the HTTP API itself (this endpoint is the only @@ -45,7 +45,7 @@ Response: ```json { - "token_path": "/home/karlm/.lighthouse/prater/validators/api-token.txt" + "token_path": "/home/karlm/.lighthouse/holesky/validators/api-token.txt" } ``` diff --git a/book/src/api-vc-endpoints.md b/book/src/api-vc-endpoints.md index 22f0064745..a36aa73708 100644 --- a/book/src/api-vc-endpoints.md +++ b/book/src/api-vc-endpoints.md @@ -225,7 +225,7 @@ Example Response Body ```json { "data": { - "CONFIG_NAME": "prater", + "CONFIG_NAME": "holesky", "PRESET_BASE": "mainnet", "TERMINAL_TOTAL_DIFFICULTY": "10790000", "TERMINAL_BLOCK_HASH": "0x0000000000000000000000000000000000000000000000000000000000000000", @@ -353,7 +353,7 @@ Example Response Body ```json { - "token_path": "/home/karlm/.lighthouse/prater/validators/api-token.txt" + "token_path": "/home/karlm/.lighthouse/holesky/validators/api-token.txt" } ``` diff --git a/book/src/help_bn.md b/book/src/help_bn.md index e77db3df54..b458842e08 100644 --- a/book/src/help_bn.md +++ b/book/src/help_bn.md @@ -1,490 +1,610 @@ # Beacon Node ``` -Sigma Prime -The primary component which connects to the Ethereum 2.0 P2P network and downloads, verifies and stores blocks. Provides -a HTTP API for querying the beacon chain and publishing messages to the network. +The primary component which connects to the Ethereum 2.0 P2P network and +downloads, verifies and stores blocks. Provides a HTTP API for querying the +beacon chain and publishing messages to the network. -USAGE: - lighthouse beacon_node [FLAGS] [OPTIONS] +Usage: lighthouse beacon_node [OPTIONS] -FLAGS: - --allow-insecure-genesis-sync Enable syncing from genesis, which is generally insecure and incompatible - with data availability checks. Checkpoint syncing is the preferred method - for syncing a node. Only use this flag when testing. DO NOT use on - mainnet! - --always-prefer-builder-payload This flag is deprecated and has no effect. - --always-prepare-payload Send payload attributes with every fork choice update. This is intended - for use by block builders, relays and developers. You should set a fee - recipient on this BN and also consider adjusting the --prepare-payload- - lookahead flag. - --builder-fallback-disable-checks This flag disables all checks related to chain health. This means the - builder API will always be used for payload construction, regardless of - recent chain conditions. - --compact-db If present, apply compaction to the database on start-up. Use with - caution. It is generally not recommended unless auto-compaction is - disabled. - --disable-backfill-rate-limiting Disable the backfill sync rate-limiting. This allow users to just sync - the entire chain as fast as possible, however it can result in resource - contention which degrades staking performance. Stakers should generally - choose to avoid this flag since backfill sync is not required for - staking. - --disable-deposit-contract-sync Explicitly disables syncing of deposit logs from the execution node. This - overrides any previous option that depends on it. Useful if you intend to - run a non-validating beacon node. - --disable-duplicate-warn-logs This flag is deprecated and has no effect. 
- -x, --disable-enr-auto-update Discovery automatically updates the nodes local ENR with an external IP - address and port as seen by other peers on the network. This disables - this feature, fixing the ENR's IP/PORT to those specified on boot. - --disable-lock-timeouts Disable the timeouts applied to some internal locks by default. This can - lead to less spurious failures on slow hardware but is considered - experimental as it may obscure performance issues. - --disable-log-timestamp If present, do not include timestamps in logging output. - --disable-malloc-tuning If present, do not configure the system allocator. Providing this flag - will generally increase memory usage, it should only be provided when - debugging specific memory allocation issues. - --disable-optimistic-finalized-sync Force Lighthouse to verify every execution block hash with the execution - client during finalized sync. By default block hashes will be checked in - Lighthouse and only passed to the EL if initial verification fails. - --disable-packet-filter Disables the discovery packet filter. Useful for testing in smaller - networks - --disable-proposer-reorgs Do not attempt to reorg late blocks from other validators when proposing. - --disable-quic Disables the quic transport. The node will rely solely on the TCP - transport for libp2p connections. - --disable-upnp Disables UPnP support. Setting this will prevent Lighthouse from - attempting to automatically establish external port mappings. - --dummy-eth1 If present, uses an eth1 backend that generates static dummy - data.Identical to the method used at the 2019 Canada interop. - --enable-private-discovery Lighthouse by default does not discover private IP addresses. Set this - flag to enable connection attempts to local addresses. - -e, --enr-match Sets the local ENR IP address and port to match those set for lighthouse. - Specifically, the IP address will be the value of --listen-address and - the UDP port will be --discovery-port. - --eth1 If present the node will connect to an eth1 node. This is required for - block production, you must use this flag if you wish to serve a - validator. - --eth1-purge-cache Purges the eth1 block and deposit caches - --genesis-backfill Attempts to download blocks all the way back to genesis when checkpoint - syncing. - --gui Enable the graphical user interface and all its requirements. This - enables --http and --validator-monitor-auto and enables SSE logging. - -h, --help Prints help information - --http Enable the RESTful HTTP API server. Disabled by default. - --http-enable-tls Serves the RESTful HTTP API server over TLS. This feature is currently - experimental. - --import-all-attestations Import and aggregate all attestations, regardless of validator - subscriptions. This will only import attestations from already-subscribed - subnets, use with --subscribe-all-subnets to ensure all attestations are - received for import. - --light-client-server Act as a full node supporting light clients on the p2p network - [experimental] - --log-color Force outputting colors when emitting logs to the terminal. - --logfile-compress If present, compress old log files. This can help reduce the space needed - to store old logs. - --logfile-no-restricted-perms If present, log files will be generated as world-readable meaning they - can be read by any user on the machine. Note that logs can often contain - sensitive information about your validator and so this flag should be - used with caution. 
For Windows users, the log file permissions will be - inherited from the parent folder. - --metrics Enable the Prometheus metrics HTTP server. Disabled by default. - --private Prevents sending various client identification information. - --proposer-only Sets this beacon node at be a block proposer only node. This will run the - beacon node in a minimal configuration that is sufficient for block - publishing only. This flag should be used for a beacon node being - referenced by validator client using the --proposer-node flag. This - configuration is for enabling more secure setups. - --purge-db If present, the chain database will be deleted. Use with caution. - --reconstruct-historic-states After a checkpoint sync, reconstruct historic states in the database. - This requires syncing all the way back to genesis. - --reset-payload-statuses When present, Lighthouse will forget the payload statuses of any already- - imported blocks. This can assist in the recovery from a consensus - failure caused by the execution layer. - --shutdown-after-sync Shutdown beacon node as soon as sync is completed. Backfill sync will not - be performed before shutdown. - --slasher Run a slasher alongside the beacon node. It is currently only recommended - for expert users because of the immaturity of the slasher UX and the - extra resources required. - --staking Standard option for a staking beacon node. This will enable the HTTP - server on localhost:5052 and import deposit logs from the execution node. - This is equivalent to `--http` on merge-ready networks, or `--http - --eth1` pre-merge - --subscribe-all-subnets Subscribe to all subnets regardless of validator count. This will also - advertise the beacon node as being long-lived subscribed to all subnets. - --validator-monitor-auto Enables the automatic detection and monitoring of validators connected to - the HTTP API and using the subnet subscription endpoint. This generally - has the effect of providing additional logging and metrics for locally - controlled validators. - -V, --version Prints version information - -z, --zero-ports Sets all listening TCP/UDP ports to 0, allowing the OS to choose some - arbitrary free ports. +Options: + --auto-compact-db + Enable or disable automatic compaction of the database on + finalization. [default: true] + --blob-prune-margin-epochs + The margin for blob pruning in epochs. The oldest blobs are pruned up + until data_availability_boundary - blob_prune_margin_epochs. [default: + 0] + --blobs-dir + Data directory for the blobs database. + --block-cache-size + Specifies how many blocks the database should cache in memory + [default: 5] + --boot-nodes + One or more comma-delimited base64-encoded ENR's to bootstrap the p2p + network. Multiaddr is also supported. + --builder + The URL of a service compatible with the MEV-boost API. + --builder-fallback-epochs-since-finalization + If this node is proposing a block and the chain has not finalized + within this number of epochs, it will NOT query any connected + builders, and will use the local execution engine for payload + construction. Setting this value to anything less than 2 will cause + the node to NEVER query connected builders. Setting it to 2 will cause + this condition to be hit if there are skip slots at the start of an + epoch, right before this node is set to propose.
[default: 3] + --builder-fallback-skips + If this node is proposing a block and has seen this number of skip + slots on the canonical chain in a row, it will NOT query any connected + builders, and will use the local execution engine for payload + construction. [default: 3] + --builder-fallback-skips-per-epoch + If this node is proposing a block and has seen this number of skip + slots on the canonical chain in the past `SLOTS_PER_EPOCH`, it will + NOT query any connected builders, and will use the local execution + engine for payload construction. [default: 8] + --builder-header-timeout + Defines a timeout value (in milliseconds) to use when fetching a block + header from the builder API. [default: 1000] + --builder-profit-threshold + This flag is deprecated and has no effect. + --builder-user-agent + The HTTP user agent to send alongside requests to the builder URL. The + default is Lighthouse's version string. + --checkpoint-blobs + Set the checkpoint blobs to start syncing from. Must be aligned and + match --checkpoint-block. Using --checkpoint-sync-url instead is + recommended. + --checkpoint-block + Set a checkpoint block to start syncing from. Must be aligned and + match --checkpoint-state. Using --checkpoint-sync-url instead is + recommended. + --checkpoint-state + Set a checkpoint state to start syncing from. Must be aligned and + match --checkpoint-block. Using --checkpoint-sync-url instead is + recommended. + --checkpoint-sync-url + Set the remote beacon node HTTP endpoint to use for checkpoint sync. + --checkpoint-sync-url-timeout + Set the timeout for checkpoint sync calls to remote beacon node HTTP + endpoint. [default: 180] + -d, --datadir + Used to specify a custom root data directory for lighthouse keys and + databases. Defaults to $HOME/.lighthouse/{network} where network is + the value of the `network` flag. Note: Users should specify separate + custom datadirs for different networks. + --debug-level + Specifies the verbosity level used when emitting logs to the terminal. + [default: info] [possible values: info, debug, trace, warn, error, + crit] + --discovery-port + The UDP port that discovery will listen on. Defaults to `port` + --discovery-port6 + The UDP port that discovery will listen on over IPv6 if listening over + both IPv4 and IPv6. Defaults to `port6` + --enr-address
... + The IP address/DNS address to broadcast to other peers on how to + reach this node. If a DNS address is provided, the enr-address is set + to the IP address it resolves to and does not auto-update based on + PONG responses in discovery. Set this only if you are sure other nodes + can connect to your local node on this address. This will update the + `ip4` or `ip6` ENR fields accordingly. To update both, set this flag + twice with the different values. + --enr-quic-port + The quic UDP4 port that will be set on the local ENR. Set this only if + you are sure other nodes can connect to your local node on this port + over IPv4. + --enr-quic6-port + The quic UDP6 port that will be set on the local ENR. Set this only if + you are sure other nodes can connect to your local node on this port + over IPv6. + --enr-tcp-port + The TCP4 port of the local ENR. Set this only if you are sure other + nodes can connect to your local node on this port over IPv4. The + --port flag is used if this is not set. + --enr-tcp6-port + The TCP6 port of the local ENR. Set this only if you are sure other + nodes can connect to your local node on this port over IPv6. The + --port6 flag is used if this is not set. + --enr-udp-port + The UDP4 port of the local ENR. Set this only if you are sure other + nodes can connect to your local node on this port over IPv4. + --enr-udp6-port + The UDP6 port of the local ENR. Set this only if you are sure other + nodes can connect to your local node on this port over IPv6. + --epochs-per-blob-prune + The epoch interval with which to prune blobs from Lighthouse's + database when they are older than the data availability boundary + relative to the current epoch. [default: 1] + --epochs-per-migration + The number of epochs to wait between running the migration of data + from the hot DB to the cold DB. Less frequent runs can be useful for + minimizing disk writes [default: 1] + --eth1-blocks-per-log-query + Specifies the number of blocks that a deposit log query should span. + This will reduce the size of responses from the Eth1 endpoint. + [default: 1000] + --eth1-cache-follow-distance + Specifies the distance between the Eth1 chain head and the last block + which should be imported into the cache. Setting this value lower can + help compensate for irregular Proof-of-Work block times, but setting + it too low can make the node vulnerable to re-orgs. + --execution-endpoint + Server endpoint for an execution layer JWT-authenticated HTTP JSON-RPC + connection. Uses the same endpoint to populate the deposit cache. + --execution-jwt + File path which contains the hex-encoded JWT secret for the execution + endpoint provided in the --execution-endpoint flag. + --execution-jwt-id + Used by the beacon node to communicate a unique identifier to + execution nodes during JWT authentication. It corresponds to the 'id' + field in the JWT claims object. Set to empty by default + --execution-jwt-secret-key + Hex-encoded JWT secret for the execution endpoint provided in the + --execution-endpoint flag. + --execution-jwt-version + Used by the beacon node to communicate a client version to execution + nodes during JWT authentication. It corresponds to the 'clv' field in + the JWT claims object. Set to empty by default + --execution-timeout-multiplier + Unsigned integer to multiply the default execution timeouts by. + [default: 1] + --fork-choice-before-proposal-timeout + Set the maximum number of milliseconds to wait for fork choice before + proposing a block.
You can prevent waiting at all by setting the + timeout to 0; however, you risk proposing atop the wrong parent block. + [default: 250] + --freezer-dir + Data directory for the freezer database. + --genesis-state-url + A URL of a beacon-API compatible server from which to download the + genesis state. Checkpoint sync server URLs can generally be used with + this flag. If not supplied, a default URL or the --checkpoint-sync-url + may be used. If the genesis state is already included in this binary + then this value will be ignored. + --genesis-state-url-timeout + The timeout in seconds for the request to --genesis-state-url. + [default: 180] + --graffiti + Specify your custom graffiti to be included in blocks. Defaults to the + current version and commit, truncated to fit in 32 bytes. + --historic-state-cache-size + Specifies how many states from the freezer database should be cached in + memory [default: 1] + --http-address
+ Set the listen address for the RESTful HTTP API server. + --http-allow-origin + Set the value of the Access-Control-Allow-Origin response HTTP header. + Use * to allow any origin (not recommended in production). If no value + is supplied, the CORS allowed origin is set to the listen address of + this server (e.g., http://localhost:5052). + --http-duplicate-block-status + Status code to send when a block that is already known is POSTed to + the HTTP API. + --http-enable-beacon-processor + The beacon processor is a scheduler which provides quality-of-service + and DoS protection. When set to "true", HTTP API requests will be + queued and scheduled alongside other tasks. When set to "false", HTTP + API responses will be executed immediately. + --http-port + Set the listen TCP port for the RESTful HTTP API server. + --http-sse-capacity-multiplier + Multiplier to apply to the length of HTTP server-sent-event (SSE) + channels. Increasing this value can prevent messages from being + dropped. + --http-tls-cert + The path of the certificate to be used when serving the HTTP API + server over TLS. + --http-tls-key + The path of the private key to be used when serving the HTTP API + server over TLS. Must not be password-protected. + --inbound-rate-limiter-protocols + Configures the inbound rate limiter (requests received by this + node). Rate limit quotas per protocol can be set in the form of + <protocol_name>:<tokens>/<time_in_seconds>. To set quotas for multiple + protocols, separate them by ';'. This is enabled by default, using + default quotas. To disable rate limiting use the + disable-inbound-rate-limiter flag instead. + --invalid-gossip-verified-blocks-path + If a block succeeds gossip validation whilst failing full validation, + store the block SSZ as a file at this path. This feature is only + recommended for developers. This directory is not pruned, users should + be careful to avoid filling up their disks. + --libp2p-addresses + One or more comma-delimited multiaddrs to manually connect to a libp2p + peer without an ENR. + --listen-address [
...] + The address lighthouse will listen on for UDP and TCP connections. To + listen over IPv4 and IPv6, set this flag twice with the different + values. + Examples: + - --listen-address '0.0.0.0' will listen over IPv4. + - --listen-address '::' will listen over IPv6. + - --listen-address '0.0.0.0' --listen-address '::' will listen over + both IPv4 and IPv6. The order of the given addresses is not relevant. + However, multiple IPv4, or multiple IPv6 addresses will not be + accepted. [default: 0.0.0.0] + --log-format + Specifies the log format used when emitting logs to the terminal. + [possible values: JSON] + --logfile + File path where the log file will be stored. Once it grows to the + value specified in `--logfile-max-size` a new log file is generated + where future logs are stored. Once the number of log files exceeds the + value specified in `--logfile-max-number` the oldest log file will be + overwritten. + --logfile-debug-level + The verbosity level used when emitting logs to the log file. [default: + debug] [possible values: info, debug, trace, warn, error, crit] + --logfile-format + Specifies the log format used when emitting logs to the logfile. + [possible values: DEFAULT, JSON] + --logfile-max-number + The maximum number of log files that will be stored. If set to 0, + background file logging is disabled. [default: 5] + --logfile-max-size + The maximum size (in MB) each log file can grow to before rotating. If + set to 0, background file logging is disabled. [default: 200] + --max-skip-slots + Refuse to skip more than this many slots when processing an + attestation. This prevents nodes on minority forks from wasting our + time and disk space, but could also cause unnecessary consensus + failures, so is disabled by default. + --metrics-address
+ Set the listen address for the Prometheus metrics HTTP server. + --metrics-allow-origin + Set the value of the Access-Control-Allow-Origin response HTTP header. + Use * to allow any origin (not recommended in production). If no value + is supplied, the CORS allowed origin is set to the listen address of + this server (e.g., http://localhost:5054). + --metrics-port + Set the listen TCP port for the Prometheus metrics HTTP server. + --monitoring-endpoint
+ Enables the monitoring service for sending system metrics to a remote + endpoint. This can be used to monitor your setup on certain services + (e.g. beaconcha.in). This flag sets the endpoint where the beacon node + metrics will be sent. Note: This will send information to a remote + server which may identify and associate your validators, IP address and + other personal information. Always use an HTTPS connection and never + provide an untrusted URL. + --monitoring-endpoint-period + Defines how many seconds to wait between each message sent to the + monitoring-endpoint. Default: 60s + --network + Name of the Eth2 chain Lighthouse will sync and follow. [possible + values: mainnet, gnosis, chiado, sepolia, holesky] + --network-dir + Data directory for network keys. Defaults to network/ inside the + beacon node dir. + --port + The TCP/UDP ports to listen on. There are two UDP ports. The discovery + UDP port will be set to this value and the Quic UDP port will be set + to this value + 1. The discovery port can be modified by the + --discovery-port flag and the quic port can be modified by the + --quic-port flag. If listening over both IPv4 and IPv6 the --port flag + will apply to the IPv4 address and --port6 to the IPv6 address. + [default: 9000] + --port6 + The TCP/UDP ports to listen on over IPv6 when listening over both IPv4 + and IPv6. Defaults to 9090 when required. The Quic UDP port will be + set to this value + 1. [default: 9090] + --prepare-payload-lookahead + The time before the start of a proposal slot at which payload + attributes should be sent. Low values are useful for execution nodes + which don't improve their payload after the first call, and high + values are useful for ensuring the EL is given ample notice. Default: + 1/3 of a slot. + --progressive-balances + Deprecated. This optimisation is now the default and cannot be + disabled. + --proposer-reorg-cutoff + Maximum delay after the start of the slot at which to propose a + reorging block. Lower values can prevent failed reorgs by ensuring the + block has ample time to propagate and be processed by the network. The + default is 1/12th of a slot (1 second on mainnet) + --proposer-reorg-disallowed-offsets + Comma-separated list of integer offsets which can be used to avoid + proposing reorging blocks at certain slots. An offset of N means that + reorging proposals will not be attempted at any slot such that `slot % + SLOTS_PER_EPOCH == N`. By default only re-orgs at offset 0 will be + avoided. Any offsets supplied with this flag will impose additional + restrictions. + --proposer-reorg-epochs-since-finalization + Maximum number of epochs since finalization at which proposer reorgs + are allowed. Default: 2 + --proposer-reorg-parent-threshold + Percentage of parent vote weight above which to attempt a proposer + reorg. Default: 160% + --proposer-reorg-threshold + Percentage of head vote weight below which to attempt a proposer + reorg. Default: 20% + --prune-blobs + Prune blobs from Lighthouse's database when they are older than the + data availability boundary relative to the current epoch. + [default: true] + --prune-payloads + Prune execution payloads from Lighthouse's database. This saves space + but imposes load on the execution client, as payloads need to be + reconstructed and sent to syncing peers. [default: true] + --quic-port + The UDP port that quic will listen on. Defaults to `port` + 1 + --quic-port6 + The UDP port that quic will listen on over IPv6 if listening over both + IPv4 and IPv6.
Defaults to `port6` + 1 + --safe-slots-to-import-optimistically + Used to coordinate manual overrides of the + SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should only + be used if the user has a clear understanding that the broad Ethereum + community has elected to override this parameter in the event of an + attack at the PoS transition block. Incorrect use of this flag can + cause your node to possibly accept an invalid chain or sync more + slowly. Be extremely careful with this flag. + --self-limiter-protocols + Enables the outbound rate limiter (requests made by this node). Rate + limit quotas per protocol can be set in the form of + <protocol_name>:<tokens>/<time_in_seconds>. To set quotas for multiple + protocols, separate them by ';'. If the self rate limiter is enabled + and a protocol is not present in the configuration, the quotas used + for the inbound rate limiter will be used. + --shuffling-cache-size + Some HTTP API requests can be optimised by caching the shufflings at + each epoch. This flag allows the user to set the shuffling cache size + in epochs. Shufflings are dependent on validator count and setting + this value to a large number can consume a large amount of memory. + --slasher-att-cache-size + Set the maximum number of attestation roots for the slasher to cache + --slasher-backend + Set the database backend to be used by the slasher. [possible values: + lmdb, disabled] + --slasher-broadcast + Broadcast slashings found by the slasher to the rest of the network + [Enabled by default]. [default: true] + --slasher-chunk-size + Number of epochs per validator per chunk stored on disk. + --slasher-dir + Set the slasher's database directory. + --slasher-history-length + Configure how many epochs of history the slasher keeps. Immutable + after initialization. + --slasher-max-db-size + Maximum size of the MDBX database used by the slasher. + --slasher-slot-offset + Set the delay from the start of the slot at which the slasher should + ingest attestations. Only effective if the slasher-update-period is a + multiple of the slot duration. + --slasher-update-period + Configure how often the slasher runs batch processing. + --slasher-validator-chunk-size + Number of validators per chunk stored on disk. + --slots-per-restore-point + Specifies how often a freezer DB restore point should be stored. + Cannot be changed after initialization. [default: 8192 (mainnet) or 64 + (minimal)] + --state-cache-size + Specifies the size of the state cache [default: 128] + --suggested-fee-recipient + Emergency fallback fee recipient for use in case the validator client + does not have one configured. You should set this flag on the + validator client instead of (or in addition to) setting it here. + -t, --testnet-dir + Path to directory containing eth2_testnet specs. Defaults to a + hard-coded Lighthouse testnet. Only effective if there is no existing + database. + --target-peers + The target number of peers. + --terminal-block-hash-epoch-override + Used to coordinate manual overrides to the + TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should only + be used if the user has a clear understanding that the broad Ethereum + community has elected to override the terminal PoW block. Incorrect + use of this flag will cause your node to experience a consensus + failure. Be extremely careful with this flag. + --terminal-block-hash-override + Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH + parameter.
This flag should only be used if the user has a clear + understanding that the broad Ethereum community has elected to + override the terminal PoW block. Incorrect use of this flag will cause + your node to experience a consensus failure. Be extremely careful with + this flag. + --terminal-total-difficulty-override + Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY + parameter. Accepts a 256-bit decimal integer (not a hex value). This + flag should only be used if the user has a clear understanding that + the broad Ethereum community has elected to override the terminal + difficulty. Incorrect use of this flag will cause your node to + experience a consensus failure. Be extremely careful with this flag. + --trusted-peers + One or more comma-delimited trusted peer ids which always have the + highest score according to the peer scoring system. + --trusted-setup-file-override + Path to a json file containing the trusted setup params. NOTE: This + will override the trusted setup that is generated from the mainnet kzg + ceremony. Use with caution + --validator-monitor-file + As per --validator-monitor-pubkeys, but the comma-separated list is + contained within a file at the given path. + --validator-monitor-individual-tracking-threshold + Once the validator monitor reaches this number of local validators it + will stop collecting per-validator Prometheus metrics and issuing + per-validator logs. Instead, it will provide aggregate metrics and + logs. This avoids infeasibly high cardinality in the Prometheus + database and high log volume when using many validators. Defaults to + 64. + --validator-monitor-pubkeys + A comma-separated list of 0x-prefixed validator public keys. These + validators will receive special monitoring and additional logging. + --wss-checkpoint + Specify a weak subjectivity checkpoint in `block_root:epoch` format to + verify the node's sync against. The block root should be 0x-prefixed. + Note that this flag is for verification only, to perform a checkpoint + sync from a recent state use --checkpoint-sync-url. + -V, --version + Print version -OPTIONS: - --auto-compact-db - Enable or disable automatic compaction of the database on finalization. [default: true] - - --blob-prune-margin-epochs - The margin for blob pruning in epochs. The oldest blobs are pruned up until data_availability_boundary - - blob_prune_margin_epochs. [default: 0] - --blobs-dir - Data directory for the blobs database. - - --block-cache-size - Specifies how many blocks the database should cache in memory [default: 5] - - --boot-nodes - One or more comma-delimited base64-encoded ENR's to bootstrap the p2p network. Multiaddr is also supported. - - --builder - The URL of a service compatible with the MEV-boost API. - - --builder-fallback-epochs-since-finalization - If this node is proposing a block and the chain has not finalized within this number of epochs, it will NOT - query any connected builders, and will use the local execution engine for payload construction. Setting this - value to anything less than 2 will cause the node to NEVER query connected builders. Setting it to 2 will - cause this condition to be hit if there are skips slots at the start of an epoch, right before this node is - set to propose. [default: 3] - --builder-fallback-skips - If this node is proposing a block and has seen this number of skip slots on the canonical chain in a row, it - will NOT query any connected builders, and will use the local execution engine for payload construction. 
- [default: 3] - --builder-fallback-skips-per-epoch - If this node is proposing a block and has seen this number of skip slots on the canonical chain in the past - `SLOTS_PER_EPOCH`, it will NOT query any connected builders, and will use the local execution engine for - payload construction. [default: 8] - --builder-profit-threshold - This flag is deprecated and has no effect. - - --builder-user-agent - The HTTP user agent to send alongside requests to the builder URL. The default is Lighthouse's version - string. - --checkpoint-blobs - Set the checkpoint blobs to start syncing from. Must be aligned and match --checkpoint-block. Using - --checkpoint-sync-url instead is recommended. - --checkpoint-block - Set a checkpoint block to start syncing from. Must be aligned and match --checkpoint-state. Using - --checkpoint-sync-url instead is recommended. - --checkpoint-state - Set a checkpoint state to start syncing from. Must be aligned and match --checkpoint-block. Using - --checkpoint-sync-url instead is recommended. - --checkpoint-sync-url - Set the remote beacon node HTTP endpoint to use for checkpoint sync. - - --checkpoint-sync-url-timeout - Set the timeout for checkpoint sync calls to remote beacon node HTTP endpoint. [default: 180] - - -d, --datadir - Used to specify a custom root data directory for lighthouse keys and databases. Defaults to - $HOME/.lighthouse/{network} where network is the value of the `network` flag Note: Users should specify - separate custom datadirs for different networks. - --debug-level - Specifies the verbosity level used when emitting logs to the terminal. [default: info] [possible values: - info, debug, trace, warn, error, crit] - --discovery-port - The UDP port that discovery will listen on. Defaults to `port` - - --discovery-port6 - The UDP port that discovery will listen on over IPv6 if listening over both IPv4 and IPv6. Defaults to - `port6` - --enr-address
... - The IP address/ DNS address to broadcast to other peers on how to reach this node. If a DNS address is - provided, the enr-address is set to the IP address it resolves to and does not auto-update based on PONG - responses in discovery. Set this only if you are sure other nodes can connect to your local node on this - address. This will update the `ip4` or `ip6` ENR fields accordingly. To update both, set this flag twice - with the different values. - --enr-quic-port - The quic UDP4 port that will be set on the local ENR. Set this only if you are sure other nodes can connect - to your local node on this port over IPv4. - --enr-quic6-port - The quic UDP6 port that will be set on the local ENR. Set this only if you are sure other nodes can connect - to your local node on this port over IPv6. - --enr-tcp-port - The TCP4 port of the local ENR. Set this only if you are sure other nodes can connect to your local node on - this port over IPv4. The --port flag is used if this is not set. - --enr-tcp6-port - The TCP6 port of the local ENR. Set this only if you are sure other nodes can connect to your local node on - this port over IPv6. The --port6 flag is used if this is not set. - --enr-udp-port - The UDP4 port of the local ENR. Set this only if you are sure other nodes can connect to your local node on - this port over IPv4. - --enr-udp6-port - The UDP6 port of the local ENR. Set this only if you are sure other nodes can connect to your local node on - this port over IPv6. - --epochs-per-blob-prune - The epoch interval with which to prune blobs from Lighthouse's database when they are older than the data - availability boundary relative to the current epoch. [default: 1] - --epochs-per-migration - The number of epochs to wait between running the migration of data from the hot DB to the cold DB. Less - frequent runs can be useful for minimizing disk writes [default: 1] - --eth1-blocks-per-log-query - Specifies the number of blocks that a deposit log query should span. This will reduce the size of responses - from the Eth1 endpoint. [default: 1000] - --eth1-cache-follow-distance - Specifies the distance between the Eth1 chain head and the last block which should be imported into the - cache. Setting this value lower can help compensate for irregular Proof-of-Work block times, but setting it - too low can make the node vulnerable to re-orgs. - --execution-endpoint - Server endpoint for an execution layer JWT-authenticated HTTP JSON-RPC connection. Uses the same endpoint to - populate the deposit cache. - --execution-jwt - File path which contains the hex-encoded JWT secret for the execution endpoint provided in the --execution- - endpoint flag. - --execution-jwt-id - Used by the beacon node to communicate a unique identifier to execution nodes during JWT authentication. It - corresponds to the 'id' field in the JWT claims object.Set to empty by default - --execution-jwt-secret-key - Hex-encoded JWT secret for the execution endpoint provided in the --execution-endpoint flag. - - --execution-jwt-version - Used by the beacon node to communicate a client version to execution nodes during JWT authentication. It - corresponds to the 'clv' field in the JWT claims object.Set to empty by default - --execution-timeout-multiplier - Unsigned integer to multiply the default execution timeouts by. [default: 1] - - --fork-choice-before-proposal-timeout - Set the maximum number of milliseconds to wait for fork choice before proposing a block. 
You can prevent - waiting at all by setting the timeout to 0, however you risk proposing atop the wrong parent block. - [default: 250] - --freezer-dir - Data directory for the freezer database. - - --genesis-state-url - A URL of a beacon-API compatible server from which to download the genesis state. Checkpoint sync server - URLs can generally be used with this flag. If not supplied, a default URL or the --checkpoint-sync-url may - be used. If the genesis state is already included in this binary then this value will be ignored. - --genesis-state-url-timeout - The timeout in seconds for the request to --genesis-state-url. [default: 180] - - --graffiti - Specify your custom graffiti to be included in blocks. Defaults to the current version and commit, truncated - to fit in 32 bytes. - --historic-state-cache-size - Specifies how many states from the freezer database should cache in memory [default: 1] - - --http-address
- Set the listen address for the RESTful HTTP API server. - - --http-allow-origin - Set the value of the Access-Control-Allow-Origin response HTTP header. Use * to allow any origin (not - recommended in production). If no value is supplied, the CORS allowed origin is set to the listen address of - this server (e.g., http://localhost:5052). - --http-duplicate-block-status - Status code to send when a block that is already known is POSTed to the HTTP API. - - --http-enable-beacon-processor - The beacon processor is a scheduler which provides quality-of-service and DoS protection. When set to - "true", HTTP API requests will be queued and scheduled alongside other tasks. When set to "false", HTTP API - responses will be executed immediately. - --http-port - Set the listen TCP port for the RESTful HTTP API server. - - --http-sse-capacity-multiplier - Multiplier to apply to the length of HTTP server-sent-event (SSE) channels. Increasing this value can - prevent messages from being dropped. - --http-tls-cert - The path of the certificate to be used when serving the HTTP API server over TLS. - - --http-tls-key - The path of the private key to be used when serving the HTTP API server over TLS. Must not be password- - protected. - --invalid-gossip-verified-blocks-path - If a block succeeds gossip validation whilst failing full validation, store the block SSZ as a file at this - path. This feature is only recommended for developers. This directory is not pruned, users should be careful - to avoid filling up their disks. - --libp2p-addresses - One or more comma-delimited multiaddrs to manually connect to a libp2p peer without an ENR. - - --listen-address
... - The address lighthouse will listen for UDP and TCP connections. To listen over IpV4 and IpV6 set this flag - twice with the different values. - Examples: - - --listen-address '0.0.0.0' will listen over IPv4. - - --listen-address '::' will listen over IPv6. - - --listen-address '0.0.0.0' --listen-address '::' will listen over both IPv4 and IPv6. The order of the - given addresses is not relevant. However, multiple IPv4, or multiple IPv6 addresses will not be accepted. - [default: 0.0.0.0] - --log-format - Specifies the log format used when emitting logs to the terminal. [possible values: JSON] - - --logfile - File path where the log file will be stored. Once it grows to the value specified in `--logfile-max-size` a - new log file is generated where future logs are stored. Once the number of log files exceeds the value - specified in `--logfile-max-number` the oldest log file will be overwritten. - --logfile-debug-level - The verbosity level used when emitting logs to the log file. [default: debug] [possible values: info, - debug, trace, warn, error, crit] - --logfile-format - Specifies the log format used when emitting logs to the logfile. [possible values: DEFAULT, JSON] - - --logfile-max-number - The maximum number of log files that will be stored. If set to 0, background file logging is disabled. - [default: 5] - --logfile-max-size - The maximum size (in MB) each log file can grow to before rotating. If set to 0, background file logging is - disabled. [default: 200] - --max-skip-slots - Refuse to skip more than this many slots when processing an attestation. This prevents nodes on minority - forks from wasting our time and disk space, but could also cause unnecessary consensus failures, so is - disabled by default. - --metrics-address
- Set the listen address for the Prometheus metrics HTTP server. - - --metrics-allow-origin - Set the value of the Access-Control-Allow-Origin response HTTP header. Use * to allow any origin (not - recommended in production). If no value is supplied, the CORS allowed origin is set to the listen address of - this server (e.g., http://localhost:5054). - --metrics-port - Set the listen TCP port for the Prometheus metrics HTTP server. - - --monitoring-endpoint
- Enables the monitoring service for sending system metrics to a remote endpoint. This can be used to monitor - your setup on certain services (e.g. beaconcha.in). This flag sets the endpoint where the beacon node - metrics will be sent. Note: This will send information to a remote sever which may identify and associate - your validators, IP address and other personal information. Always use a HTTPS connection and never provide - an untrusted URL. - --monitoring-endpoint-period - Defines how many seconds to wait between each message sent to the monitoring-endpoint. Default: 60s - - --network - Name of the Eth2 chain Lighthouse will sync and follow. [possible values: mainnet, prater, goerli, gnosis, - chiado, sepolia, holesky] - --network-dir - Data directory for network keys. Defaults to network/ inside the beacon node dir. - - --port - The TCP/UDP ports to listen on. There are two UDP ports. The discovery UDP port will be set to this value - and the Quic UDP port will be set to this value + 1. The discovery port can be modified by the --discovery- - port flag and the quic port can be modified by the --quic-port flag. If listening over both IPv4 - and IPv6 the --port flag will apply to the IPv4 address and --port6 to the IPv6 address. [default: 9000] - --port6 - The TCP/UDP ports to listen on over IPv6 when listening over both IPv4 and IPv6. Defaults to 9090 when - required. The Quic UDP port will be set to this value + 1. [default: 9090] - --prepare-payload-lookahead - The time before the start of a proposal slot at which payload attributes should be sent. Low values are - useful for execution nodes which don't improve their payload after the first call, and high values are - useful for ensuring the EL is given ample notice. Default: 1/3 of a slot. - --progressive-balances - Deprecated. This optimisation is now the default and cannot be disabled. [possible values: fast, disabled, - checked, strict] - --proposer-reorg-cutoff - Maximum delay after the start of the slot at which to propose a reorging block. Lower values can prevent - failed reorgs by ensuring the block has ample time to propagate and be processed by the network. The default - is 1/12th of a slot (1 second on mainnet) - --proposer-reorg-disallowed-offsets - Comma-separated list of integer offsets which can be used to avoid proposing reorging blocks at certain - slots. An offset of N means that reorging proposals will not be attempted at any slot such that `slot % - SLOTS_PER_EPOCH == N`. By default only re-orgs at offset 0 will be avoided. Any offsets supplied with this - flag will impose additional restrictions. - --proposer-reorg-epochs-since-finalization - Maximum number of epochs since finalization at which proposer reorgs are allowed. Default: 2 - - --proposer-reorg-parent-threshold - Percentage of parent vote weight above which to attempt a proposer reorg. Default: 160% - - --proposer-reorg-threshold - Percentage of head vote weight below which to attempt a proposer reorg. Default: 20% - - --prune-blobs - Prune blobs from Lighthouse's database when they are older than the data data availability boundary relative - to the current epoch. [default: true] - --prune-payloads - Prune execution payloads from Lighthouse's database. This saves space but imposes load on the execution - client, as payloads need to be reconstructed and sent to syncing peers. [default: true] - --quic-port - The UDP port that quic will listen on. 
Defaults to `port` + 1 - - --quic-port6 - The UDP port that quic will listen on over IPv6 if listening over both IPv4 and IPv6. Defaults to `port6` + - 1 - --safe-slots-to-import-optimistically - Used to coordinate manual overrides of the SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should - only be used if the user has a clear understanding that the broad Ethereum community has elected to override - this parameter in the event of an attack at the PoS transition block. Incorrect use of this flag can cause - your node to possibly accept an invalid chain or sync more slowly. Be extremely careful with this flag. - --shuffling-cache-size - Some HTTP API requests can be optimised by caching the shufflings at each epoch. This flag allows the user - to set the shuffling cache size in epochs. Shufflings are dependent on validator count and setting this - value to a large number can consume a large amount of memory. - --slasher-att-cache-size - Set the maximum number of attestation roots for the slasher to cache - - --slasher-backend - Set the database backend to be used by the slasher. [possible values: lmdb, disabled] - - --slasher-broadcast - Broadcast slashings found by the slasher to the rest of the network [Enabled by default]. [default: true] - - --slasher-chunk-size - Number of epochs per validator per chunk stored on disk. - - --slasher-dir - Set the slasher's database directory. - - --slasher-history-length - Configure how many epochs of history the slasher keeps. Immutable after initialization. - - --slasher-max-db-size - Maximum size of the MDBX database used by the slasher. - - --slasher-slot-offset - Set the delay from the start of the slot at which the slasher should ingest attestations. Only effective if - the slasher-update-period is a multiple of the slot duration. - --slasher-update-period - Configure how often the slasher runs batch processing. - - --slasher-validator-chunk-size - Number of validators per chunk stored on disk. - - --slots-per-restore-point - Specifies how often a freezer DB restore point should be stored. Cannot be changed after initialization. - [default: 8192 (mainnet) or 64 (minimal)] - --state-cache-size - Specifies the size of the state cache [default: 128] - - --suggested-fee-recipient - Emergency fallback fee recipient for use in case the validator client does not have one configured. You - should set this flag on the validator client instead of (or in addition to) setting it here. - --target-peers - The target number of peers. - - --terminal-block-hash-epoch-override - Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should - only be used if the user has a clear understanding that the broad Ethereum community has elected to override - the terminal PoW block. Incorrect use of this flag will cause your node to experience a consensus failure. - Be extremely careful with this flag. - --terminal-block-hash-override - Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH parameter. This flag should only be used if - the user has a clear understanding that the broad Ethereum community has elected to override the terminal - PoW block. Incorrect use of this flag will cause your node to experience a consensus failure. Be extremely - careful with this flag. - --terminal-total-difficulty-override - Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY parameter. Accepts a 256-bit decimal - integer (not a hex value). 
This flag should only be used if the user has a clear understanding that the - broad Ethereum community has elected to override the terminal difficulty. Incorrect use of this flag will - cause your node to experience a consensus failure. Be extremely careful with this flag. - -t, --testnet-dir - Path to directory containing eth2_testnet specs. Defaults to a hard-coded Lighthouse testnet. Only effective - if there is no existing database. - --trusted-peers - One or more comma-delimited trusted peer ids which always have the highest score according to the peer - scoring system. - --trusted-setup-file-override - Path to a json file containing the trusted setup params. NOTE: This will override the trusted setup that is - generated from the mainnet kzg ceremony. Use with caution - --validator-monitor-file - As per --validator-monitor-pubkeys, but the comma-separated list is contained within a file at the given - path. - --validator-monitor-individual-tracking-threshold - Once the validator monitor reaches this number of local validators it will stop collecting per-validator - Prometheus metrics and issuing per-validator logs. Instead, it will provide aggregate metrics and logs. This - avoids infeasibly high cardinality in the Prometheus database and high log volume when using many - validators. Defaults to 64. - --validator-monitor-pubkeys - A comma-separated list of 0x-prefixed validator public keys. These validators will receive special - monitoring and additional logging. - --wss-checkpoint - Specify a weak subjectivity checkpoint in `block_root:epoch` format to verify the node's sync against. The - block root should be 0x-prefixed. Note that this flag is for verification only, to perform a checkpoint sync - from a recent state use --checkpoint-sync-url. +Flags: + --allow-insecure-genesis-sync + Enable syncing from genesis, which is generally insecure and + incompatible with data availability checks. Checkpoint syncing is the + preferred method for syncing a node. Only use this flag when testing. + DO NOT use on mainnet! + --always-prefer-builder-payload + This flag is deprecated and has no effect. + --always-prepare-payload + Send payload attributes with every fork choice update. This is + intended for use by block builders, relays and developers. You should + set a fee recipient on this BN and also consider adjusting the + --prepare-payload-lookahead flag. + --builder-fallback-disable-checks + This flag disables all checks related to chain health. This means the + builder API will always be used for payload construction, regardless + of recent chain conditions. + --compact-db + If present, apply compaction to the database on start-up. Use with + caution. It is generally not recommended unless auto-compaction is + disabled. + --disable-backfill-rate-limiting + Disable the backfill sync rate-limiting. This allows users to just sync + the entire chain as fast as possible; however, it can result in + resource contention which degrades staking performance. Stakers should + generally choose to avoid this flag since backfill sync is not + required for staking. + --disable-deposit-contract-sync + Explicitly disables syncing of deposit logs from the execution node. + This overrides any previous option that depends on it. Useful if you + intend to run a non-validating beacon node. + --disable-duplicate-warn-logs + This flag is deprecated and has no effect.
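+
+ # Illustrative example (an editorial addition, not part of the generated
+ # help): a typical mainnet invocation wiring together the flags documented
+ # in this file. The checkpoint sync URL and JWT path are placeholders.
+ #
+ #   lighthouse beacon_node \
+ #     --network mainnet \
+ #     --checkpoint-sync-url https://checkpoint.example.com \
+ #     --execution-endpoint http://localhost:8551 \
+ #     --execution-jwt /path/to/jwt.hex \
+ #     --http --metrics
+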
+ --disable-enr-auto-update + Discovery automatically updates the node's local ENR with an external + IP address and port as seen by other peers on the network. This + disables this feature, fixing the ENR's IP/PORT to those specified on + boot. + --disable-inbound-rate-limiter + Disables the inbound rate limiter (requests received by this node). + --disable-lock-timeouts + Disable the timeouts applied to some internal locks by default. This + can lead to less spurious failures on slow hardware but is considered + experimental as it may obscure performance issues. + --disable-log-timestamp + If present, do not include timestamps in logging output. + --disable-malloc-tuning + If present, do not configure the system allocator. Providing this flag + will generally increase memory usage, it should only be provided when + debugging specific memory allocation issues. + --disable-optimistic-finalized-sync + Force Lighthouse to verify every execution block hash with the + execution client during finalized sync. By default block hashes will + be checked in Lighthouse and only passed to the EL if initial + verification fails. + --disable-packet-filter + Disables the discovery packet filter. Useful for testing in smaller + networks + --disable-proposer-reorgs + Do not attempt to reorg late blocks from other validators when + proposing. + --disable-quic + Disables the quic transport. The node will rely solely on the TCP + transport for libp2p connections. + --disable-upnp + Disables UPnP support. Setting this will prevent Lighthouse from + attempting to automatically establish external port mappings. + --dummy-eth1 + If present, uses an eth1 backend that generates static dummy + data. Identical to the method used at the 2019 Canada interop. + -e, --enr-match + Sets the local ENR IP address and port to match those set for + lighthouse. Specifically, the IP address will be the value of + --listen-address and the UDP port will be --discovery-port. + --enable-private-discovery + Lighthouse by default does not discover private IP addresses. Set this + flag to enable connection attempts to local addresses. + --eth1 + If present, the node will connect to an eth1 node. This is required for + block production, you must use this flag if you wish to serve a + validator. + --eth1-purge-cache + Purges the eth1 block and deposit caches + --genesis-backfill + Attempts to download blocks all the way back to genesis when + checkpoint syncing. + --gui + Enable the graphical user interface and all its requirements. This + enables --http and --validator-monitor-auto and enables SSE logging. + -h, --help + Prints help information + --http + Enable the RESTful HTTP API server. Disabled by default. + --http-enable-tls + Serves the RESTful HTTP API server over TLS. This feature is currently + experimental. + --import-all-attestations + Import and aggregate all attestations, regardless of validator + subscriptions. This will only import attestations from + already-subscribed subnets, use with --subscribe-all-subnets to ensure + all attestations are received for import. + --light-client-server + Act as a full node supporting light clients on the p2p network + [experimental] + --log-color + Force outputting colors when emitting logs to the terminal. + --logfile-compress + If present, compress old log files. This can help reduce the space + needed to store old logs. + --logfile-no-restricted-perms + If present, log files will be generated as world-readable meaning they + can be read by any user on the machine.
Note that logs can often + contain sensitive information about your validator and so this flag + should be used with caution. For Windows users, the log file + permissions will be inherited from the parent folder. + --metrics + Enable the Prometheus metrics HTTP server. Disabled by default. + --private + Prevents sending various client identification information. + --proposer-only + Sets this beacon node to be a block proposer only node. This will run + the beacon node in a minimal configuration that is sufficient for + block publishing only. This flag should be used for a beacon node + being referenced by a validator client using the --proposer-node flag. + This configuration is for enabling more secure setups. + --purge-db + If present, the chain database will be deleted. Use with caution. + --reconstruct-historic-states + After a checkpoint sync, reconstruct historic states in the database. + This requires syncing all the way back to genesis. + --reset-payload-statuses + When present, Lighthouse will forget the payload statuses of any + already-imported blocks. This can assist in the recovery from a + consensus failure caused by the execution layer. + --self-limiter + Enables the outbound rate limiter (requests made by this node). Use + the self-limiter-protocol flag to set per protocol configurations. If + the self rate limiter is enabled and a protocol is not present in the + configuration, the quotas used for the inbound rate limiter will be + used. + --shutdown-after-sync + Shutdown beacon node as soon as sync is completed. Backfill sync will + not be performed before shutdown. + --slasher + Run a slasher alongside the beacon node. It is currently only + recommended for expert users because of the immaturity of the slasher + UX and the extra resources required. + --staking + Standard option for a staking beacon node. This will enable the HTTP + server on localhost:5052 and import deposit logs from the execution + node. This is equivalent to `--http` on merge-ready networks, or + `--http --eth1` pre-merge + --subscribe-all-subnets + Subscribe to all subnets regardless of validator count. This will also + advertise the beacon node as being long-lived subscribed to all + subnets. + --validator-monitor-auto + Enables the automatic detection and monitoring of validators connected + to the HTTP API and using the subnet subscription endpoint. This + generally has the effect of providing additional logging and metrics + for locally controlled validators. + -z, --zero-ports + Sets all listening TCP/UDP ports to 0, allowing the OS to choose some + arbitrary free ports. ``` diff --git a/book/src/help_general.md b/book/src/help_general.md index e7e323f330..42bff04d1a 100644 --- a/book/src/help_general.md +++ b/book/src/help_general.md @@ -1,109 +1,141 @@ # Lighthouse General Commands ``` -Sigma Prime -Ethereum 2.0 client by Sigma Prime. Provides a full-featured beacon node, a validator client and utilities for managing -validator accounts. +Ethereum 2.0 client by Sigma Prime. Provides a full-featured beacon node, a +validator client and utilities for managing validator accounts. -USAGE: - lighthouse [FLAGS] [OPTIONS] [SUBCOMMAND] +Usage: lighthouse [OPTIONS] [COMMAND] -FLAGS: - --disable-log-timestamp If present, do not include timestamps in logging output. - --disable-malloc-tuning If present, do not configure the system allocator. Providing this flag will - generally increase memory usage, it should only be provided when debugging - specific memory allocation issues.
- -l DEPRECATED Enables environment logging giving access to sub-protocol logs such - as discv5 and libp2p - -h, --help Prints help information - --log-color Force outputting colors when emitting logs to the terminal. - --logfile-compress If present, compress old log files. This can help reduce the space needed to - store old logs. - --logfile-no-restricted-perms If present, log files will be generated as world-readable meaning they can be - read by any user on the machine. Note that logs can often contain sensitive - information about your validator and so this flag should be used with caution. - For Windows users, the log file permissions will be inherited from the parent - folder. - -V, --version Prints version information +Commands: + account_manager + Utilities for generating and managing Ethereum 2.0 accounts. [aliases: + a, am, account, account_manager] + beacon_node + The primary component which connects to the Ethereum 2.0 P2P network + and downloads, verifies and stores blocks. Provides an HTTP API for + querying the beacon chain and publishing messages to the network. + [aliases: b, bn, beacon] + boot_node + Start a special Lighthouse process that only serves as a discv5 + boot-node. This process will *not* import blocks or perform most + typical beacon node functions. Instead, it will simply run the discv5 + service and assist nodes on the network to discover each other. This + is the recommended way to provide a network boot-node since it has a + reduced attack surface compared to a full beacon node. + database_manager + Manage a beacon node database [aliases: db] + validator_client + When connected to a beacon node, performs the duties of a staked + validator (e.g., proposing blocks and attestations). [aliases: v, vc, + validator] + validator_manager + Utilities for managing a Lighthouse validator client via the HTTP API. + [aliases: vm, validator-manager, validator_manager] + help + Print this message or the help of the given subcommand(s) -OPTIONS: - -d, --datadir - Used to specify a custom root data directory for lighthouse keys and databases. Defaults to - $HOME/.lighthouse/{network} where network is the value of the `network` flag Note: Users should specify - separate custom datadirs for different networks. - --debug-level - Specifies the verbosity level used when emitting logs to the terminal. [default: info] [possible values: - info, debug, trace, warn, error, crit] - --genesis-state-url - A URL of a beacon-API compatible server from which to download the genesis state. Checkpoint sync server - URLs can generally be used with this flag. If not supplied, a default URL or the --checkpoint-sync-url may - be used. If the genesis state is already included in this binary then this value will be ignored. - --genesis-state-url-timeout - The timeout in seconds for the request to --genesis-state-url. [default: 180] +Options: + -d, --datadir + Used to specify a custom root data directory for lighthouse keys and + databases. Defaults to $HOME/.lighthouse/{network} where network is + the value of the `network` flag. Note: Users should specify separate + custom datadirs for different networks. + --debug-level + Specifies the verbosity level used when emitting logs to the terminal. + [default: info] [possible values: info, debug, trace, warn, error, + crit] + --genesis-state-url + A URL of a beacon-API compatible server from which to download the + genesis state. Checkpoint sync server URLs can generally be used with + this flag.
If not supplied, a default URL or the --checkpoint-sync-url + may be used. If the genesis state is already included in this binary + then this value will be ignored. + --genesis-state-url-timeout + The timeout in seconds for the request to --genesis-state-url. + [default: 180] + --log-format + Specifies the log format used when emitting logs to the terminal. + [possible values: JSON] + --logfile + File path where the log file will be stored. Once it grows to the + value specified in `--logfile-max-size` a new log file is generated + where future logs are stored. Once the number of log files exceeds the + value specified in `--logfile-max-number` the oldest log file will be + overwritten. + --logfile-debug-level + The verbosity level used when emitting logs to the log file. [default: + debug] [possible values: info, debug, trace, warn, error, crit] + --logfile-format + Specifies the log format used when emitting logs to the logfile. + [possible values: DEFAULT, JSON] + --logfile-max-number + The maximum number of log files that will be stored. If set to 0, + background file logging is disabled. [default: 5] + --logfile-max-size + The maximum size (in MB) each log file can grow to before rotating. If + set to 0, background file logging is disabled. [default: 200] + --network + Name of the Eth2 chain Lighthouse will sync and follow. [possible + values: mainnet, gnosis, chiado, sepolia, holesky] + --safe-slots-to-import-optimistically + Used to coordinate manual overrides of the + SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should only + be used if the user has a clear understanding that the broad Ethereum + community has elected to override this parameter in the event of an + attack at the PoS transition block. Incorrect use of this flag can + cause your node to possibly accept an invalid chain or sync more + slowly. Be extremely careful with this flag. + -t, --testnet-dir + Path to directory containing eth2_testnet specs. Defaults to a + hard-coded Lighthouse testnet. Only effective if there is no existing + database. + --terminal-block-hash-epoch-override + Used to coordinate manual overrides to the + TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should only + be used if the user has a clear understanding that the broad Ethereum + community has elected to override the terminal PoW block. Incorrect + use of this flag will cause your node to experience a consensus + failure. Be extremely careful with this flag. + --terminal-block-hash-override + Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH + parameter. This flag should only be used if the user has a clear + understanding that the broad Ethereum community has elected to + override the terminal PoW block. Incorrect use of this flag will cause + your node to experience a consensus failure. Be extremely careful with + this flag. + --terminal-total-difficulty-override + Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY + parameter. Accepts a 256-bit decimal integer (not a hex value). This + flag should only be used if the user has a clear understanding that + the broad Ethereum community has elected to override the terminal + difficulty. Incorrect use of this flag will cause your node to + experience a consensus failure. Be extremely careful with this flag. + -V, --version + Print version - --log-format - Specifies the log format used when emitting logs to the terminal. [possible values: JSON] - - --logfile - File path where the log file will be stored. 
Once it grows to the value specified in `--logfile-max-size` a - new log file is generated where future logs are stored. Once the number of log files exceeds the value - specified in `--logfile-max-number` the oldest log file will be overwritten. - --logfile-debug-level - The verbosity level used when emitting logs to the log file. [default: debug] [possible values: info, - debug, trace, warn, error, crit] - --logfile-format - Specifies the log format used when emitting logs to the logfile. [possible values: DEFAULT, JSON] - - --logfile-max-number - The maximum number of log files that will be stored. If set to 0, background file logging is disabled. - [default: 5] - --logfile-max-size - The maximum size (in MB) each log file can grow to before rotating. If set to 0, background file logging is - disabled. [default: 200] - --network - Name of the Eth2 chain Lighthouse will sync and follow. [possible values: mainnet, prater, goerli, gnosis, - chiado, sepolia, holesky] - --safe-slots-to-import-optimistically - Used to coordinate manual overrides of the SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should - only be used if the user has a clear understanding that the broad Ethereum community has elected to override - this parameter in the event of an attack at the PoS transition block. Incorrect use of this flag can cause - your node to possibly accept an invalid chain or sync more slowly. Be extremely careful with this flag. - --terminal-block-hash-epoch-override - Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should - only be used if the user has a clear understanding that the broad Ethereum community has elected to override - the terminal PoW block. Incorrect use of this flag will cause your node to experience a consensus failure. - Be extremely careful with this flag. - --terminal-block-hash-override - Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH parameter. This flag should only be used if - the user has a clear understanding that the broad Ethereum community has elected to override the terminal - PoW block. Incorrect use of this flag will cause your node to experience a consensus failure. Be extremely - careful with this flag. - --terminal-total-difficulty-override - Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY parameter. Accepts a 256-bit decimal - integer (not a hex value). This flag should only be used if the user has a clear understanding that the - broad Ethereum community has elected to override the terminal difficulty. Incorrect use of this flag will - cause your node to experience a consensus failure. Be extremely careful with this flag. - -t, --testnet-dir - Path to directory containing eth2_testnet specs. Defaults to a hard-coded Lighthouse testnet. Only effective - if there is no existing database. - -SUBCOMMANDS: - account_manager Utilities for generating and managing Ethereum 2.0 accounts. [aliases: a, am, account, - account_manager] - beacon_node The primary component which connects to the Ethereum 2.0 P2P network and downloads, - verifies and stores blocks. Provides a HTTP API for querying the beacon chain and - publishing messages to the network. [aliases: b, bn, beacon] - boot_node Start a special Lighthouse process that only serves as a discv5 boot-node. This process - will *not* import blocks or perform most typical beacon node functions. Instead, it will - simply run the discv5 service and assist nodes on the network to discover each other. 
This - is the recommended way to provide a network boot-node since it has a reduced attack surface - compared to a full beacon node. - database_manager Manage a beacon node database [aliases: db] - help Prints this message or the help of the given subcommand(s) - validator_client When connected to a beacon node, performs the duties of a staked validator (e.g., proposing - blocks and attestations). [aliases: v, vc, validator] - validator_manager Utilities for managing a Lighthouse validator client via the HTTP API. [aliases: vm, - validator-manager, validator_manager] +Flags: + --disable-log-timestamp + If present, do not include timestamps in logging output. + --disable-malloc-tuning + If present, do not configure the system allocator. Providing this flag + will generally increase memory usage, it should only be provided when + debugging specific memory allocation issues. + -h, --help + Prints help information + -l + DEPRECATED Enables environment logging giving access to sub-protocol + logs such as discv5 and libp2p + --log-color + Force outputting colors when emitting logs to the terminal. + --logfile-compress + If present, compress old log files. This can help reduce the space + needed to store old logs. + --logfile-no-restricted-perms + If present, log files will be generated as world-readable meaning they + can be read by any user on the machine. Note that logs can often + contain sensitive information about your validator and so this flag + should be used with caution. For Windows users, the log file + permissions will be inherited from the parent folder. ``` diff --git a/book/src/help_vc.md b/book/src/help_vc.md index 4fd35b1ea2..1dba75e521 100644 --- a/book/src/help_vc.md +++ b/book/src/help_vc.md @@ -1,227 +1,281 @@ # Validator Client ``` -When connected to a beacon node, performs the duties of a staked validator (e.g., proposing blocks and attestations). +When connected to a beacon node, performs the duties of a staked validator +(e.g., proposing blocks and attestations). -USAGE: - lighthouse validator_client [FLAGS] [OPTIONS] +Usage: lighthouse validator_client [OPTIONS] -FLAGS: - --builder-proposals - If this flag is set, Lighthouse will query the Beacon Node for only block headers during proposals and will - sign over headers. Useful for outsourcing execution payload construction during proposals. - --disable-auto-discover - If present, do not attempt to discover new validators in the validators-dir. Validators will need to be - manually added to the validator_definitions.yml file. - --disable-log-timestamp If present, do not include timestamps in logging output. - --disable-malloc-tuning - If present, do not configure the system allocator. Providing this flag will generally increase memory usage, - it should only be provided when debugging specific memory allocation issues. - --disable-run-on-all - DEPRECATED. Use --broadcast. By default, Lighthouse publishes attestation, sync committee subscriptions and - proposer preparation messages to all beacon nodes provided in the `--beacon-nodes flag`. This option changes - that behaviour such that these api calls only go out to the first available and synced beacon node - --disable-slashing-protection-web3signer - Disable Lighthouse's slashing protection for all web3signer keys. This can reduce the I/O burden on the VC - but is only safe if slashing protection is enabled on the remote signer and is implemented correctly. DO NOT - ENABLE THIS FLAG UNLESS YOU ARE CERTAIN THAT SLASHING PROTECTION IS ENABLED ON THE REMOTE SIGNER. 
YOU WILL - GET SLASHED IF YOU USE THIS FLAG WITHOUT ENABLING WEB3SIGNER'S SLASHING PROTECTION. - --distributed - Enables functionality required for running the validator in a distributed validator cluster. +Options: + --beacon-nodes + Comma-separated addresses to one or more beacon node HTTP APIs. + Default is http://localhost:5052. + --beacon-nodes-tls-certs + Comma-separated paths to custom TLS certificates to use when + connecting to a beacon node (and/or proposer node). These certificates + must be in PEM format and are used in addition to the OS trust store. + Commas must only be used as a delimiter, and must not be part of the + certificate path. + --broadcast + Comma-separated list of beacon API topics to broadcast to all beacon + nodes. Possible values are: none, attestations, blocks, subscriptions, + sync-committee. Default (when flag is omitted) is to broadcast + subscriptions only. + --builder-boost-factor + Defines the boost factor, a percentage multiplier to apply to the + builder's payload value when choosing between a builder payload header + and payload from the local execution node. + --builder-registration-timestamp-override + This flag takes a unix timestamp value that will be used to override + the timestamp used in the builder api registration + -d, --datadir + Used to specify a custom root data directory for lighthouse keys and + databases. Defaults to $HOME/.lighthouse/{network} where network is + the value of the `network` flag Note: Users should specify separate + custom datadirs for different networks. + --debug-level + Specifies the verbosity level used when emitting logs to the terminal. + [default: info] [possible values: info, debug, trace, warn, error, + crit] + --gas-limit + The gas limit to be used in all builder proposals for all validators + managed by this validator client. Note this will not necessarily be + used if the gas limit set here moves too far from the previous block's + gas limit. [default: 30,000,000] + --genesis-state-url + A URL of a beacon-API compatible server from which to download the + genesis state. Checkpoint sync server URLs can generally be used with + this flag. If not supplied, a default URL or the --checkpoint-sync-url + may be used. If the genesis state is already included in this binary + then this value will be ignored. + --genesis-state-url-timeout + The timeout in seconds for the request to --genesis-state-url. + [default: 180] + --graffiti + Specify your custom graffiti to be included in blocks. + --graffiti-file + Specify a graffiti file to load validator graffitis from. + --http-address
+ Set the address for the HTTP server. The HTTP server is not encrypted
+ and therefore it is unsafe to publish on a public network. When this
+ flag is used, it additionally requires the explicit use of the
+ `--unencrypted-http-transport` flag to ensure the user is aware of the
+ risks involved. For access via the Internet, users should apply
+ transport-layer security like an HTTPS reverse-proxy or SSH tunnelling.
+ --http-allow-origin
+ Set the value of the Access-Control-Allow-Origin response HTTP header.
+ Use * to allow any origin (not recommended in production). If no value
+ is supplied, the CORS allowed origin is set to the listen address of
+ this server (e.g., http://localhost:5062).
+ --http-port
+ Set the listen TCP port for the RESTful HTTP API server.
+ --log-format
+ Specifies the log format used when emitting logs to the terminal.
+ [possible values: JSON]
+ --logfile
+ File path where the log file will be stored. Once it grows to the
+ value specified in `--logfile-max-size` a new log file is generated
+ where future logs are stored. Once the number of log files exceeds the
+ value specified in `--logfile-max-number` the oldest log file will be
+ overwritten.
+ --logfile-debug-level
+ The verbosity level used when emitting logs to the log file. [default:
+ debug] [possible values: info, debug, trace, warn, error, crit]
+ --logfile-format
+ Specifies the log format used when emitting logs to the logfile.
+ [possible values: DEFAULT, JSON]
+ --logfile-max-number
+ The maximum number of log files that will be stored. If set to 0,
+ background file logging is disabled. [default: 5]
+ --logfile-max-size
+ The maximum size (in MB) each log file can grow to before rotating. If
+ set to 0, background file logging is disabled. [default: 200]
+ --metrics-address
+ Set the listen address for the Prometheus metrics HTTP server. + --metrics-allow-origin + Set the value of the Access-Control-Allow-Origin response HTTP header. + Use * to allow any origin (not recommended in production). If no value + is supplied, the CORS allowed origin is set to the listen address of + this server (e.g., http://localhost:5064). + --metrics-port + Set the listen TCP port for the Prometheus metrics HTTP server. + --monitoring-endpoint
+ Enables the monitoring service for sending system metrics to a remote
+ endpoint. This can be used to monitor your setup on certain services
+ (e.g. beaconcha.in). This flag sets the endpoint where the beacon node
+ metrics will be sent. Note: This will send information to a remote
+ server which may identify and associate your validators, IP address and
+ other personal information. Always use an HTTPS connection and never
+ provide an untrusted URL.
+ --monitoring-endpoint-period
+ Defines how many seconds to wait between each message sent to the
+ monitoring-endpoint. Default: 60s
+ --network
+ Name of the Eth2 chain Lighthouse will sync and follow. [possible
+ values: mainnet, gnosis, chiado, sepolia, holesky]
+ --proposer-nodes
+ Comma-separated addresses to one or more beacon node HTTP APIs. These
+ specify nodes that are used to send beacon block proposals. A failure
+ will revert back to the standard beacon nodes specified in
+ --beacon-nodes.
+ --safe-slots-to-import-optimistically
+ Used to coordinate manual overrides of the
+ SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should only
+ be used if the user has a clear understanding that the broad Ethereum
+ community has elected to override this parameter in the event of an
+ attack at the PoS transition block. Incorrect use of this flag can
+ cause your node to possibly accept an invalid chain or sync more
+ slowly. Be extremely careful with this flag.
+ --secrets-dir
+ The directory which contains the password to unlock the validator
+ voting keypairs. Each password should be contained in a file where the
+ name is the 0x-prefixed hex representation of the validators voting
+ public key. Defaults to ~/.lighthouse/{network}/secrets.
+ --suggested-fee-recipient
+ Once the merge has happened, this address will receive transaction
+ fees from blocks proposed by this validator client. If a fee recipient
+ is configured in the validator definitions it takes priority over this
+ value.
+ -t, --testnet-dir
+ Path to directory containing eth2_testnet specs. Defaults to a
+ hard-coded Lighthouse testnet. Only effective if there is no existing
+ database.
+ --terminal-block-hash-epoch-override
+ Used to coordinate manual overrides to the
+ TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should only
+ be used if the user has a clear understanding that the broad Ethereum
+ community has elected to override the terminal PoW block. Incorrect
+ use of this flag will cause your node to experience a consensus
+ failure. Be extremely careful with this flag.
+ --terminal-block-hash-override
+ Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH
+ parameter. This flag should only be used if the user has a clear
+ understanding that the broad Ethereum community has elected to
+ override the terminal PoW block. Incorrect use of this flag will cause
+ your node to experience a consensus failure. Be extremely careful with
+ this flag.
+ --terminal-total-difficulty-override
+ Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY
+ parameter. Accepts a 256-bit decimal integer (not a hex value). This
+ flag should only be used if the user has a clear understanding that
+ the broad Ethereum community has elected to override the terminal
+ difficulty. Incorrect use of this flag will cause your node to
+ experience a consensus failure. Be extremely careful with this flag.
+ --validator-registration-batch-size
+ Defines the number of validators per validator/register_validator
+ request sent to the BN.
This value can be reduced to avoid timeouts + from builders. [default: 500] + --validators-dir + The directory which contains the validator keystores, deposit data for + each validator along with the common slashing protection database and + the validator_definitions.yml + --web3-signer-keep-alive-timeout + Keep-alive timeout for each web3signer connection. Set to 'null' to + never timeout [default: 20000] + --web3-signer-max-idle-connections + Maximum number of idle connections to maintain per web3signer host. + Default is unlimited. - --enable-doppelganger-protection - If this flag is set, Lighthouse will delay startup for three epochs and monitor for messages on the network - by any of the validators managed by this client. This will result in three (possibly four) epochs worth of - missed attestations. If an attestation is detected during this period, it means it is very likely that you - are running a second validator client with the same keys. This validator client will immediately shutdown if - this is detected in order to avoid potentially committing a slashable offense. Use this flag in order to - ENABLE this functionality, without this flag Lighthouse will begin attesting immediately. - --enable-high-validator-count-metrics - Enable per validator metrics for > 64 validators. Note: This flag is automatically enabled for <= 64 - validators. Enabling this metric for higher validator counts will lead to higher volume of prometheus - metrics being collected. - -h, --help Prints help information - --http Enable the RESTful HTTP API server. Disabled by default. - --http-allow-keystore-export - If present, allow access to the DELETE /lighthouse/keystores HTTP API method, which allows exporting - keystores and passwords to HTTP API consumers who have access to the API token. This method is useful for - exporting validators, however it should be used with caution since it exposes private key data to authorized - users. - --http-store-passwords-in-secrets-dir - If present, any validators created via the HTTP will have keystore passwords stored in the secrets-dir - rather than the validator definitions file. - --init-slashing-protection - If present, do not require the slashing protection database to exist before running. You SHOULD NOT use this - flag unless you're certain that a new slashing protection database is required. Usually, your database will - have been initialized when you imported your validator keys. If you misplace your database and then run with - this flag you risk being slashed. - --log-color Force outputting colors when emitting logs to the terminal. - --logfile-compress - If present, compress old log files. This can help reduce the space needed to store old logs. - - --logfile-no-restricted-perms - If present, log files will be generated as world-readable meaning they can be read by any user on the - machine. Note that logs can often contain sensitive information about your validator and so this flag should - be used with caution. For Windows users, the log file permissions will be inherited from the parent folder. - --metrics Enable the Prometheus metrics HTTP server. Disabled by default. - --prefer-builder-proposals - If this flag is set, Lighthouse will always prefer blocks constructed by builders, regardless of payload - value. - --produce-block-v3 - Enable block production via the block v3 endpoint for this validator client. This should only be enabled - when paired with a beacon node that has this endpoint implemented. 
This flag will be enabled by default in - future. - --unencrypted-http-transport - This is a safety flag to ensure that the user is aware that the http transport is unencrypted and using a - custom HTTP address is unsafe. - --use-long-timeouts - If present, the validator client will use longer timeouts for requests made to the beacon node. This flag is - generally not recommended, longer timeouts can cause missed duties when fallbacks are used. - -V, --version Prints version information - -OPTIONS: - --beacon-nodes - Comma-separated addresses to one or more beacon node HTTP APIs. Default is http://localhost:5052. - - --beacon-nodes-tls-certs - Comma-separated paths to custom TLS certificates to use when connecting to a beacon node (and/or proposer - node). These certificates must be in PEM format and are used in addition to the OS trust store. Commas must - only be used as a delimiter, and must not be part of the certificate path. - --broadcast - Comma-separated list of beacon API topics to broadcast to all beacon nodes. Possible values are: none, - attestations, blocks, subscriptions, sync-committee. Default (when flag is omitted) is to broadcast - subscriptions only. - --builder-boost-factor - Defines the boost factor, a percentage multiplier to apply to the builder's payload value when choosing - between a builder payload header and payload from the local execution node. - --builder-registration-timestamp-override - This flag takes a unix timestamp value that will be used to override the timestamp used in the builder api - registration - -d, --datadir - Used to specify a custom root data directory for lighthouse keys and databases. Defaults to - $HOME/.lighthouse/{network} where network is the value of the `network` flag Note: Users should specify - separate custom datadirs for different networks. - --debug-level - Specifies the verbosity level used when emitting logs to the terminal. [default: info] [possible values: - info, debug, trace, warn, error, crit] - --gas-limit - The gas limit to be used in all builder proposals for all validators managed by this validator client. Note - this will not necessarily be used if the gas limit set here moves too far from the previous block's gas - limit. [default: 30,000,000] - --genesis-state-url - A URL of a beacon-API compatible server from which to download the genesis state. Checkpoint sync server - URLs can generally be used with this flag. If not supplied, a default URL or the --checkpoint-sync-url may - be used. If the genesis state is already included in this binary then this value will be ignored. - --genesis-state-url-timeout - The timeout in seconds for the request to --genesis-state-url. [default: 180] - - --graffiti - Specify your custom graffiti to be included in blocks. - - --graffiti-file - Specify a graffiti file to load validator graffitis from. - - --http-address
- Set the address for the HTTP address. The HTTP server is not encrypted and therefore it is unsafe to publish - on a public network. When this flag is used, it additionally requires the explicit use of the - `--unencrypted-http-transport` flag to ensure the user is aware of the risks involved. For access via the - Internet, users should apply transport-layer security like a HTTPS reverse-proxy or SSH tunnelling. - --http-allow-origin - Set the value of the Access-Control-Allow-Origin response HTTP header. Use * to allow any origin (not - recommended in production). If no value is supplied, the CORS allowed origin is set to the listen address of - this server (e.g., http://localhost:5062). - --http-port - Set the listen TCP port for the RESTful HTTP API server. - - --latency-measurement-service - Set to 'true' to enable a service that periodically attempts to measure latency to BNs. Set to 'false' to - disable. [default: true] - --log-format - Specifies the log format used when emitting logs to the terminal. [possible values: JSON] - - --logfile - File path where the log file will be stored. Once it grows to the value specified in `--logfile-max-size` a - new log file is generated where future logs are stored. Once the number of log files exceeds the value - specified in `--logfile-max-number` the oldest log file will be overwritten. - --logfile-debug-level - The verbosity level used when emitting logs to the log file. [default: debug] [possible values: info, - debug, trace, warn, error, crit] - --logfile-format - Specifies the log format used when emitting logs to the logfile. [possible values: DEFAULT, JSON] - - --logfile-max-number - The maximum number of log files that will be stored. If set to 0, background file logging is disabled. - [default: 5] - --logfile-max-size - The maximum size (in MB) each log file can grow to before rotating. If set to 0, background file logging is - disabled. [default: 200] - --metrics-address
- Set the listen address for the Prometheus metrics HTTP server. - - --metrics-allow-origin - Set the value of the Access-Control-Allow-Origin response HTTP header. Use * to allow any origin (not - recommended in production). If no value is supplied, the CORS allowed origin is set to the listen address of - this server (e.g., http://localhost:5064). - --metrics-port - Set the listen TCP port for the Prometheus metrics HTTP server. - - --monitoring-endpoint
- Enables the monitoring service for sending system metrics to a remote endpoint. This can be used to monitor - your setup on certain services (e.g. beaconcha.in). This flag sets the endpoint where the beacon node - metrics will be sent. Note: This will send information to a remote sever which may identify and associate - your validators, IP address and other personal information. Always use a HTTPS connection and never provide - an untrusted URL. - --monitoring-endpoint-period - Defines how many seconds to wait between each message sent to the monitoring-endpoint. Default: 60s - - --network - Name of the Eth2 chain Lighthouse will sync and follow. [possible values: mainnet, prater, goerli, gnosis, - chiado, sepolia, holesky] - --proposer-nodes - Comma-separated addresses to one or more beacon node HTTP APIs. These specify nodes that are used to send - beacon block proposals. A failure will revert back to the standard beacon nodes specified in --beacon-nodes. - --safe-slots-to-import-optimistically - Used to coordinate manual overrides of the SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should - only be used if the user has a clear understanding that the broad Ethereum community has elected to override - this parameter in the event of an attack at the PoS transition block. Incorrect use of this flag can cause - your node to possibly accept an invalid chain or sync more slowly. Be extremely careful with this flag. - --secrets-dir - The directory which contains the password to unlock the validator voting keypairs. Each password should be - contained in a file where the name is the 0x-prefixed hex representation of the validators voting public - key. Defaults to ~/.lighthouse/{network}/secrets. - --suggested-fee-recipient - Once the merge has happened, this address will receive transaction fees from blocks proposed by this - validator client. If a fee recipient is configured in the validator definitions it takes priority over this - value. - --terminal-block-hash-epoch-override - Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should - only be used if the user has a clear understanding that the broad Ethereum community has elected to override - the terminal PoW block. Incorrect use of this flag will cause your node to experience a consensus failure. - Be extremely careful with this flag. - --terminal-block-hash-override - Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH parameter. This flag should only be used if - the user has a clear understanding that the broad Ethereum community has elected to override the terminal - PoW block. Incorrect use of this flag will cause your node to experience a consensus failure. Be extremely - careful with this flag. - --terminal-total-difficulty-override - Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY parameter. Accepts a 256-bit decimal - integer (not a hex value). This flag should only be used if the user has a clear understanding that the - broad Ethereum community has elected to override the terminal difficulty. Incorrect use of this flag will - cause your node to experience a consensus failure. Be extremely careful with this flag. - -t, --testnet-dir - Path to directory containing eth2_testnet specs. Defaults to a hard-coded Lighthouse testnet. Only effective - if there is no existing database. - --validator-registration-batch-size - Defines the number of validators per validator/register_validator request sent to the BN. 
This value can be - reduced to avoid timeouts from builders. [default: 500] - --validators-dir - The directory which contains the validator keystores, deposit data for each validator along with the common - slashing protection database and the validator_definitions.yml - --web3-signer-keep-alive-timeout - Keep-alive timeout for each web3signer connection. Set to 'null' to never timeout [default: 20000] - - --web3-signer-max-idle-connections - Maximum number of idle connections to maintain per web3signer host. Default is unlimited. +Flags: + --builder-proposals + If this flag is set, Lighthouse will query the Beacon Node for only + block headers during proposals and will sign over headers. Useful for + outsourcing execution payload construction during proposals. + --disable-auto-discover + If present, do not attempt to discover new validators in the + validators-dir. Validators will need to be manually added to the + validator_definitions.yml file. + --disable-latency-measurement-service + Disables the service that periodically attempts to measure latency to + BNs. + --disable-log-timestamp + If present, do not include timestamps in logging output. + --disable-malloc-tuning + If present, do not configure the system allocator. Providing this flag + will generally increase memory usage, it should only be provided when + debugging specific memory allocation issues. + --disable-run-on-all + DEPRECATED. Use --broadcast. By default, Lighthouse publishes + attestation, sync committee subscriptions and proposer preparation + messages to all beacon nodes provided in the `--beacon-nodes flag`. + This option changes that behaviour such that these api calls only go + out to the first available and synced beacon node + --disable-slashing-protection-web3signer + Disable Lighthouse's slashing protection for all web3signer keys. This + can reduce the I/O burden on the VC but is only safe if slashing + protection is enabled on the remote signer and is implemented + correctly. DO NOT ENABLE THIS FLAG UNLESS YOU ARE CERTAIN THAT + SLASHING PROTECTION IS ENABLED ON THE REMOTE SIGNER. YOU WILL GET + SLASHED IF YOU USE THIS FLAG WITHOUT ENABLING WEB3SIGNER'S SLASHING + PROTECTION. + --distributed + Enables functionality required for running the validator in a + distributed validator cluster. + --enable-doppelganger-protection + If this flag is set, Lighthouse will delay startup for three epochs + and monitor for messages on the network by any of the validators + managed by this client. This will result in three (possibly four) + epochs worth of missed attestations. If an attestation is detected + during this period, it means it is very likely that you are running a + second validator client with the same keys. This validator client will + immediately shutdown if this is detected in order to avoid potentially + committing a slashable offense. Use this flag in order to ENABLE this + functionality, without this flag Lighthouse will begin attesting + immediately. + --enable-high-validator-count-metrics + Enable per validator metrics for > 64 validators. Note: This flag is + automatically enabled for <= 64 validators. Enabling this metric for + higher validator counts will lead to higher volume of prometheus + metrics being collected. + -h, --help + Prints help information + --http + Enable the RESTful HTTP API server. Disabled by default. 
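As a quick illustration of the HTTP API flags above, a minimal hedged sketch assuming a local-only setup on the default port (5062); the network name is a placeholder:

```bash
# Hedged sketch: run the validator client with its RESTful HTTP API
# enabled on the default local address. Binding a non-local address
# would additionally require --http-address together with
# --unencrypted-http-transport, as described above.
lighthouse validator_client \
  --network holesky \
  --http
```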
+ --http-allow-keystore-export + If present, allow access to the DELETE /lighthouse/keystores HTTP API + method, which allows exporting keystores and passwords to HTTP API + consumers who have access to the API token. This method is useful for + exporting validators, however it should be used with caution since it + exposes private key data to authorized users. + --http-store-passwords-in-secrets-dir + If present, any validators created via the HTTP will have keystore + passwords stored in the secrets-dir rather than the validator + definitions file. + --init-slashing-protection + If present, do not require the slashing protection database to exist + before running. You SHOULD NOT use this flag unless you're certain + that a new slashing protection database is required. Usually, your + database will have been initialized when you imported your validator + keys. If you misplace your database and then run with this flag you + risk being slashed. + --log-color + Force outputting colors when emitting logs to the terminal. + --logfile-compress + If present, compress old log files. This can help reduce the space + needed to store old logs. + --logfile-no-restricted-perms + If present, log files will be generated as world-readable meaning they + can be read by any user on the machine. Note that logs can often + contain sensitive information about your validator and so this flag + should be used with caution. For Windows users, the log file + permissions will be inherited from the parent folder. + --metrics + Enable the Prometheus metrics HTTP server. Disabled by default. + --prefer-builder-proposals + If this flag is set, Lighthouse will always prefer blocks constructed + by builders, regardless of payload value. + --produce-block-v3 + Enable block production via the block v3 endpoint for this validator + client. This should only be enabled when paired with a beacon node + that has this endpoint implemented. This flag will be enabled by + default in future. + --unencrypted-http-transport + This is a safety flag to ensure that the user is aware that the http + transport is unencrypted and using a custom HTTP address is unsafe. + --use-long-timeouts + If present, the validator client will use longer timeouts for requests + made to the beacon node. This flag is generally not recommended, + longer timeouts can cause missed duties when fallbacks are used. ``` diff --git a/book/src/help_vm.md b/book/src/help_vm.md index 85dcdd3c0b..6f9cc405e7 100644 --- a/book/src/help_vm.md +++ b/book/src/help_vm.md @@ -3,97 +3,126 @@ ``` Utilities for managing a Lighthouse validator client via the HTTP API. -USAGE: - lighthouse validator_manager [FLAGS] [OPTIONS] [SUBCOMMAND] +Usage: lighthouse validator_manager [OPTIONS] [COMMAND] -FLAGS: - --disable-log-timestamp If present, do not include timestamps in logging output. - --disable-malloc-tuning If present, do not configure the system allocator. Providing this flag will - generally increase memory usage, it should only be provided when debugging - specific memory allocation issues. - -h, --help Prints help information - --log-color Force outputting colors when emitting logs to the terminal. - --logfile-compress If present, compress old log files. This can help reduce the space needed to - store old logs. - --logfile-no-restricted-perms If present, log files will be generated as world-readable meaning they can be - read by any user on the machine. 
Note that logs can often contain sensitive - information about your validator and so this flag should be used with caution. - For Windows users, the log file permissions will be inherited from the parent - folder. - -V, --version Prints version information +Commands: + create + Creates new validators from BIP-39 mnemonic. A JSON file will be + created which contains all the validator keystores and other validator + data. This file can then be imported to a validator client using the + "import-validators" command. Another, optional JSON file is created + which contains a list of validator deposits in the same format as the + "ethereum/staking-deposit-cli" tool. + import + Uploads validators to a validator client using the HTTP API. The + validators are defined in a JSON file which can be generated using the + "create-validators" command. + move + Uploads validators to a validator client using the HTTP API. The + validators are defined in a JSON file which can be generated using the + "create-validators" command. This command only supports validators + signing via a keystore on the local file system (i.e., not Web3Signer + validators). + help + Print this message or the help of the given subcommand(s) -OPTIONS: - -d, --datadir - Used to specify a custom root data directory for lighthouse keys and databases. Defaults to - $HOME/.lighthouse/{network} where network is the value of the `network` flag Note: Users should specify - separate custom datadirs for different networks. - --debug-level - Specifies the verbosity level used when emitting logs to the terminal. [default: info] [possible values: - info, debug, trace, warn, error, crit] - --genesis-state-url - A URL of a beacon-API compatible server from which to download the genesis state. Checkpoint sync server - URLs can generally be used with this flag. If not supplied, a default URL or the --checkpoint-sync-url may - be used. If the genesis state is already included in this binary then this value will be ignored. - --genesis-state-url-timeout - The timeout in seconds for the request to --genesis-state-url. [default: 180] +Options: + -d, --datadir + Used to specify a custom root data directory for lighthouse keys and + databases. Defaults to $HOME/.lighthouse/{network} where network is + the value of the `network` flag Note: Users should specify separate + custom datadirs for different networks. + --debug-level + Specifies the verbosity level used when emitting logs to the terminal. + [default: info] [possible values: info, debug, trace, warn, error, + crit] + --genesis-state-url + A URL of a beacon-API compatible server from which to download the + genesis state. Checkpoint sync server URLs can generally be used with + this flag. If not supplied, a default URL or the --checkpoint-sync-url + may be used. If the genesis state is already included in this binary + then this value will be ignored. + --genesis-state-url-timeout + The timeout in seconds for the request to --genesis-state-url. + [default: 180] + --log-format + Specifies the log format used when emitting logs to the terminal. + [possible values: JSON] + --logfile + File path where the log file will be stored. Once it grows to the + value specified in `--logfile-max-size` a new log file is generated + where future logs are stored. Once the number of log files exceeds the + value specified in `--logfile-max-number` the oldest log file will be + overwritten. + --logfile-debug-level + The verbosity level used when emitting logs to the log file. 
[default: + debug] [possible values: info, debug, trace, warn, error, crit] + --logfile-format + Specifies the log format used when emitting logs to the logfile. + [possible values: DEFAULT, JSON] + --logfile-max-number + The maximum number of log files that will be stored. If set to 0, + background file logging is disabled. [default: 5] + --logfile-max-size + The maximum size (in MB) each log file can grow to before rotating. If + set to 0, background file logging is disabled. [default: 200] + --network + Name of the Eth2 chain Lighthouse will sync and follow. [possible + values: mainnet, gnosis, chiado, sepolia, holesky] + --safe-slots-to-import-optimistically + Used to coordinate manual overrides of the + SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should only + be used if the user has a clear understanding that the broad Ethereum + community has elected to override this parameter in the event of an + attack at the PoS transition block. Incorrect use of this flag can + cause your node to possibly accept an invalid chain or sync more + slowly. Be extremely careful with this flag. + -t, --testnet-dir + Path to directory containing eth2_testnet specs. Defaults to a + hard-coded Lighthouse testnet. Only effective if there is no existing + database. + --terminal-block-hash-epoch-override + Used to coordinate manual overrides to the + TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should only + be used if the user has a clear understanding that the broad Ethereum + community has elected to override the terminal PoW block. Incorrect + use of this flag will cause your node to experience a consensus + failure. Be extremely careful with this flag. + --terminal-block-hash-override + Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH + parameter. This flag should only be used if the user has a clear + understanding that the broad Ethereum community has elected to + override the terminal PoW block. Incorrect use of this flag will cause + your node to experience a consensus failure. Be extremely careful with + this flag. + --terminal-total-difficulty-override + Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY + parameter. Accepts a 256-bit decimal integer (not a hex value). This + flag should only be used if the user has a clear understanding that + the broad Ethereum community has elected to override the terminal + difficulty. Incorrect use of this flag will cause your node to + experience a consensus failure. Be extremely careful with this flag. - --log-format - Specifies the log format used when emitting logs to the terminal. [possible values: JSON] - - --logfile - File path where the log file will be stored. Once it grows to the value specified in `--logfile-max-size` a - new log file is generated where future logs are stored. Once the number of log files exceeds the value - specified in `--logfile-max-number` the oldest log file will be overwritten. - --logfile-debug-level - The verbosity level used when emitting logs to the log file. [default: debug] [possible values: info, - debug, trace, warn, error, crit] - --logfile-format - Specifies the log format used when emitting logs to the logfile. [possible values: DEFAULT, JSON] - - --logfile-max-number - The maximum number of log files that will be stored. If set to 0, background file logging is disabled. - [default: 5] - --logfile-max-size - The maximum size (in MB) each log file can grow to before rotating. If set to 0, background file logging is - disabled. 
[default: 200] - --network - Name of the Eth2 chain Lighthouse will sync and follow. [possible values: mainnet, prater, goerli, gnosis, - chiado, sepolia, holesky] - --safe-slots-to-import-optimistically - Used to coordinate manual overrides of the SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should - only be used if the user has a clear understanding that the broad Ethereum community has elected to override - this parameter in the event of an attack at the PoS transition block. Incorrect use of this flag can cause - your node to possibly accept an invalid chain or sync more slowly. Be extremely careful with this flag. - --terminal-block-hash-epoch-override - Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should - only be used if the user has a clear understanding that the broad Ethereum community has elected to override - the terminal PoW block. Incorrect use of this flag will cause your node to experience a consensus failure. - Be extremely careful with this flag. - --terminal-block-hash-override - Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH parameter. This flag should only be used if - the user has a clear understanding that the broad Ethereum community has elected to override the terminal - PoW block. Incorrect use of this flag will cause your node to experience a consensus failure. Be extremely - careful with this flag. - --terminal-total-difficulty-override - Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY parameter. Accepts a 256-bit decimal - integer (not a hex value). This flag should only be used if the user has a clear understanding that the - broad Ethereum community has elected to override the terminal difficulty. Incorrect use of this flag will - cause your node to experience a consensus failure. Be extremely careful with this flag. - -t, --testnet-dir - Path to directory containing eth2_testnet specs. Defaults to a hard-coded Lighthouse testnet. Only effective - if there is no existing database. - -SUBCOMMANDS: - create Creates new validators from BIP-39 mnemonic. A JSON file will be created which contains all the - validator keystores and other validator data. This file can then be imported to a validator client - using the "import-validators" command. Another, optional JSON file is created which contains a list of - validator deposits in the same format as the "ethereum/staking-deposit-cli" tool. - help Prints this message or the help of the given subcommand(s) - import Uploads validators to a validator client using the HTTP API. The validators are defined in a JSON file - which can be generated using the "create-validators" command. - move Uploads validators to a validator client using the HTTP API. The validators are defined in a JSON file - which can be generated using the "create-validators" command. This command only supports validators - signing via a keystore on the local file system (i.e., not Web3Signer validators). +Flags: + --disable-log-timestamp + If present, do not include timestamps in logging output. + --disable-malloc-tuning + If present, do not configure the system allocator. Providing this flag + will generally increase memory usage, it should only be provided when + debugging specific memory allocation issues. + -h, --help + Prints help information + --log-color + Force outputting colors when emitting logs to the terminal. + --logfile-compress + If present, compress old log files. This can help reduce the space + needed to store old logs. 
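The logfile flags above combine into a single rotation policy; a hedged sketch with an illustrative path and limits (the trailing subcommand is incidental):

```bash
# Hedged sketch: write logs to vm.log, rotate at 50 MB, keep at most
# 10 files, and compress rotated files per --logfile-compress.
lighthouse validator_manager \
  --logfile ~/.lighthouse/custom/vm.log \
  --logfile-max-size 50 \
  --logfile-max-number 10 \
  --logfile-compress \
  help
```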
+ --logfile-no-restricted-perms + If present, log files will be generated as world-readable meaning they + can be read by any user on the machine. Note that logs can often + contain sensitive information about your validator and so this flag + should be used with caution. For Windows users, the log file + permissions will be inherited from the parent folder. ``` diff --git a/book/src/help_vm_create.md b/book/src/help_vm_create.md index 1b43d0f988..4ddb360e48 100644 --- a/book/src/help_vm_create.md +++ b/book/src/help_vm_create.md @@ -1,139 +1,169 @@ # Validator Manager Create ``` -Creates new validators from BIP-39 mnemonic. A JSON file will be created which contains all the validator keystores and -other validator data. This file can then be imported to a validator client using the "import-validators" command. -Another, optional JSON file is created which contains a list of validator deposits in the same format as the -"ethereum/staking-deposit-cli" tool. +Creates new validators from BIP-39 mnemonic. A JSON file will be created which +contains all the validator keystores and other validator data. This file can +then be imported to a validator client using the "import-validators" command. +Another, optional JSON file is created which contains a list of validator +deposits in the same format as the "ethereum/staking-deposit-cli" tool. -USAGE: - lighthouse validator_manager create [FLAGS] [OPTIONS] --output-path +Usage: lighthouse validator_manager create [OPTIONS] --output-path -FLAGS: - --disable-deposits When provided don't generate the deposits JSON file that is commonly used - for submitting validator deposits via a web UI. Using this flag will save - several seconds per validator if the user has an alternate strategy for - submitting deposits. - --disable-log-timestamp If present, do not include timestamps in logging output. - --disable-malloc-tuning If present, do not configure the system allocator. Providing this flag - will generally increase memory usage, it should only be provided when - debugging specific memory allocation issues. - --force-bls-withdrawal-credentials If present, allows BLS withdrawal credentials rather than an execution - address. This is not recommended. - -h, --help Prints help information - --log-color Force outputting colors when emitting logs to the terminal. - --logfile-compress If present, compress old log files. This can help reduce the space needed - to store old logs. - --logfile-no-restricted-perms If present, log files will be generated as world-readable meaning they can - be read by any user on the machine. Note that logs can often contain - sensitive information about your validator and so this flag should be used - with caution. For Windows users, the log file permissions will be - inherited from the parent folder. - --specify-voting-keystore-password If present, the user will be prompted to enter the voting keystore - password that will be used to encrypt the voting keystores. If this flag - is not provided, a random password will be used. It is not necessary to - keep backups of voting keystore passwords if the mnemonic is safely backed - up. - --stdin-inputs If present, read all user inputs from stdin instead of tty. - -V, --version Prints version information +Options: + --beacon-node + A HTTP(S) address of a beacon node using the beacon-API. If this value + is provided, an error will be raised if any validator key here is + already known as a validator by that beacon node. 
This helps prevent + the same validator being created twice and therefore slashable + conditions. + --builder-boost-factor + Defines the boost factor, a percentage multiplier to apply to the + builder's payload value when choosing between a builder payload header + and payload from the local execution node. + --builder-proposals + When provided, all created validators will attempt to create blocks + via builder rather than the local EL. [possible values: true, false] + --count + The number of validators to create, regardless of how many already + exist + -d, --datadir + Used to specify a custom root data directory for lighthouse keys and + databases. Defaults to $HOME/.lighthouse/{network} where network is + the value of the `network` flag Note: Users should specify separate + custom datadirs for different networks. + --debug-level + Specifies the verbosity level used when emitting logs to the terminal. + [default: info] [possible values: info, debug, trace, warn, error, + crit] + --deposit-gwei + The GWEI value of the deposit amount. Defaults to the minimum amount + required for an active validator (MAX_EFFECTIVE_BALANCE) + --eth1-withdrawal-address + If this field is set, the given eth1 address will be used to create + the withdrawal credentials. Otherwise, it will generate withdrawal + credentials with the mnemonic-derived withdrawal public key in + EIP-2334 format. + --first-index + The first of consecutive key indexes you wish to create. [default: 0] + --gas-limit + All created validators will use this gas limit. It is recommended to + leave this as the default value by not specifying this flag. + --genesis-state-url + A URL of a beacon-API compatible server from which to download the + genesis state. Checkpoint sync server URLs can generally be used with + this flag. If not supplied, a default URL or the --checkpoint-sync-url + may be used. If the genesis state is already included in this binary + then this value will be ignored. + --genesis-state-url-timeout + The timeout in seconds for the request to --genesis-state-url. + [default: 180] + --log-format + Specifies the log format used when emitting logs to the terminal. + [possible values: JSON] + --logfile + File path where the log file will be stored. Once it grows to the + value specified in `--logfile-max-size` a new log file is generated + where future logs are stored. Once the number of log files exceeds the + value specified in `--logfile-max-number` the oldest log file will be + overwritten. + --logfile-debug-level + The verbosity level used when emitting logs to the log file. [default: + debug] [possible values: info, debug, trace, warn, error, crit] + --logfile-format + Specifies the log format used when emitting logs to the logfile. + [possible values: DEFAULT, JSON] + --logfile-max-number + The maximum number of log files that will be stored. If set to 0, + background file logging is disabled. [default: 5] + --logfile-max-size + The maximum size (in MB) each log file can grow to before rotating. If + set to 0, background file logging is disabled. [default: 200] + --mnemonic-path + If present, the mnemonic will be read in from this file. + --network + Name of the Eth2 chain Lighthouse will sync and follow. [possible + values: mainnet, gnosis, chiado, sepolia, holesky] + --output-path + The path to a directory where the validator and (optionally) deposits + files will be created. The directory will be created if it does not + exist. 
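Since `--output-path` is the one required option, a hedged end-to-end sketch of `create` may help; the network, count, and withdrawal address below are placeholders, not recommendations:

```bash
# Hedged sketch: derive two new validators from a mnemonic file and
# write keystores plus deposit data into ./validators (created if
# absent, per --output-path above).
lighthouse validator_manager create \
  --network holesky \
  --mnemonic-path ./mnemonic.txt \
  --first-index 0 \
  --count 2 \
  --eth1-withdrawal-address 0x0123456789abcdef0123456789abcdef01234567 \
  --output-path ./validators
```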
+ --prefer-builder-proposals + If this flag is set, Lighthouse will always prefer blocks constructed + by builders, regardless of payload value. [possible values: true, + false] + --safe-slots-to-import-optimistically + Used to coordinate manual overrides of the + SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should only + be used if the user has a clear understanding that the broad Ethereum + community has elected to override this parameter in the event of an + attack at the PoS transition block. Incorrect use of this flag can + cause your node to possibly accept an invalid chain or sync more + slowly. Be extremely careful with this flag. + --suggested-fee-recipient + All created validators will use this value for the suggested fee + recipient. Omit this flag to use the default value from the VC. + -t, --testnet-dir + Path to directory containing eth2_testnet specs. Defaults to a + hard-coded Lighthouse testnet. Only effective if there is no existing + database. + --terminal-block-hash-epoch-override + Used to coordinate manual overrides to the + TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should only + be used if the user has a clear understanding that the broad Ethereum + community has elected to override the terminal PoW block. Incorrect + use of this flag will cause your node to experience a consensus + failure. Be extremely careful with this flag. + --terminal-block-hash-override + Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH + parameter. This flag should only be used if the user has a clear + understanding that the broad Ethereum community has elected to + override the terminal PoW block. Incorrect use of this flag will cause + your node to experience a consensus failure. Be extremely careful with + this flag. + --terminal-total-difficulty-override + Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY + parameter. Accepts a 256-bit decimal integer (not a hex value). This + flag should only be used if the user has a clear understanding that + the broad Ethereum community has elected to override the terminal + difficulty. Incorrect use of this flag will cause your node to + experience a consensus failure. Be extremely careful with this flag. -OPTIONS: - --beacon-node - A HTTP(S) address of a beacon node using the beacon-API. If this value is provided, an error will be raised - if any validator key here is already known as a validator by that beacon node. This helps prevent the same - validator being created twice and therefore slashable conditions. - --builder-boost-factor - Defines the boost factor, a percentage multiplier to apply to the builder's payload value when choosing - between a builder payload header and payload from the local execution node. - --builder-proposals - When provided, all created validators will attempt to create blocks via builder rather than the local EL. - [possible values: true, false] - --count - The number of validators to create, regardless of how many already exist - - -d, --datadir - Used to specify a custom root data directory for lighthouse keys and databases. Defaults to - $HOME/.lighthouse/{network} where network is the value of the `network` flag Note: Users should specify - separate custom datadirs for different networks. - --debug-level - Specifies the verbosity level used when emitting logs to the terminal. [default: info] [possible values: - info, debug, trace, warn, error, crit] - --deposit-gwei - The GWEI value of the deposit amount. 
Defaults to the minimum amount required for an active validator - (MAX_EFFECTIVE_BALANCE) - --eth1-withdrawal-address - If this field is set, the given eth1 address will be used to create the withdrawal credentials. Otherwise, - it will generate withdrawal credentials with the mnemonic-derived withdrawal public key in EIP-2334 format. - --first-index - The first of consecutive key indexes you wish to create. [default: 0] - - --gas-limit - All created validators will use this gas limit. It is recommended to leave this as the default value by not - specifying this flag. - --genesis-state-url - A URL of a beacon-API compatible server from which to download the genesis state. Checkpoint sync server - URLs can generally be used with this flag. If not supplied, a default URL or the --checkpoint-sync-url may - be used. If the genesis state is already included in this binary then this value will be ignored. - --genesis-state-url-timeout - The timeout in seconds for the request to --genesis-state-url. [default: 180] - - --log-format - Specifies the log format used when emitting logs to the terminal. [possible values: JSON] - - --logfile - File path where the log file will be stored. Once it grows to the value specified in `--logfile-max-size` a - new log file is generated where future logs are stored. Once the number of log files exceeds the value - specified in `--logfile-max-number` the oldest log file will be overwritten. - --logfile-debug-level - The verbosity level used when emitting logs to the log file. [default: debug] [possible values: info, - debug, trace, warn, error, crit] - --logfile-format - Specifies the log format used when emitting logs to the logfile. [possible values: DEFAULT, JSON] - - --logfile-max-number - The maximum number of log files that will be stored. If set to 0, background file logging is disabled. - [default: 5] - --logfile-max-size - The maximum size (in MB) each log file can grow to before rotating. If set to 0, background file logging is - disabled. [default: 200] - --mnemonic-path - If present, the mnemonic will be read in from this file. - - --network - Name of the Eth2 chain Lighthouse will sync and follow. [possible values: mainnet, prater, goerli, gnosis, - chiado, sepolia, holesky] - --output-path - The path to a directory where the validator and (optionally) deposits files will be created. The directory - will be created if it does not exist. - --prefer-builder-proposals - If this flag is set, Lighthouse will always prefer blocks constructed by builders, regardless of payload - value. [possible values: true, false] - --safe-slots-to-import-optimistically - Used to coordinate manual overrides of the SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should - only be used if the user has a clear understanding that the broad Ethereum community has elected to override - this parameter in the event of an attack at the PoS transition block. Incorrect use of this flag can cause - your node to possibly accept an invalid chain or sync more slowly. Be extremely careful with this flag. - --suggested-fee-recipient - All created validators will use this value for the suggested fee recipient. Omit this flag to use the - default value from the VC. - --terminal-block-hash-epoch-override - Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should - only be used if the user has a clear understanding that the broad Ethereum community has elected to override - the terminal PoW block. 
Incorrect use of this flag will cause your node to experience a consensus failure. - Be extremely careful with this flag. - --terminal-block-hash-override - Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH parameter. This flag should only be used if - the user has a clear understanding that the broad Ethereum community has elected to override the terminal - PoW block. Incorrect use of this flag will cause your node to experience a consensus failure. Be extremely - careful with this flag. - --terminal-total-difficulty-override - Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY parameter. Accepts a 256-bit decimal - integer (not a hex value). This flag should only be used if the user has a clear understanding that the - broad Ethereum community has elected to override the terminal difficulty. Incorrect use of this flag will - cause your node to experience a consensus failure. Be extremely careful with this flag. - -t, --testnet-dir - Path to directory containing eth2_testnet specs. Defaults to a hard-coded Lighthouse testnet. Only effective - if there is no existing database. +Flags: + --disable-deposits + When provided don't generate the deposits JSON file that is commonly + used for submitting validator deposits via a web UI. Using this flag + will save several seconds per validator if the user has an alternate + strategy for submitting deposits. + --disable-log-timestamp + If present, do not include timestamps in logging output. + --disable-malloc-tuning + If present, do not configure the system allocator. Providing this flag + will generally increase memory usage, it should only be provided when + debugging specific memory allocation issues. + --force-bls-withdrawal-credentials + If present, allows BLS withdrawal credentials rather than an execution + address. This is not recommended. + -h, --help + Prints help information + --log-color + Force outputting colors when emitting logs to the terminal. + --logfile-compress + If present, compress old log files. This can help reduce the space + needed to store old logs. + --logfile-no-restricted-perms + If present, log files will be generated as world-readable meaning they + can be read by any user on the machine. Note that logs can often + contain sensitive information about your validator and so this flag + should be used with caution. For Windows users, the log file + permissions will be inherited from the parent folder. + --specify-voting-keystore-password + If present, the user will be prompted to enter the voting keystore + password that will be used to encrypt the voting keystores. If this + flag is not provided, a random password will be used. It is not + necessary to keep backups of voting keystore passwords if the mnemonic + is safely backed up. + --stdin-inputs + If present, read all user inputs from stdin instead of tty. ``` diff --git a/book/src/help_vm_import.md b/book/src/help_vm_import.md index e8eb4946aa..799a1db82b 100644 --- a/book/src/help_vm_import.md +++ b/book/src/help_vm_import.md @@ -1,103 +1,126 @@ # Validator Manager Import ``` -Uploads validators to a validator client using the HTTP API. The validators are defined in a JSON file which can be -generated using the "create-validators" command. +Uploads validators to a validator client using the HTTP API. The validators are +defined in a JSON file which can be generated using the "create-validators" +command. 
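A hedged sketch of that flow, assuming a locally running validator client with its HTTP API enabled (the token path is a placeholder; per `--vc-url` below, omitting that flag performs a dry run):

```bash
# Hedged sketch: upload validators.json (produced by "create") to a
# local validator client via its keymanager API.
lighthouse validator_manager import \
  --validators-file validators.json \
  --vc-url http://localhost:5062 \
  --vc-token ~/.lighthouse/holesky/validators/api-token.txt
```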
-USAGE: - lighthouse validator_manager import [FLAGS] [OPTIONS] --validators-file +Usage: lighthouse validator_manager import [OPTIONS] --validators-file -FLAGS: - --disable-log-timestamp If present, do not include timestamps in logging output. - --disable-malloc-tuning If present, do not configure the system allocator. Providing this flag will - generally increase memory usage, it should only be provided when debugging - specific memory allocation issues. - -h, --help Prints help information - --ignore-duplicates If present, ignore any validators which already exist on the VC. Without this - flag, the process will terminate without making any changes. This flag should - be used with caution, whilst it does not directly cause slashable conditions, - it might be an indicator that something is amiss. Users should also be careful - to avoid submitting duplicate deposits for validators that already exist on the - VC. - --log-color Force outputting colors when emitting logs to the terminal. - --logfile-compress If present, compress old log files. This can help reduce the space needed to - store old logs. - --logfile-no-restricted-perms If present, log files will be generated as world-readable meaning they can be - read by any user on the machine. Note that logs can often contain sensitive - information about your validator and so this flag should be used with caution. - For Windows users, the log file permissions will be inherited from the parent - folder. - -V, --version Prints version information +Options: + -d, --datadir + Used to specify a custom root data directory for lighthouse keys and + databases. Defaults to $HOME/.lighthouse/{network} where network is + the value of the `network` flag Note: Users should specify separate + custom datadirs for different networks. + --debug-level + Specifies the verbosity level used when emitting logs to the terminal. + [default: info] [possible values: info, debug, trace, warn, error, + crit] + --genesis-state-url + A URL of a beacon-API compatible server from which to download the + genesis state. Checkpoint sync server URLs can generally be used with + this flag. If not supplied, a default URL or the --checkpoint-sync-url + may be used. If the genesis state is already included in this binary + then this value will be ignored. + --genesis-state-url-timeout + The timeout in seconds for the request to --genesis-state-url. + [default: 180] + --log-format + Specifies the log format used when emitting logs to the terminal. + [possible values: JSON] + --logfile + File path where the log file will be stored. Once it grows to the + value specified in `--logfile-max-size` a new log file is generated + where future logs are stored. Once the number of log files exceeds the + value specified in `--logfile-max-number` the oldest log file will be + overwritten. + --logfile-debug-level + The verbosity level used when emitting logs to the log file. [default: + debug] [possible values: info, debug, trace, warn, error, crit] + --logfile-format + Specifies the log format used when emitting logs to the logfile. + [possible values: DEFAULT, JSON] + --logfile-max-number + The maximum number of log files that will be stored. If set to 0, + background file logging is disabled. [default: 5] + --logfile-max-size + The maximum size (in MB) each log file can grow to before rotating. If + set to 0, background file logging is disabled. [default: 200] + --network + Name of the Eth2 chain Lighthouse will sync and follow. 
[possible + values: mainnet, gnosis, chiado, sepolia, holesky] + --safe-slots-to-import-optimistically + Used to coordinate manual overrides of the + SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should only + be used if the user has a clear understanding that the broad Ethereum + community has elected to override this parameter in the event of an + attack at the PoS transition block. Incorrect use of this flag can + cause your node to possibly accept an invalid chain or sync more + slowly. Be extremely careful with this flag. + -t, --testnet-dir + Path to directory containing eth2_testnet specs. Defaults to a + hard-coded Lighthouse testnet. Only effective if there is no existing + database. + --terminal-block-hash-epoch-override + Used to coordinate manual overrides to the + TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should only + be used if the user has a clear understanding that the broad Ethereum + community has elected to override the terminal PoW block. Incorrect + use of this flag will cause your node to experience a consensus + failure. Be extremely careful with this flag. + --terminal-block-hash-override + Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH + parameter. This flag should only be used if the user has a clear + understanding that the broad Ethereum community has elected to + override the terminal PoW block. Incorrect use of this flag will cause + your node to experience a consensus failure. Be extremely careful with + this flag. + --terminal-total-difficulty-override + Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY + parameter. Accepts a 256-bit decimal integer (not a hex value). This + flag should only be used if the user has a clear understanding that + the broad Ethereum community has elected to override the terminal + difficulty. Incorrect use of this flag will cause your node to + experience a consensus failure. Be extremely careful with this flag. + --validators-file + The path to a JSON file containing a list of validators to be imported + to the validator client. This file is usually named "validators.json". + --vc-token + The file containing a token required by the validator client. + --vc-url + A HTTP(S) address of a validator client using the keymanager-API. If + this value is not supplied then a 'dry run' will be conducted where no + changes are made to the validator client. [default: + http://localhost:5062] -OPTIONS: - -d, --datadir - Used to specify a custom root data directory for lighthouse keys and databases. Defaults to - $HOME/.lighthouse/{network} where network is the value of the `network` flag Note: Users should specify - separate custom datadirs for different networks. - --debug-level - Specifies the verbosity level used when emitting logs to the terminal. [default: info] [possible values: - info, debug, trace, warn, error, crit] - --genesis-state-url - A URL of a beacon-API compatible server from which to download the genesis state. Checkpoint sync server - URLs can generally be used with this flag. If not supplied, a default URL or the --checkpoint-sync-url may - be used. If the genesis state is already included in this binary then this value will be ignored. - --genesis-state-url-timeout - The timeout in seconds for the request to --genesis-state-url. [default: 180] - - --log-format - Specifies the log format used when emitting logs to the terminal. [possible values: JSON] - - --logfile - File path where the log file will be stored. 
Once it grows to the value specified in `--logfile-max-size` a - new log file is generated where future logs are stored. Once the number of log files exceeds the value - specified in `--logfile-max-number` the oldest log file will be overwritten. - --logfile-debug-level - The verbosity level used when emitting logs to the log file. [default: debug] [possible values: info, - debug, trace, warn, error, crit] - --logfile-format - Specifies the log format used when emitting logs to the logfile. [possible values: DEFAULT, JSON] - - --logfile-max-number - The maximum number of log files that will be stored. If set to 0, background file logging is disabled. - [default: 5] - --logfile-max-size - The maximum size (in MB) each log file can grow to before rotating. If set to 0, background file logging is - disabled. [default: 200] - --network - Name of the Eth2 chain Lighthouse will sync and follow. [possible values: mainnet, prater, goerli, gnosis, - chiado, sepolia, holesky] - --safe-slots-to-import-optimistically - Used to coordinate manual overrides of the SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should - only be used if the user has a clear understanding that the broad Ethereum community has elected to override - this parameter in the event of an attack at the PoS transition block. Incorrect use of this flag can cause - your node to possibly accept an invalid chain or sync more slowly. Be extremely careful with this flag. - --terminal-block-hash-epoch-override - Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should - only be used if the user has a clear understanding that the broad Ethereum community has elected to override - the terminal PoW block. Incorrect use of this flag will cause your node to experience a consensus failure. - Be extremely careful with this flag. - --terminal-block-hash-override - Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH parameter. This flag should only be used if - the user has a clear understanding that the broad Ethereum community has elected to override the terminal - PoW block. Incorrect use of this flag will cause your node to experience a consensus failure. Be extremely - careful with this flag. - --terminal-total-difficulty-override - Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY parameter. Accepts a 256-bit decimal - integer (not a hex value). This flag should only be used if the user has a clear understanding that the - broad Ethereum community has elected to override the terminal difficulty. Incorrect use of this flag will - cause your node to experience a consensus failure. Be extremely careful with this flag. - -t, --testnet-dir - Path to directory containing eth2_testnet specs. Defaults to a hard-coded Lighthouse testnet. Only effective - if there is no existing database. - --validators-file - The path to a JSON file containing a list of validators to be imported to the validator client. This file is - usually named "validators.json". - --vc-token - The file containing a token required by the validator client. - - --vc-url - A HTTP(S) address of a validator client using the keymanager-API. If this value is not supplied then a 'dry - run' will be conducted where no changes are made to the validator client. [default: http://localhost:5062] +Flags: + --disable-log-timestamp + If present, do not include timestamps in logging output. + --disable-malloc-tuning + If present, do not configure the system allocator. 
Providing this flag + will generally increase memory usage, it should only be provided when + debugging specific memory allocation issues. + -h, --help + Prints help information + --ignore-duplicates + If present, ignore any validators which already exist on the VC. + Without this flag, the process will terminate without making any + changes. This flag should be used with caution, whilst it does not + directly cause slashable conditions, it might be an indicator that + something is amiss. Users should also be careful to avoid submitting + duplicate deposits for validators that already exist on the VC. + --log-color + Force outputting colors when emitting logs to the terminal. + --logfile-compress + If present, compress old log files. This can help reduce the space + needed to store old logs. + --logfile-no-restricted-perms + If present, log files will be generated as world-readable meaning they + can be read by any user on the machine. Note that logs can often + contain sensitive information about your validator and so this flag + should be used with caution. For Windows users, the log file + permissions will be inherited from the parent folder. ``` diff --git a/book/src/help_vm_move.md b/book/src/help_vm_move.md index 95c6c8e00e..9b92e21bc2 100644 --- a/book/src/help_vm_move.md +++ b/book/src/help_vm_move.md @@ -1,120 +1,147 @@ # Validator Manager Move ``` -Uploads validators to a validator client using the HTTP API. The validators are defined in a JSON file which can be -generated using the "create-validators" command. This command only supports validators signing via a keystore on the +Uploads validators to a validator client using the HTTP API. The validators are +defined in a JSON file which can be generated using the "create-validators" +command. This command only supports validators signing via a keystore on the local file system (i.e., not Web3Signer validators). -USAGE: - lighthouse validator_manager move [FLAGS] [OPTIONS] --dest-vc-token --dest-vc-url --src-vc-token --src-vc-url +Usage: lighthouse validator_manager move [OPTIONS] --src-vc-token --src-vc-url --dest-vc-token --dest-vc-url -FLAGS: - --disable-log-timestamp If present, do not include timestamps in logging output. - --disable-malloc-tuning If present, do not configure the system allocator. Providing this flag will - generally increase memory usage, it should only be provided when debugging - specific memory allocation issues. - -h, --help Prints help information - --log-color Force outputting colors when emitting logs to the terminal. - --logfile-compress If present, compress old log files. This can help reduce the space needed to - store old logs. - --logfile-no-restricted-perms If present, log files will be generated as world-readable meaning they can be - read by any user on the machine. Note that logs can often contain sensitive - information about your validator and so this flag should be used with caution. - For Windows users, the log file permissions will be inherited from the parent - folder. - --stdin-inputs If present, read all user inputs from stdin instead of tty. - -V, --version Prints version information +Options: + --builder-boost-factor + Defines the boost factor, a percentage multiplier to apply to the + builder's payload value when choosing between a builder payload header + and payload from the local execution node. + --builder-proposals + When provided, all created validators will attempt to create blocks + via builder rather than the local EL. 
[possible values: true, false] + --count + The number of validators to move. + -d, --datadir + Used to specify a custom root data directory for lighthouse keys and + databases. Defaults to $HOME/.lighthouse/{network} where network is + the value of the `network` flag Note: Users should specify separate + custom datadirs for different networks. + --debug-level + Specifies the verbosity level used when emitting logs to the terminal. + [default: info] [possible values: info, debug, trace, warn, error, + crit] + --dest-vc-token + The file containing a token required by the destination validator + client. + --dest-vc-url + A HTTP(S) address of a validator client using the keymanager-API. This + validator client is the "destination" and will have new validators + added as they are removed from the "source" validator client. + --gas-limit + All created validators will use this gas limit. It is recommended to + leave this as the default value by not specifying this flag. + --genesis-state-url + A URL of a beacon-API compatible server from which to download the + genesis state. Checkpoint sync server URLs can generally be used with + this flag. If not supplied, a default URL or the --checkpoint-sync-url + may be used. If the genesis state is already included in this binary + then this value will be ignored. + --genesis-state-url-timeout + The timeout in seconds for the request to --genesis-state-url. + [default: 180] + --log-format + Specifies the log format used when emitting logs to the terminal. + [possible values: JSON] + --logfile + File path where the log file will be stored. Once it grows to the + value specified in `--logfile-max-size` a new log file is generated + where future logs are stored. Once the number of log files exceeds the + value specified in `--logfile-max-number` the oldest log file will be + overwritten. + --logfile-debug-level + The verbosity level used when emitting logs to the log file. [default: + debug] [possible values: info, debug, trace, warn, error, crit] + --logfile-format + Specifies the log format used when emitting logs to the logfile. + [possible values: DEFAULT, JSON] + --logfile-max-number + The maximum number of log files that will be stored. If set to 0, + background file logging is disabled. [default: 5] + --logfile-max-size + The maximum size (in MB) each log file can grow to before rotating. If + set to 0, background file logging is disabled. [default: 200] + --network + Name of the Eth2 chain Lighthouse will sync and follow. [possible + values: mainnet, gnosis, chiado, sepolia, holesky] + --prefer-builder-proposals + If this flag is set, Lighthouse will always prefer blocks constructed + by builders, regardless of payload value. [possible values: true, + false] + --safe-slots-to-import-optimistically + Used to coordinate manual overrides of the + SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should only + be used if the user has a clear understanding that the broad Ethereum + community has elected to override this parameter in the event of an + attack at the PoS transition block. Incorrect use of this flag can + cause your node to possibly accept an invalid chain or sync more + slowly. Be extremely careful with this flag. + --src-vc-token + The file containing a token required by the source validator client. + --src-vc-url + A HTTP(S) address of a validator client using the keymanager-API. This + validator client is the "source" and contains the validators that are + to be moved. 
+ --stdin-inputs + If present, read all user inputs from stdin instead of tty. + --suggested-fee-recipient + All created validators will use this value for the suggested fee + recipient. Omit this flag to use the default value from the VC. + -t, --testnet-dir + Path to directory containing eth2_testnet specs. Defaults to a + hard-coded Lighthouse testnet. Only effective if there is no existing + database. + --terminal-block-hash-epoch-override + Used to coordinate manual overrides to the + TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should only + be used if the user has a clear understanding that the broad Ethereum + community has elected to override the terminal PoW block. Incorrect + use of this flag will cause your node to experience a consensus + failure. Be extremely careful with this flag. + --terminal-block-hash-override + Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH + parameter. This flag should only be used if the user has a clear + understanding that the broad Ethereum community has elected to + override the terminal PoW block. Incorrect use of this flag will cause + your node to experience a consensus failure. Be extremely careful with + this flag. + --terminal-total-difficulty-override + Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY + parameter. Accepts a 256-bit decimal integer (not a hex value). This + flag should only be used if the user has a clear understanding that + the broad Ethereum community has elected to override the terminal + difficulty. Incorrect use of this flag will cause your node to + experience a consensus failure. Be extremely careful with this flag. + --validators + The validators to be moved. Either a list of 0x-prefixed validator + pubkeys or the keyword "all". -OPTIONS: - --builder-boost-factor - Defines the boost factor, a percentage multiplier to apply to the builder's payload value when choosing - between a builder payload header and payload from the local execution node. - --builder-proposals - When provided, all created validators will attempt to create blocks via builder rather than the local EL. - [possible values: true, false] - --count The number of validators to move. - -d, --datadir - Used to specify a custom root data directory for lighthouse keys and databases. Defaults to - $HOME/.lighthouse/{network} where network is the value of the `network` flag Note: Users should specify - separate custom datadirs for different networks. - --debug-level - Specifies the verbosity level used when emitting logs to the terminal. [default: info] [possible values: - info, debug, trace, warn, error, crit] - --dest-vc-token - The file containing a token required by the destination validator client. - - --dest-vc-url - A HTTP(S) address of a validator client using the keymanager-API. This validator client is the "destination" - and will have new validators added as they are removed from the "source" validator client. - --gas-limit - All created validators will use this gas limit. It is recommended to leave this as the default value by not - specifying this flag. - --genesis-state-url - A URL of a beacon-API compatible server from which to download the genesis state. Checkpoint sync server - URLs can generally be used with this flag. If not supplied, a default URL or the --checkpoint-sync-url may - be used. If the genesis state is already included in this binary then this value will be ignored. - --genesis-state-url-timeout - The timeout in seconds for the request to --genesis-state-url. 
[default: 180] - - --log-format - Specifies the log format used when emitting logs to the terminal. [possible values: JSON] - - --logfile - File path where the log file will be stored. Once it grows to the value specified in `--logfile-max-size` a - new log file is generated where future logs are stored. Once the number of log files exceeds the value - specified in `--logfile-max-number` the oldest log file will be overwritten. - --logfile-debug-level - The verbosity level used when emitting logs to the log file. [default: debug] [possible values: info, - debug, trace, warn, error, crit] - --logfile-format - Specifies the log format used when emitting logs to the logfile. [possible values: DEFAULT, JSON] - - --logfile-max-number - The maximum number of log files that will be stored. If set to 0, background file logging is disabled. - [default: 5] - --logfile-max-size - The maximum size (in MB) each log file can grow to before rotating. If set to 0, background file logging is - disabled. [default: 200] - --network - Name of the Eth2 chain Lighthouse will sync and follow. [possible values: mainnet, prater, goerli, gnosis, - chiado, sepolia, holesky] - --prefer-builder-proposals - If this flag is set, Lighthouse will always prefer blocks constructed by builders, regardless of payload - value. [possible values: true, false] - --safe-slots-to-import-optimistically - Used to coordinate manual overrides of the SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should - only be used if the user has a clear understanding that the broad Ethereum community has elected to override - this parameter in the event of an attack at the PoS transition block. Incorrect use of this flag can cause - your node to possibly accept an invalid chain or sync more slowly. Be extremely careful with this flag. - --src-vc-token - The file containing a token required by the source validator client. - - --src-vc-url - A HTTP(S) address of a validator client using the keymanager-API. This validator client is the "source" and - contains the validators that are to be moved. - --suggested-fee-recipient - All created validators will use this value for the suggested fee recipient. Omit this flag to use the - default value from the VC. - --terminal-block-hash-epoch-override - Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should - only be used if the user has a clear understanding that the broad Ethereum community has elected to override - the terminal PoW block. Incorrect use of this flag will cause your node to experience a consensus failure. - Be extremely careful with this flag. - --terminal-block-hash-override - Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH parameter. This flag should only be used if - the user has a clear understanding that the broad Ethereum community has elected to override the terminal - PoW block. Incorrect use of this flag will cause your node to experience a consensus failure. Be extremely - careful with this flag. - --terminal-total-difficulty-override - Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY parameter. Accepts a 256-bit decimal - integer (not a hex value). This flag should only be used if the user has a clear understanding that the - broad Ethereum community has elected to override the terminal difficulty. Incorrect use of this flag will - cause your node to experience a consensus failure. Be extremely careful with this flag. 
-    -t, --testnet-dir <DIR>
-            Path to directory containing eth2_testnet specs. Defaults to a hard-coded Lighthouse testnet. Only effective
-            if there is no existing database.
-        --validators <STRING>
-            The validators to be moved. Either a list of 0x-prefixed validator pubkeys or the keyword "all".
+Flags:
+      --disable-log-timestamp
+          If present, do not include timestamps in logging output.
+      --disable-malloc-tuning
+          If present, do not configure the system allocator. Providing this flag
+          will generally increase memory usage, it should only be provided when
+          debugging specific memory allocation issues.
+  -h, --help
+          Prints help information
+      --log-color
+          Force outputting colors when emitting logs to the terminal.
+      --logfile-compress
+          If present, compress old log files. This can help reduce the space
+          needed to store old logs.
+      --logfile-no-restricted-perms
+          If present, log files will be generated as world-readable meaning they
+          can be read by any user on the machine. Note that logs can often
+          contain sensitive information about your validator and so this flag
+          should be used with caution. For Windows users, the log file
+          permissions will be inherited from the parent folder.
```
diff --git a/book/src/key-management.md b/book/src/key-management.md
index 007ccf6977..fa6e99a2aa 100644
--- a/book/src/key-management.md
+++ b/book/src/key-management.md
@@ -75,21 +75,21 @@ mnemonic is encrypted with a password. It is the responsibility of the user to
 define a strong password. The password is only required for interacting with
 the wallet, it is not required for recovering keys from a mnemonic.
-To create a wallet, use the `lighthouse account wallet` command. For example, if we wish to create a new wallet for the Goerli testnet named `wally` and saves it in `~/.lighthouse/goerli/wallets` with a randomly generated password saved
+To create a wallet, use the `lighthouse account wallet` command. For example, if we wish to create a new wallet for the Holesky testnet named `wally` and save it in `~/.lighthouse/holesky/wallets` with a randomly generated password saved
 to `./wallet.pass`:
 
 ```bash
-lighthouse --network goerli account wallet create --name wally --password-file wally.pass
+lighthouse --network holesky account wallet create --name wally --password-file wally.pass
 ```
 
-Using the above command, a wallet will be created in `~/.lighthouse/goerli/wallets` with the name
+Using the above command, a wallet will be created in `~/.lighthouse/holesky/wallets` with the name
 `wally`. It is encrypted using the password defined in the `wally.pass` file. During the wallet creation process, a 24-word mnemonic will be displayed. Record the mnemonic because it allows you to recreate the files in the case of data loss.
 
 > Notes:
 >
-> - When navigating to the directory `~/.lighthouse/goerli/wallets`, one will not see the wallet name `wally`, but a hexadecimal folder containing the wallet file. However, when interacting with `lighthouse` in the CLI, the name `wally` will be used.
+> - When navigating to the directory `~/.lighthouse/holesky/wallets`, one will not see the wallet name `wally`, but a hexadecimal folder containing the wallet file. However, when interacting with `lighthouse` in the CLI, the name `wally` will be used.
 > - The password is not `wally.pass`, it is the _content_ of the
 >   `wally.pass` file.
 > - If `wally.pass` already exists, the wallet password will be set to the content
@@ -100,18 +100,18 @@ During the wallet creation process, a 24-word mnemonic will be displayed.
Record Validators are fundamentally represented by a BLS keypair. In Lighthouse, we use a wallet to generate these keypairs. Once a wallet exists, the `lighthouse account validator create` command can be used to generate the BLS keypair and all necessary information to submit a validator deposit. With the `wally` wallet created in [Step 1](#step-1-create-a-wallet-and-record-the-mnemonic), we can create a validator with the command: ```bash -lighthouse --network goerli account validator create --wallet-name wally --wallet-password wally.pass --count 1 +lighthouse --network holesky account validator create --wallet-name wally --wallet-password wally.pass --count 1 ``` This command will: -- Derive a single new BLS keypair from wallet `wally` in `~/.lighthouse/goerli/wallets`, updating it so that it generates a new key next time. -- Create a new directory `~/.lighthouse/goerli/validators` containing: +- Derive a single new BLS keypair from wallet `wally` in `~/.lighthouse/holesky/wallets`, updating it so that it generates a new key next time. +- Create a new directory `~/.lighthouse/holesky/validators` containing: - An encrypted keystore file `voting-keystore.json` containing the validator's voting keypair. - An `eth1_deposit_data.rlp` assuming the default deposit amount (`32 ETH`) which can be submitted to the deposit contract for the Goerli testnet. Other networks can be set via the `--network` parameter. -- Create a new directory `~/.lighthouse/goerli/secrets` which stores a password to the validator's voting keypair. +- Create a new directory `~/.lighthouse/holesky/secrets` which stores a password to the validator's voting keypair. If you want to create another validator in the future, repeat [Step 2](#step-2-create-a-validator). The wallet keeps track of how many validators it has generated and ensures that a new validator is generated each time. The important thing is to keep the 24-word mnemonic safe so that it can be used to generate new validator keys if needed. diff --git a/book/src/merge-migration.md b/book/src/merge-migration.md index 8653cc45d9..6de05cff2a 100644 --- a/book/src/merge-migration.md +++ b/book/src/merge-migration.md @@ -209,4 +209,3 @@ guidance for specific setups. - [Ethereum Staking Launchpad: Merge Readiness](https://launchpad.ethereum.org/en/merge-readiness). - [CoinCashew: Ethereum Merge Upgrade Checklist](https://www.coincashew.com/coins/overview-eth/archived-guides/ethereum-merge-upgrade-checklist-for-home-stakers-and-validators) - [EthDocker: Merge Preparation](https://eth-docker.net/About/MergePrep/) -- [Remy Roy: How to join the Goerli/Prater merge testnet](https://github.com/remyroy/ethstaker/blob/main/merge-goerli-prater.md) diff --git a/book/src/voluntary-exit.md b/book/src/voluntary-exit.md index 33672e54b7..6261f2e267 100644 --- a/book/src/voluntary-exit.md +++ b/book/src/voluntary-exit.md @@ -32,7 +32,7 @@ Below is an example for initiating a voluntary exit on the Holesky testnet. ``` $ lighthouse --network holesky account validator exit --keystore /path/to/keystore --beacon-node http://localhost:5052 -Running account manager for Prater network +Running account manager for Holesky network validator-dir path: ~/.lighthouse/holesky/validators Enter the keystore password for validator in 0xabcd @@ -82,7 +82,7 @@ There are two types of withdrawal credentials, `0x00` and `0x01`. 
To check which If your withdrawal credentials is of type `0x01`, it means you have set your withdrawal address previously, and you will not be able to change the withdrawal address. ### 3. When will my BTEC request (update withdrawal credentials to type `0x01`) be processed ? - + Your BTEC request will be included very quickly as soon as a new block is proposed. This should be the case most (if not all) of the time, given that the peak BTEC request time has now past (right after the [Capella](https://ethereum.org/en/history/#capella) upgrade on 12th April 2023 and lasted for ~ 2 days) . ### 4. When will I get my staked fund after voluntary exit if my validator is of type `0x01`? diff --git a/boot_node/Cargo.toml b/boot_node/Cargo.toml index 6cf62e0430..d5c5fe0d64 100644 --- a/boot_node/Cargo.toml +++ b/boot_node/Cargo.toml @@ -18,9 +18,6 @@ slog-term = { workspace = true } logging = { workspace = true } slog-async = { workspace = true } slog-scope = "4.3.0" -slog-stdlog = "4.0.0" hex = { workspace = true } serde = { workspace = true } -serde_json = { workspace = true } -serde_yaml = { workspace = true } eth2_network_config = { workspace = true } diff --git a/boot_node/src/cli.rs b/boot_node/src/cli.rs index d7ea5ab0b3..440a9d27e2 100644 --- a/boot_node/src/cli.rs +++ b/boot_node/src/cli.rs @@ -1,18 +1,29 @@ //! Simple logic for spawning a Lighthouse BootNode. -use clap::{App, Arg}; +use clap::{Arg, ArgAction, Command}; +use clap_utils::{get_color_style, FLAG_HEADER}; // TODO: Add DOS prevention CLI params -pub fn cli_app<'a, 'b>() -> App<'a, 'b> { - App::new("boot_node") +pub fn cli_app() -> Command { + Command::new("boot_node") .about("Start a special Lighthouse process that only serves as a discv5 boot-node. This \ process will *not* import blocks or perform most typical beacon node functions. Instead, it \ will simply run the discv5 service and assist nodes on the network to discover each other. \ This is the recommended way to provide a network boot-node since it has a reduced attack \ surface compared to a full beacon node.") - .settings(&[clap::AppSettings::ColoredHelp]) + .styles(get_color_style()) + .display_order(0) .arg( - Arg::with_name("enr-address") + Arg::new("help") + .long("help") + .short('h') + .help("Prints help information") + .action(ArgAction::HelpLong) + .display_order(0) + .help_heading(FLAG_HEADER) + ) + .arg( + Arg::new("enr-address") .long("enr-address") .value_name("ADDRESS") .help("The IP address/ DNS address to broadcast to other peers on how to reach \ @@ -21,31 +32,33 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { discovery. Set this only if you are sure other nodes can connect to your \ local node on this address. This will update the `ip4` or `ip6` ENR fields \ accordingly. To update both, set this flag twice with the different values.") - .multiple(true) - .max_values(2) + .action(ArgAction::Append) + .num_args(1..=2) .required(true) .conflicts_with("network-dir") - .takes_value(true), + .display_order(0) ) .arg( - Arg::with_name("port") + Arg::new("port") .long("port") .value_name("PORT") .help("The UDP port to listen on.") .default_value("9000") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("port6") + Arg::new("port6") .long("port6") .value_name("PORT") .help("The UDP port to listen on over IpV6 when listening over both Ipv4 and \ Ipv6. 
Defaults to 9090 when required.") .default_value("9090") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("listen-address") + Arg::new("listen-address") .long("listen-address") .value_name("ADDRESS") .help("The address the bootnode will listen for UDP communications. To listen \ @@ -56,53 +69,63 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { - --listen-address '0.0.0.0' --listen-address '::' will listen over both \ Ipv4 and Ipv6. The order of the given addresses is not relevant. However, \ multiple Ipv4, or multiple Ipv6 addresses will not be accepted.") - .multiple(true) - .max_values(2) + .num_args(1..=2) .default_value("0.0.0.0") - .takes_value(true) + .action(ArgAction::Append) + .display_order(0) ) .arg( - Arg::with_name("boot-nodes") + Arg::new("boot-nodes") .long("boot-nodes") .allow_hyphen_values(true) .value_name("ENR-LIST/Multiaddr") .help("One or more comma-delimited base64-encoded ENR's or multiaddr strings of peers to initially add to the local routing table") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("enr-udp-port") + Arg::new("enr-udp-port") .long("enr-port") .value_name("PORT") .help("The UDP port of the boot node's ENR. This is the port that external peers will dial to reach this boot node. Set this only if the external port differs from the listening port.") - .takes_value(true) + .action(ArgAction::Set) .conflicts_with("network-dir") + .display_order(0) ) .arg( - Arg::with_name("enr-udp6-port") + Arg::new("enr-udp6-port") .long("enr-udp6-port") .value_name("PORT") .help("The UDP6 port of the local ENR. Set this only if you are sure other nodes \ can connect to your local node on this port over IpV6.") .conflicts_with("network-dir") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("enable-enr-auto-update") - .short("x") + Arg::new("enable-enr-auto-update") + .short('x') + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .long("enable-enr-auto-update") .help("Discovery can automatically update the node's local ENR with an external IP address and port as seen by other peers on the network. \ This enables this feature.") + .display_order(0) ) .arg( - Arg::with_name("disable-packet-filter") + Arg::new("disable-packet-filter") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .long("disable-packet-filter") .help("Disables discv5 packet filter. Useful for testing in smaller networks") + .display_order(0) ) .arg( - Arg::with_name("network-dir") + Arg::new("network-dir") .value_name("NETWORK_DIR") .long("network-dir") .help("The directory which contains the enr and it's associated private key") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) } diff --git a/boot_node/src/config.rs b/boot_node/src/config.rs index a9c8950532..a8b0f7aa56 100644 --- a/boot_node/src/config.rs +++ b/boot_node/src/config.rs @@ -25,11 +25,10 @@ pub struct BootNodeConfig { impl BootNodeConfig { pub async fn new( - matches: &ArgMatches<'_>, + matches: &ArgMatches, eth2_network_config: &Eth2NetworkConfig, ) -> Result { let data_dir = get_data_dir(matches); - // Try and obtain bootnodes let boot_nodes = { @@ -39,7 +38,7 @@ impl BootNodeConfig { boot_nodes.extend_from_slice(enr); } - if let Some(nodes) = matches.value_of("boot-nodes") { + if let Some(nodes) = matches.get_one::("boot-nodes") { boot_nodes.extend_from_slice( &nodes .split(',') @@ -81,14 +80,14 @@ impl BootNodeConfig { }; // By default this is enabled. 
If it is not set, revert to false. - if !matches.is_present("enable-enr-auto-update") { + if !matches.get_flag("enable-enr-auto-update") { network_config.discv5_config.enr_update = false; } let private_key = load_private_key(&network_config, &logger); let local_key = CombinedKey::from_libp2p(private_key)?; - let local_enr = if let Some(dir) = matches.value_of("network-dir") { + let local_enr = if let Some(dir) = matches.get_one::("network-dir") { let network_dir: PathBuf = dir.into(); load_enr_from_disk(&network_dir)? } else { diff --git a/boot_node/src/lib.rs b/boot_node/src/lib.rs index e707dc14f7..669b126bd3 100644 --- a/boot_node/src/lib.rs +++ b/boot_node/src/lib.rs @@ -14,8 +14,8 @@ const LOG_CHANNEL_SIZE: usize = 2048; /// Run the bootnode given the CLI configuration. pub fn run( - lh_matches: &ArgMatches<'_>, - bn_matches: &ArgMatches<'_>, + lh_matches: &ArgMatches, + bn_matches: &ArgMatches, eth_spec_id: EthSpecId, eth2_network_config: &Eth2NetworkConfig, debug_level: String, @@ -67,8 +67,8 @@ pub fn run( } fn main( - lh_matches: &ArgMatches<'_>, - bn_matches: &ArgMatches<'_>, + lh_matches: &ArgMatches, + bn_matches: &ArgMatches, eth2_network_config: &Eth2NetworkConfig, log: slog::Logger, ) -> Result<(), String> { diff --git a/boot_node/src/server.rs b/boot_node/src/server.rs index b6bdd148f4..286fa9e0f0 100644 --- a/boot_node/src/server.rs +++ b/boot_node/src/server.rs @@ -12,8 +12,8 @@ use slog::info; use types::EthSpec; pub async fn run( - lh_matches: &ArgMatches<'_>, - bn_matches: &ArgMatches<'_>, + lh_matches: &ArgMatches, + bn_matches: &ArgMatches, eth2_network_config: &Eth2NetworkConfig, log: slog::Logger, ) -> Result<(), String> { @@ -28,7 +28,7 @@ pub async fn run( ð2_network_config.chain_spec::()?, )?; - if lh_matches.is_present("immediate-shutdown") { + if lh_matches.get_flag("immediate-shutdown") { return Ok(()); } diff --git a/common/clap_utils/src/lib.rs b/common/clap_utils/src/lib.rs index 1ebd2b1740..ea56e7e672 100644 --- a/common/clap_utils/src/lib.rs +++ b/common/clap_utils/src/lib.rs @@ -1,5 +1,6 @@ //! A helper library for parsing values from `clap::ArgMatches`. +use clap::builder::styling::*; use clap::ArgMatches; use eth2_network_config::{Eth2NetworkConfig, DEFAULT_HARDCODED_NETWORK}; use ethereum_types::U256 as Uint256; @@ -15,12 +16,14 @@ pub const BAD_TESTNET_DIR_MESSAGE: &str = "The hard-coded testnet directory was or when there is no default public network to connect to. \ During these times you must specify a --testnet-dir."; +pub const FLAG_HEADER: &str = "Flags"; + /// Try to parse the eth2 network config from the `network`, `testnet-dir` flags in that order. /// Returns the default hardcoded testnet if neither flags are set. pub fn get_eth2_network_config(cli_args: &ArgMatches) -> Result { - let optional_network_config = if cli_args.is_present("network") { + let optional_network_config = if cli_args.contains_id("network") { parse_hardcoded_network(cli_args, "network")? - } else if cli_args.is_present("testnet-dir") { + } else if cli_args.contains_id("testnet-dir") { parse_testnet_dir(cli_args, "testnet-dir")? } else { // if neither is present, assume the default network @@ -92,7 +95,7 @@ pub fn parse_path_with_default_in_home_dir( default: PathBuf, ) -> Result { matches - .value_of(name) + .get_one::(name) .map(|dir| { dir.parse::() .map_err(|e| format!("Unable to parse {}: {}", name, e)) @@ -122,7 +125,8 @@ where ::Err: std::fmt::Display, { matches - .value_of(name) + .try_get_one::(name) + .map_err(|e| format!("Unable to parse {}: {}", name, e))? 
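        // Note: `try_get_one` (clap v4) can itself fail, e.g. when the argument
        // id is unknown or was registered with a different type, so that error
        // is mapped into the same `String` form as the parse failure below.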
.map(|val| { val.parse() .map_err(|e| format!("Unable to parse {}: {}", name, e)) @@ -150,7 +154,7 @@ pub fn parse_ssz_optional( name: &'static str, ) -> Result, String> { matches - .value_of(name) + .get_one::(name) .map(|val| { if let Some(stripped) = val.strip_prefix("0x") { let vec = hex::decode(stripped) @@ -190,3 +194,15 @@ where } Ok(()) } + +pub fn get_color_style() -> Styles { + Styles::styled() + .header(AnsiColor::Yellow.on_default()) + .usage(AnsiColor::Green.on_default()) + .literal(AnsiColor::Green.on_default()) + .placeholder(AnsiColor::Green.on_default()) +} + +pub fn parse_flag(matches: &ArgMatches, name: &str) -> bool { + *matches.get_one::(name).unwrap_or(&false) +} diff --git a/common/directory/src/lib.rs b/common/directory/src/lib.rs index e8585c504a..df03b4f9a4 100644 --- a/common/directory/src/lib.rs +++ b/common/directory/src/lib.rs @@ -21,9 +21,9 @@ pub const CUSTOM_TESTNET_DIR: &str = "custom"; /// if not present, then checks the "testnet-dir" flag and returns a custom name /// If neither flags are present, returns the default hardcoded network name. pub fn get_network_dir(matches: &ArgMatches) -> String { - if let Some(network_name) = matches.value_of("network") { + if let Some(network_name) = matches.get_one::("network") { network_name.to_string() - } else if matches.value_of("testnet-dir").is_some() { + } else if matches.get_one::("testnet-dir").is_some() { CUSTOM_TESTNET_DIR.to_string() } else { eth2_network_config::DEFAULT_HARDCODED_NETWORK.to_string() diff --git a/common/eth2/Cargo.toml b/common/eth2/Cargo.toml index 0f27bb6672..10b4755ba2 100644 --- a/common/eth2/Cargo.toml +++ b/common/eth2/Cargo.toml @@ -10,7 +10,6 @@ edition = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } ssz_types = { workspace = true } -tree_hash = { workspace = true } types = { workspace = true } reqwest = { workspace = true } lighthouse_network = { workspace = true } @@ -29,7 +28,6 @@ futures = { workspace = true } store = { workspace = true } slashing_protection = { workspace = true } mediatype = "0.19.13" -mime = "0.3.16" pretty_reqwest_error = { workspace = true } [dev-dependencies] diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index 2f6e7fe0e8..4863b72cf0 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -1084,6 +1084,7 @@ pub enum EventKind { PayloadAttributes(VersionedSsePayloadAttributes), ProposerSlashing(Box), AttesterSlashing(Box>), + BlsToExecutionChange(Box), } impl EventKind { @@ -1105,6 +1106,7 @@ impl EventKind { EventKind::BlockReward(_) => "block_reward", EventKind::ProposerSlashing(_) => "proposer_slashing", EventKind::AttesterSlashing(_) => "attester_slashing", + EventKind::BlsToExecutionChange(_) => "bls_to_execution_change", } } @@ -1195,6 +1197,11 @@ impl EventKind { ServerError::InvalidServerSentEvent(format!("Proposer Slashing: {:?}", e)) })?, )), + "bls_to_execution_change" => Ok(EventKind::BlsToExecutionChange( + serde_json::from_str(data).map_err(|e| { + ServerError::InvalidServerSentEvent(format!("Bls To Execution Change: {:?}", e)) + })?, + )), _ => Err(ServerError::InvalidServerSentEvent( "Could not parse event tag".to_string(), )), @@ -1228,6 +1235,7 @@ pub enum EventTopic { BlockReward, AttesterSlashing, ProposerSlashing, + BlsToExecutionChange, } impl FromStr for EventTopic { @@ -1251,6 +1259,7 @@ impl FromStr for EventTopic { "block_reward" => Ok(EventTopic::BlockReward), "attester_slashing" => Ok(EventTopic::AttesterSlashing), "proposer_slashing" => 
Ok(EventTopic::ProposerSlashing), + "bls_to_execution_change" => Ok(EventTopic::BlsToExecutionChange), _ => Err("event topic cannot be parsed.".to_string()), } } @@ -1275,6 +1284,7 @@ impl fmt::Display for EventTopic { EventTopic::BlockReward => write!(f, "block_reward"), EventTopic::AttesterSlashing => write!(f, "attester_slashing"), EventTopic::ProposerSlashing => write!(f, "proposer_slashing"), + EventTopic::BlsToExecutionChange => write!(f, "bls_to_execution_change"), } } } diff --git a/common/eth2_config/src/lib.rs b/common/eth2_config/src/lib.rs index bf707c4d17..9104db8f67 100644 --- a/common/eth2_config/src/lib.rs +++ b/common/eth2_config/src/lib.rs @@ -192,7 +192,11 @@ macro_rules! define_net { config_dir: ETH2_NET_DIR.config_dir, genesis_state_source: ETH2_NET_DIR.genesis_state_source, config: $this_crate::$include_file!($this_crate, "../", "config.yaml"), - deploy_block: $this_crate::$include_file!($this_crate, "../", "deploy_block.txt"), + deploy_block: $this_crate::$include_file!( + $this_crate, + "../", + "deposit_contract_block.txt" + ), boot_enr: $this_crate::$include_file!($this_crate, "../", "boot_enr.yaml"), genesis_state_bytes: $this_crate::$include_file!($this_crate, "../", "genesis.ssz"), } @@ -284,26 +288,6 @@ define_hardcoded_nets!( // Describes how the genesis state can be obtained. GenesisStateSource::IncludedBytes ), - ( - // Network name (must be unique among all networks). - prater, - // The name of the directory in the `eth2_network_config/built_in_network_configs` - // directory where the configuration files are located for this network. - "prater", - // Describes how the genesis state can be obtained. - GenesisStateSource::IncludedBytes - ), - ( - // Network name (must be unique among all networks). - goerli, - // The name of the directory in the `eth2_network_config/built_in_network_configs` - // directory where the configuration files are located for this network. - // - // The Goerli network is effectively an alias to Prater. - "prater", - // Describes how the genesis state can be obtained. - GenesisStateSource::IncludedBytes - ), ( // Network name (must be unique among all networks). 
gnosis, diff --git a/common/eth2_network_config/Cargo.toml b/common/eth2_network_config/Cargo.toml index 3807c2e993..4b34405e5b 100644 --- a/common/eth2_network_config/Cargo.toml +++ b/common/eth2_network_config/Cargo.toml @@ -13,12 +13,11 @@ eth2_config = { workspace = true } [dev-dependencies] tempfile = { workspace = true } tokio = { workspace = true } +ethereum_ssz = { workspace = true } [dependencies] serde_yaml = { workspace = true } -serde_json = { workspace = true } types = { workspace = true } -ethereum_ssz = { workspace = true } eth2_config = { workspace = true } discv5 = { workspace = true } reqwest = { workspace = true } diff --git a/common/eth2_network_config/built_in_network_configs/chiado/config.yaml b/common/eth2_network_config/built_in_network_configs/chiado/config.yaml index c869d9cfc8..07d100b011 100644 --- a/common/eth2_network_config/built_in_network_configs/chiado/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/chiado/config.yaml @@ -3,7 +3,7 @@ PRESET_BASE: 'gnosis' # Free-form short name of the network that this configuration applies to - known # canonical network names include: # * 'mainnet' - there can be only one -# * 'prater' - testnet +# * 'holesky' - testnet # Must match the regex: [a-z0-9\-] CONFIG_NAME: 'chiado' diff --git a/common/eth2_network_config/built_in_network_configs/chiado/deploy_block.txt b/common/eth2_network_config/built_in_network_configs/chiado/deposit_contract_block.txt similarity index 100% rename from common/eth2_network_config/built_in_network_configs/chiado/deploy_block.txt rename to common/eth2_network_config/built_in_network_configs/chiado/deposit_contract_block.txt diff --git a/common/eth2_network_config/built_in_network_configs/gnosis/deploy_block.txt b/common/eth2_network_config/built_in_network_configs/gnosis/deposit_contract_block.txt similarity index 100% rename from common/eth2_network_config/built_in_network_configs/gnosis/deploy_block.txt rename to common/eth2_network_config/built_in_network_configs/gnosis/deposit_contract_block.txt diff --git a/common/eth2_network_config/built_in_network_configs/holesky/deploy_block.txt b/common/eth2_network_config/built_in_network_configs/holesky/deposit_contract_block.txt similarity index 100% rename from common/eth2_network_config/built_in_network_configs/holesky/deploy_block.txt rename to common/eth2_network_config/built_in_network_configs/holesky/deposit_contract_block.txt diff --git a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml index c8695123ab..fc9c002dab 100644 --- a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml @@ -6,7 +6,7 @@ PRESET_BASE: 'mainnet' # Free-form short name of the network that this configuration applies to - known # canonical network names include: # * 'mainnet' - there can be only one -# * 'prater' - testnet +# * 'holesky' - testnet # Must match the regex: [a-z0-9\-] CONFIG_NAME: 'mainnet' diff --git a/common/eth2_network_config/built_in_network_configs/mainnet/deploy_block.txt b/common/eth2_network_config/built_in_network_configs/mainnet/deposit_contract_block.txt similarity index 100% rename from common/eth2_network_config/built_in_network_configs/mainnet/deploy_block.txt rename to common/eth2_network_config/built_in_network_configs/mainnet/deposit_contract_block.txt diff --git 
a/common/eth2_network_config/built_in_network_configs/prater/boot_enr.yaml b/common/eth2_network_config/built_in_network_configs/prater/boot_enr.yaml deleted file mode 100644 index 7000ff0bbc..0000000000 --- a/common/eth2_network_config/built_in_network_configs/prater/boot_enr.yaml +++ /dev/null @@ -1,17 +0,0 @@ -# q9f bootnode errai (lighthouse) -# /ip4/135.181.181.239/tcp/9000/p2p/16Uiu2HAmPitcpwsGZf1vGiu6hdwZHsVLyFzVZeNqaSmUaSyM7Xvj -- enr:-LK4QH1xnjotgXwg25IDPjrqRGFnH1ScgNHA3dv1Z8xHCp4uP3N3Jjl_aYv_WIxQRdwZvSukzbwspXZ7JjpldyeVDzMCh2F0dG5ldHOIAAAAAAAAAACEZXRoMpB53wQoAAAQIP__________gmlkgnY0gmlwhIe1te-Jc2VjcDI1NmsxoQOkcGXqbCJYbcClZ3z5f6NWhX_1YPFRYRRWQpJjwSHpVIN0Y3CCIyiDdWRwgiMo -# q9f bootnode gudja (teku) -# /ip4/135.181.182.51/tcp/9000/p2p/16Uiu2HAmTttt9ZTmCmwmKiV3QR7iTAfnAckwzhswrNmWkthi6meB -- enr:-KG4QCIzJZTY_fs_2vqWEatJL9RrtnPwDCv-jRBuO5FQ2qBrfJubWOWazri6s9HsyZdu-fRUfEzkebhf1nvO42_FVzwDhGV0aDKQed8EKAAAECD__________4JpZIJ2NIJpcISHtbYziXNlY3AyNTZrMaED4m9AqVs6F32rSCGsjtYcsyfQE2K8nDiGmocUY_iq-TSDdGNwgiMog3VkcIIjKA -# Prysm bootnode #1 -- enr:-Ku4QFmUkNp0g9bsLX2PfVeIyT-9WO-PZlrqZBNtEyofOOfLMScDjaTzGxIb1Ns9Wo5Pm_8nlq-SZwcQfTH2cgO-s88Bh2F0dG5ldHOIAAAAAAAAAACEZXRoMpDkvpOTAAAQIP__________gmlkgnY0gmlwhBLf22SJc2VjcDI1NmsxoQLV_jMOIxKbjHFKgrkFvwDvpexo6Nd58TK5k7ss4Vt0IoN1ZHCCG1g -# Lighthouse bootnode #1 -- enr:-Ly4QFPk-cTMxZ3jWTafiNblEZkQIXGF2aVzCIGW0uHp6KaEAvBMoctE8S7YU0qZtuS7By0AA4YMfKoN9ls_GJRccVpFh2F0dG5ldHOI__________-EZXRoMpCC9KcrAgAQIIS2AQAAAAAAgmlkgnY0gmlwhKh3joWJc2VjcDI1NmsxoQKrxz8M1IHwJqRIpDqdVW_U1PeixMW5SfnBD-8idYIQrIhzeW5jbmV0cw-DdGNwgiMog3VkcIIjKA -# Lighthouse bootnode #2 -- enr:-L64QJmwSDtaHVgGiqIxJWUtxWg6uLCipsms6j-8BdsOJfTWAs7CLF9HJnVqFE728O-JYUDCxzKvRdeMqBSauHVCMdaCAVWHYXR0bmV0c4j__________4RldGgykIL0pysCABAghLYBAAAAAACCaWSCdjSCaXCEQWxOdolzZWNwMjU2azGhA7Qmod9fK86WidPOzLsn5_8QyzL7ZcJ1Reca7RnD54vuiHN5bmNuZXRzD4N0Y3CCIyiDdWRwgiMo -# Nimbus bootstrap nodes -- enr:-LK4QMzPq4Q7w5R-rnGQDcI8BYky6oPVBGQTbS1JJLVtNi_8PzBLV7Bdzsoame9nJK5bcJYpGHn4SkaDN2CM6tR5G_4Bh2F0dG5ldHOIAAAAAAAAAACEZXRoMpB53wQoAAAQIP__________gmlkgnY0gmlwhAN4yvyJc2VjcDI1NmsxoQKa8Qnp_P2clLIP6VqLKOp_INvEjLszalEnW0LoBZo4YYN0Y3CCI4yDdWRwgiOM -- enr:-LK4QLM_pPHa78R8xlcU_s40Y3XhFjlb3kPddW9lRlY67N5qeFE2Wo7RgzDgRs2KLCXODnacVHMFw1SfpsW3R474RZEBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpB53wQoAAAQIP__________gmlkgnY0gmlwhANBY-yJc2VjcDI1NmsxoQNsZkFXgKbTzuxF7uwxlGauTGJelE6HD269CcFlZ_R7A4N0Y3CCI4yDdWRwgiOM -# Teku bootnode -- enr:-KK4QH0RsNJmIG0EX9LSnVxMvg-CAOr3ZFF92hunU63uE7wcYBjG1cFbUTvEa5G_4nDJkRhUq9q2ck9xY-VX1RtBsruBtIRldGgykIL0pysBABAg__________-CaWSCdjSCaXCEEnXQ0YlzZWNwMjU2azGhA1grTzOdMgBvjNrk-vqWtTZsYQIi0QawrhoZrsn5Hd56g3RjcIIjKIN1ZHCCIyg diff --git a/common/eth2_network_config/built_in_network_configs/prater/config.yaml b/common/eth2_network_config/built_in_network_configs/prater/config.yaml deleted file mode 100644 index f474b172c5..0000000000 --- a/common/eth2_network_config/built_in_network_configs/prater/config.yaml +++ /dev/null @@ -1,134 +0,0 @@ -# Prater config - -# Extends the mainnet preset -PRESET_BASE: 'mainnet' - -CONFIG_NAME: 'prater' - -# Transition -# --------------------------------------------------------------- -# Expected August 10, 2022 -TERMINAL_TOTAL_DIFFICULTY: 10790000 -# By default, don't use these params -TERMINAL_BLOCK_HASH: 0x0000000000000000000000000000000000000000000000000000000000000000 -TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551615 - - -# Genesis -# --------------------------------------------------------------- -# `2**14` (= 16,384) -MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 16384 -# Mar-01-2021 
08:53:32 AM +UTC -MIN_GENESIS_TIME: 1614588812 -# Prater area code (Vienna) -GENESIS_FORK_VERSION: 0x00001020 -# Customized for Prater: 1919188 seconds (Mar-23-2021 02:00:00 PM +UTC) -GENESIS_DELAY: 1919188 - - -# Forking -# --------------------------------------------------------------- -# Some forks are disabled for now: -# - These may be re-assigned to another fork-version later -# - Temporarily set to max uint64 value: 2**64 - 1 - -# Altair -ALTAIR_FORK_VERSION: 0x01001020 -ALTAIR_FORK_EPOCH: 36660 -# Bellatrix -BELLATRIX_FORK_VERSION: 0x02001020 -BELLATRIX_FORK_EPOCH: 112260 -# Capella -CAPELLA_FORK_VERSION: 0x03001020 -CAPELLA_FORK_EPOCH: 162304 -# DENEB -DENEB_FORK_VERSION: 0x04001020 -DENEB_FORK_EPOCH: 231680 - -# Time parameters -# --------------------------------------------------------------- -# 12 seconds -SECONDS_PER_SLOT: 12 -# 14 (estimate from Eth1 mainnet) -SECONDS_PER_ETH1_BLOCK: 14 -# 2**8 (= 256) epochs ~27 hours -MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256 -# 2**8 (= 256) epochs ~27 hours -SHARD_COMMITTEE_PERIOD: 256 -# 2**11 (= 2,048) Eth1 blocks ~8 hours -ETH1_FOLLOW_DISTANCE: 2048 - - -# Validator cycle -# --------------------------------------------------------------- -# 2**2 (= 4) -INACTIVITY_SCORE_BIAS: 4 -# 2**4 (= 16) -INACTIVITY_SCORE_RECOVERY_RATE: 16 -# 2**4 * 10**9 (= 16,000,000,000) Gwei -EJECTION_BALANCE: 16000000000 -# 2**2 (= 4) -MIN_PER_EPOCH_CHURN_LIMIT: 4 -# 2**16 (= 65,536) -CHURN_LIMIT_QUOTIENT: 65536 -# [New in Deneb:EIP7514] 2**3 (= 8) -MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT: 8 - -# Fork choice -# --------------------------------------------------------------- -# 40% -PROPOSER_SCORE_BOOST: 40 -# 20% -REORG_HEAD_WEIGHT_THRESHOLD: 20 -# 160% -REORG_PARENT_WEIGHT_THRESHOLD: 160 -# `2` epochs -REORG_MAX_EPOCHS_SINCE_FINALIZATION: 2 - -# Deposit contract -# --------------------------------------------------------------- -# Ethereum Goerli testnet -DEPOSIT_CHAIN_ID: 5 -DEPOSIT_NETWORK_ID: 5 -# Prater test deposit contract on Goerli Testnet -DEPOSIT_CONTRACT_ADDRESS: 0xff50ed3d0ec03aC01D4C79aAd74928BFF48a7b2b - -# Networking -# --------------------------------------------------------------- -# `10 * 2**20` (= 10485760, 10 MiB) -GOSSIP_MAX_SIZE: 10485760 -# `2**10` (= 1024) -MAX_REQUEST_BLOCKS: 1024 -# `2**8` (= 256) -EPOCHS_PER_SUBNET_SUBSCRIPTION: 256 -# `MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2` (= 33024, ~5 months) -MIN_EPOCHS_FOR_BLOCK_REQUESTS: 33024 -# `10 * 2**20` (=10485760, 10 MiB) -MAX_CHUNK_SIZE: 10485760 -# 5s -TTFB_TIMEOUT: 5 -# 10s -RESP_TIMEOUT: 10 -ATTESTATION_PROPAGATION_SLOT_RANGE: 32 -# 500ms -MAXIMUM_GOSSIP_CLOCK_DISPARITY: 500 -MESSAGE_DOMAIN_INVALID_SNAPPY: 0x00000000 -MESSAGE_DOMAIN_VALID_SNAPPY: 0x01000000 -# 2 subnets per node -SUBNETS_PER_NODE: 2 -# 2**8 (= 64) -ATTESTATION_SUBNET_COUNT: 64 -ATTESTATION_SUBNET_EXTRA_BITS: 0 -# ceillog2(ATTESTATION_SUBNET_COUNT) + ATTESTATION_SUBNET_EXTRA_BITS -ATTESTATION_SUBNET_PREFIX_BITS: 6 -ATTESTATION_SUBNET_SHUFFLING_PREFIX_BITS: 3 - -# Deneb -# `2**7` (=128) -MAX_REQUEST_BLOCKS_DENEB: 128 -# MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK -MAX_REQUEST_BLOB_SIDECARS: 768 -# `2**12` (= 4096 epochs, ~18 days) -MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: 4096 -# `6` -BLOB_SIDECAR_SUBNET_COUNT: 6 diff --git a/common/eth2_network_config/built_in_network_configs/prater/deploy_block.txt b/common/eth2_network_config/built_in_network_configs/prater/deploy_block.txt deleted file mode 100644 index e8c50058b6..0000000000 --- 
a/common/eth2_network_config/built_in_network_configs/prater/deploy_block.txt +++ /dev/null @@ -1 +0,0 @@ -4367322 \ No newline at end of file diff --git a/common/eth2_network_config/built_in_network_configs/prater/genesis.ssz.zip b/common/eth2_network_config/built_in_network_configs/prater/genesis.ssz.zip deleted file mode 100644 index 36bad7fae6..0000000000 Binary files a/common/eth2_network_config/built_in_network_configs/prater/genesis.ssz.zip and /dev/null differ diff --git a/common/eth2_network_config/built_in_network_configs/sepolia/deploy_block.txt b/common/eth2_network_config/built_in_network_configs/sepolia/deposit_contract_block.txt similarity index 100% rename from common/eth2_network_config/built_in_network_configs/sepolia/deploy_block.txt rename to common/eth2_network_config/built_in_network_configs/sepolia/deposit_contract_block.txt diff --git a/common/eth2_network_config/src/lib.rs b/common/eth2_network_config/src/lib.rs index 1ead9a6bde..fb8c6938cd 100644 --- a/common/eth2_network_config/src/lib.rs +++ b/common/eth2_network_config/src/lib.rs @@ -29,14 +29,14 @@ use url::Url; pub use eth2_config::GenesisStateSource; -pub const DEPLOY_BLOCK_FILE: &str = "deploy_block.txt"; +pub const DEPLOY_BLOCK_FILE: &str = "deposit_contract_block.txt"; pub const BOOT_ENR_FILE: &str = "boot_enr.yaml"; pub const GENESIS_STATE_FILE: &str = "genesis.ssz"; pub const BASE_CONFIG_FILE: &str = "config.yaml"; // Creates definitions for: // -// - Each of the `HardcodedNet` values (e.g., `MAINNET`, `PRATER`, etc). +// - Each of the `HardcodedNet` values (e.g., `MAINNET`, `HOLESKY`, etc). // - `HARDCODED_NETS: &[HardcodedNet]` // - `HARDCODED_NET_NAMES: &[&'static str]` instantiate_hardcoded_nets!(eth2_config); @@ -502,13 +502,6 @@ mod tests { .expect("beacon state can decode"); } - #[test] - fn prater_and_goerli_are_equal() { - let goerli = Eth2NetworkConfig::from_hardcoded_net(&GOERLI).unwrap(); - let prater = Eth2NetworkConfig::from_hardcoded_net(&PRATER).unwrap(); - assert_eq!(goerli, prater); - } - #[test] fn hard_coded_nets_work() { for net in HARDCODED_NETS { diff --git a/common/lighthouse_metrics/Cargo.toml b/common/lighthouse_metrics/Cargo.toml index 6d90534401..fe966f4a9c 100644 --- a/common/lighthouse_metrics/Cargo.toml +++ b/common/lighthouse_metrics/Cargo.toml @@ -7,5 +7,4 @@ edition = { workspace = true } # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -lazy_static = { workspace = true } prometheus = "0.13.0" diff --git a/common/logging/Cargo.toml b/common/logging/Cargo.toml index 1fad56d475..3a03d22f3c 100644 --- a/common/logging/Cargo.toml +++ b/common/logging/Cargo.toml @@ -15,7 +15,6 @@ parking_lot = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } slog = { workspace = true } -slog-async = { workspace = true } slog-term = { workspace = true } sloggers = { workspace = true } take_mut = "0.2.2" diff --git a/common/system_health/Cargo.toml b/common/system_health/Cargo.toml index 5f0de80d90..be339f2779 100644 --- a/common/system_health/Cargo.toml +++ b/common/system_health/Cargo.toml @@ -8,5 +8,4 @@ lighthouse_network = { workspace = true } types = { workspace = true } sysinfo = { workspace = true } serde = { workspace = true } -serde_json = { workspace = true } parking_lot = { workspace = true } diff --git a/consensus/fork_choice/tests/tests.rs b/consensus/fork_choice/tests/tests.rs index 3b05cfab1f..d2935dbca4 100644 --- a/consensus/fork_choice/tests/tests.rs +++ 
b/consensus/fork_choice/tests/tests.rs @@ -435,7 +435,12 @@ impl ForkChoiceTest { let validator_committee_index = 0; let validator_index = *head .beacon_state - .get_beacon_committee(current_slot, attestation.data().index) + .get_beacon_committee( + current_slot, + attestation + .committee_index() + .expect("should get committee index"), + ) .expect("should get committees") .committee .get(validator_committee_index) diff --git a/consensus/state_processing/src/common/get_attesting_indices.rs b/consensus/state_processing/src/common/get_attesting_indices.rs index 33249c4941..a79604003e 100644 --- a/consensus/state_processing/src/common/get_attesting_indices.rs +++ b/consensus/state_processing/src/common/get_attesting_indices.rs @@ -13,7 +13,6 @@ pub mod attesting_indices_base { ) -> Result, BlockOperationError> { let attesting_indices = get_attesting_indices::(committee, &attestation.aggregation_bits)?; - Ok(IndexedAttestation::Base(IndexedAttestationBase { attesting_indices: VariableList::new(attesting_indices)?, data: attestation.data.clone(), @@ -52,6 +51,100 @@ pub mod attesting_indices_electra { use safe_arith::SafeArith; use types::*; + // TODO(electra) remove duplicate code + // get_indexed_attestation is almost an exact duplicate + // the only differences are the invalid selection proof + // and aggregator not in committee checks + pub fn get_indexed_attestation_from_signed_aggregate( + committees: &[BeaconCommittee], + signed_aggregate: &SignedAggregateAndProofElectra, + spec: &ChainSpec, + ) -> Result, BeaconStateError> { + let mut output: HashSet = HashSet::new(); + + let committee_bits = &signed_aggregate.message.aggregate.committee_bits; + let aggregation_bits = &signed_aggregate.message.aggregate.aggregation_bits; + let aggregator_index = signed_aggregate.message.aggregator_index; + let attestation = &signed_aggregate.message.aggregate; + + let committee_indices = get_committee_indices::(committee_bits); + + let mut committee_offset = 0; + + let committees_map: HashMap = committees + .iter() + .map(|committee| (committee.index, committee)) + .collect(); + + let committee_count_per_slot = committees.len() as u64; + let mut participant_count = 0; + + // TODO(electra): + // Note: this clones the signature which is known to be a relatively slow operation. + // + // Future optimizations should remove this clone. + let selection_proof = + SelectionProof::from(signed_aggregate.message.selection_proof.clone()); + + for index in committee_indices { + if let Some(&beacon_committee) = committees_map.get(&index) { + if !selection_proof + .is_aggregator(beacon_committee.committee.len(), spec) + .map_err(BeaconStateError::ArithError)? + { + return Err(BeaconStateError::InvalidSelectionProof { aggregator_index }); + } + + if !beacon_committee + .committee + .contains(&(aggregator_index as usize)) + { + return Err(BeaconStateError::AggregatorNotInCommittee { aggregator_index }); + } + + // This check is new to the spec's `process_attestation` in Electra. 
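+            // A committee bit set at or beyond the number of committees actually
+            // scheduled for this slot can never refer to a real committee, so the
+            // aggregate is rejected outright rather than silently skipped.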
+ if index >= committee_count_per_slot { + return Err(BeaconStateError::InvalidCommitteeIndex(index)); + } + + participant_count.safe_add_assign(beacon_committee.committee.len() as u64)?; + let committee_attesters = beacon_committee + .committee + .iter() + .enumerate() + .filter_map(|(i, &index)| { + if let Ok(aggregation_bit_index) = committee_offset.safe_add(i) { + if aggregation_bits.get(aggregation_bit_index).unwrap_or(false) { + return Some(index as u64); + } + } + None + }) + .collect::>(); + + output.extend(committee_attesters); + + committee_offset.safe_add_assign(beacon_committee.committee.len())?; + } else { + return Err(Error::NoCommitteeFound(index)); + } + } + + // This check is new to the spec's `process_attestation` in Electra. + if participant_count as usize != aggregation_bits.len() { + return Err(Error::InvalidBitfield); + } + + let mut indices = output.into_iter().collect_vec(); + indices.sort_unstable(); + + Ok(IndexedAttestation::Electra(IndexedAttestationElectra { + attesting_indices: VariableList::new(indices)?, + data: attestation.data.clone(), + signature: attestation.signature.clone(), + })) + } + pub fn get_indexed_attestation( committees: &[BeaconCommittee], attestation: &AttestationElectra, @@ -155,7 +248,7 @@ pub mod attesting_indices_electra { Ok(indices) } - fn get_committee_indices( + pub fn get_committee_indices( committee_bits: &BitVector, ) -> Vec { committee_bits diff --git a/consensus/state_processing/src/per_block_processing/signature_sets.rs b/consensus/state_processing/src/per_block_processing/signature_sets.rs index b8316063a3..85dbe4da79 100644 --- a/consensus/state_processing/src/per_block_processing/signature_sets.rs +++ b/consensus/state_processing/src/per_block_processing/signature_sets.rs @@ -326,6 +326,7 @@ where genesis_validators_root, ); + // TODO(electra), signing root isnt unique in the case of electra let message = indexed_attestation.data().signing_root(domain); Ok(SignatureSet::multiple_pubkeys(signature, pubkeys, message)) @@ -436,7 +437,6 @@ where let message = slot.signing_root(domain); let signature = signed_aggregate_and_proof.message().selection_proof(); let validator_index = signed_aggregate_and_proof.message().aggregator_index(); - Ok(SignatureSet::single_pubkey( signature, get_pubkey(validator_index as usize).ok_or(Error::ValidatorUnknown(validator_index))?, diff --git a/consensus/state_processing/src/verify_operation.rs b/consensus/state_processing/src/verify_operation.rs index c12467fbb5..f218b806d2 100644 --- a/consensus/state_processing/src/verify_operation.rs +++ b/consensus/state_processing/src/verify_operation.rs @@ -12,7 +12,9 @@ use smallvec::{smallvec, SmallVec}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use std::marker::PhantomData; -use types::{AttesterSlashing, AttesterSlashingOnDisk, AttesterSlashingRefOnDisk}; +use types::{ + AttesterSlashing, AttesterSlashingBase, AttesterSlashingOnDisk, AttesterSlashingRefOnDisk, +}; use types::{ BeaconState, ChainSpec, Epoch, EthSpec, Fork, ForkVersion, ProposerSlashing, SignedBlsToExecutionChange, SignedVoluntaryExit, @@ -366,6 +368,49 @@ impl TransformPersist for AttesterSlashing { } } +// TODO: Remove this once we no longer support DB schema version 17 +impl TransformPersist for types::AttesterSlashingBase { + type Persistable = Self; + type PersistableRef<'a> = &'a Self; + + fn as_persistable_ref(&self) -> Self::PersistableRef<'_> { + self + } + + fn from_persistable(persistable: Self::Persistable) -> Self { + persistable + } +} +// TODO: Remove this 
once we no longer support DB schema version 17 +impl From, E>> + for SigVerifiedOp, E> +{ + fn from(base: SigVerifiedOp, E>) -> Self { + SigVerifiedOp { + op: AttesterSlashing::Base(base.op), + verified_against: base.verified_against, + _phantom: PhantomData, + } + } +} +// TODO: Remove this once we no longer support DB schema version 17 +impl TryFrom, E>> + for SigVerifiedOp, E> +{ + type Error = String; + + fn try_from(slashing: SigVerifiedOp, E>) -> Result { + match slashing.op { + AttesterSlashing::Base(base) => Ok(SigVerifiedOp { + op: base, + verified_against: slashing.verified_against, + _phantom: PhantomData, + }), + AttesterSlashing::Electra(_) => Err("non-base attester slashing".to_string()), + } + } +} + impl TransformPersist for ProposerSlashing { type Persistable = Self; type PersistableRef<'a> = &'a Self; diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index 4b7d9f2b98..fd1f862a92 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -51,7 +51,6 @@ metastruct = "0.1.0" serde_json = { workspace = true } smallvec = { workspace = true } maplit = { workspace = true } -strum = { workspace = true } milhouse = { workspace = true } rpds = { workspace = true } diff --git a/consensus/types/src/attestation.rs b/consensus/types/src/attestation.rs index 8c8a81b90f..3df14feade 100644 --- a/consensus/types/src/attestation.rs +++ b/consensus/types/src/attestation.rs @@ -4,7 +4,6 @@ use derivative::Derivative; use rand::RngCore; use safe_arith::ArithError; use serde::{Deserialize, Serialize}; -use ssz::Decode; use ssz_derive::{Decode, Encode}; use ssz_types::BitVector; use std::hash::{Hash, Hasher}; @@ -22,6 +21,7 @@ pub enum Error { SszTypesError(ssz_types::Error), AlreadySigned(usize), SubnetCountIsZero(ArithError), + IncorrectStateVariant, } #[superstruct( @@ -43,7 +43,9 @@ pub enum Error { serde(bound = "E: EthSpec", deny_unknown_fields), arbitrary(bound = "E: EthSpec"), ), - ref_attributes(derive(TreeHash), tree_hash(enum_behaviour = "transparent")) + ref_attributes(derive(TreeHash), tree_hash(enum_behaviour = "transparent")), + cast_error(ty = "Error", expr = "Error::IncorrectStateVariant"), + partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant") )] #[derive( Debug, @@ -72,37 +74,17 @@ pub struct Attestation { pub signature: AggregateSignature, } -impl Decode for Attestation { - fn is_ssz_fixed_len() -> bool { - false - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - if let Ok(result) = AttestationBase::from_ssz_bytes(bytes) { - return Ok(Attestation::Base(result)); - } - - if let Ok(result) = AttestationElectra::from_ssz_bytes(bytes) { - return Ok(Attestation::Electra(result)); - } - - Err(ssz::DecodeError::BytesInvalid(String::from( - "bytes not valid for any fork variant", - ))) - } -} - // TODO(electra): think about how to handle fork variants here impl TestRandom for Attestation { fn random_for_test(rng: &mut impl RngCore) -> Self { - let aggregation_bits: BitList = BitList::random_for_test(rng); - // let committee_bits: BitList = BitList::random_for_test(rng); + let aggregation_bits = BitList::random_for_test(rng); let data = AttestationData::random_for_test(rng); let signature = AggregateSignature::random_for_test(rng); + let committee_bits = BitVector::random_for_test(rng); - Self::Base(AttestationBase { + Self::Electra(AttestationElectra { aggregation_bits, - // committee_bits, + committee_bits, data, signature, }) @@ -187,9 +169,9 @@ impl Attestation { } } - pub fn committee_index(&self) -> u64 { + pub fn 
committee_index(&self) -> Option { match self { - Attestation::Base(att) => att.data.index, + Attestation::Base(att) => Some(att.data.index), Attestation::Electra(att) => att.committee_index(), } } @@ -238,12 +220,31 @@ impl<'a, E: EthSpec> AttestationRef<'a, E> { } } - pub fn committee_index(&self) -> u64 { + pub fn committee_index(&self) -> Option { match self { - AttestationRef::Base(att) => att.data.index, + AttestationRef::Base(att) => Some(att.data.index), AttestationRef::Electra(att) => att.committee_index(), } } + + pub fn set_aggregation_bits(&self) -> Vec { + match self { + Self::Base(att) => att + .aggregation_bits + .iter() + .enumerate() + .filter(|(_i, bit)| *bit) + .map(|(i, _bit)| i) + .collect::>(), + Self::Electra(att) => att + .aggregation_bits + .iter() + .enumerate() + .filter(|(_i, bit)| *bit) + .map(|(i, _bit)| i) + .collect::>(), + } + } } impl AttestationElectra { @@ -257,8 +258,8 @@ impl AttestationElectra { .is_zero() } - pub fn committee_index(&self) -> u64 { - *self.get_committee_indices().first().unwrap_or(&0u64) + pub fn committee_index(&self) -> Option { + self.get_committee_indices().first().cloned() } pub fn get_committee_indices(&self) -> Vec { @@ -417,6 +418,65 @@ impl<'a, E: EthSpec> SlotData for AttestationRef<'a, E> { } } +#[derive(Debug, Clone, Encode, Decode, PartialEq)] +#[ssz(enum_behaviour = "union")] +pub enum AttestationOnDisk { + Base(AttestationBase), + Electra(AttestationElectra), +} + +impl AttestationOnDisk { + pub fn to_ref(&self) -> AttestationRefOnDisk { + match self { + AttestationOnDisk::Base(att) => AttestationRefOnDisk::Base(att), + AttestationOnDisk::Electra(att) => AttestationRefOnDisk::Electra(att), + } + } +} + +#[derive(Debug, Clone, Encode)] +#[ssz(enum_behaviour = "union")] +pub enum AttestationRefOnDisk<'a, E: EthSpec> { + Base(&'a AttestationBase), + Electra(&'a AttestationElectra), +} + +impl From> for AttestationOnDisk { + fn from(attestation: Attestation) -> Self { + match attestation { + Attestation::Base(attestation) => Self::Base(attestation), + Attestation::Electra(attestation) => Self::Electra(attestation), + } + } +} + +impl From> for Attestation { + fn from(attestation: AttestationOnDisk) -> Self { + match attestation { + AttestationOnDisk::Base(attestation) => Self::Base(attestation), + AttestationOnDisk::Electra(attestation) => Self::Electra(attestation), + } + } +} + +impl<'a, E: EthSpec> From> for AttestationRefOnDisk<'a, E> { + fn from(attestation: AttestationRef<'a, E>) -> Self { + match attestation { + AttestationRef::Base(attestation) => Self::Base(attestation), + AttestationRef::Electra(attestation) => Self::Electra(attestation), + } + } +} + +impl<'a, E: EthSpec> From> for AttestationRef<'a, E> { + fn from(attestation: AttestationRefOnDisk<'a, E>) -> Self { + match attestation { + AttestationRefOnDisk::Base(attestation) => Self::Base(attestation), + AttestationRefOnDisk::Electra(attestation) => Self::Electra(attestation), + } + } +} + #[cfg(test)] mod tests { use super::*; @@ -428,7 +488,7 @@ mod tests { // This test will only pass with `blst`, if we run these tests with another // BLS library in future we will have to make it generic. 
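+    // Each fork variant's stack size is pinned separately below, so any
+    // accidental growth of these heavily-cached types shows up as a test failure.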
#[test] - fn size_of() { + fn size_of_base() { use std::mem::size_of; let aggregation_bits = @@ -441,16 +501,43 @@ mod tests { assert_eq!(signature, 288 + 16); let attestation_expected = aggregation_bits + attestation_data + signature; - // TODO(electra) since we've removed attestation aggregation for electra variant - // i've updated the attestation value expected from 488 544 - // assert_eq!(attestation_expected, 488); assert_eq!(attestation_expected, 488); assert_eq!( - size_of::>(), + size_of::>(), attestation_expected ); } - // TODO(electra): can we do this with both variants or should we? - ssz_and_tree_hash_tests!(AttestationBase); + #[test] + fn size_of_electra() { + use std::mem::size_of; + + let aggregation_bits = + size_of::::MaxValidatorsPerSlot>>(); + let attestation_data = size_of::(); + let committee_bits = + size_of::::MaxCommitteesPerSlot>>(); + let signature = size_of::(); + + assert_eq!(aggregation_bits, 56); + assert_eq!(committee_bits, 56); + assert_eq!(attestation_data, 128); + assert_eq!(signature, 288 + 16); + + let attestation_expected = aggregation_bits + committee_bits + attestation_data + signature; + assert_eq!(attestation_expected, 544); + assert_eq!( + size_of::>(), + attestation_expected + ); + } + + mod base { + use super::*; + ssz_and_tree_hash_tests!(AttestationBase); + } + mod electra { + use super::*; + ssz_and_tree_hash_tests!(AttestationElectra); + } } diff --git a/consensus/types/src/attester_slashing.rs b/consensus/types/src/attester_slashing.rs index 6668f809b8..a8d4e6989c 100644 --- a/consensus/types/src/attester_slashing.rs +++ b/consensus/types/src/attester_slashing.rs @@ -164,7 +164,12 @@ impl AttesterSlashing { mod tests { use super::*; use crate::*; - - // TODO(electra): should this be done for both variants? - ssz_and_tree_hash_tests!(AttesterSlashingBase); + mod base { + use super::*; + ssz_and_tree_hash_tests!(AttesterSlashingBase); + } + mod electra { + use super::*; + ssz_and_tree_hash_tests!(AttesterSlashingElectra); + } } diff --git a/consensus/types/src/beacon_block.rs b/consensus/types/src/beacon_block.rs index 1052b20146..f67a965955 100644 --- a/consensus/types/src/beacon_block.rs +++ b/consensus/types/src/beacon_block.rs @@ -607,14 +607,12 @@ impl> BeaconBlockElectra /// Return a Electra block where the block has maximum size. pub fn full(spec: &ChainSpec) -> Self { let base_block: BeaconBlockBase<_, Payload> = BeaconBlockBase::full(spec); - // TODO(electra): check this let indexed_attestation: IndexedAttestationElectra = IndexedAttestationElectra { attesting_indices: VariableList::new(vec![0_u64; E::MaxValidatorsPerSlot::to_usize()]) .unwrap(), data: AttestationData::default(), signature: AggregateSignature::empty(), }; - // TODO(electra): fix this so we calculate this size correctly let attester_slashings = vec![ AttesterSlashingElectra { attestation_1: indexed_attestation.clone(), @@ -627,7 +625,6 @@ impl> BeaconBlockElectra aggregation_bits: BitList::with_capacity(E::MaxValidatorsPerSlot::to_usize()).unwrap(), data: AttestationData::default(), signature: AggregateSignature::empty(), - // TODO(electra): does this actually allocate the size correctly? 
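+            // `BitVector::new()` is fixed-capacity: it yields exactly
+            // `MaxCommitteesPerSlot` zeroed bits, so no explicit sizing is needed here.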
committee_bits: BitVector::new(), }; let mut attestations_electra = vec![]; diff --git a/consensus/types/src/beacon_block_body.rs b/consensus/types/src/beacon_block_body.rs index fba542450d..0466d1b768 100644 --- a/consensus/types/src/beacon_block_body.rs +++ b/consensus/types/src/beacon_block_body.rs @@ -251,6 +251,7 @@ impl<'a, E: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRef<'a, E, body.execution_payload.tree_hash_root(), body.bls_to_execution_changes.tree_hash_root(), body.blob_kzg_commitments.tree_hash_root(), + body.consolidations.tree_hash_root(), ]; let beacon_block_body_depth = leaves.len().next_power_of_two().ilog2() as usize; let tree = MerkleTree::create(&leaves, beacon_block_body_depth); diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index 6b4bfd29cb..0426d43cac 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -163,6 +163,12 @@ pub enum Error { NonExecutionAddresWithdrawalCredential, NoCommitteeFound(CommitteeIndex), InvalidCommitteeIndex(CommitteeIndex), + InvalidSelectionProof { + aggregator_index: u64, + }, + AggregatorNotInCommittee { + aggregator_index: u64, + }, } /// Control whether an epoch-indexed field can be indexed at the next epoch or not. diff --git a/consensus/types/src/indexed_attestation.rs b/consensus/types/src/indexed_attestation.rs index f5b55f6d9b..d282e2f259 100644 --- a/consensus/types/src/indexed_attestation.rs +++ b/consensus/types/src/indexed_attestation.rs @@ -206,14 +206,17 @@ impl Decode for IndexedAttestation { } } +// TODO(electra): think about how to handle fork variants here impl TestRandom for IndexedAttestation { fn random_for_test(rng: &mut impl RngCore) -> Self { let attesting_indices = VariableList::random_for_test(rng); + // let committee_bits: BitList = BitList::random_for_test(rng); let data = AttestationData::random_for_test(rng); let signature = AggregateSignature::random_for_test(rng); Self::Base(IndexedAttestationBase { attesting_indices, + // committee_bits, data, signature, }) diff --git a/consensus/types/src/signed_aggregate_and_proof.rs b/consensus/types/src/signed_aggregate_and_proof.rs index 57a2ce5bab..ddf1dedb04 100644 --- a/consensus/types/src/signed_aggregate_and_proof.rs +++ b/consensus/types/src/signed_aggregate_and_proof.rs @@ -74,7 +74,6 @@ impl SignedAggregateAndProof { genesis_validators_root, spec, ); - let target_epoch = message.aggregate().data().slot.epoch(E::slots_per_epoch()); let domain = spec.get_domain( target_epoch, diff --git a/consensus/types/src/subnet_id.rs b/consensus/types/src/subnet_id.rs index ec44795674..b5b05e710b 100644 --- a/consensus/types/src/subnet_id.rs +++ b/consensus/types/src/subnet_id.rs @@ -40,13 +40,15 @@ impl SubnetId { /// Compute the subnet for an attestation where each slot in the /// attestation epoch contains `committee_count_per_slot` committees. 
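+    /// Delegates to `compute_subnet` using the attestation's committee index.
+    /// Electra attestations with no committee bits set have no committee index;
+    /// that case is surfaced as an arithmetic error rather than a panic.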
pub fn compute_subnet_for_attestation( - attestation: &AttestationRef, + attestation: AttestationRef, committee_count_per_slot: u64, spec: &ChainSpec, ) -> Result { + let committee_index = attestation.committee_index().ok_or(ArithError::Overflow)?; + Self::compute_subnet::( attestation.data().slot, - attestation.committee_index(), + committee_index, committee_count_per_slot, spec, ) diff --git a/consensus/types/src/test_utils/test_random/bitfield.rs b/consensus/types/src/test_utils/test_random/bitfield.rs index f73f7c18c5..35176d389d 100644 --- a/consensus/types/src/test_utils/test_random/bitfield.rs +++ b/consensus/types/src/test_utils/test_random/bitfield.rs @@ -26,6 +26,15 @@ impl TestRandom for BitVector { fn random_for_test(rng: &mut impl RngCore) -> Self { let mut raw_bytes = smallvec![0; std::cmp::max(1, (N::to_usize() + 7) / 8)]; rng.fill_bytes(&mut raw_bytes); + // If N isn't divisible by 8 + // zero out bits greater than N + if let Some(last_byte) = raw_bytes.last_mut() { + let mut mask = 0; + for i in 0..N::to_usize() % 8 { + mask |= 1 << i; + } + *last_byte &= mask; + } Self::from_bytes(raw_bytes).expect("we generate a valid BitVector") } } diff --git a/database_manager/Cargo.toml b/database_manager/Cargo.toml index 07045dd95c..250188e2db 100644 --- a/database_manager/Cargo.toml +++ b/database_manager/Cargo.toml @@ -10,10 +10,7 @@ clap = { workspace = true } clap_utils = { workspace = true } environment = { workspace = true } hex = { workspace = true } -logging = { workspace = true } -sloggers = { workspace = true } store = { workspace = true } -tempfile = { workspace = true } types = { workspace = true } slog = { workspace = true } strum = { workspace = true } diff --git a/database_manager/src/lib.rs b/database_manager/src/lib.rs index 617192abfe..fafff0f0f9 100644 --- a/database_manager/src/lib.rs +++ b/database_manager/src/lib.rs @@ -3,7 +3,8 @@ use beacon_chain::{ slot_clock::SystemTimeSlotClock, }; use beacon_node::{get_data_dir, get_slots_per_restore_point, ClientConfig}; -use clap::{App, Arg, ArgMatches}; +use clap::{Arg, ArgAction, ArgMatches, Command}; +use clap_utils::{get_color_style, FLAG_HEADER}; use environment::{Environment, RuntimeContext}; use slog::{info, warn, Logger}; use std::fs; @@ -20,147 +21,173 @@ use types::{BeaconState, EthSpec, Slot}; pub const CMD: &str = "database_manager"; -pub fn version_cli_app<'a, 'b>() -> App<'a, 'b> { - App::new("version") - .visible_aliases(&["v"]) - .setting(clap::AppSettings::ColoredHelp) +pub fn version_cli_app() -> Command { + Command::new("version") + .visible_aliases(["v"]) + .styles(get_color_style()) .about("Display database schema version") } -pub fn migrate_cli_app<'a, 'b>() -> App<'a, 'b> { - App::new("migrate") - .setting(clap::AppSettings::ColoredHelp) +pub fn migrate_cli_app() -> Command { + Command::new("migrate") + .styles(get_color_style()) .about("Migrate the database to a specific schema version") .arg( - Arg::with_name("to") + Arg::new("to") .long("to") .value_name("VERSION") .help("Schema version to migrate to") - .takes_value(true) + .action(ArgAction::Set) .required(true), ) } -pub fn inspect_cli_app<'a, 'b>() -> App<'a, 'b> { - App::new("inspect") - .setting(clap::AppSettings::ColoredHelp) +pub fn inspect_cli_app() -> Command { + Command::new("inspect") + .styles(get_color_style()) .about("Inspect raw database values") .arg( - Arg::with_name("column") + Arg::new("column") .long("column") .value_name("TAG") .help("3-byte column ID (see `DBColumn`)") - .takes_value(true) - .required(true), + 
.action(ArgAction::Set) + .required(true) + .display_order(0), ) .arg( - Arg::with_name("output") + Arg::new("output") .long("output") .value_name("TARGET") .help("Select the type of output to show") .default_value("sizes") - .possible_values(InspectTarget::VARIANTS), + .value_parser(InspectTarget::VARIANTS.to_vec()) + .display_order(0), ) .arg( - Arg::with_name("skip") + Arg::new("skip") .long("skip") .value_name("N") - .help("Skip over the first N keys"), + .help("Skip over the first N keys") + .display_order(0), ) .arg( - Arg::with_name("limit") + Arg::new("limit") .long("limit") .value_name("N") - .help("Output at most N keys"), + .help("Output at most N keys") + .display_order(0), ) .arg( - Arg::with_name("freezer") + Arg::new("freezer") .long("freezer") .help("Inspect the freezer DB rather than the hot DB") - .takes_value(false) - .conflicts_with("blobs-db"), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .conflicts_with("blobs-db") + .display_order(0), ) .arg( - Arg::with_name("blobs-db") + Arg::new("blobs-db") .long("blobs-db") .help("Inspect the blobs DB rather than the hot DB") - .takes_value(false) - .conflicts_with("freezer"), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .conflicts_with("freezer") + .display_order(0), ) .arg( - Arg::with_name("output-dir") + Arg::new("output-dir") .long("output-dir") .value_name("DIR") .help("Base directory for the output files. Defaults to the current directory") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) } -pub fn compact_cli_app<'a, 'b>() -> App<'a, 'b> { - App::new("compact") - .setting(clap::AppSettings::ColoredHelp) +pub fn compact_cli_app() -> Command { + Command::new("compact") + .styles(get_color_style()) .about("Compact database manually") .arg( - Arg::with_name("column") + Arg::new("column") .long("column") .value_name("TAG") .help("3-byte column ID (see `DBColumn`)") - .takes_value(true) - .required(true), + .action(ArgAction::Set) + .required(true) + .display_order(0), ) .arg( - Arg::with_name("freezer") + Arg::new("freezer") .long("freezer") .help("Compact the freezer DB rather than the hot DB") - .takes_value(false) - .conflicts_with("blobs-db"), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .conflicts_with("blobs-db") + .display_order(0), ) .arg( - Arg::with_name("blobs-db") + Arg::new("blobs-db") .long("blobs-db") .help("Compact the blobs DB rather than the hot DB") - .takes_value(false) - .conflicts_with("freezer"), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .conflicts_with("freezer") + .display_order(0), ) } -pub fn prune_payloads_app<'a, 'b>() -> App<'a, 'b> { - App::new("prune-payloads") +pub fn prune_payloads_app() -> Command { + Command::new("prune-payloads") .alias("prune_payloads") - .setting(clap::AppSettings::ColoredHelp) + .styles(get_color_style()) .about("Prune finalized execution payloads") } -pub fn prune_blobs_app<'a, 'b>() -> App<'a, 'b> { - App::new("prune-blobs") +pub fn prune_blobs_app() -> Command { + Command::new("prune-blobs") .alias("prune_blobs") - .setting(clap::AppSettings::ColoredHelp) + .styles(get_color_style()) .about("Prune blobs older than data availability boundary") } -pub fn prune_states_app<'a, 'b>() -> App<'a, 'b> { - App::new("prune-states") +pub fn prune_states_app() -> Command { + Command::new("prune-states") .alias("prune_states") .arg( - Arg::with_name("confirm") + Arg::new("confirm") .long("confirm") .help( "Commit to pruning states irreversibly. 
Without this flag the command will \ just check that the database is capable of being pruned.", ) - .takes_value(false), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0), ) - .setting(clap::AppSettings::ColoredHelp) + .styles(get_color_style()) .about("Prune all beacon states from the freezer database") } -pub fn cli_app<'a, 'b>() -> App<'a, 'b> { - App::new(CMD) - .visible_aliases(&["db"]) - .setting(clap::AppSettings::ColoredHelp) +pub fn cli_app() -> Command { + Command::new(CMD) + .display_order(0) + .visible_aliases(["db"]) + .styles(get_color_style()) .about("Manage a beacon node database") .arg( - Arg::with_name("slots-per-restore-point") + Arg::new("help") + .long("help") + .short('h') + .help("Prints help information") + .action(ArgAction::HelpLong) + .display_order(0) + .help_heading(FLAG_HEADER), + ) + .arg( + Arg::new("slots-per-restore-point") .long("slots-per-restore-point") .value_name("SLOT_COUNT") .help( @@ -168,32 +195,36 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { Cannot be changed after initialization. \ [default: 2048 (mainnet) or 64 (minimal)]", ) - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name("freezer-dir") + Arg::new("freezer-dir") .long("freezer-dir") .value_name("DIR") .help("Data directory for the freezer database.") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name("blob-prune-margin-epochs") + Arg::new("blob-prune-margin-epochs") .long("blob-prune-margin-epochs") .value_name("EPOCHS") .help( "The margin for blob pruning in epochs. The oldest blobs are pruned \ up until data_availability_boundary - blob_prune_margin_epochs.", ) - .takes_value(true) - .default_value("0"), + .action(ArgAction::Set) + .default_value("0") + .display_order(0), ) .arg( - Arg::with_name("blobs-dir") + Arg::new("blobs-dir") .long("blobs-dir") .value_name("DIR") .help("Data directory for the blobs database.") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .subcommand(migrate_cli_app()) .subcommand(version_cli_app()) @@ -298,8 +329,8 @@ fn parse_inspect_config(cli_args: &ArgMatches) -> Result let target = clap_utils::parse_required(cli_args, "output")?; let skip = clap_utils::parse_optional(cli_args, "skip")?; let limit = clap_utils::parse_optional(cli_args, "limit")?; - let freezer = cli_args.is_present("freezer"); - let blobs_db = cli_args.is_present("blobs-db"); + let freezer = cli_args.get_flag("freezer"); + let blobs_db = cli_args.get_flag("blobs-db"); let output_dir: PathBuf = clap_utils::parse_optional(cli_args, "output-dir")?.unwrap_or_else(PathBuf::new); @@ -421,8 +452,8 @@ pub struct CompactConfig { fn parse_compact_config(cli_args: &ArgMatches) -> Result { let column = clap_utils::parse_required(cli_args, "column")?; - let freezer = cli_args.is_present("freezer"); - let blobs_db = cli_args.is_present("blobs-db"); + let freezer = cli_args.get_flag("freezer"); + let blobs_db = cli_args.get_flag("blobs-db"); Ok(CompactConfig { column, freezer, @@ -566,7 +597,7 @@ pub struct PruneStatesConfig { } fn parse_prune_states_config(cli_args: &ArgMatches) -> Result { - let confirm = cli_args.is_present("confirm"); + let confirm = cli_args.get_flag("confirm"); Ok(PruneStatesConfig { confirm }) } @@ -645,33 +676,33 @@ pub fn prune_states( } /// Run the database manager, returning an error string if the operation did not succeed. 
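+// NB: clap v4's `ArgMatches::subcommand()` returns `Option<(&str, &ArgMatches)>`
+// rather than clap v2's `(&str, Option<&ArgMatches>)`, hence the
+// `Some((name, args))` match arms below.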
-pub fn run(cli_args: &ArgMatches<'_>, env: Environment) -> Result<(), String> { +pub fn run(cli_args: &ArgMatches, env: Environment) -> Result<(), String> { let client_config = parse_client_config(cli_args, &env)?; let context = env.core_context(); let log = context.log().clone(); let format_err = |e| format!("Fatal error: {:?}", e); match cli_args.subcommand() { - ("version", Some(_)) => { + Some(("version", _)) => { display_db_version(client_config, &context, log).map_err(format_err) } - ("migrate", Some(cli_args)) => { + Some(("migrate", cli_args)) => { let migrate_config = parse_migrate_config(cli_args)?; migrate_db(migrate_config, client_config, &context, log).map_err(format_err) } - ("inspect", Some(cli_args)) => { + Some(("inspect", cli_args)) => { let inspect_config = parse_inspect_config(cli_args)?; inspect_db::(inspect_config, client_config) } - ("compact", Some(cli_args)) => { + Some(("compact", cli_args)) => { let compact_config = parse_compact_config(cli_args)?; compact_db::(compact_config, client_config, log).map_err(format_err) } - ("prune-payloads", Some(_)) => { + Some(("prune-payloads", _)) => { prune_payloads(client_config, &context, log).map_err(format_err) } - ("prune-blobs", Some(_)) => prune_blobs(client_config, &context, log).map_err(format_err), - ("prune-states", Some(cli_args)) => { + Some(("prune-blobs", _)) => prune_blobs(client_config, &context, log).map_err(format_err), + Some(("prune-states", cli_args)) => { let executor = env.core_context().executor; let network_config = context .eth2_network_config diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index 2aba106e50..5d948a25c3 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -20,7 +20,6 @@ serde_json = { workspace = true } env_logger = { workspace = true } types = { workspace = true } state_processing = { workspace = true } -int_to_bytes = { workspace = true } ethereum_hashing = { workspace = true } ethereum_ssz = { workspace = true } environment = { workspace = true } @@ -32,7 +31,6 @@ clap_utils = { workspace = true } lighthouse_network = { workspace = true } validator_dir = { workspace = true, features = ["insecure_keys"] } lighthouse_version = { workspace = true } -directory = { workspace = true } account_utils = { workspace = true } eth2_wallet = { workspace = true } eth1_test_rig = { workspace = true } diff --git a/lcli/src/change_genesis_time.rs b/lcli/src/change_genesis_time.rs index f75652c768..1b100d4644 100644 --- a/lcli/src/change_genesis_time.rs +++ b/lcli/src/change_genesis_time.rs @@ -8,13 +8,13 @@ use types::{BeaconState, EthSpec}; pub fn run(testnet_dir: PathBuf, matches: &ArgMatches) -> Result<(), String> { let path = matches - .value_of("ssz-state") + .get_one::("ssz-state") .ok_or("ssz-state not specified")? .parse::() .map_err(|e| format!("Unable to parse ssz-state: {}", e))?; let genesis_time = matches - .value_of("genesis-time") + .get_one::("genesis-time") .ok_or("genesis-time not specified")? 
.parse::() .map_err(|e| format!("Unable to parse genesis-time: {}", e))?; diff --git a/lcli/src/create_payload_header.rs b/lcli/src/create_payload_header.rs index 974a34591f..65ee40a5e7 100644 --- a/lcli/src/create_payload_header.rs +++ b/lcli/src/create_payload_header.rs @@ -20,7 +20,9 @@ pub fn run(matches: &ArgMatches) -> Result<(), String> { ); let base_fee_per_gas = parse_required(matches, "base-fee-per-gas")?; let gas_limit = parse_required(matches, "gas-limit")?; - let file_name = matches.value_of("file").ok_or("No file supplied")?; + let file_name = matches + .get_one::("file") + .ok_or("No file supplied")?; let fork_name: ForkName = parse_optional(matches, "fork")?.unwrap_or(ForkName::Bellatrix); let execution_payload_header: ExecutionPayloadHeader = match fork_name { diff --git a/lcli/src/deploy_deposit_contract.rs b/lcli/src/deploy_deposit_contract.rs index b920486c84..0674028bee 100644 --- a/lcli/src/deploy_deposit_contract.rs +++ b/lcli/src/deploy_deposit_contract.rs @@ -4,7 +4,7 @@ use types::EthSpec; use eth1_test_rig::{Http, Provider}; -pub fn run(env: Environment, matches: &ArgMatches<'_>) -> Result<(), String> { +pub fn run(env: Environment, matches: &ArgMatches) -> Result<(), String> { let eth1_http: String = clap_utils::parse_required(matches, "eth1-http")?; let confirmations: usize = clap_utils::parse_required(matches, "confirmations")?; let validator_count: Option = clap_utils::parse_optional(matches, "validator-count")?; diff --git a/lcli/src/eth1_genesis.rs b/lcli/src/eth1_genesis.rs index 635a36ef70..1879851841 100644 --- a/lcli/src/eth1_genesis.rs +++ b/lcli/src/eth1_genesis.rs @@ -15,15 +15,19 @@ pub const ETH1_GENESIS_UPDATE_INTERVAL: Duration = Duration::from_millis(7_000); pub fn run( env: Environment, testnet_dir: PathBuf, - matches: &ArgMatches<'_>, + matches: &ArgMatches, ) -> Result<(), String> { let endpoints = matches - .value_of("eth1-endpoint") + .get_one::("eth1-endpoint") .map(|e| { warn!("The --eth1-endpoint flag is deprecated. Please use --eth1-endpoints instead"); String::from(e) }) - .or_else(|| matches.value_of("eth1-endpoints").map(String::from)); + .or_else(|| { + matches + .get_one::("eth1-endpoints") + .map(String::from) + }); let mut eth2_network_config = Eth2NetworkConfig::load(testnet_dir.clone())?; diff --git a/lcli/src/interop_genesis.rs b/lcli/src/interop_genesis.rs index f44edffd46..4987a84476 100644 --- a/lcli/src/interop_genesis.rs +++ b/lcli/src/interop_genesis.rs @@ -9,12 +9,12 @@ use types::{test_utils::generate_deterministic_keypairs, EthSpec, Hash256}; pub fn run(testnet_dir: PathBuf, matches: &ArgMatches) -> Result<(), String> { let validator_count = matches - .value_of("validator-count") + .get_one::("validator-count") .ok_or("validator-count not specified")? .parse::() .map_err(|e| format!("Unable to parse validator-count: {}", e))?; - let genesis_time = if let Some(genesis_time) = matches.value_of("genesis-time") { + let genesis_time = if let Some(genesis_time) = matches.get_one::("genesis-time") { genesis_time .parse::() .map_err(|e| format!("Unable to parse genesis-time: {}", e))? 
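The lcli and database_manager hunks above and below all apply the same mechanical clap v2 -> v4 migration: `Arg::with_name` becomes `Arg::new`, `takes_value(true)` becomes `action(ArgAction::Set)`, boolean flags become `action(ArgAction::SetTrue)`, and reads go through `get_one::<T>` / `get_flag` instead of `value_of` / `is_present`. A minimal, self-contained sketch of the pattern (the `count` and `verbose` flags are illustrative only, not part of this diff):

use clap::{Arg, ArgAction, ArgMatches, Command};

fn demo_cli() -> Command {
    Command::new("demo")
        // v2: Arg::with_name("count").takes_value(true)
        .arg(Arg::new("count").long("count").action(ArgAction::Set))
        // v2: Arg::with_name("verbose").takes_value(false)
        .arg(Arg::new("verbose").long("verbose").action(ArgAction::SetTrue))
}

fn read_args(matches: &ArgMatches) -> Result<u64, String> {
    // v2: matches.value_of("count") returned Option<&str>; v4 stores typed
    // values, with String as the default type.
    let count = matches
        .get_one::<String>("count")
        .ok_or("count not specified")?
        .parse::<u64>()
        .map_err(|e| format!("Unable to parse count: {}", e))?;
    // v2: matches.is_present("verbose")
    if matches.get_flag("verbose") {
        eprintln!("count = {}", count);
    }
    Ok(count)
}

fn main() {
    let matches = demo_cli().get_matches_from(["demo", "--count", "3", "--verbose"]);
    println!("{:?}", read_args(&matches));
}

Since `get_one::<String>` still yields an `Option<&String>`, the existing `.ok_or(...)?.parse::<T>()` chains in this diff carry over unchanged.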
diff --git a/lcli/src/main.rs b/lcli/src/main.rs index 7b5c1598c9..e0e3b9b461 100644 --- a/lcli/src/main.rs +++ b/lcli/src/main.rs @@ -19,8 +19,8 @@ mod skip_slots; mod state_root; mod transition_blocks; -use clap::{App, Arg, ArgMatches, SubCommand}; -use clap_utils::parse_optional; +use clap::{Arg, ArgAction, ArgMatches, Command}; +use clap_utils::{parse_optional, FLAG_HEADER}; use environment::{EnvironmentBuilder, LoggerConfig}; use eth2_network_config::Eth2NetworkConfig; use parse_ssz::run_parse_ssz; @@ -32,327 +32,367 @@ use types::{EthSpec, EthSpecId}; fn main() { env_logger::init(); - let matches = App::new("Lighthouse CLI Tool") + let matches = Command::new("Lighthouse CLI Tool") .version(lighthouse_version::VERSION) + .display_order(0) .about("Performs various testing-related tasks, including defining testnets.") .arg( - Arg::with_name("spec") - .short("s") + Arg::new("spec") + .short('s') .long("spec") .value_name("STRING") - .takes_value(true) - .possible_values(&["minimal", "mainnet", "gnosis"]) + .action(ArgAction::Set) + .value_parser(["minimal", "mainnet", "gnosis"]) .default_value("mainnet") - .global(true), + .global(true) + .display_order(0) ) .arg( - Arg::with_name("testnet-dir") - .short("d") + Arg::new("testnet-dir") + .short('d') .long("testnet-dir") .value_name("PATH") - .takes_value(true) + .action(ArgAction::Set) .global(true) - .help("The testnet dir."), + .help("The testnet dir.") + .display_order(0) ) .arg( - Arg::with_name("network") + Arg::new("network") .long("network") .value_name("NAME") - .takes_value(true) + .action(ArgAction::Set) .global(true) .help("The network to use. Defaults to mainnet.") .conflicts_with("testnet-dir") + .display_order(0) ) .subcommand( - SubCommand::with_name("skip-slots") + Command::new("skip-slots") .about( "Performs a state transition from some state across some number of skip slots", ) .arg( - Arg::with_name("output-path") + Arg::new("output-path") .long("output-path") .value_name("PATH") - .takes_value(true) - .help("Path to output a SSZ file."), + .action(ArgAction::Set) + .help("Path to output a SSZ file.") + .display_order(0) ) .arg( - Arg::with_name("pre-state-path") + Arg::new("pre-state-path") .long("pre-state-path") .value_name("PATH") - .takes_value(true) + .action(ArgAction::Set) .conflicts_with("beacon-url") - .help("Path to a SSZ file of the pre-state."), + .help("Path to a SSZ file of the pre-state.") + .display_order(0) ) .arg( - Arg::with_name("beacon-url") + Arg::new("beacon-url") .long("beacon-url") .value_name("URL") - .takes_value(true) - .help("URL to a beacon-API provider."), + .action(ArgAction::Set) + .help("URL to a beacon-API provider.") + .display_order(0) ) .arg( - Arg::with_name("state-id") + Arg::new("state-id") .long("state-id") .value_name("STATE_ID") - .takes_value(true) + .action(ArgAction::Set) .requires("beacon-url") - .help("Identifier for a state as per beacon-API standards (slot, root, etc.)"), + .help("Identifier for a state as per beacon-API standards (slot, root, etc.)") + .display_order(0) ) .arg( - Arg::with_name("runs") + Arg::new("runs") .long("runs") .value_name("INTEGER") - .takes_value(true) + .action(ArgAction::Set) .default_value("1") - .help("Number of repeat runs, useful for benchmarking."), + .help("Number of repeat runs, useful for benchmarking.") + .display_order(0) ) .arg( - Arg::with_name("state-root") + Arg::new("state-root") .long("state-root") .value_name("HASH256") - .takes_value(true) - .help("Tree hash root of the provided state, to avoid computing it."), + 
.action(ArgAction::Set) + .help("Tree hash root of the provided state, to avoid computing it.") + .display_order(0) ) .arg( - Arg::with_name("slots") + Arg::new("slots") .long("slots") .value_name("INTEGER") - .takes_value(true) - .help("Number of slots to skip forward."), + .action(ArgAction::Set) + .help("Number of slots to skip forward.") + .display_order(0) ) .arg( - Arg::with_name("partial-state-advance") + Arg::new("partial-state-advance") .long("partial-state-advance") - .takes_value(false) - .help("If present, don't compute state roots when skipping forward."), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .help("If present, don't compute state roots when skipping forward.") + .display_order(0) ) ) .subcommand( - SubCommand::with_name("transition-blocks") + Command::new("transition-blocks") .about("Performs a state transition given a pre-state and block") .arg( - Arg::with_name("pre-state-path") + Arg::new("pre-state-path") .long("pre-state-path") .value_name("PATH") - .takes_value(true) + .action(ArgAction::Set) .conflicts_with("beacon-url") .requires("block-path") - .help("Path to load a BeaconState from as SSZ."), + .help("Path to load a BeaconState from as SSZ.") + .display_order(0) ) .arg( - Arg::with_name("block-path") + Arg::new("block-path") .long("block-path") .value_name("PATH") - .takes_value(true) + .action(ArgAction::Set) .conflicts_with("beacon-url") .requires("pre-state-path") - .help("Path to load a SignedBeaconBlock from as SSZ."), + .help("Path to load a SignedBeaconBlock from as SSZ.") + .display_order(0) ) .arg( - Arg::with_name("post-state-output-path") + Arg::new("post-state-output-path") .long("post-state-output-path") .value_name("PATH") - .takes_value(true) - .help("Path to output the post-state."), + .action(ArgAction::Set) + .help("Path to output the post-state.") + .display_order(0) ) .arg( - Arg::with_name("pre-state-output-path") + Arg::new("pre-state-output-path") .long("pre-state-output-path") .value_name("PATH") - .takes_value(true) - .help("Path to output the pre-state, useful when used with --beacon-url."), + .action(ArgAction::Set) + .help("Path to output the pre-state, useful when used with --beacon-url.") + .display_order(0) ) .arg( - Arg::with_name("block-output-path") + Arg::new("block-output-path") .long("block-output-path") .value_name("PATH") - .takes_value(true) - .help("Path to output the block, useful when used with --beacon-url."), + .action(ArgAction::Set) + .help("Path to output the block, useful when used with --beacon-url.") + .display_order(0) ) .arg( - Arg::with_name("beacon-url") + Arg::new("beacon-url") .long("beacon-url") .value_name("URL") - .takes_value(true) - .help("URL to a beacon-API provider."), + .action(ArgAction::Set) + .help("URL to a beacon-API provider.") + .display_order(0) ) .arg( - Arg::with_name("block-id") + Arg::new("block-id") .long("block-id") .value_name("BLOCK_ID") - .takes_value(true) + .action(ArgAction::Set) .requires("beacon-url") - .help("Identifier for a block as per beacon-API standards (slot, root, etc.)"), + .help("Identifier for a block as per beacon-API standards (slot, root, etc.)") + .display_order(0) ) .arg( - Arg::with_name("runs") + Arg::new("runs") .long("runs") .value_name("INTEGER") - .takes_value(true) + .action(ArgAction::Set) .default_value("1") - .help("Number of repeat runs, useful for benchmarking."), + .help("Number of repeat runs, useful for benchmarking.") + .display_order(0) ) .arg( - Arg::with_name("no-signature-verification") + 
Arg::new("no-signature-verification") .long("no-signature-verification") - .takes_value(false) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .help("Disable signature verification.") + .display_order(0) ) .arg( - Arg::with_name("exclude-cache-builds") + Arg::new("exclude-cache-builds") .long("exclude-cache-builds") - .takes_value(false) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .help("If present, pre-build the committee and tree-hash caches without \ - including them in the timings."), + including them in the timings.") + .display_order(0) ) .arg( - Arg::with_name("exclude-post-block-thc") + Arg::new("exclude-post-block-thc") .long("exclude-post-block-thc") - .takes_value(false) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .help("If present, don't rebuild the tree-hash-cache after applying \ - the block."), + the block.") + .display_order(0) ) ) .subcommand( - SubCommand::with_name("pretty-ssz") + Command::new("pretty-ssz") .about("Parses SSZ-encoded data from a file") .arg( - Arg::with_name("format") - .short("f") + Arg::new("format") + .short('f') .long("format") .value_name("FORMAT") - .takes_value(true) - .required(true) + .action(ArgAction::Set) + .required(false) .default_value("json") - .possible_values(&["json", "yaml"]) + .value_parser(["json", "yaml"]) .help("Output format to use") + .display_order(0) ) .arg( - Arg::with_name("type") + Arg::new("type") .value_name("TYPE") - .takes_value(true) + .action(ArgAction::Set) .required(true) - .help("Type to decode"), + .help("Type to decode") + .display_order(0) ) .arg( - Arg::with_name("ssz-file") + Arg::new("ssz-file") .value_name("FILE") - .takes_value(true) + .action(ArgAction::Set) .required(true) - .help("Path to SSZ bytes"), + .help("Path to SSZ bytes") + .display_order(0) ) ) .subcommand( - SubCommand::with_name("deploy-deposit-contract") + Command::new("deploy-deposit-contract") .about( "Deploy a testing eth1 deposit contract.", ) .arg( - Arg::with_name("eth1-http") + Arg::new("eth1-http") .long("eth1-http") - .short("e") + .short('e') .value_name("ETH1_HTTP_PATH") .help("Path to an Eth1 JSON-RPC IPC endpoint") - .takes_value(true) + .action(ArgAction::Set) .required(true) + .display_order(0) ) .arg( - Arg::with_name("confirmations") + Arg::new("confirmations") .value_name("INTEGER") .long("confirmations") - .takes_value(true) + .action(ArgAction::Set) .default_value("3") - .help("The number of block confirmations before declaring the contract deployed."), + .help("The number of block confirmations before declaring the contract deployed.") + .display_order(0) ) .arg( - Arg::with_name("validator-count") + Arg::new("validator-count") .value_name("VALIDATOR_COUNT") .long("validator-count") - .takes_value(true) + .action(ArgAction::Set) .help("If present, makes `validator_count` number of INSECURE deterministic deposits after \ deploying the deposit contract." - ), + ) + .display_order(0) ) ) .subcommand( - SubCommand::with_name("eth1-genesis") + Command::new("eth1-genesis") .about("Listens to the eth1 chain and finds the genesis beacon state") .arg( - Arg::with_name("eth1-endpoint") - .short("e") + Arg::new("eth1-endpoint") + .short('e') .long("eth1-endpoint") .value_name("HTTP_SERVER") - .takes_value(true) - .help("Deprecated. Use --eth1-endpoints."), + .action(ArgAction::Set) + .help("Deprecated. 
Use --eth1-endpoints.") + .display_order(0) ) .arg( - Arg::with_name("eth1-endpoints") + Arg::new("eth1-endpoints") .long("eth1-endpoints") .value_name("HTTP_SERVER_LIST") - .takes_value(true) + .action(ArgAction::Set) .conflicts_with("eth1-endpoint") .help( "One or more comma-delimited URLs to eth1 JSON-RPC http APIs. \ If multiple endpoints are given the endpoints are used as \ fallback in the given order.", - ), + ) + .display_order(0) ), ) .subcommand( - SubCommand::with_name("interop-genesis") + Command::new("interop-genesis") .about("Produces an interop-compatible genesis state using deterministic keypairs") .arg( - Arg::with_name("validator-count") + Arg::new("validator-count") .long("validator-count") .index(1) .value_name("INTEGER") - .takes_value(true) + .action(ArgAction::Set) .default_value("1024") - .help("The number of validators in the genesis state."), + .help("The number of validators in the genesis state.") + .display_order(0) ) .arg( - Arg::with_name("genesis-time") + Arg::new("genesis-time") .long("genesis-time") - .short("t") + .short('t') .value_name("UNIX_EPOCH") - .takes_value(true) - .help("The value for state.genesis_time. Defaults to now."), + .action(ArgAction::Set) + .help("The value for state.genesis_time. Defaults to now.") + .display_order(0) ) .arg( - Arg::with_name("genesis-fork-version") + Arg::new("genesis-fork-version") .long("genesis-fork-version") .value_name("HEX") - .takes_value(true) + .action(ArgAction::Set) .help( "Used to avoid reply attacks between testnets. Recommended to set to non-default.", - ), + ) + .display_order(0) ), ) .subcommand( - SubCommand::with_name("change-genesis-time") + Command::new("change-genesis-time") .about( "Loads a file with an SSZ-encoded BeaconState and modifies the genesis time.", ) .arg( - Arg::with_name("ssz-state") + Arg::new("ssz-state") .index(1) .value_name("PATH") - .takes_value(true) + .action(ArgAction::Set) .required(true) - .help("The path to the SSZ file"), + .help("The path to the SSZ file") + .display_order(0) ) .arg( - Arg::with_name("genesis-time") + Arg::new("genesis-time") .index(2) .value_name("UNIX_EPOCH") - .takes_value(true) + .action(ArgAction::Set) .required(true) - .help("The value for state.genesis_time."), + .help("The value for state.genesis_time.") + .display_order(0) ), ) .subcommand( - SubCommand::with_name("replace-state-pubkeys") + Command::new("replace-state-pubkeys") .about( "Loads a file with an SSZ-encoded BeaconState and replaces \ all the validator pubkeys with ones derived from the mnemonic \ @@ -360,616 +400,687 @@ fn main() { derivation paths.", ) .arg( - Arg::with_name("ssz-state") + Arg::new("ssz-state") .index(1) .value_name("PATH") - .takes_value(true) + .action(ArgAction::Set) .required(true) - .help("The path to the SSZ file"), + .help("The path to the SSZ file") + .display_order(0) ) .arg( - Arg::with_name("mnemonic") + Arg::new("mnemonic") .index(2) .value_name("BIP39_MNENMONIC") - .takes_value(true) + .action(ArgAction::Set) .default_value( "replace nephew blur decorate waste convince soup column \ orient excite play baby", ) - .help("The mnemonic for key derivation."), + .help("The mnemonic for key derivation.") + .display_order(0) ), ) .subcommand( - SubCommand::with_name("create-payload-header") + Command::new("create-payload-header") .about("Generates an SSZ file containing bytes for an `ExecutionPayloadHeader`. \ Useful as input for `lcli new-testnet --execution-payload-header FILE`. 
If `--fork` \ is not provided, a payload header for the `Bellatrix` fork will be created.") .arg( - Arg::with_name("execution-block-hash") + Arg::new("execution-block-hash") .long("execution-block-hash") .value_name("BLOCK_HASH") - .takes_value(true) + .action(ArgAction::Set) .help("The block hash used when generating an execution payload. This \ value is used for `execution_payload_header.block_hash` as well as \ `execution_payload_header.random`") .default_value( "0x0000000000000000000000000000000000000000000000000000000000000000", - ), + ) + .display_order(0) ) .arg( - Arg::with_name("genesis-time") + Arg::new("genesis-time") .long("genesis-time") .value_name("INTEGER") - .takes_value(true) + .action(ArgAction::Set) .help("The genesis time when generating an execution payload.") + .display_order(0) ) .arg( - Arg::with_name("base-fee-per-gas") + Arg::new("base-fee-per-gas") .long("base-fee-per-gas") .value_name("INTEGER") - .takes_value(true) + .action(ArgAction::Set) .help("The base fee per gas field in the execution payload generated.") - .default_value("1000000000"), + .default_value("1000000000") + .display_order(0) ) .arg( - Arg::with_name("gas-limit") + Arg::new("gas-limit") .long("gas-limit") .value_name("INTEGER") - .takes_value(true) + .action(ArgAction::Set) .help("The gas limit field in the execution payload generated.") - .default_value("30000000"), + .default_value("30000000") + .display_order(0) ) .arg( - Arg::with_name("file") + Arg::new("file") .long("file") .value_name("FILE") - .takes_value(true) + .action(ArgAction::Set) .required(true) - .help("Output file"), + .help("Output file") + .display_order(0) ).arg( - Arg::with_name("fork") + Arg::new("fork") .long("fork") .value_name("FORK") - .takes_value(true) + .action(ArgAction::Set) .default_value("bellatrix") .help("The fork for which the execution payload header should be created.") - .possible_values(&["bellatrix", "capella", "deneb", "electra"]) + .value_parser(["bellatrix", "capella", "deneb", "electra"]) + .display_order(0) ) ) .subcommand( - SubCommand::with_name("new-testnet") + Command::new("new-testnet") .about( "Produce a new testnet directory. 
If any of the optional flags are not supplied the values will remain the default for the --spec flag", ) .arg( - Arg::with_name("force") + Arg::new("force") .long("force") - .short("f") - .takes_value(false) - .help("Overwrites any previous testnet configurations"), + .short('f') + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .help("Overwrites any previous testnet configurations") + .display_order(0) ) .arg( - Arg::with_name("interop-genesis-state") + Arg::new("interop-genesis-state") .long("interop-genesis-state") - .takes_value(false) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .help( "If present, a interop-style genesis.ssz file will be generated.", - ), + ) + .display_order(0) ) .arg( - Arg::with_name("derived-genesis-state") + Arg::new("derived-genesis-state") .long("derived-genesis-state") - .takes_value(false) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .help( "If present, a genesis.ssz file will be generated with keys generated from a given mnemonic.", - ), + ) + .display_order(0) ) .arg( - Arg::with_name("mnemonic-phrase") + Arg::new("mnemonic-phrase") .long("mnemonic-phrase") .value_name("MNEMONIC_PHRASE") - .takes_value(true) + .action(ArgAction::Set) .requires("derived-genesis-state") - .help("The mnemonic with which we generate the validator keys for a derived genesis state"), + .help("The mnemonic with which we generate the validator keys for a derived genesis state") + .display_order(0) ) .arg( - Arg::with_name("min-genesis-time") + Arg::new("min-genesis-time") .long("min-genesis-time") .value_name("UNIX_SECONDS") - .takes_value(true) + .action(ArgAction::Set) .help( "The minimum permitted genesis time. For non-eth1 testnets will be the genesis time. Defaults to now.", - ), + ) + .display_order(0) ) .arg( - Arg::with_name("min-genesis-active-validator-count") + Arg::new("min-genesis-active-validator-count") .long("min-genesis-active-validator-count") .value_name("INTEGER") - .takes_value(true) - .help("The number of validators required to trigger eth2 genesis."), + .action(ArgAction::Set) + .help("The number of validators required to trigger eth2 genesis.") + .display_order(0) ) .arg( - Arg::with_name("genesis-delay") + Arg::new("genesis-delay") .long("genesis-delay") .value_name("SECONDS") - .takes_value(true) - .help("The delay between sufficient eth1 deposits and eth2 genesis."), + .action(ArgAction::Set) + .help("The delay between sufficient eth1 deposits and eth2 genesis.") + .display_order(0) ) .arg( - Arg::with_name("min-deposit-amount") + Arg::new("min-deposit-amount") .long("min-deposit-amount") .value_name("GWEI") - .takes_value(true) - .help("The minimum permitted deposit amount."), + .action(ArgAction::Set) + .help("The minimum permitted deposit amount.") + .display_order(0) ) .arg( - Arg::with_name("max-effective-balance") + Arg::new("max-effective-balance") .long("max-effective-balance") .value_name("GWEI") - .takes_value(true) - .help("The amount required to become a validator."), + .action(ArgAction::Set) + .help("The amount required to become a validator.") + .display_order(0) ) .arg( - Arg::with_name("effective-balance-increment") + Arg::new("effective-balance-increment") .long("effective-balance-increment") .value_name("GWEI") - .takes_value(true) - .help("The steps in effective balance calculation."), + .action(ArgAction::Set) + .help("The steps in effective balance calculation.") + .display_order(0) ) .arg( - Arg::with_name("ejection-balance") + Arg::new("ejection-balance") .long("ejection-balance") 
.value_name("GWEI") - .takes_value(true) - .help("The balance at which a validator gets ejected."), + .action(ArgAction::Set) + .help("The balance at which a validator gets ejected.") + .display_order(0) ) .arg( - Arg::with_name("eth1-follow-distance") + Arg::new("eth1-follow-distance") .long("eth1-follow-distance") .value_name("ETH1_BLOCKS") - .takes_value(true) - .help("The distance to follow behind the eth1 chain head."), + .action(ArgAction::Set) + .help("The distance to follow behind the eth1 chain head.") + .display_order(0) ) .arg( - Arg::with_name("genesis-fork-version") + Arg::new("genesis-fork-version") .long("genesis-fork-version") .value_name("HEX") - .takes_value(true) + .action(ArgAction::Set) .help( "Used to avoid reply attacks between testnets. Recommended to set to non-default.", - ), + ) + .display_order(0) ) .arg( - Arg::with_name("seconds-per-slot") + Arg::new("seconds-per-slot") .long("seconds-per-slot") .value_name("SECONDS") - .takes_value(true) - .help("Eth2 slot time"), + .action(ArgAction::Set) + .help("Eth2 slot time") + .display_order(0) ) .arg( - Arg::with_name("seconds-per-eth1-block") + Arg::new("seconds-per-eth1-block") .long("seconds-per-eth1-block") .value_name("SECONDS") - .takes_value(true) - .help("Eth1 block time"), + .action(ArgAction::Set) + .help("Eth1 block time") + .display_order(0) ) .arg( - Arg::with_name("eth1-id") + Arg::new("eth1-id") .long("eth1-id") .value_name("ETH1_ID") - .takes_value(true) - .help("The chain id and network id for the eth1 testnet."), + .action(ArgAction::Set) + .help("The chain id and network id for the eth1 testnet.") + .display_order(0) ) .arg( - Arg::with_name("deposit-contract-address") + Arg::new("deposit-contract-address") .long("deposit-contract-address") .value_name("ETH1_ADDRESS") - .takes_value(true) + .action(ArgAction::Set) .required(true) - .help("The address of the deposit contract."), + .help("The address of the deposit contract.") + .display_order(0) ) .arg( - Arg::with_name("deposit-contract-deploy-block") + Arg::new("deposit-contract-deploy-block") .long("deposit-contract-deploy-block") .value_name("ETH1_BLOCK_NUMBER") - .takes_value(true) + .action(ArgAction::Set) .default_value("0") .help( "The block the deposit contract was deployed. 
Setting this is a huge optimization for nodes, please do it.", - ), + ) + .display_order(0) ) .arg( - Arg::with_name("altair-fork-epoch") + Arg::new("altair-fork-epoch") .long("altair-fork-epoch") .value_name("EPOCH") - .takes_value(true) + .action(ArgAction::Set) .help( "The epoch at which to enable the Altair hard fork", - ), + ) + .display_order(0) ) .arg( - Arg::with_name("bellatrix-fork-epoch") + Arg::new("bellatrix-fork-epoch") .long("bellatrix-fork-epoch") .value_name("EPOCH") - .takes_value(true) + .action(ArgAction::Set) .help( "The epoch at which to enable the Bellatrix hard fork", - ), + ) + .display_order(0) ) .arg( - Arg::with_name("capella-fork-epoch") + Arg::new("capella-fork-epoch") .long("capella-fork-epoch") .value_name("EPOCH") - .takes_value(true) + .action(ArgAction::Set) .help( "The epoch at which to enable the Capella hard fork", - ), + ) + .display_order(0) ) .arg( - Arg::with_name("deneb-fork-epoch") + Arg::new("deneb-fork-epoch") .long("deneb-fork-epoch") .value_name("EPOCH") - .takes_value(true) + .action(ArgAction::Set) .help( "The epoch at which to enable the Deneb hard fork", - ), + ) + .display_order(0) ) .arg( - Arg::with_name("electra-fork-epoch") + Arg::new("electra-fork-epoch") .long("electra-fork-epoch") .value_name("EPOCH") - .takes_value(true) + .action(ArgAction::Set) .help( "The epoch at which to enable the Electra hard fork", - ), + ) + .display_order(0) ) .arg( - Arg::with_name("ttd") + Arg::new("ttd") .long("ttd") .value_name("TTD") - .takes_value(true) + .action(ArgAction::Set) .help( "The terminal total difficulty", - ), + ) + .display_order(0) ) .arg( - Arg::with_name("eth1-block-hash") + Arg::new("eth1-block-hash") .long("eth1-block-hash") .value_name("BLOCK_HASH") - .takes_value(true) - .help("The eth1 block hash used when generating a genesis state."), + .action(ArgAction::Set) + .help("The eth1 block hash used when generating a genesis state.") + .display_order(0) ) .arg( - Arg::with_name("execution-payload-header") + Arg::new("execution-payload-header") .long("execution-payload-header") .value_name("FILE") - .takes_value(true) + .action(ArgAction::Set) .required(false) .help("Path to file containing `ExecutionPayloadHeader` SSZ bytes to be \ - used in the genesis state."), + used in the genesis state.") + .display_order(0) ) .arg( - Arg::with_name("validator-count") + Arg::new("validator-count") .long("validator-count") .value_name("INTEGER") - .takes_value(true) - .help("The number of validators when generating a genesis state."), + .action(ArgAction::Set) + .help("The number of validators when generating a genesis state.") + .display_order(0) ) .arg( - Arg::with_name("genesis-time") + Arg::new("genesis-time") .long("genesis-time") .value_name("INTEGER") - .takes_value(true) - .help("The genesis time when generating a genesis state."), + .action(ArgAction::Set) + .help("The genesis time when generating a genesis state.") + .display_order(0) ) .arg( - Arg::with_name("proposer-score-boost") + Arg::new("proposer-score-boost") .long("proposer-score-boost") .value_name("INTEGER") - .takes_value(true) - .help("The proposer score boost to apply as a percentage, e.g. 70 = 70%"), + .action(ArgAction::Set) + .help("The proposer score boost to apply as a percentage, e.g. 
70 = 70%") + .display_order(0) ) ) .subcommand( - SubCommand::with_name("check-deposit-data") + Command::new("check-deposit-data") .about("Checks the integrity of some deposit data.") .arg( - Arg::with_name("deposit-amount") + Arg::new("deposit-amount") .index(1) .value_name("GWEI") - .takes_value(true) + .action(ArgAction::Set) .required(true) - .help("The amount (in Gwei) that was deposited"), + .help("The amount (in Gwei) that was deposited") + .display_order(0) ) .arg( - Arg::with_name("deposit-data") + Arg::new("deposit-data") .index(2) .value_name("HEX") - .takes_value(true) + .action(ArgAction::Set) .required(true) .help( "A 0x-prefixed hex string of the deposit data. Should include the function signature.", - ), + ) + .display_order(0) ), ) .subcommand( - SubCommand::with_name("generate-bootnode-enr") + Command::new("generate-bootnode-enr") .about("Generates an ENR address to be used as a pre-genesis boot node.") .arg( - Arg::with_name("ip") + Arg::new("ip") .long("ip") .value_name("IP_ADDRESS") - .takes_value(true) + .action(ArgAction::Set) .required(true) - .help("The IP address to be included in the ENR and used for discovery"), + .help("The IP address to be included in the ENR and used for discovery") + .display_order(0) ) .arg( - Arg::with_name("udp-port") + Arg::new("udp-port") .long("udp-port") .value_name("UDP_PORT") - .takes_value(true) + .action(ArgAction::Set) .required(true) - .help("The UDP port to be included in the ENR and used for discovery"), + .help("The UDP port to be included in the ENR and used for discovery") + .display_order(0) ) .arg( - Arg::with_name("tcp-port") + Arg::new("tcp-port") .long("tcp-port") .value_name("TCP_PORT") - .takes_value(true) + .action(ArgAction::Set) .required(true) .help( "The TCP port to be included in the ENR and used for application comms", - ), + ) + .display_order(0) ) .arg( - Arg::with_name("output-dir") + Arg::new("output-dir") .long("output-dir") .value_name("OUTPUT_DIRECTORY") - .takes_value(true) + .action(ArgAction::Set) .required(true) - .help("The directory in which to create the network dir"), + .help("The directory in which to create the network dir") + .display_order(0) ) .arg( - Arg::with_name("genesis-fork-version") + Arg::new("genesis-fork-version") .long("genesis-fork-version") .value_name("HEX") - .takes_value(true) + .action(ArgAction::Set) .required(true) .help( "Used to avoid reply attacks between testnets. 
Recommended to set to non-default.", - ), + ) + .display_order(0) ), ) .subcommand( - SubCommand::with_name("insecure-validators") + Command::new("insecure-validators") .about("Produces validator directories with INSECURE, deterministic keypairs.") .arg( - Arg::with_name("count") + Arg::new("count") .long("count") .value_name("COUNT") - .takes_value(true) + .action(ArgAction::Set) .required(true) - .help("Produces validators in the range of 0..count."), + .help("Produces validators in the range of 0..count.") + .display_order(0) ) .arg( - Arg::with_name("base-dir") + Arg::new("base-dir") .long("base-dir") .value_name("BASE_DIR") - .takes_value(true) + .action(ArgAction::Set) .required(true) - .help("The base directory where validator keypairs and secrets are stored"), + .help("The base directory where validator keypairs and secrets are stored") + .display_order(0) ) .arg( - Arg::with_name("node-count") + Arg::new("node-count") .long("node-count") .value_name("NODE_COUNT") - .takes_value(true) - .help("The number of nodes to divide the validator keys to"), + .action(ArgAction::Set) + .help("The number of nodes to divide the validator keys to") + .display_order(0) ) ) .subcommand( - SubCommand::with_name("mnemonic-validators") + Command::new("mnemonic-validators") .about("Produces validator directories by deriving the keys from \ a mnemonic. For testing purposes only, DO NOT USE IN \ PRODUCTION!") .arg( - Arg::with_name("count") + Arg::new("count") .long("count") .value_name("COUNT") - .takes_value(true) + .action(ArgAction::Set) .required(true) - .help("Produces validators in the range of 0..count."), + .help("Produces validators in the range of 0..count.") + .display_order(0) ) .arg( - Arg::with_name("base-dir") + Arg::new("base-dir") .long("base-dir") .value_name("BASE_DIR") - .takes_value(true) + .action(ArgAction::Set) .required(true) - .help("The base directory where validator keypairs and secrets are stored"), + .help("The base directory where validator keypairs and secrets are stored") + .display_order(0) ) .arg( - Arg::with_name("node-count") + Arg::new("node-count") .long("node-count") .value_name("NODE_COUNT") - .takes_value(true) - .help("The number of nodes to divide the validator keys to"), + .action(ArgAction::Set) + .help("The number of nodes to divide the validator keys to") + .display_order(0) ) .arg( - Arg::with_name("mnemonic-phrase") + Arg::new("mnemonic-phrase") .long("mnemonic-phrase") .value_name("MNEMONIC_PHRASE") - .takes_value(true) + .action(ArgAction::Set) .required(true) - .help("The mnemonic with which we generate the validator keys"), + .help("The mnemonic with which we generate the validator keys") + .display_order(0) ) ) .subcommand( - SubCommand::with_name("indexed-attestations") + Command::new("indexed-attestations") .about("Convert attestations to indexed form, using the committees from a state.") .arg( - Arg::with_name("state") + Arg::new("state") .long("state") .value_name("SSZ_STATE") - .takes_value(true) + .action(ArgAction::Set) .required(true) - .help("BeaconState to generate committees from (SSZ)"), + .help("BeaconState to generate committees from (SSZ)") + .display_order(0) ) .arg( - Arg::with_name("attestations") + Arg::new("attestations") .long("attestations") .value_name("JSON_ATTESTATIONS") - .takes_value(true) + .action(ArgAction::Set) .required(true) - .help("List of Attestations to convert to indexed form (JSON)"), + .help("List of Attestations to convert to indexed form (JSON)") + .display_order(0) ) ) .subcommand( - 
SubCommand::with_name("block-root") + Command::new("block-root") .about("Computes the block root of some block.") .arg( - Arg::with_name("block-path") + Arg::new("block-path") .long("block-path") .value_name("PATH") - .takes_value(true) + .action(ArgAction::Set) .conflicts_with("beacon-url") - .help("Path to load a SignedBeaconBlock from as SSZ."), + .help("Path to load a SignedBeaconBlock from as SSZ.") + .display_order(0) ) .arg( - Arg::with_name("beacon-url") + Arg::new("beacon-url") .long("beacon-url") .value_name("URL") - .takes_value(true) - .help("URL to a beacon-API provider."), + .action(ArgAction::Set) + .help("URL to a beacon-API provider.") + .display_order(0) ) .arg( - Arg::with_name("block-id") + Arg::new("block-id") .long("block-id") .value_name("BLOCK_ID") - .takes_value(true) + .action(ArgAction::Set) .requires("beacon-url") - .help("Identifier for a block as per beacon-API standards (slot, root, etc.)"), + .help("Identifier for a block as per beacon-API standards (slot, root, etc.)") + .display_order(0) ) .arg( - Arg::with_name("runs") + Arg::new("runs") .long("runs") .value_name("INTEGER") - .takes_value(true) + .action(ArgAction::Set) .default_value("1") - .help("Number of repeat runs, useful for benchmarking."), + .help("Number of repeat runs, useful for benchmarking.") + .display_order(0) ) ) .subcommand( - SubCommand::with_name("state-root") + Command::new("state-root") .about("Computes the state root of some state.") .arg( - Arg::with_name("state-path") + Arg::new("state-path") .long("state-path") .value_name("PATH") - .takes_value(true) + .action(ArgAction::Set) .conflicts_with("beacon-url") - .help("Path to load a BeaconState from as SSZ."), + .help("Path to load a BeaconState from as SSZ.") + .display_order(0) ) .arg( - Arg::with_name("beacon-url") + Arg::new("beacon-url") .long("beacon-url") .value_name("URL") - .takes_value(true) - .help("URL to a beacon-API provider."), + .action(ArgAction::Set) + .help("URL to a beacon-API provider.") + .display_order(0) ) .arg( - Arg::with_name("state-id") + Arg::new("state-id") .long("state-id") .value_name("BLOCK_ID") - .takes_value(true) + .action(ArgAction::Set) .requires("beacon-url") - .help("Identifier for a state as per beacon-API standards (slot, root, etc.)"), + .help("Identifier for a state as per beacon-API standards (slot, root, etc.)") + .display_order(0) ) .arg( - Arg::with_name("runs") + Arg::new("runs") .long("runs") .value_name("INTEGER") - .takes_value(true) + .action(ArgAction::Set) .default_value("1") - .help("Number of repeat runs, useful for benchmarking."), + .help("Number of repeat runs, useful for benchmarking.") + .display_order(0) ) ) .subcommand( - SubCommand::with_name("mock-el") + Command::new("mock-el") .about("Creates a mock execution layer server. This is NOT SAFE and should only \ be used for testing and development on testnets. Do not use in production. Do not \ use on mainnet. 
It cannot perform validator duties.") .arg( - Arg::with_name("jwt-output-path") + Arg::new("jwt-output-path") .long("jwt-output-path") .value_name("PATH") - .takes_value(true) + .action(ArgAction::Set) .required(true) - .help("Path to write the JWT secret."), + .help("Path to write the JWT secret.") + .display_order(0) ) .arg( - Arg::with_name("listen-address") + Arg::new("listen-address") .long("listen-address") .value_name("IP_ADDRESS") - .takes_value(true) + .action(ArgAction::Set) .help("The server will listen on this address.") .default_value("127.0.0.1") + .display_order(0) ) .arg( - Arg::with_name("listen-port") + Arg::new("listen-port") .long("listen-port") .value_name("PORT") - .takes_value(true) + .action(ArgAction::Set) .help("The server will listen on this port.") .default_value("8551") + .display_order(0) ) .arg( - Arg::with_name("all-payloads-valid") + Arg::new("all-payloads-valid") .long("all-payloads-valid") - .takes_value(true) + .action(ArgAction::Set) .help("Controls the response to newPayload and forkchoiceUpdated. \ Set to 'true' to return VALID. Set to 'false' to return SYNCING.") .default_value("false") - .hidden(true) + .hide(true) + .display_order(0) ) .arg( - Arg::with_name("shanghai-time") + Arg::new("shanghai-time") .long("shanghai-time") .value_name("UNIX_TIMESTAMP") - .takes_value(true) + .action(ArgAction::Set) .help("The payload timestamp that enables Shanghai. Defaults to the mainnet value.") .default_value("1681338479") + .display_order(0) ) .arg( - Arg::with_name("cancun-time") + Arg::new("cancun-time") .long("cancun-time") .value_name("UNIX_TIMESTAMP") - .takes_value(true) + .action(ArgAction::Set) .help("The payload timestamp that enables Cancun. No default is provided \ until Cancun is triggered on mainnet.") + .display_order(0) ) .arg( - Arg::with_name("prague-time") + Arg::new("prague-time") .long("prague-time") .value_name("UNIX_TIMESTAMP") - .takes_value(true) + .action(ArgAction::Set) .help("The payload timestamp that enables Prague. No default is provided \ until Prague is triggered on mainnet.") + .display_order(0) ) ) .get_matches(); let result = matches - .value_of("spec") + .get_one::("spec") .ok_or_else(|| "Missing --spec flag".to_string()) - .and_then(FromStr::from_str) + .and_then(|s| FromStr::from_str(s)) .and_then(|eth_spec_id| match eth_spec_id { EthSpecId::Minimal => run(EnvironmentBuilder::minimal(), &matches), EthSpecId::Mainnet => run(EnvironmentBuilder::mainnet(), &matches), @@ -985,10 +1096,7 @@ fn main() { } } -fn run( - env_builder: EnvironmentBuilder, - matches: &ArgMatches<'_>, -) -> Result<(), String> { +fn run(env_builder: EnvironmentBuilder, matches: &ArgMatches) -> Result<(), String> { let env = env_builder .multi_threaded_tokio_runtime() .map_err(|e| format!("should start tokio runtime: {:?}", e))? 
@@ -1039,74 +1147,75 @@ fn run<E: EthSpec>( }; match matches.subcommand() { - ("transition-blocks", Some(matches)) => { + Some(("transition-blocks", matches)) => { let network_config = get_network_config()?; transition_blocks::run::<E>(env, network_config, matches) .map_err(|e| format!("Failed to transition blocks: {}", e)) } - ("skip-slots", Some(matches)) => { + Some(("skip-slots", matches)) => { let network_config = get_network_config()?; skip_slots::run::<E>(env, network_config, matches) .map_err(|e| format!("Failed to skip slots: {}", e)) } - ("pretty-ssz", Some(matches)) => { + Some(("pretty-ssz", matches)) => { let network_config = get_network_config()?; run_parse_ssz::<E>(network_config, matches) .map_err(|e| format!("Failed to pretty print hex: {}", e)) } - ("deploy-deposit-contract", Some(matches)) => { + Some(("deploy-deposit-contract", matches)) => { deploy_deposit_contract::run::<E>(env, matches) .map_err(|e| format!("Failed to run deploy-deposit-contract command: {}", e)) } - ("eth1-genesis", Some(matches)) => { + Some(("eth1-genesis", matches)) => { let testnet_dir = get_testnet_dir()?; eth1_genesis::run::<E>(env, testnet_dir, matches) .map_err(|e| format!("Failed to run eth1-genesis command: {}", e)) } - ("interop-genesis", Some(matches)) => { + Some(("interop-genesis", matches)) => { let testnet_dir = get_testnet_dir()?; interop_genesis::run::<E>(testnet_dir, matches) .map_err(|e| format!("Failed to run interop-genesis command: {}", e)) } - ("change-genesis-time", Some(matches)) => { + Some(("change-genesis-time", matches)) => { let testnet_dir = get_testnet_dir()?; change_genesis_time::run::<E>(testnet_dir, matches) .map_err(|e| format!("Failed to run change-genesis-time command: {}", e)) } - ("create-payload-header", Some(matches)) => create_payload_header::run::<E>(matches) + Some(("create-payload-header", matches)) => create_payload_header::run::<E>(matches) .map_err(|e| format!("Failed to run create-payload-header command: {}", e)), - ("replace-state-pubkeys", Some(matches)) => { + Some(("replace-state-pubkeys", matches)) => { let testnet_dir = get_testnet_dir()?; replace_state_pubkeys::run::<E>(testnet_dir, matches) .map_err(|e| format!("Failed to run replace-state-pubkeys command: {}", e)) } - ("new-testnet", Some(matches)) => { + Some(("new-testnet", matches)) => { let testnet_dir = get_testnet_dir()?; new_testnet::run::<E>(testnet_dir, matches) .map_err(|e| format!("Failed to run new_testnet command: {}", e)) } - ("check-deposit-data", Some(matches)) => check_deposit_data::run(matches) + Some(("check-deposit-data", matches)) => check_deposit_data::run(matches) .map_err(|e| format!("Failed to run check-deposit-data command: {}", e)), - ("generate-bootnode-enr", Some(matches)) => generate_bootnode_enr::run::<E>(matches) + Some(("generate-bootnode-enr", matches)) => generate_bootnode_enr::run::<E>(matches) .map_err(|e| format!("Failed to run generate-bootnode-enr command: {}", e)), - ("insecure-validators", Some(matches)) => insecure_validators::run(matches) + Some(("insecure-validators", matches)) => insecure_validators::run(matches) .map_err(|e| format!("Failed to run insecure-validators command: {}", e)), - ("mnemonic-validators", Some(matches)) => mnemonic_validators::run(matches) + Some(("mnemonic-validators", matches)) => mnemonic_validators::run(matches) .map_err(|e| format!("Failed to run mnemonic-validators command: {}", e)), - ("indexed-attestations", Some(matches)) => indexed_attestations::run::<E>(matches) + Some(("indexed-attestations", matches)) => indexed_attestations::run::<E>(matches) .map_err(|e| 
format!("Failed to run indexed-attestations command: {}", e)), - ("block-root", Some(matches)) => { + Some(("block-root", matches)) => { let network_config = get_network_config()?; block_root::run::(env, network_config, matches) .map_err(|e| format!("Failed to run block-root command: {}", e)) } - ("state-root", Some(matches)) => { + Some(("state-root", matches)) => { let network_config = get_network_config()?; state_root::run::(env, network_config, matches) .map_err(|e| format!("Failed to run state-root command: {}", e)) } - ("mock-el", Some(matches)) => mock_el::run::(env, matches) + Some(("mock-el", matches)) => mock_el::run::(env, matches) .map_err(|e| format!("Failed to run mock-el command: {}", e)), - (other, _) => Err(format!("Unknown subcommand {}. See --help.", other)), + Some((other, _)) => Err(format!("Unknown subcommand {}. See --help.", other)), + _ => Err("No subcommand provided. See --help.".to_string()), } } diff --git a/lcli/src/new_testnet.rs b/lcli/src/new_testnet.rs index f6bfb2ac01..57b1199917 100644 --- a/lcli/src/new_testnet.rs +++ b/lcli/src/new_testnet.rs @@ -30,7 +30,7 @@ pub fn run(testnet_dir_path: PathBuf, matches: &ArgMatches) -> Resul let deposit_contract_address: Address = parse_required(matches, "deposit-contract-address")?; let deposit_contract_deploy_block = parse_required(matches, "deposit-contract-deploy-block")?; - let overwrite_files = matches.is_present("force"); + let overwrite_files = matches.get_flag("force"); if testnet_dir_path.exists() && !overwrite_files { return Err(format!( @@ -154,7 +154,7 @@ pub fn run(testnet_dir_path: PathBuf, matches: &ArgMatches) -> Resul (eth1_block_hash, genesis_time) }; - let genesis_state_bytes = if matches.is_present("interop-genesis-state") { + let genesis_state_bytes = if matches.get_flag("interop-genesis-state") { let keypairs = generate_deterministic_keypairs(validator_count); let keypairs: Vec<_> = keypairs.into_iter().map(|kp| (kp.clone(), kp)).collect(); @@ -167,7 +167,7 @@ pub fn run(testnet_dir_path: PathBuf, matches: &ArgMatches) -> Resul )?; Some(genesis_state.as_ssz_bytes()) - } else if matches.is_present("derived-genesis-state") { + } else if matches.get_flag("derived-genesis-state") { let mnemonic_phrase: String = clap_utils::parse_required(matches, "mnemonic-phrase")?; let mnemonic = Mnemonic::from_phrase(&mnemonic_phrase, Language::English).map_err(|e| { format!( diff --git a/lcli/src/parse_ssz.rs b/lcli/src/parse_ssz.rs index e86ffb73dc..3aa77e5700 100644 --- a/lcli/src/parse_ssz.rs +++ b/lcli/src/parse_ssz.rs @@ -31,8 +31,12 @@ pub fn run_parse_ssz( network_config: Eth2NetworkConfig, matches: &ArgMatches, ) -> Result<(), String> { - let type_str = matches.value_of("type").ok_or("No type supplied")?; - let filename = matches.value_of("ssz-file").ok_or("No file supplied")?; + let type_str = matches + .get_one::("type") + .ok_or("No type supplied")?; + let filename = matches + .get_one::("ssz-file") + .ok_or("No file supplied")?; let format = parse_required(matches, "format")?; let bytes = if filename.ends_with("ssz_snappy") { @@ -58,7 +62,7 @@ pub fn run_parse_ssz( // More fork-specific decoders may need to be added in future, but shouldn't be 100% necessary, // as the fork-generic decoder will always be available (requires correct --network flag). 
- match type_str { + match type_str.as_str() { "SignedBeaconBlock" => decode_and_print::<SignedBeaconBlock<E>>( &bytes, |bytes| SignedBeaconBlock::from_ssz_bytes(bytes, spec), diff --git a/lcli/src/replace_state_pubkeys.rs b/lcli/src/replace_state_pubkeys.rs index e8d012b16e..4b8d6c8253 100644 --- a/lcli/src/replace_state_pubkeys.rs +++ b/lcli/src/replace_state_pubkeys.rs @@ -13,13 +13,13 @@ use types::{BeaconState, DepositData, EthSpec, Hash256, SignatureBytes, DEPOSIT_TREE_DEPTH}; pub fn run<E: EthSpec>(testnet_dir: PathBuf, matches: &ArgMatches) -> Result<(), String> { let path = matches - .value_of("ssz-state") + .get_one::<String>("ssz-state") .ok_or("ssz-state not specified")? .parse::<PathBuf>() .map_err(|e| format!("Unable to parse ssz-state: {}", e))?; let mnemonic_phrase = matches - .value_of("mnemonic") + .get_one::<String>("mnemonic") .ok_or("mnemonic not specified")?; let eth2_network_config = Eth2NetworkConfig::load(testnet_dir)?; diff --git a/lcli/src/skip_slots.rs b/lcli/src/skip_slots.rs index d421c077d8..a2173f10df 100644 --- a/lcli/src/skip_slots.rs +++ b/lcli/src/skip_slots.rs @@ -75,7 +75,7 @@ pub fn run<E: EthSpec>( let runs: usize = parse_required(matches, "runs")?; let slots: u64 = parse_required(matches, "slots")?; let cli_state_root: Option<Hash256> = parse_optional(matches, "state-root")?; - let partial: bool = matches.is_present("partial-state-advance"); + let partial: bool = matches.get_flag("partial-state-advance"); info!("Using {} spec", E::spec_name()); info!("Advancing {} slots", slots); diff --git a/lcli/src/transition_blocks.rs b/lcli/src/transition_blocks.rs index 5c7231a9ed..7097908677 100644 --- a/lcli/src/transition_blocks.rs +++ b/lcli/src/transition_blocks.rs @@ -117,9 +117,9 @@ pub fn run<E: EthSpec>( let beacon_url: Option<SensitiveUrl> = parse_optional(matches, "beacon-url")?; let runs: usize = parse_required(matches, "runs")?; let config = Config { - no_signature_verification: matches.is_present("no-signature-verification"), - exclude_cache_builds: matches.is_present("exclude-cache-builds"), - exclude_post_block_thc: matches.is_present("exclude-post-block-thc"), + no_signature_verification: matches.get_flag("no-signature-verification"), + exclude_cache_builds: matches.get_flag("exclude-cache-builds"), + exclude_post_block_thc: matches.get_flag("exclude-post-block-thc"), }; info!("Using {} spec", E::spec_name()); diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 54faa03a31..b6d4166b6a 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -28,7 +28,6 @@ jemalloc = ["malloc_utils/jemalloc"] [dependencies] beacon_node = { workspace = true } slog = { workspace = true } -sloggers = { workspace = true } types = { workspace = true } bls = { workspace = true } ethereum_hashing = { workspace = true } @@ -54,7 +53,6 @@ unused_port = { workspace = true } database_manager = { path = "../database_manager" } slasher = { workspace = true } validator_manager = { path = "../validator_manager" } -tracing-subscriber = { workspace = true } logging = { workspace = true } [dev-dependencies] diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index 932b125dc6..5743bedfd7 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -1,13 +1,16 @@ mod metrics; use beacon_node::ProductionBeaconNode; -use clap::{App, Arg, ArgMatches}; -use clap_utils::{flags::DISABLE_MALLOC_TUNING_FLAG, get_eth2_network_config}; +use clap::{Arg, ArgAction, ArgMatches, Command}; +use clap_utils::{ flags::DISABLE_MALLOC_TUNING_FLAG, get_color_style, get_eth2_network_config, FLAG_HEADER, }; use directory::{parse_path_or_default, DEFAULT_BEACON_NODE_DIR, 
DEFAULT_VALIDATOR_DIR}; use environment::{EnvironmentBuilder, LoggerConfig}; use eth2_network_config::{Eth2NetworkConfig, DEFAULT_HARDCODED_NETWORK, HARDCODED_NET_NAMES}; use ethereum_hashing::have_sha_extensions; use futures::TryFutureExt; +use lazy_static::lazy_static; use lighthouse_version::VERSION; use malloc_utils::configure_memory_allocator; use slog::{crit, info}; @@ -18,6 +21,25 @@ use task_executor::ShutdownReason; use types::{EthSpec, EthSpecId}; use validator_client::ProductionValidatorClient; +lazy_static! { + pub static ref SHORT_VERSION: String = VERSION.replace("Lighthouse/", ""); + pub static ref LONG_VERSION: String = format!( + "{}\n\ + BLS library: {}\n\ + SHA256 hardware acceleration: {}\n\ + Allocator: {}\n\ + Profile: {}\n\ + Specs: mainnet (true), minimal ({}), gnosis ({})", + SHORT_VERSION.as_str(), + bls_library_name(), + have_sha_extensions(), + allocator_name(), + build_profile_name(), + cfg!(feature = "spec-minimal"), + cfg!(feature = "gnosis"), + ); +} + fn bls_library_name() -> &'static str { if cfg!(feature = "portable") { "blst-portable" @@ -54,41 +76,31 @@ fn main() { } // Parse the CLI parameters. - let matches = App::new("Lighthouse") - .version(VERSION.replace("Lighthouse/", "").as_str()) + let matches = Command::new("Lighthouse") + .version(SHORT_VERSION.as_str()) .author("Sigma Prime <contact@sigmaprime.io>") - .setting(clap::AppSettings::ColoredHelp) + .styles(get_color_style()) + .next_line_help(true) + .term_width(80) + .disable_help_flag(true) .about( "Ethereum 2.0 client by Sigma Prime. Provides a full-featured beacon \ node, a validator client and utilities for managing validator accounts.", ) - .long_version( - format!( - "{}\n\ - BLS library: {}\n\ - SHA256 hardware acceleration: {}\n\ - Allocator: {}\n\ - Profile: {}\n\ - Specs: mainnet (true), minimal ({}), gnosis ({})", - VERSION.replace("Lighthouse/", ""), - bls_library_name(), - have_sha_extensions(), - allocator_name(), - build_profile_name(), - cfg!(feature = "spec-minimal"), - cfg!(feature = "gnosis"), - ).as_str() - ) + .long_version(LONG_VERSION.as_str()) + .display_order(0) .arg( - Arg::with_name("env_log") - .short("l") + Arg::new("env_log") + .short('l') .help( "DEPRECATED Enables environment logging giving access to sub-protocol logs such as discv5 and libp2p", ) - .takes_value(false), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("logfile") + Arg::new("logfile") .long("logfile") .value_name("FILE") .help( @@ -97,115 +109,135 @@ fn main() { future logs are stored. 
\ Once the number of log files exceeds the value specified in \ `--logfile-max-number` the oldest log file will be overwritten.") - .takes_value(true) - .global(true), + .action(ArgAction::Set) + .global(true) + .display_order(0) ) .arg( - Arg::with_name("logfile-debug-level") + Arg::new("logfile-debug-level") .long("logfile-debug-level") .value_name("LEVEL") .help("The verbosity level used when emitting logs to the log file.") - .takes_value(true) - .possible_values(&["info", "debug", "trace", "warn", "error", "crit"]) + .action(ArgAction::Set) + .value_parser(["info", "debug", "trace", "warn", "error", "crit"]) .default_value("debug") - .global(true), + .global(true) + .display_order(0) ) .arg( - Arg::with_name("logfile-format") + Arg::new("logfile-format") .long("logfile-format") .value_name("FORMAT") .help("Specifies the log format used when emitting logs to the logfile.") - .possible_values(&["DEFAULT", "JSON"]) - .takes_value(true) + .value_parser(["DEFAULT", "JSON"]) + .action(ArgAction::Set) .global(true) + .display_order(0) ) .arg( - Arg::with_name("logfile-max-size") + Arg::new("logfile-max-size") .long("logfile-max-size") .value_name("SIZE") .help( "The maximum size (in MB) each log file can grow to before rotating. If set \ to 0, background file logging is disabled.") - .takes_value(true) + .action(ArgAction::Set) .default_value("200") - .global(true), + .global(true) + .display_order(0) ) .arg( - Arg::with_name("logfile-max-number") + Arg::new("logfile-max-number") .long("logfile-max-number") .value_name("COUNT") .help( "The maximum number of log files that will be stored. If set to 0, \ background file logging is disabled.") - .takes_value(true) + .action(ArgAction::Set) .default_value("5") - .global(true), + .global(true) + .display_order(0) ) .arg( - Arg::with_name("logfile-compress") + Arg::new("logfile-compress") .long("logfile-compress") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .help( "If present, compress old log files. This can help reduce the space needed \ to store old logs.") - .global(true), + .global(true) + .display_order(0) ) .arg( - Arg::with_name("logfile-no-restricted-perms") + Arg::new("logfile-no-restricted-perms") .long("logfile-no-restricted-perms") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .help( "If present, log files will be generated as world-readable meaning they can be read by \ any user on the machine. Note that logs can often contain sensitive information \ about your validator and so this flag should be used with caution. 
For Windows users, \ the log file permissions will be inherited from the parent folder.") - .global(true), + .global(true) + .display_order(0) ) .arg( - Arg::with_name("log-format") + Arg::new("log-format") .long("log-format") .value_name("FORMAT") .help("Specifies the log format used when emitting logs to the terminal.") - .possible_values(&["JSON"]) - .takes_value(true) - .global(true), + .value_parser(["JSON"]) + .action(ArgAction::Set) + .global(true) + .display_order(0) ) .arg( - Arg::with_name("log-color") + Arg::new("log-color") .long("log-color") .alias("log-colour") .help("Force outputting colors when emitting logs to the terminal.") - .global(true), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .global(true) + .display_order(0) ) .arg( - Arg::with_name("disable-log-timestamp") + Arg::new("disable-log-timestamp") .long("disable-log-timestamp") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .help("If present, do not include timestamps in logging output.") - .global(true), + .global(true) + .display_order(0) ) .arg( - Arg::with_name("debug-level") + Arg::new("debug-level") .long("debug-level") .value_name("LEVEL") .help("Specifies the verbosity level used when emitting logs to the terminal.") - .takes_value(true) - .possible_values(&["info", "debug", "trace", "warn", "error", "crit"]) + .action(ArgAction::Set) + .value_parser(["info", "debug", "trace", "warn", "error", "crit"]) .global(true) - .default_value("info"), + .default_value("info") + .display_order(0) ) .arg( - Arg::with_name("datadir") + Arg::new("datadir") .long("datadir") - .short("d") + .short('d') .value_name("DIR") .global(true) .help( "Used to specify a custom root data directory for lighthouse keys and databases. \ Defaults to $HOME/.lighthouse/{network} where network is the value of the `network` flag \ Note: Users should specify separate custom datadirs for different networks.") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("testnet-dir") - .short("t") + Arg::new("testnet-dir") + .short('t') .long("testnet-dir") .value_name("DIR") .help( @@ -213,57 +245,66 @@ fn main() { a hard-coded Lighthouse testnet. Only effective if there is no \ existing database.", ) - .takes_value(true) - .global(true), + .action(ArgAction::Set) + .global(true) + .display_order(0) ) .arg( - Arg::with_name("network") + Arg::new("network") .long("network") .value_name("network") .help("Name of the Eth2 chain Lighthouse will sync and follow.") - .possible_values(HARDCODED_NET_NAMES) + .value_parser(HARDCODED_NET_NAMES.to_vec()) .conflicts_with("testnet-dir") - .takes_value(true) + .action(ArgAction::Set) .global(true) - + .display_order(0) ) .arg( - Arg::with_name("dump-config") + Arg::new("dump-config") .long("dump-config") - .hidden(true) + .hide(true) .help("Dumps the config to a desired location. Used for testing only.") - .takes_value(true) + .action(ArgAction::Set) .global(true) + .display_order(0) ) .arg( - Arg::with_name("dump-chain-config") + Arg::new("dump-chain-config") .long("dump-chain-config") - .hidden(true) + .hide(true) .help("Dumps the chain config to a desired location. 
Used for testing only.") - .takes_value(true) + .action(ArgAction::Set) .global(true) + .display_order(0) ) .arg( - Arg::with_name("immediate-shutdown") + Arg::new("immediate-shutdown") .long("immediate-shutdown") - .hidden(true) + .hide(true) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .help( "Shuts down immediately after the Beacon Node or Validator has successfully launched. \ Used for testing only, DO NOT USE IN PRODUCTION.") .global(true) + .display_order(0) ) .arg( - Arg::with_name(DISABLE_MALLOC_TUNING_FLAG) + Arg::new(DISABLE_MALLOC_TUNING_FLAG) .long(DISABLE_MALLOC_TUNING_FLAG) .help( "If present, do not configure the system allocator. Providing this flag will \ generally increase memory usage, it should only be provided when debugging \ specific memory allocation issues." ) - .global(true), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .global(true) + .display_order(0) ) .arg( - Arg::with_name("terminal-total-difficulty-override") + Arg::new("terminal-total-difficulty-override") .long("terminal-total-difficulty-override") .value_name("INTEGER") .help("Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY parameter. \ @@ -272,11 +313,12 @@ fn main() { the broad Ethereum community has elected to override the terminal difficulty. \ Incorrect use of this flag will cause your node to experience a consensus \ failure. Be extremely careful with this flag.") - .takes_value(true) + .action(ArgAction::Set) .global(true) + .display_order(0) ) .arg( - Arg::with_name("terminal-block-hash-override") + Arg::new("terminal-block-hash-override") .long("terminal-block-hash-override") .value_name("TERMINAL_BLOCK_HASH") .help("Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH parameter. \ @@ -285,11 +327,12 @@ fn main() { Incorrect use of this flag will cause your node to experience a consensus \ failure. Be extremely careful with this flag.") .requires("terminal-block-hash-epoch-override") - .takes_value(true) + .action(ArgAction::Set) .global(true) + .display_order(0) ) .arg( - Arg::with_name("terminal-block-hash-epoch-override") + Arg::new("terminal-block-hash-epoch-override") .long("terminal-block-hash-epoch-override") .value_name("EPOCH") .help("Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH \ @@ -298,11 +341,12 @@ fn main() { Incorrect use of this flag will cause your node to experience a consensus \ failure. Be extremely careful with this flag.") .requires("terminal-block-hash-override") - .takes_value(true) + .action(ArgAction::Set) .global(true) + .display_order(0) ) .arg( - Arg::with_name("safe-slots-to-import-optimistically") + Arg::new("safe-slots-to-import-optimistically") .long("safe-slots-to-import-optimistically") .value_name("INTEGER") .help("Used to coordinate manual overrides of the SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY \ @@ -311,11 +355,12 @@ fn main() { of an attack at the PoS transition block. Incorrect use of this flag can cause your \ node to possibly accept an invalid chain or sync more slowly. Be extremely careful with \ this flag.") - .takes_value(true) + .action(ArgAction::Set) .global(true) + .display_order(0) ) .arg( - Arg::with_name("genesis-state-url") + Arg::new("genesis-state-url") .long("genesis-state-url") .value_name("URL") .help( @@ -324,19 +369,30 @@ fn main() { If not supplied, a default URL or the --checkpoint-sync-url may be used. 
\ If the genesis state is already included in this binary then this value will be ignored.", ) - .takes_value(true) - .global(true), + .action(ArgAction::Set) + .global(true) + .display_order(0) ) .arg( - Arg::with_name("genesis-state-url-timeout") + Arg::new("genesis-state-url-timeout") .long("genesis-state-url-timeout") .value_name("SECONDS") .help( "The timeout in seconds for the request to --genesis-state-url.", ) - .takes_value(true) + .action(ArgAction::Set) .default_value("180") - .global(true), + .global(true) + .display_order(0) + ) + .arg( + Arg::new("help") + .long("help") + .short('h') + .help("Prints help information") + .action(ArgAction::HelpLong) + .display_order(0) + .help_heading(FLAG_HEADER) ) .subcommand(beacon_node::cli_app()) .subcommand(boot_node::cli_app()) @@ -352,7 +408,7 @@ fn main() { // Only apply this optimization for the beacon node. It's the only process with a substantial // memory footprint. let is_beacon_node = matches.subcommand_name() == Some("beacon_node"); - if is_beacon_node && !matches.is_present(DISABLE_MALLOC_TUNING_FLAG) { + if is_beacon_node && !matches.get_flag(DISABLE_MALLOC_TUNING_FLAG) { if let Err(e) = configure_memory_allocator() { eprintln!( "Unable to configure the memory allocator: {} \n\ @@ -370,7 +426,7 @@ fn main() { if let Some(bootnode_matches) = matches.subcommand_matches("boot_node") { // The bootnode uses the main debug-level flag let debug_info = matches - .value_of("debug-level") + .get_one::<String>("debug-level") .expect("Debug-level must be present") .into(); @@ -430,53 +486,53 @@ fn run<E: EthSpec>( } let debug_level = matches - .value_of("debug-level") + .get_one::<String>("debug-level") .ok_or("Expected --debug-level flag")?; - let log_format = matches.value_of("log-format"); + let log_format = matches.get_one::<String>("log-format"); - let log_color = matches.is_present("log-color"); + let log_color = matches.get_flag("log-color"); - let disable_log_timestamp = matches.is_present("disable-log-timestamp"); + let disable_log_timestamp = matches.get_flag("disable-log-timestamp"); let logfile_debug_level = matches - .value_of("logfile-debug-level") + .get_one::<String>("logfile-debug-level") .ok_or("Expected --logfile-debug-level flag")?; let logfile_format = matches - .value_of("logfile-format") + .get_one::<String>("logfile-format") // Ensure that `logfile-format` defaults to the value of `log-format`. - .or_else(|| matches.value_of("log-format")); + .or_else(|| matches.get_one::<String>("log-format")); let logfile_max_size: u64 = matches - .value_of("logfile-max-size") + .get_one::<String>("logfile-max-size") .ok_or("Expected --logfile-max-size flag")? .parse() .map_err(|e| format!("Failed to parse `logfile-max-size`: {:?}", e))?; let logfile_max_number: usize = matches - .value_of("logfile-max-number") + .get_one::<String>("logfile-max-number") .ok_or("Expected --logfile-max-number flag")? .parse() .map_err(|e| format!("Failed to parse `logfile-max-number`: {:?}", e))?; - let logfile_compress = matches.is_present("logfile-compress"); + let logfile_compress = matches.get_flag("logfile-compress"); - let logfile_restricted = !matches.is_present("logfile-no-restricted-perms"); + let logfile_restricted = !matches.get_flag("logfile-no-restricted-perms"); // Construct the path to the log file. let mut log_path: Option<PathBuf> = clap_utils::parse_optional(matches, "logfile")?; if log_path.is_none() { log_path = match matches.subcommand() { - ("beacon_node", _) => Some( + Some(("beacon_node", _)) => Some( parse_path_or_default(matches, "datadir")? 
.join(DEFAULT_BEACON_NODE_DIR) .join("logs") .join("beacon") .with_extension("log"), ), - ("validator_client", Some(vc_matches)) => { - let base_path = if vc_matches.is_present("validators-dir") { + Some(("validator_client", vc_matches)) => { + let base_path = if vc_matches.contains_id("validators-dir") { parse_path_or_default(vc_matches, "validators-dir")? } else { parse_path_or_default(matches, "datadir")?.join(DEFAULT_VALIDATOR_DIR) @@ -495,9 +551,9 @@ fn run<E: EthSpec>( let sse_logging = { if let Some(bn_matches) = matches.subcommand_matches("beacon_node") { - bn_matches.is_present("gui") + bn_matches.get_flag("gui") } else if let Some(vc_matches) = matches.subcommand_matches("validator_client") { - vc_matches.is_present("http") + vc_matches.get_flag("http") } else { false } @@ -626,13 +682,13 @@ fn run<E: EthSpec>( ); match matches.subcommand() { - ("beacon_node", Some(matches)) => { + Some(("beacon_node", matches)) => { let context = environment.core_context(); let log = context.log().clone(); let executor = context.executor.clone(); let mut config = beacon_node::get_config::<E>(matches, &context)?; config.logger_config = logger_config; - let shutdown_flag = matches.is_present("immediate-shutdown"); + let shutdown_flag = matches.get_flag("immediate-shutdown"); // Dump configs if `dump-config` or `dump-chain-config` flags are set clap_utils::check_dump_configs::<_, E>(matches, &config, &context.eth2_config.spec)?; executor.clone().spawn( @@ -653,13 +709,13 @@ fn run<E: EthSpec>( "beacon_node", ); } - ("validator_client", Some(matches)) => { + Some(("validator_client", matches)) => { let context = environment.core_context(); let log = context.log().clone(); let executor = context.executor.clone(); let config = validator_client::Config::from_cli(matches, context.log()) .map_err(|e| format!("Unable to initialize validator config: {}", e))?; - let shutdown_flag = matches.is_present("immediate-shutdown"); + let shutdown_flag = matches.get_flag("immediate-shutdown"); // Dump configs if `dump-config` or `dump-chain-config` flags are set clap_utils::check_dump_configs::<_, E>(matches, &config, &context.eth2_config.spec)?; if !shutdown_flag { diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 62bb067273..f8e1182e89 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -636,6 +636,26 @@ fn builder_fallback_flags() { ); } +#[test] +fn builder_get_header_timeout() { + run_payload_builder_flag_test_with_config( + "builder", + "http://meow.cats", + Some("builder-header-timeout"), + Some("1500"), + |config| { + assert_eq!( + config + .execution_layer + .as_ref() + .unwrap() + .builder_header_timeout, + Some(Duration::from_millis(1500)) + ); + }, + ); +} + #[test] fn builder_user_agent() { run_payload_builder_flag_test_with_config( @@ -1578,7 +1598,7 @@ fn empty_inbound_rate_limiter_flag() { #[test] fn disable_inbound_rate_limiter_flag() { CommandLineTest::new() - .flag("inbound-rate-limiter", Some("disabled")) + .flag("disable-inbound-rate-limiter", None) .run_with_zero_port() .with_config(|config| assert_eq!(config.network.inbound_rate_limiter_config, None)); } @@ -2134,7 +2154,6 @@ fn slasher_broadcast_flag_no_args() { CommandLineTest::new() .flag("slasher", None) .flag("slasher-max-db-size", Some("1")) - .flag("slasher-broadcast", None) .run_with_zero_port() .with_config(|config| { let slasher_config = config @@ -2313,7 +2332,7 @@ fn proposer_re_org_disallowed_offsets_default() { #[test] fn proposer_re_org_disallowed_offsets_override() { CommandLineTest::new() - 
.flag("--proposer-reorg-disallowed-offsets", Some("1,2,3")) + .flag("proposer-reorg-disallowed-offsets", Some("1,2,3")) .run_with_zero_port() .with_config(|config| { assert_eq!( @@ -2327,7 +2346,7 @@ fn proposer_re_org_disallowed_offsets_override() { #[should_panic] fn proposer_re_org_disallowed_offsets_invalid() { CommandLineTest::new() - .flag("--proposer-reorg-disallowed-offsets", Some("32,33,34")) + .flag("proposer-reorg-disallowed-offsets", Some("32,33,34")) .run_with_zero_port(); } diff --git a/lighthouse/tests/validator_client.rs b/lighthouse/tests/validator_client.rs index cdf8fa15aa..9ca6ab4333 100644 --- a/lighthouse/tests/validator_client.rs +++ b/lighthouse/tests/validator_client.rs @@ -594,23 +594,8 @@ fn wrong_broadcast_flag() { #[test] fn latency_measurement_service() { - CommandLineTest::new().run().with_config(|config| { - assert!(config.enable_latency_measurement_service); - }); CommandLineTest::new() - .flag("latency-measurement-service", None) - .run() - .with_config(|config| { - assert!(config.enable_latency_measurement_service); - }); - CommandLineTest::new() - .flag("latency-measurement-service", Some("true")) - .run() - .with_config(|config| { - assert!(config.enable_latency_measurement_service); - }); - CommandLineTest::new() - .flag("latency-measurement-service", Some("false")) + .flag("disable-latency-measurement-service", None) .run() .with_config(|config| { assert!(!config.enable_latency_measurement_service); diff --git a/lighthouse/tests/validator_manager.rs b/lighthouse/tests/validator_manager.rs index fab1cfebf4..bca6a18ab5 100644 --- a/lighthouse/tests/validator_manager.rs +++ b/lighthouse/tests/validator_manager.rs @@ -55,7 +55,12 @@ impl CommandLineTest { } fn run(mut cmd: Command, should_succeed: bool) { - let output = cmd.output().expect("process should complete"); + let output = cmd + .stdin(Stdio::piped()) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .output() + .expect("process should complete"); if output.status.success() != should_succeed { let stdout = String::from_utf8(output.stdout).unwrap(); let stderr = String::from_utf8(output.stderr).unwrap(); diff --git a/scripts/cli.sh b/scripts/cli.sh index 148b23966c..6ca019b39e 100755 --- a/scripts/cli.sh +++ b/scripts/cli.sh @@ -12,9 +12,6 @@ write_to_file() { local file="$2" local program="$3" - # Remove first line of cmd to get rid of commit specific numbers. - cmd=${cmd#*$'\n'} - # We need to add the header and the backticks to create the code block. 
printf "# %s\n\n\`\`\`\n%s\n\`\`\`" "$program" "$cmd" > "$file" diff --git a/slasher/Cargo.toml b/slasher/Cargo.toml index 90fb54cd1a..ef5cb8249e 100644 --- a/slasher/Cargo.toml +++ b/slasher/Cargo.toml @@ -25,7 +25,6 @@ rand = { workspace = true } safe_arith = { workspace = true } serde = { workspace = true } slog = { workspace = true } -sloggers = { workspace = true } tree_hash = { workspace = true } tree_hash_derive = { workspace = true } types = { workspace = true } diff --git a/testing/ef_tests/Cargo.toml b/testing/ef_tests/Cargo.toml index f3d00fa035..fc4614f5d4 100644 --- a/testing/ef_tests/Cargo.toml +++ b/testing/ef_tests/Cargo.toml @@ -24,19 +24,16 @@ serde_json = { workspace = true } serde_repr = { workspace = true } serde_yaml = { workspace = true } eth2_network_config = { workspace = true } -ethereum_serde_utils = { workspace = true } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } tree_hash = { workspace = true } tree_hash_derive = { workspace = true } -cached_tree_hash = { workspace = true } state_processing = { workspace = true } swap_or_not_shuffle = { workspace = true } types = { workspace = true } snap = { workspace = true } fs2 = { workspace = true } beacon_chain = { workspace = true } -store = { workspace = true } fork_choice = { workspace = true } execution_layer = { workspace = true } logging = { workspace = true } diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index 2e39294582..b3b20d6efe 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -180,10 +180,24 @@ impl LoadCase for ForkChoiceTest { valid, }) } - Step::Attestation { attestation } => { - ssz_decode_file(&path.join(format!("{}.ssz_snappy", attestation))) - .map(|attestation| Step::Attestation { attestation }) - } + Step::Attestation { attestation } => match fork_name { + ForkName::Base + | ForkName::Altair + | ForkName::Bellatrix + | ForkName::Capella + | ForkName::Deneb => ssz_decode_file( + &path.join(format!("{}.ssz_snappy", attestation)), + ) + .map(|attestation| Step::Attestation { + attestation: Attestation::Base(attestation), + }), + ForkName::Electra => ssz_decode_file( + &path.join(format!("{}.ssz_snappy", attestation)), + ) + .map(|attestation| Step::Attestation { + attestation: Attestation::Electra(attestation), + }), + }, Step::AttesterSlashing { attester_slashing } => match fork_name { ForkName::Base | ForkName::Altair diff --git a/testing/ef_tests/src/cases/operations.rs b/testing/ef_tests/src/cases/operations.rs index b922668c0a..d87770a4df 100644 --- a/testing/ef_tests/src/cases/operations.rs +++ b/testing/ef_tests/src/cases/operations.rs @@ -78,8 +78,12 @@ impl Operation for Attestation { "attestation".into() } - fn decode(path: &Path, _fork_name: ForkName, _spec: &ChainSpec) -> Result { - ssz_decode_file(path) + fn decode(path: &Path, fork_name: ForkName, _spec: &ChainSpec) -> Result { + if fork_name < ForkName::Electra { + Ok(Self::Base(ssz_decode_file(path)?)) + } else { + Ok(Self::Electra(ssz_decode_file(path)?)) + } } fn apply_to( diff --git a/testing/execution_engine_integration/Cargo.toml b/testing/execution_engine_integration/Cargo.toml index 7f66658f0f..43d24cd123 100644 --- a/testing/execution_engine_integration/Cargo.toml +++ b/testing/execution_engine_integration/Cargo.toml @@ -10,7 +10,6 @@ serde_json = { workspace = true } task_executor = { workspace = true } tokio = { workspace = true } futures = { workspace = true } -environment = { 
workspace = true } execution_layer = { workspace = true } sensitive_url = { workspace = true } types = { workspace = true } diff --git a/testing/network_testing/README.md b/testing/network_testing/README.md index f97c3cff28..1dcf372dbd 100644 --- a/testing/network_testing/README.md +++ b/testing/network_testing/README.md @@ -50,11 +50,11 @@ $ cargo build --release --bin lighthouse --features network/disable-backfill Once built, it can run via checkpoint sync on any network, making sure we point to our mock-el -Prater testnet: +Holesky testnet: ``` -$ lighthouse --network prater bn --execution-jwt /tmp/mockel.jwt --checkpoint-sync-url -https://prater.checkpoint.sigp.io --execution-endpoint http://localhost:8551 +$ lighthouse --network holesky bn --execution-jwt /tmp/mockel.jwt --checkpoint-sync-url +https://holesky.checkpoint.sigp.io --execution-endpoint http://localhost:8551 ``` Mainnet: diff --git a/testing/simulator/Cargo.toml b/testing/simulator/Cargo.toml index d7ff7b3dd8..f8769b10e2 100644 --- a/testing/simulator/Cargo.toml +++ b/testing/simulator/Cargo.toml @@ -8,7 +8,6 @@ edition = { workspace = true } [dependencies] node_test_rig = { path = "../node_test_rig" } -eth1 = { workspace = true } execution_layer = { workspace = true } types = { workspace = true } parking_lot = { workspace = true } @@ -18,7 +17,5 @@ env_logger = { workspace = true } clap = { workspace = true } rayon = { workspace = true } sensitive_url = { path = "../../common/sensitive_url" } -ssz_types = { workspace = true } -ethereum-types = { workspace = true } eth2_network_config = { workspace = true } serde_json = { workspace = true } diff --git a/testing/simulator/src/basic_sim.rs b/testing/simulator/src/basic_sim.rs index 755bb71b43..f69d107e34 100644 --- a/testing/simulator/src/basic_sim.rs +++ b/testing/simulator/src/basic_sim.rs @@ -27,15 +27,32 @@ const SUGGESTED_FEE_RECIPIENT: [u8; 20] = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]; pub fn run_basic_sim(matches: &ArgMatches) -> Result<(), String> { - let node_count = value_t!(matches, "nodes", usize).expect("Missing nodes default"); - let proposer_nodes = - value_t!(matches, "proposer-nodes", usize).expect("Missing proposer-nodes default"); - let validators_per_node = value_t!(matches, "validators-per-node", usize) - .expect("Missing validators-per-node default"); - let speed_up_factor = - value_t!(matches, "speed-up-factor", u64).expect("Missing speed-up-factor default"); - let log_level = value_t!(matches, "debug-level", String).expect("Missing default log-level"); - let continue_after_checks = matches.is_present("continue-after-checks"); + let node_count = matches + .get_one::<String>("nodes") + .expect("missing nodes default") + .parse::<usize>() + .expect("missing nodes default"); + let proposer_nodes = matches + .get_one::<String>("proposer-nodes") + .unwrap_or(&String::from("0")) + .parse::<usize>() + .unwrap_or(0); + println!("PROPOSER-NODES: {}", proposer_nodes); + let validators_per_node = matches + .get_one::<String>("validators-per-node") + .expect("missing validators-per-node default") + .parse::<usize>() + .expect("missing validators-per-node default"); + let speed_up_factor = matches + .get_one::<String>("speed-up-factor") + .expect("missing speed-up-factor default") + .parse::<u64>() + .expect("missing speed-up-factor default"); + let log_level = matches + .get_one::<String>("debug-level") + .expect("missing debug-level"); + + let continue_after_checks = matches.get_flag("continue-after-checks"); println!("Basic Simulator:"); println!(" nodes: {}", node_count); @@ -64,7 +81,7 @@ pub fn 
run_basic_sim(matches: &ArgMatches) -> Result<(), String> { .initialize_logger(LoggerConfig { path: None, debug_level: log_level.clone(), - logfile_debug_level: log_level, + logfile_debug_level: log_level.clone(), log_format: None, logfile_format: None, log_color: false, diff --git a/testing/simulator/src/cli.rs b/testing/simulator/src/cli.rs index 00af7e560c..a82c8b8577 100644 --- a/testing/simulator/src/cli.rs +++ b/testing/simulator/src/cli.rs @@ -1,12 +1,12 @@ -use clap::{App, Arg, SubCommand}; +use clap::{crate_version, Arg, ArgAction, Command}; -pub fn cli_app<'a, 'b>() -> App<'a, 'b> { - App::new("simulator") +pub fn cli_app() -> Command { + Command::new("simulator") .version(crate_version!()) .author("Sigma Prime <contact@sigmaprime.io>") .about("Options for interacting with simulator") .subcommand( - SubCommand::with_name("basic-sim") + Command::new("basic-sim") .about( "Runs a Beacon Chain simulation with `n` beacon node and validator clients, \ each with `v` validators. \ @@ -16,55 +16,55 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { exit immediately.", ) .arg( - Arg::with_name("nodes") - .short("n") + Arg::new("nodes") + .short('n') .long("nodes") - .takes_value(true) + .action(ArgAction::Set) .default_value("3") .help("Number of beacon nodes"), ) .arg( - Arg::with_name("proposer-nodes") - .short("p") + Arg::new("proposer-nodes") + .short('p') .long("proposer-nodes") - .takes_value(true) + .action(ArgAction::Set) .default_value("3") .help("Number of proposer-only beacon nodes"), ) .arg( - Arg::with_name("validators-per-node") - .short("v") + Arg::new("validators-per-node") + .short('v') .long("validators-per-node") - .takes_value(true) + .action(ArgAction::Set) .default_value("20") .help("Number of validators"), ) .arg( - Arg::with_name("speed-up-factor") - .short("s") + Arg::new("speed-up-factor") + .short('s') .long("speed-up-factor") - .takes_value(true) + .action(ArgAction::Set) .default_value("3") .help("Speed up factor. Please use a divisor of 12."), ) .arg( - Arg::with_name("debug-level") - .short("d") + Arg::new("debug-level") + .short('d') .long("debug-level") - .takes_value(true) + .action(ArgAction::Set) .default_value("debug") .help("Set the severity level of the logs."), ) .arg( - Arg::with_name("continue-after-checks") - .short("c") + Arg::new("continue-after-checks") + .short('c') .long("continue_after_checks") - .takes_value(false) + .action(ArgAction::SetTrue) .help("Continue after checks (default false)"), ), ) .subcommand( - SubCommand::with_name("fallback-sim") + Command::new("fallback-sim") .about( "Runs a Beacon Chain simulation with `c` validator clients where each VC is \ connected to `b` beacon nodes with `v` validators. 
\ @@ -76,50 +76,50 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { Otherwise, the simulation will exit and an error will be reported.", ) .arg( - Arg::with_name("vc-count") - .short("c") + Arg::new("vc-count") + .short('c') .long("vc-count") - .takes_value(true) + .action(ArgAction::Set) .default_value("3") .help("Number of validator clients."), ) .arg( - Arg::with_name("bns-per-vc") - .short("b") + Arg::new("bns-per-vc") + .short('b') .long("bns-per-vc") - .takes_value(true) + .action(ArgAction::Set) .default_value("2") .help("Number of beacon nodes per validator client."), ) .arg( - Arg::with_name("validators-per-vc") - .short("v") + Arg::new("validators-per-vc") + .short('v') .long("validators-per-vc") - .takes_value(true) + .action(ArgAction::Set) .default_value("20") .help("Number of validators per client."), ) .arg( - Arg::with_name("speed-up-factor") - .short("s") + Arg::new("speed-up-factor") + .short('s') .long("speed-up-factor") - .takes_value(true) + .action(ArgAction::Set) .default_value("3") .help("Speed up factor. Please use a divisor of 12."), ) .arg( - Arg::with_name("debug-level") - .short("d") + Arg::new("debug-level") + .short('d') .long("debug-level") - .takes_value(true) + .action(ArgAction::Set) .default_value("debug") .help("Set the severity level of the logs."), ) .arg( - Arg::with_name("continue-after-checks") - .short("c") + Arg::new("continue-after-checks") + .short('c') .long("continue_after_checks") - .takes_value(false) + .action(ArgAction::SetTrue) .help("Continue after checks (default false)"), ), ) diff --git a/testing/simulator/src/fallback_sim.rs b/testing/simulator/src/fallback_sim.rs index c9deeba04d..33f497f37f 100644 --- a/testing/simulator/src/fallback_sim.rs +++ b/testing/simulator/src/fallback_sim.rs @@ -34,15 +34,36 @@ const SUGGESTED_FEE_RECIPIENT: [u8; 20] = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]; pub fn run_fallback_sim(matches: &ArgMatches) -> Result<(), String> { - let vc_count = value_t!(matches, "vc-count", usize).expect("Missing validator-count default"); - let validators_per_vc = - value_t!(matches, "validators-per-vc", usize).expect("Missing validators-per-vc default"); - let bns_per_vc = value_t!(matches, "bns-per-vc", usize).expect("Missing bns-per-vc default"); + let vc_count = matches + .get_one::<String>("vc-count") + .expect("missing vc-count default") + .parse::<usize>() + .expect("missing vc-count default"); + + let validators_per_vc = matches + .get_one::<String>("validators-per-vc") + .expect("missing validators-per-vc default") + .parse::<usize>() + .expect("missing validators-per-vc default"); + + let bns_per_vc = matches + .get_one::<String>("bns-per-vc") + .expect("missing bns-per-vc default") + .parse::<usize>() + .expect("missing bns-per-vc default"); + assert!(bns_per_vc > 1); - let speed_up_factor = - value_t!(matches, "speed-up-factor", u64).expect("Missing speed-up-factor default"); - let log_level = value_t!(matches, "debug-level", String).expect("Missing default log-level"); - let continue_after_checks = matches.is_present("continue-after-checks"); + let speed_up_factor = matches + .get_one::<String>("speed-up-factor") + .expect("missing speed-up-factor default") + .parse::<u64>() + .expect("missing speed-up-factor default"); + + let log_level = matches + .get_one::<String>("debug-level") + .expect("missing debug-level default"); + + let continue_after_checks = matches.get_flag("continue-after-checks"); println!("Fallback Simulator:"); println!(" vc-count: {}", vc_count);
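The fallback_sim hunk shows the other half of the migration: boolean flags move from `takes_value(false)` plus `is_present` to `ArgAction::SetTrue` plus `get_flag`. A self-contained sketch under the same assumptions (only the `--continue_after_checks` flag is taken from the diff; the `sim` command is a stand-in):

```
use clap::{Arg, ArgAction, Command};

fn main() {
    // Hypothetical command; note the argument id uses hyphens while the
    // long option keeps the underscores from the original CLI.
    let cmd = Command::new("sim").arg(
        Arg::new("continue-after-checks")
            .long("continue_after_checks")
            .action(ArgAction::SetTrue),
    );

    // `get_flag` replaces v2's `is_present` for SetTrue actions; an absent
    // flag simply evaluates to false rather than panicking.
    let matches = cmd.get_matches_from(["sim", "--continue_after_checks"]);
    assert!(matches.get_flag("continue-after-checks"));
}
```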
@@ -70,7 +91,7 @@ pub fn run_fallback_sim(matches: &ArgMatches) -> Result<(), String> { .initialize_logger(LoggerConfig { path: None, debug_level: log_level.clone(), - logfile_debug_level: log_level, + logfile_debug_level: log_level.clone(), log_format: None, logfile_format: None, log_color: false, diff --git a/testing/simulator/src/main.rs b/testing/simulator/src/main.rs index d1a2d0dc67..03ee902c77 100644 --- a/testing/simulator/src/main.rs +++ b/testing/simulator/src/main.rs @@ -11,7 +11,6 @@ //! easy-to-find files and stdout only contained info from the simulation. //! -#[macro_use] extern crate clap; mod basic_sim; @@ -34,14 +33,14 @@ fn main() { let matches = cli_app().get_matches(); match matches.subcommand() { - ("basic-sim", Some(matches)) => match basic_sim::run_basic_sim(matches) { + Some(("basic-sim", matches)) => match basic_sim::run_basic_sim(matches) { Ok(()) => println!("Simulation exited successfully"), Err(e) => { eprintln!("Simulation exited with error: {}", e); std::process::exit(1) } }, - ("fallback-sim", Some(matches)) => match fallback_sim::run_fallback_sim(matches) { + Some(("fallback-sim", matches)) => match fallback_sim::run_fallback_sim(matches) { Ok(()) => println!("Simulation exited successfully"), Err(e) => { eprintln!("Simulation exited with error: {}", e);
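In clap v4, `ArgMatches::subcommand` returns `Option<(&str, &ArgMatches)>` rather than v2's `(&str, Option<&ArgMatches>)`, which is why every match arm in `main.rs` above gains a `Some((..))` wrapper. A compilable sketch of that dispatch shape (the `dispatch` helper is illustrative, not part of the diff):

```
use clap::{ArgMatches, Command};

// Illustrative dispatcher in the shape of the rewritten `main.rs` above.
fn dispatch(matches: &ArgMatches) -> Result<(), String> {
    // v4 returns Option<(&str, &ArgMatches)>; v2 returned
    // (&str, Option<&ArgMatches>), hence the new `Some((..))` patterns.
    match matches.subcommand() {
        Some(("basic-sim", _sub_matches)) => Ok(()),
        Some(("fallback-sim", _sub_matches)) => Ok(()),
        Some((unknown, _)) => Err(format!("unknown subcommand: {}", unknown)),
        None => Err("no subcommand supplied".to_string()),
    }
}

fn main() {
    let matches = Command::new("simulator")
        .subcommand(Command::new("basic-sim"))
        .subcommand(Command::new("fallback-sim"))
        .get_matches_from(["simulator", "basic-sim"]);
    dispatch(&matches).expect("basic-sim is a known subcommand");
}
```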
diff --git a/testing/web3signer_tests/src/lib.rs b/testing/web3signer_tests/src/lib.rs index 34493087c3..4187844cec 100644 --- a/testing/web3signer_tests/src/lib.rs +++ b/testing/web3signer_tests/src/lib.rs @@ -901,13 +901,14 @@ mod tests { } #[tokio::test] - async fn prater_base_types() { - test_base_types("prater", 4246).await + async fn mainnet_bellatrix_types() { + test_bellatrix_types("mainnet", 4244).await } #[tokio::test] - async fn prater_altair_types() { - test_altair_types("prater", 4247).await + async fn holesky_bellatrix_types() { + // web3signer does not support forks prior to Bellatrix on Holesky + test_bellatrix_types("holesky", 4247).await } #[tokio::test] diff --git a/validator_client/src/attestation_service.rs b/validator_client/src/attestation_service.rs index c885763717..bf6b2c49a2 100644 --- a/validator_client/src/attestation_service.rs +++ b/validator_client/src/attestation_service.rs @@ -14,11 +14,11 @@ use std::ops::Deref; use std::sync::Arc; use tokio::time::{sleep, sleep_until, Duration, Instant}; use tree_hash::TreeHash; +use types::ForkName; use types::{ - attestation::AttestationBase, AggregateSignature, Attestation, AttestationData, BitList, - ChainSpec, CommitteeIndex, EthSpec, Slot, + attestation::AttestationBase, AggregateSignature, Attestation, AttestationData, + AttestationElectra, BitList, BitVector, ChainSpec, CommitteeIndex, EthSpec, Slot, }; -use types::{AttestationElectra, BitVector, ForkName}; /// Builds an `AttestationService`. pub struct AttestationServiceBuilder { @@ -666,7 +666,7 @@ impl AttestationService { "aggregator" => signed_aggregate_and_proof.message().aggregator_index(), "signatures" => attestation.num_set_aggregation_bits(), "head_block" => format!("{:?}", attestation.data().beacon_block_root), - "committee_index" => attestation.data().index, + "committee_index" => attestation.committee_index(), "slot" => attestation.data().slot.as_u64(), "type" => "aggregated", ); @@ -680,7 +680,7 @@ impl AttestationService { "Failed to publish attestation"; "error" => %e, "aggregator" => signed_aggregate_and_proof.message().aggregator_index(), - "committee_index" => attestation.data().index, + "committee_index" => attestation.committee_index(), "slot" => attestation.data().slot.as_u64(), "type" => "aggregated", ); diff --git a/validator_client/src/cli.rs b/validator_client/src/cli.rs index 991b621f27..66b467c1e2 100644 --- a/validator_client/src/cli.rs +++ b/validator_client/src/cli.rs @@ -1,34 +1,47 @@ -use clap::{App, Arg}; +use clap::{builder::ArgPredicate, Arg, ArgAction, Command}; +use clap_utils::{get_color_style, FLAG_HEADER}; -pub fn cli_app<'a, 'b>() -> App<'a, 'b> { - App::new("validator_client") - .visible_aliases(&["v", "vc", "validator"]) - .setting(clap::AppSettings::ColoredHelp) +pub fn cli_app() -> Command { + Command::new("validator_client") + .visible_aliases(["v", "vc", "validator"]) + .styles(get_color_style()) + .display_order(0) .about( "When connected to a beacon node, performs the duties of a staked \ validator (e.g., proposing blocks and attestations).", ) .arg( - Arg::with_name("beacon-nodes") + Arg::new("help") + .long("help") + .short('h') + .help("Prints help information") + .action(ArgAction::HelpLong) + .display_order(0) + .help_heading(FLAG_HEADER) + ) + .arg( + Arg::new("beacon-nodes") .long("beacon-nodes") .value_name("NETWORK_ADDRESSES") .help("Comma-separated addresses to one or more beacon node HTTP APIs. \ Default is http://localhost:5052." ) - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("proposer-nodes") + Arg::new("proposer-nodes") .long("proposer-nodes") .value_name("NETWORK_ADDRESSES") .help("Comma-separated addresses to one or more beacon node HTTP APIs. \ These specify nodes that are used to send beacon block proposals. A failure will revert back to the standard beacon nodes specified in --beacon-nodes." ) - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) // TODO remove this flag in a future release .arg( - Arg::with_name("disable-run-on-all") + Arg::new("disable-run-on-all") .long("disable-run-on-all") .value_name("DISABLE_RUN_ON_ALL") .help("DEPRECATED. Use --broadcast. \ @@ -36,10 +49,12 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { and proposer preparation messages to all beacon nodes provided in the \ `--beacon-nodes flag`. This option changes that behaviour such that these \ api calls only go out to the first available and synced beacon node") - .takes_value(false), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("broadcast") + Arg::new("broadcast") .long("broadcast") .value_name("API_TOPICS") .help("Comma-separated list of beacon API topics to broadcast to all beacon nodes. \ @@ -47,10 +62,11 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { sync-committee. Default (when flag is omitted) is to broadcast \ subscriptions only."
) - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("validators-dir") + Arg::new("validators-dir") .long("validators-dir") .alias("validator-dir") .value_name("VALIDATORS_DIR") @@ -59,11 +75,12 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { each validator along with the common slashing protection database \ and the validator_definitions.yml" ) - .takes_value(true) + .action(ArgAction::Set) .conflicts_with("datadir") + .display_order(0) ) .arg( - Arg::with_name("secrets-dir") + Arg::new("secrets-dir") .long("secrets-dir") .value_name("SECRETS_DIRECTORY") .help( @@ -72,11 +89,15 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { name is the 0x-prefixed hex representation of the validators voting public \ key. Defaults to ~/.lighthouse/{network}/secrets.", ) - .takes_value(true) + .action(ArgAction::Set) + .conflicts_with("datadir") + .display_order(0) ) .arg( - Arg::with_name("init-slashing-protection") + Arg::new("init-slashing-protection") .long("init-slashing-protection") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .help( "If present, do not require the slashing protection database to exist before \ running. You SHOULD NOT use this flag unless you're certain that a new \ @@ -84,78 +105,95 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { will have been initialized when you imported your validator keys. If you \ misplace your database and then run with this flag you risk being slashed." ) + .display_order(0) ) .arg( - Arg::with_name("disable-auto-discover") + Arg::new("disable-auto-discover") .long("disable-auto-discover") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .help( "If present, do not attempt to discover new validators in the validators-dir. Validators \ will need to be manually added to the validator_definitions.yml file." ) + .display_order(0) ) .arg( - Arg::with_name("use-long-timeouts") + Arg::new("use-long-timeouts") .long("use-long-timeouts") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .help("If present, the validator client will use longer timeouts for requests \ made to the beacon node. This flag is generally not recommended, \ longer timeouts can cause missed duties when fallbacks are used.") + .display_order(0) ) .arg( - Arg::with_name("beacon-nodes-tls-certs") + Arg::new("beacon-nodes-tls-certs") .long("beacon-nodes-tls-certs") .value_name("CERTIFICATE-FILES") - .takes_value(true) + .action(ArgAction::Set) .help("Comma-separated paths to custom TLS certificates to use when connecting \ to a beacon node (and/or proposer node). These certificates must be in PEM format and are used \ in addition to the OS trust store. Commas must only be used as a \ delimiter, and must not be part of the certificate path.") + .display_order(0) ) // This overwrites the graffiti configured in the beacon node. 
.arg( - Arg::with_name("graffiti") + Arg::new("graffiti") .long("graffiti") .help("Specify your custom graffiti to be included in blocks.") .value_name("GRAFFITI") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("graffiti-file") + Arg::new("graffiti-file") .long("graffiti-file") .help("Specify a graffiti file to load validator graffitis from.") .value_name("GRAFFITI-FILE") - .takes_value(true) + .action(ArgAction::Set) .conflicts_with("graffiti") + .display_order(0) ) .arg( - Arg::with_name("suggested-fee-recipient") + Arg::new("suggested-fee-recipient") .long("suggested-fee-recipient") .help("Once the merge has happened, this address will receive transaction fees \ from blocks proposed by this validator client. If a fee recipient is \ configured in the validator definitions it takes priority over this value.") .value_name("FEE-RECIPIENT") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("produce-block-v3") + Arg::new("produce-block-v3") .long("produce-block-v3") .help("Enable block production via the block v3 endpoint for this validator client. \ This should only be enabled when paired with a beacon node \ that has this endpoint implemented. This flag will be enabled by default in \ future.") - .takes_value(false) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("distributed") + Arg::new("distributed") .long("distributed") .help("Enables functionality required for running the validator in a distributed validator cluster.") - .takes_value(false) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) /* REST API related arguments */ .arg( - Arg::with_name("http") + Arg::new("http") .long("http") .help("Enable the RESTful HTTP API server. Disabled by default.") - .takes_value(false), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) /* * Note: The HTTP server is **not** encrypted (i.e., not HTTPS) and therefore it is @@ -165,7 +203,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { * must also be used in order to make it clear to the user that this is unsafe. */ .arg( - Arg::with_name("http-address") + Arg::new("http-address") .long("http-address") .requires("http") .value_name("ADDRESS") @@ -175,26 +213,31 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { `--unencrypted-http-transport` flag to ensure the user is aware of the \ risks involved. 
For access via the Internet, users should apply \ transport-layer security like a HTTPS reverse-proxy or SSH tunnelling.") - .requires("unencrypted-http-transport"), + .requires("unencrypted-http-transport") + .display_order(0) ) .arg( - Arg::with_name("unencrypted-http-transport") - .long("unencrypted-http-transport") - .help("This is a safety flag to ensure that the user is aware that the http \ - transport is unencrypted and using a custom HTTP address is unsafe.") - .requires("http-address"), + Arg::new("unencrypted-http-transport") + .long("unencrypted-http-transport") + .help("This is a safety flag to ensure that the user is aware that the http \ + transport is unencrypted and using a custom HTTP address is unsafe.") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .requires("http-address") + .display_order(0) ) .arg( - Arg::with_name("http-port") + Arg::new("http-port") .long("http-port") .requires("http") .value_name("PORT") .help("Set the listen TCP port for the RESTful HTTP API server.") - .default_value_if("http", None, "5062") - .takes_value(true), + .default_value_if("http", ArgPredicate::IsPresent, "5062") + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("http-allow-origin") + Arg::new("http-allow-origin") .long("http-allow-origin") .requires("http") .value_name("ORIGIN") @@ -202,10 +245,11 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { Use * to allow any origin (not recommended in production). \ If no value is supplied, the CORS allowed origin is set to the listen \ address of this server (e.g., http://localhost:5062).") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("http-allow-keystore-export") + Arg::new("http-allow-keystore-export") .long("http-allow-keystore-export") .requires("http") .help("If present, allow access to the DELETE /lighthouse/keystores HTTP \ @@ -213,44 +257,52 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { consumers who have access to the API token. This method is useful for \ exporting validators, however it should be used with caution since it \ exposes private key data to authorized users.") - .takes_value(false), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("http-store-passwords-in-secrets-dir") + Arg::new("http-store-passwords-in-secrets-dir") .long("http-store-passwords-in-secrets-dir") .requires("http") .help("If present, any validators created via the HTTP will have keystore \ passwords stored in the secrets-dir rather than the validator \ definitions file.") - .takes_value(false), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) /* Prometheus metrics HTTP server related arguments */ .arg( - Arg::with_name("metrics") + Arg::new("metrics") .long("metrics") .help("Enable the Prometheus metrics HTTP server. 
Disabled by default.") - .takes_value(false), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("metrics-address") + Arg::new("metrics-address") .long("metrics-address") .requires("metrics") .value_name("ADDRESS") .help("Set the listen address for the Prometheus metrics HTTP server.") - .default_value_if("metrics", None, "127.0.0.1") - .takes_value(true), + .default_value_if("metrics", ArgPredicate::IsPresent, "127.0.0.1") + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("metrics-port") + Arg::new("metrics-port") .long("metrics-port") .requires("metrics") .value_name("PORT") .help("Set the listen TCP port for the Prometheus metrics HTTP server.") - .default_value_if("metrics", None, "5064") - .takes_value(true), + .default_value_if("metrics", ArgPredicate::IsPresent, "5064") + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("metrics-allow-origin") + Arg::new("metrics-allow-origin") .long("metrics-allow-origin") .requires("metrics") .value_name("ORIGIN") @@ -258,22 +310,25 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { Use * to allow any origin (not recommended in production). \ If no value is supplied, the CORS allowed origin is set to the listen \ address of this server (e.g., http://localhost:5064).") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("enable-high-validator-count-metrics") + Arg::new("enable-high-validator-count-metrics") .long("enable-high-validator-count-metrics") .help("Enable per validator metrics for > 64 validators. \ Note: This flag is automatically enabled for <= 64 validators. \ Enabling this metric for higher validator counts will lead to higher volume \ of prometheus metrics being collected.") - .takes_value(false), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) /* * Explorer metrics */ .arg( - Arg::with_name("monitoring-endpoint") + Arg::new("monitoring-endpoint") .long("monitoring-endpoint") .value_name("ADDRESS") .help("Enables the monitoring service for sending system metrics to a remote endpoint. \ @@ -282,19 +337,21 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { Note: This will send information to a remote sever which may identify and associate your \ validators, IP address and other personal information. Always use a HTTPS connection \ and never provide an untrusted URL.") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("monitoring-endpoint-period") + Arg::new("monitoring-endpoint-period") .long("monitoring-endpoint-period") .value_name("SECONDS") .help("Defines how many seconds to wait between each message sent to \ the monitoring-endpoint. Default: 60s") .requires("monitoring-endpoint") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("enable-doppelganger-protection") + Arg::new("enable-doppelganger-protection") .long("enable-doppelganger-protection") .value_name("ENABLE_DOPPELGANGER_PROTECTION") .help("If this flag is set, Lighthouse will delay startup for three epochs and \ @@ -306,56 +363,62 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { to avoid potentially committing a slashable offense. 
Use this flag in order to \ ENABLE this functionality, without this flag Lighthouse will begin attesting \ immediately.") - .takes_value(false), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("builder-proposals") + Arg::new("builder-proposals") .long("builder-proposals") .alias("private-tx-proposals") .help("If this flag is set, Lighthouse will query the Beacon Node for only block \ headers during proposals and will sign over headers. Useful for outsourcing \ execution payload construction during proposals.") - .takes_value(false), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("builder-registration-timestamp-override") + Arg::new("builder-registration-timestamp-override") .long("builder-registration-timestamp-override") .alias("builder-registration-timestamp-override") .help("This flag takes a unix timestamp value that will be used to override the \ timestamp used in the builder api registration") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("gas-limit") + Arg::new("gas-limit") .long("gas-limit") .value_name("INTEGER") - .takes_value(true) + .action(ArgAction::Set) .help("The gas limit to be used in all builder proposals for all validators managed \ by this validator client. Note this will not necessarily be used if the gas limit \ set here moves too far from the previous block's gas limit. [default: 30,000,000]") - .requires("builder-proposals"), + .requires("builder-proposals") + .display_order(0) ) .arg( - Arg::with_name("latency-measurement-service") - .long("latency-measurement-service") - .value_name("BOOLEAN") - .help("Set to 'true' to enable a service that periodically attempts to measure latency to BNs. \ - Set to 'false' to disable.") - .default_value("true") - .takes_value(true), + Arg::new("disable-latency-measurement-service") + .long("disable-latency-measurement-service") + .help("Disables the service that periodically attempts to measure latency to BNs.") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("validator-registration-batch-size") + Arg::new("validator-registration-batch-size") .long("validator-registration-batch-size") .value_name("INTEGER") .help("Defines the number of validators per \ validator/register_validator request sent to the BN. 
This value \ can be reduced to avoid timeouts from builders.") .default_value("500") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("builder-boost-factor") + Arg::new("builder-boost-factor") .long("builder-boost-factor") .value_name("UINT64") .help("Defines the boost factor, \ @@ -363,17 +426,20 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { when choosing between a builder payload header and payload from \ the local execution node.") .conflicts_with("prefer-builder-proposals") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("prefer-builder-proposals") + Arg::new("prefer-builder-proposals") .long("prefer-builder-proposals") .help("If this flag is set, Lighthouse will always prefer blocks \ constructed by builders, regardless of payload value.") - .takes_value(false), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("disable-slashing-protection-web3signer") + Arg::new("disable-slashing-protection-web3signer") .long("disable-slashing-protection-web3signer") .help("Disable Lighthouse's slashing protection for all web3signer keys. This can \ reduce the I/O burden on the VC but is only safe if slashing protection \ @@ -381,26 +447,30 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { THIS FLAG UNLESS YOU ARE CERTAIN THAT SLASHING PROTECTION IS ENABLED ON \ THE REMOTE SIGNER. YOU WILL GET SLASHED IF YOU USE THIS FLAG WITHOUT \ ENABLING WEB3SIGNER'S SLASHING PROTECTION.") - .takes_value(false) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) /* * Experimental/development options. */ .arg( - Arg::with_name("web3-signer-keep-alive-timeout") + Arg::new("web3-signer-keep-alive-timeout") .long("web3-signer-keep-alive-timeout") .value_name("MILLIS") .default_value("20000") .help("Keep-alive timeout for each web3signer connection. Set to 'null' to never \ timeout") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("web3-signer-max-idle-connections") + Arg::new("web3-signer-max-idle-connections") .long("web3-signer-max-idle-connections") .value_name("COUNT") .help("Maximum number of idle connections to maintain per web3signer host. 
Default \ is unlimited.") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) } diff --git a/validator_client/src/config.rs b/validator_client/src/config.rs index 5bd32fced2..eb47fcf31a 100644 --- a/validator_client/src/config.rs +++ b/validator_client/src/config.rs @@ -151,15 +151,15 @@ impl Config { .unwrap_or_else(|| PathBuf::from(".")); let (mut validator_dir, mut secrets_dir) = (None, None); - if cli_args.value_of("datadir").is_some() { + if cli_args.get_one::("datadir").is_some() { let base_dir: PathBuf = parse_required(cli_args, "datadir")?; validator_dir = Some(base_dir.join(DEFAULT_VALIDATOR_DIR)); secrets_dir = Some(base_dir.join(DEFAULT_SECRET_DIR)); } - if cli_args.value_of("validators-dir").is_some() { + if cli_args.get_one::("validators-dir").is_some() { validator_dir = Some(parse_required(cli_args, "validators-dir")?); } - if cli_args.value_of("secrets-dir").is_some() { + if cli_args.get_one::("secrets-dir").is_some() { secrets_dir = Some(parse_required(cli_args, "secrets-dir")?); } @@ -195,11 +195,11 @@ impl Config { .map_err(|e| format!("Unable to parse proposer node URL: {:?}", e))?; } - config.disable_auto_discover = cli_args.is_present("disable-auto-discover"); - config.init_slashing_protection = cli_args.is_present("init-slashing-protection"); - config.use_long_timeouts = cli_args.is_present("use-long-timeouts"); + config.disable_auto_discover = cli_args.get_flag("disable-auto-discover"); + config.init_slashing_protection = cli_args.get_flag("init-slashing-protection"); + config.use_long_timeouts = cli_args.get_flag("use-long-timeouts"); - if let Some(graffiti_file_path) = cli_args.value_of("graffiti-file") { + if let Some(graffiti_file_path) = cli_args.get_one::("graffiti-file") { let mut graffiti_file = GraffitiFile::new(graffiti_file_path.into()); graffiti_file .read_graffiti_file() @@ -208,7 +208,7 @@ impl Config { info!(log, "Successfully loaded graffiti file"; "path" => graffiti_file_path); } - if let Some(input_graffiti) = cli_args.value_of("graffiti") { + if let Some(input_graffiti) = cli_args.get_one::("graffiti") { let graffiti_bytes = input_graffiti.as_bytes(); if graffiti_bytes.len() > GRAFFITI_BYTES_LEN { return Err(format!( @@ -237,11 +237,11 @@ impl Config { config.beacon_nodes_tls_certs = Some(tls_certs.split(',').map(PathBuf::from).collect()); } - if cli_args.is_present("distributed") { + if cli_args.get_flag("distributed") { config.distributed = true; } - if cli_args.is_present("disable-run-on-all") { + if cli_args.get_flag("disable-run-on-all") { warn!( log, "The --disable-run-on-all flag is deprecated"; @@ -249,7 +249,7 @@ impl Config { ); config.broadcast_topics = vec![]; } - if let Some(broadcast_topics) = cli_args.value_of("broadcast") { + if let Some(broadcast_topics) = cli_args.get_one::("broadcast") { config.broadcast_topics = broadcast_topics .split(',') .filter(|t| *t != "none") @@ -281,12 +281,12 @@ impl Config { * Http API server */ - if cli_args.is_present("http") { + if cli_args.get_flag("http") { config.http_api.enabled = true; } - if let Some(address) = cli_args.value_of("http-address") { - if cli_args.is_present("unencrypted-http-transport") { + if let Some(address) = cli_args.get_one::("http-address") { + if cli_args.get_flag("unencrypted-http-transport") { config.http_api.listen_addr = address .parse::() .map_err(|_| "http-address is not a valid IP address.")?; @@ -298,13 +298,13 @@ impl Config { } } - if let Some(port) = cli_args.value_of("http-port") { + if let Some(port) = cli_args.get_one::("http-port") { 
@@ -298,13 +298,13 @@ } } - if let Some(port) = cli_args.value_of("http-port") { + if let Some(port) = cli_args.get_one::<String>("http-port") { config.http_api.listen_port = port .parse::<u16>() .map_err(|_| "http-port is not a valid u16.")?; } - if let Some(allow_origin) = cli_args.value_of("http-allow-origin") { + if let Some(allow_origin) = cli_args.get_one::<String>("http-allow-origin") { // Pre-validate the config value to give feedback to the user on node startup, instead of // as late as when the first API response is produced. hyper::header::HeaderValue::from_str(allow_origin) @@ -313,11 +313,11 @@ impl Config { config.http_api.allow_origin = Some(allow_origin.to_string()); } - if cli_args.is_present("http-allow-keystore-export") { + if cli_args.get_flag("http-allow-keystore-export") { config.http_api.allow_keystore_export = true; } - if cli_args.is_present("http-store-passwords-in-secrets-dir") { + if cli_args.get_flag("http-store-passwords-in-secrets-dir") { config.http_api.store_passwords_in_secrets_dir = true; } @@ -325,27 +325,27 @@ impl Config { * Prometheus metrics HTTP server */ - if cli_args.is_present("metrics") { + if cli_args.get_flag("metrics") { config.http_metrics.enabled = true; } - if cli_args.is_present("enable-high-validator-count-metrics") { + if cli_args.get_flag("enable-high-validator-count-metrics") { config.enable_high_validator_count_metrics = true; } - if let Some(address) = cli_args.value_of("metrics-address") { + if let Some(address) = cli_args.get_one::<String>("metrics-address") { config.http_metrics.listen_addr = address .parse::<IpAddr>() .map_err(|_| "metrics-address is not a valid IP address.")?; } - if let Some(port) = cli_args.value_of("metrics-port") { + if let Some(port) = cli_args.get_one::<String>("metrics-port") { config.http_metrics.listen_port = port .parse::<u16>() .map_err(|_| "metrics-port is not a valid u16.")?; } - if let Some(allow_origin) = cli_args.value_of("metrics-allow-origin") { + if let Some(allow_origin) = cli_args.get_one::<String>("metrics-allow-origin") { // Pre-validate the config value to give feedback to the user on node startup, instead of // as late as when the first API response is produced.
hyper::header::HeaderValue::from_str(allow_origin) @@ -354,14 +354,14 @@ impl Config { config.http_metrics.allow_origin = Some(allow_origin.to_string()); } - if cli_args.is_present(DISABLE_MALLOC_TUNING_FLAG) { + if cli_args.get_flag(DISABLE_MALLOC_TUNING_FLAG) { config.http_metrics.allocator_metrics_enabled = false; } /* * Explorer metrics */ - if let Some(monitoring_endpoint) = cli_args.value_of("monitoring-endpoint") { + if let Some(monitoring_endpoint) = cli_args.get_one::<String>("monitoring-endpoint") { let update_period_secs = clap_utils::parse_optional(cli_args, "monitoring-endpoint-period")?; config.monitoring_api = Some(monitoring_api::Config { @@ -372,24 +372,24 @@ impl Config { }); } - if cli_args.is_present("enable-doppelganger-protection") { + if cli_args.get_flag("enable-doppelganger-protection") { config.enable_doppelganger_protection = true; } - if cli_args.is_present("builder-proposals") { + if cli_args.get_flag("builder-proposals") { config.builder_proposals = true; } - if cli_args.is_present("produce-block-v3") { + if cli_args.get_flag("produce-block-v3") { config.produce_block_v3 = true; } - if cli_args.is_present("prefer-builder-proposals") { + if cli_args.get_flag("prefer-builder-proposals") { config.prefer_builder_proposals = true; } config.gas_limit = cli_args - .value_of("gas-limit") + .get_one::<String>("gas-limit") .map(|gas_limit| { gas_limit .parse::<u64>() @@ -398,7 +398,7 @@ impl Config { .transpose()?; if let Some(registration_timestamp_override) = - cli_args.value_of("builder-registration-timestamp-override") + cli_args.get_one::<String>("builder-registration-timestamp-override") { config.builder_registration_timestamp_override = Some( registration_timestamp_override @@ -410,7 +410,7 @@ impl Config { config.builder_boost_factor = parse_optional(cli_args, "builder-boost-factor")?; config.enable_latency_measurement_service = - parse_optional(cli_args, "latency-measurement-service")?.unwrap_or(true); + !cli_args.get_flag("disable-latency-measurement-service"); config.validator_registration_batch_size = parse_required(cli_args, "validator-registration-batch-size")?; @@ -419,7 +419,7 @@ impl Config { } config.enable_web3signer_slashing_protection = - if cli_args.is_present("disable-slashing-protection-web3signer") { + if cli_args.get_flag("disable-slashing-protection-web3signer") { warn!( log, "Slashing protection for remote keys disabled";
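Note the semantic change in the config.rs diff above: the old `--latency-measurement-service <BOOLEAN>` value flag (default true) is replaced by a `--disable-latency-measurement-service` switch, so the config field becomes the negation of a `get_flag` call. A minimal sketch of that inversion (the helper name is hypothetical):

```
use clap::{Arg, ArgAction, ArgMatches, Command};

// Hypothetical helper showing the polarity inversion: "enabled" is the
// negation of the disable switch, which keeps the default behaviour intact.
fn latency_service_enabled(matches: &ArgMatches) -> bool {
    !matches.get_flag("disable-latency-measurement-service")
}

fn main() {
    let cmd = Command::new("vc").arg(
        Arg::new("disable-latency-measurement-service")
            .long("disable-latency-measurement-service")
            .action(ArgAction::SetTrue),
    );

    // With no flags given, the service stays on, matching the old default.
    let matches = cmd.get_matches_from(["vc"]);
    assert!(latency_service_enabled(&matches));
}
```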
diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index 381269129e..729ff62ee3 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -109,7 +109,7 @@ impl ProductionValidatorClient { /// and attestation production. pub async fn new_from_cli( context: RuntimeContext<E>, - cli_args: &ArgMatches<'_>, + cli_args: &ArgMatches, ) -> Result<Self, String> { let config = Config::from_cli(cli_args, context.log()) .map_err(|e| format!("Unable to initialize config: {}", e))?; diff --git a/validator_manager/Cargo.toml b/validator_manager/Cargo.toml index 35af2b1ce7..ebcde6a828 100644 --- a/validator_manager/Cargo.toml +++ b/validator_manager/Cargo.toml @@ -6,14 +6,12 @@ edition = { workspace = true } # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -bls = { workspace = true } clap = { workspace = true } types = { workspace = true } environment = { workspace = true } eth2_network_config = { workspace = true } clap_utils = { workspace = true } eth2_wallet = { workspace = true } -eth2_keystore = { workspace = true } account_utils = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } diff --git a/validator_manager/src/create_validators.rs b/validator_manager/src/create_validators.rs index cd19bd0ae3..d53e92deb3 100644 --- a/validator_manager/src/create_validators.rs +++ b/validator_manager/src/create_validators.rs @@ -1,7 +1,8 @@ use super::common::*; use crate::DumpConfig; use account_utils::{random_password_string, read_mnemonic_from_cli, read_password_from_user}; -use clap::{App, Arg, ArgMatches}; +use clap::{Arg, ArgAction, ArgMatches, Command}; +use clap_utils::FLAG_HEADER; use eth2::{ lighthouse_vc::std_types::KeystoreJsonStr, types::{StateId, ValidatorId}, @@ -35,8 +36,8 @@ pub const DEPOSITS_FILENAME: &str = "deposits.json"; const BEACON_NODE_HTTP_TIMEOUT: Duration = Duration::from_secs(2); -pub fn cli_app<'a, 'b>() -> App<'a, 'b> { - App::new(CMD) +pub fn cli_app() -> Command { + Command::new(CMD) .about( "Creates new validators from BIP-39 mnemonic. A JSON file will be created which \ contains all the validator keystores and other validator data. This file can then \ @@ -45,7 +46,16 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { deposits in the same format as the \"ethereum/staking-deposit-cli\" tool.", ) .arg( - Arg::with_name(OUTPUT_PATH_FLAG) + Arg::new("help") + .long("help") + .short('h') + .help("Prints help information") + .action(ArgAction::HelpLong) + .display_order(0) + .help_heading(FLAG_HEADER), + ) + .arg( + Arg::new(OUTPUT_PATH_FLAG) .long(OUTPUT_PATH_FLAG) .value_name("DIRECTORY") .help( @@ -53,10 +63,11 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { files will be created.
The directory will be created if it does not exist.", ) .required(true) - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(DEPOSIT_GWEI_FLAG) + Arg::new(DEPOSIT_GWEI_FLAG) .long(DEPOSIT_GWEI_FLAG) .value_name("DEPOSIT_GWEI") .help( @@ -64,51 +75,60 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { required for an active validator (MAX_EFFECTIVE_BALANCE)", ) .conflicts_with(DISABLE_DEPOSITS_FLAG) - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(FIRST_INDEX_FLAG) + Arg::new(FIRST_INDEX_FLAG) .long(FIRST_INDEX_FLAG) .value_name("FIRST_INDEX") .help("The first of consecutive key indexes you wish to create.") - .takes_value(true) + .action(ArgAction::Set) .required(false) - .default_value("0"), + .default_value("0") + .display_order(0), ) .arg( - Arg::with_name(COUNT_FLAG) + Arg::new(COUNT_FLAG) .long(COUNT_FLAG) .value_name("VALIDATOR_COUNT") .help("The number of validators to create, regardless of how many already exist") .conflicts_with("at-most") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(MNEMONIC_FLAG) + Arg::new(MNEMONIC_FLAG) .long(MNEMONIC_FLAG) .value_name("MNEMONIC_PATH") .help("If present, the mnemonic will be read in from this file.") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(STDIN_INPUTS_FLAG) - .takes_value(false) - .hidden(cfg!(windows)) + Arg::new(STDIN_INPUTS_FLAG) + .action(ArgAction::SetTrue) + .hide(cfg!(windows)) .long(STDIN_INPUTS_FLAG) - .help("If present, read all user inputs from stdin instead of tty."), + .help("If present, read all user inputs from stdin instead of tty.") + .display_order(0) + .help_heading(FLAG_HEADER), ) .arg( - Arg::with_name(DISABLE_DEPOSITS_FLAG) + Arg::new(DISABLE_DEPOSITS_FLAG) .long(DISABLE_DEPOSITS_FLAG) .help( "When provided don't generate the deposits JSON file that is \ commonly used for submitting validator deposits via a web UI. \ Using this flag will save several seconds per validator if the \ user has an alternate strategy for submitting deposits.", - ), + ) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0), ) .arg( - Arg::with_name(SPECIFY_VOTING_KEYSTORE_PASSWORD_FLAG) + Arg::new(SPECIFY_VOTING_KEYSTORE_PASSWORD_FLAG) .long(SPECIFY_VOTING_KEYSTORE_PASSWORD_FLAG) .help( "If present, the user will be prompted to enter the voting keystore \ @@ -116,10 +136,13 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { flag is not provided, a random password will be used. 
It is not \ necessary to keep backups of voting keystore passwords if the \ mnemonic is safely backed up.", - ), + ) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0), ) .arg( - Arg::with_name(ETH1_WITHDRAWAL_ADDRESS_FLAG) + Arg::new(ETH1_WITHDRAWAL_ADDRESS_FLAG) .long(ETH1_WITHDRAWAL_ADDRESS_FLAG) .value_name("ETH1_ADDRESS") .help( @@ -128,10 +151,11 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { with the mnemonic-derived withdrawal public key in EIP-2334 format.", ) .conflicts_with(DISABLE_DEPOSITS_FLAG) - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(GAS_LIMIT_FLAG) + Arg::new(GAS_LIMIT_FLAG) .long(GAS_LIMIT_FLAG) .value_name("UINT64") .help( @@ -139,10 +163,11 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { to leave this as the default value by not specifying this flag.", ) .required(false) - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(FEE_RECIPIENT_FLAG) + Arg::new(FEE_RECIPIENT_FLAG) .long(FEE_RECIPIENT_FLAG) .value_name("ETH1_ADDRESS") .help( @@ -150,21 +175,23 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { fee recipient. Omit this flag to use the default value from the VC.", ) .required(false) - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(BUILDER_PROPOSALS_FLAG) + Arg::new(BUILDER_PROPOSALS_FLAG) .long(BUILDER_PROPOSALS_FLAG) .help( "When provided, all created validators will attempt to create \ blocks via builder rather than the local EL.", ) .required(false) - .possible_values(&["true", "false"]) - .takes_value(true), + .value_parser(["true", "false"]) + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(BEACON_NODE_FLAG) + Arg::new(BEACON_NODE_FLAG) .long(BEACON_NODE_FLAG) .value_name("HTTP_ADDRESS") .help( @@ -174,21 +201,24 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { prevent the same validator being created twice and therefore slashable \ conditions.", ) - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(FORCE_BLS_WITHDRAWAL_CREDENTIALS) - .takes_value(false) + Arg::new(FORCE_BLS_WITHDRAWAL_CREDENTIALS) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .long(FORCE_BLS_WITHDRAWAL_CREDENTIALS) .help( "If present, allows BLS withdrawal credentials rather than an execution \ address. 
This is not recommended.", - ), + ) + .display_order(0), ) .arg( - Arg::with_name(BUILDER_BOOST_FACTOR_FLAG) + Arg::new(BUILDER_BOOST_FACTOR_FLAG) .long(BUILDER_BOOST_FACTOR_FLAG) - .takes_value(true) + .action(ArgAction::Set) .value_name("UINT64") .required(false) .help( @@ -196,18 +226,20 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { a percentage multiplier to apply to the builder's payload value \ when choosing between a builder payload header and payload from \ the local execution node.", - ), + ) + .display_order(0), ) .arg( - Arg::with_name(PREFER_BUILDER_PROPOSALS_FLAG) + Arg::new(PREFER_BUILDER_PROPOSALS_FLAG) .long(PREFER_BUILDER_PROPOSALS_FLAG) .help( "If this flag is set, Lighthouse will always prefer blocks \ constructed by builders, regardless of payload value.", ) .required(false) - .possible_values(&["true", "false"]) - .takes_value(true), + .value_parser(["true", "false"]) + .action(ArgAction::Set) + .display_order(0), ) } @@ -242,10 +274,10 @@ impl CreateConfig { first_index: clap_utils::parse_required(matches, FIRST_INDEX_FLAG)?, count: clap_utils::parse_required(matches, COUNT_FLAG)?, mnemonic_path: clap_utils::parse_optional(matches, MNEMONIC_FLAG)?, - stdin_inputs: cfg!(windows) || matches.is_present(STDIN_INPUTS_FLAG), - disable_deposits: matches.is_present(DISABLE_DEPOSITS_FLAG), + stdin_inputs: cfg!(windows) || matches.get_flag(STDIN_INPUTS_FLAG), + disable_deposits: matches.get_flag(DISABLE_DEPOSITS_FLAG), specify_voting_keystore_password: matches - .is_present(SPECIFY_VOTING_KEYSTORE_PASSWORD_FLAG), + .get_flag(SPECIFY_VOTING_KEYSTORE_PASSWORD_FLAG), eth1_withdrawal_address: clap_utils::parse_optional( matches, ETH1_WITHDRAWAL_ADDRESS_FLAG, @@ -259,7 +291,7 @@ impl CreateConfig { fee_recipient: clap_utils::parse_optional(matches, FEE_RECIPIENT_FLAG)?, gas_limit: clap_utils::parse_optional(matches, GAS_LIMIT_FLAG)?, bn_url: clap_utils::parse_optional(matches, BEACON_NODE_FLAG)?, - force_bls_withdrawal_credentials: matches.is_present(FORCE_BLS_WITHDRAWAL_CREDENTIALS), + force_bls_withdrawal_credentials: matches.get_flag(FORCE_BLS_WITHDRAWAL_CREDENTIALS), }) } } @@ -516,8 +548,8 @@ impl ValidatorsAndDeposits { } } -pub async fn cli_run<'a, E: EthSpec>( - matches: &'a ArgMatches<'a>, +pub async fn cli_run( + matches: &ArgMatches, spec: &ChainSpec, dump_config: DumpConfig, ) -> Result<(), String> { @@ -581,7 +613,7 @@ pub mod tests { type E = MainnetEthSpec; - const TEST_VECTOR_DEPOSIT_CLI_VERSION: &str = "2.3.0"; + const TEST_VECTOR_DEPOSIT_CLI_VERSION: &str = "2.7.0"; fn junk_execution_address() -> Option
{ Some(Address::from_str("0x0f51bb10119727a7e5ea3538074fb341f56b09ad").unwrap()) @@ -933,12 +965,6 @@ pub mod tests { for deposit in &mut deposits { // Ensures we can match test vectors. deposit.deposit_cli_version = TEST_VECTOR_DEPOSIT_CLI_VERSION.to_string(); - - // We use "prater" and the vectors use "goerli" now. The two names refer to the same - // network so there should be no issue here. - if deposit.network_name == "prater" { - deposit.network_name = "goerli".to_string(); - } } deposits };
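Enumerated values migrate from `possible_values(&[...])` to `value_parser([...])`, as in the `--builder-proposals` and `--prefer-builder-proposals` arguments above, and invalid values are now rejected by the parser itself. A sketch, assuming a stand-in `manager` command:

```
use clap::{Arg, ArgAction, Command};

fn main() {
    // Stand-in command; the argument mirrors `--builder-proposals` above.
    let cmd = Command::new("manager").arg(
        Arg::new("builder-proposals")
            .long("builder-proposals")
            // v2's `.possible_values(&["true", "false"])` becomes a value
            // parser, so invalid values fail at parse time.
            .value_parser(["true", "false"])
            .action(ArgAction::Set),
    );

    let ok = cmd
        .clone()
        .try_get_matches_from(["manager", "--builder-proposals", "true"]);
    assert!(ok.is_ok());

    let bad = cmd.try_get_matches_from(["manager", "--builder-proposals", "yes"]);
    assert!(bad.is_err());
}
```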
diff --git a/validator_manager/src/import_validators.rs b/validator_manager/src/import_validators.rs index 4b924189f2..f193e8d0fb 100644 --- a/validator_manager/src/import_validators.rs +++ b/validator_manager/src/import_validators.rs @@ -1,6 +1,7 @@ use super::common::*; use crate::DumpConfig; -use clap::{App, Arg, ArgMatches}; +use clap::{Arg, ArgAction, ArgMatches, Command}; +use clap_utils::FLAG_HEADER; use eth2::{lighthouse_vc::std_types::ImportKeystoreStatus, SensitiveUrl}; use serde::{Deserialize, Serialize}; use std::fs; @@ -13,15 +14,24 @@ pub const VC_TOKEN_FLAG: &str = "vc-token"; pub const DETECTED_DUPLICATE_MESSAGE: &str = "Duplicate validator detected!"; -pub fn cli_app<'a, 'b>() -> App<'a, 'b> { - App::new(CMD) +pub fn cli_app() -> Command { + Command::new(CMD) .about( "Uploads validators to a validator client using the HTTP API. The validators \ are defined in a JSON file which can be generated using the \"create-validators\" \ command.", ) .arg( - Arg::with_name(VALIDATORS_FILE_FLAG) + Arg::new("help") + .long("help") + .short('h') + .help("Prints help information") + .action(ArgAction::HelpLong) + .display_order(0) + .help_heading(FLAG_HEADER), + ) + .arg( + Arg::new(VALIDATORS_FILE_FLAG) .long(VALIDATORS_FILE_FLAG) .value_name("PATH_TO_JSON_FILE") .help( @@ -30,10 +40,11 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { \"validators.json\".", ) .required(true) - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(VC_URL_FLAG) + Arg::new(VC_URL_FLAG) .long(VC_URL_FLAG) .value_name("HTTP_ADDRESS") .help( @@ -43,18 +54,21 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { ) .default_value("http://localhost:5062") .requires(VC_TOKEN_FLAG) - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(VC_TOKEN_FLAG) + Arg::new(VC_TOKEN_FLAG) .long(VC_TOKEN_FLAG) .value_name("PATH") .help("The file containing a token required by the validator client.") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(IGNORE_DUPLICATES_FLAG) - .takes_value(false) + Arg::new(IGNORE_DUPLICATES_FLAG) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .long(IGNORE_DUPLICATES_FLAG) .help( "If present, ignore any validators which already exist on the VC. \ @@ -63,7 +77,8 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { slashable conditions, it might be an indicator that something is amiss. \ Users should also be careful to avoid submitting duplicate deposits for \ validators that already exist on the VC.", - ), + ) + .display_order(0), ) } @@ -81,15 +96,12 @@ impl ImportConfig { validators_file_path: clap_utils::parse_required(matches, VALIDATORS_FILE_FLAG)?, vc_url: clap_utils::parse_required(matches, VC_URL_FLAG)?, vc_token_path: clap_utils::parse_required(matches, VC_TOKEN_FLAG)?, - ignore_duplicates: matches.is_present(IGNORE_DUPLICATES_FLAG), + ignore_duplicates: matches.get_flag(IGNORE_DUPLICATES_FLAG), }) } } -pub async fn cli_run<'a>( - matches: &'a ArgMatches<'a>, - dump_config: DumpConfig, -) -> Result<(), String> { +pub async fn cli_run(matches: &ArgMatches, dump_config: DumpConfig) -> Result<(), String> { let config = ImportConfig::from_cli(matches)?; if dump_config.should_exit_early(&config)? { Ok(())
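Each subcommand above also defines its own `help` argument with `ArgAction::HelpLong`, which replaces clap's auto-generated flag (clap only adds its builtin help when no argument with the id `help` exists) and lets the flag be grouped under a custom heading and display order. A sketch of the pattern; `FLAG_HEADER` lives in Lighthouse's `clap_utils`, so a local literal stands in here:

```
use clap::{error::ErrorKind, Arg, ArgAction, Command};

// Stand-in for the constant exported by Lighthouse's `clap_utils`.
const FLAG_HEADER: &str = "Flags";

fn main() {
    // Defining an argument with the id "help" suppresses clap's
    // auto-generated flag, letting the CLI control its heading and order.
    let cmd = Command::new("validator-manager").arg(
        Arg::new("help")
            .long("help")
            .short('h')
            .help("Prints help information")
            // HelpLong always renders the long help, even for `-h`.
            .action(ArgAction::HelpLong)
            .display_order(0)
            .help_heading(FLAG_HEADER),
    );

    // Requesting help surfaces as a DisplayHelp "error" from try_get_matches.
    let err = cmd
        .try_get_matches_from(["validator-manager", "--help"])
        .unwrap_err();
    assert_eq!(err.kind(), ErrorKind::DisplayHelp);
}
```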
See --help.".to_string()), } }, "validator_manager", diff --git a/validator_manager/src/move_validators.rs b/validator_manager/src/move_validators.rs index 5826f2756b..d2149d742c 100644 --- a/validator_manager/src/move_validators.rs +++ b/validator_manager/src/move_validators.rs @@ -1,7 +1,8 @@ use super::common::*; use crate::DumpConfig; use account_utils::{read_password_from_user, ZeroizeString}; -use clap::{App, Arg, ArgMatches}; +use clap::{Arg, ArgAction, ArgMatches, Command}; +use clap_utils::FLAG_HEADER; use eth2::{ lighthouse_vc::{ std_types::{ @@ -66,8 +67,8 @@ impl PasswordSource { } } -pub fn cli_app<'a, 'b>() -> App<'a, 'b> { - App::new(CMD) +pub fn cli_app() -> Command { + Command::new(CMD) .about( "Uploads validators to a validator client using the HTTP API. The validators \ are defined in a JSON file which can be generated using the \"create-validators\" \ @@ -75,7 +76,16 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { file system (i.e., not Web3Signer validators).", ) .arg( - Arg::with_name(SRC_VC_URL_FLAG) + Arg::new("help") + .long("help") + .short('h') + .help("Prints help information") + .action(ArgAction::HelpLong) + .display_order(0) + .help_heading(FLAG_HEADER), + ) + .arg( + Arg::new(SRC_VC_URL_FLAG) .long(SRC_VC_URL_FLAG) .value_name("HTTP_ADDRESS") .help( @@ -85,17 +95,19 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { ) .required(true) .requires(SRC_VC_TOKEN_FLAG) - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(SRC_VC_TOKEN_FLAG) + Arg::new(SRC_VC_TOKEN_FLAG) .long(SRC_VC_TOKEN_FLAG) .value_name("PATH") .help("The file containing a token required by the source validator client.") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(DEST_VC_URL_FLAG) + Arg::new(DEST_VC_URL_FLAG) .long(DEST_VC_URL_FLAG) .value_name("HTTP_ADDRESS") .help( @@ -105,35 +117,39 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { ) .required(true) .requires(DEST_VC_TOKEN_FLAG) - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(DEST_VC_TOKEN_FLAG) + Arg::new(DEST_VC_TOKEN_FLAG) .long(DEST_VC_TOKEN_FLAG) .value_name("PATH") .help("The file containing a token required by the destination validator client.") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(VALIDATORS_FLAG) + Arg::new(VALIDATORS_FLAG) .long(VALIDATORS_FLAG) .value_name("STRING") .help( "The validators to be moved. Either a list of 0x-prefixed \ validator pubkeys or the keyword \"all\".", ) - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(COUNT_FLAG) + Arg::new(COUNT_FLAG) .long(COUNT_FLAG) .value_name("VALIDATOR_COUNT") .help("The number of validators to move.") .conflicts_with(VALIDATORS_FLAG) - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(GAS_LIMIT_FLAG) + Arg::new(GAS_LIMIT_FLAG) .long(GAS_LIMIT_FLAG) .value_name("UINT64") .help( @@ -141,10 +157,11 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { to leave this as the default value by not specifying this flag.", ) .required(false) - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(FEE_RECIPIENT_FLAG) + Arg::new(FEE_RECIPIENT_FLAG) .long(FEE_RECIPIENT_FLAG) .value_name("ETH1_ADDRESS") .help( @@ -152,30 +169,33 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { fee recipient. 
Omit this flag to use the default value from the VC.", ) .required(false) - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(BUILDER_PROPOSALS_FLAG) + Arg::new(BUILDER_PROPOSALS_FLAG) .long(BUILDER_PROPOSALS_FLAG) .help( "When provided, all created validators will attempt to create \ blocks via builder rather than the local EL.", ) .required(false) - .possible_values(&["true", "false"]) - .takes_value(true), + .value_parser(["true", "false"]) + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(STDIN_INPUTS_FLAG) - .takes_value(false) - .hidden(cfg!(windows)) + Arg::new(STDIN_INPUTS_FLAG) + .action(ArgAction::SetTrue) + .hide(cfg!(windows)) .long(STDIN_INPUTS_FLAG) - .help("If present, read all user inputs from stdin instead of tty."), + .help("If present, read all user inputs from stdin instead of tty.") + .display_order(0), ) .arg( - Arg::with_name(BUILDER_BOOST_FACTOR_FLAG) + Arg::new(BUILDER_BOOST_FACTOR_FLAG) .long(BUILDER_BOOST_FACTOR_FLAG) - .takes_value(true) + .action(ArgAction::Set) .value_name("UINT64") .required(false) .help( @@ -183,18 +203,20 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { a percentage multiplier to apply to the builder's payload value \ when choosing between a builder payload header and payload from \ the local execution node.", - ), + ) + .display_order(0), ) .arg( - Arg::with_name(PREFER_BUILDER_PROPOSALS_FLAG) + Arg::new(PREFER_BUILDER_PROPOSALS_FLAG) .long(PREFER_BUILDER_PROPOSALS_FLAG) .help( "If this flag is set, Lighthouse will always prefer blocks \ constructed by builders, regardless of payload value.", ) .required(false) - .possible_values(&["true", "false"]) - .takes_value(true), + .value_parser(["true", "false"]) + .action(ArgAction::Set) + .display_order(0), ) } @@ -223,10 +245,10 @@ pub struct MoveConfig { impl MoveConfig { fn from_cli(matches: &ArgMatches) -> Result<Self, String> { let count_flag = clap_utils::parse_optional(matches, COUNT_FLAG)?; - let validators_flag = matches.value_of(VALIDATORS_FLAG); + let validators_flag = matches.get_one::<String>(VALIDATORS_FLAG); let validators = match (count_flag, validators_flag) { (Some(count), None) => Validators::Count(count), - (None, Some(string)) => match string { + (None, Some(string)) => match string.as_str() { "all" => Validators::All, pubkeys => pubkeys .split(',') @@ -257,16 +279,13 @@ impl MoveConfig { fee_recipient: clap_utils::parse_optional(matches, FEE_RECIPIENT_FLAG)?, gas_limit: clap_utils::parse_optional(matches, GAS_LIMIT_FLAG)?, password_source: PasswordSource::Interactive { - stdin_inputs: cfg!(windows) || matches.is_present(STDIN_INPUTS_FLAG), + stdin_inputs: cfg!(windows) || matches.get_flag(STDIN_INPUTS_FLAG), }, }) } } -pub async fn cli_run<'a>( - matches: &'a ArgMatches<'a>, - dump_config: DumpConfig, -) -> Result<(), String> { +pub async fn cli_run(matches: &ArgMatches, dump_config: DumpConfig) -> Result<(), String> { let config = MoveConfig::from_cli(matches)?; if dump_config.should_exit_early(&config)? { Ok(()) diff --git a/validator_manager/test_vectors/generate.py b/validator_manager/test_vectors/generate.py index 722414de73..8bf7f5f52d 100644 --- a/validator_manager/test_vectors/generate.py +++ b/validator_manager/test_vectors/generate.py @@ -1,10 +1,13 @@ # This script uses the `ethereum/staking-deposit-cli` tool to generate # deposit data files which are then used for testing by Lighthouse.
diff --git a/validator_manager/test_vectors/generate.py b/validator_manager/test_vectors/generate.py
index 722414de73..8bf7f5f52d 100644
--- a/validator_manager/test_vectors/generate.py
+++ b/validator_manager/test_vectors/generate.py
@@ -1,10 +1,13 @@
 # This script uses the `ethereum/staking-deposit-cli` tool to generate
 # deposit data files which are then used for testing by Lighthouse.
 #
-# To generate vectors, simply run this Python script:
+# To generate vectors, run this Python script:
 #
 # `python generate.py`
 #
+# This script was last run on Linux using Python v3.10.4. Python v3.11.0 was not working at time
+# of writing due to dependency issues in `staking-deposit-cli`. You should probably use `pyenv` and
+# `virtualenv`.
 import os
 import sys
 import shutil
@@ -89,8 +92,7 @@ def sdc_generate(network, first_index, count, eth1_withdrawal_address=None):
     os.mkdir(output_dir)

     command = [
-        '/bin/sh',
-        'deposit.sh',
+        './deposit.sh',
         '--language', 'english',
         '--non_interactive',
         'existing-mnemonic',
@@ -114,10 +116,10 @@ def test_network(network):
     sdc_generate(network, first_index=99, count=2)
     sdc_generate(network, first_index=1024, count=3)
     sdc_generate(network, first_index=0, count=2,
-                 eth1_withdrawal_address="0x0f51bb10119727a7e5ea3538074fb341f56b09ad")
+                 eth1_withdrawal_address="0x0f51bb10119727a7e5eA3538074fb341F56B09Ad")


 setup()
 test_network("mainnet")
-test_network("prater")
+test_network("holesky")
 cleanup()
diff --git a/validator_manager/test_vectors/vectors/holesky_first_0_count_1_eth1_false/validator_keys/deposit_data-1715584111.json b/validator_manager/test_vectors/vectors/holesky_first_0_count_1_eth1_false/validator_keys/deposit_data-1715584111.json
new file mode 100644
index 0000000000..6b343d087a
--- /dev/null
+++ b/validator_manager/test_vectors/vectors/holesky_first_0_count_1_eth1_false/validator_keys/deposit_data-1715584111.json
@@ -0,0 +1 @@
+[{"pubkey": "88b6b3a9b391fa5593e8bce8d06102df1a56248368086929709fbb4a8570dc6a560febeef8159b19789e9c1fd13572f0", "withdrawal_credentials": "0049b6188ed20314309f617dd4030b8ddfac3c6e65759a03c226a13b2fe4cc72", "amount": 32000000000, "signature": "846c83b1ec80038974ded0ef5b89d86c862a7bd4559c10528cd4bb6a48e71987f17a963bc6165a6f51c8b87474e64b450b549ce2d14a25bea3c86c241f3740f3d3edc3dc36fddbeadb1ec8969d7193da602270fea8dd31d3e64674aa2090b73d", "deposit_message_root": "a9bc1d21cc009d9b10782a07213e37592c0d235463ed0117dec755758da90d51", "deposit_data_root": "cdfe14518026e99b9dfa8a029054349e37d4632ee2bbed7c2f5af19a01912368", "fork_version": "01017000", "network_name": "holesky", "deposit_cli_version": "2.7.0"}]
\ No newline at end of file
"00ad3748cbd1adc855c2bdab431f7e755a21663f4f6447ac888e5855c588af5a", "amount": 32000000000, "signature": "997cff67c1675ecd2467ac050850ddec8b0488995abf363cee40cbe1461043acf4e68422e9731340437d566542e010cd186031dc0de30b2f56d19f3bb866e0fa9be31dd49ea27777f25ad786cc8587fb745598e5870647b6deeaab77fba4a9e4", "deposit_message_root": "c5271aba974c802ff5b02b11fa33b545d7f430ff3b85c0f9eeef4cd59d83abf3", "deposit_data_root": "8787f86d699426783983d03945a8ebe45b349118d28e8af528b9695887f98fac", "fork_version": "01017000", "network_name": "holesky", "deposit_cli_version": "2.7.0"}] \ No newline at end of file diff --git a/validator_manager/test_vectors/vectors/holesky_first_0_count_2_eth1_true/validator_keys/deposit_data-1715584129.json b/validator_manager/test_vectors/vectors/holesky_first_0_count_2_eth1_true/validator_keys/deposit_data-1715584129.json new file mode 100644 index 0000000000..9b2678651f --- /dev/null +++ b/validator_manager/test_vectors/vectors/holesky_first_0_count_2_eth1_true/validator_keys/deposit_data-1715584129.json @@ -0,0 +1 @@ +[{"pubkey": "88b6b3a9b391fa5593e8bce8d06102df1a56248368086929709fbb4a8570dc6a560febeef8159b19789e9c1fd13572f0", "withdrawal_credentials": "0100000000000000000000000f51bb10119727a7e5ea3538074fb341f56b09ad", "amount": 32000000000, "signature": "a8eed5bb34dec5fdee4a3e68a774143072af0ebdae26a9b24ea0601d516a5eeb18aa2ec804be3f05f8475f2e472ce91809d93b7586c3a90fc8a7bbb63ad1f762eee3df0dc0ea3d33dd8ba782e48de495b3bc76e280658c1406e11d07db659e69", "deposit_message_root": "62967565d11471da4af7769911926cd1826124048036b25616216f99bc320f13", "deposit_data_root": "74ead0279baa86ed7106268e4806484eaae26a8f1c42f693e4b3cb626c724b63", "fork_version": "01017000", "network_name": "holesky", "deposit_cli_version": "2.7.0"}, {"pubkey": "a33ab9d93fb53c4f027944aaa11a13be0c150b7cc2e379d85d1ed4db38d178b4e4ebeae05832158b8c746c1961da00ce", "withdrawal_credentials": "0100000000000000000000000f51bb10119727a7e5ea3538074fb341f56b09ad", "amount": 32000000000, "signature": "8d87cdd627ed169114c00653fd3167e2afc917010071bbbbddd60e331ed0d0d7273cb4a887efe63e7b840bac713420d907e9dac20df56e50e7346b59e3acfe56753234a34c7ab3d8c40ea00b447db005b4b780701a0a2416c4fdadbdb18bf174", "deposit_message_root": "ce110433298ffb78d827d67dcc13655344a139cb7e3ce10b341937c0a76b25b7", "deposit_data_root": "978b04b76d0a56ff28beb8eb1859792e0967d0b51e4a31485d2078b8390954d2", "fork_version": "01017000", "network_name": "holesky", "deposit_cli_version": "2.7.0"}] \ No newline at end of file diff --git a/validator_manager/test_vectors/vectors/holesky_first_1024_count_3_eth1_false/validator_keys/deposit_data-1715584124.json b/validator_manager/test_vectors/vectors/holesky_first_1024_count_3_eth1_false/validator_keys/deposit_data-1715584124.json new file mode 100644 index 0000000000..997260bb87 --- /dev/null +++ b/validator_manager/test_vectors/vectors/holesky_first_1024_count_3_eth1_false/validator_keys/deposit_data-1715584124.json @@ -0,0 +1 @@ +[{"pubkey": "92ca8dddba4ae7ada6584c377fc53fb978ad9d5ee8db585b18e226c27682b326b3c68e10f5d99a453e233268c144e0ef", "withdrawal_credentials": "00dd4f8bfd1a48be288c2af8bb7315f6198900b5b3f56df010420d5328e682cb", "amount": 32000000000, "signature": "818141f1f2fdba651f6a3de4ed43c774974b6cec82b3e6c3fa00569b6b67a88c37742d0033275dc98b4bbaac875e48b416b89cebfd1fe9996e2a29c0a2c512d1cedff558420a1a2b50cf5c743a622d85d941b896b00520b3e9a3eaf1f5eff12c", "deposit_message_root": "5421d9177b4d035e6525506509ab702c5f458c53458dad437097b37cb8209b43", "deposit_data_root": 
"9c9f6ed171b93a08f4e1bc46c0a7feace6466e3e213c6c2d567428c73e22e242", "fork_version": "01017000", "network_name": "holesky", "deposit_cli_version": "2.7.0"}, {"pubkey": "86474cd2874663445ef0ee02aca81b2b942a383fd4c7085fa675388e26c67afc0fef44a8666d46f571723e349ae4a0cb", "withdrawal_credentials": "001c31aa161ed1d3c481c1ee8f3ad1853217296a15877917fe3c2f680580ac01", "amount": 32000000000, "signature": "b62103a32290ec8c710d48f3147895a2dddb25231c9ae38b8ca12bcaf30770a9fc632f4da6b3c5b7a43cfa6a9f096f5e13d26b2c68a42c1c86385aea268dcd2ad3cf766b3f01ee2ba19379ddae9c15830aac8acbef20accc82c734f4c40e5ffd", "deposit_message_root": "279271f7065c83868c37021c32c014516b21e6188fb2cee4e8543c5d38427698", "deposit_data_root": "37b75d75086f4b980c85c021ca22343008d445061714cff41d63aea4dca49a5f", "fork_version": "01017000", "network_name": "holesky", "deposit_cli_version": "2.7.0"}, {"pubkey": "997e27aa262238beb01464434694a466321b5270297bdfdb944b65a3b6617b6ce2613628ac35a8f4cf2e9b4b55c46ef8", "withdrawal_credentials": "0097fffee9cf9fd91a6fa89af90e73f1cb8b8a043e742afaeb2e57b83b0845fe", "amount": 32000000000, "signature": "af2dc295084b4a3eff01a52fe5d42aa931509c24328d5304e59026d0957b55bc35e64802a8d64fdb4a9700bf12e1d6bb184eba01682d8413d86b737e63d3d79a16243d9c8e00115a202efc889ef7129861d8aa32bf8ec9ef5305eecce87b2eda", "deposit_message_root": "187e177721bfdd8ea13cb52c8de2dead29164a0e093efb640457a0e6ac918191", "deposit_data_root": "fd0c081818d2ce1bc54b7979e9b348bbbdb8fe5904694143bf4b355dcbbde692", "fork_version": "01017000", "network_name": "holesky", "deposit_cli_version": "2.7.0"}] \ No newline at end of file diff --git a/validator_manager/test_vectors/vectors/holesky_first_12_count_1_eth1_false/validator_keys/deposit_data-1715584117.json b/validator_manager/test_vectors/vectors/holesky_first_12_count_1_eth1_false/validator_keys/deposit_data-1715584117.json new file mode 100644 index 0000000000..4fa3724c59 --- /dev/null +++ b/validator_manager/test_vectors/vectors/holesky_first_12_count_1_eth1_false/validator_keys/deposit_data-1715584117.json @@ -0,0 +1 @@ +[{"pubkey": "8b181759a027c09a409ef24f6b35db213982c2474e2017f3851d76b1c4e560a4238072f67a0c22cb667f940da4ea9ec9", "withdrawal_credentials": "00cbec90e8570679f565bd4645f73a078981067a705564283e61c93c81707842", "amount": 32000000000, "signature": "b687aa7d55752f00a060c21fa9287485bab94c841d96b3516263fb384a812c92e60ef9fa2e09add9f55db71961fc051e0bb83d214b6f31d04ee59eaba3b43e27eadd2a64884c5d4125a1f5bd6e1d930e5a1e420c278c697d4af6ed3fcdac16cf", "deposit_message_root": "fcdf3d94740766299a95b3e477e64abadff6ab8978400578f241c93eb367b938", "deposit_data_root": "54dc56d2838ca70bac89ca92ae1f8d04945d3305ce8507b390756b646163387a", "fork_version": "01017000", "network_name": "holesky", "deposit_cli_version": "2.7.0"}] \ No newline at end of file diff --git a/validator_manager/test_vectors/vectors/holesky_first_99_count_2_eth1_false/validator_keys/deposit_data-1715584120.json b/validator_manager/test_vectors/vectors/holesky_first_99_count_2_eth1_false/validator_keys/deposit_data-1715584120.json new file mode 100644 index 0000000000..7436b53f24 --- /dev/null +++ b/validator_manager/test_vectors/vectors/holesky_first_99_count_2_eth1_false/validator_keys/deposit_data-1715584120.json @@ -0,0 +1 @@ +[{"pubkey": "a57a4ed429e415b862cc758e75c93936e3f6339640d0763b969ba133a82c03717827fbdd8ec42fc862ed50e3b5b528dc", "withdrawal_credentials": "00864081ef2f5aec1aa667872615e25027f1fdc256a4948b6318cf75a8d635a3", "amount": 32000000000, "signature": 
"a59a2c510c5ce378b514f62550a7115cd6cfebaf73a5ba20c2cf21456a2d2c11d6e117b91d23743fc0361794cf7e5405030eb296926b526e8a2d68aa87569358e69d3884563a23770714730b6fab6ba639977d725a5ed4f29abe3ccc34575610", "deposit_message_root": "c08d0ecd085bc0f50c35f1b34d8b8937b2b9c8a172a9808de70f8d448c526f07", "deposit_data_root": "149a5dfbba87109dac65142cc067aed97c9579730488cfe16625be3ce4f753a6", "fork_version": "01017000", "network_name": "holesky", "deposit_cli_version": "2.7.0"}, {"pubkey": "a2801622bc391724989004b5de78cb85746f85a303572691ecc945d9f5c61ec512127e58482e0dfcb4de77be3294ab01", "withdrawal_credentials": "00edff674c66a7f58285554e700183aeee5e740691de8087f7ce4d81f3597108", "amount": 32000000000, "signature": "966ae45b81402f1155ff313e48ca3a5346264dcc4bc9ee9e69994ee74368852d9d27c1684752735feba6c21042ad366b13f12c6e772c453518900435d87e2d743e1818e7471cf3574598e3b085c4527f643efe679841ddf8a480cac12b2c6e08", "deposit_message_root": "f5a530bee9698c2447961ecd210184fbb130bbb8e8916988d802d47e3b147842", "deposit_data_root": "f44dac412ae36929a84f64d5f7f91cada908a8f9e837fc70628f58804591798d", "fork_version": "01017000", "network_name": "holesky", "deposit_cli_version": "2.7.0"}] \ No newline at end of file diff --git a/validator_manager/test_vectors/vectors/mainnet_first_0_count_1_eth1_false/validator_keys/deposit_data-1660803666.json b/validator_manager/test_vectors/vectors/mainnet_first_0_count_1_eth1_false/validator_keys/deposit_data-1715584089.json similarity index 90% rename from validator_manager/test_vectors/vectors/mainnet_first_0_count_1_eth1_false/validator_keys/deposit_data-1660803666.json rename to validator_manager/test_vectors/vectors/mainnet_first_0_count_1_eth1_false/validator_keys/deposit_data-1715584089.json index 31c00c57f2..d9ba926d1c 100644 --- a/validator_manager/test_vectors/vectors/mainnet_first_0_count_1_eth1_false/validator_keys/deposit_data-1660803666.json +++ b/validator_manager/test_vectors/vectors/mainnet_first_0_count_1_eth1_false/validator_keys/deposit_data-1715584089.json @@ -1 +1 @@ -[{"pubkey": "88b6b3a9b391fa5593e8bce8d06102df1a56248368086929709fbb4a8570dc6a560febeef8159b19789e9c1fd13572f0", "withdrawal_credentials": "0049b6188ed20314309f617dd4030b8ddfac3c6e65759a03c226a13b2fe4cc72", "amount": 32000000000, "signature": "8ac88247c1b431a2d1eb2c5f00e7b8467bc21d6dc267f1af9ef727a12e32b4299e3b289ae5734a328b3202478dd746a80bf9e15a2217240dca1fc1b91a6b7ff7a0f5830d9a2610c1c30f19912346271357c21bd9af35a74097ebbdda2ddaf491", "deposit_message_root": "a9bc1d21cc009d9b10782a07213e37592c0d235463ed0117dec755758da90d51", "deposit_data_root": "807a20b2801eabfd9065c1b74ed6ae3e991a1ab770e4eaf268f30b37cfd2cbd7", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.3.0"}] \ No newline at end of file +[{"pubkey": "88b6b3a9b391fa5593e8bce8d06102df1a56248368086929709fbb4a8570dc6a560febeef8159b19789e9c1fd13572f0", "withdrawal_credentials": "0049b6188ed20314309f617dd4030b8ddfac3c6e65759a03c226a13b2fe4cc72", "amount": 32000000000, "signature": "8ac88247c1b431a2d1eb2c5f00e7b8467bc21d6dc267f1af9ef727a12e32b4299e3b289ae5734a328b3202478dd746a80bf9e15a2217240dca1fc1b91a6b7ff7a0f5830d9a2610c1c30f19912346271357c21bd9af35a74097ebbdda2ddaf491", "deposit_message_root": "a9bc1d21cc009d9b10782a07213e37592c0d235463ed0117dec755758da90d51", "deposit_data_root": "807a20b2801eabfd9065c1b74ed6ae3e991a1ab770e4eaf268f30b37cfd2cbd7", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.7.0"}] \ No newline at end of file diff --git 
a/validator_manager/test_vectors/vectors/mainnet_first_0_count_2_eth1_false/validator_keys/deposit_data-1660803669.json b/validator_manager/test_vectors/vectors/mainnet_first_0_count_2_eth1_false/validator_keys/deposit_data-1715584092.json similarity index 90% rename from validator_manager/test_vectors/vectors/mainnet_first_0_count_2_eth1_false/validator_keys/deposit_data-1660803669.json rename to validator_manager/test_vectors/vectors/mainnet_first_0_count_2_eth1_false/validator_keys/deposit_data-1715584092.json index 2880b7724c..f1ea4c6ad3 100644 --- a/validator_manager/test_vectors/vectors/mainnet_first_0_count_2_eth1_false/validator_keys/deposit_data-1660803669.json +++ b/validator_manager/test_vectors/vectors/mainnet_first_0_count_2_eth1_false/validator_keys/deposit_data-1715584092.json @@ -1 +1 @@ -[{"pubkey": "88b6b3a9b391fa5593e8bce8d06102df1a56248368086929709fbb4a8570dc6a560febeef8159b19789e9c1fd13572f0", "withdrawal_credentials": "0049b6188ed20314309f617dd4030b8ddfac3c6e65759a03c226a13b2fe4cc72", "amount": 32000000000, "signature": "8ac88247c1b431a2d1eb2c5f00e7b8467bc21d6dc267f1af9ef727a12e32b4299e3b289ae5734a328b3202478dd746a80bf9e15a2217240dca1fc1b91a6b7ff7a0f5830d9a2610c1c30f19912346271357c21bd9af35a74097ebbdda2ddaf491", "deposit_message_root": "a9bc1d21cc009d9b10782a07213e37592c0d235463ed0117dec755758da90d51", "deposit_data_root": "807a20b2801eabfd9065c1b74ed6ae3e991a1ab770e4eaf268f30b37cfd2cbd7", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.3.0"}, {"pubkey": "a33ab9d93fb53c4f027944aaa11a13be0c150b7cc2e379d85d1ed4db38d178b4e4ebeae05832158b8c746c1961da00ce", "withdrawal_credentials": "00ad3748cbd1adc855c2bdab431f7e755a21663f4f6447ac888e5855c588af5a", "amount": 32000000000, "signature": "84b9fc8f260a1488c4c9a438f875edfa2bac964d651b2bc886d8442829b13f89752e807c8ca9bae9d50b1b506d3a64730015dd7f91e271ff9c1757d1996dcf6082fe5205cf6329fa2b6be303c21b66d75be608757a123da6ee4a4f14c01716d7", "deposit_message_root": "c5271aba974c802ff5b02b11fa33b545d7f430ff3b85c0f9eeef4cd59d83abf3", "deposit_data_root": "cd991ea8ff32e6b3940aed43b476c720fc1abd3040893b77a8a3efb306320d4c", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.3.0"}] \ No newline at end of file +[{"pubkey": "88b6b3a9b391fa5593e8bce8d06102df1a56248368086929709fbb4a8570dc6a560febeef8159b19789e9c1fd13572f0", "withdrawal_credentials": "0049b6188ed20314309f617dd4030b8ddfac3c6e65759a03c226a13b2fe4cc72", "amount": 32000000000, "signature": "8ac88247c1b431a2d1eb2c5f00e7b8467bc21d6dc267f1af9ef727a12e32b4299e3b289ae5734a328b3202478dd746a80bf9e15a2217240dca1fc1b91a6b7ff7a0f5830d9a2610c1c30f19912346271357c21bd9af35a74097ebbdda2ddaf491", "deposit_message_root": "a9bc1d21cc009d9b10782a07213e37592c0d235463ed0117dec755758da90d51", "deposit_data_root": "807a20b2801eabfd9065c1b74ed6ae3e991a1ab770e4eaf268f30b37cfd2cbd7", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.7.0"}, {"pubkey": "a33ab9d93fb53c4f027944aaa11a13be0c150b7cc2e379d85d1ed4db38d178b4e4ebeae05832158b8c746c1961da00ce", "withdrawal_credentials": "00ad3748cbd1adc855c2bdab431f7e755a21663f4f6447ac888e5855c588af5a", "amount": 32000000000, "signature": "84b9fc8f260a1488c4c9a438f875edfa2bac964d651b2bc886d8442829b13f89752e807c8ca9bae9d50b1b506d3a64730015dd7f91e271ff9c1757d1996dcf6082fe5205cf6329fa2b6be303c21b66d75be608757a123da6ee4a4f14c01716d7", "deposit_message_root": "c5271aba974c802ff5b02b11fa33b545d7f430ff3b85c0f9eeef4cd59d83abf3", "deposit_data_root": 
"cd991ea8ff32e6b3940aed43b476c720fc1abd3040893b77a8a3efb306320d4c", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.7.0"}] \ No newline at end of file diff --git a/validator_manager/test_vectors/vectors/mainnet_first_0_count_2_eth1_true/validator_keys/deposit_data-1660803684.json b/validator_manager/test_vectors/vectors/mainnet_first_0_count_2_eth1_true/validator_keys/deposit_data-1715584107.json similarity index 90% rename from validator_manager/test_vectors/vectors/mainnet_first_0_count_2_eth1_true/validator_keys/deposit_data-1660803684.json rename to validator_manager/test_vectors/vectors/mainnet_first_0_count_2_eth1_true/validator_keys/deposit_data-1715584107.json index da92a1d0d9..5741f23d8f 100644 --- a/validator_manager/test_vectors/vectors/mainnet_first_0_count_2_eth1_true/validator_keys/deposit_data-1660803684.json +++ b/validator_manager/test_vectors/vectors/mainnet_first_0_count_2_eth1_true/validator_keys/deposit_data-1715584107.json @@ -1 +1 @@ -[{"pubkey": "88b6b3a9b391fa5593e8bce8d06102df1a56248368086929709fbb4a8570dc6a560febeef8159b19789e9c1fd13572f0", "withdrawal_credentials": "0100000000000000000000000f51bb10119727a7e5ea3538074fb341f56b09ad", "amount": 32000000000, "signature": "a8461b58a5a5a0573c4af37da6ee4ba63e35894cffad6797d4a2c80f8f2c79d2c30c0de0299d8edde76e0c3f3e6d4f1e03cc377969f56d8760717d6e86f9316da9375573ce7bb87a8520daedb13c49284377f7a4f64a70aa2ca44b1581d47e20", "deposit_message_root": "62967565d11471da4af7769911926cd1826124048036b25616216f99bc320f13", "deposit_data_root": "d26d642a880ff8a109260fe69681840f6e1868c8c1cd2163a1db5a094e8db03a", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.3.0"}, {"pubkey": "a33ab9d93fb53c4f027944aaa11a13be0c150b7cc2e379d85d1ed4db38d178b4e4ebeae05832158b8c746c1961da00ce", "withdrawal_credentials": "0100000000000000000000000f51bb10119727a7e5ea3538074fb341f56b09ad", "amount": 32000000000, "signature": "93a398c09143203beb94c9223c7e18f36e5ea36090875284b222c2fcb16982e6f2e26f27ca9d30e3c6f6b5ad44857fc50f531925f4736810712f68a9d7a9c0eb664a851180f3b7d2e44a35717d43b3d3e4fd555354fa1dfa92f451870f36084d", "deposit_message_root": "ce110433298ffb78d827d67dcc13655344a139cb7e3ce10b341937c0a76b25b7", "deposit_data_root": "7c7617a2c11870ec49e975b3691b9f822d63938df38555161e23aa245b150c66", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.3.0"}] \ No newline at end of file +[{"pubkey": "88b6b3a9b391fa5593e8bce8d06102df1a56248368086929709fbb4a8570dc6a560febeef8159b19789e9c1fd13572f0", "withdrawal_credentials": "0100000000000000000000000f51bb10119727a7e5ea3538074fb341f56b09ad", "amount": 32000000000, "signature": "a8461b58a5a5a0573c4af37da6ee4ba63e35894cffad6797d4a2c80f8f2c79d2c30c0de0299d8edde76e0c3f3e6d4f1e03cc377969f56d8760717d6e86f9316da9375573ce7bb87a8520daedb13c49284377f7a4f64a70aa2ca44b1581d47e20", "deposit_message_root": "62967565d11471da4af7769911926cd1826124048036b25616216f99bc320f13", "deposit_data_root": "d26d642a880ff8a109260fe69681840f6e1868c8c1cd2163a1db5a094e8db03a", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.7.0"}, {"pubkey": "a33ab9d93fb53c4f027944aaa11a13be0c150b7cc2e379d85d1ed4db38d178b4e4ebeae05832158b8c746c1961da00ce", "withdrawal_credentials": "0100000000000000000000000f51bb10119727a7e5ea3538074fb341f56b09ad", "amount": 32000000000, "signature": 
"93a398c09143203beb94c9223c7e18f36e5ea36090875284b222c2fcb16982e6f2e26f27ca9d30e3c6f6b5ad44857fc50f531925f4736810712f68a9d7a9c0eb664a851180f3b7d2e44a35717d43b3d3e4fd555354fa1dfa92f451870f36084d", "deposit_message_root": "ce110433298ffb78d827d67dcc13655344a139cb7e3ce10b341937c0a76b25b7", "deposit_data_root": "7c7617a2c11870ec49e975b3691b9f822d63938df38555161e23aa245b150c66", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.7.0"}] \ No newline at end of file diff --git a/validator_manager/test_vectors/vectors/mainnet_first_1024_count_3_eth1_false/validator_keys/deposit_data-1660803679.json b/validator_manager/test_vectors/vectors/mainnet_first_1024_count_3_eth1_false/validator_keys/deposit_data-1715584103.json similarity index 93% rename from validator_manager/test_vectors/vectors/mainnet_first_1024_count_3_eth1_false/validator_keys/deposit_data-1660803679.json rename to validator_manager/test_vectors/vectors/mainnet_first_1024_count_3_eth1_false/validator_keys/deposit_data-1715584103.json index 9cc01dc0df..9b9556cf9d 100644 --- a/validator_manager/test_vectors/vectors/mainnet_first_1024_count_3_eth1_false/validator_keys/deposit_data-1660803679.json +++ b/validator_manager/test_vectors/vectors/mainnet_first_1024_count_3_eth1_false/validator_keys/deposit_data-1715584103.json @@ -1 +1 @@ -[{"pubkey": "92ca8dddba4ae7ada6584c377fc53fb978ad9d5ee8db585b18e226c27682b326b3c68e10f5d99a453e233268c144e0ef", "withdrawal_credentials": "00dd4f8bfd1a48be288c2af8bb7315f6198900b5b3f56df010420d5328e682cb", "amount": 32000000000, "signature": "a0a96851892b257c032284928641021e58e0bcd277c3da5a2c41bcce6633d144781e4761261138277b5a8cf0ead59cce073e5a3bbc4704a37abf8cd1e290dc52e56cb0c334303945ebbb79be453c8177937e44e08f980679f1a2997fe58d2d86", "deposit_message_root": "5421d9177b4d035e6525506509ab702c5f458c53458dad437097b37cb8209b43", "deposit_data_root": "2bedaf48f8315d8631defc97c1c4c05a8152e2dc3fe779fc8e800dd67bd839a2", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.3.0"}, {"pubkey": "86474cd2874663445ef0ee02aca81b2b942a383fd4c7085fa675388e26c67afc0fef44a8666d46f571723e349ae4a0cb", "withdrawal_credentials": "001c31aa161ed1d3c481c1ee8f3ad1853217296a15877917fe3c2f680580ac01", "amount": 32000000000, "signature": "b469179ad8ba9d6ad71b99a3c7ae662d9b77cca3ee53b20ab2eb20beee31874ad47224e94e75578fa6ecd30c1d40a0b300053817f934169d84425691edf13216445fbc6dd9b0953ad3af20c834fba63c1f50c0b0f92dd8bf383cd2cc8e0431f1", "deposit_message_root": "279271f7065c83868c37021c32c014516b21e6188fb2cee4e8543c5d38427698", "deposit_data_root": "69862477671957ab0b3f1167c5cd550c107132a0079eb70eaa4bc5c5fe06b5a0", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.3.0"}, {"pubkey": "997e27aa262238beb01464434694a466321b5270297bdfdb944b65a3b6617b6ce2613628ac35a8f4cf2e9b4b55c46ef8", "withdrawal_credentials": "0097fffee9cf9fd91a6fa89af90e73f1cb8b8a043e742afaeb2e57b83b0845fe", "amount": 32000000000, "signature": "a8b05626657ce5b1801e0824aaeb21de2e1a11bc16cad6100ac911bcb873aaf7e7282f1f8465df4aaea998a1a4e1645f075e7e65f8c6b8688b0162f86be2128541f91fc9feb628bcab3b4afec1f7aeccaba04aaa54dc17c738233d360f94b97e", "deposit_message_root": "187e177721bfdd8ea13cb52c8de2dead29164a0e093efb640457a0e6ac918191", "deposit_data_root": "34ef32901d793cd9a0a3d93e7ee40e7be9abe6fb26f0b49a86b8ff29dc649930", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.3.0"}] \ No newline at end of file +[{"pubkey": 
"92ca8dddba4ae7ada6584c377fc53fb978ad9d5ee8db585b18e226c27682b326b3c68e10f5d99a453e233268c144e0ef", "withdrawal_credentials": "00dd4f8bfd1a48be288c2af8bb7315f6198900b5b3f56df010420d5328e682cb", "amount": 32000000000, "signature": "a0a96851892b257c032284928641021e58e0bcd277c3da5a2c41bcce6633d144781e4761261138277b5a8cf0ead59cce073e5a3bbc4704a37abf8cd1e290dc52e56cb0c334303945ebbb79be453c8177937e44e08f980679f1a2997fe58d2d86", "deposit_message_root": "5421d9177b4d035e6525506509ab702c5f458c53458dad437097b37cb8209b43", "deposit_data_root": "2bedaf48f8315d8631defc97c1c4c05a8152e2dc3fe779fc8e800dd67bd839a2", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.7.0"}, {"pubkey": "86474cd2874663445ef0ee02aca81b2b942a383fd4c7085fa675388e26c67afc0fef44a8666d46f571723e349ae4a0cb", "withdrawal_credentials": "001c31aa161ed1d3c481c1ee8f3ad1853217296a15877917fe3c2f680580ac01", "amount": 32000000000, "signature": "b469179ad8ba9d6ad71b99a3c7ae662d9b77cca3ee53b20ab2eb20beee31874ad47224e94e75578fa6ecd30c1d40a0b300053817f934169d84425691edf13216445fbc6dd9b0953ad3af20c834fba63c1f50c0b0f92dd8bf383cd2cc8e0431f1", "deposit_message_root": "279271f7065c83868c37021c32c014516b21e6188fb2cee4e8543c5d38427698", "deposit_data_root": "69862477671957ab0b3f1167c5cd550c107132a0079eb70eaa4bc5c5fe06b5a0", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.7.0"}, {"pubkey": "997e27aa262238beb01464434694a466321b5270297bdfdb944b65a3b6617b6ce2613628ac35a8f4cf2e9b4b55c46ef8", "withdrawal_credentials": "0097fffee9cf9fd91a6fa89af90e73f1cb8b8a043e742afaeb2e57b83b0845fe", "amount": 32000000000, "signature": "a8b05626657ce5b1801e0824aaeb21de2e1a11bc16cad6100ac911bcb873aaf7e7282f1f8465df4aaea998a1a4e1645f075e7e65f8c6b8688b0162f86be2128541f91fc9feb628bcab3b4afec1f7aeccaba04aaa54dc17c738233d360f94b97e", "deposit_message_root": "187e177721bfdd8ea13cb52c8de2dead29164a0e093efb640457a0e6ac918191", "deposit_data_root": "34ef32901d793cd9a0a3d93e7ee40e7be9abe6fb26f0b49a86b8ff29dc649930", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.7.0"}] \ No newline at end of file diff --git a/validator_manager/test_vectors/vectors/mainnet_first_12_count_1_eth1_false/validator_keys/deposit_data-1660803672.json b/validator_manager/test_vectors/vectors/mainnet_first_12_count_1_eth1_false/validator_keys/deposit_data-1715584095.json similarity index 90% rename from validator_manager/test_vectors/vectors/mainnet_first_12_count_1_eth1_false/validator_keys/deposit_data-1660803672.json rename to validator_manager/test_vectors/vectors/mainnet_first_12_count_1_eth1_false/validator_keys/deposit_data-1715584095.json index 3a971d0959..84140f53fe 100644 --- a/validator_manager/test_vectors/vectors/mainnet_first_12_count_1_eth1_false/validator_keys/deposit_data-1660803672.json +++ b/validator_manager/test_vectors/vectors/mainnet_first_12_count_1_eth1_false/validator_keys/deposit_data-1715584095.json @@ -1 +1 @@ -[{"pubkey": "8b181759a027c09a409ef24f6b35db213982c2474e2017f3851d76b1c4e560a4238072f67a0c22cb667f940da4ea9ec9", "withdrawal_credentials": "00cbec90e8570679f565bd4645f73a078981067a705564283e61c93c81707842", "amount": 32000000000, "signature": "a57299cde3c2ea8dc17ad3ce5a38a5f6de69d198599150dc4df02624ba1d8672440d02c0d27c3dc3b8c9f86c679571ab14c798426acd9b059895f1f5887bdee805fb4e31bd8f93ec9e78403c23d7924f23eae6af056154f35fee03bf9ffe0e98", "deposit_message_root": "fcdf3d94740766299a95b3e477e64abadff6ab8978400578f241c93eb367b938", "deposit_data_root": 
"246619823b45d80f53a30404542ec4be447d4e268cc0afcdf480e6a846d58411", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.3.0"}] \ No newline at end of file +[{"pubkey": "8b181759a027c09a409ef24f6b35db213982c2474e2017f3851d76b1c4e560a4238072f67a0c22cb667f940da4ea9ec9", "withdrawal_credentials": "00cbec90e8570679f565bd4645f73a078981067a705564283e61c93c81707842", "amount": 32000000000, "signature": "a57299cde3c2ea8dc17ad3ce5a38a5f6de69d198599150dc4df02624ba1d8672440d02c0d27c3dc3b8c9f86c679571ab14c798426acd9b059895f1f5887bdee805fb4e31bd8f93ec9e78403c23d7924f23eae6af056154f35fee03bf9ffe0e98", "deposit_message_root": "fcdf3d94740766299a95b3e477e64abadff6ab8978400578f241c93eb367b938", "deposit_data_root": "246619823b45d80f53a30404542ec4be447d4e268cc0afcdf480e6a846d58411", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.7.0"}] \ No newline at end of file diff --git a/validator_manager/test_vectors/vectors/mainnet_first_99_count_2_eth1_false/validator_keys/deposit_data-1660803675.json b/validator_manager/test_vectors/vectors/mainnet_first_99_count_2_eth1_false/validator_keys/deposit_data-1715584098.json similarity index 90% rename from validator_manager/test_vectors/vectors/mainnet_first_99_count_2_eth1_false/validator_keys/deposit_data-1660803675.json rename to validator_manager/test_vectors/vectors/mainnet_first_99_count_2_eth1_false/validator_keys/deposit_data-1715584098.json index 2efa5c4ec8..3205390a43 100644 --- a/validator_manager/test_vectors/vectors/mainnet_first_99_count_2_eth1_false/validator_keys/deposit_data-1660803675.json +++ b/validator_manager/test_vectors/vectors/mainnet_first_99_count_2_eth1_false/validator_keys/deposit_data-1715584098.json @@ -1 +1 @@ -[{"pubkey": "a57a4ed429e415b862cc758e75c93936e3f6339640d0763b969ba133a82c03717827fbdd8ec42fc862ed50e3b5b528dc", "withdrawal_credentials": "00864081ef2f5aec1aa667872615e25027f1fdc256a4948b6318cf75a8d635a3", "amount": 32000000000, "signature": "8ca8a6f30b4346d7b9912e3dcd820652bc472511f89d91fd102acfb0c8df1cfc7a2629f44170727e126e88f2847fe5c9081b13fb0838a2b2343a95cabf16f57708fc0cf846bc5307209ae976c34500cc826ff48ab64169d8bebec99dded5dd1d", "deposit_message_root": "c08d0ecd085bc0f50c35f1b34d8b8937b2b9c8a172a9808de70f8d448c526f07", "deposit_data_root": "c0c6cd40b43ea0fe7fcc284de9acd9c1bd001bb88c059c155393af22a6c85d46", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.3.0"}, {"pubkey": "a2801622bc391724989004b5de78cb85746f85a303572691ecc945d9f5c61ec512127e58482e0dfcb4de77be3294ab01", "withdrawal_credentials": "00edff674c66a7f58285554e700183aeee5e740691de8087f7ce4d81f3597108", "amount": 32000000000, "signature": "8c0784645c611b4f514a6519b737f2d02df3eba0e04cd30efebffcca769af8cc599ce28e4421cefe665ec31d3c34e44c174e0cca4891d8196796085e712459b45e411efecd07cf3258f1d6309a07a6dd52a0ae186e6184d37bf11cee36ec84e8", "deposit_message_root": "f5a530bee9698c2447961ecd210184fbb130bbb8e8916988d802d47e3b147842", "deposit_data_root": "c57790b77ef97318d4ec7b97ea07ea458d08209ba372bfe76171e2ece22d6130", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.3.0"}] \ No newline at end of file +[{"pubkey": "a57a4ed429e415b862cc758e75c93936e3f6339640d0763b969ba133a82c03717827fbdd8ec42fc862ed50e3b5b528dc", "withdrawal_credentials": "00864081ef2f5aec1aa667872615e25027f1fdc256a4948b6318cf75a8d635a3", "amount": 32000000000, "signature": 
"8ca8a6f30b4346d7b9912e3dcd820652bc472511f89d91fd102acfb0c8df1cfc7a2629f44170727e126e88f2847fe5c9081b13fb0838a2b2343a95cabf16f57708fc0cf846bc5307209ae976c34500cc826ff48ab64169d8bebec99dded5dd1d", "deposit_message_root": "c08d0ecd085bc0f50c35f1b34d8b8937b2b9c8a172a9808de70f8d448c526f07", "deposit_data_root": "c0c6cd40b43ea0fe7fcc284de9acd9c1bd001bb88c059c155393af22a6c85d46", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.7.0"}, {"pubkey": "a2801622bc391724989004b5de78cb85746f85a303572691ecc945d9f5c61ec512127e58482e0dfcb4de77be3294ab01", "withdrawal_credentials": "00edff674c66a7f58285554e700183aeee5e740691de8087f7ce4d81f3597108", "amount": 32000000000, "signature": "8c0784645c611b4f514a6519b737f2d02df3eba0e04cd30efebffcca769af8cc599ce28e4421cefe665ec31d3c34e44c174e0cca4891d8196796085e712459b45e411efecd07cf3258f1d6309a07a6dd52a0ae186e6184d37bf11cee36ec84e8", "deposit_message_root": "f5a530bee9698c2447961ecd210184fbb130bbb8e8916988d802d47e3b147842", "deposit_data_root": "c57790b77ef97318d4ec7b97ea07ea458d08209ba372bfe76171e2ece22d6130", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.7.0"}] \ No newline at end of file diff --git a/validator_manager/test_vectors/vectors/prater_first_0_count_1_eth1_false/validator_keys/deposit_data-1660803687.json b/validator_manager/test_vectors/vectors/prater_first_0_count_1_eth1_false/validator_keys/deposit_data-1660803687.json deleted file mode 100644 index c736d75b7e..0000000000 --- a/validator_manager/test_vectors/vectors/prater_first_0_count_1_eth1_false/validator_keys/deposit_data-1660803687.json +++ /dev/null @@ -1 +0,0 @@ -[{"pubkey": "88b6b3a9b391fa5593e8bce8d06102df1a56248368086929709fbb4a8570dc6a560febeef8159b19789e9c1fd13572f0", "withdrawal_credentials": "0049b6188ed20314309f617dd4030b8ddfac3c6e65759a03c226a13b2fe4cc72", "amount": 32000000000, "signature": "a940e0142ad9b56a1310326137347d1ada275b31b3748af4accc63bd189573376615be8e8ae047766c6d10864e54b2e7098177598edf3a043eb560bbdf1a1c12588375a054d1323a0900e2286d0993cde9675e5b74523e6e8e03715cc96b3ce5", "deposit_message_root": "a9bc1d21cc009d9b10782a07213e37592c0d235463ed0117dec755758da90d51", "deposit_data_root": "28484efb20c961a1354689a556d4c352fe9deb24684efdb32d22e1af17e2a45d", "fork_version": "00001020", "network_name": "goerli", "deposit_cli_version": "2.3.0"}] \ No newline at end of file diff --git a/validator_manager/test_vectors/vectors/prater_first_0_count_2_eth1_false/validator_keys/deposit_data-1660803690.json b/validator_manager/test_vectors/vectors/prater_first_0_count_2_eth1_false/validator_keys/deposit_data-1660803690.json deleted file mode 100644 index e86500d14f..0000000000 --- a/validator_manager/test_vectors/vectors/prater_first_0_count_2_eth1_false/validator_keys/deposit_data-1660803690.json +++ /dev/null @@ -1 +0,0 @@ -[{"pubkey": "88b6b3a9b391fa5593e8bce8d06102df1a56248368086929709fbb4a8570dc6a560febeef8159b19789e9c1fd13572f0", "withdrawal_credentials": "0049b6188ed20314309f617dd4030b8ddfac3c6e65759a03c226a13b2fe4cc72", "amount": 32000000000, "signature": "a940e0142ad9b56a1310326137347d1ada275b31b3748af4accc63bd189573376615be8e8ae047766c6d10864e54b2e7098177598edf3a043eb560bbdf1a1c12588375a054d1323a0900e2286d0993cde9675e5b74523e6e8e03715cc96b3ce5", "deposit_message_root": "a9bc1d21cc009d9b10782a07213e37592c0d235463ed0117dec755758da90d51", "deposit_data_root": "28484efb20c961a1354689a556d4c352fe9deb24684efdb32d22e1af17e2a45d", "fork_version": "00001020", "network_name": "goerli", "deposit_cli_version": "2.3.0"}, 
{"pubkey": "a33ab9d93fb53c4f027944aaa11a13be0c150b7cc2e379d85d1ed4db38d178b4e4ebeae05832158b8c746c1961da00ce", "withdrawal_credentials": "00ad3748cbd1adc855c2bdab431f7e755a21663f4f6447ac888e5855c588af5a", "amount": 32000000000, "signature": "87b4b4e9c923aa9e1687219e9df0e838956ee6e15b7ab18142467430d00940dc7aa243c9996e85125dfe72d9dbdb00a30a36e16a2003ee0c86f29c9f5d74f12bfe5b7f62693dbf5187a093555ae8d6b48acd075788549c4b6a249b397af24cd0", "deposit_message_root": "c5271aba974c802ff5b02b11fa33b545d7f430ff3b85c0f9eeef4cd59d83abf3", "deposit_data_root": "ea80b639356a03f6f58e4acbe881fabefc9d8b93375a6aa7e530c77d7e45d3e4", "fork_version": "00001020", "network_name": "goerli", "deposit_cli_version": "2.3.0"}] \ No newline at end of file diff --git a/validator_manager/test_vectors/vectors/prater_first_0_count_2_eth1_true/validator_keys/deposit_data-1660803705.json b/validator_manager/test_vectors/vectors/prater_first_0_count_2_eth1_true/validator_keys/deposit_data-1660803705.json deleted file mode 100644 index c79ae5a4fc..0000000000 --- a/validator_manager/test_vectors/vectors/prater_first_0_count_2_eth1_true/validator_keys/deposit_data-1660803705.json +++ /dev/null @@ -1 +0,0 @@ -[{"pubkey": "88b6b3a9b391fa5593e8bce8d06102df1a56248368086929709fbb4a8570dc6a560febeef8159b19789e9c1fd13572f0", "withdrawal_credentials": "0100000000000000000000000f51bb10119727a7e5ea3538074fb341f56b09ad", "amount": 32000000000, "signature": "ab32595d8201c2b4e8173aece9151fdc15f4d2ad36008462d0416598ddbf0f37ed0877f06d284a9669e73dbc0885bd2207fe64385e95a4488dc2bcb2c324d5c20da3248a6244463583dfbba8db20805765421e59cb56b0bc3ee6d24a9218216d", "deposit_message_root": "62967565d11471da4af7769911926cd1826124048036b25616216f99bc320f13", "deposit_data_root": "b4df3a3a26dd5f6eb32999d8a7051a7d1a8573a16553d4b45ee706a0d59c1066", "fork_version": "00001020", "network_name": "goerli", "deposit_cli_version": "2.3.0"}, {"pubkey": "a33ab9d93fb53c4f027944aaa11a13be0c150b7cc2e379d85d1ed4db38d178b4e4ebeae05832158b8c746c1961da00ce", "withdrawal_credentials": "0100000000000000000000000f51bb10119727a7e5ea3538074fb341f56b09ad", "amount": 32000000000, "signature": "9655e195eda5517efe6f36bcebd45250c889a4177d7bf5fcd59598d2d03f37f038b5ee2ec079a30a8382ea42f351943f08a6f006bab9c2130db2742bd7315c8ad5aa1f03a0801c26d4c9efdef71c4c59c449c7f9b21fa62600ab8f5f1e2b938a", "deposit_message_root": "ce110433298ffb78d827d67dcc13655344a139cb7e3ce10b341937c0a76b25b7", "deposit_data_root": "7661474fba11bfb453274f62df022cab3c0b6f4a58af4400f6bce83c9cb5fcb8", "fork_version": "00001020", "network_name": "goerli", "deposit_cli_version": "2.3.0"}] \ No newline at end of file diff --git a/validator_manager/test_vectors/vectors/prater_first_1024_count_3_eth1_false/validator_keys/deposit_data-1660803701.json b/validator_manager/test_vectors/vectors/prater_first_1024_count_3_eth1_false/validator_keys/deposit_data-1660803701.json deleted file mode 100644 index 136dc38554..0000000000 --- a/validator_manager/test_vectors/vectors/prater_first_1024_count_3_eth1_false/validator_keys/deposit_data-1660803701.json +++ /dev/null @@ -1 +0,0 @@ -[{"pubkey": "92ca8dddba4ae7ada6584c377fc53fb978ad9d5ee8db585b18e226c27682b326b3c68e10f5d99a453e233268c144e0ef", "withdrawal_credentials": "00dd4f8bfd1a48be288c2af8bb7315f6198900b5b3f56df010420d5328e682cb", "amount": 32000000000, "signature": "b5dae79ce8f3d7326b46f93182981c5f3d64257a457f038caa78ec8e5cc25a9fdac52c7beb221ab2a3205404131366ad18e1e13801393b3d486819e8cca96128bf1244884a91d05dced092c74bc1e7259788f30dd3432df15f3d2f629645f345", "deposit_message_root": 
"5421d9177b4d035e6525506509ab702c5f458c53458dad437097b37cb8209b43", "deposit_data_root": "94213d76aba9e6a434589d4939dd3764e0832df78f66d30db22a760c14ba1b89", "fork_version": "00001020", "network_name": "goerli", "deposit_cli_version": "2.3.0"}, {"pubkey": "86474cd2874663445ef0ee02aca81b2b942a383fd4c7085fa675388e26c67afc0fef44a8666d46f571723e349ae4a0cb", "withdrawal_credentials": "001c31aa161ed1d3c481c1ee8f3ad1853217296a15877917fe3c2f680580ac01", "amount": 32000000000, "signature": "816f38a321c4f84ad5187eda58f6d9c1fd1e81c860ed1722bdb76b920fdd430a1e814b9bb893837ae3b38ad738684fbf1795fa687f617c52121472b1ac8d2e34e5c1127186233a8833ffb54c509d9e52cb7242c6c6a65b5e496296b3caa90d89", "deposit_message_root": "279271f7065c83868c37021c32c014516b21e6188fb2cee4e8543c5d38427698", "deposit_data_root": "7ad1d059d69794680a1deef5e72c33827f0c449a5f0917095821c0343572789d", "fork_version": "00001020", "network_name": "goerli", "deposit_cli_version": "2.3.0"}, {"pubkey": "997e27aa262238beb01464434694a466321b5270297bdfdb944b65a3b6617b6ce2613628ac35a8f4cf2e9b4b55c46ef8", "withdrawal_credentials": "0097fffee9cf9fd91a6fa89af90e73f1cb8b8a043e742afaeb2e57b83b0845fe", "amount": 32000000000, "signature": "95d20c35484dea6b2a0bd7c2da2d2e810d7829e14c03657b2524adfc2111aa5ed95908ecb975ff75ff742c68ce8df417016c048959b0f807675430f6d981478e26d48e594e0830a0406da9817f8a1ecb94bd8be1f9281eeb5e952a82173c72bb", "deposit_message_root": "187e177721bfdd8ea13cb52c8de2dead29164a0e093efb640457a0e6ac918191", "deposit_data_root": "83abfb2a166f7af708526a9bdd2767c4be3cd231c9bc4e2f047a80df88a2860c", "fork_version": "00001020", "network_name": "goerli", "deposit_cli_version": "2.3.0"}] \ No newline at end of file diff --git a/validator_manager/test_vectors/vectors/prater_first_12_count_1_eth1_false/validator_keys/deposit_data-1660803693.json b/validator_manager/test_vectors/vectors/prater_first_12_count_1_eth1_false/validator_keys/deposit_data-1660803693.json deleted file mode 100644 index ccd2ece069..0000000000 --- a/validator_manager/test_vectors/vectors/prater_first_12_count_1_eth1_false/validator_keys/deposit_data-1660803693.json +++ /dev/null @@ -1 +0,0 @@ -[{"pubkey": "8b181759a027c09a409ef24f6b35db213982c2474e2017f3851d76b1c4e560a4238072f67a0c22cb667f940da4ea9ec9", "withdrawal_credentials": "00cbec90e8570679f565bd4645f73a078981067a705564283e61c93c81707842", "amount": 32000000000, "signature": "8f75836ceb390dd4fc8c16bc4be52ca09b9c5aa0ab5bc16dcfdb344787b29ddfd76d877b0a2330bc8e904b233397c6bd124845d1b868e4951cb6daacea023c986bdf0c6ac28d73f65681d941ea96623bc23acc7c84dcfc1304686240d9171cfc", "deposit_message_root": "fcdf3d94740766299a95b3e477e64abadff6ab8978400578f241c93eb367b938", "deposit_data_root": "3011f5cac32f13e86ecc061e89ed6675c27a46ab6ecb1ec6f6e5f133ae1d0287", "fork_version": "00001020", "network_name": "goerli", "deposit_cli_version": "2.3.0"}] \ No newline at end of file diff --git a/validator_manager/test_vectors/vectors/prater_first_99_count_2_eth1_false/validator_keys/deposit_data-1660803696.json b/validator_manager/test_vectors/vectors/prater_first_99_count_2_eth1_false/validator_keys/deposit_data-1660803696.json deleted file mode 100644 index 2ab5908307..0000000000 --- a/validator_manager/test_vectors/vectors/prater_first_99_count_2_eth1_false/validator_keys/deposit_data-1660803696.json +++ /dev/null @@ -1 +0,0 @@ -[{"pubkey": "a57a4ed429e415b862cc758e75c93936e3f6339640d0763b969ba133a82c03717827fbdd8ec42fc862ed50e3b5b528dc", "withdrawal_credentials": "00864081ef2f5aec1aa667872615e25027f1fdc256a4948b6318cf75a8d635a3", "amount": 
32000000000, "signature": "a7706e102bfb0b986a5c8050044f7e221919463149771a92c3ca46ff7d4564867db48eaf89b5237fed8db2cdb9c9c057099d0982bbdb3fbfcbe0ab7259ad3f31f7713692b78ee25e6251982e7081d049804632b70b8a24d8c3e59b624a0bd221", "deposit_message_root": "c08d0ecd085bc0f50c35f1b34d8b8937b2b9c8a172a9808de70f8d448c526f07", "deposit_data_root": "8a26fbee0c3a99fe090af1fce68afc525b4e7efa70df72abaa91f29148b2f672", "fork_version": "00001020", "network_name": "goerli", "deposit_cli_version": "2.3.0"}, {"pubkey": "a2801622bc391724989004b5de78cb85746f85a303572691ecc945d9f5c61ec512127e58482e0dfcb4de77be3294ab01", "withdrawal_credentials": "00edff674c66a7f58285554e700183aeee5e740691de8087f7ce4d81f3597108", "amount": 32000000000, "signature": "8b7aa5b0e97d15ec8c2281b919fde9e064f6ac064b163445ea99441ab063f9d10534bfde861b5606021ae46614ff075e0c2305ce5a6cbcc9f0bc8e7df1a177c4d969a5ed4ac062b0ea959bdac963fe206b73565a1a3937adcca736c6117c15f0", "deposit_message_root": "f5a530bee9698c2447961ecd210184fbb130bbb8e8916988d802d47e3b147842", "deposit_data_root": "d38575167a94b516455c5b7e36d24310a612fa0f4580446c5f9d45e4e94f0642", "fork_version": "00001020", "network_name": "goerli", "deposit_cli_version": "2.3.0"}] \ No newline at end of file diff --git a/watch/Cargo.toml b/watch/Cargo.toml index aaaf50aa40..9e8da3b293 100644 --- a/watch/Cargo.toml +++ b/watch/Cargo.toml @@ -15,6 +15,7 @@ path = "src/main.rs" [dependencies] clap = { workspace = true } +clap_utils = { workspace = true } log = { workspace = true } env_logger = { workspace = true } types = { workspace = true } @@ -30,9 +31,7 @@ url = { workspace = true } rand = { workspace = true } diesel = { version = "2.0.2", features = ["postgres", "r2d2"] } diesel_migrations = { version = "2.0.0", features = ["postgres"] } -byteorder = { workspace = true } bls = { workspace = true } -hex = { workspace = true } r2d2 = { workspace = true } serde_yaml = { workspace = true } diff --git a/watch/src/cli.rs b/watch/src/cli.rs index 97dc217293..b7179efe5d 100644 --- a/watch/src/cli.rs +++ b/watch/src/cli.rs @@ -1,28 +1,29 @@ use crate::{config::Config, logger, server, updater}; -use clap::{App, Arg}; +use clap::{Arg, ArgAction, Command}; +use clap_utils::get_color_style; pub const SERVE: &str = "serve"; pub const RUN_UPDATER: &str = "run-updater"; pub const CONFIG: &str = "config"; -fn run_updater<'a, 'b>() -> App<'a, 'b> { - App::new(RUN_UPDATER).setting(clap::AppSettings::ColoredHelp) +fn run_updater() -> Command { + Command::new(RUN_UPDATER).styles(get_color_style()) } -fn serve<'a, 'b>() -> App<'a, 'b> { - App::new(SERVE).setting(clap::AppSettings::ColoredHelp) +fn serve() -> Command { + Command::new(SERVE).styles(get_color_style()) } -pub fn app<'a, 'b>() -> App<'a, 'b> { - App::new("beacon_watch_daemon") +pub fn app() -> Command { + Command::new("beacon_watch_daemon") .author("Sigma Prime ") - .setting(clap::AppSettings::ColoredHelp) + .styles(get_color_style()) .arg( - Arg::with_name(CONFIG) + Arg::new(CONFIG) .long(CONFIG) .value_name("PATH_TO_CONFIG") .help("Path to configuration file") - .takes_value(true) + .action(ArgAction::Set) .global(true), ) .subcommand(run_updater()) @@ -32,7 +33,7 @@ pub fn app<'a, 'b>() -> App<'a, 'b> { pub async fn run() -> Result<(), String> { let matches = app().get_matches(); - let config = match matches.value_of(CONFIG) { + let config = match matches.get_one::(CONFIG) { Some(path) => Config::load_from_file(path.to_string())?, None => Config::default(), }; @@ -40,10 +41,10 @@ pub async fn run() -> Result<(), String> { 
diff --git a/watch/Cargo.toml b/watch/Cargo.toml
index aaaf50aa40..9e8da3b293 100644
--- a/watch/Cargo.toml
+++ b/watch/Cargo.toml
@@ -15,6 +15,7 @@ path = "src/main.rs"

 [dependencies]
 clap = { workspace = true }
+clap_utils = { workspace = true }
 log = { workspace = true }
 env_logger = { workspace = true }
 types = { workspace = true }
@@ -30,9 +31,7 @@ url = { workspace = true }
 rand = { workspace = true }
 diesel = { version = "2.0.2", features = ["postgres", "r2d2"] }
 diesel_migrations = { version = "2.0.0", features = ["postgres"] }
-byteorder = { workspace = true }
 bls = { workspace = true }
-hex = { workspace = true }
 r2d2 = { workspace = true }
 serde_yaml = { workspace = true }

diff --git a/watch/src/cli.rs b/watch/src/cli.rs
index 97dc217293..b7179efe5d 100644
--- a/watch/src/cli.rs
+++ b/watch/src/cli.rs
@@ -1,28 +1,29 @@
 use crate::{config::Config, logger, server, updater};
-use clap::{App, Arg};
+use clap::{Arg, ArgAction, Command};
+use clap_utils::get_color_style;

 pub const SERVE: &str = "serve";
 pub const RUN_UPDATER: &str = "run-updater";
 pub const CONFIG: &str = "config";

-fn run_updater<'a, 'b>() -> App<'a, 'b> {
-    App::new(RUN_UPDATER).setting(clap::AppSettings::ColoredHelp)
+fn run_updater() -> Command {
+    Command::new(RUN_UPDATER).styles(get_color_style())
 }

-fn serve<'a, 'b>() -> App<'a, 'b> {
-    App::new(SERVE).setting(clap::AppSettings::ColoredHelp)
+fn serve() -> Command {
+    Command::new(SERVE).styles(get_color_style())
 }

-pub fn app<'a, 'b>() -> App<'a, 'b> {
-    App::new("beacon_watch_daemon")
+pub fn app() -> Command {
+    Command::new("beacon_watch_daemon")
         .author("Sigma Prime <contact@sigmaprime.io>")
-        .setting(clap::AppSettings::ColoredHelp)
+        .styles(get_color_style())
         .arg(
-            Arg::with_name(CONFIG)
+            Arg::new(CONFIG)
                 .long(CONFIG)
                 .value_name("PATH_TO_CONFIG")
                 .help("Path to configuration file")
-                .takes_value(true)
+                .action(ArgAction::Set)
                 .global(true),
         )
         .subcommand(run_updater())
@@ -32,7 +33,7 @@ pub async fn run() -> Result<(), String> {
     let matches = app().get_matches();

-    let config = match matches.value_of(CONFIG) {
+    let config = match matches.get_one::<String>(CONFIG) {
         Some(path) => Config::load_from_file(path.to_string())?,
         None => Config::default(),
     };
@@ -40,10 +41,10 @@ pub async fn run() -> Result<(), String> {
     logger::init_logger(&config.log_level);

     match matches.subcommand() {
-        (RUN_UPDATER, Some(_)) => updater::run_updater(config)
+        Some((RUN_UPDATER, _)) => updater::run_updater(config)
             .await
             .map_err(|e| format!("Failure: {:?}", e)),
-        (SERVE, Some(_)) => server::serve(config)
+        Some((SERVE, _)) => server::serve(config)
             .await
             .map_err(|e| format!("Failure: {:?}", e)),
         _ => Err("Unsupported subcommand. See --help".into()),
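[Review note] The final hunk is where the clap v4 API differs most visibly: `ArgMatches::subcommand()` now returns `Option<(&str, &ArgMatches)>` rather than clap v2's `(&str, Option<&ArgMatches>)` tuple, which is why the match arms gain a `Some((...))` wrapper and a catch-all arm stays mandatory. A minimal sketch of the same dispatch shape; the command names mirror the constants above, and the `Ok(())` bodies are placeholders for `updater::run_updater` / `server::serve`:

    use clap::Command;

    fn main() -> Result<(), String> {
        let matches = Command::new("beacon_watch_daemon")
            .subcommand(Command::new("run-updater"))
            .subcommand(Command::new("serve"))
            .get_matches();

        // clap v4: subcommand() yields Option<(name, sub_matches)>, so an
        // absent subcommand falls through to the error arm.
        match matches.subcommand() {
            Some(("run-updater", _)) => Ok(()), // placeholder for updater::run_updater(config)
            Some(("serve", _)) => Ok(()),       // placeholder for server::serve(config)
            _ => Err("Unsupported subcommand. See --help".into()),
        }
    }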