From 8600645f65739f15937304676dd37511dbe52cf2 Mon Sep 17 00:00:00 2001 From: Divma Date: Fri, 4 Nov 2022 07:43:43 +0000 Subject: [PATCH 01/11] Fix rust 1.65 lints (#3682) ## Issue Addressed New lints for rust 1.65 ## Proposed Changes Notable change is the identification of parameters that are only used in recursion ## Additional Info na --- account_manager/src/validator/exit.rs | 2 +- account_manager/src/validator/import.rs | 4 ++-- account_manager/src/wallet/create.rs | 2 +- beacon_node/beacon_chain/src/head_tracker.rs | 2 +- beacon_node/beacon_chain/src/migrate.rs | 2 +- beacon_node/beacon_chain/src/schema_change.rs | 24 ++----------------- beacon_node/beacon_chain/src/test_utils.rs | 2 +- .../beacon_chain/src/validator_monitor.rs | 20 ++++------------ beacon_node/client/src/builder.rs | 2 -- beacon_node/http_api/src/lib.rs | 1 + .../lighthouse_network/src/service/utils.rs | 2 +- beacon_node/operation_pool/src/lib.rs | 4 ++-- beacon_node/src/lib.rs | 10 ++------ common/filesystem/src/lib.rs | 2 +- consensus/serde_utils/src/hex_vec.rs | 2 +- consensus/serde_utils/src/u64_hex_be.rs | 2 +- consensus/tree_hash/src/merkle_hasher.rs | 2 +- consensus/types/src/config_and_preset.rs | 4 ++-- consensus/types/src/graffiti.rs | 2 +- database_manager/src/lib.rs | 1 - slasher/src/array.rs | 2 +- slasher/src/database.rs | 10 ++++---- .../execution_engine_integration/src/geth.rs | 2 +- .../src/test_rig.rs | 4 ++-- .../src/transactions.rs | 2 +- validator_client/src/http_api/api_secret.rs | 2 +- 26 files changed, 37 insertions(+), 77 deletions(-) diff --git a/account_manager/src/validator/exit.rs b/account_manager/src/validator/exit.rs index ca8cab5bd3..9e5b57a297 100644 --- a/account_manager/src/validator/exit.rs +++ b/account_manager/src/validator/exit.rs @@ -349,7 +349,7 @@ fn load_voting_keypair( password_file_path: Option<&PathBuf>, stdin_inputs: bool, ) -> Result { - let keystore = Keystore::from_json_file(&voting_keystore_path).map_err(|e| { + let keystore = 
Keystore::from_json_file(voting_keystore_path).map_err(|e| { format!( "Unable to read keystore JSON {:?}: {:?}", voting_keystore_path, e diff --git a/account_manager/src/validator/import.rs b/account_manager/src/validator/import.rs index c581866a25..8dc50a9df1 100644 --- a/account_manager/src/validator/import.rs +++ b/account_manager/src/validator/import.rs @@ -176,7 +176,7 @@ pub fn cli_run(matches: &ArgMatches, validator_dir: PathBuf) -> Result<(), Strin let password = match keystore_password_path.as_ref() { Some(path) => { - let password_from_file: ZeroizeString = fs::read_to_string(&path) + let password_from_file: ZeroizeString = fs::read_to_string(path) .map_err(|e| format!("Unable to read {:?}: {:?}", path, e))? .into(); password_from_file.without_newlines() @@ -256,7 +256,7 @@ pub fn cli_run(matches: &ArgMatches, validator_dir: PathBuf) -> Result<(), Strin .ok_or_else(|| format!("Badly formatted file name: {:?}", src_keystore))?; // Copy the keystore to the new location. - fs::copy(&src_keystore, &dest_keystore) + fs::copy(src_keystore, &dest_keystore) .map_err(|e| format!("Unable to copy keystore: {:?}", e))?; // Register with slashing protection. 
diff --git a/account_manager/src/wallet/create.rs b/account_manager/src/wallet/create.rs index 9ebaeae5f1..accee11b5a 100644 --- a/account_manager/src/wallet/create.rs +++ b/account_manager/src/wallet/create.rs @@ -159,7 +159,7 @@ pub fn create_wallet_from_mnemonic( unknown => return Err(format!("--{} {} is not supported", TYPE_FLAG, unknown)), }; - let mgr = WalletManager::open(&wallet_base_dir) + let mgr = WalletManager::open(wallet_base_dir) .map_err(|e| format!("Unable to open --{}: {:?}", WALLETS_DIR_FLAG, e))?; let wallet_password: PlainText = match wallet_password_path { diff --git a/beacon_node/beacon_chain/src/head_tracker.rs b/beacon_node/beacon_chain/src/head_tracker.rs index 84c800f3b7..3fa577ff93 100644 --- a/beacon_node/beacon_chain/src/head_tracker.rs +++ b/beacon_node/beacon_chain/src/head_tracker.rs @@ -45,7 +45,7 @@ impl HeadTracker { /// Returns a `SszHeadTracker`, which contains all necessary information to restore the state /// of `Self` at some later point. pub fn to_ssz_container(&self) -> SszHeadTracker { - SszHeadTracker::from_map(&*self.0.read()) + SszHeadTracker::from_map(&self.0.read()) } /// Creates a new `Self` from the given `SszHeadTracker`, restoring `Self` to the same state of diff --git a/beacon_node/beacon_chain/src/migrate.rs b/beacon_node/beacon_chain/src/migrate.rs index 1c0d9c4ed3..66f082742e 100644 --- a/beacon_node/beacon_chain/src/migrate.rs +++ b/beacon_node/beacon_chain/src/migrate.rs @@ -588,7 +588,7 @@ impl, Cold: ItemStore> BackgroundMigrator( db: Arc>, deposit_contract_deploy_block: u64, - datadir: &Path, from: SchemaVersion, to: SchemaVersion, log: Logger, @@ -42,21 +40,12 @@ pub fn migrate_schema( migrate_schema::( db.clone(), deposit_contract_deploy_block, - datadir, from, next, log.clone(), spec, )?; - migrate_schema::( - db, - deposit_contract_deploy_block, - datadir, - next, - to, - log, - spec, - ) + migrate_schema::(db, deposit_contract_deploy_block, next, to, log, spec) } // Downgrade across multiple 
versions by recursively migrating one step at a time. (_, _) if to.as_u64() + 1 < from.as_u64() => { @@ -64,21 +53,12 @@ pub fn migrate_schema( migrate_schema::( db.clone(), deposit_contract_deploy_block, - datadir, from, next, log.clone(), spec, )?; - migrate_schema::( - db, - deposit_contract_deploy_block, - datadir, - next, - to, - log, - spec, - ) + migrate_schema::(db, deposit_contract_deploy_block, next, to, log, spec) } // diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 3b4a62f5a9..a1c7acf173 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -356,7 +356,7 @@ where let urls: Vec = urls .iter() - .map(|s| SensitiveUrl::parse(*s)) + .map(|s| SensitiveUrl::parse(s)) .collect::>() .unwrap(); diff --git a/beacon_node/beacon_chain/src/validator_monitor.rs b/beacon_node/beacon_chain/src/validator_monitor.rs index 06734d3e6e..95f4aadced 100644 --- a/beacon_node/beacon_chain/src/validator_monitor.rs +++ b/beacon_node/beacon_chain/src/validator_monitor.rs @@ -332,34 +332,22 @@ impl ValidatorMonitor { metrics::set_int_gauge( &metrics::VALIDATOR_MONITOR_SLASHED, &[id], - if validator.slashed { 1 } else { 0 }, + i64::from(validator.slashed), ); metrics::set_int_gauge( &metrics::VALIDATOR_MONITOR_ACTIVE, &[id], - if validator.is_active_at(current_epoch) { - 1 - } else { - 0 - }, + i64::from(validator.is_active_at(current_epoch)), ); metrics::set_int_gauge( &metrics::VALIDATOR_MONITOR_EXITED, &[id], - if validator.is_exited_at(current_epoch) { - 1 - } else { - 0 - }, + i64::from(validator.is_exited_at(current_epoch)), ); metrics::set_int_gauge( &metrics::VALIDATOR_MONITOR_WITHDRAWABLE, &[id], - if validator.is_withdrawable_at(current_epoch) { - 1 - } else { - 0 - }, + i64::from(validator.is_withdrawable_at(current_epoch)), ); metrics::set_int_gauge( &metrics::VALIDATOR_ACTIVATION_ELIGIBILITY_EPOCH, diff --git a/beacon_node/client/src/builder.rs 
b/beacon_node/client/src/builder.rs index c89980e6e8..36d6491a56 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -858,7 +858,6 @@ where /// Specifies that the `Client` should use a `HotColdDB` database. pub fn disk_store( mut self, - datadir: &Path, hot_path: &Path, cold_path: &Path, config: StoreConfig, @@ -888,7 +887,6 @@ where migrate_schema::>( db, deposit_contract_deploy_block, - datadir, from, to, log, diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 4267a22f98..46275820ca 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -1,3 +1,4 @@ +#![recursion_limit = "256"] //! This crate contains a HTTP server which serves the endpoints listed here: //! //! https://github.com/ethereum/beacon-APIs diff --git a/beacon_node/lighthouse_network/src/service/utils.rs b/beacon_node/lighthouse_network/src/service/utils.rs index 2aaa46fe8b..8073ae7768 100644 --- a/beacon_node/lighthouse_network/src/service/utils.rs +++ b/beacon_node/lighthouse_network/src/service/utils.rs @@ -269,7 +269,7 @@ pub(crate) fn save_metadata_to_disk( metadata: MetaData, log: &slog::Logger, ) { - let _ = std::fs::create_dir_all(&dir); + let _ = std::fs::create_dir_all(dir); match File::create(dir.join(METADATA_FILENAME)) .and_then(|mut f| f.write_all(&metadata.as_ssz_bytes())) { diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index 8c335189c6..4fe5a72545 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -267,7 +267,7 @@ impl OperationPool { &prev_epoch_key, &*all_attestations, state, - &*reward_cache, + &reward_cache, total_active_balance, prev_epoch_validity_filter, spec, @@ -278,7 +278,7 @@ impl OperationPool { &curr_epoch_key, &*all_attestations, state, - &*reward_cache, + &reward_cache, total_active_balance, curr_epoch_validity_filter, spec, diff --git a/beacon_node/src/lib.rs 
b/beacon_node/src/lib.rs index 9fd6882202..650763dcaf 100644 --- a/beacon_node/src/lib.rs +++ b/beacon_node/src/lib.rs @@ -61,7 +61,7 @@ impl ProductionBeaconNode { let client_genesis = client_config.genesis.clone(); let store_config = client_config.store.clone(); let log = context.log().clone(); - let datadir = client_config.create_data_dir()?; + let _datadir = client_config.create_data_dir()?; let db_path = client_config.create_db_path()?; let freezer_db_path = client_config.create_freezer_db_path()?; let executor = context.executor.clone(); @@ -84,13 +84,7 @@ impl ProductionBeaconNode { .runtime_context(context) .chain_spec(spec) .http_api_config(client_config.http_api.clone()) - .disk_store( - &datadir, - &db_path, - &freezer_db_path, - store_config, - log.clone(), - )?; + .disk_store(&db_path, &freezer_db_path, store_config, log.clone())?; let builder = if let Some(slasher_config) = client_config.slasher.clone() { let slasher = Arc::new( diff --git a/common/filesystem/src/lib.rs b/common/filesystem/src/lib.rs index 6305671c51..d73b7a355b 100644 --- a/common/filesystem/src/lib.rs +++ b/common/filesystem/src/lib.rs @@ -55,7 +55,7 @@ pub enum Error { /// Creates a file with `600 (-rw-------)` permissions and writes the specified bytes to file. 
pub fn create_with_600_perms>(path: P, bytes: &[u8]) -> Result<(), Error> { let path = path.as_ref(); - let mut file = File::create(&path).map_err(Error::UnableToCreateFile)?; + let mut file = File::create(path).map_err(Error::UnableToCreateFile)?; #[cfg(unix)] { diff --git a/consensus/serde_utils/src/hex_vec.rs b/consensus/serde_utils/src/hex_vec.rs index 60d6494434..f7f4833628 100644 --- a/consensus/serde_utils/src/hex_vec.rs +++ b/consensus/serde_utils/src/hex_vec.rs @@ -10,7 +10,7 @@ where S: Serializer, { let mut hex_string: String = "0x".to_string(); - hex_string.push_str(&hex::encode(&bytes)); + hex_string.push_str(&hex::encode(bytes)); serializer.serialize_str(&hex_string) } diff --git a/consensus/serde_utils/src/u64_hex_be.rs b/consensus/serde_utils/src/u64_hex_be.rs index dc6af0fa4c..6af8a75893 100644 --- a/consensus/serde_utils/src/u64_hex_be.rs +++ b/consensus/serde_utils/src/u64_hex_be.rs @@ -39,7 +39,7 @@ impl<'de> Visitor<'de> for QuantityVisitor { hex::decode(&format!("0{}", stripped)) .map_err(|e| de::Error::custom(format!("invalid hex ({:?})", e))) } else { - hex::decode(&stripped).map_err(|e| de::Error::custom(format!("invalid hex ({:?})", e))) + hex::decode(stripped).map_err(|e| de::Error::custom(format!("invalid hex ({:?})", e))) } } } diff --git a/consensus/tree_hash/src/merkle_hasher.rs b/consensus/tree_hash/src/merkle_hasher.rs index 1753eade1b..2acaf1c3b8 100644 --- a/consensus/tree_hash/src/merkle_hasher.rs +++ b/consensus/tree_hash/src/merkle_hasher.rs @@ -368,7 +368,7 @@ mod test { fn context_size() { assert_eq!( mem::size_of::(), - 232, + 224, "Halfnode size should be as expected" ); } diff --git a/consensus/types/src/config_and_preset.rs b/consensus/types/src/config_and_preset.rs index e624afe2db..b7ec015ea3 100644 --- a/consensus/types/src/config_and_preset.rs +++ b/consensus/types/src/config_and_preset.rs @@ -40,7 +40,7 @@ impl ConfigAndPreset { let extra_fields = get_extra_fields(spec); if spec.bellatrix_fork_epoch.is_some() - || 
fork_name == None + || fork_name.is_none() || fork_name == Some(ForkName::Merge) { let bellatrix_preset = BellatrixPreset::from_chain_spec::(spec); @@ -65,7 +65,7 @@ impl ConfigAndPreset { /// Get a hashmap of constants to add to the `PresetAndConfig` pub fn get_extra_fields(spec: &ChainSpec) -> HashMap { - let hex_string = |value: &[u8]| format!("0x{}", hex::encode(&value)).into(); + let hex_string = |value: &[u8]| format!("0x{}", hex::encode(value)).into(); let u32_hex = |v: u32| hex_string(&v.to_le_bytes()); let u8_hex = |v: u8| hex_string(&v.to_le_bytes()); hashmap! { diff --git a/consensus/types/src/graffiti.rs b/consensus/types/src/graffiti.rs index 73beb82649..2b0a645cd0 100644 --- a/consensus/types/src/graffiti.rs +++ b/consensus/types/src/graffiti.rs @@ -27,7 +27,7 @@ impl Graffiti { impl fmt::Display for Graffiti { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}", eth2_serde_utils::hex::encode(&self.0)) + write!(f, "{}", eth2_serde_utils::hex::encode(self.0)) } } diff --git a/database_manager/src/lib.rs b/database_manager/src/lib.rs index cb50a4ee82..c0023f3505 100644 --- a/database_manager/src/lib.rs +++ b/database_manager/src/lib.rs @@ -257,7 +257,6 @@ pub fn migrate_db( migrate_schema::, _, _, _>>( db, client_config.eth1.deposit_contract_deploy_block, - &client_config.get_data_dir(), from, to, log, diff --git a/slasher/src/array.rs b/slasher/src/array.rs index d9cb8a4ec6..4deb389124 100644 --- a/slasher/src/array.rs +++ b/slasher/src/array.rs @@ -188,7 +188,7 @@ pub trait TargetArrayChunk: Sized + serde::Serialize + serde::de::DeserializeOwn txn.put( Self::select_db(db), - &disk_key.to_be_bytes(), + disk_key.to_be_bytes(), &compressed_value, )?; Ok(()) diff --git a/slasher/src/database.rs b/slasher/src/database.rs index c8046c80dc..49d2b00a4c 100644 --- a/slasher/src/database.rs +++ b/slasher/src/database.rs @@ -301,7 +301,7 @@ impl SlasherDB { pub fn store_schema_version(&self, txn: &mut RwTransaction<'_>) -> Result<(), 
Error> { txn.put( &self.databases.metadata_db, - &METADATA_VERSION_KEY, + METADATA_VERSION_KEY, &bincode::serialize(&CURRENT_SCHEMA_VERSION)?, )?; Ok(()) @@ -323,7 +323,7 @@ impl SlasherDB { pub fn store_config(&self, config: &Config, txn: &mut RwTransaction<'_>) -> Result<(), Error> { txn.put( &self.databases.metadata_db, - &METADATA_CONFIG_KEY, + METADATA_CONFIG_KEY, &bincode::serialize(config)?, )?; Ok(()) @@ -367,7 +367,7 @@ impl SlasherDB { txn.put( &self.databases.attesters_db, &AttesterKey::new(validator_index, target_epoch, &self.config), - &CompactAttesterRecord::null().as_bytes(), + CompactAttesterRecord::null().as_bytes(), )?; } } @@ -423,7 +423,7 @@ impl SlasherDB { key: &IndexedAttestationIdKey, value: IndexedAttestationId, ) -> Result<(), Error> { - txn.put(&self.databases.indexed_attestation_id_db, key, &value)?; + txn.put(&self.databases.indexed_attestation_id_db, key, value)?; Ok(()) } @@ -579,7 +579,7 @@ impl SlasherDB { txn.put( &self.databases.attesters_db, &AttesterKey::new(validator_index, target_epoch, &self.config), - &indexed_attestation_id, + indexed_attestation_id, )?; Ok(AttesterSlashingStatus::NotSlashable) diff --git a/testing/execution_engine_integration/src/geth.rs b/testing/execution_engine_integration/src/geth.rs index 467fd8b430..1b96fa9f3f 100644 --- a/testing/execution_engine_integration/src/geth.rs +++ b/testing/execution_engine_integration/src/geth.rs @@ -13,7 +13,7 @@ const GETH_REPO_URL: &str = "https://github.com/ethereum/go-ethereum"; pub fn build_result(repo_dir: &Path) -> Output { Command::new("make") .arg("geth") - .current_dir(&repo_dir) + .current_dir(repo_dir) .output() .expect("failed to make geth") } diff --git a/testing/execution_engine_integration/src/test_rig.rs b/testing/execution_engine_integration/src/test_rig.rs index 515e238e97..6e9f37ff1f 100644 --- a/testing/execution_engine_integration/src/test_rig.rs +++ b/testing/execution_engine_integration/src/test_rig.rs @@ -202,8 +202,8 @@ impl TestRig { .await; // 
We hardcode the accounts here since some EEs start with a default unlocked account - let account1 = ethers_core::types::Address::from_slice(&hex::decode(&ACCOUNT1).unwrap()); - let account2 = ethers_core::types::Address::from_slice(&hex::decode(&ACCOUNT2).unwrap()); + let account1 = ethers_core::types::Address::from_slice(&hex::decode(ACCOUNT1).unwrap()); + let account2 = ethers_core::types::Address::from_slice(&hex::decode(ACCOUNT2).unwrap()); /* * Check the transition config endpoint. diff --git a/testing/execution_engine_integration/src/transactions.rs b/testing/execution_engine_integration/src/transactions.rs index a8c0ab3c15..62b77d5024 100644 --- a/testing/execution_engine_integration/src/transactions.rs +++ b/testing/execution_engine_integration/src/transactions.rs @@ -30,7 +30,7 @@ pub fn transactions(account1: Address, account2: Address) -> Vec(), diff --git a/validator_client/src/http_api/api_secret.rs b/validator_client/src/http_api/api_secret.rs index 484ac50bd3..b42cd11edd 100644 --- a/validator_client/src/http_api/api_secret.rs +++ b/validator_client/src/http_api/api_secret.rs @@ -60,7 +60,7 @@ impl ApiSecret { // Create and write the secret key to file with appropriate permissions create_with_600_perms( &sk_path, - eth2_serde_utils::hex::encode(&sk.serialize()).as_bytes(), + eth2_serde_utils::hex::encode(sk.serialize()).as_bytes(), ) .map_err(|e| { format!( From 0655006e8730d0f4804d67ae37bdbbf608a8aa77 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 7 Nov 2022 06:48:31 +0000 Subject: [PATCH 02/11] Clarify error log when registering validators (#3650) ## Issue Addressed NA ## Proposed Changes Adds clarification to an error log when there is an error submitting a validator registration. There seems to be a few cases where relays return errors during validator registration, including spurious timeouts and when a validator has been very recently activated/made pending. 
Changing this log helps indicate that it's "just another registration error" rather than something more serious. I didn't drop this to a `WARN` since I still have hope we can eliminate these errors completely by chatting with relays and adjusting timeouts. ## Additional Info NA --- beacon_node/http_api/src/lib.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 46275820ca..1ef3c3e2a9 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -2627,7 +2627,12 @@ pub fn serve( .await .map(|resp| warp::reply::json(&resp)) .map_err(|e| { - error!(log, "Error from connected relay"; "error" => ?e); + error!( + log, + "Relay error when registering validator(s)"; + "num_registrations" => filtered_registration_data.len(), + "error" => ?e + ); // Forward the HTTP status code if we are able to, otherwise fall back // to a server error. if let eth2::Error::ServerMessage(message) = e { From 253767ebc1af48893ab124c53013460098469728 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 7 Nov 2022 06:48:32 +0000 Subject: [PATCH 03/11] Update stale sections of the book (#3671) ## Issue Addressed Which issue # does this PR address? ## Proposed Changes * Add v3.2 and v3.3 to database migrations table * Remove docs on `--subscribe-all-subnets` and `--import-all-attestations` from redundancy docs * Clarify that the merge has already occurred on the merge migration page --- book/src/database-migrations.md | 2 ++ book/src/merge-migration.md | 14 +++++-------- book/src/redundancy.md | 37 ++++++++++----------------------- 3 files changed, 18 insertions(+), 35 deletions(-) diff --git a/book/src/database-migrations.md b/book/src/database-migrations.md index c31e373b48..2b0ac836a4 100644 --- a/book/src/database-migrations.md +++ b/book/src/database-migrations.md @@ -24,6 +24,8 @@ validator client or the slasher**. 
| v2.5.0 | Aug 2022 | v11 | yes | | v3.0.0 | Aug 2022 | v11 | yes | | v3.1.0 | Sep 2022 | v12 | yes | +| v3.2.0 | Oct 2022 | v12 | yes | +| v3.3.0 | TBD | v13 | yes | > **Note**: All point releases (e.g. v2.3.1) are schema-compatible with the prior minor release > (e.g. v2.3.0). diff --git a/book/src/merge-migration.md b/book/src/merge-migration.md index 8596cd942c..c0ba048997 100644 --- a/book/src/merge-migration.md +++ b/book/src/merge-migration.md @@ -1,9 +1,8 @@ # Merge Migration -This document provides detail for users who want to run a merge-ready Lighthouse node. +This document provides detail for users who want to run a Lighthouse node on post-merge Ethereum. -> The merge is occurring on mainnet in September. You _must_ have a merge-ready setup by September 6 -> 2022. +> The merge occurred on mainnet in September 2022. ## Necessary Configuration @@ -27,12 +26,9 @@ engine to a merge-ready version. You must configure your node to be merge-ready before the Bellatrix fork occurs on the network on which your node is operating. -* **Mainnet**: the Bellatrix fork is scheduled for epoch 144896, September 6 2022 11:34 UTC. - You must ensure your node configuration is updated before then in order to continue following - the chain. We recommend updating your configuration now. - -* **Goerli (Prater)**, **Ropsten**, **Sepolia**, **Kiln**: the Bellatrix fork has already occurred. - You must have a merge-ready configuration right now. +* **Gnosis**: the Bellatrix fork has not yet been scheduled. +* **Mainnet**, **Goerli (Prater)**, **Ropsten**, **Sepolia**, **Kiln**: the Bellatrix fork has + already occurred. You must have a merge-ready configuration right now. ## Connecting to an execution engine diff --git a/book/src/redundancy.md b/book/src/redundancy.md index dae7ac51fe..dcd2ecdea1 100644 --- a/book/src/redundancy.md +++ b/book/src/redundancy.md @@ -55,42 +55,27 @@ In our previous example, we listed `http://192.168.1.1:5052` as a redundant node. 
Apart from having sufficient resources, the backup node should have the following flags: -- `--staking`: starts the HTTP API server and ensures the execution chain is synced. +- `--http`: starts the HTTP API server. - `--http-address 0.0.0.0`: this allows *any* external IP address to access the HTTP server (a firewall should be configured to deny unauthorized access to port `5052`). This is only required if your backup node is on a different host. -- `--subscribe-all-subnets`: ensures that the beacon node subscribes to *all* - subnets, not just on-demand requests from validators. -- `--import-all-attestations`: ensures that the beacon node performs - aggregation on all seen attestations. +- `--execution-endpoint`: see [Merge Migration](./merge-migration.md). +- `--execution-jwt`: see [Merge Migration](./merge-migration.md). -Subsequently, one could use the following command to provide a backup beacon -node: +For example one could use the following command to provide a backup beacon node: ```bash lighthouse bn \ - --staking \ + --http \ --http-address 0.0.0.0 \ - --subscribe-all-subnets \ - --import-all-attestations + --execution-endpoint http://localhost:8551 \ + --execution-jwt /secrets/jwt.hex ``` -### Resource usage of redundant Beacon Nodes - -The `--subscribe-all-subnets` and `--import-all-attestations` flags typically -cause a significant increase in resource consumption. A doubling in CPU -utilization and RAM consumption is expected. - -The increase in resource consumption is due to the fact that the beacon node is -now processing, validating, aggregating and forwarding *all* attestations, -whereas previously it was likely only doing a fraction of this work. Without -these flags, subscription to attestation subnets and aggregation of -attestations is only performed for validators which [explicitly request -subscriptions][subscribe-api]. - -There are 64 subnets and each validator will result in a subscription to *at -least* one subnet. 
So, using the two aforementioned flags will result in -resource consumption akin to running 64+ validators. +Prior to v3.2.0 fallback beacon nodes also required the `--subscribe-all-subnets` and +`--import-all-attestations` flags. These flags are no longer required as the validator client will +now broadcast subscriptions to all connected beacon nodes by default. This broadcast behaviour +can be disabled using the `--disable-run-on-all` flag for `lighthouse vc`. ## Redundant execution nodes From 84c7d8cc7006a6f1f1bb5729ab222b9f85f72727 Mon Sep 17 00:00:00 2001 From: Divma Date: Mon, 7 Nov 2022 06:48:34 +0000 Subject: [PATCH 04/11] Blocklookup data inconsistencies (#3677) ## Issue Addressed Closes #3649 ## Proposed Changes Add a regression test for the data inconsistency, catching the problem in https://github.com/sigp/lighthouse/pull/3677/commits/31e88c5533be9cf25571dd5ffbdf6e0bdc26f060 [here](https://github.com/sigp/lighthouse/actions/runs/3379894044/jobs/5612044797#step:6:2043). When a chain is sent for processing, move it to a separate collection and now the test works, yay! 
## Additional Info na --- .../network/src/sync/block_lookups/mod.rs | 99 ++++++++++++------- .../src/sync/block_lookups/parent_lookup.rs | 41 ++++++-- .../network/src/sync/block_lookups/tests.rs | 97 +++++++++++++++--- 3 files changed, 181 insertions(+), 56 deletions(-) diff --git a/beacon_node/network/src/sync/block_lookups/mod.rs b/beacon_node/network/src/sync/block_lookups/mod.rs index 5c2bc65229..aa2694769c 100644 --- a/beacon_node/network/src/sync/block_lookups/mod.rs +++ b/beacon_node/network/src/sync/block_lookups/mod.rs @@ -1,4 +1,5 @@ use std::collections::hash_map::Entry; +use std::collections::HashMap; use std::time::Duration; use beacon_chain::{BeaconChainTypes, BlockError}; @@ -13,6 +14,7 @@ use store::{Hash256, SignedBeaconBlock}; use crate::beacon_processor::{ChainSegmentProcessId, WorkEvent}; use crate::metrics; +use self::parent_lookup::PARENT_FAIL_TOLERANCE; use self::{ parent_lookup::{ParentLookup, VerifyError}, single_block_lookup::SingleBlockRequest, @@ -36,8 +38,11 @@ const FAILED_CHAINS_CACHE_EXPIRY_SECONDS: u64 = 60; const SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS: u8 = 3; pub(crate) struct BlockLookups { - /// A collection of parent block lookups. - parent_queue: SmallVec<[ParentLookup; 3]>, + /// Parent chain lookups being downloaded. + parent_lookups: SmallVec<[ParentLookup; 3]>, + + processing_parent_lookups: + HashMap, SingleBlockRequest)>, /// A cache of failed chain lookups to prevent duplicate searches. 
failed_chains: LRUTimeCache, @@ -55,7 +60,8 @@ pub(crate) struct BlockLookups { impl BlockLookups { pub fn new(log: Logger) -> Self { Self { - parent_queue: Default::default(), + parent_lookups: Default::default(), + processing_parent_lookups: Default::default(), failed_chains: LRUTimeCache::new(Duration::from_secs( FAILED_CHAINS_CACHE_EXPIRY_SECONDS, )), @@ -78,6 +84,23 @@ impl BlockLookups { return; } + if self.parent_lookups.iter_mut().any(|parent_req| { + parent_req.add_peer(&hash, &peer_id) || parent_req.contains_block(&hash) + }) { + // If the block was already downloaded, or is being downloaded in this moment, do not + // request it. + return; + } + + if self + .processing_parent_lookups + .values() + .any(|(hashes, _last_parent_request)| hashes.contains(&hash)) + { + // we are already processing this block, ignore it. + return; + } + debug!( self.log, "Searching for block"; @@ -118,8 +141,8 @@ impl BlockLookups { // Make sure this block is not already downloaded, and that neither it or its parent is // being searched for. - if self.parent_queue.iter_mut().any(|parent_req| { - parent_req.contains_block(&block) + if self.parent_lookups.iter_mut().any(|parent_req| { + parent_req.contains_block(&block_root) || parent_req.add_peer(&block_root, &peer_id) || parent_req.add_peer(&parent_root, &peer_id) }) { @@ -127,6 +150,15 @@ impl BlockLookups { return; } + if self + .processing_parent_lookups + .values() + .any(|(hashes, _peers)| hashes.contains(&block_root) || hashes.contains(&parent_root)) + { + // we are already processing this block, ignore it. 
+ return; + } + let parent_lookup = ParentLookup::new(block_root, block, peer_id); self.request_parent(parent_lookup, cx); } @@ -207,11 +239,11 @@ impl BlockLookups { cx: &mut SyncNetworkContext, ) { let mut parent_lookup = if let Some(pos) = self - .parent_queue + .parent_lookups .iter() .position(|request| request.pending_response(id)) { - self.parent_queue.remove(pos) + self.parent_lookups.remove(pos) } else { if block.is_some() { debug!(self.log, "Response for a parent lookup request that was not found"; "peer_id" => %peer_id); @@ -233,13 +265,13 @@ impl BlockLookups { ) .is_ok() { - self.parent_queue.push(parent_lookup) + self.parent_lookups.push(parent_lookup) } } Ok(None) => { // Request finished successfully, nothing else to do. It will be removed after the // processing result arrives. - self.parent_queue.push(parent_lookup); + self.parent_lookups.push(parent_lookup); } Err(e) => match e { VerifyError::RootMismatch @@ -276,7 +308,7 @@ impl BlockLookups { metrics::set_gauge( &metrics::SYNC_PARENT_BLOCK_LOOKUPS, - self.parent_queue.len() as i64, + self.parent_lookups.len() as i64, ); } @@ -324,11 +356,11 @@ impl BlockLookups { /* Check disconnection for parent lookups */ while let Some(pos) = self - .parent_queue + .parent_lookups .iter_mut() .position(|req| req.check_peer_disconnected(peer_id).is_err()) { - let parent_lookup = self.parent_queue.remove(pos); + let parent_lookup = self.parent_lookups.remove(pos); trace!(self.log, "Parent lookup's peer disconnected"; &parent_lookup); self.request_parent(parent_lookup, cx); } @@ -342,11 +374,11 @@ impl BlockLookups { cx: &mut SyncNetworkContext, ) { if let Some(pos) = self - .parent_queue + .parent_lookups .iter() .position(|request| request.pending_response(id)) { - let mut parent_lookup = self.parent_queue.remove(pos); + let mut parent_lookup = self.parent_lookups.remove(pos); parent_lookup.download_failed(); trace!(self.log, "Parent lookup request failed"; &parent_lookup); self.request_parent(parent_lookup, 
cx); @@ -355,7 +387,7 @@ impl BlockLookups { }; metrics::set_gauge( &metrics::SYNC_PARENT_BLOCK_LOOKUPS, - self.parent_queue.len() as i64, + self.parent_lookups.len() as i64, ); } @@ -470,7 +502,7 @@ impl BlockLookups { cx: &mut SyncNetworkContext, ) { let (mut parent_lookup, peer_id) = if let Some((pos, peer)) = self - .parent_queue + .parent_lookups .iter() .enumerate() .find_map(|(pos, request)| { @@ -478,7 +510,7 @@ impl BlockLookups { .get_processing_peer(chain_hash) .map(|peer| (pos, peer)) }) { - (self.parent_queue.remove(pos), peer) + (self.parent_lookups.remove(pos), peer) } else { return debug!(self.log, "Process response for a parent lookup request that was not found"; "chain_hash" => %chain_hash); }; @@ -520,13 +552,13 @@ impl BlockLookups { ); } }; - let chain_hash = parent_lookup.chain_hash(); - let blocks = parent_lookup.chain_blocks(); + let (chain_hash, blocks, hashes, request) = parent_lookup.parts_for_processing(); let process_id = ChainSegmentProcessId::ParentLookup(chain_hash); match beacon_processor_send.try_send(WorkEvent::chain_segment(process_id, blocks)) { Ok(_) => { - self.parent_queue.push(parent_lookup); + self.processing_parent_lookups + .insert(chain_hash, (hashes, request)); } Err(e) => { error!( @@ -580,7 +612,7 @@ impl BlockLookups { metrics::set_gauge( &metrics::SYNC_PARENT_BLOCK_LOOKUPS, - self.parent_queue.len() as i64, + self.parent_lookups.len() as i64, ); } @@ -590,14 +622,11 @@ impl BlockLookups { result: BatchProcessResult, cx: &mut SyncNetworkContext, ) { - let parent_lookup = if let Some(pos) = self - .parent_queue - .iter() - .position(|request| request.chain_hash() == chain_hash) - { - self.parent_queue.remove(pos) - } else { - return debug!(self.log, "Chain process response for a parent lookup request that was not found"; "chain_hash" => %chain_hash); + let request = match self.processing_parent_lookups.remove(&chain_hash) { + Some((_hashes, request)) => request, + None => { + return debug!(self.log, "Chain process 
response for a parent lookup request that was not found"; "chain_hash" => %chain_hash, "result" => ?result) + } }; debug!(self.log, "Parent chain processed"; "chain_hash" => %chain_hash, "result" => ?result); @@ -609,8 +638,8 @@ impl BlockLookups { imported_blocks: _, penalty, } => { - self.failed_chains.insert(parent_lookup.chain_hash()); - for &peer_id in parent_lookup.used_peers() { + self.failed_chains.insert(chain_hash); + for peer_id in request.used_peers { cx.report_peer(peer_id, penalty, "parent_chain_failure") } } @@ -621,7 +650,7 @@ impl BlockLookups { metrics::set_gauge( &metrics::SYNC_PARENT_BLOCK_LOOKUPS, - self.parent_queue.len() as i64, + self.parent_lookups.len() as i64, ); } @@ -697,14 +726,14 @@ impl BlockLookups { } Ok(_) => { debug!(self.log, "Requesting parent"; &parent_lookup); - self.parent_queue.push(parent_lookup) + self.parent_lookups.push(parent_lookup) } } // We remove and add back again requests so we want this updated regardless of outcome. metrics::set_gauge( &metrics::SYNC_PARENT_BLOCK_LOOKUPS, - self.parent_queue.len() as i64, + self.parent_lookups.len() as i64, ); } @@ -715,6 +744,6 @@ impl BlockLookups { /// Drops all the parent chain requests and returns how many requests were dropped. pub fn drop_parent_chain_requests(&mut self) -> usize { - self.parent_queue.drain(..).len() + self.parent_lookups.drain(..).len() } } diff --git a/beacon_node/network/src/sync/block_lookups/parent_lookup.rs b/beacon_node/network/src/sync/block_lookups/parent_lookup.rs index 38ad59ebc4..a2c2f1d1ce 100644 --- a/beacon_node/network/src/sync/block_lookups/parent_lookup.rs +++ b/beacon_node/network/src/sync/block_lookups/parent_lookup.rs @@ -24,7 +24,7 @@ pub(crate) struct ParentLookup { /// The root of the block triggering this parent request. chain_hash: Hash256, /// The blocks that have currently been downloaded. - downloaded_blocks: Vec>>, + downloaded_blocks: Vec>, /// Request of the last parent. 
current_parent_request: SingleBlockRequest, /// Id of the last parent request. @@ -53,10 +53,10 @@ pub enum RequestError { } impl ParentLookup { - pub fn contains_block(&self, block: &SignedBeaconBlock) -> bool { + pub fn contains_block(&self, block_root: &Hash256) -> bool { self.downloaded_blocks .iter() - .any(|d_block| d_block.as_ref() == block) + .any(|(root, _d_block)| root == block_root) } pub fn new( @@ -68,7 +68,7 @@ impl ParentLookup { Self { chain_hash: block_root, - downloaded_blocks: vec![block], + downloaded_blocks: vec![(block_root, block)], current_parent_request, current_parent_request_id: None, } @@ -100,7 +100,8 @@ impl ParentLookup { pub fn add_block(&mut self, block: Arc>) { let next_parent = block.parent_root(); - self.downloaded_blocks.push(block); + let current_root = self.current_parent_request.hash; + self.downloaded_blocks.push((current_root, block)); self.current_parent_request.hash = next_parent; self.current_parent_request.state = single_block_lookup::State::AwaitingDownload; self.current_parent_request_id = None; @@ -110,6 +111,32 @@ impl ParentLookup { self.current_parent_request_id == Some(req_id) } + /// Consumes the parent request and destructures it into it's parts. + #[allow(clippy::type_complexity)] + pub fn parts_for_processing( + self, + ) -> ( + Hash256, + Vec>>, + Vec, + SingleBlockRequest, + ) { + let ParentLookup { + chain_hash, + downloaded_blocks, + current_parent_request, + current_parent_request_id: _, + } = self; + let block_count = downloaded_blocks.len(); + let mut blocks = Vec::with_capacity(block_count); + let mut hashes = Vec::with_capacity(block_count); + for (hash, block) in downloaded_blocks { + blocks.push(block); + hashes.push(hash); + } + (chain_hash, blocks, hashes, current_parent_request) + } + /// Get the parent lookup's chain hash. 
pub fn chain_hash(&self) -> Hash256 { self.chain_hash @@ -125,10 +152,6 @@ impl ParentLookup { self.current_parent_request_id = None; } - pub fn chain_blocks(&mut self) -> Vec>> { - std::mem::take(&mut self.downloaded_blocks) - } - /// Verifies that the received block is what we requested. If so, parent lookup now waits for /// the processing result of the block. pub fn verify_block( diff --git a/beacon_node/network/src/sync/block_lookups/tests.rs b/beacon_node/network/src/sync/block_lookups/tests.rs index 64a1a6e836..8ade622f8d 100644 --- a/beacon_node/network/src/sync/block_lookups/tests.rs +++ b/beacon_node/network/src/sync/block_lookups/tests.rs @@ -259,7 +259,7 @@ fn test_single_block_lookup_becomes_parent_request() { assert_eq!(bl.single_block_lookups.len(), 0); rig.expect_parent_request(); rig.expect_empty_network(); - assert_eq!(bl.parent_queue.len(), 1); + assert_eq!(bl.parent_lookups.len(), 1); } #[test] @@ -287,7 +287,7 @@ fn test_parent_lookup_happy_path() { was_non_empty: true, }; bl.parent_chain_processed(chain_hash, process_result, &mut cx); - assert_eq!(bl.parent_queue.len(), 0); + assert_eq!(bl.parent_lookups.len(), 0); } #[test] @@ -324,7 +324,7 @@ fn test_parent_lookup_wrong_response() { was_non_empty: true, }; bl.parent_chain_processed(chain_hash, process_result, &mut cx); - assert_eq!(bl.parent_queue.len(), 0); + assert_eq!(bl.parent_lookups.len(), 0); } #[test] @@ -356,7 +356,7 @@ fn test_parent_lookup_empty_response() { was_non_empty: true, }; bl.parent_chain_processed(chain_hash, process_result, &mut cx); - assert_eq!(bl.parent_queue.len(), 0); + assert_eq!(bl.parent_lookups.len(), 0); } #[test] @@ -387,7 +387,7 @@ fn test_parent_lookup_rpc_failure() { was_non_empty: true, }; bl.parent_chain_processed(chain_hash, process_result, &mut cx); - assert_eq!(bl.parent_queue.len(), 0); + assert_eq!(bl.parent_lookups.len(), 0); } #[test] @@ -419,11 +419,11 @@ fn test_parent_lookup_too_many_attempts() { } } if i < parent_lookup::PARENT_FAIL_TOLERANCE 
{ - assert_eq!(bl.parent_queue[0].failed_attempts(), dbg!(i)); + assert_eq!(bl.parent_lookups[0].failed_attempts(), dbg!(i)); } } - assert_eq!(bl.parent_queue.len(), 0); + assert_eq!(bl.parent_lookups.len(), 0); } #[test] @@ -450,11 +450,11 @@ fn test_parent_lookup_too_many_download_attempts_no_blacklist() { rig.expect_penalty(); } if i < parent_lookup::PARENT_FAIL_TOLERANCE { - assert_eq!(bl.parent_queue[0].failed_attempts(), dbg!(i)); + assert_eq!(bl.parent_lookups[0].failed_attempts(), dbg!(i)); } } - assert_eq!(bl.parent_queue.len(), 0); + assert_eq!(bl.parent_lookups.len(), 0); assert!(!bl.failed_chains.contains(&block_hash)); assert!(!bl.failed_chains.contains(&parent.canonical_root())); } @@ -491,7 +491,7 @@ fn test_parent_lookup_too_many_processing_attempts_must_blacklist() { } assert!(bl.failed_chains.contains(&block_hash)); - assert_eq!(bl.parent_queue.len(), 0); + assert_eq!(bl.parent_lookups.len(), 0); } #[test] @@ -545,7 +545,7 @@ fn test_parent_lookup_disconnection() { &mut cx, ); bl.peer_disconnected(&peer_id, &mut cx); - assert!(bl.parent_queue.is_empty()); + assert!(bl.parent_lookups.is_empty()); } #[test] @@ -598,5 +598,78 @@ fn test_parent_lookup_ignored_response() { // Return an Ignored result. The request should be dropped bl.parent_block_processed(chain_hash, BlockProcessResult::Ignored, &mut cx); rig.expect_empty_network(); - assert_eq!(bl.parent_queue.len(), 0); + assert_eq!(bl.parent_lookups.len(), 0); +} + +/// This is a regression test. 
+#[test] +fn test_same_chain_race_condition() { + let (mut bl, mut cx, mut rig) = TestRig::test_setup(Some(Level::Debug)); + + #[track_caller] + fn parent_lookups_consistency(bl: &BlockLookups) { + let hashes: Vec<_> = bl + .parent_lookups + .iter() + .map(|req| req.chain_hash()) + .collect(); + let expected = hashes.len(); + assert_eq!( + expected, + hashes + .into_iter() + .collect::>() + .len(), + "duplicated chain hashes in parent queue" + ) + } + // if we use one or two blocks it will match on the hash or the parent hash, so make a longer + // chain. + let depth = 4; + let mut blocks = Vec::>>::with_capacity(depth); + while blocks.len() < depth { + let parent = blocks + .last() + .map(|b| b.canonical_root()) + .unwrap_or_else(Hash256::random); + let block = Arc::new(rig.block_with_parent(parent)); + blocks.push(block); + } + + let peer_id = PeerId::random(); + let trigger_block = blocks.pop().unwrap(); + let chain_hash = trigger_block.canonical_root(); + bl.search_parent(chain_hash, trigger_block.clone(), peer_id, &mut cx); + + for (i, block) in blocks.into_iter().rev().enumerate() { + let id = rig.expect_parent_request(); + // the block + bl.parent_lookup_response(id, peer_id, Some(block.clone()), D, &mut cx); + // the stream termination + bl.parent_lookup_response(id, peer_id, None, D, &mut cx); + // the processing request + rig.expect_block_process(); + // the processing result + if i + 2 == depth { + // one block was removed + bl.parent_block_processed(chain_hash, BlockError::BlockIsAlreadyKnown.into(), &mut cx) + } else { + bl.parent_block_processed(chain_hash, BlockError::ParentUnknown(block).into(), &mut cx) + } + parent_lookups_consistency(&bl) + } + + // Processing succeeds, now the rest of the chain should be sent for processing. + rig.expect_parent_chain_process(); + + // Try to get this block again while the chain is being processed. We should not request it again. 
+ let peer_id = PeerId::random(); + bl.search_parent(chain_hash, trigger_block, peer_id, &mut cx); + parent_lookups_consistency(&bl); + + let process_result = BatchProcessResult::Success { + was_non_empty: true, + }; + bl.parent_chain_processed(chain_hash, process_result, &mut cx); + assert_eq!(bl.parent_lookups.len(), 0); } From 9d6209725f3fa18236d0fbc72504dc22cf456ed9 Mon Sep 17 00:00:00 2001 From: Giulio rebuffo Date: Tue, 8 Nov 2022 01:58:18 +0000 Subject: [PATCH 05/11] Added Merkle Proof Generation for Beacon State (#3674) ## Issue Addressed This PR partially addresses #3651 ## Proposed Changes This PR adds the following methods: * a new method to trait `TreeHash`, `hash_tree_leaves`, which returns all the Merkle leaves of the ssz object. * a new method to `BeaconState`: `compute_merkle_proof`, which generates a specific merkle proof for a given depth and index by using `hash_tree_leaves` as the leaves function. ## Additional Info Now here is some rationale on why I decided to go down this route: adding a new function to a commonly used trait is a pain, but it was necessary to make sure we have all the merkle leaves for every object; that is why I only added `hash_tree_leaves` to the trait and not `compute_merkle_proof` as well. Although adding it too would make sense, it would give us code duplication and a harder review, and we only need it from one specific object in one specific use case, so it is not worth the effort YET. In my humble opinion.
Co-authored-by: Michael Sproul --- Cargo.lock | 1 + consensus/types/Cargo.toml | 1 + consensus/types/src/beacon_state.rs | 59 +++++ .../types/src/beacon_state/tree_hash_cache.rs | 204 ++++++++---------- consensus/types/src/light_client_bootstrap.rs | 10 +- .../types/src/light_client_finality_update.rs | 7 +- consensus/types/src/light_client_update.rs | 13 +- testing/ef_tests/src/cases.rs | 2 + .../src/cases/merkle_proof_validity.rs | 83 +++++++ testing/ef_tests/src/handler.rs | 24 +++ testing/ef_tests/tests/tests.rs | 5 + 11 files changed, 277 insertions(+), 132 deletions(-) create mode 100644 testing/ef_tests/src/cases/merkle_proof_validity.rs diff --git a/Cargo.lock b/Cargo.lock index 6d65ccb48c..c759c46f36 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6908,6 +6908,7 @@ dependencies = [ "lazy_static", "log", "maplit", + "merkle_proof", "parking_lot 0.12.1", "rand 0.8.5", "rand_xorshift", diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index d1b2ae1823..1ccc8dba8b 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -9,6 +9,7 @@ name = "benches" harness = false [dependencies] +merkle_proof = { path = "../../consensus/merkle_proof" } bls = { path = "../../crypto/bls" } compare_fields = { path = "../../common/compare_fields" } compare_fields_derive = { path = "../../common/compare_fields_derive" } diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index a5d00cdf2d..a6b913bcb9 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -124,6 +124,8 @@ pub enum Error { current_epoch: Epoch, epoch: Epoch, }, + IndexNotSupported(usize), + MerkleTreeError(merkle_proof::MerkleTreeError), } /// Control whether an epoch-indexed field can be indexed at the next epoch or not. @@ -1669,6 +1671,57 @@ impl BeaconState { }; Ok(sync_committee) } + + pub fn compute_merkle_proof( + &mut self, + generalized_index: usize, + ) -> Result, Error> { + // 1. 
Convert generalized index to field index. + let field_index = match generalized_index { + light_client_update::CURRENT_SYNC_COMMITTEE_INDEX + | light_client_update::NEXT_SYNC_COMMITTEE_INDEX => { + // Sync committees are top-level fields, subtract off the generalized indices + // for the internal nodes. Result should be 22 or 23, the field offset of the committee + // in the `BeaconState`: + // https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/beacon-chain.md#beaconstate + generalized_index + .checked_sub(tree_hash_cache::NUM_BEACON_STATE_HASH_TREE_ROOT_LEAVES) + .ok_or(Error::IndexNotSupported(generalized_index))? + } + light_client_update::FINALIZED_ROOT_INDEX => { + // Finalized root is the right child of `finalized_checkpoint`, divide by two to get + // the generalized index of `state.finalized_checkpoint`. + let finalized_checkpoint_generalized_index = generalized_index / 2; + // Subtract off the internal nodes. Result should be 105/2 - 32 = 20 which matches + // position of `finalized_checkpoint` in `BeaconState`. + finalized_checkpoint_generalized_index + .checked_sub(tree_hash_cache::NUM_BEACON_STATE_HASH_TREE_ROOT_LEAVES) + .ok_or(Error::IndexNotSupported(generalized_index))? + } + _ => return Err(Error::IndexNotSupported(generalized_index)), + }; + + // 2. Get all `BeaconState` leaves. + let cache = self.tree_hash_cache_mut().take(); + let leaves = if let Some(mut cache) = cache { + cache.recalculate_tree_hash_leaves(self)? + } else { + return Err(Error::TreeHashCacheNotInitialized); + }; + + // 3. Make deposit tree. + // Use the depth of the `BeaconState` fields (i.e. `log2(32) = 5`). + let depth = light_client_update::CURRENT_SYNC_COMMITTEE_PROOF_LEN; + let tree = merkle_proof::MerkleTree::create(&leaves, depth); + let (_, mut proof) = tree.generate_proof(field_index, depth)?; + + // 4. If we're proving the finalized root, patch in the finalized epoch to complete the proof. 
+ if generalized_index == light_client_update::FINALIZED_ROOT_INDEX { + proof.insert(0, self.finalized_checkpoint().epoch.tree_hash_root()); + } + + Ok(proof) + } } impl From for Error { @@ -1701,6 +1754,12 @@ impl From for Error { } } +impl From for Error { + fn from(e: merkle_proof::MerkleTreeError) -> Error { + Error::MerkleTreeError(e) + } +} + impl From for Error { fn from(e: ArithError) -> Error { Error::ArithError(e) diff --git a/consensus/types/src/beacon_state/tree_hash_cache.rs b/consensus/types/src/beacon_state/tree_hash_cache.rs index e67d4096dd..2fc56bdc01 100644 --- a/consensus/types/src/beacon_state/tree_hash_cache.rs +++ b/consensus/types/src/beacon_state/tree_hash_cache.rs @@ -18,7 +18,7 @@ use tree_hash::{mix_in_length, MerkleHasher, TreeHash}; /// /// This constant is set with the assumption that there are `> 16` and `<= 32` fields on the /// `BeaconState`. **Tree hashing will fail if this value is set incorrectly.** -const NUM_BEACON_STATE_HASH_TREE_ROOT_LEAVES: usize = 32; +pub const NUM_BEACON_STATE_HASH_TREE_ROOT_LEAVES: usize = 32; /// The number of nodes in the Merkle tree of a validator record. const NODES_PER_VALIDATOR: usize = 15; @@ -210,6 +210,90 @@ impl BeaconTreeHashCacheInner { } } + pub fn recalculate_tree_hash_leaves( + &mut self, + state: &BeaconState, + ) -> Result, Error> { + let mut leaves = vec![ + // Genesis data leaves. + state.genesis_time().tree_hash_root(), + state.genesis_validators_root().tree_hash_root(), + // Current fork data leaves. + state.slot().tree_hash_root(), + state.fork().tree_hash_root(), + state.latest_block_header().tree_hash_root(), + // Roots leaves. + state + .block_roots() + .recalculate_tree_hash_root(&mut self.fixed_arena, &mut self.block_roots)?, + state + .state_roots() + .recalculate_tree_hash_root(&mut self.fixed_arena, &mut self.state_roots)?, + state + .historical_roots() + .recalculate_tree_hash_root(&mut self.fixed_arena, &mut self.historical_roots)?, + // Eth1 Data leaves. 
+ state.eth1_data().tree_hash_root(), + self.eth1_data_votes.recalculate_tree_hash_root(state)?, + state.eth1_deposit_index().tree_hash_root(), + // Validator leaves. + self.validators + .recalculate_tree_hash_root(state.validators())?, + state + .balances() + .recalculate_tree_hash_root(&mut self.balances_arena, &mut self.balances)?, + state + .randao_mixes() + .recalculate_tree_hash_root(&mut self.fixed_arena, &mut self.randao_mixes)?, + state + .slashings() + .recalculate_tree_hash_root(&mut self.slashings_arena, &mut self.slashings)?, + ]; + // Participation + if let BeaconState::Base(state) = state { + leaves.push(state.previous_epoch_attestations.tree_hash_root()); + leaves.push(state.current_epoch_attestations.tree_hash_root()); + } else { + leaves.push( + self.previous_epoch_participation + .recalculate_tree_hash_root(&ParticipationList::new( + state.previous_epoch_participation()?, + ))?, + ); + leaves.push( + self.current_epoch_participation + .recalculate_tree_hash_root(&ParticipationList::new( + state.current_epoch_participation()?, + ))?, + ); + } + // Checkpoint leaves + leaves.push(state.justification_bits().tree_hash_root()); + leaves.push(state.previous_justified_checkpoint().tree_hash_root()); + leaves.push(state.current_justified_checkpoint().tree_hash_root()); + leaves.push(state.finalized_checkpoint().tree_hash_root()); + // Inactivity & light-client sync committees (Altair and later). + if let Ok(inactivity_scores) = state.inactivity_scores() { + leaves.push( + self.inactivity_scores + .recalculate_tree_hash_root(inactivity_scores)?, + ); + } + if let Ok(current_sync_committee) = state.current_sync_committee() { + leaves.push(current_sync_committee.tree_hash_root()); + } + + if let Ok(next_sync_committee) = state.next_sync_committee() { + leaves.push(next_sync_committee.tree_hash_root()); + } + + // Execution payload (merge and later). 
+ if let Ok(payload_header) = state.latest_execution_payload_header() { + leaves.push(payload_header.tree_hash_root()); + } + Ok(leaves) + } + /// Updates the cache and returns the tree hash root for the given `state`. /// /// The provided `state` should be a descendant of the last `state` given to this function, or @@ -246,121 +330,9 @@ impl BeaconTreeHashCacheInner { let mut hasher = MerkleHasher::with_leaves(NUM_BEACON_STATE_HASH_TREE_ROOT_LEAVES); - hasher.write(state.genesis_time().tree_hash_root().as_bytes())?; - hasher.write(state.genesis_validators_root().tree_hash_root().as_bytes())?; - hasher.write(state.slot().tree_hash_root().as_bytes())?; - hasher.write(state.fork().tree_hash_root().as_bytes())?; - hasher.write(state.latest_block_header().tree_hash_root().as_bytes())?; - hasher.write( - state - .block_roots() - .recalculate_tree_hash_root(&mut self.fixed_arena, &mut self.block_roots)? - .as_bytes(), - )?; - hasher.write( - state - .state_roots() - .recalculate_tree_hash_root(&mut self.fixed_arena, &mut self.state_roots)? - .as_bytes(), - )?; - hasher.write( - state - .historical_roots() - .recalculate_tree_hash_root(&mut self.fixed_arena, &mut self.historical_roots)? - .as_bytes(), - )?; - hasher.write(state.eth1_data().tree_hash_root().as_bytes())?; - hasher.write( - self.eth1_data_votes - .recalculate_tree_hash_root(state)? - .as_bytes(), - )?; - hasher.write(state.eth1_deposit_index().tree_hash_root().as_bytes())?; - hasher.write( - self.validators - .recalculate_tree_hash_root(state.validators())? - .as_bytes(), - )?; - hasher.write( - state - .balances() - .recalculate_tree_hash_root(&mut self.balances_arena, &mut self.balances)? - .as_bytes(), - )?; - hasher.write( - state - .randao_mixes() - .recalculate_tree_hash_root(&mut self.fixed_arena, &mut self.randao_mixes)? - .as_bytes(), - )?; - hasher.write( - state - .slashings() - .recalculate_tree_hash_root(&mut self.slashings_arena, &mut self.slashings)? 
- .as_bytes(), - )?; - - // Participation - if let BeaconState::Base(state) = state { - hasher.write( - state - .previous_epoch_attestations - .tree_hash_root() - .as_bytes(), - )?; - hasher.write(state.current_epoch_attestations.tree_hash_root().as_bytes())?; - } else { - hasher.write( - self.previous_epoch_participation - .recalculate_tree_hash_root(&ParticipationList::new( - state.previous_epoch_participation()?, - ))? - .as_bytes(), - )?; - hasher.write( - self.current_epoch_participation - .recalculate_tree_hash_root(&ParticipationList::new( - state.current_epoch_participation()?, - ))? - .as_bytes(), - )?; - } - - hasher.write(state.justification_bits().tree_hash_root().as_bytes())?; - hasher.write( - state - .previous_justified_checkpoint() - .tree_hash_root() - .as_bytes(), - )?; - hasher.write( - state - .current_justified_checkpoint() - .tree_hash_root() - .as_bytes(), - )?; - hasher.write(state.finalized_checkpoint().tree_hash_root().as_bytes())?; - - // Inactivity & light-client sync committees (Altair and later). - if let Ok(inactivity_scores) = state.inactivity_scores() { - hasher.write( - self.inactivity_scores - .recalculate_tree_hash_root(inactivity_scores)? - .as_bytes(), - )?; - } - - if let Ok(current_sync_committee) = state.current_sync_committee() { - hasher.write(current_sync_committee.tree_hash_root().as_bytes())?; - } - - if let Ok(next_sync_committee) = state.next_sync_committee() { - hasher.write(next_sync_committee.tree_hash_root().as_bytes())?; - } - - // Execution payload (merge and later). 
- if let Ok(payload_header) = state.latest_execution_payload_header() { - hasher.write(payload_header.tree_hash_root().as_bytes())?; + let leaves = self.recalculate_tree_hash_leaves(state)?; + for leaf in leaves { + hasher.write(leaf.as_bytes())?; } let root = hasher.finish()?; diff --git a/consensus/types/src/light_client_bootstrap.rs b/consensus/types/src/light_client_bootstrap.rs index 406136d542..d2a46c04a4 100644 --- a/consensus/types/src/light_client_bootstrap.rs +++ b/consensus/types/src/light_client_bootstrap.rs @@ -21,17 +21,15 @@ pub struct LightClientBootstrap { } impl LightClientBootstrap { - pub fn from_beacon_state(beacon_state: BeaconState) -> Result { + pub fn from_beacon_state(beacon_state: &mut BeaconState) -> Result { let mut header = beacon_state.latest_block_header().clone(); header.state_root = beacon_state.tree_hash_root(); + let current_sync_committee_branch = + beacon_state.compute_merkle_proof(CURRENT_SYNC_COMMITTEE_INDEX)?; Ok(LightClientBootstrap { header, current_sync_committee: beacon_state.current_sync_committee()?.clone(), - /// TODO(Giulio2002): Generate Merkle Proof, this is just empty hashes - current_sync_committee_branch: FixedVector::new(vec![ - Hash256::zero(); - CURRENT_SYNC_COMMITTEE_PROOF_LEN - ])?, + current_sync_committee_branch: FixedVector::new(current_sync_committee_branch)?, }) } } diff --git a/consensus/types/src/light_client_finality_update.rs b/consensus/types/src/light_client_finality_update.rs index c93d15a1a0..fe26c0fa3e 100644 --- a/consensus/types/src/light_client_finality_update.rs +++ b/consensus/types/src/light_client_finality_update.rs @@ -31,7 +31,7 @@ impl LightClientFinalityUpdate { chain_spec: ChainSpec, beacon_state: BeaconState, block: BeaconBlock, - attested_state: BeaconState, + attested_state: &mut BeaconState, finalized_block: BeaconBlock, ) -> Result { let altair_fork_epoch = chain_spec @@ -60,11 +60,12 @@ impl LightClientFinalityUpdate { if finalized_header.tree_hash_root() != 
beacon_state.finalized_checkpoint().root { return Err(Error::InvalidFinalizedBlock); } - // TODO(Giulio2002): compute proper merkle proofs. + + let finality_branch = attested_state.compute_merkle_proof(FINALIZED_ROOT_INDEX)?; Ok(Self { attested_header: attested_header, finalized_header: finalized_header, - finality_branch: FixedVector::new(vec![Hash256::zero(); FINALIZED_ROOT_PROOF_LEN])?, + finality_branch: FixedVector::new(finality_branch)?, sync_aggregate: sync_aggregate.clone(), signature_slot: block.slot(), }) diff --git a/consensus/types/src/light_client_update.rs b/consensus/types/src/light_client_update.rs index 38609cf1bc..7d01f39bfc 100644 --- a/consensus/types/src/light_client_update.rs +++ b/consensus/types/src/light_client_update.rs @@ -77,7 +77,7 @@ impl LightClientUpdate { chain_spec: ChainSpec, beacon_state: BeaconState, block: BeaconBlock, - attested_state: BeaconState, + attested_state: &mut BeaconState, finalized_block: BeaconBlock, ) -> Result { let altair_fork_epoch = chain_spec @@ -114,16 +114,15 @@ impl LightClientUpdate { if finalized_header.tree_hash_root() != beacon_state.finalized_checkpoint().root { return Err(Error::InvalidFinalizedBlock); } - // TODO(Giulio2002): compute proper merkle proofs. 
+ let next_sync_committee_branch = + attested_state.compute_merkle_proof(NEXT_SYNC_COMMITTEE_INDEX)?; + let finality_branch = attested_state.compute_merkle_proof(FINALIZED_ROOT_INDEX)?; Ok(Self { attested_header, next_sync_committee: attested_state.next_sync_committee()?.clone(), - next_sync_committee_branch: FixedVector::new(vec![ - Hash256::zero(); - NEXT_SYNC_COMMITTEE_PROOF_LEN - ])?, + next_sync_committee_branch: FixedVector::new(next_sync_committee_branch)?, finalized_header, - finality_branch: FixedVector::new(vec![Hash256::zero(); FINALIZED_ROOT_PROOF_LEN])?, + finality_branch: FixedVector::new(finality_branch)?, sync_aggregate: sync_aggregate.clone(), signature_slot: block.slot(), }) diff --git a/testing/ef_tests/src/cases.rs b/testing/ef_tests/src/cases.rs index ae70f1e07e..216912a4f1 100644 --- a/testing/ef_tests/src/cases.rs +++ b/testing/ef_tests/src/cases.rs @@ -18,6 +18,7 @@ mod fork; mod fork_choice; mod genesis_initialization; mod genesis_validity; +mod merkle_proof_validity; mod operations; mod rewards; mod sanity_blocks; @@ -41,6 +42,7 @@ pub use epoch_processing::*; pub use fork::ForkTest; pub use genesis_initialization::*; pub use genesis_validity::*; +pub use merkle_proof_validity::*; pub use operations::*; pub use rewards::RewardsTest; pub use sanity_blocks::*; diff --git a/testing/ef_tests/src/cases/merkle_proof_validity.rs b/testing/ef_tests/src/cases/merkle_proof_validity.rs new file mode 100644 index 0000000000..3a6f4acf1e --- /dev/null +++ b/testing/ef_tests/src/cases/merkle_proof_validity.rs @@ -0,0 +1,83 @@ +use super::*; +use crate::decode::{ssz_decode_state, yaml_decode_file}; +use serde_derive::Deserialize; +use std::path::Path; +use tree_hash::Hash256; +use types::{BeaconState, EthSpec, ForkName}; + +#[derive(Debug, Clone, Deserialize)] +pub struct Metadata { + #[serde(rename(deserialize = "description"))] + _description: String, +} + +#[derive(Debug, Clone, Deserialize)] +pub struct MerkleProof { + pub leaf: Hash256, + pub 
leaf_index: usize, + pub branch: Vec, +} + +#[derive(Debug, Clone, Deserialize)] +#[serde(bound = "E: EthSpec")] +pub struct MerkleProofValidity { + pub metadata: Option, + pub state: BeaconState, + pub merkle_proof: MerkleProof, +} + +impl LoadCase for MerkleProofValidity { + fn load_from_dir(path: &Path, fork_name: ForkName) -> Result { + let spec = &testing_spec::(fork_name); + let state = ssz_decode_state(&path.join("state.ssz_snappy"), spec)?; + let merkle_proof = yaml_decode_file(&path.join("proof.yaml"))?; + // Metadata does not exist in these tests but it is left like this just in case. + let meta_path = path.join("meta.yaml"); + let metadata = if meta_path.exists() { + Some(yaml_decode_file(&meta_path)?) + } else { + None + }; + + Ok(Self { + metadata, + state, + merkle_proof, + }) + } +} + +impl Case for MerkleProofValidity { + fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { + let mut state = self.state.clone(); + state.initialize_tree_hash_cache(); + let proof = match state.compute_merkle_proof(self.merkle_proof.leaf_index) { + Ok(proof) => proof, + Err(_) => { + return Err(Error::FailedToParseTest( + "Could not retrieve merkle proof".to_string(), + )) + } + }; + let proof_len = proof.len(); + let branch_len = self.merkle_proof.branch.len(); + if proof_len != branch_len { + return Err(Error::NotEqual(format!( + "Branches not equal in length computed: {}, expected {}", + proof_len, branch_len + ))); + } + + for (i, proof_leaf) in proof.iter().enumerate().take(proof_len) { + let expected_leaf = self.merkle_proof.branch[i]; + if *proof_leaf != expected_leaf { + return Err(Error::NotEqual(format!( + "Leaves not equal in merke proof computed: {}, expected: {}", + hex::encode(proof_leaf), + hex::encode(expected_leaf) + ))); + } + } + Ok(()) + } +} diff --git a/testing/ef_tests/src/handler.rs b/testing/ef_tests/src/handler.rs index dd5ed82da7..13f70fea71 100644 --- a/testing/ef_tests/src/handler.rs +++ 
b/testing/ef_tests/src/handler.rs @@ -617,6 +617,30 @@ impl Handler for GenesisInitializationHandler { } } +#[derive(Derivative)] +#[derivative(Default(bound = ""))] +pub struct MerkleProofValidityHandler(PhantomData); + +impl Handler for MerkleProofValidityHandler { + type Case = cases::MerkleProofValidity; + + fn config_name() -> &'static str { + E::name() + } + + fn runner_name() -> &'static str { + "light_client" + } + + fn handler_name(&self) -> String { + "single_merkle_proof".into() + } + + fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool { + fork_name != ForkName::Base + } +} + #[derive(Derivative)] #[derivative(Default(bound = ""))] pub struct OperationsHandler(PhantomData<(E, O)>); diff --git a/testing/ef_tests/tests/tests.rs b/testing/ef_tests/tests/tests.rs index 28c57028cf..87a6bec71b 100644 --- a/testing/ef_tests/tests/tests.rs +++ b/testing/ef_tests/tests/tests.rs @@ -465,6 +465,11 @@ fn genesis_validity() { // Note: there are no genesis validity tests for mainnet } +#[test] +fn merkle_proof_validity() { + MerkleProofValidityHandler::::default().run(); +} + #[test] fn rewards() { for handler in &["basic", "leak", "random"] { From 266d7652854ba7dd22947396a1b6c7fcfe6eed50 Mon Sep 17 00:00:00 2001 From: tim gretler Date: Wed, 9 Nov 2022 05:37:09 +0000 Subject: [PATCH 06/11] Register blocks in validator monitor (#3635) ## Issue Addressed Closes #3460 ## Proposed Changes `blocks` and `block_min_delay` are never updated in the epoch summary Co-authored-by: Michael Sproul --- .../beacon_chain/src/validator_monitor.rs | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/beacon_node/beacon_chain/src/validator_monitor.rs b/beacon_node/beacon_chain/src/validator_monitor.rs index 95f4aadced..f9203f74bf 100644 --- a/beacon_node/beacon_chain/src/validator_monitor.rs +++ b/beacon_node/beacon_chain/src/validator_monitor.rs @@ -109,6 +109,11 @@ impl EpochSummary { } } + pub fn register_block(&mut self, delay: Duration) { + 
self.blocks += 1; + Self::update_if_lt(&mut self.block_min_delay, delay); + } + pub fn register_unaggregated_attestation(&mut self, delay: Duration) { self.attestations += 1; Self::update_if_lt(&mut self.attestation_min_delay, delay); @@ -613,13 +618,6 @@ impl ValidatorMonitor { Ok(()) } - fn get_validator_id(&self, validator_index: u64) -> Option<&str> { - self.indices - .get(&validator_index) - .and_then(|pubkey| self.validators.get(pubkey)) - .map(|validator| validator.id.as_str()) - } - fn get_validator(&self, validator_index: u64) -> Option<&MonitoredValidator> { self.indices .get(&validator_index) @@ -685,7 +683,9 @@ impl ValidatorMonitor { block_root: Hash256, slot_clock: &S, ) { - if let Some(id) = self.get_validator_id(block.proposer_index()) { + let epoch = block.slot().epoch(T::slots_per_epoch()); + if let Some(validator) = self.get_validator(block.proposer_index()) { + let id = &validator.id; let delay = get_block_delay_ms(seen_timestamp, block, slot_clock); metrics::inc_counter_vec(&metrics::VALIDATOR_MONITOR_BEACON_BLOCK_TOTAL, &[src, id]); @@ -704,6 +704,8 @@ impl ValidatorMonitor { "src" => src, "validator" => %id, ); + + validator.with_epoch_summary(epoch, |summary| summary.register_block(delay)); } } From d99bfcf1a5647744be471111f2316d0e4b2a3dcb Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Fri, 11 Nov 2022 00:38:27 +0000 Subject: [PATCH 07/11] Blinded block and RANDAO APIs (#3571) ## Issue Addressed https://github.com/ethereum/beacon-APIs/pull/241 https://github.com/ethereum/beacon-APIs/pull/242 ## Proposed Changes Implement two new endpoints for fetching blinded blocks and RANDAO mixes. 
Co-authored-by: realbigsean --- beacon_node/http_api/src/lib.rs | 79 ++++++++++++++++++++ beacon_node/http_api/tests/tests.rs | 110 ++++++++++++++++++++++++++++ common/eth2/src/lib.rs | 97 ++++++++++++++++++++++++ common/eth2/src/types.rs | 10 +++ 4 files changed, 296 insertions(+) diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 1ef3c3e2a9..01cc63ecea 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -891,6 +891,37 @@ pub fn serve( }, ); + // GET beacon/states/{state_id}/randao?epoch + let get_beacon_state_randao = beacon_states_path + .clone() + .and(warp::path("randao")) + .and(warp::query::()) + .and(warp::path::end()) + .and_then( + |state_id: StateId, chain: Arc>, query: api_types::RandaoQuery| { + blocking_json_task(move || { + let (randao, execution_optimistic) = state_id + .map_state_and_execution_optimistic( + &chain, + |state, execution_optimistic| { + let epoch = query.epoch.unwrap_or_else(|| state.current_epoch()); + let randao = *state.get_randao_mix(epoch).map_err(|e| { + warp_utils::reject::custom_bad_request(format!( + "epoch out of range: {e:?}" + )) + })?; + Ok((randao, execution_optimistic)) + }, + )?; + + Ok( + api_types::GenericResponse::from(api_types::RandaoMix { randao }) + .add_execution_optimistic(execution_optimistic), + ) + }) + }, + ); + // GET beacon/headers // // Note: this endpoint only returns information about blocks in the canonical chain. 
Given that @@ -1167,6 +1198,51 @@ pub fn serve( }) }); + // GET beacon/blinded_blocks/{block_id} + let get_beacon_blinded_block = eth_v1 + .and(warp::path("beacon")) + .and(warp::path("blinded_blocks")) + .and(block_id_or_err) + .and(chain_filter.clone()) + .and(warp::path::end()) + .and(warp::header::optional::("accept")) + .and_then( + |block_id: BlockId, + chain: Arc>, + accept_header: Option| { + blocking_task(move || { + let (block, execution_optimistic) = block_id.blinded_block(&chain)?; + let fork_name = block + .fork_name(&chain.spec) + .map_err(inconsistent_fork_rejection)?; + + match accept_header { + Some(api_types::Accept::Ssz) => Response::builder() + .status(200) + .header("Content-Type", "application/octet-stream") + .body(block.as_ssz_bytes().into()) + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "failed to create response: {}", + e + )) + }), + _ => { + // Post as a V2 endpoint so we return the fork version. + execution_optimistic_fork_versioned_response( + V2, + fork_name, + execution_optimistic, + block, + ) + .map(|res| warp::reply::json(&res).into_response()) + } + } + .map(|resp| add_consensus_version_header(resp, fork_name)) + }) + }, + ); + /* * beacon/pool */ @@ -3164,10 +3240,12 @@ pub fn serve( .or(get_beacon_state_validators.boxed()) .or(get_beacon_state_committees.boxed()) .or(get_beacon_state_sync_committees.boxed()) + .or(get_beacon_state_randao.boxed()) .or(get_beacon_headers.boxed()) .or(get_beacon_headers_block_id.boxed()) .or(get_beacon_block.boxed()) .or(get_beacon_block_attestations.boxed()) + .or(get_beacon_blinded_block.boxed()) .or(get_beacon_block_root.boxed()) .or(get_beacon_pool_attestations.boxed()) .or(get_beacon_pool_attester_slashings.boxed()) @@ -3212,6 +3290,7 @@ pub fn serve( .or(get_lighthouse_merge_readiness.boxed()) .or(get_events.boxed()), ) + .boxed() .or(warp::post().and( post_beacon_blocks .boxed() diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs 
index ff664d6ff0..2e795e522d 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -745,6 +745,36 @@ impl ApiTester { self } + pub async fn test_beacon_states_randao(self) -> Self { + for state_id in self.interesting_state_ids() { + let mut state_opt = state_id + .state(&self.chain) + .ok() + .map(|(state, _execution_optimistic)| state); + + let epoch_opt = state_opt.as_ref().map(|state| state.current_epoch()); + let result = self + .client + .get_beacon_states_randao(state_id.0, epoch_opt) + .await + .unwrap() + .map(|res| res.data); + + if result.is_none() && state_opt.is_none() { + continue; + } + + let state = state_opt.as_mut().expect("result should be none"); + let randao_mix = state + .get_randao_mix(state.slot().epoch(E::slots_per_epoch())) + .unwrap(); + + assert_eq!(result.unwrap().randao, *randao_mix); + } + + self + } + pub async fn test_beacon_headers_all_slots(self) -> Self { for slot in 0..CHAIN_LENGTH { let slot = Slot::from(slot); @@ -1016,6 +1046,82 @@ impl ApiTester { self } + pub async fn test_beacon_blinded_blocks(self) -> Self { + for block_id in self.interesting_block_ids() { + let expected = block_id + .blinded_block(&self.chain) + .ok() + .map(|(block, _execution_optimistic)| block); + + if let CoreBlockId::Slot(slot) = block_id.0 { + if expected.is_none() { + assert!(SKIPPED_SLOTS.contains(&slot.as_u64())); + } else { + assert!(!SKIPPED_SLOTS.contains(&slot.as_u64())); + } + } + + // Check the JSON endpoint. + let json_result = self + .client + .get_beacon_blinded_blocks(block_id.0) + .await + .unwrap(); + + if let (Some(json), Some(expected)) = (&json_result, &expected) { + assert_eq!(&json.data, expected, "{:?}", block_id); + assert_eq!( + json.version, + Some(expected.fork_name(&self.chain.spec).unwrap()) + ); + } else { + assert_eq!(json_result, None); + assert_eq!(expected, None); + } + + // Check the SSZ endpoint. 
+ let ssz_result = self + .client + .get_beacon_blinded_blocks_ssz(block_id.0, &self.chain.spec) + .await + .unwrap(); + assert_eq!(ssz_result.as_ref(), expected.as_ref(), "{:?}", block_id); + + // Check that version headers are provided. + let url = self + .client + .get_beacon_blinded_blocks_path(block_id.0) + .unwrap(); + + let builders: Vec RequestBuilder> = vec![ + |b| b, + |b| b.accept(Accept::Ssz), + |b| b.accept(Accept::Json), + |b| b.accept(Accept::Any), + ]; + + for req_builder in builders { + let raw_res = self + .client + .get_response(url.clone(), req_builder) + .await + .optional() + .unwrap(); + if let (Some(raw_res), Some(expected)) = (&raw_res, &expected) { + assert_eq!( + raw_res.fork_name_from_header().unwrap(), + Some(expected.fork_name(&self.chain.spec).unwrap()) + ); + } else { + assert!(raw_res.is_none()); + assert_eq!(expected, None); + } + } + } + + self + } + pub async fn test_beacon_blocks_attestations(self) -> Self { for block_id in self.interesting_block_ids() { let result = self @@ -3696,6 +3802,8 @@ async fn beacon_get() { .await .test_beacon_states_validator_id() .await + .test_beacon_states_randao() + .await .test_beacon_headers_all_slots() .await .test_beacon_headers_all_parents() @@ -3704,6 +3812,8 @@ async fn beacon_get() { .await .test_beacon_blocks() .await + .test_beacon_blinded_blocks() + .await .test_beacon_blocks_attestations() .await .test_beacon_blocks_root() diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index a2fb082a35..58b4c88b3c 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -518,6 +518,29 @@ impl BeaconNodeHttpClient { self.get(path).await } + /// `GET beacon/states/{state_id}/randao?epoch` + pub async fn get_beacon_states_randao( + &self, + state_id: StateId, + epoch: Option, + ) -> Result>, Error> { + let mut path = self.eth_path(V1)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
+ .push("beacon") + .push("states") + .push(&state_id.to_string()) + .push("randao"); + + if let Some(epoch) = epoch { + path.query_pairs_mut() + .append_pair("epoch", &epoch.to_string()); + } + + self.get_opt(path).await + } + /// `GET beacon/states/{state_id}/validators/{validator_id}` /// /// Returns `Ok(None)` on a 404 error. @@ -636,6 +659,17 @@ impl BeaconNodeHttpClient { Ok(path) } + /// Path for `v1/beacon/blinded_blocks/{block_id}` + pub fn get_beacon_blinded_blocks_path(&self, block_id: BlockId) -> Result { + let mut path = self.eth_path(V1)?; + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("blinded_blocks") + .push(&block_id.to_string()); + Ok(path) + } + /// `GET v2/beacon/blocks` /// /// Returns `Ok(None)` on a 404 error. @@ -680,6 +714,51 @@ impl BeaconNodeHttpClient { })) } + /// `GET v1/beacon/blinded_blocks/{block_id}` + /// + /// Returns `Ok(None)` on a 404 error. + pub async fn get_beacon_blinded_blocks( + &self, + block_id: BlockId, + ) -> Result>>, Error> + { + let path = self.get_beacon_blinded_blocks_path(block_id)?; + let response = match self.get_response(path, |b| b).await.optional()? { + Some(res) => res, + None => return Ok(None), + }; + + // If present, use the fork provided in the headers to decode the block. Gracefully handle + // missing and malformed fork names by falling back to regular deserialisation. 
+ let (block, version, execution_optimistic) = match response.fork_name_from_header() { + Ok(Some(fork_name)) => { + let (data, (version, execution_optimistic)) = + map_fork_name_with!(fork_name, SignedBlindedBeaconBlock, { + let ExecutionOptimisticForkVersionedResponse { + version, + execution_optimistic, + data, + } = response.json().await?; + (data, (version, execution_optimistic)) + }); + (data, version, execution_optimistic) + } + Ok(None) | Err(_) => { + let ExecutionOptimisticForkVersionedResponse { + version, + execution_optimistic, + data, + } = response.json().await?; + (data, version, execution_optimistic) + } + }; + Ok(Some(ExecutionOptimisticForkVersionedResponse { + version, + execution_optimistic, + data: block, + })) + } + /// `GET v1/beacon/blocks` (LEGACY) /// /// Returns `Ok(None)` on a 404 error. @@ -714,6 +793,24 @@ impl BeaconNodeHttpClient { .transpose() } + /// `GET beacon/blinded_blocks/{block_id}` as SSZ + /// + /// Returns `Ok(None)` on a 404 error. + pub async fn get_beacon_blinded_blocks_ssz( + &self, + block_id: BlockId, + spec: &ChainSpec, + ) -> Result>, Error> { + let path = self.get_beacon_blinded_blocks_path(block_id)?; + + self.get_bytes_opt_accept_header(path, Accept::Ssz, self.timeouts.get_beacon_blocks_ssz) + .await? + .map(|bytes| { + SignedBlindedBeaconBlock::from_ssz_bytes(&bytes, spec).map_err(Error::InvalidSsz) + }) + .transpose() + } + /// `GET beacon/blocks/{block_id}/root` /// /// Returns `Ok(None)` on a 404 error. 
diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index e657358003..7012972460 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -455,6 +455,11 @@ pub struct SyncCommitteesQuery { pub epoch: Option, } +#[derive(Serialize, Deserialize)] +pub struct RandaoQuery { + pub epoch: Option, +} + #[derive(Serialize, Deserialize)] pub struct AttestationPoolQuery { pub slot: Option, @@ -486,6 +491,11 @@ pub struct SyncCommitteeByValidatorIndices { pub validator_aggregates: Vec, } +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct RandaoMix { + pub randao: Hash256, +} + #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] #[serde(transparent)] pub struct SyncSubcommittee { From c591fcd20179a8fd8cb3c601d949513a193c3351 Mon Sep 17 00:00:00 2001 From: GeemoCandama Date: Fri, 11 Nov 2022 00:38:28 +0000 Subject: [PATCH 08/11] add checkpoint-sync-url-timeout flag (#3710) ## Issue Addressed #3702 Which issue # does this PR address? #3702 ## Proposed Changes Added checkpoint-sync-url-timeout flag to cli. Added timeout field to ClientGenesis::CheckpointSyncUrl to utilize timeout set ## Additional Info Please provide any additional information. For example, future considerations or information useful for reviewers. 
Co-authored-by: GeemoCandama <104614073+GeemoCandama@users.noreply.github.com> Co-authored-by: Michael Sproul --- beacon_node/beacon_chain/src/chain_config.rs | 3 +++ beacon_node/client/src/builder.rs | 11 ++++++----- beacon_node/src/cli.rs | 8 ++++++++ beacon_node/src/config.rs | 2 ++ lighthouse/tests/beacon_node.rs | 19 +++++++++++++++++++ 5 files changed, 38 insertions(+), 5 deletions(-) diff --git a/beacon_node/beacon_chain/src/chain_config.rs b/beacon_node/beacon_chain/src/chain_config.rs index 5e16a29cf3..286cc17a96 100644 --- a/beacon_node/beacon_chain/src/chain_config.rs +++ b/beacon_node/beacon_chain/src/chain_config.rs @@ -45,6 +45,8 @@ pub struct ChainConfig { pub paranoid_block_proposal: bool, /// Whether to strictly count unrealized justified votes. pub count_unrealized_full: CountUnrealizedFull, + /// Optionally set timeout for calls to checkpoint sync endpoint. + pub checkpoint_sync_url_timeout: u64, } impl Default for ChainConfig { @@ -65,6 +67,7 @@ impl Default for ChainConfig { always_reset_payload_statuses: false, paranoid_block_proposal: false, count_unrealized_full: CountUnrealizedFull::default(), + checkpoint_sync_url_timeout: 60, } } } diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 36d6491a56..75b865407e 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -40,9 +40,6 @@ use types::{ /// Interval between polling the eth1 node for genesis information. pub const ETH1_GENESIS_UPDATE_INTERVAL_MILLIS: u64 = 7_000; -/// Timeout for checkpoint sync HTTP requests. -pub const CHECKPOINT_SYNC_HTTP_TIMEOUT: Duration = Duration::from_secs(60); - /// Builds a `Client` instance. 
/// /// ## Notes @@ -273,8 +270,12 @@ where "remote_url" => %url, ); - let remote = - BeaconNodeHttpClient::new(url, Timeouts::set_all(CHECKPOINT_SYNC_HTTP_TIMEOUT)); + let remote = BeaconNodeHttpClient::new( + url, + Timeouts::set_all(Duration::from_secs( + config.chain.checkpoint_sync_url_timeout, + )), + ); let slots_per_epoch = TEthSpec::slots_per_epoch(); let deposit_snapshot = if config.sync_eth1_chain { diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 81a7c6bbeb..16a6794f43 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -714,6 +714,14 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .takes_value(true) .conflicts_with("checkpoint-state") ) + .arg( + Arg::with_name("checkpoint-sync-url-timeout") + .long("checkpoint-sync-url-timeout") + .help("Set the timeout for checkpoint sync calls to remote beacon node HTTP endpoint.") + .value_name("SECONDS") + .takes_value(true) + .default_value("60") + ) .arg( Arg::with_name("reconstruct-historic-states") .long("reconstruct-historic-states") diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 3b94c31290..6af753afea 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -441,6 +441,8 @@ pub fn get_config( .extend_from_slice(boot_nodes) } } + client_config.chain.checkpoint_sync_url_timeout = + clap_utils::parse_required::(cli_args, "checkpoint-sync-url-timeout")?; client_config.genesis = if let Some(genesis_state_bytes) = eth2_network_config.genesis_state_bytes.clone() diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index b1498f109d..f24ba6895e 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -132,6 +132,25 @@ fn fork_choice_before_proposal_timeout_zero() { .with_config(|config| assert_eq!(config.chain.fork_choice_before_proposal_timeout_ms, 0)); } +#[test] +fn checkpoint_sync_url_timeout_flag() { + CommandLineTest::new() + .flag("checkpoint-sync-url-timeout", Some("300")) + 
.run_with_zero_port() + .with_config(|config| { + assert_eq!(config.chain.checkpoint_sync_url_timeout, 300); + }); +} + +#[test] +fn checkpoint_sync_url_timeout_default() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| { + assert_eq!(config.chain.checkpoint_sync_url_timeout, 60); + }); +} + #[test] fn paranoid_block_proposal_default() { CommandLineTest::new() From 3be41006a6a0d4ddf44179f1d86ccf0e3e2d0100 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Fri, 11 Nov 2022 11:03:18 +0000 Subject: [PATCH 09/11] Add --light-client-server flag and state cache utils (#3714) ## Issue Addressed Part of https://github.com/sigp/lighthouse/issues/3651. ## Proposed Changes Add a flag for enabling the light client server, which should be checked before gossip/RPC traffic is processed (e.g. https://github.com/sigp/lighthouse/pull/3693, https://github.com/sigp/lighthouse/pull/3711). The flag is available at runtime from `beacon_chain.config.enable_light_client_server`. Additionally, a new method `BeaconChain::with_mutable_state_for_block` is added which I envisage being used for computing light client updates. Unfortunately its performance will be quite poor on average because it will only run quickly with access to the tree hash cache. Each slot the tree hash cache is only available for a brief window of time between the head block being processed and the state advance at 9s in the slot. When the state advance happens the cache is moved and mutated to get ready for the next slot, which makes it no longer useful for merkle proofs related to the head block. Rather than spend more time trying to optimise this I think we should continue prototyping with this code, and I'll make sure `tree-states` is ready to ship before we enable the light client server in prod (cf. https://github.com/sigp/lighthouse/pull/3206). 
## Additional Info I also fixed a bug in the implementation of `BeaconState::compute_merkle_proof` whereby the tree hash cache was moved with `.take()` but never put back with `.restore()`. --- beacon_node/beacon_chain/src/beacon_chain.rs | 40 +++++++++++++++++++ beacon_node/beacon_chain/src/chain_config.rs | 3 ++ .../beacon_chain/src/snapshot_cache.rs | 21 ++++++++++ beacon_node/src/cli.rs | 7 ++++ beacon_node/src/config.rs | 3 ++ consensus/types/src/beacon_state.rs | 12 +++--- lighthouse/tests/beacon_node.rs | 15 +++++++ testing/ef_tests/check_all_files_accessed.py | 2 - .../src/cases/merkle_proof_validity.rs | 4 ++ 9 files changed, 99 insertions(+), 8 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index b23dd30de0..6f409fdadc 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -997,6 +997,46 @@ impl BeaconChain { Ok(self.store.get_state(state_root, slot)?) } + /// Run a function with mutable access to a state for `block_root`. + /// + /// The primary purpose of this function is to borrow a state with its tree hash cache + /// from the snapshot cache *without moving it*. This means that calls to this function should + /// be kept to an absolute minimum, because holding the snapshot cache lock has the ability + /// to delay block import. + /// + /// If there is no appropriate state in the snapshot cache then one will be loaded from disk. + /// If no state is found on disk then `Ok(None)` will be returned. + /// + /// The 2nd parameter to the closure is a bool indicating whether the snapshot cache was used, + /// which can inform logging/metrics. + /// + /// NOTE: the medium-term plan is to delete this function and the snapshot cache in favour + /// of `tree-states`, where all caches are CoW and everything is good in the world. 
+ pub fn with_mutable_state_for_block>( + &self, + block: &SignedBeaconBlock, + block_root: Hash256, + f: F, + ) -> Result, Error> + where + F: FnOnce(&mut BeaconState, bool) -> Result, + { + if let Some(state) = self + .snapshot_cache + .try_write_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) + .ok_or(Error::SnapshotCacheLockTimeout)? + .borrow_unadvanced_state_mut(block_root) + { + let cache_hit = true; + f(state, cache_hit).map(Some) + } else if let Some(mut state) = self.get_state(&block.state_root(), Some(block.slot()))? { + let cache_hit = false; + f(&mut state, cache_hit).map(Some) + } else { + Ok(None) + } + } + /// Return the sync committee at `slot + 1` from the canonical chain. /// /// This is useful when dealing with sync committee messages, because messages are signed diff --git a/beacon_node/beacon_chain/src/chain_config.rs b/beacon_node/beacon_chain/src/chain_config.rs index 286cc17a96..f970c5607e 100644 --- a/beacon_node/beacon_chain/src/chain_config.rs +++ b/beacon_node/beacon_chain/src/chain_config.rs @@ -47,6 +47,8 @@ pub struct ChainConfig { pub count_unrealized_full: CountUnrealizedFull, /// Optionally set timeout for calls to checkpoint sync endpoint. pub checkpoint_sync_url_timeout: u64, + /// Whether to enable the light client server protocol. + pub enable_light_client_server: bool, } impl Default for ChainConfig { @@ -68,6 +70,7 @@ impl Default for ChainConfig { paranoid_block_proposal: false, count_unrealized_full: CountUnrealizedFull::default(), checkpoint_sync_url_timeout: 60, + enable_light_client_server: false, } } } diff --git a/beacon_node/beacon_chain/src/snapshot_cache.rs b/beacon_node/beacon_chain/src/snapshot_cache.rs index 40b73451cb..33447bc2ef 100644 --- a/beacon_node/beacon_chain/src/snapshot_cache.rs +++ b/beacon_node/beacon_chain/src/snapshot_cache.rs @@ -298,6 +298,27 @@ impl SnapshotCache { }) } + /// Borrow the state corresponding to `block_root` if it exists in the cache *unadvanced*. 
+ /// + /// Care must be taken not to mutate the state in an invalid way. This function should only + /// be used to mutate the *caches* of the state, for example the tree hash cache when + /// calculating a light client merkle proof. + pub fn borrow_unadvanced_state_mut( + &mut self, + block_root: Hash256, + ) -> Option<&mut BeaconState> { + self.snapshots + .iter_mut() + .find(|snapshot| { + // If the pre-state exists then state advance has already taken the state for + // `block_root` and mutated its tree hash cache. Rather than re-building it while + // holding the snapshot cache lock (>1 second), prefer to return `None` from this + // function and force the caller to load it from disk. + snapshot.beacon_block_root == block_root && snapshot.pre_state.is_none() + }) + .map(|snapshot| &mut snapshot.beacon_state) + } + /// If there is a snapshot with `block_root`, clone it and return the clone. pub fn get_cloned( &self, diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 16a6794f43..b00d56513c 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -868,4 +868,11 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { Useful if you intend to run a non-validating beacon node.") .takes_value(false) ) + .arg( + Arg::with_name("light-client-server") + .long("light-client-server") + .help("Act as a full node supporting light clients on the p2p network \ + [experimental]") + .takes_value(false) + ) } diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 6af753afea..99e0af6e4c 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -705,6 +705,9 @@ pub fn get_config( client_config.chain.builder_fallback_disable_checks = cli_args.is_present("builder-fallback-disable-checks"); + // Light client server config. 
+ client_config.chain.enable_light_client_server = cli_args.is_present("light-client-server"); + Ok(client_config) } diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index a6b913bcb9..79625c12e3 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -1702,12 +1702,12 @@ impl BeaconState { }; // 2. Get all `BeaconState` leaves. - let cache = self.tree_hash_cache_mut().take(); - let leaves = if let Some(mut cache) = cache { - cache.recalculate_tree_hash_leaves(self)? - } else { - return Err(Error::TreeHashCacheNotInitialized); - }; + let mut cache = self + .tree_hash_cache_mut() + .take() + .ok_or(Error::TreeHashCacheNotInitialized)?; + let leaves = cache.recalculate_tree_hash_leaves(self)?; + self.tree_hash_cache_mut().restore(cache); // 3. Make deposit tree. // Use the depth of the `BeaconState` fields (i.e. `log2(32) = 5`). diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index f24ba6895e..d69361a3a4 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -1580,3 +1580,18 @@ fn sync_eth1_chain_disable_deposit_contract_sync_flag() { .run_with_zero_port() .with_config(|config| assert_eq!(config.sync_eth1_chain, false)); } + +#[test] +fn light_client_server_default() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| assert_eq!(config.chain.enable_light_client_server, false)); +} + +#[test] +fn light_client_server_enabled() { + CommandLineTest::new() + .flag("light-client-server", None) + .run_with_zero_port() + .with_config(|config| assert_eq!(config.chain.enable_light_client_server, true)); +} diff --git a/testing/ef_tests/check_all_files_accessed.py b/testing/ef_tests/check_all_files_accessed.py index 158e875810..892b9a3770 100755 --- a/testing/ef_tests/check_all_files_accessed.py +++ b/testing/ef_tests/check_all_files_accessed.py @@ -39,8 +39,6 @@ excluded_paths = [ 
"tests/.*/.*/ssz_static/LightClientOptimistic", # LightClientFinalityUpdate "tests/.*/.*/ssz_static/LightClientFinalityUpdate", - # Merkle-proof tests for light clients - "tests/.*/.*/merkle/single_proof", # Capella tests are disabled for now. "tests/.*/capella", # One of the EF researchers likes to pack the tarballs on a Mac diff --git a/testing/ef_tests/src/cases/merkle_proof_validity.rs b/testing/ef_tests/src/cases/merkle_proof_validity.rs index 3a6f4acf1e..a57abc2e07 100644 --- a/testing/ef_tests/src/cases/merkle_proof_validity.rs +++ b/testing/ef_tests/src/cases/merkle_proof_validity.rs @@ -78,6 +78,10 @@ impl Case for MerkleProofValidity { ))); } } + + // Tree hash cache should still be initialized (not dropped). + assert!(state.tree_hash_cache().is_initialized()); + Ok(()) } } From 5dba89e43b1f1ea08af60c6b1476b8f838a63466 Mon Sep 17 00:00:00 2001 From: tim gretler Date: Sun, 13 Nov 2022 22:40:43 +0000 Subject: [PATCH 10/11] Sync committee sign bn fallback (#3624) ## Issue Addressed Closes #3612 ## Proposed Changes - Iterates through BNs until it finds a non-optimistic head. A slight change in error behavior: - Previously: `spawn_contribution_tasks` did not return an error for a non-optimistic block head. It returned `Ok(())` logged a warning. - Now: `spawn_contribution_tasks` returns an error if it cannot find a non-optimistic block head. The caller of `spawn_contribution_tasks` then logs the error as a critical error. Co-authored-by: Michael Sproul --- .../src/sync_committee_service.rs | 49 ++++++++++--------- 1 file changed, 25 insertions(+), 24 deletions(-) diff --git a/validator_client/src/sync_committee_service.rs b/validator_client/src/sync_committee_service.rs index 5b95945302..3647396ed5 100644 --- a/validator_client/src/sync_committee_service.rs +++ b/validator_client/src/sync_committee_service.rs @@ -174,39 +174,40 @@ impl SyncCommitteeService { return Ok(()); } - // Fetch `block_root` and `execution_optimistic` for `SyncCommitteeContribution`. 
+ // Fetch `block_root` with non optimistic execution for `SyncCommitteeContribution`. let response = self .beacon_nodes - .first_success(RequireSynced::Yes, OfflineOnFailure::Yes,|beacon_node| async move { - beacon_node.get_beacon_blocks_root(BlockId::Head).await - }) - .await - .map_err(|e| e.to_string())? - .ok_or_else(|| format!("No block root found for slot {}", slot))?; + .first_success( + RequireSynced::Yes, + OfflineOnFailure::Yes, + |beacon_node| async move { + match beacon_node.get_beacon_blocks_root(BlockId::Head).await { + Ok(Some(block)) if block.execution_optimistic == Some(false) => { + Ok(block) + } + Ok(Some(_)) => { + Err(format!("To sign sync committee messages for slot {slot} a non-optimistic head block is required")) + } + Ok(None) => Err(format!("No block root found for slot {}", slot)), + Err(e) => Err(e.to_string()), + } + }, + ) + .await; - let block_root = response.data.root; - if let Some(execution_optimistic) = response.execution_optimistic { - if execution_optimistic { + let block_root = match response { + Ok(block) => block.data.root, + Err(errs) => { warn!( log, - "Refusing to sign sync committee messages for optimistic head block"; + "Refusing to sign sync committee messages for an optimistic head block or \ + a block head with unknown optimistic status"; + "errors" => errs.to_string(), "slot" => slot, ); return Ok(()); } - } else if let Some(bellatrix_fork_epoch) = self.duties_service.spec.bellatrix_fork_epoch { - // If the slot is post Bellatrix, do not sign messages when we cannot verify the - // optimistic status of the head block. - if slot.epoch(E::slots_per_epoch()) > bellatrix_fork_epoch { - warn!( - log, - "Refusing to sign sync committee messages for a head block with an unknown \ - optimistic status"; - "slot" => slot, - ); - return Ok(()); - } - } + }; // Spawn one task to publish all of the sync committee signatures. 
let validator_duties = slot_duties.duties; From 9bd6d9ce7a9b4a6fba70253b20cd06b6eeff6660 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Sun, 13 Nov 2022 22:40:44 +0000 Subject: [PATCH 11/11] CI gardening maintenance (#3706) ## Issue Addressed Closes https://github.com/sigp/lighthouse/issues/3656 ## Proposed Changes * Replace `set-output` by `$GITHUB_OUTPUT` usage * Avoid rate-limits when installing `protoc` by making authenticated requests (continuation of https://github.com/sigp/lighthouse/pull/3621) * Upgrade all Ubuntu 18.04 usage to 22.04 (18.04 is end of life) * Upgrade macOS-latest to explicit macOS-12 to silence warning * Use `actions/checkout@v3` and `actions/cache@v3` to avoid deprecated NodeJS v12 ## Additional Info Can't silence the NodeJS warnings entirely due to https://github.com/sigp/lighthouse/issues/3705. Can fix that in future. --- .github/workflows/docker-antithesis.yml | 2 +- .github/workflows/docker.yml | 12 ++-- .github/workflows/linkcheck.yml | 2 +- .github/workflows/local-testnet.yml | 8 +-- .github/workflows/publish-crate.yml | 4 +- .github/workflows/release.yml | 8 +-- .github/workflows/test-suite.yml | 92 +++++++++++++++++-------- 7 files changed, 81 insertions(+), 47 deletions(-) diff --git a/.github/workflows/docker-antithesis.yml b/.github/workflows/docker-antithesis.yml index 40de0bd0a5..84f5541a3c 100644 --- a/.github/workflows/docker-antithesis.yml +++ b/.github/workflows/docker-antithesis.yml @@ -17,7 +17,7 @@ jobs: build-docker: runs-on: ubuntu-22.04 steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Update Rust run: rustup update stable - name: Dockerhub login diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 8d72319c60..13b8411695 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -22,7 +22,7 @@ jobs: # `unstable`, but for now we keep the two parts of the version separate for backwards # compatibility. 
extract-version: - runs-on: ubuntu-18.04 + runs-on: ubuntu-22.04 steps: - name: Extract version (if stable) if: github.event.ref == 'refs/heads/stable' @@ -44,7 +44,7 @@ jobs: VERSION_SUFFIX: ${{ env.VERSION_SUFFIX }} build-docker-single-arch: name: build-docker-${{ matrix.binary }} - runs-on: ubuntu-18.04 + runs-on: ubuntu-22.04 strategy: matrix: binary: [aarch64, @@ -61,7 +61,7 @@ jobs: VERSION: ${{ needs.extract-version.outputs.VERSION }} VERSION_SUFFIX: ${{ needs.extract-version.outputs.VERSION_SUFFIX }} steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Update Rust run: rustup update stable - name: Dockerhub login @@ -102,7 +102,7 @@ jobs: --push build-docker-multiarch: name: build-docker-multiarch${{ matrix.modernity }} - runs-on: ubuntu-18.04 + runs-on: ubuntu-22.04 needs: [build-docker-single-arch, extract-version] strategy: matrix: @@ -123,13 +123,13 @@ jobs: --amend ${IMAGE_NAME}:${VERSION}-amd64${VERSION_SUFFIX}${{ matrix.modernity }}; docker manifest push ${IMAGE_NAME}:${VERSION}${VERSION_SUFFIX}${{ matrix.modernity }} build-docker-lcli: - runs-on: ubuntu-18.04 + runs-on: ubuntu-22.04 needs: [extract-version] env: VERSION: ${{ needs.extract-version.outputs.VERSION }} VERSION_SUFFIX: ${{ needs.extract-version.outputs.VERSION_SUFFIX }} steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Dockerhub login run: | echo "${DOCKER_PASSWORD}" | docker login --username ${DOCKER_USERNAME} --password-stdin diff --git a/.github/workflows/linkcheck.yml b/.github/workflows/linkcheck.yml index 30a891febf..4d4e92ae14 100644 --- a/.github/workflows/linkcheck.yml +++ b/.github/workflows/linkcheck.yml @@ -15,7 +15,7 @@ jobs: steps: - name: Checkout code - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Create docker network run: docker network create book diff --git a/.github/workflows/local-testnet.yml b/.github/workflows/local-testnet.yml index 170bd9e212..b916ffee65 100644 --- 
a/.github/workflows/local-testnet.yml +++ b/.github/workflows/local-testnet.yml @@ -12,11 +12,11 @@ jobs: strategy: matrix: os: - - ubuntu-18.04 - - macos-latest + - ubuntu-22.04 + - macos-12 runs-on: ${{ matrix.os }} steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v3 - name: Get latest version of stable Rust run: rustup update stable @@ -28,7 +28,7 @@ jobs: run: npm install ganache@latest --global # https://github.com/actions/cache/blob/main/examples.md#rust---cargo - - uses: actions/cache@v2 + - uses: actions/cache@v3 id: cache-cargo with: path: | diff --git a/.github/workflows/publish-crate.yml b/.github/workflows/publish-crate.yml index a7fda90f74..736057f785 100644 --- a/.github/workflows/publish-crate.yml +++ b/.github/workflows/publish-crate.yml @@ -19,7 +19,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Extract tag - run: echo "::set-output name=TAG::$(echo ${GITHUB_REF#refs/tags/})" + run: echo "TAG=$(echo ${GITHUB_REF#refs/tags/})" >> $GITHUB_OUTPUT id: extract_tag outputs: TAG: ${{ steps.extract_tag.outputs.TAG }} @@ -30,7 +30,7 @@ jobs: env: TAG: ${{ needs.extract-tag.outputs.TAG }} steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Update Rust run: rustup update stable - name: Cargo login diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 6edb1f76c1..957d016dc6 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -16,7 +16,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Extract version - run: echo "::set-output name=VERSION::$(echo ${GITHUB_REF#refs/tags/})" + run: echo "VERSION=$(echo ${GITHUB_REF#refs/tags/})" >> $GITHUB_OUTPUT id: extract_version outputs: VERSION: ${{ steps.extract_version.outputs.VERSION }} @@ -62,7 +62,7 @@ jobs: needs: extract-version steps: - name: Checkout sources - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Build toolchain uses: actions-rs/toolchain@v1 with: @@ -199,7 +199,7 @@ jobs: steps: # This is 
necessary for generating the changelog. It has to come before "Download Artifacts" or else it deletes the artifacts. - name: Checkout sources - uses: actions/checkout@v2 + uses: actions/checkout@v3 with: fetch-depth: 0 @@ -216,7 +216,7 @@ jobs: - name: Generate Full Changelog id: changelog - run: echo "::set-output name=CHANGELOG::$(git log --pretty=format:"- %s" $(git describe --tags --abbrev=0 ${{ env.VERSION }}^)..${{ env.VERSION }})" + run: echo "CHANGELOG=$(git log --pretty=format:"- %s" $(git describe --tags --abbrev=0 ${{ env.VERSION }}^)..${{ env.VERSION }})" >> $GITHUB_OUTPUT - name: Create Release Draft env: diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index a3e9625b50..423f3deca2 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -24,12 +24,12 @@ jobs: extract-msrv: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v3 - name: Extract Minimum Supported Rust Version (MSRV) run: | metadata=$(cargo metadata --no-deps --format-version 1) msrv=$(echo $metadata | jq -r '.packages | map(select(.name == "lighthouse")) | .[0].rust_version') - echo "::set-output name=MSRV::$msrv" + echo "MSRV=$msrv" >> $GITHUB_OUTPUT id: extract_msrv outputs: MSRV: ${{ steps.extract_msrv.outputs.MSRV }} @@ -37,7 +37,7 @@ jobs: name: cargo-fmt runs-on: ubuntu-latest steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v3 - name: Get latest version of stable Rust run: rustup update stable - name: Check formatting with cargo fmt @@ -47,11 +47,13 @@ jobs: runs-on: ubuntu-latest needs: cargo-fmt steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v3 - name: Get latest version of stable Rust run: rustup update stable - name: Install Protoc uses: arduino/setup-protoc@v1 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Install ganache run: sudo npm install -g ganache - name: Run tests in release @@ -61,7 +63,7 @@ jobs: runs-on: windows-2019 needs: 
cargo-fmt steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v3 - name: Get latest version of stable Rust run: rustup update stable - name: Use Node.js @@ -89,11 +91,13 @@ jobs: runs-on: ubuntu-latest needs: cargo-fmt steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v3 - name: Get latest version of stable Rust run: rustup update stable - name: Install Protoc uses: arduino/setup-protoc@v1 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Run beacon_chain tests for all known forks run: make test-beacon-chain op-pool-tests: @@ -101,11 +105,13 @@ jobs: runs-on: ubuntu-latest needs: cargo-fmt steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v3 - name: Get latest version of stable Rust run: rustup update stable - name: Install Protoc uses: arduino/setup-protoc@v1 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Run operation_pool tests for all known forks run: make test-op-pool slasher-tests: @@ -113,7 +119,7 @@ jobs: runs-on: ubuntu-latest needs: cargo-fmt steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v3 - name: Get latest version of stable Rust run: rustup update stable - name: Run slasher tests for all supported backends @@ -123,11 +129,13 @@ jobs: runs-on: ubuntu-22.04 needs: cargo-fmt steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v3 - name: Get latest version of stable Rust run: rustup update stable - name: Install Protoc uses: arduino/setup-protoc@v1 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Install ganache run: sudo npm install -g ganache - name: Run tests in debug @@ -137,11 +145,13 @@ jobs: runs-on: ubuntu-latest needs: cargo-fmt steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v3 - name: Get latest version of stable Rust run: rustup update stable - name: Install Protoc uses: arduino/setup-protoc@v1 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Run state_transition_vectors in release. 
run: make run-state-transition-tests ef-tests-ubuntu: @@ -149,11 +159,13 @@ jobs: runs-on: ubuntu-latest needs: cargo-fmt steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v3 - name: Get latest version of stable Rust run: rustup update stable - name: Install Protoc uses: arduino/setup-protoc@v1 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Run consensus-spec-tests with blst, milagro and fake_crypto run: make test-ef dockerfile-ubuntu: @@ -161,7 +173,7 @@ jobs: runs-on: ubuntu-latest needs: cargo-fmt steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v3 - name: Get latest version of stable Rust run: rustup update stable - name: Build the root Dockerfile @@ -173,11 +185,13 @@ jobs: runs-on: ubuntu-latest needs: cargo-fmt steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v3 - name: Get latest version of stable Rust run: rustup update stable - name: Install Protoc uses: arduino/setup-protoc@v1 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Install ganache run: sudo npm install -g ganache - name: Run the beacon chain sim that starts from an eth1 contract @@ -187,11 +201,13 @@ jobs: runs-on: ubuntu-latest needs: cargo-fmt steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v3 - name: Get latest version of stable Rust run: rustup update stable - name: Install Protoc uses: arduino/setup-protoc@v1 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Install ganache run: sudo npm install -g ganache - name: Run the beacon chain sim and go through the merge transition @@ -201,11 +217,13 @@ jobs: runs-on: ubuntu-latest needs: cargo-fmt steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v3 - name: Get latest version of stable Rust run: rustup update stable - name: Install Protoc uses: arduino/setup-protoc@v1 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Install ganache run: sudo npm install -g ganache - name: Run the beacon chain sim without an eth1 connection @@ -215,11 
+233,13 @@ jobs: runs-on: ubuntu-latest needs: cargo-fmt steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v3 - name: Get latest version of stable Rust run: rustup update stable - name: Install Protoc uses: arduino/setup-protoc@v1 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Install ganache run: sudo npm install -g ganache - name: Run the syncing simulator @@ -229,11 +249,13 @@ jobs: runs-on: ubuntu-latest needs: cargo-fmt steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v3 - name: Get latest version of stable Rust run: rustup update stable - name: Install Protoc uses: arduino/setup-protoc@v1 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Install ganache run: sudo npm install -g ganache - name: Install lighthouse and lcli @@ -253,17 +275,19 @@ jobs: runs-on: ubuntu-latest needs: cargo-fmt steps: - - uses: actions/checkout@v1 - - uses: actions/setup-go@v2 + - uses: actions/checkout@v3 + - uses: actions/setup-go@v3 with: go-version: '1.17' - - uses: actions/setup-dotnet@v1 + - uses: actions/setup-dotnet@v3 with: dotnet-version: '6.0.201' - name: Get latest version of stable Rust run: rustup update stable - name: Install Protoc uses: arduino/setup-protoc@v1 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Run exec engine integration tests in release run: make test-exec-engine check-benchmarks: @@ -271,11 +295,13 @@ jobs: runs-on: ubuntu-latest needs: cargo-fmt steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v3 - name: Get latest version of stable Rust run: rustup update stable - name: Install Protoc uses: arduino/setup-protoc@v1 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Typecheck benchmark code without running it run: make check-benches check-consensus: @@ -283,7 +309,7 @@ jobs: runs-on: ubuntu-latest needs: cargo-fmt steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v3 - name: Get latest version of stable Rust run: rustup update stable - name: Typecheck 
consensus code in strict mode @@ -293,11 +319,13 @@ jobs: runs-on: ubuntu-latest needs: cargo-fmt steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v3 - name: Get latest version of stable Rust run: rustup update stable - name: Install Protoc uses: arduino/setup-protoc@v1 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Lint code for quality and style with Clippy run: make lint - name: Certify Cargo.lock freshness @@ -308,7 +336,7 @@ jobs: needs: cargo-fmt continue-on-error: true steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v3 - name: Install SigP Clippy fork run: | cd .. @@ -319,6 +347,8 @@ jobs: cargo build --release --bin cargo-clippy --bin clippy-driver -Zunstable-options --out-dir $(rustc --print=sysroot)/bin - name: Install Protoc uses: arduino/setup-protoc@v1 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Run Clippy with the disallowed-from-async lint run: make nightly-lint check-msrv: @@ -326,11 +356,13 @@ jobs: runs-on: ubuntu-latest needs: [cargo-fmt, extract-msrv] steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v3 - name: Install Rust @ MSRV (${{ needs.extract-msrv.outputs.MSRV }}) run: rustup override set ${{ needs.extract-msrv.outputs.MSRV }} - name: Install Protoc uses: arduino/setup-protoc@v1 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Run cargo check run: cargo check --workspace arbitrary-check: @@ -338,7 +370,7 @@ jobs: runs-on: ubuntu-latest needs: cargo-fmt steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v3 - name: Get latest version of stable Rust run: rustup update stable - name: Validate state_processing feature arbitrary-fuzz @@ -348,7 +380,7 @@ jobs: runs-on: ubuntu-latest needs: cargo-fmt steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v3 - name: Get latest version of stable Rust run: rustup update stable - name: Run cargo audit to identify known security vulnerabilities reported to the RustSec Advisory Database @@ -358,7 +390,7 
@@ jobs: runs-on: ubuntu-latest needs: cargo-fmt steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v3 - name: Run cargo vendor to make sure dependencies can be vendored for packaging, reproducibility and archival purpose run: CARGO_HOME=$(readlink -f $HOME) make vendor cargo-udeps: @@ -366,13 +398,15 @@ jobs: runs-on: ubuntu-latest needs: cargo-fmt steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v3 - name: Install Rust (${{ env.PINNED_NIGHTLY }}) run: rustup toolchain install $PINNED_NIGHTLY # NOTE: cargo-udeps version is pinned until this issue is resolved: # https://github.com/est31/cargo-udeps/issues/135 - name: Install Protoc uses: arduino/setup-protoc@v1 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Install cargo-udeps run: cargo install cargo-udeps --locked --force --version 0.1.30 - name: Create Cargo config dir