From 2f8531dc607b9626253ce141d648d3a6392eb52f Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 14 Feb 2022 23:57:23 +0000 Subject: [PATCH 01/14] Update to consensus-specs v1.1.9 (#3016) ## Issue Addressed Closes #3014 ## Proposed Changes - Rename `receipt_root` to `receipts_root` - Rename `execute_payload` to `notify_new_payload` - This is slightly weird since we modify everything except the actual HTTP call to the engine API. That change is expected to be implemented in #2985 (cc @ethDreamer) - Enable "random" tests for Bellatrix. ## Notes This will *partially* break compatibility with Kintsugi testnets in order to gain compatibility with [Kiln](https://hackmd.io/@n0ble/kiln-spec) testnets. I think it will only break the BN APIs due to the `receipts_root` change, however it might have some other effects too. Co-authored-by: Michael Sproul --- .../beacon_chain/src/block_verification.rs | 4 ++-- .../beacon_chain/src/execution_payload.rs | 12 ++++++------ beacon_node/execution_layer/src/engine_api.rs | 2 +- .../execution_layer/src/engine_api/http.rs | 16 ++++++++-------- .../src/engine_api/json_structures.rs | 6 +++--- beacon_node/execution_layer/src/lib.rs | 12 ++++++------ .../test_utils/execution_block_generator.rs | 4 ++-- .../src/test_utils/handle_rpc.rs | 4 ++-- .../src/test_utils/mock_execution_layer.rs | 2 +- .../execution_layer/src/test_utils/mod.rs | 7 ++++--- .../gnosis/config.yaml | 1 + .../mainnet/config.yaml | 1 + .../prater/config.yaml | 1 + common/eth2_network_config/src/lib.rs | 1 + .../src/per_block_processing.rs | 2 +- consensus/types/src/chain_spec.rs | 18 ++++++++++++++++++ consensus/types/src/config_and_preset.rs | 1 - consensus/types/src/execution_payload.rs | 2 +- .../types/src/execution_payload_header.rs | 2 +- testing/ef_tests/Makefile | 2 +- testing/ef_tests/src/handler.rs | 5 ----- 21 files changed, 61 insertions(+), 44 deletions(-) diff --git a/beacon_node/beacon_chain/src/block_verification.rs 
b/beacon_node/beacon_chain/src/block_verification.rs index c2dc0028e9..8d61d9cbf9 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -41,7 +41,7 @@ //! //! ``` use crate::execution_payload::{ - execute_payload, validate_execution_payload_for_gossip, validate_merge_block, + notify_new_payload, validate_execution_payload_for_gossip, validate_merge_block, }; use crate::snapshot_cache::PreProcessingSnapshot; use crate::validator_monitor::HISTORIC_EPOCHS as VALIDATOR_MONITOR_HISTORIC_EPOCHS; @@ -1125,7 +1125,7 @@ impl<'a, T: BeaconChainTypes> FullyVerifiedBlock<'a, T> { // // It is important that this function is called *after* `per_slot_processing`, since the // `randao` may change. - let payload_verification_status = execute_payload(chain, &state, block.message())?; + let payload_verification_status = notify_new_payload(chain, &state, block.message())?; // If the block is sufficiently recent, notify the validator monitor. 
if let Some(slot) = chain.slot_clock.now() { diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index 21d51be99d..ba20156699 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -27,11 +27,11 @@ use types::*; /// /// ## Specification /// -/// Equivalent to the `execute_payload` function in the merge Beacon Chain Changes, although it +/// Equivalent to the `notify_new_payload` function in the merge Beacon Chain Changes, although it /// contains a few extra checks by running `partially_verify_execution_payload` first: /// -/// https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/merge/beacon-chain.md#execute_payload -pub fn execute_payload( +/// https://github.com/ethereum/consensus-specs/blob/v1.1.9/specs/bellatrix/beacon-chain.md#notify_new_payload +pub fn notify_new_payload( chain: &BeaconChain, state: &BeaconState, block: BeaconBlockRef, @@ -53,10 +53,10 @@ pub fn execute_payload( .execution_layer .as_ref() .ok_or(ExecutionPayloadError::NoExecutionConnection)?; - let execute_payload_response = execution_layer - .block_on(|execution_layer| execution_layer.execute_payload(execution_payload)); + let notify_new_payload_response = execution_layer + .block_on(|execution_layer| execution_layer.notify_new_payload(execution_payload)); - match execute_payload_response { + match notify_new_payload_response { Ok((status, _latest_valid_hash)) => match status { ExecutePayloadResponseStatus::Valid => Ok(PayloadVerificationStatus::Verified), // TODO(merge): invalidate any invalid ancestors of this block in fork choice. 
diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index f9654a497b..e59a706b27 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -55,7 +55,7 @@ pub trait EngineApi { block_hash: Hash256, ) -> Result, Error>; - async fn execute_payload_v1( + async fn notify_new_payload_v1( &self, execution_payload: ExecutionPayload, ) -> Result; diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index c7c60a9006..39312e660a 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -133,7 +133,7 @@ impl EngineApi for HttpJsonRpc { .await } - async fn execute_payload_v1( + async fn notify_new_payload_v1( &self, execution_payload: ExecutionPayload, ) -> Result { @@ -486,16 +486,16 @@ mod test { } #[tokio::test] - async fn execute_payload_v1_request() { + async fn notify_new_payload_v1_request() { Tester::new() .assert_request_equals( |client| async move { let _ = client - .execute_payload_v1::(ExecutionPayload { + .notify_new_payload_v1::(ExecutionPayload { parent_hash: Hash256::repeat_byte(0), fee_recipient: Address::repeat_byte(1), state_root: Hash256::repeat_byte(1), - receipt_root: Hash256::repeat_byte(0), + receipts_root: Hash256::repeat_byte(0), logs_bloom: vec![1; 256].into(), random: Hash256::repeat_byte(1), block_number: 0, @@ -702,7 +702,7 @@ mod test { parent_hash: Hash256::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), fee_recipient: Address::from_str("0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b").unwrap(), state_root: Hash256::from_str("0xca3149fa9e37db08d1cd49c9061db1002ef1cd58db2210f2115c8c989b2bdf45").unwrap(), - receipt_root: Hash256::from_str("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").unwrap(), + receipts_root: 
Hash256::from_str("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").unwrap(), logs_bloom: vec![0; 256].into(), random: Hash256::zero(), block_number: 1, @@ -723,11 +723,11 @@ mod test { // engine_executePayloadV1 REQUEST validation |client| async move { let _ = client - .execute_payload_v1::(ExecutionPayload { + .notify_new_payload_v1::(ExecutionPayload { parent_hash: Hash256::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), fee_recipient: Address::from_str("0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b").unwrap(), state_root: Hash256::from_str("0xca3149fa9e37db08d1cd49c9061db1002ef1cd58db2210f2115c8c989b2bdf45").unwrap(), - receipt_root: Hash256::from_str("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").unwrap(), + receipts_root: Hash256::from_str("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").unwrap(), logs_bloom: vec![0; 256].into(), random: Hash256::zero(), block_number: 1, @@ -776,7 +776,7 @@ mod test { })], |client| async move { let response = client - .execute_payload_v1::(ExecutionPayload::default()) + .notify_new_payload_v1::(ExecutionPayload::default()) .await .unwrap(); diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index ae6d730fa5..ae542f9a5d 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -89,7 +89,7 @@ impl From> for JsonExecutionPayloadV1 { parent_hash, fee_recipient, state_root, - receipt_root, + receipts_root, logs_bloom, random, block_number, @@ -106,7 +106,7 @@ impl From> for JsonExecutionPayloadV1 { parent_hash, fee_recipient, state_root, - receipts_root: receipt_root, + receipts_root, logs_bloom, random, block_number, @@ -145,7 +145,7 @@ impl From> for ExecutionPayload { parent_hash, fee_recipient, state_root, - receipt_root: receipts_root, + receipts_root, 
logs_bloom, random, block_number, diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index cb267e5f0a..2fbd72e157 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -441,7 +441,7 @@ impl ExecutionLayer { .map_err(Error::EngineErrors) } - /// Maps to the `engine_executePayload` JSON-RPC call. + /// Maps to the `engine_newPayload` JSON-RPC call. /// /// ## Fallback Behaviour /// @@ -453,7 +453,7 @@ impl ExecutionLayer { /// - Invalid, if any nodes return invalid. /// - Syncing, if any nodes return syncing. /// - An error, if all nodes return an error. - pub async fn execute_payload( + pub async fn notify_new_payload( &self, execution_payload: &ExecutionPayload, ) -> Result<(ExecutePayloadResponseStatus, Option), Error> { @@ -467,7 +467,7 @@ impl ExecutionLayer { let broadcast_results = self .engines() - .broadcast(|engine| engine.api.execute_payload_v1(execution_payload.clone())) + .broadcast(|engine| engine.api.notify_new_payload_v1(execution_payload.clone())) .await; let mut errors = vec![]; @@ -486,7 +486,7 @@ impl ExecutionLayer { id: "unknown".to_string(), error: engine_api::Error::BadResponse( format!( - "execute_payload: response.status = Valid but invalid latest_valid_hash. Expected({:?}) Found({:?})", + "notify_new_payload: response.status = Valid but invalid latest_valid_hash. 
Expected({:?}) Found({:?})", execution_payload.block_hash, latest_hash, ) @@ -503,7 +503,7 @@ impl ExecutionLayer { Ok((None, status)) => errors.push(EngineError::Api { id: "unknown".to_string(), error: engine_api::Error::BadResponse(format!( - "execute_payload: status {:?} returned with null latest_valid_hash", + "notify_new_payload: status {:?} returned with null latest_valid_hash", status )), }), @@ -515,7 +515,7 @@ impl ExecutionLayer { crit!( self.log(), "Consensus failure between execution nodes"; - "method" => "execute_payload" + "method" => "notify_new_payload" ); } diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index 552bea0ea4..61aaedd359 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -235,7 +235,7 @@ impl ExecutionBlockGenerator { self.payload_ids.remove(id) } - pub fn execute_payload(&mut self, payload: ExecutionPayload) -> ExecutePayloadResponse { + pub fn notify_new_payload(&mut self, payload: ExecutionPayload) -> ExecutePayloadResponse { let parent = if let Some(parent) = self.blocks.get(&payload.parent_hash) { parent } else { @@ -325,7 +325,7 @@ impl ExecutionBlockGenerator { let mut execution_payload = ExecutionPayload { parent_hash: forkchoice_state.head_block_hash, fee_recipient: attributes.suggested_fee_recipient, - receipt_root: Hash256::repeat_byte(42), + receipts_root: Hash256::repeat_byte(42), state_root: Hash256::repeat_byte(43), logs_bloom: vec![0; 256].into(), random: attributes.random, diff --git a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs index 131bc8ba0a..11232bc081 100644 --- a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs +++ b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs @@ -57,7 +57,7 @@ pub async fn 
handle_rpc( ENGINE_EXECUTE_PAYLOAD_V1 => { let request: JsonExecutionPayloadV1 = get_param(params, 0)?; - let response = if let Some(status) = *ctx.static_execute_payload_response.lock() { + let response = if let Some(status) = *ctx.static_notify_new_payload_response.lock() { match status { ExecutePayloadResponseStatus::Valid => ExecutePayloadResponse { status, @@ -74,7 +74,7 @@ pub async fn handle_rpc( } else { ctx.execution_block_generator .write() - .execute_payload(request.into()) + .notify_new_payload(request.into()) }; Ok(serde_json::to_value(JsonExecutePayloadV1Response::from(response)).unwrap()) diff --git a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs index 295e82914b..4f5337075d 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs @@ -146,7 +146,7 @@ impl MockExecutionLayer { assert_eq!(payload.random, random); let (payload_response, latest_valid_hash) = - self.el.execute_payload(&payload).await.unwrap(); + self.el.notify_new_payload(&payload).await.unwrap(); assert_eq!(payload_response, ExecutePayloadResponseStatus::Valid); assert_eq!(latest_valid_hash, Some(payload.block_hash)); diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs index cd45d34a1f..fbc3751784 100644 --- a/beacon_node/execution_layer/src/test_utils/mod.rs +++ b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -62,7 +62,7 @@ impl MockServer { last_echo_request: last_echo_request.clone(), execution_block_generator: RwLock::new(execution_block_generator), preloaded_responses, - static_execute_payload_response: <_>::default(), + static_notify_new_payload_response: <_>::default(), _phantom: PhantomData, }); @@ -117,7 +117,8 @@ impl MockServer { } pub fn all_payloads_valid(&self) { - *self.ctx.static_execute_payload_response.lock() = 
Some(ExecutePayloadResponseStatus::Valid) + *self.ctx.static_notify_new_payload_response.lock() = + Some(ExecutePayloadResponseStatus::Valid) } pub fn insert_pow_block( @@ -187,7 +188,7 @@ pub struct Context { pub last_echo_request: Arc>>, pub execution_block_generator: RwLock>, pub preloaded_responses: Arc>>, - pub static_execute_payload_response: Arc>>, + pub static_notify_new_payload_response: Arc>>, pub _phantom: PhantomData, } diff --git a/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml b/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml index c34ebed7d5..12d7995285 100644 --- a/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml @@ -1,6 +1,7 @@ # Gnosis Beacon Chain config # Extends the gnosis preset +CONFIG_NAME: 'gnosis' PRESET_BASE: 'gnosis' # Transition diff --git a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml index b889b82887..6993c24b8e 100644 --- a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml @@ -1,6 +1,7 @@ # Mainnet config # Extends the mainnet preset +CONFIG_NAME: 'mainnet' PRESET_BASE: 'mainnet' # Transition diff --git a/common/eth2_network_config/built_in_network_configs/prater/config.yaml b/common/eth2_network_config/built_in_network_configs/prater/config.yaml index 72a106f36a..106c95595e 100644 --- a/common/eth2_network_config/built_in_network_configs/prater/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/prater/config.yaml @@ -1,6 +1,7 @@ # Prater config # Extends the mainnet preset +CONFIG_NAME: 'prater' PRESET_BASE: 'mainnet' # Transition diff --git a/common/eth2_network_config/src/lib.rs b/common/eth2_network_config/src/lib.rs index fa8e1a3dd1..8df54a5a8b 100644 --- 
a/common/eth2_network_config/src/lib.rs +++ b/common/eth2_network_config/src/lib.rs @@ -275,6 +275,7 @@ mod tests { "{:?}", net.name ); + assert_eq!(config.config.config_name, Some(net.name.to_string())); } } diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index 857c776332..a874ce6428 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -366,7 +366,7 @@ pub fn process_execution_payload( parent_hash: payload.parent_hash, fee_recipient: payload.fee_recipient, state_root: payload.state_root, - receipt_root: payload.receipt_root, + receipts_root: payload.receipts_root, logs_bloom: payload.logs_bloom.clone(), random: payload.random, block_number: payload.block_number, diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index fa74f9d29c..d391fe01e1 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -28,6 +28,11 @@ pub enum Domain { #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive(PartialEq, Debug, Clone)] pub struct ChainSpec { + /* + * Config name + */ + pub config_name: Option, + /* * Constants */ @@ -405,6 +410,10 @@ impl ChainSpec { /// Returns a `ChainSpec` compatible with the Ethereum Foundation specification. pub fn mainnet() -> Self { Self { + /* + * Config name + */ + config_name: Some("mainnet".to_string()), /* * Constants */ @@ -563,6 +572,7 @@ impl ChainSpec { let boot_nodes = vec![]; Self { + config_name: None, max_committees_per_slot: 4, target_committee_size: 4, churn_limit_quotient: 32, @@ -600,6 +610,7 @@ impl ChainSpec { /// Returns a `ChainSpec` compatible with the Gnosis Beacon Chain specification. 
pub fn gnosis() -> Self { Self { + config_name: Some("gnosis".to_string()), /* * Constants */ @@ -763,6 +774,10 @@ impl Default for ChainSpec { #[derive(Serialize, Deserialize, Debug, PartialEq, Clone)] #[serde(rename_all = "UPPERCASE")] pub struct Config { + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub config_name: Option, + #[serde(default)] pub preset_base: String, @@ -914,6 +929,7 @@ impl Config { pub fn from_chain_spec(spec: &ChainSpec) -> Self { Self { + config_name: spec.config_name.clone(), preset_base: T::spec_name().to_string(), terminal_total_difficulty: spec.terminal_total_difficulty, @@ -964,6 +980,7 @@ impl Config { pub fn apply_to_chain_spec(&self, chain_spec: &ChainSpec) -> Option { // Pattern match here to avoid missing any fields. let &Config { + ref config_name, ref preset_base, terminal_total_difficulty, terminal_block_hash, @@ -997,6 +1014,7 @@ impl Config { } Some(ChainSpec { + config_name: config_name.clone(), min_genesis_active_validator_count, min_genesis_time, genesis_fork_version, diff --git a/consensus/types/src/config_and_preset.rs b/consensus/types/src/config_and_preset.rs index affda1a061..d782f4d8b1 100644 --- a/consensus/types/src/config_and_preset.rs +++ b/consensus/types/src/config_and_preset.rs @@ -46,7 +46,6 @@ impl ConfigAndPreset { let u32_hex = |v: u32| hex_string(&v.to_le_bytes()); let u8_hex = |v: u8| hex_string(&v.to_le_bytes()); let fields = vec![ - ("config_name", self.config.preset_base.clone()), ( "bls_withdrawal_prefix", u8_hex(spec.bls_withdrawal_prefix_byte), diff --git a/consensus/types/src/execution_payload.rs b/consensus/types/src/execution_payload.rs index 2fb253f12c..781fb7460f 100644 --- a/consensus/types/src/execution_payload.rs +++ b/consensus/types/src/execution_payload.rs @@ -18,7 +18,7 @@ pub struct ExecutionPayload { pub parent_hash: Hash256, pub fee_recipient: Address, pub state_root: Hash256, - pub receipt_root: Hash256, + pub receipts_root: Hash256, #[serde(with = 
"ssz_types::serde_utils::hex_fixed_vec")] pub logs_bloom: FixedVector, pub random: Hash256, diff --git a/consensus/types/src/execution_payload_header.rs b/consensus/types/src/execution_payload_header.rs index 6cb76a6465..aa022f6420 100644 --- a/consensus/types/src/execution_payload_header.rs +++ b/consensus/types/src/execution_payload_header.rs @@ -12,7 +12,7 @@ pub struct ExecutionPayloadHeader { pub parent_hash: Hash256, pub fee_recipient: Address, pub state_root: Hash256, - pub receipt_root: Hash256, + pub receipts_root: Hash256, #[serde(with = "ssz_types::serde_utils::hex_fixed_vec")] pub logs_bloom: FixedVector, pub random: Hash256, diff --git a/testing/ef_tests/Makefile b/testing/ef_tests/Makefile index 3cd6d17c0c..816651bb45 100644 --- a/testing/ef_tests/Makefile +++ b/testing/ef_tests/Makefile @@ -1,4 +1,4 @@ -TESTS_TAG := v1.1.8 +TESTS_TAG := v1.1.9 TESTS = general minimal mainnet TARBALLS = $(patsubst %,%-$(TESTS_TAG).tar.gz,$(TESTS)) diff --git a/testing/ef_tests/src/handler.rs b/testing/ef_tests/src/handler.rs index 636119cdba..be6c495aae 100644 --- a/testing/ef_tests/src/handler.rs +++ b/testing/ef_tests/src/handler.rs @@ -307,11 +307,6 @@ pub struct RandomHandler(PhantomData); impl Handler for RandomHandler { type Case = cases::SanityBlocks; - // FIXME(merge): enable merge tests once available - fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool { - fork_name != ForkName::Merge - } - fn config_name() -> &'static str { E::name() } From 0a6a8ea3b0e257028c1248c051d600b036d97575 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 17 Feb 2022 21:47:06 +0000 Subject: [PATCH 02/14] Engine API v1.0.0.alpha.6 + interop tests (#3024) ## Issue Addressed NA ## Proposed Changes This PR extends #3018 to address my review comments there and add automated integration tests with Geth (and other implementations, in the future). I've also de-duplicated the "unused port" logic by creating an `common/unused_port` crate. 
## Additional Info I'm not sure if we want to merge this PR, or update #3018 and merge that. I don't mind, I'm primarily opening this PR to make sure CI works. Co-authored-by: Mark Mackey --- .github/workflows/test-suite.yml | 12 +- Cargo.lock | 29 +- Cargo.toml | 2 + Makefile | 7 +- beacon_node/Cargo.toml | 1 + beacon_node/beacon_chain/src/beacon_chain.rs | 41 +- .../beacon_chain/src/block_verification.rs | 6 +- beacon_node/beacon_chain/src/errors.rs | 5 + .../beacon_chain/src/execution_payload.rs | 30 +- beacon_node/client/src/builder.rs | 1 + beacon_node/execution_layer/src/engine_api.rs | 20 +- .../execution_layer/src/engine_api/http.rs | 76 ++-- .../src/engine_api/json_structures.rs | 99 +++-- beacon_node/execution_layer/src/engines.rs | 8 +- beacon_node/execution_layer/src/lib.rs | 185 ++++++--- .../test_utils/execution_block_generator.rs | 17 +- .../src/test_utils/handle_rpc.rs | 24 +- .../src/test_utils/mock_execution_layer.rs | 4 +- .../execution_layer/src/test_utils/mod.rs | 9 +- beacon_node/lighthouse_network/Cargo.toml | 1 + .../lighthouse_network/src/discovery/mod.rs | 10 +- .../lighthouse_network/tests/common/mod.rs | 36 +- beacon_node/src/config.rs | 47 +-- common/unused_port/Cargo.toml | 8 + common/unused_port/src/lib.rs | 55 +++ lighthouse/Cargo.toml | 1 + lighthouse/tests/beacon_node.rs | 60 +-- lighthouse/tests/boot_node.rs | 16 +- testing/eth1_test_rig/Cargo.toml | 1 + testing/eth1_test_rig/src/ganache.rs | 24 +- .../execution_engine_integration/.gitignore | 1 + .../execution_engine_integration/Cargo.toml | 19 + testing/execution_engine_integration/Makefile | 5 + testing/execution_engine_integration/build.rs | 62 +++ .../src/execution_engine.rs | 131 +++++++ .../src/genesis_json.rs | 42 ++ .../execution_engine_integration/src/lib.rs | 12 + .../src/test_rig.rs | 363 ++++++++++++++++++ .../tests/tests.rs | 16 + testing/web3signer_tests/src/lib.rs | 2 +- 40 files changed, 1125 insertions(+), 363 deletions(-) create mode 100644 
common/unused_port/Cargo.toml create mode 100644 common/unused_port/src/lib.rs create mode 100644 testing/execution_engine_integration/.gitignore create mode 100644 testing/execution_engine_integration/Cargo.toml create mode 100644 testing/execution_engine_integration/Makefile create mode 100644 testing/execution_engine_integration/build.rs create mode 100644 testing/execution_engine_integration/src/execution_engine.rs create mode 100644 testing/execution_engine_integration/src/genesis_json.rs create mode 100644 testing/execution_engine_integration/src/lib.rs create mode 100644 testing/execution_engine_integration/src/test_rig.rs create mode 100644 testing/execution_engine_integration/tests/tests.rs diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index 8b590f4e6e..04f7659fe2 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -44,7 +44,7 @@ jobs: run: make test-release release-tests-windows: name: release-tests-windows - runs-on: windows-latest + runs-on: windows-2019 needs: cargo-fmt steps: - uses: actions/checkout@v1 @@ -184,6 +184,16 @@ jobs: run: | cd scripts/tests ./doppelganger_protection.sh failure + execution-engine-integration-ubuntu: + name: execution-engine-integration-ubuntu + runs-on: ubuntu-latest + needs: cargo-fmt + steps: + - uses: actions/checkout@v1 + - name: Get latest version of stable Rust + run: rustup update stable + - name: Run exec engine integration tests in release + run: make test-exec-engine check-benchmarks: name: check-benchmarks runs-on: ubuntu-latest diff --git a/Cargo.lock b/Cargo.lock index 822d24c8af..c9a9e69683 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -361,6 +361,7 @@ dependencies = [ "store", "task_executor", "types", + "unused_port", ] [[package]] @@ -626,9 +627,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.72" +version = "1.0.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"22a9137b95ea06864e018375b72adfb7db6e6f68cfc8df5a04d00288050485ee" +checksum = "2fff2a6927b3bb87f9595d67196a70493f627687a71d87a0d692242c33f58c11" [[package]] name = "cexpr" @@ -1499,6 +1500,7 @@ dependencies = [ "serde_json", "tokio", "types", + "unused_port", "web3", ] @@ -1804,6 +1806,23 @@ dependencies = [ "uint 0.9.3", ] +[[package]] +name = "execution_engine_integration" +version = "0.1.0" +dependencies = [ + "environment", + "execution_layer", + "exit-future", + "futures", + "sensitive_url", + "serde_json", + "task_executor", + "tempfile", + "tokio", + "types", + "unused_port", +] + [[package]] name = "execution_layer" version = "0.1.0" @@ -3325,6 +3344,7 @@ dependencies = [ "task_executor", "tempfile", "types", + "unused_port", "validator_client", "validator_dir", ] @@ -3380,6 +3400,7 @@ dependencies = [ "tokio-util", "types", "unsigned-varint 0.6.0", + "unused_port", "void", ] @@ -6599,6 +6620,10 @@ version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" +[[package]] +name = "unused_port" +version = "0.1.0" + [[package]] name = "url" version = "2.2.2" diff --git a/Cargo.toml b/Cargo.toml index d27c1dc132..aee6755da1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -39,6 +39,7 @@ members = [ "common/task_executor", "common/target_check", "common/test_random_derive", + "common/unused_port", "common/validator_dir", "common/warp_utils", "common/fallback", @@ -74,6 +75,7 @@ members = [ "testing/ef_tests", "testing/eth1_test_rig", + "testing/execution_engine_integration", "testing/node_test_rig", "testing/simulator", "testing/test-test_logger", diff --git a/Makefile b/Makefile index a92da9bcc8..bc607304af 100644 --- a/Makefile +++ b/Makefile @@ -2,6 +2,7 @@ EF_TESTS = "testing/ef_tests" STATE_TRANSITION_VECTORS = "testing/state_transition_vectors" +EXECUTION_ENGINE_INTEGRATION = "testing/execution_engine_integration" GIT_TAG := $(shell git describe --tags 
--candidates 1) BIN_DIR = "bin" @@ -123,12 +124,16 @@ run-state-transition-tests: # Downloads and runs the EF test vectors. test-ef: make-ef-tests run-ef-tests +# Runs tests checking interop between Lighthouse and execution clients. +test-exec-engine: + make -C $(EXECUTION_ENGINE_INTEGRATION) test + # Runs the full workspace tests in release, without downloading any additional # test vectors. test: test-release # Runs the entire test suite, downloading test vectors if required. -test-full: cargo-fmt test-release test-debug test-ef +test-full: cargo-fmt test-release test-debug test-ef test-exec-engine # Lints the code for bad style and potentially unsafe arithmetic using Clippy. # Clippy lints are opt-in per-crate for now. By default, everything is allowed except for performance and correctness lints. diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 9710c8ccff..46ff5ba228 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -39,3 +39,4 @@ slasher = { path = "../slasher" } monitoring_api = { path = "../common/monitoring_api" } sensitive_url = { path = "../common/sensitive_url" } http_api = { path = "http_api" } +unused_port = { path = "../common/unused_port" } diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 6fe96540ba..67aed4b484 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -52,7 +52,7 @@ use crate::{metrics, BeaconChainError}; use eth2::types::{ EventKind, SseBlock, SseChainReorg, SseFinalizedCheckpoint, SseHead, SseLateHead, SyncDuty, }; -use execution_layer::ExecutionLayer; +use execution_layer::{ExecutionLayer, PayloadStatusV1Status}; use fork_choice::{AttestationFromBlock, ForkChoice}; use futures::channel::mpsc::Sender; use itertools::process_results; @@ -3590,10 +3590,11 @@ impl BeaconChain { store, new_finalized_checkpoint.root, new_head_execution_block_hash, + &log, ) .await { - debug!( + crit!( 
log, "Failed to update execution head"; "error" => ?e @@ -3613,6 +3614,7 @@ impl BeaconChain { store: BeaconStore, finalized_beacon_block_root: Hash256, head_execution_block_hash: Hash256, + log: &Logger, ) -> Result<(), Error> { // Loading the finalized block from the store is not ideal. Perhaps it would be better to // store it on fork-choice so we can do a lookup without hitting the database. @@ -3630,14 +3632,45 @@ impl BeaconChain { .map(|ep| ep.block_hash) .unwrap_or_else(Hash256::zero); - execution_layer + let forkchoice_updated_response = execution_layer .notify_forkchoice_updated( head_execution_block_hash, finalized_execution_block_hash, None, ) .await - .map_err(Error::ExecutionForkChoiceUpdateFailed) + .map_err(Error::ExecutionForkChoiceUpdateFailed); + + match forkchoice_updated_response { + Ok((status, latest_valid_hash)) => match status { + PayloadStatusV1Status::Valid | PayloadStatusV1Status::Syncing => Ok(()), + // The specification doesn't list `ACCEPTED` as a valid response to a fork choice + // update. This response *seems* innocent enough, so we won't return early with an + // error. However, we create a log to bring attention to the issue. + PayloadStatusV1Status::Accepted => { + warn!( + log, + "Fork choice update received ACCEPTED"; + "msg" => "execution engine provided an unexpected response to a fork \ + choice update. although this is not a serious issue, please raise \ + an issue." + ); + Ok(()) + } + PayloadStatusV1Status::Invalid + | PayloadStatusV1Status::InvalidTerminalBlock + | PayloadStatusV1Status::InvalidBlockHash => { + // TODO(bellatrix): process the invalid payload. 
+ // + // See: https://github.com/sigp/lighthouse/pull/2837 + Err(BeaconChainError::ExecutionForkChoiceUpdateInvalid { + status, + latest_valid_hash, + }) + } + }, + Err(e) => Err(e), + } } /// Returns the status of the current head block, regarding the validity of the execution diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 8d61d9cbf9..bb4ca4aa40 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -54,6 +54,7 @@ use crate::{ metrics, BeaconChain, BeaconChainError, BeaconChainTypes, }; use eth2::types::EventKind; +use execution_layer::PayloadStatusV1Status; use fork_choice::{ForkChoice, ForkChoiceStore, PayloadVerificationStatus}; use parking_lot::RwLockReadGuard; use proto_array::Block as ProtoBlock; @@ -269,7 +270,10 @@ pub enum ExecutionPayloadError { /// ## Peer scoring /// /// The block is invalid and the peer is faulty - RejectedByExecutionEngine, + RejectedByExecutionEngine { + status: PayloadStatusV1Status, + latest_valid_hash: Option>, + }, /// The execution payload timestamp does not match the slot /// /// ## Peer scoring diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 6920c06039..4ca1597932 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -8,6 +8,7 @@ use crate::naive_aggregation_pool::Error as NaiveAggregationError; use crate::observed_aggregates::Error as ObservedAttestationsError; use crate::observed_attesters::Error as ObservedAttestersError; use crate::observed_block_producers::Error as ObservedBlockProducersError; +use execution_layer::PayloadStatusV1Status; use futures::channel::mpsc::TrySendError; use operation_pool::OpPoolError; use safe_arith::ArithError; @@ -137,6 +138,10 @@ pub enum BeaconChainError { AltairForkDisabled, ExecutionLayerMissing, 
ExecutionForkChoiceUpdateFailed(execution_layer::Error), + ExecutionForkChoiceUpdateInvalid { + status: PayloadStatusV1Status, + latest_valid_hash: Option>, + }, BlockRewardSlotError, BlockRewardAttestationError, BlockRewardSyncError, diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index ba20156699..09bfa25783 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -11,7 +11,7 @@ use crate::{ BeaconChain, BeaconChainError, BeaconChainTypes, BlockError, BlockProductionError, ExecutionPayloadError, }; -use execution_layer::ExecutePayloadResponseStatus; +use execution_layer::PayloadStatusV1Status; use fork_choice::PayloadVerificationStatus; use proto_array::{Block as ProtoBlock, ExecutionStatus}; use slog::debug; @@ -53,19 +53,29 @@ pub fn notify_new_payload( .execution_layer .as_ref() .ok_or(ExecutionPayloadError::NoExecutionConnection)?; - let notify_new_payload_response = execution_layer + let new_payload_response = execution_layer .block_on(|execution_layer| execution_layer.notify_new_payload(execution_payload)); - match notify_new_payload_response { - Ok((status, _latest_valid_hash)) => match status { - ExecutePayloadResponseStatus::Valid => Ok(PayloadVerificationStatus::Verified), - // TODO(merge): invalidate any invalid ancestors of this block in fork choice. - ExecutePayloadResponseStatus::Invalid => { - Err(ExecutionPayloadError::RejectedByExecutionEngine.into()) + match new_payload_response { + Ok((status, latest_valid_hash)) => match status { + PayloadStatusV1Status::Valid => Ok(PayloadVerificationStatus::Verified), + PayloadStatusV1Status::Syncing | PayloadStatusV1Status::Accepted => { + Ok(PayloadVerificationStatus::NotVerified) + } + PayloadStatusV1Status::Invalid + | PayloadStatusV1Status::InvalidTerminalBlock + | PayloadStatusV1Status::InvalidBlockHash => { + // TODO(bellatrix): process the invalid payload. 
+ // + // See: https://github.com/sigp/lighthouse/pull/2837 + Err(ExecutionPayloadError::RejectedByExecutionEngine { + status, + latest_valid_hash, + } + .into()) } - ExecutePayloadResponseStatus::Syncing => Ok(PayloadVerificationStatus::NotVerified), }, - Err(_) => Err(ExecutionPayloadError::RejectedByExecutionEngine.into()), + Err(e) => Err(ExecutionPayloadError::RequestFailed(e).into()), } } diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index f5045418ab..c3e0f8af5c 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -681,6 +681,7 @@ where store, head.finalized_checkpoint.root, block_hash, + &log, ) .await; diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index e59a706b27..d6877b13a2 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -55,10 +55,10 @@ pub trait EngineApi { block_hash: Hash256, ) -> Result, Error>; - async fn notify_new_payload_v1( + async fn new_payload_v1( &self, execution_payload: ExecutionPayload, - ) -> Result; + ) -> Result; async fn get_payload_v1( &self, @@ -73,15 +73,18 @@ pub trait EngineApi { } #[derive(Clone, Copy, Debug, PartialEq)] -pub enum ExecutePayloadResponseStatus { +pub enum PayloadStatusV1Status { Valid, Invalid, Syncing, + Accepted, + InvalidBlockHash, + InvalidTerminalBlock, } #[derive(Clone, Debug, PartialEq)] -pub struct ExecutePayloadResponse { - pub status: ExecutePayloadResponseStatus, +pub struct PayloadStatusV1 { + pub status: PayloadStatusV1Status, pub latest_valid_hash: Option, pub validation_error: Option, } @@ -110,13 +113,8 @@ pub struct PayloadAttributes { pub suggested_fee_recipient: Address, } -#[derive(Clone, Copy, Debug, PartialEq)] -pub enum ForkchoiceUpdatedResponseStatus { - Success, - Syncing, -} #[derive(Clone, Debug, PartialEq)] pub struct ForkchoiceUpdatedResponse { - pub status: 
ForkchoiceUpdatedResponseStatus, + pub payload_status: PayloadStatusV1, pub payload_id: Option, } diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index 39312e660a..ce4c3beff0 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -27,8 +27,8 @@ pub const ETH_GET_BLOCK_BY_HASH_TIMEOUT: Duration = Duration::from_secs(1); pub const ETH_SYNCING: &str = "eth_syncing"; pub const ETH_SYNCING_TIMEOUT: Duration = Duration::from_millis(250); -pub const ENGINE_EXECUTE_PAYLOAD_V1: &str = "engine_executePayloadV1"; -pub const ENGINE_EXECUTE_PAYLOAD_TIMEOUT: Duration = Duration::from_secs(2); +pub const ENGINE_NEW_PAYLOAD_V1: &str = "engine_newPayloadV1"; +pub const ENGINE_NEW_PAYLOAD_TIMEOUT: Duration = Duration::from_secs(2); pub const ENGINE_GET_PAYLOAD_V1: &str = "engine_getPayloadV1"; pub const ENGINE_GET_PAYLOAD_TIMEOUT: Duration = Duration::from_secs(2); @@ -133,18 +133,14 @@ impl EngineApi for HttpJsonRpc { .await } - async fn notify_new_payload_v1( + async fn new_payload_v1( &self, execution_payload: ExecutionPayload, - ) -> Result { + ) -> Result { let params = json!([JsonExecutionPayloadV1::from(execution_payload)]); - let response: JsonExecutePayloadV1Response = self - .rpc_request( - ENGINE_EXECUTE_PAYLOAD_V1, - params, - ENGINE_EXECUTE_PAYLOAD_TIMEOUT, - ) + let response: JsonPayloadStatusV1 = self + .rpc_request(ENGINE_NEW_PAYLOAD_V1, params, ENGINE_NEW_PAYLOAD_TIMEOUT) .await?; Ok(response.into()) @@ -486,12 +482,12 @@ mod test { } #[tokio::test] - async fn notify_new_payload_v1_request() { + async fn new_payload_v1_request() { Tester::new() .assert_request_equals( |client| async move { let _ = client - .notify_new_payload_v1::(ExecutionPayload { + .new_payload_v1::(ExecutionPayload { parent_hash: Hash256::repeat_byte(0), fee_recipient: Address::repeat_byte(1), state_root: Hash256::repeat_byte(1), @@ -512,7 +508,7 @@ mod test { 
json!({ "id": STATIC_ID, "jsonrpc": JSONRPC_VERSION, - "method": ENGINE_EXECUTE_PAYLOAD_V1, + "method": ENGINE_NEW_PAYLOAD_V1, "params": [{ "parentHash": HASH_00, "feeRecipient": ADDRESS_01, @@ -627,7 +623,11 @@ mod test { "id": STATIC_ID, "jsonrpc": JSONRPC_VERSION, "result": { - "status": "SUCCESS", + "payloadStatus": { + "status": "VALID", + "latestValidHash": HASH_00, + "validationError": "" + }, "payloadId": "0xa247243752eb10b4" } })], @@ -648,7 +648,11 @@ mod test { .await .unwrap(); assert_eq!(response, ForkchoiceUpdatedResponse { - status: ForkchoiceUpdatedResponseStatus::Success, + payload_status: PayloadStatusV1 { + status: PayloadStatusV1Status::Valid, + latest_valid_hash: Some(Hash256::zero()), + validation_error: Some(String::new()), + }, payload_id: Some(str_to_payload_id("0xa247243752eb10b4")), }); @@ -683,12 +687,12 @@ mod test { "logsBloom": LOGS_BLOOM_00, "random": HASH_00, "blockNumber":"0x1", - "gasLimit":"0x1c9c380", + "gasLimit":"0x1c95111", "gasUsed":"0x0", "timestamp":"0x5", "extraData":"0x", "baseFeePerGas":"0x7", - "blockHash":"0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858", + "blockHash":"0x6359b8381a370e2f54072a5784ddd78b6ed024991558c511d4452eb4f6ac898c", "transactions":[] } })], @@ -706,12 +710,12 @@ mod test { logs_bloom: vec![0; 256].into(), random: Hash256::zero(), block_number: 1, - gas_limit: u64::from_str_radix("1c9c380",16).unwrap(), + gas_limit: u64::from_str_radix("1c95111",16).unwrap(), gas_used: 0, timestamp: 5, extra_data: vec![].into(), base_fee_per_gas: Uint256::from(7), - block_hash: Hash256::from_str("0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858").unwrap(), + block_hash: Hash256::from_str("0x6359b8381a370e2f54072a5784ddd78b6ed024991558c511d4452eb4f6ac898c").unwrap(), transactions: vec![].into(), }; @@ -720,10 +724,10 @@ mod test { ) .await .assert_request_equals( - // engine_executePayloadV1 REQUEST validation + // engine_newPayloadV1 REQUEST validation |client| async move { let 
_ = client - .notify_new_payload_v1::(ExecutionPayload { + .new_payload_v1::(ExecutionPayload { parent_hash: Hash256::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), fee_recipient: Address::from_str("0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b").unwrap(), state_root: Hash256::from_str("0xca3149fa9e37db08d1cd49c9061db1002ef1cd58db2210f2115c8c989b2bdf45").unwrap(), @@ -744,7 +748,7 @@ mod test { json!({ "id": STATIC_ID, "jsonrpc": JSONRPC_VERSION, - "method": ENGINE_EXECUTE_PAYLOAD_V1, + "method": ENGINE_NEW_PAYLOAD_V1, "params": [{ "parentHash":"0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a", "feeRecipient":"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b", @@ -765,26 +769,27 @@ mod test { ) .await .with_preloaded_responses( - // engine_executePayloadV1 RESPONSE validation + // engine_newPayloadV1 RESPONSE validation vec![json!({ "jsonrpc": JSONRPC_VERSION, "id": STATIC_ID, "result":{ "status":"VALID", - "latestValidHash":"0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858" + "latestValidHash":"0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858", + "validationError":"", } })], |client| async move { let response = client - .notify_new_payload_v1::(ExecutionPayload::default()) + .new_payload_v1::(ExecutionPayload::default()) .await .unwrap(); assert_eq!(response, - ExecutePayloadResponse { - status: ExecutePayloadResponseStatus::Valid, + PayloadStatusV1 { + status: PayloadStatusV1Status::Valid, latest_valid_hash: Some(Hash256::from_str("0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858").unwrap()), - validation_error: None + validation_error: Some(String::new()), } ); }, @@ -819,14 +824,15 @@ mod test { .await .with_preloaded_responses( // engine_forkchoiceUpdatedV1 RESPONSE validation - // - // Note: this test was modified to provide `null` rather than `0x`. The geth vectors - // are invalid. 
vec![json!({ "jsonrpc": JSONRPC_VERSION, "id": STATIC_ID, "result": { - "status":"SUCCESS", + "payloadStatus": { + "status": "VALID", + "latestValidHash": HASH_00, + "validationError": "" + }, "payloadId": JSON_NULL, } })], @@ -843,7 +849,11 @@ mod test { .await .unwrap(); assert_eq!(response, ForkchoiceUpdatedResponse { - status: ForkchoiceUpdatedResponseStatus::Success, + payload_status: PayloadStatusV1 { + status: PayloadStatusV1Status::Valid, + latest_valid_hash: Some(Hash256::zero()), + validation_error: Some(String::new()), + }, payload_id: None, }); }, diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index ae542f9a5d..03d981d439 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -247,47 +247,60 @@ impl From for ForkChoiceState { #[derive(Debug, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "SCREAMING_SNAKE_CASE")] -pub enum JsonExecutePayloadV1ResponseStatus { +pub enum JsonPayloadStatusV1Status { Valid, Invalid, Syncing, + Accepted, + InvalidBlockHash, + InvalidTerminalBlock, } #[derive(Debug, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] -pub struct JsonExecutePayloadV1Response { - pub status: JsonExecutePayloadV1ResponseStatus, +pub struct JsonPayloadStatusV1 { + pub status: JsonPayloadStatusV1Status, pub latest_valid_hash: Option, pub validation_error: Option, } -impl From for JsonExecutePayloadV1ResponseStatus { - fn from(e: ExecutePayloadResponseStatus) -> Self { +impl From for JsonPayloadStatusV1Status { + fn from(e: PayloadStatusV1Status) -> Self { match e { - ExecutePayloadResponseStatus::Valid => JsonExecutePayloadV1ResponseStatus::Valid, - ExecutePayloadResponseStatus::Invalid => JsonExecutePayloadV1ResponseStatus::Invalid, - ExecutePayloadResponseStatus::Syncing => JsonExecutePayloadV1ResponseStatus::Syncing, + 
PayloadStatusV1Status::Valid => JsonPayloadStatusV1Status::Valid, + PayloadStatusV1Status::Invalid => JsonPayloadStatusV1Status::Invalid, + PayloadStatusV1Status::Syncing => JsonPayloadStatusV1Status::Syncing, + PayloadStatusV1Status::Accepted => JsonPayloadStatusV1Status::Accepted, + PayloadStatusV1Status::InvalidBlockHash => JsonPayloadStatusV1Status::InvalidBlockHash, + PayloadStatusV1Status::InvalidTerminalBlock => { + JsonPayloadStatusV1Status::InvalidTerminalBlock + } } } } -impl From for ExecutePayloadResponseStatus { - fn from(j: JsonExecutePayloadV1ResponseStatus) -> Self { +impl From for PayloadStatusV1Status { + fn from(j: JsonPayloadStatusV1Status) -> Self { match j { - JsonExecutePayloadV1ResponseStatus::Valid => ExecutePayloadResponseStatus::Valid, - JsonExecutePayloadV1ResponseStatus::Invalid => ExecutePayloadResponseStatus::Invalid, - JsonExecutePayloadV1ResponseStatus::Syncing => ExecutePayloadResponseStatus::Syncing, + JsonPayloadStatusV1Status::Valid => PayloadStatusV1Status::Valid, + JsonPayloadStatusV1Status::Invalid => PayloadStatusV1Status::Invalid, + JsonPayloadStatusV1Status::Syncing => PayloadStatusV1Status::Syncing, + JsonPayloadStatusV1Status::Accepted => PayloadStatusV1Status::Accepted, + JsonPayloadStatusV1Status::InvalidBlockHash => PayloadStatusV1Status::InvalidBlockHash, + JsonPayloadStatusV1Status::InvalidTerminalBlock => { + PayloadStatusV1Status::InvalidTerminalBlock + } } } } -impl From for JsonExecutePayloadV1Response { - fn from(e: ExecutePayloadResponse) -> Self { +impl From for JsonPayloadStatusV1 { + fn from(p: PayloadStatusV1) -> Self { // Use this verbose deconstruction pattern to ensure no field is left unused. 
- let ExecutePayloadResponse { + let PayloadStatusV1 { status, latest_valid_hash, validation_error, - } = e; + } = p; Self { status: status.into(), @@ -297,10 +310,10 @@ impl From for JsonExecutePayloadV1Response { } } -impl From for ExecutePayloadResponse { - fn from(j: JsonExecutePayloadV1Response) -> Self { +impl From for PayloadStatusV1 { + fn from(j: JsonPayloadStatusV1) -> Self { // Use this verbose deconstruction pattern to ensure no field is left unused. - let JsonExecutePayloadV1Response { + let JsonPayloadStatusV1 { status, latest_valid_hash, validation_error, @@ -314,50 +327,23 @@ impl From for ExecutePayloadResponse { } } -#[derive(Debug, PartialEq, Serialize, Deserialize)] -#[serde(rename_all = "SCREAMING_SNAKE_CASE")] -pub enum JsonForkchoiceUpdatedV1ResponseStatus { - Success, - Syncing, -} #[derive(Debug, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct JsonForkchoiceUpdatedV1Response { - pub status: JsonForkchoiceUpdatedV1ResponseStatus, + pub payload_status: JsonPayloadStatusV1, pub payload_id: Option, } -impl From for ForkchoiceUpdatedResponseStatus { - fn from(j: JsonForkchoiceUpdatedV1ResponseStatus) -> Self { - match j { - JsonForkchoiceUpdatedV1ResponseStatus::Success => { - ForkchoiceUpdatedResponseStatus::Success - } - JsonForkchoiceUpdatedV1ResponseStatus::Syncing => { - ForkchoiceUpdatedResponseStatus::Syncing - } - } - } -} -impl From for JsonForkchoiceUpdatedV1ResponseStatus { - fn from(f: ForkchoiceUpdatedResponseStatus) -> Self { - match f { - ForkchoiceUpdatedResponseStatus::Success => { - JsonForkchoiceUpdatedV1ResponseStatus::Success - } - ForkchoiceUpdatedResponseStatus::Syncing => { - JsonForkchoiceUpdatedV1ResponseStatus::Syncing - } - } - } -} impl From for ForkchoiceUpdatedResponse { fn from(j: JsonForkchoiceUpdatedV1Response) -> Self { // Use this verbose deconstruction pattern to ensure no field is left unused. 
- let JsonForkchoiceUpdatedV1Response { status, payload_id } = j; + let JsonForkchoiceUpdatedV1Response { + payload_status: status, + payload_id, + } = j; Self { - status: status.into(), + payload_status: status.into(), payload_id: payload_id.map(Into::into), } } @@ -365,10 +351,13 @@ impl From for ForkchoiceUpdatedResponse { impl From for JsonForkchoiceUpdatedV1Response { fn from(f: ForkchoiceUpdatedResponse) -> Self { // Use this verbose deconstruction pattern to ensure no field is left unused. - let ForkchoiceUpdatedResponse { status, payload_id } = f; + let ForkchoiceUpdatedResponse { + payload_status: status, + payload_id, + } = f; Self { - status: status.into(), + payload_status: status.into(), payload_id: payload_id.map(Into::into), } } diff --git a/beacon_node/execution_layer/src/engines.rs b/beacon_node/execution_layer/src/engines.rs index 5db00d37f6..03801f3168 100644 --- a/beacon_node/execution_layer/src/engines.rs +++ b/beacon_node/execution_layer/src/engines.rs @@ -1,6 +1,8 @@ //! Provides generic behaviour for multiple execution engines, specifically fallback behaviour. 
-use crate::engine_api::{EngineApi, Error as EngineApiError, PayloadAttributes, PayloadId}; +use crate::engine_api::{ + EngineApi, Error as EngineApiError, ForkchoiceUpdatedResponse, PayloadAttributes, PayloadId, +}; use futures::future::join_all; use lru::LruCache; use slog::{crit, debug, info, warn, Logger}; @@ -97,7 +99,7 @@ impl Engine { forkchoice_state: ForkChoiceState, payload_attributes: Option, log: &Logger, - ) -> Result, EngineApiError> { + ) -> Result { let response = self .api .forkchoice_updated_v1(forkchoice_state, payload_attributes) @@ -117,7 +119,7 @@ impl Engine { } } - Ok(response.payload_id) + Ok(response) } } diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 2fbd72e157..10ae6b3eb0 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -10,7 +10,7 @@ use lru::LruCache; use sensitive_url::SensitiveUrl; use slog::{crit, debug, error, info, Logger}; use slot_clock::SlotClock; -use std::collections::HashMap; +use std::collections::{HashMap, HashSet}; use std::future::Future; use std::sync::Arc; use std::time::Duration; @@ -21,7 +21,7 @@ use tokio::{ }; use types::{ChainSpec, Epoch, ProposerPreparationData}; -pub use engine_api::{http::HttpJsonRpc, ExecutePayloadResponseStatus}; +pub use engine_api::{http::HttpJsonRpc, PayloadAttributes, PayloadStatusV1Status}; mod engine_api; mod engines; @@ -49,6 +49,7 @@ pub enum Error { NotSynced, ShuttingDown, FeeRecipientUnspecified, + ConsensusFailure, } impl From for Error { @@ -249,7 +250,7 @@ impl ExecutionLayer { } /// Performs a single execution of the watchdog routine. - async fn watchdog_task(&self) { + pub async fn watchdog_task(&self) { // Disable logging since this runs frequently and may get annoying. self.engines().upcheck_not_synced(Logging::Disabled).await; } @@ -431,7 +432,8 @@ impl ExecutionLayer { Some(payload_attributes), self.log(), ) - .await? + .await + .map(|response| response.payload_id)? 
.ok_or(ApiError::PayloadIdUnavailable)? }; @@ -449,6 +451,7 @@ impl ExecutionLayer { /// failure) from all nodes and then return based on the first of these conditions which /// returns true: /// + /// - Error::ConsensusFailure if some nodes return valid and some return invalid /// - Valid, if any nodes return valid. /// - Invalid, if any nodes return invalid. /// - Syncing, if any nodes return syncing. @@ -456,10 +459,10 @@ impl ExecutionLayer { pub async fn notify_new_payload( &self, execution_payload: &ExecutionPayload, - ) -> Result<(ExecutePayloadResponseStatus, Option), Error> { + ) -> Result<(PayloadStatusV1Status, Option>), Error> { debug!( self.log(), - "Issuing engine_executePayload"; + "Issuing engine_newPayload"; "parent_hash" => ?execution_payload.parent_hash, "block_hash" => ?execution_payload.block_hash, "block_number" => execution_payload.block_number, @@ -467,46 +470,55 @@ impl ExecutionLayer { let broadcast_results = self .engines() - .broadcast(|engine| engine.api.notify_new_payload_v1(execution_payload.clone())) + .broadcast(|engine| engine.api.new_payload_v1(execution_payload.clone())) .await; let mut errors = vec![]; let mut valid = 0; let mut invalid = 0; let mut syncing = 0; - let mut invalid_latest_valid_hash = vec![]; + let mut invalid_latest_valid_hash = HashSet::new(); for result in broadcast_results { - match result.map(|response| (response.latest_valid_hash, response.status)) { - Ok((Some(latest_hash), ExecutePayloadResponseStatus::Valid)) => { - if latest_hash == execution_payload.block_hash { - valid += 1; - } else { - invalid += 1; - errors.push(EngineError::Api { - id: "unknown".to_string(), - error: engine_api::Error::BadResponse( - format!( - "notify_new_payload: response.status = Valid but invalid latest_valid_hash. 
Expected({:?}) Found({:?})", - execution_payload.block_hash, - latest_hash, - ) - ), - }); - invalid_latest_valid_hash.push(latest_hash); + match result { + Ok(response) => match (&response.latest_valid_hash, &response.status) { + (Some(latest_hash), &PayloadStatusV1Status::Valid) => { + // According to a strict interpretation of the spec, the EE should never + // respond with `VALID` *and* a `latest_valid_hash`. + // + // For the sake of being liberal with what we accept, we will accept a + // `latest_valid_hash` *only if* it matches the submitted payload. + // Otherwise, register an error. + if latest_hash == &execution_payload.block_hash { + valid += 1; + } else { + errors.push(EngineError::Api { + id: "unknown".to_string(), + error: engine_api::Error::BadResponse( + format!( + "new_payload: response.status = Valid but invalid latest_valid_hash. Expected({:?}) Found({:?})", + execution_payload.block_hash, + latest_hash, + ) + ), + }); + } } - } - Ok((Some(latest_hash), ExecutePayloadResponseStatus::Invalid)) => { - invalid += 1; - invalid_latest_valid_hash.push(latest_hash); - } - Ok((_, ExecutePayloadResponseStatus::Syncing)) => syncing += 1, - Ok((None, status)) => errors.push(EngineError::Api { - id: "unknown".to_string(), - error: engine_api::Error::BadResponse(format!( - "notify_new_payload: status {:?} returned with null latest_valid_hash", - status - )), - }), + (Some(latest_hash), &PayloadStatusV1Status::Invalid) => { + invalid += 1; + invalid_latest_valid_hash.insert(*latest_hash); + } + (None, &PayloadStatusV1Status::InvalidBlockHash) + | (None, &PayloadStatusV1Status::InvalidTerminalBlock) => invalid += 1, + (None, &PayloadStatusV1Status::Syncing) + | (None, &PayloadStatusV1Status::Accepted) => syncing += 1, + _ => errors.push(EngineError::Api { + id: "unknown".to_string(), + error: engine_api::Error::BadResponse(format!( + "new_payload: response does not conform to engine API spec: {:?}", + response, + )), + }), + }, Err(e) => errors.push(e), } } @@ 
-515,19 +527,24 @@ impl ExecutionLayer { crit!( self.log(), "Consensus failure between execution nodes"; - "method" => "notify_new_payload" + "method" => "new_payload" ); + // In this situation, better to have a failure of liveness than vote on a potentially invalid chain + return Err(Error::ConsensusFailure); } if valid > 0 { Ok(( - ExecutePayloadResponseStatus::Valid, - Some(execution_payload.block_hash), + PayloadStatusV1Status::Valid, + Some(vec![execution_payload.block_hash]), )) } else if invalid > 0 { - Ok((ExecutePayloadResponseStatus::Invalid, None)) + Ok(( + PayloadStatusV1Status::Invalid, + Some(invalid_latest_valid_hash.into_iter().collect()), + )) } else if syncing > 0 { - Ok((ExecutePayloadResponseStatus::Syncing, None)) + Ok((PayloadStatusV1Status::Syncing, None)) } else { Err(Error::EngineErrors(errors)) } @@ -541,14 +558,17 @@ impl ExecutionLayer { /// failure) from all nodes and then return based on the first of these conditions which /// returns true: /// - /// - Ok, if any node returns successfully. + /// - Error::ConsensusFailure if some nodes return valid and some return invalid + /// - Valid, if any nodes return valid. + /// - Invalid, if any nodes return invalid. + /// - Syncing, if any nodes return syncing. /// - An error, if all nodes return an error. 
pub async fn notify_forkchoice_updated( &self, head_block_hash: Hash256, finalized_block_hash: Hash256, payload_attributes: Option, - ) -> Result<(), Error> { + ) -> Result<(PayloadStatusV1Status, Option>), Error> { debug!( self.log(), "Issuing engine_forkchoiceUpdated"; @@ -577,13 +597,76 @@ impl ExecutionLayer { }) .await; - if broadcast_results.iter().any(Result::is_ok) { - Ok(()) + let mut errors = vec![]; + let mut valid = 0; + let mut invalid = 0; + let mut syncing = 0; + let mut invalid_latest_valid_hash = HashSet::new(); + for result in broadcast_results { + match result { + Ok(response) => match (&response.payload_status.latest_valid_hash, &response.payload_status.status) { + // TODO(bellatrix) a strict interpretation of the v1.0.0.alpha.6 spec says that + // `latest_valid_hash` *cannot* be `None`. However, we accept it to maintain + // Geth compatibility for the short term. See: + // + // https://github.com/ethereum/go-ethereum/issues/24404 + (None, &PayloadStatusV1Status::Valid) => valid += 1, + (Some(latest_hash), &PayloadStatusV1Status::Valid) => { + if latest_hash == &head_block_hash { + valid += 1; + } else { + errors.push(EngineError::Api { + id: "unknown".to_string(), + error: engine_api::Error::BadResponse( + format!( + "forkchoice_updated: payload_status = Valid but invalid latest_valid_hash. 
Expected({:?}) Found({:?})", + head_block_hash, + *latest_hash, + ) + ), + }); + } + } + (Some(latest_hash), &PayloadStatusV1Status::Invalid) => { + invalid += 1; + invalid_latest_valid_hash.insert(*latest_hash); + } + (None, &PayloadStatusV1Status::InvalidTerminalBlock) => invalid += 1, + (None, &PayloadStatusV1Status::Syncing) => syncing += 1, + _ => { + errors.push(EngineError::Api { + id: "unknown".to_string(), + error: engine_api::Error::BadResponse(format!( + "forkchoice_updated: response does not conform to engine API spec: {:?}", + response + )), + }) + } + } + Err(e) => errors.push(e), + } + } + + if valid > 0 && invalid > 0 { + crit!( + self.log(), + "Consensus failure between execution nodes"; + "method" => "forkchoice_updated" + ); + // In this situation, better to have a failure of liveness than vote on a potentially invalid chain + return Err(Error::ConsensusFailure); + } + + if valid > 0 { + Ok((PayloadStatusV1Status::Valid, Some(vec![head_block_hash]))) + } else if invalid > 0 { + Ok(( + PayloadStatusV1Status::Invalid, + Some(invalid_latest_valid_hash.into_iter().collect()), + )) + } else if syncing > 0 { + Ok((PayloadStatusV1Status::Syncing, None)) } else { - let errors = broadcast_results - .into_iter() - .filter_map(Result::err) - .collect(); Err(Error::EngineErrors(errors)) } } diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index 61aaedd359..8fd6ebfcd1 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -1,6 +1,5 @@ use crate::engine_api::{ - ExecutePayloadResponse, ExecutePayloadResponseStatus, ExecutionBlock, PayloadAttributes, - PayloadId, + ExecutionBlock, PayloadAttributes, PayloadId, PayloadStatusV1, PayloadStatusV1Status, }; use crate::engines::ForkChoiceState; use serde::{Deserialize, Serialize}; @@ -235,20 +234,20 
@@ impl ExecutionBlockGenerator { self.payload_ids.remove(id) } - pub fn notify_new_payload(&mut self, payload: ExecutionPayload) -> ExecutePayloadResponse { + pub fn new_payload(&mut self, payload: ExecutionPayload) -> PayloadStatusV1 { let parent = if let Some(parent) = self.blocks.get(&payload.parent_hash) { parent } else { - return ExecutePayloadResponse { - status: ExecutePayloadResponseStatus::Syncing, + return PayloadStatusV1 { + status: PayloadStatusV1Status::Syncing, latest_valid_hash: None, validation_error: None, }; }; if payload.block_number != parent.block_number() + 1 { - return ExecutePayloadResponse { - status: ExecutePayloadResponseStatus::Invalid, + return PayloadStatusV1 { + status: PayloadStatusV1Status::Invalid, latest_valid_hash: Some(parent.block_hash()), validation_error: Some("invalid block number".to_string()), }; @@ -257,8 +256,8 @@ impl ExecutionBlockGenerator { let valid_hash = payload.block_hash; self.pending_payloads.insert(payload.block_hash, payload); - ExecutePayloadResponse { - status: ExecutePayloadResponseStatus::Valid, + PayloadStatusV1 { + status: PayloadStatusV1Status::Valid, latest_valid_hash: Some(valid_hash), validation_error: None, } diff --git a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs index 11232bc081..746d96e293 100644 --- a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs +++ b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs @@ -1,5 +1,5 @@ use super::Context; -use crate::engine_api::{http::*, ExecutePayloadResponse, ExecutePayloadResponseStatus}; +use crate::engine_api::{http::*, PayloadStatusV1, PayloadStatusV1Status}; use crate::json_structures::*; use serde::de::DeserializeOwned; use serde_json::Value as JsonValue; @@ -54,30 +54,30 @@ pub async fn handle_rpc( ) .unwrap()) } - ENGINE_EXECUTE_PAYLOAD_V1 => { + ENGINE_NEW_PAYLOAD_V1 => { let request: JsonExecutionPayloadV1 = get_param(params, 0)?; - let response = if let 
Some(status) = *ctx.static_notify_new_payload_response.lock() { + let response = if let Some(status) = *ctx.static_new_payload_response.lock() { match status { - ExecutePayloadResponseStatus::Valid => ExecutePayloadResponse { + PayloadStatusV1Status::Valid => PayloadStatusV1 { status, latest_valid_hash: Some(request.block_hash), validation_error: None, }, - ExecutePayloadResponseStatus::Syncing => ExecutePayloadResponse { + PayloadStatusV1Status::Syncing => PayloadStatusV1 { status, latest_valid_hash: None, validation_error: None, }, - _ => unimplemented!("invalid static executePayloadResponse"), + _ => unimplemented!("invalid static newPayloadResponse"), } } else { ctx.execution_block_generator .write() - .notify_new_payload(request.into()) + .new_payload(request.into()) }; - Ok(serde_json::to_value(JsonExecutePayloadV1Response::from(response)).unwrap()) + Ok(serde_json::to_value(JsonPayloadStatusV1::from(response)).unwrap()) } ENGINE_GET_PAYLOAD_V1 => { let request: JsonPayloadIdRequest = get_param(params, 0)?; @@ -94,6 +94,8 @@ pub async fn handle_rpc( ENGINE_FORKCHOICE_UPDATED_V1 => { let forkchoice_state: JsonForkChoiceStateV1 = get_param(params, 0)?; let payload_attributes: Option = get_param(params, 1)?; + + let head_block_hash = forkchoice_state.head_block_hash; let id = ctx .execution_block_generator .write() @@ -103,7 +105,11 @@ pub async fn handle_rpc( )?; Ok(serde_json::to_value(JsonForkchoiceUpdatedV1Response { - status: JsonForkchoiceUpdatedV1ResponseStatus::Success, + payload_status: JsonPayloadStatusV1 { + status: JsonPayloadStatusV1Status::Valid, + latest_valid_hash: Some(head_block_hash), + validation_error: None, + }, payload_id: id.map(Into::into), }) .unwrap()) diff --git a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs index 4f5337075d..0622da473f 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs +++ 
b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs @@ -147,8 +147,8 @@ impl MockExecutionLayer { let (payload_response, latest_valid_hash) = self.el.notify_new_payload(&payload).await.unwrap(); - assert_eq!(payload_response, ExecutePayloadResponseStatus::Valid); - assert_eq!(latest_valid_hash, Some(payload.block_hash)); + assert_eq!(payload_response, PayloadStatusV1Status::Valid); + assert_eq!(latest_valid_hash, Some(vec![payload.block_hash])); self.el .notify_forkchoice_updated(block_hash, Hash256::zero(), None) diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs index fbc3751784..a4b9617764 100644 --- a/beacon_node/execution_layer/src/test_utils/mod.rs +++ b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -1,7 +1,7 @@ //! Provides a mock execution engine HTTP JSON-RPC API for use in testing. use crate::engine_api::http::JSONRPC_VERSION; -use crate::engine_api::ExecutePayloadResponseStatus; +use crate::engine_api::PayloadStatusV1Status; use bytes::Bytes; use environment::null_logger; use execution_block_generator::{Block, PoWBlock}; @@ -62,7 +62,7 @@ impl MockServer { last_echo_request: last_echo_request.clone(), execution_block_generator: RwLock::new(execution_block_generator), preloaded_responses, - static_notify_new_payload_response: <_>::default(), + static_new_payload_response: <_>::default(), _phantom: PhantomData, }); @@ -117,8 +117,7 @@ impl MockServer { } pub fn all_payloads_valid(&self) { - *self.ctx.static_notify_new_payload_response.lock() = - Some(ExecutePayloadResponseStatus::Valid) + *self.ctx.static_new_payload_response.lock() = Some(PayloadStatusV1Status::Valid) } pub fn insert_pow_block( @@ -188,7 +187,7 @@ pub struct Context { pub last_echo_request: Arc>>, pub execution_block_generator: RwLock>, pub preloaded_responses: Arc>>, - pub static_notify_new_payload_response: Arc>>, + pub static_new_payload_response: Arc>>, pub _phantom: PhantomData, } diff --git 
a/beacon_node/lighthouse_network/Cargo.toml b/beacon_node/lighthouse_network/Cargo.toml index bb30aac55a..0cc53c09e4 100644 --- a/beacon_node/lighthouse_network/Cargo.toml +++ b/beacon_node/lighthouse_network/Cargo.toml @@ -39,6 +39,7 @@ regex = "1.3.9" strum = { version = "0.21.0", features = ["derive"] } superstruct = "0.4.0" prometheus-client = "0.15.0" +unused_port = { path = "../../common/unused_port" } [dependencies.libp2p] git = "https://github.com/sigp/rust-libp2p" diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index 5cc059c2a8..4f7ec432b7 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -1049,17 +1049,11 @@ mod tests { use crate::rpc::methods::{MetaData, MetaDataV2}; use enr::EnrBuilder; use slog::{o, Drain}; - use std::net::UdpSocket; use types::{BitVector, MinimalEthSpec, SubnetId}; + use unused_port::unused_udp_port; type E = MinimalEthSpec; - pub fn unused_port() -> u16 { - let socket = UdpSocket::bind("127.0.0.1:0").expect("should create udp socket"); - let local_addr = socket.local_addr().expect("should read udp socket"); - local_addr.port() - } - pub fn build_log(level: slog::Level, enabled: bool) -> slog::Logger { let decorator = slog_term::TermDecorator::new().build(); let drain = slog_term::FullFormat::new(decorator).build().fuse(); @@ -1075,7 +1069,7 @@ mod tests { async fn build_discovery() -> Discovery { let keypair = libp2p::identity::Keypair::generate_secp256k1(); let config = NetworkConfig { - discovery_port: unused_port(), + discovery_port: unused_udp_port().unwrap(), ..Default::default() }; let enr_key: CombinedKey = CombinedKey::from_libp2p(&keypair).unwrap(); diff --git a/beacon_node/lighthouse_network/tests/common/mod.rs b/beacon_node/lighthouse_network/tests/common/mod.rs index 7deb2108b0..5656cf0789 100644 --- a/beacon_node/lighthouse_network/tests/common/mod.rs +++ 
b/beacon_node/lighthouse_network/tests/common/mod.rs @@ -6,12 +6,12 @@ use lighthouse_network::Multiaddr; use lighthouse_network::Service as LibP2PService; use lighthouse_network::{Libp2pEvent, NetworkConfig}; use slog::{debug, error, o, Drain}; -use std::net::{TcpListener, UdpSocket}; use std::sync::Arc; use std::sync::Weak; use std::time::Duration; use tokio::runtime::Runtime; use types::{ChainSpec, EnrForkId, EthSpec, ForkContext, Hash256, MinimalEthSpec}; +use unused_port::unused_tcp_port; #[allow(clippy::type_complexity)] #[allow(unused)] @@ -61,38 +61,6 @@ pub fn build_log(level: slog::Level, enabled: bool) -> slog::Logger { } } -// A bit of hack to find an unused port. -/// -/// Does not guarantee that the given port is unused after the function exits, just that it was -/// unused before the function started (i.e., it does not reserve a port). -pub fn unused_port(transport: &str) -> Result { - let local_addr = match transport { - "tcp" => { - let listener = TcpListener::bind("127.0.0.1:0").map_err(|e| { - format!("Failed to create TCP listener to find unused port: {:?}", e) - })?; - listener.local_addr().map_err(|e| { - format!( - "Failed to read TCP listener local_addr to find unused port: {:?}", - e - ) - })? - } - "udp" => { - let socket = UdpSocket::bind("127.0.0.1:0") - .map_err(|e| format!("Failed to create UDP socket to find unused port: {:?}", e))?; - socket.local_addr().map_err(|e| { - format!( - "Failed to read UDP socket local_addr to find unused port: {:?}", - e - ) - })? 
- } - _ => return Err("Invalid transport to find unused port".into()), - }; - Ok(local_addr.port()) -} - pub fn build_config(port: u16, mut boot_nodes: Vec) -> NetworkConfig { let mut config = NetworkConfig::default(); let path = TempBuilder::new() @@ -121,7 +89,7 @@ pub async fn build_libp2p_instance( boot_nodes: Vec, log: slog::Logger, ) -> Libp2pInstance { - let port = unused_port("tcp").unwrap(); + let port = unused_tcp_port().unwrap(); let config = build_config(port, boot_nodes); // launch libp2p service diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 7487acbde0..33603b94e2 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -11,10 +11,10 @@ use std::cmp; use std::cmp::max; use std::fs; use std::net::{IpAddr, Ipv4Addr, ToSocketAddrs}; -use std::net::{TcpListener, UdpSocket}; use std::path::{Path, PathBuf}; use std::str::FromStr; use types::{Checkpoint, Epoch, EthSpec, Hash256, PublicKeyBytes, GRAFFITI_BYTES_LEN}; +use unused_port::{unused_tcp_port, unused_udp_port}; /// Gets the fully-initialized global client. /// @@ -293,9 +293,9 @@ pub fn get_config( client_config.network.enr_address = None } client_config.network.libp2p_port = - unused_port("tcp").map_err(|e| format!("Failed to get port for libp2p: {}", e))?; + unused_tcp_port().map_err(|e| format!("Failed to get port for libp2p: {}", e))?; client_config.network.discovery_port = - unused_port("udp").map_err(|e| format!("Failed to get port for discovery: {}", e))?; + unused_udp_port().map_err(|e| format!("Failed to get port for discovery: {}", e))?; client_config.http_api.listen_port = 0; client_config.http_metrics.listen_port = 0; } @@ -785,44 +785,3 @@ pub fn get_data_dir(cli_args: &ArgMatches) -> PathBuf { }) .unwrap_or_else(|| PathBuf::from(".")) } - -/// A bit of hack to find an unused port. 
-/// -/// Does not guarantee that the given port is unused after the function exits, just that it was -/// unused before the function started (i.e., it does not reserve a port). -/// -/// Used for passing unused ports to libp2 so that lighthouse won't have to update -/// its own ENR. -/// -/// NOTE: It is possible that libp2p/discv5 is unable to bind to the -/// ports returned by this function as the OS has a buffer period where -/// it doesn't allow binding to the same port even after the socket is closed. -/// We might have to use SO_REUSEADDR socket option from `std::net2` crate in -/// that case. -pub fn unused_port(transport: &str) -> Result { - let local_addr = match transport { - "tcp" => { - let listener = TcpListener::bind("127.0.0.1:0").map_err(|e| { - format!("Failed to create TCP listener to find unused port: {:?}", e) - })?; - listener.local_addr().map_err(|e| { - format!( - "Failed to read TCP listener local_addr to find unused port: {:?}", - e - ) - })? - } - "udp" => { - let socket = UdpSocket::bind("127.0.0.1:0") - .map_err(|e| format!("Failed to create UDP socket to find unused port: {:?}", e))?; - socket.local_addr().map_err(|e| { - format!( - "Failed to read UDP socket local_addr to find unused port: {:?}", - e - ) - })? 
- } - _ => return Err("Invalid transport to find unused port".into()), - }; - Ok(local_addr.port()) -} diff --git a/common/unused_port/Cargo.toml b/common/unused_port/Cargo.toml new file mode 100644 index 0000000000..06c1ca8f58 --- /dev/null +++ b/common/unused_port/Cargo.toml @@ -0,0 +1,8 @@ +[package] +name = "unused_port" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] diff --git a/common/unused_port/src/lib.rs b/common/unused_port/src/lib.rs new file mode 100644 index 0000000000..4a8cf17380 --- /dev/null +++ b/common/unused_port/src/lib.rs @@ -0,0 +1,55 @@ +use std::net::{TcpListener, UdpSocket}; + +#[derive(Copy, Clone)] +pub enum Transport { + Tcp, + Udp, +} + +/// A convenience function for `unused_port(Transport::Tcp)`. +pub fn unused_tcp_port() -> Result { + unused_port(Transport::Tcp) } + +/// A convenience function for `unused_port(Transport::Udp)`. +pub fn unused_udp_port() -> Result { + unused_port(Transport::Udp) +} + +/// A bit of hack to find an unused port. +/// +/// Does not guarantee that the given port is unused after the function exits, just that it was +/// unused before the function started (i.e., it does not reserve a port). +/// +/// ## Notes +/// +/// It is possible that users are unable to bind to the ports returned by this function as the OS +/// has a buffer period where it doesn't allow binding to the same port even after the socket is +/// closed. We might have to use SO_REUSEADDR socket option from the `net2` crate in that case. +pub fn unused_port(transport: Transport) -> Result { + let local_addr = match transport { + Transport::Tcp => { + let listener = TcpListener::bind("127.0.0.1:0").map_err(|e| { + format!("Failed to create TCP listener to find unused port: {:?}", e) + })?; + listener.local_addr().map_err(|e| { + format!( + "Failed to read TCP listener local_addr to find unused port: {:?}", + e + ) + })?
+ } + Transport::Udp => { + let socket = UdpSocket::bind("127.0.0.1:0") + .map_err(|e| format!("Failed to create UDP socket to find unused port: {:?}", e))?; + socket.local_addr().map_err(|e| { + format!( + "Failed to read UDP socket local_addr to find unused port: {:?}", + e + ) + })? + } + }; + + Ok(local_addr.port()) +} diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index b0ee994ec6..5cf04b3b4f 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -44,6 +44,7 @@ serde_json = "1.0.59" task_executor = { path = "../common/task_executor" } malloc_utils = { path = "../common/malloc_utils" } directory = { path = "../common/directory" } +unused_port = { path = "../common/unused_port" } [dev-dependencies] tempfile = "3.1.0" diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index f630ed8e73..37c4359453 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -5,13 +5,13 @@ use lighthouse_network::PeerId; use std::fs::File; use std::io::Write; use std::net::{IpAddr, Ipv4Addr}; -use std::net::{TcpListener, UdpSocket}; use std::path::PathBuf; use std::process::Command; use std::str::FromStr; use std::string::ToString; use tempfile::TempDir; use types::{Address, Checkpoint, Epoch, Hash256}; +use unused_port::{unused_tcp_port, unused_udp_port}; const DEFAULT_ETH1_ENDPOINT: &str = "http://localhost:8545/"; @@ -279,7 +279,7 @@ fn network_listen_address_flag() { } #[test] fn network_port_flag() { - let port = unused_port("tcp").expect("Unable to find unused port."); + let port = unused_tcp_port().expect("Unable to find unused port."); CommandLineTest::new() .flag("port", Some(port.to_string().as_str())) .run() @@ -290,8 +290,8 @@ fn network_port_flag() { } #[test] fn network_port_and_discovery_port_flags() { - let port1 = unused_port("tcp").expect("Unable to find unused port."); - let port2 = unused_port("udp").expect("Unable to find unused port."); + let port1 = unused_tcp_port().expect("Unable to 
find unused port."); + let port2 = unused_udp_port().expect("Unable to find unused port."); CommandLineTest::new() .flag("port", Some(port1.to_string().as_str())) .flag("discovery-port", Some(port2.to_string().as_str())) @@ -414,7 +414,7 @@ fn zero_ports_flag() { // Tests for ENR flags. #[test] fn enr_udp_port_flags() { - let port = unused_port("udp").expect("Unable to find unused port."); + let port = unused_udp_port().expect("Unable to find unused port."); CommandLineTest::new() .flag("enr-udp-port", Some(port.to_string().as_str())) .run_with_zero_port() @@ -422,7 +422,7 @@ fn enr_udp_port_flags() { } #[test] fn enr_tcp_port_flags() { - let port = unused_port("tcp").expect("Unable to find unused port."); + let port = unused_tcp_port().expect("Unable to find unused port."); CommandLineTest::new() .flag("enr-tcp-port", Some(port.to_string().as_str())) .run_with_zero_port() @@ -431,8 +431,8 @@ fn enr_tcp_port_flags() { #[test] fn enr_match_flag() { let addr = "127.0.0.2".parse::().unwrap(); - let port1 = unused_port("udp").expect("Unable to find unused port."); - let port2 = unused_port("udp").expect("Unable to find unused port."); + let port1 = unused_udp_port().expect("Unable to find unused port."); + let port2 = unused_udp_port().expect("Unable to find unused port."); CommandLineTest::new() .flag("enr-match", None) .flag("listen-address", Some("127.0.0.2")) @@ -449,7 +449,7 @@ fn enr_match_flag() { #[test] fn enr_address_flag() { let addr = "192.167.1.1".parse::().unwrap(); - let port = unused_port("udp").expect("Unable to find unused port."); + let port = unused_udp_port().expect("Unable to find unused port."); CommandLineTest::new() .flag("enr-address", Some("192.167.1.1")) .flag("enr-udp-port", Some(port.to_string().as_str())) @@ -463,7 +463,7 @@ fn enr_address_flag() { fn enr_address_dns_flag() { let addr = "127.0.0.1".parse::().unwrap(); let ipv6addr = "::1".parse::().unwrap(); - let port = unused_port("udp").expect("Unable to find unused port."); + let port 
= unused_udp_port().expect("Unable to find unused port."); CommandLineTest::new() .flag("enr-address", Some("localhost")) .flag("enr-udp-port", Some(port.to_string().as_str())) @@ -502,8 +502,8 @@ fn http_address_flag() { } #[test] fn http_port_flag() { - let port1 = unused_port("tcp").expect("Unable to find unused port."); - let port2 = unused_port("tcp").expect("Unable to find unused port."); + let port1 = unused_tcp_port().expect("Unable to find unused port."); + let port2 = unused_tcp_port().expect("Unable to find unused port."); CommandLineTest::new() .flag("http-port", Some(port1.to_string().as_str())) .flag("port", Some(port2.to_string().as_str())) @@ -573,8 +573,8 @@ fn metrics_address_flag() { } #[test] fn metrics_port_flag() { - let port1 = unused_port("tcp").expect("Unable to find unused port."); - let port2 = unused_port("tcp").expect("Unable to find unused port."); + let port1 = unused_tcp_port().expect("Unable to find unused port."); + let port2 = unused_tcp_port().expect("Unable to find unused port."); CommandLineTest::new() .flag("metrics", None) .flag("metrics-port", Some(port1.to_string().as_str())) @@ -856,35 +856,3 @@ fn ensure_panic_on_failed_launch() { assert_eq!(slasher_config.chunk_size, 10); }); } - -/// A bit of hack to find an unused port. -/// -/// Does not guarantee that the given port is unused after the function exits, just that it was -/// unused before the function started (i.e., it does not reserve a port). -pub fn unused_port(transport: &str) -> Result { - let local_addr = match transport { - "tcp" => { - let listener = TcpListener::bind("127.0.0.1:0").map_err(|e| { - format!("Failed to create TCP listener to find unused port: {:?}", e) - })?; - listener.local_addr().map_err(|e| { - format!( - "Failed to read TCP listener local_addr to find unused port: {:?}", - e - ) - })? 
- } - "udp" => { - let socket = UdpSocket::bind("127.0.0.1:0") - .map_err(|e| format!("Failed to create UDP socket to find unused port: {:?}", e))?; - socket.local_addr().map_err(|e| { - format!( - "Failed to read UDP socket local_addr to find unused port: {:?}", - e - ) - })? - } - _ => return Err("Invalid transport to find unused port".into()), - }; - Ok(local_addr.port()) -} diff --git a/lighthouse/tests/boot_node.rs b/lighthouse/tests/boot_node.rs index 7b3c3acb3c..1c11ae046e 100644 --- a/lighthouse/tests/boot_node.rs +++ b/lighthouse/tests/boot_node.rs @@ -7,11 +7,12 @@ use lighthouse_network::discovery::ENR_FILENAME; use lighthouse_network::Enr; use std::fs::File; use std::io::Write; -use std::net::{Ipv4Addr, UdpSocket}; +use std::net::Ipv4Addr; use std::path::{Path, PathBuf}; use std::process::Command; use std::str::FromStr; use tempfile::TempDir; +use unused_port::unused_udp_port; const IP_ADDRESS: &str = "192.168.2.108"; @@ -51,15 +52,6 @@ impl CommandLineTestExec for CommandLineTest { } } -fn unused_port() -> u16 { - let socket = - UdpSocket::bind("127.0.0.1:0").expect("should create udp socket to find unused port"); - let local_addr = socket - .local_addr() - .expect("should read udp socket to find unused port"); - local_addr.port() -} - #[test] fn enr_address_arg() { let mut test = CommandLineTest::new(); @@ -70,7 +62,7 @@ fn enr_address_arg() { #[test] fn port_flag() { - let port = unused_port(); + let port = unused_udp_port().unwrap(); CommandLineTest::new() .flag("port", Some(port.to_string().as_str())) .run_with_ip() @@ -130,7 +122,7 @@ fn boot_nodes_flag() { #[test] fn enr_port_flag() { - let port = unused_port(); + let port = unused_udp_port().unwrap(); CommandLineTest::new() .flag("enr-port", Some(port.to_string().as_str())) .run_with_ip() diff --git a/testing/eth1_test_rig/Cargo.toml b/testing/eth1_test_rig/Cargo.toml index a04f63f372..787a571e8f 100644 --- a/testing/eth1_test_rig/Cargo.toml +++ b/testing/eth1_test_rig/Cargo.toml @@ -10,3 +10,4 
@@ web3 = { version = "0.17.0", default-features = false, features = ["http-tls", " types = { path = "../../consensus/types"} serde_json = "1.0.58" deposit_contract = { path = "../../common/deposit_contract"} +unused_port = { path = "../../common/unused_port" } diff --git a/testing/eth1_test_rig/src/ganache.rs b/testing/eth1_test_rig/src/ganache.rs index c48f011a68..505c010437 100644 --- a/testing/eth1_test_rig/src/ganache.rs +++ b/testing/eth1_test_rig/src/ganache.rs @@ -1,9 +1,9 @@ use serde_json::json; use std::io::prelude::*; use std::io::BufReader; -use std::net::TcpListener; use std::process::{Child, Command, Stdio}; use std::time::{Duration, Instant}; +use unused_port::unused_tcp_port; use web3::{transports::Http, Transport, Web3}; /// How long we will wait for ganache to indicate that it is ready. @@ -72,7 +72,7 @@ impl GanacheInstance { /// Start a new `ganache-cli` process, waiting until it indicates that it is ready to accept /// RPC connections. pub fn new(network_id: u64, chain_id: u64) -> Result { - let port = unused_port()?; + let port = unused_tcp_port()?; let binary = match cfg!(windows) { true => "ganache-cli.cmd", false => "ganache-cli", @@ -108,7 +108,7 @@ impl GanacheInstance { } pub fn fork(&self) -> Result { - let port = unused_port()?; + let port = unused_tcp_port()?; let binary = match cfg!(windows) { true => "ganache-cli.cmd", false => "ganache-cli", @@ -188,24 +188,6 @@ fn endpoint(port: u16) -> String { format!("http://localhost:{}", port) } -/// A bit of hack to find an unused TCP port. -/// -/// Does not guarantee that the given port is unused after the function exists, just that it was -/// unused before the function started (i.e., it does not reserve a port). 
-pub fn unused_port() -> Result { - let listener = TcpListener::bind("127.0.0.1:0") - .map_err(|e| format!("Failed to create TCP listener to find unused port: {:?}", e))?; - - let local_addr = listener.local_addr().map_err(|e| { - format!( - "Failed to read TCP listener local_addr to find unused port: {:?}", - e - ) - })?; - - Ok(local_addr.port()) -} - impl Drop for GanacheInstance { fn drop(&mut self) { if cfg!(windows) { diff --git a/testing/execution_engine_integration/.gitignore b/testing/execution_engine_integration/.gitignore new file mode 100644 index 0000000000..07ea3a7ff2 --- /dev/null +++ b/testing/execution_engine_integration/.gitignore @@ -0,0 +1 @@ +execution_clients/ diff --git a/testing/execution_engine_integration/Cargo.toml b/testing/execution_engine_integration/Cargo.toml new file mode 100644 index 0000000000..cd9836dd6c --- /dev/null +++ b/testing/execution_engine_integration/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "execution_engine_integration" +version = "0.1.0" +edition = "2021" + +build = "build.rs" + +[dependencies] +tempfile = "3.1.0" +serde_json = "1.0.58" +task_executor = { path = "../../common/task_executor" } +tokio = { version = "1.14.0", features = ["rt-multi-thread", "macros"] } +futures = "0.3.7" +exit-future = "0.2.0" +environment = { path = "../../lighthouse/environment" } +execution_layer = { path = "../../beacon_node/execution_layer" } +sensitive_url = { path = "../../common/sensitive_url" } +types = { path = "../../consensus/types" } +unused_port = { path = "../../common/unused_port" } diff --git a/testing/execution_engine_integration/Makefile b/testing/execution_engine_integration/Makefile new file mode 100644 index 0000000000..8bb2b59233 --- /dev/null +++ b/testing/execution_engine_integration/Makefile @@ -0,0 +1,5 @@ +test: + cargo test --release --locked + +clean: + rm -rf execution_clients diff --git a/testing/execution_engine_integration/build.rs b/testing/execution_engine_integration/build.rs new file mode 100644 
index 0000000000..bedf74fbd1 --- /dev/null +++ b/testing/execution_engine_integration/build.rs @@ -0,0 +1,62 @@ +use std::env; +use std::fs; +use std::path::{Path, PathBuf}; +use std::process::Command; + +const GETH_BRANCH: &str = "merge-kiln"; +const GETH_REPO_URL: &str = "https://github.com/MariusVanDerWijden/go-ethereum"; + +fn main() { + let manifest_dir: PathBuf = env::var("CARGO_MANIFEST_DIR").unwrap().into(); + let execution_clients_dir = manifest_dir.join("execution_clients"); + + if !execution_clients_dir.exists() { + fs::create_dir(&execution_clients_dir).unwrap(); + } + + build_geth(&execution_clients_dir); +} + +fn build_geth(execution_clients_dir: &Path) { + let repo_dir = execution_clients_dir.join("go-ethereum"); + + if !repo_dir.exists() { + // Clone the repo + assert!(Command::new("git") + .arg("clone") + .arg(GETH_REPO_URL) + .current_dir(&execution_clients_dir) + .output() + .expect("failed to clone geth repo") + .status + .success()); + } + + // Checkout the correct branch + assert!(Command::new("git") + .arg("checkout") + .arg(GETH_BRANCH) + .current_dir(&repo_dir) + .output() + .expect("failed to checkout geth branch") + .status + .success()); + + // Update the branch + assert!(Command::new("git") + .arg("pull") + .current_dir(&repo_dir) + .output() + .expect("failed to update geth branch") + .status + .success()); + + // Build geth + assert!(Command::new("make") + .arg("geth") + .current_dir(&repo_dir) + .output() + .expect("failed to make geth") + .status + .success()); +} diff --git a/testing/execution_engine_integration/src/execution_engine.rs b/testing/execution_engine_integration/src/execution_engine.rs new file mode 100644 index 0000000000..cff36a025b --- /dev/null +++ b/testing/execution_engine_integration/src/execution_engine.rs @@ -0,0 +1,131 @@ +use crate::{genesis_json::geth_genesis_json, SUPPRESS_LOGS}; +use sensitive_url::SensitiveUrl; +use std::path::PathBuf; +use std::process::{Child, Command, Output, Stdio}; +use std::{env, 
fs::File}; +use tempfile::TempDir; +use unused_port::unused_tcp_port; + +/// Defined for each EE type (e.g., Geth, Nethermind, etc). +pub trait GenericExecutionEngine: Clone { + fn init_datadir() -> TempDir; + fn start_client(datadir: &TempDir, http_port: u16) -> Child; +} + +/// Holds handle to a running EE process, plus some other metadata. +pub struct ExecutionEngine { + #[allow(dead_code)] + engine: E, + #[allow(dead_code)] + datadir: TempDir, + http_port: u16, + child: Child, +} + +impl Drop for ExecutionEngine { + fn drop(&mut self) { + // Ensure the EE process is killed on drop. + if let Err(e) = self.child.kill() { + eprintln!("failed to kill child: {:?}", e) + } + } +} + +impl ExecutionEngine { + pub fn new(engine: E) -> Self { + let datadir = E::init_datadir(); + let http_port = unused_tcp_port().unwrap(); + let child = E::start_client(&datadir, http_port); + Self { + engine, + datadir, + http_port, + child, + } + } + + pub fn http_url(&self) -> SensitiveUrl { + SensitiveUrl::parse(&format!("http://127.0.0.1:{}", self.http_port)).unwrap() + } +} + +/* + * Geth-specific Implementation + */ + +#[derive(Clone)] +pub struct Geth; + +impl Geth { + fn binary_path() -> PathBuf { + let manifest_dir: PathBuf = env::var("CARGO_MANIFEST_DIR").unwrap().into(); + manifest_dir + .join("execution_clients") + .join("go-ethereum") + .join("build") + .join("bin") + .join("geth") + } +} + +impl GenericExecutionEngine for Geth { + fn init_datadir() -> TempDir { + let datadir = TempDir::new().unwrap(); + + let genesis_json_path = datadir.path().join("genesis.json"); + let mut file = File::create(&genesis_json_path).unwrap(); + let json = geth_genesis_json(); + serde_json::to_writer(&mut file, &json).unwrap(); + + let output = Command::new(Self::binary_path()) + .arg("--datadir") + .arg(datadir.path().to_str().unwrap()) + .arg("init") + .arg(genesis_json_path.to_str().unwrap()) + .output() + .expect("failed to init geth"); + + check_command_output(output, "geth init failed"); 
+ + datadir + } + + fn start_client(datadir: &TempDir, http_port: u16) -> Child { + let network_port = unused_tcp_port().unwrap(); + + Command::new(Self::binary_path()) + .arg("--datadir") + .arg(datadir.path().to_str().unwrap()) + .arg("--http") + .arg("--http.api") + .arg("engine,eth") + .arg("--http.port") + .arg(http_port.to_string()) + .arg("--port") + .arg(network_port.to_string()) + .stdout(build_stdio()) + .stderr(build_stdio()) + .spawn() + .expect("failed to start beacon node") + } +} + +fn check_command_output(output: Output, failure_msg: &'static str) { + if !output.status.success() { + let stdout = String::from_utf8_lossy(&output.stdout); + let stderr = String::from_utf8_lossy(&output.stderr); + + dbg!(stdout); + dbg!(stderr); + panic!("{}", failure_msg); + } +} + +/// Builds the stdout/stderr handler for commands which might output to the terminal. +fn build_stdio() -> Stdio { + if SUPPRESS_LOGS { + Stdio::null() + } else { + Stdio::inherit() + } +} diff --git a/testing/execution_engine_integration/src/genesis_json.rs b/testing/execution_engine_integration/src/genesis_json.rs new file mode 100644 index 0000000000..87fdaec14a --- /dev/null +++ b/testing/execution_engine_integration/src/genesis_json.rs @@ -0,0 +1,42 @@ +use serde_json::{json, Value}; + +/// Sourced from: +/// +/// https://notes.ethereum.org/rmVErCfCRPKGqGkUe89-Kg +pub fn geth_genesis_json() -> Value { + json!({ + "config": { + "chainId":1, + "homesteadBlock":0, + "eip150Block":0, + "eip155Block":0, + "eip158Block":0, + "byzantiumBlock":0, + "constantinopleBlock":0, + "petersburgBlock":0, + "istanbulBlock":0, + "muirGlacierBlock":0, + "berlinBlock":0, + "londonBlock":0, + "clique": { + "period": 5, + "epoch": 30000 + }, + "terminalTotalDifficulty":0 + }, + "nonce":"0x42", + "timestamp":"0x0", + 
"extraData":"0x0000000000000000000000000000000000000000000000000000000000000000a94f5374fce5edbc8e2a8697c15331677e6ebf0b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "gasLimit":"0x1C9C380", + "difficulty":"0x400000000", + "mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000", + "coinbase":"0x0000000000000000000000000000000000000000", + "alloc":{ + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b":{"balance":"0x6d6172697573766477000000"} + }, + "number":"0x0", + "gasUsed":"0x0", + "parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000", + "baseFeePerGas":"0x7" + }) +} diff --git a/testing/execution_engine_integration/src/lib.rs b/testing/execution_engine_integration/src/lib.rs new file mode 100644 index 0000000000..19a73e6bf2 --- /dev/null +++ b/testing/execution_engine_integration/src/lib.rs @@ -0,0 +1,12 @@ +/// This library provides integration testing between Lighthouse and other execution engines. +/// +/// See the `tests/tests.rs` file to run tests. +mod execution_engine; +mod genesis_json; +mod test_rig; + +pub use execution_engine::Geth; +pub use test_rig::TestRig; + +/// Set to `false` to send logs to the console during tests. Logs are useful when debugging. 
+const SUPPRESS_LOGS: bool = true; diff --git a/testing/execution_engine_integration/src/test_rig.rs b/testing/execution_engine_integration/src/test_rig.rs new file mode 100644 index 0000000000..e8253036fb --- /dev/null +++ b/testing/execution_engine_integration/src/test_rig.rs @@ -0,0 +1,363 @@ +use crate::execution_engine::{ExecutionEngine, GenericExecutionEngine}; +use execution_layer::{ExecutionLayer, PayloadAttributes, PayloadStatusV1Status}; +use std::sync::Arc; +use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; +use task_executor::TaskExecutor; +use tokio::time::sleep; +use types::{Address, ChainSpec, EthSpec, Hash256, MainnetEthSpec, Uint256}; + +const EXECUTION_ENGINE_START_TIMEOUT: Duration = Duration::from_secs(10); + +struct ExecutionPair { + /// The Lighthouse `ExecutionLayer` struct, connected to the `execution_engine` via HTTP. + execution_layer: ExecutionLayer, + /// A handle to external EE process, once this is dropped the process will be killed. + #[allow(dead_code)] + execution_engine: ExecutionEngine, +} + +/// A rig that holds two EE processes for testing. +/// +/// There are two EEs held here so that we can test out-of-order application of payloads, and other +/// edge-cases. 
+pub struct TestRig { + #[allow(dead_code)] + runtime: Arc, + ee_a: ExecutionPair, + ee_b: ExecutionPair, + spec: ChainSpec, + _runtime_shutdown: exit_future::Signal, +} + +impl TestRig { + pub fn new(generic_engine: E) -> Self { + let log = environment::null_logger().unwrap(); + let runtime = Arc::new( + tokio::runtime::Builder::new_multi_thread() + .enable_all() + .build() + .unwrap(), + ); + let (runtime_shutdown, exit) = exit_future::signal(); + let (shutdown_tx, _) = futures::channel::mpsc::channel(1); + let executor = TaskExecutor::new(Arc::downgrade(&runtime), exit, log.clone(), shutdown_tx); + + let fee_recipient = None; + + let ee_a = { + let execution_engine = ExecutionEngine::new(generic_engine.clone()); + let urls = vec![execution_engine.http_url()]; + let execution_layer = + ExecutionLayer::from_urls(urls, fee_recipient, executor.clone(), log.clone()) + .unwrap(); + ExecutionPair { + execution_engine, + execution_layer, + } + }; + + let ee_b = { + let execution_engine = ExecutionEngine::new(generic_engine); + let urls = vec![execution_engine.http_url()]; + let execution_layer = + ExecutionLayer::from_urls(urls, fee_recipient, executor, log).unwrap(); + ExecutionPair { + execution_engine, + execution_layer, + } + }; + + let mut spec = MainnetEthSpec::default_spec(); + spec.terminal_total_difficulty = Uint256::zero(); + + Self { + runtime, + ee_a, + ee_b, + spec, + _runtime_shutdown: runtime_shutdown, + } + } + + pub fn perform_tests_blocking(&self) { + self.ee_a + .execution_layer + .block_on_generic(|_| async { self.perform_tests().await }) + .unwrap() + } + + pub async fn wait_until_synced(&self) { + let start_instant = Instant::now(); + + for pair in [&self.ee_a, &self.ee_b] { + loop { + // Run the routine to check for online nodes. 
+ pair.execution_layer.watchdog_task().await; + + if pair.execution_layer.is_synced().await { + break; + } else if start_instant + EXECUTION_ENGINE_START_TIMEOUT > Instant::now() { + sleep(Duration::from_millis(500)).await; + } else { + panic!("timeout waiting for execution engines to come online") + } + } + } + } + + pub async fn perform_tests(&self) { + self.wait_until_synced().await; + + /* + * Read the terminal block hash from both pairs, check it's equal. + */ + + let terminal_pow_block_hash = self + .ee_a + .execution_layer + .get_terminal_pow_block_hash(&self.spec) + .await + .unwrap() + .unwrap(); + + assert_eq!( + terminal_pow_block_hash, + self.ee_b + .execution_layer + .get_terminal_pow_block_hash(&self.spec) + .await + .unwrap() + .unwrap() + ); + + /* + * Execution Engine A: + * + * Produce a valid payload atop the terminal block. + */ + + let parent_hash = terminal_pow_block_hash; + let timestamp = timestamp_now(); + let random = Hash256::zero(); + let finalized_block_hash = Hash256::zero(); + let proposer_index = 0; + let valid_payload = self + .ee_a + .execution_layer + .get_payload::( + parent_hash, + timestamp, + random, + finalized_block_hash, + proposer_index, + ) + .await + .unwrap(); + + /* + * Execution Engine A: + * + * Indicate that the payload is the head of the chain, before submitting a + * `notify_new_payload`. + */ + let head_block_hash = valid_payload.block_hash; + let finalized_block_hash = Hash256::zero(); + let payload_attributes = None; + let (status, _) = self + .ee_a + .execution_layer + .notify_forkchoice_updated(head_block_hash, finalized_block_hash, payload_attributes) + .await + .unwrap(); + assert_eq!(status, PayloadStatusV1Status::Syncing); + + /* + * Execution Engine A: + * + * Provide the valid payload back to the EE again. 
+ */ + + let (status, _) = self + .ee_a + .execution_layer + .notify_new_payload(&valid_payload) + .await + .unwrap(); + assert_eq!(status, PayloadStatusV1Status::Valid); + + /* + * Execution Engine A: + * + * Indicate that the payload is the head of the chain. + * + * Do not provide payload attributes (we'll test that later). + */ + let head_block_hash = valid_payload.block_hash; + let finalized_block_hash = Hash256::zero(); + let payload_attributes = None; + let (status, _) = self + .ee_a + .execution_layer + .notify_forkchoice_updated(head_block_hash, finalized_block_hash, payload_attributes) + .await + .unwrap(); + assert_eq!(status, PayloadStatusV1Status::Valid); + + /* + * Execution Engine A: + * + * Provide an invalidated payload to the EE. + */ + + let mut invalid_payload = valid_payload.clone(); + invalid_payload.random = Hash256::from_low_u64_be(42); + let (status, _) = self + .ee_a + .execution_layer + .notify_new_payload(&invalid_payload) + .await + .unwrap(); + assert!(matches!( + status, + PayloadStatusV1Status::Invalid | PayloadStatusV1Status::InvalidBlockHash + )); + + /* + * Execution Engine A: + * + * Produce another payload atop the previous one. + */ + + let parent_hash = valid_payload.block_hash; + let timestamp = valid_payload.timestamp + 1; + let random = Hash256::zero(); + let finalized_block_hash = Hash256::zero(); + let proposer_index = 0; + let second_payload = self + .ee_a + .execution_layer + .get_payload::( + parent_hash, + timestamp, + random, + finalized_block_hash, + proposer_index, + ) + .await + .unwrap(); + + /* + * Execution Engine A: + * + * Provide the second payload back to the EE again. + */ + + let (status, _) = self + .ee_a + .execution_layer + .notify_new_payload(&second_payload) + .await + .unwrap(); + assert_eq!(status, PayloadStatusV1Status::Valid); + + /* + * Execution Engine A: + * + * Indicate that the payload is the head of the chain, providing payload attributes. 
+ */ + let head_block_hash = valid_payload.block_hash; + let finalized_block_hash = Hash256::zero(); + let payload_attributes = Some(PayloadAttributes { + timestamp: second_payload.timestamp + 1, + random: Hash256::zero(), + suggested_fee_recipient: Address::zero(), + }); + let (status, _) = self + .ee_a + .execution_layer + .notify_forkchoice_updated(head_block_hash, finalized_block_hash, payload_attributes) + .await + .unwrap(); + assert_eq!(status, PayloadStatusV1Status::Valid); + + /* + * Execution Engine B: + * + * Provide the second payload, without providing the first. + */ + let (status, _) = self + .ee_b + .execution_layer + .notify_new_payload(&second_payload) + .await + .unwrap(); + assert_eq!(status, PayloadStatusV1Status::Syncing); + + /* + * Execution Engine B: + * + * Set the second payload as the head, without providing payload attributes. + */ + let head_block_hash = second_payload.block_hash; + let finalized_block_hash = Hash256::zero(); + let payload_attributes = None; + let (status, _) = self + .ee_b + .execution_layer + .notify_forkchoice_updated(head_block_hash, finalized_block_hash, payload_attributes) + .await + .unwrap(); + assert_eq!(status, PayloadStatusV1Status::Syncing); + + /* + * Execution Engine B: + * + * Provide the first payload to the EE. + */ + + let (status, _) = self + .ee_b + .execution_layer + .notify_new_payload(&valid_payload) + .await + .unwrap(); + assert_eq!(status, PayloadStatusV1Status::Valid); + + /* + * Execution Engine B: + * + * Provide the second payload, now the first has been provided. + */ + let (status, _) = self + .ee_b + .execution_layer + .notify_new_payload(&second_payload) + .await + .unwrap(); + assert_eq!(status, PayloadStatusV1Status::Valid); + + /* + * Execution Engine B: + * + * Set the second payload as the head, without providing payload attributes. 
+ */ + let head_block_hash = second_payload.block_hash; + let finalized_block_hash = Hash256::zero(); + let payload_attributes = None; + let (status, _) = self + .ee_b + .execution_layer + .notify_forkchoice_updated(head_block_hash, finalized_block_hash, payload_attributes) + .await + .unwrap(); + assert_eq!(status, PayloadStatusV1Status::Valid); + } +} + +/// Returns the duration since the unix epoch. +pub fn timestamp_now() -> u64 { + SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap_or_else(|_| Duration::from_secs(0)) + .as_secs() +} diff --git a/testing/execution_engine_integration/tests/tests.rs b/testing/execution_engine_integration/tests/tests.rs new file mode 100644 index 0000000000..d4fcb29dca --- /dev/null +++ b/testing/execution_engine_integration/tests/tests.rs @@ -0,0 +1,16 @@ +#[cfg(not(target_family = "windows"))] +mod not_windows { + use execution_engine_integration::{Geth, TestRig}; + #[test] + fn geth() { + TestRig::new(Geth).perform_tests_blocking() + } +} + +#[cfg(target_family = "windows")] +mod windows { + #[test] + fn all_tests_skipped_on_windows() { + // + } +} diff --git a/testing/web3signer_tests/src/lib.rs b/testing/web3signer_tests/src/lib.rs index d73e4a762d..128c4a6fe9 100644 --- a/testing/web3signer_tests/src/lib.rs +++ b/testing/web3signer_tests/src/lib.rs @@ -45,7 +45,7 @@ mod tests { /// assume it failed to start. const UPCHECK_TIMEOUT: Duration = Duration::from_secs(20); - /// Set to `true` to send the Web3Signer logs to the console during tests. Logs are useful when + /// Set to `false` to send the Web3Signer logs to the console during tests. Logs are useful when /// debugging. 
const SUPPRESS_WEB3SIGNER_LOGS: bool = true; From da4ca024f13bb00474697c82caa1900dec4c7d48 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 17 Feb 2022 23:55:04 +0000 Subject: [PATCH 03/14] Use SmallVec in Bitfield (#3025) ## Issue Addressed Alternative to #2935 ## Proposed Changes Replace the `Vec` inside `Bitfield` with a `SmallVec<[u8; 32>`. This eliminates heap allocations for attestation bitfields until we reach 500K validators, at which point we can consider increasing `SMALLVEC_LEN` to 40 or 48. While running Lighthouse under `heaptrack` I found that SSZ encoding and decoding of bitfields corresponded to 22% of all allocations by count. I've confirmed that with this change applied those allocations disappear entirely. ## Additional Info We can win another 8 bytes of space by using `smallvec`'s [`union` feature](https://docs.rs/smallvec/1.8.0/smallvec/#union), although I might leave that for a future PR because I don't know how experimental that feature is and whether it uses some spicy `unsafe` blocks. 
--- Cargo.lock | 2 + consensus/ssz_types/Cargo.toml | 1 + consensus/ssz_types/src/bitfield.rs | 248 ++++++++++-------- consensus/types/Cargo.toml | 1 + consensus/types/src/attestation.rs | 27 +- .../src/test_utils/test_random/bitfield.rs | 5 +- 6 files changed, 174 insertions(+), 110 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c9a9e69683..c2ffc9a376 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1678,6 +1678,7 @@ dependencies = [ "serde", "serde_derive", "serde_json", + "smallvec", "tree_hash", "tree_hash_derive 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "typenum", @@ -6496,6 +6497,7 @@ dependencies = [ "serde_json", "serde_yaml", "slog", + "smallvec", "state_processing", "superstruct", "swap_or_not_shuffle", diff --git a/consensus/ssz_types/Cargo.toml b/consensus/ssz_types/Cargo.toml index b71de4ccdb..9c23ce92b5 100644 --- a/consensus/ssz_types/Cargo.toml +++ b/consensus/ssz_types/Cargo.toml @@ -18,6 +18,7 @@ eth2_ssz = "0.4.1" typenum = "1.12.0" arbitrary = { version = "1.0", features = ["derive"], optional = true } derivative = "2.1.1" +smallvec = "1.8.0" [dev-dependencies] serde_json = "1.0.58" diff --git a/consensus/ssz_types/src/bitfield.rs b/consensus/ssz_types/src/bitfield.rs index dfad3aedcb..599170fa29 100644 --- a/consensus/ssz_types/src/bitfield.rs +++ b/consensus/ssz_types/src/bitfield.rs @@ -5,10 +5,17 @@ use derivative::Derivative; use eth2_serde_utils::hex::{encode as hex_encode, PrefixedHexVisitor}; use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; +use smallvec::{smallvec, SmallVec, ToSmallVec}; use ssz::{Decode, Encode}; use tree_hash::Hash256; use typenum::Unsigned; +/// Maximum number of bytes to store on the stack in a bitfield's `SmallVec`. +/// +/// The default of 32 bytes is enough to take us through to ~500K validators, as the byte length of +/// attestation bitfields is roughly `N // 32 slots // 64 committes // 8 bits`. 
+pub const SMALLVEC_LEN: usize = 32; + /// A marker trait applied to `Variable` and `Fixed` that defines the behaviour of a `Bitfield`. pub trait BitfieldBehaviour: Clone {} @@ -87,11 +94,11 @@ pub type BitVector = Bitfield>; /// /// The internal representation of the bitfield is the same as that required by SSZ. The lowest /// byte (by `Vec` index) stores the lowest bit-indices and the right-most bit stores the lowest -/// bit-index. E.g., `vec![0b0000_0001, 0b0000_0010]` has bits `0, 9` set. +/// bit-index. E.g., `smallvec![0b0000_0001, 0b0000_0010]` has bits `0, 9` set. #[derive(Clone, Debug, Derivative)] #[derivative(PartialEq, Hash(bound = ""))] pub struct Bitfield { - bytes: Vec, + bytes: SmallVec<[u8; SMALLVEC_LEN]>, len: usize, _phantom: PhantomData, } @@ -106,7 +113,7 @@ impl Bitfield> { pub fn with_capacity(num_bits: usize) -> Result { if num_bits <= N::to_usize() { Ok(Self { - bytes: vec![0; bytes_for_bit_len(num_bits)], + bytes: smallvec![0; bytes_for_bit_len(num_bits)], len: num_bits, _phantom: PhantomData, }) @@ -131,14 +138,15 @@ impl Bitfield> { /// ## Example /// ``` /// use ssz_types::{BitList, typenum}; + /// use smallvec::SmallVec; /// /// type BitList8 = BitList; /// /// let b = BitList8::with_capacity(4).unwrap(); /// - /// assert_eq!(b.into_bytes(), vec![0b0001_0000]); + /// assert_eq!(b.into_bytes(), SmallVec::from_buf([0b0001_0000])); /// ``` - pub fn into_bytes(self) -> Vec { + pub fn into_bytes(self) -> SmallVec<[u8; SMALLVEC_LEN]> { let len = self.len(); let mut bytes = self.bytes; @@ -163,7 +171,7 @@ impl Bitfield> { /// produces (SSZ). /// /// Returns `None` if `bytes` are not a valid encoding. - pub fn from_bytes(bytes: Vec) -> Result { + pub fn from_bytes(bytes: SmallVec<[u8; SMALLVEC_LEN]>) -> Result { let bytes_len = bytes.len(); let mut initial_bitfield: Bitfield> = { let num_bits = bytes.len() * 8; @@ -235,7 +243,7 @@ impl Bitfield> { /// All bits are initialized to `false`. 
pub fn new() -> Self { Self { - bytes: vec![0; bytes_for_bit_len(Self::capacity())], + bytes: smallvec![0; bytes_for_bit_len(Self::capacity())], len: Self::capacity(), _phantom: PhantomData, } @@ -253,12 +261,13 @@ impl Bitfield> { /// ## Example /// ``` /// use ssz_types::{BitVector, typenum}; + /// use smallvec::SmallVec; /// /// type BitVector4 = BitVector; /// - /// assert_eq!(BitVector4::new().into_bytes(), vec![0b0000_0000]); + /// assert_eq!(BitVector4::new().into_bytes(), SmallVec::from_buf([0b0000_0000])); /// ``` - pub fn into_bytes(self) -> Vec { + pub fn into_bytes(self) -> SmallVec<[u8; SMALLVEC_LEN]> { self.into_raw_bytes() } @@ -266,7 +275,7 @@ impl Bitfield> { /// produces (SSZ). /// /// Returns `None` if `bytes` are not a valid encoding. - pub fn from_bytes(bytes: Vec) -> Result { + pub fn from_bytes(bytes: SmallVec<[u8; SMALLVEC_LEN]>) -> Result { Self::from_raw_bytes(bytes, Self::capacity()) } @@ -355,7 +364,7 @@ impl Bitfield { } /// Returns the underlying bytes representation of the bitfield. - pub fn into_raw_bytes(self) -> Vec { + pub fn into_raw_bytes(self) -> SmallVec<[u8; SMALLVEC_LEN]> { self.bytes } @@ -372,9 +381,9 @@ impl Bitfield { /// - `bytes` is not the minimal required bytes to represent a bitfield of `bit_len` bits. /// - `bit_len` is not a multiple of 8 and `bytes` contains set bits that are higher than, or /// equal to `bit_len`. - fn from_raw_bytes(bytes: Vec, bit_len: usize) -> Result { + fn from_raw_bytes(bytes: SmallVec<[u8; SMALLVEC_LEN]>, bit_len: usize) -> Result { if bit_len == 0 { - if bytes.len() == 1 && bytes == [0] { + if bytes.len() == 1 && bytes[0] == 0 { // A bitfield with `bit_len` 0 can only be represented by a single zero byte. 
Ok(Self { bytes, @@ -512,7 +521,7 @@ impl Encode for Bitfield> { } fn ssz_append(&self, buf: &mut Vec) { - buf.append(&mut self.clone().into_bytes()) + buf.extend_from_slice(&self.clone().into_bytes()) } } @@ -522,7 +531,7 @@ impl Decode for Bitfield> { } fn from_ssz_bytes(bytes: &[u8]) -> Result { - Self::from_bytes(bytes.to_vec()).map_err(|e| { + Self::from_bytes(bytes.to_smallvec()).map_err(|e| { ssz::DecodeError::BytesInvalid(format!("BitList failed to decode: {:?}", e)) }) } @@ -542,7 +551,7 @@ impl Encode for Bitfield> { } fn ssz_append(&self, buf: &mut Vec) { - buf.append(&mut self.clone().into_bytes()) + buf.extend_from_slice(&self.clone().into_bytes()) } } @@ -556,7 +565,7 @@ impl Decode for Bitfield> { } fn from_ssz_bytes(bytes: &[u8]) -> Result { - Self::from_bytes(bytes.to_vec()).map_err(|e| { + Self::from_bytes(bytes.to_smallvec()).map_err(|e| { ssz::DecodeError::BytesInvalid(format!("BitVector failed to decode: {:?}", e)) }) } @@ -649,7 +658,7 @@ impl tree_hash::TreeHash for Bitfield> { impl arbitrary::Arbitrary<'_> for Bitfield> { fn arbitrary(u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result { let size = N::to_usize(); - let mut vec: Vec = vec![0u8; size]; + let mut vec = smallvec![0u8; size]; u.fill_buffer(&mut vec)?; Ok(Self::from_bytes(vec).map_err(|_| arbitrary::Error::IncorrectFormat)?) } @@ -661,7 +670,7 @@ impl arbitrary::Arbitrary<'_> for Bitfield> { let max_size = N::to_usize(); let rand = usize::arbitrary(u)?; let size = std::cmp::min(rand, max_size); - let mut vec: Vec = vec![0u8; size]; + let mut vec = smallvec![0u8; size]; u.fill_buffer(&mut vec)?; Ok(Self::from_bytes(vec).map_err(|_| arbitrary::Error::IncorrectFormat)?) 
} @@ -730,9 +739,9 @@ mod bitvector { #[test] fn intersection() { - let a = BitVector16::from_raw_bytes(vec![0b1100, 0b0001], 16).unwrap(); - let b = BitVector16::from_raw_bytes(vec![0b1011, 0b1001], 16).unwrap(); - let c = BitVector16::from_raw_bytes(vec![0b1000, 0b0001], 16).unwrap(); + let a = BitVector16::from_raw_bytes(smallvec![0b1100, 0b0001], 16).unwrap(); + let b = BitVector16::from_raw_bytes(smallvec![0b1011, 0b1001], 16).unwrap(); + let c = BitVector16::from_raw_bytes(smallvec![0b1000, 0b0001], 16).unwrap(); assert_eq!(a.intersection(&b), c); assert_eq!(b.intersection(&a), c); @@ -745,9 +754,9 @@ mod bitvector { #[test] fn intersection_diff_length() { - let a = BitVector16::from_bytes(vec![0b0010_1110, 0b0010_1011]).unwrap(); - let b = BitVector16::from_bytes(vec![0b0010_1101, 0b0000_0001]).unwrap(); - let c = BitVector16::from_bytes(vec![0b0010_1100, 0b0000_0001]).unwrap(); + let a = BitVector16::from_bytes(smallvec![0b0010_1110, 0b0010_1011]).unwrap(); + let b = BitVector16::from_bytes(smallvec![0b0010_1101, 0b0000_0001]).unwrap(); + let c = BitVector16::from_bytes(smallvec![0b0010_1100, 0b0000_0001]).unwrap(); assert_eq!(a.len(), 16); assert_eq!(b.len(), 16); @@ -758,9 +767,9 @@ mod bitvector { #[test] fn union() { - let a = BitVector16::from_raw_bytes(vec![0b1100, 0b0001], 16).unwrap(); - let b = BitVector16::from_raw_bytes(vec![0b1011, 0b1001], 16).unwrap(); - let c = BitVector16::from_raw_bytes(vec![0b1111, 0b1001], 16).unwrap(); + let a = BitVector16::from_raw_bytes(smallvec![0b1100, 0b0001], 16).unwrap(); + let b = BitVector16::from_raw_bytes(smallvec![0b1011, 0b1001], 16).unwrap(); + let c = BitVector16::from_raw_bytes(smallvec![0b1111, 0b1001], 16).unwrap(); assert_eq!(a.union(&b), c); assert_eq!(b.union(&a), c); @@ -771,9 +780,9 @@ mod bitvector { #[test] fn union_diff_length() { - let a = BitVector16::from_bytes(vec![0b0010_1011, 0b0010_1110]).unwrap(); - let b = BitVector16::from_bytes(vec![0b0000_0001, 0b0010_1101]).unwrap(); - let c = 
BitVector16::from_bytes(vec![0b0010_1011, 0b0010_1111]).unwrap(); + let a = BitVector16::from_bytes(smallvec![0b0010_1011, 0b0010_1110]).unwrap(); + let b = BitVector16::from_bytes(smallvec![0b0000_0001, 0b0010_1101]).unwrap(); + let c = BitVector16::from_bytes(smallvec![0b0010_1011, 0b0010_1111]).unwrap(); assert_eq!(a.len(), c.len()); assert_eq!(a.union(&b), c); @@ -839,6 +848,12 @@ mod bitvector { assert!(BitVector4::from_ssz_bytes(&bad).is_err()); } + + // Ensure that stack size of a BitVector is manageable. + #[test] + fn size_of() { + assert_eq!(std::mem::size_of::(), SMALLVEC_LEN + 24); + } } #[cfg(test)] @@ -992,50 +1007,50 @@ mod bitlist { #[test] fn from_raw_bytes() { - assert!(BitList1024::from_raw_bytes(vec![0b0000_0000], 0).is_ok()); - assert!(BitList1024::from_raw_bytes(vec![0b0000_0001], 1).is_ok()); - assert!(BitList1024::from_raw_bytes(vec![0b0000_0011], 2).is_ok()); - assert!(BitList1024::from_raw_bytes(vec![0b0000_0111], 3).is_ok()); - assert!(BitList1024::from_raw_bytes(vec![0b0000_1111], 4).is_ok()); - assert!(BitList1024::from_raw_bytes(vec![0b0001_1111], 5).is_ok()); - assert!(BitList1024::from_raw_bytes(vec![0b0011_1111], 6).is_ok()); - assert!(BitList1024::from_raw_bytes(vec![0b0111_1111], 7).is_ok()); - assert!(BitList1024::from_raw_bytes(vec![0b1111_1111], 8).is_ok()); + assert!(BitList1024::from_raw_bytes(smallvec![0b0000_0000], 0).is_ok()); + assert!(BitList1024::from_raw_bytes(smallvec![0b0000_0001], 1).is_ok()); + assert!(BitList1024::from_raw_bytes(smallvec![0b0000_0011], 2).is_ok()); + assert!(BitList1024::from_raw_bytes(smallvec![0b0000_0111], 3).is_ok()); + assert!(BitList1024::from_raw_bytes(smallvec![0b0000_1111], 4).is_ok()); + assert!(BitList1024::from_raw_bytes(smallvec![0b0001_1111], 5).is_ok()); + assert!(BitList1024::from_raw_bytes(smallvec![0b0011_1111], 6).is_ok()); + assert!(BitList1024::from_raw_bytes(smallvec![0b0111_1111], 7).is_ok()); + assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111], 8).is_ok()); - 
assert!(BitList1024::from_raw_bytes(vec![0b1111_1111, 0b0000_0001], 9).is_ok()); - assert!(BitList1024::from_raw_bytes(vec![0b1111_1111, 0b0000_0011], 10).is_ok()); - assert!(BitList1024::from_raw_bytes(vec![0b1111_1111, 0b0000_0111], 11).is_ok()); - assert!(BitList1024::from_raw_bytes(vec![0b1111_1111, 0b0000_1111], 12).is_ok()); - assert!(BitList1024::from_raw_bytes(vec![0b1111_1111, 0b0001_1111], 13).is_ok()); - assert!(BitList1024::from_raw_bytes(vec![0b1111_1111, 0b0011_1111], 14).is_ok()); - assert!(BitList1024::from_raw_bytes(vec![0b1111_1111, 0b0111_1111], 15).is_ok()); - assert!(BitList1024::from_raw_bytes(vec![0b1111_1111, 0b1111_1111], 16).is_ok()); + assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b0000_0001], 9).is_ok()); + assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b0000_0011], 10).is_ok()); + assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b0000_0111], 11).is_ok()); + assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b0000_1111], 12).is_ok()); + assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b0001_1111], 13).is_ok()); + assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b0011_1111], 14).is_ok()); + assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b0111_1111], 15).is_ok()); + assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b1111_1111], 16).is_ok()); for i in 0..8 { - assert!(BitList1024::from_raw_bytes(vec![], i).is_err()); - assert!(BitList1024::from_raw_bytes(vec![0b1111_1111], i).is_err()); - assert!(BitList1024::from_raw_bytes(vec![0b0000_0000, 0b1111_1110], i).is_err()); + assert!(BitList1024::from_raw_bytes(smallvec![], i).is_err()); + assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111], i).is_err()); + assert!(BitList1024::from_raw_bytes(smallvec![0b0000_0000, 0b1111_1110], i).is_err()); } - assert!(BitList1024::from_raw_bytes(vec![0b0000_0001], 0).is_err()); + assert!(BitList1024::from_raw_bytes(smallvec![0b0000_0001], 0).is_err()); - 
assert!(BitList1024::from_raw_bytes(vec![0b0000_0001], 0).is_err()); - assert!(BitList1024::from_raw_bytes(vec![0b0000_0011], 1).is_err()); - assert!(BitList1024::from_raw_bytes(vec![0b0000_0111], 2).is_err()); - assert!(BitList1024::from_raw_bytes(vec![0b0000_1111], 3).is_err()); - assert!(BitList1024::from_raw_bytes(vec![0b0001_1111], 4).is_err()); - assert!(BitList1024::from_raw_bytes(vec![0b0011_1111], 5).is_err()); - assert!(BitList1024::from_raw_bytes(vec![0b0111_1111], 6).is_err()); - assert!(BitList1024::from_raw_bytes(vec![0b1111_1111], 7).is_err()); + assert!(BitList1024::from_raw_bytes(smallvec![0b0000_0001], 0).is_err()); + assert!(BitList1024::from_raw_bytes(smallvec![0b0000_0011], 1).is_err()); + assert!(BitList1024::from_raw_bytes(smallvec![0b0000_0111], 2).is_err()); + assert!(BitList1024::from_raw_bytes(smallvec![0b0000_1111], 3).is_err()); + assert!(BitList1024::from_raw_bytes(smallvec![0b0001_1111], 4).is_err()); + assert!(BitList1024::from_raw_bytes(smallvec![0b0011_1111], 5).is_err()); + assert!(BitList1024::from_raw_bytes(smallvec![0b0111_1111], 6).is_err()); + assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111], 7).is_err()); - assert!(BitList1024::from_raw_bytes(vec![0b1111_1111, 0b0000_0001], 8).is_err()); - assert!(BitList1024::from_raw_bytes(vec![0b1111_1111, 0b0000_0011], 9).is_err()); - assert!(BitList1024::from_raw_bytes(vec![0b1111_1111, 0b0000_0111], 10).is_err()); - assert!(BitList1024::from_raw_bytes(vec![0b1111_1111, 0b0000_1111], 11).is_err()); - assert!(BitList1024::from_raw_bytes(vec![0b1111_1111, 0b0001_1111], 12).is_err()); - assert!(BitList1024::from_raw_bytes(vec![0b1111_1111, 0b0011_1111], 13).is_err()); - assert!(BitList1024::from_raw_bytes(vec![0b1111_1111, 0b0111_1111], 14).is_err()); - assert!(BitList1024::from_raw_bytes(vec![0b1111_1111, 0b1111_1111], 15).is_err()); + assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b0000_0001], 8).is_err()); + 
assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b0000_0011], 9).is_err()); + assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b0000_0111], 10).is_err()); + assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b0000_1111], 11).is_err()); + assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b0001_1111], 12).is_err()); + assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b0011_1111], 13).is_err()); + assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b0111_1111], 14).is_err()); + assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b1111_1111], 15).is_err()); } fn test_set_unset(num_bits: usize) { @@ -1083,51 +1098,64 @@ mod bitlist { } } + /// Type-specialised `smallvec` macro for testing. + macro_rules! bytevec { + ($($x : expr),* $(,)*) => { + { + let __smallvec: SmallVec<[u8; SMALLVEC_LEN]> = smallvec!($($x),*); + __smallvec + } + }; + } + #[test] fn into_raw_bytes() { let mut bitfield = BitList1024::with_capacity(9).unwrap(); bitfield.set(0, true).unwrap(); assert_eq!( bitfield.clone().into_raw_bytes(), - vec![0b0000_0001, 0b0000_0000] + bytevec![0b0000_0001, 0b0000_0000] ); bitfield.set(1, true).unwrap(); assert_eq!( bitfield.clone().into_raw_bytes(), - vec![0b0000_0011, 0b0000_0000] + bytevec![0b0000_0011, 0b0000_0000] ); bitfield.set(2, true).unwrap(); assert_eq!( bitfield.clone().into_raw_bytes(), - vec![0b0000_0111, 0b0000_0000] + bytevec![0b0000_0111, 0b0000_0000] ); bitfield.set(3, true).unwrap(); assert_eq!( bitfield.clone().into_raw_bytes(), - vec![0b0000_1111, 0b0000_0000] + bytevec![0b0000_1111, 0b0000_0000] ); bitfield.set(4, true).unwrap(); assert_eq!( bitfield.clone().into_raw_bytes(), - vec![0b0001_1111, 0b0000_0000] + bytevec![0b0001_1111, 0b0000_0000] ); bitfield.set(5, true).unwrap(); assert_eq!( bitfield.clone().into_raw_bytes(), - vec![0b0011_1111, 0b0000_0000] + bytevec![0b0011_1111, 0b0000_0000] ); bitfield.set(6, true).unwrap(); assert_eq!( bitfield.clone().into_raw_bytes(), - 
vec![0b0111_1111, 0b0000_0000] + bytevec![0b0111_1111, 0b0000_0000] ); bitfield.set(7, true).unwrap(); assert_eq!( bitfield.clone().into_raw_bytes(), - vec![0b1111_1111, 0b0000_0000] + bytevec![0b1111_1111, 0b0000_0000] ); bitfield.set(8, true).unwrap(); - assert_eq!(bitfield.into_raw_bytes(), vec![0b1111_1111, 0b0000_0001]); + assert_eq!( + bitfield.into_raw_bytes(), + bytevec![0b1111_1111, 0b0000_0001] + ); } #[test] @@ -1138,28 +1166,28 @@ mod bitlist { ); assert_eq!( - BitList1024::from_raw_bytes(vec![0b0000_0001, 0b0000_0000], 16) + BitList1024::from_raw_bytes(smallvec![0b0000_0001, 0b0000_0000], 16) .unwrap() .highest_set_bit(), Some(0) ); assert_eq!( - BitList1024::from_raw_bytes(vec![0b0000_0010, 0b0000_0000], 16) + BitList1024::from_raw_bytes(smallvec![0b0000_0010, 0b0000_0000], 16) .unwrap() .highest_set_bit(), Some(1) ); assert_eq!( - BitList1024::from_raw_bytes(vec![0b0000_1000], 8) + BitList1024::from_raw_bytes(smallvec![0b0000_1000], 8) .unwrap() .highest_set_bit(), Some(3) ); assert_eq!( - BitList1024::from_raw_bytes(vec![0b0000_0000, 0b1000_0000], 16) + BitList1024::from_raw_bytes(smallvec![0b0000_0000, 0b1000_0000], 16) .unwrap() .highest_set_bit(), Some(15) @@ -1168,9 +1196,9 @@ mod bitlist { #[test] fn intersection() { - let a = BitList1024::from_raw_bytes(vec![0b1100, 0b0001], 16).unwrap(); - let b = BitList1024::from_raw_bytes(vec![0b1011, 0b1001], 16).unwrap(); - let c = BitList1024::from_raw_bytes(vec![0b1000, 0b0001], 16).unwrap(); + let a = BitList1024::from_raw_bytes(smallvec![0b1100, 0b0001], 16).unwrap(); + let b = BitList1024::from_raw_bytes(smallvec![0b1011, 0b1001], 16).unwrap(); + let c = BitList1024::from_raw_bytes(smallvec![0b1000, 0b0001], 16).unwrap(); assert_eq!(a.intersection(&b), c); assert_eq!(b.intersection(&a), c); @@ -1183,10 +1211,10 @@ mod bitlist { #[test] fn intersection_diff_length() { - let a = BitList1024::from_bytes(vec![0b0010_1110, 0b0010_1011]).unwrap(); - let b = BitList1024::from_bytes(vec![0b0010_1101, 
0b0000_0001]).unwrap(); - let c = BitList1024::from_bytes(vec![0b0010_1100, 0b0000_0001]).unwrap(); - let d = BitList1024::from_bytes(vec![0b0010_1110, 0b1111_1111, 0b1111_1111]).unwrap(); + let a = BitList1024::from_bytes(smallvec![0b0010_1110, 0b0010_1011]).unwrap(); + let b = BitList1024::from_bytes(smallvec![0b0010_1101, 0b0000_0001]).unwrap(); + let c = BitList1024::from_bytes(smallvec![0b0010_1100, 0b0000_0001]).unwrap(); + let d = BitList1024::from_bytes(smallvec![0b0010_1110, 0b1111_1111, 0b1111_1111]).unwrap(); assert_eq!(a.len(), 13); assert_eq!(b.len(), 8); @@ -1200,9 +1228,9 @@ mod bitlist { #[test] fn union() { - let a = BitList1024::from_raw_bytes(vec![0b1100, 0b0001], 16).unwrap(); - let b = BitList1024::from_raw_bytes(vec![0b1011, 0b1001], 16).unwrap(); - let c = BitList1024::from_raw_bytes(vec![0b1111, 0b1001], 16).unwrap(); + let a = BitList1024::from_raw_bytes(smallvec![0b1100, 0b0001], 16).unwrap(); + let b = BitList1024::from_raw_bytes(smallvec![0b1011, 0b1001], 16).unwrap(); + let c = BitList1024::from_raw_bytes(smallvec![0b1111, 0b1001], 16).unwrap(); assert_eq!(a.union(&b), c); assert_eq!(b.union(&a), c); @@ -1213,10 +1241,10 @@ mod bitlist { #[test] fn union_diff_length() { - let a = BitList1024::from_bytes(vec![0b0010_1011, 0b0010_1110]).unwrap(); - let b = BitList1024::from_bytes(vec![0b0000_0001, 0b0010_1101]).unwrap(); - let c = BitList1024::from_bytes(vec![0b0010_1011, 0b0010_1111]).unwrap(); - let d = BitList1024::from_bytes(vec![0b0010_1011, 0b1011_1110, 0b1000_1101]).unwrap(); + let a = BitList1024::from_bytes(smallvec![0b0010_1011, 0b0010_1110]).unwrap(); + let b = BitList1024::from_bytes(smallvec![0b0000_0001, 0b0010_1101]).unwrap(); + let c = BitList1024::from_bytes(smallvec![0b0010_1011, 0b0010_1111]).unwrap(); + let d = BitList1024::from_bytes(smallvec![0b0010_1011, 0b1011_1110, 0b1000_1101]).unwrap(); assert_eq!(a.len(), c.len()); assert_eq!(a.union(&b), c); @@ -1227,10 +1255,10 @@ mod bitlist { #[test] fn difference() { - let 
a = BitList1024::from_raw_bytes(vec![0b1100, 0b0001], 16).unwrap(); - let b = BitList1024::from_raw_bytes(vec![0b1011, 0b1001], 16).unwrap(); - let a_b = BitList1024::from_raw_bytes(vec![0b0100, 0b0000], 16).unwrap(); - let b_a = BitList1024::from_raw_bytes(vec![0b0011, 0b1000], 16).unwrap(); + let a = BitList1024::from_raw_bytes(smallvec![0b1100, 0b0001], 16).unwrap(); + let b = BitList1024::from_raw_bytes(smallvec![0b1011, 0b1001], 16).unwrap(); + let a_b = BitList1024::from_raw_bytes(smallvec![0b0100, 0b0000], 16).unwrap(); + let b_a = BitList1024::from_raw_bytes(smallvec![0b0011, 0b1000], 16).unwrap(); assert_eq!(a.difference(&b), a_b); assert_eq!(b.difference(&a), b_a); @@ -1239,10 +1267,10 @@ mod bitlist { #[test] fn difference_diff_length() { - let a = BitList1024::from_raw_bytes(vec![0b0110, 0b1100, 0b0011], 24).unwrap(); - let b = BitList1024::from_raw_bytes(vec![0b1011, 0b1001], 16).unwrap(); - let a_b = BitList1024::from_raw_bytes(vec![0b0100, 0b0100, 0b0011], 24).unwrap(); - let b_a = BitList1024::from_raw_bytes(vec![0b1001, 0b0001], 16).unwrap(); + let a = BitList1024::from_raw_bytes(smallvec![0b0110, 0b1100, 0b0011], 24).unwrap(); + let b = BitList1024::from_raw_bytes(smallvec![0b1011, 0b1001], 16).unwrap(); + let a_b = BitList1024::from_raw_bytes(smallvec![0b0100, 0b0100, 0b0011], 24).unwrap(); + let b_a = BitList1024::from_raw_bytes(smallvec![0b1001, 0b0001], 16).unwrap(); assert_eq!(a.difference(&b), a_b); assert_eq!(b.difference(&a), b_a); @@ -1250,8 +1278,8 @@ mod bitlist { #[test] fn shift_up() { - let mut a = BitList1024::from_raw_bytes(vec![0b1100_1111, 0b1101_0110], 16).unwrap(); - let mut b = BitList1024::from_raw_bytes(vec![0b1001_1110, 0b1010_1101], 16).unwrap(); + let mut a = BitList1024::from_raw_bytes(smallvec![0b1100_1111, 0b1101_0110], 16).unwrap(); + let mut b = BitList1024::from_raw_bytes(smallvec![0b1001_1110, 0b1010_1101], 16).unwrap(); a.shift_up(1).unwrap(); assert_eq!(a, b); @@ -1265,8 +1293,8 @@ mod bitlist { #[test] fn 
num_set_bits() { - let a = BitList1024::from_raw_bytes(vec![0b1100, 0b0001], 16).unwrap(); - let b = BitList1024::from_raw_bytes(vec![0b1011, 0b1001], 16).unwrap(); + let a = BitList1024::from_raw_bytes(smallvec![0b1100, 0b0001], 16).unwrap(); + let b = BitList1024::from_raw_bytes(smallvec![0b1011, 0b1001], 16).unwrap(); assert_eq!(a.num_set_bits(), 3); assert_eq!(b.num_set_bits(), 5); @@ -1295,4 +1323,10 @@ mod bitlist { assert_eq!(bitfield.ssz_bytes_len(), bytes.len(), "i = {}", i); } } + + // Ensure that the stack size of a BitList is manageable. + #[test] + fn size_of() { + assert_eq!(std::mem::size_of::(), SMALLVEC_LEN + 24); + } } diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index bc013fe42d..be1e6907c2 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -45,6 +45,7 @@ parking_lot = "0.11.1" itertools = "0.10.0" superstruct = "0.4.0" serde_json = "1.0.74" +smallvec = "1.8.0" [dev-dependencies] criterion = "0.3.3" diff --git a/consensus/types/src/attestation.rs b/consensus/types/src/attestation.rs index 1c9ec3bc4d..12586e28d5 100644 --- a/consensus/types/src/attestation.rs +++ b/consensus/types/src/attestation.rs @@ -110,9 +110,34 @@ impl SlotData for Attestation { #[cfg(test)] mod tests { + use super::*; use crate::*; - use super::*; + // Check the in-memory size of an `Attestation`, which is useful for reasoning about memory + // and preventing regressions. + // + // This test will only pass with `blst`, if we run these tests with Milagro or another + // BLS library in future we will have to make it generic. 
+ #[test] + fn size_of() { + use std::mem::size_of; + + let aggregation_bits = + size_of::::MaxValidatorsPerCommittee>>(); + let attestation_data = size_of::(); + let signature = size_of::(); + + assert_eq!(aggregation_bits, 56); + assert_eq!(attestation_data, 128); + assert_eq!(signature, 288 + 16); + + let attestation_expected = aggregation_bits + attestation_data + signature; + assert_eq!(attestation_expected, 488); + assert_eq!( + size_of::>(), + attestation_expected + ); + } ssz_and_tree_hash_tests!(Attestation); } diff --git a/consensus/types/src/test_utils/test_random/bitfield.rs b/consensus/types/src/test_utils/test_random/bitfield.rs index 2ba3576b77..5cb4e7d521 100644 --- a/consensus/types/src/test_utils/test_random/bitfield.rs +++ b/consensus/types/src/test_utils/test_random/bitfield.rs @@ -1,9 +1,10 @@ use super::*; use crate::{BitList, BitVector, Unsigned}; +use smallvec::smallvec; impl TestRandom for BitList { fn random_for_test(rng: &mut impl RngCore) -> Self { - let mut raw_bytes = vec![0; std::cmp::max(1, (N::to_usize() + 7) / 8)]; + let mut raw_bytes = smallvec![0; std::cmp::max(1, (N::to_usize() + 7) / 8)]; rng.fill_bytes(&mut raw_bytes); Self::from_bytes(raw_bytes).expect("we generate a valid BitList") } @@ -11,7 +12,7 @@ impl TestRandom for BitList { impl TestRandom for BitVector { fn random_for_test(rng: &mut impl RngCore) -> Self { - let mut raw_bytes = vec![0; std::cmp::max(1, (N::to_usize() + 7) / 8)]; + let mut raw_bytes = smallvec![0; std::cmp::max(1, (N::to_usize() + 7) / 8)]; rng.fill_bytes(&mut raw_bytes); Self::from_bytes(raw_bytes).expect("we generate a valid BitVector") } From 3ebb8b0244b3d905c1dc2aed7c8efbe9cb2c6ecb Mon Sep 17 00:00:00 2001 From: Age Manning Date: Fri, 18 Feb 2022 02:36:43 +0000 Subject: [PATCH 04/14] Improved peer management (#2993) ## Issue Addressed I noticed in some logs some excess and unnecessary discovery queries.
What was happening was we were pruning our peers down to our outbound target and having some disconnect. When we are below this threshold we try to find more peers (even if we are at our peer limit). The request becomes futile because we have no more peer slots. This PR corrects this issue and advances the pruning mechanism to favour subnet peers. An overview of the new logic added is: - We prune peers down to a target outbound peer count which is higher than the minimum outbound peer count. - We only search for more peers if there is room to do so, and we are below the minimum outbound peer count not the target. So this gives us some buffer for peers to disconnect. The buffer is currently 10%. The modified pruning logic is documented in the code but for reference it should do the following: - Prune peers with bad scores first - If we need to prune more peers, then prune peers that are not subscribed to a long-lived subnet - If we still need to prune peers, then prune peers that we have a higher density of on any given subnet, which should drive for uniform peers across all subnets. This will need a bit of testing as it modifies some significant peer management behaviours in lighthouse. 
--- .../lighthouse_network/src/behaviour/mod.rs | 10 +- .../lighthouse_network/src/discovery/mod.rs | 13 +- .../src/peer_manager/mod.rs | 817 ++++++++++++++++-- .../src/peer_manager/peerdb.rs | 29 + .../src/peer_manager/peerdb/peer_info.rs | 82 ++ .../src/subnet_service/attestation_subnets.rs | 2 +- .../src/subnet_service/sync_subnets.rs | 4 +- 7 files changed, 876 insertions(+), 81 deletions(-) diff --git a/beacon_node/lighthouse_network/src/behaviour/mod.rs b/beacon_node/lighthouse_network/src/behaviour/mod.rs index d3f9b40c42..c0a1fb3f71 100644 --- a/beacon_node/lighthouse_network/src/behaviour/mod.rs +++ b/beacon_node/lighthouse_network/src/behaviour/mod.rs @@ -2,7 +2,9 @@ use crate::behaviour::gossipsub_scoring_parameters::{ lighthouse_gossip_thresholds, PeerScoreSettings, }; use crate::config::gossipsub_config; -use crate::discovery::{subnet_predicate, Discovery, DiscoveryEvent}; +use crate::discovery::{ + subnet_predicate, Discovery, DiscoveryEvent, FIND_NODE_QUERY_CLOSEST_PEERS, +}; use crate::peer_manager::{ config::Config as PeerManagerCfg, peerdb::score::PeerAction, peerdb::score::ReportSource, ConnectionDirection, PeerManager, PeerManagerEvent, @@ -218,7 +220,7 @@ impl Behaviour { let mut discovery = Discovery::new(local_key, &config, network_globals.clone(), log).await?; // start searching for peers - discovery.discover_peers(); + discovery.discover_peers(FIND_NODE_QUERY_CLOSEST_PEERS); // Grab our local ENR FORK ID let enr_fork_id = network_globals @@ -1230,9 +1232,9 @@ impl NetworkBehaviourEventProcess for Behaviou // the network to send a status to this peer self.add_event(BehaviourEvent::StatusPeer(peer_id)); } - PeerManagerEvent::DiscoverPeers => { + PeerManagerEvent::DiscoverPeers(peers_to_find) => { // Peer manager has requested a discovery query for more peers. 
- self.discovery.discover_peers(); + self.discovery.discover_peers(peers_to_find); } PeerManagerEvent::DiscoverSubnetPeers(subnets_to_discover) => { // Peer manager has requested a subnet discovery query for more peers. diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index 4f7ec432b7..ab4d54a1e7 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -63,7 +63,7 @@ const MAX_SUBNETS_IN_QUERY: usize = 3; /// /// We could reduce this constant to speed up queries however at the cost of security. It will /// make it easier to peers to eclipse this node. Kademlia suggests a value of 16. -const FIND_NODE_QUERY_CLOSEST_PEERS: usize = 16; +pub const FIND_NODE_QUERY_CLOSEST_PEERS: usize = 16; /// The threshold for updating `min_ttl` on a connected peer. const DURATION_DIFFERENCE: Duration = Duration::from_millis(1); @@ -317,17 +317,18 @@ impl Discovery { } /// This adds a new `FindPeers` query to the queue if one doesn't already exist. - pub fn discover_peers(&mut self) { + /// The `target_peers` parameter informs discovery to end the query once the target is found. + /// The maximum this can be is 16. + pub fn discover_peers(&mut self, target_peers: usize) { // If the discv5 service isn't running or we are in the process of a query, don't bother queuing a new one. if !self.started || self.find_peer_active { return; } // Immediately start a FindNode query - debug!(self.log, "Starting a peer discovery request"); + let target_peers = std::cmp::min(FIND_NODE_QUERY_CLOSEST_PEERS, target_peers); + debug!(self.log, "Starting a peer discovery request"; "target_peers" => target_peers ); self.find_peer_active = true; - self.start_query(QueryType::FindPeers, FIND_NODE_QUERY_CLOSEST_PEERS, |_| { - true - }); + self.start_query(QueryType::FindPeers, target_peers, |_| true); } /// Processes a request to search for more peers on a subnet. 
diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index 6b8f6fff60..48edd3abb6 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -9,6 +9,7 @@ use discv5::Enr; use hashset_delay::HashSetDelay; use libp2p::identify::IdentifyInfo; use peerdb::{client::ClientKind, BanOperation, BanResult, ScoreUpdateResult}; +use rand::seq::SliceRandom; use slog::{debug, error, warn}; use smallvec::SmallVec; use std::{ @@ -37,17 +38,24 @@ mod network_behaviour; /// requests. This defines the interval in seconds. const HEARTBEAT_INTERVAL: u64 = 30; +/// This is used in the pruning logic. We avoid pruning peers on sync-committees if doing so would +/// lower our peer count below this number. Instead we favour a non-uniform distribution of subnet +/// peers. +pub const MIN_SYNC_COMMITTEE_PEERS: u64 = 2; /// A fraction of `PeerManager::target_peers` that we allow to connect to us in excess of /// `PeerManager::target_peers`. For clarity, if `PeerManager::target_peers` is 50 and /// PEER_EXCESS_FACTOR = 0.1 we allow 10% more nodes, i.e 55. pub const PEER_EXCESS_FACTOR: f32 = 0.1; -/// A fraction of `PeerManager::target_peers` that need to be outbound-only connections. -pub const MIN_OUTBOUND_ONLY_FACTOR: f32 = 0.3; +/// A fraction of `PeerManager::target_peers` that we want to be outbound-only connections. +pub const TARGET_OUTBOUND_ONLY_FACTOR: f32 = 0.3; +/// A fraction of `PeerManager::target_peers` that if we get below, we start a discovery query to +/// reach our target. MIN_OUTBOUND_ONLY_FACTOR must be < TARGET_OUTBOUND_ONLY_FACTOR. +pub const MIN_OUTBOUND_ONLY_FACTOR: f32 = 0.2; /// The fraction of extra peers beyond the PEER_EXCESS_FACTOR that we allow us to dial for when /// requiring subnet peers. 
More specifically, if our target peer limit is 50, and our excess peer /// limit is 55, and we are at 55 peers, the following parameter provisions a few more slots of /// dialing priority peers we need for validator duties. -pub const PRIORITY_PEER_EXCESS: f32 = 0.1; +pub const PRIORITY_PEER_EXCESS: f32 = 0.2; /// The main struct that handles peer's reputation and connection status. pub struct PeerManager { @@ -99,8 +107,8 @@ pub enum PeerManagerEvent { Banned(PeerId, Vec), /// The peer should be unbanned with the associated ip addresses. UnBanned(PeerId, Vec), - /// Request the behaviour to discover more peers. - DiscoverPeers, + /// Request the behaviour to discover more peers and the amount of peers to discover. + DiscoverPeers(usize), /// Request the behaviour to discover peers on subnets. DiscoverSubnetPeers(Vec), } @@ -291,19 +299,7 @@ impl PeerManager { } // Queue another discovery if we need to - let peer_count = self.network_globals.connected_or_dialing_peers(); - let outbound_only_peer_count = self.network_globals.connected_outbound_only_peers(); - let min_outbound_only_target = - (self.target_peers as f32 * MIN_OUTBOUND_ONLY_FACTOR).ceil() as usize; - - if self.discovery_enabled - && (peer_count < self.target_peers.saturating_sub(to_dial_peers.len()) - || outbound_only_peer_count < min_outbound_only_target) - { - // We need more peers, re-queue a discovery lookup. - debug!(self.log, "Starting a new peer discovery query"; "connected_peers" => peer_count, "target_peers" => self.target_peers); - self.events.push(PeerManagerEvent::DiscoverPeers); - } + self.maintain_peer_count(to_dial_peers.len()); to_dial_peers } @@ -342,6 +338,23 @@ impl PeerManager { as usize } + /// The minimum number of outbound peers that we reach before we start another discovery query. 
+ fn min_outbound_only_peers(&self) -> usize { + (self.target_peers as f32 * MIN_OUTBOUND_ONLY_FACTOR).ceil() as usize + } + + /// The minimum number of outbound peers that we reach before we start another discovery query. + fn target_outbound_peers(&self) -> usize { + (self.target_peers as f32 * TARGET_OUTBOUND_ONLY_FACTOR).ceil() as usize + } + + /// The maximum number of peers that are connected or dialing before we refuse to do another + /// discovery search for more outbound peers. We can use up to half the priority peer excess allocation. + fn max_outbound_dialing_peers(&self) -> usize { + (self.target_peers as f32 * (1.0 + PEER_EXCESS_FACTOR + PRIORITY_PEER_EXCESS / 2.0)).ceil() + as usize + } + /* Notifications from the Swarm */ // A peer is being dialed. @@ -363,11 +376,12 @@ impl PeerManager { /// Reports whether the peer limit is reached in which case we stop allowing new incoming /// connections. pub fn peer_limit_reached(&self, count_dialing: bool) -> bool { - let max_peers = self.max_peers(); if count_dialing { - self.network_globals.connected_or_dialing_peers() >= max_peers + // This is an incoming connection so limit by the standard max peers + self.network_globals.connected_or_dialing_peers() >= self.max_peers() } else { - self.network_globals.connected_peers() >= max_peers + // We dialed this peer, allow up to max_outbound_dialing_peers + self.network_globals.connected_peers() >= self.max_outbound_dialing_peers() } } @@ -819,6 +833,278 @@ impl PeerManager { } } + /// This function checks the status of our current peers and optionally requests a discovery + /// query if we need to find more peers to maintain the current number of peers + fn maintain_peer_count(&mut self, dialing_peers: usize) { + // Check if we need to do a discovery lookup + if self.discovery_enabled { + let peer_count = self.network_globals.connected_or_dialing_peers(); + let outbound_only_peer_count = self.network_globals.connected_outbound_only_peers(); + let wanted_peers = if 
peer_count < self.target_peers.saturating_sub(dialing_peers) { + // We need more peers in general. + // The maximum discovery query is for 16 peers, but we can search for less if + // needed. + std::cmp::min( + self.target_peers.saturating_sub(dialing_peers) - peer_count, + 16, + ) + } else if outbound_only_peer_count < self.min_outbound_only_peers() + && peer_count < self.max_outbound_dialing_peers() + { + std::cmp::min( + self.max_outbound_dialing_peers() + .saturating_sub(dialing_peers) + - peer_count, + 16, + ) + } else { + 0 + }; + + if wanted_peers != 0 { + // We need more peers, re-queue a discovery lookup. + debug!(self.log, "Starting a new peer discovery query"; "connected" => peer_count, "target" => self.target_peers, "outbound" => outbound_only_peer_count, "wanted" => wanted_peers); + self.events + .push(PeerManagerEvent::DiscoverPeers(wanted_peers)); + } + } + } + + /// Remove excess peers back down to our target values. + /// This prioritises peers with a good score and uniform distribution of peers across + /// subnets. + /// + /// The logic for the peer pruning is as follows: + /// + /// Global rules: + /// - Always maintain peers we need for a validator duty. + /// - Do not prune outbound peers to exceed our outbound target. + /// - Do not prune more peers than our target peer count. + /// - If we have an option to remove a number of peers, remove ones that have the least + /// long-lived subnets. + /// - When pruning peers based on subnet count. If multiple peers can be chosen, choose a peer + /// that is not subscribed to a long-lived sync committee subnet. + /// - When pruning peers based on subnet count, do not prune a peer that would lower us below the + /// MIN_SYNC_COMMITTEE_PEERS peer count. To keep it simple, we favour a minimum number of sync-committee-peers over + /// uniformity subnet peers. NOTE: We could apply more sophisticated logic, but the code is + /// simpler and easier to maintain if we take this approach. 
If we are pruning subnet peers + /// below the MIN_SYNC_COMMITTEE_PEERS and maintaining the sync committee peers, this should be + /// fine as subnet peers are more likely to be found than sync-committee-peers. Also, we're + /// in a bit of trouble anyway if we have so few peers on subnets. The + /// MIN_SYNC_COMMITTEE_PEERS + /// number should be set low as an absolute lower bound to maintain peers on the sync + /// committees. + /// + /// Prune peers in the following order: + /// 1. Remove worst scoring peers + /// 2. Remove peers that are not subscribed to a subnet (they have less value) + /// 3. Remove peers that we have many on any particular subnet + /// 4. Randomly remove peers if all the above are satisfied + /// + fn prune_excess_peers(&mut self) { + // The current number of connected peers. + let connected_peer_count = self.network_globals.connected_peers(); + if connected_peer_count <= self.target_peers { + // No need to prune peers + return; + } + + // Keep a list of peers we are pruning. + let mut peers_to_prune = std::collections::HashSet::new(); + let connected_outbound_peer_count = self.network_globals.connected_outbound_only_peers(); + + // Keep track of the number of outbound peers we are pruning. + let mut outbound_peers_pruned = 0; + + macro_rules! prune_peers { + ($filter: expr) => { + for (peer_id, info) in self + .network_globals + .peers + .read() + .worst_connected_peers() + .iter() + .filter(|(_, info)| !info.has_future_duty() && $filter(*info)) + { + if peers_to_prune.len() + >= connected_peer_count.saturating_sub(self.target_peers) + { + // We have found all the peers we need to drop, end. + break; + } + if peers_to_prune.contains(*peer_id) { + continue; + } + // Only remove up to the target outbound peer count. 
+ if info.is_outbound_only() { + if self.target_outbound_peers() + outbound_peers_pruned + < connected_outbound_peer_count + { + outbound_peers_pruned += 1; + } else { + continue; + } + } + peers_to_prune.insert(**peer_id); + } + }; + } + + // 1. Look through peers that have the worst score (ignoring non-penalized scored peers). + prune_peers!(|info: &PeerInfo| { info.score().score() < 0.0 }); + + // 2. Attempt to remove peers that are not subscribed to a subnet, if we still need to + // prune more. + if peers_to_prune.len() < connected_peer_count.saturating_sub(self.target_peers) { + prune_peers!(|info: &PeerInfo| { !info.has_long_lived_subnet() }); + } + + // 3. and 4. Remove peers that are too grouped on any given subnet. If all subnets are + // uniformly distributed, remove random peers. + if peers_to_prune.len() < connected_peer_count.saturating_sub(self.target_peers) { + // Of our connected peers, build a map from subnet_id -> Vec<(PeerId, PeerInfo)> + let mut subnet_to_peer: HashMap)>> = + HashMap::new(); + // These variables are used to track if a peer is in a long-lived sync-committee as we + // may wish to retain this peer over others when pruning. + let mut sync_committee_peer_count: HashMap = HashMap::new(); + let mut peer_to_sync_committee: HashMap< + PeerId, + std::collections::HashSet, + > = HashMap::new(); + + for (peer_id, info) in self.network_globals.peers.read().connected_peers() { + // Ignore peers we are already pruning + if peers_to_prune.contains(peer_id) { + continue; + } + + // Count based on long-lived subnets not short-lived subnets + // NOTE: There are only 4 sync committees. These are likely to be denser than the + // subnets, so our priority here to make the subnet peer count uniform, ignoring + // the dense sync committees. 
+ for subnet in info.long_lived_subnets() { + match subnet { + Subnet::Attestation(_) => { + subnet_to_peer + .entry(subnet) + .or_insert_with(Vec::new) + .push((*peer_id, info.clone())); + } + Subnet::SyncCommittee(id) => { + *sync_committee_peer_count.entry(id).or_default() += 1; + peer_to_sync_committee + .entry(*peer_id) + .or_default() + .insert(id); + } + } + } + } + + // Add to the peers to prune mapping + while peers_to_prune.len() < connected_peer_count.saturating_sub(self.target_peers) { + if let Some((_, peers_on_subnet)) = subnet_to_peer + .iter_mut() + .max_by_key(|(_, peers)| peers.len()) + { + // and the subnet still contains peers + if !peers_on_subnet.is_empty() { + // Order the peers by the number of subnets they are long-lived + // subscribed too, shuffle equal peers. + peers_on_subnet.shuffle(&mut rand::thread_rng()); + peers_on_subnet.sort_by_key(|(_, info)| info.long_lived_subnet_count()); + + // Try and find a candidate peer to remove from the subnet. + // We ignore peers that would put us below our target outbound peers + // and we currently ignore peers that would put us below our + // sync-committee threshold, if we can avoid it. + + let mut removed_peer_index = None; + for (index, (candidate_peer, info)) in peers_on_subnet.iter().enumerate() { + // Ensure we don't remove too many outbound peers + if info.is_outbound_only() { + if self.target_outbound_peers() + < connected_outbound_peer_count + .saturating_sub(outbound_peers_pruned) + { + outbound_peers_pruned += 1; + } else { + // Restart the main loop with the outbound peer removed from + // the list. This will lower the peers per subnet count and + // potentially a new subnet may be chosen to remove peers. This + // can occur recursively until we have no peers left to choose + // from. 
+ continue; + } + } + + // Check the sync committee + if let Some(subnets) = peer_to_sync_committee.get(candidate_peer) { + // The peer is subscribed to some long-lived sync-committees + // Of all the subnets this peer is subscribed too, the minimum + // peer count of all of them is min_subnet_count + if let Some(min_subnet_count) = subnets + .iter() + .filter_map(|v| sync_committee_peer_count.get(v).copied()) + .min() + { + // If the minimum count is our target or lower, we + // shouldn't remove this peer, because it drops us lower + // than our target + if min_subnet_count <= MIN_SYNC_COMMITTEE_PEERS { + // Do not drop this peer in this pruning interval + continue; + } + } + } + + // This peer is suitable to be pruned + removed_peer_index = Some(index); + break; + } + + // If we have successfully found a candidate peer to prune, prune it, + // otherwise all peers on this subnet should not be removed due to our + // outbound limit or min_subnet_count. In this case, we remove all + // peers from the pruning logic and try another subnet. + if let Some(index) = removed_peer_index { + let (candidate_peer, _) = peers_on_subnet.remove(index); + // Remove pruned peers from other subnet counts + for subnet_peers in subnet_to_peer.values_mut() { + subnet_peers.retain(|(peer_id, _)| peer_id != &candidate_peer); + } + // Remove pruned peers from all sync-committee counts + if let Some(known_sync_committes) = + peer_to_sync_committee.get(&candidate_peer) + { + for sync_committee in known_sync_committes { + if let Some(sync_committee_count) = + sync_committee_peer_count.get_mut(sync_committee) + { + *sync_committee_count = + sync_committee_count.saturating_sub(1); + } + } + } + peers_to_prune.insert(candidate_peer); + } else { + peers_on_subnet.clear(); + } + continue; + } + } + // If there are no peers left to prune exit. + break; + } + } + + // Disconnect the pruned peers. 
+ for peer_id in peers_to_prune { + self.disconnect_peer(peer_id, GoodbyeReason::TooManyPeers); + } + } + /// The Peer manager's heartbeat maintains the peer count and maintains peer reputations. /// /// It will request discovery queries if the peer count has not reached the desired number of @@ -826,19 +1112,15 @@ impl PeerManager { /// /// NOTE: Discovery will only add a new query if one isn't already queued. fn heartbeat(&mut self) { - let peer_count = self.network_globals.connected_or_dialing_peers(); - let mut outbound_only_peer_count = self.network_globals.connected_outbound_only_peers(); - let min_outbound_only_target = - (self.target_peers as f32 * MIN_OUTBOUND_ONLY_FACTOR).ceil() as usize; + // Optionally run a discovery query if we need more peers. + self.maintain_peer_count(0); - if self.discovery_enabled - && (peer_count < self.target_peers - || outbound_only_peer_count < min_outbound_only_target) - { - // If we need more peers, queue a discovery lookup. - debug!(self.log, "Starting a new peer discovery query"; "connected_peers" => peer_count, "target_peers" => self.target_peers); - self.events.push(PeerManagerEvent::DiscoverPeers); - } + // Cleans up the connection state of dialing peers. + // Libp2p dials peer-ids, but sometimes the response is from another peer-id or libp2p + // returns dial errors without a peer-id attached. This function reverts peers that have a + // dialing status long than DIAL_TIMEOUT seconds to a disconnected status. This is important because + // we count the number of dialing peers in our inbound connections. + self.network_globals.peers.write().cleanup_dialing_peers(); // Updates peer's scores and unban any peers if required. let actions = self.network_globals.peers.write().update_scores(); @@ -852,40 +1134,9 @@ impl PeerManager { // Maintain minimum count for sync committee peers. 
self.maintain_sync_committee_peers(); - // Keep a list of peers we are disconnecting - let mut disconnecting_peers = Vec::new(); - - let connected_peer_count = self.network_globals.connected_peers(); - if connected_peer_count > self.target_peers { - // Remove excess peers with the worst scores, but keep subnet peers. - // Must also ensure that the outbound-only peer count does not go below the minimum threshold. - outbound_only_peer_count = self.network_globals.connected_outbound_only_peers(); - let mut n_outbound_removed = 0; - for (peer_id, info) in self - .network_globals - .peers - .read() - .worst_connected_peers() - .iter() - .filter(|(_, info)| !info.has_future_duty()) - { - if disconnecting_peers.len() == connected_peer_count - self.target_peers { - break; - } - if info.is_outbound_only() { - if min_outbound_only_target < outbound_only_peer_count - n_outbound_removed { - n_outbound_removed += 1; - } else { - continue; - } - } - disconnecting_peers.push(**peer_id); - } - } - - for peer_id in disconnecting_peers { - self.disconnect_peer(peer_id, GoodbyeReason::TooManyPeers); - } + // Prune any excess peers back to our target in such a way that incentivises good scores and + // a uniform distribution of subnets. + self.prune_excess_peers(); } // Update metrics related to peer scoring. @@ -977,7 +1228,7 @@ enum ConnectingType { mod tests { use super::*; use slog::{o, Drain}; - use types::MinimalEthSpec as E; + use types::MainnetEthSpec as E; pub fn build_log(level: slog::Level, enabled: bool) -> slog::Logger { let decorator = slog_term::TermDecorator::new().build(); @@ -1212,4 +1463,434 @@ mod tests { // the number of connected peers updates and we will not remove too many peers. assert_eq!(peer_manager.network_globals.connected_or_dialing_peers(), 3); } + + #[tokio::test] + /// We want to test that the peer manager removes peers that are not subscribed to a subnet as + /// a priority over all else. 
+ async fn test_peer_manager_remove_non_subnet_peers_when_all_healthy() { + let mut peer_manager = build_peer_manager(3).await; + + // Create 5 peers to connect to. + let peer0 = PeerId::random(); + let peer1 = PeerId::random(); + let peer2 = PeerId::random(); + let peer3 = PeerId::random(); + let peer4 = PeerId::random(); + + println!("{}", peer0); + println!("{}", peer1); + println!("{}", peer2); + println!("{}", peer3); + println!("{}", peer4); + + peer_manager.inject_connect_ingoing(&peer0, "/ip4/0.0.0.0".parse().unwrap(), None); + peer_manager.inject_connect_ingoing(&peer1, "/ip4/0.0.0.0".parse().unwrap(), None); + peer_manager.inject_connect_ingoing(&peer2, "/ip4/0.0.0.0".parse().unwrap(), None); + peer_manager.inject_connect_ingoing(&peer3, "/ip4/0.0.0.0".parse().unwrap(), None); + peer_manager.inject_connect_ingoing(&peer4, "/ip4/0.0.0.0".parse().unwrap(), None); + + // Have some of the peers be on a long-lived subnet + let mut attnets = crate::types::EnrAttestationBitfield::::new(); + attnets.set(1, true).unwrap(); + let metadata = crate::rpc::MetaDataV2 { + seq_number: 0, + attnets, + syncnets: Default::default(), + }; + peer_manager + .network_globals + .peers + .write() + .peer_info_mut(&peer0) + .unwrap() + .set_meta_data(MetaData::V2(metadata)); + peer_manager + .network_globals + .peers + .write() + .add_subscription(&peer0, Subnet::Attestation(1.into())); + + let mut attnets = crate::types::EnrAttestationBitfield::::new(); + attnets.set(10, true).unwrap(); + let metadata = crate::rpc::MetaDataV2 { + seq_number: 0, + attnets, + syncnets: Default::default(), + }; + peer_manager + .network_globals + .peers + .write() + .peer_info_mut(&peer2) + .unwrap() + .set_meta_data(MetaData::V2(metadata)); + peer_manager + .network_globals + .peers + .write() + .add_subscription(&peer2, Subnet::Attestation(10.into())); + + let mut syncnets = crate::types::EnrSyncCommitteeBitfield::::new(); + syncnets.set(3, true).unwrap(); + let metadata = crate::rpc::MetaDataV2 { 
+ seq_number: 0, + attnets: Default::default(), + syncnets, + }; + peer_manager + .network_globals + .peers + .write() + .peer_info_mut(&peer4) + .unwrap() + .set_meta_data(MetaData::V2(metadata)); + peer_manager + .network_globals + .peers + .write() + .add_subscription(&peer4, Subnet::SyncCommittee(3.into())); + + // Perform the heartbeat. + peer_manager.heartbeat(); + // Tests that when we are over the target peer limit, after disconnecting an unhealthy peer, + // the number of connected peers updates and we will not remove too many peers. + assert_eq!(peer_manager.network_globals.connected_or_dialing_peers(), 3); + + // Check that we removed the peers that were not subscribed to any subnet + let mut peers_should_have_removed = std::collections::HashSet::new(); + peers_should_have_removed.insert(peer1); + peers_should_have_removed.insert(peer3); + for (peer, _) in peer_manager + .network_globals + .peers + .read() + .peers() + .filter(|(_, info)| { + matches!( + info.connection_status(), + PeerConnectionStatus::Disconnecting { .. } + ) + }) + { + println!("{}", peer); + assert!(peers_should_have_removed.remove(peer)); + } + // Ensure we removed all the peers + assert!(peers_should_have_removed.is_empty()); + } + + #[tokio::test] + /// Test the pruning logic to remove grouped subnet peers + async fn test_peer_manager_prune_grouped_subnet_peers() { + let target = 9; + let mut peer_manager = build_peer_manager(target).await; + + // Create 5 peers to connect to. 
+ let mut peers = Vec::new(); + for x in 0..20 { + // Make 20 peers and group peers as: + // id mod % 4 + // except for the last 5 peers which all go on their own subnets + // So subnets 0-2 should have 4 peers subnet 3 should have 3 and 15-19 should have 1 + let subnet: u64 = { + if x < 15 { + x % 4 + } else { + x + } + }; + + let peer = PeerId::random(); + peer_manager.inject_connect_ingoing(&peer, "/ip4/0.0.0.0".parse().unwrap(), None); + + // Have some of the peers be on a long-lived subnet + let mut attnets = crate::types::EnrAttestationBitfield::::new(); + attnets.set(subnet as usize, true).unwrap(); + let metadata = crate::rpc::MetaDataV2 { + seq_number: 0, + attnets, + syncnets: Default::default(), + }; + peer_manager + .network_globals + .peers + .write() + .peer_info_mut(&peer) + .unwrap() + .set_meta_data(MetaData::V2(metadata)); + peer_manager + .network_globals + .peers + .write() + .add_subscription(&peer, Subnet::Attestation(subnet.into())); + println!("{},{},{}", x, subnet, peer); + peers.push(peer); + } + + // Perform the heartbeat. + peer_manager.heartbeat(); + + // Tests that when we are over the target peer limit, after disconnecting an unhealthy peer, + // the number of connected peers updates and we will not remove too many peers. + assert_eq!( + peer_manager.network_globals.connected_or_dialing_peers(), + target + ); + + // Check that we removed the peers that were not subscribed to any subnet + // Should remove peers from subnet 0-2 first. Removing 3 peers subnets 0-3 now have 3 + // peers. + // Should then remove 8 peers each from subnets 1-4. New total: 11 peers. + // Therefore the remaining peer set should be each on their own subnet. 
+ // Lets check this: + + let connected_peers: std::collections::HashSet<_> = peer_manager + .network_globals + .peers + .read() + .connected_or_dialing_peers() + .cloned() + .collect(); + + for peer in connected_peers.iter() { + let position = peers.iter().position(|peer_id| peer_id == peer).unwrap(); + println!("{},{}", position, peer); + } + + println!(); + + for peer in connected_peers.iter() { + let position = peers.iter().position(|peer_id| peer_id == peer).unwrap(); + println!("{},{}", position, peer); + + if position < 15 { + let y = position % 4; + for x in 0..4 { + let alternative_index = y + 4 * x; + if alternative_index != position && alternative_index < 15 { + // Make sure a peer on the same subnet has been removed + println!( + "Check against: {}, {}", + alternative_index, &peers[alternative_index] + ); + assert!(!connected_peers.contains(&peers[alternative_index])); + } + } + } + } + } + + /// Test the pruning logic to prioritise peers with the most subnets + /// + /// Create 6 peers. + /// Peer0: None + /// Peer1 : Subnet 1,2,3 + /// Peer2 : Subnet 1,2 + /// Peer3 : Subnet 3 + /// Peer4 : Subnet 1 + /// Peer5 : Subnet 2 + /// + /// Prune 3 peers: Should be Peer0, Peer 4 and Peer 5 because (4 and 5) are both on the subnet with the + /// most peers and have the least subscribed long-lived subnets. And peer 0 because it has no + /// long-lived subnet. + #[tokio::test] + async fn test_peer_manager_prune_subnet_peers_most_subscribed() { + let target = 3; + let mut peer_manager = build_peer_manager(target).await; + + // Create 6 peers to connect to. 
+ let mut peers = Vec::new(); + for x in 0..6 { + let peer = PeerId::random(); + peer_manager.inject_connect_ingoing(&peer, "/ip4/0.0.0.0".parse().unwrap(), None); + + // Have some of the peers be on a long-lived subnet + let mut attnets = crate::types::EnrAttestationBitfield::::new(); + + match x { + 0 => {} + 1 => { + attnets.set(1, true).unwrap(); + attnets.set(2, true).unwrap(); + attnets.set(3, true).unwrap(); + } + 2 => { + attnets.set(1, true).unwrap(); + attnets.set(2, true).unwrap(); + } + 3 => { + attnets.set(3, true).unwrap(); + } + 4 => { + attnets.set(1, true).unwrap(); + } + 5 => { + attnets.set(2, true).unwrap(); + } + _ => unreachable!(), + } + + let metadata = crate::rpc::MetaDataV2 { + seq_number: 0, + attnets, + syncnets: Default::default(), + }; + peer_manager + .network_globals + .peers + .write() + .peer_info_mut(&peer) + .unwrap() + .set_meta_data(MetaData::V2(metadata)); + let long_lived_subnets = peer_manager + .network_globals + .peers + .read() + .peer_info(&peer) + .unwrap() + .long_lived_subnets(); + for subnet in long_lived_subnets { + println!("Subnet: {:?}", subnet); + peer_manager + .network_globals + .peers + .write() + .add_subscription(&peer, subnet); + } + println!("{},{}", x, peer); + peers.push(peer); + } + + // Perform the heartbeat. + peer_manager.heartbeat(); + + // Tests that when we are over the target peer limit, after disconnecting an unhealthy peer, + // the number of connected peers updates and we will not remove too many peers. 
+ assert_eq!( + peer_manager.network_globals.connected_or_dialing_peers(), + target + ); + + // Check that we removed peers 4 and 5 + let connected_peers: std::collections::HashSet<_> = peer_manager + .network_globals + .peers + .read() + .connected_or_dialing_peers() + .cloned() + .collect(); + + assert!(!connected_peers.contains(&peers[0])); + assert!(!connected_peers.contains(&peers[4])); + assert!(!connected_peers.contains(&peers[5])); + } + + /// Test the pruning logic to prioritise peers with the most subnets, but not at the expense of + /// removing our few sync-committee subnets. + /// + /// Create 6 peers. + /// Peer0: None + /// Peer1 : Subnet 1,2,3, + /// Peer2 : Subnet 1,2, + /// Peer3 : Subnet 3 + /// Peer4 : Subnet 1,2, Sync-committee-1 + /// Peer5 : Subnet 1,2, Sync-committee-2 + /// + /// Prune 3 peers: Should be Peer0, Peer1 and Peer2 because (4 and 5 are on a sync-committee) + #[tokio::test] + async fn test_peer_manager_prune_subnet_peers_sync_committee() { + let target = 3; + let mut peer_manager = build_peer_manager(target).await; + + // Create 6 peers to connect to. 
+ let mut peers = Vec::new(); + for x in 0..6 { + let peer = PeerId::random(); + peer_manager.inject_connect_ingoing(&peer, "/ip4/0.0.0.0".parse().unwrap(), None); + + // Have some of the peers be on a long-lived subnet + let mut attnets = crate::types::EnrAttestationBitfield::::new(); + let mut syncnets = crate::types::EnrSyncCommitteeBitfield::::new(); + + match x { + 0 => {} + 1 => { + attnets.set(1, true).unwrap(); + attnets.set(2, true).unwrap(); + attnets.set(3, true).unwrap(); + } + 2 => { + attnets.set(1, true).unwrap(); + attnets.set(2, true).unwrap(); + } + 3 => { + attnets.set(3, true).unwrap(); + } + 4 => { + attnets.set(1, true).unwrap(); + attnets.set(2, true).unwrap(); + syncnets.set(1, true).unwrap(); + } + 5 => { + attnets.set(1, true).unwrap(); + attnets.set(2, true).unwrap(); + syncnets.set(2, true).unwrap(); + } + _ => unreachable!(), + } + + let metadata = crate::rpc::MetaDataV2 { + seq_number: 0, + attnets, + syncnets, + }; + peer_manager + .network_globals + .peers + .write() + .peer_info_mut(&peer) + .unwrap() + .set_meta_data(MetaData::V2(metadata)); + let long_lived_subnets = peer_manager + .network_globals + .peers + .read() + .peer_info(&peer) + .unwrap() + .long_lived_subnets(); + println!("{},{}", x, peer); + for subnet in long_lived_subnets { + println!("Subnet: {:?}", subnet); + peer_manager + .network_globals + .peers + .write() + .add_subscription(&peer, subnet); + } + peers.push(peer); + } + + // Perform the heartbeat. + peer_manager.heartbeat(); + + // Tests that when we are over the target peer limit, after disconnecting an unhealthy peer, + // the number of connected peers updates and we will not remove too many peers. 
+ assert_eq!( + peer_manager.network_globals.connected_or_dialing_peers(), + target + ); + + // Check that we removed peers 4 and 5 + let connected_peers: std::collections::HashSet<_> = peer_manager + .network_globals + .peers + .read() + .connected_or_dialing_peers() + .cloned() + .collect(); + + assert!(!connected_peers.contains(&peers[0])); + assert!(!connected_peers.contains(&peers[1])); + assert!(!connected_peers.contains(&peers[2])); + } } diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs index cddff1218c..1f44488a56 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs @@ -29,6 +29,9 @@ const BANNED_PEERS_PER_IP_THRESHOLD: usize = 5; /// Relative factor of peers that are allowed to have a negative gossipsub score without penalizing /// them in lighthouse. const ALLOWED_NEGATIVE_GOSSIPSUB_FACTOR: f32 = 0.1; +/// The time we allow peers to be in the dialing state in our PeerDb before we revert them to a +/// disconnected state. +const DIAL_TIMEOUT: u64 = 15; /// Storage of known peers, their reputation and information pub struct PeerDB { @@ -322,6 +325,32 @@ impl PeerDB { /* Mutability */ + /// Cleans up the connection state of dialing peers. + // Libp2p dial's peerids, but sometimes the response is from another peer-id or libp2p + // returns dial errors without a peer-id attached. This function reverts peers that have a + // dialing status longer than DIAL_TIMEOUT seconds to a disconnected status. This is important because + // we count the number of dialing peers in our inbound connections. 
+ pub fn cleanup_dialing_peers(&mut self) { + let peers_to_disconnect: Vec<_> = self + .peers + .iter() + .filter_map(|(peer_id, info)| { + if let PeerConnectionStatus::Dialing { since } = info.connection_status() { + if (*since) + std::time::Duration::from_secs(DIAL_TIMEOUT) + < std::time::Instant::now() + { + return Some(*peer_id); + } + } + None + }) + .collect(); + + for peer_id in peers_to_disconnect { + self.update_connection_state(&peer_id, NewConnectionState::Disconnected); + } + } + /// Allows the sync module to update sync status' of peers. Returns None, if the peer doesn't /// exist and returns Some(bool) representing if the sync state was modified. pub fn update_sync_status( diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs index 941ca7e6c9..6273356b8f 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs @@ -1,6 +1,7 @@ use super::client::Client; use super::score::{PeerAction, Score, ScoreState}; use super::sync_status::SyncStatus; +use crate::discovery::Eth2Enr; use crate::Multiaddr; use crate::{rpc::MetaData, types::Subnet}; use discv5::Enr; @@ -139,11 +140,92 @@ impl PeerInfo { self.enr.as_ref() } + /// An iterator over all the subnets this peer is subscribed to. + pub fn subnets(&self) -> impl Iterator { + self.subnets.iter() + } + + /// Returns the number of long lived subnets a peer is subscribed to. + // NOTE: This currently excludes sync committee subnets + pub fn long_lived_subnet_count(&self) -> usize { + if let Some(meta_data) = self.meta_data.as_ref() { + return meta_data.attnets().num_set_bits(); + } else if let Some(enr) = self.enr.as_ref() { + if let Ok(attnets) = enr.attestation_bitfield::() { + return attnets.num_set_bits(); + } + } + 0 + } + + /// Returns an iterator over the long-lived subnets if it has any. 
+ pub fn long_lived_subnets(&self) -> Vec { + let mut long_lived_subnets = Vec::new(); + // Check the meta_data + if let Some(meta_data) = self.meta_data.as_ref() { + for subnet in 0..=meta_data.attnets().highest_set_bit().unwrap_or(0) { + if meta_data.attnets().get(subnet).unwrap_or(false) { + long_lived_subnets.push(Subnet::Attestation((subnet as u64).into())); + } + } + + if let Ok(syncnet) = meta_data.syncnets() { + for subnet in 0..=syncnet.highest_set_bit().unwrap_or(0) { + if syncnet.get(subnet).unwrap_or(false) { + long_lived_subnets.push(Subnet::SyncCommittee((subnet as u64).into())); + } + } + } + } else if let Some(enr) = self.enr.as_ref() { + if let Ok(attnets) = enr.attestation_bitfield::() { + for subnet in 0..=attnets.highest_set_bit().unwrap_or(0) { + if attnets.get(subnet).unwrap_or(false) { + long_lived_subnets.push(Subnet::Attestation((subnet as u64).into())); + } + } + } + + if let Ok(syncnets) = enr.sync_committee_bitfield::() { + for subnet in 0..=syncnets.highest_set_bit().unwrap_or(0) { + if syncnets.get(subnet).unwrap_or(false) { + long_lived_subnets.push(Subnet::SyncCommittee((subnet as u64).into())); + } + } + } + } + long_lived_subnets + } + /// Returns if the peer is subscribed to a given `Subnet` from the gossipsub subscriptions. pub fn on_subnet_gossipsub(&self, subnet: &Subnet) -> bool { self.subnets.contains(subnet) } + /// Returns true if the peer is connected to a long-lived subnet. + pub fn has_long_lived_subnet(&self) -> bool { + // Check the meta_data + if let Some(meta_data) = self.meta_data.as_ref() { + if !meta_data.attnets().is_zero() && !self.subnets.is_empty() { + return true; + } + if let Ok(sync) = meta_data.syncnets() { + if !sync.is_zero() { + return true; + } + } + } + + // We may not have the metadata but may have an ENR. 
Lets check that + if let Some(enr) = self.enr.as_ref() { + if let Ok(attnets) = enr.attestation_bitfield::() { + if !attnets.is_zero() && !self.subnets.is_empty() { + return true; + } + } + } + false + } + /// Returns the seen addresses of the peer. pub fn seen_addresses(&self) -> impl Iterator + '_ { self.seen_addresses.iter() diff --git a/beacon_node/network/src/subnet_service/attestation_subnets.rs b/beacon_node/network/src/subnet_service/attestation_subnets.rs index 5c87062e2c..2b0fe6f55a 100644 --- a/beacon_node/network/src/subnet_service/attestation_subnets.rs +++ b/beacon_node/network/src/subnet_service/attestation_subnets.rs @@ -23,7 +23,7 @@ use crate::metrics; /// The minimum number of slots ahead that we attempt to discover peers for a subscription. If the /// slot is less than this number, skip the peer discovery process. -/// Subnet discovery query takes atmost 30 secs, 2 slots take 24s. +/// Subnet discovery query takes at most 30 secs, 2 slots take 24s. const MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD: u64 = 2; /// The time (in slots) before a last seen validator is considered absent and we unsubscribe from the random /// gossip topics that we subscribed to due to the validator connection. diff --git a/beacon_node/network/src/subnet_service/sync_subnets.rs b/beacon_node/network/src/subnet_service/sync_subnets.rs index 51fef235a1..9e92f62250 100644 --- a/beacon_node/network/src/subnet_service/sync_subnets.rs +++ b/beacon_node/network/src/subnet_service/sync_subnets.rs @@ -21,7 +21,7 @@ use crate::metrics; /// The minimum number of slots ahead that we attempt to discover peers for a subscription. If the /// slot is less than this number, skip the peer discovery process. -/// Subnet discovery query takes atmost 30 secs, 2 slots take 24s. +/// Subnet discovery query takes at most 30 secs, 2 slots take 24s. const MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD: u64 = 2; /// A particular subnet at a given slot. 
@@ -115,7 +115,7 @@ impl SyncCommitteeService { metrics::inc_counter(&metrics::SYNC_COMMITTEE_SUBSCRIPTION_REQUESTS); //NOTE: We assume all subscriptions have been verified before reaching this service - // Registers the validator with the subnet service. + // Registers the validator with the subnet service. // This will subscribe to long-lived random subnets if required. trace!(self.log, "Sync committee subscription"; From c8019caba64a5116935c888cfa72b1647dab51bb Mon Sep 17 00:00:00 2001 From: tim gretler Date: Fri, 18 Feb 2022 02:36:44 +0000 Subject: [PATCH 05/14] Fix sync committee polling for 0 validators (#2999) ## Issue Addressed #2953 ## Proposed Changes Adds empty local validator check. ## Additional Info Two other options: - add check inside `local_index` collection. Instead of after collection. - Move `local_index` collection to the beginning of the `poll_sync_committee_duties` function and combine sync committee with altair fork check. --- validator_client/src/duties_service/sync.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/validator_client/src/duties_service/sync.rs b/validator_client/src/duties_service/sync.rs index f61c600e9e..02f45ebc45 100644 --- a/validator_client/src/duties_service/sync.rs +++ b/validator_client/src/duties_service/sync.rs @@ -399,6 +399,16 @@ pub async fn poll_sync_committee_duties_for_period sync_committee_period, + ); + return Ok(()); + } + debug!( log, "Fetching sync committee duties"; From 56b2ec6b29337a44ed66ebf6044b6a936d2bff78 Mon Sep 17 00:00:00 2001 From: eklm Date: Fri, 18 Feb 2022 05:32:00 +0000 Subject: [PATCH 06/14] Allow proposer duties request for the next epoch (#2963) ## Issue Addressed Closes #2880 ## Proposed Changes Support requests to the next epoch in proposer_duties api. 
## Additional Info Implemented with skipping proposer cache for this case because the cache for the future epoch will be missed every new slot as dependent_root is changed and we don't want to "wash it out" by saving additional values. --- Cargo.lock | 1 + beacon_node/http_api/Cargo.toml | 1 + beacon_node/http_api/src/proposer_duties.rs | 54 +++++++++++++-------- beacon_node/http_api/tests/tests.rs | 46 +++++++++++++----- 4 files changed, 71 insertions(+), 31 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c2ffc9a376..cb51cac30c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2442,6 +2442,7 @@ dependencies = [ "lighthouse_network", "lighthouse_version", "network", + "safe_arith", "sensitive_url", "serde", "slog", diff --git a/beacon_node/http_api/Cargo.toml b/beacon_node/http_api/Cargo.toml index 85bdbad51f..62373b464a 100644 --- a/beacon_node/http_api/Cargo.toml +++ b/beacon_node/http_api/Cargo.toml @@ -27,6 +27,7 @@ slot_clock = { path = "../../common/slot_clock" } eth2_ssz = "0.4.1" bs58 = "0.4.0" futures = "0.3.8" +safe_arith = {path = "../../consensus/safe_arith"} [dev-dependencies] store = { path = "../store" } diff --git a/beacon_node/http_api/src/proposer_duties.rs b/beacon_node/http_api/src/proposer_duties.rs index 16670b507d..d817c9f653 100644 --- a/beacon_node/http_api/src/proposer_duties.rs +++ b/beacon_node/http_api/src/proposer_duties.rs @@ -5,11 +5,12 @@ use beacon_chain::{ BeaconChain, BeaconChainError, BeaconChainTypes, MAXIMUM_GOSSIP_CLOCK_DISPARITY, }; use eth2::types::{self as api_types}; +use safe_arith::SafeArith; use slog::{debug, Logger}; use slot_clock::SlotClock; use state_processing::state_advance::partial_state_advance; use std::cmp::Ordering; -use types::{BeaconState, ChainSpec, CloneConfig, Epoch, EthSpec, Hash256, Slot}; +use types::{BeaconState, ChainSpec, CloneConfig, Epoch, EthSpec, Fork, Hash256, Slot}; /// The struct that is returned to the requesting HTTP client. 
type ApiDuties = api_types::DutiesResponse>; @@ -49,11 +50,21 @@ pub fn proposer_duties( ); compute_and_cache_proposer_duties(request_epoch, chain) } - } else if request_epoch > current_epoch { - // Reject queries about the future as they're very expensive there's no look-ahead for - // proposer duties. + } else if request_epoch + == current_epoch + .safe_add(1) + .map_err(warp_utils::reject::arith_error)? + { + let (proposers, dependent_root, _) = compute_proposer_duties(request_epoch, chain)?; + convert_to_api_response(chain, request_epoch, dependent_root, proposers) + } else if request_epoch + > current_epoch + .safe_add(1) + .map_err(warp_utils::reject::arith_error)? + { + // Reject queries about the future epochs for which lookahead is not possible Err(warp_utils::reject::custom_bad_request(format!( - "request epoch {} is ahead of the current epoch {}", + "request epoch {} is ahead of the next epoch {}", request_epoch, current_epoch ))) } else { @@ -119,6 +130,24 @@ fn compute_and_cache_proposer_duties( current_epoch: Epoch, chain: &BeaconChain, ) -> Result { + let (indices, dependent_root, fork) = compute_proposer_duties(current_epoch, chain)?; + + // Prime the proposer shuffling cache with the newly-learned value. + chain + .beacon_proposer_cache + .lock() + .insert(current_epoch, dependent_root, indices.clone(), fork) + .map_err(BeaconChainError::from) + .map_err(warp_utils::reject::beacon_chain_error)?; + + convert_to_api_response(chain, current_epoch, dependent_root, indices) +} + +/// Compute the proposer duties using the head state without cache. +fn compute_proposer_duties( + current_epoch: Epoch, + chain: &BeaconChain, +) -> Result<(Vec, Hash256, Fork), warp::reject::Rejection> { // Take a copy of the head of the chain. 
let head = chain .head() @@ -140,20 +169,7 @@ fn compute_and_cache_proposer_duties( .map_err(BeaconChainError::from) .map_err(warp_utils::reject::beacon_chain_error)?; - // Prime the proposer shuffling cache with the newly-learned value. - chain - .beacon_proposer_cache - .lock() - .insert( - state.current_epoch(), - dependent_root, - indices.clone(), - state.fork(), - ) - .map_err(BeaconChainError::from) - .map_err(warp_utils::reject::beacon_chain_error)?; - - convert_to_api_response(chain, current_epoch, dependent_root, indices) + Ok((indices, dependent_root, state.fork())) } /// Compute some proposer duties by reading a `BeaconState` from disk, completely ignoring the diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 878af7a039..2957a68c05 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -1671,7 +1671,7 @@ impl ApiTester { pub async fn test_get_validator_duties_proposer(self) -> Self { let current_epoch = self.chain.epoch().unwrap(); - for epoch in 0..=self.chain.epoch().unwrap().as_u64() { + for epoch in 0..=self.chain.epoch().unwrap().as_u64() + 1 { let epoch = Epoch::from(epoch); let dependent_root = self @@ -1780,9 +1780,9 @@ impl ApiTester { } } - // Requests to future epochs should fail. + // Requests to the epochs after the next epoch should fail. 
self.client - .get_validator_duties_proposer(current_epoch + 1) + .get_validator_duties_proposer(current_epoch + 2) .await .unwrap_err(); @@ -1802,15 +1802,27 @@ impl ApiTester { current_epoch_start - MAXIMUM_GOSSIP_CLOCK_DISPARITY - Duration::from_millis(1), ); - assert_eq!( - self.client - .get_validator_duties_proposer(current_epoch) - .await - .unwrap_err() - .status() - .map(Into::into), - Some(400), - "should not get proposer duties outside of tolerance" + let dependent_root = self + .chain + .block_root_at_slot( + current_epoch.start_slot(E::slots_per_epoch()) - 1, + WhenSlotSkipped::Prev, + ) + .unwrap() + .unwrap_or(self.chain.head_beacon_block_root().unwrap()); + + self.client + .get_validator_duties_proposer(current_epoch) + .await + .expect("should get proposer duties for the next epoch outside of tolerance"); + + assert!( + self.chain + .beacon_proposer_cache + .lock() + .get_epoch::(dependent_root, current_epoch) + .is_none(), + "should not prime the proposer cache outside of tolerance" ); assert_eq!( @@ -1832,6 +1844,16 @@ impl ApiTester { .get_validator_duties_proposer(current_epoch) .await .expect("should get proposer duties within tolerance"); + + assert!( + self.chain + .beacon_proposer_cache + .lock() + .get_epoch::(dependent_root, current_epoch) + .is_some(), + "should prime the proposer cache inside the tolerance" + ); + self.client .post_validator_duties_attester(next_epoch, &[0]) .await From 104e3104f9ac2ca864ceb78fa5a67cde6bf27cef Mon Sep 17 00:00:00 2001 From: Mac L Date: Mon, 21 Feb 2022 23:21:02 +0000 Subject: [PATCH 07/14] Add API to compute block packing efficiency data (#2879) ## Issue Addressed N/A ## Proposed Changes Add a HTTP API which can be used to compute the block packing data for all blocks over a discrete range of epochs. 
## Usage

### Request

```
curl "http://localhost:5052/lighthouse/analysis/block_packing_efficiency?start_epoch=57730&end_epoch=57732"
```

### Response

```
[
    {
        "slot": "1847360",
        "block_hash": "0xa7dc230659802df2f99ea3798faede2e75942bb5735d56e6bfdc2df335dcd61f",
        "proposer_info": {
            "validator_index": 1686,
            "graffiti": ""
        },
        "available_attestations": 7096,
        "included_attestations": 6459,
        "prior_skip_slots": 0
    },
    ...
]
```

## Additional Info

This is notably different to the existing lcli code:
- Uses `BlockReplayer` #2863 and as such runs significantly faster than the previous method.
- Corrects the off-by-one #2878
- Removes the `offline` validators component. This was only a "best guess" and simply was used as a way to determine an estimate of the "true" packing efficiency and was generally not helpful in terms of direct comparisons between different packing methods. As such it has been removed from the API and any future estimates of "offline" validators would be better suited in a separate/more targeted API or as part of 'beacon watch': #2873
- Includes `prior_skip_slots`.
--- Cargo.lock | 1 + beacon_node/http_api/Cargo.toml | 2 + .../http_api/src/block_packing_efficiency.rs | 382 ++++++++++++++++++ beacon_node/http_api/src/lib.rs | 15 + common/eth2/src/lighthouse.rs | 4 + .../lighthouse/block_packing_efficiency.rs | 34 ++ 6 files changed, 438 insertions(+) create mode 100644 beacon_node/http_api/src/block_packing_efficiency.rs create mode 100644 common/eth2/src/lighthouse/block_packing_efficiency.rs diff --git a/Cargo.lock b/Cargo.lock index cb51cac30c..611cdf57a3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2442,6 +2442,7 @@ dependencies = [ "lighthouse_network", "lighthouse_version", "network", + "parking_lot", "safe_arith", "sensitive_url", "serde", diff --git a/beacon_node/http_api/Cargo.toml b/beacon_node/http_api/Cargo.toml index 62373b464a..315dbb9e55 100644 --- a/beacon_node/http_api/Cargo.toml +++ b/beacon_node/http_api/Cargo.toml @@ -27,8 +27,10 @@ slot_clock = { path = "../../common/slot_clock" } eth2_ssz = "0.4.1" bs58 = "0.4.0" futures = "0.3.8" +parking_lot = "0.11.0" safe_arith = {path = "../../consensus/safe_arith"} + [dev-dependencies] store = { path = "../store" } environment = { path = "../../lighthouse/environment" } diff --git a/beacon_node/http_api/src/block_packing_efficiency.rs b/beacon_node/http_api/src/block_packing_efficiency.rs new file mode 100644 index 0000000000..d948c0d7d8 --- /dev/null +++ b/beacon_node/http_api/src/block_packing_efficiency.rs @@ -0,0 +1,382 @@ +use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; +use eth2::lighthouse::{ + BlockPackingEfficiency, BlockPackingEfficiencyQuery, ProposerInfo, UniqueAttestation, +}; +use parking_lot::Mutex; +use state_processing::{ + per_epoch_processing::EpochProcessingSummary, BlockReplayError, BlockReplayer, +}; +use std::collections::{HashMap, HashSet}; +use std::marker::PhantomData; +use std::sync::Arc; +use types::{ + BeaconCommittee, BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, Hash256, + OwnedBeaconCommittee, 
RelativeEpoch, SignedBeaconBlock, Slot, +}; +use warp_utils::reject::{beacon_chain_error, custom_bad_request, custom_server_error}; + +/// Load blocks from block roots in chunks to reduce load on memory. +const BLOCK_ROOT_CHUNK_SIZE: usize = 100; + +#[derive(Debug)] +enum PackingEfficiencyError { + BlockReplay(BlockReplayError), + BeaconState(BeaconStateError), + CommitteeStoreError(Slot), + InvalidAttestationError, +} + +impl From for PackingEfficiencyError { + fn from(e: BlockReplayError) -> Self { + Self::BlockReplay(e) + } +} + +impl From for PackingEfficiencyError { + fn from(e: BeaconStateError) -> Self { + Self::BeaconState(e) + } +} + +struct CommitteeStore { + current_epoch_committees: Vec, + previous_epoch_committees: Vec, +} + +impl CommitteeStore { + fn new() -> Self { + CommitteeStore { + current_epoch_committees: Vec::new(), + previous_epoch_committees: Vec::new(), + } + } +} + +struct PackingEfficiencyHandler { + current_slot: Slot, + current_epoch: Epoch, + prior_skip_slots: u64, + available_attestations: HashSet, + included_attestations: HashMap, + committee_store: CommitteeStore, + _phantom: PhantomData, +} + +impl PackingEfficiencyHandler { + fn new( + start_epoch: Epoch, + starting_state: BeaconState, + spec: &ChainSpec, + ) -> Result { + let mut handler = PackingEfficiencyHandler { + current_slot: start_epoch.start_slot(T::slots_per_epoch()), + current_epoch: start_epoch, + prior_skip_slots: 0, + available_attestations: HashSet::new(), + included_attestations: HashMap::new(), + committee_store: CommitteeStore::new(), + _phantom: PhantomData::default(), + }; + + handler.compute_epoch(start_epoch, &starting_state, spec)?; + Ok(handler) + } + + fn update_slot(&mut self, slot: Slot) { + self.current_slot = slot; + if slot % T::slots_per_epoch() == 0 { + self.current_epoch = Epoch::new(slot.as_u64() / T::slots_per_epoch()); + } + } + + fn prune_included_attestations(&mut self) { + let epoch = self.current_epoch; + 
self.included_attestations.retain(|x, _| { + x.slot >= Epoch::new(epoch.as_u64().saturating_sub(2)).start_slot(T::slots_per_epoch()) + }); + } + + fn prune_available_attestations(&mut self) { + let slot = self.current_slot; + self.available_attestations + .retain(|x| x.slot >= (slot.as_u64().saturating_sub(T::slots_per_epoch()))); + } + + fn apply_block( + &mut self, + block: &SignedBeaconBlock, + ) -> Result { + let block_body = block.message().body(); + let attestations = block_body.attestations(); + + let mut attestations_in_block = HashMap::new(); + for attestation in attestations.iter() { + for (position, voted) in attestation.aggregation_bits.iter().enumerate() { + if voted { + let unique_attestation = UniqueAttestation { + slot: attestation.data.slot, + committee_index: attestation.data.index, + committee_position: position, + }; + let inclusion_distance: u64 = block + .slot() + .as_u64() + .checked_sub(attestation.data.slot.as_u64()) + .ok_or(PackingEfficiencyError::InvalidAttestationError)?; + + self.available_attestations.remove(&unique_attestation); + attestations_in_block.insert(unique_attestation, inclusion_distance); + } + } + } + + // Remove duplicate attestations as these yield no reward. 
+ attestations_in_block.retain(|x, _| self.included_attestations.get(x).is_none()); + self.included_attestations + .extend(attestations_in_block.clone()); + + Ok(attestations_in_block.len()) + } + + fn add_attestations(&mut self, slot: Slot) -> Result<(), PackingEfficiencyError> { + let committees = self.get_committees_at_slot(slot)?; + for committee in committees { + for position in 0..committee.committee.len() { + let unique_attestation = UniqueAttestation { + slot, + committee_index: committee.index, + committee_position: position, + }; + self.available_attestations.insert(unique_attestation); + } + } + + Ok(()) + } + + fn compute_epoch( + &mut self, + epoch: Epoch, + state: &BeaconState, + spec: &ChainSpec, + ) -> Result<(), PackingEfficiencyError> { + // Free some memory by pruning old attestations from the included set. + self.prune_included_attestations(); + + let new_committees = if state.committee_cache_is_initialized(RelativeEpoch::Current) { + state + .get_beacon_committees_at_epoch(RelativeEpoch::Current)? + .into_iter() + .map(BeaconCommittee::into_owned) + .collect::>() + } else { + state + .initialize_committee_cache(epoch, spec)? + .get_all_beacon_committees()? 
+ .into_iter() + .map(BeaconCommittee::into_owned) + .collect::>() + }; + + self.committee_store.previous_epoch_committees = + self.committee_store.current_epoch_committees.clone(); + + self.committee_store.current_epoch_committees = new_committees; + + Ok(()) + } + + fn get_committees_at_slot( + &self, + slot: Slot, + ) -> Result, PackingEfficiencyError> { + let mut committees = Vec::new(); + + for committee in &self.committee_store.current_epoch_committees { + if committee.slot == slot { + committees.push(committee.clone()); + } + } + for committee in &self.committee_store.previous_epoch_committees { + if committee.slot == slot { + committees.push(committee.clone()); + } + } + + if committees.is_empty() { + return Err(PackingEfficiencyError::CommitteeStoreError(slot)); + } + + Ok(committees) + } +} + +pub fn get_block_packing_efficiency( + query: BlockPackingEfficiencyQuery, + chain: Arc>, +) -> Result, warp::Rejection> { + let spec = &chain.spec; + + let start_epoch = query.start_epoch; + let start_slot = start_epoch.start_slot(T::EthSpec::slots_per_epoch()); + let prior_slot = start_slot - 1; + + let end_epoch = query.end_epoch; + let end_slot = end_epoch.end_slot(T::EthSpec::slots_per_epoch()); + + // Check query is valid. + if start_epoch > end_epoch || start_epoch == 0 { + return Err(custom_bad_request(format!( + "invalid start and end epochs: {}, {}", + start_epoch, end_epoch + ))); + } + + let prior_epoch = start_epoch - 1; + let start_slot_of_prior_epoch = prior_epoch.start_slot(T::EthSpec::slots_per_epoch()); + + // Load block roots. + let mut block_roots: Vec = chain + .forwards_iter_block_roots_until(start_slot_of_prior_epoch, end_slot) + .map_err(beacon_chain_error)? + .collect::, _>>() + .map_err(beacon_chain_error)? 
+ .iter() + .map(|(root, _)| *root) + .collect(); + block_roots.dedup(); + + let first_block_root = block_roots + .first() + .ok_or_else(|| custom_server_error("no blocks were loaded".to_string()))?; + + let first_block = chain + .get_block(first_block_root) + .and_then(|maybe_block| { + maybe_block.ok_or(BeaconChainError::MissingBeaconBlock(*first_block_root)) + }) + .map_err(beacon_chain_error)?; + + // Load state for block replay. + let starting_state_root = first_block.state_root(); + + let starting_state = chain + .get_state(&starting_state_root, Some(prior_slot)) + .and_then(|maybe_state| { + maybe_state.ok_or(BeaconChainError::MissingBeaconState(starting_state_root)) + }) + .map_err(beacon_chain_error)?; + + // Initialize response vector. + let mut response = Vec::new(); + + // Initialize handler. + let handler = Arc::new(Mutex::new( + PackingEfficiencyHandler::new(prior_epoch, starting_state.clone(), spec) + .map_err(|e| custom_server_error(format!("{:?}", e)))?, + )); + + let pre_slot_hook = + |state: &mut BeaconState| -> Result<(), PackingEfficiencyError> { + // Add attestations to `available_attestations`. + handler.lock().add_attestations(state.slot())?; + Ok(()) + }; + + let post_slot_hook = |state: &mut BeaconState, + _summary: Option>, + is_skip_slot: bool| + -> Result<(), PackingEfficiencyError> { + handler.lock().update_slot(state.slot()); + + // Check if this a new epoch. + if state.slot() % T::EthSpec::slots_per_epoch() == 0 { + handler.lock().compute_epoch( + state.slot().epoch(T::EthSpec::slots_per_epoch()), + state, + spec, + )?; + } + + if is_skip_slot { + handler.lock().prior_skip_slots += 1; + } + + // Remove expired attestations. + handler.lock().prune_available_attestations(); + + Ok(()) + }; + + let pre_block_hook = |_state: &mut BeaconState, + block: &SignedBeaconBlock| + -> Result<(), PackingEfficiencyError> { + let slot = block.slot(); + + let block_message = block.message(); + // Get block proposer info. 
+ let proposer_info = ProposerInfo { + validator_index: block_message.proposer_index(), + graffiti: block_message.body().graffiti().as_utf8_lossy(), + }; + + // Store the count of available attestations at this point. + // In the future it may be desirable to check that the number of available attestations + // does not exceed the maximum possible amount given the length of available committees. + let available_count = handler.lock().available_attestations.len(); + + // Get all attestations included in the block. + let included = handler.lock().apply_block(block)?; + + let efficiency = BlockPackingEfficiency { + slot, + block_hash: block.canonical_root(), + proposer_info, + available_attestations: available_count, + included_attestations: included, + prior_skip_slots: handler.lock().prior_skip_slots, + }; + + // Write to response. + if slot >= start_slot { + response.push(efficiency); + } + + handler.lock().prior_skip_slots = 0; + + Ok(()) + }; + + // Build BlockReplayer. + let mut replayer = BlockReplayer::new(starting_state, spec) + .no_state_root_iter() + .no_signature_verification() + .minimal_block_root_verification() + .pre_slot_hook(Box::new(pre_slot_hook)) + .post_slot_hook(Box::new(post_slot_hook)) + .pre_block_hook(Box::new(pre_block_hook)); + + // Iterate through the block roots, loading blocks in chunks to reduce load on memory. + for block_root_chunks in block_roots.chunks(BLOCK_ROOT_CHUNK_SIZE) { + // Load blocks from the block root chunks. 
+ let blocks = block_root_chunks + .iter() + .map(|root| { + chain + .get_block(root) + .and_then(|maybe_block| { + maybe_block.ok_or(BeaconChainError::MissingBeaconBlock(*root)) + }) + .map_err(beacon_chain_error) + }) + .collect::>, _>>()?; + + replayer = replayer + .apply_blocks(blocks, None) + .map_err(|e: PackingEfficiencyError| custom_server_error(format!("{:?}", e)))?; + } + + drop(replayer); + + Ok(response) +} diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 5ef845858a..dcc6528a9b 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -8,6 +8,7 @@ mod attestation_performance; mod attester_duties; mod block_id; +mod block_packing_efficiency; mod block_rewards; mod database; mod metrics; @@ -2615,6 +2616,19 @@ pub fn serve( }) }); + // GET lighthouse/analysis/block_packing_efficiency + let get_lighthouse_block_packing_efficiency = warp::path("lighthouse") + .and(warp::path("analysis")) + .and(warp::path("block_packing_efficiency")) + .and(warp::query::()) + .and(warp::path::end()) + .and(chain_filter.clone()) + .and_then(|query, chain: Arc>| { + blocking_json_task(move || { + block_packing_efficiency::get_block_packing_efficiency(query, chain) + }) + }); + let get_events = eth1_v1 .and(warp::path("events")) .and(warp::path::end()) @@ -2741,6 +2755,7 @@ pub fn serve( .or(get_lighthouse_database_info.boxed()) .or(get_lighthouse_block_rewards.boxed()) .or(get_lighthouse_attestation_performance.boxed()) + .or(get_lighthouse_block_packing_efficiency.boxed()) .or(get_events.boxed()), ) .or(warp::post().and( diff --git a/common/eth2/src/lighthouse.rs b/common/eth2/src/lighthouse.rs index adf73d8b92..a2e4a66c4b 100644 --- a/common/eth2/src/lighthouse.rs +++ b/common/eth2/src/lighthouse.rs @@ -1,6 +1,7 @@ //! This module contains endpoints that are non-standard and only available on Lighthouse servers. 
mod attestation_performance; +mod block_packing_efficiency; mod block_rewards; use crate::{ @@ -18,6 +19,9 @@ use store::{AnchorInfo, Split}; pub use attestation_performance::{ AttestationPerformance, AttestationPerformanceQuery, AttestationPerformanceStatistics, }; +pub use block_packing_efficiency::{ + BlockPackingEfficiency, BlockPackingEfficiencyQuery, ProposerInfo, UniqueAttestation, +}; pub use block_rewards::{AttestationRewards, BlockReward, BlockRewardMeta, BlockRewardsQuery}; pub use lighthouse_network::{types::SyncState, PeerInfo}; diff --git a/common/eth2/src/lighthouse/block_packing_efficiency.rs b/common/eth2/src/lighthouse/block_packing_efficiency.rs new file mode 100644 index 0000000000..0ad6f46031 --- /dev/null +++ b/common/eth2/src/lighthouse/block_packing_efficiency.rs @@ -0,0 +1,34 @@ +use serde::{Deserialize, Serialize}; +use types::{Epoch, Hash256, Slot}; + +type CommitteePosition = usize; +type Committee = u64; +type ValidatorIndex = u64; + +#[derive(Debug, Default, PartialEq, Eq, Hash, Clone, Serialize, Deserialize)] +pub struct UniqueAttestation { + pub slot: Slot, + pub committee_index: Committee, + pub committee_position: CommitteePosition, +} +#[derive(Debug, Default, PartialEq, Clone, Serialize, Deserialize)] +pub struct ProposerInfo { + pub validator_index: ValidatorIndex, + pub graffiti: String, +} + +#[derive(Debug, Default, PartialEq, Clone, Serialize, Deserialize)] +pub struct BlockPackingEfficiency { + pub slot: Slot, + pub block_hash: Hash256, + pub proposer_info: ProposerInfo, + pub available_attestations: usize, + pub included_attestations: usize, + pub prior_skip_slots: u64, +} + +#[derive(Debug, Default, PartialEq, Clone, Serialize, Deserialize)] +pub struct BlockPackingEfficiencyQuery { + pub start_epoch: Epoch, + pub end_epoch: Epoch, +} From b37d5db8df46c5904bf048d16a20f4f12aad0334 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 21 Feb 2022 23:21:03 +0000 Subject: [PATCH 08/14] Increase Bors timeout, refine 
target-branch-check (#3035) ## Issue Addressed Timeouts due to Windows builds running for 2h 20m. ## Proposed Changes * Increase Bors timeout to 3h * Refine the target branch check so that it will pass when we make PRs to feature branches. This is just an extra change I've been meaning to sneak in for a while. ## Additional Info * I think it would also be cool to try caching for CI again, but that's a separate issue and we'll still need the long timeout on a cache miss. --- .github/workflows/test-suite.yml | 4 ++-- bors.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index 04f7659fe2..919bdbac2a 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -19,8 +19,8 @@ jobs: runs-on: ubuntu-latest if: github.event_name == 'pull_request' steps: - - name: Check that pull request is targeting unstable branch - run: test ${{ github.base_ref }} = "unstable" + - name: Check that the pull request is not targeting the stable branch + run: test ${{ github.base_ref }} != "stable" cargo-fmt: name: cargo-fmt runs-on: ubuntu-latest diff --git a/bors.toml b/bors.toml index ae1b03e9b1..3c6826a9e8 100644 --- a/bors.toml +++ b/bors.toml @@ -16,5 +16,5 @@ status = [ "cargo-udeps" ] use_squash_merge = true -timeout_sec = 7200 +timeout_sec = 10800 pr_status = ["license/cla", "target-branch-check"] From 5a0b0490493294e843cd3835b0b0ce1301210650 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 22 Feb 2022 03:09:00 +0000 Subject: [PATCH 09/14] Avoid hogging the fallback `status` lock in the VC (#3022) ## Issue Addressed Addresses https://github.com/sigp/lighthouse/issues/2926 ## Proposed Changes Appropriated from https://github.com/sigp/lighthouse/issues/2926#issuecomment-1039676768: When a node returns *any* error we call 
[`CandidateBeaconNode::set_offline`](https://github.com/sigp/lighthouse/blob/c3a793fd73a3b11b130b82032904d39c952869e4/validator_client/src/beacon_node_fallback.rs#L424) which sets its `status` to `CandidateError::Offline`. That node will then be ignored until the routine [`fallback_updater_service`](https://github.com/sigp/lighthouse/blob/c3a793fd73a3b11b130b82032904d39c952869e4/validator_client/src/beacon_node_fallback.rs#L44) manages to reconnect to it. However, I believe there was an issue in the [`CandidateBeaconNode::refresh_status`](https://github.com/sigp/lighthouse/blob/c3a793fd73a3b11b130b82032904d39c952869e4/validator_client/src/beacon_node_fallback.rs#L157-L178) method, which is used by the updater service to see if the node has come good again. It was holding a [write lock on the `status` field](https://github.com/sigp/lighthouse/blob/c3a793fd73a3b11b130b82032904d39c952869e4/validator_client/src/beacon_node_fallback.rs#L165) whilst it polled the node status. This means a long timeout would hog the write lock and starve other processes. When a VC is trying to access a beacon node for whatever purpose (getting duties, posting blocks, etc), it performs [three passes](https://github.com/sigp/lighthouse/blob/c3a793fd73a3b11b130b82032904d39c952869e4/validator_client/src/beacon_node_fallback.rs#L432-L482) through the lists of nodes, trying to run some generic `function` (closure, lambda, etc) on each node: - 1st pass: only try running `function` on all nodes which are both synced and online. - 2nd pass: try running `function` on all nodes that are online, but not necessarily synced. - 3rd pass: for each offline node, try refreshing its status and then running `function` on it. So, it turns out that if the `CandidateBeaconNode::refresh_status` function from the routine update service is hogging the write-lock, the 1st pass gets blocked whilst trying to read the status of the first node.
So, nodes that should be left until the 3rd pass are blocking the process of the 1st and 2nd passes, hence the behaviour described in #2926. ## Additional Info NA --- validator_client/src/beacon_node_fallback.rs | 22 ++++++++++++-------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/validator_client/src/beacon_node_fallback.rs b/validator_client/src/beacon_node_fallback.rs index 18780c3092..d4f7c6c874 100644 --- a/validator_client/src/beacon_node_fallback.rs +++ b/validator_client/src/beacon_node_fallback.rs @@ -162,19 +162,23 @@ impl CandidateBeaconNode { spec: &ChainSpec, log: &Logger, ) -> Result<(), CandidateError> { - let mut status = self.status.write().await; - - if let Err(e) = self.is_online(log).await { - *status = Err(e); + let new_status = if let Err(e) = self.is_online(log).await { + Err(e) } else if let Err(e) = self.is_compatible(spec, log).await { - *status = Err(e); + Err(e) } else if let Err(e) = self.is_synced(slot_clock, log).await { - *status = Err(e); + Err(e) } else { - *status = Ok(()) - } + Ok(()) + }; - *status + // In case of concurrent use, the latest value will always be used. It's possible that a + // long time out might over-ride a recent successful response, leading to a falsely-offline + // status. I deem this edge-case acceptable in return for the concurrency benefits of not + // holding a write-lock whilst we check the online status of the node. + *self.status.write().await = new_status; + + new_status } /// Checks if the node is reachable. From 696de58141ecc34010c22256efac9c07849e5fb0 Mon Sep 17 00:00:00 2001 From: Mac L Date: Tue, 22 Feb 2022 03:09:02 +0000 Subject: [PATCH 10/14] Add aliases for validator-dir flags (#3034) ## Issue Addressed #3020 ## Proposed Changes - Alias the `validators-dir` arg to `validator-dir` in the `validator_client` subcommand. - Alias the `validator-dir` arg to `validators-dir` in the `account_manager validator` subcommand. - Add test for the validator_client alias. 
--- account_manager/src/lib.rs | 1 + account_manager/src/validator/mod.rs | 3 ++- lighthouse/tests/validator_client.rs | 13 +++++++++++++ validator_client/src/cli.rs | 1 + 4 files changed, 17 insertions(+), 1 deletion(-) diff --git a/account_manager/src/lib.rs b/account_manager/src/lib.rs index 8297567781..a032a85f71 100644 --- a/account_manager/src/lib.rs +++ b/account_manager/src/lib.rs @@ -10,6 +10,7 @@ use types::EthSpec; pub const CMD: &str = "account_manager"; pub const SECRETS_DIR_FLAG: &str = "secrets-dir"; pub const VALIDATOR_DIR_FLAG: &str = "validator-dir"; +pub const VALIDATOR_DIR_FLAG_ALIAS: &str = "validators-dir"; pub const WALLETS_DIR_FLAG: &str = "wallets-dir"; pub fn cli_app<'a, 'b>() -> App<'a, 'b> { diff --git a/account_manager/src/validator/mod.rs b/account_manager/src/validator/mod.rs index a652603ff6..4f1bde0795 100644 --- a/account_manager/src/validator/mod.rs +++ b/account_manager/src/validator/mod.rs @@ -6,7 +6,7 @@ pub mod modify; pub mod recover; pub mod slashing_protection; -use crate::VALIDATOR_DIR_FLAG; +use crate::{VALIDATOR_DIR_FLAG, VALIDATOR_DIR_FLAG_ALIAS}; use clap::{App, Arg, ArgMatches}; use directory::{parse_path_or_default_with_flag, DEFAULT_VALIDATOR_DIR}; use environment::Environment; @@ -21,6 +21,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .arg( Arg::with_name(VALIDATOR_DIR_FLAG) .long(VALIDATOR_DIR_FLAG) + .alias(VALIDATOR_DIR_FLAG_ALIAS) .value_name("VALIDATOR_DIRECTORY") .help( "The path to search for validator directories. 
\ diff --git a/lighthouse/tests/validator_client.rs b/lighthouse/tests/validator_client.rs index e682471c46..76315daaa9 100644 --- a/lighthouse/tests/validator_client.rs +++ b/lighthouse/tests/validator_client.rs @@ -66,6 +66,19 @@ fn validators_and_secrets_dir_flags() { }); } +#[test] +fn validators_dir_alias_flags() { + let dir = TempDir::new().expect("Unable to create temporary directory"); + CommandLineTest::new() + .flag("validator-dir", dir.path().join("validators").to_str()) + .flag("secrets-dir", dir.path().join("secrets").to_str()) + .run_with_no_datadir() + .with_config(|config| { + assert_eq!(config.validator_dir, dir.path().join("validators")); + assert_eq!(config.secrets_dir, dir.path().join("secrets")); + }); +} + #[test] fn beacon_nodes_flag() { CommandLineTest::new() diff --git a/validator_client/src/cli.rs b/validator_client/src/cli.rs index d16e1e0a1e..49a8f58167 100644 --- a/validator_client/src/cli.rs +++ b/validator_client/src/cli.rs @@ -38,6 +38,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .arg( Arg::with_name("validators-dir") .long("validators-dir") + .alias("validator-dir") .value_name("VALIDATORS_DIR") .help( "The directory which contains the validator keystores, deposit data for \ From c1df5d29cb5ecf8f80c5ed8ef051249f11c47ad5 Mon Sep 17 00:00:00 2001 From: Mac L Date: Thu, 24 Feb 2022 00:31:35 +0000 Subject: [PATCH 11/14] Ensure logfile respects the validators-dir CLI flag (#3003) ## Issue Addressed Closes #2990 ## Proposed Changes Add a check to see if the `--validators-dir` CLI flag is set and if so store validator logs into it. Ensure that if the log directory cannot be created, emit a `WARN` and disable file logging rather than panicking. ## Additional Info Panics associated with logfiles can still occur in these scenarios: 1. The `$datadir/validators/logs` directory already exists with the wrong permissions (or was changed after creation). 1. The logfile already exists with the wrong permissions (or was changed after creation). 
> These panics are cosmetic only since only the logfile thread panics. Following the panics, LH will continue to function as normal. I believe this is due to the use of [`slog::Fuse`](https://docs.rs/slog/latest/slog/struct.Fuse.html) when initializing the logger. I'm not sure if there a better way of handling logfile errors? I think ideally, rather than panicking, we would emit a `WARN` to the stdout logger with the panic reason, then exit the logfile thread gracefully. --- lighthouse/environment/src/lib.rs | 16 +++++++++++++++- lighthouse/src/main.rs | 25 ++++++++++++++++--------- 2 files changed, 31 insertions(+), 10 deletions(-) diff --git a/lighthouse/environment/src/lib.rs b/lighthouse/environment/src/lib.rs index 448c84b54d..91feef5b05 100644 --- a/lighthouse/environment/src/lib.rs +++ b/lighthouse/environment/src/lib.rs @@ -182,7 +182,21 @@ impl EnvironmentBuilder { // Create the necessary directories for the correct service and network. if !dir.exists() { - create_dir_all(dir).map_err(|e| format!("Unable to create directory: {:?}", e))?; + let res = create_dir_all(dir); + + // If the directories cannot be created, warn and disable the logger. + match res { + Ok(_) => (), + Err(e) => { + let log = stdout_logger; + warn!( + log, + "Background file logging is disabled"; + "error" => e); + self.log = Some(log); + return Ok(self); + } + } } } diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index 51c1075cdb..2f04b95ca4 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -372,21 +372,28 @@ fn run( // Construct the path to the log file. let mut log_path: Option = clap_utils::parse_optional(matches, "logfile")?; if log_path.is_none() { - log_path = match matches.subcommand_name() { - Some("beacon_node") => Some( + log_path = match matches.subcommand() { + ("beacon_node", _) => Some( parse_path_or_default(matches, "datadir")? 
.join(DEFAULT_BEACON_NODE_DIR) .join("logs") .join("beacon") .with_extension("log"), ), - Some("validator_client") => Some( - parse_path_or_default(matches, "datadir")? - .join(DEFAULT_VALIDATOR_DIR) - .join("logs") - .join("validator") - .with_extension("log"), - ), + ("validator_client", Some(vc_matches)) => { + let base_path = if vc_matches.is_present("validators-dir") { + parse_path_or_default(vc_matches, "validators-dir")? + } else { + parse_path_or_default(matches, "datadir")?.join(DEFAULT_VALIDATOR_DIR) + }; + + Some( + base_path + .join("logs") + .join("validator") + .with_extension("log"), + ) + } _ => None, }; } From 5e1f8a84804a18491a3c8b7ccbe4d7ef37fed55a Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Fri, 25 Feb 2022 00:10:17 +0000 Subject: [PATCH 12/14] Update to Rust 1.59 and 2021 edition (#3038) ## Proposed Changes Lots of lint updates related to `flat_map`, `unwrap_or_else` and string patterns. I did a little more creative refactoring in the op pool, but otherwise followed Clippy's suggestions. ## Additional Info We need this PR to unblock CI. 
--- account_manager/Cargo.toml | 2 +- beacon_node/Cargo.toml | 2 +- beacon_node/beacon_chain/Cargo.toml | 2 +- beacon_node/beacon_chain/src/beacon_chain.rs | 2 +- .../src/naive_aggregation_pool.rs | 5 +---- .../src/sync_committee_verification.rs | 2 +- .../beacon_chain/src/validator_monitor.rs | 13 +++++-------- .../src/validator_pubkey_cache.rs | 4 +--- .../tests/sync_committee_verification.rs | 4 ++-- beacon_node/client/Cargo.toml | 2 +- beacon_node/eth1/Cargo.toml | 2 +- beacon_node/eth1/src/http.rs | 2 +- beacon_node/execution_layer/Cargo.toml | 2 +- beacon_node/genesis/Cargo.toml | 2 +- beacon_node/http_api/Cargo.toml | 2 +- beacon_node/http_metrics/Cargo.toml | 2 +- beacon_node/lighthouse_network/Cargo.toml | 2 +- .../lighthouse_network/src/behaviour/mod.rs | 2 +- beacon_node/network/Cargo.toml | 2 +- beacon_node/operation_pool/Cargo.toml | 2 +- beacon_node/operation_pool/src/lib.rs | 13 ++++++------- beacon_node/operation_pool/src/persistence.rs | 15 +++++++-------- beacon_node/src/config.rs | 16 +++++++++------- beacon_node/store/Cargo.toml | 2 +- beacon_node/store/src/hot_cold_store.rs | 2 +- beacon_node/timer/Cargo.toml | 2 +- boot_node/Cargo.toml | 2 +- common/account_utils/Cargo.toml | 2 +- common/clap_utils/Cargo.toml | 2 +- common/clap_utils/src/lib.rs | 2 +- common/compare_fields/Cargo.toml | 2 +- common/compare_fields_derive/Cargo.toml | 2 +- common/compare_fields_derive/src/lib.rs | 2 +- common/deposit_contract/Cargo.toml | 2 +- common/directory/Cargo.toml | 2 +- common/eth2/Cargo.toml | 2 +- common/eth2_config/Cargo.toml | 2 +- common/eth2_interop_keypairs/Cargo.toml | 2 +- common/eth2_network_config/Cargo.toml | 2 +- common/eth2_wallet_manager/Cargo.toml | 2 +- common/fallback/Cargo.toml | 2 +- common/filesystem/Cargo.toml | 2 +- common/hashset_delay/Cargo.toml | 2 +- common/lighthouse_metrics/Cargo.toml | 2 +- common/lighthouse_version/Cargo.toml | 2 +- common/lockfile/Cargo.toml | 2 +- common/logging/Cargo.toml | 2 +- 
common/logging/tests/test.rs | 6 ++---- common/lru_cache/Cargo.toml | 2 +- common/malloc_utils/Cargo.toml | 2 +- common/monitoring_api/Cargo.toml | 2 +- common/sensitive_url/Cargo.toml | 2 +- common/slot_clock/Cargo.toml | 2 +- common/target_check/Cargo.toml | 2 +- common/task_executor/Cargo.toml | 2 +- common/test_random_derive/Cargo.toml | 2 +- common/test_random_derive/src/lib.rs | 2 +- common/validator_dir/Cargo.toml | 2 +- common/warp_utils/Cargo.toml | 2 +- consensus/cached_tree_hash/Cargo.toml | 2 +- consensus/cached_tree_hash/src/cache_arena.rs | 4 ++-- consensus/fork_choice/Cargo.toml | 2 +- consensus/fork_choice/src/fork_choice.rs | 2 +- consensus/fork_choice/tests/tests.rs | 4 ++-- consensus/int_to_bytes/Cargo.toml | 2 +- consensus/merkle_proof/Cargo.toml | 2 +- consensus/proto_array/Cargo.toml | 2 +- consensus/proto_array/src/proto_array.rs | 2 +- consensus/safe_arith/Cargo.toml | 2 +- consensus/serde_utils/Cargo.toml | 2 +- consensus/serde_utils/src/u64_hex_be.rs | 2 +- consensus/ssz/Cargo.toml | 2 +- consensus/ssz/src/decode.rs | 19 ++++++++++--------- consensus/ssz_derive/Cargo.toml | 2 +- consensus/ssz_types/Cargo.toml | 2 +- consensus/state_processing/Cargo.toml | 2 +- consensus/state_processing/src/lib.rs | 2 +- .../per_block_processing/signature_sets.rs | 12 +++++------- .../base/rewards_and_penalties.rs | 5 ++--- consensus/swap_or_not_shuffle/Cargo.toml | 2 +- consensus/tree_hash/Cargo.toml | 2 +- consensus/tree_hash/src/merkle_hasher.rs | 4 ++-- consensus/tree_hash_derive/Cargo.toml | 2 +- consensus/tree_hash_derive/src/lib.rs | 2 +- consensus/types/Cargo.toml | 2 +- .../types/src/beacon_state/tree_hash_cache.rs | 2 +- consensus/types/src/lib.rs | 2 +- crypto/bls/Cargo.toml | 2 +- crypto/eth2_hashing/Cargo.toml | 2 +- crypto/eth2_key_derivation/Cargo.toml | 2 +- crypto/eth2_key_derivation/src/derived_key.rs | 3 +-- crypto/eth2_keystore/Cargo.toml | 2 +- crypto/eth2_wallet/Cargo.toml | 2 +- lcli/Cargo.toml | 2 +- lcli/src/etl/block_efficiency.rs 
| 6 +++--- lighthouse/Cargo.toml | 2 +- lighthouse/environment/Cargo.toml | 2 +- slasher/Cargo.toml | 2 +- slasher/service/Cargo.toml | 2 +- testing/ef_tests/Cargo.toml | 2 +- .../ef_tests/src/cases/epoch_processing.rs | 5 +---- testing/ef_tests/src/cases/operations.rs | 5 +---- testing/ef_tests/src/cases/rewards.rs | 5 +---- testing/ef_tests/src/cases/sanity_blocks.rs | 5 +---- testing/ef_tests/src/cases/sanity_slots.rs | 5 +---- testing/eth1_test_rig/Cargo.toml | 2 +- testing/node_test_rig/Cargo.toml | 2 +- testing/simulator/Cargo.toml | 2 +- testing/state_transition_vectors/Cargo.toml | 2 +- testing/test-test_logger/Cargo.toml | 2 +- testing/web3signer_tests/Cargo.toml | 2 +- validator_client/Cargo.toml | 2 +- .../slashing_protection/Cargo.toml | 2 +- .../src/slashing_database.rs | 4 ++-- .../src/http_api/tests/keystores.rs | 15 ++++++++++++++- 115 files changed, 173 insertions(+), 188 deletions(-) diff --git a/account_manager/Cargo.toml b/account_manager/Cargo.toml index c14c47787f..ce863f9147 100644 --- a/account_manager/Cargo.toml +++ b/account_manager/Cargo.toml @@ -2,7 +2,7 @@ name = "account_manager" version = "0.3.5" authors = ["Paul Hauner ", "Luke Anderson "] -edition = "2018" +edition = "2021" [dependencies] bls = { path = "../crypto/bls" } diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 46ff5ba228..3242336c55 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -2,7 +2,7 @@ name = "beacon_node" version = "2.1.3" authors = ["Paul Hauner ", "Age Manning ", "Age Manning "] -edition = "2018" +edition = "2021" autotests = false # using a single test binary compiles faster [features] diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 67aed4b484..0b9dbb50a2 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -2289,7 +2289,7 @@ impl BeaconChain { .position(|(_root, block)| { 
block.slot().epoch(T::EthSpec::slots_per_epoch()) > start_epoch }) - .unwrap_or_else(|| filtered_chain_segment.len()); + .unwrap_or(filtered_chain_segment.len()); // Split off the first section blocks that are all either within the current epoch of // the first block. These blocks can all be signature-verified with the same diff --git a/beacon_node/beacon_chain/src/naive_aggregation_pool.rs b/beacon_node/beacon_chain/src/naive_aggregation_pool.rs index 8d8dd19b50..252e9915d8 100644 --- a/beacon_node/beacon_chain/src/naive_aggregation_pool.rs +++ b/beacon_node/beacon_chain/src/naive_aggregation_pool.rs @@ -421,10 +421,7 @@ impl NaiveAggregationPool { /// Iterate all items in all slots of `self`. pub fn iter(&self) -> impl Iterator { - self.maps - .iter() - .map(|(_slot, map)| map.get_map().iter().map(|(_key, value)| value)) - .flatten() + self.maps.values().flat_map(|map| map.get_map().values()) } /// Removes any items with a slot lower than `current_slot` and bars any future diff --git a/beacon_node/beacon_chain/src/sync_committee_verification.rs b/beacon_node/beacon_chain/src/sync_committee_verification.rs index fa7d4dcfed..7d56af7935 100644 --- a/beacon_node/beacon_chain/src/sync_committee_verification.rs +++ b/beacon_node/beacon_chain/src/sync_committee_verification.rs @@ -635,7 +635,7 @@ pub fn verify_sync_committee_message( let pubkey = pubkey_cache .get_pubkey_from_pubkey_bytes(pubkey_bytes) .map(Cow::Borrowed) - .ok_or_else(|| Error::UnknownValidatorPubkey(*pubkey_bytes))?; + .ok_or(Error::UnknownValidatorPubkey(*pubkey_bytes))?; let next_slot_epoch = (sync_message.get_slot() + 1).epoch(T::EthSpec::slots_per_epoch()); let fork = chain.spec.fork_at_epoch(next_slot_epoch); diff --git a/beacon_node/beacon_chain/src/validator_monitor.rs b/beacon_node/beacon_chain/src/validator_monitor.rs index ef3632f26c..6292409d7f 100644 --- a/beacon_node/beacon_chain/src/validator_monitor.rs +++ b/beacon_node/beacon_chain/src/validator_monitor.rs @@ -198,14 +198,11 @@ impl 
MonitoredValidator { /// as the value recorded by the validator monitor ignores skip slots. fn min_inclusion_distance(&self, epoch: &Epoch) -> Option { let summaries = self.summaries.read(); - summaries - .get(epoch) - .map(|summary| { - summary - .attestation_min_block_inclusion_distance - .map(Into::into) - }) - .flatten() + summaries.get(epoch).and_then(|summary| { + summary + .attestation_min_block_inclusion_distance + .map(Into::into) + }) } /// Maps `func` across the `self.summaries`. diff --git a/beacon_node/beacon_chain/src/validator_pubkey_cache.rs b/beacon_node/beacon_chain/src/validator_pubkey_cache.rs index da877cf4e5..14bb57e5e2 100644 --- a/beacon_node/beacon_chain/src/validator_pubkey_cache.rs +++ b/beacon_node/beacon_chain/src/validator_pubkey_cache.rs @@ -175,9 +175,7 @@ impl ValidatorPubkeyCache { /// Get the `PublicKey` for a validator with `PublicKeyBytes`. pub fn get_pubkey_from_pubkey_bytes(&self, pubkey: &PublicKeyBytes) -> Option<&PublicKey> { - self.get_index(pubkey) - .map(|index| self.get(index)) - .flatten() + self.get_index(pubkey).and_then(|index| self.get(index)) } /// Get the public key (in bytes form) for a validator with index `i`. diff --git a/beacon_node/beacon_chain/tests/sync_committee_verification.rs b/beacon_node/beacon_chain/tests/sync_committee_verification.rs index 2596ff18c1..18cd691ac3 100644 --- a/beacon_node/beacon_chain/tests/sync_committee_verification.rs +++ b/beacon_node/beacon_chain/tests/sync_committee_verification.rs @@ -179,7 +179,7 @@ fn aggregated_gossip_verification() { get_valid_sync_contribution(&harness, RelativeSyncCommittee::Current); macro_rules! assert_invalid { - ($desc: tt, $attn_getter: expr, $($error: pat) |+ $( if $guard: expr )?) => { + ($desc: tt, $attn_getter: expr, $($error: pat_param) |+ $( if $guard: expr )?) 
=> { assert!( matches!( harness @@ -505,7 +505,7 @@ fn unaggregated_gossip_verification() { get_valid_sync_committee_message(&harness, current_slot, RelativeSyncCommittee::Current); macro_rules! assert_invalid { - ($desc: tt, $attn_getter: expr, $subnet_getter: expr, $($error: pat) |+ $( if $guard: expr )?) => { + ($desc: tt, $attn_getter: expr, $subnet_getter: expr, $($error: pat_param) |+ $( if $guard: expr )?) => { assert!( matches!( harness diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index acb8376dbd..a34d02ae12 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -2,7 +2,7 @@ name = "client" version = "0.2.0" authors = ["Sigma Prime "] -edition = "2018" +edition = "2021" [dev-dependencies] toml = "0.5.6" diff --git a/beacon_node/eth1/Cargo.toml b/beacon_node/eth1/Cargo.toml index 0b0c2ea168..9600ef489b 100644 --- a/beacon_node/eth1/Cargo.toml +++ b/beacon_node/eth1/Cargo.toml @@ -2,7 +2,7 @@ name = "eth1" version = "0.2.0" authors = ["Paul Hauner "] -edition = "2018" +edition = "2021" [dev-dependencies] eth1_test_rig = { path = "../../testing/eth1_test_rig" } diff --git a/beacon_node/eth1/src/http.rs b/beacon_node/eth1/src/http.rs index 9e3465f0fa..876613a2ae 100644 --- a/beacon_node/eth1/src/http.rs +++ b/beacon_node/eth1/src/http.rs @@ -449,7 +449,7 @@ fn response_result_or_error(response: &str) -> Result { let json = serde_json::from_str::(response) .map_err(|e| RpcError::InvalidJson(e.to_string()))?; - if let Some(error) = json.get("error").map(|e| e.get("message")).flatten() { + if let Some(error) = json.get("error").and_then(|e| e.get("message")) { let error = error.to_string(); if error.contains(EIP155_ERROR_STR) { Err(RpcError::Eip155Error) diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml index c166024c06..7dbb326a67 100644 --- a/beacon_node/execution_layer/Cargo.toml +++ b/beacon_node/execution_layer/Cargo.toml @@ -1,7 +1,7 @@ [package] name = 
"execution_layer" version = "0.1.0" -edition = "2018" +edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html diff --git a/beacon_node/genesis/Cargo.toml b/beacon_node/genesis/Cargo.toml index 778e0a4ca6..3a19fe0f21 100644 --- a/beacon_node/genesis/Cargo.toml +++ b/beacon_node/genesis/Cargo.toml @@ -2,7 +2,7 @@ name = "genesis" version = "0.2.0" authors = ["Paul Hauner "] -edition = "2018" +edition = "2021" [dev-dependencies] eth1_test_rig = { path = "../../testing/eth1_test_rig" } diff --git a/beacon_node/http_api/Cargo.toml b/beacon_node/http_api/Cargo.toml index 315dbb9e55..7a23d128bd 100644 --- a/beacon_node/http_api/Cargo.toml +++ b/beacon_node/http_api/Cargo.toml @@ -2,7 +2,7 @@ name = "http_api" version = "0.1.0" authors = ["Paul Hauner "] -edition = "2018" +edition = "2021" autotests = false # using a single test binary compiles faster [dependencies] diff --git a/beacon_node/http_metrics/Cargo.toml b/beacon_node/http_metrics/Cargo.toml index c300aaa8eb..c98f2cb856 100644 --- a/beacon_node/http_metrics/Cargo.toml +++ b/beacon_node/http_metrics/Cargo.toml @@ -2,7 +2,7 @@ name = "http_metrics" version = "0.1.0" authors = ["Paul Hauner "] -edition = "2018" +edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html diff --git a/beacon_node/lighthouse_network/Cargo.toml b/beacon_node/lighthouse_network/Cargo.toml index 0cc53c09e4..70a50c02d5 100644 --- a/beacon_node/lighthouse_network/Cargo.toml +++ b/beacon_node/lighthouse_network/Cargo.toml @@ -2,7 +2,7 @@ name = "lighthouse_network" version = "0.2.0" authors = ["Sigma Prime "] -edition = "2018" +edition = "2021" [dependencies] discv5 = { version = "0.1.0-beta.13", features = ["libp2p"] } diff --git a/beacon_node/lighthouse_network/src/behaviour/mod.rs b/beacon_node/lighthouse_network/src/behaviour/mod.rs index c0a1fb3f71..155e01d789 100644 --- a/beacon_node/lighthouse_network/src/behaviour/mod.rs 
+++ b/beacon_node/lighthouse_network/src/behaviour/mod.rs @@ -458,7 +458,7 @@ impl Behaviour { GossipKind::Attestation(subnet_id) => { if let Some(v) = metrics::get_int_gauge( &metrics::FAILED_ATTESTATION_PUBLISHES_PER_SUBNET, - &[&subnet_id.to_string()], + &[subnet_id.as_ref()], ) { v.inc() }; diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index df68518881..1c7506483e 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -2,7 +2,7 @@ name = "network" version = "0.2.0" authors = ["Sigma Prime "] -edition = "2018" +edition = "2021" [dev-dependencies] sloggers = { version = "2.1.1", features = ["json"] } diff --git a/beacon_node/operation_pool/Cargo.toml b/beacon_node/operation_pool/Cargo.toml index 449a2f59d7..361c0a07fc 100644 --- a/beacon_node/operation_pool/Cargo.toml +++ b/beacon_node/operation_pool/Cargo.toml @@ -2,7 +2,7 @@ name = "operation_pool" version = "0.2.0" authors = ["Michael Sproul "] -edition = "2018" +edition = "2021" [dependencies] derivative = "2.1.1" diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index c9b252ca11..eef09631eb 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -560,9 +560,8 @@ impl OperationPool { pub fn get_all_attestations(&self) -> Vec> { self.attestations .read() - .iter() - .map(|(_, attns)| attns.iter().cloned()) - .flatten() + .values() + .flat_map(|attns| attns.iter().cloned()) .collect() } @@ -575,10 +574,10 @@ impl OperationPool { { self.attestations .read() - .iter() - .map(|(_, attns)| attns.iter().cloned()) - .flatten() - .filter(filter) + .values() + .flat_map(|attns| attns.iter()) + .filter(|attn| filter(*attn)) + .cloned() .collect() } diff --git a/beacon_node/operation_pool/src/persistence.rs b/beacon_node/operation_pool/src/persistence.rs index 50b7828fae..acab2db60e 100644 --- a/beacon_node/operation_pool/src/persistence.rs +++ 
b/beacon_node/operation_pool/src/persistence.rs @@ -97,20 +97,19 @@ impl PersistedOperationPool { /// Reconstruct an `OperationPool`. Sets `sync_contributions` to its `Default` if `self` matches /// `PersistedOperationPool::Base`. pub fn into_operation_pool(self) -> Result, OpPoolError> { - let attestations = RwLock::new(self.attestations().to_vec().into_iter().collect()); - let attester_slashings = - RwLock::new(self.attester_slashings().to_vec().into_iter().collect()); + let attestations = RwLock::new(self.attestations().iter().cloned().collect()); + let attester_slashings = RwLock::new(self.attester_slashings().iter().cloned().collect()); let proposer_slashings = RwLock::new( self.proposer_slashings() - .to_vec() - .into_iter() + .iter() + .cloned() .map(|slashing| (slashing.signed_header_1.message.proposer_index, slashing)) .collect(), ); let voluntary_exits = RwLock::new( self.voluntary_exits() - .to_vec() - .into_iter() + .iter() + .cloned() .map(|exit| (exit.message.validator_index, exit)) .collect(), ); @@ -125,7 +124,7 @@ impl PersistedOperationPool { }, PersistedOperationPool::Altair(_) => { let sync_contributions = - RwLock::new(self.sync_contributions()?.to_vec().into_iter().collect()); + RwLock::new(self.sync_contributions()?.iter().cloned().collect()); OperationPool { attestations, diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 33603b94e2..6e8743c055 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -705,13 +705,15 @@ pub fn set_network_config( // Appending enr-port to the dns hostname to appease `to_socket_addrs()` parsing. // Since enr-update is disabled with a dns address, not setting the enr-udp-port // will make the node undiscoverable. 
- if let Some(enr_udp_port) = config.enr_udp_port.or_else(|| { - if use_listening_port_as_enr_port_by_default { - Some(config.discovery_port) - } else { - None - } - }) { + if let Some(enr_udp_port) = + config + .enr_udp_port + .or(if use_listening_port_as_enr_port_by_default { + Some(config.discovery_port) + } else { + None + }) + { addr.push_str(&format!(":{}", enr_udp_port)); } else { return Err( diff --git a/beacon_node/store/Cargo.toml b/beacon_node/store/Cargo.toml index 66a6cf5d28..be98f269f8 100644 --- a/beacon_node/store/Cargo.toml +++ b/beacon_node/store/Cargo.toml @@ -2,7 +2,7 @@ name = "store" version = "0.2.0" authors = ["Paul Hauner "] -edition = "2018" +edition = "2021" [dev-dependencies] tempfile = "3.1.0" diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 62441ce0f2..c413309c9f 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -762,7 +762,7 @@ impl, Cold: ItemStore> HotColdDB let partial_state_bytes = self .cold_db .get_bytes(DBColumn::BeaconState.into(), state_root.as_bytes())? 
- .ok_or_else(|| HotColdDBError::MissingRestorePoint(*state_root))?; + .ok_or(HotColdDBError::MissingRestorePoint(*state_root))?; let mut partial_state: PartialBeaconState = PartialBeaconState::from_ssz_bytes(&partial_state_bytes, &self.spec)?; diff --git a/beacon_node/timer/Cargo.toml b/beacon_node/timer/Cargo.toml index e67f52d92a..bd20f24ee6 100644 --- a/beacon_node/timer/Cargo.toml +++ b/beacon_node/timer/Cargo.toml @@ -2,7 +2,7 @@ name = "timer" version = "0.2.0" authors = ["Sigma Prime "] -edition = "2018" +edition = "2021" [dependencies] beacon_chain = { path = "../beacon_chain" } diff --git a/boot_node/Cargo.toml b/boot_node/Cargo.toml index 1d86da52e5..8c89ab2e4e 100644 --- a/boot_node/Cargo.toml +++ b/boot_node/Cargo.toml @@ -2,7 +2,7 @@ name = "boot_node" version = "2.1.3" authors = ["Sigma Prime "] -edition = "2018" +edition = "2021" [dependencies] beacon_node = { path = "../beacon_node" } diff --git a/common/account_utils/Cargo.toml b/common/account_utils/Cargo.toml index b1a2f7e2fe..4b42e039df 100644 --- a/common/account_utils/Cargo.toml +++ b/common/account_utils/Cargo.toml @@ -2,7 +2,7 @@ name = "account_utils" version = "0.1.0" authors = ["Paul Hauner "] -edition = "2018" +edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html diff --git a/common/clap_utils/Cargo.toml b/common/clap_utils/Cargo.toml index 542a13ad4e..0aa35b2333 100644 --- a/common/clap_utils/Cargo.toml +++ b/common/clap_utils/Cargo.toml @@ -2,7 +2,7 @@ name = "clap_utils" version = "0.1.0" authors = ["Paul Hauner "] -edition = "2018" +edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html diff --git a/common/clap_utils/src/lib.rs b/common/clap_utils/src/lib.rs index f8c6e8b7ce..3dd42f2a99 100644 --- a/common/clap_utils/src/lib.rs +++ b/common/clap_utils/src/lib.rs @@ -31,7 +31,7 @@ pub fn get_eth2_network_config(cli_args: &ArgMatches) -> Result(cli_args, 
"terminal-total-difficulty-override")? { - let stripped = string.replace(",", ""); + let stripped = string.replace(',', ""); let terminal_total_difficulty = Uint256::from_dec_str(&stripped).map_err(|e| { format!( "Could not parse --terminal-total-difficulty-override as decimal value: {:?}", diff --git a/common/compare_fields/Cargo.toml b/common/compare_fields/Cargo.toml index dc07f940b3..58527b5711 100644 --- a/common/compare_fields/Cargo.toml +++ b/common/compare_fields/Cargo.toml @@ -2,7 +2,7 @@ name = "compare_fields" version = "0.2.0" authors = ["Paul Hauner "] -edition = "2018" +edition = "2021" [dev-dependencies] compare_fields_derive = { path = "../compare_fields_derive" } diff --git a/common/compare_fields_derive/Cargo.toml b/common/compare_fields_derive/Cargo.toml index 256af2767f..7696d3606b 100644 --- a/common/compare_fields_derive/Cargo.toml +++ b/common/compare_fields_derive/Cargo.toml @@ -2,7 +2,7 @@ name = "compare_fields_derive" version = "0.2.0" authors = ["Paul Hauner "] -edition = "2018" +edition = "2021" [lib] proc-macro = true diff --git a/common/compare_fields_derive/src/lib.rs b/common/compare_fields_derive/src/lib.rs index a8f95a1bd9..beabc6ca9b 100644 --- a/common/compare_fields_derive/src/lib.rs +++ b/common/compare_fields_derive/src/lib.rs @@ -8,7 +8,7 @@ use syn::{parse_macro_input, DeriveInput}; fn is_slice(field: &syn::Field) -> bool { field.attrs.iter().any(|attr| { attr.path.is_ident("compare_fields") - && attr.tokens.to_string().replace(" ", "") == "(as_slice)" + && attr.tokens.to_string().replace(' ', "") == "(as_slice)" }) } diff --git a/common/deposit_contract/Cargo.toml b/common/deposit_contract/Cargo.toml index e1f0579a40..d1eae97ce1 100644 --- a/common/deposit_contract/Cargo.toml +++ b/common/deposit_contract/Cargo.toml @@ -2,7 +2,7 @@ name = "deposit_contract" version = "0.2.0" authors = ["Paul Hauner "] -edition = "2018" +edition = "2021" build = "build.rs" diff --git a/common/directory/Cargo.toml 
b/common/directory/Cargo.toml index c85c7b61f8..f7b77ab7b7 100644 --- a/common/directory/Cargo.toml +++ b/common/directory/Cargo.toml @@ -2,7 +2,7 @@ name = "directory" version = "0.1.0" authors = ["pawan "] -edition = "2018" +edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html diff --git a/common/eth2/Cargo.toml b/common/eth2/Cargo.toml index d039a0c91a..fecebe8cad 100644 --- a/common/eth2/Cargo.toml +++ b/common/eth2/Cargo.toml @@ -2,7 +2,7 @@ name = "eth2" version = "0.1.0" authors = ["Paul Hauner "] -edition = "2018" +edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html diff --git a/common/eth2_config/Cargo.toml b/common/eth2_config/Cargo.toml index 18433f09f9..08f8c9a393 100644 --- a/common/eth2_config/Cargo.toml +++ b/common/eth2_config/Cargo.toml @@ -2,7 +2,7 @@ name = "eth2_config" version = "0.2.0" authors = ["Paul Hauner "] -edition = "2018" +edition = "2021" [dependencies] types = { path = "../../consensus/types" } diff --git a/common/eth2_interop_keypairs/Cargo.toml b/common/eth2_interop_keypairs/Cargo.toml index cf8f889cf4..2e2d781288 100644 --- a/common/eth2_interop_keypairs/Cargo.toml +++ b/common/eth2_interop_keypairs/Cargo.toml @@ -2,7 +2,7 @@ name = "eth2_interop_keypairs" version = "0.2.0" authors = ["Paul Hauner "] -edition = "2018" +edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html diff --git a/common/eth2_network_config/Cargo.toml b/common/eth2_network_config/Cargo.toml index aac11c4ea8..32cee89f7f 100644 --- a/common/eth2_network_config/Cargo.toml +++ b/common/eth2_network_config/Cargo.toml @@ -2,7 +2,7 @@ name = "eth2_network_config" version = "0.2.0" authors = ["Paul Hauner "] -edition = "2018" +edition = "2021" build = "build.rs" diff --git a/common/eth2_wallet_manager/Cargo.toml b/common/eth2_wallet_manager/Cargo.toml index 4c72323f77..8e6f0c0e5c 100644 
--- a/common/eth2_wallet_manager/Cargo.toml +++ b/common/eth2_wallet_manager/Cargo.toml @@ -2,7 +2,7 @@ name = "eth2_wallet_manager" version = "0.1.0" authors = ["Paul Hauner "] -edition = "2018" +edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html diff --git a/common/fallback/Cargo.toml b/common/fallback/Cargo.toml index 31a701d16e..0d71bbbd27 100644 --- a/common/fallback/Cargo.toml +++ b/common/fallback/Cargo.toml @@ -2,7 +2,7 @@ name = "fallback" version = "0.1.0" authors = ["blacktemplar "] -edition = "2018" +edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html diff --git a/common/filesystem/Cargo.toml b/common/filesystem/Cargo.toml index f263f680ce..66cbedbb83 100644 --- a/common/filesystem/Cargo.toml +++ b/common/filesystem/Cargo.toml @@ -2,7 +2,7 @@ name = "filesystem" version = "0.1.0" authors = ["Mark Mackey "] -edition = "2018" +edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html diff --git a/common/hashset_delay/Cargo.toml b/common/hashset_delay/Cargo.toml index a416b43f8c..1aa525a115 100644 --- a/common/hashset_delay/Cargo.toml +++ b/common/hashset_delay/Cargo.toml @@ -2,7 +2,7 @@ name = "hashset_delay" version = "0.2.0" authors = ["Sigma Prime "] -edition = "2018" +edition = "2021" [dependencies] futures = "0.3.7" diff --git a/common/lighthouse_metrics/Cargo.toml b/common/lighthouse_metrics/Cargo.toml index 5c0acd9bd5..06e33555d9 100644 --- a/common/lighthouse_metrics/Cargo.toml +++ b/common/lighthouse_metrics/Cargo.toml @@ -2,7 +2,7 @@ name = "lighthouse_metrics" version = "0.2.0" authors = ["Paul Hauner "] -edition = "2018" +edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html diff --git a/common/lighthouse_version/Cargo.toml b/common/lighthouse_version/Cargo.toml index 7da51ea11f..782344e9ca 100644 --- 
a/common/lighthouse_version/Cargo.toml +++ b/common/lighthouse_version/Cargo.toml @@ -2,7 +2,7 @@ name = "lighthouse_version" version = "0.1.0" authors = ["Paul Hauner "] -edition = "2018" +edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html diff --git a/common/lockfile/Cargo.toml b/common/lockfile/Cargo.toml index 004aaa300e..b9616e8715 100644 --- a/common/lockfile/Cargo.toml +++ b/common/lockfile/Cargo.toml @@ -2,7 +2,7 @@ name = "lockfile" version = "0.1.0" authors = ["Michael Sproul "] -edition = "2018" +edition = "2021" [dependencies] fs2 = "0.4.3" diff --git a/common/logging/Cargo.toml b/common/logging/Cargo.toml index da1aa8b529..e56a1a2358 100644 --- a/common/logging/Cargo.toml +++ b/common/logging/Cargo.toml @@ -2,7 +2,7 @@ name = "logging" version = "0.2.0" authors = ["blacktemplar "] -edition = "2018" +edition = "2021" [features] test_logger = [] # Print log output to stderr when running tests instead of dropping it diff --git a/common/logging/tests/test.rs b/common/logging/tests/test.rs index 1d3d3c5e2a..f39f2b6d5a 100644 --- a/common/logging/tests/test.rs +++ b/common/logging/tests/test.rs @@ -3,13 +3,11 @@ use std::process::Command; use std::process::Output; fn run_cmd(cmd_line: &str) -> Result { - let output; if cfg!(target_os = "windows") { - output = Command::new(r#"cmd"#).args(["/C", cmd_line]).output(); + Command::new(r#"cmd"#).args(["/C", cmd_line]).output() } else { - output = Command::new(r#"sh"#).args(["-c", cmd_line]).output(); + Command::new(r#"sh"#).args(["-c", cmd_line]).output() } - output } #[test] diff --git a/common/lru_cache/Cargo.toml b/common/lru_cache/Cargo.toml index df5d9b1628..405b7be5d8 100644 --- a/common/lru_cache/Cargo.toml +++ b/common/lru_cache/Cargo.toml @@ -2,7 +2,7 @@ name = "lru_cache" version = "0.1.0" authors = ["Sigma Prime "] -edition = "2018" +edition = "2021" [dependencies] fnv = "1.0.7" diff --git a/common/malloc_utils/Cargo.toml 
b/common/malloc_utils/Cargo.toml index 813584992e..881b9e346f 100644 --- a/common/malloc_utils/Cargo.toml +++ b/common/malloc_utils/Cargo.toml @@ -2,7 +2,7 @@ name = "malloc_utils" version = "0.1.0" authors = ["Paul Hauner "] -edition = "2018" +edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html diff --git a/common/monitoring_api/Cargo.toml b/common/monitoring_api/Cargo.toml index 7812e5ada2..4196f8ccea 100644 --- a/common/monitoring_api/Cargo.toml +++ b/common/monitoring_api/Cargo.toml @@ -2,7 +2,7 @@ name = "monitoring_api" version = "0.1.0" authors = ["pawan "] -edition = "2018" +edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html diff --git a/common/sensitive_url/Cargo.toml b/common/sensitive_url/Cargo.toml index b6b0620a08..6de591efcf 100644 --- a/common/sensitive_url/Cargo.toml +++ b/common/sensitive_url/Cargo.toml @@ -2,7 +2,7 @@ name = "sensitive_url" version = "0.1.0" authors = ["Mac L "] -edition = "2018" +edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html diff --git a/common/slot_clock/Cargo.toml b/common/slot_clock/Cargo.toml index 63a571e5ba..eaf280398f 100644 --- a/common/slot_clock/Cargo.toml +++ b/common/slot_clock/Cargo.toml @@ -2,7 +2,7 @@ name = "slot_clock" version = "0.2.0" authors = ["Paul Hauner "] -edition = "2018" +edition = "2021" [dependencies] types = { path = "../../consensus/types" } diff --git a/common/target_check/Cargo.toml b/common/target_check/Cargo.toml index 8981483eec..2cf2cacc64 100644 --- a/common/target_check/Cargo.toml +++ b/common/target_check/Cargo.toml @@ -2,7 +2,7 @@ name = "target_check" version = "0.1.0" authors = ["Michael Sproul "] -edition = "2018" +edition = "2021" [dependencies] static_assertions = "1.1.0" diff --git a/common/task_executor/Cargo.toml b/common/task_executor/Cargo.toml index db5c2e7b0c..660cc1ca01 100644 --- 
a/common/task_executor/Cargo.toml +++ b/common/task_executor/Cargo.toml @@ -2,7 +2,7 @@ name = "task_executor" version = "0.1.0" authors = ["Sigma Prime "] -edition = "2018" +edition = "2021" [dependencies] tokio = { version = "1.14.0", features = ["rt"] } diff --git a/common/test_random_derive/Cargo.toml b/common/test_random_derive/Cargo.toml index 0186ab3264..8794eeea21 100644 --- a/common/test_random_derive/Cargo.toml +++ b/common/test_random_derive/Cargo.toml @@ -2,7 +2,7 @@ name = "test_random_derive" version = "0.2.0" authors = ["thojest "] -edition = "2018" +edition = "2021" description = "Procedural derive macros for implementation of TestRandom trait" [lib] diff --git a/common/test_random_derive/src/lib.rs b/common/test_random_derive/src/lib.rs index fabc61c7fd..6c72ecb449 100644 --- a/common/test_random_derive/src/lib.rs +++ b/common/test_random_derive/src/lib.rs @@ -10,7 +10,7 @@ use syn::{parse_macro_input, DeriveInput}; /// The field attribute is: `#[test_random(default)]` fn should_use_default(field: &syn::Field) -> bool { field.attrs.iter().any(|attr| { - attr.path.is_ident("test_random") && attr.tokens.to_string().replace(" ", "") == "(default)" + attr.path.is_ident("test_random") && attr.tokens.to_string().replace(' ', "") == "(default)" }) } diff --git a/common/validator_dir/Cargo.toml b/common/validator_dir/Cargo.toml index 784d4d1df0..6fd4730f4b 100644 --- a/common/validator_dir/Cargo.toml +++ b/common/validator_dir/Cargo.toml @@ -2,7 +2,7 @@ name = "validator_dir" version = "0.1.0" authors = ["Paul Hauner "] -edition = "2018" +edition = "2021" [features] insecure_keys = [] diff --git a/common/warp_utils/Cargo.toml b/common/warp_utils/Cargo.toml index 09b6f125fc..e66aeddfb8 100644 --- a/common/warp_utils/Cargo.toml +++ b/common/warp_utils/Cargo.toml @@ -2,7 +2,7 @@ name = "warp_utils" version = "0.1.0" authors = ["Paul Hauner "] -edition = "2018" +edition = "2021" # See more keys and their definitions at 
https://doc.rust-lang.org/cargo/reference/manifest.html diff --git a/consensus/cached_tree_hash/Cargo.toml b/consensus/cached_tree_hash/Cargo.toml index b77c800b10..95762bbc79 100644 --- a/consensus/cached_tree_hash/Cargo.toml +++ b/consensus/cached_tree_hash/Cargo.toml @@ -2,7 +2,7 @@ name = "cached_tree_hash" version = "0.1.0" authors = ["Michael Sproul "] -edition = "2018" +edition = "2021" [dependencies] ethereum-types = "0.12.1" diff --git a/consensus/cached_tree_hash/src/cache_arena.rs b/consensus/cached_tree_hash/src/cache_arena.rs index a938d48266..42819e8df5 100644 --- a/consensus/cached_tree_hash/src/cache_arena.rs +++ b/consensus/cached_tree_hash/src/cache_arena.rs @@ -127,7 +127,7 @@ impl CacheArena { .offsets .get(alloc_id + 1) .copied() - .unwrap_or_else(|| self.backing.len()); + .unwrap_or(self.backing.len()); Ok(end - start) } @@ -168,7 +168,7 @@ impl CacheArena { .offsets .get(alloc_id + 1) .copied() - .unwrap_or_else(|| self.backing.len()); + .unwrap_or(self.backing.len()); Ok(start..end) } diff --git a/consensus/fork_choice/Cargo.toml b/consensus/fork_choice/Cargo.toml index a17b31db64..77603d09e6 100644 --- a/consensus/fork_choice/Cargo.toml +++ b/consensus/fork_choice/Cargo.toml @@ -2,7 +2,7 @@ name = "fork_choice" version = "0.1.0" authors = ["Paul Hauner "] -edition = "2018" +edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 3ab07c6af1..f6c6f16414 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -219,7 +219,7 @@ fn dequeue_attestations( queued_attestations .iter() .position(|a| a.slot >= current_slot) - .unwrap_or_else(|| queued_attestations.len()), + .unwrap_or(queued_attestations.len()), ); std::mem::replace(queued_attestations, remaining) diff --git a/consensus/fork_choice/tests/tests.rs 
b/consensus/fork_choice/tests/tests.rs index 42b56f6abf..0b230ffd32 100644 --- a/consensus/fork_choice/tests/tests.rs +++ b/consensus/fork_choice/tests/tests.rs @@ -613,7 +613,7 @@ fn justified_balances() { } macro_rules! assert_invalid_block { - ($err: tt, $($error: pat) |+ $( if $guard: expr )?) => { + ($err: tt, $($error: pat_param) |+ $( if $guard: expr )?) => { assert!( matches!( $err, @@ -719,7 +719,7 @@ fn invalid_block_finalized_descendant() { } macro_rules! assert_invalid_attestation { - ($err: tt, $($error: pat) |+ $( if $guard: expr )?) => { + ($err: tt, $($error: pat_param) |+ $( if $guard: expr )?) => { assert!( matches!( $err, diff --git a/consensus/int_to_bytes/Cargo.toml b/consensus/int_to_bytes/Cargo.toml index 81fd49b4f2..73dfec40f9 100644 --- a/consensus/int_to_bytes/Cargo.toml +++ b/consensus/int_to_bytes/Cargo.toml @@ -2,7 +2,7 @@ name = "int_to_bytes" version = "0.2.0" authors = ["Paul Hauner "] -edition = "2018" +edition = "2021" [dependencies] bytes = "1.0.1" diff --git a/consensus/merkle_proof/Cargo.toml b/consensus/merkle_proof/Cargo.toml index 7b2af88ec0..89cd502cf2 100644 --- a/consensus/merkle_proof/Cargo.toml +++ b/consensus/merkle_proof/Cargo.toml @@ -2,7 +2,7 @@ name = "merkle_proof" version = "0.2.0" authors = ["Michael Sproul "] -edition = "2018" +edition = "2021" [dependencies] ethereum-types = "0.12.1" diff --git a/consensus/proto_array/Cargo.toml b/consensus/proto_array/Cargo.toml index 2794d3c8e1..ad79ecc1e6 100644 --- a/consensus/proto_array/Cargo.toml +++ b/consensus/proto_array/Cargo.toml @@ -2,7 +2,7 @@ name = "proto_array" version = "0.2.0" authors = ["Paul Hauner "] -edition = "2018" +edition = "2021" [[bin]] name = "proto_array" diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 465ef9d4fc..759bee6ba9 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -313,7 +313,7 @@ impl ProtoArray { .indices .get(justified_root) 
.copied() - .ok_or_else(|| Error::JustifiedNodeUnknown(*justified_root))?; + .ok_or(Error::JustifiedNodeUnknown(*justified_root))?; let justified_node = self .nodes diff --git a/consensus/safe_arith/Cargo.toml b/consensus/safe_arith/Cargo.toml index 7784a03929..d212f98842 100644 --- a/consensus/safe_arith/Cargo.toml +++ b/consensus/safe_arith/Cargo.toml @@ -2,7 +2,7 @@ name = "safe_arith" version = "0.1.0" authors = ["Michael Sproul "] -edition = "2018" +edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html diff --git a/consensus/serde_utils/Cargo.toml b/consensus/serde_utils/Cargo.toml index 54eb55b8fc..1d258ec6ef 100644 --- a/consensus/serde_utils/Cargo.toml +++ b/consensus/serde_utils/Cargo.toml @@ -2,7 +2,7 @@ name = "eth2_serde_utils" version = "0.1.1" authors = ["Paul Hauner "] -edition = "2018" +edition = "2021" description = "Serialization and deserialization utilities useful for JSON representations of Ethereum 2.0 types." 
license = "Apache-2.0" diff --git a/consensus/serde_utils/src/u64_hex_be.rs b/consensus/serde_utils/src/u64_hex_be.rs index 145292f8c3..dc6af0fa4c 100644 --- a/consensus/serde_utils/src/u64_hex_be.rs +++ b/consensus/serde_utils/src/u64_hex_be.rs @@ -51,7 +51,7 @@ where let raw = hex::encode(num.to_be_bytes()); let trimmed = raw.trim_start_matches('0'); - let hex = if trimmed.is_empty() { "0" } else { &trimmed }; + let hex = if trimmed.is_empty() { "0" } else { trimmed }; serializer.serialize_str(&format!("0x{}", &hex)) } diff --git a/consensus/ssz/Cargo.toml b/consensus/ssz/Cargo.toml index 555017daae..7ba3e0678c 100644 --- a/consensus/ssz/Cargo.toml +++ b/consensus/ssz/Cargo.toml @@ -2,7 +2,7 @@ name = "eth2_ssz" version = "0.4.1" authors = ["Paul Hauner "] -edition = "2018" +edition = "2021" description = "SimpleSerialize (SSZ) as used in Ethereum 2.0" license = "Apache-2.0" diff --git a/consensus/ssz/src/decode.rs b/consensus/ssz/src/decode.rs index 1c4c04ff08..604cc68d7b 100644 --- a/consensus/ssz/src/decode.rs +++ b/consensus/ssz/src/decode.rs @@ -187,12 +187,13 @@ impl<'a> SszDecoderBuilder<'a> { let start = self.items_index; self.items_index += ssz_fixed_len; - let slice = self.bytes.get(start..self.items_index).ok_or_else(|| { - DecodeError::InvalidByteLength { - len: self.bytes.len(), - expected: self.items_index, - } - })?; + let slice = + self.bytes + .get(start..self.items_index) + .ok_or(DecodeError::InvalidByteLength { + len: self.bytes.len(), + expected: self.items_index, + })?; self.items.push(slice); } else { @@ -347,12 +348,12 @@ pub fn split_union_bytes(bytes: &[u8]) -> Result<(UnionSelector, &[u8]), DecodeE /// Reads a `BYTES_PER_LENGTH_OFFSET`-byte length from `bytes`, where `bytes.len() >= /// BYTES_PER_LENGTH_OFFSET`. 
pub fn read_offset(bytes: &[u8]) -> Result { - decode_offset(bytes.get(0..BYTES_PER_LENGTH_OFFSET).ok_or_else(|| { + decode_offset(bytes.get(0..BYTES_PER_LENGTH_OFFSET).ok_or( DecodeError::InvalidLengthPrefix { len: bytes.len(), expected: BYTES_PER_LENGTH_OFFSET, - } - })?) + }, + )?) } /// Decode bytes as a little-endian usize, returning an `Err` if `bytes.len() != diff --git a/consensus/ssz_derive/Cargo.toml b/consensus/ssz_derive/Cargo.toml index 3375308736..cac617d391 100644 --- a/consensus/ssz_derive/Cargo.toml +++ b/consensus/ssz_derive/Cargo.toml @@ -2,7 +2,7 @@ name = "eth2_ssz_derive" version = "0.3.0" authors = ["Paul Hauner "] -edition = "2018" +edition = "2021" description = "Procedural derive macros to accompany the eth2_ssz crate." license = "Apache-2.0" diff --git a/consensus/ssz_types/Cargo.toml b/consensus/ssz_types/Cargo.toml index 9c23ce92b5..2baa8994fb 100644 --- a/consensus/ssz_types/Cargo.toml +++ b/consensus/ssz_types/Cargo.toml @@ -2,7 +2,7 @@ name = "eth2_ssz_types" version = "0.2.2" authors = ["Paul Hauner "] -edition = "2018" +edition = "2021" description = "Provides types with unique properties required for SSZ serialization and Merklization." 
license = "Apache-2.0" diff --git a/consensus/state_processing/Cargo.toml b/consensus/state_processing/Cargo.toml index c26b020ad5..d5599782a3 100644 --- a/consensus/state_processing/Cargo.toml +++ b/consensus/state_processing/Cargo.toml @@ -2,7 +2,7 @@ name = "state_processing" version = "0.2.0" authors = ["Paul Hauner ", "Michael Sproul "] -edition = "2018" +edition = "2021" [dev-dependencies] env_logger = "0.9.0" diff --git a/consensus/state_processing/src/lib.rs b/consensus/state_processing/src/lib.rs index cb4ffee780..cf541d4572 100644 --- a/consensus/state_processing/src/lib.rs +++ b/consensus/state_processing/src/lib.rs @@ -3,7 +3,7 @@ not(test), deny( clippy::integer_arithmetic, - clippy::disallowed_method, + clippy::disallowed_methods, clippy::indexing_slicing, clippy::unwrap_used, clippy::expect_used, diff --git a/consensus/state_processing/src/per_block_processing/signature_sets.rs b/consensus/state_processing/src/per_block_processing/signature_sets.rs index a3b8dcaf21..5a89bd6867 100644 --- a/consensus/state_processing/src/per_block_processing/signature_sets.rs +++ b/consensus/state_processing/src/per_block_processing/signature_sets.rs @@ -177,7 +177,7 @@ where Ok(SignatureSet::single_pubkey( block.body().randao_reveal(), - get_pubkey(proposer_index).ok_or_else(|| Error::ValidatorUnknown(proposer_index as u64))?, + get_pubkey(proposer_index).ok_or(Error::ValidatorUnknown(proposer_index as u64))?, message, )) } @@ -199,15 +199,13 @@ where block_header_signature_set( state, &proposer_slashing.signed_header_1, - get_pubkey(proposer_index) - .ok_or_else(|| Error::ValidatorUnknown(proposer_index as u64))?, + get_pubkey(proposer_index).ok_or(Error::ValidatorUnknown(proposer_index as u64))?, spec, ), block_header_signature_set( state, &proposer_slashing.signed_header_2, - get_pubkey(proposer_index) - .ok_or_else(|| Error::ValidatorUnknown(proposer_index as u64))?, + get_pubkey(proposer_index).ok_or(Error::ValidatorUnknown(proposer_index as u64))?, spec, ), )) 
@@ -363,7 +361,7 @@ where Ok(SignatureSet::single_pubkey( &signed_exit.signature, - get_pubkey(proposer_index).ok_or_else(|| Error::ValidatorUnknown(proposer_index as u64))?, + get_pubkey(proposer_index).ok_or(Error::ValidatorUnknown(proposer_index as u64))?, message, )) } @@ -521,7 +519,7 @@ where { let mut pubkeys = Vec::with_capacity(T::SyncSubcommitteeSize::to_usize()); for pubkey in pubkey_bytes { - pubkeys.push(get_pubkey(pubkey).ok_or_else(|| Error::ValidatorPubkeyUnknown(*pubkey))?); + pubkeys.push(get_pubkey(pubkey).ok_or(Error::ValidatorPubkeyUnknown(*pubkey))?); } let domain = spec.get_domain(epoch, Domain::SyncCommittee, fork, genesis_validators_root); diff --git a/consensus/state_processing/src/per_epoch_processing/base/rewards_and_penalties.rs b/consensus/state_processing/src/per_epoch_processing/base/rewards_and_penalties.rs index 2c1ef6178e..99d08a6db3 100644 --- a/consensus/state_processing/src/per_epoch_processing/base/rewards_and_penalties.rs +++ b/consensus/state_processing/src/per_epoch_processing/base/rewards_and_penalties.rs @@ -4,7 +4,6 @@ use crate::per_epoch_processing::{ Delta, Error, }; use safe_arith::SafeArith; -use std::array::IntoIter as ArrayIter; use types::{BeaconState, ChainSpec, EthSpec}; /// Combination of several deltas for different components of an attestation reward. 
@@ -30,13 +29,13 @@ impl AttestationDelta { inactivity_penalty_delta, } = self; let mut result = Delta::default(); - for delta in ArrayIter::new([ + for delta in [ source_delta, target_delta, head_delta, inclusion_delay_delta, inactivity_penalty_delta, - ]) { + ] { result.combine(delta)?; } Ok(result) diff --git a/consensus/swap_or_not_shuffle/Cargo.toml b/consensus/swap_or_not_shuffle/Cargo.toml index 619d8336d2..ada4fba403 100644 --- a/consensus/swap_or_not_shuffle/Cargo.toml +++ b/consensus/swap_or_not_shuffle/Cargo.toml @@ -2,7 +2,7 @@ name = "swap_or_not_shuffle" version = "0.2.0" authors = ["Paul Hauner "] -edition = "2018" +edition = "2021" [[bench]] name = "benches" diff --git a/consensus/tree_hash/Cargo.toml b/consensus/tree_hash/Cargo.toml index 0c89fab80d..bdc7244032 100644 --- a/consensus/tree_hash/Cargo.toml +++ b/consensus/tree_hash/Cargo.toml @@ -2,7 +2,7 @@ name = "tree_hash" version = "0.4.1" authors = ["Paul Hauner "] -edition = "2018" +edition = "2021" license = "Apache-2.0" description = "Efficient Merkle-hashing as used in Ethereum 2.0" diff --git a/consensus/tree_hash/src/merkle_hasher.rs b/consensus/tree_hash/src/merkle_hasher.rs index c7fdb17461..1753eade1b 100644 --- a/consensus/tree_hash/src/merkle_hasher.rs +++ b/consensus/tree_hash/src/merkle_hasher.rs @@ -376,8 +376,8 @@ mod test { fn compare_with_reference(leaves: &[Hash256], depth: usize) { let reference_bytes = leaves .iter() - .map(|hash| hash.as_bytes().to_vec()) - .flatten() + .flat_map(|hash| hash.as_bytes()) + .copied() .collect::>(); let reference_root = merkleize_padded(&reference_bytes, 1 << (depth - 1)); diff --git a/consensus/tree_hash_derive/Cargo.toml b/consensus/tree_hash_derive/Cargo.toml index c9231040e4..5f3396eb16 100644 --- a/consensus/tree_hash_derive/Cargo.toml +++ b/consensus/tree_hash_derive/Cargo.toml @@ -2,7 +2,7 @@ name = "tree_hash_derive" version = "0.4.0" authors = ["Paul Hauner "] -edition = "2018" +edition = "2021" description = "Procedural derive macros 
to accompany the tree_hash crate." license = "Apache-2.0" diff --git a/consensus/tree_hash_derive/src/lib.rs b/consensus/tree_hash_derive/src/lib.rs index f2695b1f8b..f65be1b6b1 100644 --- a/consensus/tree_hash_derive/src/lib.rs +++ b/consensus/tree_hash_derive/src/lib.rs @@ -109,7 +109,7 @@ fn cached_tree_hash_attr_metas(attrs: &[Attribute]) -> Vec { fn should_skip_hashing(field: &syn::Field) -> bool { field.attrs.iter().any(|attr| { attr.path.is_ident("tree_hash") - && attr.tokens.to_string().replace(" ", "") == "(skip_hashing)" + && attr.tokens.to_string().replace(' ', "") == "(skip_hashing)" }) } diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index be1e6907c2..acd6b03b92 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -2,7 +2,7 @@ name = "types" version = "0.2.0" authors = ["Paul Hauner ", "Age Manning "] -edition = "2018" +edition = "2021" [[bench]] name = "benches" diff --git a/consensus/types/src/beacon_state/tree_hash_cache.rs b/consensus/types/src/beacon_state/tree_hash_cache.rs index 40b2c4bde0..e67d4096dd 100644 --- a/consensus/types/src/beacon_state/tree_hash_cache.rs +++ b/consensus/types/src/beacon_state/tree_hash_cache.rs @@ -1,5 +1,5 @@ #![allow(clippy::integer_arithmetic)] -#![allow(clippy::disallowed_method)] +#![allow(clippy::disallowed_methods)] #![allow(clippy::indexing_slicing)] use super::Error; diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 7e5c66bdb8..832f262698 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -7,7 +7,7 @@ not(test), deny( clippy::integer_arithmetic, - clippy::disallowed_method, + clippy::disallowed_methods, clippy::indexing_slicing ) )] diff --git a/crypto/bls/Cargo.toml b/crypto/bls/Cargo.toml index 9600da6df3..d71b46dc55 100644 --- a/crypto/bls/Cargo.toml +++ b/crypto/bls/Cargo.toml @@ -2,7 +2,7 @@ name = "bls" version = "0.2.0" authors = ["Paul Hauner "] -edition = "2018" +edition = "2021" [dependencies] eth2_ssz = 
"0.4.1" diff --git a/crypto/eth2_hashing/Cargo.toml b/crypto/eth2_hashing/Cargo.toml index 1eeaff54bb..574dbcf2c2 100644 --- a/crypto/eth2_hashing/Cargo.toml +++ b/crypto/eth2_hashing/Cargo.toml @@ -2,7 +2,7 @@ name = "eth2_hashing" version = "0.2.0" authors = ["Paul Hauner "] -edition = "2018" +edition = "2021" license = "Apache-2.0" description = "Hashing primitives used in Ethereum 2.0" diff --git a/crypto/eth2_key_derivation/Cargo.toml b/crypto/eth2_key_derivation/Cargo.toml index 1fd73bad21..43a8fe88b9 100644 --- a/crypto/eth2_key_derivation/Cargo.toml +++ b/crypto/eth2_key_derivation/Cargo.toml @@ -2,7 +2,7 @@ name = "eth2_key_derivation" version = "0.1.0" authors = ["Paul Hauner "] -edition = "2018" +edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html diff --git a/crypto/eth2_key_derivation/src/derived_key.rs b/crypto/eth2_key_derivation/src/derived_key.rs index 1598619dfb..b3373782ac 100644 --- a/crypto/eth2_key_derivation/src/derived_key.rs +++ b/crypto/eth2_key_derivation/src/derived_key.rs @@ -148,8 +148,7 @@ fn parent_sk_to_lamport_pk(ikm: &[u8], index: u32) -> ZeroizeHash { lamports .iter() - .map(LamportSecretKey::iter_chunks) - .flatten() + .flat_map(LamportSecretKey::iter_chunks) .enumerate() .for_each(|(i, chunk)| { let mut hasher = Sha256::new(); diff --git a/crypto/eth2_keystore/Cargo.toml b/crypto/eth2_keystore/Cargo.toml index a629f9a374..2bfdde8ac5 100644 --- a/crypto/eth2_keystore/Cargo.toml +++ b/crypto/eth2_keystore/Cargo.toml @@ -2,7 +2,7 @@ name = "eth2_keystore" version = "0.1.0" authors = ["Pawan Dhananjay "] -edition = "2018" +edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html diff --git a/crypto/eth2_wallet/Cargo.toml b/crypto/eth2_wallet/Cargo.toml index eeebfbfb43..e564209b65 100644 --- a/crypto/eth2_wallet/Cargo.toml +++ b/crypto/eth2_wallet/Cargo.toml @@ -2,7 +2,7 @@ name = "eth2_wallet" version = "0.1.0" 
authors = ["Paul Hauner "] -edition = "2018" +edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index 07f02a40b9..965b16d6b9 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -3,7 +3,7 @@ name = "lcli" description = "Lighthouse CLI (modeled after zcli)" version = "2.1.3" authors = ["Paul Hauner "] -edition = "2018" +edition = "2021" [features] portable = ["bls/supranational-portable"] diff --git a/lcli/src/etl/block_efficiency.rs b/lcli/src/etl/block_efficiency.rs index 87175ace89..1446428100 100644 --- a/lcli/src/etl/block_efficiency.rs +++ b/lcli/src/etl/block_efficiency.rs @@ -74,9 +74,9 @@ async fn get_block_attestations_set<'a, T: EthSpec>( .graffiti() .as_utf8_lossy() // Remove commas and apostropes from graffiti to ensure correct CSV format. - .replace(",", "") - .replace("\"", "") - .replace("'", ""), + .replace(',', "") + .replace('"', "") + .replace('\'', ""), }; let attestations = block.message().body().attestations(); diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 5cf04b3b4f..0b4b38b589 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -2,7 +2,7 @@ name = "lighthouse" version = "2.1.3" authors = ["Sigma Prime "] -edition = "2018" +edition = "2021" autotests = false [features] diff --git a/lighthouse/environment/Cargo.toml b/lighthouse/environment/Cargo.toml index ee196e70f1..7dc31e06bf 100644 --- a/lighthouse/environment/Cargo.toml +++ b/lighthouse/environment/Cargo.toml @@ -2,7 +2,7 @@ name = "environment" version = "0.1.2" authors = ["Paul Hauner "] -edition = "2018" +edition = "2021" [dependencies] tokio = { version = "1.14.0", features = ["macros", "rt", "rt-multi-thread", "signal" ] } diff --git a/slasher/Cargo.toml b/slasher/Cargo.toml index c319c2de1a..0139b6624a 100644 --- a/slasher/Cargo.toml +++ b/slasher/Cargo.toml @@ -2,7 +2,7 @@ name = "slasher" version = "0.1.0" authors = ["Michael Sproul "] 
-edition = "2018" +edition = "2021" [dependencies] bincode = "1.3.1" diff --git a/slasher/service/Cargo.toml b/slasher/service/Cargo.toml index 9aef23f4d1..63cf1e4649 100644 --- a/slasher/service/Cargo.toml +++ b/slasher/service/Cargo.toml @@ -2,7 +2,7 @@ name = "slasher_service" version = "0.1.0" authors = ["Michael Sproul "] -edition = "2018" +edition = "2021" [dependencies] beacon_chain = { path = "../../beacon_node/beacon_chain" } diff --git a/testing/ef_tests/Cargo.toml b/testing/ef_tests/Cargo.toml index 6819674664..e04d671396 100644 --- a/testing/ef_tests/Cargo.toml +++ b/testing/ef_tests/Cargo.toml @@ -2,7 +2,7 @@ name = "ef_tests" version = "0.2.0" authors = ["Paul Hauner "] -edition = "2018" +edition = "2021" [features] # `ef_tests` feature must be enabled to actually run the tests diff --git a/testing/ef_tests/src/cases/epoch_processing.rs b/testing/ef_tests/src/cases/epoch_processing.rs index b187d46fed..08722c8e46 100644 --- a/testing/ef_tests/src/cases/epoch_processing.rs +++ b/testing/ef_tests/src/cases/epoch_processing.rs @@ -259,10 +259,7 @@ impl> LoadCase for EpochProcessing { impl> Case for EpochProcessing { fn description(&self) -> String { - self.metadata - .description - .clone() - .unwrap_or_else(String::new) + self.metadata.description.clone().unwrap_or_default() } fn is_enabled_for_fork(fork_name: ForkName) -> bool { diff --git a/testing/ef_tests/src/cases/operations.rs b/testing/ef_tests/src/cases/operations.rs index 195df7f382..2e78022251 100644 --- a/testing/ef_tests/src/cases/operations.rs +++ b/testing/ef_tests/src/cases/operations.rs @@ -304,10 +304,7 @@ impl> LoadCase for Operations { impl> Case for Operations { fn description(&self) -> String { - self.metadata - .description - .clone() - .unwrap_or_else(String::new) + self.metadata.description.clone().unwrap_or_default() } fn is_enabled_for_fork(fork_name: ForkName) -> bool { diff --git a/testing/ef_tests/src/cases/rewards.rs b/testing/ef_tests/src/cases/rewards.rs index 
8aa041bce1..c59ceabe0b 100644 --- a/testing/ef_tests/src/cases/rewards.rs +++ b/testing/ef_tests/src/cases/rewards.rs @@ -103,10 +103,7 @@ impl LoadCase for RewardsTest { impl Case for RewardsTest { fn description(&self) -> String { - self.metadata - .description - .clone() - .unwrap_or_else(String::new) + self.metadata.description.clone().unwrap_or_default() } fn result(&self, _case_index: usize, fork_name: ForkName) -> Result<(), Error> { diff --git a/testing/ef_tests/src/cases/sanity_blocks.rs b/testing/ef_tests/src/cases/sanity_blocks.rs index c155be877a..5f0db25ded 100644 --- a/testing/ef_tests/src/cases/sanity_blocks.rs +++ b/testing/ef_tests/src/cases/sanity_blocks.rs @@ -56,10 +56,7 @@ impl LoadCase for SanityBlocks { impl Case for SanityBlocks { fn description(&self) -> String { - self.metadata - .description - .clone() - .unwrap_or_else(String::new) + self.metadata.description.clone().unwrap_or_default() } fn result(&self, _case_index: usize, fork_name: ForkName) -> Result<(), Error> { diff --git a/testing/ef_tests/src/cases/sanity_slots.rs b/testing/ef_tests/src/cases/sanity_slots.rs index 93a05b3641..a38a8930a0 100644 --- a/testing/ef_tests/src/cases/sanity_slots.rs +++ b/testing/ef_tests/src/cases/sanity_slots.rs @@ -50,10 +50,7 @@ impl LoadCase for SanitySlots { impl Case for SanitySlots { fn description(&self) -> String { - self.metadata - .description - .clone() - .unwrap_or_else(String::new) + self.metadata.description.clone().unwrap_or_default() } fn result(&self, _case_index: usize, fork_name: ForkName) -> Result<(), Error> { diff --git a/testing/eth1_test_rig/Cargo.toml b/testing/eth1_test_rig/Cargo.toml index 787a571e8f..57162f97b3 100644 --- a/testing/eth1_test_rig/Cargo.toml +++ b/testing/eth1_test_rig/Cargo.toml @@ -2,7 +2,7 @@ name = "eth1_test_rig" version = "0.2.0" authors = ["Paul Hauner "] -edition = "2018" +edition = "2021" [dependencies] tokio = { version = "1.14.0", features = ["time"] } diff --git a/testing/node_test_rig/Cargo.toml 
b/testing/node_test_rig/Cargo.toml index be2416a6c1..8e4b8595df 100644 --- a/testing/node_test_rig/Cargo.toml +++ b/testing/node_test_rig/Cargo.toml @@ -2,7 +2,7 @@ name = "node_test_rig" version = "0.2.0" authors = ["Paul Hauner "] -edition = "2018" +edition = "2021" [dependencies] environment = { path = "../../lighthouse/environment" } diff --git a/testing/simulator/Cargo.toml b/testing/simulator/Cargo.toml index 78919c8e79..c2f435fabe 100644 --- a/testing/simulator/Cargo.toml +++ b/testing/simulator/Cargo.toml @@ -2,7 +2,7 @@ name = "simulator" version = "0.2.0" authors = ["Paul Hauner "] -edition = "2018" +edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html diff --git a/testing/state_transition_vectors/Cargo.toml b/testing/state_transition_vectors/Cargo.toml index 1192f79909..4e93db3b32 100644 --- a/testing/state_transition_vectors/Cargo.toml +++ b/testing/state_transition_vectors/Cargo.toml @@ -2,7 +2,7 @@ name = "state_transition_vectors" version = "0.1.0" authors = ["Paul Hauner "] -edition = "2018" +edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html diff --git a/testing/test-test_logger/Cargo.toml b/testing/test-test_logger/Cargo.toml index d2a8b87b65..3d91862dbb 100644 --- a/testing/test-test_logger/Cargo.toml +++ b/testing/test-test_logger/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "test-test_logger" version = "0.1.0" -edition = "2018" +edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html diff --git a/testing/web3signer_tests/Cargo.toml b/testing/web3signer_tests/Cargo.toml index 16a4b1b10b..8ce5830062 100644 --- a/testing/web3signer_tests/Cargo.toml +++ b/testing/web3signer_tests/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "web3signer_tests" version = "0.1.0" -edition = "2018" +edition = "2021" build = "build.rs" diff --git a/validator_client/Cargo.toml 
b/validator_client/Cargo.toml index 08f5cec07c..a1604064ad 100644 --- a/validator_client/Cargo.toml +++ b/validator_client/Cargo.toml @@ -2,7 +2,7 @@ name = "validator_client" version = "0.3.5" authors = ["Paul Hauner ", "Age Manning ", "Luke Anderson "] -edition = "2018" +edition = "2021" [lib] name = "validator_client" diff --git a/validator_client/slashing_protection/Cargo.toml b/validator_client/slashing_protection/Cargo.toml index 697bd602bf..55e7f3f715 100644 --- a/validator_client/slashing_protection/Cargo.toml +++ b/validator_client/slashing_protection/Cargo.toml @@ -2,7 +2,7 @@ name = "slashing_protection" version = "0.1.0" authors = ["Michael Sproul ", "pscott "] -edition = "2018" +edition = "2021" autotests = false [[test]] diff --git a/validator_client/slashing_protection/src/slashing_database.rs b/validator_client/slashing_protection/src/slashing_database.rs index 9f585c010a..9a743ee184 100644 --- a/validator_client/slashing_protection/src/slashing_database.rs +++ b/validator_client/slashing_protection/src/slashing_database.rs @@ -287,7 +287,7 @@ impl SlashingDatabase { ) -> Result { let (validator_id, enabled) = self .get_validator_id_with_status(txn, public_key)? - .ok_or_else(|| NotSafe::UnregisteredValidator(*public_key))?; + .ok_or(NotSafe::UnregisteredValidator(*public_key))?; if enabled { Ok(validator_id) } else { @@ -303,7 +303,7 @@ impl SlashingDatabase { ) -> Result { let (validator_id, _) = self .get_validator_id_with_status(txn, public_key)? 
- .ok_or_else(|| NotSafe::UnregisteredValidator(*public_key))?; + .ok_or(NotSafe::UnregisteredValidator(*public_key))?; Ok(validator_id) } diff --git a/validator_client/src/http_api/tests/keystores.rs b/validator_client/src/http_api/tests/keystores.rs index c56f2f2298..427f22adc3 100644 --- a/validator_client/src/http_api/tests/keystores.rs +++ b/validator_client/src/http_api/tests/keystores.rs @@ -5,7 +5,6 @@ use eth2::lighthouse_vc::{ std_types::{KeystoreJsonStr as Keystore, *}, types::Web3SignerValidatorRequest, }; -// use eth2_keystore::Keystore; use itertools::Itertools; use rand::{rngs::SmallRng, Rng, SeedableRng}; use slashing_protection::interchange::{Interchange, InterchangeMetadata}; @@ -148,6 +147,7 @@ fn check_delete_response<'a>( #[test] fn get_auth_no_token() { run_test(|mut tester| async move { + let _ = &tester; tester.client.send_authorization_header(false); let auth_response = tester.client.get_auth().await.unwrap(); @@ -163,6 +163,7 @@ fn get_auth_no_token() { #[test] fn get_empty_keystores() { run_test(|tester| async move { + let _ = &tester; let res = tester.client.get_keystores().await.unwrap(); assert_eq!(res, ListKeystoresResponse { data: vec![] }); }) @@ -171,6 +172,7 @@ fn get_empty_keystores() { #[test] fn import_new_keystores() { run_test(|tester| async move { + let _ = &tester; let password = random_password_string(); let keystores = (0..3) .map(|_| new_keystore(password.clone())) @@ -198,6 +200,7 @@ fn import_new_keystores() { #[test] fn import_only_duplicate_keystores() { run_test(|tester| async move { + let _ = &tester; let password = random_password_string(); let keystores = (0..3) .map(|_| new_keystore(password.clone())) @@ -226,6 +229,7 @@ fn import_only_duplicate_keystores() { #[test] fn import_some_duplicate_keystores() { run_test(|tester| async move { + let _ = &tester; let password = random_password_string(); let num_keystores = 5; let keystores_all = (0..num_keystores) @@ -276,6 +280,7 @@ fn import_some_duplicate_keystores() 
{ #[test] fn import_wrong_number_of_passwords() { run_test(|tester| async move { + let _ = &tester; let password = random_password_string(); let keystores = (0..3) .map(|_| new_keystore(password.clone())) @@ -297,6 +302,7 @@ fn import_wrong_number_of_passwords() { #[test] fn get_web3_signer_keystores() { run_test(|tester| async move { + let _ = &tester; let num_local = 3; let num_remote = 2; @@ -356,6 +362,7 @@ fn get_web3_signer_keystores() { #[test] fn import_and_delete_conflicting_web3_signer_keystores() { run_test(|tester| async move { + let _ = &tester; let num_keystores = 3; // Create some keystores to be used as both web3signer keystores and local keystores. @@ -420,6 +427,7 @@ fn import_and_delete_conflicting_web3_signer_keystores() { #[test] fn import_keystores_wrong_password() { run_test(|tester| async move { + let _ = &tester; let num_keystores = 4; let (keystores, correct_passwords): (Vec<_>, Vec<_>) = (0..num_keystores) .map(|_| { @@ -494,6 +502,7 @@ fn import_keystores_wrong_password() { #[test] fn import_invalid_slashing_protection() { run_test(|tester| async move { + let _ = &tester; let password = random_password_string(); let keystores = (0..3) .map(|_| new_keystore(password.clone())) @@ -642,6 +651,7 @@ fn generic_migration_test( second_vc_attestations: Vec<(usize, Attestation, bool)>, ) { run_dual_vc_test(move |tester1, tester2| async move { + let _ = (&tester1, &tester2); // Create the validators on VC1. 
let (keystores, passwords): (Vec<_>, Vec<_>) = (0..num_validators) .map(|_| { @@ -756,6 +766,7 @@ fn generic_migration_test( #[test] fn delete_keystores_twice() { run_test(|tester| async move { + let _ = &tester; let password = random_password_string(); let keystores = (0..2) .map(|_| new_keystore(password.clone())) @@ -786,6 +797,7 @@ fn delete_keystores_twice() { #[test] fn delete_nonexistent_keystores() { run_test(|tester| async move { + let _ = &tester; let password = random_password_string(); let keystores = (0..2) .map(|_| new_keystore(password.clone())) @@ -947,6 +959,7 @@ fn delete_concurrent_with_signing() { #[test] fn delete_then_reimport() { run_test(|tester| async move { + let _ = &tester; let password = random_password_string(); let keystores = (0..2) .map(|_| new_keystore(password.clone())) From 27e83b888ce5e7c1ebc59275e2bf0bfc0065e2c2 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 28 Feb 2022 22:07:48 +0000 Subject: [PATCH 13/14] Retrospective invalidation of exec. payloads for opt. sync (#2837) ## Issue Addressed NA ## Proposed Changes Adds the functionality to allow blocks to be validated/invalidated after their import as per the [optimistic sync spec](https://github.com/ethereum/consensus-specs/blob/dev/sync/optimistic.md#how-to-optimistically-import-blocks). This means: - Updating `ProtoArray` to allow flipping the `execution_status` of ancestors/descendants based on payload validity updates. - Creating separation between `execution_layer` and the `beacon_chain` by creating a `PayloadStatus` struct. - Refactoring how the `execution_layer` selects a `PayloadStatus` from the multiple statuses returned from multiple EEs. - Adding testing framework for optimistic imports. - Add `ExecutionBlockHash(Hash256)` new-type struct to avoid confusion between *beacon block roots* and *execution payload hashes*. 
- Add `merge` to [`FORKS`](https://github.com/sigp/lighthouse/blob/c3a793fd73a3b11b130b82032904d39c952869e4/Makefile#L17) in the `Makefile` to ensure we test the beacon chain with merge settings. - Fix some tests here that were failing due to a missing execution layer. ## TODO - [ ] Balance tests Co-authored-by: Mark Mackey --- Makefile | 2 +- beacon_node/beacon_chain/src/beacon_chain.rs | 315 +++-- .../beacon_chain/src/block_verification.rs | 21 +- beacon_node/beacon_chain/src/errors.rs | 18 +- .../beacon_chain/src/execution_payload.rs | 40 +- beacon_node/beacon_chain/src/lib.rs | 2 +- beacon_node/beacon_chain/src/test_utils.rs | 15 +- .../tests/attestation_production.rs | 1 + .../tests/attestation_verification.rs | 1 + .../beacon_chain/tests/block_verification.rs | 4 + beacon_node/beacon_chain/tests/main.rs | 1 + beacon_node/beacon_chain/tests/merge.rs | 4 +- .../beacon_chain/tests/op_verification.rs | 1 + .../tests/payload_invalidation.rs | 601 +++++++++ beacon_node/beacon_chain/tests/store_tests.rs | 14 + .../tests/sync_committee_verification.rs | 1 + beacon_node/beacon_chain/tests/tests.rs | 1 + beacon_node/client/src/builder.rs | 47 +- beacon_node/execution_layer/src/engine_api.rs | 15 +- .../execution_layer/src/engine_api/http.rs | 60 +- .../src/engine_api/json_structures.rs | 14 +- beacon_node/execution_layer/src/engines.rs | 12 +- beacon_node/execution_layer/src/lib.rs | 210 +--- .../execution_layer/src/payload_status.rs | 191 +++ .../test_utils/execution_block_generator.rs | 99 +- .../src/test_utils/handle_rpc.rs | 52 +- .../src/test_utils/mock_execution_layer.rs | 14 +- .../execution_layer/src/test_utils/mod.rs | 65 +- common/task_executor/src/lib.rs | 2 +- consensus/fork_choice/src/fork_choice.rs | 59 +- consensus/fork_choice/tests/tests.rs | 38 +- consensus/proto_array/src/bin.rs | 12 + consensus/proto_array/src/error.rs | 20 +- .../src/fork_choice_test_definition.rs | 91 +- .../execution_status.rs | 1092 +++++++++++++++++ .../ffg_updates.rs | 120 +- 
.../fork_choice_test_definition/no_votes.rs | 38 +- .../src/fork_choice_test_definition/votes.rs | 236 ++-- consensus/proto_array/src/proto_array.rs | 310 ++++- .../src/proto_array_fork_choice.rs | 83 +- .../src/per_block_processing/errors.rs | 4 +- consensus/types/src/chain_spec.rs | 12 +- consensus/types/src/execution_block_hash.rs | 101 ++ consensus/types/src/execution_payload.rs | 4 +- .../types/src/execution_payload_header.rs | 4 +- consensus/types/src/lib.rs | 2 + lcli/src/create_payload_header.rs | 2 +- lcli/src/new_testnet.rs | 2 +- testing/ef_tests/src/cases/fork_choice.rs | 8 +- .../src/test_rig.rs | 65 +- 50 files changed, 3358 insertions(+), 768 deletions(-) create mode 100644 beacon_node/beacon_chain/tests/payload_invalidation.rs create mode 100644 beacon_node/execution_layer/src/payload_status.rs create mode 100644 consensus/proto_array/src/fork_choice_test_definition/execution_status.rs create mode 100644 consensus/types/src/execution_block_hash.rs diff --git a/Makefile b/Makefile index bc607304af..8507767c3c 100644 --- a/Makefile +++ b/Makefile @@ -15,7 +15,7 @@ PINNED_NIGHTLY ?= nightly # List of all hard forks. This list is used to set env variables for several tests so that # they run for different forks. -FORKS=phase0 altair +FORKS=phase0 altair merge # Builds the Lighthouse binary in release (optimized). 
# diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 0b9dbb50a2..4cd5cfe07b 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -52,7 +52,7 @@ use crate::{metrics, BeaconChainError}; use eth2::types::{ EventKind, SseBlock, SseChainReorg, SseFinalizedCheckpoint, SseHead, SseLateHead, SyncDuty, }; -use execution_layer::{ExecutionLayer, PayloadStatusV1Status}; +use execution_layer::{ExecutionLayer, PayloadStatus}; use fork_choice::{AttestationFromBlock, ForkChoice}; use futures::channel::mpsc::Sender; use itertools::process_results; @@ -112,6 +112,10 @@ pub const FORK_CHOICE_DB_KEY: Hash256 = Hash256::zero(); /// Defines how old a block can be before it's no longer a candidate for the early attester cache. const EARLY_ATTESTER_CACHE_HISTORIC_SLOTS: u64 = 4; +/// Reported to the user when the justified block has an invalid execution payload. +pub const INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON: &str = + "Justified block has an invalid execution payload."; + /// Defines the behaviour when a block/block-root for a skipped slot is requested. pub enum WhenSlotSkipped { /// If the slot is a skip slot, return `None`. @@ -201,7 +205,7 @@ pub struct HeadInfo { pub genesis_validators_root: Hash256, pub proposer_shuffling_decision_root: Hash256, pub is_merge_transition_complete: bool, - pub execution_payload_block_hash: Option, + pub execution_payload_block_hash: Option, } pub trait BeaconChainTypes: Send + Sync + 'static { @@ -220,15 +224,15 @@ pub enum HeadSafetyStatus { /// /// If the block is post-terminal-block, `Some(execution_payload.block_hash)` is included with /// the variant. - Safe(Option), + Safe(Option), /// The head block execution payload has not yet been verified by an EL. /// /// The `execution_payload.block_hash` of the head block is returned. 
- Unsafe(Hash256), + Unsafe(ExecutionBlockHash), /// The head block execution payload was deemed to be invalid by an EL. /// /// The `execution_payload.block_hash` of the head block is returned. - Invalid(Hash256), + Invalid(ExecutionBlockHash), } pub type BeaconForkChoice = ForkChoice< @@ -3173,6 +3177,101 @@ impl BeaconChain { Ok((block, state)) } + /// This method must be called whenever an execution engine indicates that a payload is + /// invalid. + /// + /// If the `latest_root` is known to fork-choice it will be invalidated. If it is not known, an + /// error will be returned. + /// + /// If `latest_valid_hash` is `None` or references a block unknown to fork choice, no other + /// blocks will be invalidated. If `latest_valid_hash` is a block known to fork choice, all + /// blocks between the `latest_root` and the `latest_valid_hash` will be invalidated (which may + /// cause further, second-order invalidations). + /// + /// ## Notes + /// + /// Use these rules to set `latest_root`: + /// + /// - When `forkchoiceUpdated` indicates an invalid block, set `latest_root` to be the + /// block root that was the head of the chain when `forkchoiceUpdated` was called. + /// - When `executePayload` returns an invalid block *during* block import, set + /// `latest_root` to be the parent of the beacon block containing the invalid + /// payload (because the block containing the payload is not present in fork choice). + /// - When `executePayload` returns an invalid block *after* block import, set + /// `latest_root` to be root of the beacon block containing the invalid payload. + pub fn process_invalid_execution_payload( + &self, + latest_root: Hash256, + latest_valid_hash: Option, + ) -> Result<(), Error> { + debug!( + self.log, + "Invalid execution payload in block"; + "latest_valid_hash" => ?latest_valid_hash, + "latest_root" => ?latest_root, + ); + + // Update fork choice. 
+ if let Err(e) = self + .fork_choice + .write() + .on_invalid_execution_payload(latest_root, latest_valid_hash) + { + crit!( + self.log, + "Failed to process invalid payload"; + "error" => ?e, + "latest_valid_hash" => ?latest_valid_hash, + "latest_root" => ?latest_root, + ); + } + + // Run fork choice since it's possible that the payload invalidation might result in a new + // head. + // + // Don't return early though, since invalidating the justified checkpoint might cause an + // error here. + if let Err(e) = self.fork_choice() { + crit!( + self.log, + "Failed to run fork choice routine"; + "error" => ?e, + ); + } + + // Atomically obtain the justified root from fork choice. + let justified_block = self.fork_choice.read().get_justified_block()?; + + if justified_block.execution_status.is_invalid() { + crit!( + self.log, + "The justified checkpoint is invalid"; + "msg" => "ensure you are not connected to a malicious network. This error is not \ + recoverable, please reach out to the lighthouse developers for assistance." + ); + + let mut shutdown_sender = self.shutdown_sender(); + if let Err(e) = shutdown_sender.try_send(ShutdownReason::Failure( + INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, + )) { + crit!( + self.log, + "Unable to trigger client shut down"; + "msg" => "shut down may already be under way", + "error" => ?e + ); + } + + // Return an error here to try and prevent progression by upstream functions. + return Err(Error::JustifiedPayloadInvalid { + justified_root: justified_block.root, + execution_block_hash: justified_block.execution_status.block_hash(), + }); + } + + Ok(()) + } + /// Execute the fork choice algorithm and enthrone the result as the canonical head. pub fn fork_choice(&self) -> Result<(), Error> { metrics::inc_counter(&metrics::FORK_CHOICE_REQUESTS); @@ -3188,19 +3287,47 @@ impl BeaconChain { } fn fork_choice_internal(&self) -> Result<(), Error> { - // Determine the root of the block that is the head of the chain. 
- let beacon_block_root = self - .fork_choice - .write() - .get_head(self.slot()?, &self.spec)?; + // Atomically obtain the head block root and the finalized block. + let (beacon_block_root, finalized_block) = { + let mut fork_choice = self.fork_choice.write(); + + // Determine the root of the block that is the head of the chain. + let beacon_block_root = fork_choice.get_head(self.slot()?, &self.spec)?; + + (beacon_block_root, fork_choice.get_finalized_block()?) + }; let current_head = self.head_info()?; let old_finalized_checkpoint = current_head.finalized_checkpoint; + // Exit early if the head hasn't changed. if beacon_block_root == current_head.block_root { return Ok(()); } + // Check to ensure that this finalized block hasn't been marked as invalid. + if let ExecutionStatus::Invalid(block_hash) = finalized_block.execution_status { + crit!( + self.log, + "Finalized block has an invalid payload"; + "msg" => "You must use the `--purge-db` flag to clear the database and restart sync. \ + You may be on a hostile network.", + "block_hash" => ?block_hash + ); + let mut shutdown_sender = self.shutdown_sender(); + shutdown_sender + .try_send(ShutdownReason::Failure( + "Finalized block has an invalid execution payload.", + )) + .map_err(BeaconChainError::InvalidFinalizedPayloadShutdownError)?; + + // Exit now, the node is in an invalid state. + return Err(Error::InvalidFinalizedPayload { + finalized_root: finalized_block.root, + execution_block_hash: block_hash, + }); + } + let lag_timer = metrics::start_timer(&metrics::FORK_CHOICE_SET_HEAD_LAG_TIMES); // At this point we know that the new head block is not the same as the previous one @@ -3448,33 +3575,6 @@ impl BeaconChain { } if new_finalized_checkpoint.epoch != old_finalized_checkpoint.epoch { - // Check to ensure that this finalized block hasn't been marked as invalid. 
- let finalized_block = self - .fork_choice - .read() - .get_block(&new_finalized_checkpoint.root) - .ok_or(BeaconChainError::FinalizedBlockMissingFromForkChoice( - new_finalized_checkpoint.root, - ))?; - if let ExecutionStatus::Invalid(block_hash) = finalized_block.execution_status { - crit!( - self.log, - "Finalized block has an invalid payload"; - "msg" => "You must use the `--purge-db` flag to clear the database and restart sync. \ - You may be on a hostile network.", - "block_hash" => ?block_hash - ); - let mut shutdown_sender = self.shutdown_sender(); - shutdown_sender - .try_send(ShutdownReason::Failure( - "Finalized block has an invalid execution payload.", - )) - .map_err(BeaconChainError::InvalidFinalizedPayloadShutdownError)?; - - // Exit now, the node is in an invalid state. - return Ok(()); - } - // Due to race conditions, it's technically possible that the head we load here is // different to the one earlier in this function. // @@ -3575,64 +3675,59 @@ impl BeaconChain { // If this is a post-merge block, update the execution layer. if let Some(new_head_execution_block_hash) = new_head_execution_block_hash_opt { if is_merge_transition_complete { - let execution_layer = self - .execution_layer - .clone() - .ok_or(Error::ExecutionLayerMissing)?; - let store = self.store.clone(); - let log = self.log.clone(); - - // Spawn the update task, without waiting for it to complete. 
- execution_layer.spawn( - move |execution_layer| async move { - if let Err(e) = Self::update_execution_engine_forkchoice( - execution_layer, - store, - new_finalized_checkpoint.root, - new_head_execution_block_hash, - &log, - ) - .await - { - crit!( - log, - "Failed to update execution head"; - "error" => ?e - ); - } - }, - "update_execution_engine_forkchoice", - ) + let finalized_execution_block_hash = finalized_block + .execution_status + .block_hash() + .unwrap_or_else(ExecutionBlockHash::zero); + if let Err(e) = self.update_execution_engine_forkchoice_blocking( + finalized_execution_block_hash, + beacon_block_root, + new_head_execution_block_hash, + ) { + crit!( + self.log, + "Failed to update execution head"; + "error" => ?e + ); + } } } Ok(()) } - pub async fn update_execution_engine_forkchoice( - execution_layer: ExecutionLayer, - store: BeaconStore, - finalized_beacon_block_root: Hash256, - head_execution_block_hash: Hash256, - log: &Logger, + pub fn update_execution_engine_forkchoice_blocking( + &self, + finalized_execution_block_hash: ExecutionBlockHash, + head_block_root: Hash256, + head_execution_block_hash: ExecutionBlockHash, ) -> Result<(), Error> { - // Loading the finalized block from the store is not ideal. Perhaps it would be better to - // store it on fork-choice so we can do a lookup without hitting the database. - // - // See: https://github.com/sigp/lighthouse/pull/2627#issuecomment-927537245 - let finalized_block = store - .get_block(&finalized_beacon_block_root)? 
- .ok_or(Error::MissingBeaconBlock(finalized_beacon_block_root))?; + let execution_layer = self + .execution_layer + .as_ref() + .ok_or(Error::ExecutionLayerMissing)?; - let finalized_execution_block_hash = finalized_block - .message() - .body() - .execution_payload() - .ok() - .map(|ep| ep.block_hash) - .unwrap_or_else(Hash256::zero); + execution_layer + .block_on_generic(|_| { + self.update_execution_engine_forkchoice_async( + finalized_execution_block_hash, + head_block_root, + head_execution_block_hash, + ) + }) + .map_err(Error::ForkchoiceUpdate)? + } - let forkchoice_updated_response = execution_layer + pub async fn update_execution_engine_forkchoice_async( + &self, + finalized_execution_block_hash: ExecutionBlockHash, + head_block_root: Hash256, + head_execution_block_hash: ExecutionBlockHash, + ) -> Result<(), Error> { + let forkchoice_updated_response = self + .execution_layer + .as_ref() + .ok_or(Error::ExecutionLayerMissing)? .notify_forkchoice_updated( head_execution_block_hash, finalized_execution_block_hash, @@ -3642,14 +3737,14 @@ impl BeaconChain { .map_err(Error::ExecutionForkChoiceUpdateFailed); match forkchoice_updated_response { - Ok((status, latest_valid_hash)) => match status { - PayloadStatusV1Status::Valid | PayloadStatusV1Status::Syncing => Ok(()), + Ok(status) => match &status { + PayloadStatus::Valid | PayloadStatus::Syncing => Ok(()), // The specification doesn't list `ACCEPTED` as a valid response to a fork choice // update. This response *seems* innocent enough, so we won't return early with an // error. However, we create a log to bring attention to the issue. - PayloadStatusV1Status::Accepted => { + PayloadStatus::Accepted => { warn!( - log, + self.log, "Fork choice update received ACCEPTED"; "msg" => "execution engine provided an unexpected response to a fork \ choice update. 
although this is not a serious issue, please raise \ @@ -3657,16 +3752,38 @@ impl BeaconChain { ); Ok(()) } - PayloadStatusV1Status::Invalid - | PayloadStatusV1Status::InvalidTerminalBlock - | PayloadStatusV1Status::InvalidBlockHash => { - // TODO(bellatrix): process the invalid payload. + PayloadStatus::Invalid { + latest_valid_hash, .. + } => { + warn!( + self.log, + "Fork choice update invalidated payload"; + "status" => ?status + ); + // The execution engine has stated that all blocks between the + // `head_execution_block_hash` and `latest_valid_hash` are invalid. + self.process_invalid_execution_payload( + head_block_root, + Some(*latest_valid_hash), + )?; + + Err(BeaconChainError::ExecutionForkChoiceUpdateInvalid { status }) + } + PayloadStatus::InvalidTerminalBlock { .. } + | PayloadStatus::InvalidBlockHash { .. } => { + warn!( + self.log, + "Fork choice update invalidated payload"; + "status" => ?status + ); + // The execution engine has stated that the head block is invalid, however it + // hasn't returned a latest valid ancestor. // - // See: https://github.com/sigp/lighthouse/pull/2837 - Err(BeaconChainError::ExecutionForkChoiceUpdateInvalid { - status, - latest_valid_hash, - }) + // Using a `None` latest valid ancestor will result in only the head block + // being invalidated (no ancestors). 
+ self.process_invalid_execution_payload(head_block_root, None)?; + + Err(BeaconChainError::ExecutionForkChoiceUpdateInvalid { status }) } }, Err(e) => Err(e), diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index bb4ca4aa40..4ac587fd76 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -54,7 +54,7 @@ use crate::{ metrics, BeaconChain, BeaconChainError, BeaconChainTypes, }; use eth2::types::EventKind; -use execution_layer::PayloadStatusV1Status; +use execution_layer::PayloadStatus; use fork_choice::{ForkChoice, ForkChoiceStore, PayloadVerificationStatus}; use parking_lot::RwLockReadGuard; use proto_array::Block as ProtoBlock; @@ -76,9 +76,9 @@ use std::time::Duration; use store::{Error as DBError, HotColdDB, HotStateSummary, KeyValueStore, StoreOp}; use tree_hash::TreeHash; use types::{ - BeaconBlockRef, BeaconState, BeaconStateError, ChainSpec, CloneConfig, Epoch, EthSpec, Hash256, - InconsistentFork, PublicKey, PublicKeyBytes, RelativeEpoch, SignedBeaconBlock, - SignedBeaconBlockHeader, Slot, + BeaconBlockRef, BeaconState, BeaconStateError, ChainSpec, CloneConfig, Epoch, EthSpec, + ExecutionBlockHash, Hash256, InconsistentFork, PublicKey, PublicKeyBytes, RelativeEpoch, + SignedBeaconBlock, SignedBeaconBlockHeader, Slot, }; /// Maximum block slot number. Block with slots bigger than this constant will NOT be processed. 
@@ -270,10 +270,7 @@ pub enum ExecutionPayloadError { /// ## Peer scoring /// /// The block is invalid and the peer is faulty - RejectedByExecutionEngine { - status: PayloadStatusV1Status, - latest_valid_hash: Option>, - }, + RejectedByExecutionEngine { status: PayloadStatus }, /// The execution payload timestamp does not match the slot /// /// ## Peer scoring @@ -286,7 +283,7 @@ pub enum ExecutionPayloadError { /// /// The block is invalid and the peer sent us a block that passes gossip propagation conditions, /// but is invalid upon further verification. - InvalidTerminalPoWBlock { parent_hash: Hash256 }, + InvalidTerminalPoWBlock { parent_hash: ExecutionBlockHash }, /// The `TERMINAL_BLOCK_HASH` is set, but the block has not reached the /// `TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH`. /// @@ -305,8 +302,8 @@ pub enum ExecutionPayloadError { /// The block is invalid and the peer sent us a block that passes gossip propagation conditions, /// but is invalid upon further verification. InvalidTerminalBlockHash { - terminal_block_hash: Hash256, - payload_parent_hash: Hash256, + terminal_block_hash: ExecutionBlockHash, + payload_parent_hash: ExecutionBlockHash, }, /// The execution node failed to provide a parent block to a known block. This indicates an /// issue with the execution node. @@ -314,7 +311,7 @@ pub enum ExecutionPayloadError { /// ## Peer scoring /// /// The peer is not necessarily invalid. 
- PoWParentMissing(Hash256), + PoWParentMissing(ExecutionBlockHash), } impl From for ExecutionPayloadError { diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 4ca1597932..e8cc157ce4 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -8,7 +8,7 @@ use crate::naive_aggregation_pool::Error as NaiveAggregationError; use crate::observed_aggregates::Error as ObservedAttestationsError; use crate::observed_attesters::Error as ObservedAttestersError; use crate::observed_block_producers::Error as ObservedBlockProducersError; -use execution_layer::PayloadStatusV1Status; +use execution_layer::PayloadStatus; use futures::channel::mpsc::TrySendError; use operation_pool::OpPoolError; use safe_arith::ArithError; @@ -139,15 +139,27 @@ pub enum BeaconChainError { ExecutionLayerMissing, ExecutionForkChoiceUpdateFailed(execution_layer::Error), ExecutionForkChoiceUpdateInvalid { - status: PayloadStatusV1Status, - latest_valid_hash: Option>, + status: PayloadStatus, }, BlockRewardSlotError, BlockRewardAttestationError, BlockRewardSyncError, HeadMissingFromForkChoice(Hash256), FinalizedBlockMissingFromForkChoice(Hash256), + InvalidFinalizedPayload { + finalized_root: Hash256, + execution_block_hash: ExecutionBlockHash, + }, InvalidFinalizedPayloadShutdownError(TrySendError), + JustifiedPayloadInvalid { + justified_root: Hash256, + execution_block_hash: Option, + }, + ForkchoiceUpdate(execution_layer::Error), + FinalizedCheckpointMismatch { + head_state: Checkpoint, + fork_choice: Hash256, + }, } easy_from_to!(SlotProcessingError, BeaconChainError); diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index 09bfa25783..30a0d2b198 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -11,7 +11,7 @@ use crate::{ BeaconChain, BeaconChainError, BeaconChainTypes, 
BlockError, BlockProductionError, ExecutionPayloadError, }; -use execution_layer::PayloadStatusV1Status; +use execution_layer::PayloadStatus; use fork_choice::PayloadVerificationStatus; use proto_array::{Block as ProtoBlock, ExecutionStatus}; use slog::debug; @@ -57,22 +57,26 @@ pub fn notify_new_payload( .block_on(|execution_layer| execution_layer.notify_new_payload(execution_payload)); match new_payload_response { - Ok((status, latest_valid_hash)) => match status { - PayloadStatusV1Status::Valid => Ok(PayloadVerificationStatus::Verified), - PayloadStatusV1Status::Syncing | PayloadStatusV1Status::Accepted => { + Ok(status) => match status { + PayloadStatus::Valid => Ok(PayloadVerificationStatus::Verified), + PayloadStatus::Syncing | PayloadStatus::Accepted => { Ok(PayloadVerificationStatus::NotVerified) } - PayloadStatusV1Status::Invalid - | PayloadStatusV1Status::InvalidTerminalBlock - | PayloadStatusV1Status::InvalidBlockHash => { - // TODO(bellatrix): process the invalid payload. - // - // See: https://github.com/sigp/lighthouse/pull/2837 - Err(ExecutionPayloadError::RejectedByExecutionEngine { - status, - latest_valid_hash, - } - .into()) + PayloadStatus::Invalid { + latest_valid_hash, .. + } => { + // This block has not yet been applied to fork choice, so the latest block that was + // imported to fork choice was the parent. + let latest_root = block.parent_root(); + chain.process_invalid_execution_payload(latest_root, Some(latest_valid_hash))?; + + Err(ExecutionPayloadError::RejectedByExecutionEngine { status }.into()) + } + PayloadStatus::InvalidTerminalBlock { .. } | PayloadStatus::InvalidBlockHash { .. } => { + // Returning an error here should be sufficient to invalidate the block. We have no + // information to indicate its parent is invalid, so no need to run + // `BeaconChain::process_invalid_execution_payload`. 
+ Err(ExecutionPayloadError::RejectedByExecutionEngine { status }.into()) } }, Err(e) => Err(ExecutionPayloadError::RequestFailed(e).into()), @@ -99,7 +103,7 @@ pub fn validate_merge_block( let block_epoch = block.slot().epoch(T::EthSpec::slots_per_epoch()); let execution_payload = block.execution_payload()?; - if spec.terminal_block_hash != Hash256::zero() { + if spec.terminal_block_hash != ExecutionBlockHash::zero() { if block_epoch < spec.terminal_block_hash_activation_epoch { return Err(ExecutionPayloadError::InvalidActivationEpoch { activation_epoch: spec.terminal_block_hash_activation_epoch, @@ -263,7 +267,7 @@ pub async fn prepare_execution_payload( .ok_or(BlockProductionError::ExecutionLayerMissing)?; let parent_hash = if !is_merge_transition_complete(state) { - let is_terminal_block_hash_set = spec.terminal_block_hash != Hash256::zero(); + let is_terminal_block_hash_set = spec.terminal_block_hash != ExecutionBlockHash::zero(); let is_activation_epoch_reached = state.current_epoch() >= spec.terminal_block_hash_activation_epoch; @@ -314,7 +318,7 @@ pub async fn prepare_execution_payload( parent_hash, timestamp, random, - finalized_block_hash.unwrap_or_else(Hash256::zero), + finalized_block_hash.unwrap_or_else(ExecutionBlockHash::zero), proposer_index, ) .await diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index d41c1a5cc5..74649bdee3 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -41,7 +41,7 @@ mod validator_pubkey_cache; pub use self::beacon_chain::{ AttestationProcessingOutcome, BeaconChain, BeaconChainTypes, BeaconStore, ChainSegmentResult, ForkChoiceError, HeadInfo, HeadSafetyStatus, StateSkipConfig, WhenSlotSkipped, - MAXIMUM_GOSSIP_CLOCK_DISPARITY, + INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, MAXIMUM_GOSSIP_CLOCK_DISPARITY, }; pub use self::beacon_snapshot::BeaconSnapshot; pub use self::chain_config::ChainConfig; diff --git 
a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 574895296d..0142a0f0bb 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -432,7 +432,7 @@ where spec: chain.spec.clone(), chain: Arc::new(chain), validator_keypairs, - shutdown_receiver, + shutdown_receiver: Arc::new(Mutex::new(shutdown_receiver)), mock_execution_layer: self.mock_execution_layer, execution_layer_runtime: self.execution_layer_runtime, rng: make_rng(), @@ -449,7 +449,7 @@ pub struct BeaconChainHarness { pub chain: Arc>, pub spec: ChainSpec, - pub shutdown_receiver: Receiver, + pub shutdown_receiver: Arc>>, pub mock_execution_layer: Option>, pub execution_layer_runtime: Option, @@ -502,6 +502,17 @@ where epoch.start_slot(E::slots_per_epoch()).into() } + pub fn shutdown_reasons(&self) -> Vec { + let mutex = self.shutdown_receiver.clone(); + let mut receiver = mutex.lock(); + std::iter::from_fn(move || match receiver.try_next() { + Ok(Some(s)) => Some(s), + Ok(None) => panic!("shutdown sender dropped"), + Err(_) => None, + }) + .collect() + } + pub fn get_current_state(&self) -> BeaconState { self.chain.head().unwrap().beacon_state } diff --git a/beacon_node/beacon_chain/tests/attestation_production.rs b/beacon_node/beacon_chain/tests/attestation_production.rs index 4d862cbac7..189d3baded 100644 --- a/beacon_node/beacon_chain/tests/attestation_production.rs +++ b/beacon_node/beacon_chain/tests/attestation_production.rs @@ -26,6 +26,7 @@ fn produces_attestations() { .default_spec() .keypairs(KEYPAIRS[..].to_vec()) .fresh_ephemeral_store() + .mock_execution_layer() .build(); let chain = &harness.chain; diff --git a/beacon_node/beacon_chain/tests/attestation_verification.rs b/beacon_node/beacon_chain/tests/attestation_verification.rs index 3c675ec6a4..00bf9fa9aa 100644 --- a/beacon_node/beacon_chain/tests/attestation_verification.rs +++ b/beacon_node/beacon_chain/tests/attestation_verification.rs @@ -42,6 
+42,7 @@ fn get_harness(validator_count: usize) -> BeaconChainHarness BeaconChainHarness(chain: &[ExecutionPayload]) { for ep in chain { assert!(*ep != ExecutionPayload::default()); - assert!(ep.block_hash != Hash256::zero()); + assert!(ep.block_hash != ExecutionBlockHash::zero()); // Check against previous `ExecutionPayload`. if let Some(prev_ep) = prev_ep { @@ -40,7 +40,7 @@ fn merge_with_terminal_block_hash_override() { spec.terminal_total_difficulty, DEFAULT_TERMINAL_BLOCK, 0, - Hash256::zero(), + ExecutionBlockHash::zero(), ) .unwrap() .block_hash; diff --git a/beacon_node/beacon_chain/tests/op_verification.rs b/beacon_node/beacon_chain/tests/op_verification.rs index ec22a4804a..c9df6aa31d 100644 --- a/beacon_node/beacon_chain/tests/op_verification.rs +++ b/beacon_node/beacon_chain/tests/op_verification.rs @@ -40,6 +40,7 @@ fn get_harness(store: Arc, validator_count: usize) -> TestHarness { .default_spec() .keypairs(KEYPAIRS[0..validator_count].to_vec()) .fresh_disk_store(store) + .mock_execution_layer() .build(); harness.advance_slot(); harness diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs new file mode 100644 index 0000000000..198f674157 --- /dev/null +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -0,0 +1,601 @@ +#![cfg(not(debug_assertions))] + +use beacon_chain::{ + test_utils::{BeaconChainHarness, EphemeralHarnessType}, + BeaconChainError, BlockError, ExecutionPayloadError, HeadInfo, StateSkipConfig, + WhenSlotSkipped, INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, +}; +use proto_array::ExecutionStatus; +use task_executor::ShutdownReason; +use types::*; + +const VALIDATOR_COUNT: usize = 32; + +type E = MainnetEthSpec; + +#[derive(PartialEq, Clone)] +enum Payload { + Valid, + Invalid { + latest_valid_hash: Option, + }, + Syncing, +} + +struct InvalidPayloadRig { + harness: BeaconChainHarness>, + enable_attestations: bool, +} + +impl InvalidPayloadRig { + fn new() -> 
Self { + let mut spec = E::default_spec(); + spec.altair_fork_epoch = Some(Epoch::new(0)); + spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + + let harness = BeaconChainHarness::builder(MainnetEthSpec) + .spec(spec) + .deterministic_keypairs(VALIDATOR_COUNT) + .mock_execution_layer() + .fresh_ephemeral_store() + .build(); + + // Move to slot 1. + harness.advance_slot(); + + Self { + harness, + enable_attestations: false, + } + } + + fn enable_attestations(mut self) -> Self { + self.enable_attestations = true; + self + } + + fn block_hash(&self, block_root: Hash256) -> ExecutionBlockHash { + self.harness + .chain + .get_block(&block_root) + .unwrap() + .unwrap() + .message() + .body() + .execution_payload() + .unwrap() + .block_hash + } + + fn execution_status(&self, block_root: Hash256) -> ExecutionStatus { + self.harness + .chain + .fork_choice + .read() + .get_block(&block_root) + .unwrap() + .execution_status + } + + fn fork_choice(&self) { + self.harness.chain.fork_choice().unwrap(); + } + + fn head_info(&self) -> HeadInfo { + self.harness.chain.head_info().unwrap() + } + + fn move_to_terminal_block(&self) { + let mock_execution_layer = self.harness.mock_execution_layer.as_ref().unwrap(); + mock_execution_layer + .server + .execution_block_generator() + .move_to_terminal_block() + .unwrap(); + } + + fn build_blocks(&mut self, num_blocks: u64, is_valid: Payload) -> Vec { + (0..num_blocks) + .map(|_| self.import_block(is_valid.clone())) + .collect() + } + + fn move_to_first_justification(&mut self, is_valid: Payload) { + let slots_till_justification = E::slots_per_epoch() * 3; + self.build_blocks(slots_till_justification, is_valid); + + let justified_checkpoint = self.head_info().current_justified_checkpoint; + assert_eq!(justified_checkpoint.epoch, 2); + } + + fn import_block(&mut self, is_valid: Payload) -> Hash256 { + self.import_block_parametric(is_valid, |error| { + matches!( + error, + BlockError::ExecutionPayloadError( + 
ExecutionPayloadError::RejectedByExecutionEngine { .. } + ) + ) + }) + } + + fn block_root_at_slot(&self, slot: Slot) -> Option { + self.harness + .chain + .block_root_at_slot(slot, WhenSlotSkipped::None) + .unwrap() + } + + fn import_block_parametric) -> bool>( + &mut self, + is_valid: Payload, + evaluate_error: F, + ) -> Hash256 { + let mock_execution_layer = self.harness.mock_execution_layer.as_ref().unwrap(); + + let head = self.harness.chain.head().unwrap(); + let state = head.beacon_state; + let slot = state.slot() + 1; + let (block, post_state) = self.harness.make_block(state, slot); + let block_root = block.canonical_root(); + + match is_valid { + Payload::Valid | Payload::Syncing => { + if is_valid == Payload::Syncing { + // Importing a payload whilst returning `SYNCING` simulates an EE that obtains + // the block via it's own means (e.g., devp2p). + let should_import_payload = true; + mock_execution_layer + .server + .all_payloads_syncing(should_import_payload); + } else { + mock_execution_layer.server.full_payload_verification(); + } + let root = self.harness.process_block(slot, block.clone()).unwrap(); + + if self.enable_attestations { + let all_validators: Vec = (0..VALIDATOR_COUNT).collect(); + self.harness.attest_block( + &post_state, + block.state_root(), + block_root.into(), + &block, + &all_validators, + ); + } + + let execution_status = self.execution_status(root.into()); + + match is_valid { + Payload::Syncing => assert!(execution_status.is_not_verified()), + Payload::Valid => assert!(execution_status.is_valid()), + Payload::Invalid { .. 
} => unreachable!(), + } + + assert_eq!( + self.harness.chain.get_block(&block_root).unwrap().unwrap(), + block, + "block from db must match block imported" + ); + } + Payload::Invalid { latest_valid_hash } => { + let latest_valid_hash = latest_valid_hash + .unwrap_or_else(|| self.block_hash(block.message().parent_root())); + + mock_execution_layer + .server + .all_payloads_invalid(latest_valid_hash); + + match self.harness.process_block(slot, block) { + Err(error) if evaluate_error(&error) => (), + Err(other) => { + panic!("evaluate_error returned false with {:?}", other) + } + Ok(_) => panic!("block with invalid payload was imported"), + }; + + assert!( + self.harness + .chain + .fork_choice + .read() + .get_block(&block_root) + .is_none(), + "invalid block must not exist in fork choice" + ); + assert!( + self.harness.chain.get_block(&block_root).unwrap().is_none(), + "invalid block cannot be accessed via get_block" + ); + } + } + + block_root + } +} + +/// Simple test of the different import types. +#[test] +fn valid_invalid_syncing() { + let mut rig = InvalidPayloadRig::new(); + rig.move_to_terminal_block(); + + rig.import_block(Payload::Valid); + rig.import_block(Payload::Invalid { + latest_valid_hash: None, + }); + rig.import_block(Payload::Syncing); +} + +/// Ensure that an invalid payload can invalidate its parent too (given the right +/// `latest_valid_hash`. 
+#[test] +fn invalid_payload_invalidates_parent() { + let mut rig = InvalidPayloadRig::new(); + rig.move_to_terminal_block(); + + let roots = vec![ + rig.import_block(Payload::Syncing), + rig.import_block(Payload::Syncing), + rig.import_block(Payload::Syncing), + ]; + + let latest_valid_hash = rig.block_hash(roots[0]); + + rig.import_block(Payload::Invalid { + latest_valid_hash: Some(latest_valid_hash), + }); + + assert!(rig.execution_status(roots[0]).is_valid()); + assert!(rig.execution_status(roots[1]).is_invalid()); + assert!(rig.execution_status(roots[2]).is_invalid()); + + assert_eq!(rig.head_info().block_root, roots[0]); +} + +/// Ensure the client tries to exit when the justified checkpoint is invalidated. +#[test] +fn justified_checkpoint_becomes_invalid() { + let mut rig = InvalidPayloadRig::new().enable_attestations(); + rig.move_to_terminal_block(); + rig.move_to_first_justification(Payload::Syncing); + + let justified_checkpoint = rig.head_info().current_justified_checkpoint; + let parent_root_of_justified = rig + .harness + .chain + .get_block(&justified_checkpoint.root) + .unwrap() + .unwrap() + .parent_root(); + let parent_hash_of_justified = rig.block_hash(parent_root_of_justified); + + // No service should have triggered a shutdown, yet. + assert!(rig.harness.shutdown_reasons().is_empty()); + + // Import a block that will invalidate the justified checkpoint. + rig.import_block_parametric( + Payload::Invalid { + latest_valid_hash: Some(parent_hash_of_justified), + }, + |error| { + matches!( + error, + // The block import should fail since the beacon chain knows the justified payload + // is invalid. + BlockError::BeaconChainError(BeaconChainError::JustifiedPayloadInvalid { .. }) + ) + }, + ); + + // The beacon chain should have triggered a shutdown. 
+ assert_eq!( + rig.harness.shutdown_reasons(), + vec![ShutdownReason::Failure( + INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON + )] + ); +} + +/// Ensure that a `latest_valid_hash` for a pre-finality block only reverts a single block. +#[test] +fn pre_finalized_latest_valid_hash() { + let num_blocks = E::slots_per_epoch() * 4; + let finalized_epoch = 2; + + let mut rig = InvalidPayloadRig::new().enable_attestations(); + rig.move_to_terminal_block(); + let blocks = rig.build_blocks(num_blocks, Payload::Syncing); + + assert_eq!(rig.head_info().finalized_checkpoint.epoch, finalized_epoch); + + let pre_finalized_block_root = rig.block_root_at_slot(Slot::new(1)).unwrap(); + let pre_finalized_block_hash = rig.block_hash(pre_finalized_block_root); + + // No service should have triggered a shutdown, yet. + assert!(rig.harness.shutdown_reasons().is_empty()); + + // Import a pre-finalized block. + rig.import_block(Payload::Invalid { + latest_valid_hash: Some(pre_finalized_block_hash), + }); + + // The latest imported block should be the head. + assert_eq!(rig.head_info().block_root, *blocks.last().unwrap()); + + // The beacon chain should *not* have triggered a shutdown. + assert_eq!(rig.harness.shutdown_reasons(), vec![]); + + // All blocks should still be unverified. + for i in E::slots_per_epoch() * finalized_epoch..num_blocks { + let slot = Slot::new(i); + let root = rig.block_root_at_slot(slot).unwrap(); + assert!(rig.execution_status(root).is_not_verified()); + } +} + +/// Ensure that a `latest_valid_hash` will: +/// +/// - Invalidate descendants of `latest_valid_root`. +/// - Validate `latest_valid_root` and its ancestors. 
+#[test] +fn latest_valid_hash_will_validate() { + const LATEST_VALID_SLOT: u64 = 3; + + let mut rig = InvalidPayloadRig::new().enable_attestations(); + rig.move_to_terminal_block(); + let blocks = rig.build_blocks(4, Payload::Syncing); + + let latest_valid_root = rig + .block_root_at_slot(Slot::new(LATEST_VALID_SLOT)) + .unwrap(); + let latest_valid_hash = rig.block_hash(latest_valid_root); + + rig.import_block(Payload::Invalid { + latest_valid_hash: Some(latest_valid_hash), + }); + + assert_eq!(rig.head_info().slot, LATEST_VALID_SLOT); + + for slot in 0..=4 { + let slot = Slot::new(slot); + let root = if slot > 0 { + // If not the genesis slot, check the blocks we just produced. + blocks[slot.as_usize() - 1] + } else { + // Genesis slot + rig.block_root_at_slot(slot).unwrap() + }; + let execution_status = rig.execution_status(root); + + if slot > LATEST_VALID_SLOT { + assert!(execution_status.is_invalid()) + } else if slot == 0 { + assert!(execution_status.is_irrelevant()) + } else { + assert!(execution_status.is_valid()) + } + } +} + +/// Check behaviour when the `latest_valid_hash` is a junk value. +#[test] +fn latest_valid_hash_is_junk() { + let num_blocks = E::slots_per_epoch() * 5; + let finalized_epoch = 3; + + let mut rig = InvalidPayloadRig::new().enable_attestations(); + rig.move_to_terminal_block(); + let blocks = rig.build_blocks(num_blocks, Payload::Syncing); + + assert_eq!(rig.head_info().finalized_checkpoint.epoch, finalized_epoch); + + // No service should have triggered a shutdown, yet. + assert!(rig.harness.shutdown_reasons().is_empty()); + + let junk_hash = ExecutionBlockHash::repeat_byte(42); + rig.import_block(Payload::Invalid { + latest_valid_hash: Some(junk_hash), + }); + + // The latest imported block should be the head. + assert_eq!(rig.head_info().block_root, *blocks.last().unwrap()); + + // The beacon chain should *not* have triggered a shutdown. 
+ assert_eq!(rig.harness.shutdown_reasons(), vec![]); + + // All blocks should still be unverified. + for i in E::slots_per_epoch() * finalized_epoch..num_blocks { + let slot = Slot::new(i); + let root = rig.block_root_at_slot(slot).unwrap(); + assert!(rig.execution_status(root).is_not_verified()); + } +} + +/// Check that descendants of invalid blocks are also invalidated. +#[test] +fn invalidates_all_descendants() { + let num_blocks = E::slots_per_epoch() * 4 + E::slots_per_epoch() / 2; + let finalized_epoch = 2; + let finalized_slot = E::slots_per_epoch() * 2; + + let mut rig = InvalidPayloadRig::new().enable_attestations(); + rig.move_to_terminal_block(); + let blocks = rig.build_blocks(num_blocks, Payload::Syncing); + + assert_eq!(rig.head_info().finalized_checkpoint.epoch, finalized_epoch); + assert_eq!(rig.head_info().block_root, *blocks.last().unwrap()); + + // Apply a block which conflicts with the canonical chain. + let fork_slot = Slot::new(4 * E::slots_per_epoch() + 3); + let fork_parent_slot = fork_slot - 1; + let fork_parent_state = rig + .harness + .chain + .state_at_slot(fork_parent_slot, StateSkipConfig::WithStateRoots) + .unwrap(); + assert_eq!(fork_parent_state.slot(), fork_parent_slot); + let (fork_block, _fork_post_state) = rig.harness.make_block(fork_parent_state, fork_slot); + let fork_block_root = rig.harness.chain.process_block(fork_block).unwrap(); + rig.fork_choice(); + + // The latest valid hash will be set to the grandparent of the fork block. This means that the + // parent of the fork block will become invalid. + let latest_valid_slot = fork_parent_slot - 1; + let latest_valid_root = rig + .harness + .chain + .block_root_at_slot(latest_valid_slot, WhenSlotSkipped::None) + .unwrap() + .unwrap(); + assert!(blocks.contains(&latest_valid_root)); + let latest_valid_hash = rig.block_hash(latest_valid_root); + + // The new block should not become the head, the old head should remain. 
+ assert_eq!(rig.head_info().block_root, *blocks.last().unwrap()); + + rig.import_block(Payload::Invalid { + latest_valid_hash: Some(latest_valid_hash), + }); + + // The block before the fork should become the head. + assert_eq!(rig.head_info().block_root, latest_valid_root); + + // The fork block should be invalidated, even though it's not an ancestor of the block that + // triggered the INVALID response from the EL. + assert!(rig.execution_status(fork_block_root).is_invalid()); + + for root in blocks { + let slot = rig.harness.chain.get_block(&root).unwrap().unwrap().slot(); + + // Fork choice doesn't have info about pre-finalization, nothing to check here. + if slot < finalized_slot { + continue; + } + + let execution_status = rig.execution_status(root); + if slot <= latest_valid_slot { + // Blocks prior to the latest valid hash are valid. + assert!(execution_status.is_valid()); + } else { + // Blocks after the latest valid hash are invalid. + assert!(execution_status.is_invalid()); + } + } +} + +/// Check that the head will switch after the canonical branch is invalidated. +#[test] +fn switches_heads() { + let num_blocks = E::slots_per_epoch() * 4 + E::slots_per_epoch() / 2; + let finalized_epoch = 2; + let finalized_slot = E::slots_per_epoch() * 2; + + let mut rig = InvalidPayloadRig::new().enable_attestations(); + rig.move_to_terminal_block(); + let blocks = rig.build_blocks(num_blocks, Payload::Syncing); + + assert_eq!(rig.head_info().finalized_checkpoint.epoch, finalized_epoch); + assert_eq!(rig.head_info().block_root, *blocks.last().unwrap()); + + // Apply a block which conflicts with the canonical chain. 
+ let fork_slot = Slot::new(4 * E::slots_per_epoch() + 3); + let fork_parent_slot = fork_slot - 1; + let fork_parent_state = rig + .harness + .chain + .state_at_slot(fork_parent_slot, StateSkipConfig::WithStateRoots) + .unwrap(); + assert_eq!(fork_parent_state.slot(), fork_parent_slot); + let (fork_block, _fork_post_state) = rig.harness.make_block(fork_parent_state, fork_slot); + let fork_parent_root = fork_block.parent_root(); + let fork_block_root = rig.harness.chain.process_block(fork_block).unwrap(); + rig.fork_choice(); + + let latest_valid_slot = fork_parent_slot; + let latest_valid_hash = rig.block_hash(fork_parent_root); + + // The new block should not become the head, the old head should remain. + assert_eq!(rig.head_info().block_root, *blocks.last().unwrap()); + + rig.import_block(Payload::Invalid { + latest_valid_hash: Some(latest_valid_hash), + }); + + // The fork block should become the head. + assert_eq!(rig.head_info().block_root, fork_block_root); + + // The fork block has not yet been validated. + assert!(rig.execution_status(fork_block_root).is_not_verified()); + + for root in blocks { + let slot = rig.harness.chain.get_block(&root).unwrap().unwrap().slot(); + + // Fork choice doesn't have info about pre-finalization, nothing to check here. + if slot < finalized_slot { + continue; + } + + let execution_status = rig.execution_status(root); + if slot <= latest_valid_slot { + // Blocks prior to the latest valid hash are valid. + assert!(execution_status.is_valid()); + } else { + // Blocks after the latest valid hash are invalid. + assert!(execution_status.is_invalid()); + } + } +} + +#[test] +fn invalid_during_processing() { + let mut rig = InvalidPayloadRig::new(); + rig.move_to_terminal_block(); + + let roots = &[ + rig.import_block(Payload::Valid), + rig.import_block(Payload::Invalid { + latest_valid_hash: None, + }), + rig.import_block(Payload::Valid), + ]; + + // 0 should be present in the chain. 
+ assert!(rig.harness.chain.get_block(&roots[0]).unwrap().is_some()); + // 1 should *not* be present in the chain. + assert_eq!(rig.harness.chain.get_block(&roots[1]).unwrap(), None); + // 2 should be the head. + let head = rig.harness.chain.head_info().unwrap(); + assert_eq!(head.block_root, roots[2]); +} + +#[test] +fn invalid_after_optimistic_sync() { + let mut rig = InvalidPayloadRig::new(); + rig.move_to_terminal_block(); + + let mut roots = vec![ + rig.import_block(Payload::Syncing), + rig.import_block(Payload::Syncing), + rig.import_block(Payload::Syncing), + ]; + + for root in &roots { + assert!(rig.harness.chain.get_block(root).unwrap().is_some()); + } + + // 2 should be the head. + let head = rig.harness.chain.head_info().unwrap(); + assert_eq!(head.block_root, roots[2]); + + roots.push(rig.import_block(Payload::Invalid { + latest_valid_hash: Some(rig.block_hash(roots[1])), + })); + + // Running fork choice is necessary since a block has been invalidated. + rig.fork_choice(); + + // 1 should be the head, since 2 was invalidated. 
+ let head = rig.harness.chain.head_info().unwrap(); + assert_eq!(head.block_root, roots[1]); +} diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 5c020df492..d3038ac48d 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -66,6 +66,7 @@ fn get_harness( .default_spec() .keypairs(KEYPAIRS[0..validator_count].to_vec()) .fresh_disk_store(store) + .mock_execution_layer() .build(); harness.advance_slot(); harness @@ -554,6 +555,7 @@ fn delete_blocks_and_states() { .default_spec() .keypairs(validators_keypairs) .fresh_disk_store(store.clone()) + .mock_execution_layer() .build(); let unforked_blocks: u64 = 4 * E::slots_per_epoch(); @@ -680,6 +682,7 @@ fn multi_epoch_fork_valid_blocks_test( .default_spec() .keypairs(validators_keypairs) .fresh_disk_store(store) + .mock_execution_layer() .build(); let num_fork1_blocks: u64 = num_fork1_blocks_.try_into().unwrap(); @@ -974,6 +977,7 @@ fn prunes_abandoned_fork_between_two_finalized_checkpoints() { .default_spec() .keypairs(validators_keypairs) .fresh_ephemeral_store() + .mock_execution_layer() .build(); let slots_per_epoch = rig.slots_per_epoch(); let (mut state, state_root) = rig.get_current_state_and_root(); @@ -1083,6 +1087,7 @@ fn pruning_does_not_touch_abandoned_block_shared_with_canonical_chain() { .default_spec() .keypairs(validators_keypairs) .fresh_ephemeral_store() + .mock_execution_layer() .build(); let slots_per_epoch = rig.slots_per_epoch(); let (state, state_root) = rig.get_current_state_and_root(); @@ -1212,6 +1217,7 @@ fn pruning_does_not_touch_blocks_prior_to_finalization() { .default_spec() .keypairs(validators_keypairs) .fresh_ephemeral_store() + .mock_execution_layer() .build(); let slots_per_epoch = rig.slots_per_epoch(); let (mut state, state_root) = rig.get_current_state_and_root(); @@ -1306,6 +1312,7 @@ fn prunes_fork_growing_past_youngest_finalized_checkpoint() { .default_spec() 
.keypairs(validators_keypairs) .fresh_ephemeral_store() + .mock_execution_layer() .build(); let (state, state_root) = rig.get_current_state_and_root(); @@ -1448,6 +1455,7 @@ fn prunes_skipped_slots_states() { .default_spec() .keypairs(validators_keypairs) .fresh_ephemeral_store() + .mock_execution_layer() .build(); let (state, state_root) = rig.get_current_state_and_root(); @@ -1571,6 +1579,7 @@ fn finalizes_non_epoch_start_slot() { .default_spec() .keypairs(validators_keypairs) .fresh_ephemeral_store() + .mock_execution_layer() .build(); let (state, state_root) = rig.get_current_state_and_root(); @@ -2140,6 +2149,7 @@ fn finalizes_after_resuming_from_db() { .default_spec() .keypairs(KEYPAIRS[0..validator_count].to_vec()) .fresh_disk_store(store.clone()) + .mock_execution_layer() .build(); harness.advance_slot(); @@ -2183,6 +2193,7 @@ fn finalizes_after_resuming_from_db() { .default_spec() .keypairs(KEYPAIRS[0..validator_count].to_vec()) .resumed_disk_store(store) + .mock_execution_layer() .build(); assert_chains_pretty_much_the_same(&original_chain, &resumed_harness.chain); @@ -2254,6 +2265,7 @@ fn revert_minority_fork_on_resume() { .spec(spec1) .keypairs(KEYPAIRS[0..validator_count].to_vec()) .fresh_disk_store(store1) + .mock_execution_layer() .build(); // Chain with fork epoch configured. @@ -2263,6 +2275,7 @@ fn revert_minority_fork_on_resume() { .spec(spec2.clone()) .keypairs(KEYPAIRS[0..validator_count].to_vec()) .fresh_disk_store(store2) + .mock_execution_layer() .build(); // Apply the same blocks to both chains initially. @@ -2358,6 +2371,7 @@ fn revert_minority_fork_on_resume() { .set_slot(end_slot.as_u64()); builder })) + .mock_execution_layer() .build(); // Head should now be just before the fork. 
diff --git a/beacon_node/beacon_chain/tests/sync_committee_verification.rs b/beacon_node/beacon_chain/tests/sync_committee_verification.rs index 18cd691ac3..626c132d69 100644 --- a/beacon_node/beacon_chain/tests/sync_committee_verification.rs +++ b/beacon_node/beacon_chain/tests/sync_committee_verification.rs @@ -30,6 +30,7 @@ fn get_harness(validator_count: usize) -> BeaconChainHarness BeaconChainHarness, - >::update_execution_engine_forkchoice( - inner_execution_layer, - store, - head.finalized_checkpoint.root, - block_hash, - &log, - ) - .await; + let result = inner_chain + .update_execution_engine_forkchoice_async( + finalized_execution_block_hash, + head.block_root, + block_hash, + ) + .await; // No need to exit early if setting the head fails. It will be set again if/when the // node comes online. diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index d6877b13a2..605679dd7e 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -5,7 +5,7 @@ use serde::{Deserialize, Serialize}; pub const LATEST_TAG: &str = "latest"; use crate::engines::ForkChoiceState; -pub use types::{Address, EthSpec, ExecutionPayload, Hash256, Uint256}; +pub use types::{Address, EthSpec, ExecutionBlockHash, ExecutionPayload, Hash256, Uint256}; pub mod http; pub mod json_structures; @@ -17,14 +17,15 @@ pub enum Error { Reqwest(reqwest::Error), BadResponse(String), RequestFailed(String), + InvalidExecutePayloadResponse(&'static str), JsonRpc(RpcError), Json(serde_json::Error), ServerMessage { code: i64, message: String }, Eip155Failure, IsSyncing, - ExecutionBlockNotFound(Hash256), + ExecutionBlockNotFound(ExecutionBlockHash), ExecutionHeadBlockNotFound, - ParentHashEqualsBlockHash(Hash256), + ParentHashEqualsBlockHash(ExecutionBlockHash), PayloadIdUnavailable, } @@ -52,7 +53,7 @@ pub trait EngineApi { async fn get_block_by_hash<'a>( &self, - block_hash: Hash256, + block_hash: 
ExecutionBlockHash, ) -> Result, Error>; async fn new_payload_v1( @@ -85,7 +86,7 @@ pub enum PayloadStatusV1Status { #[derive(Clone, Debug, PartialEq)] pub struct PayloadStatusV1 { pub status: PayloadStatusV1Status, - pub latest_valid_hash: Option, + pub latest_valid_hash: Option, pub validation_error: Option, } @@ -99,10 +100,10 @@ pub enum BlockByNumberQuery<'a> { #[serde(rename_all = "camelCase")] pub struct ExecutionBlock { #[serde(rename = "hash")] - pub block_hash: Hash256, + pub block_hash: ExecutionBlockHash, #[serde(rename = "number", with = "eth2_serde_utils::u64_hex_be")] pub block_number: u64, - pub parent_hash: Hash256, + pub parent_hash: ExecutionBlockHash, pub total_difficulty: Uint256, } diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index ce4c3beff0..8d82b8d311 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -125,7 +125,7 @@ impl EngineApi for HttpJsonRpc { async fn get_block_by_hash<'a>( &self, - block_hash: Hash256, + block_hash: ExecutionBlockHash, ) -> Result, Error> { let params = json!([block_hash, RETURN_FULL_TRANSACTION_OBJECTS]); @@ -413,7 +413,9 @@ mod test { Tester::new() .assert_request_equals( |client| async move { - let _ = client.get_block_by_hash(Hash256::repeat_byte(1)).await; + let _ = client + .get_block_by_hash(ExecutionBlockHash::repeat_byte(1)) + .await; }, json!({ "id": STATIC_ID, @@ -433,9 +435,9 @@ mod test { let _ = client .forkchoice_updated_v1( ForkChoiceState { - head_block_hash: Hash256::repeat_byte(1), - safe_block_hash: Hash256::repeat_byte(1), - finalized_block_hash: Hash256::zero(), + head_block_hash: ExecutionBlockHash::repeat_byte(1), + safe_block_hash: ExecutionBlockHash::repeat_byte(1), + finalized_block_hash: ExecutionBlockHash::zero(), }, Some(PayloadAttributes { timestamp: 5, @@ -488,7 +490,7 @@ mod test { |client| async move { let _ = client 
.new_payload_v1::(ExecutionPayload { - parent_hash: Hash256::repeat_byte(0), + parent_hash: ExecutionBlockHash::repeat_byte(0), fee_recipient: Address::repeat_byte(1), state_root: Hash256::repeat_byte(1), receipts_root: Hash256::repeat_byte(0), @@ -500,7 +502,7 @@ mod test { timestamp: 42, extra_data: vec![].into(), base_fee_per_gas: Uint256::from(1), - block_hash: Hash256::repeat_byte(1), + block_hash: ExecutionBlockHash::repeat_byte(1), transactions: vec![].into(), }) .await; @@ -538,9 +540,9 @@ mod test { let _ = client .forkchoice_updated_v1( ForkChoiceState { - head_block_hash: Hash256::repeat_byte(0), - safe_block_hash: Hash256::repeat_byte(0), - finalized_block_hash: Hash256::repeat_byte(1), + head_block_hash: ExecutionBlockHash::repeat_byte(0), + safe_block_hash: ExecutionBlockHash::repeat_byte(0), + finalized_block_hash: ExecutionBlockHash::repeat_byte(1), }, None, ) @@ -588,9 +590,9 @@ mod test { let _ = client .forkchoice_updated_v1( ForkChoiceState { - head_block_hash: Hash256::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), - safe_block_hash: Hash256::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), - finalized_block_hash: Hash256::zero(), + head_block_hash: ExecutionBlockHash::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), + safe_block_hash: ExecutionBlockHash::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), + finalized_block_hash: ExecutionBlockHash::zero(), }, Some(PayloadAttributes { timestamp: 5, @@ -635,9 +637,9 @@ mod test { let response = client .forkchoice_updated_v1( ForkChoiceState { - head_block_hash: Hash256::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), - safe_block_hash: Hash256::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), - finalized_block_hash: Hash256::zero(), + head_block_hash: 
ExecutionBlockHash::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), + safe_block_hash: ExecutionBlockHash::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), + finalized_block_hash: ExecutionBlockHash::zero(), }, Some(PayloadAttributes { timestamp: 5, @@ -650,7 +652,7 @@ mod test { assert_eq!(response, ForkchoiceUpdatedResponse { payload_status: PayloadStatusV1 { status: PayloadStatusV1Status::Valid, - latest_valid_hash: Some(Hash256::zero()), + latest_valid_hash: Some(ExecutionBlockHash::zero()), validation_error: Some(String::new()), }, payload_id: @@ -703,7 +705,7 @@ mod test { .unwrap(); let expected = ExecutionPayload { - parent_hash: Hash256::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), + parent_hash: ExecutionBlockHash::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), fee_recipient: Address::from_str("0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b").unwrap(), state_root: Hash256::from_str("0xca3149fa9e37db08d1cd49c9061db1002ef1cd58db2210f2115c8c989b2bdf45").unwrap(), receipts_root: Hash256::from_str("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").unwrap(), @@ -715,7 +717,7 @@ mod test { timestamp: 5, extra_data: vec![].into(), base_fee_per_gas: Uint256::from(7), - block_hash: Hash256::from_str("0x6359b8381a370e2f54072a5784ddd78b6ed024991558c511d4452eb4f6ac898c").unwrap(), + block_hash: ExecutionBlockHash::from_str("0x6359b8381a370e2f54072a5784ddd78b6ed024991558c511d4452eb4f6ac898c").unwrap(), transactions: vec![].into(), }; @@ -728,7 +730,7 @@ mod test { |client| async move { let _ = client .new_payload_v1::(ExecutionPayload { - parent_hash: Hash256::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), + parent_hash: ExecutionBlockHash::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), fee_recipient: 
Address::from_str("0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b").unwrap(), state_root: Hash256::from_str("0xca3149fa9e37db08d1cd49c9061db1002ef1cd58db2210f2115c8c989b2bdf45").unwrap(), receipts_root: Hash256::from_str("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").unwrap(), @@ -740,7 +742,7 @@ mod test { timestamp: 5, extra_data: vec![].into(), base_fee_per_gas: Uint256::from(7), - block_hash: Hash256::from_str("0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858").unwrap(), + block_hash: ExecutionBlockHash::from_str("0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858").unwrap(), transactions: vec![].into(), }) .await; @@ -788,7 +790,7 @@ mod test { assert_eq!(response, PayloadStatusV1 { status: PayloadStatusV1Status::Valid, - latest_valid_hash: Some(Hash256::from_str("0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858").unwrap()), + latest_valid_hash: Some(ExecutionBlockHash::from_str("0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858").unwrap()), validation_error: Some(String::new()), } ); @@ -801,9 +803,9 @@ mod test { let _ = client .forkchoice_updated_v1( ForkChoiceState { - head_block_hash: Hash256::from_str("0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858").unwrap(), - safe_block_hash: Hash256::from_str("0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858").unwrap(), - finalized_block_hash: Hash256::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), + head_block_hash: ExecutionBlockHash::from_str("0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858").unwrap(), + safe_block_hash: ExecutionBlockHash::from_str("0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858").unwrap(), + finalized_block_hash: ExecutionBlockHash::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), }, None, ) @@ -840,9 +842,9 @@ mod test { let response = client 
.forkchoice_updated_v1( ForkChoiceState { - head_block_hash: Hash256::from_str("0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858").unwrap(), - safe_block_hash: Hash256::from_str("0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858").unwrap(), - finalized_block_hash: Hash256::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), + head_block_hash: ExecutionBlockHash::from_str("0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858").unwrap(), + safe_block_hash: ExecutionBlockHash::from_str("0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858").unwrap(), + finalized_block_hash: ExecutionBlockHash::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), }, None, ) @@ -851,7 +853,7 @@ mod test { assert_eq!(response, ForkchoiceUpdatedResponse { payload_status: PayloadStatusV1 { status: PayloadStatusV1Status::Valid, - latest_valid_hash: Some(Hash256::zero()), + latest_valid_hash: Some(ExecutionBlockHash::zero()), validation_error: Some(String::new()), }, payload_id: None, diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index 03d981d439..e9559e894c 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -1,6 +1,6 @@ use super::*; use serde::{Deserialize, Serialize}; -use types::{EthSpec, FixedVector, Transaction, Unsigned, VariableList}; +use types::{EthSpec, ExecutionBlockHash, FixedVector, Transaction, Unsigned, VariableList}; #[derive(Debug, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] @@ -58,7 +58,7 @@ pub struct JsonPayloadIdResponse { #[derive(Debug, PartialEq, Default, Serialize, Deserialize)] #[serde(bound = "T: EthSpec", rename_all = "camelCase")] pub struct JsonExecutionPayloadV1 { - pub parent_hash: Hash256, + pub parent_hash: 
ExecutionBlockHash, pub fee_recipient: Address, pub state_root: Hash256, pub receipts_root: Hash256, @@ -76,7 +76,7 @@ pub struct JsonExecutionPayloadV1 { #[serde(with = "ssz_types::serde_utils::hex_var_list")] pub extra_data: VariableList, pub base_fee_per_gas: Uint256, - pub block_hash: Hash256, + pub block_hash: ExecutionBlockHash, #[serde(with = "serde_transactions")] pub transactions: VariableList, T::MaxTransactionsPerPayload>, @@ -206,9 +206,9 @@ impl From for PayloadAttributes { #[derive(Debug, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct JsonForkChoiceStateV1 { - pub head_block_hash: Hash256, - pub safe_block_hash: Hash256, - pub finalized_block_hash: Hash256, + pub head_block_hash: ExecutionBlockHash, + pub safe_block_hash: ExecutionBlockHash, + pub finalized_block_hash: ExecutionBlockHash, } impl From for JsonForkChoiceStateV1 { @@ -260,7 +260,7 @@ pub enum JsonPayloadStatusV1Status { #[serde(rename_all = "camelCase")] pub struct JsonPayloadStatusV1 { pub status: JsonPayloadStatusV1Status, - pub latest_valid_hash: Option, + pub latest_valid_hash: Option, pub validation_error: Option, } diff --git a/beacon_node/execution_layer/src/engines.rs b/beacon_node/execution_layer/src/engines.rs index 03801f3168..d8e19baae1 100644 --- a/beacon_node/execution_layer/src/engines.rs +++ b/beacon_node/execution_layer/src/engines.rs @@ -8,7 +8,7 @@ use lru::LruCache; use slog::{crit, debug, info, warn, Logger}; use std::future::Future; use tokio::sync::{Mutex, RwLock}; -use types::{Address, Hash256}; +use types::{Address, ExecutionBlockHash, Hash256}; /// The number of payload IDs that will be stored for each `Engine`. 
/// @@ -25,9 +25,9 @@ enum EngineState { #[derive(Copy, Clone, PartialEq, Debug)] pub struct ForkChoiceState { - pub head_block_hash: Hash256, - pub safe_block_hash: Hash256, - pub finalized_block_hash: Hash256, + pub head_block_hash: ExecutionBlockHash, + pub safe_block_hash: ExecutionBlockHash, + pub finalized_block_hash: ExecutionBlockHash, } /// Used to enable/disable logging on some tasks. @@ -48,7 +48,7 @@ impl Logging { #[derive(Hash, PartialEq, std::cmp::Eq)] struct PayloadIdCacheKey { - pub head_block_hash: Hash256, + pub head_block_hash: ExecutionBlockHash, pub timestamp: u64, pub random: Hash256, pub suggested_fee_recipient: Address, @@ -75,7 +75,7 @@ impl Engine { pub async fn get_payload_id( &self, - head_block_hash: Hash256, + head_block_hash: ExecutionBlockHash, timestamp: u64, random: Hash256, suggested_fee_recipient: Address, diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 10ae6b3eb0..326c46f870 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -7,10 +7,11 @@ use engine_api::{Error as ApiError, *}; use engines::{Engine, EngineError, Engines, ForkChoiceState, Logging}; use lru::LruCache; +use payload_status::process_multiple_payload_statuses; use sensitive_url::SensitiveUrl; use slog::{crit, debug, error, info, Logger}; use slot_clock::SlotClock; -use std::collections::{HashMap, HashSet}; +use std::collections::HashMap; use std::future::Future; use std::sync::Arc; use std::time::Duration; @@ -19,12 +20,14 @@ use tokio::{ sync::{Mutex, MutexGuard}, time::{sleep, sleep_until, Instant}, }; -use types::{ChainSpec, Epoch, ProposerPreparationData}; +use types::{ChainSpec, Epoch, ExecutionBlockHash, ProposerPreparationData}; pub use engine_api::{http::HttpJsonRpc, PayloadAttributes, PayloadStatusV1Status}; +pub use payload_status::PayloadStatus; mod engine_api; mod engines; +mod payload_status; pub mod test_utils; /// Each time the `ExecutionLayer` 
retrieves a block from an execution node, it stores that block @@ -50,6 +53,7 @@ pub enum Error { ShuttingDown, FeeRecipientUnspecified, ConsensusFailure, + MissingLatestValidHash, } impl From for Error { @@ -68,7 +72,7 @@ struct Inner { engines: Engines, suggested_fee_recipient: Option
, proposer_preparation_data: Mutex>, - execution_blocks: Mutex>, + execution_blocks: Mutex>, executor: TaskExecutor, log: Logger, } @@ -137,7 +141,9 @@ impl ExecutionLayer { } /// Note: this function returns a mutex guard, be careful to avoid deadlocks. - async fn execution_blocks(&self) -> MutexGuard<'_, LruCache> { + async fn execution_blocks( + &self, + ) -> MutexGuard<'_, LruCache> { self.inner.execution_blocks.lock().await } @@ -384,10 +390,10 @@ impl ExecutionLayer { /// will be contacted. pub async fn get_payload( &self, - parent_hash: Hash256, + parent_hash: ExecutionBlockHash, timestamp: u64, random: Hash256, - finalized_block_hash: Hash256, + finalized_block_hash: ExecutionBlockHash, proposer_index: u64, ) -> Result, Error> { let suggested_fee_recipient = self.get_suggested_fee_recipient(proposer_index).await; @@ -434,7 +440,16 @@ impl ExecutionLayer { ) .await .map(|response| response.payload_id)? - .ok_or(ApiError::PayloadIdUnavailable)? + .ok_or_else(|| { + error!( + self.log(), + "Exec engine unable to produce payload"; + "msg" => "No payload ID, the engine is likely syncing. \ + This has the potential to cause a missed block proposal.", + ); + + ApiError::PayloadIdUnavailable + })? 
}; engine.api.get_payload_v1(payload_id).await @@ -459,7 +474,7 @@ impl ExecutionLayer { pub async fn notify_new_payload( &self, execution_payload: &ExecutionPayload, - ) -> Result<(PayloadStatusV1Status, Option>), Error> { + ) -> Result { debug!( self.log(), "Issuing engine_newPayload"; @@ -473,81 +488,11 @@ impl ExecutionLayer { .broadcast(|engine| engine.api.new_payload_v1(execution_payload.clone())) .await; - let mut errors = vec![]; - let mut valid = 0; - let mut invalid = 0; - let mut syncing = 0; - let mut invalid_latest_valid_hash = HashSet::new(); - for result in broadcast_results { - match result { - Ok(response) => match (&response.latest_valid_hash, &response.status) { - (Some(latest_hash), &PayloadStatusV1Status::Valid) => { - // According to a strict interpretation of the spec, the EE should never - // respond with `VALID` *and* a `latest_valid_hash`. - // - // For the sake of being liberal with what we accept, we will accept a - // `latest_valid_hash` *only if* it matches the submitted payload. - // Otherwise, register an error. - if latest_hash == &execution_payload.block_hash { - valid += 1; - } else { - errors.push(EngineError::Api { - id: "unknown".to_string(), - error: engine_api::Error::BadResponse( - format!( - "new_payload: response.status = Valid but invalid latest_valid_hash. 
Expected({:?}) Found({:?})", - execution_payload.block_hash, - latest_hash, - ) - ), - }); - } - } - (Some(latest_hash), &PayloadStatusV1Status::Invalid) => { - invalid += 1; - invalid_latest_valid_hash.insert(*latest_hash); - } - (None, &PayloadStatusV1Status::InvalidBlockHash) - | (None, &PayloadStatusV1Status::InvalidTerminalBlock) => invalid += 1, - (None, &PayloadStatusV1Status::Syncing) - | (None, &PayloadStatusV1Status::Accepted) => syncing += 1, - _ => errors.push(EngineError::Api { - id: "unknown".to_string(), - error: engine_api::Error::BadResponse(format!( - "new_payload: response does not conform to engine API spec: {:?}", - response, - )), - }), - }, - Err(e) => errors.push(e), - } - } - - if valid > 0 && invalid > 0 { - crit!( - self.log(), - "Consensus failure between execution nodes"; - "method" => "new_payload" - ); - // In this situation, better to have a failure of liveness than vote on a potentially invalid chain - return Err(Error::ConsensusFailure); - } - - if valid > 0 { - Ok(( - PayloadStatusV1Status::Valid, - Some(vec![execution_payload.block_hash]), - )) - } else if invalid > 0 { - Ok(( - PayloadStatusV1Status::Invalid, - Some(invalid_latest_valid_hash.into_iter().collect()), - )) - } else if syncing > 0 { - Ok((PayloadStatusV1Status::Syncing, None)) - } else { - Err(Error::EngineErrors(errors)) - } + process_multiple_payload_statuses( + execution_payload.block_hash, + broadcast_results.into_iter(), + self.log(), + ) } /// Maps to the `engine_consensusValidated` JSON-RPC call. @@ -565,10 +510,10 @@ impl ExecutionLayer { /// - An error, if all nodes return an error. 
pub async fn notify_forkchoice_updated( &self, - head_block_hash: Hash256, - finalized_block_hash: Hash256, + head_block_hash: ExecutionBlockHash, + finalized_block_hash: ExecutionBlockHash, payload_attributes: Option, - ) -> Result<(PayloadStatusV1Status, Option>), Error> { + ) -> Result { debug!( self.log(), "Issuing engine_forkchoiceUpdated"; @@ -597,78 +542,13 @@ impl ExecutionLayer { }) .await; - let mut errors = vec![]; - let mut valid = 0; - let mut invalid = 0; - let mut syncing = 0; - let mut invalid_latest_valid_hash = HashSet::new(); - for result in broadcast_results { - match result { - Ok(response) => match (&response.payload_status.latest_valid_hash, &response.payload_status.status) { - // TODO(bellatrix) a strict interpretation of the v1.0.0.alpha.6 spec says that - // `latest_valid_hash` *cannot* be `None`. However, we accept it to maintain - // Geth compatibility for the short term. See: - // - // https://github.com/ethereum/go-ethereum/issues/24404 - (None, &PayloadStatusV1Status::Valid) => valid += 1, - (Some(latest_hash), &PayloadStatusV1Status::Valid) => { - if latest_hash == &head_block_hash { - valid += 1; - } else { - errors.push(EngineError::Api { - id: "unknown".to_string(), - error: engine_api::Error::BadResponse( - format!( - "forkchoice_updated: payload_status = Valid but invalid latest_valid_hash. 
Expected({:?}) Found({:?})", - head_block_hash, - *latest_hash, - ) - ), - }); - } - } - (Some(latest_hash), &PayloadStatusV1Status::Invalid) => { - invalid += 1; - invalid_latest_valid_hash.insert(*latest_hash); - } - (None, &PayloadStatusV1Status::InvalidTerminalBlock) => invalid += 1, - (None, &PayloadStatusV1Status::Syncing) => syncing += 1, - _ => { - errors.push(EngineError::Api { - id: "unknown".to_string(), - error: engine_api::Error::BadResponse(format!( - "forkchoice_updated: response does not conform to engine API spec: {:?}", - response - )), - }) - } - } - Err(e) => errors.push(e), - } - } - - if valid > 0 && invalid > 0 { - crit!( - self.log(), - "Consensus failure between execution nodes"; - "method" => "forkchoice_updated" - ); - // In this situation, better to have a failure of liveness than vote on a potentially invalid chain - return Err(Error::ConsensusFailure); - } - - if valid > 0 { - Ok((PayloadStatusV1Status::Valid, Some(vec![head_block_hash]))) - } else if invalid > 0 { - Ok(( - PayloadStatusV1Status::Invalid, - Some(invalid_latest_valid_hash.into_iter().collect()), - )) - } else if syncing > 0 { - Ok((PayloadStatusV1Status::Syncing, None)) - } else { - Err(Error::EngineErrors(errors)) - } + process_multiple_payload_statuses( + head_block_hash, + broadcast_results + .into_iter() + .map(|result| result.map(|response| response.payload_status)), + self.log(), + ) } /// Used during block production to determine if the merge has been triggered. @@ -681,12 +561,12 @@ impl ExecutionLayer { pub async fn get_terminal_pow_block_hash( &self, spec: &ChainSpec, - ) -> Result, Error> { + ) -> Result, Error> { let hash_opt = self .engines() .first_success(|engine| async move { let terminal_block_hash = spec.terminal_block_hash; - if terminal_block_hash != Hash256::zero() { + if terminal_block_hash != ExecutionBlockHash::zero() { if self .get_pow_block(engine, terminal_block_hash) .await? 
@@ -730,7 +610,7 @@ impl ExecutionLayer { &self, engine: &Engine, spec: &ChainSpec, - ) -> Result, ApiError> { + ) -> Result, ApiError> { let mut block = engine .api .get_block_by_number(BlockByNumberQuery::Tag(LATEST_TAG)) @@ -742,7 +622,7 @@ impl ExecutionLayer { loop { let block_reached_ttd = block.total_difficulty >= spec.terminal_total_difficulty; if block_reached_ttd { - if block.parent_hash == Hash256::zero() { + if block.parent_hash == ExecutionBlockHash::zero() { return Ok(Some(block.block_hash)); } let parent = self @@ -790,7 +670,7 @@ impl ExecutionLayer { /// https://github.com/ethereum/consensus-specs/blob/v1.1.0/specs/merge/fork-choice.md pub async fn is_valid_terminal_pow_block_hash( &self, - block_hash: Hash256, + block_hash: ExecutionBlockHash, spec: &ChainSpec, ) -> Result, Error> { let broadcast_results = self @@ -869,7 +749,7 @@ impl ExecutionLayer { async fn get_pow_block( &self, engine: &Engine, - hash: Hash256, + hash: ExecutionBlockHash, ) -> Result, ApiError> { if let Some(cached) = self.execution_blocks().await.get(&hash).copied() { // The block was in the cache, no need to request it from the execution @@ -963,7 +843,7 @@ mod test { MockExecutionLayer::default_params() .move_to_terminal_block() .with_terminal_block(|spec, el, _| async move { - let missing_terminal_block = Hash256::repeat_byte(42); + let missing_terminal_block = ExecutionBlockHash::repeat_byte(42); assert_eq!( el.is_valid_terminal_pow_block_hash(missing_terminal_block, &spec) diff --git a/beacon_node/execution_layer/src/payload_status.rs b/beacon_node/execution_layer/src/payload_status.rs new file mode 100644 index 0000000000..e0b1a01b43 --- /dev/null +++ b/beacon_node/execution_layer/src/payload_status.rs @@ -0,0 +1,191 @@ +use crate::engine_api::{Error as ApiError, PayloadStatusV1, PayloadStatusV1Status}; +use crate::engines::EngineError; +use crate::Error; +use slog::{crit, warn, Logger}; +use types::ExecutionBlockHash; + +/// Provides a simpler, easier to parse version 
of `PayloadStatusV1` for upstream users. +/// +/// It primarily ensures that the `latest_valid_hash` is always present when relevant. +#[derive(Debug, Clone, PartialEq)] +pub enum PayloadStatus { + Valid, + Invalid { + latest_valid_hash: ExecutionBlockHash, + validation_error: Option, + }, + Syncing, + Accepted, + InvalidBlockHash { + validation_error: Option, + }, + InvalidTerminalBlock { + validation_error: Option, + }, +} + +/// Processes the responses from multiple execution engines, finding the "best" status and returning +/// it (if any). +/// +/// This function has the following basic goals: +/// +/// - Detect a consensus failure between nodes. +/// - Find the most-synced node by preferring a definite response (valid/invalid) over a +/// syncing/accepted response or error. +/// +/// # Details +/// +/// - If there are conflicting valid/invalid responses, always return an error. +/// - If there are syncing/accepted responses but valid/invalid responses exist, return the +/// valid/invalid responses since they're definite. +/// - If there are multiple valid responses, return the first one processed. +/// - If there are multiple invalid responses, return the first one processed. +/// - Syncing/accepted responses are grouped, if there are multiple of them, return the first one +/// processed. +/// - If there are no responses (only errors or nothing), return an error. 
+pub fn process_multiple_payload_statuses( + head_block_hash: ExecutionBlockHash, + statuses: impl Iterator>, + log: &Logger, +) -> Result { + let mut errors = vec![]; + let mut valid_statuses = vec![]; + let mut invalid_statuses = vec![]; + let mut other_statuses = vec![]; + + for status in statuses { + match status { + Err(e) => errors.push(e), + Ok(response) => match &response.status { + PayloadStatusV1Status::Valid => { + if response + .latest_valid_hash + .map_or(false, |h| h == head_block_hash) + { + // The response is only valid if `latest_valid_hash` is not `null` and + // equal to the provided `block_hash`. + valid_statuses.push(PayloadStatus::Valid) + } else { + errors.push(EngineError::Api { + id: "unknown".to_string(), + error: ApiError::BadResponse( + format!( + "new_payload: response.status = VALID but invalid latest_valid_hash. Expected({:?}) Found({:?})", + head_block_hash, + response.latest_valid_hash, + ) + ), + }); + } + } + PayloadStatusV1Status::Invalid => { + if let Some(latest_valid_hash) = response.latest_valid_hash { + // The response is only valid if `latest_valid_hash` is not `null`. + invalid_statuses.push(PayloadStatus::Invalid { + latest_valid_hash, + validation_error: response.validation_error.clone(), + }) + } else { + errors.push(EngineError::Api { + id: "unknown".to_string(), + error: ApiError::BadResponse( + "new_payload: response.status = INVALID but null latest_valid_hash" + .to_string(), + ), + }); + } + } + PayloadStatusV1Status::InvalidBlockHash => { + // In the interests of being liberal with what we accept, only raise a + // warning here. 
+ if response.latest_valid_hash.is_some() { + warn!( + log, + "Malformed response from execution engine"; + "msg" => "expected a null latest_valid_hash", + "status" => ?response.status + ) + } + + invalid_statuses.push(PayloadStatus::InvalidBlockHash { + validation_error: response.validation_error.clone(), + }); + } + PayloadStatusV1Status::InvalidTerminalBlock => { + // In the interests of being liberal with what we accept, only raise a + // warning here. + if response.latest_valid_hash.is_some() { + warn!( + log, + "Malformed response from execution engine"; + "msg" => "expected a null latest_valid_hash", + "status" => ?response.status + ) + } + + invalid_statuses.push(PayloadStatus::InvalidTerminalBlock { + validation_error: response.validation_error.clone(), + }); + } + PayloadStatusV1Status::Syncing => { + // In the interests of being liberal with what we accept, only raise a + // warning here. + if response.latest_valid_hash.is_some() { + warn!( + log, + "Malformed response from execution engine"; + "msg" => "expected a null latest_valid_hash", + "status" => ?response.status + ) + } + + other_statuses.push(PayloadStatus::Syncing) + } + PayloadStatusV1Status::Accepted => { + // In the interests of being liberal with what we accept, only raise a + // warning here. + if response.latest_valid_hash.is_some() { + warn!( + log, + "Malformed response from execution engine"; + "msg" => "expected a null latest_valid_hash", + "status" => ?response.status + ) + } + + other_statuses.push(PayloadStatus::Accepted) + } + }, + } + } + + if !valid_statuses.is_empty() && !invalid_statuses.is_empty() { + crit!( + log, + "Consensus failure between execution nodes"; + "invalid_statuses" => ?invalid_statuses, + "valid_statuses" => ?valid_statuses, + ); + + // Choose to exit and ignore the valid response. This preferences correctness over + // liveness. + return Err(Error::ConsensusFailure); + } + + // Log any errors to assist with troubleshooting. 
+ for error in &errors { + warn!( + log, + "Error whilst processing payload status"; + "error" => ?error, + ); + } + + valid_statuses + .first() + .or_else(|| invalid_statuses.first()) + .or_else(|| other_statuses.first()) + .cloned() + .map(Result::Ok) + .unwrap_or_else(|| Err(Error::EngineErrors(errors))) +} diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index 8fd6ebfcd1..52accad3a1 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -1,4 +1,7 @@ use crate::engine_api::{ + json_structures::{ + JsonForkchoiceUpdatedV1Response, JsonPayloadStatusV1, JsonPayloadStatusV1Status, + }, ExecutionBlock, PayloadAttributes, PayloadId, PayloadStatusV1, PayloadStatusV1Status, }; use crate::engines::ForkChoiceState; @@ -6,7 +9,7 @@ use serde::{Deserialize, Serialize}; use std::collections::HashMap; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; -use types::{EthSpec, ExecutionPayload, Hash256, Uint256}; +use types::{EthSpec, ExecutionBlockHash, ExecutionPayload, Hash256, Uint256}; const GAS_LIMIT: u64 = 16384; const GAS_USED: u64 = GAS_LIMIT - 1; @@ -26,14 +29,14 @@ impl Block { } } - pub fn parent_hash(&self) -> Hash256 { + pub fn parent_hash(&self) -> ExecutionBlockHash { match self { Block::PoW(block) => block.parent_hash, Block::PoS(payload) => payload.parent_hash, } } - pub fn block_hash(&self) -> Hash256 { + pub fn block_hash(&self) -> ExecutionBlockHash { match self { Block::PoW(block) => block.block_hash, Block::PoS(payload) => payload.block_hash, @@ -69,8 +72,8 @@ impl Block { #[serde(rename_all = "camelCase")] pub struct PoWBlock { pub block_number: u64, - pub block_hash: Hash256, - pub parent_hash: Hash256, + pub block_hash: ExecutionBlockHash, + pub parent_hash: ExecutionBlockHash, pub total_difficulty: Uint256, } @@ -78,18 +81,18 @@ 
pub struct ExecutionBlockGenerator { /* * Common database */ - blocks: HashMap>, - block_hashes: HashMap, + blocks: HashMap>, + block_hashes: HashMap, /* * PoW block parameters */ pub terminal_total_difficulty: Uint256, pub terminal_block_number: u64, - pub terminal_block_hash: Hash256, + pub terminal_block_hash: ExecutionBlockHash, /* * PoS block parameters */ - pub pending_payloads: HashMap>, + pub pending_payloads: HashMap>, pub next_payload_id: u64, pub payload_ids: HashMap>, } @@ -98,7 +101,7 @@ impl ExecutionBlockGenerator { pub fn new( terminal_total_difficulty: Uint256, terminal_block_number: u64, - terminal_block_hash: Hash256, + terminal_block_hash: ExecutionBlockHash, ) -> Self { let mut gen = Self { blocks: <_>::default(), @@ -141,11 +144,11 @@ impl ExecutionBlockGenerator { .map(|block| block.as_execution_block(self.terminal_total_difficulty)) } - pub fn block_by_hash(&self, hash: Hash256) -> Option> { + pub fn block_by_hash(&self, hash: ExecutionBlockHash) -> Option> { self.blocks.get(&hash).cloned() } - pub fn execution_block_by_hash(&self, hash: Hash256) -> Option { + pub fn execution_block_by_hash(&self, hash: ExecutionBlockHash) -> Option { self.block_by_hash(hash) .map(|block| block.as_execution_block(self.terminal_total_difficulty)) } @@ -187,7 +190,7 @@ impl ExecutionBlockGenerator { pub fn insert_pow_block(&mut self, block_number: u64) -> Result<(), String> { let parent_hash = if block_number == 0 { - Hash256::zero() + ExecutionBlockHash::zero() } else if let Some(hash) = self.block_hashes.get(&(block_number - 1)) { *hash } else { @@ -231,7 +234,7 @@ impl ExecutionBlockGenerator { } pub fn get_payload(&mut self, id: &PayloadId) -> Option> { - self.payload_ids.remove(id) + self.payload_ids.get(id).cloned() } pub fn new_payload(&mut self, payload: ExecutionPayload) -> PayloadStatusV1 { @@ -267,39 +270,35 @@ impl ExecutionBlockGenerator { &mut self, forkchoice_state: ForkChoiceState, payload_attributes: Option, - ) -> Result, String> { + ) -> 
Result { if let Some(payload) = self .pending_payloads .remove(&forkchoice_state.head_block_hash) { self.insert_block(Block::PoS(payload))?; } - if !self.blocks.contains_key(&forkchoice_state.head_block_hash) { - return Err(format!( - "block hash {:?} unknown", - forkchoice_state.head_block_hash - )); - } - if !self.blocks.contains_key(&forkchoice_state.safe_block_hash) { - return Err(format!( - "block hash {:?} unknown", - forkchoice_state.head_block_hash - )); - } - if forkchoice_state.finalized_block_hash != Hash256::zero() + let unknown_head_block_hash = !self.blocks.contains_key(&forkchoice_state.head_block_hash); + let unknown_safe_block_hash = !self.blocks.contains_key(&forkchoice_state.safe_block_hash); + let unknown_finalized_block_hash = forkchoice_state.finalized_block_hash + != ExecutionBlockHash::zero() && !self .blocks - .contains_key(&forkchoice_state.finalized_block_hash) - { - return Err(format!( - "finalized block hash {:?} is unknown", - forkchoice_state.finalized_block_hash - )); + .contains_key(&forkchoice_state.finalized_block_hash); + + if unknown_head_block_hash || unknown_safe_block_hash || unknown_finalized_block_hash { + return Ok(JsonForkchoiceUpdatedV1Response { + payload_status: JsonPayloadStatusV1 { + status: JsonPayloadStatusV1Status::Syncing, + latest_valid_hash: None, + validation_error: None, + }, + payload_id: None, + }); } - match payload_attributes { - None => Ok(None), + let id = match payload_attributes { + None => None, Some(attributes) => { if !self.blocks.iter().any(|(_, block)| { block.block_hash() == self.terminal_block_hash @@ -334,17 +333,27 @@ impl ExecutionBlockGenerator { timestamp: attributes.timestamp, extra_data: "block gen was here".as_bytes().to_vec().into(), base_fee_per_gas: Uint256::one(), - block_hash: Hash256::zero(), + block_hash: ExecutionBlockHash::zero(), transactions: vec![].into(), }; - execution_payload.block_hash = execution_payload.tree_hash_root(); + execution_payload.block_hash = + 
ExecutionBlockHash::from_root(execution_payload.tree_hash_root()); self.payload_ids.insert(id, execution_payload); - Ok(Some(id)) + Some(id) } - } + }; + + Ok(JsonForkchoiceUpdatedV1Response { + payload_status: JsonPayloadStatusV1 { + status: JsonPayloadStatusV1Status::Valid, + latest_valid_hash: Some(forkchoice_state.head_block_hash), + validation_error: None, + }, + payload_id: id.map(Into::into), + }) } } @@ -356,7 +365,7 @@ pub fn generate_pow_block( terminal_total_difficulty: Uint256, terminal_block_number: u64, block_number: u64, - parent_hash: Hash256, + parent_hash: ExecutionBlockHash, ) -> Result { if block_number > terminal_block_number { return Err(format!( @@ -378,12 +387,12 @@ pub fn generate_pow_block( let mut block = PoWBlock { block_number, - block_hash: Hash256::zero(), + block_hash: ExecutionBlockHash::zero(), parent_hash, total_difficulty, }; - block.block_hash = block.tree_hash_root(); + block.block_hash = ExecutionBlockHash::from_root(block.tree_hash_root()); Ok(block) } @@ -402,7 +411,7 @@ mod test { let mut generator: ExecutionBlockGenerator = ExecutionBlockGenerator::new( TERMINAL_DIFFICULTY.into(), TERMINAL_BLOCK, - Hash256::zero(), + ExecutionBlockHash::zero(), ); for i in 0..=TERMINAL_BLOCK { @@ -420,7 +429,7 @@ mod test { let expected_parent = i .checked_sub(1) .map(|i| generator.block_by_number(i).unwrap().block_hash()) - .unwrap_or_else(Hash256::zero); + .unwrap_or_else(ExecutionBlockHash::zero); assert_eq!(block.parent_hash(), expected_parent); assert_eq!( diff --git a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs index 746d96e293..1ee29ce7a9 100644 --- a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs +++ b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs @@ -1,5 +1,5 @@ use super::Context; -use crate::engine_api::{http::*, PayloadStatusV1, PayloadStatusV1Status}; +use crate::engine_api::{http::*, *}; use crate::json_structures::*; use 
serde::de::DeserializeOwned; use serde_json::Value as JsonValue; @@ -57,26 +57,29 @@ pub async fn handle_rpc( ENGINE_NEW_PAYLOAD_V1 => { let request: JsonExecutionPayloadV1 = get_param(params, 0)?; - let response = if let Some(status) = *ctx.static_new_payload_response.lock() { - match status { - PayloadStatusV1Status::Valid => PayloadStatusV1 { - status, - latest_valid_hash: Some(request.block_hash), - validation_error: None, - }, - PayloadStatusV1Status::Syncing => PayloadStatusV1 { - status, - latest_valid_hash: None, - validation_error: None, - }, - _ => unimplemented!("invalid static newPayloadResponse"), - } + let (static_response, should_import) = + if let Some(mut response) = ctx.static_new_payload_response.lock().clone() { + if response.status.status == PayloadStatusV1Status::Valid { + response.status.latest_valid_hash = Some(request.block_hash) + } + + (Some(response.status), response.should_import) + } else { + (None, true) + }; + + let dynamic_response = if should_import { + Some( + ctx.execution_block_generator + .write() + .new_payload(request.into()), + ) } else { - ctx.execution_block_generator - .write() - .new_payload(request.into()) + None }; + let response = static_response.or(dynamic_response).unwrap(); + Ok(serde_json::to_value(JsonPayloadStatusV1::from(response)).unwrap()) } ENGINE_GET_PAYLOAD_V1 => { @@ -95,8 +98,7 @@ pub async fn handle_rpc( let forkchoice_state: JsonForkChoiceStateV1 = get_param(params, 0)?; let payload_attributes: Option = get_param(params, 1)?; - let head_block_hash = forkchoice_state.head_block_hash; - let id = ctx + let response = ctx .execution_block_generator .write() .forkchoice_updated_v1( @@ -104,15 +106,7 @@ pub async fn handle_rpc( payload_attributes.map(|json| json.into()), )?; - Ok(serde_json::to_value(JsonForkchoiceUpdatedV1Response { - payload_status: JsonPayloadStatusV1 { - status: JsonPayloadStatusV1Status::Valid, - latest_valid_hash: Some(head_block_hash), - validation_error: None, - }, - payload_id: 
id.map(Into::into), - }) - .unwrap()) + Ok(serde_json::to_value(response).unwrap()) } other => Err(format!( "The method {} does not exist/is not available", diff --git a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs index 0622da473f..a15ab25254 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs @@ -58,7 +58,7 @@ impl MockExecutionLayer { Self::new( DEFAULT_TERMINAL_DIFFICULTY.into(), DEFAULT_TERMINAL_BLOCK, - Hash256::zero(), + ExecutionBlockHash::zero(), Epoch::new(0), ) } @@ -66,7 +66,7 @@ impl MockExecutionLayer { pub fn new( terminal_total_difficulty: Uint256, terminal_block: u64, - terminal_block_hash: Hash256, + terminal_block_hash: ExecutionBlockHash, terminal_block_hash_activation_epoch: Epoch, ) -> Self { let el_runtime = ExecutionLayerRuntime::default(); @@ -117,7 +117,7 @@ impl MockExecutionLayer { self.el .notify_forkchoice_updated( parent_hash, - Hash256::zero(), + ExecutionBlockHash::zero(), Some(PayloadAttributes { timestamp, random, @@ -145,13 +145,11 @@ impl MockExecutionLayer { assert_eq!(payload.timestamp, timestamp); assert_eq!(payload.random, random); - let (payload_response, latest_valid_hash) = - self.el.notify_new_payload(&payload).await.unwrap(); - assert_eq!(payload_response, PayloadStatusV1Status::Valid); - assert_eq!(latest_valid_hash, Some(vec![payload.block_hash])); + let status = self.el.notify_new_payload(&payload).await.unwrap(); + assert_eq!(status, PayloadStatus::Valid); self.el - .notify_forkchoice_updated(block_hash, Hash256::zero(), None) + .notify_forkchoice_updated(block_hash, ExecutionBlockHash::zero(), None) .await .unwrap(); diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs index a4b9617764..9d6eb5cf04 100644 --- a/beacon_node/execution_layer/src/test_utils/mod.rs +++ 
b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -1,7 +1,6 @@ //! Provides a mock execution engine HTTP JSON-RPC API for use in testing. -use crate::engine_api::http::JSONRPC_VERSION; -use crate::engine_api::PayloadStatusV1Status; +use crate::engine_api::{http::JSONRPC_VERSION, PayloadStatusV1, PayloadStatusV1Status}; use bytes::Bytes; use environment::null_logger; use execution_block_generator::{Block, PoWBlock}; @@ -15,7 +14,7 @@ use std::marker::PhantomData; use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4}; use std::sync::Arc; use tokio::{runtime, sync::oneshot}; -use types::{EthSpec, Hash256, Uint256}; +use types::{EthSpec, ExecutionBlockHash, Uint256}; use warp::Filter; pub use execution_block_generator::{generate_pow_block, ExecutionBlockGenerator}; @@ -41,7 +40,7 @@ impl MockServer { &runtime::Handle::current(), DEFAULT_TERMINAL_DIFFICULTY.into(), DEFAULT_TERMINAL_BLOCK, - Hash256::zero(), + ExecutionBlockHash::zero(), ) } @@ -49,7 +48,7 @@ impl MockServer { handle: &runtime::Handle, terminal_difficulty: Uint256, terminal_block: u64, - terminal_block_hash: Hash256, + terminal_block_hash: ExecutionBlockHash, ) -> Self { let last_echo_request = Arc::new(RwLock::new(None)); let preloaded_responses = Arc::new(Mutex::new(vec![])); @@ -117,14 +116,54 @@ impl MockServer { } pub fn all_payloads_valid(&self) { - *self.ctx.static_new_payload_response.lock() = Some(PayloadStatusV1Status::Valid) + let response = StaticNewPayloadResponse { + status: PayloadStatusV1 { + status: PayloadStatusV1Status::Valid, + latest_valid_hash: None, + validation_error: None, + }, + should_import: true, + }; + *self.ctx.static_new_payload_response.lock() = Some(response) + } + + /// Setting `should_import = true` simulates an EE that initially returns `SYNCING` but obtains + /// the block via it's own means (e.g., devp2p). 
+ pub fn all_payloads_syncing(&self, should_import: bool) { + let response = StaticNewPayloadResponse { + status: PayloadStatusV1 { + status: PayloadStatusV1Status::Syncing, + latest_valid_hash: None, + validation_error: None, + }, + should_import, + }; + *self.ctx.static_new_payload_response.lock() = Some(response) + } + + pub fn all_payloads_invalid(&self, latest_valid_hash: ExecutionBlockHash) { + let response = StaticNewPayloadResponse { + status: PayloadStatusV1 { + status: PayloadStatusV1Status::Invalid, + latest_valid_hash: Some(latest_valid_hash), + validation_error: Some("static response".into()), + }, + should_import: true, + }; + *self.ctx.static_new_payload_response.lock() = Some(response) + } + + /// Disables any static payload response so the execution block generator will do its own + /// verification. + pub fn full_payload_verification(&self) { + *self.ctx.static_new_payload_response.lock() = None } pub fn insert_pow_block( &self, block_number: u64, - block_hash: Hash256, - parent_hash: Hash256, + block_hash: ExecutionBlockHash, + parent_hash: ExecutionBlockHash, total_difficulty: Uint256, ) { let block = Block::PoW(PoWBlock { @@ -143,7 +182,7 @@ impl MockServer { .unwrap() } - pub fn get_block(&self, block_hash: Hash256) -> Option> { + pub fn get_block(&self, block_hash: ExecutionBlockHash) -> Option> { self.ctx .execution_block_generator .read() @@ -178,6 +217,12 @@ struct MissingIdField; impl warp::reject::Reject for MissingIdField {} +#[derive(Debug, Clone, PartialEq)] +pub struct StaticNewPayloadResponse { + status: PayloadStatusV1, + should_import: bool, +} + /// A wrapper around all the items required to spawn the HTTP server. /// /// The server will gracefully handle the case where any fields are `None`. 
@@ -187,7 +232,7 @@ pub struct Context { pub last_echo_request: Arc>>, pub execution_block_generator: RwLock>, pub preloaded_responses: Arc>>, - pub static_new_payload_response: Arc>>, + pub static_new_payload_response: Arc>>, pub _phantom: PhantomData, } diff --git a/common/task_executor/src/lib.rs b/common/task_executor/src/lib.rs index 6874966abd..2d3e941a3e 100644 --- a/common/task_executor/src/lib.rs +++ b/common/task_executor/src/lib.rs @@ -7,7 +7,7 @@ use std::sync::Weak; use tokio::runtime::Runtime; /// Provides a reason when Lighthouse is shut down. -#[derive(Copy, Clone, Debug)] +#[derive(Copy, Clone, Debug, PartialEq)] pub enum ShutdownReason { /// The node shut down successfully. Success(&'static str), diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index f6c6f16414..9f98dadf3b 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -6,8 +6,8 @@ use std::marker::PhantomData; use std::time::Duration; use types::{ consts::merge::INTERVALS_PER_SLOT, AttestationShufflingId, BeaconBlock, BeaconState, - BeaconStateError, ChainSpec, Checkpoint, Epoch, EthSpec, Hash256, IndexedAttestation, - RelativeEpoch, SignedBeaconBlock, Slot, + BeaconStateError, ChainSpec, Checkpoint, Epoch, EthSpec, ExecutionBlockHash, Hash256, + IndexedAttestation, RelativeEpoch, SignedBeaconBlock, Slot, }; #[derive(Debug)] @@ -17,6 +17,7 @@ pub enum Error { ProtoArrayError(String), InvalidProtoArrayBytes(String), InvalidLegacyProtoArrayBytes(String), + FailedToProcessInvalidExecutionPayload(String), MissingProtoArrayBlock(Hash256), UnknownAncestor { ancestor_slot: Slot, @@ -43,6 +44,12 @@ pub enum Error { block_root: Hash256, payload_verification_status: PayloadVerificationStatus, }, + MissingJustifiedBlock { + justified_checkpoint: Checkpoint, + }, + MissingFinalizedBlock { + finalized_checkpoint: Checkpoint, + }, } impl From for Error { @@ -299,9 +306,15 @@ where let execution_status = 
anchor_block.message_merge().map_or_else( |()| ExecutionStatus::irrelevant(), |message| { - // Assume that this payload is valid, since the anchor should be a trusted block and - // state. - ExecutionStatus::Valid(message.body.execution_payload.block_hash) + let execution_payload = &message.body.execution_payload; + if execution_payload == &<_>::default() { + // A default payload does not have execution enabled. + ExecutionStatus::irrelevant() + } else { + // Assume that this payload is valid, since the anchor should be a trusted block and + // state. + ExecutionStatus::Valid(message.body.execution_payload.block_hash) + } }, ); @@ -464,6 +477,17 @@ where Ok(true) } + /// See `ProtoArrayForkChoice::process_execution_payload_invalidation` for documentation. + pub fn on_invalid_execution_payload( + &mut self, + head_block_root: Hash256, + latest_valid_ancestor_root: Option, + ) -> Result<(), Error> { + self.proto_array + .process_execution_payload_invalidation(head_block_root, latest_valid_ancestor_root) + .map_err(Error::FailedToProcessInvalidExecutionPayload) + } + /// Add `block` to the fork choice DAG. /// /// - `block_root` is the root of `block. @@ -592,7 +616,7 @@ where let execution_status = if let Ok(execution_payload) = block.body().execution_payload() { let block_hash = execution_payload.block_hash; - if block_hash == Hash256::zero() { + if block_hash == ExecutionBlockHash::zero() { // The block is post-merge-fork, but pre-terminal-PoW block. We don't need to verify // the payload. ExecutionStatus::irrelevant() @@ -875,6 +899,29 @@ where } } + /// Returns the `ProtoBlock` for the justified checkpoint. + /// + /// ## Notes + /// + /// This does *not* return the "best justified checkpoint". It returns the justified checkpoint + /// that is used for computing balances. 
+ pub fn get_justified_block(&self) -> Result> { + let justified_checkpoint = self.justified_checkpoint(); + self.get_block(&justified_checkpoint.root) + .ok_or(Error::MissingJustifiedBlock { + justified_checkpoint, + }) + } + + /// Returns the `ProtoBlock` for the finalized checkpoint. + pub fn get_finalized_block(&self) -> Result> { + let finalized_checkpoint = self.finalized_checkpoint(); + self.get_block(&finalized_checkpoint.root) + .ok_or(Error::MissingFinalizedBlock { + finalized_checkpoint, + }) + } + /// Return `true` if `block_root` is equal to the finalized root, or a known descendant of it. pub fn is_descendant_of_finalized(&self, block_root: Hash256) -> bool { self.proto_array diff --git a/consensus/fork_choice/tests/tests.rs b/consensus/fork_choice/tests/tests.rs index 0b230ffd32..160800ca50 100644 --- a/consensus/fork_choice/tests/tests.rs +++ b/consensus/fork_choice/tests/tests.rs @@ -122,18 +122,24 @@ impl ForkChoiceTest { } /// Assert there was a shutdown signal sent by the beacon chain. - pub fn assert_shutdown_signal_sent(mut self) -> Self { - self.harness.shutdown_receiver.close(); - let msg = self.harness.shutdown_receiver.try_next().unwrap(); - assert!(msg.is_some()); + pub fn shutdown_signal_sent(&self) -> bool { + let mutex = self.harness.shutdown_receiver.clone(); + let mut shutdown_receiver = mutex.lock(); + + shutdown_receiver.close(); + let msg = shutdown_receiver.try_next().unwrap(); + msg.is_some() + } + + /// Assert there was a shutdown signal sent by the beacon chain. + pub fn assert_shutdown_signal_sent(self) -> Self { + assert!(self.shutdown_signal_sent()); self } /// Assert no shutdown was signal sent by the beacon chain. 
- pub fn assert_shutdown_signal_not_sent(mut self) -> Self { - self.harness.shutdown_receiver.close(); - let msg = self.harness.shutdown_receiver.try_next().unwrap(); - assert!(msg.is_none()); + pub fn assert_shutdown_signal_not_sent(self) -> Self { + assert!(!self.shutdown_signal_sent()); self } @@ -479,6 +485,22 @@ fn is_safe_to_update(slot: Slot, spec: &ChainSpec) -> bool { slot % E::slots_per_epoch() < spec.safe_slots_to_update_justified } +#[test] +fn justified_and_finalized_blocks() { + let tester = ForkChoiceTest::new(); + let fork_choice = tester.harness.chain.fork_choice.read(); + + let justified_checkpoint = fork_choice.justified_checkpoint(); + assert_eq!(justified_checkpoint.epoch, 0); + assert!(justified_checkpoint.root != Hash256::zero()); + assert!(fork_choice.get_justified_block().is_ok()); + + let finalized_checkpoint = fork_choice.finalized_checkpoint(); + assert_eq!(finalized_checkpoint.epoch, 0); + assert!(finalized_checkpoint.root != Hash256::zero()); + assert!(fork_choice.get_finalized_block().is_ok()); +} + /// - The new justified checkpoint descends from the current. 
/// - Current slot is within `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` #[test] diff --git a/consensus/proto_array/src/bin.rs b/consensus/proto_array/src/bin.rs index ba83714ce7..e1d307affb 100644 --- a/consensus/proto_array/src/bin.rs +++ b/consensus/proto_array/src/bin.rs @@ -6,6 +6,18 @@ fn main() { write_test_def_to_yaml("no_votes.yaml", get_no_votes_test_definition()); write_test_def_to_yaml("ffg_01.yaml", get_ffg_case_01_test_definition()); write_test_def_to_yaml("ffg_02.yaml", get_ffg_case_02_test_definition()); + write_test_def_to_yaml( + "execution_status_01.yaml", + get_execution_status_test_definition_01(), + ); + write_test_def_to_yaml( + "execution_status_02.yaml", + get_execution_status_test_definition_02(), + ); + write_test_def_to_yaml( + "execution_status_03.yaml", + get_execution_status_test_definition_03(), + ); } fn write_test_def_to_yaml(filename: &str, def: ForkChoiceTestDefinition) { diff --git a/consensus/proto_array/src/error.rs b/consensus/proto_array/src/error.rs index adb10c035d..7e1b73bedc 100644 --- a/consensus/proto_array/src/error.rs +++ b/consensus/proto_array/src/error.rs @@ -1,9 +1,10 @@ -use types::{Checkpoint, Epoch, Hash256}; +use types::{Checkpoint, Epoch, ExecutionBlockHash, Hash256}; #[derive(Clone, PartialEq, Debug)] pub enum Error { FinalizedNodeUnknown(Hash256), JustifiedNodeUnknown(Hash256), + NodeUnknown(Hash256), InvalidFinalizedRootChange, InvalidNodeIndex(usize), InvalidParentIndex(usize), @@ -15,6 +16,7 @@ pub enum Error { DeltaOverflow(usize), ProposerBoostOverflow(usize), IndexOverflow(&'static str), + InvalidExecutionDeltaOverflow(usize), InvalidDeltaLen { deltas: usize, indices: usize, @@ -26,7 +28,21 @@ pub enum Error { InvalidBestNode(Box), InvalidAncestorOfValidPayload { ancestor_block_root: Hash256, - ancestor_payload_block_hash: Hash256, + ancestor_payload_block_hash: ExecutionBlockHash, + }, + ValidExecutionStatusBecameInvalid { + block_root: Hash256, + payload_block_hash: ExecutionBlockHash, + }, + 
InvalidJustifiedCheckpointExecutionStatus { + justified_root: Hash256, + }, + UnknownLatestValidAncestorHash { + block_root: Hash256, + latest_valid_ancestor_hash: Option, + }, + IrrelevantDescendant { + block_root: Hash256, }, } diff --git a/consensus/proto_array/src/fork_choice_test_definition.rs b/consensus/proto_array/src/fork_choice_test_definition.rs index e28fc67718..fd90d53903 100644 --- a/consensus/proto_array/src/fork_choice_test_definition.rs +++ b/consensus/proto_array/src/fork_choice_test_definition.rs @@ -1,11 +1,16 @@ +mod execution_status; mod ffg_updates; mod no_votes; mod votes; use crate::proto_array_fork_choice::{Block, ExecutionStatus, ProtoArrayForkChoice}; use serde_derive::{Deserialize, Serialize}; -use types::{AttestationShufflingId, Checkpoint, Epoch, EthSpec, Hash256, MainnetEthSpec, Slot}; +use types::{ + AttestationShufflingId, Checkpoint, Epoch, EthSpec, ExecutionBlockHash, Hash256, + MainnetEthSpec, Slot, +}; +pub use execution_status::*; pub use ffg_updates::*; pub use no_votes::*; pub use votes::*; @@ -18,6 +23,13 @@ pub enum Operation { justified_state_balances: Vec, expected_head: Hash256, }, + ProposerBoostFindHead { + justified_checkpoint: Checkpoint, + finalized_checkpoint: Checkpoint, + justified_state_balances: Vec, + expected_head: Hash256, + proposer_boost_root: Hash256, + }, InvalidFindHead { justified_checkpoint: Checkpoint, finalized_checkpoint: Checkpoint, @@ -40,6 +52,14 @@ pub enum Operation { prune_threshold: usize, expected_len: usize, }, + InvalidatePayload { + head_block_root: Hash256, + latest_valid_ancestor_root: Option, + }, + AssertWeight { + block_root: Hash256, + weight: u64, + }, } #[derive(Debug, Clone, Serialize, Deserialize)] @@ -52,9 +72,11 @@ pub struct ForkChoiceTestDefinition { impl ForkChoiceTestDefinition { pub fn run(self) { + let mut spec = MainnetEthSpec::default_spec(); + spec.proposer_score_boost = Some(50); + let junk_shuffling_id = AttestationShufflingId::from_components(Epoch::new(0), 
Hash256::zero()); - let execution_status = ExecutionStatus::irrelevant(); let mut fork_choice = ProtoArrayForkChoice::new( self.finalized_block_slot, Hash256::zero(), @@ -62,7 +84,7 @@ impl ForkChoiceTestDefinition { self.finalized_checkpoint, junk_shuffling_id.clone(), junk_shuffling_id, - execution_status, + ExecutionStatus::Unknown(ExecutionBlockHash::zero()), ) .expect("should create fork choice struct"); @@ -80,7 +102,7 @@ impl ForkChoiceTestDefinition { finalized_checkpoint, &justified_state_balances, Hash256::zero(), - &MainnetEthSpec::default_spec(), + &spec, ) .map_err(|e| e) .unwrap_or_else(|e| { @@ -89,7 +111,34 @@ impl ForkChoiceTestDefinition { assert_eq!( head, expected_head, - "Operation at index {} failed checks. Operation: {:?}", + "Operation at index {} failed head check. Operation: {:?}", + op_index, op + ); + check_bytes_round_trip(&fork_choice); + } + Operation::ProposerBoostFindHead { + justified_checkpoint, + finalized_checkpoint, + justified_state_balances, + expected_head, + proposer_boost_root, + } => { + let head = fork_choice + .find_head::( + justified_checkpoint, + finalized_checkpoint, + &justified_state_balances, + proposer_boost_root, + &spec, + ) + .map_err(|e| e) + .unwrap_or_else(|e| { + panic!("find_head op at index {} returned error {}", op_index, e) + }); + + assert_eq!( + head, expected_head, + "Operation at index {} failed head check. Operation: {:?}", op_index, op ); check_bytes_round_trip(&fork_choice); @@ -104,7 +153,7 @@ impl ForkChoiceTestDefinition { finalized_checkpoint, &justified_state_balances, Hash256::zero(), - &MainnetEthSpec::default_spec(), + &spec, ); assert!( @@ -138,7 +187,10 @@ impl ForkChoiceTestDefinition { ), justified_checkpoint, finalized_checkpoint, - execution_status, + // All blocks are imported optimistically. 
+ execution_status: ExecutionStatus::Unknown(ExecutionBlockHash::from_root( + root, + )), }; fork_choice.process_block(block).unwrap_or_else(|e| { panic!( @@ -183,22 +235,41 @@ impl ForkChoiceTestDefinition { expected_len ); } + Operation::InvalidatePayload { + head_block_root, + latest_valid_ancestor_root, + } => fork_choice + .process_execution_payload_invalidation( + head_block_root, + latest_valid_ancestor_root, + ) + .unwrap(), + Operation::AssertWeight { block_root, weight } => assert_eq!( + fork_choice.get_weight(&block_root).unwrap(), + weight, + "block weight" + ), } } } } -/// Gives a hash that is not the zero hash (unless i is `usize::max_value)`. -fn get_hash(i: u64) -> Hash256 { +/// Gives a root that is not the zero hash (unless i is `usize::max_value)`. +fn get_root(i: u64) -> Hash256 { Hash256::from_low_u64_be(i + 1) } +/// Gives a hash that is not the zero hash (unless i is `usize::max_value)`. +fn get_hash(i: u64) -> ExecutionBlockHash { + ExecutionBlockHash::from_root(get_root(i)) +} + /// Gives a checkpoint with a root that is not the zero hash (unless i is `usize::max_value)`. /// `Epoch` will always equal `i`. fn get_checkpoint(i: u64) -> Checkpoint { Checkpoint { epoch: Epoch::new(i), - root: get_hash(i), + root: get_root(i), } } diff --git a/consensus/proto_array/src/fork_choice_test_definition/execution_status.rs b/consensus/proto_array/src/fork_choice_test_definition/execution_status.rs new file mode 100644 index 0000000000..f1b0e512d7 --- /dev/null +++ b/consensus/proto_array/src/fork_choice_test_definition/execution_status.rs @@ -0,0 +1,1092 @@ +use super::*; + +pub fn get_execution_status_test_definition_01() -> ForkChoiceTestDefinition { + let balances = vec![1; 2]; + let mut ops = vec![]; + + // Ensure that the head starts at the finalized block. 
+ ops.push(Operation::FindHead { + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + justified_state_balances: balances.clone(), + expected_head: get_root(0), + }); + + // Add a block with a hash of 2. + // + // 0 + // / + // 2 + ops.push(Operation::ProcessBlock { + slot: Slot::new(1), + root: get_root(2), + parent_root: get_root(0), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + }); + + // Ensure that the head is 2 + // + // 0 + // / + // head-> 2 + ops.push(Operation::FindHead { + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + justified_state_balances: balances.clone(), + expected_head: get_root(2), + }); + + // Add a block with a hash of 1 that comes off the genesis block (this is a fork compared + // to the previous block). 
+ // + // 0 + // / \ + // 2 1 + ops.push(Operation::ProcessBlock { + slot: Slot::new(1), + root: get_root(1), + parent_root: get_root(0), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + }); + + // Ensure that the head is still 2 + // + // 0 + // / \ + // head-> 2 1 + ops.push(Operation::FindHead { + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + justified_state_balances: balances.clone(), + expected_head: get_root(2), + }); + + // Add a vote to block 1 + // + // 0 + // / \ + // 2 1 <- +vote + ops.push(Operation::ProcessAttestation { + validator_index: 0, + block_root: get_root(1), + target_epoch: Epoch::new(2), + }); + + // Ensure that the head is now 1, because 1 has a vote. + // + // 0 + // / \ + // 2 1 <- head + ops.push(Operation::FindHead { + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + justified_state_balances: balances.clone(), + expected_head: get_root(1), + }); + + ops.push(Operation::AssertWeight { + block_root: get_root(0), + weight: 1, + }); + ops.push(Operation::AssertWeight { + block_root: get_root(1), + weight: 1, + }); + ops.push(Operation::AssertWeight { + block_root: get_root(2), + weight: 0, + }); + + // Add a vote to block 2 + // + // 0 + // / \ + // +vote-> 2 1 + ops.push(Operation::ProcessAttestation { + validator_index: 1, + block_root: get_root(2), + target_epoch: Epoch::new(2), + }); + + // Ensure that the head is 2 since 1 and 2 both have a vote + // + // 0 + // / \ + // head-> 2 1 + ops.push(Operation::FindHead { + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + 
root: get_root(0), + }, + justified_state_balances: balances.clone(), + expected_head: get_root(2), + }); + + ops.push(Operation::AssertWeight { + block_root: get_root(0), + weight: 2, + }); + ops.push(Operation::AssertWeight { + block_root: get_root(1), + weight: 1, + }); + ops.push(Operation::AssertWeight { + block_root: get_root(2), + weight: 1, + }); + + // Add block 3. + // + // 0 + // / \ + // 2 1 + // | + // 3 + ops.push(Operation::ProcessBlock { + slot: Slot::new(2), + root: get_root(3), + parent_root: get_root(1), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + }); + + // Ensure that the head is still 2 + // + // 0 + // / \ + // head-> 2 1 + // | + // 3 + ops.push(Operation::FindHead { + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + justified_state_balances: balances.clone(), + expected_head: get_root(2), + }); + + ops.push(Operation::AssertWeight { + block_root: get_root(0), + weight: 2, + }); + ops.push(Operation::AssertWeight { + block_root: get_root(1), + weight: 1, + }); + ops.push(Operation::AssertWeight { + block_root: get_root(2), + weight: 1, + }); + ops.push(Operation::AssertWeight { + block_root: get_root(3), + weight: 0, + }); + + // Move validator #0 vote from 1 to 3 + // + // 0 + // / \ + // 2 1 <- -vote + // | + // 3 <- +vote + ops.push(Operation::ProcessAttestation { + validator_index: 0, + block_root: get_root(3), + target_epoch: Epoch::new(3), + }); + + // Ensure that the head is still 2 + // + // 0 + // / \ + // head-> 2 1 + // | + // 3 + ops.push(Operation::FindHead { + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + justified_state_balances: balances.clone(), + 
expected_head: get_root(2), + }); + + ops.push(Operation::AssertWeight { + block_root: get_root(0), + weight: 2, + }); + ops.push(Operation::AssertWeight { + block_root: get_root(1), + weight: 1, + }); + ops.push(Operation::AssertWeight { + block_root: get_root(2), + weight: 1, + }); + ops.push(Operation::AssertWeight { + block_root: get_root(3), + weight: 1, + }); + + // Invalidate the payload of 3. + // + // 0 + // / \ + // 2 1 + // | + // 3 <- INVALID + ops.push(Operation::InvalidatePayload { + head_block_root: get_root(3), + latest_valid_ancestor_root: Some(get_hash(1)), + }); + + // Ensure that the head is still 2. + // + // 0 + // / \ + // head-> 2 1 + // | + // 3 <- INVALID + ops.push(Operation::FindHead { + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + justified_state_balances: balances.clone(), + expected_head: get_root(2), + }); + + // Invalidation of 3 should have removed upstream weight. + ops.push(Operation::AssertWeight { + block_root: get_root(0), + weight: 1, + }); + // Invalidation of 3 should have removed upstream weight. + ops.push(Operation::AssertWeight { + block_root: get_root(1), + weight: 0, + }); + ops.push(Operation::AssertWeight { + block_root: get_root(2), + weight: 1, + }); + // Invalidation should have removed weight. + ops.push(Operation::AssertWeight { + block_root: get_root(3), + weight: 0, + }); + + // Move a vote from 2 to 1. This is slashable, but that's not relevant here. 
+ // + // 0 + // / \ + // -vote-> 2 1 <- +vote + // | + // 3 <- INVALID + ops.push(Operation::ProcessAttestation { + validator_index: 1, + block_root: get_root(1), + target_epoch: Epoch::new(3), + }); + + // Ensure that the head has switched back to 1 + // + // 0 + // / \ + // 2 1 <-head + // | + // 3 <- INVALID + ops.push(Operation::FindHead { + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + justified_state_balances: balances, + expected_head: get_root(1), + }); + + ops.push(Operation::AssertWeight { + block_root: get_root(0), + weight: 1, + }); + ops.push(Operation::AssertWeight { + block_root: get_root(1), + weight: 1, + }); + ops.push(Operation::AssertWeight { + block_root: get_root(2), + weight: 0, + }); + // Invalidation should have removed weight. + ops.push(Operation::AssertWeight { + block_root: get_root(3), + weight: 0, + }); + + ForkChoiceTestDefinition { + finalized_block_slot: Slot::new(0), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + operations: ops, + } +} + +pub fn get_execution_status_test_definition_02() -> ForkChoiceTestDefinition { + let balances = vec![1; 2]; + let mut ops = vec![]; + + // Ensure that the head starts at the finalized block. + ops.push(Operation::FindHead { + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + justified_state_balances: balances.clone(), + expected_head: get_root(0), + }); + + // Add a block with a hash of 2. 
+ // + // 0 + // / + // 2 + ops.push(Operation::ProcessBlock { + slot: Slot::new(1), + root: get_root(2), + parent_root: get_root(0), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + }); + + // Ensure that the head is 2 + // + // 0 + // / + // head-> 2 + ops.push(Operation::FindHead { + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + justified_state_balances: balances.clone(), + expected_head: get_root(2), + }); + + // Add a block with a hash of 1 that comes off the genesis block (this is a fork compared + // to the previous block). + // + // 0 + // / \ + // 2 1 + ops.push(Operation::ProcessBlock { + slot: Slot::new(1), + root: get_root(1), + parent_root: get_root(0), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + }); + + // Ensure that the head is still 2 + // + // 0 + // / \ + // head-> 2 1 + ops.push(Operation::FindHead { + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + justified_state_balances: balances.clone(), + expected_head: get_root(2), + }); + + // Add a vote to block 1 + // + // 0 + // / \ + // 2 1 <- +vote + ops.push(Operation::ProcessAttestation { + validator_index: 0, + block_root: get_root(1), + target_epoch: Epoch::new(2), + }); + + // Ensure that the head is now 1, because 1 has a vote. 
+ // + // 0 + // / \ + // 2 1 <- head + ops.push(Operation::FindHead { + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + justified_state_balances: balances.clone(), + expected_head: get_root(1), + }); + + ops.push(Operation::AssertWeight { + block_root: get_root(0), + weight: 1, + }); + ops.push(Operation::AssertWeight { + block_root: get_root(1), + weight: 1, + }); + ops.push(Operation::AssertWeight { + block_root: get_root(2), + weight: 0, + }); + + // Add a vote to block 2 + // + // 0 + // / \ + // +vote-> 2 1 + ops.push(Operation::ProcessAttestation { + validator_index: 1, + block_root: get_root(2), + target_epoch: Epoch::new(2), + }); + + // Ensure that the head is 2 since 1 and 2 both have a vote + // + // 0 + // / \ + // head-> 2 1 + ops.push(Operation::FindHead { + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + justified_state_balances: balances.clone(), + expected_head: get_root(2), + }); + + ops.push(Operation::AssertWeight { + block_root: get_root(0), + weight: 2, + }); + ops.push(Operation::AssertWeight { + block_root: get_root(1), + weight: 1, + }); + ops.push(Operation::AssertWeight { + block_root: get_root(2), + weight: 1, + }); + + // Add block 3. 
+ // + // 0 + // / \ + // 2 1 + // | + // 3 + ops.push(Operation::ProcessBlock { + slot: Slot::new(2), + root: get_root(3), + parent_root: get_root(1), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + }); + + // Ensure that the head is still 2 + // + // 0 + // / \ + // head-> 2 1 + // | + // 3 + ops.push(Operation::FindHead { + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + justified_state_balances: balances.clone(), + expected_head: get_root(2), + }); + + ops.push(Operation::AssertWeight { + block_root: get_root(0), + weight: 2, + }); + ops.push(Operation::AssertWeight { + block_root: get_root(1), + weight: 1, + }); + ops.push(Operation::AssertWeight { + block_root: get_root(2), + weight: 1, + }); + ops.push(Operation::AssertWeight { + block_root: get_root(3), + weight: 0, + }); + + // Move validator #0 vote from 1 to 3 + // + // 0 + // / \ + // 2 1 <- -vote + // | + // 3 <- +vote + ops.push(Operation::ProcessAttestation { + validator_index: 0, + block_root: get_root(3), + target_epoch: Epoch::new(3), + }); + + // Move validator #1 vote from 2 to 3 + // + // 0 + // / \ + // -vote->2 1 + // | + // 3 <- +vote + ops.push(Operation::ProcessAttestation { + validator_index: 1, + block_root: get_root(3), + target_epoch: Epoch::new(3), + }); + + // Ensure that the head is now 3. 
+ // + // 0 + // / \ + // 2 1 + // | + // 3 <-head + ops.push(Operation::FindHead { + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + justified_state_balances: balances.clone(), + expected_head: get_root(3), + }); + + ops.push(Operation::AssertWeight { + block_root: get_root(0), + weight: 2, + }); + ops.push(Operation::AssertWeight { + block_root: get_root(1), + weight: 2, + }); + ops.push(Operation::AssertWeight { + block_root: get_root(2), + weight: 0, + }); + ops.push(Operation::AssertWeight { + block_root: get_root(3), + weight: 2, + }); + + // Invalidate the payload of 3. + // + // 0 + // / \ + // 2 1 + // | + // 3 <- INVALID + ops.push(Operation::InvalidatePayload { + head_block_root: get_root(3), + latest_valid_ancestor_root: Some(get_hash(1)), + }); + + // Ensure that the head is now 2. + // + // 0 + // / \ + // head-> 2 1 + // | + // 3 <- INVALID + ops.push(Operation::FindHead { + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + justified_state_balances: balances, + expected_head: get_root(2), + }); + + // Invalidation of 3 should have removed upstream weight. + ops.push(Operation::AssertWeight { + block_root: get_root(0), + weight: 0, + }); + // Invalidation of 3 should have removed upstream weight. + ops.push(Operation::AssertWeight { + block_root: get_root(1), + weight: 0, + }); + ops.push(Operation::AssertWeight { + block_root: get_root(2), + weight: 0, + }); + // Invalidation should have removed weight. 
+ ops.push(Operation::AssertWeight { + block_root: get_root(3), + weight: 0, + }); + + ForkChoiceTestDefinition { + finalized_block_slot: Slot::new(0), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + operations: ops, + } +} + +pub fn get_execution_status_test_definition_03() -> ForkChoiceTestDefinition { + let balances = vec![1_000; 2_000]; + let mut ops = vec![]; + + // Ensure that the head starts at the finalized block. + ops.push(Operation::FindHead { + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + justified_state_balances: balances.clone(), + expected_head: get_root(0), + }); + + // Add a block with a hash of 2. + // + // 0 + // / + // 2 + ops.push(Operation::ProcessBlock { + slot: Slot::new(1), + root: get_root(2), + parent_root: get_root(0), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + }); + + // Ensure that the head is 2 + // + // 0 + // / + // head-> 2 + ops.push(Operation::FindHead { + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + justified_state_balances: balances.clone(), + expected_head: get_root(2), + }); + + // Add a block with a hash of 1 that comes off the genesis block (this is a fork compared + // to the previous block). 
+ // + // 0 + // / \ + // 2 1 + ops.push(Operation::ProcessBlock { + slot: Slot::new(1), + root: get_root(1), + parent_root: get_root(0), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + }); + + // Ensure that the head is still 2 + // + // 0 + // / \ + // head-> 2 1 + ops.push(Operation::FindHead { + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + justified_state_balances: balances.clone(), + expected_head: get_root(2), + }); + + // Add a vote to block 1 + // + // 0 + // / \ + // 2 1 <- +vote + ops.push(Operation::ProcessAttestation { + validator_index: 0, + block_root: get_root(1), + target_epoch: Epoch::new(2), + }); + + // Ensure that the head is now 1, because 1 has a vote. + // + // 0 + // / \ + // 2 1 <- head + ops.push(Operation::FindHead { + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + justified_state_balances: balances.clone(), + expected_head: get_root(1), + }); + + ops.push(Operation::AssertWeight { + block_root: get_root(0), + weight: 1_000, + }); + ops.push(Operation::AssertWeight { + block_root: get_root(1), + weight: 1_000, + }); + ops.push(Operation::AssertWeight { + block_root: get_root(2), + weight: 0, + }); + + // Add another vote to 1 + // + // 0 + // / \ + // 2 1 <- +vote + ops.push(Operation::ProcessAttestation { + validator_index: 1, + block_root: get_root(1), + target_epoch: Epoch::new(2), + }); + + // Ensure that the head is 1. 
+ // + // 0 + // / \ + // 2 1 <- head + ops.push(Operation::FindHead { + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + justified_state_balances: balances.clone(), + expected_head: get_root(1), + }); + + ops.push(Operation::AssertWeight { + block_root: get_root(0), + weight: 2_000, + }); + ops.push(Operation::AssertWeight { + block_root: get_root(1), + weight: 2_000, + }); + ops.push(Operation::AssertWeight { + block_root: get_root(2), + weight: 0, + }); + + // Add block 3. + // + // 0 + // / \ + // 2 1 + // | + // 3 + ops.push(Operation::ProcessBlock { + slot: Slot::new(2), + root: get_root(3), + parent_root: get_root(1), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + }); + + // Ensure that the head is now 3, applying a proposer boost to 3 as well. + // + // 0 + // / \ + // 2 1 + // | + // 3 <- head + ops.push(Operation::ProposerBoostFindHead { + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + justified_state_balances: balances.clone(), + expected_head: get_root(3), + proposer_boost_root: get_root(3), + }); + + ops.push(Operation::AssertWeight { + block_root: get_root(0), + weight: 33_000, + }); + ops.push(Operation::AssertWeight { + block_root: get_root(1), + weight: 33_000, + }); + ops.push(Operation::AssertWeight { + block_root: get_root(2), + weight: 0, + }); + ops.push(Operation::AssertWeight { + block_root: get_root(3), + // This is a "magic number" generated from `calculate_proposer_boost`. + weight: 31_000, + }); + + // Invalidate the payload of 3. 
+ // + // 0 + // / \ + // 2 1 + // | + // 3 <- INVALID + ops.push(Operation::InvalidatePayload { + head_block_root: get_root(3), + latest_valid_ancestor_root: Some(get_hash(1)), + }); + + // Ensure that the head is now 1, maintaining the proposer boost on the invalid block. + // + // 0 + // / \ + // 2 1 <- head + // | + // 3 <- INVALID + ops.push(Operation::ProposerBoostFindHead { + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + justified_state_balances: balances, + expected_head: get_root(1), + proposer_boost_root: get_root(3), + }); + + ops.push(Operation::AssertWeight { + block_root: get_root(0), + weight: 2_000, + }); + ops.push(Operation::AssertWeight { + block_root: get_root(1), + weight: 2_000, + }); + ops.push(Operation::AssertWeight { + block_root: get_root(2), + weight: 0, + }); + // The proposer boost should be reverted due to the invalid payload. + ops.push(Operation::AssertWeight { + block_root: get_root(3), + weight: 0, + }); + + ForkChoiceTestDefinition { + finalized_block_slot: Slot::new(0), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + operations: ops, + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_01() { + let test = get_execution_status_test_definition_01(); + test.run(); + } + + #[test] + fn test_02() { + let test = get_execution_status_test_definition_02(); + test.run(); + } + + #[test] + fn test_03() { + let test = get_execution_status_test_definition_03(); + test.run(); + } +} diff --git a/consensus/proto_array/src/fork_choice_test_definition/ffg_updates.rs b/consensus/proto_array/src/fork_choice_test_definition/ffg_updates.rs index a129064504..77211a86a7 100644 --- a/consensus/proto_array/src/fork_choice_test_definition/ffg_updates.rs +++ 
b/consensus/proto_array/src/fork_choice_test_definition/ffg_updates.rs @@ -9,7 +9,7 @@ pub fn get_ffg_case_01_test_definition() -> ForkChoiceTestDefinition { justified_checkpoint: get_checkpoint(0), finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), - expected_head: get_hash(0), + expected_head: get_root(0), }); // Build the following tree (stick? lol). @@ -23,22 +23,22 @@ pub fn get_ffg_case_01_test_definition() -> ForkChoiceTestDefinition { // 3 <- just: 2, fin: 1 ops.push(Operation::ProcessBlock { slot: Slot::new(1), - root: get_hash(1), - parent_root: get_hash(0), + root: get_root(1), + parent_root: get_root(0), justified_checkpoint: get_checkpoint(0), finalized_checkpoint: get_checkpoint(0), }); ops.push(Operation::ProcessBlock { slot: Slot::new(2), - root: get_hash(2), - parent_root: get_hash(1), + root: get_root(2), + parent_root: get_root(1), justified_checkpoint: get_checkpoint(1), finalized_checkpoint: get_checkpoint(0), }); ops.push(Operation::ProcessBlock { slot: Slot::new(3), - root: get_hash(3), - parent_root: get_hash(2), + root: get_root(3), + parent_root: get_root(2), justified_checkpoint: get_checkpoint(2), finalized_checkpoint: get_checkpoint(1), }); @@ -56,7 +56,7 @@ pub fn get_ffg_case_01_test_definition() -> ForkChoiceTestDefinition { justified_checkpoint: get_checkpoint(0), finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), - expected_head: get_hash(3), + expected_head: get_root(3), }); // Ensure that with justified epoch 1 we find 2 @@ -72,7 +72,7 @@ pub fn get_ffg_case_01_test_definition() -> ForkChoiceTestDefinition { justified_checkpoint: get_checkpoint(1), finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), - expected_head: get_hash(2), + expected_head: get_root(2), }); // Ensure that with justified epoch 2 we find 3 @@ -88,7 +88,7 @@ pub fn get_ffg_case_01_test_definition() -> ForkChoiceTestDefinition { justified_checkpoint: 
get_checkpoint(2), finalized_checkpoint: get_checkpoint(1), justified_state_balances: balances, - expected_head: get_hash(3), + expected_head: get_root(3), }); // END OF TESTS @@ -109,7 +109,7 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { justified_checkpoint: get_checkpoint(0), finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), - expected_head: get_hash(0), + expected_head: get_root(0), }); // Build the following tree. @@ -129,48 +129,48 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { // Left branch ops.push(Operation::ProcessBlock { slot: Slot::new(1), - root: get_hash(1), - parent_root: get_hash(0), + root: get_root(1), + parent_root: get_root(0), justified_checkpoint: get_checkpoint(0), finalized_checkpoint: get_checkpoint(0), }); ops.push(Operation::ProcessBlock { slot: Slot::new(2), - root: get_hash(3), - parent_root: get_hash(1), + root: get_root(3), + parent_root: get_root(1), justified_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(1), + root: get_root(1), }, finalized_checkpoint: get_checkpoint(0), }); ops.push(Operation::ProcessBlock { slot: Slot::new(3), - root: get_hash(5), - parent_root: get_hash(3), + root: get_root(5), + parent_root: get_root(3), justified_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(1), + root: get_root(1), }, finalized_checkpoint: get_checkpoint(0), }); ops.push(Operation::ProcessBlock { slot: Slot::new(4), - root: get_hash(7), - parent_root: get_hash(5), + root: get_root(7), + parent_root: get_root(5), justified_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(1), + root: get_root(1), }, finalized_checkpoint: get_checkpoint(0), }); ops.push(Operation::ProcessBlock { slot: Slot::new(5), - root: get_hash(9), - parent_root: get_hash(7), + root: get_root(9), + parent_root: get_root(7), justified_checkpoint: Checkpoint { epoch: Epoch::new(2), - root: get_hash(3), + root: get_root(3), }, 
finalized_checkpoint: get_checkpoint(0), }); @@ -178,42 +178,42 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { // Right branch ops.push(Operation::ProcessBlock { slot: Slot::new(1), - root: get_hash(2), - parent_root: get_hash(0), + root: get_root(2), + parent_root: get_root(0), justified_checkpoint: get_checkpoint(0), finalized_checkpoint: get_checkpoint(0), }); ops.push(Operation::ProcessBlock { slot: Slot::new(2), - root: get_hash(4), - parent_root: get_hash(2), + root: get_root(4), + parent_root: get_root(2), justified_checkpoint: get_checkpoint(0), finalized_checkpoint: get_checkpoint(0), }); ops.push(Operation::ProcessBlock { slot: Slot::new(3), - root: get_hash(6), - parent_root: get_hash(4), + root: get_root(6), + parent_root: get_root(4), justified_checkpoint: get_checkpoint(0), finalized_checkpoint: get_checkpoint(0), }); ops.push(Operation::ProcessBlock { slot: Slot::new(4), - root: get_hash(8), - parent_root: get_hash(6), + root: get_root(8), + parent_root: get_root(6), justified_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(2), + root: get_root(2), }, finalized_checkpoint: get_checkpoint(0), }); ops.push(Operation::ProcessBlock { slot: Slot::new(5), - root: get_hash(10), - parent_root: get_hash(8), + root: get_root(10), + parent_root: get_root(8), justified_checkpoint: Checkpoint { epoch: Epoch::new(2), - root: get_hash(4), + root: get_root(4), }, finalized_checkpoint: get_checkpoint(0), }); @@ -235,23 +235,23 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { justified_checkpoint: get_checkpoint(0), finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), - expected_head: get_hash(10), + expected_head: get_root(10), }); // Same as above, but with justified epoch 2. 
ops.push(Operation::FindHead { justified_checkpoint: Checkpoint { epoch: Epoch::new(2), - root: get_hash(4), + root: get_root(4), }, finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), - expected_head: get_hash(10), + expected_head: get_root(10), }); // Same as above, but with justified epoch 3 (should be invalid). ops.push(Operation::InvalidFindHead { justified_checkpoint: Checkpoint { epoch: Epoch::new(3), - root: get_hash(6), + root: get_root(6), }, finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), @@ -272,7 +272,7 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { // 9 10 ops.push(Operation::ProcessAttestation { validator_index: 0, - block_root: get_hash(1), + block_root: get_root(1), target_epoch: Epoch::new(0), }); @@ -293,23 +293,23 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { justified_checkpoint: get_checkpoint(0), finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), - expected_head: get_hash(9), + expected_head: get_root(9), }); // Save as above but justified epoch 2. ops.push(Operation::FindHead { justified_checkpoint: Checkpoint { epoch: Epoch::new(2), - root: get_hash(3), + root: get_root(3), }, finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), - expected_head: get_hash(9), + expected_head: get_root(9), }); // Save as above but justified epoch 3 (should fail). 
ops.push(Operation::InvalidFindHead { justified_checkpoint: Checkpoint { epoch: Epoch::new(3), - root: get_hash(5), + root: get_root(5), }, finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), @@ -330,7 +330,7 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { // 9 10 ops.push(Operation::ProcessAttestation { validator_index: 1, - block_root: get_hash(2), + block_root: get_root(2), target_epoch: Epoch::new(0), }); @@ -351,23 +351,23 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { justified_checkpoint: get_checkpoint(0), finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), - expected_head: get_hash(10), + expected_head: get_root(10), }); // Same as above but justified epoch 2. ops.push(Operation::FindHead { justified_checkpoint: Checkpoint { epoch: Epoch::new(2), - root: get_hash(4), + root: get_root(4), }, finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), - expected_head: get_hash(10), + expected_head: get_root(10), }); // Same as above but justified epoch 3 (should fail). ops.push(Operation::InvalidFindHead { justified_checkpoint: Checkpoint { epoch: Epoch::new(3), - root: get_hash(6), + root: get_root(6), }, finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), @@ -389,27 +389,27 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { ops.push(Operation::FindHead { justified_checkpoint: Checkpoint { epoch: Epoch::new(0), - root: get_hash(1), + root: get_root(1), }, finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), - expected_head: get_hash(9), + expected_head: get_root(9), }); // Same as above but justified epoch 2. 
ops.push(Operation::FindHead { justified_checkpoint: Checkpoint { epoch: Epoch::new(2), - root: get_hash(3), + root: get_root(3), }, finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), - expected_head: get_hash(9), + expected_head: get_root(9), }); // Same as above but justified epoch 3 (should fail). ops.push(Operation::InvalidFindHead { justified_checkpoint: Checkpoint { epoch: Epoch::new(3), - root: get_hash(5), + root: get_root(5), }, finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), @@ -432,23 +432,23 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { justified_checkpoint: get_checkpoint(0), finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), - expected_head: get_hash(10), + expected_head: get_root(10), }); // Same as above but justified epoch 2. ops.push(Operation::FindHead { justified_checkpoint: Checkpoint { epoch: Epoch::new(2), - root: get_hash(4), + root: get_root(4), }, finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), - expected_head: get_hash(10), + expected_head: get_root(10), }); // Same as above but justified epoch 3 (should fail). 
ops.push(Operation::InvalidFindHead { justified_checkpoint: Checkpoint { epoch: Epoch::new(3), - root: get_hash(6), + root: get_root(6), }, finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances, diff --git a/consensus/proto_array/src/fork_choice_test_definition/no_votes.rs b/consensus/proto_array/src/fork_choice_test_definition/no_votes.rs index 0fbcafc5d4..a60b3e6b36 100644 --- a/consensus/proto_array/src/fork_choice_test_definition/no_votes.rs +++ b/consensus/proto_array/src/fork_choice_test_definition/no_votes.rs @@ -24,7 +24,7 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { // 2 Operation::ProcessBlock { slot: Slot::new(1), - root: get_hash(2), + root: get_root(2), parent_root: Hash256::zero(), justified_checkpoint: Checkpoint { epoch: Epoch::new(1), @@ -50,7 +50,7 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { root: Hash256::zero(), }, justified_state_balances: balances.clone(), - expected_head: get_hash(2), + expected_head: get_root(2), }, // Add block 1 // @@ -59,8 +59,8 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { // 2 1 Operation::ProcessBlock { slot: Slot::new(1), - root: get_hash(1), - parent_root: get_hash(0), + root: get_root(1), + parent_root: get_root(0), justified_checkpoint: Checkpoint { epoch: Epoch::new(1), root: Hash256::zero(), @@ -85,7 +85,7 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { root: Hash256::zero(), }, justified_state_balances: balances.clone(), - expected_head: get_hash(2), + expected_head: get_root(2), }, // Add block 3 // @@ -96,8 +96,8 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { // 3 Operation::ProcessBlock { slot: Slot::new(2), - root: get_hash(3), - parent_root: get_hash(1), + root: get_root(3), + parent_root: get_root(1), justified_checkpoint: Checkpoint { epoch: Epoch::new(1), root: Hash256::zero(), @@ -124,7 +124,7 @@ pub fn get_no_votes_test_definition() -> 
ForkChoiceTestDefinition { root: Hash256::zero(), }, justified_state_balances: balances.clone(), - expected_head: get_hash(2), + expected_head: get_root(2), }, // Add block 4 // @@ -135,8 +135,8 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { // 4 3 Operation::ProcessBlock { slot: Slot::new(2), - root: get_hash(4), - parent_root: get_hash(2), + root: get_root(4), + parent_root: get_root(2), justified_checkpoint: Checkpoint { epoch: Epoch::new(1), root: Hash256::zero(), @@ -163,7 +163,7 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { root: Hash256::zero(), }, justified_state_balances: balances.clone(), - expected_head: get_hash(4), + expected_head: get_root(4), }, // Add block 5 with a justified epoch of 2 // @@ -176,8 +176,8 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { // 5 <- justified epoch = 2 Operation::ProcessBlock { slot: Slot::new(3), - root: get_hash(5), - parent_root: get_hash(4), + root: get_root(5), + parent_root: get_root(4), justified_checkpoint: get_checkpoint(2), finalized_checkpoint: Checkpoint { epoch: Epoch::new(1), @@ -203,7 +203,7 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { root: Hash256::zero(), }, justified_state_balances: balances.clone(), - expected_head: get_hash(4), + expected_head: get_root(4), }, // Ensure there is an error when starting from a block that has the wrong justified epoch. 
// @@ -217,7 +217,7 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { Operation::InvalidFindHead { justified_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(5), + root: get_root(5), }, finalized_checkpoint: Checkpoint { epoch: Epoch::new(1), @@ -241,7 +241,7 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { root: Hash256::zero(), }, justified_state_balances: balances.clone(), - expected_head: get_hash(5), + expected_head: get_root(5), }, // Add block 6 // @@ -256,8 +256,8 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { // 6 Operation::ProcessBlock { slot: Slot::new(4), - root: get_hash(6), - parent_root: get_hash(5), + root: get_root(6), + parent_root: get_root(5), justified_checkpoint: get_checkpoint(2), finalized_checkpoint: Checkpoint { epoch: Epoch::new(1), @@ -282,7 +282,7 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { root: Hash256::zero(), }, justified_state_balances: balances, - expected_head: get_hash(6), + expected_head: get_root(6), }, ]; diff --git a/consensus/proto_array/src/fork_choice_test_definition/votes.rs b/consensus/proto_array/src/fork_choice_test_definition/votes.rs index f65177a849..58ac6af60b 100644 --- a/consensus/proto_array/src/fork_choice_test_definition/votes.rs +++ b/consensus/proto_array/src/fork_choice_test_definition/votes.rs @@ -8,14 +8,14 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { ops.push(Operation::FindHead { justified_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(0), + root: get_root(0), }, finalized_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(0), + root: get_root(0), }, justified_state_balances: balances.clone(), - expected_head: get_hash(0), + expected_head: get_root(0), }); // Add a block with a hash of 2. 
@@ -25,15 +25,15 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { // 2 ops.push(Operation::ProcessBlock { slot: Slot::new(1), - root: get_hash(2), - parent_root: get_hash(0), + root: get_root(2), + parent_root: get_root(0), justified_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(0), + root: get_root(0), }, finalized_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(0), + root: get_root(0), }, }); @@ -45,14 +45,14 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { ops.push(Operation::FindHead { justified_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(0), + root: get_root(0), }, finalized_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(0), + root: get_root(0), }, justified_state_balances: balances.clone(), - expected_head: get_hash(2), + expected_head: get_root(2), }); // Add a block with a hash of 1 that comes off the genesis block (this is a fork compared @@ -63,15 +63,15 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { // 2 1 ops.push(Operation::ProcessBlock { slot: Slot::new(1), - root: get_hash(1), - parent_root: get_hash(0), + root: get_root(1), + parent_root: get_root(0), justified_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(0), + root: get_root(0), }, finalized_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(0), + root: get_root(0), }, }); @@ -83,14 +83,14 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { ops.push(Operation::FindHead { justified_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(0), + root: get_root(0), }, finalized_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(0), + root: get_root(0), }, justified_state_balances: balances.clone(), - expected_head: get_hash(2), + expected_head: get_root(2), }); // Add a vote to block 1 @@ -100,7 +100,7 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { // 2 1 <- +vote 
ops.push(Operation::ProcessAttestation { validator_index: 0, - block_root: get_hash(1), + block_root: get_root(1), target_epoch: Epoch::new(2), }); @@ -112,14 +112,14 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { ops.push(Operation::FindHead { justified_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(0), + root: get_root(0), }, finalized_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(0), + root: get_root(0), }, justified_state_balances: balances.clone(), - expected_head: get_hash(1), + expected_head: get_root(1), }); // Add a vote to block 2 @@ -129,7 +129,7 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { // +vote-> 2 1 ops.push(Operation::ProcessAttestation { validator_index: 1, - block_root: get_hash(2), + block_root: get_root(2), target_epoch: Epoch::new(2), }); @@ -141,14 +141,14 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { ops.push(Operation::FindHead { justified_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(0), + root: get_root(0), }, finalized_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(0), + root: get_root(0), }, justified_state_balances: balances.clone(), - expected_head: get_hash(2), + expected_head: get_root(2), }); // Add block 3. 
@@ -160,15 +160,15 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { // 3 ops.push(Operation::ProcessBlock { slot: Slot::new(2), - root: get_hash(3), - parent_root: get_hash(1), + root: get_root(3), + parent_root: get_root(1), justified_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(0), + root: get_root(0), }, finalized_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(0), + root: get_root(0), }, }); @@ -182,14 +182,14 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { ops.push(Operation::FindHead { justified_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(0), + root: get_root(0), }, finalized_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(0), + root: get_root(0), }, justified_state_balances: balances.clone(), - expected_head: get_hash(2), + expected_head: get_root(2), }); // Move validator #0 vote from 1 to 3 @@ -201,7 +201,7 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { // 3 <- +vote ops.push(Operation::ProcessAttestation { validator_index: 0, - block_root: get_hash(3), + block_root: get_root(3), target_epoch: Epoch::new(3), }); @@ -215,14 +215,14 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { ops.push(Operation::FindHead { justified_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(0), + root: get_root(0), }, finalized_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(0), + root: get_root(0), }, justified_state_balances: balances.clone(), - expected_head: get_hash(2), + expected_head: get_root(2), }); // Move validator #1 vote from 2 to 1 (this is an equivocation, but fork choice doesn't @@ -235,7 +235,7 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { // 3 ops.push(Operation::ProcessAttestation { validator_index: 1, - block_root: get_hash(1), + block_root: get_root(1), target_epoch: Epoch::new(3), }); @@ -249,14 +249,14 @@ pub fn get_votes_test_definition() -> 
ForkChoiceTestDefinition { ops.push(Operation::FindHead { justified_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(0), + root: get_root(0), }, finalized_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(0), + root: get_root(0), }, justified_state_balances: balances.clone(), - expected_head: get_hash(3), + expected_head: get_root(3), }); // Add block 4. @@ -270,15 +270,15 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { // 4 ops.push(Operation::ProcessBlock { slot: Slot::new(3), - root: get_hash(4), - parent_root: get_hash(3), + root: get_root(4), + parent_root: get_root(3), justified_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(0), + root: get_root(0), }, finalized_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(0), + root: get_root(0), }, }); @@ -294,14 +294,14 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { ops.push(Operation::FindHead { justified_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(0), + root: get_root(0), }, finalized_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(0), + root: get_root(0), }, justified_state_balances: balances.clone(), - expected_head: get_hash(4), + expected_head: get_root(4), }); // Add block 5, which has a justified epoch of 2. 
@@ -317,15 +317,15 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { // 5 <- justified epoch = 2 ops.push(Operation::ProcessBlock { slot: Slot::new(4), - root: get_hash(5), - parent_root: get_hash(4), + root: get_root(5), + parent_root: get_root(4), justified_checkpoint: Checkpoint { epoch: Epoch::new(2), - root: get_hash(1), + root: get_root(1), }, finalized_checkpoint: Checkpoint { epoch: Epoch::new(2), - root: get_hash(1), + root: get_root(1), }, }); @@ -343,14 +343,14 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { ops.push(Operation::FindHead { justified_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(0), + root: get_root(0), }, finalized_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(0), + root: get_root(0), }, justified_state_balances: balances.clone(), - expected_head: get_hash(4), + expected_head: get_root(4), }); // Add block 6, which has a justified epoch of 0. @@ -366,15 +366,15 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { // 5 6 <- justified epoch = 0 ops.push(Operation::ProcessBlock { slot: Slot::new(0), - root: get_hash(6), - parent_root: get_hash(4), + root: get_root(6), + parent_root: get_root(4), justified_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(0), + root: get_root(0), }, finalized_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(0), + root: get_root(0), }, }); @@ -391,12 +391,12 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { // +2 vote-> 5 6 ops.push(Operation::ProcessAttestation { validator_index: 0, - block_root: get_hash(5), + block_root: get_root(5), target_epoch: Epoch::new(4), }); ops.push(Operation::ProcessAttestation { validator_index: 1, - block_root: get_hash(5), + block_root: get_root(5), target_epoch: Epoch::new(4), }); @@ -420,41 +420,41 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { // 9 ops.push(Operation::ProcessBlock { slot: Slot::new(0), - root: 
get_hash(7), - parent_root: get_hash(5), + root: get_root(7), + parent_root: get_root(5), justified_checkpoint: Checkpoint { epoch: Epoch::new(2), - root: get_hash(5), + root: get_root(5), }, finalized_checkpoint: Checkpoint { epoch: Epoch::new(2), - root: get_hash(5), + root: get_root(5), }, }); ops.push(Operation::ProcessBlock { slot: Slot::new(0), - root: get_hash(8), - parent_root: get_hash(7), + root: get_root(8), + parent_root: get_root(7), justified_checkpoint: Checkpoint { epoch: Epoch::new(2), - root: get_hash(5), + root: get_root(5), }, finalized_checkpoint: Checkpoint { epoch: Epoch::new(2), - root: get_hash(5), + root: get_root(5), }, }); ops.push(Operation::ProcessBlock { slot: Slot::new(0), - root: get_hash(9), - parent_root: get_hash(8), + root: get_root(9), + parent_root: get_root(8), justified_checkpoint: Checkpoint { epoch: Epoch::new(2), - root: get_hash(5), + root: get_root(5), }, finalized_checkpoint: Checkpoint { epoch: Epoch::new(2), - root: get_hash(5), + root: get_root(5), }, }); @@ -479,14 +479,14 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { ops.push(Operation::FindHead { justified_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(0), + root: get_root(0), }, finalized_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(0), + root: get_root(0), }, justified_state_balances: balances.clone(), - expected_head: get_hash(6), + expected_head: get_root(6), }); // Change fork-choice justified epoch to 1, and the start block to 5 and ensure that 9 is @@ -512,14 +512,14 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { ops.push(Operation::FindHead { justified_checkpoint: Checkpoint { epoch: Epoch::new(2), - root: get_hash(5), + root: get_root(5), }, finalized_checkpoint: Checkpoint { epoch: Epoch::new(2), - root: get_hash(5), + root: get_root(5), }, justified_state_balances: balances.clone(), - expected_head: get_hash(9), + expected_head: get_root(9), }); // Change fork-choice 
justified epoch to 1, and the start block to 5 and ensure that 9 is @@ -544,12 +544,12 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { // 9 <- +2 votes ops.push(Operation::ProcessAttestation { validator_index: 0, - block_root: get_hash(9), + block_root: get_root(9), target_epoch: Epoch::new(5), }); ops.push(Operation::ProcessAttestation { validator_index: 1, - block_root: get_hash(9), + block_root: get_root(9), target_epoch: Epoch::new(5), }); @@ -572,15 +572,15 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { // 9 10 ops.push(Operation::ProcessBlock { slot: Slot::new(0), - root: get_hash(10), - parent_root: get_hash(8), + root: get_root(10), + parent_root: get_root(8), justified_checkpoint: Checkpoint { epoch: Epoch::new(2), - root: get_hash(5), + root: get_root(5), }, finalized_checkpoint: Checkpoint { epoch: Epoch::new(2), - root: get_hash(5), + root: get_root(5), }, }); @@ -588,14 +588,14 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { ops.push(Operation::FindHead { justified_checkpoint: Checkpoint { epoch: Epoch::new(2), - root: get_hash(5), + root: get_root(5), }, finalized_checkpoint: Checkpoint { epoch: Epoch::new(2), - root: get_hash(5), + root: get_root(5), }, justified_state_balances: balances.clone(), - expected_head: get_hash(9), + expected_head: get_root(9), }); // Introduce 2 more validators into the system @@ -620,12 +620,12 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { // 9 10 <- +2 votes ops.push(Operation::ProcessAttestation { validator_index: 2, - block_root: get_hash(10), + block_root: get_root(10), target_epoch: Epoch::new(5), }); ops.push(Operation::ProcessAttestation { validator_index: 3, - block_root: get_hash(10), + block_root: get_root(10), target_epoch: Epoch::new(5), }); @@ -649,14 +649,14 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { ops.push(Operation::FindHead { justified_checkpoint: Checkpoint { epoch: Epoch::new(2), - root: 
get_hash(5), + root: get_root(5), }, finalized_checkpoint: Checkpoint { epoch: Epoch::new(2), - root: get_hash(5), + root: get_root(5), }, justified_state_balances: balances.clone(), - expected_head: get_hash(10), + expected_head: get_root(10), }); // Set the balances of the last two validators to zero @@ -674,14 +674,14 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { ops.push(Operation::FindHead { justified_checkpoint: Checkpoint { epoch: Epoch::new(2), - root: get_hash(5), + root: get_root(5), }, finalized_checkpoint: Checkpoint { epoch: Epoch::new(2), - root: get_hash(5), + root: get_root(5), }, justified_state_balances: balances.clone(), - expected_head: get_hash(9), + expected_head: get_root(9), }); // Set the balances of the last two validators back to 1 @@ -699,14 +699,14 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { ops.push(Operation::FindHead { justified_checkpoint: Checkpoint { epoch: Epoch::new(2), - root: get_hash(5), + root: get_root(5), }, finalized_checkpoint: Checkpoint { epoch: Epoch::new(2), - root: get_hash(5), + root: get_root(5), }, justified_state_balances: balances.clone(), - expected_head: get_hash(10), + expected_head: get_root(10), }); // Remove the last two validators @@ -725,19 +725,19 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { ops.push(Operation::FindHead { justified_checkpoint: Checkpoint { epoch: Epoch::new(2), - root: get_hash(5), + root: get_root(5), }, finalized_checkpoint: Checkpoint { epoch: Epoch::new(2), - root: get_hash(5), + root: get_root(5), }, justified_state_balances: balances.clone(), - expected_head: get_hash(9), + expected_head: get_root(9), }); // Ensure that pruning below the prune threshold does not prune. 
ops.push(Operation::Prune { - finalized_root: get_hash(5), + finalized_root: get_root(5), prune_threshold: usize::max_value(), expected_len: 11, }); @@ -746,14 +746,14 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { ops.push(Operation::FindHead { justified_checkpoint: Checkpoint { epoch: Epoch::new(2), - root: get_hash(5), + root: get_root(5), }, finalized_checkpoint: Checkpoint { epoch: Epoch::new(2), - root: get_hash(5), + root: get_root(5), }, justified_state_balances: balances.clone(), - expected_head: get_hash(9), + expected_head: get_root(9), }); // Ensure that pruning above the prune threshold does prune. @@ -775,7 +775,7 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { // / \ // 9 10 ops.push(Operation::Prune { - finalized_root: get_hash(5), + finalized_root: get_root(5), prune_threshold: 1, expected_len: 6, }); @@ -784,14 +784,14 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { ops.push(Operation::FindHead { justified_checkpoint: Checkpoint { epoch: Epoch::new(2), - root: get_hash(5), + root: get_root(5), }, finalized_checkpoint: Checkpoint { epoch: Epoch::new(2), - root: get_hash(5), + root: get_root(5), }, justified_state_balances: balances.clone(), - expected_head: get_hash(9), + expected_head: get_root(9), }); // Add block 11 @@ -807,15 +807,15 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { // 11 ops.push(Operation::ProcessBlock { slot: Slot::new(0), - root: get_hash(11), - parent_root: get_hash(9), + root: get_root(11), + parent_root: get_root(9), justified_checkpoint: Checkpoint { epoch: Epoch::new(2), - root: get_hash(5), + root: get_root(5), }, finalized_checkpoint: Checkpoint { epoch: Epoch::new(2), - root: get_hash(5), + root: get_root(5), }, }); @@ -833,25 +833,25 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { ops.push(Operation::FindHead { justified_checkpoint: Checkpoint { epoch: Epoch::new(2), - root: get_hash(5), + root: get_root(5), }, 
finalized_checkpoint: Checkpoint { epoch: Epoch::new(2), - root: get_hash(5), + root: get_root(5), }, justified_state_balances: balances, - expected_head: get_hash(11), + expected_head: get_root(11), }); ForkChoiceTestDefinition { finalized_block_slot: Slot::new(0), justified_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(0), + root: get_root(0), }, finalized_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(0), + root: get_root(0), }, operations: ops, } diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 759bee6ba9..b0e8991a78 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -4,8 +4,11 @@ use serde_derive::{Deserialize, Serialize}; use ssz::four_byte_option_impl; use ssz::Encode; use ssz_derive::{Decode, Encode}; -use std::collections::HashMap; -use types::{AttestationShufflingId, ChainSpec, Checkpoint, Epoch, EthSpec, Hash256, Slot}; +use std::collections::{HashMap, HashSet}; +use types::{ + AttestationShufflingId, ChainSpec, Checkpoint, Epoch, EthSpec, ExecutionBlockHash, Hash256, + Slot, +}; // Define a "legacy" implementation of `Option` which uses four bytes for encoding the union // selector. @@ -126,26 +129,42 @@ impl ProtoArray { continue; } - let mut node_delta = deltas - .get(node_index) - .copied() - .ok_or(Error::InvalidNodeDelta(node_index))?; + let execution_status_is_invalid = node.execution_status.is_invalid(); + + let mut node_delta = if execution_status_is_invalid { + // If the node has an invalid execution payload, reduce its weight to zero. + 0_i64 + .checked_sub(node.weight as i64) + .ok_or(Error::InvalidExecutionDeltaOverflow(node_index))? + } else { + deltas + .get(node_index) + .copied() + .ok_or(Error::InvalidNodeDelta(node_index))? + }; // If we find the node for which the proposer boost was previously applied, decrease // the delta by the previous score amount. 
if self.previous_proposer_boost.root != Hash256::zero() && self.previous_proposer_boost.root == node.root + // Invalid nodes will always have a weight of zero so there's no need to subtract + // the proposer boost delta. + && !execution_status_is_invalid { node_delta = node_delta .checked_sub(self.previous_proposer_boost.score as i64) .ok_or(Error::DeltaOverflow(node_index))?; } // If we find the node matching the current proposer boost root, increase - // the delta by the new score amount. + // the delta by the new score amount (unless the block has an invalid execution status). // // https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/fork-choice.md#get_latest_attesting_balance if let Some(proposer_score_boost) = spec.proposer_score_boost { - if proposer_boost_root != Hash256::zero() && proposer_boost_root == node.root { + if proposer_boost_root != Hash256::zero() + && proposer_boost_root == node.root + // Invalid nodes (or their ancestors) should not receive a proposer boost. + && !execution_status_is_invalid + { proposer_score = calculate_proposer_boost::(new_balances, proposer_score_boost) .ok_or(Error::ProposerBoostOverflow(node_index))?; @@ -156,7 +175,10 @@ impl ProtoArray { } // Apply the delta to the node. - if node_delta < 0 { + if execution_status_is_invalid { + // Invalid nodes always have a weight of 0. + node.weight = 0 + } else if node_delta < 0 { // Note: I am conflicted about whether to use `saturating_sub` or `checked_sub` // here. // @@ -250,14 +272,20 @@ impl ProtoArray { self.maybe_update_best_child_and_descendant(parent_index, node_index)?; if matches!(block.execution_status, ExecutionStatus::Valid(_)) { - self.propagate_execution_payload_verification(parent_index)?; + self.propagate_execution_payload_validation(parent_index)?; } } Ok(()) } - pub fn propagate_execution_payload_verification( + /// Updates the `verified_node_index` and all ancestors to have validated execution payloads. 
+ /// + /// Returns an error if: + /// + /// - The `verified_node_index` is unknown. + /// - Any of the to-be-validated payloads are already invalid. + pub fn propagate_execution_payload_validation( &mut self, verified_node_index: usize, ) -> Result<(), Error> { @@ -300,6 +328,213 @@ impl ProtoArray { } } + /// Invalidate the relevant ancestors and descendants of a block with an invalid execution + /// payload. + /// + /// The `head_block_root` should be the beacon block root of the block with the invalid + /// execution payload, _or_ its parent where the block with the invalid payload has not yet + /// been applied to `self`. + /// + /// The `latest_valid_hash` should be the hash of most recent *valid* execution payload + /// contained in an ancestor block of `head_block_root`. + /// + /// This function will invalidate: + /// + /// * The block matching `head_block_root` _unless_ that block has a payload matching `latest_valid_hash`. + /// * All ancestors of `head_block_root` back to the block with payload matching + /// `latest_valid_hash` (endpoint > exclusive). In the case where the `head_block_root` is the parent + /// of the invalid block and itself matches `latest_valid_hash`, no ancestors will be invalidated. + /// * All descendants of `latest_valid_hash` if supplied and consistent with `head_block_root`, + /// or else all descendants of `head_block_root`. + /// + /// ## Details + /// + /// If `head_block_root` is not known to fork choice, an error is returned. + /// + /// If `latest_valid_hash` is `Some(hash)` where `hash` is either not known to fork choice + /// (perhaps it's junk or pre-finalization), then only the `head_block_root` block will be + /// invalidated (no ancestors). No error will be returned in this case. + /// + /// If `latest_valid_hash` is `Some(hash)` where `hash` is a known ancestor of + /// `head_block_root`, then all blocks between `head_block_root` and `latest_valid_hash` will + /// be invalidated. 
Additionally, all blocks that descend from a newly-invalidated block will + /// also be invalidated. + pub fn propagate_execution_payload_invalidation( + &mut self, + head_block_root: Hash256, + latest_valid_ancestor_hash: Option, + ) -> Result<(), Error> { + let mut invalidated_indices: HashSet = <_>::default(); + + /* + * Step 1: + * + * Find the `head_block_root` and maybe iterate backwards and invalidate ancestors. Record + * all invalidated block indices in `invalidated_indices`. + */ + + let mut index = *self + .indices + .get(&head_block_root) + .ok_or(Error::NodeUnknown(head_block_root))?; + + // Try to map the ancestor payload *hash* to an ancestor beacon block *root*. + let latest_valid_ancestor_root = latest_valid_ancestor_hash + .and_then(|hash| self.execution_block_hash_to_beacon_block_root(&hash)); + + // Set to `true` if both conditions are satisfied: + // + // 1. The `head_block_root` is a descendant of `latest_valid_ancestor_hash` + // 2. The `latest_valid_ancestor_hash` is equal to or a descendant of the finalized block. + let latest_valid_ancestor_is_descendant = + latest_valid_ancestor_root.map_or(false, |ancestor_root| { + self.is_descendant(ancestor_root, head_block_root) + && self.is_descendant(self.finalized_checkpoint.root, ancestor_root) + }); + + // Collect all *ancestors* which were declared invalid since they reside between the + // `head_block_root` and the `latest_valid_ancestor_root`. + loop { + let node = self + .nodes + .get_mut(index) + .ok_or(Error::InvalidNodeIndex(index))?; + + match node.execution_status { + ExecutionStatus::Valid(hash) + | ExecutionStatus::Invalid(hash) + | ExecutionStatus::Unknown(hash) => { + // If we're no longer processing the `head_block_root` and the last valid + // ancestor is unknown, exit this loop and proceed to invalidate and + // descendants of `head_block_root`/`latest_valid_ancestor_root`. 
+ // + // In effect, this means that if an unknown hash (junk or pre-finalization) is + // supplied, don't validate any ancestors. The alternative is to invalidate + // *all* ancestors, which would likely involve shutting down the client due to + // an invalid justified checkpoint. + if !latest_valid_ancestor_is_descendant && node.root != head_block_root { + break; + } else if Some(hash) == latest_valid_ancestor_hash { + // If the `best_child` or `best_descendant` of the latest valid hash was + // invalidated, set those fields to `None`. + // + // In theory, an invalid `best_child` necessarily infers an invalid + // `best_descendant`. However, we check each variable independently to + // defend against errors which might result in an invalid block being set as + // head. + if node + .best_child + .map_or(false, |i| invalidated_indices.contains(&i)) + { + node.best_child = None + } + if node + .best_descendant + .map_or(false, |i| invalidated_indices.contains(&i)) + { + node.best_descendant = None + } + + // It might be new knowledge that this block is valid, ensure that it and all + // ancestors are marked as valid. + self.propagate_execution_payload_validation(index)?; + break; + } + } + ExecutionStatus::Irrelevant(_) => break, + } + + match &node.execution_status { + // It's illegal for an execution client to declare that some previously-valid block + // is now invalid. This is a consensus failure on their behalf. + ExecutionStatus::Valid(hash) => { + return Err(Error::ValidExecutionStatusBecameInvalid { + block_root: node.root, + payload_block_hash: *hash, + }) + } + ExecutionStatus::Unknown(hash) => { + node.execution_status = ExecutionStatus::Invalid(*hash); + + // It's impossible for an invalid block to lead to a "best" block, so set these + // fields to `None`. + // + // Failing to set these values will result in `Self::node_leads_to_viable_head` + // returning `false` for *valid* ancestors of invalid blocks. 
+ node.best_child = None; + node.best_descendant = None; + } + // The block is already invalid, but keep going backwards to ensure all ancestors + // are updated. + ExecutionStatus::Invalid(_) => (), + // This block is pre-merge, therefore it has no execution status. Nor do its + // ancestors. + ExecutionStatus::Irrelevant(_) => break, + } + + invalidated_indices.insert(index); + + if let Some(parent_index) = node.parent { + index = parent_index + } else { + // The root of the block tree has been reached (aka the finalized block), without + // matching `latest_valid_ancestor_hash`. It's not possible or useful to go any + // further back: the finalized checkpoint is invalid so all is lost! + break; + } + } + + /* + * Step 2: + * + * Start at either the `latest_valid_ancestor` or the `head_block_root` and iterate + * *forwards* to invalidate all descendants of all blocks in `invalidated_indices`. + */ + + let starting_block_root = latest_valid_ancestor_root + .filter(|_| latest_valid_ancestor_is_descendant) + .unwrap_or(head_block_root); + let latest_valid_ancestor_index = *self + .indices + .get(&starting_block_root) + .ok_or(Error::NodeUnknown(starting_block_root))?; + let first_potential_descendant = latest_valid_ancestor_index + 1; + + // Collect all *descendants* which have been declared invalid since they're the descendant of a block + // with an invalid execution payload. 
+ for index in first_potential_descendant..self.nodes.len() { + let node = self + .nodes + .get_mut(index) + .ok_or(Error::InvalidNodeIndex(index))?; + + if let Some(parent_index) = node.parent { + if invalidated_indices.contains(&parent_index) { + match &node.execution_status { + ExecutionStatus::Valid(hash) => { + return Err(Error::ValidExecutionStatusBecameInvalid { + block_root: node.root, + payload_block_hash: *hash, + }) + } + ExecutionStatus::Unknown(hash) | ExecutionStatus::Invalid(hash) => { + node.execution_status = ExecutionStatus::Invalid(*hash) + } + ExecutionStatus::Irrelevant(_) => { + return Err(Error::IrrelevantDescendant { + block_root: node.root, + }) + } + } + + invalidated_indices.insert(index); + } + } + } + + Ok(()) + } + /// Follows the best-descendant links to find the best-block (i.e., head-block). /// /// ## Notes @@ -320,6 +555,19 @@ impl ProtoArray { .get(justified_index) .ok_or(Error::InvalidJustifiedIndex(justified_index))?; + // Since there are no valid descendants of a justified block with an invalid execution + // payload, there would be no head to choose from. + // + // Fork choice is effectively broken until a new justified root is set. It might not be + // practically possible to set a new justified root if we are unable to find a new head. + // + // This scenario is *unsupported*. It represents a serious consensus failure. + if justified_node.execution_status.is_invalid() { + return Err(Error::InvalidJustifiedCheckpointExecutionStatus { + justified_root: *justified_root, + }); + } + let best_descendant_index = justified_node.best_descendant.unwrap_or(justified_index); let best_node = self @@ -537,6 +785,10 @@ impl ProtoArray { /// Any node that has a different finalized or justified epoch should not be viable for the /// head. 
fn node_is_viable_for_head(&self, node: &ProtoNode) -> bool { + if node.execution_status.is_invalid() { + return false; + } + if let (Some(node_justified_checkpoint), Some(node_finalized_checkpoint)) = (node.justified_checkpoint, node.finalized_checkpoint) { @@ -568,6 +820,42 @@ impl ProtoArray { self.iter_nodes(block_root) .map(|node| (node.root, node.slot)) } + + /// Returns `true` if the `descendant_root` has an ancestor with `ancestor_root`. Always + /// returns `false` if either input root is unknown. + /// + /// ## Notes + /// + /// Still returns `true` if `ancestor_root` is known and `ancestor_root == descendant_root`. + pub fn is_descendant(&self, ancestor_root: Hash256, descendant_root: Hash256) -> bool { + self.indices + .get(&ancestor_root) + .and_then(|ancestor_index| self.nodes.get(*ancestor_index)) + .and_then(|ancestor| { + self.iter_block_roots(&descendant_root) + .take_while(|(_root, slot)| *slot >= ancestor.slot) + .find(|(_root, slot)| *slot == ancestor.slot) + .map(|(root, _slot)| root == ancestor_root) + }) + .unwrap_or(false) + } + + /// Returns the first *beacon block root* which contains an execution payload with the given + /// `block_hash`, if any. + pub fn execution_block_hash_to_beacon_block_root( + &self, + block_hash: &ExecutionBlockHash, + ) -> Option { + self.nodes + .iter() + .rev() + .find(|node| { + node.execution_status + .block_hash() + .map_or(false, |node_block_hash| node_block_hash == *block_hash) + }) + .map(|node| node.root) + } } /// A helper method to calculate the proposer boost based on the given `validator_balances`. 
diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index 891eafabe9..1f5b997f67 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -5,7 +5,10 @@ use serde_derive::{Deserialize, Serialize}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use std::collections::HashMap; -use types::{AttestationShufflingId, ChainSpec, Checkpoint, Epoch, EthSpec, Hash256, Slot}; +use types::{ + AttestationShufflingId, ChainSpec, Checkpoint, Epoch, EthSpec, ExecutionBlockHash, Hash256, + Slot, +}; pub const DEFAULT_PRUNE_THRESHOLD: usize = 256; @@ -21,11 +24,11 @@ pub struct VoteTracker { #[ssz(enum_behaviour = "union")] pub enum ExecutionStatus { /// An EL has determined that the payload is valid. - Valid(Hash256), + Valid(ExecutionBlockHash), /// An EL has determined that the payload is invalid. - Invalid(Hash256), + Invalid(ExecutionBlockHash), /// An EL has not yet verified the execution payload. - Unknown(Hash256), + Unknown(ExecutionBlockHash), /// The block is either prior to the merge fork, or after the merge fork but before the terminal /// PoW block has been found. /// @@ -41,7 +44,7 @@ impl ExecutionStatus { ExecutionStatus::Irrelevant(false) } - pub fn block_hash(&self) -> Option { + pub fn block_hash(&self) -> Option { match self { ExecutionStatus::Valid(hash) | ExecutionStatus::Invalid(hash) @@ -49,6 +52,37 @@ impl ExecutionStatus { ExecutionStatus::Irrelevant(_) => None, } } + + /// Returns `true` if the block: + /// + /// - Has execution enabled + /// - Has a valid payload + pub fn is_valid(&self) -> bool { + matches!(self, ExecutionStatus::Valid(_)) + } + + /// Returns `true` if the block: + /// + /// - Has execution enabled + /// - Has a payload that has not yet been verified by an EL. 
+ pub fn is_not_verified(&self) -> bool { + matches!(self, ExecutionStatus::Unknown(_)) + } + + /// Returns `true` if the block: + /// + /// - Has execution enabled + /// - Has an invalid payload. + pub fn is_invalid(&self) -> bool { + matches!(self, ExecutionStatus::Invalid(_)) + } + + /// Returns `true` if the block: + /// + /// - Does not have execution enabled (before or after Bellatrix fork) + pub fn is_irrelevant(&self) -> bool { + matches!(self, ExecutionStatus::Irrelevant(_)) + } } /// A block that is to be applied to the fork choice. @@ -150,6 +184,17 @@ impl ProtoArrayForkChoice { }) } + /// See `ProtoArray::propagate_execution_payload_invalidation` for documentation. + pub fn process_execution_payload_invalidation( + &mut self, + head_block_root: Hash256, + latest_valid_ancestor_root: Option, + ) -> Result<(), String> { + self.proto_array + .propagate_execution_payload_invalidation(head_block_root, latest_valid_ancestor_root) + .map_err(|e| format!("Failed to process invalid payload: {:?}", e)) + } + pub fn process_attestation( &mut self, validator_index: usize, @@ -267,25 +312,19 @@ impl ProtoArrayForkChoice { } } - /// Returns `true` if the `descendant_root` has an ancestor with `ancestor_root`. Always - /// returns `false` if either input roots are unknown. - /// - /// ## Notes - /// - /// Still returns `true` if `ancestor_root` is known and `ancestor_root == descendant_root`. + /// Returns the weight of a given block. + pub fn get_weight(&self, block_root: &Hash256) -> Option { + let block_index = self.proto_array.indices.get(block_root)?; + self.proto_array + .nodes + .get(*block_index) + .map(|node| node.weight) + } + + /// See `ProtoArray` documentation. 
pub fn is_descendant(&self, ancestor_root: Hash256, descendant_root: Hash256) -> bool { self.proto_array - .indices - .get(&ancestor_root) - .and_then(|ancestor_index| self.proto_array.nodes.get(*ancestor_index)) - .and_then(|ancestor| { - self.proto_array - .iter_block_roots(&descendant_root) - .take_while(|(_root, slot)| *slot >= ancestor.slot) - .find(|(_root, slot)| *slot == ancestor.slot) - .map(|(root, _slot)| root == ancestor_root) - }) - .unwrap_or(false) + .is_descendant(ancestor_root, descendant_root) } pub fn latest_message(&self, validator_index: usize) -> Option<(Hash256, Epoch)> { diff --git a/consensus/state_processing/src/per_block_processing/errors.rs b/consensus/state_processing/src/per_block_processing/errors.rs index abfbb621d9..e214b6e63d 100644 --- a/consensus/state_processing/src/per_block_processing/errors.rs +++ b/consensus/state_processing/src/per_block_processing/errors.rs @@ -58,8 +58,8 @@ pub enum BlockProcessingError { InconsistentBlockFork(InconsistentFork), InconsistentStateFork(InconsistentFork), ExecutionHashChainIncontiguous { - expected: Hash256, - found: Hash256, + expected: ExecutionBlockHash, + found: ExecutionBlockHash, }, ExecutionRandaoMismatch { expected: Hash256, diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index d391fe01e1..29c67808cc 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -144,7 +144,7 @@ pub struct ChainSpec { /// The Merge fork epoch is optional, with `None` representing "Merge never happens". pub bellatrix_fork_epoch: Option, pub terminal_total_difficulty: Uint256, - pub terminal_block_hash: Hash256, + pub terminal_block_hash: ExecutionBlockHash, pub terminal_block_hash_activation_epoch: Epoch, /* @@ -549,7 +549,7 @@ impl ChainSpec { // `Uint256::MAX` which is `2*256- 1`. 
.checked_add(Uint256::one()) .expect("addition does not overflow"), - terminal_block_hash: Hash256::zero(), + terminal_block_hash: ExecutionBlockHash::zero(), terminal_block_hash_activation_epoch: Epoch::new(u64::MAX), /* @@ -746,7 +746,7 @@ impl ChainSpec { // `Uint256::MAX` which is `2*256- 1`. .checked_add(Uint256::one()) .expect("addition does not overflow"), - terminal_block_hash: Hash256::zero(), + terminal_block_hash: ExecutionBlockHash::zero(), terminal_block_hash_activation_epoch: Epoch::new(u64::MAX), /* @@ -787,7 +787,7 @@ pub struct Config { pub terminal_total_difficulty: Uint256, // TODO(merge): remove this default #[serde(default = "default_terminal_block_hash")] - pub terminal_block_hash: Hash256, + pub terminal_block_hash: ExecutionBlockHash, // TODO(merge): remove this default #[serde(default = "default_terminal_block_hash_activation_epoch")] pub terminal_block_hash_activation_epoch: Epoch, @@ -870,8 +870,8 @@ const fn default_terminal_total_difficulty() -> Uint256 { ]) } -fn default_terminal_block_hash() -> Hash256 { - Hash256::zero() +fn default_terminal_block_hash() -> ExecutionBlockHash { + ExecutionBlockHash::zero() } fn default_terminal_block_hash_activation_epoch() -> Epoch { diff --git a/consensus/types/src/execution_block_hash.rs b/consensus/types/src/execution_block_hash.rs new file mode 100644 index 0000000000..dbfe218159 --- /dev/null +++ b/consensus/types/src/execution_block_hash.rs @@ -0,0 +1,101 @@ +use crate::test_utils::TestRandom; +use crate::Hash256; +use rand::RngCore; +use serde_derive::{Deserialize, Serialize}; +use ssz::{Decode, DecodeError, Encode}; +use std::fmt; + +#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] +#[derive(Default, Debug, Clone, Copy, Serialize, Deserialize, Eq, PartialEq, Hash)] +#[serde(transparent)] +pub struct ExecutionBlockHash(Hash256); + +impl ExecutionBlockHash { + pub fn zero() -> Self { + Self(Hash256::zero()) + } + + pub fn repeat_byte(b: u8) -> Self { + 
Self(Hash256::repeat_byte(b)) + } + + pub fn from_root(root: Hash256) -> Self { + Self(root) + } + + pub fn into_root(self) -> Hash256 { + self.0 + } +} + +impl Encode for ExecutionBlockHash { + fn is_ssz_fixed_len() -> bool { + ::is_ssz_fixed_len() + } + + fn ssz_fixed_len() -> usize { + ::ssz_fixed_len() + } + + fn ssz_bytes_len(&self) -> usize { + self.0.ssz_bytes_len() + } + + fn ssz_append(&self, buf: &mut Vec) { + self.0.ssz_append(buf) + } +} + +impl Decode for ExecutionBlockHash { + fn is_ssz_fixed_len() -> bool { + ::is_ssz_fixed_len() + } + + fn ssz_fixed_len() -> usize { + ::ssz_fixed_len() + } + + fn from_ssz_bytes(bytes: &[u8]) -> Result { + Hash256::from_ssz_bytes(bytes).map(Self) + } +} + +impl tree_hash::TreeHash for ExecutionBlockHash { + fn tree_hash_type() -> tree_hash::TreeHashType { + Hash256::tree_hash_type() + } + + fn tree_hash_packed_encoding(&self) -> Vec { + self.0.tree_hash_packed_encoding() + } + + fn tree_hash_packing_factor() -> usize { + Hash256::tree_hash_packing_factor() + } + + fn tree_hash_root(&self) -> tree_hash::Hash256 { + self.0.tree_hash_root() + } +} + +impl TestRandom for ExecutionBlockHash { + fn random_for_test(rng: &mut impl RngCore) -> Self { + Self(Hash256::random_for_test(rng)) + } +} + +impl std::str::FromStr for ExecutionBlockHash { + type Err = String; + + fn from_str(s: &str) -> Result { + Hash256::from_str(s) + .map(Self) + .map_err(|e| format!("{:?}", e)) + } +} + +impl fmt::Display for ExecutionBlockHash { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.0) + } +} diff --git a/consensus/types/src/execution_payload.rs b/consensus/types/src/execution_payload.rs index 781fb7460f..fc37c1193b 100644 --- a/consensus/types/src/execution_payload.rs +++ b/consensus/types/src/execution_payload.rs @@ -15,7 +15,7 @@ pub type Transaction = VariableList; #[derivative(PartialEq, Hash(bound = "T: EthSpec"))] #[serde(bound = "T: EthSpec")] pub struct ExecutionPayload { - pub parent_hash: 
Hash256, + pub parent_hash: ExecutionBlockHash, pub fee_recipient: Address, pub state_root: Hash256, pub receipts_root: Hash256, @@ -34,7 +34,7 @@ pub struct ExecutionPayload { pub extra_data: VariableList, #[serde(with = "eth2_serde_utils::quoted_u256")] pub base_fee_per_gas: Uint256, - pub block_hash: Hash256, + pub block_hash: ExecutionBlockHash, #[serde(with = "ssz_types::serde_utils::list_of_hex_var_list")] pub transactions: VariableList, T::MaxTransactionsPerPayload>, diff --git a/consensus/types/src/execution_payload_header.rs b/consensus/types/src/execution_payload_header.rs index aa022f6420..1c173093a4 100644 --- a/consensus/types/src/execution_payload_header.rs +++ b/consensus/types/src/execution_payload_header.rs @@ -9,7 +9,7 @@ use tree_hash_derive::TreeHash; Default, Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, )] pub struct ExecutionPayloadHeader { - pub parent_hash: Hash256, + pub parent_hash: ExecutionBlockHash, pub fee_recipient: Address, pub state_root: Hash256, pub receipts_root: Hash256, @@ -28,7 +28,7 @@ pub struct ExecutionPayloadHeader { pub extra_data: VariableList, #[serde(with = "eth2_serde_utils::quoted_u256")] pub base_fee_per_gas: Uint256, - pub block_hash: Hash256, + pub block_hash: ExecutionBlockHash, pub transactions_root: Hash256, } diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 832f262698..6aeb6f3205 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -37,6 +37,7 @@ pub mod deposit_message; pub mod enr_fork_id; pub mod eth1_data; pub mod eth_spec; +pub mod execution_block_hash; pub mod execution_payload; pub mod execution_payload_header; pub mod fork; @@ -113,6 +114,7 @@ pub use crate::deposit_message::DepositMessage; pub use crate::enr_fork_id::EnrForkId; pub use crate::eth1_data::Eth1Data; pub use crate::eth_spec::EthSpecId; +pub use crate::execution_block_hash::ExecutionBlockHash; pub use crate::execution_payload::{ExecutionPayload, 
Transaction}; pub use crate::execution_payload_header::ExecutionPayloadHeader; pub use crate::fork::Fork; diff --git a/lcli/src/create_payload_header.rs b/lcli/src/create_payload_header.rs index 814a57f264..04122d0e6b 100644 --- a/lcli/src/create_payload_header.rs +++ b/lcli/src/create_payload_header.rs @@ -23,7 +23,7 @@ pub fn run(matches: &ArgMatches) -> Result<(), String> { base_fee_per_gas, timestamp: genesis_time, block_hash: eth1_block_hash, - random: eth1_block_hash, + random: eth1_block_hash.into_root(), ..ExecutionPayloadHeader::default() }; let mut file = File::create(file_name).map_err(|_| "Unable to create file".to_string())?; diff --git a/lcli/src/new_testnet.rs b/lcli/src/new_testnet.rs index 83dcc2e719..5254ff5a62 100644 --- a/lcli/src/new_testnet.rs +++ b/lcli/src/new_testnet.rs @@ -108,7 +108,7 @@ pub fn run(testnet_dir_path: PathBuf, matches: &ArgMatches) -> Resul let genesis_state = interop_genesis_state::( &keypairs, genesis_time, - eth1_block_hash, + eth1_block_hash.into_root(), execution_payload_header, &spec, )?; diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index 608429a9cb..9744434f53 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -14,15 +14,15 @@ use ssz_derive::Decode; use state_processing::state_advance::complete_state_advance; use std::time::Duration; use types::{ - Attestation, BeaconBlock, BeaconState, Checkpoint, Epoch, EthSpec, ForkName, Hash256, - IndexedAttestation, SignedBeaconBlock, Slot, Uint256, + Attestation, BeaconBlock, BeaconState, Checkpoint, Epoch, EthSpec, ExecutionBlockHash, + ForkName, Hash256, IndexedAttestation, SignedBeaconBlock, Slot, Uint256, }; #[derive(Default, Debug, PartialEq, Clone, Deserialize, Decode)] #[serde(deny_unknown_fields)] pub struct PowBlock { - pub block_hash: Hash256, - pub parent_hash: Hash256, + pub block_hash: ExecutionBlockHash, + pub parent_hash: ExecutionBlockHash, pub 
total_difficulty: Uint256, } diff --git a/testing/execution_engine_integration/src/test_rig.rs b/testing/execution_engine_integration/src/test_rig.rs index e8253036fb..26dbc1bfdd 100644 --- a/testing/execution_engine_integration/src/test_rig.rs +++ b/testing/execution_engine_integration/src/test_rig.rs @@ -1,10 +1,10 @@ use crate::execution_engine::{ExecutionEngine, GenericExecutionEngine}; -use execution_layer::{ExecutionLayer, PayloadAttributes, PayloadStatusV1Status}; +use execution_layer::{ExecutionLayer, PayloadAttributes, PayloadStatus}; use std::sync::Arc; use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; use task_executor::TaskExecutor; use tokio::time::sleep; -use types::{Address, ChainSpec, EthSpec, Hash256, MainnetEthSpec, Uint256}; +use types::{Address, ChainSpec, EthSpec, ExecutionBlockHash, Hash256, MainnetEthSpec, Uint256}; const EXECUTION_ENGINE_START_TIMEOUT: Duration = Duration::from_secs(10); @@ -139,7 +139,7 @@ impl TestRig { let parent_hash = terminal_pow_block_hash; let timestamp = timestamp_now(); let random = Hash256::zero(); - let finalized_block_hash = Hash256::zero(); + let finalized_block_hash = ExecutionBlockHash::zero(); let proposer_index = 0; let valid_payload = self .ee_a @@ -161,15 +161,15 @@ impl TestRig { * `notify_new_payload`. */ let head_block_hash = valid_payload.block_hash; - let finalized_block_hash = Hash256::zero(); + let finalized_block_hash = ExecutionBlockHash::zero(); let payload_attributes = None; - let (status, _) = self + let status = self .ee_a .execution_layer .notify_forkchoice_updated(head_block_hash, finalized_block_hash, payload_attributes) .await .unwrap(); - assert_eq!(status, PayloadStatusV1Status::Syncing); + assert_eq!(status, PayloadStatus::Syncing); /* * Execution Engine A: @@ -177,13 +177,13 @@ impl TestRig { * Provide the valid payload back to the EE again. 
*/ - let (status, _) = self + let status = self .ee_a .execution_layer .notify_new_payload(&valid_payload) .await .unwrap(); - assert_eq!(status, PayloadStatusV1Status::Valid); + assert_eq!(status, PayloadStatus::Valid); /* * Execution Engine A: @@ -193,15 +193,15 @@ impl TestRig { * Do not provide payload attributes (we'll test that later). */ let head_block_hash = valid_payload.block_hash; - let finalized_block_hash = Hash256::zero(); + let finalized_block_hash = ExecutionBlockHash::zero(); let payload_attributes = None; - let (status, _) = self + let status = self .ee_a .execution_layer .notify_forkchoice_updated(head_block_hash, finalized_block_hash, payload_attributes) .await .unwrap(); - assert_eq!(status, PayloadStatusV1Status::Valid); + assert_eq!(status, PayloadStatus::Valid); /* * Execution Engine A: @@ -211,16 +211,13 @@ impl TestRig { let mut invalid_payload = valid_payload.clone(); invalid_payload.random = Hash256::from_low_u64_be(42); - let (status, _) = self + let status = self .ee_a .execution_layer .notify_new_payload(&invalid_payload) .await .unwrap(); - assert!(matches!( - status, - PayloadStatusV1Status::Invalid | PayloadStatusV1Status::InvalidBlockHash - )); + assert!(matches!(status, PayloadStatus::InvalidBlockHash { .. })); /* * Execution Engine A: @@ -231,7 +228,7 @@ impl TestRig { let parent_hash = valid_payload.block_hash; let timestamp = valid_payload.timestamp + 1; let random = Hash256::zero(); - let finalized_block_hash = Hash256::zero(); + let finalized_block_hash = ExecutionBlockHash::zero(); let proposer_index = 0; let second_payload = self .ee_a @@ -252,13 +249,13 @@ impl TestRig { * Provide the second payload back to the EE again. 
*/ - let (status, _) = self + let status = self .ee_a .execution_layer .notify_new_payload(&second_payload) .await .unwrap(); - assert_eq!(status, PayloadStatusV1Status::Valid); + assert_eq!(status, PayloadStatus::Valid); /* * Execution Engine A: @@ -266,32 +263,32 @@ impl TestRig { * Indicate that the payload is the head of the chain, providing payload attributes. */ let head_block_hash = valid_payload.block_hash; - let finalized_block_hash = Hash256::zero(); + let finalized_block_hash = ExecutionBlockHash::zero(); let payload_attributes = Some(PayloadAttributes { timestamp: second_payload.timestamp + 1, random: Hash256::zero(), suggested_fee_recipient: Address::zero(), }); - let (status, _) = self + let status = self .ee_a .execution_layer .notify_forkchoice_updated(head_block_hash, finalized_block_hash, payload_attributes) .await .unwrap(); - assert_eq!(status, PayloadStatusV1Status::Valid); + assert_eq!(status, PayloadStatus::Valid); /* * Execution Engine B: * * Provide the second payload, without providing the first. */ - let (status, _) = self + let status = self .ee_b .execution_layer .notify_new_payload(&second_payload) .await .unwrap(); - assert_eq!(status, PayloadStatusV1Status::Syncing); + assert_eq!(status, PayloadStatus::Accepted); /* * Execution Engine B: @@ -299,15 +296,15 @@ impl TestRig { * Set the second payload as the head, without providing payload attributes. */ let head_block_hash = second_payload.block_hash; - let finalized_block_hash = Hash256::zero(); + let finalized_block_hash = ExecutionBlockHash::zero(); let payload_attributes = None; - let (status, _) = self + let status = self .ee_b .execution_layer .notify_forkchoice_updated(head_block_hash, finalized_block_hash, payload_attributes) .await .unwrap(); - assert_eq!(status, PayloadStatusV1Status::Syncing); + assert_eq!(status, PayloadStatus::Syncing); /* * Execution Engine B: @@ -315,26 +312,26 @@ impl TestRig { * Provide the first payload to the EE. 
*/ - let (status, _) = self + let status = self .ee_b .execution_layer .notify_new_payload(&valid_payload) .await .unwrap(); - assert_eq!(status, PayloadStatusV1Status::Valid); + assert_eq!(status, PayloadStatus::Valid); /* * Execution Engine B: * * Provide the second payload, now the first has been provided. */ - let (status, _) = self + let status = self .ee_b .execution_layer .notify_new_payload(&second_payload) .await .unwrap(); - assert_eq!(status, PayloadStatusV1Status::Valid); + assert_eq!(status, PayloadStatus::Valid); /* * Execution Engine B: @@ -342,15 +339,15 @@ impl TestRig { * Set the second payload as the head, without providing payload attributes. */ let head_block_hash = second_payload.block_hash; - let finalized_block_hash = Hash256::zero(); + let finalized_block_hash = ExecutionBlockHash::zero(); let payload_attributes = None; - let (status, _) = self + let status = self .ee_b .execution_layer .notify_forkchoice_updated(head_block_hash, finalized_block_hash, payload_attributes) .await .unwrap(); - assert_eq!(status, PayloadStatusV1Status::Valid); + assert_eq!(status, PayloadStatus::Valid); } } From a1b730c0434a0cafbd83dbc56dda90ab1bc70c73 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Tue, 1 Mar 2022 01:49:22 +0000 Subject: [PATCH 14/14] Cleanup small issues (#3027) Downgrades some excessive networking logs and corrects some metrics. 
--- .github/workflows/local-testnet.yml | 1 - .../lighthouse_network/src/peer_manager/mod.rs | 14 +++++++------- beacon_node/network/src/metrics.rs | 18 +++++++++++++++++- 3 files changed, 24 insertions(+), 9 deletions(-) diff --git a/.github/workflows/local-testnet.yml b/.github/workflows/local-testnet.yml index f97b271c35..7f367821c3 100644 --- a/.github/workflows/local-testnet.yml +++ b/.github/workflows/local-testnet.yml @@ -34,7 +34,6 @@ jobs: key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} - name: Install lighthouse - if: steps.cache-cargo.outputs.cache-hit != 'true' run: make && make install-lcli - name: Start local testnet diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index 48edd3abb6..437d05d474 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -10,7 +10,7 @@ use hashset_delay::HashSetDelay; use libp2p::identify::IdentifyInfo; use peerdb::{client::ClientKind, BanOperation, BanResult, ScoreUpdateResult}; use rand::seq::SliceRandom; -use slog::{debug, error, warn}; +use slog::{debug, error, trace, warn}; use smallvec::SmallVec; use std::{ sync::Arc, @@ -547,7 +547,7 @@ impl PeerManager { if let Some(peer_info) = self.network_globals.peers.read().peer_info(peer_id) { // received a ping // reset the to-ping timer for this peer - debug!(self.log, "Received a ping request"; "peer_id" => %peer_id, "seq_no" => seq); + trace!(self.log, "Received a ping request"; "peer_id" => %peer_id, "seq_no" => seq); match peer_info.connection_direction() { Some(ConnectionDirection::Incoming) => { self.inbound_ping_peers.insert(*peer_id); @@ -563,7 +563,7 @@ impl PeerManager { // if the sequence number is unknown send an update the meta data of the peer. 
if let Some(meta_data) = &peer_info.meta_data() { if *meta_data.seq_number() < seq { - debug!(self.log, "Requesting new metadata from peer"; + trace!(self.log, "Requesting new metadata from peer"; "peer_id" => %peer_id, "known_seq_no" => meta_data.seq_number(), "ping_seq_no" => seq); self.events.push(PeerManagerEvent::MetaData(*peer_id)); } @@ -587,13 +587,13 @@ impl PeerManager { // if the sequence number is unknown send update the meta data of the peer. if let Some(meta_data) = &peer_info.meta_data() { if *meta_data.seq_number() < seq { - debug!(self.log, "Requesting new metadata from peer"; + trace!(self.log, "Requesting new metadata from peer"; "peer_id" => %peer_id, "known_seq_no" => meta_data.seq_number(), "pong_seq_no" => seq); self.events.push(PeerManagerEvent::MetaData(*peer_id)); } } else { // if we don't know the meta-data, request it - debug!(self.log, "Requesting first metadata from peer"; + trace!(self.log, "Requesting first metadata from peer"; "peer_id" => %peer_id); self.events.push(PeerManagerEvent::MetaData(*peer_id)); } @@ -607,10 +607,10 @@ impl PeerManager { if let Some(peer_info) = self.network_globals.peers.write().peer_info_mut(peer_id) { if let Some(known_meta_data) = &peer_info.meta_data() { if *known_meta_data.seq_number() < *meta_data.seq_number() { - debug!(self.log, "Updating peer's metadata"; + trace!(self.log, "Updating peer's metadata"; "peer_id" => %peer_id, "known_seq_no" => known_meta_data.seq_number(), "new_seq_no" => meta_data.seq_number()); } else { - debug!(self.log, "Received old metadata"; + trace!(self.log, "Received old metadata"; "peer_id" => %peer_id, "known_seq_no" => known_meta_data.seq_number(), "new_seq_no" => meta_data.seq_number()); // Updating metadata even in this case to prevent storing // incorrect `attnets/syncnets` for a peer diff --git a/beacon_node/network/src/metrics.rs b/beacon_node/network/src/metrics.rs index a10d238764..446aa0a033 100644 --- a/beacon_node/network/src/metrics.rs +++ 
b/beacon_node/network/src/metrics.rs @@ -5,10 +5,12 @@ use beacon_chain::{ use fnv::FnvHashMap; pub use lighthouse_metrics::*; use lighthouse_network::{ - types::GossipKind, BandwidthSinks, GossipTopic, Gossipsub, NetworkGlobals, + peer_manager::peerdb::client::ClientKind, types::GossipKind, BandwidthSinks, GossipTopic, + Gossipsub, NetworkGlobals, }; use std::sync::Arc; use strum::AsStaticRef; +use strum::IntoEnumIterator; use types::EthSpec; lazy_static! { @@ -343,6 +345,20 @@ pub fn update_gossip_metrics( network_globals: &Arc>, ) { // Mesh peers per client + // Reset the gauges + for client_kind in ClientKind::iter() { + set_gauge_vec( + &BEACON_BLOCK_MESH_PEERS_PER_CLIENT, + &[&client_kind.to_string()], + 0_i64, + ); + set_gauge_vec( + &BEACON_AGGREGATE_AND_PROOF_MESH_PEERS_PER_CLIENT, + &[&client_kind.to_string()], + 0_i64, + ); + } + for topic_hash in gossipsub.topics() { if let Ok(topic) = GossipTopic::decode(topic_hash.as_str()) { match topic.kind() {