From 023674ab3b49a4736431c9d3224b66102798ca19 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 14 Dec 2022 01:20:02 +0000 Subject: [PATCH 01/19] Update Gnosis chain bootnodes (#3793) ## Proposed Changes Update the Gnosis chain bootnodes. The current list of Gnosis bootnodes were abandoned at some point before the Gnosis merge and are now failing to bootstrap peers. There's a workaround list of bootnodes here: https://docs.gnosischain.com/updates/20221208-temporary-bootnodes The list from this PR represents the long-term bootnodes run by the Gnosis team. We will also try to set up SigP bootnodes for Gnosis chain at some point. --- .../built_in_network_configs/gnosis/boot_enr.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/common/eth2_network_config/built_in_network_configs/gnosis/boot_enr.yaml b/common/eth2_network_config/built_in_network_configs/gnosis/boot_enr.yaml index 4b232d8b32..130c1fa1c3 100644 --- a/common/eth2_network_config/built_in_network_configs/gnosis/boot_enr.yaml +++ b/common/eth2_network_config/built_in_network_configs/gnosis/boot_enr.yaml @@ -1,5 +1,5 @@ # Gnosis Chain Team -- enr:-IS4QGmLwm7gFd0L0CEisllrb1op3v-wAGSc7_pwSMGgN3bOS9Fz7m1dWbwuuPHKqeETz9MbhjVuoWk0ohkyRv98kVoBgmlkgnY0gmlwhGjtlgaJc2VjcDI1NmsxoQLMdh0It9fJbuiLydZ9fpF6MRzgNle0vODaDiMqhbC7WIN1ZHCCIyg -- enr:-IS4QFUVG3dvLPCUEI7ycRvFm0Ieg_ITa5tALmJ9LI7dJ6ieT3J4fF9xLRjOoB4ApV-Rjp7HeLKzyTWG1xRdbFBNZPQBgmlkgnY0gmlwhErP5weJc2VjcDI1NmsxoQOBbaJBvx0-w_pyZUhQl9A510Ho2T0grE0K8JevzES99IN1ZHCCIyg -- enr:-Ku4QOQk8V-Hu2gxFzRXmLYIO4AvWDZhoMFwTf3n3DYm_mbsWv0ZitoqiN6JZUUj6Li6e1Jk1w2zFSVHKPMUP1g5tsgBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpD5Jd3FAAAAZP__________gmlkgnY0gmlwhC1PTpmJc2VjcDI1NmsxoQL1Ynt5PoA0UOcHa1Rfn98rmnRlLzNuWTePPP4m4qHVroN1ZHCCKvg -- enr:-Ku4QFaTwgoms-EiiRIfHUH3FXprWUFgjHg4UuWvilqoUQtDbmTszVIxUEOwQUmA2qkiP-T9wXjc_rVUuh9cU7WgwbgBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpD5Jd3FAAAAZP__________gmlkgnY0gmlwhC0hBmCJc2VjcDI1NmsxoQOpsg1XCrXmCwZKcSTcycLwldoKUMHPUpMEVGeg_EEhuYN1ZHCCKvg 
+- enr:-Ly4QMU1y81COwm1VZgxGF4_eZ21ub9-GHF6dXZ29aEJ0oZpcV2Rysw-viaEKfpcpu9ZarILJLxFZjcKOjE0Sybs3MQBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpCCS-QxAgAAZP__________gmlkgnY0gmlwhANLnx-Jc2VjcDI1NmsxoQKoaYT8I-wf2I_f_ii6EgoSSXj5T3bhiDyW-7ZLsY3T64hzeW5jbmV0cwCDdGNwgiMog3VkcIIjKA +- enr:-Ly4QBf76jLiCA_pDXoZjhyRbuwzFOscFY-MIKkPnmHPQbvaKhIDZutfe38G9ibzgQP0RKrTo3vcWOy4hf_8wOZ-U5MBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpCCS-QxAgAAZP__________gmlkgnY0gmlwhBLGgjaJc2VjcDI1NmsxoQLGeo0Q4lDvxIjHjnkAqEuETTaFIjsNrEcSpdDhcHXWFYhzeW5jbmV0cwCDdGNwgiMog3VkcIIjKA +- enr:-Ly4QLjZUWdqUO_RwyDqCAccIK5-MbLRD6A2c7oBuVbBgBnWDkEf0UKJVAaJqi2pO101WVQQLYSnYgz1Q3pRhYdrlFoBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpCCS-QxAgAAZP__________gmlkgnY0gmlwhANA8sSJc2VjcDI1NmsxoQK4TC_EK1jSs0VVPUpOjIo1rhJmff2SLBPFOWSXMwdLVYhzeW5jbmV0cwCDdGNwgiMog3VkcIIjKA +- enr:-Ly4QKwX2rTFtKWKQHSGQFhquxsxL1jewO8JB1MG-jgHqAZVFWxnb3yMoQqnYSV1bk25-_jiLuhIulxar3RBWXEDm6EBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpCCS-QxAgAAZP__________gmlkgnY0gmlwhAN-qZeJc2VjcDI1NmsxoQI7EPGMpecl0QofLp4Wy_lYNCCChUFEH6kY7k-oBGkPFIhzeW5jbmV0cwCDdGNwgiMog3VkcIIjKA From 63c74b37f4805aee853f8be8d9e2e27886ba898f Mon Sep 17 00:00:00 2001 From: Divma Date: Thu, 15 Dec 2022 00:16:38 +0000 Subject: [PATCH 02/19] send error answering bbrange requests when an error occurs (#3800) ## Issue Addressed While testing withdrawals with @ethDreamer we noticed lighthouse is sending empty batches when an error occurs. As LH peer receiving this, we would consider this a low tolerance action because the peer is claiming the batch is right and is empty. ## Proposed Changes If any kind of error occurs, send an error response instead ## Additional Info Right now we don't handle such thing as a partial batch with an error. If an error is received, the whole batch is discarded. Because of this it makes little sense to send partial batches that end with an error, so it's better to do the proposed solution instead of sending empty batches. 
--- .../network/src/beacon_processor/worker/rpc_methods.rs | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs index 3e354a70d2..bfa0ea516f 100644 --- a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs @@ -398,6 +398,15 @@ impl Worker { "block_root" => ?root, "error" => ?e ); + + // send the stream terminator + self.send_error_response( + peer_id, + RPCResponseErrorCode::ServerError, + "Failed fetching blocks".into(), + request_id, + ); + send_response = false; break; } } From 558367ab8c5c2b99c9c5aaca1b4561bbddc9b017 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Fri, 16 Dec 2022 09:20:45 +1100 Subject: [PATCH 03/19] Bounded withdrawals and spec v1.3.0-alpha.2 (#3802) --- .../src/per_block_processing.rs | 33 +++++++++++++---- consensus/types/presets/gnosis/capella.yaml | 17 +++++++++ consensus/types/presets/mainnet/capella.yaml | 7 +++- consensus/types/presets/minimal/capella.yaml | 7 +++- consensus/types/src/chain_spec.rs | 6 +++- consensus/types/src/config_and_preset.rs | 35 +++++++++++-------- consensus/types/src/lib.rs | 4 +-- consensus/types/src/preset.rs | 24 +++++++++++++ testing/ef_tests/Makefile | 2 +- 9 files changed, 108 insertions(+), 27 deletions(-) create mode 100644 consensus/types/presets/gnosis/capella.yaml diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index 5269922260..7af74428b5 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -466,7 +466,9 @@ pub fn compute_timestamp_at_slot( .and_then(|since_genesis| state.genesis_time().safe_add(since_genesis)) } -/// FIXME: add link to this function once the spec is stable +/// Compute the next batch of withdrawals which should be 
included in a block. +/// +/// https://github.com/ethereum/consensus-specs/blob/dev/specs/capella/beacon-chain.md#new-get_expected_withdrawals #[cfg(feature = "withdrawals")] pub fn get_expected_withdrawals( state: &BeaconState, @@ -481,7 +483,11 @@ pub fn get_expected_withdrawals( return Ok(withdrawals.into()); } - for _ in 0..state.validators().len() { + let bound = std::cmp::min( + state.validators().len() as u64, + spec.max_validators_per_withdrawals_sweep, + ); + for _ in 0..bound { let validator = state.get_validator(validator_index as usize)?; let balance = *state.balances().get(validator_index as usize).ok_or( BeaconStateError::BalancesOutOfBounds(validator_index as usize), @@ -518,7 +524,7 @@ pub fn get_expected_withdrawals( Ok(withdrawals.into()) } -/// FIXME: add link to this function once the spec is stable +/// Apply withdrawals to the state. #[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] pub fn process_withdrawals<'payload, T: EthSpec, Payload: AbstractExecPayload>( state: &mut BeaconState, @@ -547,11 +553,26 @@ pub fn process_withdrawals<'payload, T: EthSpec, Payload: AbstractExecPayload )?; } + // Update the next withdrawal index if this block contained withdrawals if let Some(latest_withdrawal) = expected_withdrawals.last() { *state.next_withdrawal_index_mut()? = latest_withdrawal.index.safe_add(1)?; - let next_validator_index = latest_withdrawal - .validator_index - .safe_add(1)? + + // Update the next validator index to start the next withdrawal sweep + if expected_withdrawals.len() == T::max_withdrawals_per_payload() { + // Next sweep starts after the latest withdrawal's validator index + let next_validator_index = latest_withdrawal + .validator_index + .safe_add(1)? + .safe_rem(state.validators().len() as u64)?; + *state.next_withdrawal_validator_index_mut()? 
= next_validator_index; + } + } + + // Advance sweep by the max length of the sweep if there was not a full set of withdrawals + if expected_withdrawals.len() != T::max_withdrawals_per_payload() { + let next_validator_index = state + .next_withdrawal_validator_index()? + .safe_add(spec.max_validators_per_withdrawals_sweep)? .safe_rem(state.validators().len() as u64)?; *state.next_withdrawal_validator_index_mut()? = next_validator_index; } diff --git a/consensus/types/presets/gnosis/capella.yaml b/consensus/types/presets/gnosis/capella.yaml new file mode 100644 index 0000000000..913c2956ba --- /dev/null +++ b/consensus/types/presets/gnosis/capella.yaml @@ -0,0 +1,17 @@ +# Mainnet preset - Capella + +# Misc +# Max operations per block +# --------------------------------------------------------------- +# 2**4 (= 16) +MAX_BLS_TO_EXECUTION_CHANGES: 16 + +# Execution +# --------------------------------------------------------------- +# 2**4 (= 16) withdrawals +MAX_WITHDRAWALS_PER_PAYLOAD: 16 + +# Withdrawals processing +# --------------------------------------------------------------- +# 2**14 (= 16384) validators +MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP: 16384 diff --git a/consensus/types/presets/mainnet/capella.yaml b/consensus/types/presets/mainnet/capella.yaml index 0c087255bf..913c2956ba 100644 --- a/consensus/types/presets/mainnet/capella.yaml +++ b/consensus/types/presets/mainnet/capella.yaml @@ -9,4 +9,9 @@ MAX_BLS_TO_EXECUTION_CHANGES: 16 # Execution # --------------------------------------------------------------- # 2**4 (= 16) withdrawals -MAX_WITHDRAWALS_PER_PAYLOAD: 16 \ No newline at end of file +MAX_WITHDRAWALS_PER_PAYLOAD: 16 + +# Withdrawals processing +# --------------------------------------------------------------- +# 2**14 (= 16384) validators +MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP: 16384 diff --git a/consensus/types/presets/minimal/capella.yaml b/consensus/types/presets/minimal/capella.yaml index eacd6c7cbc..d27253de87 100644 --- 
a/consensus/types/presets/minimal/capella.yaml +++ b/consensus/types/presets/minimal/capella.yaml @@ -9,4 +9,9 @@ MAX_BLS_TO_EXECUTION_CHANGES: 16 # Execution # --------------------------------------------------------------- # [customized] 2**2 (= 4) -MAX_WITHDRAWALS_PER_PAYLOAD: 4 \ No newline at end of file +MAX_WITHDRAWALS_PER_PAYLOAD: 4 + +# Withdrawals processing +# --------------------------------------------------------------- +# [customized] 2**4 (= 16) validators +MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP: 16 diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index d16c9b8091..bf9a7ed34d 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -158,8 +158,9 @@ pub struct ChainSpec { * Capella hard fork params */ pub capella_fork_version: [u8; 4], - /// The Capella fork epoch is optional, with `None` representing "Merge never happens". + /// The Capella fork epoch is optional, with `None` representing "Capella never happens". 
pub capella_fork_epoch: Option, + pub max_validators_per_withdrawals_sweep: u64, /* * Eip4844 hard fork params @@ -634,6 +635,7 @@ impl ChainSpec { */ capella_fork_version: [0x03, 00, 00, 00], capella_fork_epoch: None, + max_validators_per_withdrawals_sweep: 16384, /* * Eip4844 hard fork params @@ -707,6 +709,7 @@ impl ChainSpec { // Capella capella_fork_version: [0x03, 0x00, 0x00, 0x01], capella_fork_epoch: None, + max_validators_per_withdrawals_sweep: 16, // Eip4844 eip4844_fork_version: [0x04, 0x00, 0x00, 0x01], eip4844_fork_epoch: None, @@ -869,6 +872,7 @@ impl ChainSpec { */ capella_fork_version: [0x03, 0x00, 0x00, 0x64], capella_fork_epoch: None, + max_validators_per_withdrawals_sweep: 16384, /* * Eip4844 hard fork params diff --git a/consensus/types/src/config_and_preset.rs b/consensus/types/src/config_and_preset.rs index f72b1710de..9a618f7cc3 100644 --- a/consensus/types/src/config_and_preset.rs +++ b/consensus/types/src/config_and_preset.rs @@ -1,5 +1,6 @@ use crate::{ - consts::altair, AltairPreset, BasePreset, BellatrixPreset, ChainSpec, Config, EthSpec, ForkName, + consts::altair, AltairPreset, BasePreset, BellatrixPreset, CapellaPreset, ChainSpec, Config, + EthSpec, ForkName, }; use maplit::hashmap; use serde_derive::{Deserialize, Serialize}; @@ -11,7 +12,7 @@ use superstruct::superstruct; /// /// Mostly useful for the API. #[superstruct( - variants(Altair, Bellatrix), + variants(Bellatrix, Capella), variant_attributes(derive(Serialize, Deserialize, Debug, PartialEq, Clone)) )] #[derive(Serialize, Deserialize, Debug, PartialEq, Clone)] @@ -24,9 +25,10 @@ pub struct ConfigAndPreset { pub base_preset: BasePreset, #[serde(flatten)] pub altair_preset: AltairPreset, - #[superstruct(only(Bellatrix))] #[serde(flatten)] pub bellatrix_preset: BellatrixPreset, + #[superstruct(only(Capella))] + pub capella_preset: CapellaPreset, /// The `extra_fields` map allows us to gracefully decode fields intended for future hard forks. 
#[serde(flatten)] pub extra_fields: HashMap, @@ -37,14 +39,24 @@ impl ConfigAndPreset { let config = Config::from_chain_spec::(spec); let base_preset = BasePreset::from_chain_spec::(spec); let altair_preset = AltairPreset::from_chain_spec::(spec); + let bellatrix_preset = BellatrixPreset::from_chain_spec::(spec); let extra_fields = get_extra_fields(spec); - if spec.bellatrix_fork_epoch.is_some() + if spec.capella_fork_epoch.is_some() || fork_name.is_none() - || fork_name == Some(ForkName::Merge) + || fork_name == Some(ForkName::Capella) { - let bellatrix_preset = BellatrixPreset::from_chain_spec::(spec); + let capella_preset = CapellaPreset::from_chain_spec::(spec); + ConfigAndPreset::Capella(ConfigAndPresetCapella { + config, + base_preset, + altair_preset, + bellatrix_preset, + capella_preset, + extra_fields, + }) + } else { ConfigAndPreset::Bellatrix(ConfigAndPresetBellatrix { config, base_preset, @@ -52,13 +64,6 @@ impl ConfigAndPreset { bellatrix_preset, extra_fields, }) - } else { - ConfigAndPreset::Altair(ConfigAndPresetAltair { - config, - base_preset, - altair_preset, - extra_fields, - }) } } } @@ -131,8 +136,8 @@ mod test { .write(false) .open(tmp_file.as_ref()) .expect("error while opening the file"); - let from: ConfigAndPresetBellatrix = + let from: ConfigAndPresetCapella = serde_yaml::from_reader(reader).expect("error while deserializing"); - assert_eq!(ConfigAndPreset::Bellatrix(from), yamlconfig); + assert_eq!(ConfigAndPreset::Capella(from), yamlconfig); } } diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 92f87f01dd..6cbb9568da 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -124,7 +124,7 @@ pub use crate::bls_to_execution_change::BlsToExecutionChange; pub use crate::chain_spec::{ChainSpec, Config, Domain}; pub use crate::checkpoint::Checkpoint; pub use crate::config_and_preset::{ - ConfigAndPreset, ConfigAndPresetAltair, ConfigAndPresetBellatrix, + ConfigAndPreset, ConfigAndPresetBellatrix, 
ConfigAndPresetCapella, }; pub use crate::contribution_and_proof::ContributionAndProof; pub use crate::deposit::{Deposit, DEPOSIT_TREE_DEPTH}; @@ -163,7 +163,7 @@ pub use crate::payload::{ FullPayloadCapella, FullPayloadEip4844, FullPayloadMerge, FullPayloadRef, OwnedExecPayload, }; pub use crate::pending_attestation::PendingAttestation; -pub use crate::preset::{AltairPreset, BasePreset, BellatrixPreset}; +pub use crate::preset::{AltairPreset, BasePreset, BellatrixPreset, CapellaPreset}; pub use crate::proposer_preparation_data::ProposerPreparationData; pub use crate::proposer_slashing::ProposerSlashing; pub use crate::relative_epoch::{Error as RelativeEpochError, RelativeEpoch}; diff --git a/consensus/types/src/preset.rs b/consensus/types/src/preset.rs index 8ee38e46a6..7d7db228ce 100644 --- a/consensus/types/src/preset.rs +++ b/consensus/types/src/preset.rs @@ -184,6 +184,27 @@ impl BellatrixPreset { } } +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] +#[serde(rename_all = "UPPERCASE")] +pub struct CapellaPreset { + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub max_bls_to_execution_changes: u64, + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub max_withdrawals_per_payload: u64, + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub max_validators_per_withdrawals_sweep: u64, +} + +impl CapellaPreset { + pub fn from_chain_spec(spec: &ChainSpec) -> Self { + Self { + max_bls_to_execution_changes: T::max_bls_to_execution_changes() as u64, + max_withdrawals_per_payload: T::max_withdrawals_per_payload() as u64, + max_validators_per_withdrawals_sweep: spec.max_validators_per_withdrawals_sweep, + } + } +} + #[cfg(test)] mod test { use super::*; @@ -219,6 +240,9 @@ mod test { let bellatrix: BellatrixPreset = preset_from_file(&preset_name, "bellatrix.yaml"); assert_eq!(bellatrix, BellatrixPreset::from_chain_spec::(&spec)); + + let capella: CapellaPreset = preset_from_file(&preset_name, "capella.yaml"); + assert_eq!(capella, 
CapellaPreset::from_chain_spec::(&spec)); } #[test] diff --git a/testing/ef_tests/Makefile b/testing/ef_tests/Makefile index 717ff13c97..d52f546dc2 100644 --- a/testing/ef_tests/Makefile +++ b/testing/ef_tests/Makefile @@ -1,4 +1,4 @@ -TESTS_TAG := v1.3.0-alpha.1 +TESTS_TAG := v1.3.0-alpha.2 TESTS = general minimal mainnet TARBALLS = $(patsubst %,%-$(TESTS_TAG).tar.gz,$(TESTS)) From 3a08c7634e68875b40cdee34ba58c6566d397c9f Mon Sep 17 00:00:00 2001 From: Mark Mackey Date: Fri, 16 Dec 2022 14:56:07 -0600 Subject: [PATCH 04/19] Make engine_getPayloadV2 accept local block value --- beacon_node/execution_layer/src/engine_api/http.rs | 4 ++-- .../execution_layer/src/engine_api/json_structures.rs | 9 +++++++++ 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index c71cfa0c04..1616b21634 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -760,7 +760,7 @@ impl HttpJsonRpc { ) -> Result, Error> { let params = json!([JsonPayloadIdRequest::from(payload_id)]); - let payload_v2: JsonExecutionPayloadV2 = self + let response: JsonGetPayloadResponse = self .rpc_request( ENGINE_GET_PAYLOAD_V2, params, @@ -768,7 +768,7 @@ impl HttpJsonRpc { ) .await?; - JsonExecutionPayload::V2(payload_v2).try_into_execution_payload(fork_name) + JsonExecutionPayload::V2(response.execution_payload).try_into_execution_payload(fork_name) } pub async fn get_blobs_bundle_v1( diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index feed621589..18e52eb06f 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -324,6 +324,15 @@ impl TryFrom> for JsonExecutionPayloadV2 { } } +#[derive(Debug, PartialEq, Serialize, Deserialize)] +#[serde(bound = 
"T: EthSpec", rename_all = "camelCase")] +pub struct JsonGetPayloadResponse { + pub execution_payload: JsonExecutionPayloadV2, + // uncomment this when geth fixes its serialization + //#[serde(with = "eth2_serde_utils::u256_hex_be")] + //pub block_value: Uint256, +} + #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct JsonWithdrawal { From b75ca74222ac1b8dda5efd6a48b1388cd622f372 Mon Sep 17 00:00:00 2001 From: Mark Mackey Date: Mon, 19 Dec 2022 15:10:50 -0600 Subject: [PATCH 05/19] Removed `withdrawals` feature flag --- .github/workflows/docker.yml | 2 +- Makefile | 2 +- beacon_node/Cargo.toml | 1 - beacon_node/beacon_chain/Cargo.toml | 1 - beacon_node/beacon_chain/src/beacon_chain.rs | 16 +---- .../beacon_chain/src/execution_payload.rs | 21 ++---- beacon_node/execution_layer/Cargo.toml | 1 - beacon_node/execution_layer/src/engine_api.rs | 3 - .../execution_layer/src/engine_api/http.rs | 6 +- .../src/engine_api/json_structures.rs | 8 --- beacon_node/execution_layer/src/lib.rs | 6 -- .../src/test_utils/mock_execution_layer.rs | 3 - beacon_node/store/Cargo.toml | 1 - beacon_node/store/src/partial_beacon_state.rs | 64 ------------------- common/eth2/Cargo.toml | 1 - consensus/state_processing/Cargo.toml | 1 - .../src/per_block_processing.rs | 9 ++- .../block_signature_verifier.rs | 2 - .../process_operations.rs | 4 +- .../state_processing/src/upgrade/capella.rs | 2 - .../state_processing/src/upgrade/eip4844.rs | 10 +-- consensus/types/Cargo.toml | 1 - consensus/types/src/beacon_block.rs | 2 - consensus/types/src/beacon_block_body.rs | 9 --- consensus/types/src/beacon_state.rs | 2 - .../types/src/beacon_state/tree_hash_cache.rs | 2 - consensus/types/src/execution_payload.rs | 1 - .../types/src/execution_payload_header.rs | 5 -- consensus/types/src/payload.rs | 6 -- consensus/types/src/signed_beacon_block.rs | 4 -- lighthouse/Cargo.toml | 2 - testing/ef_tests/src/cases/operations.rs | 10 +-- 
testing/ef_tests/src/lib.rs | 2 +- testing/ef_tests/tests/tests.rs | 4 +- .../execution_engine_integration/Cargo.toml | 3 +- 35 files changed, 29 insertions(+), 188 deletions(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 25d2cdab30..c0a02adf4e 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -66,7 +66,7 @@ jobs: DOCKER_CLI_EXPERIMENTAL: enabled VERSION: ${{ needs.extract-version.outputs.VERSION }} VERSION_SUFFIX: ${{ needs.extract-version.outputs.VERSION_SUFFIX }} - CROSS_FEATURES: withdrawals,withdrawals-processing + CROSS_FEATURES: withdrawals-processing steps: - uses: actions/checkout@v3 - name: Update Rust diff --git a/Makefile b/Makefile index 56e05fffcb..15d09c5867 100644 --- a/Makefile +++ b/Makefile @@ -21,7 +21,7 @@ CROSS_FEATURES ?= gnosis,slasher-lmdb,slasher-mdbx CROSS_PROFILE ?= release # List of features to use when running EF tests. -EF_TEST_FEATURES ?= beacon_chain/withdrawals,beacon_chain/withdrawals-processing +EF_TEST_FEATURES ?= beacon_chain/withdrawals-processing # Cargo profile for regular builds. PROFILE ?= release diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 309d7a83f7..bed32011f1 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -13,7 +13,6 @@ node_test_rig = { path = "../testing/node_test_rig" } [features] write_ssz_files = ["beacon_chain/write_ssz_files"] # Writes debugging .ssz files to /tmp during block processing. 
-withdrawals = ["beacon_chain/withdrawals", "types/withdrawals", "store/withdrawals", "execution_layer/withdrawals"] withdrawals-processing = [ "beacon_chain/withdrawals-processing", "store/withdrawals-processing", diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index 6d768476e6..a6ac660379 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -10,7 +10,6 @@ default = ["participation_metrics"] write_ssz_files = [] # Writes debugging .ssz files to /tmp during block processing. participation_metrics = [] # Exposes validator participation metrics to Prometheus. fork_from_env = [] # Initialise the harness chain spec from the FORK_NAME env variable -withdrawals = ["state_processing/withdrawals", "types/withdrawals", "store/withdrawals", "execution_layer/withdrawals"] withdrawals-processing = [ "state_processing/withdrawals-processing", "store/withdrawals-processing", diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 17e0a6f121..5c0311736c 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -79,14 +79,12 @@ use slasher::Slasher; use slog::{crit, debug, error, info, trace, warn, Logger}; use slot_clock::SlotClock; use ssz::Encode; -#[cfg(feature = "withdrawals")] -use state_processing::per_block_processing::get_expected_withdrawals; use state_processing::{ common::get_attesting_indices_from_state, per_block_processing, per_block_processing::{ - errors::AttestationValidationError, verify_attestation_for_block_inclusion, - VerifySignatures, + errors::AttestationValidationError, get_expected_withdrawals, + verify_attestation_for_block_inclusion, VerifySignatures, }, per_slot_processing, state_advance::{complete_state_advance, partial_state_advance}, @@ -287,7 +285,6 @@ struct PartialBeaconBlock> { voluntary_exits: Vec, sync_aggregate: Option>, prepare_payload_handle: Option>, - 
#[cfg(feature = "withdrawals")] bls_to_execution_changes: Vec, } @@ -4182,7 +4179,6 @@ impl BeaconChain { let eth1_data = eth1_chain.eth1_data_for_block_production(&state, &self.spec)?; let deposits = eth1_chain.deposits_for_block_inclusion(&state, ð1_data, &self.spec)?; - #[cfg(feature = "withdrawals")] let bls_to_execution_changes = self .op_pool .get_bls_to_execution_changes(&state, &self.spec); @@ -4345,7 +4341,6 @@ impl BeaconChain { voluntary_exits, sync_aggregate, prepare_payload_handle, - #[cfg(feature = "withdrawals")] bls_to_execution_changes, }) } @@ -4375,7 +4370,6 @@ impl BeaconChain { // this function. We can assume that the handle has already been consumed in order to // produce said `execution_payload`. prepare_payload_handle: _, - #[cfg(feature = "withdrawals")] bls_to_execution_changes, } = partial_beacon_block; @@ -4460,7 +4454,6 @@ impl BeaconChain { .to_payload() .try_into() .map_err(|_| BlockProductionError::InvalidPayloadFork)?, - #[cfg(feature = "withdrawals")] bls_to_execution_changes: bls_to_execution_changes.into(), }, }), @@ -4485,7 +4478,6 @@ impl BeaconChain { .to_payload() .try_into() .map_err(|_| BlockProductionError::InvalidPayloadFork)?, - #[cfg(feature = "withdrawals")] bls_to_execution_changes: bls_to_execution_changes.into(), //FIXME(sean) get blobs blob_kzg_commitments: VariableList::from(kzg_commitments), @@ -4743,7 +4735,6 @@ impl BeaconChain { return Ok(()); } - #[cfg(feature = "withdrawals")] let withdrawals = match self.spec.fork_name_at_slot::(prepare_slot) { ForkName::Base | ForkName::Altair | ForkName::Merge => None, ForkName::Capella | ForkName::Eip4844 => { @@ -4778,10 +4769,7 @@ impl BeaconChain { execution_layer .get_suggested_fee_recipient(proposer as u64) .await, - #[cfg(feature = "withdrawals")] withdrawals, - #[cfg(not(feature = "withdrawals"))] - None, ); debug!( diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index 1982bdbf02..d52df4853d 100644 
--- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -17,11 +17,9 @@ use fork_choice::{InvalidationOperation, PayloadVerificationStatus}; use proto_array::{Block as ProtoBlock, ExecutionStatus}; use slog::debug; use slot_clock::SlotClock; -#[cfg(feature = "withdrawals")] -use state_processing::per_block_processing::get_expected_withdrawals; use state_processing::per_block_processing::{ - compute_timestamp_at_slot, is_execution_enabled, is_merge_transition_complete, - partially_verify_execution_payload, + compute_timestamp_at_slot, get_expected_withdrawals, is_execution_enabled, + is_merge_transition_complete, partially_verify_execution_payload, }; use std::sync::Arc; use tokio::task::JoinHandle; @@ -382,7 +380,6 @@ pub fn get_execution_payload< let random = *state.get_randao_mix(current_epoch)?; let latest_execution_payload_header_block_hash = state.latest_execution_payload_header()?.block_hash(); - #[cfg(feature = "withdrawals")] let withdrawals = match state { &BeaconState::Capella(_) | &BeaconState::Eip4844(_) => { Some(get_expected_withdrawals(state, spec)?.into()) @@ -407,7 +404,6 @@ pub fn get_execution_payload< proposer_index, latest_execution_payload_header_block_hash, builder_params, - #[cfg(feature = "withdrawals")] withdrawals, ) .await @@ -442,7 +438,7 @@ pub async fn prepare_execution_payload( proposer_index: u64, latest_execution_payload_header_block_hash: ExecutionBlockHash, builder_params: BuilderParams, - #[cfg(feature = "withdrawals")] withdrawals: Option>, + withdrawals: Option>, ) -> Result, BlockProductionError> where T: BeaconChainTypes, @@ -504,15 +500,8 @@ where let suggested_fee_recipient = execution_layer .get_suggested_fee_recipient(proposer_index) .await; - let payload_attributes = PayloadAttributes::new( - timestamp, - random, - suggested_fee_recipient, - #[cfg(feature = "withdrawals")] - withdrawals, - #[cfg(not(feature = "withdrawals"))] - None, - ); + let 
payload_attributes = + PayloadAttributes::new(timestamp, random, suggested_fee_recipient, withdrawals); // Note: the suggested_fee_recipient is stored in the `execution_layer`, it will add this parameter. // diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml index b3bdc54d02..47c1e0341b 100644 --- a/beacon_node/execution_layer/Cargo.toml +++ b/beacon_node/execution_layer/Cargo.toml @@ -5,7 +5,6 @@ edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [features] -withdrawals = ["state_processing/withdrawals", "types/withdrawals", "eth2/withdrawals"] withdrawals-processing = ["state_processing/withdrawals-processing", "eth2/withdrawals-processing"] [dependencies] diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index 424ca30d13..80cdeacb34 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -165,7 +165,6 @@ pub struct ExecutionBlockWithTransactions { #[serde(rename = "hash")] pub block_hash: ExecutionBlockHash, pub transactions: Vec, - #[cfg(feature = "withdrawals")] #[superstruct(only(Capella, Eip4844))] pub withdrawals: Vec, } @@ -215,7 +214,6 @@ impl TryFrom> for ExecutionBlockWithTransactions .iter() .map(|tx| Transaction::decode(&Rlp::new(tx))) .collect::, _>>()?, - #[cfg(feature = "withdrawals")] withdrawals: Vec::from(block.withdrawals) .into_iter() .map(|withdrawal| withdrawal.into()) @@ -243,7 +241,6 @@ impl TryFrom> for ExecutionBlockWithTransactions .iter() .map(|tx| Transaction::decode(&Rlp::new(tx))) .collect::, _>>()?, - #[cfg(feature = "withdrawals")] withdrawals: Vec::from(block.withdrawals) .into_iter() .map(|withdrawal| withdrawal.into()) diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index 1616b21634..29f66393e5 100644 --- 
a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -852,11 +852,11 @@ impl HttpJsonRpc { pub async fn supported_apis_v1(&self) -> Result { Ok(SupportedApis { new_payload_v1: true, - new_payload_v2: cfg!(all(feature = "withdrawals", not(test))), + new_payload_v2: cfg!(not(test)), forkchoice_updated_v1: true, - forkchoice_updated_v2: cfg!(all(feature = "withdrawals", not(test))), + forkchoice_updated_v2: cfg!(not(test)), get_payload_v1: true, - get_payload_v2: cfg!(all(feature = "withdrawals", not(test))), + get_payload_v2: cfg!(not(test)), exchange_transition_configuration_v1: true, }) } diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index 18e52eb06f..13948affb5 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -166,7 +166,6 @@ impl JsonExecutionPayload { base_fee_per_gas: v2.base_fee_per_gas, block_hash: v2.block_hash, transactions: v2.transactions, - #[cfg(feature = "withdrawals")] withdrawals: v2 .withdrawals .map(|v| { @@ -194,7 +193,6 @@ impl JsonExecutionPayload { excess_data_gas: v2.excess_data_gas.ok_or_else(|| Error::BadConversion("Null `excess_data_gas` field converting JsonExecutionPayloadV2 -> ExecutionPayloadEip4844".to_string()))?, block_hash: v2.block_hash, transactions: v2.transactions, - #[cfg(feature = "withdrawals")] withdrawals: v2 .withdrawals .map(|v| { @@ -282,7 +280,6 @@ impl TryFrom> for JsonExecutionPayloadV2 { excess_data_gas: None, block_hash: capella.block_hash, transactions: capella.transactions, - #[cfg(feature = "withdrawals")] withdrawals: Some( Vec::from(capella.withdrawals) .into_iter() @@ -290,8 +287,6 @@ impl TryFrom> for JsonExecutionPayloadV2 { .collect::>() .into(), ), - #[cfg(not(feature = "withdrawals"))] - withdrawals: None, }), ExecutionPayload::Eip4844(eip4844) => 
Ok(JsonExecutionPayloadV2 { parent_hash: eip4844.parent_hash, @@ -309,7 +304,6 @@ impl TryFrom> for JsonExecutionPayloadV2 { excess_data_gas: Some(eip4844.excess_data_gas), block_hash: eip4844.block_hash, transactions: eip4844.transactions, - #[cfg(feature = "withdrawals")] withdrawals: Some( Vec::from(eip4844.withdrawals) .into_iter() @@ -317,8 +311,6 @@ impl TryFrom> for JsonExecutionPayloadV2 { .collect::>() .into(), ), - #[cfg(not(feature = "withdrawals"))] - withdrawals: None, }), } } diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 1761af09e8..e22da42a72 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -1633,7 +1633,6 @@ impl ExecutionLayer { }) } ExecutionBlockWithTransactions::Capella(capella_block) => { - #[cfg(feature = "withdrawals")] let withdrawals = VariableList::new( capella_block .withdrawals @@ -1642,7 +1641,6 @@ impl ExecutionLayer { .collect(), ) .map_err(ApiError::DeserializeWithdrawals)?; - ExecutionPayload::Capella(ExecutionPayloadCapella { parent_hash: capella_block.parent_hash, fee_recipient: capella_block.fee_recipient, @@ -1658,12 +1656,10 @@ impl ExecutionLayer { base_fee_per_gas: capella_block.base_fee_per_gas, block_hash: capella_block.block_hash, transactions, - #[cfg(feature = "withdrawals")] withdrawals, }) } ExecutionBlockWithTransactions::Eip4844(eip4844_block) => { - #[cfg(feature = "withdrawals")] let withdrawals = VariableList::new( eip4844_block .withdrawals @@ -1672,7 +1668,6 @@ impl ExecutionLayer { .collect(), ) .map_err(ApiError::DeserializeWithdrawals)?; - ExecutionPayload::Eip4844(ExecutionPayloadEip4844 { parent_hash: eip4844_block.parent_hash, fee_recipient: eip4844_block.fee_recipient, @@ -1689,7 +1684,6 @@ impl ExecutionLayer { excess_data_gas: eip4844_block.excess_data_gas, block_hash: eip4844_block.block_hash, transactions, - #[cfg(feature = "withdrawals")] withdrawals, }) } diff --git 
a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs index f0f8449125..e552b7ca7a 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs @@ -103,10 +103,7 @@ impl MockExecutionLayer { prev_randao, Address::repeat_byte(42), // FIXME: think about how to handle different forks / withdrawals here.. - #[cfg(feature = "withdrawals")] Some(vec![]), - #[cfg(not(feature = "withdrawals"))] - None, ); // Insert a proposer to ensure the fork choice updated command works. diff --git a/beacon_node/store/Cargo.toml b/beacon_node/store/Cargo.toml index b3e8e1fc6b..897f6b020c 100644 --- a/beacon_node/store/Cargo.toml +++ b/beacon_node/store/Cargo.toml @@ -28,5 +28,4 @@ directory = { path = "../../common/directory" } strum = { version = "0.24.0", features = ["derive"] } [features] -withdrawals = ["state_processing/withdrawals", "types/withdrawals"] withdrawals-processing = ["state_processing/withdrawals-processing"] \ No newline at end of file diff --git a/beacon_node/store/src/partial_beacon_state.rs b/beacon_node/store/src/partial_beacon_state.rs index 12c5628496..ca35bc0b22 100644 --- a/beacon_node/store/src/partial_beacon_state.rs +++ b/beacon_node/store/src/partial_beacon_state.rs @@ -105,10 +105,8 @@ where pub latest_execution_payload_header: ExecutionPayloadHeaderEip4844, // Withdrawals - #[cfg(feature = "withdrawals")] #[superstruct(only(Capella, Eip4844))] pub next_withdrawal_index: u64, - #[cfg(feature = "withdrawals")] #[superstruct(only(Capella, Eip4844))] pub next_withdrawal_validator_index: u64, } @@ -199,7 +197,6 @@ impl PartialBeaconState { latest_execution_payload_header ] ), - #[cfg(feature = "withdrawals")] BeaconState::Capella(s) => impl_from_state_forgetful!( s, outer, @@ -216,22 +213,6 @@ impl PartialBeaconState { next_withdrawal_validator_index ] ), - #[cfg(not(feature = 
"withdrawals"))] - BeaconState::Capella(s) => impl_from_state_forgetful!( - s, - outer, - Capella, - PartialBeaconStateCapella, - [ - previous_epoch_participation, - current_epoch_participation, - current_sync_committee, - next_sync_committee, - inactivity_scores, - latest_execution_payload_header - ] - ), - #[cfg(feature = "withdrawals")] BeaconState::Eip4844(s) => impl_from_state_forgetful!( s, outer, @@ -248,21 +229,6 @@ impl PartialBeaconState { next_withdrawal_validator_index ] ), - #[cfg(not(feature = "withdrawals"))] - BeaconState::Eip4844(s) => impl_from_state_forgetful!( - s, - outer, - Eip4844, - PartialBeaconStateEip4844, - [ - previous_epoch_participation, - current_epoch_participation, - current_sync_committee, - next_sync_committee, - inactivity_scores, - latest_execution_payload_header - ] - ), } } @@ -450,7 +416,6 @@ impl TryInto> for PartialBeaconState { latest_execution_payload_header ] ), - #[cfg(feature = "withdrawals")] PartialBeaconState::Capella(inner) => impl_try_into_beacon_state!( inner, Capella, @@ -466,21 +431,6 @@ impl TryInto> for PartialBeaconState { next_withdrawal_validator_index ] ), - #[cfg(not(feature = "withdrawals"))] - PartialBeaconState::Capella(inner) => impl_try_into_beacon_state!( - inner, - Capella, - BeaconStateCapella, - [ - previous_epoch_participation, - current_epoch_participation, - current_sync_committee, - next_sync_committee, - inactivity_scores, - latest_execution_payload_header - ] - ), - #[cfg(feature = "withdrawals")] PartialBeaconState::Eip4844(inner) => impl_try_into_beacon_state!( inner, Eip4844, @@ -496,20 +446,6 @@ impl TryInto> for PartialBeaconState { next_withdrawal_validator_index ] ), - #[cfg(not(feature = "withdrawals"))] - PartialBeaconState::Eip4844(inner) => impl_try_into_beacon_state!( - inner, - Eip4844, - BeaconStateEip4844, - [ - previous_epoch_participation, - current_epoch_participation, - current_sync_committee, - next_sync_committee, - inactivity_scores, - latest_execution_payload_header 
- ] - ), }; Ok(state) } diff --git a/common/eth2/Cargo.toml b/common/eth2/Cargo.toml index 6ee02b71ba..fc5eba98e2 100644 --- a/common/eth2/Cargo.toml +++ b/common/eth2/Cargo.toml @@ -35,5 +35,4 @@ procinfo = { version = "0.4.2", optional = true } [features] default = ["lighthouse"] lighthouse = ["proto_array", "psutil", "procinfo", "store", "slashing_protection"] -withdrawals = ["store/withdrawals"] withdrawals-processing = ["store/withdrawals-processing"] \ No newline at end of file diff --git a/consensus/state_processing/Cargo.toml b/consensus/state_processing/Cargo.toml index 39a0be3d9f..0b79539877 100644 --- a/consensus/state_processing/Cargo.toml +++ b/consensus/state_processing/Cargo.toml @@ -43,5 +43,4 @@ arbitrary-fuzz = [ "eth2_ssz_types/arbitrary", "tree_hash/arbitrary", ] -withdrawals = ["types/withdrawals"] withdrawals-processing = [] diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index 7af74428b5..f1a544099f 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -19,7 +19,7 @@ pub use process_operations::process_operations; pub use verify_attestation::{ verify_attestation_for_block_inclusion, verify_attestation_for_state, }; -#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] +#[cfg(feature = "withdrawals-processing")] pub use verify_bls_to_execution_change::verify_bls_to_execution_change; pub use verify_deposit::{ get_existing_validator_index, verify_deposit_merkle_proof, verify_deposit_signature, @@ -36,7 +36,7 @@ pub mod signature_sets; pub mod tests; mod verify_attestation; mod verify_attester_slashing; -#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] +#[cfg(feature = "withdrawals-processing")] mod verify_bls_to_execution_change; mod verify_deposit; mod verify_exit; @@ -165,7 +165,7 @@ pub fn per_block_processing>( // previous block. 
if is_execution_enabled(state, block.body()) { let payload = block.body().execution_payload()?; - #[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] + #[cfg(feature = "withdrawals-processing")] process_withdrawals::(state, payload, spec)?; process_execution_payload::(state, payload, spec)?; } @@ -469,7 +469,6 @@ pub fn compute_timestamp_at_slot( /// Compute the next batch of withdrawals which should be included in a block. /// /// https://github.com/ethereum/consensus-specs/blob/dev/specs/capella/beacon-chain.md#new-get_expected_withdrawals -#[cfg(feature = "withdrawals")] pub fn get_expected_withdrawals( state: &BeaconState, spec: &ChainSpec, @@ -525,7 +524,7 @@ pub fn get_expected_withdrawals( } /// Apply withdrawals to the state. -#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] +#[cfg(feature = "withdrawals-processing")] pub fn process_withdrawals<'payload, T: EthSpec, Payload: AbstractExecPayload>( state: &mut BeaconState, payload: Payload::Ref<'payload>, diff --git a/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs b/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs index 50bfbfdc45..bbf2c1caa5 100644 --- a/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs +++ b/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs @@ -170,7 +170,6 @@ where // Deposits are not included because they can legally have invalid signatures. self.include_exits(block)?; self.include_sync_aggregate(block)?; - #[cfg(feature = "withdrawals")] self.include_bls_to_execution_changes(block)?; Ok(()) @@ -345,7 +344,6 @@ where } /// Include the signature of the block's BLS to execution changes for verification. 
- #[cfg(feature = "withdrawals")] pub fn include_bls_to_execution_changes>( &mut self, block: &'a SignedBeaconBlock, diff --git a/consensus/state_processing/src/per_block_processing/process_operations.rs b/consensus/state_processing/src/per_block_processing/process_operations.rs index 105faba83b..f27fd48b4f 100644 --- a/consensus/state_processing/src/per_block_processing/process_operations.rs +++ b/consensus/state_processing/src/per_block_processing/process_operations.rs @@ -34,7 +34,7 @@ pub fn process_operations<'a, T: EthSpec, Payload: AbstractExecPayload>( process_deposits(state, block_body.deposits(), spec)?; process_exits(state, block_body.voluntary_exits(), verify_signatures, spec)?; - #[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] + #[cfg(feature = "withdrawals-processing")] if let Ok(bls_to_execution_changes) = block_body.bls_to_execution_changes() { process_bls_to_execution_changes(state, bls_to_execution_changes, verify_signatures, spec)?; } @@ -295,7 +295,7 @@ pub fn process_exits( /// /// Returns `Ok(())` if the validation and state updates completed successfully. Otherwise returns /// an `Err` describing the invalid object or cause of failure. 
-#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] +#[cfg(feature = "withdrawals-processing")] pub fn process_bls_to_execution_changes( state: &mut BeaconState, bls_to_execution_changes: &[SignedBlsToExecutionChange], diff --git a/consensus/state_processing/src/upgrade/capella.rs b/consensus/state_processing/src/upgrade/capella.rs index 9a88369883..dc759b384d 100644 --- a/consensus/state_processing/src/upgrade/capella.rs +++ b/consensus/state_processing/src/upgrade/capella.rs @@ -56,9 +56,7 @@ pub fn upgrade_to_capella( // Execution latest_execution_payload_header: pre.latest_execution_payload_header.upgrade_to_capella(), // Withdrawals - #[cfg(feature = "withdrawals")] next_withdrawal_index: 0, - #[cfg(feature = "withdrawals")] next_withdrawal_validator_index: 0, // Caches total_active_balance: pre.total_active_balance, diff --git a/consensus/state_processing/src/upgrade/eip4844.rs b/consensus/state_processing/src/upgrade/eip4844.rs index 6d66fd8412..131100bb38 100644 --- a/consensus/state_processing/src/upgrade/eip4844.rs +++ b/consensus/state_processing/src/upgrade/eip4844.rs @@ -10,12 +10,8 @@ pub fn upgrade_to_eip4844( let pre = pre_state.as_capella_mut()?; // FIXME(sean) This is a hack to let us participate in testnets where capella doesn't exist. - // if we are disabling withdrawals, assume we should fork off of bellatrix. - let previous_fork_version = if cfg!(feature = "withdrawals") { - pre.fork.current_version - } else { - spec.bellatrix_fork_version - }; + // let previous_fork_version = spec.bellatrix_fork_version; + let previous_fork_version = pre.fork.current_version; // Where possible, use something like `mem::take` to move fields from behind the &mut // reference. For other fields that don't have a good default value, use `clone`. 
@@ -64,9 +60,7 @@ pub fn upgrade_to_eip4844( // Execution latest_execution_payload_header: pre.latest_execution_payload_header.upgrade_to_eip4844(), // Withdrawals - #[cfg(feature = "withdrawals")] next_withdrawal_index: pre.next_withdrawal_index, - #[cfg(feature = "withdrawals")] next_withdrawal_validator_index: pre.next_withdrawal_validator_index, // Caches total_active_balance: pre.total_active_balance, diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index b3ef3ae382..671cacfa2e 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -72,4 +72,3 @@ arbitrary-fuzz = [ "swap_or_not_shuffle/arbitrary", "tree_hash/arbitrary", ] -withdrawals = [] diff --git a/consensus/types/src/beacon_block.rs b/consensus/types/src/beacon_block.rs index 124cb08bcc..fd38e9faf2 100644 --- a/consensus/types/src/beacon_block.rs +++ b/consensus/types/src/beacon_block.rs @@ -502,7 +502,6 @@ impl> EmptyBlock for BeaconBlockCape voluntary_exits: VariableList::empty(), sync_aggregate: SyncAggregate::empty(), execution_payload: Payload::Capella::default(), - #[cfg(feature = "withdrawals")] bls_to_execution_changes: VariableList::empty(), }, } @@ -532,7 +531,6 @@ impl> EmptyBlock for BeaconBlockEip4 voluntary_exits: VariableList::empty(), sync_aggregate: SyncAggregate::empty(), execution_payload: Payload::Eip4844::default(), - #[cfg(feature = "withdrawals")] bls_to_execution_changes: VariableList::empty(), blob_kzg_commitments: VariableList::empty(), }, diff --git a/consensus/types/src/beacon_block_body.rs b/consensus/types/src/beacon_block_body.rs index 1dd938ac46..dbdbcddb1b 100644 --- a/consensus/types/src/beacon_block_body.rs +++ b/consensus/types/src/beacon_block_body.rs @@ -62,7 +62,6 @@ pub struct BeaconBlockBody = FullPay #[superstruct(only(Eip4844), partial_getter(rename = "execution_payload_eip4844"))] #[serde(flatten)] pub execution_payload: Payload::Eip4844, - #[cfg(feature = "withdrawals")] #[superstruct(only(Capella, Eip4844))] pub 
bls_to_execution_changes: VariableList, @@ -301,7 +300,6 @@ impl From>> voluntary_exits, sync_aggregate, execution_payload: FullPayloadCapella { execution_payload }, - #[cfg(feature = "withdrawals")] bls_to_execution_changes, } = body; @@ -319,7 +317,6 @@ impl From>> execution_payload: BlindedPayloadCapella { execution_payload_header: From::from(execution_payload.clone()), }, - #[cfg(feature = "withdrawals")] bls_to_execution_changes, }, Some(execution_payload), @@ -345,7 +342,6 @@ impl From>> voluntary_exits, sync_aggregate, execution_payload: FullPayloadEip4844 { execution_payload }, - #[cfg(feature = "withdrawals")] bls_to_execution_changes, blob_kzg_commitments, } = body; @@ -364,7 +360,6 @@ impl From>> execution_payload: BlindedPayloadEip4844 { execution_payload_header: From::from(execution_payload.clone()), }, - #[cfg(feature = "withdrawals")] bls_to_execution_changes, blob_kzg_commitments, }, @@ -433,7 +428,6 @@ impl BeaconBlockBodyCapella> { voluntary_exits, sync_aggregate, execution_payload: FullPayloadCapella { execution_payload }, - #[cfg(feature = "withdrawals")] bls_to_execution_changes, } = self; @@ -450,7 +444,6 @@ impl BeaconBlockBodyCapella> { execution_payload: BlindedPayloadCapella { execution_payload_header: From::from(execution_payload.clone()), }, - #[cfg(feature = "withdrawals")] bls_to_execution_changes: bls_to_execution_changes.clone(), } } @@ -469,7 +462,6 @@ impl BeaconBlockBodyEip4844> { voluntary_exits, sync_aggregate, execution_payload: FullPayloadEip4844 { execution_payload }, - #[cfg(feature = "withdrawals")] bls_to_execution_changes, blob_kzg_commitments, } = self; @@ -487,7 +479,6 @@ impl BeaconBlockBodyEip4844> { execution_payload: BlindedPayloadEip4844 { execution_payload_header: From::from(execution_payload.clone()), }, - #[cfg(feature = "withdrawals")] bls_to_execution_changes: bls_to_execution_changes.clone(), blob_kzg_commitments: blob_kzg_commitments.clone(), } diff --git a/consensus/types/src/beacon_state.rs 
b/consensus/types/src/beacon_state.rs index 48a83f94f4..b3eff7374b 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -297,10 +297,8 @@ where pub latest_execution_payload_header: ExecutionPayloadHeaderEip4844, // Withdrawals - #[cfg(feature = "withdrawals")] #[superstruct(only(Capella, Eip4844), partial_getter(copy))] pub next_withdrawal_index: u64, - #[cfg(feature = "withdrawals")] #[superstruct(only(Capella, Eip4844), partial_getter(copy))] pub next_withdrawal_validator_index: u64, diff --git a/consensus/types/src/beacon_state/tree_hash_cache.rs b/consensus/types/src/beacon_state/tree_hash_cache.rs index 30dd9f8d6b..4cfc684f4d 100644 --- a/consensus/types/src/beacon_state/tree_hash_cache.rs +++ b/consensus/types/src/beacon_state/tree_hash_cache.rs @@ -336,11 +336,9 @@ impl BeaconTreeHashCacheInner { } // Withdrawal indices (Capella and later). - #[cfg(feature = "withdrawals")] if let Ok(next_withdrawal_index) = state.next_withdrawal_index() { hasher.write(next_withdrawal_index.tree_hash_root().as_bytes())?; } - #[cfg(feature = "withdrawals")] if let Ok(next_withdrawal_validator_index) = state.next_withdrawal_validator_index() { hasher.write(next_withdrawal_validator_index.tree_hash_root().as_bytes())?; } diff --git a/consensus/types/src/execution_payload.rs b/consensus/types/src/execution_payload.rs index 18005094e4..45f52fb65a 100644 --- a/consensus/types/src/execution_payload.rs +++ b/consensus/types/src/execution_payload.rs @@ -80,7 +80,6 @@ pub struct ExecutionPayload { pub block_hash: ExecutionBlockHash, #[serde(with = "ssz_types::serde_utils::list_of_hex_var_list")] pub transactions: Transactions, - #[cfg(feature = "withdrawals")] #[superstruct(only(Capella, Eip4844))] pub withdrawals: Withdrawals, } diff --git a/consensus/types/src/execution_payload_header.rs b/consensus/types/src/execution_payload_header.rs index a98a68e3e5..e2c23389a1 100644 --- a/consensus/types/src/execution_payload_header.rs +++ 
b/consensus/types/src/execution_payload_header.rs @@ -75,7 +75,6 @@ pub struct ExecutionPayloadHeader { pub block_hash: ExecutionBlockHash, #[superstruct(getter(copy))] pub transactions_root: Hash256, - #[cfg(feature = "withdrawals")] #[superstruct(only(Capella, Eip4844))] #[superstruct(getter(copy))] pub withdrawals_root: Hash256, @@ -128,7 +127,6 @@ impl ExecutionPayloadHeaderMerge { base_fee_per_gas: self.base_fee_per_gas, block_hash: self.block_hash, transactions_root: self.transactions_root, - #[cfg(feature = "withdrawals")] withdrawals_root: Hash256::zero(), } } @@ -153,7 +151,6 @@ impl ExecutionPayloadHeaderCapella { excess_data_gas: Uint256::zero(), block_hash: self.block_hash, transactions_root: self.transactions_root, - #[cfg(feature = "withdrawals")] withdrawals_root: self.withdrawals_root, } } @@ -196,7 +193,6 @@ impl From> for ExecutionPayloadHeaderCape base_fee_per_gas: payload.base_fee_per_gas, block_hash: payload.block_hash, transactions_root: payload.transactions.tree_hash_root(), - #[cfg(feature = "withdrawals")] withdrawals_root: payload.withdrawals.tree_hash_root(), } } @@ -219,7 +215,6 @@ impl From> for ExecutionPayloadHeaderEip4 excess_data_gas: payload.excess_data_gas, block_hash: payload.block_hash, transactions_root: payload.transactions.tree_hash_root(), - #[cfg(feature = "withdrawals")] withdrawals_root: payload.withdrawals.tree_hash_root(), } } diff --git a/consensus/types/src/payload.rs b/consensus/types/src/payload.rs index 2d9e37b81a..8bba00b46d 100644 --- a/consensus/types/src/payload.rs +++ b/consensus/types/src/payload.rs @@ -37,7 +37,6 @@ pub trait ExecPayload: Debug + Clone + PartialEq + Hash + TreeHash + fn gas_limit(&self) -> u64; fn transactions(&self) -> Option<&Transactions>; /// fork-specific fields - #[cfg(feature = "withdrawals")] fn withdrawals_root(&self) -> Result; /// Is this a default payload with 0x0 roots for transactions and withdrawals? 
@@ -241,7 +240,6 @@ impl ExecPayload for FullPayload { }) } - #[cfg(feature = "withdrawals")] fn withdrawals_root(&self) -> Result { match self { FullPayload::Merge(_) => Err(Error::IncorrectStateVariant), @@ -343,7 +341,6 @@ impl<'b, T: EthSpec> ExecPayload for FullPayloadRef<'b, T> { }) } - #[cfg(feature = "withdrawals")] fn withdrawals_root(&self) -> Result { match self { FullPayloadRef::Merge(_) => Err(Error::IncorrectStateVariant), @@ -523,7 +520,6 @@ impl ExecPayload for BlindedPayload { None } - #[cfg(feature = "withdrawals")] fn withdrawals_root(&self) -> Result { match self { BlindedPayload::Merge(_) => Err(Error::IncorrectStateVariant), @@ -614,7 +610,6 @@ impl<'b, T: EthSpec> ExecPayload for BlindedPayloadRef<'b, T> { None } - #[cfg(feature = "withdrawals")] fn withdrawals_root(&self) -> Result { match self { BlindedPayloadRef::Merge(_) => Err(Error::IncorrectStateVariant), @@ -712,7 +707,6 @@ macro_rules! impl_exec_payload_common { f(self) } - #[cfg(feature = "withdrawals")] fn withdrawals_root(&self) -> Result { let g = $g; g(self) diff --git a/consensus/types/src/signed_beacon_block.rs b/consensus/types/src/signed_beacon_block.rs index 2a8398f83f..14f9358f61 100644 --- a/consensus/types/src/signed_beacon_block.rs +++ b/consensus/types/src/signed_beacon_block.rs @@ -341,7 +341,6 @@ impl SignedBeaconBlockCapella> { voluntary_exits, sync_aggregate, execution_payload: BlindedPayloadCapella { .. }, - #[cfg(feature = "withdrawals")] bls_to_execution_changes, }, }, @@ -364,7 +363,6 @@ impl SignedBeaconBlockCapella> { voluntary_exits, sync_aggregate, execution_payload: FullPayloadCapella { execution_payload }, - #[cfg(feature = "withdrawals")] bls_to_execution_changes, }, }, @@ -397,7 +395,6 @@ impl SignedBeaconBlockEip4844> { voluntary_exits, sync_aggregate, execution_payload: BlindedPayloadEip4844 { .. 
}, - #[cfg(feature = "withdrawals")] bls_to_execution_changes, blob_kzg_commitments, }, @@ -421,7 +418,6 @@ impl SignedBeaconBlockEip4844> { voluntary_exits, sync_aggregate, execution_payload: FullPayloadEip4844 { execution_payload }, - #[cfg(feature = "withdrawals")] bls_to_execution_changes, blob_kzg_commitments, }, diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 3b4dd57533..2db42d6ec3 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -24,8 +24,6 @@ gnosis = [] slasher-mdbx = ["slasher/mdbx"] # Support slasher LMDB backend. slasher-lmdb = ["slasher/lmdb"] -# Support for inclusion of withdrawals fields in all capella consensus types in all APIs. -withdrawals = ["types/withdrawals", "beacon_node/withdrawals"] # Support for withdrawals consensus processing logic. withdrawals-processing = ["beacon_node/withdrawals-processing"] diff --git a/testing/ef_tests/src/cases/operations.rs b/testing/ef_tests/src/cases/operations.rs index f5487a6940..a08ee1996a 100644 --- a/testing/ef_tests/src/cases/operations.rs +++ b/testing/ef_tests/src/cases/operations.rs @@ -4,7 +4,7 @@ use crate::case_result::compare_beacon_state_results_without_caches; use crate::decode::{ssz_decode_file, ssz_decode_file_with, ssz_decode_state, yaml_decode_file}; use crate::testing_spec; use serde_derive::Deserialize; -#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] +#[cfg(feature = "withdrawals-processing")] use state_processing::per_block_processing::process_operations::{ process_bls_to_execution_changes, process_bls_to_execution_changes, }; @@ -22,7 +22,7 @@ use state_processing::{ }; use std::fmt::Debug; use std::path::Path; -#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] +#[cfg(feature = "withdrawals-processing")] use types::SignedBlsToExecutionChange; use types::{ Attestation, AttesterSlashing, BeaconBlock, BeaconState, BlindedPayload, ChainSpec, Deposit, @@ -42,7 +42,7 @@ struct ExecutionMetadata { } /// Newtype 
for testing withdrawals. -#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] +#[cfg(feature = "withdrawals-processing")] #[derive(Debug, Clone, Deserialize)] pub struct WithdrawalsPayload { payload: FullPayload, @@ -341,7 +341,7 @@ impl Operation for BlindedPayload { } } -#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] +#[cfg(feature = "withdrawals-processing")] impl Operation for WithdrawalsPayload { fn handler_name() -> String { "withdrawals".into() @@ -374,7 +374,7 @@ impl Operation for WithdrawalsPayload { } } -#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] +#[cfg(feature = "withdrawals-processing")] impl Operation for SignedBlsToExecutionChange { fn handler_name() -> String { "bls_to_execution_change".into() diff --git a/testing/ef_tests/src/lib.rs b/testing/ef_tests/src/lib.rs index fd3bf2bd1b..a4d4f2d52d 100644 --- a/testing/ef_tests/src/lib.rs +++ b/testing/ef_tests/src/lib.rs @@ -1,5 +1,5 @@ pub use case_result::CaseResult; -#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] +#[cfg(feature = "withdrawals-processing")] pub use cases::WithdrawalsPayload; pub use cases::{ Case, EffectiveBalanceUpdates, Eth1DataReset, HistoricalRootsUpdate, InactivityUpdates, diff --git a/testing/ef_tests/tests/tests.rs b/testing/ef_tests/tests/tests.rs index 0227b92ec8..66c4f83ece 100644 --- a/testing/ef_tests/tests/tests.rs +++ b/testing/ef_tests/tests/tests.rs @@ -82,14 +82,14 @@ fn operations_execution_payload_blinded() { OperationsHandler::>::default().run(); } -#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] +#[cfg(feature = "withdrawals-processing")] #[test] fn operations_withdrawals() { OperationsHandler::>::default().run(); OperationsHandler::>::default().run(); } -#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] +#[cfg(feature = "withdrawals-processing")] #[test] fn operations_bls_to_execution_change() { 
OperationsHandler::::default().run(); diff --git a/testing/execution_engine_integration/Cargo.toml b/testing/execution_engine_integration/Cargo.toml index b5923aafe5..e058d58afb 100644 --- a/testing/execution_engine_integration/Cargo.toml +++ b/testing/execution_engine_integration/Cargo.toml @@ -23,5 +23,4 @@ hex = "0.4.2" fork_choice = { path = "../../consensus/fork_choice" } [features] -default = [] -withdrawals = [] \ No newline at end of file +default = [] \ No newline at end of file From 0c22d69e1546bbc35193c602dc55348e343c2a79 Mon Sep 17 00:00:00 2001 From: ethDreamer <37123614+ethDreamer@users.noreply.github.com> Date: Mon, 19 Dec 2022 19:35:08 -0600 Subject: [PATCH 06/19] Update consensus/state_processing/src/upgrade/eip4844.rs Co-authored-by: realbigsean --- consensus/state_processing/src/upgrade/eip4844.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/consensus/state_processing/src/upgrade/eip4844.rs b/consensus/state_processing/src/upgrade/eip4844.rs index 131100bb38..92a102ce96 100644 --- a/consensus/state_processing/src/upgrade/eip4844.rs +++ b/consensus/state_processing/src/upgrade/eip4844.rs @@ -10,7 +10,6 @@ pub fn upgrade_to_eip4844( let pre = pre_state.as_capella_mut()?; // FIXME(sean) This is a hack to let us participate in testnets where capella doesn't exist. 
- // let previous_fork_version = spec.bellatrix_fork_version; let previous_fork_version = pre.fork.current_version; // Where possible, use something like `mem::take` to move fields from behind the &mut From b224ed81515167c8632ea6f6c66ef94ead4fdd81 Mon Sep 17 00:00:00 2001 From: ethDreamer <37123614+ethDreamer@users.noreply.github.com> Date: Mon, 19 Dec 2022 19:35:17 -0600 Subject: [PATCH 07/19] Update consensus/state_processing/src/upgrade/eip4844.rs Co-authored-by: realbigsean --- consensus/state_processing/src/upgrade/eip4844.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/consensus/state_processing/src/upgrade/eip4844.rs b/consensus/state_processing/src/upgrade/eip4844.rs index 92a102ce96..e829c01e7e 100644 --- a/consensus/state_processing/src/upgrade/eip4844.rs +++ b/consensus/state_processing/src/upgrade/eip4844.rs @@ -9,7 +9,6 @@ pub fn upgrade_to_eip4844( let epoch = pre_state.current_epoch(); let pre = pre_state.as_capella_mut()?; - // FIXME(sean) This is a hack to let us participate in testnets where capella doesn't exist. let previous_fork_version = pre.fork.current_version; // Where possible, use something like `mem::take` to move fields from behind the &mut From 3d253abadcb9381e95ca6d517ceef80cf707be0b Mon Sep 17 00:00:00 2001 From: Mark Mackey Date: Wed, 21 Dec 2022 11:40:21 -0600 Subject: [PATCH 08/19] Fixed spec serialization bug --- consensus/types/src/config_and_preset.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/consensus/types/src/config_and_preset.rs b/consensus/types/src/config_and_preset.rs index 9a618f7cc3..ac93818b9c 100644 --- a/consensus/types/src/config_and_preset.rs +++ b/consensus/types/src/config_and_preset.rs @@ -28,6 +28,7 @@ pub struct ConfigAndPreset { #[serde(flatten)] pub bellatrix_preset: BellatrixPreset, #[superstruct(only(Capella))] + #[serde(flatten)] pub capella_preset: CapellaPreset, /// The `extra_fields` map allows us to gracefully decode fields intended for future hard forks. 
#[serde(flatten)] From cd6655dba91e46b2f91edfdb0a39135c95ea7cff Mon Sep 17 00:00:00 2001 From: Diva M Date: Thu, 22 Dec 2022 17:30:04 -0500 Subject: [PATCH 09/19] handle no blobs from peers instead of empty blobs in range requests --- .../src/sync/block_sidecar_coupling.rs | 115 +++++++++ beacon_node/network/src/sync/manager.rs | 197 ++++++++------ beacon_node/network/src/sync/mod.rs | 1 + .../network/src/sync/network_context.rs | 241 ++++++------------ .../network/src/sync/range_sync/range.rs | 22 +- consensus/types/src/blobs_sidecar.rs | 9 +- 6 files changed, 317 insertions(+), 268 deletions(-) create mode 100644 beacon_node/network/src/sync/block_sidecar_coupling.rs diff --git a/beacon_node/network/src/sync/block_sidecar_coupling.rs b/beacon_node/network/src/sync/block_sidecar_coupling.rs new file mode 100644 index 0000000000..762f37692f --- /dev/null +++ b/beacon_node/network/src/sync/block_sidecar_coupling.rs @@ -0,0 +1,115 @@ +use std::{ + collections::{hash_map::OccupiedEntry, VecDeque}, + sync::Arc, +}; + +use types::{ + signed_block_and_blobs::BlockWrapper, BlobsSidecar, EthSpec, SignedBeaconBlock, + SignedBeaconBlockAndBlobsSidecar, +}; + +struct ReceivedData { + block: Option>>, + blob: Option>>, +} + +#[derive(Debug, Default)] +pub struct BlockBlobRequestInfo { + /// Blocks we have received awaiting for their corresponding sidecar. + accumulated_blocks: VecDeque>>, + /// Sidecars we have received awaiting for their corresponding block. + accumulated_sidecars: VecDeque>>, + /// Whether the individual RPC request for blocks is finished or not. + is_blocks_rpc_finished: bool, + /// Whether the individual RPC request for sidecars is finished or not. 
+ is_sidecar_rpc_finished: bool, +} + +pub struct BlockBlobRequestEntry<'a, K, T: EthSpec> { + entry: OccupiedEntry<'a, K, BlockBlobRequestInfo>, +} + +impl<'a, K, T: EthSpec> From>> + for BlockBlobRequestEntry<'a, K, T> +{ + fn from(entry: OccupiedEntry<'a, K, BlockBlobRequestInfo>) -> Self { + BlockBlobRequestEntry { entry } + } +} + +impl BlockBlobRequestInfo { + pub fn add_block_response(&mut self, maybe_block: Option>>) { + match maybe_block { + Some(block) => self.accumulated_blocks.push_back(block), + None => self.is_blocks_rpc_finished = true, + } + } + + pub fn add_sidecar_response(&mut self, maybe_sidecar: Option>>) { + match maybe_sidecar { + Some(sidecar) => self.accumulated_sidecars.push_back(sidecar), + None => self.is_sidecar_rpc_finished = true, + } + } + + pub fn into_responses(self) -> Result>, &'static str> { + let BlockBlobRequestInfo { + accumulated_blocks, + accumulated_sidecars, + .. + } = self; + + // Create the storage for our pairs. + let mut pairs = Vec::with_capacity(accumulated_blocks.len()); + + // ASSUMPTION: There can't be more more blobs than blocks. i.e. sending any block (empty + // included) for a skipped slot is not permitted. + for sidecar in accumulated_sidecars { + let blob_slot = sidecar.beacon_block_slot; + // First queue any blocks that might not have a blob. + while let Some(block) = { + // We identify those if their slot is less than the current blob's slot. + match accumulated_blocks.front() { + Some(borrowed_block) if borrowed_block.slot() < blob_slot => { + accumulated_blocks.pop_front() + } + Some(_) => None, + None => { + // We received a blob and ran out of blocks. 
This is a peer error + return Err("Blob without more blobs to pair with returned by peer"); + } + } + } { + pairs.push(BlockWrapper::Block { block }) + } + + // The next block must be present and must match the blob's slot + let next_block = accumulated_blocks + .pop_front() + .expect("If block stream ended, an error was previously returned"); + if next_block.slot() != blob_slot { + // We verified that the slot of the block is not less than the slot of the blob (it + // would have been returned before). It's also not equal, so this block is ahead + // than the blob. This means the blob is not paired. + return Err("Blob without a matching block returned by peer"); + } + pairs.push(BlockWrapper::BlockAndBlob { + block_sidecar_pair: SignedBeaconBlockAndBlobsSidecar { + beacon_block: next_block, + blobs_sidecar: sidecar, + }, + }); + } + + // Every remaining block does not have a blob + for block in accumulated_blocks { + pairs.push(BlockWrapper::Block { block }) + } + + Ok(pairs) + } + + pub fn is_finished(&self) -> bool { + self.is_blocks_rpc_finished && self.is_sidecar_rpc_finished + } +} diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 60105d4225..155e2d2131 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -35,7 +35,7 @@ use super::backfill_sync::{BackFillSync, ProcessResult, SyncStart}; use super::block_lookups::BlockLookups; -use super::network_context::SyncNetworkContext; +use super::network_context::{BlockOrBlob, SyncNetworkContext}; use super::peer_sync_info::{remote_sync_type, PeerSyncType}; use super::range_sync::{RangeSync, RangeSyncType, EPOCHS_PER_BATCH}; use crate::beacon_processor::{ChainSegmentProcessId, WorkEvent as BeaconWorkEvent}; @@ -45,11 +45,11 @@ use crate::sync::range_sync::ExpectedBatchTy; use beacon_chain::{BeaconChain, BeaconChainTypes, BlockError, EngineState}; use futures::StreamExt; use 
lighthouse_network::rpc::methods::MAX_REQUEST_BLOCKS; -use lighthouse_network::rpc::{RPCError, RPCResponseErrorCode}; +use lighthouse_network::rpc::RPCError; use lighthouse_network::types::{NetworkGlobals, SyncState}; use lighthouse_network::SyncInfo; use lighthouse_network::{PeerAction, PeerId}; -use slog::{crit, debug, error, info, trace, Logger}; +use slog::{crit, debug, error, info, trace, warn, Logger}; use std::boxed::Box; use std::ops::Sub; use std::sync::Arc; @@ -746,17 +746,17 @@ impl SyncManager { &mut self.network, ), RequestId::BackFillSync { id } => { - if let Some((batch_id, block)) = self.network.backfill_sync_block_response( - id, - beacon_block, - ExpectedBatchTy::OnlyBlock, - ) { + let is_stream_terminator = beacon_block.is_none(); + if let Some(batch_id) = self + .network + .backfill_sync_only_blocks_response(id, is_stream_terminator) + { match self.backfill_sync.on_block_response( &mut self.network, batch_id, &peer_id, id, - block, + beacon_block.map(|block| BlockWrapper::Block { block }), ) { Ok(ProcessResult::SyncCompleted) => self.update_sync_state(), Ok(ProcessResult::Successful) => {} @@ -769,61 +769,125 @@ impl SyncManager { } } RequestId::RangeSync { id } => { - if let Some((chain_id, batch_id, block)) = self.network.range_sync_block_response( - id, - beacon_block, - ExpectedBatchTy::OnlyBlock, - ) { + let is_stream_terminator = beacon_block.is_none(); + if let Some((chain_id, batch_id)) = self + .network + .range_sync_block_response(id, is_stream_terminator) + { self.range_sync.blocks_by_range_response( &mut self.network, peer_id, chain_id, batch_id, id, - block, + beacon_block.map(|block| BlockWrapper::Block { block }), ); self.update_sync_state(); } } RequestId::BackFillSidecarPair { id } => { - if let Some((batch_id, block)) = self.network.backfill_sync_block_response( - id, - beacon_block, - ExpectedBatchTy::OnlyBlockBlobs, - ) { - match self.backfill_sync.on_block_response( - &mut self.network, - batch_id, - &peer_id, - id, - block, 
- ) { - Ok(ProcessResult::SyncCompleted) => self.update_sync_state(), - Ok(ProcessResult::Successful) => {} - Err(_error) => { - // The backfill sync has failed, errors are reported - // within. - self.update_sync_state(); + self.block_blob_backfill_response(id, peer_id, beacon_block.into()) + } + RequestId::RangeSidecarPair { id } => { + self.block_blob_range_response(id, peer_id, beacon_block.into()) + } + } + } + + /// Handles receiving a response for a range sync request that should have both blocks and + /// blobs. + fn block_blob_range_response( + &mut self, + id: Id, + peer_id: PeerId, + block_or_blob: BlockOrBlob, + ) { + if let Some((chain_id, batch_id, block_responses)) = self + .network + .range_sync_block_and_blob_response(id, block_or_blob) + { + match block_responses { + Ok(blocks) => { + for block in blocks + .into_iter() + .map(|block| Some(block)) + // chain the stream terminator + .chain(vec![None]) + { + self.range_sync.blocks_by_range_response( + &mut self.network, + peer_id, + chain_id, + batch_id, + id, + block, + ); + self.update_sync_state(); + } + } + Err(e) => { + // inform backfill that the request needs to be treated as failed + // With time we will want to downgrade this log + warn!( + self.log, "Blocks and blobs request for backfill received invalid data"; + "peer_id" => %peer_id, "batch_id" => batch_id, "error" => e + ); + // TODO: penalize the peer for being a bad boy + let id = RequestId::RangeSidecarPair { id }; + self.inject_error(peer_id, id, RPCError::InvalidData(e.into())) + } + } + } + } + + /// Handles receiving a response for a bacjfill sync request that should have both blocks and + /// blobs. 
+ fn block_blob_backfill_response( + &mut self, + id: Id, + peer_id: PeerId, + block_or_blob: BlockOrBlob, + ) { + if let Some((batch_id, block_responses)) = self + .network + .backfill_sync_block_and_blob_response(id, block_or_blob) + { + match block_responses { + Ok(blocks) => { + for block in blocks + .into_iter() + .map(|block| Some(block)) + // chain the stream terminator + .chain(vec![None]) + { + match self.backfill_sync.on_block_response( + &mut self.network, + batch_id, + &peer_id, + id, + block, + ) { + Ok(ProcessResult::SyncCompleted) => self.update_sync_state(), + Ok(ProcessResult::Successful) => {} + Err(_error) => { + // The backfill sync has failed, errors are reported + // within. + self.update_sync_state(); + } } } } - } - RequestId::RangeSidecarPair { id } => { - if let Some((chain_id, batch_id, block)) = self.network.range_sync_block_response( - id, - beacon_block, - ExpectedBatchTy::OnlyBlockBlobs, - ) { - self.range_sync.blocks_by_range_response( - &mut self.network, - peer_id, - chain_id, - batch_id, - id, - block, + Err(e) => { + // inform backfill that the request needs to be treated as failed + // With time we will want to downgrade this log + warn!( + self.log, "Blocks and blobs request for backfill received invalid data"; + "peer_id" => %peer_id, "batch_id" => batch_id, "error" => e ); - self.update_sync_state(); + // TODO: penalize the peer for being a bad boy + let id = RequestId::BackFillSidecarPair { id }; + self.inject_error(peer_id, id, RPCError::InvalidData(e.into())) } } } @@ -844,44 +908,13 @@ impl SyncManager { unreachable!("An only blocks request does not receive sidecars") } RequestId::BackFillSidecarPair { id } => { - if let Some((batch_id, block)) = self - .network - .backfill_sync_sidecar_response(id, maybe_sidecar) - { - match self.backfill_sync.on_block_response( - &mut self.network, - batch_id, - &peer_id, - id, - block, - ) { - Ok(ProcessResult::SyncCompleted) => self.update_sync_state(), - Ok(ProcessResult::Successful) 
=> {} - Err(_error) => { - // The backfill sync has failed, errors are reported - // within. - self.update_sync_state(); - } - } - } + self.block_blob_backfill_response(id, peer_id, maybe_sidecar.into()) } RequestId::RangeSync { .. } => { - unreachable!("And only blocks range request does not receive sidecars") + unreachable!("Only-blocks range requests don't receive sidecars") } RequestId::RangeSidecarPair { id } => { - if let Some((chain_id, batch_id, block)) = - self.network.range_sync_sidecar_response(id, maybe_sidecar) - { - self.range_sync.blocks_by_range_response( - &mut self.network, - peer_id, - chain_id, - batch_id, - id, - block, - ); - self.update_sync_state(); - } + self.block_blob_range_response(id, peer_id, maybe_sidecar.into()) } } } diff --git a/beacon_node/network/src/sync/mod.rs b/beacon_node/network/src/sync/mod.rs index dc18a5c981..7b244bcece 100644 --- a/beacon_node/network/src/sync/mod.rs +++ b/beacon_node/network/src/sync/mod.rs @@ -3,6 +3,7 @@ //! Stores the various syncing methods for the beacon chain. mod backfill_sync; mod block_lookups; +mod block_sidecar_coupling; pub mod manager; mod network_context; mod peer_sync_info; diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index 978bd69d06..912a5620e8 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -1,6 +1,7 @@ //! Provides network functionality for the Syncing thread. This fundamentally wraps a network //! channel and stores a global RPC ID to perform requests. 
+use super::block_sidecar_coupling::BlockBlobRequestInfo; use super::manager::{Id, RequestId as SyncRequestId}; use super::range_sync::{BatchId, ChainId, ExpectedBatchTy}; use crate::beacon_processor::WorkEvent; @@ -13,59 +14,11 @@ use lighthouse_network::rpc::methods::BlobsByRangeRequest; use lighthouse_network::rpc::{BlocksByRangeRequest, BlocksByRootRequest, GoodbyeReason}; use lighthouse_network::{Client, NetworkGlobals, PeerAction, PeerId, ReportSource, Request}; use slog::{debug, trace, warn}; -use slot_clock::SlotClock; use std::collections::hash_map::Entry; -use std::collections::VecDeque; use std::sync::Arc; use tokio::sync::mpsc; use types::signed_block_and_blobs::BlockWrapper; -use types::{ - BlobsSidecar, ChainSpec, EthSpec, SignedBeaconBlock, SignedBeaconBlockAndBlobsSidecar, -}; - -#[derive(Debug, Default)] -struct BlockBlobRequestInfo { - /// Blocks we have received awaiting for their corresponding sidecar. - accumulated_blocks: VecDeque>>, - /// Sidecars we have received awaiting for their corresponding block. - accumulated_sidecars: VecDeque>>, - /// Whether the individual RPC request for blocks is finished or not. - is_blocks_rpc_finished: bool, - /// Whether the individual RPC request for sidecars is finished or not. 
- is_sidecar_rpc_finished: bool, -} - -impl BlockBlobRequestInfo { - pub fn add_block_response(&mut self, maybe_block: Option>>) { - match maybe_block { - Some(block) => self.accumulated_blocks.push_back(block), - None => self.is_blocks_rpc_finished = true, - } - } - - pub fn add_sidecar_response(&mut self, maybe_sidecar: Option>>) { - match maybe_sidecar { - Some(sidecar) => self.accumulated_sidecars.push_back(sidecar), - None => self.is_sidecar_rpc_finished = true, - } - } - - pub fn pop_response(&mut self) -> Option> { - if !self.accumulated_blocks.is_empty() && !self.accumulated_sidecars.is_empty() { - let beacon_block = self.accumulated_blocks.pop_front().expect("non empty"); - let blobs_sidecar = self.accumulated_sidecars.pop_front().expect("non empty"); - return Some(SignedBeaconBlockAndBlobsSidecar { - beacon_block, - blobs_sidecar, - }); - } - None - } - - pub fn is_finished(&self) -> bool { - self.is_blocks_rpc_finished && self.is_sidecar_rpc_finished - } -} +use types::{BlobsSidecar, EthSpec, SignedBeaconBlock}; /// Wraps a Network channel to employ various RPC related network functionality for the Sync manager. This includes management of a global RPC request Id. pub struct SyncNetworkContext { @@ -104,6 +57,24 @@ pub struct SyncNetworkContext { log: slog::Logger, } +/// Small enumeration to make dealing with block and blob requests easier. +pub enum BlockOrBlob { + Block(Option>>), + Blob(Option>>), +} + +impl From>>> for BlockOrBlob { + fn from(block: Option>>) -> Self { + BlockOrBlob::Block(block) + } +} + +impl From>>> for BlockOrBlob { + fn from(blob: Option>>) -> Self { + BlockOrBlob::Blob(blob) + } +} + impl SyncNetworkContext { pub fn new( network_send: mpsc::UnboundedSender>, @@ -300,91 +271,45 @@ impl SyncNetworkContext { } } - /// Received a blocks by range response. + /// Response for a request that is only for blocks. 
pub fn range_sync_block_response( &mut self, request_id: Id, - maybe_block: Option>>, - batch_type: ExpectedBatchTy, - ) -> Option<(ChainId, BatchId, Option>)> { - match batch_type { - ExpectedBatchTy::OnlyBlockBlobs => { - match self.range_sidecar_pair_requests.entry(request_id) { - Entry::Occupied(mut entry) => { - let (chain_id, batch_id, info) = entry.get_mut(); - let chain_id = chain_id.clone(); - let batch_id = batch_id.clone(); - let stream_terminator = maybe_block.is_none(); - info.add_block_response(maybe_block); - let maybe_block_wrapped = info.pop_response().map(|block_sidecar_pair| { - BlockWrapper::BlockAndBlob { block_sidecar_pair } - }); - - if stream_terminator && !info.is_finished() { - return None; - } - if !stream_terminator && maybe_block_wrapped.is_none() { - return None; - } - - if info.is_finished() { - entry.remove(); - } - - Some((chain_id, batch_id, maybe_block_wrapped)) - } - Entry::Vacant(_) => None, - } - } - ExpectedBatchTy::OnlyBlock => { - // if the request is just for blocks then it can be removed on a stream termination - match maybe_block { - Some(block) => { - self.range_requests - .get(&request_id) - .cloned() - .map(|(chain_id, batch_id)| { - (chain_id, batch_id, Some(BlockWrapper::Block { block })) - }) - } - None => self - .range_requests - .remove(&request_id) - .map(|(chain_id, batch_id)| (chain_id, batch_id, None)), - } - } + is_stream_terminator: bool, + ) -> Option<(ChainId, BatchId)> { + if is_stream_terminator { + self.range_requests + .remove(&request_id) + .map(|(chain_id, batch_id)| (chain_id, batch_id)) + } else { + self.range_requests.get(&request_id).cloned() } } - pub fn range_sync_sidecar_response( + /// Received a blocks by range response for a request that couples blocks and blobs. 
+ pub fn range_sync_block_and_blob_response( &mut self, request_id: Id, - maybe_sidecar: Option>>, - ) -> Option<(ChainId, BatchId, Option>)> { + block_or_blob: BlockOrBlob, + ) -> Option<( + ChainId, + BatchId, + Result>, &'static str>, + )> { match self.range_sidecar_pair_requests.entry(request_id) { Entry::Occupied(mut entry) => { - let (chain_id, batch_id, info) = entry.get_mut(); - let chain_id = chain_id.clone(); - let batch_id = batch_id.clone(); - let stream_terminator = maybe_sidecar.is_none(); - info.add_sidecar_response(maybe_sidecar); - let maybe_block = info - .pop_response() - .map(|block_sidecar_pair| BlockWrapper::BlockAndBlob { block_sidecar_pair }); - - if stream_terminator && !info.is_finished() { - return None; + let (_, _, info) = entry.get_mut(); + match block_or_blob { + BlockOrBlob::Block(maybe_block) => info.add_block_response(maybe_block), + BlockOrBlob::Blob(maybe_sidecar) => info.add_sidecar_response(maybe_sidecar), } - - if !stream_terminator && maybe_block.is_none() { - return None; - } - if info.is_finished() { - entry.remove(); + // If the request is finished, unqueue everything + let (chain_id, batch_id, info) = entry.remove(); + Some((chain_id, batch_id, info.into_responses())) + } else { + None } - - Some((chain_id, batch_id, maybe_block)) } Entry::Vacant(_) => None, } @@ -418,65 +343,41 @@ impl SyncNetworkContext { } } - /// Received a blocks by range response. - pub fn backfill_sync_block_response( + /// Response for a request that is only for blocks. 
+ pub fn backfill_sync_only_blocks_response( &mut self, request_id: Id, - maybe_block: Option>>, - batch_type: ExpectedBatchTy, - ) -> Option<(BatchId, Option>)> { - match batch_type { - ExpectedBatchTy::OnlyBlockBlobs => { - match self.backfill_sidecar_pair_requests.entry(request_id) { - Entry::Occupied(mut entry) => { - let (batch_id, info) = entry.get_mut(); - let batch_id = batch_id.clone(); - info.add_block_response(maybe_block); - let maybe_block = info.pop_response().map(|block_sidecar_pair| { - BlockWrapper::BlockAndBlob { block_sidecar_pair } - }); - if info.is_finished() { - entry.remove(); - } - Some((batch_id, maybe_block)) - } - Entry::Vacant(_) => None, - } - } - ExpectedBatchTy::OnlyBlock => { - // if the request is just for blocks then it can be removed on a stream termination - match maybe_block { - Some(block) => self - .backfill_requests - .get(&request_id) - .cloned() - .map(|batch_id| (batch_id, Some(BlockWrapper::Block { block }))), - None => self - .backfill_requests - .remove(&request_id) - .map(|batch_id| (batch_id, None)), - } - } + is_stream_terminator: bool, + ) -> Option { + if is_stream_terminator { + self.backfill_requests + .remove(&request_id) + .map(|batch_id| batch_id) + } else { + self.backfill_requests.get(&request_id).cloned() } } - pub fn backfill_sync_sidecar_response( + /// Received a blocks by range response for a request that couples blocks and blobs. 
+ pub fn backfill_sync_block_and_blob_response( &mut self, request_id: Id, - maybe_sidecar: Option>>, - ) -> Option<(BatchId, Option>)> { + block_or_blob: BlockOrBlob, + ) -> Option<(BatchId, Result>, &'static str>)> { match self.backfill_sidecar_pair_requests.entry(request_id) { Entry::Occupied(mut entry) => { - let (batch_id, info) = entry.get_mut(); - let batch_id = batch_id.clone(); - info.add_sidecar_response(maybe_sidecar); - let maybe_block = info - .pop_response() - .map(|block_sidecar_pair| BlockWrapper::BlockAndBlob { block_sidecar_pair }); - if info.is_finished() { - entry.remove(); + let (_, info) = entry.get_mut(); + match block_or_blob { + BlockOrBlob::Block(maybe_block) => info.add_block_response(maybe_block), + BlockOrBlob::Blob(maybe_sidecar) => info.add_sidecar_response(maybe_sidecar), + } + if info.is_finished() { + // If the request is finished, unqueue everything + let (batch_id, info) = entry.remove(); + Some((batch_id, info.into_responses())) + } else { + None } - Some((batch_id, maybe_block)) } Entry::Vacant(_) => None, } diff --git a/beacon_node/network/src/sync/range_sync/range.rs b/beacon_node/network/src/sync/range_sync/range.rs index 73e6f49eb0..a4869f75b7 100644 --- a/beacon_node/network/src/sync/range_sync/range.rs +++ b/beacon_node/network/src/sync/range_sync/range.rs @@ -686,13 +686,10 @@ mod tests { // add some peers let (peer1, local_info, head_info) = rig.head_peer(); range.add_peer(&mut rig.cx, local_info, peer1, head_info); - let ((chain1, batch1, _), id1) = match rig.grab_request(&peer1).0 { - RequestId::Sync(crate::sync::manager::RequestId::RangeSync { id }) => ( - rig.cx - .range_sync_block_response(id, None, ExpectedBatchTy::OnlyBlock) - .unwrap(), - id, - ), + let ((chain1, batch1), id1) = match rig.grab_request(&peer1).0 { + RequestId::Sync(crate::sync::manager::RequestId::RangeSync { id }) => { + (rig.cx.range_sync_response(id, true).unwrap(), id) + } other => panic!("unexpected request {:?}", other), }; @@ -708,13 
+705,10 @@ mod tests { // while the ee is offline, more peers might arrive. Add a new finalized peer. let (peer2, local_info, finalized_info) = rig.finalized_peer(); range.add_peer(&mut rig.cx, local_info, peer2, finalized_info); - let ((chain2, batch2, _), id2) = match rig.grab_request(&peer2).0 { - RequestId::Sync(crate::sync::manager::RequestId::RangeSync { id }) => ( - rig.cx - .range_sync_block_response(id, None, ExpectedBatchTy::OnlyBlock) - .unwrap(), - id, - ), + let ((chain2, batch2), id2) = match rig.grab_request(&peer2).0 { + RequestId::Sync(crate::sync::manager::RequestId::RangeSync { id }) => { + (rig.cx.range_sync_response(id, true).unwrap(), id) + } other => panic!("unexpected request {:?}", other), }; diff --git a/consensus/types/src/blobs_sidecar.rs b/consensus/types/src/blobs_sidecar.rs index 430936cc27..f1e2a4bb16 100644 --- a/consensus/types/src/blobs_sidecar.rs +++ b/consensus/types/src/blobs_sidecar.rs @@ -1,13 +1,17 @@ -use crate::{Blob, EthSpec, Hash256, SignedRoot, Slot}; +use crate::test_utils::TestRandom; +use crate::{Blob, EthSpec, Hash256, SignedBeaconBlock, SignedRoot, Slot}; use kzg::KzgProof; use serde_derive::{Deserialize, Serialize}; use ssz::Encode; use ssz_derive::{Decode, Encode}; use ssz_types::VariableList; +use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Debug, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, PartialEq, Default)] +#[derive( + Debug, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, PartialEq, Default, TestRandom, +)] #[serde(bound = "T: EthSpec")] pub struct BlobsSidecar { pub beacon_block_root: Hash256, @@ -23,6 +27,7 @@ impl BlobsSidecar { pub fn empty() -> Self { Self::default() } + #[allow(clippy::integer_arithmetic)] pub fn max_size() -> usize { // Fixed part From 847f0de0ea20dcf8f27525b731102260828006c1 Mon Sep 17 00:00:00 2001 From: Diva M Date: Thu, 22 Dec 2022 17:32:33 -0500 Subject: 
[PATCH 10/19] change base From fbc147e273d15b779587154d89d2691ff77c749a Mon Sep 17 00:00:00 2001 From: Diva M Date: Thu, 22 Dec 2022 17:34:01 -0500 Subject: [PATCH 11/19] remove unused entry struct --- .../network/src/sync/block_sidecar_coupling.rs | 17 +---------------- 1 file changed, 1 insertion(+), 16 deletions(-) diff --git a/beacon_node/network/src/sync/block_sidecar_coupling.rs b/beacon_node/network/src/sync/block_sidecar_coupling.rs index 762f37692f..60c5a62eaa 100644 --- a/beacon_node/network/src/sync/block_sidecar_coupling.rs +++ b/beacon_node/network/src/sync/block_sidecar_coupling.rs @@ -1,7 +1,4 @@ -use std::{ - collections::{hash_map::OccupiedEntry, VecDeque}, - sync::Arc, -}; +use std::{collections::VecDeque, sync::Arc}; use types::{ signed_block_and_blobs::BlockWrapper, BlobsSidecar, EthSpec, SignedBeaconBlock, @@ -25,18 +22,6 @@ pub struct BlockBlobRequestInfo { is_sidecar_rpc_finished: bool, } -pub struct BlockBlobRequestEntry<'a, K, T: EthSpec> { - entry: OccupiedEntry<'a, K, BlockBlobRequestInfo>, -} - -impl<'a, K, T: EthSpec> From>> - for BlockBlobRequestEntry<'a, K, T> -{ - fn from(entry: OccupiedEntry<'a, K, BlockBlobRequestInfo>) -> Self { - BlockBlobRequestEntry { entry } - } -} - impl BlockBlobRequestInfo { pub fn add_block_response(&mut self, maybe_block: Option>>) { match maybe_block { From e24f6c93d90387c7c17071add448fbdb1e307147 Mon Sep 17 00:00:00 2001 From: Diva M Date: Thu, 22 Dec 2022 17:38:16 -0500 Subject: [PATCH 12/19] fix ctrl c'd comment --- beacon_node/network/src/sync/manager.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 155e2d2131..f003ca27e5 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -827,10 +827,10 @@ impl SyncManager { } } Err(e) => { - // inform backfill that the request needs to be treated as failed + // inform range that the request needs to be 
treated as failed // With time we will want to downgrade this log warn!( - self.log, "Blocks and blobs request for backfill received invalid data"; + self.log, "Blocks and blobs request for range received invalid data"; "peer_id" => %peer_id, "batch_id" => batch_id, "error" => e ); // TODO: penalize the peer for being a bad boy From 48ff56d9cb0173b4a61b6727017b5059b59275c6 Mon Sep 17 00:00:00 2001 From: Diva M Date: Thu, 22 Dec 2022 17:38:55 -0500 Subject: [PATCH 13/19] spelling --- beacon_node/network/src/sync/manager.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index f003ca27e5..305f9f7069 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -841,7 +841,7 @@ impl SyncManager { } } - /// Handles receiving a response for a bacjfill sync request that should have both blocks and + /// Handles receiving a response for a Backfill sync request that should have both blocks and /// blobs. 
fn block_blob_backfill_response( &mut self, From 3643f5cc19fe02d661ab08a61289113b6901211e Mon Sep 17 00:00:00 2001 From: Diva M Date: Thu, 22 Dec 2022 17:47:36 -0500 Subject: [PATCH 14/19] spelling --- beacon_node/network/src/sync/network_context.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index 912a5620e8..8e651e8973 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -304,7 +304,7 @@ impl SyncNetworkContext { BlockOrBlob::Blob(maybe_sidecar) => info.add_sidecar_response(maybe_sidecar), } if info.is_finished() { - // If the request is finished, unqueue everything + // If the request is finished, dequeue everything let (chain_id, batch_id, info) = entry.remove(); Some((chain_id, batch_id, info.into_responses())) } else { @@ -372,7 +372,7 @@ impl SyncNetworkContext { BlockOrBlob::Blob(maybe_sidecar) => info.add_sidecar_response(maybe_sidecar), } if info.is_finished() { - // If the request is finished, unqueue everything + // If the request is finished, dequeue everything let (batch_id, info) = entry.remove(); Some((batch_id, info.into_responses())) } else { From 66f9aa922d5f049deb1eec8a7877b9aaba163224 Mon Sep 17 00:00:00 2001 From: Diva M Date: Fri, 23 Dec 2022 09:52:10 -0500 Subject: [PATCH 15/19] clean up and improvements --- .../src/sync/block_sidecar_coupling.rs | 87 +++++++------------ .../network/src/sync/network_context.rs | 8 +- 2 files changed, 36 insertions(+), 59 deletions(-) diff --git a/beacon_node/network/src/sync/block_sidecar_coupling.rs b/beacon_node/network/src/sync/block_sidecar_coupling.rs index 60c5a62eaa..95acadffb6 100644 --- a/beacon_node/network/src/sync/block_sidecar_coupling.rs +++ b/beacon_node/network/src/sync/block_sidecar_coupling.rs @@ -5,11 +5,6 @@ use types::{ SignedBeaconBlockAndBlobsSidecar, }; -struct ReceivedData { - block: Option>>, - 
blob: Option>>, -} - #[derive(Debug, Default)] pub struct BlockBlobRequestInfo { /// Blocks we have received awaiting for their corresponding sidecar. @@ -17,84 +12,68 @@ pub struct BlockBlobRequestInfo { /// Sidecars we have received awaiting for their corresponding block. accumulated_sidecars: VecDeque>>, /// Whether the individual RPC request for blocks is finished or not. - is_blocks_rpc_finished: bool, + is_blocks_stream_terminated: bool, /// Whether the individual RPC request for sidecars is finished or not. - is_sidecar_rpc_finished: bool, + is_sidecars_stream_terminated: bool, } impl BlockBlobRequestInfo { pub fn add_block_response(&mut self, maybe_block: Option>>) { match maybe_block { Some(block) => self.accumulated_blocks.push_back(block), - None => self.is_blocks_rpc_finished = true, + None => self.is_blocks_stream_terminated = true, } } pub fn add_sidecar_response(&mut self, maybe_sidecar: Option>>) { match maybe_sidecar { Some(sidecar) => self.accumulated_sidecars.push_back(sidecar), - None => self.is_sidecar_rpc_finished = true, + None => self.is_sidecars_stream_terminated = true, } } pub fn into_responses(self) -> Result>, &'static str> { let BlockBlobRequestInfo { accumulated_blocks, - accumulated_sidecars, + mut accumulated_sidecars, .. } = self; - // Create the storage for our pairs. - let mut pairs = Vec::with_capacity(accumulated_blocks.len()); - - // ASSUMPTION: There can't be more more blobs than blocks. i.e. sending any block (empty + // ASSUMPTION: There can't be more more blobs than blocks. i.e. sending any blob (empty // included) for a skipped slot is not permitted. - for sidecar in accumulated_sidecars { - let blob_slot = sidecar.beacon_block_slot; - // First queue any blocks that might not have a blob. - while let Some(block) = { - // We identify those if their slot is less than the current blob's slot. 
- match accumulated_blocks.front() { - Some(borrowed_block) if borrowed_block.slot() < blob_slot => { - accumulated_blocks.pop_front() - } - Some(_) => None, - None => { - // We received a blob and ran out of blocks. This is a peer error - return Err("Blob without more blobs to pair with returned by peer"); - } + let pairs = accumulated_blocks + .into_iter() + .map(|beacon_block| { + if accumulated_sidecars + .front() + .map(|sidecar| sidecar.beacon_block_slot == beacon_block.slot()) + .unwrap_or(false) + { + let blobs_sidecar = + accumulated_sidecars.pop_front().ok_or("missing sidecar")?; + Ok(BlockWrapper::BlockAndBlob { + block_sidecar_pair: SignedBeaconBlockAndBlobsSidecar { + beacon_block, + blobs_sidecar, + }, + }) + } else { + Ok(BlockWrapper::Block { + block: beacon_block, + }) } - } { - pairs.push(BlockWrapper::Block { block }) - } + }) + .collect::, _>>(); - // The next block must be present and must match the blob's slot - let next_block = accumulated_blocks - .pop_front() - .expect("If block stream ended, an error was previously returned"); - if next_block.slot() != blob_slot { - // We verified that the slot of the block is not less than the slot of the blob (it - // would have been returned before). It's also not equal, so this block is ahead - // than the blob. This means the blob is not paired. - return Err("Blob without a matching block returned by peer"); - } - pairs.push(BlockWrapper::BlockAndBlob { - block_sidecar_pair: SignedBeaconBlockAndBlobsSidecar { - beacon_block: next_block, - blobs_sidecar: sidecar, - }, - }); + // if accumulated sidecars is not empty, throw an error. 
+ if !accumulated_sidecars.is_empty() { + return Err("Received more sidecars than blocks"); } - // Every remaining block does not have a blob - for block in accumulated_blocks { - pairs.push(BlockWrapper::Block { block }) - } - - Ok(pairs) + pairs } pub fn is_finished(&self) -> bool { - self.is_blocks_rpc_finished && self.is_sidecar_rpc_finished + self.is_blocks_stream_terminated && self.is_sidecars_stream_terminated } } diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index 8e651e8973..cc94db77ca 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -278,11 +278,9 @@ impl SyncNetworkContext { is_stream_terminator: bool, ) -> Option<(ChainId, BatchId)> { if is_stream_terminator { - self.range_requests - .remove(&request_id) - .map(|(chain_id, batch_id)| (chain_id, batch_id)) + self.range_requests.remove(&request_id) } else { - self.range_requests.get(&request_id).cloned() + self.range_requests.get(&request_id).copied() } } @@ -354,7 +352,7 @@ impl SyncNetworkContext { .remove(&request_id) .map(|batch_id| batch_id) } else { - self.backfill_requests.get(&request_id).cloned() + self.backfill_requests.get(&request_id).copied() } } From 5db0a88d4f4fed0854e77be0ccdd84842f8aea82 Mon Sep 17 00:00:00 2001 From: realbigsean Date: Fri, 23 Dec 2022 10:27:01 -0500 Subject: [PATCH 16/19] fix compilation errors from merge --- .../lighthouse_network/src/rpc/codec/ssz_snappy.rs | 5 ++++- lcli/Cargo.toml | 1 - testing/ef_tests/Cargo.toml | 1 - testing/ef_tests/src/cases/operations.rs | 13 +++++-------- 4 files changed, 9 insertions(+), 11 deletions(-) diff --git a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs index d69b42c0be..df5350d949 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs @@ -719,7 
+719,10 @@ fn context_bytes_to_fork_name( let encoded = hex::encode(context_bytes); RPCError::ErrorResponse( RPCResponseErrorCode::InvalidRequest, - format!("Context bytes {} do not correspond to a valid fork", encoded), + format!( + "Context bytes {} do not correspond to a valid fork", + encoded + ), ) }) } diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index 4130c0f6af..0da6b6f090 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -8,7 +8,6 @@ edition = "2021" [features] portable = ["bls/supranational-portable"] fake_crypto = ['bls/fake_crypto'] -withdrawals = ["types/withdrawals", "beacon_chain/withdrawals", "store/withdrawals", "state_processing/withdrawals"] withdrawals-processing = ["beacon_chain/withdrawals-processing", "store/withdrawals-processing", "state_processing/withdrawals-processing"] [dependencies] diff --git a/testing/ef_tests/Cargo.toml b/testing/ef_tests/Cargo.toml index 1b42b42ff1..23bb29364d 100644 --- a/testing/ef_tests/Cargo.toml +++ b/testing/ef_tests/Cargo.toml @@ -9,7 +9,6 @@ edition = "2021" ef_tests = [] milagro = ["bls/milagro"] fake_crypto = ["bls/fake_crypto"] -withdrawals = ["state_processing/withdrawals", "store/withdrawals", "beacon_chain/withdrawals", "types/withdrawals", "execution_layer/withdrawals"] withdrawals-processing = ["state_processing/withdrawals-processing", "store/withdrawals-processing", "beacon_chain/withdrawals-processing", "execution_layer/withdrawals-processing"] [dependencies] diff --git a/testing/ef_tests/src/cases/operations.rs b/testing/ef_tests/src/cases/operations.rs index dd43eeccef..a2356519ad 100644 --- a/testing/ef_tests/src/cases/operations.rs +++ b/testing/ef_tests/src/cases/operations.rs @@ -5,9 +5,9 @@ use crate::decode::{ssz_decode_file, ssz_decode_file_with, ssz_decode_state, yam use crate::testing_spec; use serde_derive::Deserialize; #[cfg(feature = "withdrawals-processing")] -use state_processing::per_block_processing::process_operations::{ - process_bls_to_execution_changes, 
process_bls_to_execution_changes, -}; +use state_processing::per_block_processing::process_operations::process_bls_to_execution_changes; +#[cfg(feature = "withdrawals-processing")] +use state_processing::per_block_processing::process_withdrawals; use state_processing::{ per_block_processing::{ errors::BlockProcessingError, @@ -356,10 +356,6 @@ impl Operation for WithdrawalsPayload { return false; } - if !cfg!(feature = "withdrawals") { - return false; - } - fork_name != ForkName::Base && fork_name != ForkName::Altair && fork_name != ForkName::Merge } @@ -372,7 +368,7 @@ impl Operation for WithdrawalsPayload { }) } - #[cfg(feature = "withdrawals")] + #[cfg(feature = "withdrawals-processing")] fn apply_to( &self, state: &mut BeaconState, @@ -386,6 +382,7 @@ impl Operation for WithdrawalsPayload { process_withdrawals::<_, FullPayload<_>>(state, self.payload.to_ref(), spec) } } +} #[cfg(feature = "withdrawals-processing")] impl Operation for SignedBlsToExecutionChange { From 901764b8f0c85eaee474bb95c29af72400197efe Mon Sep 17 00:00:00 2001 From: Diva M Date: Fri, 23 Dec 2022 10:32:26 -0500 Subject: [PATCH 17/19] backfill batches need to be of just one epoch --- .../network/src/sync/backfill_sync/mod.rs | 2 +- .../network/src/sync/network_context.rs | 20 ++++++++----------- 2 files changed, 9 insertions(+), 13 deletions(-) diff --git a/beacon_node/network/src/sync/backfill_sync/mod.rs b/beacon_node/network/src/sync/backfill_sync/mod.rs index 76850a5454..56ed551530 100644 --- a/beacon_node/network/src/sync/backfill_sync/mod.rs +++ b/beacon_node/network/src/sync/backfill_sync/mod.rs @@ -33,7 +33,7 @@ use types::{Epoch, EthSpec}; /// we will negatively report peers with poor bandwidth. This can be set arbitrarily high, in which /// case the responder will fill the response up to the max request size, assuming they have the /// bandwidth to do so. 
-pub const BACKFILL_EPOCHS_PER_BATCH: u64 = 2; +pub const BACKFILL_EPOCHS_PER_BATCH: u64 = 1; /// The maximum number of batches to queue before requesting more. const BACKFILL_BATCH_BUFFER_SIZE: u8 = 20; diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index a38f1e0fe4..36da3bf821 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -531,25 +531,21 @@ impl SyncNetworkContext { id } + /// Check whether a batch for this epoch (and only this epoch) should request just blocks or + /// blocks and blobs. pub fn batch_type(&self, epoch: types::Epoch) -> ExpectedBatchTy { - // Keep tests only for blocks. + const _: () = assert!( + super::backfill_sync::BACKFILL_EPOCHS_PER_BATCH == 1 + && super::range_sync::EPOCHS_PER_BATCH == 1, + "To deal with alignment with 4844 boundaries, batches need to be of just one epoch" + ); #[cfg(test)] { + // Keep tests only for blocks. return ExpectedBatchTy::OnlyBlock; } #[cfg(not(test))] { - use super::range_sync::EPOCHS_PER_BATCH; - assert_eq!( - EPOCHS_PER_BATCH, 1, - "If this is not one, everything will fail horribly" - ); - - // Here we need access to the beacon chain, check the fork boundary, the current epoch, the - // blob period to serve and check with that if the batch is a blob batch or not. - // NOTE: This would carelessly assume batch sizes are always 1 epoch, to avoid needing to - // align with the batch boundary. 
- if let Some(data_availability_boundary) = self.chain.data_availability_boundary() { if epoch >= data_availability_boundary { ExpectedBatchTy::OnlyBlockBlobs From 24087f104d9883ca65e2b03badda57847b3ddf94 Mon Sep 17 00:00:00 2001 From: Diva M Date: Fri, 23 Dec 2022 10:49:46 -0500 Subject: [PATCH 18/19] add the batch type to the Batch's KV --- beacon_node/network/src/sync/range_sync/batch.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/beacon_node/network/src/sync/range_sync/batch.rs b/beacon_node/network/src/sync/range_sync/batch.rs index b0d266e074..7453e1df61 100644 --- a/beacon_node/network/src/sync/range_sync/batch.rs +++ b/beacon_node/network/src/sync/range_sync/batch.rs @@ -5,6 +5,7 @@ use std::collections::HashSet; use std::hash::{Hash, Hasher}; use std::ops::Sub; use std::sync::Arc; +use strum::Display; use types::signed_block_and_blobs::BlockWrapper; use types::{Epoch, EthSpec, SignedBeaconBlock, SignedBeaconBlockAndBlobsSidecar, Slot}; @@ -40,7 +41,8 @@ impl BatchTy { pub struct MixedBlockTyErr; /// Type of expected batch. 
-#[derive(Debug, Clone)] +#[derive(Debug, Copy, Clone, Display)] +#[strum(serialize_all = "snake_case")] pub enum ExpectedBatchTy { OnlyBlockBlobs, OnlyBlock, @@ -247,7 +249,7 @@ impl BatchInfo { start_slot: self.start_slot.into(), count: self.end_slot.sub(self.start_slot).into(), }, - self.batch_type.clone(), + self.batch_type, ) } @@ -557,6 +559,7 @@ impl slog::KV for BatchInfo { serializer.emit_usize("processed", self.failed_processing_attempts.len())?; serializer.emit_u8("processed_no_penalty", self.non_faulty_processing_attempts)?; serializer.emit_arguments("state", &format_args!("{:?}", self.state))?; + serializer.emit_arguments("batch_ty", &format_args!("{}", self.batch_type)); slog::Result::Ok(()) } } From 240854750c6622cec41db015c38a4fcce79803ea Mon Sep 17 00:00:00 2001 From: Divma <26765164+divagant-martian@users.noreply.github.com> Date: Fri, 23 Dec 2022 17:16:10 -0500 Subject: [PATCH 19/19] cleanup: remove unused imports, unusued fields (#3834) --- beacon_node/beacon_chain/src/beacon_chain.rs | 2 +- beacon_node/beacon_chain/src/blob_cache.rs | 3 +-- beacon_node/beacon_chain/src/blob_verification.rs | 2 -- beacon_node/beacon_chain/src/block_verification.rs | 2 +- beacon_node/execution_layer/src/engines.rs | 2 +- beacon_node/http_api/src/publish_blocks.rs | 5 ++--- beacon_node/lighthouse_network/src/rpc/protocol.rs | 2 +- beacon_node/lighthouse_network/src/types/pubsub.rs | 12 ++++-------- .../src/beacon_processor/work_reprocessing_queue.rs | 3 +-- .../src/beacon_processor/worker/gossip_methods.rs | 6 ++---- .../src/beacon_processor/worker/rpc_methods.rs | 5 ++--- .../src/beacon_processor/worker/sync_methods.rs | 5 +---- beacon_node/network/src/sync/block_lookups/mod.rs | 5 +---- .../network/src/sync/block_lookups/parent_lookup.rs | 3 +-- .../src/sync/block_lookups/single_block_lookup.rs | 3 +-- beacon_node/network/src/sync/block_lookups/tests.rs | 4 ++-- beacon_node/network/src/sync/manager.rs | 4 ++-- beacon_node/network/src/sync/range_sync/chain.rs | 1 
- beacon_node/network/src/sync/range_sync/range.rs | 5 ++--- beacon_node/store/src/lib.rs | 1 + consensus/state_processing/src/consensus_context.rs | 8 +------- .../state_processing/src/per_block_processing.rs | 2 -- consensus/types/src/blobs_sidecar.rs | 2 +- lcli/src/new_testnet.rs | 7 ++----- 24 files changed, 31 insertions(+), 63 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index ab00817153..d8c9c52c1c 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -62,7 +62,7 @@ use crate::{metrics, BeaconChainError, BeaconForkChoiceStore, BeaconSnapshot, Ca use eth2::types::{EventKind, SseBlock, SyncDuty}; use execution_layer::{ BlockProposalContents, BuilderParams, ChainHealth, ExecutionLayer, FailedCondition, - PayloadAttributes, PayloadAttributesV2, PayloadStatus, + PayloadAttributes, PayloadStatus, }; pub use fork_choice::CountUnrealized; use fork_choice::{ diff --git a/beacon_node/beacon_chain/src/blob_cache.rs b/beacon_node/beacon_chain/src/blob_cache.rs index 7f057ad9ed..d03e62ab64 100644 --- a/beacon_node/beacon_chain/src/blob_cache.rs +++ b/beacon_node/beacon_chain/src/blob_cache.rs @@ -1,7 +1,6 @@ use lru::LruCache; use parking_lot::Mutex; -use tree_hash::TreeHash; -use types::{BlobsSidecar, EthSpec, ExecutionPayload, Hash256}; +use types::{BlobsSidecar, EthSpec, Hash256}; pub const DEFAULT_BLOB_CACHE_SIZE: usize = 10; diff --git a/beacon_node/beacon_chain/src/blob_verification.rs b/beacon_node/beacon_chain/src/blob_verification.rs index 1b05c7d39f..7b940f2912 100644 --- a/beacon_node/beacon_chain/src/blob_verification.rs +++ b/beacon_node/beacon_chain/src/blob_verification.rs @@ -2,9 +2,7 @@ use slot_clock::SlotClock; use crate::beacon_chain::{BeaconChain, BeaconChainTypes, MAXIMUM_GOSSIP_CLOCK_DISPARITY}; use crate::{kzg_utils, BeaconChainError}; -use bls::PublicKey; use 
state_processing::per_block_processing::eip4844::eip4844::verify_kzg_commitments_against_transactions; -use types::consts::eip4844::BLS_MODULUS; use types::{BeaconStateError, BlobsSidecar, Hash256, KzgCommitment, Slot, Transactions}; #[derive(Debug)] diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index b9e65bc0a2..9215af4bac 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -88,12 +88,12 @@ use store::{Error as DBError, HotStateSummary, KeyValueStore, StoreOp}; use task_executor::JoinHandle; use tree_hash::TreeHash; use types::signed_block_and_blobs::BlockWrapper; +use types::ExecPayload; use types::{ BeaconBlockRef, BeaconState, BeaconStateError, BlindedPayload, ChainSpec, CloneConfig, Epoch, EthSpec, ExecutionBlockHash, Hash256, InconsistentFork, PublicKey, PublicKeyBytes, RelativeEpoch, SignedBeaconBlock, SignedBeaconBlockHeader, Slot, }; -use types::{BlobsSidecar, ExecPayload}; pub const POS_PANDA_BANNER: &str = r#" ,,, ,,, ,,, ,,, diff --git a/beacon_node/execution_layer/src/engines.rs b/beacon_node/execution_layer/src/engines.rs index 432cc85cd4..e0b7c1dc3f 100644 --- a/beacon_node/execution_layer/src/engines.rs +++ b/beacon_node/execution_layer/src/engines.rs @@ -11,7 +11,7 @@ use std::sync::Arc; use task_executor::TaskExecutor; use tokio::sync::{watch, Mutex, RwLock}; use tokio_stream::wrappers::WatchStream; -use types::{Address, ExecutionBlockHash, ForkName, Hash256}; +use types::{ExecutionBlockHash, ForkName}; /// The number of payload IDs that will be stored for each `Engine`. 
/// diff --git a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs index 085f5036f4..4a6a584ca1 100644 --- a/beacon_node/http_api/src/publish_blocks.rs +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -11,9 +11,8 @@ use tokio::sync::mpsc::UnboundedSender; use tree_hash::TreeHash; use types::signed_block_and_blobs::BlockWrapper; use types::{ - AbstractExecPayload, BlindedPayload, BlobsSidecar, EthSpec, ExecPayload, ExecutionBlockHash, - FullPayload, Hash256, SignedBeaconBlock, SignedBeaconBlockAndBlobsSidecar, - SignedBeaconBlockEip4844, + AbstractExecPayload, BlindedPayload, EthSpec, ExecPayload, ExecutionBlockHash, FullPayload, + Hash256, SignedBeaconBlock, SignedBeaconBlockAndBlobsSidecar, }; use warp::Rejection; diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index 8a3149fc51..1164688cda 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -22,7 +22,7 @@ use tokio_util::{ }; use types::BlobsSidecar; use types::{ - BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockMerge, Blob, EmptyBlock, EthSpec, + BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockMerge, EmptyBlock, EthSpec, ForkContext, ForkName, Hash256, MainnetEthSpec, Signature, SignedBeaconBlock, }; diff --git a/beacon_node/lighthouse_network/src/types/pubsub.rs b/beacon_node/lighthouse_network/src/types/pubsub.rs index 7b9e6a7b47..7951a07243 100644 --- a/beacon_node/lighthouse_network/src/types/pubsub.rs +++ b/beacon_node/lighthouse_network/src/types/pubsub.rs @@ -3,20 +3,16 @@ use crate::types::{GossipEncoding, GossipKind, GossipTopic}; use crate::TopicHash; use libp2p::gossipsub::{DataTransform, GossipsubMessage, RawGossipsubMessage}; -use serde_derive::{Deserialize, Serialize}; use snap::raw::{decompress_len, Decoder, Encoder}; use ssz::{Decode, Encode}; -use ssz_derive::{Decode, Encode}; use 
std::boxed::Box; use std::io::{Error, ErrorKind}; use std::sync::Arc; -use tree_hash_derive::TreeHash; use types::{ - Attestation, AttesterSlashing, BlobsSidecar, EthSpec, ForkContext, ForkName, - LightClientFinalityUpdate, LightClientOptimisticUpdate, ProposerSlashing, - SignedAggregateAndProof, SignedBeaconBlock, SignedBeaconBlockAltair, - SignedBeaconBlockAndBlobsSidecar, SignedBeaconBlockBase, SignedBeaconBlockCapella, - SignedBeaconBlockEip4844, SignedBeaconBlockMerge, SignedBlsToExecutionChange, + Attestation, AttesterSlashing, EthSpec, ForkContext, ForkName, LightClientFinalityUpdate, + LightClientOptimisticUpdate, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, + SignedBeaconBlockAltair, SignedBeaconBlockAndBlobsSidecar, SignedBeaconBlockBase, + SignedBeaconBlockCapella, SignedBeaconBlockMerge, SignedBlsToExecutionChange, SignedContributionAndProof, SignedVoluntaryExit, SubnetId, SyncCommitteeMessage, SyncSubnetId, }; diff --git a/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs b/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs index e0c9347451..6f43300555 100644 --- a/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs +++ b/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs @@ -23,7 +23,6 @@ use slog::{crit, debug, error, warn, Logger}; use slot_clock::SlotClock; use std::collections::{HashMap, HashSet}; use std::pin::Pin; -use std::sync::Arc; use std::task::Context; use std::time::Duration; use task_executor::TaskExecutor; @@ -31,7 +30,7 @@ use tokio::sync::mpsc::{self, Receiver, Sender}; use tokio::time::error::Error as TimeError; use tokio_util::time::delay_queue::{DelayQueue, Key as DelayKey}; use types::signed_block_and_blobs::BlockWrapper; -use types::{Attestation, EthSpec, Hash256, SignedAggregateAndProof, SignedBeaconBlock, SubnetId}; +use types::{Attestation, EthSpec, Hash256, SignedAggregateAndProof, SubnetId}; const TASK_NAME: &str = 
"beacon_processor_reprocess_queue"; const GOSSIP_BLOCKS: &str = "gossip_blocks"; diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index eac8175d51..2c7c94079e 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -15,15 +15,13 @@ use lighthouse_network::{Client, MessageAcceptance, MessageId, PeerAction, PeerI use slog::{crit, debug, error, info, trace, warn}; use slot_clock::SlotClock; use ssz::Encode; -use std::sync::Arc; use std::time::{Duration, SystemTime, UNIX_EPOCH}; use store::hot_cold_store::HotColdDBError; use tokio::sync::mpsc; use types::signed_block_and_blobs::BlockWrapper; use types::{ - Attestation, AttesterSlashing, BlobsSidecar, EthSpec, Hash256, IndexedAttestation, - LightClientFinalityUpdate, LightClientOptimisticUpdate, ProposerSlashing, - SignedAggregateAndProof, SignedBeaconBlock, SignedBeaconBlockAndBlobsSidecar, + Attestation, AttesterSlashing, EthSpec, Hash256, IndexedAttestation, LightClientFinalityUpdate, + LightClientOptimisticUpdate, ProposerSlashing, SignedAggregateAndProof, SignedBlsToExecutionChange, SignedContributionAndProof, SignedVoluntaryExit, Slot, SubnetId, SyncCommitteeMessage, SyncSubnetId, }; diff --git a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs index 3494153a5b..3ade1bb87b 100644 --- a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs @@ -12,7 +12,6 @@ use lighthouse_network::rpc::*; use lighthouse_network::{PeerId, PeerRequestId, ReportSource, Response, SyncInfo}; use slog::{debug, error}; use slot_clock::SlotClock; -use ssz_types::VariableList; use std::sync::Arc; use task_executor::TaskExecutor; use types::light_client_bootstrap::LightClientBootstrap; @@ -581,7 
+580,7 @@ impl Worker { /// Handle a `BlobsByRange` request from the peer. pub fn handle_blobs_by_range_request( self, - executor: TaskExecutor, + _executor: TaskExecutor, send_on_drop: SendOnDrop, peer_id: PeerId, request_id: PeerRequestId, @@ -656,7 +655,7 @@ impl Worker { let block_roots = block_roots.into_iter().flatten().collect::>(); let mut blobs_sent = 0; - let mut send_response = true; + let send_response = true; for root in block_roots { match self.chain.store.get_blobs(&root) { diff --git a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs index dafc00bddb..717f2a0950 100644 --- a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs @@ -17,10 +17,7 @@ use slog::{debug, error, info, warn}; use std::sync::Arc; use tokio::sync::mpsc; use types::signed_block_and_blobs::BlockWrapper; -use types::{ - Epoch, Hash256, SignedBeaconBlock, SignedBeaconBlockAndBlobsSidecar, - SignedBeaconBlockAndBlobsSidecarDecode, -}; +use types::{Epoch, Hash256, SignedBeaconBlock}; /// Id associated to a batch processing request, either a sync batch or a parent lookup. 
#[derive(Clone, Debug, PartialEq)] diff --git a/beacon_node/network/src/sync/block_lookups/mod.rs b/beacon_node/network/src/sync/block_lookups/mod.rs index ca633ba760..84b49e25f1 100644 --- a/beacon_node/network/src/sync/block_lookups/mod.rs +++ b/beacon_node/network/src/sync/block_lookups/mod.rs @@ -4,15 +4,12 @@ use std::time::Duration; use beacon_chain::{BeaconChainTypes, BlockError}; use fnv::FnvHashMap; -use futures::StreamExt; -use itertools::{Either, Itertools}; use lighthouse_network::rpc::{RPCError, RPCResponseErrorCode}; use lighthouse_network::{PeerAction, PeerId}; use lru_cache::LRUTimeCache; use slog::{debug, error, trace, warn, Logger}; use smallvec::SmallVec; -use std::sync::Arc; -use store::{Hash256, SignedBeaconBlock}; +use store::Hash256; use types::signed_block_and_blobs::BlockWrapper; use crate::beacon_processor::{ChainSegmentProcessId, WorkEvent}; diff --git a/beacon_node/network/src/sync/block_lookups/parent_lookup.rs b/beacon_node/network/src/sync/block_lookups/parent_lookup.rs index fd17e18db5..2aabbb563d 100644 --- a/beacon_node/network/src/sync/block_lookups/parent_lookup.rs +++ b/beacon_node/network/src/sync/block_lookups/parent_lookup.rs @@ -1,8 +1,7 @@ use super::RootBlockTuple; use beacon_chain::BeaconChainTypes; use lighthouse_network::PeerId; -use std::sync::Arc; -use store::{Hash256, SignedBeaconBlock}; +use store::Hash256; use strum::IntoStaticStr; use types::signed_block_and_blobs::BlockWrapper; diff --git a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs index 0e84fb0bbc..05df18a0da 100644 --- a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs +++ b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs @@ -4,8 +4,7 @@ use lighthouse_network::{rpc::BlocksByRootRequest, PeerId}; use rand::seq::IteratorRandom; use ssz_types::VariableList; use std::collections::HashSet; -use std::sync::Arc; -use store::{EthSpec, Hash256, 
SignedBeaconBlock}; +use store::{EthSpec, Hash256}; use strum::IntoStaticStr; use types::signed_block_and_blobs::BlockWrapper; diff --git a/beacon_node/network/src/sync/block_lookups/tests.rs b/beacon_node/network/src/sync/block_lookups/tests.rs index 21b6d6658d..004d0479a4 100644 --- a/beacon_node/network/src/sync/block_lookups/tests.rs +++ b/beacon_node/network/src/sync/block_lookups/tests.rs @@ -10,11 +10,11 @@ use beacon_chain::builder::Witness; use beacon_chain::eth1_chain::CachingEth1Backend; use lighthouse_network::{NetworkGlobals, Request}; use slog::{Drain, Level}; -use slot_clock::{SlotClock, SystemTimeSlotClock}; +use slot_clock::SystemTimeSlotClock; use store::MemoryStore; use tokio::sync::mpsc; use types::test_utils::{SeedableRng, TestRandom, XorShiftRng}; -use types::{EthSpec, MainnetEthSpec, MinimalEthSpec as E, Slot}; +use types::MinimalEthSpec as E; type T = Witness, E, MemoryStore, MemoryStore>; diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 305f9f7069..8d08ed1b17 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -898,10 +898,10 @@ impl SyncManager { request_id: RequestId, peer_id: PeerId, maybe_sidecar: Option::EthSpec>>>, - seen_timestamp: Duration, + _seen_timestamp: Duration, ) { match request_id { - RequestId::SingleBlock { id } | RequestId::ParentLookup { id } => { + RequestId::SingleBlock { .. } | RequestId::ParentLookup { .. } => { unreachable!("There is no such thing as a singular 'by root' glob request that is not accompanied by the block") } RequestId::BackFillSync { .. 
} => { diff --git a/beacon_node/network/src/sync/range_sync/chain.rs b/beacon_node/network/src/sync/range_sync/chain.rs index 46b6d05d7d..89e120050e 100644 --- a/beacon_node/network/src/sync/range_sync/chain.rs +++ b/beacon_node/network/src/sync/range_sync/chain.rs @@ -1,5 +1,4 @@ use super::batch::{BatchInfo, BatchProcessingResult, BatchState}; -use super::BatchTy; use crate::beacon_processor::{ChainSegmentProcessId, WorkEvent as BeaconWorkEvent}; use crate::sync::{ manager::Id, network_context::SyncNetworkContext, BatchOperationOutcome, BatchProcessResult, diff --git a/beacon_node/network/src/sync/range_sync/range.rs b/beacon_node/network/src/sync/range_sync/range.rs index a4869f75b7..1e3474fa5a 100644 --- a/beacon_node/network/src/sync/range_sync/range.rs +++ b/beacon_node/network/src/sync/range_sync/range.rs @@ -388,12 +388,11 @@ mod tests { use slog::{o, Drain}; use tokio::sync::mpsc; - use slot_clock::{SlotClock, SystemTimeSlotClock}; + use slot_clock::SystemTimeSlotClock; use std::collections::HashSet; use std::sync::Arc; - use std::time::Duration; use store::MemoryStore; - use types::{Hash256, MainnetEthSpec, MinimalEthSpec as E}; + use types::{Hash256, MinimalEthSpec as E}; #[derive(Debug)] struct FakeStorage { diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs index d9041dd636..e940c0f25e 100644 --- a/beacon_node/store/src/lib.rs +++ b/beacon_node/store/src/lib.rs @@ -7,6 +7,7 @@ //! //! Provides a simple API for storing/retrieving all types that sometimes needs type-hints. See //! tests for implementation examples. 
+#![allow(dead_code)] #[macro_use] extern crate lazy_static; diff --git a/consensus/state_processing/src/consensus_context.rs b/consensus/state_processing/src/consensus_context.rs index f5585426ce..23dd989f98 100644 --- a/consensus/state_processing/src/consensus_context.rs +++ b/consensus/state_processing/src/consensus_context.rs @@ -1,13 +1,10 @@ use crate::common::get_indexed_attestation; use crate::per_block_processing::errors::{AttestationInvalid, BlockOperationError}; use std::collections::{hash_map::Entry, HashMap}; -use std::marker::PhantomData; -use std::sync::Arc; use tree_hash::TreeHash; use types::{ AbstractExecPayload, Attestation, AttestationData, BeaconState, BeaconStateError, BitList, - BlobsSidecar, ChainSpec, Epoch, EthSpec, ExecPayload, Hash256, IndexedAttestation, - SignedBeaconBlock, Slot, + ChainSpec, Epoch, EthSpec, Hash256, IndexedAttestation, SignedBeaconBlock, Slot, }; #[derive(Debug)] @@ -21,8 +18,6 @@ pub struct ConsensusContext { /// Cache of indexed attestations constructed during block processing. indexed_attestations: HashMap<(AttestationData, BitList), IndexedAttestation>, - /// Should only be populated if the sidecar has not been validated. - blobs_sidecar: Option>>, /// Whether `validate_blobs_sidecar` has successfully passed. blobs_sidecar_validated: bool, /// Whether `verify_kzg_commitments_against_transactions` has successfully passed. 
@@ -49,7 +44,6 @@ impl ConsensusContext { proposer_index: None, current_block_root: None, indexed_attestations: HashMap::new(), - blobs_sidecar: None, blobs_sidecar_validated: false, blobs_verified_vs_txs: false, } diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index 4fcf07c8a8..4b5e77e0a4 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -42,8 +42,6 @@ mod verify_deposit; mod verify_exit; mod verify_proposer_slashing; -use crate::common::decrease_balance; - #[cfg(feature = "arbitrary-fuzz")] use arbitrary::Arbitrary; diff --git a/consensus/types/src/blobs_sidecar.rs b/consensus/types/src/blobs_sidecar.rs index f1e2a4bb16..1a31786b6f 100644 --- a/consensus/types/src/blobs_sidecar.rs +++ b/consensus/types/src/blobs_sidecar.rs @@ -1,5 +1,5 @@ use crate::test_utils::TestRandom; -use crate::{Blob, EthSpec, Hash256, SignedBeaconBlock, SignedRoot, Slot}; +use crate::{Blob, EthSpec, Hash256, SignedRoot, Slot}; use kzg::KzgProof; use serde_derive::{Deserialize, Serialize}; use ssz::Encode; diff --git a/lcli/src/new_testnet.rs b/lcli/src/new_testnet.rs index d6e093c173..d8973980fe 100644 --- a/lcli/src/new_testnet.rs +++ b/lcli/src/new_testnet.rs @@ -2,24 +2,21 @@ use clap::ArgMatches; use clap_utils::{parse_optional, parse_required, parse_ssz_optional}; use eth2_hashing::hash; use eth2_network_config::Eth2NetworkConfig; -use genesis::interop_genesis_state; use ssz::Decode; use ssz::Encode; use state_processing::process_activations; -use state_processing::upgrade::{ - upgrade_to_altair, upgrade_to_bellatrix, upgrade_to_capella, upgrade_to_eip4844, -}; +use state_processing::upgrade::{upgrade_to_altair, upgrade_to_bellatrix}; use std::fs::File; use std::io::Read; use std::path::PathBuf; use std::str::FromStr; use std::time::{SystemTime, UNIX_EPOCH}; +use types::ExecutionBlockHash; use types::{ 
test_utils::generate_deterministic_keypairs, Address, BeaconState, ChainSpec, Config, Eth1Data, EthSpec, ExecutionPayloadHeader, ExecutionPayloadHeaderMerge, Hash256, Keypair, PublicKey, Validator, }; -use types::{BeaconStateMerge, ExecutionBlockHash}; pub fn run(testnet_dir_path: PathBuf, matches: &ArgMatches) -> Result<(), String> { let deposit_contract_address: Address = parse_required(matches, "deposit-contract-address")?;