From 9f0696f93fce05c4b411ee43663d1633e4600ecf Mon Sep 17 00:00:00 2001 From: Mac L Date: Mon, 6 Apr 2026 06:54:41 +0400 Subject: [PATCH 01/27] Remove unused `exit-future` (#9095) Remove the `exit-future` crate as it is unused. Co-Authored-By: Mac L --- Cargo.toml | 1 - 1 file changed, 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 96d57e0210..db6853d44d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -145,7 +145,6 @@ ethereum_serde_utils = "0.8.0" ethereum_ssz = { version = "0.10.0", features = ["context_deserialize"] } ethereum_ssz_derive = "0.10.0" execution_layer = { path = "beacon_node/execution_layer" } -exit-future = "0.2" filesystem = { path = "common/filesystem" } fixed_bytes = { path = "consensus/fixed_bytes" } fnv = "1" From 243eecc46528fccecfc7e7d762d674502df04ed9 Mon Sep 17 00:00:00 2001 From: Mac L Date: Tue, 7 Apr 2026 10:23:11 +0400 Subject: [PATCH 02/27] Add `cargo-hack` to CI to check crate features (#8927) #8926 Add a step to CI which runs `cargo check` across all combinations of features for certain crates using `cargo-hack` Co-Authored-By: Mac L --- .github/workflows/test-suite.yml | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index d9efbfc148..c2ce6f89be 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -427,6 +427,22 @@ jobs: cache-target: release - name: Run Makefile to trigger the bash script run: make cli-local + cargo-hack: + name: cargo-hack + needs: [check-labels] + if: needs.check-labels.outputs.skip_ci != 'true' + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v5 + - name: Get latest version of stable Rust + uses: moonrepo/setup-rust@v1 + with: + channel: stable + - uses: taiki-e/install-action@cargo-hack + - name: Check types feature powerset + run: cargo hack check -p types --feature-powerset --no-dev-deps --exclude-features arbitrary-fuzz,portable + - name: Check eth2 feature powerset + 
run: cargo hack check -p eth2 --feature-powerset --no-dev-deps cargo-sort: name: cargo-sort needs: [check-labels] @@ -470,6 +486,7 @@ jobs: 'compile-with-beta-compiler', 'cli-check', 'lockbud', + 'cargo-hack', 'cargo-sort', ] steps: From 2749e18d0e35e6f148642623327acac5a7066658 Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Thu, 9 Apr 2026 03:44:19 +0900 Subject: [PATCH 03/27] Gloas serve post block state for finalized/justified state requests (#9092) Co-Authored-By: Eitan Seri- Levi Co-Authored-By: Pawan Dhananjay --- beacon_node/http_api/src/block_id.rs | 10 +++--- beacon_node/http_api/src/state_id.rs | 51 ++++++++++++++++++++++------ common/eth2/src/types.rs | 8 +++++ 3 files changed, 54 insertions(+), 15 deletions(-) diff --git a/beacon_node/http_api/src/block_id.rs b/beacon_node/http_api/src/block_id.rs index e6b1ed0879..f4645f1304 100644 --- a/beacon_node/http_api/src/block_id.rs +++ b/beacon_node/http_api/src/block_id.rs @@ -1,5 +1,5 @@ use crate::version::inconsistent_fork_rejection; -use crate::{ExecutionOptimistic, state_id::checkpoint_slot_and_execution_optimistic}; +use crate::{ExecutionOptimistic, state_id::checkpoint_block_and_execution_optimistic}; use beacon_chain::kzg_utils::reconstruct_blobs; use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes, WhenSlotSkipped}; use eth2::beacon_response::{ExecutionOptimisticFinalizedMetadata, UnversionedResponse}; @@ -60,15 +60,15 @@ impl BlockId { CoreBlockId::Finalized => { let finalized_checkpoint = chain.canonical_head.cached_head().finalized_checkpoint(); - let (_slot, execution_optimistic) = - checkpoint_slot_and_execution_optimistic(chain, finalized_checkpoint)?; + let (_block, execution_optimistic) = + checkpoint_block_and_execution_optimistic(chain, finalized_checkpoint)?; Ok((finalized_checkpoint.root, execution_optimistic, true)) } CoreBlockId::Justified => { let justified_checkpoint = chain.canonical_head.cached_head().justified_checkpoint(); - let (_slot, execution_optimistic) = 
- checkpoint_slot_and_execution_optimistic(chain, justified_checkpoint)?; + let (_block, execution_optimistic) = + checkpoint_block_and_execution_optimistic(chain, justified_checkpoint)?; Ok((justified_checkpoint.root, execution_optimistic, false)) } CoreBlockId::Slot(slot) => { diff --git a/beacon_node/http_api/src/state_id.rs b/beacon_node/http_api/src/state_id.rs index 13fb9b2c58..ce18388926 100644 --- a/beacon_node/http_api/src/state_id.rs +++ b/beacon_node/http_api/src/state_id.rs @@ -2,6 +2,7 @@ use crate::ExecutionOptimistic; use crate::metrics; use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; use eth2::types::StateId as CoreStateId; +use proto_array::Block; use std::fmt; use std::str::FromStr; use types::{BeaconState, Checkpoint, EthSpec, Fork, Hash256, Slot}; @@ -19,6 +20,8 @@ impl StateId { Self(CoreStateId::Slot(slot)) } + // TODO(gloas) add tests for finalized and justified checkpoint states to ensure + // we return the post block state for gloas /// Return the state root identified by `self`. 
pub fn root( &self, @@ -41,15 +44,41 @@ impl StateId { CoreStateId::Finalized => { let finalized_checkpoint = chain.canonical_head.cached_head().finalized_checkpoint(); - let (slot, execution_optimistic) = - checkpoint_slot_and_execution_optimistic(chain, finalized_checkpoint)?; + + let slot = finalized_checkpoint + .epoch + .start_slot(T::EthSpec::slots_per_epoch()); + let (block, execution_optimistic) = + checkpoint_block_and_execution_optimistic(chain, finalized_checkpoint)?; + + if chain + .spec + .fork_name_at_slot::(block.slot) + .gloas_enabled() + { + return Ok((block.state_root, execution_optimistic, true)); + } + (slot, execution_optimistic, true) } CoreStateId::Justified => { let justified_checkpoint = chain.canonical_head.cached_head().justified_checkpoint(); - let (slot, execution_optimistic) = - checkpoint_slot_and_execution_optimistic(chain, justified_checkpoint)?; + + let slot = justified_checkpoint + .epoch + .start_slot(T::EthSpec::slots_per_epoch()); + let (block, execution_optimistic) = + checkpoint_block_and_execution_optimistic(chain, justified_checkpoint)?; + + if chain + .spec + .fork_name_at_slot::(block.slot) + .gloas_enabled() + { + return Ok((block.state_root, execution_optimistic, false)); + } + (slot, execution_optimistic, false) } CoreStateId::Slot(slot) => ( @@ -254,13 +283,11 @@ impl fmt::Display for StateId { } } -/// Returns the first slot of the checkpoint's `epoch` and the execution status of the checkpoint's -/// `root`. -pub fn checkpoint_slot_and_execution_optimistic( +/// Returns checkpoint block and the execution status of the checkpoint's `root`. 
+pub fn checkpoint_block_and_execution_optimistic( chain: &BeaconChain, checkpoint: Checkpoint, -) -> Result<(Slot, ExecutionOptimistic), warp::reject::Rejection> { - let slot = checkpoint.epoch.start_slot(T::EthSpec::slots_per_epoch()); +) -> Result<(Block, ExecutionOptimistic), warp::reject::Rejection> { let fork_choice = chain.canonical_head.fork_choice_read_lock(); let finalized_checkpoint = fork_choice.cached_fork_choice_view().finalized_checkpoint; @@ -277,5 +304,9 @@ pub fn checkpoint_slot_and_execution_optimistic( .map_err(BeaconChainError::ForkChoiceError) .map_err(warp_utils::reject::unhandled_error)?; - Ok((slot, execution_optimistic)) + let block = fork_choice.get_block(&checkpoint.root).ok_or_else(|| { + warp_utils::reject::custom_not_found(format!("Block {:?} not found", checkpoint.root)) + })?; + + Ok((block, execution_optimistic)) } diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index 54e9c98b5b..e85565c580 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -125,7 +125,15 @@ impl fmt::Display for BlockId { pub enum StateId { Head, Genesis, + /// Pre-gloas the finalized state is the checkpoint block state + /// advanced to the epoch boundary. + /// Post-gloas this state is always the checkpoint post-block state and is not advanced + /// to the epoch boundary. Finalized, + /// Pre-gloas the justified state is the checkpoint block state + /// advanced to the epoch boundary. + /// Post-gloas this state is always the checkpoint post-block state and is not advanced + /// to the epoch boundary. 
Justified, Slot(Slot), Root(Hash256), From 815aad37315ff513ff0787db6881ab9c520f9b06 Mon Sep 17 00:00:00 2001 From: Mike Jerred Date: Thu, 9 Apr 2026 06:36:45 +0100 Subject: [PATCH 04/27] Allow --validator-dir to be specified after subcommands (#8329) #3768 Made the --validator-dir flag global so that it can be specified in any order Co-Authored-By: Mike Jerred Co-Authored-By: chonghe <44791194+chong-he@users.noreply.github.com> --- account_manager/src/validator/mod.rs | 1 + lighthouse/tests/account_manager.rs | 18 +++++++++--------- 2 files changed, 10 insertions(+), 9 deletions(-) diff --git a/account_manager/src/validator/mod.rs b/account_manager/src/validator/mod.rs index 5a6c9439a6..2a92ad2d37 100644 --- a/account_manager/src/validator/mod.rs +++ b/account_manager/src/validator/mod.rs @@ -28,6 +28,7 @@ pub fn cli_app() -> Command { "The path to search for validator directories. \ Defaults to ~/.lighthouse/{network}/validators", ) + .global(true) .action(ArgAction::Set) .conflicts_with("datadir"), ) diff --git a/lighthouse/tests/account_manager.rs b/lighthouse/tests/account_manager.rs index 9bfcae85e5..76839dea39 100644 --- a/lighthouse/tests/account_manager.rs +++ b/lighthouse/tests/account_manager.rs @@ -248,9 +248,9 @@ impl TestValidator { store_withdrawal_key: bool, ) -> Result, String> { let mut cmd = validator_cmd(); - cmd.arg(format!("--{}", VALIDATOR_DIR_FLAG)) + cmd.arg(CREATE_CMD) + .arg(format!("--{}", VALIDATOR_DIR_FLAG)) .arg(self.validator_dir.clone().into_os_string()) - .arg(CREATE_CMD) .arg(format!("--{}", WALLETS_DIR_FLAG)) .arg(self.wallet.base_dir().into_os_string()) .arg(format!("--{}", WALLET_NAME_FLAG)) @@ -427,9 +427,9 @@ fn validator_import_launchpad() { File::create(src_dir.path().join(NOT_KEYSTORE_NAME)).unwrap(); let mut child = validator_cmd() + .arg(IMPORT_CMD) .arg(format!("--{}", VALIDATOR_DIR_FLAG)) .arg(dst_dir.path().as_os_str()) - .arg(IMPORT_CMD) .arg(format!("--{}", STDIN_INPUTS_FLAG)) // Using tty does not work well with 
tests. .arg(format!("--{}", import::DIR_FLAG)) .arg(src_dir.path().as_os_str()) @@ -479,10 +479,10 @@ fn validator_import_launchpad() { // Disable all the validators in validator_definition. output_result( validator_cmd() - .arg(format!("--{}", VALIDATOR_DIR_FLAG)) - .arg(dst_dir.path().as_os_str()) .arg(MODIFY_CMD) .arg(DISABLE) + .arg(format!("--{}", VALIDATOR_DIR_FLAG)) + .arg(dst_dir.path().as_os_str()) .arg(format!("--{}", ALL)), ) .unwrap(); @@ -514,10 +514,10 @@ fn validator_import_launchpad() { // Enable keystore validator again output_result( validator_cmd() - .arg(format!("--{}", VALIDATOR_DIR_FLAG)) - .arg(dst_dir.path().as_os_str()) .arg(MODIFY_CMD) .arg(ENABLE) + .arg(format!("--{}", VALIDATOR_DIR_FLAG)) + .arg(dst_dir.path().as_os_str()) .arg(format!("--{}", PUBKEY_FLAG)) .arg(format!("{}", keystore.public_key().unwrap())), ) @@ -560,9 +560,9 @@ fn validator_import_launchpad_no_password_then_add_password() { let validator_import_key_cmd = || { validator_cmd() + .arg(IMPORT_CMD) .arg(format!("--{}", VALIDATOR_DIR_FLAG)) .arg(dst_dir.path().as_os_str()) - .arg(IMPORT_CMD) .arg(format!("--{}", STDIN_INPUTS_FLAG)) // Using tty does not work well with tests. 
.arg(format!("--{}", import::DIR_FLAG)) .arg(src_dir.path().as_os_str()) @@ -700,9 +700,9 @@ fn validator_import_launchpad_password_file() { .unwrap(); let mut child = validator_cmd() + .arg(IMPORT_CMD) .arg(format!("--{}", VALIDATOR_DIR_FLAG)) .arg(dst_dir.path().as_os_str()) - .arg(IMPORT_CMD) .arg(format!("--{}", import::DIR_FLAG)) .arg(src_dir.path().as_os_str()) .arg(format!("--{}", import::REUSE_PASSWORD_FLAG)) From 8681e8e06ee9d1d26d655e588111ed480d1e656c Mon Sep 17 00:00:00 2001 From: Mark Liu Date: Thu, 9 Apr 2026 15:36:49 +1000 Subject: [PATCH 05/27] Reduce slow test runtimes to under 60s (#9012) Co-Authored-By: Mark Liu Co-Authored-By: Michael Sproul --- .../src/peer_manager/mod.rs | 3 ++ .../lighthouse_network/src/rpc/codec.rs | 4 ++- .../lighthouse_network/tests/rpc_tests.rs | 4 ++- .../initialized_validators/src/key_cache.rs | 30 +++++++++++++++---- 4 files changed, 34 insertions(+), 7 deletions(-) diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index 2edd9de2d9..d7285c5c8e 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -3087,6 +3087,9 @@ mod tests { const MAX_TEST_PEERS: usize = 300; proptest! { + // 64 cases (down from default 256) keeps this test under 10s while + // still providing good random coverage of the pruning logic. 
+ #![proptest_config(ProptestConfig::with_cases(64))] #[test] fn prune_excess_peers(peer_conditions in proptest::collection::vec(peer_condition_strategy(), DEFAULT_TARGET_PEERS..=MAX_TEST_PEERS)) { let target_peer_count = DEFAULT_TARGET_PEERS; diff --git a/beacon_node/lighthouse_network/src/rpc/codec.rs b/beacon_node/lighthouse_network/src/rpc/codec.rs index 346e350825..75e035ae82 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec.rs @@ -1088,9 +1088,11 @@ mod tests { let mut block: BeaconBlockBellatrix<_, FullPayload> = BeaconBlockBellatrix::empty(spec); + // 11,000 × 1KB ≈ 11MB, just above the 10MB max_payload_size. + // Previously used 100,000 txs (~100MB) which made this test take >60s. let tx = VariableList::try_from(vec![0; 1024]).unwrap(); let txs = - VariableList::try_from(std::iter::repeat_n(tx, 100000).collect::>()).unwrap(); + VariableList::try_from(std::iter::repeat_n(tx, 11000).collect::>()).unwrap(); block.body.execution_payload.execution_payload.transactions = txs; diff --git a/beacon_node/lighthouse_network/tests/rpc_tests.rs b/beacon_node/lighthouse_network/tests/rpc_tests.rs index debe30b34f..d3f47c88bd 100644 --- a/beacon_node/lighthouse_network/tests/rpc_tests.rs +++ b/beacon_node/lighthouse_network/tests/rpc_tests.rs @@ -46,8 +46,10 @@ fn bellatrix_block_small(spec: &ChainSpec) -> BeaconBlock { /// Hence, we generate a bellatrix block just greater than `MAX_RPC_SIZE` to test rejection on the rpc layer. fn bellatrix_block_large(spec: &ChainSpec) -> BeaconBlock { let mut block = BeaconBlockBellatrix::::empty(spec); + // 11,000 × 1KB ≈ 11MB, just above the 10MB max_payload_size. + // Previously used 100,000 txs (~100MB) which caused hangs and timeouts. 
let tx = VariableList::try_from(vec![0; 1024]).unwrap(); - let txs = VariableList::try_from(std::iter::repeat_n(tx, 100000).collect::>()).unwrap(); + let txs = VariableList::try_from(std::iter::repeat_n(tx, 11000).collect::>()).unwrap(); block.body.execution_payload.execution_payload.transactions = txs; diff --git a/validator_client/initialized_validators/src/key_cache.rs b/validator_client/initialized_validators/src/key_cache.rs index b600013c8b..c2f60acc27 100644 --- a/validator_client/initialized_validators/src/key_cache.rs +++ b/validator_client/initialized_validators/src/key_cache.rs @@ -1,7 +1,7 @@ use account_utils::write_file_via_temporary; use bls::{Keypair, PublicKey}; use eth2_keystore::json_keystore::{ - Aes128Ctr, ChecksumModule, Cipher, CipherModule, Crypto, EmptyMap, EmptyString, KdfModule, + Aes128Ctr, ChecksumModule, Cipher, CipherModule, Crypto, EmptyMap, EmptyString, Kdf, KdfModule, Sha256Checksum, }; use eth2_keystore::{ @@ -65,10 +65,14 @@ impl KeyCache { } pub fn init_crypto() -> Crypto { + Self::build_crypto(default_kdf) + } + + fn build_crypto(kdf_fn: fn(Vec) -> Kdf) -> Crypto { let salt = rand::rng().random::<[u8; SALT_SIZE]>(); let iv = rand::rng().random::<[u8; IV_SIZE]>().to_vec().into(); - let kdf = default_kdf(salt.to_vec()); + let kdf = kdf_fn(salt.to_vec()); let cipher = Cipher::Aes128Ctr(Aes128Ctr { iv }); Crypto { @@ -116,7 +120,11 @@ impl KeyCache { } fn encrypt(&mut self) -> Result<(), Error> { - self.crypto = Self::init_crypto(); + self.encrypt_with(default_kdf) + } + + fn encrypt_with(&mut self, kdf_fn: fn(Vec) -> Kdf) -> Result<(), Error> { + self.crypto = Self::build_crypto(kdf_fn); let secret_map: SerializedKeyMap = self .pairs .iter() @@ -268,7 +276,19 @@ pub enum Error { #[cfg(test)] mod tests { use super::*; - use eth2_keystore::json_keystore::HexBytes; + use eth2_keystore::json_keystore::{HexBytes, Scrypt}; + + /// Scrypt with minimal cost (n=1024) for fast test execution. 
+ /// Production uses n=262144 which takes ~45s per derivation. + fn insecure_kdf(salt: Vec) -> Kdf { + Kdf::Scrypt(Scrypt { + dklen: 32, + n: 1024, + p: 1, + r: 8, + salt: salt.into(), + }) + } #[tokio::test] async fn test_serialization() { @@ -302,7 +322,7 @@ mod tests { key_cache.add(keypair.clone(), uuid, password.clone()); } - key_cache.encrypt().unwrap(); + key_cache.encrypt_with(insecure_kdf).unwrap(); key_cache.state = State::DecryptedAndSaved; assert_eq!(&key_cache.uuids, &uuids); From 4b297c6ce85321b93ca3157348c9c3bcca216426 Mon Sep 17 00:00:00 2001 From: Roheemah <60899500+AbolareRoheemah@users.noreply.github.com> Date: Thu, 9 Apr 2026 06:43:50 +0100 Subject: [PATCH 06/27] added check for fee recipient per validator and added unit tests (#8454) Addresses #5403 - Added `check_fee_recipient()` method to validate individual validators - Added `check_all_fee_recipients()` to validate all validators on startup - Validator client now fails to start if any enabled validator lacks a fee recipient and no global flag is used. - Added Clear error messages to guide users on how to fix the issue - Added unit tests Co-Authored-By: AbolareRoheemah --- .../src/validator_definitions.rs | 290 +++++++++++++++++- validator_client/src/lib.rs | 3 + 2 files changed, 292 insertions(+), 1 deletion(-) diff --git a/common/account_utils/src/validator_definitions.rs b/common/account_utils/src/validator_definitions.rs index 0fc5bf5665..fe6481350c 100644 --- a/common/account_utils/src/validator_definitions.rs +++ b/common/account_utils/src/validator_definitions.rs @@ -12,7 +12,7 @@ use std::collections::HashSet; use std::fs::{self, File, create_dir_all}; use std::io; use std::path::{Path, PathBuf}; -use tracing::error; +use tracing::{debug, error}; use types::{Address, graffiti::GraffitiString}; use validator_dir::VOTING_KEYSTORE_FILE; use zeroize::Zeroizing; @@ -212,6 +212,16 @@ impl ValidatorDefinition { }, }) } + + pub fn check_fee_recipient(&self, global_fee_recipient: Option
) -> Option<&PublicKey> { + // Skip disabled validators. Also skip if validator has its own fee set, or the global flag is set + if !self.enabled || self.suggested_fee_recipient.is_some() || global_fee_recipient.is_some() + { + return None; + } + + Some(&self.voting_public_key) + } } /// A list of `ValidatorDefinition` that serves as a serde-able configuration file which defines a @@ -410,6 +420,52 @@ impl ValidatorDefinitions { .iter() .filter_map(|def| def.signing_definition.voting_keystore_password_path()) } + + /// Called after loading to run safety checks on all validators + pub fn check_all_fee_recipients( + &self, + global_fee_recipient: Option
, + ) -> Result<(), String> { + let missing: Vec<&PublicKey> = self + .0 + .iter() + .filter_map(|def| def.check_fee_recipient(global_fee_recipient)) + .collect(); + + if !missing.is_empty() { + let pubkeys = missing + .iter() + .map(|pk| pk.to_string()) + .collect::>() + .join(", "); + + return Err(format!( + "The following validators are missing a `suggested_fee_recipient`: {}. \ + Fix this by adding a `suggested_fee_recipient` in the \ + `validator_definitions.yml` or by supplying a fallback fee \ + recipient via the `--suggested-fee-recipient` flag.", + pubkeys + )); + } + + // Friendly reminder for users using the fallback flag + if global_fee_recipient.is_some() { + let count = self + .0 + .iter() + .filter(|d| d.enabled && d.suggested_fee_recipient.is_none()) + .count(); + if count > 0 { + debug!( + "The fallback --suggested-fee-recipient is being used for {} validator(s). \ + You may alternatively set the fee recipient for each validator individually via `validator_definitions.yml`.", + count + ); + } + } + + Ok(()) + } } /// Perform an exhaustive tree search of `dir`, adding any discovered voting keystore paths to @@ -485,6 +541,7 @@ pub fn is_voting_keystore(file_name: &str) -> bool { #[cfg(test)] mod tests { use super::*; + use bls::Keypair; use std::str::FromStr; #[test] @@ -682,4 +739,235 @@ mod tests { let def: ValidatorDefinition = yaml_serde::from_str(valid_builder_proposals).unwrap(); assert_eq!(def.builder_proposals, Some(true)); } + + #[test] + fn fee_recipient_check_enabled_validator_cases() { + let def = ValidatorDefinition { + enabled: true, + voting_public_key: PublicKey::from_str( + "0xaf3c7ddab7e293834710fca2d39d068f884455ede270e0d0293dc818e4f2f0f975355067e8437955cb29aec674e5c9e7" + ).unwrap(), + description: String::new(), + graffiti: None, + suggested_fee_recipient: None, + gas_limit: None, + builder_proposals: None, + builder_boost_factor: None, + prefer_builder_proposals: None, + signing_definition: SigningDefinition::LocalKeystore { + 
voting_keystore_path: PathBuf::new(), + voting_keystore_password_path: None, + voting_keystore_password: None, + } + }; + + // Should return Some(pubkey) when no fee recipient is set + let check_result = def.check_fee_recipient(None); + assert!(check_result.is_some()); + + // Should return None since global fee recipient is set + let global_fee_recipient = + Some(Address::from_str("0xa2e334e71511686bcfe38bb3ee1ad8f6babcc03d").unwrap()); + let check_result = def.check_fee_recipient(global_fee_recipient); + assert!(check_result.is_none()); + } + + #[test] + fn fee_recipient_check_passes_with_validator_specific() { + let def = ValidatorDefinition { + enabled: true, + voting_public_key: PublicKey::from_str( + "0xaf3c7ddab7e293834710fca2d39d068f884455ede270e0d0293dc818e4f2f0f975355067e8437955cb29aec674e5c9e7" + ).unwrap(), + description: String::new(), + graffiti: None, + suggested_fee_recipient: Some(Address::from_str("0xa2e334e71511686bcfe38bb3ee1ad8f6babcc03d").unwrap()), + gas_limit: None, + builder_proposals: None, + builder_boost_factor: None, + prefer_builder_proposals: None, + signing_definition: SigningDefinition::LocalKeystore { + voting_keystore_path: PathBuf::new(), + voting_keystore_password_path: None, + voting_keystore_password: None, + }, + }; + + // Should return None because suggested_fee_recipient is set + let check_result = def.check_fee_recipient(None); + assert!(check_result.is_none()); + } + + #[test] + fn fee_recipient_check_skips_disabled_validators() { + let def = ValidatorDefinition { + enabled: false, + voting_public_key: PublicKey::from_str( + "0xaf3c7ddab7e293834710fca2d39d068f884455ede270e0d0293dc818e4f2f0f975355067e8437955cb29aec674e5c9e7" + ).unwrap(), + description: String::new(), + graffiti: None, + suggested_fee_recipient: None, + gas_limit: None, + builder_proposals: None, + builder_boost_factor: None, + prefer_builder_proposals: None, + signing_definition: SigningDefinition::LocalKeystore { + voting_keystore_path: PathBuf::new(), + 
voting_keystore_password_path: None, + voting_keystore_password: None, + }, + }; + + // Should return None because validator is disabled + let check_result = def.check_fee_recipient(None); + assert!(check_result.is_none()); + } + + #[test] + fn check_all_fee_recipients_reports_all_missing() { + let keypair1 = Keypair::random(); + let keypair2 = Keypair::random(); + + let def1 = ValidatorDefinition { + enabled: true, + voting_public_key: keypair1.pk.clone(), + description: String::new(), + graffiti: None, + suggested_fee_recipient: None, + gas_limit: None, + builder_proposals: None, + builder_boost_factor: None, + prefer_builder_proposals: None, + signing_definition: SigningDefinition::LocalKeystore { + voting_keystore_path: PathBuf::new(), + voting_keystore_password_path: None, + voting_keystore_password: None, + }, + }; + + let def2 = ValidatorDefinition { + enabled: true, + voting_public_key: keypair2.pk.clone(), + description: String::new(), + graffiti: None, + suggested_fee_recipient: None, // Missing recipient + gas_limit: None, + builder_proposals: None, + builder_boost_factor: None, + prefer_builder_proposals: None, + signing_definition: SigningDefinition::LocalKeystore { + voting_keystore_path: PathBuf::new(), + voting_keystore_password_path: None, + voting_keystore_password: None, + }, + }; + + let defs = ValidatorDefinitions::from(vec![def1, def2]); + + // Should fail because both defs have no fee recipient and no global fee recipient is set + let result = defs.check_all_fee_recipients(None); + assert!(result.is_err()); + let err = result.unwrap_err(); + + // Check that both public keys are mentioned in the error message + let pk1_string = keypair1.pk.to_string(); + let pk2_string = keypair2.pk.to_string(); + + assert!(err.contains(&pk1_string), "Error message missing pubkey 1"); + assert!(err.contains(&pk2_string), "Error message missing pubkey 2"); + assert!(err.contains("are missing a `suggested_fee_recipient`")); + } + + #[test] + fn 
check_all_fee_recipients_passes_all_configured() { + let keypair = Keypair::random(); + let def1 = ValidatorDefinition { + enabled: true, + voting_public_key: keypair.pk.clone(), + description: String::new(), + graffiti: None, + suggested_fee_recipient: Some( + Address::from_str("0xa2e334e71511686bcfe38bb3ee1ad8f6babcc03d").unwrap(), + ), + gas_limit: None, + builder_proposals: None, + builder_boost_factor: None, + prefer_builder_proposals: None, + signing_definition: SigningDefinition::LocalKeystore { + voting_keystore_path: PathBuf::new(), + voting_keystore_password_path: None, + voting_keystore_password: None, + }, + }; + + let def2 = ValidatorDefinition { + enabled: true, + voting_public_key: keypair.pk.clone(), + description: String::new(), + graffiti: None, + suggested_fee_recipient: Some( + Address::from_str("0xb2e334e71511686bcfe38bb3ee1ad8f6babcc03d").unwrap(), + ), + gas_limit: None, + builder_proposals: None, + builder_boost_factor: None, + prefer_builder_proposals: None, + signing_definition: SigningDefinition::LocalKeystore { + voting_keystore_path: PathBuf::new(), + voting_keystore_password_path: None, + voting_keystore_password: None, + }, + }; + + let defs = ValidatorDefinitions::from(vec![def1, def2]); + + // Should pass - all validators have fee recipients + assert!(defs.check_all_fee_recipients(None).is_ok()); + } + + #[test] + fn check_all_fee_recipients_passes_with_global() { + let keypair = Keypair::random(); + let def1 = ValidatorDefinition { + enabled: true, + voting_public_key: keypair.pk.clone(), + description: String::new(), + graffiti: None, + suggested_fee_recipient: None, + gas_limit: None, + builder_proposals: None, + builder_boost_factor: None, + prefer_builder_proposals: None, + signing_definition: SigningDefinition::LocalKeystore { + voting_keystore_path: PathBuf::new(), + voting_keystore_password_path: None, + voting_keystore_password: None, + }, + }; + + let def2 = ValidatorDefinition { + enabled: true, + voting_public_key: 
keypair.pk.clone(), + description: String::new(), + graffiti: None, + suggested_fee_recipient: None, + gas_limit: None, + builder_proposals: None, + builder_boost_factor: None, + prefer_builder_proposals: None, + signing_definition: SigningDefinition::LocalKeystore { + voting_keystore_path: PathBuf::new(), + voting_keystore_password_path: None, + voting_keystore_password: None, + }, + }; + + let defs = ValidatorDefinitions::from(vec![def1, def2]); + + // Should pass - global fee recipient is set + let global_fee_recipient = + Some(Address::from_str("0xa2e334e71511686bcfe38bb3ee1ad8f6babcc03d").unwrap()); + assert!(defs.check_all_fee_recipients(global_fee_recipient).is_ok()); + } } diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index f70d5830ec..e26d5c3d30 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -187,6 +187,9 @@ impl ProductionValidatorClient { info!(new_validators, "Completed validator discovery"); } + // Check for all validators' fee recipient + validator_defs.check_all_fee_recipients(config.validator_store.fee_recipient)?; + let validators = InitializedValidators::from_definitions( validator_defs, config.validator_dir.clone(), From b95f99f130ace6ba105819174f74a8714ae51f2c Mon Sep 17 00:00:00 2001 From: CATS Date: Thu, 9 Apr 2026 07:54:10 +0200 Subject: [PATCH 07/27] feat(execution_layer): log more detail when JWT auth fails (#9051) Co-Authored-By: CATS Co-Authored-By: chonghe <44791194+chong-he@users.noreply.github.com> --- beacon_node/execution_layer/src/engine_api.rs | 2 +- beacon_node/execution_layer/src/engine_api/auth.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index 774eac5fe2..9c19e94c0e 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -79,7 +79,7 @@ impl From for Error { e.status(), 
Some(StatusCode::UNAUTHORIZED) | Some(StatusCode::FORBIDDEN) ) { - Error::Auth(auth::Error::InvalidToken) + Error::Auth(auth::Error::InvalidToken(e.to_string())) } else { Error::HttpClient(e.into()) } diff --git a/beacon_node/execution_layer/src/engine_api/auth.rs b/beacon_node/execution_layer/src/engine_api/auth.rs index af1ca195bd..3a27048b1a 100644 --- a/beacon_node/execution_layer/src/engine_api/auth.rs +++ b/beacon_node/execution_layer/src/engine_api/auth.rs @@ -14,7 +14,7 @@ pub const JWT_SECRET_LENGTH: usize = 32; #[derive(Debug)] pub enum Error { JWT(jsonwebtoken::errors::Error), - InvalidToken, + InvalidToken(String), InvalidKey(String), } From fb5a0434d7d3b485007fa5618b19de9a0f45e430 Mon Sep 17 00:00:00 2001 From: cui Date: Thu, 9 Apr 2026 13:54:14 +0800 Subject: [PATCH 08/27] Fix graffiti calculator test mock commit fallback (#9087) Co-Authored-By: Weixie Cui --- beacon_node/beacon_chain/src/graffiti_calculator.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/beacon_node/beacon_chain/src/graffiti_calculator.rs b/beacon_node/beacon_chain/src/graffiti_calculator.rs index 85470715c9..403873cc00 100644 --- a/beacon_node/beacon_chain/src/graffiti_calculator.rs +++ b/beacon_node/beacon_chain/src/graffiti_calculator.rs @@ -446,7 +446,7 @@ mod tests { DEFAULT_CLIENT_VERSION.code, mock_commit .strip_prefix("0x") - .unwrap_or("&mock_commit") + .unwrap_or(&mock_commit) .get(0..4) .expect("should get first 2 bytes in hex"), "LH", @@ -459,7 +459,7 @@ mod tests { DEFAULT_CLIENT_VERSION.code, mock_commit .strip_prefix("0x") - .unwrap_or("&mock_commit") + .unwrap_or(&mock_commit) .get(0..2) .expect("should get first 2 bytes in hex"), "LH", From 7c2dcfc0d66e983f979bb4bd2ea6ac982ad22173 Mon Sep 17 00:00:00 2001 From: Mac L Date: Thu, 9 Apr 2026 12:41:02 +0400 Subject: [PATCH 09/27] Refactor `timestamp_now` (#9094) #9077 Where possible replaces all instances of `validator_monitor::timestamp_now` with 
`chain.slot_clock.now_duration().unwrap_or_default()`. Where chain/slot_clock is not available, instead replace it with a convenience function `slot_clock::timestamp_now`. Remove the `validator_monitor::timestamp_now` function. Co-Authored-By: Mac L --- .../src/data_column_verification.rs | 6 +-- .../beacon_chain/src/fetch_blobs/mod.rs | 2 +- .../beacon_chain/src/validator_monitor.rs | 9 +--- .../http_api/src/publish_attestations.rs | 8 ++-- beacon_node/http_api/src/publish_blocks.rs | 10 +++-- beacon_node/http_api/src/sync_committees.rs | 9 ++-- beacon_node/http_api/src/validator/mod.rs | 3 +- beacon_node/network/src/router.rs | 41 ++++++++----------- beacon_node/network/src/sync/manager.rs | 11 +++-- .../src/sync/network_context/custody.rs | 7 +++- .../src/sync/network_context/requests.rs | 2 +- common/slot_clock/src/lib.rs | 12 +++++- 12 files changed, 60 insertions(+), 60 deletions(-) diff --git a/beacon_node/beacon_chain/src/data_column_verification.rs b/beacon_node/beacon_chain/src/data_column_verification.rs index f47de01ddc..f2cec0980f 100644 --- a/beacon_node/beacon_chain/src/data_column_verification.rs +++ b/beacon_node/beacon_chain/src/data_column_verification.rs @@ -5,13 +5,12 @@ use crate::kzg_utils::{reconstruct_data_columns, validate_data_columns}; use crate::observed_data_sidecars::{ Error as ObservedDataSidecarsError, ObservationKey, ObservationStrategy, Observe, }; -use crate::validator_monitor::timestamp_now; use crate::{BeaconChain, BeaconChainError, BeaconChainTypes, metrics}; use educe::Educe; use fork_choice::ProtoBlock; use kzg::{Error as KzgError, Kzg}; use proto_array::Block; -use slot_clock::SlotClock; +use slot_clock::{SlotClock, timestamp_now}; use ssz_derive::Encode; use ssz_types::VariableList; use std::iter; @@ -570,8 +569,9 @@ pub fn validate_data_column_sidecar_for_gossip_fulu Duration { - SystemTime::now() - .duration_since(UNIX_EPOCH) - .unwrap_or_else(|_| Duration::from_secs(0)) -} - fn u64_to_i64(n: impl Into) -> i64 { 
i64::try_from(n.into()).unwrap_or(i64::MAX) } diff --git a/beacon_node/http_api/src/publish_attestations.rs b/beacon_node/http_api/src/publish_attestations.rs index 947edf56d9..b93f2a0b7b 100644 --- a/beacon_node/http_api/src/publish_attestations.rs +++ b/beacon_node/http_api/src/publish_attestations.rs @@ -35,15 +35,13 @@ //! appears that this validator is capable of producing valid //! attestations and there's no immediate cause for concern. use crate::task_spawner::{Priority, TaskSpawner}; -use beacon_chain::{ - AttestationError, BeaconChain, BeaconChainError, BeaconChainTypes, - validator_monitor::timestamp_now, -}; +use beacon_chain::{AttestationError, BeaconChain, BeaconChainError, BeaconChainTypes}; use beacon_processor::work_reprocessing_queue::{QueuedUnaggregate, ReprocessQueueMessage}; use beacon_processor::{Work, WorkEvent}; use eth2::types::Failure; use lighthouse_network::PubsubMessage; use network::NetworkMessage; +use slot_clock::SlotClock; use std::sync::Arc; use std::time::Duration; use tokio::sync::{mpsc::UnboundedSender, oneshot}; @@ -138,7 +136,7 @@ pub async fn publish_attestations( .collect::>(); // Gossip validate and publish attestations that can be immediately processed. 
- let seen_timestamp = timestamp_now(); + let seen_timestamp = chain.slot_clock.now_duration().unwrap_or_default(); let mut prelim_results = task_spawner .clone() .blocking_task(Priority::P0, move || { diff --git a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs index eb7e56e9cc..340b0bbbed 100644 --- a/beacon_node/http_api/src/publish_blocks.rs +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -4,7 +4,7 @@ use std::future::Future; use beacon_chain::blob_verification::{GossipBlobError, GossipVerifiedBlob}; use beacon_chain::block_verification_types::{AsBlock, LookupBlock}; use beacon_chain::data_column_verification::GossipVerifiedDataColumn; -use beacon_chain::validator_monitor::{get_block_delay_ms, timestamp_now}; +use beacon_chain::validator_monitor::get_block_delay_ms; use beacon_chain::{ AvailabilityProcessingStatus, BeaconChain, BeaconChainError, BeaconChainTypes, BlockError, IntoGossipVerifiedBlock, NotifyExecutionLayer, build_blob_data_column_sidecars, @@ -19,6 +19,7 @@ use lighthouse_network::PubsubMessage; use network::NetworkMessage; use rand::prelude::SliceRandom; use reqwest::StatusCode; +use slot_clock::SlotClock; use std::marker::PhantomData; use std::sync::Arc; use std::sync::atomic::{AtomicBool, Ordering}; @@ -88,7 +89,7 @@ pub async fn publish_block>( validation_level: BroadcastValidation, duplicate_status_code: StatusCode, ) -> Result { - let seen_timestamp = timestamp_now(); + let seen_timestamp = chain.slot_clock.now_duration().unwrap_or_default(); let block_publishing_delay_for_testing = chain.config.block_publishing_delay; let data_column_publishing_delay_for_testing = chain.config.data_column_publishing_delay; @@ -113,11 +114,12 @@ pub async fn publish_block>( debug!("Signed block received in HTTP API"); /* actually publish a block */ + let publish_chain = chain.clone(); let publish_block_p2p = move |block: Arc>, sender, seen_timestamp| -> Result<(), BlockError> { - let publish_timestamp = 
timestamp_now(); + let publish_timestamp = publish_chain.slot_clock.now_duration().unwrap_or_default(); let publish_delay = publish_timestamp .checked_sub(seen_timestamp) .unwrap_or_else(|| Duration::from_secs(0)); @@ -676,7 +678,7 @@ pub async fn reconstruct_block( // us. late_block_logging( &chain, - timestamp_now(), + chain.slot_clock.now_duration().unwrap_or_default(), block.message(), block_root, "builder", diff --git a/beacon_node/http_api/src/sync_committees.rs b/beacon_node/http_api/src/sync_committees.rs index efba0056b9..0dba4ff429 100644 --- a/beacon_node/http_api/src/sync_committees.rs +++ b/beacon_node/http_api/src/sync_committees.rs @@ -4,10 +4,7 @@ use crate::utils::publish_pubsub_message; use beacon_chain::sync_committee_verification::{ Error as SyncVerificationError, VerifiedSyncCommitteeMessage, }; -use beacon_chain::{ - BeaconChain, BeaconChainError, BeaconChainTypes, StateSkipConfig, - validator_monitor::timestamp_now, -}; +use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes, StateSkipConfig}; use eth2::types::{self as api_types}; use lighthouse_network::PubsubMessage; use network::NetworkMessage; @@ -188,7 +185,7 @@ pub fn process_sync_committee_signatures( ) -> Result<(), warp::reject::Rejection> { let mut failures = vec![]; - let seen_timestamp = timestamp_now(); + let seen_timestamp = chain.slot_clock.now_duration().unwrap_or_default(); for (i, sync_committee_signature) in sync_committee_signatures.iter().enumerate() { let subnet_positions = match get_subnet_positions_for_sync_committee_message( @@ -319,7 +316,7 @@ pub fn process_signed_contribution_and_proofs( let mut verified_contributions = Vec::with_capacity(signed_contribution_and_proofs.len()); let mut failures = vec![]; - let seen_timestamp = timestamp_now(); + let seen_timestamp = chain.slot_clock.now_duration().unwrap_or_default(); if let Some(latest_optimistic_update) = chain .light_client_server_cache diff --git a/beacon_node/http_api/src/validator/mod.rs 
b/beacon_node/http_api/src/validator/mod.rs index 412851233e..7533510277 100644 --- a/beacon_node/http_api/src/validator/mod.rs +++ b/beacon_node/http_api/src/validator/mod.rs @@ -9,7 +9,6 @@ use crate::utils::{ use crate::version::{V1, V2, V3, unsupported_version_rejection}; use crate::{StateId, attester_duties, proposer_duties, sync_committees}; use beacon_chain::attestation_verification::VerifiedAttestation; -use beacon_chain::validator_monitor::timestamp_now; use beacon_chain::{AttestationError, BeaconChain, BeaconChainError, BeaconChainTypes}; use bls::PublicKeyBytes; use eth2::types::{ @@ -871,7 +870,7 @@ pub fn post_validator_aggregate_and_proofs( network_tx: UnboundedSender>| { task_spawner.blocking_json_task(Priority::P0, move || { not_synced_filter?; - let seen_timestamp = timestamp_now(); + let seen_timestamp = chain.slot_clock.now_duration().unwrap_or_default(); let mut verified_aggregates = Vec::with_capacity(aggregates.len()); let mut messages = Vec::with_capacity(aggregates.len()); let mut failures = Vec::new(); diff --git a/beacon_node/network/src/router.rs b/beacon_node/network/src/router.rs index e6982e6a84..3f0e329e91 100644 --- a/beacon_node/network/src/router.rs +++ b/beacon_node/network/src/router.rs @@ -19,8 +19,8 @@ use lighthouse_network::{ }; use logging::TimeLatch; use logging::crit; +use slot_clock::SlotClock; use std::sync::Arc; -use std::time::{Duration, SystemTime, UNIX_EPOCH}; use tokio::sync::mpsc; use tokio_stream::wrappers::UnboundedReceiverStream; use tracing::{debug, error, trace, warn}; @@ -351,6 +351,7 @@ impl Router { gossip_message: PubsubMessage, should_process: bool, ) { + let seen_timestamp = self.chain.slot_clock.now_duration().unwrap_or_default(); match gossip_message { PubsubMessage::AggregateAndProofAttestation(aggregate_and_proof) => self .handle_beacon_processor_send_result( @@ -358,7 +359,7 @@ impl Router { message_id, peer_id, *aggregate_and_proof, - timestamp_now(), + seen_timestamp, ), ), 
PubsubMessage::Attestation(subnet_attestation) => self @@ -369,7 +370,7 @@ impl Router { subnet_attestation.1, subnet_attestation.0, should_process, - timestamp_now(), + seen_timestamp, ), ), PubsubMessage::BeaconBlock(block) => self.handle_beacon_processor_send_result( @@ -378,7 +379,7 @@ impl Router { peer_id, self.network_globals.client(&peer_id), block, - timestamp_now(), + seen_timestamp, ), ), PubsubMessage::BlobSidecar(data) => { @@ -390,7 +391,7 @@ impl Router { self.network_globals.client(&peer_id), blob_index, blob_sidecar, - timestamp_now(), + seen_timestamp, ), ) } @@ -403,7 +404,7 @@ impl Router { peer_id, subnet_id, column_sidecar, - timestamp_now(), + seen_timestamp, ), ) } @@ -450,7 +451,7 @@ impl Router { message_id, peer_id, *contribution_and_proof, - timestamp_now(), + seen_timestamp, ), ) } @@ -465,7 +466,7 @@ impl Router { peer_id, sync_committtee_msg.1, sync_committtee_msg.0, - timestamp_now(), + seen_timestamp, ), ) } @@ -480,7 +481,7 @@ impl Router { message_id, peer_id, *light_client_finality_update, - timestamp_now(), + seen_timestamp, ), ) } @@ -496,7 +497,7 @@ impl Router { message_id, peer_id, *light_client_optimistic_update, - timestamp_now(), + seen_timestamp, ), ) } @@ -516,7 +517,7 @@ impl Router { message_id, peer_id, signed_execution_payload_envelope, - timestamp_now(), + seen_timestamp, ), ) } @@ -642,7 +643,7 @@ impl Router { peer_id, sync_request_id, beacon_block, - seen_timestamp: timestamp_now(), + seen_timestamp: self.chain.slot_clock.now_duration().unwrap_or_default(), }); } @@ -662,7 +663,7 @@ impl Router { peer_id, sync_request_id, blob_sidecar, - seen_timestamp: timestamp_now(), + seen_timestamp: self.chain.slot_clock.now_duration().unwrap_or_default(), }); } else { crit!("All blobs by range responses should belong to sync"); @@ -699,7 +700,7 @@ impl Router { peer_id, sync_request_id, beacon_block, - seen_timestamp: timestamp_now(), + seen_timestamp: self.chain.slot_clock.now_duration().unwrap_or_default(), }); } @@ 
-733,7 +734,7 @@ impl Router { sync_request_id, peer_id, blob_sidecar, - seen_timestamp: timestamp_now(), + seen_timestamp: self.chain.slot_clock.now_duration().unwrap_or_default(), }); } @@ -767,7 +768,7 @@ impl Router { sync_request_id, peer_id, data_column, - seen_timestamp: timestamp_now(), + seen_timestamp: self.chain.slot_clock.now_duration().unwrap_or_default(), }); } @@ -787,7 +788,7 @@ impl Router { peer_id, sync_request_id, data_column, - seen_timestamp: timestamp_now(), + seen_timestamp: self.chain.slot_clock.now_duration().unwrap_or_default(), }); } else { crit!("All data columns by range responses should belong to sync"); @@ -855,9 +856,3 @@ impl HandlerNetworkContext { }) } } - -fn timestamp_now() -> Duration { - SystemTime::now() - .duration_since(UNIX_EPOCH) - .unwrap_or_else(|_| Duration::from_secs(0)) -} diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 7e618d8980..60dcc3efc7 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -49,7 +49,6 @@ use crate::sync::block_lookups::{ use crate::sync::custody_backfill_sync::CustodyBackFillSync; use crate::sync::network_context::{PeerGroup, RpcResponseResult}; use beacon_chain::block_verification_types::AsBlock; -use beacon_chain::validator_monitor::timestamp_now; use beacon_chain::{ AvailabilityProcessingStatus, BeaconChain, BeaconChainTypes, BlockError, EngineState, }; @@ -851,7 +850,7 @@ impl SyncManager { BlockComponent::Block(DownloadResult { value: block.block_cloned(), block_root, - seen_timestamp: timestamp_now(), + seen_timestamp: self.chain.slot_clock.now_duration().unwrap_or_default(), peer_group: PeerGroup::from_single(peer_id), }), ); @@ -869,7 +868,7 @@ impl SyncManager { BlockComponent::Blob(DownloadResult { value: blob, block_root, - seen_timestamp: timestamp_now(), + seen_timestamp: self.chain.slot_clock.now_duration().unwrap_or_default(), peer_group: PeerGroup::from_single(peer_id), }), ); 
@@ -889,7 +888,11 @@ impl SyncManager { BlockComponent::DataColumn(DownloadResult { value: data_column, block_root, - seen_timestamp: timestamp_now(), + seen_timestamp: self + .chain + .slot_clock + .now_duration() + .unwrap_or_default(), peer_group: PeerGroup::from_single(peer_id), }), ); diff --git a/beacon_node/network/src/sync/network_context/custody.rs b/beacon_node/network/src/sync/network_context/custody.rs index ae0eee9964..620962b40b 100644 --- a/beacon_node/network/src/sync/network_context/custody.rs +++ b/beacon_node/network/src/sync/network_context/custody.rs @@ -2,11 +2,11 @@ use crate::sync::network_context::{ DataColumnsByRootRequestId, DataColumnsByRootSingleBlockRequest, }; use beacon_chain::BeaconChainTypes; -use beacon_chain::validator_monitor::timestamp_now; use fnv::FnvHashMap; use lighthouse_network::PeerId; use lighthouse_network::service::api_types::{CustodyId, DataColumnsByRootRequester}; use parking_lot::RwLock; +use slot_clock::SlotClock; use std::collections::HashSet; use std::hash::{BuildHasher, RandomState}; use std::time::{Duration, Instant}; @@ -223,7 +223,10 @@ impl ActiveCustodyRequest { .collect::, _>>()?; let peer_group = PeerGroup::from_set(peers); - let max_seen_timestamp = seen_timestamps.into_iter().max().unwrap_or(timestamp_now()); + let max_seen_timestamp = seen_timestamps + .into_iter() + .max() + .unwrap_or_else(|| cx.chain.slot_clock.now_duration().unwrap_or_default()); return Ok(Some((columns, peer_group, max_seen_timestamp))); } diff --git a/beacon_node/network/src/sync/network_context/requests.rs b/beacon_node/network/src/sync/network_context/requests.rs index 8f9540693e..ad60dffb45 100644 --- a/beacon_node/network/src/sync/network_context/requests.rs +++ b/beacon_node/network/src/sync/network_context/requests.rs @@ -1,9 +1,9 @@ use std::time::Instant; use std::{collections::hash_map::Entry, hash::Hash}; -use beacon_chain::validator_monitor::timestamp_now; use fnv::FnvHashMap; use lighthouse_network::PeerId; +use 
slot_clock::timestamp_now; use strum::IntoStaticStr; use tracing::{Span, debug}; use types::{Hash256, Slot}; diff --git a/common/slot_clock/src/lib.rs b/common/slot_clock/src/lib.rs index abfab547b9..757d0164ca 100644 --- a/common/slot_clock/src/lib.rs +++ b/common/slot_clock/src/lib.rs @@ -2,7 +2,7 @@ mod manual_slot_clock; mod metrics; mod system_time_slot_clock; -use std::time::Duration; +use std::time::{Duration, SystemTime, UNIX_EPOCH}; pub use crate::manual_slot_clock::ManualSlotClock as TestingSlotClock; pub use crate::manual_slot_clock::ManualSlotClock; @@ -110,3 +110,13 @@ pub trait SlotClock: Send + Sync + Sized + Clone { slot_clock } } + +/// Returns the current system time as a duration since the UNIX epoch. +/// +/// This is a convenience function for recording timestamps when `SlotClock` is not available. +/// Prefer `SlotClock::now_duration` if available. +pub fn timestamp_now() -> Duration { + SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap_or_default() +} From c615210fefdb10852ff950ab9163c100b37bd67e Mon Sep 17 00:00:00 2001 From: chonghe <44791194+chong-he@users.noreply.github.com> Date: Thu, 9 Apr 2026 20:00:53 +0800 Subject: [PATCH 10/27] Truncated `Display` impl for `ExecutionBlockHash` (#9108) - #6689 The intention is to only modify the INFO logs that's emitted regularly to reduce the verbosity. But I understand that this change will affect other display in the logs too that uses the `ExecutionBlockHash` display. So would love some feedbacks about the change. 
Co-Authored-By: Tan Chee Keong Co-Authored-By: Mac L --- beacon_node/client/src/notifier.rs | 2 +- .../types/src/core/execution_block_hash.rs | 18 ++++++++++++------ 2 files changed, 13 insertions(+), 7 deletions(-) diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs index c1d8cae573..4acb8c3aed 100644 --- a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -374,7 +374,7 @@ pub fn spawn_notifier( warn!( info = "chain not fully verified, \ block and attestation production disabled until execution engine syncs", - execution_block_hash = ?hash, + execution_block_hash = ?hash, "Head is optimistic" ); format!("{} (unverified)", hash) diff --git a/consensus/types/src/core/execution_block_hash.rs b/consensus/types/src/core/execution_block_hash.rs index 91c019ce04..cbacf7cf74 100644 --- a/consensus/types/src/core/execution_block_hash.rs +++ b/consensus/types/src/core/execution_block_hash.rs @@ -18,6 +18,18 @@ impl fmt::Debug for ExecutionBlockHash { } } +impl fmt::Display for ExecutionBlockHash { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let hash = format!("{}", self.0); + write!( + f, + "{}…{}", + &hash[..6], + &hash[hash.len().saturating_sub(4)..] 
+ ) + } +} + impl ExecutionBlockHash { pub fn zero() -> Self { Self(Hash256::zero()) @@ -102,12 +114,6 @@ impl std::str::FromStr for ExecutionBlockHash { } } -impl fmt::Display for ExecutionBlockHash { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{}", self.0) - } -} - impl From for ExecutionBlockHash { fn from(hash: Hash256) -> Self { Self(hash) From 8c8facd0cdb1b5db98f044aab257be64bdae3782 Mon Sep 17 00:00:00 2001 From: Barnabas Busa Date: Mon, 13 Apr 2026 03:02:50 +0200 Subject: [PATCH 11/27] Add missing beacon API config/spec values (#9112) Co-Authored-By: Barnabas Busa --- consensus/types/presets/gnosis/gloas.yaml | 22 ++++++++++++++ consensus/types/src/core/chain_spec.rs | 30 ++++++++++++++++--- consensus/types/src/core/config_and_preset.rs | 3 ++ consensus/types/src/core/preset.rs | 21 +++++++++++-- 4 files changed, 70 insertions(+), 6 deletions(-) diff --git a/consensus/types/presets/gnosis/gloas.yaml b/consensus/types/presets/gnosis/gloas.yaml index 170accaac3..d1a48adca1 100644 --- a/consensus/types/presets/gnosis/gloas.yaml +++ b/consensus/types/presets/gnosis/gloas.yaml @@ -1 +1,23 @@ # Gnosis preset - Gloas + +# Misc +# --------------------------------------------------------------- +# 2**9 (= 512) validators +PTC_SIZE: 512 + +# Max operations per block +# --------------------------------------------------------------- +# 2**1 (= 2) attestations +MAX_PAYLOAD_ATTESTATIONS: 2 + +# State list lengths +# --------------------------------------------------------------- +# 2**40 (= 1,099,511,627,776) builder spots +BUILDER_REGISTRY_LIMIT: 1099511627776 +# 2**20 (= 1,048,576) builder pending withdrawals +BUILDER_PENDING_WITHDRAWALS_LIMIT: 1048576 + +# Withdrawals processing +# --------------------------------------------------------------- +# 2**14 (= 16,384) builders +MAX_BUILDERS_PER_WITHDRAWALS_SWEEP: 16384 diff --git a/consensus/types/src/core/chain_spec.rs b/consensus/types/src/core/chain_spec.rs index e612c8b6db..d06e5083c8 100644 
--- a/consensus/types/src/core/chain_spec.rs +++ b/consensus/types/src/core/chain_spec.rs @@ -152,6 +152,7 @@ pub struct ChainSpec { pub proposer_score_boost: Option, pub reorg_head_weight_threshold: Option, pub reorg_parent_weight_threshold: Option, + pub reorg_max_epochs_since_finalization: Option, /* * Eth1 @@ -1149,6 +1150,7 @@ impl ChainSpec { proposer_score_boost: Some(40), reorg_head_weight_threshold: Some(20), reorg_parent_weight_threshold: Some(160), + reorg_max_epochs_since_finalization: Some(2), /* * Eth1 @@ -1554,6 +1556,7 @@ impl ChainSpec { proposer_score_boost: Some(40), reorg_head_weight_threshold: Some(20), reorg_parent_weight_threshold: Some(160), + reorg_max_epochs_since_finalization: Some(2), /* * Eth1 @@ -1983,6 +1986,13 @@ pub struct Config { #[serde(skip_serializing_if = "Option::is_none")] proposer_score_boost: Option>, + #[serde(skip_serializing_if = "Option::is_none")] + reorg_head_weight_threshold: Option>, + #[serde(skip_serializing_if = "Option::is_none")] + reorg_parent_weight_threshold: Option>, + #[serde(skip_serializing_if = "Option::is_none")] + reorg_max_epochs_since_finalization: Option>, + #[serde(with = "serde_utils::quoted_u64")] deposit_chain_id: u64, #[serde(with = "serde_utils::quoted_u64")] @@ -2545,6 +2555,15 @@ impl Config { max_per_epoch_activation_churn_limit: spec.max_per_epoch_activation_churn_limit, proposer_score_boost: spec.proposer_score_boost.map(|value| MaybeQuoted { value }), + reorg_head_weight_threshold: spec + .reorg_head_weight_threshold + .map(|value| MaybeQuoted { value }), + reorg_parent_weight_threshold: spec + .reorg_parent_weight_threshold + .map(|value| MaybeQuoted { value }), + reorg_max_epochs_since_finalization: spec + .reorg_max_epochs_since_finalization + .map(|value| MaybeQuoted { value }), deposit_chain_id: spec.deposit_chain_id, deposit_network_id: spec.deposit_network_id, @@ -2647,6 +2666,9 @@ impl Config { max_per_epoch_activation_churn_limit, churn_limit_quotient, proposer_score_boost, + 
reorg_head_weight_threshold, + reorg_parent_weight_threshold, + reorg_max_epochs_since_finalization, deposit_chain_id, deposit_network_id, deposit_contract_address, @@ -2743,6 +2765,10 @@ impl Config { max_per_epoch_activation_churn_limit, churn_limit_quotient, proposer_score_boost: proposer_score_boost.map(|q| q.value), + reorg_head_weight_threshold: reorg_head_weight_threshold.map(|q| q.value), + reorg_parent_weight_threshold: reorg_parent_weight_threshold.map(|q| q.value), + reorg_max_epochs_since_finalization: reorg_max_epochs_since_finalization + .map(|q| q.value), deposit_chain_id, deposit_network_id, deposit_contract_address, @@ -3692,10 +3718,6 @@ mod yaml_tests { "SYNC_MESSAGE_DUE_BPS_GLOAS", "CONTRIBUTION_DUE_BPS_GLOAS", "MAX_REQUEST_PAYLOADS", - // Gloas fork choice params not yet in Config - "REORG_HEAD_WEIGHT_THRESHOLD", - "REORG_PARENT_WEIGHT_THRESHOLD", - "REORG_MAX_EPOCHS_SINCE_FINALIZATION", // Heze networking "VIEW_FREEZE_CUTOFF_BPS", "INCLUSION_LIST_SUBMISSION_DUE_BPS", diff --git a/consensus/types/src/core/config_and_preset.rs b/consensus/types/src/core/config_and_preset.rs index 06f080e82b..02f9867fcb 100644 --- a/consensus/types/src/core/config_and_preset.rs +++ b/consensus/types/src/core/config_and_preset.rs @@ -133,6 +133,9 @@ pub fn get_extra_fields(spec: &ChainSpec) -> HashMap { "domain_sync_committee_selection_proof".to_uppercase() => u32_hex(spec.domain_sync_committee_selection_proof), "domain_bls_to_execution_change".to_uppercase() => u32_hex(spec.domain_bls_to_execution_change), + "domain_beacon_builder".to_uppercase() => u32_hex(spec.domain_beacon_builder), + "domain_ptc_attester".to_uppercase() => u32_hex(spec.domain_ptc_attester), + "domain_proposer_preferences".to_uppercase() => u32_hex(spec.domain_proposer_preferences), "sync_committee_subnet_count".to_uppercase() => consts::altair::SYNC_COMMITTEE_SUBNET_COUNT.to_string().into(), "target_aggregators_per_sync_subcommittee".to_uppercase() => diff --git 
a/consensus/types/src/core/preset.rs b/consensus/types/src/core/preset.rs index 4fa7a28204..978fc6f4a1 100644 --- a/consensus/types/src/core/preset.rs +++ b/consensus/types/src/core/preset.rs @@ -331,11 +331,28 @@ impl FuluPreset { #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] #[serde(rename_all = "UPPERCASE")] -pub struct GloasPreset {} +pub struct GloasPreset { + #[serde(with = "serde_utils::quoted_u64")] + pub ptc_size: u64, + #[serde(with = "serde_utils::quoted_u64")] + pub max_payload_attestations: u64, + #[serde(with = "serde_utils::quoted_u64")] + pub builder_registry_limit: u64, + #[serde(with = "serde_utils::quoted_u64")] + pub builder_pending_withdrawals_limit: u64, + #[serde(with = "serde_utils::quoted_u64")] + pub max_builders_per_withdrawals_sweep: u64, +} impl GloasPreset { pub fn from_chain_spec(_spec: &ChainSpec) -> Self { - Self {} + Self { + ptc_size: E::ptc_size() as u64, + max_payload_attestations: E::max_payload_attestations() as u64, + builder_registry_limit: E::BuilderRegistryLimit::to_u64(), + builder_pending_withdrawals_limit: E::builder_pending_withdrawals_limit() as u64, + max_builders_per_withdrawals_sweep: E::max_builders_per_withdrawals_sweep() as u64, + } } } From b40a17811176543306fc90565327dff06e13bace Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Wed, 15 Apr 2026 01:39:59 +0900 Subject: [PATCH 12/27] Gloas bid and preference verification (#9036) Gossip verify and cache bids and proposer preferences. This PR also ensures we subscribe to new fork topics one epoch early instead of two slots early. This is required for proposer preferences. 
Co-Authored-By: Eitan Seri- Levi --- beacon_node/beacon_chain/src/beacon_chain.rs | 8 + beacon_node/beacon_chain/src/builder.rs | 2 + beacon_node/beacon_chain/src/lib.rs | 2 + .../gossip_verified_bid.rs | 380 +++++++++ .../src/payload_bid_verification/mod.rs | 76 ++ .../payload_bid_cache.rs | 156 ++++ .../src/payload_bid_verification/tests.rs | 748 ++++++++++++++++++ .../gossip_verified_envelope.rs | 6 +- .../gossip_verified_proposer_preferences.rs | 223 ++++++ .../proposer_preferences_verification/mod.rs | 70 ++ .../proposer_preference_cache.rs | 107 +++ .../tests.rs | 279 +++++++ .../gossip_methods.rs | 127 ++- .../src/network_beacon_processor/mod.rs | 6 +- .../src/per_block_processing.rs | 24 +- .../process_operations.rs | 3 +- .../per_block_processing/signature_sets.rs | 47 +- consensus/types/src/builder/builder.rs | 11 +- consensus/types/src/state/beacon_state.rs | 71 +- 19 files changed, 2267 insertions(+), 79 deletions(-) create mode 100644 beacon_node/beacon_chain/src/payload_bid_verification/gossip_verified_bid.rs create mode 100644 beacon_node/beacon_chain/src/payload_bid_verification/mod.rs create mode 100644 beacon_node/beacon_chain/src/payload_bid_verification/payload_bid_cache.rs create mode 100644 beacon_node/beacon_chain/src/payload_bid_verification/tests.rs create mode 100644 beacon_node/beacon_chain/src/proposer_preferences_verification/gossip_verified_proposer_preferences.rs create mode 100644 beacon_node/beacon_chain/src/proposer_preferences_verification/mod.rs create mode 100644 beacon_node/beacon_chain/src/proposer_preferences_verification/proposer_preference_cache.rs create mode 100644 beacon_node/beacon_chain/src/proposer_preferences_verification/tests.rs diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index e226c707a4..acf7ad9c4c 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -54,6 +54,7 @@ use 
crate::observed_block_producers::ObservedBlockProducers; use crate::observed_data_sidecars::ObservedDataSidecars; use crate::observed_operations::{ObservationOutcome, ObservedOperations}; use crate::observed_slashable::ObservedSlashable; +use crate::payload_bid_verification::payload_bid_cache::GossipVerifiedPayloadBidCache; #[cfg(not(test))] use crate::payload_envelope_streamer::{EnvelopeRequestSource, launch_payload_envelope_stream}; use crate::pending_payload_envelopes::PendingPayloadEnvelopes; @@ -61,6 +62,7 @@ use crate::persisted_beacon_chain::PersistedBeaconChain; use crate::persisted_custody::persist_custody_context; use crate::persisted_fork_choice::PersistedForkChoice; use crate::pre_finalization_cache::PreFinalizationBlockCache; +use crate::proposer_preferences_verification::proposer_preference_cache::GossipVerifiedProposerPreferenceCache; use crate::shuffling_cache::{BlockShufflingIds, ShufflingCache}; use crate::sync_committee_verification::{ Error as SyncCommitteeError, VerifiedSyncCommitteeMessage, VerifiedSyncContribution, @@ -466,6 +468,10 @@ pub struct BeaconChain { pub envelope_times_cache: Arc>, /// A cache used to track pre-finalization block roots for quick rejection. pub pre_finalization_block_cache: PreFinalizationBlockCache, + /// A cache used to store gossip verified payload bids. + pub gossip_verified_payload_bid_cache: GossipVerifiedPayloadBidCache, + /// A cache used to store gossip verified proposer preferences. 
+ pub gossip_verified_proposer_preferences_cache: GossipVerifiedProposerPreferenceCache, /// A cache used to produce light_client server messages pub light_client_server_cache: LightClientServerCache, /// Sender to signal the light_client server to produce new updates @@ -6403,6 +6409,8 @@ impl BeaconChain { self.naive_aggregation_pool.write().prune(slot); self.block_times_cache.write().prune(slot); self.envelope_times_cache.write().prune(slot); + self.gossip_verified_payload_bid_cache.prune(slot); + self.gossip_verified_proposer_preferences_cache.prune(slot); // Don't run heavy-weight tasks during sync. if self.best_slot() + MAX_PER_SLOT_FORK_CHOICE_DISTANCE < slot { diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 11b87351b1..b963f7c342 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -1064,6 +1064,8 @@ where ), kzg: self.kzg.clone(), rng: Arc::new(Mutex::new(rng)), + gossip_verified_payload_bid_cache: <_>::default(), + gossip_verified_proposer_preferences_cache: <_>::default(), }; let head = beacon_chain.head_snapshot(); diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index d71aec6987..a8a706d8bc 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -43,6 +43,7 @@ pub mod observed_block_producers; pub mod observed_data_sidecars; pub mod observed_operations; mod observed_slashable; +pub mod payload_bid_verification; pub mod payload_envelope_streamer; pub mod payload_envelope_verification; pub mod pending_payload_envelopes; @@ -50,6 +51,7 @@ pub mod persisted_beacon_chain; pub mod persisted_custody; mod persisted_fork_choice; mod pre_finalization_cache; +pub mod proposer_preferences_verification; pub mod proposer_prep_service; pub mod schema_change; pub mod shuffling_cache; diff --git a/beacon_node/beacon_chain/src/payload_bid_verification/gossip_verified_bid.rs 
b/beacon_node/beacon_chain/src/payload_bid_verification/gossip_verified_bid.rs new file mode 100644 index 0000000000..91945896df --- /dev/null +++ b/beacon_node/beacon_chain/src/payload_bid_verification/gossip_verified_bid.rs @@ -0,0 +1,380 @@ +use std::sync::Arc; + +use crate::{ + BeaconChain, BeaconChainTypes, CanonicalHead, + payload_bid_verification::{PayloadBidError, payload_bid_cache::GossipVerifiedPayloadBidCache}, + proposer_preferences_verification::proposer_preference_cache::GossipVerifiedProposerPreferenceCache, +}; +use educe::Educe; +use slot_clock::SlotClock; +use state_processing::signature_sets::{ + execution_payload_bid_signature_set, get_builder_pubkey_from_state, +}; +use tracing::debug; +use types::{ + BeaconState, ChainSpec, EthSpec, ExecutionPayloadBid, SignedExecutionPayloadBid, + SignedProposerPreferences, Slot, +}; + +/// Verify that an execution payload bid is consistent with the current chain state +/// and proposer preferences. +pub(crate) fn verify_bid_consistency( + bid: &ExecutionPayloadBid, + current_slot: Slot, + proposer_preferences: &SignedProposerPreferences, + head_state: &BeaconState, + spec: &ChainSpec, +) -> Result<(), PayloadBidError> { + let bid_slot = bid.slot; + + if bid_slot != current_slot && bid_slot != current_slot.saturating_add(1u64) { + return Err(PayloadBidError::InvalidBidSlot { bid_slot }); + } + + // Execution payments are used by off protocol builders. In protocol bids + // should always have this value set to zero. 
+ if bid.execution_payment != 0 { + return Err(PayloadBidError::ExecutionPaymentNonZero { + execution_payment: bid.execution_payment, + }); + } + + if bid.fee_recipient != proposer_preferences.message.fee_recipient { + return Err(PayloadBidError::InvalidFeeRecipient); + } + if bid.gas_limit != proposer_preferences.message.gas_limit { + return Err(PayloadBidError::InvalidGasLimit); + } + + let max_blobs_per_block = + spec.max_blobs_per_block(bid_slot.epoch(E::slots_per_epoch())) as usize; + + if bid.blob_kzg_commitments.len() > max_blobs_per_block { + return Err(PayloadBidError::InvalidBlobKzgCommitments { + max_blobs_per_block, + blob_kzg_commitments_len: bid.blob_kzg_commitments.len(), + }); + } + + let builder_index = bid.builder_index; + + let is_active_builder = head_state + .is_active_builder(builder_index, spec) + .map_err(|_| PayloadBidError::InvalidBuilder { builder_index })?; + + if !is_active_builder { + return Err(PayloadBidError::InvalidBuilder { builder_index }); + } + + if !head_state.can_builder_cover_bid(builder_index, bid.value, spec)? { + return Err(PayloadBidError::BuilderCantCoverBid { + builder_index, + builder_bid: bid.value, + }); + } + + Ok(()) +} + +pub struct GossipVerificationContext<'a, T: BeaconChainTypes> { + pub canonical_head: &'a CanonicalHead, + pub gossip_verified_payload_bid_cache: &'a GossipVerifiedPayloadBidCache, + pub gossip_verified_proposer_preferences_cache: &'a GossipVerifiedProposerPreferenceCache, + pub slot_clock: &'a T::SlotClock, + pub spec: &'a ChainSpec, +} + +/// A wrapper around a `SignedExecutionPayloadBid` that indicates it has been approved for re-gossiping on +/// the p2p network. 
#[derive(Educe)]
#[educe(
    Debug(bound = "T: BeaconChainTypes"),
    Clone(bound = "T: BeaconChainTypes")
)]
pub struct GossipVerifiedPayloadBid<T: BeaconChainTypes> {
    pub signed_bid: Arc<SignedExecutionPayloadBid<T::EthSpec>>,
}

impl<T: BeaconChainTypes> GossipVerifiedPayloadBid<T> {
    /// Run the full gossip-verification pipeline over `signed_bid`.
    ///
    /// Checks, in order: the seen-builder cache (one bid per builder per slot),
    /// the highest-bid cache, proposer-preference availability, fork-choice
    /// knowledge of the parent block root, bid/state consistency, and finally
    /// the builder's signature. On success the bid is recorded in both caches.
    pub fn new(
        signed_bid: Arc<SignedExecutionPayloadBid<T::EthSpec>>,
        ctx: &GossipVerificationContext<'_, T>,
    ) -> Result<Self, PayloadBidError> {
        let bid_slot = signed_bid.message.slot;
        let bid_parent_block_hash = signed_bid.message.parent_block_hash;
        let bid_parent_block_root = signed_bid.message.parent_block_root;
        let bid_value = signed_bid.message.value;

        // Equivocation guard: at most one gossip-accepted bid per builder per slot.
        if ctx
            .gossip_verified_payload_bid_cache
            .seen_builder_index(&bid_slot, signed_bid.message.builder_index)
        {
            return Err(PayloadBidError::BuilderAlreadySeen {
                builder_index: signed_bid.message.builder_index,
                slot: bid_slot,
            });
        }

        // TODO(gloas): Extract into `bid_value_over_threshold` on the bid cache and potentially
        // make this more sophisticated than just a <= check.
        if let Some(cached_bid) = ctx.gossip_verified_payload_bid_cache.get_highest_bid(
            bid_slot,
            bid_parent_block_hash,
            bid_parent_block_root,
        ) && bid_value <= cached_bid.message.value
        {
            return Err(PayloadBidError::BidValueBelowCached {
                cached_value: cached_bid.message.value,
                incoming_value: bid_value,
            });
        }

        let cached_head = ctx.canonical_head.cached_head();
        let current_slot = ctx
            .slot_clock
            .now()
            .ok_or(PayloadBidError::UnableToReadSlot)?;
        let head_state = &cached_head.snapshot.beacon_state;

        // The proposer's preferences must be known before a bid can be judged
        // against them.
        let Some(proposer_preferences) = ctx
            .gossip_verified_proposer_preferences_cache
            .get_preferences(&bid_slot)
        else {
            return Err(PayloadBidError::NoProposerPreferences { slot: bid_slot });
        };

        let fork_choice = ctx.canonical_head.fork_choice_read_lock();

        // TODO(gloas) reprocess bids whose parent_block_root becomes known & canonical after a reorg?
        if !fork_choice.contains_block(&bid_parent_block_root) {
            return Err(PayloadBidError::ParentBlockRootUnknown {
                parent_block_root: bid_parent_block_root,
            });
        }

        // TODO(gloas) reprocess bids whose parent_block_root becomes canonical after a reorg.
        let head_root = cached_head.head_block_root();
        if !fork_choice.is_descendant(bid_parent_block_root, head_root) {
            return Err(PayloadBidError::ParentBlockRootNotCanonical {
                parent_block_root: bid_parent_block_root,
            });
        }

        // TODO(gloas) [IGNORE] bid.parent_block_hash is the block hash of a known execution payload in fork choice.

        // Release the read lock before the (comparatively slow) state checks
        // and signature verification.
        drop(fork_choice);

        verify_bid_consistency(
            &signed_bid.message,
            current_slot,
            &proposer_preferences,
            head_state,
            ctx.spec,
        )?;

        // Verify the builder's signature over the bid; any failure along the
        // way (missing pubkey, malformed set, bad sig) maps to `BadSignature`.
        execution_payload_bid_signature_set(
            head_state,
            |i| get_builder_pubkey_from_state(head_state, i),
            &signed_bid,
            ctx.spec,
        )
        .map_err(|_| PayloadBidError::BadSignature)?
        .ok_or(PayloadBidError::BadSignature)?
        .verify()
        .then_some(())
        .ok_or(PayloadBidError::BadSignature)?;

        let gossip_verified_bid = GossipVerifiedPayloadBid { signed_bid };

        // Only record the bid once every check has passed, so rejected bids
        // never pollute the caches.
        ctx.gossip_verified_payload_bid_cache
            .insert_seen_builder(&gossip_verified_bid);
        ctx.gossip_verified_payload_bid_cache
            .insert_highest_bid(gossip_verified_bid.clone());

        Ok(gossip_verified_bid)
    }
}

impl<T: BeaconChainTypes> BeaconChain<T> {
    /// Build a `GossipVerificationContext` from this `BeaconChain` for `GossipVerifiedPayloadBid`.
+ pub fn payload_bid_gossip_verification_context(&self) -> GossipVerificationContext<'_, T> { + GossipVerificationContext { + canonical_head: &self.canonical_head, + gossip_verified_payload_bid_cache: &self.gossip_verified_payload_bid_cache, + gossip_verified_proposer_preferences_cache: &self + .gossip_verified_proposer_preferences_cache, + slot_clock: &self.slot_clock, + spec: &self.spec, + } + } + + /// Returns `Ok(GossipVerifiedPayloadBid)` if the supplied `bid` should be forwarded onto the + /// gossip network and cached. + /// + /// ## Errors + /// + /// Returns an `Err` if the given bid was invalid, or an error was encountered during verification. + pub fn verify_payload_bid_for_gossip( + &self, + bid: Arc>, + ) -> Result, PayloadBidError> { + let slot = bid.message.slot; + let parent_block_root = bid.message.parent_block_root; + let parent_block_hash = bid.message.parent_block_hash; + + let ctx = self.payload_bid_gossip_verification_context(); + match GossipVerifiedPayloadBid::new(bid, &ctx) { + Ok(verified) => { + debug!( + %slot, + %parent_block_hash, + %parent_block_root, + "Successfully verified gossip payload bid" + ); + Ok(verified) + } + Err(e) => { + debug!( + error = e.to_string(), + %slot, + %parent_block_hash, + %parent_block_root, + "Rejected gossip payload bid" + ); + Err(e) + } + } + } +} + +#[cfg(test)] +mod tests { + use bls::Signature; + use kzg::KzgCommitment; + use ssz_types::VariableList; + use types::{ + Address, BeaconState, ChainSpec, EthSpec, ExecutionPayloadBid, MinimalEthSpec, + ProposerPreferences, SignedProposerPreferences, Slot, + }; + + use super::verify_bid_consistency; + use crate::payload_bid_verification::PayloadBidError; + + type E = MinimalEthSpec; + + fn make_bid(slot: Slot, fee_recipient: Address, gas_limit: u64) -> ExecutionPayloadBid { + ExecutionPayloadBid { + slot, + fee_recipient, + gas_limit, + value: 100, + ..ExecutionPayloadBid::default() + } + } + + fn make_preferences(fee_recipient: Address, gas_limit: u64) -> 
SignedProposerPreferences { + SignedProposerPreferences { + message: ProposerPreferences { + fee_recipient, + gas_limit, + ..ProposerPreferences::default() + }, + signature: Signature::empty(), + } + } + + fn state_and_spec() -> (BeaconState, ChainSpec) { + let spec = E::default_spec(); + let state = BeaconState::new(0, <_>::default(), &spec); + (state, spec) + } + + #[test] + fn test_invalid_bid_slot_too_old() { + let (state, spec) = state_and_spec(); + let current_slot = Slot::new(10); + let bid = make_bid(Slot::new(5), Address::ZERO, 30_000_000); + let prefs = make_preferences(Address::ZERO, 30_000_000); + + let result = verify_bid_consistency::(&bid, current_slot, &prefs, &state, &spec); + assert!(matches!( + result, + Err(PayloadBidError::InvalidBidSlot { .. }) + )); + } + + #[test] + fn test_invalid_bid_slot_too_far_ahead() { + let (state, spec) = state_and_spec(); + let current_slot = Slot::new(10); + let bid = make_bid(Slot::new(12), Address::ZERO, 30_000_000); + let prefs = make_preferences(Address::ZERO, 30_000_000); + + let result = verify_bid_consistency::(&bid, current_slot, &prefs, &state, &spec); + assert!(matches!( + result, + Err(PayloadBidError::InvalidBidSlot { .. 
}) + )); + } + + #[test] + fn test_execution_payment_nonzero() { + let (state, spec) = state_and_spec(); + let current_slot = Slot::new(10); + let mut bid = make_bid(current_slot, Address::ZERO, 30_000_000); + bid.execution_payment = 42; + let prefs = make_preferences(Address::ZERO, 30_000_000); + + let result = verify_bid_consistency::(&bid, current_slot, &prefs, &state, &spec); + assert!(matches!( + result, + Err(PayloadBidError::ExecutionPaymentNonZero { + execution_payment: 42 + }) + )); + } + + #[test] + fn test_fee_recipient_mismatch() { + let (state, spec) = state_and_spec(); + let current_slot = Slot::new(10); + let bid = make_bid(current_slot, Address::ZERO, 30_000_000); + let prefs = make_preferences(Address::repeat_byte(0xaa), 30_000_000); + + let result = verify_bid_consistency::(&bid, current_slot, &prefs, &state, &spec); + assert!(matches!(result, Err(PayloadBidError::InvalidFeeRecipient))); + } + + #[test] + fn test_invalid_blob_kzg_commitments() { + let (state, spec) = state_and_spec(); + let current_slot = Slot::new(10); + let mut bid = make_bid(current_slot, Address::ZERO, 30_000_000); + let prefs = make_preferences(Address::ZERO, 30_000_000); + + let max_blobs = spec.max_blobs_per_block(current_slot.epoch(E::slots_per_epoch())) as usize; + let commitments: Vec = (0..=max_blobs) + .map(|_| KzgCommitment::empty_for_testing()) + .collect(); + bid.blob_kzg_commitments = VariableList::new(commitments).unwrap(); + + let result = verify_bid_consistency::(&bid, current_slot, &prefs, &state, &spec); + assert!(matches!( + result, + Err(PayloadBidError::InvalidBlobKzgCommitments { .. 
}) + )); + } + + #[test] + fn test_gas_limit_mismatch() { + let (state, spec) = state_and_spec(); + let current_slot = Slot::new(10); + let bid = make_bid(current_slot, Address::ZERO, 30_000_000); + let prefs = make_preferences(Address::ZERO, 50_000_000); + + let result = verify_bid_consistency::(&bid, current_slot, &prefs, &state, &spec); + assert!(matches!(result, Err(PayloadBidError::InvalidGasLimit))); + } +} diff --git a/beacon_node/beacon_chain/src/payload_bid_verification/mod.rs b/beacon_node/beacon_chain/src/payload_bid_verification/mod.rs new file mode 100644 index 0000000000..514695f5c0 --- /dev/null +++ b/beacon_node/beacon_chain/src/payload_bid_verification/mod.rs @@ -0,0 +1,76 @@ +//! Gossip verification for execution payload bids. +//! +//! A `SignedExecutionPayloadBid` is verified and wrapped as a `GossipVerifiedPayloadBid`, +//! which is then inserted into the `GossipVerifiedPayloadBidCache`. +//! +//! ```ignore +//! SignedExecutionPayloadBid +//! | +//! ▼ +//! GossipVerifiedPayloadBid -------> Insert into GossipVerifiedPayloadBidCache +//! ``` + +use types::{BeaconStateError, Hash256, Slot}; + +pub mod gossip_verified_bid; +pub mod payload_bid_cache; + +#[cfg(test)] +mod tests; + +#[derive(Debug)] +pub enum PayloadBidError { + /// The bid's parent block root is unknown. + ParentBlockRootUnknown { parent_block_root: Hash256 }, + /// The bid's parent block root is known but not on the canonical chain. + ParentBlockRootNotCanonical { parent_block_root: Hash256 }, + /// The signature is invalid. + BadSignature, + /// A bid for this builder at this slot has already been seen. + BuilderAlreadySeen { builder_index: u64, slot: Slot }, + /// Builder is not valid/active for the given epoch + InvalidBuilder { builder_index: u64 }, + /// The bid value is lower than the currently cached bid. + BidValueBelowCached { + cached_value: u64, + incoming_value: u64, + }, + /// The bids slot is not the current slot or the next slot. 
+ InvalidBidSlot { bid_slot: Slot }, + /// The slot clock cannot be read. + UnableToReadSlot, + /// No proposer preferences for the current slot. + NoProposerPreferences { slot: Slot }, + /// The builder doesn't have enough deposited funds to cover the bid. + BuilderCantCoverBid { + builder_index: u64, + builder_bid: u64, + }, + /// The bids fee recipient doesn't match the proposer preferences fee recipient. + InvalidFeeRecipient, + /// The bids gas limit doesn't match the proposer preferences gas limit. + InvalidGasLimit, + /// The bids execution payment is non-zero + ExecutionPaymentNonZero { execution_payment: u64 }, + /// The number of blob KZG commitments exceeds the maximum allowed. + InvalidBlobKzgCommitments { + max_blobs_per_block: usize, + blob_kzg_commitments_len: usize, + }, + /// Some Beacon State error + BeaconStateError(BeaconStateError), + /// Internal error + InternalError(String), +} + +impl std::fmt::Display for PayloadBidError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{:?}", self) + } +} + +impl From for PayloadBidError { + fn from(e: BeaconStateError) -> Self { + PayloadBidError::BeaconStateError(e) + } +} diff --git a/beacon_node/beacon_chain/src/payload_bid_verification/payload_bid_cache.rs b/beacon_node/beacon_chain/src/payload_bid_verification/payload_bid_cache.rs new file mode 100644 index 0000000000..1c98569bc5 --- /dev/null +++ b/beacon_node/beacon_chain/src/payload_bid_verification/payload_bid_cache.rs @@ -0,0 +1,156 @@ +use std::{ + collections::{BTreeMap, HashMap, HashSet}, + sync::Arc, +}; + +use crate::{ + BeaconChainTypes, payload_bid_verification::gossip_verified_bid::GossipVerifiedPayloadBid, +}; +use parking_lot::RwLock; +use types::{BuilderIndex, ExecutionBlockHash, Hash256, SignedExecutionPayloadBid, Slot}; + +type HighestBidMap = + BTreeMap>>; + +pub struct GossipVerifiedPayloadBidCache { + highest_bid: RwLock>, + seen_builder: RwLock>>, +} + +impl Default for 
GossipVerifiedPayloadBidCache { + fn default() -> Self { + Self { + highest_bid: RwLock::new(BTreeMap::new()), + seen_builder: RwLock::new(BTreeMap::new()), + } + } +} + +impl GossipVerifiedPayloadBidCache { + /// Get the cached bid for the tuple `(slot, parent_block_hash, parent_block_root)`. + pub fn get_highest_bid( + &self, + slot: Slot, + parent_block_hash: ExecutionBlockHash, + parent_block_root: Hash256, + ) -> Option>> { + self.highest_bid.read().get(&slot).and_then(|map| { + map.get(&(parent_block_hash, parent_block_root)) + .map(|b| b.signed_bid.clone()) + }) + } + + /// Insert a bid for the tuple `(slot, parent_block_hash, parent_block_root)` only if + /// its value is higher than the currently cached bid for that tuple. + pub fn insert_highest_bid(&self, bid: GossipVerifiedPayloadBid) { + let key = ( + bid.signed_bid.message.parent_block_hash, + bid.signed_bid.message.parent_block_root, + ); + let mut highest_bid = self.highest_bid.write(); + let slot_map = highest_bid.entry(bid.signed_bid.message.slot).or_default(); + + if let Some(existing) = slot_map.get(&key) + && existing.signed_bid.message.value >= bid.signed_bid.message.value + { + return; + } + slot_map.insert(key, bid); + } + + /// A gossip verified bid for `BuilderIndex` already exists at `slot` + pub fn seen_builder_index(&self, slot: &Slot, builder_index: BuilderIndex) -> bool { + self.seen_builder + .read() + .get(slot) + .is_some_and(|seen_builders| seen_builders.contains(&builder_index)) + } + + /// Insert a builder into the seen cache. 
+ pub fn insert_seen_builder(&self, bid: &GossipVerifiedPayloadBid) { + let mut seen_builder = self.seen_builder.write(); + seen_builder + .entry(bid.signed_bid.message.slot) + .or_default() + .insert(bid.signed_bid.message.builder_index); + } + + /// Prune anything before `current_slot` + pub fn prune(&self, current_slot: Slot) { + self.highest_bid + .write() + .retain(|&slot, _| slot >= current_slot); + + self.seen_builder + .write() + .retain(|&slot, _| slot >= current_slot); + } +} + +#[cfg(test)] +mod tests { + use std::sync::Arc; + + use bls::Signature; + use types::{ + ExecutionBlockHash, ExecutionPayloadBid, Hash256, MinimalEthSpec, + SignedExecutionPayloadBid, Slot, + }; + + use super::GossipVerifiedPayloadBidCache; + use crate::{ + payload_bid_verification::gossip_verified_bid::GossipVerifiedPayloadBid, + test_utils::EphemeralHarnessType, + }; + + type E = MinimalEthSpec; + type T = EphemeralHarnessType; + + fn make_gossip_verified( + slot: Slot, + builder_index: u64, + parent_block_hash: ExecutionBlockHash, + parent_block_root: Hash256, + value: u64, + ) -> GossipVerifiedPayloadBid { + GossipVerifiedPayloadBid { + signed_bid: Arc::new(SignedExecutionPayloadBid { + message: ExecutionPayloadBid { + slot, + builder_index, + parent_block_hash, + parent_block_root, + value, + ..ExecutionPayloadBid::default() + }, + signature: Signature::empty(), + }), + } + } + + #[test] + fn prune_removes_old_retains_current() { + let cache = GossipVerifiedPayloadBidCache::::default(); + let hash = ExecutionBlockHash::zero(); + let root = Hash256::ZERO; + + for slot in [1, 2, 3, 7, 8, 9, 10] { + let verified = make_gossip_verified(Slot::new(slot), slot, hash, root, slot * 100); + cache.insert_seen_builder(&verified); + cache.insert_highest_bid(verified); + } + + cache.prune(Slot::new(8)); + + // Slots 1-7 pruned from both maps. 
+ for slot in [1, 2, 3, 7] { + assert!(cache.get_highest_bid(Slot::new(slot), hash, root).is_none()); + assert!(!cache.seen_builder_index(&Slot::new(slot), slot)); + } + // Slots 8-10 retained in both maps. + for slot in [8, 9, 10] { + assert!(cache.get_highest_bid(Slot::new(slot), hash, root).is_some()); + assert!(cache.seen_builder_index(&Slot::new(slot), slot)); + } + } +} diff --git a/beacon_node/beacon_chain/src/payload_bid_verification/tests.rs b/beacon_node/beacon_chain/src/payload_bid_verification/tests.rs new file mode 100644 index 0000000000..bb59b16ffb --- /dev/null +++ b/beacon_node/beacon_chain/src/payload_bid_verification/tests.rs @@ -0,0 +1,748 @@ +use std::sync::Arc; + +use std::time::Duration; + +use bls::{Keypair, PublicKeyBytes, Signature}; +use ethereum_hashing::hash; +use fork_choice::ForkChoice; +use genesis::{generate_deterministic_keypairs, interop_genesis_state}; +use kzg::KzgCommitment; +use slot_clock::{SlotClock, TestingSlotClock}; +use ssz::Encode; +use ssz_types::VariableList; +use store::{HotColdDB, StoreConfig}; +use types::{ + Address, BeaconBlock, ChainSpec, Checkpoint, Domain, Epoch, EthSpec, ExecutionBlockHash, + ExecutionPayloadBid, Hash256, MinimalEthSpec, ProposerPreferences, SignedBeaconBlock, + SignedExecutionPayloadBid, SignedProposerPreferences, SignedRoot, Slot, +}; + +use proto_array::{Block as ProtoBlock, ExecutionStatus, PayloadStatus}; +use types::AttestationShufflingId; + +use crate::{ + beacon_fork_choice_store::BeaconForkChoiceStore, + beacon_snapshot::BeaconSnapshot, + canonical_head::CanonicalHead, + payload_bid_verification::{ + PayloadBidError, + gossip_verified_bid::{GossipVerificationContext, GossipVerifiedPayloadBid}, + payload_bid_cache::GossipVerifiedPayloadBidCache, + }, + proposer_preferences_verification::{ + gossip_verified_proposer_preferences::GossipVerifiedProposerPreferences, + proposer_preference_cache::GossipVerifiedProposerPreferenceCache, + }, + test_utils::{EphemeralHarnessType, 
fork_name_from_env, test_spec}, +}; + +type E = MinimalEthSpec; +type T = EphemeralHarnessType; + +/// Number of regular validators (must be >= min_genesis_active_validator_count for MinimalEthSpec). +const NUM_VALIDATORS: usize = 64; +/// Number of builders to register. +const NUM_BUILDERS: usize = 4; +/// Balance given to each builder (min_deposit_amount + extra to cover bids in tests). +const BUILDER_BALANCE: u64 = 2_000_000_000; + +struct TestContext { + canonical_head: CanonicalHead, + bid_cache: GossipVerifiedPayloadBidCache, + preferences_cache: GossipVerifiedProposerPreferenceCache, + slot_clock: TestingSlotClock, + keypairs: Vec, + spec: ChainSpec, + genesis_block_root: Hash256, + inactive_builder_index: u64, +} + +fn builder_withdrawal_credentials(pubkey: &bls::PublicKey, spec: &ChainSpec) -> Hash256 { + let fake_execution_address = &hash(&pubkey.as_ssz_bytes())[0..20]; + let mut credentials = [0u8; 32]; + credentials[0] = spec.builder_withdrawal_prefix_byte; + credentials[12..].copy_from_slice(fake_execution_address); + Hash256::from_slice(&credentials) +} + +impl TestContext { + fn new() -> Self { + let spec = test_spec::(); + let store = Arc::new( + HotColdDB::open_ephemeral(StoreConfig::default(), Arc::new(spec.clone())) + .expect("should open ephemeral store"), + ); + + let keypairs = generate_deterministic_keypairs(NUM_VALIDATORS); + + let mut state = + interop_genesis_state::(&keypairs, 0, Hash256::repeat_byte(0x42), None, &spec) + .expect("should build genesis state"); + + // Register builders in the builder registry. 
+ for keypair in keypairs.iter().take(NUM_BUILDERS) { + let creds = builder_withdrawal_credentials(&keypair.pk, &spec); + state + .add_builder_to_registry( + PublicKeyBytes::from(keypair.pk.clone()), + creds, + BUILDER_BALANCE, + Slot::new(0), + &spec, + ) + .expect("should register builder"); + } + + // Bump finalized checkpoint epoch so builders are considered active + // (is_active_builder requires deposit_epoch < finalized_checkpoint.epoch). + *state.finalized_checkpoint_mut() = Checkpoint { + epoch: Epoch::new(1), + root: Hash256::ZERO, + }; + + let inactive_keypair = &keypairs[NUM_BUILDERS]; + let inactive_creds = builder_withdrawal_credentials(&inactive_keypair.pk, &spec); + let inactive_builder_index = state + .add_builder_to_registry( + PublicKeyBytes::from(inactive_keypair.pk.clone()), + inactive_creds, + BUILDER_BALANCE, + Slot::new(E::slots_per_epoch()), + &spec, + ) + .expect("should register inactive builder"); + + let mut genesis_block = BeaconBlock::empty(&spec); + *genesis_block.state_root_mut() = state + .update_tree_hash_cache() + .expect("should hash genesis state"); + let signed_block = SignedBeaconBlock::from_block(genesis_block, Signature::empty()); + let block_root = signed_block.canonical_root(); + + let snapshot = BeaconSnapshot::new( + Arc::new(signed_block.clone()), + None, + block_root, + state.clone(), + ); + + let fc_store = BeaconForkChoiceStore::get_forkchoice_store(store.clone(), snapshot.clone()) + .expect("should create fork choice store"); + let fork_choice = + ForkChoice::from_anchor(fc_store, block_root, &signed_block, &state, None, &spec) + .expect("should create fork choice"); + + let canonical_head = + CanonicalHead::new(fork_choice, Arc::new(snapshot), PayloadStatus::Pending); + + let slot_clock = TestingSlotClock::new( + Slot::new(0), + Duration::from_secs(0), + spec.get_slot_duration(), + ); + + Self { + canonical_head, + bid_cache: GossipVerifiedPayloadBidCache::default(), + preferences_cache: 
GossipVerifiedProposerPreferenceCache::default(), + slot_clock, + keypairs, + spec, + genesis_block_root: block_root, + inactive_builder_index, + } + } + + fn sign_bid(&self, bid: ExecutionPayloadBid) -> Arc> { + let head = self.canonical_head.cached_head(); + let state = &head.snapshot.beacon_state; + let domain = self.spec.get_domain( + bid.slot.epoch(E::slots_per_epoch()), + Domain::BeaconBuilder, + &state.fork(), + state.genesis_validators_root(), + ); + let message = bid.signing_root(domain); + let signature = self.keypairs[bid.builder_index as usize].sk.sign(message); + Arc::new(SignedExecutionPayloadBid { + message: bid, + signature, + }) + } + + fn gossip_ctx(&self) -> GossipVerificationContext<'_, T> { + GossipVerificationContext { + canonical_head: &self.canonical_head, + gossip_verified_payload_bid_cache: &self.bid_cache, + gossip_verified_proposer_preferences_cache: &self.preferences_cache, + slot_clock: &self.slot_clock, + spec: &self.spec, + } + } + + fn insert_non_canonical_block(&self) -> Hash256 { + let shuffling_id = AttestationShufflingId { + shuffling_epoch: Epoch::new(0), + shuffling_decision_block: self.genesis_block_root, + }; + let fork_block_root = Hash256::repeat_byte(0xab); + let mut fc = self.canonical_head.fork_choice_write_lock(); + fc.proto_array_mut() + .process_block::( + ProtoBlock { + slot: Slot::new(1), + root: fork_block_root, + parent_root: Some(self.genesis_block_root), + target_root: fork_block_root, + current_epoch_shuffling_id: shuffling_id.clone(), + next_epoch_shuffling_id: shuffling_id, + state_root: Hash256::ZERO, + justified_checkpoint: Checkpoint { + epoch: Epoch::new(0), + root: self.genesis_block_root, + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(0), + root: self.genesis_block_root, + }, + execution_status: ExecutionStatus::irrelevant(), + unrealized_justified_checkpoint: None, + unrealized_finalized_checkpoint: None, + execution_payload_parent_hash: Some(ExecutionBlockHash::zero()), + 
execution_payload_block_hash: Some(ExecutionBlockHash::repeat_byte(0xab)), + proposer_index: Some(0), + }, + Slot::new(1), + &self.spec, + Duration::from_secs(0), + ) + .expect("should insert fork block"); + fork_block_root + } +} + +fn make_signed_bid( + slot: Slot, + builder_index: u64, + fee_recipient: Address, + gas_limit: u64, + value: u64, + parent_block_root: Hash256, +) -> Arc> { + Arc::new(SignedExecutionPayloadBid { + message: ExecutionPayloadBid { + slot, + builder_index, + fee_recipient, + gas_limit, + value, + parent_block_root, + ..ExecutionPayloadBid::default() + }, + signature: Signature::empty(), + }) +} + +fn make_signed_preferences( + proposal_slot: Slot, + validator_index: u64, + fee_recipient: Address, + gas_limit: u64, +) -> Arc { + Arc::new(SignedProposerPreferences { + message: ProposerPreferences { + proposal_slot, + validator_index, + fee_recipient, + gas_limit, + }, + signature: Signature::empty(), + }) +} + +fn seed_preferences(ctx: &TestContext, slot: Slot, fee_recipient: Address, gas_limit: u64) { + let prefs = GossipVerifiedProposerPreferences { + signed_preferences: make_signed_preferences(slot, 0, fee_recipient, gas_limit), + }; + ctx.preferences_cache.insert_preferences(prefs); +} + +#[test] +fn no_proposer_preferences_for_slot() { + if !fork_name_from_env().is_some_and(|f| f.gloas_enabled()) { + return; + } + let ctx = TestContext::new(); + let gossip = ctx.gossip_ctx(); + let bid = make_signed_bid( + Slot::new(0), + 0, + Address::ZERO, + 30_000_000, + 100, + Hash256::ZERO, + ); + + let result = GossipVerifiedPayloadBid::new(bid, &gossip); + assert!(matches!( + result, + Err(PayloadBidError::NoProposerPreferences { .. 
}) + )); +} + +#[test] +fn builder_already_seen_for_slot() { + if !fork_name_from_env().is_some_and(|f| f.gloas_enabled()) { + return; + } + let ctx = TestContext::new(); + let gossip = ctx.gossip_ctx(); + let slot = Slot::new(0); + seed_preferences(&ctx, slot, Address::ZERO, 30_000_000); + + let bid = make_signed_bid(slot, 42, Address::ZERO, 30_000_000, 100, Hash256::ZERO); + let verified = GossipVerifiedPayloadBid { + signed_bid: bid.clone(), + }; + ctx.bid_cache.insert_seen_builder(&verified); + + let result = GossipVerifiedPayloadBid::new(bid, &gossip); + assert!(matches!( + result, + Err(PayloadBidError::BuilderAlreadySeen { + builder_index: 42, + .. + }) + )); +} + +#[test] +fn bid_value_below_cached() { + if !fork_name_from_env().is_some_and(|f| f.gloas_enabled()) { + return; + } + let ctx = TestContext::new(); + let gossip = ctx.gossip_ctx(); + let slot = Slot::new(0); + seed_preferences(&ctx, slot, Address::ZERO, 30_000_000); + + let high_bid = GossipVerifiedPayloadBid { + signed_bid: make_signed_bid(slot, 99, Address::ZERO, 30_000_000, 500, Hash256::ZERO), + }; + ctx.bid_cache.insert_highest_bid(high_bid); + + let low_bid = make_signed_bid(slot, 1, Address::ZERO, 30_000_000, 100, Hash256::ZERO); + let result = GossipVerifiedPayloadBid::new(low_bid, &gossip); + assert!(matches!( + result, + Err(PayloadBidError::BidValueBelowCached { .. }) + )); +} + +#[test] +fn invalid_bid_slot() { + if !fork_name_from_env().is_some_and(|f| f.gloas_enabled()) { + return; + } + let ctx = TestContext::new(); + let gossip = ctx.gossip_ctx(); + let slot = Slot::new(5); + seed_preferences(&ctx, slot, Address::ZERO, 30_000_000); + + let bid = make_signed_bid( + slot, + 0, + Address::ZERO, + 30_000_000, + 100, + ctx.genesis_block_root, + ); + let result = GossipVerifiedPayloadBid::new(bid, &gossip); + assert!(matches!( + result, + Err(PayloadBidError::InvalidBidSlot { .. 
}) + )); +} + +#[test] +fn fee_recipient_mismatch() { + if !fork_name_from_env().is_some_and(|f| f.gloas_enabled()) { + return; + } + let ctx = TestContext::new(); + let gossip = ctx.gossip_ctx(); + let slot = Slot::new(0); + seed_preferences(&ctx, slot, Address::repeat_byte(0xaa), 30_000_000); + + let bid = make_signed_bid( + slot, + 0, + Address::ZERO, + 30_000_000, + 100, + ctx.genesis_block_root, + ); + let result = GossipVerifiedPayloadBid::new(bid, &gossip); + assert!(matches!(result, Err(PayloadBidError::InvalidFeeRecipient))); +} + +#[test] +fn gas_limit_mismatch() { + if !fork_name_from_env().is_some_and(|f| f.gloas_enabled()) { + return; + } + let ctx = TestContext::new(); + let gossip = ctx.gossip_ctx(); + let slot = Slot::new(0); + seed_preferences(&ctx, slot, Address::ZERO, 30_000_000); + + let bid = make_signed_bid( + slot, + 0, + Address::ZERO, + 50_000_000, + 100, + ctx.genesis_block_root, + ); + let result = GossipVerifiedPayloadBid::new(bid, &gossip); + assert!(matches!(result, Err(PayloadBidError::InvalidGasLimit))); +} + +#[test] +fn execution_payment_nonzero() { + if !fork_name_from_env().is_some_and(|f| f.gloas_enabled()) { + return; + } + let ctx = TestContext::new(); + let gossip = ctx.gossip_ctx(); + let slot = Slot::new(0); + seed_preferences(&ctx, slot, Address::ZERO, 30_000_000); + + let bid = Arc::new(SignedExecutionPayloadBid { + message: ExecutionPayloadBid { + slot, + gas_limit: 30_000_000, + execution_payment: 42, + parent_block_root: ctx.genesis_block_root, + ..ExecutionPayloadBid::default() + }, + signature: Signature::empty(), + }); + let result = GossipVerifiedPayloadBid::new(bid, &gossip); + assert!(matches!( + result, + Err(PayloadBidError::ExecutionPaymentNonZero { .. 
}) + )); +} + +#[test] +fn unknown_builder_index() { + if !fork_name_from_env().is_some_and(|f| f.gloas_enabled()) { + return; + } + let ctx = TestContext::new(); + let gossip = ctx.gossip_ctx(); + let slot = Slot::new(0); + seed_preferences(&ctx, slot, Address::ZERO, 30_000_000); + + // Use a builder_index that doesn't exist in the registry. + let bid = make_signed_bid( + slot, + 9999, + Address::ZERO, + 30_000_000, + 100, + ctx.genesis_block_root, + ); + let result = GossipVerifiedPayloadBid::new(bid, &gossip); + assert!(matches!( + result, + Err(PayloadBidError::InvalidBuilder { + builder_index: 9999 + }) + )); +} + +#[test] +fn inactive_builder() { + if !fork_name_from_env().is_some_and(|f| f.gloas_enabled()) { + return; + } + let ctx = TestContext::new(); + let gossip = ctx.gossip_ctx(); + let slot = Slot::new(0); + seed_preferences(&ctx, slot, Address::ZERO, 30_000_000); + + let bid = make_signed_bid( + slot, + ctx.inactive_builder_index, + Address::ZERO, + 30_000_000, + 100, + ctx.genesis_block_root, + ); + let result = GossipVerifiedPayloadBid::new(bid, &gossip); + assert!(matches!( + result, + Err(PayloadBidError::InvalidBuilder { .. }) + )); +} + +#[test] +fn builder_cant_cover_bid() { + if !fork_name_from_env().is_some_and(|f| f.gloas_enabled()) { + return; + } + let ctx = TestContext::new(); + let gossip = ctx.gossip_ctx(); + let slot = Slot::new(0); + seed_preferences(&ctx, slot, Address::ZERO, 30_000_000); + + // Builder index 0 exists but bid value far exceeds their balance. + let bid = make_signed_bid( + slot, + 0, + Address::ZERO, + 30_000_000, + u64::MAX, + ctx.genesis_block_root, + ); + let result = GossipVerifiedPayloadBid::new(bid, &gossip); + assert!(matches!( + result, + Err(PayloadBidError::BuilderCantCoverBid { .. 
}) + )); +} + +#[test] +fn parent_block_root_unknown() { + if !fork_name_from_env().is_some_and(|f| f.gloas_enabled()) { + return; + } + let ctx = TestContext::new(); + let gossip = ctx.gossip_ctx(); + let slot = Slot::new(0); + seed_preferences(&ctx, slot, Address::ZERO, 30_000_000); + + // Parent block root not in fork choice. + let unknown_root = Hash256::repeat_byte(0xff); + let bid = make_signed_bid(slot, 0, Address::ZERO, 30_000_000, 0, unknown_root); + let result = GossipVerifiedPayloadBid::new(bid, &gossip); + assert!(result.is_err(), "expected error, got Ok"); + let err = result.unwrap_err(); + assert!( + matches!(err, PayloadBidError::ParentBlockRootUnknown { .. }), + "expected ParentBlockRootUnknown, got: {err:?}" + ); +} + +#[test] +fn parent_block_root_not_canonical() { + if !fork_name_from_env().is_some_and(|f| f.gloas_enabled()) { + return; + } + let ctx = TestContext::new(); + let gossip = ctx.gossip_ctx(); + let slot = Slot::new(0); + seed_preferences(&ctx, slot, Address::ZERO, 30_000_000); + + let fork_root = ctx.insert_non_canonical_block(); + let bid = make_signed_bid(slot, 0, Address::ZERO, 30_000_000, 0, fork_root); + let result = GossipVerifiedPayloadBid::new(bid, &gossip); + assert!(result.is_err(), "expected error, got Ok"); + let err = result.unwrap_err(); + assert!( + matches!(err, PayloadBidError::ParentBlockRootNotCanonical { .. 
}), + "expected ParentBlockRootNotCanonical, got: {err:?}" + ); +} + +#[test] +fn invalid_blob_kzg_commitments() { + if !fork_name_from_env().is_some_and(|f| f.gloas_enabled()) { + return; + } + let ctx = TestContext::new(); + let gossip = ctx.gossip_ctx(); + let slot = Slot::new(0); + seed_preferences(&ctx, slot, Address::ZERO, 30_000_000); + + let max_blobs = ctx + .spec + .max_blobs_per_block(slot.epoch(E::slots_per_epoch())) as usize; + let commitments: Vec = (0..=max_blobs) + .map(|_| KzgCommitment::empty_for_testing()) + .collect(); + + let bid = Arc::new(SignedExecutionPayloadBid { + message: ExecutionPayloadBid { + slot, + builder_index: 0, + fee_recipient: Address::ZERO, + gas_limit: 30_000_000, + value: 0, + parent_block_root: ctx.genesis_block_root, + blob_kzg_commitments: VariableList::new(commitments).unwrap(), + ..ExecutionPayloadBid::default() + }, + signature: Signature::empty(), + }); + let result = GossipVerifiedPayloadBid::new(bid, &gossip); + assert!(matches!( + result, + Err(PayloadBidError::InvalidBlobKzgCommitments { .. }) + )); +} + +#[test] +fn bad_signature() { + if !fork_name_from_env().is_some_and(|f| f.gloas_enabled()) { + return; + } + let ctx = TestContext::new(); + let gossip = ctx.gossip_ctx(); + let slot = Slot::new(0); + seed_preferences(&ctx, slot, Address::ZERO, 30_000_000); + + // All checks pass but signature is empty/invalid. 
+ let bid = make_signed_bid( + slot, + 0, + Address::ZERO, + 30_000_000, + 0, + ctx.genesis_block_root, + ); + let result = GossipVerifiedPayloadBid::new(bid, &gossip); + assert!(matches!(result, Err(PayloadBidError::BadSignature))); + assert!(!ctx.bid_cache.seen_builder_index(&slot, 0)); + assert!( + ctx.bid_cache + .get_highest_bid(slot, ExecutionBlockHash::zero(), ctx.genesis_block_root) + .is_none() + ); +} + +#[test] +fn valid_bid() { + if !fork_name_from_env().is_some_and(|f| f.gloas_enabled()) { + return; + } + let ctx = TestContext::new(); + let gossip = ctx.gossip_ctx(); + let slot = Slot::new(0); + seed_preferences(&ctx, slot, Address::ZERO, 30_000_000); + + let bid = ctx.sign_bid(ExecutionPayloadBid { + slot, + builder_index: 0, + fee_recipient: Address::ZERO, + gas_limit: 30_000_000, + value: 0, + parent_block_root: ctx.genesis_block_root, + ..ExecutionPayloadBid::default() + }); + let result = GossipVerifiedPayloadBid::new(bid, &gossip); + assert!( + result.is_ok(), + "expected Ok, got: {:?}", + result.unwrap_err() + ); +} + +#[test] +fn two_builders_coexist_in_cache() { + if !fork_name_from_env().is_some_and(|f| f.gloas_enabled()) { + return; + } + let ctx = TestContext::new(); + let gossip = ctx.gossip_ctx(); + let slot = Slot::new(0); + seed_preferences(&ctx, slot, Address::ZERO, 30_000_000); + + let bid_0 = ctx.sign_bid(ExecutionPayloadBid { + slot, + builder_index: 0, + fee_recipient: Address::ZERO, + gas_limit: 30_000_000, + value: 0, + parent_block_root: ctx.genesis_block_root, + ..ExecutionPayloadBid::default() + }); + let result_0 = GossipVerifiedPayloadBid::new(bid_0, &gossip); + assert!( + result_0.is_ok(), + "builder 0 should pass: {:?}", + result_0.unwrap_err() + ); + + // Builder 1 must bid strictly higher than builder 0's cached value. 
+ let bid_1 = ctx.sign_bid(ExecutionPayloadBid { + slot, + builder_index: 1, + fee_recipient: Address::ZERO, + gas_limit: 30_000_000, + value: 1, + parent_block_root: ctx.genesis_block_root, + ..ExecutionPayloadBid::default() + }); + let result_1 = GossipVerifiedPayloadBid::new(bid_1, &gossip); + assert!( + result_1.is_ok(), + "builder 1 should pass: {:?}", + result_1.unwrap_err() + ); + + // Both builders should be seen. + assert!(ctx.bid_cache.seen_builder_index(&slot, 0)); + assert!(ctx.bid_cache.seen_builder_index(&slot, 1)); + + let highest = ctx + .bid_cache + .get_highest_bid(slot, ExecutionBlockHash::zero(), ctx.genesis_block_root) + .expect("should have highest bid"); + assert_eq!(highest.message.value, 1); + assert_eq!(highest.message.builder_index, 1); +} + +#[test] +fn bid_equal_to_cached_value_rejected() { + if !fork_name_from_env().is_some_and(|f| f.gloas_enabled()) { + return; + } + let ctx = TestContext::new(); + let gossip = ctx.gossip_ctx(); + let slot = Slot::new(0); + seed_preferences(&ctx, slot, Address::ZERO, 30_000_000); + + // Seed a cached bid with value 100. + let high_bid = GossipVerifiedPayloadBid { + signed_bid: make_signed_bid( + slot, + 99, + Address::ZERO, + 30_000_000, + 100, + ctx.genesis_block_root, + ), + }; + ctx.bid_cache.insert_highest_bid(high_bid); + + // Submit a bid with exactly the same value — should be rejected. 
+ let equal_bid = make_signed_bid( + slot, + 1, + Address::ZERO, + 30_000_000, + 100, + ctx.genesis_block_root, + ); + let result = GossipVerifiedPayloadBid::new(equal_bid, &gossip); + assert!(matches!( + result, + Err(PayloadBidError::BidValueBelowCached { + cached_value: 100, + incoming_value: 100, + }) + )); +} diff --git a/beacon_node/beacon_chain/src/payload_envelope_verification/gossip_verified_envelope.rs b/beacon_node/beacon_chain/src/payload_envelope_verification/gossip_verified_envelope.rs index 4d40a29332..77b44a2af0 100644 --- a/beacon_node/beacon_chain/src/payload_envelope_verification/gossip_verified_envelope.rs +++ b/beacon_node/beacon_chain/src/payload_envelope_verification/gossip_verified_envelope.rs @@ -242,8 +242,8 @@ impl GossipVerifiedEnvelope { } impl BeaconChain { - /// Build a `GossipVerificationContext` from this `BeaconChain`. - pub fn gossip_verification_context(&self) -> GossipVerificationContext<'_, T> { + /// Build a `GossipVerificationContext` from this `BeaconChain` for `GossipVerifiedEnvelope`. 
+ pub fn payload_envelope_gossip_verification_context(&self) -> GossipVerificationContext<'_, T> { GossipVerificationContext { canonical_head: &self.canonical_head, store: &self.store, @@ -277,7 +277,7 @@ impl BeaconChain { let slot = envelope.slot(); let beacon_block_root = envelope.message.beacon_block_root; - let ctx = chain.gossip_verification_context(); + let ctx = chain.payload_envelope_gossip_verification_context(); match GossipVerifiedEnvelope::new(envelope, &ctx) { Ok(verified) => { debug!( diff --git a/beacon_node/beacon_chain/src/proposer_preferences_verification/gossip_verified_proposer_preferences.rs b/beacon_node/beacon_chain/src/proposer_preferences_verification/gossip_verified_proposer_preferences.rs new file mode 100644 index 0000000000..8ea095743f --- /dev/null +++ b/beacon_node/beacon_chain/src/proposer_preferences_verification/gossip_verified_proposer_preferences.rs @@ -0,0 +1,223 @@ +use std::sync::Arc; + +use crate::{ + BeaconChain, BeaconChainTypes, CanonicalHead, + proposer_preferences_verification::{ + ProposerPreferencesError, proposer_preference_cache::GossipVerifiedProposerPreferenceCache, + }, +}; +use slot_clock::SlotClock; +use state_processing::signature_sets::{get_pubkey_from_state, proposer_preferences_signature_set}; +use tracing::debug; +use types::{ + BeaconState, ChainSpec, EthSpec, ProposerPreferences, SignedProposerPreferences, Slot, +}; + +/// Verify that proposer preferences are consistent with the current chain state +pub(crate) fn verify_preferences_consistency( + preferences: &ProposerPreferences, + current_slot: Slot, + head_state: &BeaconState, +) -> Result<(), ProposerPreferencesError> { + let proposal_slot = preferences.proposal_slot; + let validator_index = preferences.validator_index; + let current_epoch = current_slot.epoch(E::slots_per_epoch()); + let proposal_epoch = proposal_slot.epoch(E::slots_per_epoch()); + + if proposal_epoch < current_epoch || proposal_epoch > current_epoch.saturating_add(1u64) { + return 
Err(ProposerPreferencesError::InvalidProposalEpoch { proposal_epoch }); + } + + if proposal_slot <= current_slot { + return Err(ProposerPreferencesError::ProposalSlotAlreadyPassed { + proposal_slot, + current_slot, + }); + } + + if !head_state.is_valid_proposal_slot(preferences)? { + return Err(ProposerPreferencesError::InvalidProposalSlot { + validator_index, + proposal_slot, + }); + } + + Ok(()) +} + +pub struct GossipVerificationContext<'a, T: BeaconChainTypes> { + pub canonical_head: &'a CanonicalHead, + pub gossip_verified_proposer_preferences_cache: &'a GossipVerifiedProposerPreferenceCache, + pub slot_clock: &'a T::SlotClock, + pub spec: &'a ChainSpec, +} + +/// A wrapper around `SignedProposerPreferences` that has been verified for gossip propagation. +#[derive(Debug, Clone)] +pub struct GossipVerifiedProposerPreferences { + pub signed_preferences: Arc, +} + +impl GossipVerifiedProposerPreferences { + pub fn new( + signed_preferences: Arc, + ctx: &GossipVerificationContext<'_, T>, + ) -> Result { + let proposal_slot = signed_preferences.message.proposal_slot; + let validator_index = signed_preferences.message.validator_index; + let cached_head = ctx.canonical_head.cached_head(); + let current_slot = ctx + .slot_clock + .now() + .ok_or(ProposerPreferencesError::UnableToReadSlot)?; + let head_state = &cached_head.snapshot.beacon_state; + + if ctx + .gossip_verified_proposer_preferences_cache + .get_seen_validator(&proposal_slot, validator_index) + { + return Err(ProposerPreferencesError::AlreadySeen { + validator_index, + proposal_slot, + }); + } + + verify_preferences_consistency(&signed_preferences.message, current_slot, head_state)?; + + // Verify signature + proposer_preferences_signature_set( + head_state, + |i| get_pubkey_from_state(head_state, i), + &signed_preferences, + ctx.spec, + ) + .map_err(|_| ProposerPreferencesError::BadSignature)? 
+ .verify() + .then_some(()) + .ok_or(ProposerPreferencesError::BadSignature)?; + + let gossip_verified = GossipVerifiedProposerPreferences { signed_preferences }; + + ctx.gossip_verified_proposer_preferences_cache + .insert_seen_validator(&gossip_verified); + + ctx.gossip_verified_proposer_preferences_cache + .insert_preferences(gossip_verified.clone()); + + Ok(gossip_verified) + } +} + +impl BeaconChain { + pub fn proposer_preferences_gossip_verification_context( + &self, + ) -> GossipVerificationContext<'_, T> { + GossipVerificationContext { + canonical_head: &self.canonical_head, + gossip_verified_proposer_preferences_cache: &self + .gossip_verified_proposer_preferences_cache, + slot_clock: &self.slot_clock, + spec: &self.spec, + } + } + + pub fn verify_proposer_preferences_for_gossip( + &self, + signed_preferences: Arc, + ) -> Result { + let proposal_slot = signed_preferences.message.proposal_slot; + let validator_index = signed_preferences.message.validator_index; + + let ctx = self.proposer_preferences_gossip_verification_context(); + match GossipVerifiedProposerPreferences::new(signed_preferences, &ctx) { + Ok(verified) => { + debug!( + %proposal_slot, + %validator_index, + "Successfully verified gossip proposer preferences" + ); + Ok(verified) + } + Err(e) => { + debug!( + error = e.to_string(), + %proposal_slot, + %validator_index, + "Rejected gossip proposer preferences" + ); + Err(e) + } + } + } +} + +#[cfg(test)] +mod tests { + use types::{Address, BeaconState, EthSpec, MinimalEthSpec, ProposerPreferences, Slot}; + + use super::verify_preferences_consistency; + use crate::proposer_preferences_verification::ProposerPreferencesError; + + type E = MinimalEthSpec; + + fn make_preferences(proposal_slot: Slot, validator_index: u64) -> ProposerPreferences { + ProposerPreferences { + proposal_slot, + validator_index, + fee_recipient: Address::ZERO, + gas_limit: 30_000_000, + } + } + + fn state() -> BeaconState { + BeaconState::new(0, <_>::default(), 
&E::default_spec()) + } + + #[test] + fn test_invalid_epoch_too_old() { + let current_slot = Slot::new(2 * E::slots_per_epoch()); + let prefs = make_preferences(Slot::new(3), 0); + + let result = verify_preferences_consistency::(&prefs, current_slot, &state()); + assert!(matches!( + result, + Err(ProposerPreferencesError::InvalidProposalEpoch { .. }) + )); + } + + #[test] + fn test_invalid_epoch_too_far_ahead() { + let current_slot = Slot::new(E::slots_per_epoch()); + let prefs = make_preferences(Slot::new(3 * E::slots_per_epoch() + 1), 0); + + let result = verify_preferences_consistency::(&prefs, current_slot, &state()); + assert!(matches!( + result, + Err(ProposerPreferencesError::InvalidProposalEpoch { .. }) + )); + } + + #[test] + fn test_proposal_slot_already_passed() { + let current_slot = Slot::new(10); + let prefs = make_preferences(Slot::new(9), 0); + + let result = verify_preferences_consistency::(&prefs, current_slot, &state()); + assert!(matches!( + result, + Err(ProposerPreferencesError::ProposalSlotAlreadyPassed { .. }) + )); + } + + #[test] + fn test_proposal_slot_equal_to_current() { + let current_slot = Slot::new(10); + let prefs = make_preferences(Slot::new(10), 0); + + let result = verify_preferences_consistency::(&prefs, current_slot, &state()); + assert!(matches!( + result, + Err(ProposerPreferencesError::ProposalSlotAlreadyPassed { .. }) + )); + } +} diff --git a/beacon_node/beacon_chain/src/proposer_preferences_verification/mod.rs b/beacon_node/beacon_chain/src/proposer_preferences_verification/mod.rs new file mode 100644 index 0000000000..a2e96dfce1 --- /dev/null +++ b/beacon_node/beacon_chain/src/proposer_preferences_verification/mod.rs @@ -0,0 +1,70 @@ +//! Gossip verification for proposer preferences. +//! +//! A `SignedProposerPreferences` is verified and wrapped as a `GossipVerifiedProposerPreferences`, +//! which is then inserted into the `GossipVerifiedProposerPreferenceCache`. +//! +//! ```ignore +//! SignedProposerPreferences +//! 
| +//! ▼ +//! GossipVerifiedProposerPreferences -------> Insert into GossipVerifiedProposerPreferenceCache +//! ``` + +use std::sync::Arc; + +use types::{BeaconStateError, Epoch, Slot}; + +use crate::BeaconChainError; + +pub mod gossip_verified_proposer_preferences; +pub mod proposer_preference_cache; + +#[cfg(test)] +mod tests; + +#[derive(Debug)] +pub enum ProposerPreferencesError { + /// The proposal slot is not in the current or next epoch. + InvalidProposalEpoch { proposal_epoch: Epoch }, + /// The proposal slot has already passed. + ProposalSlotAlreadyPassed { + proposal_slot: Slot, + current_slot: Slot, + }, + /// The validator index does not match the proposer at the given slot. + InvalidProposalSlot { + validator_index: u64, + proposal_slot: Slot, + }, + /// The slot clock cannot be read. + UnableToReadSlot, + /// A valid message from this validator for this slot has already been seen. + AlreadySeen { + validator_index: u64, + proposal_slot: Slot, + }, + /// The signature is invalid. 
+ BadSignature, + /// Some Beacon Chain Error + BeaconChainError(Arc), + /// Some Beacon State error + BeaconStateError(BeaconStateError), +} + +impl std::fmt::Display for ProposerPreferencesError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{:?}", self) + } +} + +impl From for ProposerPreferencesError { + fn from(e: BeaconStateError) -> Self { + ProposerPreferencesError::BeaconStateError(e) + } +} + +impl From for ProposerPreferencesError { + fn from(e: BeaconChainError) -> Self { + ProposerPreferencesError::BeaconChainError(Arc::new(e)) + } +} diff --git a/beacon_node/beacon_chain/src/proposer_preferences_verification/proposer_preference_cache.rs b/beacon_node/beacon_chain/src/proposer_preferences_verification/proposer_preference_cache.rs new file mode 100644 index 0000000000..69337f2a83 --- /dev/null +++ b/beacon_node/beacon_chain/src/proposer_preferences_verification/proposer_preference_cache.rs @@ -0,0 +1,107 @@ +use std::{ + collections::{BTreeMap, HashSet}, + sync::Arc, +}; + +use crate::proposer_preferences_verification::gossip_verified_proposer_preferences::GossipVerifiedProposerPreferences; +use parking_lot::RwLock; +use types::{SignedProposerPreferences, Slot}; + +pub struct GossipVerifiedProposerPreferenceCache { + preferences: RwLock>, + seen: RwLock>>, +} + +impl Default for GossipVerifiedProposerPreferenceCache { + fn default() -> Self { + Self { + preferences: RwLock::new(BTreeMap::new()), + seen: RwLock::new(BTreeMap::new()), + } + } +} + +impl GossipVerifiedProposerPreferenceCache { + pub fn get_preferences(&self, slot: &Slot) -> Option> { + self.preferences + .read() + .get(slot) + .map(|p| p.signed_preferences.clone()) + } + + pub fn insert_preferences(&self, preferences: GossipVerifiedProposerPreferences) { + let slot = preferences.signed_preferences.message.proposal_slot; + self.preferences.write().insert(slot, preferences); + } + + pub fn get_seen_validator(&self, slot: &Slot, validator_index: u64) -> 
bool { + self.seen + .read() + .get(slot) + .is_some_and(|seen| seen.contains(&validator_index)) + } + + pub fn insert_seen_validator(&self, preferences: &GossipVerifiedProposerPreferences) { + let slot = preferences.signed_preferences.message.proposal_slot; + let validator_index = preferences.signed_preferences.message.validator_index; + self.seen + .write() + .entry(slot) + .or_default() + .insert(validator_index); + } + + pub fn prune(&self, current_slot: Slot) { + self.preferences + .write() + .retain(|&slot, _| slot >= current_slot); + self.seen.write().retain(|&slot, _| slot >= current_slot); + } +} + +#[cfg(test)] +mod tests { + use std::sync::Arc; + + use bls::Signature; + use types::{Address, ProposerPreferences, SignedProposerPreferences, Slot}; + + use super::GossipVerifiedProposerPreferenceCache; + use crate::proposer_preferences_verification::gossip_verified_proposer_preferences::GossipVerifiedProposerPreferences; + + fn make_gossip_verified(slot: Slot, validator_index: u64) -> GossipVerifiedProposerPreferences { + GossipVerifiedProposerPreferences { + signed_preferences: Arc::new(SignedProposerPreferences { + message: ProposerPreferences { + proposal_slot: slot, + validator_index, + fee_recipient: Address::ZERO, + gas_limit: 30_000_000, + }, + signature: Signature::empty(), + }), + } + } + + #[test] + fn prune_removes_old_retains_current() { + let cache = GossipVerifiedProposerPreferenceCache::default(); + + for slot in [1, 2, 3, 7, 8, 9, 10] { + let verified = make_gossip_verified(Slot::new(slot), slot); + cache.insert_seen_validator(&verified); + cache.insert_preferences(verified); + } + + cache.prune(Slot::new(8)); + + for slot in [1, 2, 3, 7] { + assert!(cache.get_preferences(&Slot::new(slot)).is_none()); + assert!(!cache.get_seen_validator(&Slot::new(slot), slot)); + } + for slot in [8, 9, 10] { + assert!(cache.get_preferences(&Slot::new(slot)).is_some()); + assert!(cache.get_seen_validator(&Slot::new(slot), slot)); + } + } +} diff --git 
a/beacon_node/beacon_chain/src/proposer_preferences_verification/tests.rs b/beacon_node/beacon_chain/src/proposer_preferences_verification/tests.rs new file mode 100644 index 0000000000..2f1b24fcbb --- /dev/null +++ b/beacon_node/beacon_chain/src/proposer_preferences_verification/tests.rs @@ -0,0 +1,279 @@ +use std::sync::Arc; +use std::time::Duration; + +use bls::Signature; +use fork_choice::ForkChoice; +use genesis::{generate_deterministic_keypairs, interop_genesis_state}; +use proto_array::PayloadStatus; +use slot_clock::{SlotClock, TestingSlotClock}; +use store::{HotColdDB, StoreConfig}; +use types::{ + Address, BeaconBlock, ChainSpec, Checkpoint, Epoch, EthSpec, Hash256, MinimalEthSpec, + ProposerPreferences, SignedBeaconBlock, SignedProposerPreferences, Slot, +}; + +use crate::{ + beacon_fork_choice_store::BeaconForkChoiceStore, + beacon_snapshot::BeaconSnapshot, + canonical_head::CanonicalHead, + proposer_preferences_verification::{ + ProposerPreferencesError, + gossip_verified_proposer_preferences::{ + GossipVerificationContext, GossipVerifiedProposerPreferences, + }, + proposer_preference_cache::GossipVerifiedProposerPreferenceCache, + }, + test_utils::{EphemeralHarnessType, fork_name_from_env, test_spec}, +}; + +type E = MinimalEthSpec; +type T = EphemeralHarnessType; + +const NUM_VALIDATORS: usize = 64; + +struct TestContext { + canonical_head: CanonicalHead, + preferences_cache: GossipVerifiedProposerPreferenceCache, + slot_clock: TestingSlotClock, + spec: ChainSpec, +} + +impl TestContext { + fn new() -> Self { + let spec = test_spec::(); + let store = Arc::new( + HotColdDB::open_ephemeral(StoreConfig::default(), Arc::new(spec.clone())) + .expect("should open ephemeral store"), + ); + + let keypairs = generate_deterministic_keypairs(NUM_VALIDATORS); + + let mut state = + interop_genesis_state::(&keypairs, 0, Hash256::repeat_byte(0x42), None, &spec) + .expect("should build genesis state"); + + *state.finalized_checkpoint_mut() = Checkpoint { + epoch: 
Epoch::new(1), + root: Hash256::ZERO, + }; + + let mut genesis_block = BeaconBlock::empty(&spec); + *genesis_block.state_root_mut() = state + .update_tree_hash_cache() + .expect("should hash genesis state"); + let signed_block = SignedBeaconBlock::from_block(genesis_block, Signature::empty()); + let block_root = signed_block.canonical_root(); + + let snapshot = BeaconSnapshot::new( + Arc::new(signed_block.clone()), + None, + block_root, + state.clone(), + ); + + let fc_store = BeaconForkChoiceStore::get_forkchoice_store(store.clone(), snapshot.clone()) + .expect("should create fork choice store"); + let fork_choice = + ForkChoice::from_anchor(fc_store, block_root, &signed_block, &state, None, &spec) + .expect("should create fork choice"); + + let canonical_head = + CanonicalHead::new(fork_choice, Arc::new(snapshot), PayloadStatus::Pending); + + let slot_clock = TestingSlotClock::new( + Slot::new(0), + Duration::from_secs(0), + spec.get_slot_duration(), + ); + + Self { + canonical_head, + preferences_cache: GossipVerifiedProposerPreferenceCache::default(), + slot_clock, + spec, + } + } + + fn gossip_ctx(&self) -> GossipVerificationContext<'_, T> { + GossipVerificationContext { + canonical_head: &self.canonical_head, + gossip_verified_proposer_preferences_cache: &self.preferences_cache, + slot_clock: &self.slot_clock, + spec: &self.spec, + } + } + + fn proposer_at_slot(&self, slot: Slot) -> u64 { + let head = self.canonical_head.cached_head(); + let state = &head.snapshot.beacon_state; + let lookahead = state + .proposer_lookahead() + .expect("Gloas state has lookahead"); + let slot_in_epoch = slot.as_usize() % E::slots_per_epoch() as usize; + let epoch = slot.epoch(E::slots_per_epoch()); + let current_epoch = state.slot().epoch(E::slots_per_epoch()); + let index = if epoch == current_epoch.saturating_add(1u64) { + E::slots_per_epoch() as usize + slot_in_epoch + } else { + slot_in_epoch + }; + *lookahead.get(index).expect("index in range") + } +} + +fn 
make_signed_preferences( + proposal_slot: Slot, + validator_index: u64, +) -> Arc { + Arc::new(SignedProposerPreferences { + message: ProposerPreferences { + proposal_slot, + validator_index, + fee_recipient: Address::ZERO, + gas_limit: 30_000_000, + }, + signature: Signature::empty(), + }) +} + +#[test] +fn already_seen_validator() { + if !fork_name_from_env().is_some_and(|f| f.gloas_enabled()) { + return; + } + let ctx = TestContext::new(); + let gossip = ctx.gossip_ctx(); + let slot = Slot::new(1); + + let verified = GossipVerifiedProposerPreferences { + signed_preferences: make_signed_preferences(slot, 42), + }; + ctx.preferences_cache.insert_seen_validator(&verified); + + let prefs = make_signed_preferences(slot, 42); + let result = GossipVerifiedProposerPreferences::new(prefs, &gossip); + assert!(matches!( + result, + Err(ProposerPreferencesError::AlreadySeen { + validator_index: 42, + .. + }) + )); +} + +#[test] +fn invalid_epoch_too_far_ahead() { + if !fork_name_from_env().is_some_and(|f| f.gloas_enabled()) { + return; + } + let ctx = TestContext::new(); + let gossip = ctx.gossip_ctx(); + + let far_slot = Slot::new(3 * E::slots_per_epoch()); + let prefs = make_signed_preferences(far_slot, 0); + let result = GossipVerifiedProposerPreferences::new(prefs, &gossip); + assert!(matches!( + result, + Err(ProposerPreferencesError::InvalidProposalEpoch { .. }) + )); +} + +#[test] +fn proposal_slot_already_passed() { + if !fork_name_from_env().is_some_and(|f| f.gloas_enabled()) { + return; + } + let ctx = TestContext::new(); + let gossip = ctx.gossip_ctx(); + + let prefs = make_signed_preferences(Slot::new(0), 0); + let result = GossipVerifiedProposerPreferences::new(prefs, &gossip); + assert!(matches!( + result, + Err(ProposerPreferencesError::ProposalSlotAlreadyPassed { .. 
}) + )); +} + +#[test] +fn wrong_proposer_for_slot() { + if !fork_name_from_env().is_some_and(|f| f.gloas_enabled()) { + return; + } + let ctx = TestContext::new(); + let gossip = ctx.gossip_ctx(); + let slot = Slot::new(1); + + let actual_proposer = ctx.proposer_at_slot(slot); + let wrong_validator = if actual_proposer == 0 { 1 } else { 0 }; + + let prefs = make_signed_preferences(slot, wrong_validator); + let result = GossipVerifiedProposerPreferences::new(prefs, &gossip); + assert!(matches!( + result, + Err(ProposerPreferencesError::InvalidProposalSlot { .. }) + )); +} + +#[test] +fn correct_proposer_bad_signature() { + if !fork_name_from_env().is_some_and(|f| f.gloas_enabled()) { + return; + } + let ctx = TestContext::new(); + let gossip = ctx.gossip_ctx(); + let slot = Slot::new(1); + + let actual_proposer = ctx.proposer_at_slot(slot); + let prefs = make_signed_preferences(slot, actual_proposer); + let result = GossipVerifiedProposerPreferences::new(prefs, &gossip); + assert!(matches!( + result, + Err(ProposerPreferencesError::BadSignature) + )); + assert!( + !ctx.preferences_cache + .get_seen_validator(&slot, actual_proposer) + ); + assert!(ctx.preferences_cache.get_preferences(&slot).is_none()); +} + +#[test] +fn validator_index_out_of_bounds() { + if !fork_name_from_env().is_some_and(|f| f.gloas_enabled()) { + return; + } + let ctx = TestContext::new(); + let gossip = ctx.gossip_ctx(); + let slot = Slot::new(1); + + let prefs = make_signed_preferences(slot, u64::MAX); + let result = GossipVerifiedProposerPreferences::new(prefs, &gossip); + assert!(matches!( + result, + Err(ProposerPreferencesError::InvalidProposalSlot { .. 
}) + )); +} + +// TODO(gloas) add successful proposer preferences check once we have proposer preferences signing logic + +#[test] +fn preferences_for_next_epoch_slot() { + if !fork_name_from_env().is_some_and(|f| f.gloas_enabled()) { + return; + } + let ctx = TestContext::new(); + let gossip = ctx.gossip_ctx(); + + // Head is at slot 0 (epoch 0). Pick a slot in epoch 1. + let next_epoch_slot = Slot::new(E::slots_per_epoch() + 1); + let actual_proposer = ctx.proposer_at_slot(next_epoch_slot); + + let prefs = make_signed_preferences(next_epoch_slot, actual_proposer); + let result = GossipVerifiedProposerPreferences::new(prefs, &gossip); + // Should pass consistency checks but fail on signature (empty sig). + assert!( + matches!(result, Err(ProposerPreferencesError::BadSignature)), + "expected BadSignature for next-epoch slot, got: {:?}", + result + ); +} diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index 1f55d9a878..c0aa30ffcc 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -4,8 +4,6 @@ use crate::{ service::NetworkMessage, sync::SyncMessage, }; -use beacon_chain::block_verification_types::AsBlock; -use beacon_chain::data_column_verification::{GossipDataColumnError, GossipVerifiedDataColumn}; use beacon_chain::store::Error; use beacon_chain::{ AvailabilityProcessingStatus, BeaconChainError, BeaconChainTypes, BlockError, ForkChoiceError, @@ -24,6 +22,11 @@ use beacon_chain::{ EnvelopeError, gossip_verified_envelope::GossipVerifiedEnvelope, }, }; +use beacon_chain::{block_verification_types::AsBlock, payload_bid_verification::PayloadBidError}; +use beacon_chain::{ + data_column_verification::{GossipDataColumnError, GossipVerifiedDataColumn}, + proposer_preferences_verification::ProposerPreferencesError, +}; use beacon_processor::{Work, WorkEvent}; use 
lighthouse_network::{Client, MessageAcceptance, MessageId, PeerAction, PeerId, ReportSource}; use logging::crit; @@ -3470,26 +3473,103 @@ impl NetworkBeaconProcessor { } } + #[instrument( + name = "lh_process_execution_payload_bid", + parent = None, + level = "debug", + skip_all, + fields(parent_block_hash = ?bid.message.parent_block_hash, parent_block_root = ?bid.message.parent_block_root), + )] pub fn process_gossip_execution_payload_bid( self: &Arc, message_id: MessageId, peer_id: PeerId, - payload_bid: SignedExecutionPayloadBid, + bid: Arc>, ) { - // TODO(EIP-7732): Implement proper payload bid gossip processing. - // This should integrate with a payload execution bid verification module once it's implemented. + let verification_result = self.chain.verify_payload_bid_for_gossip(bid.clone()); - trace!( - %peer_id, - slot = %payload_bid.message.slot, - value = %payload_bid.message.value, - "Processing execution payload bid" - ); - - // For now, ignore all payload bids since verification is not implemented - self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); + match verification_result { + Ok(_) => { + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Accept); + } + Err( + PayloadBidError::BadSignature + | PayloadBidError::InvalidBuilder { .. } + | PayloadBidError::InvalidFeeRecipient + | PayloadBidError::InvalidGasLimit + | PayloadBidError::ExecutionPaymentNonZero { .. } + | PayloadBidError::InvalidBlobKzgCommitments { .. }, + ) => { + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "invalid_gossip_payload_bid", + ); + } + Err( + PayloadBidError::NoProposerPreferences { .. } + | PayloadBidError::BuilderAlreadySeen { .. } + | PayloadBidError::BidValueBelowCached { .. } + | PayloadBidError::ParentBlockRootUnknown { .. } + | PayloadBidError::ParentBlockRootNotCanonical { .. 
} + | PayloadBidError::BuilderCantCoverBid { .. } + | PayloadBidError::BeaconStateError(_) + | PayloadBidError::InternalError(_) + | PayloadBidError::InvalidBidSlot { .. } + | PayloadBidError::UnableToReadSlot, + ) => { + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); + } + } } + #[instrument( + name = "lh_process_proposer_preferences", + parent = None, + level = "debug", + skip_all, + fields(validator_index = ?proposer_preferences.message.validator_index, proposal_slot = ?proposer_preferences.message.proposal_slot), + )] + pub fn process_gossip_proposer_preferences( + self: &Arc, + message_id: MessageId, + peer_id: PeerId, + proposer_preferences: Arc, + ) { + let verification_result = self + .chain + .verify_proposer_preferences_for_gossip(proposer_preferences); + + match verification_result { + Ok(_) => { + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Accept); + } + Err( + ProposerPreferencesError::AlreadySeen { .. } + | ProposerPreferencesError::InvalidProposalEpoch { .. } + | ProposerPreferencesError::ProposalSlotAlreadyPassed { .. } + | ProposerPreferencesError::BeaconChainError(_) + | ProposerPreferencesError::BeaconStateError(_) + | ProposerPreferencesError::UnableToReadSlot, + ) => { + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); + } + Err( + ProposerPreferencesError::InvalidProposalSlot { .. 
} + | ProposerPreferencesError::BadSignature, + ) => { + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "invalid_gossip_proposer_preferences", + ); + } + } + } + + // TODO(gloas) dont forget to add tracing instrumentation pub fn process_gossip_payload_attestation( self: &Arc, message_id: MessageId, @@ -3510,23 +3590,4 @@ impl NetworkBeaconProcessor { // For now, ignore all payload attestation messages since verification is not implemented self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); } - - pub fn process_gossip_proposer_preferences( - self: &Arc, - message_id: MessageId, - peer_id: PeerId, - proposer_preferences: SignedProposerPreferences, - ) { - // TODO(EIP-7732): Implement proper proposer preferences gossip processing. - - trace!( - %peer_id, - validator_index = proposer_preferences.message.validator_index, - slot = %proposer_preferences.message.proposal_slot, - "Processing proposer preferences" - ); - - // For now, ignore all proposer preferences since verification is not implemented - self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); - } } diff --git a/beacon_node/network/src/network_beacon_processor/mod.rs b/beacon_node/network/src/network_beacon_processor/mod.rs index b3d6874b8a..2b354aaa20 100644 --- a/beacon_node/network/src/network_beacon_processor/mod.rs +++ b/beacon_node/network/src/network_beacon_processor/mod.rs @@ -463,7 +463,7 @@ impl NetworkBeaconProcessor { processor.process_gossip_execution_payload_bid( message_id, peer_id, - *execution_payload_bid, + Arc::new(*execution_payload_bid), ) }; @@ -507,12 +507,12 @@ impl NetworkBeaconProcessor { processor.process_gossip_proposer_preferences( message_id, peer_id, - *proposer_preferences, + Arc::new(*proposer_preferences), ) }; self.try_send(BeaconWorkEvent { - drop_during_sync: false, + drop_during_sync: true, work: 
Work::GossipProposerPreferences(Box::new(process_fn)), }) } diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index 5aa610e98e..210e0437be 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -531,26 +531,6 @@ pub fn compute_timestamp_at_slot( .and_then(|since_genesis| state.genesis_time().safe_add(since_genesis)) } -pub fn can_builder_cover_bid( - state: &BeaconState, - builder_index: BuilderIndex, - builder: &Builder, - bid_amount: u64, - spec: &ChainSpec, -) -> Result { - let builder_balance = builder.balance; - let pending_withdrawals_amount = - state.get_pending_balance_to_withdraw_for_builder(builder_index)?; - let min_balance = spec - .min_deposit_amount - .safe_add(pending_withdrawals_amount)?; - if builder_balance < min_balance { - Ok(false) - } else { - Ok(builder_balance.safe_sub(min_balance)? >= bid_amount) - } -} - pub fn process_execution_payload_bid>( state: &mut BeaconState, block: BeaconBlockRef<'_, E, Payload>, @@ -579,13 +559,13 @@ pub fn process_execution_payload_bid // Verify that the builder is active block_verify!( - builder.is_active_at_finalized_epoch(state.finalized_checkpoint().epoch, spec), + state.is_active_builder(builder_index, spec)?, ExecutionPayloadBidInvalid::BuilderNotActive(builder_index).into() ); // Verify that the builder has funds to cover the bid block_verify!( - can_builder_cover_bid(state, builder_index, builder, amount, spec)?, + state.can_builder_cover_bid(builder_index, amount, spec)?, ExecutionPayloadBidInvalid::InsufficientBalance { builder_index, builder_balance: builder.balance, diff --git a/consensus/state_processing/src/per_block_processing/process_operations.rs b/consensus/state_processing/src/per_block_processing/process_operations.rs index ac64398655..f1de284fc8 100644 --- a/consensus/state_processing/src/per_block_processing/process_operations.rs +++ 
b/consensus/state_processing/src/per_block_processing/process_operations.rs @@ -556,8 +556,7 @@ fn process_builder_voluntary_exit( )))?; // Verify the builder is active - let finalized_epoch = state.finalized_checkpoint().epoch; - if !builder.is_active_at_finalized_epoch(finalized_epoch, spec) { + if !state.is_active_builder(builder_index, spec)? { return Err(BlockOperationError::invalid(ExitInvalid::NotActive( signed_exit.message.validator_index, ))); diff --git a/consensus/state_processing/src/per_block_processing/signature_sets.rs b/consensus/state_processing/src/per_block_processing/signature_sets.rs index 71ee1f8993..5c1767f227 100644 --- a/consensus/state_processing/src/per_block_processing/signature_sets.rs +++ b/consensus/state_processing/src/per_block_processing/signature_sets.rs @@ -12,9 +12,9 @@ use types::{ BuilderIndex, ChainSpec, DepositData, Domain, Epoch, EthSpec, Fork, Hash256, InconsistentFork, IndexedAttestation, IndexedAttestationRef, IndexedPayloadAttestation, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedBeaconBlockHeader, - SignedBlsToExecutionChange, SignedContributionAndProof, SignedExecutionPayloadBid, SignedRoot, - SignedVoluntaryExit, SigningData, Slot, SyncAggregate, SyncAggregatorSelectionData, - consts::gloas::BUILDER_INDEX_SELF_BUILD, + SignedBlsToExecutionChange, SignedContributionAndProof, SignedExecutionPayloadBid, + SignedProposerPreferences, SignedRoot, SignedVoluntaryExit, SigningData, Slot, SyncAggregate, + SyncAggregatorSelectionData, consts::gloas::BUILDER_INDEX_SELF_BUILD, }; pub type Result = std::result::Result; @@ -389,6 +389,37 @@ where Ok(SignatureSet::multiple_pubkeys(signature, pubkeys, message)) } +pub fn proposer_preferences_signature_set<'a, E, F>( + state: &'a BeaconState, + get_pubkey: F, + signed_proposer_preferences: &'a SignedProposerPreferences, + spec: &'a ChainSpec, +) -> Result> +where + E: EthSpec, + F: Fn(usize) -> Option>, +{ + let preferences = 
&signed_proposer_preferences.message; + let validator_index = preferences.validator_index as usize; + + let proposal_epoch = preferences.proposal_slot.epoch(E::slots_per_epoch()); + let proposal_fork = spec.fork_at_epoch(proposal_epoch); + let domain = spec.get_domain( + proposal_epoch, + Domain::ProposerPreferences, + &proposal_fork, + state.genesis_validators_root(), + ); + + let message = preferences.signing_root(domain); + + Ok(SignatureSet::single_pubkey( + &signed_proposer_preferences.signature, + get_pubkey(validator_index).ok_or(Error::ValidatorUnknown(validator_index as u64))?, + message, + )) +} + pub fn execution_payload_bid_signature_set<'a, E, F>( state: &'a BeaconState, get_builder_pubkey: F, @@ -407,10 +438,16 @@ where // See `process_execution_payload_bid`. return Ok(None); } + + let bid_epoch = signed_execution_payload_bid + .message + .slot + .epoch(E::slots_per_epoch()); + let bid_fork = spec.fork_at_epoch(bid_epoch); let domain = spec.get_domain( - state.current_epoch(), + bid_epoch, Domain::BeaconBuilder, - &state.fork(), + &bid_fork, state.genesis_validators_root(), ); diff --git a/consensus/types/src/builder/builder.rs b/consensus/types/src/builder/builder.rs index 7d494da3ee..2bd50f42cc 100644 --- a/consensus/types/src/builder/builder.rs +++ b/consensus/types/src/builder/builder.rs @@ -1,5 +1,5 @@ use crate::test_utils::TestRandom; -use crate::{Address, ChainSpec, Epoch, ForkName}; +use crate::{Address, Epoch, ForkName}; use bls::PublicKeyBytes; use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; @@ -24,12 +24,3 @@ pub struct Builder { pub deposit_epoch: Epoch, pub withdrawable_epoch: Epoch, } - -impl Builder { - /// Check if a builder is active in a state with `finalized_epoch`. - /// - /// This implements `is_active_builder` from the spec. 
- pub fn is_active_at_finalized_epoch(&self, finalized_epoch: Epoch, spec: &ChainSpec) -> bool { - self.deposit_epoch < finalized_epoch && self.withdrawable_epoch == spec.far_future_epoch - } -} diff --git a/consensus/types/src/state/beacon_state.rs b/consensus/types/src/state/beacon_state.rs index a033272b9d..8bef8816e5 100644 --- a/consensus/types/src/state/beacon_state.rs +++ b/consensus/types/src/state/beacon_state.rs @@ -24,7 +24,7 @@ use tree_hash_derive::TreeHash; use typenum::Unsigned; use crate::{ - Address, ExecutionBlockHash, ExecutionPayloadBid, Withdrawal, + Address, ExecutionBlockHash, ExecutionPayloadBid, ProposerPreferences, Withdrawal, attestation::{ AttestationData, AttestationDuty, BeaconCommittee, Checkpoint, CommitteeIndex, PTC, ParticipationFlags, PendingAttestation, @@ -1349,6 +1349,43 @@ impl BeaconState { } } + /// Check if the validator is the proposer for the given slot in the current or next epoch. + pub fn is_valid_proposal_slot( + &self, + preferences: &ProposerPreferences, + ) -> Result { + let current_epoch = self.current_epoch(); + let proposal_epoch = preferences.proposal_slot.epoch(E::slots_per_epoch()); + + if proposal_epoch < current_epoch { + return Ok(false); + } + + let next_epoch = current_epoch.saturating_add(1u64); + if proposal_epoch > next_epoch { + return Ok(false); + } + + let epoch_offset = proposal_epoch.as_u64().safe_sub(current_epoch.as_u64())?; + + let slot_in_epoch = preferences + .proposal_slot + .as_u64() + .safe_rem(E::slots_per_epoch())?; + + let index = epoch_offset + .safe_mul(E::slots_per_epoch()) + .and_then(|v| v.safe_add(slot_in_epoch))?; + + let proposer_lookahead = self.proposer_lookahead()?; + + let proposer = proposer_lookahead + .get(index as usize) + .ok_or(BeaconStateError::ProposerLookaheadOutOfBounds { i: index as usize })?; + + Ok(*proposer == preferences.validator_index) + } + /// Returns the beacon proposer index for each `slot` in `epoch`. 
/// /// The returned `Vec` contains one proposer index for each slot in the epoch. @@ -3259,6 +3296,38 @@ impl BeaconState { Ok(effective_balance.safe_mul(MAX_RANDOM_VALUE)? >= max_effective_balance.safe_mul(random_value)?) } + + pub fn can_builder_cover_bid( + &self, + builder_index: BuilderIndex, + bid_amount: u64, + spec: &ChainSpec, + ) -> Result { + let builder = self.get_builder(builder_index)?; + + let builder_balance = builder.balance; + let pending_withdrawals_amount = + self.get_pending_balance_to_withdraw_for_builder(builder_index)?; + + let min_balance = spec + .min_deposit_amount + .safe_add(pending_withdrawals_amount)?; + if builder_balance < min_balance { + return Ok(false); + } + Ok(builder_balance.safe_sub(min_balance)? >= bid_amount) + } + + pub fn is_active_builder( + &self, + builder_index: BuilderIndex, + spec: &ChainSpec, + ) -> Result { + let builder = self.get_builder(builder_index)?; + + Ok(builder.deposit_epoch < self.finalized_checkpoint().epoch + && builder.withdrawable_epoch == spec.far_future_epoch) + } } impl ForkVersionDecode for BeaconState { From d3c13c4cf081746741af48b4189633f6cf42b844 Mon Sep 17 00:00:00 2001 From: YoungWoo Yang <119781151+0u-Y@users.noreply.github.com> Date: Wed, 15 Apr 2026 01:41:56 +0900 Subject: [PATCH 13/27] Gloas: envelope peer penalties and REJECT/IGNORE mapping (#8981) Closes #8949 Implements peer penalties and REJECT/IGNORE message propagation for `SignedExecutionPayloadEnvelope` gossip handling, completing follow-up work from #8806. Feedback on the error classification would be appreciated. 
### Key Implementation Details - Maps all 15 `EnvelopeError` variants to REJECT/IGNORE based on [Gloas p2p spec](https://github.com/ethereum/consensus-specs/blob/master/specs/gloas/p2p-interface.md#execution_payload) - Follows `ExecutionPayloadError` handling pattern from block gossip (`penalize_peer()` method) - Uses explicit variant matching (rather than catch-all `_`) for type safety - Applies `LowToleranceError` penalty for protocol violations (invalid signatures, mismatches, etc.) - Ignores without penalty for spec-defined cases (unknown block root, prior to finalization) and internal errors Co-Authored-By: 0u-Y Co-Authored-By: Eitan Seri-Levi --- .../gossip_methods.rs | 179 +++++++++++++----- 1 file changed, 129 insertions(+), 50 deletions(-) diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index c0aa30ffcc..2238cb2f17 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -3337,63 +3337,112 @@ impl NetworkBeaconProcessor { verified_envelope } + Err(e) => { + match e { + EnvelopeError::ExecutionPayloadError(ref epe) if !epe.penalize_peer() => { + self.propagate_validation_result( + message_id, + peer_id, + MessageAcceptance::Ignore, + ); + } - Err(EnvelopeError::BlockRootUnknown { block_root }) => { - let envelope_slot = envelope.slot(); + EnvelopeError::BadSignature + | EnvelopeError::BuilderIndexMismatch { .. } + | EnvelopeError::SlotMismatch { .. } + | EnvelopeError::BlockHashMismatch { .. } + | EnvelopeError::UnknownValidator { .. } + | EnvelopeError::IncorrectBlockProposer { .. 
} + | EnvelopeError::ExecutionPayloadError(_) + | EnvelopeError::EnvelopeProcessingError(_) + | EnvelopeError::BlockError(_) => { + self.propagate_validation_result( + message_id, + peer_id, + MessageAcceptance::Reject, + ); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "gossip_envelope_low", + ); + } - debug!( - ?block_root, - %envelope_slot, - "Envelope references unknown block, deferring to reprocess queue" - ); + EnvelopeError::BlockRootUnknown { block_root } => { + let envelope_slot = envelope.slot(); - self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); + debug!( + ?block_root, + %envelope_slot, + "Envelope references unknown block, deferring to reprocess queue" + ); - let inner_self = self.clone(); - let chain = self.chain.clone(); - let process_fn = Box::pin(async move { - match chain.verify_envelope_for_gossip(envelope).await { - Ok(verified_envelope) => { - inner_self - .process_gossip_verified_execution_payload_envelope( - peer_id, - verified_envelope, - ) - .await; - } - Err(e) => { - debug!( - error = ?e, - "Deferred envelope failed verification" + self.propagate_validation_result( + message_id.clone(), + peer_id, + MessageAcceptance::Ignore, + ); + + let inner_self = self.clone(); + let chain = self.chain.clone(); + let process_fn = Box::pin(async move { + match chain.verify_envelope_for_gossip(envelope).await { + Ok(verified_envelope) => { + inner_self + .process_gossip_verified_execution_payload_envelope( + peer_id, + verified_envelope, + ) + .await; + } + Err(e) => { + debug!( + error = ?e, + "Deferred envelope failed verification" + ); + } + } + }); + + if self + .beacon_processor_send + .try_send(WorkEvent { + drop_during_sync: false, + work: Work::Reprocess( + ReprocessQueueMessage::UnknownBlockForEnvelope( + QueuedGossipEnvelope { + beacon_block_slot: envelope_slot, + beacon_block_root: block_root, + process_fn, + }, + ), + ), + }) + .is_err() + { + error!( + %envelope_slot, + 
?block_root, + "Failed to defer envelope import" ); } } - }); - if self - .beacon_processor_send - .try_send(WorkEvent { - drop_during_sync: false, - work: Work::Reprocess(ReprocessQueueMessage::UnknownBlockForEnvelope( - QueuedGossipEnvelope { - beacon_block_slot: envelope_slot, - beacon_block_root: block_root, - process_fn, - }, - )), - }) - .is_err() - { - error!( - %envelope_slot, - ?block_root, - "Failed to defer envelope import" - ); + EnvelopeError::PriorToFinalization { .. } + | EnvelopeError::OptimisticSyncNotSupported { .. } + | EnvelopeError::BeaconChainError(_) + | EnvelopeError::BeaconStateError(_) + | EnvelopeError::BlockProcessingError(_) + | EnvelopeError::InternalError(_) => { + self.propagate_validation_result( + message_id, + peer_id, + MessageAcceptance::Ignore, + ); + } } return None; } - // TODO(gloas) penalize peers accordingly - Err(_) => return None, }; let envelope_slot = verified_envelope.signed_envelope.slot(); @@ -3441,7 +3490,7 @@ impl NetworkBeaconProcessor { async fn process_gossip_verified_execution_payload_envelope( self: Arc, - _peer_id: PeerId, + peer_id: PeerId, verified_envelope: GossipVerifiedEnvelope, ) { let _processing_start_time = Instant::now(); @@ -3467,9 +3516,39 @@ impl NetworkBeaconProcessor { | Ok(AvailabilityProcessingStatus::MissingComponents(_, _)) => { // Nothing to do } - Err(_) => { - // TODO(gloas) implement peer penalties - } + Err(e) => match e { + EnvelopeError::ExecutionPayloadError(epe) if !epe.penalize_peer() => {} + EnvelopeError::BadSignature + | EnvelopeError::BuilderIndexMismatch { .. } + | EnvelopeError::SlotMismatch { .. } + | EnvelopeError::BlockHashMismatch { .. } + | EnvelopeError::UnknownValidator { .. } + | EnvelopeError::IncorrectBlockProposer { .. 
} + | EnvelopeError::ExecutionPayloadError(_) => { + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "gossip_envelope_processing_low", + ); + } + + EnvelopeError::EnvelopeProcessingError(_) + | EnvelopeError::BlockError(_) + | EnvelopeError::BlockRootUnknown { .. } => { + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "gossip_envelope_processing_error", + ); + } + + EnvelopeError::PriorToFinalization { .. } + | EnvelopeError::OptimisticSyncNotSupported { .. } + | EnvelopeError::BeaconChainError(_) + | EnvelopeError::BeaconStateError(_) + | EnvelopeError::BlockProcessingError(_) + | EnvelopeError::InternalError(_) => {} + }, } } From 30446b9f3a88679fbad14d84890f9950ceb1cd83 Mon Sep 17 00:00:00 2001 From: Mac L Date: Thu, 16 Apr 2026 03:07:54 +0300 Subject: [PATCH 14/27] Bump `rustls-webpki` to unblock CI (#9130) New audit failure from `RUSTSEC-2026-0098` Bump `rustls-webpki` to an unaffected version, add an ignore for the old version used by `warp` 0.3 Co-Authored-By: Mac L Co-Authored-By: Pawan Dhananjay --- Cargo.lock | 8 ++++---- Makefile | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 726929e9ec..329518f647 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5266,7 +5266,7 @@ dependencies = [ "rcgen", "ring", "rustls 0.23.35", - "rustls-webpki 0.103.10", + "rustls-webpki 0.103.12", "thiserror 2.0.17", "x509-parser", "yasna", @@ -7678,7 +7678,7 @@ dependencies = [ "once_cell", "ring", "rustls-pki-types", - "rustls-webpki 0.103.10", + "rustls-webpki 0.103.12", "subtle", "zeroize", ] @@ -7727,9 +7727,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.103.10" +version = "0.103.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df33b2b81ac578cabaf06b89b0631153a3f416b0a886e8a7a1707fb51abbd1ef" +checksum = "8279bb85272c9f10811ae6a6c547ff594d6a7f3c6c6b02ee9726d1d0dcfcdd06" dependencies = [ "ring", "rustls-pki-types", diff 
--git a/Makefile b/Makefile index 599c1a8791..033ad6cfc8 100644 --- a/Makefile +++ b/Makefile @@ -331,7 +331,7 @@ install-audit: cargo install --force cargo-audit audit-CI: - cargo audit --ignore RUSTSEC-2026-0049 + cargo audit --ignore RUSTSEC-2026-0049 --ignore RUSTSEC-2026-0098 --ignore RUSTSEC-2026-0099 # Runs cargo deny (check for banned crates, duplicate versions, and source restrictions) deny: install-deny deny-CI From e0922badbef399f6f82ec3a5343d3b39393a30e4 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Thu, 16 Apr 2026 10:07:58 +1000 Subject: [PATCH 15/27] Prefix VC root spans with `lh_` so they get exported to tracing backend (#9129) The tracing exporter uses a `PrefixBasedSampler` that only samples root spans whose name starts with `lh_`. Rename the VC root spans to include the prefix so their traces are exported. Thanks @lmnzx for pointing this out! Co-Authored-By: Jimmy Chen --- .../validator_services/src/attestation_service.rs | 4 ++-- validator_client/validator_services/src/block_service.rs | 2 +- .../validator_services/src/sync_committee_service.rs | 6 +++--- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/validator_client/validator_services/src/attestation_service.rs b/validator_client/validator_services/src/attestation_service.rs index fe808efd88..dc5fc27a4f 100644 --- a/validator_client/validator_services/src/attestation_service.rs +++ b/validator_client/validator_services/src/attestation_service.rs @@ -439,7 +439,7 @@ impl AttestationService AttestationService BlockService { } #[instrument( - name = "block_proposal_duty_cycle", + name = "lh_block_proposal_duty_cycle", skip_all, fields(%slot, ?validator_pubkey) )] diff --git a/validator_client/validator_services/src/sync_committee_service.rs b/validator_client/validator_services/src/sync_committee_service.rs index 26ce052ea0..e34e7636dd 100644 --- a/validator_client/validator_services/src/sync_committee_service.rs +++ 
b/validator_client/validator_services/src/sync_committee_service.rs @@ -214,7 +214,7 @@ impl SyncCommitteeService SyncCommitteeService SyncCommitteeService Date: Thu, 16 Apr 2026 01:19:57 -0500 Subject: [PATCH 16/27] Add mixed V17/V29 execution payload invalidation test (#9089) Co-Authored-By: dapplion <35266934+dapplion@users.noreply.github.com> --- .../gloas_payload.rs | 76 +++++++++++++++++++ consensus/proto_array/src/proto_array.rs | 1 - 2 files changed, 76 insertions(+), 1 deletion(-) diff --git a/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs b/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs index ea37780795..2e792028e5 100644 --- a/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs +++ b/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs @@ -890,4 +890,80 @@ mod tests { let test = get_gloas_payload_received_interleaving_test_definition(); test.run(); } + + /// Test that execution payload invalidation propagates across the V17→V29 fork + /// boundary: after invalidating a V17 parent, head must not select any descendant. + /// + /// genesis(V17) -> block_1(V17, slot 31) -> block_2(V29, slot 32) + #[test] + fn mixed_v17_v29_invalidation() { + let balances = vec![1]; + let mut ops = vec![]; + + // V17 block at slot 31 (pre-Gloas). + ops.push(Operation::ProcessBlock { + slot: Slot::new(31), + root: get_root(1), + parent_root: get_root(0), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: None, + execution_payload_block_hash: None, + }); + + // V29 block at slot 32 (first Gloas slot), child of block 1. 
+ ops.push(Operation::ProcessBlock { + slot: Slot::new(32), + root: get_root(2), + parent_root: get_root(1), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: Some(get_hash(1)), + execution_payload_block_hash: Some(get_hash(2)), + }); + + // Vote for block 2 (V29) so both blocks have weight. + ops.push(Operation::ProcessAttestation { + validator_index: 0, + block_root: get_root(2), + attestation_slot: Slot::new(32), + }); + + // FindHead triggers apply_score_changes which materializes the vote. + ops.push(Operation::FindHead { + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + justified_state_balances: balances.clone(), + expected_head: get_root(2), + current_slot: Slot::new(32), + expected_payload_status: None, + }); + + // Invalidate block 1 (V17). filter_block_tree excludes the entire branch. + ops.push(Operation::InvalidatePayload { + head_block_root: get_root(1), + latest_valid_ancestor_root: Some(get_hash(0)), + }); + + // Head falls back to genesis — the invalid branch is no longer selectable. 
+ ops.push(Operation::FindHead { + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + justified_state_balances: balances.clone(), + expected_head: get_root(0), + current_slot: Slot::new(32), + expected_payload_status: None, + }); + + ForkChoiceTestDefinition { + finalized_block_slot: Slot::new(0), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + operations: ops, + execution_payload_parent_hash: None, + execution_payload_block_hash: None, + spec: Some(gloas_fork_boundary_spec()), + } + .run(); + } } diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 1f7291b260..4946631f73 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -864,7 +864,6 @@ impl ProtoArray { /// Invalidate zero or more blocks, as specified by the `InvalidationOperation`. /// /// See the documentation of `InvalidationOperation` for usage. - // TODO(gloas): this needs some tests for the mixed Gloas/pre-Gloas case. 
pub fn propagate_execution_payload_invalidation( &mut self, op: &InvalidationOperation, From 794718e96b46cfc67c5d653a30e9b2caecd19519 Mon Sep 17 00:00:00 2001 From: Shane K Moore <41407272+shane-moore@users.noreply.github.com> Date: Thu, 16 Apr 2026 03:23:18 -0700 Subject: [PATCH 17/27] Gloas vc ptc duty (#8338) Co-Authored-By: shane-moore Co-Authored-By: Eitan Seri- Levi --- beacon_node/http_api/tests/tests.rs | 1 + common/eth2/src/lib.rs | 30 ++ common/eth2/src/types.rs | 8 + testing/simulator/src/checks.rs | 2 + validator_client/http_metrics/src/lib.rs | 10 + validator_client/validator_metrics/src/lib.rs | 12 + .../validator_services/src/duties_service.rs | 314 +++++++++++++++++- .../src/notifier_service.rs | 3 + 8 files changed, 379 insertions(+), 1 deletion(-) diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index b28816302c..60e65e0049 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -3473,6 +3473,7 @@ impl ApiTester { self } + // TODO(EIP-7732): Add test_get_validator_duties_ptc function to test PTC duties endpoint pub async fn test_get_validator_duties_proposer_v2(self) -> Self { let current_epoch = self.chain.epoch().unwrap(); diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index d5140a3878..87b4125c0e 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -73,6 +73,8 @@ const HTTP_PROPOSER_DUTIES_TIMEOUT_QUOTIENT: u32 = 4; const HTTP_SYNC_COMMITTEE_CONTRIBUTION_TIMEOUT_QUOTIENT: u32 = 4; const HTTP_SYNC_DUTIES_TIMEOUT_QUOTIENT: u32 = 4; const HTTP_SYNC_AGGREGATOR_TIMEOUT_QUOTIENT: u32 = 24; // For DVT involving middleware only +// TODO(EIP-7732): Determine what this quotient should be +const HTTP_PTC_DUTIES_TIMEOUT_QUOTIENT: u32 = 4; const HTTP_GET_BEACON_BLOCK_SSZ_TIMEOUT_QUOTIENT: u32 = 4; const HTTP_GET_DEBUG_BEACON_STATE_QUOTIENT: u32 = 4; const HTTP_GET_DEPOSIT_SNAPSHOT_QUOTIENT: u32 = 4; @@ -93,6 +95,7 @@ pub struct Timeouts { pub 
sync_committee_contribution: Duration, pub sync_duties: Duration, pub sync_aggregators: Duration, + pub ptc_duties: Duration, pub get_beacon_blocks_ssz: Duration, pub get_debug_beacon_states: Duration, pub get_deposit_snapshot: Duration, @@ -113,6 +116,7 @@ impl Timeouts { sync_committee_contribution: timeout, sync_duties: timeout, sync_aggregators: timeout, + ptc_duties: timeout, get_beacon_blocks_ssz: timeout, get_debug_beacon_states: timeout, get_deposit_snapshot: timeout, @@ -135,6 +139,7 @@ impl Timeouts { / HTTP_SYNC_COMMITTEE_CONTRIBUTION_TIMEOUT_QUOTIENT, sync_duties: base_timeout / HTTP_SYNC_DUTIES_TIMEOUT_QUOTIENT, sync_aggregators: base_timeout / HTTP_SYNC_AGGREGATOR_TIMEOUT_QUOTIENT, + ptc_duties: base_timeout / HTTP_PTC_DUTIES_TIMEOUT_QUOTIENT, get_beacon_blocks_ssz: base_timeout / HTTP_GET_BEACON_BLOCK_SSZ_TIMEOUT_QUOTIENT, get_debug_beacon_states: base_timeout / HTTP_GET_DEBUG_BEACON_STATE_QUOTIENT, get_deposit_snapshot: base_timeout / HTTP_GET_DEPOSIT_SNAPSHOT_QUOTIENT, @@ -3274,4 +3279,29 @@ impl BeaconNodeHttpClient { self.post_with_timeout_and_response(path, &selections, self.timeouts.sync_aggregators) .await } + + // TODO(EIP-7732): Create corresponding beacon node response endpoint per spec + // https://github.com/ethereum/beacon-APIs/pull/552 + /// `POST validator/duties/ptc/{epoch}` + pub async fn post_validator_duties_ptc( + &self, + epoch: Epoch, + indices: &[u64], + ) -> Result>, Error> { + let mut path = self.eth_path(V1)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
+ .push("validator") + .push("duties") + .push("ptc") + .push(&epoch.to_string()); + + self.post_with_timeout_and_response( + path, + &ValidatorIndexDataRef(indices), + self.timeouts.ptc_duties, + ) + .await + } } diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index e85565c580..dd16f46c55 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -778,6 +778,14 @@ pub enum GraffitiPolicy { AppendClientVersions, } +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct PtcDuty { + pub pubkey: PublicKeyBytes, + #[serde(with = "serde_utils::quoted_u64")] + pub validator_index: u64, + pub slot: Slot, +} + #[derive(Clone, Deserialize)] pub struct ValidatorBlocksQuery { pub randao_reveal: SignatureBytes, diff --git a/testing/simulator/src/checks.rs b/testing/simulator/src/checks.rs index de202e5812..a2e9ae96b2 100644 --- a/testing/simulator/src/checks.rs +++ b/testing/simulator/src/checks.rs @@ -220,6 +220,8 @@ pub async fn verify_full_sync_aggregates_up_to( Ok(()) } +// TODO(EIP-7732): Add verify_ptc_duties_executed function to verify that PTC duties are being fetched and executed correctly when Gloas fork is enabled + /// Verify that the first merged PoS block got finalized. 
pub async fn verify_transition_block_finalized( network: LocalNetwork, diff --git a/validator_client/http_metrics/src/lib.rs b/validator_client/http_metrics/src/lib.rs index 70b447a493..a6624b4f44 100644 --- a/validator_client/http_metrics/src/lib.rs +++ b/validator_client/http_metrics/src/lib.rs @@ -197,6 +197,16 @@ pub fn gather_prometheus_metrics( &[NEXT_EPOCH], duties_service.attester_count(next_epoch) as i64, ); + set_int_gauge( + &PTC_COUNT, + &[CURRENT_EPOCH], + duties_service.ptc_count(current_epoch) as i64, + ); + set_int_gauge( + &PTC_COUNT, + &[NEXT_EPOCH], + duties_service.ptc_count(next_epoch) as i64, + ); } } diff --git a/validator_client/validator_metrics/src/lib.rs b/validator_client/validator_metrics/src/lib.rs index 060d8a4edd..46a86381f9 100644 --- a/validator_client/validator_metrics/src/lib.rs +++ b/validator_client/validator_metrics/src/lib.rs @@ -22,7 +22,12 @@ pub const UPDATE_ATTESTERS_CURRENT_EPOCH: &str = "update_attesters_current_epoch pub const UPDATE_ATTESTERS_NEXT_EPOCH: &str = "update_attesters_next_epoch"; pub const UPDATE_ATTESTERS_FETCH: &str = "update_attesters_fetch"; pub const UPDATE_ATTESTERS_STORE: &str = "update_attesters_store"; +pub const UPDATE_PTC_CURRENT_EPOCH: &str = "update_ptc_current_epoch"; +pub const UPDATE_PTC_NEXT_EPOCH: &str = "update_ptc_next_epoch"; +pub const UPDATE_PTC_FETCH: &str = "update_ptc_fetch"; +pub const UPDATE_PTC_STORE: &str = "update_ptc_store"; pub const ATTESTER_DUTIES_HTTP_POST: &str = "attester_duties_http_post"; +pub const PTC_DUTIES_HTTP_POST: &str = "ptc_duties_http_post"; pub const PROPOSER_DUTIES_HTTP_GET: &str = "proposer_duties_http_get"; pub const VALIDATOR_DUTIES_SYNC_HTTP_POST: &str = "validator_duties_sync_http_post"; pub const VALIDATOR_ID_HTTP_GET: &str = "validator_id_http_get"; @@ -162,6 +167,13 @@ pub static ATTESTER_COUNT: LazyLock> = LazyLock::new(|| { &["task"], ) }); +pub static PTC_COUNT: LazyLock> = LazyLock::new(|| { + try_create_int_gauge_vec( + "vc_beacon_ptc_count", 
+ "Number of PTC (Payload Timeliness Committee) validators on this host", + &["task"], + ) +}); pub static PROPOSAL_CHANGED: LazyLock> = LazyLock::new(|| { try_create_int_counter( "vc_beacon_block_proposal_changed", diff --git a/validator_client/validator_services/src/duties_service.rs b/validator_client/validator_services/src/duties_service.rs index f467db92a1..9f51694f34 100644 --- a/validator_client/validator_services/src/duties_service.rs +++ b/validator_client/validator_services/src/duties_service.rs @@ -13,7 +13,7 @@ use beacon_node_fallback::{ApiTopic, BeaconNodeFallback}; use bls::PublicKeyBytes; use eth2::types::{ AttesterData, BeaconCommitteeSelection, BeaconCommitteeSubscription, DutiesResponse, - ProposerData, StateId, ValidatorId, + ProposerData, PtcDuty, StateId, ValidatorId, }; use futures::{ StreamExt, @@ -46,6 +46,7 @@ const VALIDATOR_METRICS_MIN_COUNT: usize = 64; /// The initial request is used to determine if further requests are required, so that it /// reduces the amount of data that needs to be transferred. const INITIAL_DUTIES_QUERY_SIZE: usize = 1; +const INITIAL_PTC_DUTIES_QUERY_SIZE: usize = 1; /// Offsets from the attestation duty slot at which a subscription should be sent. const ATTESTATION_SUBSCRIPTION_OFFSETS: [u64; 8] = [3, 4, 5, 6, 7, 8, 16, 32]; @@ -83,6 +84,7 @@ const _: () = assert!(ATTESTATION_SUBSCRIPTION_OFFSETS[0] > MIN_ATTESTATION_SUBS pub enum Error { UnableToReadSlotClock, FailedToDownloadAttesters(#[allow(dead_code)] String), + FailedToDownloadPtc(#[allow(dead_code)] String), FailedToProduceSelectionProof(#[allow(dead_code)] ValidatorStoreError), InvalidModulo(#[allow(dead_code)] ArithError), Arith(#[allow(dead_code)] ArithError), @@ -283,6 +285,7 @@ type DependentRoot = Hash256; type AttesterMap = HashMap>; type ProposerMap = HashMap)>; +type PtcMap = HashMap)>; pub struct DutiesServiceBuilder { /// Provides the canonical list of locally-managed validators. 
@@ -384,6 +387,7 @@ impl DutiesServiceBuilder { attesters: Default::default(), proposers: Default::default(), sync_duties: SyncDutiesMap::new(self.sync_selection_proof_config), + ptc_duties: Default::default(), validator_store: self .validator_store .ok_or("Cannot build DutiesService without validator_store")?, @@ -414,6 +418,8 @@ pub struct DutiesService { pub proposers: RwLock, /// Map from validator index to sync committee duties. pub sync_duties: SyncDutiesMap, + /// Maps an epoch to PTC duties for locally-managed validators. + pub ptc_duties: RwLock, /// Provides the canonical list of locally-managed validators. pub validator_store: Arc, /// Maps unknown validator pubkeys to the next slot time when a poll should be conducted again. @@ -472,6 +478,15 @@ impl DutiesService { .count() } + /// Returns the total number of validators that have PTC duties in the given epoch. + pub fn ptc_count(&self, epoch: Epoch) -> usize { + self.ptc_duties + .read() + .get(&epoch) + .map(|(_, duties)| duties.len()) + .unwrap_or(0) + } + /// Returns the total number of validators that are in a doppelganger detection period. pub fn doppelganger_detecting_count(&self) -> usize { self.validator_store @@ -534,6 +549,25 @@ impl DutiesService { self.enable_high_validator_count_metrics || self.total_validator_count() <= VALIDATOR_METRICS_MIN_COUNT } + + /// Get PTC duties for a specific slot. + /// + /// Returns duties for local validators who have PTC assignments at the given slot. + pub fn get_ptc_duties_for_slot(&self, slot: Slot) -> Vec { + let epoch = slot.epoch(S::E::slots_per_epoch()); + + self.ptc_duties + .read() + .get(&epoch) + .map(|(_, ptc_duties)| { + ptc_duties + .iter() + .filter(|ptc_duty| ptc_duty.slot == slot) + .cloned() + .collect() + }) + .unwrap_or_default() + } } /// Start the service that periodically polls the beacon node for validator duties. 
This will start @@ -662,6 +696,61 @@ pub fn start_update_service }, "duties_service_sync_committee", ); + + // Spawn the task which keeps track of local PTC duties. + // Only start PTC duties service if Gloas fork is scheduled. + if core_duties_service.spec.is_gloas_scheduled() { + let duties_service = core_duties_service.clone(); + core_duties_service.executor.spawn( + async move { + loop { + // Check if we've reached the Gloas fork epoch before polling + let Some(current_slot) = duties_service.slot_clock.now() else { + // Unable to read slot clock, sleep and try again + sleep(duties_service.slot_clock.slot_duration()).await; + continue; + }; + + let current_epoch = current_slot.epoch(S::E::slots_per_epoch()); + let Some(gloas_fork_epoch) = duties_service.spec.gloas_fork_epoch else { + // Gloas fork epoch not configured, should not reach here + break; + }; + + if current_epoch + 1 < gloas_fork_epoch { + // Wait until the next slot and check again + if let Some(duration) = duties_service.slot_clock.duration_to_next_slot() { + sleep(duration).await; + } else { + sleep(duties_service.slot_clock.slot_duration()).await; + } + continue; + } + + if let Err(e) = poll_beacon_ptc_attesters(&duties_service).await { + error!( + error = ?e, + "Failed to poll PTC duties" + ); + } + + // Wait until the next slot before polling again. + // This doesn't mean that the beacon node will get polled every slot + // as the PTC duties service will return early if it deems it already has + // enough information. + if let Some(duration) = duties_service.slot_clock.duration_to_next_slot() { + sleep(duration).await; + } else { + // Just sleep for one slot if we are unable to read the system clock, this gives + // us an opportunity for the clock to eventually come good. 
+ sleep(duties_service.slot_clock.slot_duration()).await; + continue; + } + } + }, + "duties_service_ptc", + ); + } } /// Iterate through all the voting pubkeys in the `ValidatorStore` and attempt to learn any unknown @@ -1282,6 +1371,26 @@ fn process_duty_and_proof( } } +async fn post_validator_duties_ptc( + duties_service: &Arc>, + epoch: Epoch, + validator_indices: &[u64], +) -> Result>, Error> { + duties_service + .beacon_nodes + .first_success(|beacon_node| async move { + let _timer = validator_metrics::start_timer_vec( + &validator_metrics::DUTIES_SERVICE_TIMES, + &[validator_metrics::PTC_DUTIES_HTTP_POST], + ); + beacon_node + .post_validator_duties_ptc(epoch, validator_indices) + .await + }) + .await + .map_err(|e| Error::FailedToDownloadPtc(e.to_string())) +} + /// Compute the attestation selection proofs for the `duties` and add them to the `attesters` map. /// /// Duties are computed in batches each slot. If a re-org is detected then the process will @@ -1641,6 +1750,209 @@ async fn poll_beacon_proposers( Ok(()) } +/// Query the beacon node for ptc duties for any known validators. +async fn poll_beacon_ptc_attesters( + duties_service: &Arc>, +) -> Result<(), Error> { + let current_epoch_timer = validator_metrics::start_timer_vec( + &validator_metrics::DUTIES_SERVICE_TIMES, + &[validator_metrics::UPDATE_PTC_CURRENT_EPOCH], + ); + + let current_slot = duties_service + .slot_clock + .now() + .ok_or(Error::UnableToReadSlotClock)?; + let current_epoch = current_slot.epoch(S::E::slots_per_epoch()); + + // Collect *all* pubkeys, even those undergoing doppelganger protection. 
+ let local_pubkeys: HashSet<_> = duties_service + .validator_store + .voting_pubkeys(DoppelgangerStatus::ignored); + + let local_indices = { + let mut local_indices = Vec::with_capacity(local_pubkeys.len()); + + for &pubkey in &local_pubkeys { + if let Some(validator_index) = duties_service.validator_store.validator_index(&pubkey) { + local_indices.push(validator_index) + } + } + local_indices + }; + + // Poll for current epoch + if let Err(e) = poll_beacon_ptc_attesters_for_epoch( + duties_service, + current_epoch, + &local_indices, + &local_pubkeys, + ) + .await + { + error!( + %current_epoch, + request_epoch = %current_epoch, + err = ?e, + "Failed to download PTC duties" + ); + } + drop(current_epoch_timer); + let next_epoch_timer = validator_metrics::start_timer_vec( + &validator_metrics::DUTIES_SERVICE_TIMES, + &[validator_metrics::UPDATE_PTC_NEXT_EPOCH], + ); + + // Poll for next epoch + let next_epoch = current_epoch + 1; + if let Err(e) = poll_beacon_ptc_attesters_for_epoch( + duties_service, + next_epoch, + &local_indices, + &local_pubkeys, + ) + .await + { + error!( + %current_epoch, + request_epoch = %next_epoch, + err = ?e, + "Failed to download PTC duties" + ); + } + drop(next_epoch_timer); + + // Prune old duties. + duties_service + .ptc_duties + .write() + .retain(|&epoch, _| epoch + HISTORICAL_DUTIES_EPOCHS >= current_epoch); + + Ok(()) +} + +/// For the given `local_indices` and `local_pubkeys`, download the PTC duties for the given `epoch` and +/// store them in `duties_service.ptc_duties` using bandwidth optimization. +async fn poll_beacon_ptc_attesters_for_epoch< + S: ValidatorStore + 'static, + T: SlotClock + 'static, +>( + duties_service: &Arc>, + epoch: Epoch, + local_indices: &[u64], + local_pubkeys: &HashSet, +) -> Result<(), Error> { + // No need to bother the BN if we don't have any validators. 
+ if local_indices.is_empty() { + debug!( + %epoch, + "No validators, not downloading PTC duties" + ); + return Ok(()); + } + + let fetch_timer = validator_metrics::start_timer_vec( + &validator_metrics::DUTIES_SERVICE_TIMES, + &[validator_metrics::UPDATE_PTC_FETCH], + ); + + // TODO(gloas) Unlike attester duties which use `get_uninitialized_validators` to detect + // newly-added validators, PTC duties only check dependent_root changes. Validators added + // mid-epoch won't get PTC duties until the next epoch boundary. We should probably fix this. + let initial_indices_to_request = + &local_indices[0..min(INITIAL_PTC_DUTIES_QUERY_SIZE, local_indices.len())]; + + let response = + post_validator_duties_ptc(duties_service, epoch, initial_indices_to_request).await?; + let dependent_root = response.dependent_root; + + // Check if we need to update duties for this epoch and collect validators to update. + // We update if we have no epoch data OR if the dependent_root changed. + let validators_to_update = { + // Avoid holding the read-lock for any longer than required. + let ptc_duties = duties_service.ptc_duties.read(); + let needs_update = ptc_duties.get(&epoch).is_none_or(|(prior_root, _duties)| { + // Update if dependent_root changed + *prior_root != dependent_root + }); + + if needs_update { + local_pubkeys.iter().collect::>() + } else { + Vec::new() + } + }; + + if validators_to_update.is_empty() { + // No validators have conflicting (epoch, dependent_root) values for this epoch. + return Ok(()); + } + + // Make a request for all indices that require updating which we have not already made a request for. + let indices_to_request = validators_to_update + .iter() + .filter_map(|pubkey| duties_service.validator_store.validator_index(pubkey)) + .filter(|validator_index| !initial_indices_to_request.contains(validator_index)) + .collect::>(); + + // Filter the initial duties by their relevance so that we don't hit warnings about + // overwriting duties. 
+ let new_initial_duties = response + .data + .into_iter() + .filter(|duty| validators_to_update.contains(&&duty.pubkey)); + + let mut new_duties = if !indices_to_request.is_empty() { + post_validator_duties_ptc(duties_service, epoch, indices_to_request.as_slice()) + .await? + .data + } else { + vec![] + }; + new_duties.extend(new_initial_duties); + + drop(fetch_timer); + + let _store_timer = validator_metrics::start_timer_vec( + &validator_metrics::DUTIES_SERVICE_TIMES, + &[validator_metrics::UPDATE_PTC_STORE], + ); + + debug!( + %dependent_root, + num_new_duties = new_duties.len(), + "Downloaded PTC duties" + ); + + // Update duties - we only reach here if dependent_root changed or epoch is missing + let mut ptc_duties = duties_service.ptc_duties.write(); + + match ptc_duties.entry(epoch) { + hash_map::Entry::Occupied(mut entry) => { + // Dependent root must have changed, so we do complete replacement. + // We cannot support partial updates for the same dependent_root. + // The beacon node may return incomplete duty lists and we cannot distinguish between "no duties" and + // "duties not included in this response". We could query all local validators in each + // `post_validator_duties_ptc` call regardless of dependent_root changes, but the bandwidth + // cost is likely not justified since PTC assignments are sparse. + let (existing_root, _existing_duties) = entry.get(); + debug!( + old_root = %existing_root, + new_root = %dependent_root, + "PTC dependent root changed, replacing all duties" + ); + + *entry.get_mut() = (dependent_root, new_duties); + } + hash_map::Entry::Vacant(entry) => { + // No existing duties for this epoch + entry.insert((dependent_root, new_duties)); + } + } + + Ok(()) +} + /// Notify the block service if it should produce a block. 
async fn notify_block_production_service( current_slot: Slot, diff --git a/validator_client/validator_services/src/notifier_service.rs b/validator_client/validator_services/src/notifier_service.rs index a8f73490c7..e6e7a67864 100644 --- a/validator_client/validator_services/src/notifier_service.rs +++ b/validator_client/validator_services/src/notifier_service.rs @@ -109,6 +109,7 @@ pub async fn notify( let total_validators = duties_service.total_validator_count(); let proposing_validators = duties_service.proposer_count(epoch); let attesting_validators = duties_service.attester_count(epoch); + let ptc_validators = duties_service.ptc_count(epoch); let doppelganger_detecting_validators = duties_service.doppelganger_detecting_count(); if doppelganger_detecting_validators > 0 { @@ -126,6 +127,7 @@ pub async fn notify( } else if total_validators == attesting_validators { info!( current_epoch_proposers = proposing_validators, + current_epoch_ptc = ptc_validators, active_validators = attesting_validators, total_validators = total_validators, %epoch, @@ -135,6 +137,7 @@ pub async fn notify( } else if attesting_validators > 0 { info!( current_epoch_proposers = proposing_validators, + current_epoch_ptc = ptc_validators, active_validators = attesting_validators, total_validators = total_validators, %epoch, From 4cb3ffed8dc2c1aaad1350601b306fe2e1a3822c Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Fri, 17 Apr 2026 05:20:20 +0530 Subject: [PATCH 18/27] Rust 1.95 lints (#9142) N/A Adds lints for rust 1.95. Mostly cosmetic. 1. .zip(a.into_iter()) -> .zip(a) . Also a few more places where into_iter is not required 2. replace sort_by with sort_by_key 3. move if statements inside match block. 4. use checked_div instead of if statements. 
I think this is debatable in terms of being better, happy to remove it if others also feel its unnecessary Co-Authored-By: Pawan Dhananjay --- .../beacon_chain/src/attestation_rewards.rs | 2 +- beacon_node/beacon_chain/src/test_utils.rs | 7 +- beacon_node/http_api/src/ui.rs | 28 +-- beacon_node/http_api/tests/tests.rs | 6 +- .../lighthouse_network/tests/rpc_tests.rs | 227 ++++++++---------- .../gossip_methods.rs | 4 +- .../network/src/sync/network_context.rs | 4 +- beacon_node/operation_pool/src/lib.rs | 2 +- consensus/types/src/core/chain_spec.rs | 2 +- .../generate_random_block_and_blobs.rs | 7 +- lighthouse/environment/src/lib.rs | 2 +- testing/ef_tests/src/cases/fork_choice.rs | 7 +- validator_client/http_api/src/keystores.rs | 6 +- .../lighthouse_validator_store/src/lib.rs | 2 +- .../validator_services/src/duties_service.rs | 12 +- 15 files changed, 142 insertions(+), 176 deletions(-) diff --git a/beacon_node/beacon_chain/src/attestation_rewards.rs b/beacon_node/beacon_chain/src/attestation_rewards.rs index 554cd431b3..b25dd1f154 100644 --- a/beacon_node/beacon_chain/src/attestation_rewards.rs +++ b/beacon_node/beacon_chain/src/attestation_rewards.rs @@ -320,7 +320,7 @@ impl BeaconChain { ) .into_values() .collect::>(); - ideal_rewards.sort_by(|a, b| a.effective_balance.cmp(&b.effective_balance)); + ideal_rewards.sort_by_key(|a| a.effective_balance); Ok(StandardAttestationRewards { ideal_rewards, diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 13dcf22108..1b03b6e10b 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -3694,11 +3694,8 @@ pub fn generate_rand_block_and_blobs( blobs, } = bundle; - for (index, ((blob, kzg_commitment), kzg_proof)) in blobs - .into_iter() - .zip(commitments.into_iter()) - .zip(proofs.into_iter()) - .enumerate() + for (index, ((blob, kzg_commitment), kzg_proof)) in + 
blobs.into_iter().zip(commitments).zip(proofs).enumerate() { blob_sidecars.push(BlobSidecar { index: index as u64, diff --git a/beacon_node/http_api/src/ui.rs b/beacon_node/http_api/src/ui.rs index 1538215a0b..75ef2c63cb 100644 --- a/beacon_node/http_api/src/ui.rs +++ b/beacon_node/http_api/src/ui.rs @@ -215,24 +215,22 @@ pub fn post_validator_monitor_metrics( drop(val_metrics); let attestations = attestation_hits + attestation_misses; - let attestation_hit_percentage: f64 = if attestations == 0 { - 0.0 - } else { - (100 * attestation_hits / attestations) as f64 - }; + let attestation_hit_percentage: f64 = (100 * attestation_hits) + .checked_div(attestations) + .map(|f| f as f64) + .unwrap_or(0.0); + let head_attestations = attestation_head_hits + attestation_head_misses; - let attestation_head_hit_percentage: f64 = if head_attestations == 0 { - 0.0 - } else { - (100 * attestation_head_hits / head_attestations) as f64 - }; + let attestation_head_hit_percentage: f64 = (100 * attestation_head_hits) + .checked_div(head_attestations) + .map(|f| f as f64) + .unwrap_or(0.0); let target_attestations = attestation_target_hits + attestation_target_misses; - let attestation_target_hit_percentage: f64 = if target_attestations == 0 { - 0.0 - } else { - (100 * attestation_target_hits / target_attestations) as f64 - }; + let attestation_target_hit_percentage: f64 = (100 * attestation_target_hits) + .checked_div(target_attestations) + .map(|f| f as f64) + .unwrap_or(0.0); let metrics = ValidatorMetrics { attestation_hits, diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 60e65e0049..99fe0567b8 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -4746,7 +4746,7 @@ impl ApiTester { .beacon_state .validators() .into_iter() - .zip(fee_recipients.into_iter()) + .zip(fee_recipients) .enumerate() { let actual_fee_recipient = self @@ -4803,7 +4803,7 @@ impl ApiTester { .beacon_state .validators() 
.into_iter() - .zip(fee_recipients.into_iter()) + .zip(fee_recipients) .enumerate() { let actual = self @@ -4842,7 +4842,7 @@ impl ApiTester { .beacon_state .validators() .into_iter() - .zip(fee_recipients.into_iter()) + .zip(fee_recipients) .enumerate() { let actual_fee_recipient = self diff --git a/beacon_node/lighthouse_network/tests/rpc_tests.rs b/beacon_node/lighthouse_network/tests/rpc_tests.rs index d3f47c88bd..65b03189d4 100644 --- a/beacon_node/lighthouse_network/tests/rpc_tests.rs +++ b/beacon_node/lighthouse_network/tests/rpc_tests.rs @@ -139,16 +139,10 @@ fn test_tcp_status_rpc() { peer_id, inbound_request_id, request_type, - } => { - if request_type == rpc_request { - // send the response - debug!("Receiver Received"); - receiver.send_response( - peer_id, - inbound_request_id, - rpc_response.clone(), - ); - } + } if request_type == rpc_request => { + // send the response + debug!("Receiver Received"); + receiver.send_response(peer_id, inbound_request_id, rpc_response.clone()); } _ => {} // Ignore other events } @@ -269,34 +263,33 @@ fn test_tcp_blocks_by_range_chunked_rpc() { peer_id, inbound_request_id, request_type, - } => { - if request_type == rpc_request { - // send the response - warn!("Receiver got request"); - for i in 0..messages_to_send { - // Send first third of responses as base blocks, - // second as altair and third as bellatrix. - let rpc_response = if i < 2 { - rpc_response_base.clone() - } else if i < 4 { - rpc_response_altair.clone() - } else { - rpc_response_bellatrix_small.clone() - }; - receiver.send_response( - peer_id, - inbound_request_id, - rpc_response.clone(), - ); - } - // send the stream termination + } if request_type == rpc_request => { + // send the response + warn!("Receiver got request"); + for i in 0..messages_to_send { + // Send first third of responses as base blocks, + // second as altair and third as bellatrix. 
+ let rpc_response = if i < 2 { + rpc_response_base.clone() + } else if i < 4 { + rpc_response_altair.clone() + } else { + rpc_response_bellatrix_small.clone() + }; receiver.send_response( peer_id, inbound_request_id, - Response::BlocksByRange(None), + rpc_response.clone(), ); } + // send the stream termination + receiver.send_response( + peer_id, + inbound_request_id, + Response::BlocksByRange(None), + ); } + _ => {} // Ignore other events } } @@ -404,26 +397,24 @@ fn test_blobs_by_range_chunked_rpc() { peer_id, inbound_request_id, request_type, - } => { - if request_type == rpc_request { - // send the response - warn!("Receiver got request"); - for _ in 0..messages_to_send { - // Send first third of responses as base blocks, - // second as altair and third as bellatrix. - receiver.send_response( - peer_id, - inbound_request_id, - rpc_response.clone(), - ); - } - // send the stream termination + } if request_type == rpc_request => { + // send the response + warn!("Receiver got request"); + for _ in 0..messages_to_send { + // Send first third of responses as base blocks, + // second as altair and third as bellatrix. 
receiver.send_response( peer_id, inbound_request_id, - Response::BlobsByRange(None), + rpc_response.clone(), ); } + // send the stream termination + receiver.send_response( + peer_id, + inbound_request_id, + Response::BlobsByRange(None), + ); } _ => {} // Ignore other events } @@ -512,25 +503,23 @@ fn test_tcp_blocks_by_range_over_limit() { peer_id, inbound_request_id, request_type, - } => { - if request_type == rpc_request { - // send the response - warn!("Receiver got request"); - for _ in 0..messages_to_send { - let rpc_response = rpc_response_bellatrix_large.clone(); - receiver.send_response( - peer_id, - inbound_request_id, - rpc_response.clone(), - ); - } - // send the stream termination + } if request_type == rpc_request => { + // send the response + warn!("Receiver got request"); + for _ in 0..messages_to_send { + let rpc_response = rpc_response_bellatrix_large.clone(); receiver.send_response( peer_id, inbound_request_id, - Response::BlocksByRange(None), + rpc_response.clone(), ); } + // send the stream termination + receiver.send_response( + peer_id, + inbound_request_id, + Response::BlocksByRange(None), + ); } _ => {} // Ignore other events } @@ -650,12 +639,10 @@ fn test_tcp_blocks_by_range_chunked_rpc_terminates_correctly() { request_type, }, _, - )) => { - if request_type == rpc_request { - // send the response - warn!("Receiver got request"); - message_info = Some((peer_id, inbound_request_id)); - } + )) if request_type == rpc_request => { + // send the response + warn!("Receiver got request"); + message_info = Some((peer_id, inbound_request_id)); } futures::future::Either::Right((_, _)) => {} // The timeout hit, send messages if required _ => continue, @@ -770,25 +757,23 @@ fn test_tcp_blocks_by_range_single_empty_rpc() { peer_id, inbound_request_id, request_type, - } => { - if request_type == rpc_request { - // send the response - warn!("Receiver got request"); + } if request_type == rpc_request => { + // send the response + warn!("Receiver got 
request"); - for _ in 1..=messages_to_send { - receiver.send_response( - peer_id, - inbound_request_id, - rpc_response.clone(), - ); - } - // send the stream termination + for _ in 1..=messages_to_send { receiver.send_response( peer_id, inbound_request_id, - Response::BlocksByRange(None), + rpc_response.clone(), ); } + // send the stream termination + receiver.send_response( + peer_id, + inbound_request_id, + Response::BlocksByRange(None), + ); } _ => {} // Ignore other events } @@ -917,31 +902,29 @@ fn test_tcp_blocks_by_root_chunked_rpc() { peer_id, inbound_request_id, request_type, - } => { - if request_type == rpc_request { - // send the response - debug!("Receiver got request"); + } if request_type == rpc_request => { + // send the response + debug!("Receiver got request"); - for i in 0..messages_to_send { - // Send equal base, altair and bellatrix blocks - let rpc_response = if i < 2 { - rpc_response_base.clone() - } else if i < 4 { - rpc_response_altair.clone() - } else { - rpc_response_bellatrix_small.clone() - }; - receiver.send_response(peer_id, inbound_request_id, rpc_response); - debug!("Sending message"); - } - // send the stream termination - receiver.send_response( - peer_id, - inbound_request_id, - Response::BlocksByRange(None), - ); - debug!("Send stream term"); + for i in 0..messages_to_send { + // Send equal base, altair and bellatrix blocks + let rpc_response = if i < 2 { + rpc_response_base.clone() + } else if i < 4 { + rpc_response_altair.clone() + } else { + rpc_response_bellatrix_small.clone() + }; + receiver.send_response(peer_id, inbound_request_id, rpc_response); + debug!("Sending message"); } + // send the stream termination + receiver.send_response( + peer_id, + inbound_request_id, + Response::BlocksByRange(None), + ); + debug!("Send stream term"); } _ => {} // Ignore other events } @@ -1099,27 +1082,25 @@ fn test_tcp_columns_by_root_chunked_rpc_for_fork(fork_name: ForkName) { peer_id, inbound_request_id, request_type, - } => { - if 
request_type == rpc_request { - // send the response - info!("Receiver got request"); + } if request_type == rpc_request => { + // send the response + info!("Receiver got request"); - for _ in 0..messages_to_send { - receiver.send_response( - peer_id, - inbound_request_id, - rpc_response.clone(), - ); - info!("Sending message"); - } - // send the stream termination + for _ in 0..messages_to_send { receiver.send_response( peer_id, inbound_request_id, - Response::DataColumnsByRoot(None), + rpc_response.clone(), ); - info!("Send stream term"); + info!("Sending message"); } + // send the stream termination + receiver.send_response( + peer_id, + inbound_request_id, + Response::DataColumnsByRoot(None), + ); + info!("Send stream term"); } e => { info!(?e, "Got event"); @@ -1425,12 +1406,10 @@ fn test_tcp_blocks_by_root_chunked_rpc_terminates_correctly() { request_type, }, _, - )) => { - if request_type == rpc_request { - // send the response - warn!("Receiver got request"); - message_info = Some((peer_id, inbound_request_id)); - } + )) if request_type == rpc_request => { + // send the response + warn!("Receiver got request"); + message_info = Some((peer_id, inbound_request_id)); } futures::future::Either::Right((_, _)) => {} // The timeout hit, send messages if required _ => continue, diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index 2238cb2f17..2fe5aec347 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -289,7 +289,7 @@ impl NetworkBeaconProcessor { }) .collect::>(); - for (result, package) in results.into_iter().zip(packages.into_iter()) { + for (result, package) in results.into_iter().zip(packages) { let result = match result { Ok((indexed_attestation, attestation)) => Ok(VerifiedUnaggregate { indexed_attestation, @@ -495,7 +495,7 @@ impl NetworkBeaconProcessor 
{ .map(|result| result.map(|verified| verified.into_indexed_attestation())) .collect::>(); - for (result, package) in results.into_iter().zip(packages.into_iter()) { + for (result, package) in results.into_iter().zip(packages) { let result = match result { Ok(indexed_attestation) => Ok(VerifiedAggregate { indexed_attestation, diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index ff630bb470..b1ba87c75d 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -1702,8 +1702,8 @@ impl SyncNetworkContext { }; let result = columns_by_range_peers_to_request - .iter() - .filter_map(|(peer_id, _)| { + .keys() + .filter_map(|peer_id| { self.send_data_columns_by_range_request( *peer_id, request.clone(), diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index b3bd091691..4b815704d9 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -1148,7 +1148,7 @@ mod release_tests { }) .collect::>(); - for att in aggs1.into_iter().chain(aggs2.into_iter()) { + for att in aggs1.into_iter().chain(aggs2) { let attesting_indices = get_attesting_indices_from_state(&state, att.to_ref()).unwrap(); op_pool.insert_attestation(att, attesting_indices).unwrap(); diff --git a/consensus/types/src/core/chain_spec.rs b/consensus/types/src/core/chain_spec.rs index d06e5083c8..516ca2288e 100644 --- a/consensus/types/src/core/chain_spec.rs +++ b/consensus/types/src/core/chain_spec.rs @@ -1789,7 +1789,7 @@ impl<'de> Deserialize<'de> for BlobSchedule { impl BlobSchedule { pub fn new(mut vec: Vec) -> Self { // reverse sort by epoch - vec.sort_by(|a, b| b.epoch.cmp(&a.epoch)); + vec.sort_by_key(|b| std::cmp::Reverse(b.epoch)); Self { schedule: vec, skip_serializing: false, diff --git a/consensus/types/src/test_utils/generate_random_block_and_blobs.rs 
b/consensus/types/src/test_utils/generate_random_block_and_blobs.rs index cf7b5df891..4e875341a0 100644 --- a/consensus/types/src/test_utils/generate_random_block_and_blobs.rs +++ b/consensus/types/src/test_utils/generate_random_block_and_blobs.rs @@ -34,11 +34,8 @@ pub fn generate_rand_block_and_blobs( .blob_kzg_commitments_mut() .expect("kzg commitment expected from Deneb") = commitments.clone(); - for (index, ((blob, kzg_commitment), kzg_proof)) in blobs - .into_iter() - .zip(commitments.into_iter()) - .zip(proofs.into_iter()) - .enumerate() + for (index, ((blob, kzg_commitment), kzg_proof)) in + blobs.into_iter().zip(commitments).zip(proofs).enumerate() { blob_sidecars.push(BlobSidecar { index: index as u64, diff --git a/lighthouse/environment/src/lib.rs b/lighthouse/environment/src/lib.rs index 6694c673ed..1431b03f45 100644 --- a/lighthouse/environment/src/lib.rs +++ b/lighthouse/environment/src/lib.rs @@ -388,7 +388,7 @@ impl Environment { Err(e) => error!(error = ?e, "Could not register SIGHUP handler"), } - future::select(inner_shutdown, future::select_all(handles.into_iter())).await + future::select(inner_shutdown, future::select_all(handles)).await }; match self.runtime().block_on(register_handlers) { diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index 06f204ab01..5e9dc001c7 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -660,11 +660,8 @@ impl Tester { // Zipping will stop when any of the zipped lists runs out, which is what we want. Some // of the tests don't provide enough proofs/blobs, and should fail the availability // check. 
- for (i, ((blob, kzg_proof), kzg_commitment)) in blobs - .into_iter() - .zip(proofs) - .zip(commitments.into_iter()) - .enumerate() + for (i, ((blob, kzg_proof), kzg_commitment)) in + blobs.into_iter().zip(proofs).zip(commitments).enumerate() { let blob_sidecar = Arc::new(BlobSidecar { index: i as u64, diff --git a/validator_client/http_api/src/keystores.rs b/validator_client/http_api/src/keystores.rs index 18accf0d5a..9004bcbd62 100644 --- a/validator_client/http_api/src/keystores.rs +++ b/validator_client/http_api/src/keystores.rs @@ -102,10 +102,8 @@ pub fn import( // Import each keystore. Some keystores may fail to be imported, so we record a status for each. let mut statuses = Vec::with_capacity(request.keystores.len()); - for (KeystoreJsonStr(keystore), password) in request - .keystores - .into_iter() - .zip(request.passwords.into_iter()) + for (KeystoreJsonStr(keystore), password) in + request.keystores.into_iter().zip(request.passwords) { let pubkey_str = keystore.pubkey().to_string(); diff --git a/validator_client/lighthouse_validator_store/src/lib.rs b/validator_client/lighthouse_validator_store/src/lib.rs index e8c1cfbc43..76f7a86aab 100644 --- a/validator_client/lighthouse_validator_store/src/lib.rs +++ b/validator_client/lighthouse_validator_store/src/lib.rs @@ -1030,7 +1030,7 @@ impl ValidatorStore for LighthouseValidatorS // Collect successfully signed attestations and log errors. 
let mut signed_attestations = Vec::with_capacity(attestations.len()); - for (result, att) in results.into_iter().zip(attestations.into_iter()) { + for (result, att) in results.into_iter().zip(attestations) { match result { Ok(()) => { signed_attestations.push(( diff --git a/validator_client/validator_services/src/duties_service.rs b/validator_client/validator_services/src/duties_service.rs index 9f51694f34..2a371abf62 100644 --- a/validator_client/validator_services/src/duties_service.rs +++ b/validator_client/validator_services/src/duties_service.rs @@ -471,8 +471,8 @@ impl DutiesService { .voting_pubkeys(DoppelgangerStatus::only_safe); self.attesters .read() - .iter() - .filter_map(|(_, map)| map.get(&epoch)) + .values() + .filter_map(|map| map.get(&epoch)) .map(|(_, duty_and_proof)| duty_and_proof) .filter(|duty_and_proof| signing_pubkeys.contains(&duty_and_proof.duty.pubkey)) .count() @@ -533,8 +533,8 @@ impl DutiesService { self.attesters .read() - .iter() - .filter_map(|(_, map)| map.get(&epoch)) + .values() + .filter_map(|map| map.get(&epoch)) .map(|(_, duty_and_proof)| duty_and_proof) .filter(|duty_and_proof| { duty_and_proof.duty.slot == slot @@ -983,8 +983,8 @@ async fn poll_beacon_attesters Date: Fri, 17 Apr 2026 07:01:25 -0700 Subject: [PATCH 19/27] Gloas - add get_payload_attestation_endpoint (#8497) Co-Authored-By: shane-moore Co-Authored-By: Eitan Seri- Levi Co-Authored-By: Eitan Seri-Levi Co-Authored-By: Jimmy Chen --- beacon_node/beacon_chain/src/beacon_chain.rs | 44 ++++++++ beacon_node/beacon_chain/src/errors.rs | 1 + beacon_node/beacon_chain/src/metrics.rs | 11 ++ beacon_node/http_api/src/lib.rs | 9 ++ beacon_node/http_api/src/validator/mod.rs | 100 +++++++++++++++++++ beacon_node/http_api/tests/tests.rs | 74 +++++++++++++- common/eth2/src/lib.rs | 45 +++++++++ 7 files changed, 283 insertions(+), 1 deletion(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index acf7ad9c4c..4e4ff341fe 
100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -2097,6 +2097,50 @@ impl BeaconChain { )?) } + /// Produce a `PayloadAttestationData` for a PTC validator to sign. + /// + /// This is used by PTC (Payload Timeliness Committee) validators to attest to the + /// presence/absence of an execution payload and blobs for a given slot. + pub fn produce_payload_attestation_data( + &self, + request_slot: Slot, + ) -> Result { + let _timer = metrics::start_timer(&metrics::PAYLOAD_ATTESTATION_PRODUCTION_SECONDS); + + // Payload attestations are only valid for the current slot + let current_slot = self.slot()?; + if request_slot != current_slot { + return Err(Error::InvalidSlot(request_slot)); + } + + // Check if we've seen a block for this slot from the canonical head + let head = self.head_snapshot(); + if head.beacon_block.slot() != request_slot { + return Err(Error::NoBlockForSlot(request_slot)); + } + + let beacon_block_root = head.beacon_block_root; + + // TODO(gloas) do we want to use a dedicated envelope cache instead? + // Maybe the new gloas DA cache? (Or should the gloas DA cache use + // the envelopes_times_cache internally?) + let payload_present = self + .envelope_times_cache + .read() + .cache + .contains_key(&beacon_block_root); + + // TODO(EIP-7732): Check blob data availability. For now, default to true. + let blob_data_available = true; + + Ok(PayloadAttestationData { + beacon_block_root, + slot: head.beacon_block.slot(), + payload_present, + blob_data_available, + }) + } + /// Performs the same validation as `Self::verify_unaggregated_attestation_for_gossip`, but for /// multiple attestations using batch BLS verification. Batch verification can provide /// significant CPU-time savings compared to individual verification. 
diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 210c4a4482..d5ff12e33b 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -54,6 +54,7 @@ pub enum BeaconChainError { }, SlotClockDidNotStart, NoStateForSlot(Slot), + NoBlockForSlot(Slot), BeaconStateError(BeaconStateError), EpochCacheError(EpochCacheError), DBInconsistent(String), diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index 786daa09da..5485f0a9e3 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -511,6 +511,17 @@ pub static ATTESTATION_PRODUCTION_HEAD_SCRAPE_SECONDS: LazyLock> = + LazyLock::new(|| { + try_create_histogram( + "beacon_payload_attestation_production_seconds", + "Full runtime of payload attestation production", + ) + }); + /* * Fork Choice */ diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 0bb04888b7..0be631c057 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -2536,6 +2536,14 @@ pub fn serve( task_spawner_filter.clone(), ); + // GET validator/payload_attestation_data/{slot} + let get_validator_payload_attestation_data = get_validator_payload_attestation_data( + eth_v1.clone(), + chain_filter.clone(), + not_while_syncing_filter.clone(), + task_spawner_filter.clone(), + ); + // GET validator/aggregate_attestation?attestation_data_root,slot let get_validator_aggregate_attestation = get_validator_aggregate_attestation( any_version.clone(), @@ -3347,6 +3355,7 @@ pub fn serve( .uor(get_validator_blinded_blocks) .uor(get_validator_execution_payload_envelope) .uor(get_validator_attestation_data) + .uor(get_validator_payload_attestation_data) .uor(get_validator_aggregate_attestation) .uor(get_validator_sync_committee_contribution) .uor(get_lighthouse_health) diff --git a/beacon_node/http_api/src/validator/mod.rs 
b/beacon_node/http_api/src/validator/mod.rs index 7533510277..7349aa4db0 100644 --- a/beacon_node/http_api/src/validator/mod.rs +++ b/beacon_node/http_api/src/validator/mod.rs @@ -248,6 +248,106 @@ pub fn get_validator_attestation_data( .boxed() } +// GET validator/payload_attestation_data/{slot} +pub fn get_validator_payload_attestation_data( + eth_v1: EthV1Filter, + chain_filter: ChainFilter, + not_while_syncing_filter: NotWhileSyncingFilter, + task_spawner_filter: TaskSpawnerFilter, +) -> ResponseFilter { + use eth2::beacon_response::{EmptyMetadata, ForkVersionedResponse}; + use ssz::Encode; + use warp::http::Response; + + eth_v1 + .and(warp::path("validator")) + .and(warp::path("payload_attestation_data")) + .and(warp::path::param::().or_else(|_| async { + Err(warp_utils::reject::custom_bad_request( + "Invalid slot".to_string(), + )) + })) + .and(warp::path::end()) + .and(warp::header::optional::("accept")) + .and(not_while_syncing_filter) + .and(task_spawner_filter) + .and(chain_filter) + .then( + |slot: Slot, + accept_header: Option, + not_synced_filter: Result<(), Rejection>, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_response_task(Priority::P0, move || { + not_synced_filter?; + + let fork_name = chain.spec.fork_name_at_slot::(slot); + + // Payload attestations are only valid for Gloas and later forks + if !fork_name.gloas_enabled() { + return Err(warp_utils::reject::custom_bad_request(format!( + "Payload attestations are not supported for fork: {fork_name}" + ))); + } + + let payload_attestation_data = chain + .produce_payload_attestation_data(slot) + .map_err(|e| match e { + BeaconChainError::InvalidSlot(_) + | BeaconChainError::NoBlockForSlot(_) => { + warp_utils::reject::custom_bad_request(format!( + "Unable to produce payload attestation data: {e:?}" + )) + } + _ => warp_utils::reject::custom_server_error(format!( + "Unable to produce payload attestation data: {e:?}" + )), + })?; + + match accept_header { + Some(Accept::Ssz) 
=> Response::builder() + .status(200) + .header("Content-Type", "application/octet-stream") + .header("Eth-Consensus-Version", fork_name.to_string()) + .body(payload_attestation_data.as_ssz_bytes().into()) + .map(|res: Response| res) + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "Failed to build SSZ response: {e}" + )) + }), + _ => { + let json_response = ForkVersionedResponse { + version: fork_name, + metadata: EmptyMetadata {}, + data: payload_attestation_data, + }; + Response::builder() + .status(200) + .header("Content-Type", "application/json") + .header("Eth-Consensus-Version", fork_name.to_string()) + .body( + serde_json::to_string(&json_response) + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "Failed to serialize response: {e}" + )) + })? + .into(), + ) + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "Failed to build JSON response: {e}" + )) + }) + } + } + }) + }, + ) + .boxed() +} + // GET validator/blinded_blocks/{slot} pub fn get_validator_blinded_blocks( eth_v1: EthV1Filter, diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 99fe0567b8..bf8443929c 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -3,7 +3,8 @@ use beacon_chain::test_utils::RelativeSyncCommittee; use beacon_chain::{ BeaconChain, ChainConfig, StateSkipConfig, WhenSlotSkipped, test_utils::{ - AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, test_spec, + AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, + fork_name_from_env, test_spec, }, }; use bls::{AggregateSignature, Keypair, PublicKeyBytes, SecretKey, Signature, SignatureBytes}; @@ -4434,6 +4435,53 @@ impl ApiTester { self } + pub async fn test_get_validator_payload_attestation_data(self) -> Self { + let slot = self.chain.slot().unwrap(); + let fork_name = self.chain.spec.fork_name_at_slot::(slot); + + let response = self + 
.client + .get_validator_payload_attestation_data(slot) + .await + .unwrap(); + + assert_eq!(response.version(), Some(fork_name)); + + let result = response.into_data(); + let expected = self.chain.produce_payload_attestation_data(slot).unwrap(); + + assert_eq!(result.beacon_block_root, expected.beacon_block_root); + assert_eq!(result.slot, expected.slot); + assert_eq!(result.payload_present, expected.payload_present); + assert_eq!(result.blob_data_available, expected.blob_data_available); + + let ssz_result = self + .client + .get_validator_payload_attestation_data_ssz(slot) + .await + .unwrap(); + + assert_eq!(ssz_result, expected); + + self + } + + pub async fn test_get_validator_payload_attestation_data_pre_gloas(self) -> Self { + let slot = self.chain.slot().unwrap(); + + // The endpoint should return a 400 error for pre-Gloas forks + match self + .client + .get_validator_payload_attestation_data(slot) + .await + { + Ok(result) => panic!("query for pre-Gloas slot should fail, got: {result:?}"), + Err(e) => assert_eq!(e.status().unwrap(), 400), + } + + self + } + #[allow(clippy::await_holding_lock)] // This is a test, so it should be fine. 
pub async fn test_get_validator_aggregate_attestation_v1(self) -> Self { let attestation = self @@ -8057,6 +8105,30 @@ async fn get_validator_attestation_data_with_skip_slots() { .await; } +// TODO(EIP-7732): Remove `#[ignore]` once gloas beacon chain harness is implemented +#[ignore] +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn get_validator_payload_attestation_data() { + if !fork_name_from_env().is_some_and(|f| f.gloas_enabled()) { + return; + } + ApiTester::new() + .await + .test_get_validator_payload_attestation_data() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn get_validator_payload_attestation_data_pre_gloas() { + if fork_name_from_env().is_some_and(|f| f.gloas_enabled()) { + return; + } + ApiTester::new() + .await + .test_get_validator_payload_attestation_data_pre_gloas() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn get_validator_aggregate_attestation_v1() { ApiTester::new() diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 87b4125c0e..4ec75468a2 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -46,6 +46,7 @@ use ssz::{Decode, Encode}; use std::fmt; use std::future::Future; use std::time::Duration; +use types::PayloadAttestationData; pub const V1: EndpointVersion = EndpointVersion(1); pub const V2: EndpointVersion = EndpointVersion(2); @@ -79,6 +80,7 @@ const HTTP_GET_BEACON_BLOCK_SSZ_TIMEOUT_QUOTIENT: u32 = 4; const HTTP_GET_DEBUG_BEACON_STATE_QUOTIENT: u32 = 4; const HTTP_GET_DEPOSIT_SNAPSHOT_QUOTIENT: u32 = 4; const HTTP_GET_VALIDATOR_BLOCK_TIMEOUT_QUOTIENT: u32 = 4; +const HTTP_PAYLOAD_ATTESTATION_TIMEOUT_QUOTIENT: u32 = 4; const HTTP_DEFAULT_TIMEOUT_QUOTIENT: u32 = 4; /// A struct to define a variety of different timeouts for different validator tasks to ensure @@ -100,6 +102,7 @@ pub struct Timeouts { pub get_debug_beacon_states: Duration, pub get_deposit_snapshot: Duration, pub get_validator_block: Duration, + 
pub payload_attestation: Duration, pub default: Duration, } @@ -121,6 +124,7 @@ impl Timeouts { get_debug_beacon_states: timeout, get_deposit_snapshot: timeout, get_validator_block: timeout, + payload_attestation: timeout, default: timeout, } } @@ -144,6 +148,7 @@ impl Timeouts { get_debug_beacon_states: base_timeout / HTTP_GET_DEBUG_BEACON_STATE_QUOTIENT, get_deposit_snapshot: base_timeout / HTTP_GET_DEPOSIT_SNAPSHOT_QUOTIENT, get_validator_block: base_timeout / HTTP_GET_VALIDATOR_BLOCK_TIMEOUT_QUOTIENT, + payload_attestation: base_timeout / HTTP_PAYLOAD_ATTESTATION_TIMEOUT_QUOTIENT, default: base_timeout / HTTP_DEFAULT_TIMEOUT_QUOTIENT, } } @@ -2942,6 +2947,46 @@ impl BeaconNodeHttpClient { self.get_with_timeout(path, self.timeouts.attestation).await } + /// `GET validator/payload_attestation_data/{slot}` + pub async fn get_validator_payload_attestation_data( + &self, + slot: Slot, + ) -> Result, Error> { + let mut path = self.eth_path(V1)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("validator") + .push("payload_attestation_data") + .push(&slot.to_string()); + + self.get_with_timeout(path, self.timeouts.payload_attestation) + .await + .map(BeaconResponse::ForkVersioned) + } + + /// `GET validator/payload_attestation_data/{slot}` in SSZ format + pub async fn get_validator_payload_attestation_data_ssz( + &self, + slot: Slot, + ) -> Result { + let mut path = self.eth_path(V1)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
+ .push("validator") + .push("payload_attestation_data") + .push(&slot.to_string()); + + let opt_response = self + .get_bytes_opt_accept_header(path, Accept::Ssz, self.timeouts.payload_attestation) + .await?; + + let response_bytes = opt_response.ok_or(Error::StatusCode(StatusCode::NOT_FOUND))?; + + PayloadAttestationData::from_ssz_bytes(&response_bytes).map_err(Error::InvalidSsz) + } + /// `GET v1/validator/aggregate_attestation?slot,attestation_data_root` pub async fn get_validator_aggregate_attestation_v1( &self, From 9b08e1ad630f02cb653452dd623d1ce6e245c8b5 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 20 Apr 2026 10:59:39 +1000 Subject: [PATCH 20/27] Fix total_effective_balance=0 in `PreEpochCache` (#9106) Fix a **consensus fault** in `PreEpochCache` :scream_cat: Fortunately it's only reachable on a network with `total_active_balance=0`, i.e. a network that's already completely dead. As such this PR is not time-sensitive in any way. Add the floor on `total_effective_balance` when converting from `PreEpochCache` to `EpochCache`. An alternative would be to add the floor inside `PreEpochCache::get_total_active_balance`, however that would be redundant, as the only place this function is called outside this file is in single-pass epoch processing: https://github.com/sigp/lighthouse/blob/176cce585c1ba979a6210ed79b6b6528596cdb8c/consensus/state_processing/src/per_epoch_processing/single_pass.rs#L461-L462 The `set_total_active_balance` call already handles the floor. A regression test is included. 
Co-Authored-By: Michael Sproul --- consensus/state_processing/src/epoch_cache.rs | 46 ++++++++++++++++++- 1 file changed, 45 insertions(+), 1 deletion(-) diff --git a/consensus/state_processing/src/epoch_cache.rs b/consensus/state_processing/src/epoch_cache.rs index b890694a7e..92863ccdb5 100644 --- a/consensus/state_processing/src/epoch_cache.rs +++ b/consensus/state_processing/src/epoch_cache.rs @@ -74,6 +74,8 @@ impl PreEpochCache { } } + /// Note: the spec-mandated floor (max with EFFECTIVE_BALANCE_INCREMENT) is applied in + /// `into_epoch_cache` and `set_total_active_balance`. This returns the raw sum. pub fn get_total_active_balance(&self) -> u64 { self.total_active_balance } @@ -84,7 +86,12 @@ impl PreEpochCache { spec: &ChainSpec, ) -> Result { let epoch = self.epoch_key.epoch; - let total_active_balance = self.total_active_balance; + // Apply the spec-mandated floor from `get_total_balance`: + // max(EFFECTIVE_BALANCE_INCREMENT, sum(...)) + // This prevents division by zero in base reward calculation when all + // validators have zero effective balance. + let total_active_balance = + std::cmp::max(self.total_active_balance, spec.effective_balance_increment); let sqrt_total_active_balance = SqrtTotalActiveBalance::new(total_active_balance); let base_reward_per_increment = BaseRewardPerIncrement::new(total_active_balance, spec)?; @@ -176,3 +183,40 @@ pub fn initialize_epoch_cache( Ok(()) } + +#[cfg(test)] +mod tests { + use super::*; + use types::Epoch; + + /// Regression test for division-by-zero when all validators have zero effective balance. + /// + /// When `process_effective_balance_updates` drops all effective balances to 0, the + /// `PreEpochCache` accumulates `total_active_balance = 0`. Without the spec-mandated floor + /// of `max(EFFECTIVE_BALANCE_INCREMENT, sum)`, `BaseRewardPerIncrement::new()` would divide + /// by `integer_sqrt(0) = 0`. 
+ #[test] + fn into_epoch_cache_zero_total_active_balance() { + let spec = ChainSpec::minimal(); + + let cache = PreEpochCache { + epoch_key: EpochCacheKey { + epoch: Epoch::new(1), + decision_block_root: Hash256::zero(), + }, + effective_balances: vec![0, 0, 0, 0], + total_active_balance: 0, + }; + + // Verify the raw total is zero. + assert_eq!(cache.get_total_active_balance(), 0); + + // This should succeed, not panic with division by zero. + let epoch_cache = cache + .into_epoch_cache(ActivationQueue::default(), &spec) + .expect("into_epoch_cache should not fail with zero total_active_balance"); + + // Base reward for validator index 0 should be 0. + assert_eq!(epoch_cache.get_base_reward(0).unwrap(), 0); + } +} From c028bac28d8e706c5a7d1492be2ce6e5fbd90ad6 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 20 Apr 2026 10:59:42 +1000 Subject: [PATCH 21/27] Fix slasher OOM (#9141) Fix a vulnerability in the slasher whereby it would OOM upon processing an invalid attestation with an artificially high `validator_index`. This fix has already been made available to affected users on the `slasher-fix` branch. - Prevent attestations from being passed to the slasher prior to signature verification. This was unnecessary, as they would later be passed on successful validation as well. - Add a defensive cap on the maximum validator index processable by the slasher. The cap is high enough that it shouldn't be reached for several years, and will quickly result in warning logs if forgotten. - Add a regression test that confirms that the issue is fixed. 
Co-Authored-By: Michael Sproul --- .../src/attestation_verification.rs | 10 ---- .../tests/attestation_verification.rs | 57 +++++++++++++++++++ slasher/src/attestation_queue.rs | 17 ++++++ slasher/src/slasher.rs | 5 ++ 4 files changed, 79 insertions(+), 10 deletions(-) diff --git a/beacon_node/beacon_chain/src/attestation_verification.rs b/beacon_node/beacon_chain/src/attestation_verification.rs index 667bafe445..f35de59e1f 100644 --- a/beacon_node/beacon_chain/src/attestation_verification.rs +++ b/beacon_node/beacon_chain/src/attestation_verification.rs @@ -514,11 +514,6 @@ impl<'a, T: BeaconChainTypes> IndexedAggregatedAttestation<'a, T> { chain: &BeaconChain, ) -> Result { Self::verify_slashable(signed_aggregate, chain) - .inspect(|verified_aggregate| { - if let Some(slasher) = chain.slasher.as_ref() { - slasher.accept_attestation(verified_aggregate.indexed_attestation.clone()); - } - }) .map_err(|slash_info| process_slash_info(slash_info, chain)) } @@ -971,11 +966,6 @@ impl<'a, T: BeaconChainTypes> IndexedUnaggregatedAttestation<'a, T> { chain: &BeaconChain, ) -> Result { Self::verify_slashable(attestation, subnet_id, chain) - .inspect(|verified_unaggregated| { - if let Some(slasher) = chain.slasher.as_ref() { - slasher.accept_attestation(verified_unaggregated.indexed_attestation.clone()); - } - }) .map_err(|slash_info| process_slash_info(slash_info, chain)) } diff --git a/beacon_node/beacon_chain/tests/attestation_verification.rs b/beacon_node/beacon_chain/tests/attestation_verification.rs index acf326430b..91bc8e249a 100644 --- a/beacon_node/beacon_chain/tests/attestation_verification.rs +++ b/beacon_node/beacon_chain/tests/attestation_verification.rs @@ -19,8 +19,10 @@ use execution_layer::test_utils::generate_genesis_header; use fixed_bytes::FixedBytesExtended; use genesis::{DEFAULT_ETH1_BLOCK_HASH, interop_genesis_state}; use int_to_bytes::int_to_bytes32; +use slasher::{Config as SlasherConfig, Slasher}; use state_processing::per_slot_processing; use 
std::sync::{Arc, LazyLock}; +use tempfile::tempdir; use tree_hash::TreeHash; use typenum::Unsigned; use types::{ @@ -1958,3 +1960,58 @@ async fn gloas_aggregated_attestation_same_slot_index_must_be_zero() { result.err() ); } + +/// Regression test: a SingleAttestation with a huge bogus attester_index must not be forwarded to +/// the slasher. Previously the slasher received the IndexedAttestation before committee-membership +/// validation, causing an OOM when the slasher tried to allocate based on the untrusted index. +#[tokio::test] +async fn unaggregated_attestation_bogus_attester_index_not_sent_to_slasher() { + let slasher_dir = tempdir().unwrap(); + let spec = Arc::new(test_spec::()); + let slasher = Arc::new( + Slasher::::open(SlasherConfig::new(slasher_dir.path().into()), spec.clone()).unwrap(), + ); + + let inner_slasher = slasher.clone(); + let harness = BeaconChainHarness::builder(MainnetEthSpec) + .spec(spec) + .keypairs(KEYPAIRS[0..VALIDATOR_COUNT].to_vec()) + .fresh_ephemeral_store() + .initial_mutator(Box::new(move |builder| builder.slasher(inner_slasher))) + .mock_execution_layer() + .build(); + harness.advance_slot(); + harness + .extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + harness.advance_slot(); + + // Build a valid SingleAttestation, then replace the attester_index with a huge value. + let (mut bogus_attestation, _, _) = get_valid_unaggregated_attestation(&harness.chain); + bogus_attestation.attester_index = 1 << 40; // ~2^40, would OOM the slasher + + // Drain any attestations already queued from block production. 
+ slasher + .process_queued(harness.get_current_slot().epoch(E::slots_per_epoch())) + .unwrap(); + let queue_len_before = slasher.attestation_queue_len(); + assert_eq!(queue_len_before, 0); + + let result = harness + .chain + .verify_unaggregated_attestation_for_gossip(&bogus_attestation, None); + assert!( + result.is_err(), + "attestation with bogus index should fail verification" + ); + + assert_eq!( + slasher.attestation_queue_len(), + 0, + "slasher queue length must not change — bogus attestation must not be forwarded" + ); +} diff --git a/slasher/src/attestation_queue.rs b/slasher/src/attestation_queue.rs index 62a1bb0945..e99a3708ad 100644 --- a/slasher/src/attestation_queue.rs +++ b/slasher/src/attestation_queue.rs @@ -2,8 +2,17 @@ use crate::{AttesterRecord, Config, IndexedAttesterRecord}; use parking_lot::Mutex; use std::collections::BTreeMap; use std::sync::{Arc, Weak}; +use tracing::warn; use types::{EthSpec, Hash256, IndexedAttestation}; +/// Hard cap on validator indices accepted by the slasher. +/// +/// Any attestation referencing a validator index above this limit is silently dropped during +/// grouping. This is a defence-in-depth measure to prevent pathological memory allocation if an +/// attestation with a bogus index somehow reaches the slasher. The value (2^23 = 8,388,608) +/// provides generous headroom above the current mainnet validator set (~2M). +const MAX_VALIDATOR_INDEX: u64 = 8_388_608; + /// Staging area for attestations received from the network. 
/// /// Attestations are not grouped by validator index at this stage so that they can be easily @@ -72,6 +81,14 @@ impl AttestationBatch { let mut grouped_attestations = GroupedAttestations { subqueues: vec![] }; for ((validator_index, _), indexed_record) in self.attesters { + if validator_index >= MAX_VALIDATOR_INDEX { + warn!( + validator_index, + "Dropping slasher attestation with out-of-range validator index" + ); + break; + } + let subqueue_id = config.validator_chunk_index(validator_index); if subqueue_id >= grouped_attestations.subqueues.len() { diff --git a/slasher/src/slasher.rs b/slasher/src/slasher.rs index 5d26c5a6da..8d34a34f3e 100644 --- a/slasher/src/slasher.rs +++ b/slasher/src/slasher.rs @@ -74,6 +74,11 @@ impl Slasher { &self.config } + /// Return the number of attestations in the queue. + pub fn attestation_queue_len(&self) -> usize { + self.attestation_queue.len() + } + /// Accept an attestation from the network and queue it for processing. pub fn accept_attestation(&self, attestation: IndexedAttestation) { self.attestation_queue.queue(attestation); From cf3d5e285e9109def686d24b543d5b44cb233347 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 21 Apr 2026 16:29:15 +1000 Subject: [PATCH 22/27] Gloas spec v1.7.0-alpha.5 and beacon_chain tests (#8998) Fix database pruning post-Gloas - Fix DB pruning logic (and state summaries DAG) - Get the `beacon_chain` tests running with `FORK_NAME=gloas` :tada: Co-Authored-By: Michael Sproul Co-Authored-By: Jimmy Chen Co-Authored-By: Eitan Seri- Levi Co-Authored-By: dapplion <35266934+dapplion@users.noreply.github.com> Co-Authored-By: Eitan Seri-Levi --- .github/forbidden-files.txt | 1 + Makefile | 5 +- .../beacon_chain/src/beacon_block_streamer.rs | 1 + beacon_node/beacon_chain/src/beacon_chain.rs | 68 +-- .../beacon_chain/src/beacon_snapshot.rs | 9 +- .../beacon_chain/src/blob_verification.rs | 8 +- .../src/block_production/gloas.rs | 109 +++- .../beacon_chain/src/block_production/mod.rs | 81 +-- 
.../beacon_chain/src/block_verification.rs | 43 +- beacon_node/beacon_chain/src/builder.rs | 46 +- .../beacon_chain/src/canonical_head.rs | 97 ++-- .../src/data_column_verification.rs | 9 +- beacon_node/beacon_chain/src/errors.rs | 6 +- .../beacon_chain/src/execution_payload.rs | 8 + beacon_node/beacon_chain/src/migrate.rs | 2 +- .../src/payload_bid_verification/tests.rs | 9 +- .../src/payload_envelope_streamer/tests.rs | 3 +- .../execution_pending_envelope.rs | 18 +- .../gossip_verified_envelope.rs | 15 +- .../payload_envelope_verification/import.rs | 11 +- .../src/payload_envelope_verification/mod.rs | 10 +- .../src/pending_payload_envelopes.rs | 7 +- .../beacon_chain/src/state_advance_timer.rs | 14 +- beacon_node/beacon_chain/src/test_utils.rs | 66 ++- .../src/validator_pubkey_cache.rs | 18 +- .../tests/attestation_production.rs | 2 +- .../tests/attestation_verification.rs | 15 +- .../beacon_chain/tests/block_verification.rs | 94 +++- .../beacon_chain/tests/column_verification.rs | 7 +- beacon_node/beacon_chain/tests/events.rs | 5 +- .../tests/payload_invalidation.rs | 43 +- beacon_node/beacon_chain/tests/rewards.rs | 3 +- beacon_node/beacon_chain/tests/store_tests.rs | 467 ++++++++---------- beacon_node/beacon_chain/tests/tests.rs | 18 +- .../beacon_chain/tests/validator_monitor.rs | 3 +- beacon_node/execution_layer/src/engine_api.rs | 64 ++- .../execution_layer/src/engine_api/http.rs | 34 ++ .../src/engine_api/json_structures.rs | 35 +- .../test_utils/execution_block_generator.rs | 5 + .../src/test_utils/handle_rpc.rs | 18 +- .../src/test_utils/mock_builder.rs | 26 +- .../src/test_utils/mock_execution_layer.rs | 30 +- .../execution_layer/src/test_utils/mod.rs | 1 + .../src/beacon/execution_payload_envelope.rs | 6 +- .../http_api/src/sync_committee_rewards.rs | 3 +- beacon_node/http_api/tests/tests.rs | 5 +- .../src/network_beacon_processor/tests.rs | 14 +- beacon_node/store/src/hot_cold_store.rs | 280 +---------- beacon_node/store/src/reconstruct.rs | 1 - 
beacon_node/store/src/state_cache.rs | 43 +- common/eth2/src/types.rs | 2 - consensus/fork_choice/src/fork_choice.rs | 29 +- .../gloas_payload.rs | 106 ++-- consensus/proto_array/src/proto_array.rs | 39 +- .../src/proto_array_fork_choice.rs | 43 +- .../state_processing/src/block_replayer.rs | 137 +---- .../src/envelope_processing.rs | 153 ++---- consensus/state_processing/src/genesis.rs | 40 +- .../src/per_block_processing.rs | 140 +++++- .../src/per_block_processing/errors.rs | 7 + .../src/per_block_processing/tests.rs | 2 +- .../src/per_block_processing/withdrawals.rs | 10 +- .../state_processing/src/upgrade/gloas.rs | 5 +- consensus/types/src/block/beacon_block.rs | 1 + .../types/src/block/beacon_block_body.rs | 6 + .../types/src/block/signed_beacon_block.rs | 6 +- .../types/src/execution/execution_payload.rs | 8 +- .../src/execution/execution_payload_bid.rs | 1 + .../execution/execution_payload_envelope.rs | 8 +- consensus/types/src/execution/mod.rs | 2 - .../signed_execution_payload_envelope.rs | 2 +- .../src/execution/state_payload_status.rs | 18 - consensus/types/src/state/beacon_state.rs | 42 +- testing/ef_tests/Makefile | 2 +- testing/ef_tests/check_all_files_accessed.py | 6 +- testing/ef_tests/src/cases/fork_choice.rs | 109 +++- testing/ef_tests/src/cases/operations.rs | 54 +- testing/ef_tests/src/handler.rs | 8 +- testing/ef_tests/src/lib.rs | 8 +- testing/ef_tests/tests/tests.rs | 18 +- .../src/test_rig.rs | 4 + .../lighthouse_validator_store/src/lib.rs | 2 +- 82 files changed, 1513 insertions(+), 1391 deletions(-) delete mode 100644 consensus/types/src/execution/state_payload_status.rs diff --git a/.github/forbidden-files.txt b/.github/forbidden-files.txt index b070067350..8649fbb574 100644 --- a/.github/forbidden-files.txt +++ b/.github/forbidden-files.txt @@ -12,3 +12,4 @@ beacon_node/http_api/src/block_rewards.rs common/eth2/src/lighthouse/attestation_performance.rs common/eth2/src/lighthouse/block_packing_efficiency.rs 
common/eth2/src/lighthouse/block_rewards.rs +consensus/types/src/execution/state_payload_status.rs diff --git a/Makefile b/Makefile index 033ad6cfc8..280e74d1d9 100644 --- a/Makefile +++ b/Makefile @@ -207,11 +207,10 @@ run-ef-tests: ./$(EF_TESTS)/check_all_files_accessed.py $(EF_TESTS)/.accessed_file_log.txt $(EF_TESTS)/consensus-spec-tests # Run the tests in the `beacon_chain` crate for all known forks. -# TODO(EIP-7732) Extend to support gloas by using RECENT_FORKS instead -test-beacon-chain: $(patsubst %,test-beacon-chain-%,$(RECENT_FORKS_BEFORE_GLOAS)) +test-beacon-chain: $(patsubst %,test-beacon-chain-%,$(RECENT_FORKS)) test-beacon-chain-%: - env FORK_NAME=$* cargo nextest run --release --features "fork_from_env,slasher/lmdb,$(TEST_FEATURES)" -p beacon_chain + env FORK_NAME=$* cargo nextest run --release --features "fork_from_env,slasher/lmdb,$(TEST_FEATURES)" -p beacon_chain --no-fail-fast # Run the tests in the `http_api` crate for recent forks. test-http-api: $(patsubst %,test-http-api-%,$(RECENT_FORKS_BEFORE_GLOAS)) diff --git a/beacon_node/beacon_chain/src/beacon_block_streamer.rs b/beacon_node/beacon_chain/src/beacon_block_streamer.rs index 9ddc50a9f7..ed74022c3d 100644 --- a/beacon_node/beacon_chain/src/beacon_block_streamer.rs +++ b/beacon_node/beacon_chain/src/beacon_block_streamer.rs @@ -733,6 +733,7 @@ mod tests { spec.deneb_fork_epoch = Some(Epoch::new(deneb_fork_epoch as u64)); spec.electra_fork_epoch = Some(Epoch::new(electra_fork_epoch as u64)); spec.fulu_fork_epoch = Some(Epoch::new(fulu_fork_epoch as u64)); + spec.gloas_fork_epoch = None; let spec = Arc::new(spec); let harness = get_harness(VALIDATOR_COUNT, spec.clone()); diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 4e4ff341fe..e14c7c047f 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -2058,12 +2058,7 @@ impl BeaconChain { // for the same block. 
Analysis: https://hackmd.io/@dapplion/gloas_dependant_root let (advanced_state_root, mut state) = self .store - .get_advanced_hot_state( - beacon_block_root, - StatePayloadStatus::Pending, - request_slot, - beacon_state_root, - )? + .get_advanced_hot_state(beacon_block_root, request_slot, beacon_state_root)? .ok_or(Error::MissingBeaconState(beacon_state_root))?; if state.current_epoch() < request_epoch { partial_state_advance( @@ -4564,7 +4559,7 @@ impl BeaconChain { // // Load the parent state from disk. let chain = self.clone(); - let (state, state_root_opt) = self + let block_production_state = self .task_executor .spawn_blocking_handle( move || chain.load_state_for_block_production(slot), @@ -4573,6 +4568,10 @@ impl BeaconChain { .ok_or(BlockProductionError::ShuttingDown)? .await .map_err(BlockProductionError::TokioJoin)??; + let (state, state_root_opt) = ( + block_production_state.state, + block_production_state.state_root, + ); // Part 2/2 (async, with some blocking components) // @@ -4722,12 +4721,7 @@ impl BeaconChain { .ok_or(Error::MissingBeaconBlock(parent_block_root))?; let (state_root, state) = self .store - .get_advanced_hot_state( - parent_block_root, - StatePayloadStatus::Pending, - proposal_slot, - block.state_root(), - )? + .get_advanced_hot_state(parent_block_root, proposal_slot, block.state_root())? .ok_or(Error::MissingBeaconState(block.state_root()))?; (Cow::Owned(state), state_root) }; @@ -6019,6 +6013,12 @@ impl BeaconChain { None }; + let slot_number = if prepare_slot_fork.gloas_enabled() { + Some(prepare_slot.as_u64()) + } else { + None + }; + let payload_attributes = PayloadAttributes::new( self.slot_clock .start_of(prepare_slot) @@ -6028,6 +6028,7 @@ impl BeaconChain { execution_layer.get_suggested_fee_recipient(proposer).await, withdrawals.map(Into::into), parent_beacon_block_root, + slot_number, ); execution_layer @@ -6663,12 +6664,7 @@ impl BeaconChain { // for the same block. 
Analysis: https://hackmd.io/@dapplion/gloas_dependant_root let (state_root, state) = self .store - .get_advanced_hot_state( - head_block_root, - StatePayloadStatus::Pending, - target_slot, - head_block.state_root, - )? + .get_advanced_hot_state(head_block_root, target_slot, head_block.state_root)? .ok_or(Error::MissingBeaconState(head_block.state_root))?; (state, state_root) }; @@ -6756,10 +6752,10 @@ impl BeaconChain { blocks.push((beacon_block_root, Arc::new(beacon_block))); } - // Collect states, using the next blocks to determine if states are full (have Gloas - // payloads). + // Collect envelopes, using the next blocks to determine if payloads are canonical + // (the parent block was full). for (i, (block_root, block)) in blocks.iter().enumerate() { - let (opt_envelope, state_root) = if block.fork_name_unchecked().gloas_enabled() { + let opt_envelope = if block.fork_name_unchecked().gloas_enabled() { let opt_envelope = self.store.get_payload_envelope(block_root)?.map(Arc::new); if let Some((_, next_block)) = blocks.get(i + 1) { @@ -6768,22 +6764,30 @@ impl BeaconChain { let envelope = opt_envelope.ok_or_else(|| { Error::DBInconsistent(format!("Missing envelope {block_root:?}")) })?; - let state_root = envelope.message.state_root; - (Some(envelope), state_root) + Some(envelope) } else { - (None, block.state_root()) + None } } else { - // TODO(gloas): should use fork choice/cached head for last block in sequence - opt_envelope - .as_ref() - .map_or((None, block.state_root()), |envelope| { - (Some(envelope.clone()), envelope.message.state_root) - }) + // Last block in the sequence: use canonical head to determine + // whether the payload is canonical. 
+ let head = self.canonical_head.cached_head(); + assert_eq!(head.head_block_root(), *block_root); + let payload_received = + head.head_payload_status() == fork_choice::PayloadStatus::Full; + if payload_received { + let envelope = opt_envelope.ok_or_else(|| { + Error::DBInconsistent(format!("Missing envelope {block_root:?}")) + })?; + Some(envelope) + } else { + None + } } } else { - (None, block.state_root()) + None }; + let state_root = block.state_root(); let mut beacon_state = self .store diff --git a/beacon_node/beacon_chain/src/beacon_snapshot.rs b/beacon_node/beacon_chain/src/beacon_snapshot.rs index 566713e3f3..996a964386 100644 --- a/beacon_node/beacon_chain/src/beacon_snapshot.rs +++ b/beacon_node/beacon_chain/src/beacon_snapshot.rs @@ -44,18 +44,13 @@ impl> BeaconSnapshot { } } - /// Returns the state root from `self.beacon_block` or `self.execution_envelope` as - /// appropriate. + /// Returns the state root from `self.beacon_block`. /// /// ## Caution /// /// It is not strictly enforced that `root(self.beacon_state) == self.beacon_state_root()`. pub fn beacon_state_root(&self) -> Hash256 { - if let Some(ref envelope) = self.execution_envelope { - envelope.message.state_root - } else { - self.beacon_block.message().state_root() - } + self.beacon_block.message().state_root() } /// Update all fields of the checkpoint. diff --git a/beacon_node/beacon_chain/src/blob_verification.rs b/beacon_node/beacon_chain/src/blob_verification.rs index 86b385d818..e557a24369 100644 --- a/beacon_node/beacon_chain/src/blob_verification.rs +++ b/beacon_node/beacon_chain/src/blob_verification.rs @@ -20,7 +20,6 @@ use tree_hash::TreeHash; use types::data::BlobIdentifier; use types::{ BeaconStateError, BlobSidecar, Epoch, EthSpec, Hash256, SignedBeaconBlockHeader, Slot, - StatePayloadStatus, }; /// An error occurred while validating a gossip blob. @@ -513,12 +512,7 @@ pub fn validate_blob_sidecar_for_gossip BeaconChain { // // Load the parent state from disk. 
let chain = self.clone(); - let (state, state_root_opt) = self + let block_production_state = self .task_executor .spawn_blocking_handle( move || chain.load_state_for_block_production(slot), @@ -96,6 +99,12 @@ impl BeaconChain { .ok_or(BlockProductionError::ShuttingDown)? .await .map_err(BlockProductionError::TokioJoin)??; + let BlockProductionState { + state, + state_root: state_root_opt, + parent_payload_status, + parent_envelope, + } = block_production_state; // Part 2/2 (async, with some blocking components) // @@ -103,6 +112,8 @@ impl BeaconChain { self.produce_block_on_state_gloas( state, state_root_opt, + parent_payload_status, + parent_envelope, slot, randao_reveal, graffiti_settings, @@ -113,10 +124,13 @@ impl BeaconChain { // TODO(gloas) need to implement builder boost factor logic #[instrument(level = "debug", skip_all)] + #[allow(clippy::too_many_arguments)] pub async fn produce_block_on_state_gloas( self: &Arc, state: BeaconState, state_root_opt: Option, + parent_payload_status: PayloadStatus, + parent_envelope: Option>>, produce_at_slot: Slot, randao_reveal: Signature, graffiti_settings: GraffitiSettings, @@ -148,6 +162,16 @@ impl BeaconChain { .await .map_err(BlockProductionError::TokioJoin)??; + // Extract the parent's execution requests from the envelope (if parent was full). + let parent_execution_requests = if parent_payload_status == PayloadStatus::Full { + parent_envelope + .as_ref() + .map(|env| env.message.execution_requests.clone()) + .ok_or(BlockProductionError::MissingParentExecutionPayload)? + } else { + ExecutionRequests::default() + }; + // Part 2/3 (async) // // Produce the execution payload bid. 
@@ -157,6 +181,8 @@ impl BeaconChain { .clone() .produce_execution_payload_bid( state, + parent_payload_status, + parent_envelope, produce_at_slot, BID_VALUE_SELF_BUILD, BUILDER_INDEX_SELF_BUILD, @@ -173,6 +199,7 @@ impl BeaconChain { chain.complete_partial_beacon_block_gloas( partial_beacon_block, execution_payload_bid, + parent_execution_requests, payload_data, state, verification, @@ -427,6 +454,7 @@ impl BeaconChain { &self, partial_beacon_block: PartialBeaconBlock, signed_execution_payload_bid: SignedExecutionPayloadBid, + parent_execution_requests: ExecutionRequests, payload_data: Option>, mut state: BeaconState, verification: ProduceBlockVerification, @@ -488,6 +516,7 @@ impl BeaconChain { bls_to_execution_changes: bls_to_execution_changes .try_into() .map_err(BlockProductionError::SszTypesError)?, + parent_execution_requests, signed_execution_payload_bid, payload_attestations: payload_attestations .try_into() @@ -558,29 +587,23 @@ impl BeaconChain { execution_requests: payload_data.execution_requests, builder_index: payload_data.builder_index, beacon_block_root, - slot: payload_data.slot, - state_root: Hash256::ZERO, }; - let mut signed_envelope = SignedExecutionPayloadEnvelope { + let signed_envelope = SignedExecutionPayloadEnvelope { message: execution_payload_envelope, signature: Signature::empty(), }; - // We skip state root verification here because the relevant state root - // cant be calculated until after the new block has been constructed. - process_execution_payload_envelope( - &mut state, - None, + // Verify the envelope against the state. This performs no state mutation. + verify_execution_payload_envelope( + &state, &signed_envelope, VerifySignatures::False, - VerifyStateRoot::False, + state_root, &self.spec, ) .map_err(BlockProductionError::EnvelopeProcessingError)?; - signed_envelope.message.state_root = state.update_tree_hash_cache()?; - // Cache the envelope for later retrieval by the validator for signing and publishing. 
let envelope_slot = payload_data.slot; // TODO(gloas) might be safer to cache by root instead of by slot. @@ -622,7 +645,9 @@ impl BeaconChain { #[instrument(level = "debug", skip_all)] pub async fn produce_execution_payload_bid( self: Arc, - mut state: BeaconState, + state: BeaconState, + parent_payload_status: PayloadStatus, + parent_envelope: Option>>, produce_at_slot: Slot, bid_value: u64, builder_index: BuilderIndex, @@ -665,6 +690,17 @@ impl BeaconChain { .map_err(|e| BlockProductionError::BeaconChain(Box::new(e)))?, }; + let parent_bid = state.latest_execution_payload_bid()?; + + // TODO(gloas): need should_extend_payload check here as well + let parent_block_hash = if parent_payload_status == PayloadStatus::Full { + // Build on parent bid's payload. + parent_bid.block_hash + } else { + // Skip parent bid's payload. For genesis this is the EL genesis hash. + parent_bid.parent_block_hash + }; + // TODO(gloas) this should be BlockProductionVersion::V4 // V3 is okay for now as long as we're not connected to a builder // TODO(gloas) add builder boost factor @@ -672,6 +708,8 @@ impl BeaconChain { self.clone(), &state, parent_root, + parent_block_hash, + parent_envelope, proposer_index, builder_params, )?; @@ -689,13 +727,11 @@ impl BeaconChain { blobs_and_proofs: _, } = block_proposal_contents; - let state_root = state.update_tree_hash_cache()?; - // TODO(gloas) since we are defaulting to local building, execution payment is 0 // execution payment should only be set to > 0 for trusted building. 
let bid = ExecutionPayloadBid:: { - parent_block_hash: state.latest_block_hash()?.to_owned(), - parent_block_root: state.get_latest_block_root(state_root), + parent_block_hash, + parent_block_root: parent_root, block_hash: payload.block_hash, prev_randao: payload.prev_randao, fee_recipient: Address::ZERO, @@ -705,6 +741,7 @@ impl BeaconChain { value: bid_value, execution_payment: EXECUTION_PAYMENT_TRUSTLESS_BUILD, blob_kzg_commitments, + execution_requests_root: execution_requests.tree_hash_root(), }; // Store payload data for envelope construction after block is created @@ -740,6 +777,8 @@ fn get_execution_payload_gloas( chain: Arc>, state: &BeaconState, parent_beacon_block_root: Hash256, + parent_block_hash: ExecutionBlockHash, + parent_envelope: Option>>, proposer_index: u64, builder_params: BuilderParams, ) -> Result, BlockProductionError> { @@ -751,11 +790,28 @@ fn get_execution_payload_gloas( compute_timestamp_at_slot(state, state.slot(), spec).map_err(BeaconStateError::from)?; let random = *state.get_randao_mix(current_epoch)?; - let latest_execution_block_hash = *state.latest_block_hash()?; - let latest_gas_limit = state.latest_execution_payload_bid()?.gas_limit; + // TODO(gloas): this gas limit calc is not necessarily right + let parent_bid = state.latest_execution_payload_bid()?; + let latest_gas_limit = parent_bid.gas_limit; - let withdrawals = if state.is_parent_block_full() { - Withdrawals::::from(get_expected_withdrawals(state, spec)?).into() + let is_parent_block_full = parent_block_hash == parent_bid.block_hash; + + let withdrawals = if is_parent_block_full { + if let Some(envelope) = parent_envelope { + let mut withdrawals_state = state.clone(); + apply_parent_execution_payload( + &mut withdrawals_state, + parent_bid, + &envelope.message.execution_requests, + spec, + )?; + Withdrawals::::from(get_expected_withdrawals(&withdrawals_state, spec)?) + .into() + } else { + // No envelope available (e.g. genesis). 
The parent had no execution requests, + // so compute withdrawals directly from the current state. + Withdrawals::::from(get_expected_withdrawals(state, spec)?).into() + } } else { // If the previous payload was missed, carry forward the withdrawals from the state. state.payload_expected_withdrawals()?.to_vec() @@ -773,7 +829,7 @@ fn get_execution_payload_gloas( timestamp, random, proposer_index, - latest_execution_block_hash, + parent_block_hash, latest_gas_limit, builder_params, withdrawals, @@ -839,12 +895,15 @@ where let suggested_fee_recipient = execution_layer .get_suggested_fee_recipient(proposer_index) .await; + let slot_number = Some(builder_params.slot.as_u64()); + let payload_attributes = PayloadAttributes::new( timestamp, random, suggested_fee_recipient, Some(withdrawals), Some(parent_beacon_block_root), + slot_number, ); let target_gas_limit = execution_layer.get_proposer_gas_limit(proposer_index).await; diff --git a/beacon_node/beacon_chain/src/block_production/mod.rs b/beacon_node/beacon_chain/src/block_production/mod.rs index bf42923cbe..fd5e381023 100644 --- a/beacon_node/beacon_chain/src/block_production/mod.rs +++ b/beacon_node/beacon_chain/src/block_production/mod.rs @@ -1,9 +1,10 @@ use std::{sync::Arc, time::Duration}; +use fork_choice::PayloadStatus; use proto_array::ProposerHeadError; use slot_clock::SlotClock; use tracing::{debug, error, info, instrument, warn}; -use types::{BeaconState, Hash256, Slot, StatePayloadStatus}; +use types::{BeaconState, Hash256, SignedExecutionPayloadEnvelope, Slot}; use crate::{ BeaconChain, BeaconChainTypes, BlockProductionError, StateSkipConfig, @@ -12,14 +13,24 @@ use crate::{ mod gloas; +/// State loaded from the database for block production. +pub(crate) struct BlockProductionState { + pub state: BeaconState, + pub state_root: Option, + pub parent_payload_status: PayloadStatus, + pub parent_envelope: Option>>, +} + impl BeaconChain { /// Load a beacon state from the database for block production. 
This is a long-running process /// that should not be performed in an `async` context. + /// + /// The returned `PayloadStatus` is the payload status of the parent block to be built upon. #[instrument(skip_all, level = "debug")] pub(crate) fn load_state_for_block_production( self: &Arc, slot: Slot, - ) -> Result<(BeaconState, Option), BlockProductionError> { + ) -> Result, BlockProductionError> { let fork_choice_timer = metrics::start_timer(&metrics::BLOCK_PRODUCTION_FORK_CHOICE_TIMES); self.wait_for_fork_choice_before_block_production(slot)?; drop(fork_choice_timer); @@ -27,16 +38,19 @@ impl BeaconChain { let state_load_timer = metrics::start_timer(&metrics::BLOCK_PRODUCTION_STATE_LOAD_TIMES); // Atomically read some values from the head whilst avoiding holding cached head `Arc` any - // longer than necessary. - let (head_slot, head_block_root, head_state_root) = { + // longer than necessary. If the head has a payload envelope (Gloas full head), cheaply + // clone the `Arc` so we can pass it to block production without a DB load. + let (head_slot, head_block_root, head_state_root, head_payload_status, head_envelope) = { let head = self.canonical_head.cached_head(); ( head.head_slot(), head.head_block_root(), head.head_state_root(), + head.head_payload_status(), + head.snapshot.execution_envelope.clone(), ) }; - let (state, state_root_opt) = if head_slot < slot { + let result = if head_slot < slot { // Attempt an aggressive re-org if configured and the conditions are right. 
// TODO(gloas): re-enable reorgs let gloas_enabled = self @@ -52,37 +66,29 @@ impl BeaconChain { head_to_reorg = %head_block_root, "Proposing block to re-org current head" ); - (re_org_state, Some(re_org_state_root)) + // TODO(gloas): ensure we use a sensible payload status when we enable reorgs + // for Gloas + BlockProductionState { + state: re_org_state, + state_root: Some(re_org_state_root), + parent_payload_status: PayloadStatus::Pending, + parent_envelope: None, + } } else { // Fetch the head state advanced through to `slot`, which should be present in the // state cache thanks to the state advance timer. - // TODO(gloas): need to fix this once fork choice understands payloads - // for now we just use the existence of the head's payload envelope to determine - // whether we should build atop it - let (payload_status, parent_state_root) = if gloas_enabled - && let Ok(Some(envelope)) = self.store.get_payload_envelope(&head_block_root) - { - debug!( - %slot, - parent_state_root = ?envelope.message.state_root, - parent_block_root = ?head_block_root, - "Building Gloas block on full state" - ); - (StatePayloadStatus::Full, envelope.message.state_root) - } else { - (StatePayloadStatus::Pending, head_state_root) - }; + let parent_state_root = head_state_root; let (state_root, state) = self .store - .get_advanced_hot_state( - head_block_root, - payload_status, - slot, - parent_state_root, - ) + .get_advanced_hot_state(head_block_root, slot, parent_state_root) .map_err(BlockProductionError::FailedToLoadState)? 
.ok_or(BlockProductionError::UnableToProduceAtSlot(slot))?; - (state, Some(state_root)) + BlockProductionState { + state, + state_root: Some(state_root), + parent_payload_status: head_payload_status, + parent_envelope: head_envelope, + } } } else { warn!( @@ -94,12 +100,19 @@ impl BeaconChain { .state_at_slot(slot - 1, StateSkipConfig::WithStateRoots) .map_err(|_| BlockProductionError::UnableToProduceAtSlot(slot))?; - (state, None) + // TODO(gloas): update this to read payload canonicity from fork choice once ready + let parent_payload_status = PayloadStatus::Pending; + BlockProductionState { + state, + state_root: None, + parent_payload_status, + parent_envelope: None, + } }; drop(state_load_timer); - Ok((state, state_root_opt)) + Ok(result) } /// If configured, wait for the fork choice run at the start of the slot to complete. @@ -232,11 +245,7 @@ impl BeaconChain { let (state_root, state) = self .store - .get_advanced_hot_state_from_cache( - re_org_parent_block, - StatePayloadStatus::Pending, - slot, - ) + .get_advanced_hot_state_from_cache(re_org_parent_block, slot) .or_else(|| { warn!(reason = "no state in cache", "Not attempting re-org"); None diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 1ce1137f1e..9a43147233 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -99,8 +99,7 @@ use tracing::{Instrument, Span, debug, debug_span, error, info_span, instrument} use types::{ BeaconBlockRef, BeaconState, BeaconStateError, BlobsList, ChainSpec, DataColumnSidecarList, Epoch, EthSpec, FullPayload, Hash256, InconsistentFork, KzgProofs, RelativeEpoch, - SignedBeaconBlock, SignedBeaconBlockHeader, Slot, StatePayloadStatus, - data::DataColumnSidecarError, + SignedBeaconBlock, SignedBeaconBlockHeader, Slot, data::DataColumnSidecarError, }; /// Maximum block slot number. 
Block with slots bigger than this constant will NOT be processed. @@ -1509,11 +1508,7 @@ impl ExecutionPendingBlock { let distance = block.slot().as_u64().saturating_sub(state.slot().as_u64()); for _ in 0..distance { - // TODO(gloas): could do a similar optimisation here for Full blocks if we have access - // to the parent envelope and its `state_root`. - let state_root = if parent.beacon_block.slot() == state.slot() - && state.payload_status() == StatePayloadStatus::Pending - { + let state_root = if parent.beacon_block.slot() == state.slot() { // If it happens that `pre_state` has *not* already been advanced forward a single // slot, then there is no need to compute the state root for this // `per_slot_processing` call since that state root is already stored in the parent @@ -1957,37 +1952,9 @@ fn load_parent>( // particularly important if `block` descends from the finalized/split block, but at a slot // prior to the finalized slot (which is invalid and inaccessible in our DB schema). // - // Post-Gloas we must also fetch a state with the correct payload status. If the current - // block builds upon the payload of its parent block, then we know the parent block is FULL - // and we need to load the full state. - let (payload_status, parent_state_root) = if parent_block.slot() == chain.spec.genesis_slot - { - // Genesis state is always pending, there is no such thing as a "genesis envelope". - // See: https://github.com/ethereum/consensus-specs/issues/5043 - (StatePayloadStatus::Pending, parent_block.state_root()) - } else if !block.as_block().fork_name_unchecked().gloas_enabled() { - // All pre-Gloas parent states are pending. - (StatePayloadStatus::Pending, parent_block.state_root()) - } else if let Ok(parent_bid_block_hash) = parent_block.payload_bid_block_hash() - && block.as_block().is_parent_block_full(parent_bid_block_hash) - { - // Post-Gloas Full block case. 
- // TODO(gloas): loading the envelope here is not very efficient - let Some(envelope) = chain.store.get_payload_envelope(&root)? else { - return Err(BeaconChainError::DBInconsistent(format!( - "Missing envelope for parent block {root:?}", - )) - .into()); - }; - let state_root = envelope.message.state_root; - (StatePayloadStatus::Full, state_root) - } else { - // Post-Gloas empty block case (also covers the Gloas fork transition). - (StatePayloadStatus::Pending, parent_block.state_root()) - }; let (parent_state_root, state) = chain .store - .get_advanced_hot_state(root, payload_status, block.slot(), parent_state_root)? + .get_advanced_hot_state(root, block.slot(), parent_block.state_root())? .ok_or_else(|| { BeaconChainError::DBInconsistent( format!("Missing state for parent block {root:?}",), @@ -2010,9 +1977,7 @@ fn load_parent>( ); } - let beacon_state_root = if state.slot() == parent_block.slot() - && let StatePayloadStatus::Pending = payload_status - { + let beacon_state_root = if state.slot() == parent_block.slot() { // Sanity check. 
if parent_state_root != parent_block.state_root() { return Err(BeaconChainError::DBInconsistent(format!( diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index b963f7c342..74141dc64a 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -23,7 +23,7 @@ use crate::{ use bls::Signature; use execution_layer::ExecutionLayer; use fixed_bytes::FixedBytesExtended; -use fork_choice::{ForkChoice, ResetPayloadStatuses}; +use fork_choice::{ForkChoice, PayloadStatus, ResetPayloadStatuses}; use futures::channel::mpsc::Sender; use kzg::Kzg; use logging::crit; @@ -34,7 +34,9 @@ use rand::RngCore; use rayon::prelude::*; use slasher::Slasher; use slot_clock::{SlotClock, TestingSlotClock}; -use state_processing::{AllCaches, per_slot_processing}; +use state_processing::AllCaches; +use state_processing::genesis::genesis_block; +use state_processing::per_slot_processing; use std::marker::PhantomData; use std::sync::Arc; use std::time::Duration; @@ -44,8 +46,8 @@ use tracing::{debug, error, info, warn}; use tree_hash::TreeHash; use types::data::CustodyIndex; use types::{ - BeaconBlock, BeaconState, BlobSidecarList, ChainSpec, ColumnIndex, DataColumnSidecarList, - Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot, + BeaconState, BlobSidecarList, ChainSpec, ColumnIndex, DataColumnSidecarList, Epoch, EthSpec, + Hash256, SignedBeaconBlock, Slot, }; /// An empty struct used to "witness" all the `BeaconChainTypes` traits. It has no user-facing @@ -321,7 +323,7 @@ where .clone() .ok_or("set_genesis_state requires a store")?; - let beacon_block = genesis_block(&mut beacon_state, &self.spec)?; + let beacon_block = make_genesis_block(&mut beacon_state, &self.spec)?; beacon_state .build_caches(&self.spec) @@ -374,7 +376,7 @@ where // Since v4.4.0 we will set the anchor with a dummy state upper limit in order to prevent // historic states from being retained (unless `--archive` is set). 
let retain_historic_states = self.chain_config.archive; - let genesis_beacon_block = genesis_block(&mut beacon_state, &self.spec)?; + let genesis_beacon_block = make_genesis_block(&mut beacon_state, &self.spec)?; self.pending_io_batch.push( store .init_anchor_info( @@ -617,7 +619,6 @@ where .map_err(|e| format!("Failed to initialize data column info: {:?}", e))?, ); - // TODO(gloas): add check that checkpoint state is Pending let snapshot = BeaconSnapshot { beacon_block_root: weak_subj_block_root, execution_envelope: None, @@ -786,23 +787,26 @@ where .map_err(|e| descriptive_db_error("head block", &e))? .ok_or("Head block not found in store")?; - let state_payload_status = head_payload_status.as_state_payload_status(); - let (_head_state_root, head_state) = store - .get_advanced_hot_state( - head_block_root, - state_payload_status, - current_slot, - head_block.state_root(), - ) + .get_advanced_hot_state(head_block_root, current_slot, head_block.state_root()) .map_err(|e| descriptive_db_error("head state", &e))? .ok_or("Head state not found in store")?; let head_shuffling_ids = BlockShufflingIds::try_from_head(head_block_root, &head_state)?; + // Load the execution envelope from the store if the head has a Full payload. + let execution_envelope = if head_payload_status == PayloadStatus::Full { + store + .get_payload_envelope(&head_block_root) + .map_err(|e| format!("Error loading head execution envelope: {:?}", e))? 
+ .map(Arc::new) + } else { + None + }; + let mut head_snapshot = BeaconSnapshot { beacon_block_root: head_block_root, - execution_envelope: None, + execution_envelope, beacon_block: Arc::new(head_block), beacon_state: head_state, }; @@ -1166,17 +1170,19 @@ where } } -fn genesis_block( +fn make_genesis_block( genesis_state: &mut BeaconState, spec: &ChainSpec, ) -> Result, String> { - let mut genesis_block = BeaconBlock::empty(spec); - *genesis_block.state_root_mut() = genesis_state + let mut block = genesis_block(genesis_state, spec) + .map_err(|e| format!("Error building genesis block: {:?}", e))?; + + *block.state_root_mut() = genesis_state .update_tree_hash_cache() .map_err(|e| format!("Error hashing genesis state: {:?}", e))?; Ok(SignedBeaconBlock::from_block( - genesis_block, + block, // Empty signature, which should NEVER be read. This isn't to-spec, but makes the genesis // block consistent with every other block. Signature::empty(), diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs index cd53d0ef7c..1e5e1300ab 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -43,8 +43,8 @@ use crate::{ }; use eth2::types::{EventKind, SseChainReorg, SseFinalizedCheckpoint, SseLateHead}; use fork_choice::{ - ExecutionStatus, ForkChoiceStore, ForkChoiceView, ForkchoiceUpdateParameters, ProtoBlock, - ResetPayloadStatuses, + ExecutionStatus, ForkChoiceStore, ForkChoiceView, ForkchoiceUpdateParameters, PayloadStatus, + ProtoBlock, ResetPayloadStatuses, }; use itertools::process_results; @@ -315,20 +315,22 @@ impl CanonicalHead { .ok_or(Error::MissingBeaconBlock(beacon_block_root))?; let current_slot = fork_choice.fc_store().get_current_slot(); - // TODO(gloas): pass a better payload status once fork choice is implemented - let payload_status = StatePayloadStatus::Pending; let (_, beacon_state) = store - .get_advanced_hot_state( - beacon_block_root, - 
payload_status, - current_slot, - beacon_block.state_root(), - )? + .get_advanced_hot_state(beacon_block_root, current_slot, beacon_block.state_root())? .ok_or(Error::MissingBeaconState(beacon_block.state_root()))?; + // Load the execution envelope from the store if the head has a Full payload. + let execution_envelope = if head_payload_status == PayloadStatus::Full { + store + .get_payload_envelope(&beacon_block_root)? + .map(Arc::new) + } else { + None + }; + let snapshot = BeaconSnapshot { beacon_block_root, - execution_envelope: None, + execution_envelope, beacon_block: Arc::new(beacon_block), beacon_state, }; @@ -683,30 +685,42 @@ impl BeaconChain { drop(fork_choice_read_lock); // If the head has changed, update `self.canonical_head`. - let new_cached_head = if new_view.head_block_root != old_view.head_block_root { + let new_cached_head = if new_view.head_block_root != old_view.head_block_root + || new_payload_status != old_payload_status + { metrics::inc_counter(&metrics::FORK_CHOICE_CHANGED_HEAD); + // TODO(gloas): could optimise this to reuse state and rest of snapshot if just the + // payload status has changed. let mut new_snapshot = { let beacon_block = self .store .get_full_block(&new_view.head_block_root)? .ok_or(Error::MissingBeaconBlock(new_view.head_block_root))?; - // TODO(gloas): update once we have fork choice - let payload_status = StatePayloadStatus::Pending; + // Load the execution envelope from the store if the head has a Full payload. + let state_root = beacon_block.state_root(); + let execution_envelope = if new_payload_status == PayloadStatus::Full { + let envelope = self + .store + .get_payload_envelope(&new_view.head_block_root)? + .map(Arc::new) + .ok_or(Error::MissingExecutionPayloadEnvelope( + new_view.head_block_root, + ))?; + + Some(envelope) + } else { + None + }; let (_, beacon_state) = self .store - .get_advanced_hot_state( - new_view.head_block_root, - payload_status, - current_slot, - beacon_block.state_root(), - )? 
- .ok_or(Error::MissingBeaconState(beacon_block.state_root()))?; + .get_advanced_hot_state(new_view.head_block_root, current_slot, state_root)? + .ok_or(Error::MissingBeaconState(state_root))?; BeaconSnapshot { beacon_block: Arc::new(beacon_block), - execution_envelope: None, + execution_envelope, beacon_block_root: new_view.head_block_root, beacon_state, } @@ -770,7 +784,8 @@ impl BeaconChain { let old_snapshot = &old_cached_head.snapshot; // If the head changed, perform some updates. - if new_snapshot.beacon_block_root != old_snapshot.beacon_block_root + if (new_snapshot.beacon_block_root != old_snapshot.beacon_block_root + || new_payload_status != old_payload_status) && let Err(e) = self.after_new_head(&old_cached_head, &new_cached_head, new_head_proto_block) { @@ -974,26 +989,30 @@ impl BeaconChain { // The store migration task and op pool pruning require the *state at the first slot of the // finalized epoch*, rather than the state of the latest finalized block. These two values // will only differ when the first slot of the finalized epoch is a skip slot. - // - // Use the `StateRootsIterator` directly rather than `BeaconChain::state_root_at_slot` - // to ensure we use the same state that we just set as the head. let new_finalized_slot = new_view .finalized_checkpoint .epoch .start_slot(T::EthSpec::slots_per_epoch()); - let new_finalized_state_root = process_results( - StateRootsIterator::new(&self.store, &new_snapshot.beacon_state), - |mut iter| { - iter.find_map(|(state_root, slot)| { - if slot == new_finalized_slot { - Some(state_root) - } else { - None - } - }) - }, - )? - .ok_or(Error::MissingFinalizedStateRoot(new_finalized_slot))?; + let new_finalized_state_root = if new_finalized_slot == finalized_proto_block.slot { + // Fast-path for the common case where the finalized state is not at a skipped slot. 
+ finalized_proto_block.state_root + } else { + // Use the `StateRootsIterator` directly rather than `BeaconChain::state_root_at_slot` + // to ensure we use the same state that we just set as the head. + process_results( + StateRootsIterator::new(&self.store, &new_snapshot.beacon_state), + |mut iter| { + iter.find_map(|(state_root, slot)| { + if slot == new_finalized_slot { + Some(state_root) + } else { + None + } + }) + }, + )? + .ok_or(Error::MissingFinalizedStateRoot(new_finalized_slot))? + }; let update_cache = true; let new_finalized_state = self diff --git a/beacon_node/beacon_chain/src/data_column_verification.rs b/beacon_node/beacon_chain/src/data_column_verification.rs index f2cec0980f..a24dbd8942 100644 --- a/beacon_node/beacon_chain/src/data_column_verification.rs +++ b/beacon_node/beacon_chain/src/data_column_verification.rs @@ -21,7 +21,7 @@ use tracing::{debug, instrument}; use types::data::ColumnIndex; use types::{ BeaconStateError, ChainSpec, DataColumnSidecar, DataColumnSidecarFulu, DataColumnSubnetId, - EthSpec, Hash256, Slot, StatePayloadStatus, + EthSpec, Hash256, Slot, }; /// An error occurred while validating a gossip data column. @@ -743,12 +743,7 @@ fn verify_proposer_and_signature( // for the same block. Analysis: https://hackmd.io/@dapplion/gloas_dependant_root chain .store - .get_advanced_hot_state( - block_parent_root, - StatePayloadStatus::Pending, - column_slot, - parent_block.state_root, - ) + .get_advanced_hot_state(block_parent_root, column_slot, parent_block.state_root) .map_err(|e| GossipDataColumnError::BeaconChainError(Box::new(e.into())))? 
.ok_or_else(|| { GossipDataColumnError::BeaconChainError(Box::new( diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index d5ff12e33b..9802f091e0 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -63,6 +63,7 @@ pub enum BeaconChainError { ForkChoiceStoreError(ForkChoiceStoreError), MissingBeaconBlock(Hash256), MissingBeaconState(Hash256), + MissingExecutionPayloadEnvelope(Hash256), MissingHotStateSummary(Hash256), SlotProcessingError(SlotProcessingError), EpochProcessingError(EpochProcessingError), @@ -294,9 +295,6 @@ pub enum BlockProductionError { BeaconStateError(BeaconStateError), StateAdvanceError(StateAdvanceError), OpPoolError(OpPoolError), - /// The `BeaconChain` was explicitly configured _without_ a connection to eth1, therefore it - /// cannot produce blocks. - NoEth1ChainConnection, StateSlotTooHigh { produce_at_slot: Slot, state_slot: Slot, @@ -324,6 +322,8 @@ pub enum BlockProductionError { SszTypesError(ssz_types::Error), EnvelopeProcessingError(EnvelopeProcessingError), BlsError(bls::Error), + MissingParentExecutionPayload, + MissingExecutionPayloadEnvelope(Hash256), // TODO(gloas): Remove this once Gloas is implemented GloasNotImplemented(String), } diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index 2b03a095f1..16542eea2d 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -402,12 +402,20 @@ where let suggested_fee_recipient = execution_layer .get_suggested_fee_recipient(proposer_index) .await; + + let slot_number = if fork.gloas_enabled() { + Some(builder_params.slot.as_u64()) + } else { + None + }; + let payload_attributes = PayloadAttributes::new( timestamp, random, suggested_fee_recipient, withdrawals, parent_beacon_block_root, + slot_number, ); let target_gas_limit = 
execution_layer.get_proposer_gas_limit(proposer_index).await; diff --git a/beacon_node/beacon_chain/src/migrate.rs b/beacon_node/beacon_chain/src/migrate.rs index 24258d2d31..3c17c1ebba 100644 --- a/beacon_node/beacon_chain/src/migrate.rs +++ b/beacon_node/beacon_chain/src/migrate.rs @@ -330,7 +330,7 @@ impl, Cold: ItemStore> BackgroundMigrator state, other => { error!( diff --git a/beacon_node/beacon_chain/src/payload_bid_verification/tests.rs b/beacon_node/beacon_chain/src/payload_bid_verification/tests.rs index bb59b16ffb..98863a49d5 100644 --- a/beacon_node/beacon_chain/src/payload_bid_verification/tests.rs +++ b/beacon_node/beacon_chain/src/payload_bid_verification/tests.rs @@ -10,9 +10,10 @@ use kzg::KzgCommitment; use slot_clock::{SlotClock, TestingSlotClock}; use ssz::Encode; use ssz_types::VariableList; +use state_processing::genesis::genesis_block; use store::{HotColdDB, StoreConfig}; use types::{ - Address, BeaconBlock, ChainSpec, Checkpoint, Domain, Epoch, EthSpec, ExecutionBlockHash, + Address, ChainSpec, Checkpoint, Domain, Epoch, EthSpec, ExecutionBlockHash, ExecutionPayloadBid, Hash256, MinimalEthSpec, ProposerPreferences, SignedBeaconBlock, SignedExecutionPayloadBid, SignedProposerPreferences, SignedRoot, Slot, }; @@ -112,11 +113,11 @@ impl TestContext { ) .expect("should register inactive builder"); - let mut genesis_block = BeaconBlock::empty(&spec); - *genesis_block.state_root_mut() = state + let mut block = genesis_block(&state, &spec).expect("should build genesis block"); + *block.state_root_mut() = state .update_tree_hash_cache() .expect("should hash genesis state"); - let signed_block = SignedBeaconBlock::from_block(genesis_block, Signature::empty()); + let signed_block = SignedBeaconBlock::from_block(block, Signature::empty()); let block_root = signed_block.canonical_root(); let snapshot = BeaconSnapshot::new( diff --git a/beacon_node/beacon_chain/src/payload_envelope_streamer/tests.rs 
b/beacon_node/beacon_chain/src/payload_envelope_streamer/tests.rs index 9e869a59b8..0db6d57ed6 100644 --- a/beacon_node/beacon_chain/src/payload_envelope_streamer/tests.rs +++ b/beacon_node/beacon_chain/src/payload_envelope_streamer/tests.rs @@ -65,13 +65,12 @@ fn build_chain( message: ExecutionPayloadEnvelope { payload: ExecutionPayloadGloas { block_hash, + slot_number: slot, ..Default::default() }, execution_requests: Default::default(), builder_index: 0, beacon_block_root: block_root, - slot, - state_root: Hash256::zero(), }, signature: Signature::empty(), }) diff --git a/beacon_node/beacon_chain/src/payload_envelope_verification/execution_pending_envelope.rs b/beacon_node/beacon_chain/src/payload_envelope_verification/execution_pending_envelope.rs index 86f9293c8f..4b8e7347cc 100644 --- a/beacon_node/beacon_chain/src/payload_envelope_verification/execution_pending_envelope.rs +++ b/beacon_node/beacon_chain/src/payload_envelope_verification/execution_pending_envelope.rs @@ -1,10 +1,7 @@ use std::sync::Arc; use slot_clock::SlotClock; -use state_processing::{ - VerifySignatures, - envelope_processing::{VerifyStateRoot, process_execution_payload_envelope}, -}; +use state_processing::{VerifySignatures, envelope_processing::verify_execution_payload_envelope}; use types::EthSpec; use crate::{ @@ -77,16 +74,15 @@ impl GossipVerifiedEnvelope { } else { load_snapshot_from_state_root::(block_root, self.block.state_root(), &chain.store)? }; - let mut state = snapshot.pre_state; + let state = snapshot.pre_state; - // All the state modifications are done in envelope_processing - process_execution_payload_envelope( - &mut state, - Some(snapshot.state_root), + // Verify the envelope against the state (no state mutation). 
+ verify_execution_payload_envelope( + &state, &signed_envelope, // verify signature already done for GossipVerifiedEnvelope VerifySignatures::False, - VerifyStateRoot::True, + snapshot.state_root, &chain.spec, )?; @@ -97,7 +93,7 @@ impl GossipVerifiedEnvelope { }, import_data: EnvelopeImportData { block_root, - post_state: Box::new(state), + _phantom: Default::default(), }, payload_verification_handle, }) diff --git a/beacon_node/beacon_chain/src/payload_envelope_verification/gossip_verified_envelope.rs b/beacon_node/beacon_chain/src/payload_envelope_verification/gossip_verified_envelope.rs index 77b44a2af0..80724e2b00 100644 --- a/beacon_node/beacon_chain/src/payload_envelope_verification/gossip_verified_envelope.rs +++ b/beacon_node/beacon_chain/src/payload_envelope_verification/gossip_verified_envelope.rs @@ -42,18 +42,18 @@ pub(crate) fn verify_envelope_consistency( ) -> Result<(), EnvelopeError> { // Check that the envelope's slot isn't from a slot prior // to the latest finalized slot. - if envelope.slot < latest_finalized_slot { + if envelope.slot() < latest_finalized_slot { return Err(EnvelopeError::PriorToFinalization { - payload_slot: envelope.slot, + payload_slot: envelope.slot(), latest_finalized_slot, }); } // Check that the slot of the envelope matches the slot of the block. - if envelope.slot != block.slot() { + if envelope.slot() != block.slot() { return Err(EnvelopeError::SlotMismatch { block: block.slot(), - envelope: envelope.slot, + envelope: envelope.slot(), }); } @@ -144,7 +144,7 @@ impl GossipVerifiedEnvelope { // validator pubkey cache for the proposer's pubkey, avoiding a state load from disk. // For external builder envelopes, we must load the state to access the builder registry. 
let builder_index = envelope.builder_index; - let block_slot = envelope.slot; + let block_slot = envelope.slot(); let envelope_epoch = block_slot.epoch(T::EthSpec::slots_per_epoch()); // Since the payload's block is already guaranteed to be imported, the associated `proto_block.current_epoch_shuffling_id` // already carries the correct `shuffling_decision_block`. @@ -224,7 +224,6 @@ impl GossipVerifiedEnvelope { builder_index, block_hash: signed_envelope.message.payload.block_hash, block_root: beacon_block_root, - state_root: signed_envelope.message.state_root, }, )); } @@ -334,13 +333,12 @@ mod tests { ExecutionPayloadEnvelope { payload: ExecutionPayloadGloas { block_hash, + slot_number: slot, ..ExecutionPayloadGloas::default() }, execution_requests: ExecutionRequests::default(), builder_index, beacon_block_root: Hash256::ZERO, - slot, - state_root: Hash256::ZERO, } } @@ -365,6 +363,7 @@ mod tests { voluntary_exits: VariableList::empty(), sync_aggregate: SyncAggregate::empty(), bls_to_execution_changes: VariableList::empty(), + parent_execution_requests: ExecutionRequests::default(), signed_execution_payload_bid: SignedExecutionPayloadBid::empty(), payload_attestations: VariableList::empty(), _phantom: PhantomData, diff --git a/beacon_node/beacon_chain/src/payload_envelope_verification/import.rs b/beacon_node/beacon_chain/src/payload_envelope_verification/import.rs index 7e79799310..5a6d3a1b7d 100644 --- a/beacon_node/beacon_chain/src/payload_envelope_verification/import.rs +++ b/beacon_node/beacon_chain/src/payload_envelope_verification/import.rs @@ -6,7 +6,7 @@ use fork_choice::PayloadVerificationStatus; use slot_clock::SlotClock; use store::StoreOp; use tracing::{debug, error, info, info_span, instrument, warn}; -use types::{BeaconState, BlockImportSource, Hash256, SignedExecutionPayloadEnvelope}; +use types::{BlockImportSource, Hash256, SignedExecutionPayloadEnvelope}; use super::{ AvailableEnvelope, AvailableExecutedEnvelope, EnvelopeError, 
EnvelopeImportData, @@ -198,7 +198,7 @@ impl BeaconChain { let EnvelopeImportData { block_root, - post_state, + _phantom, } = import_data; let block_root = { @@ -208,7 +208,6 @@ impl BeaconChain { chain.import_execution_payload_envelope( envelope, block_root, - *post_state, payload_verification_outcome.payload_verification_status, ) }, @@ -231,7 +230,6 @@ impl BeaconChain { &self, signed_envelope: AvailableEnvelope, block_root: Hash256, - state: BeaconState, payload_verification_status: PayloadVerificationStatus, ) -> Result { // Everything in this initial section is on the hot path for processing the envelope. @@ -285,10 +283,6 @@ impl BeaconChain { block_root, signed_envelope.clone(), )); - ops.push(StoreOp::PutState( - signed_envelope.message.state_root, - &state, - )); let db_span = info_span!("persist_payloads_and_blobs").entered(); @@ -365,7 +359,6 @@ impl BeaconChain { builder_index: signed_envelope.message.builder_index, block_hash: signed_envelope.block_hash(), block_root, - state_root: signed_envelope.message.state_root, execution_optimistic: payload_verification_status.is_optimistic(), })); } diff --git a/beacon_node/beacon_chain/src/payload_envelope_verification/mod.rs b/beacon_node/beacon_chain/src/payload_envelope_verification/mod.rs index 225d5a9892..51fc3f235d 100644 --- a/beacon_node/beacon_chain/src/payload_envelope_verification/mod.rs +++ b/beacon_node/beacon_chain/src/payload_envelope_verification/mod.rs @@ -18,11 +18,11 @@ //! //! 
``` +use std::marker::PhantomData; use std::sync::Arc; -use store::Error as DBError; - use state_processing::{BlockProcessingError, envelope_processing::EnvelopeProcessingError}; +use store::Error as DBError; use tracing::instrument; use types::{ BeaconState, BeaconStateError, ChainSpec, DataColumnSidecarList, EthSpec, ExecutionBlockHash, @@ -41,10 +41,11 @@ mod payload_notifier; pub use execution_pending_envelope::ExecutionPendingEnvelope; +// TODO(gloas): could remove this type completely, or remove the generic #[derive(PartialEq)] pub struct EnvelopeImportData { pub block_root: Hash256, - pub post_state: Box>, + _phantom: PhantomData, } #[derive(Debug)] @@ -249,9 +250,6 @@ impl From for EnvelopeError { committed_bid, envelope, }, - EnvelopeProcessingError::BlockProcessingError(e) => { - EnvelopeError::BlockProcessingError(e) - } e => EnvelopeError::EnvelopeProcessingError(e), } } diff --git a/beacon_node/beacon_chain/src/pending_payload_envelopes.rs b/beacon_node/beacon_chain/src/pending_payload_envelopes.rs index 336ab5323f..351783832d 100644 --- a/beacon_node/beacon_chain/src/pending_payload_envelopes.rs +++ b/beacon_node/beacon_chain/src/pending_payload_envelopes.rs @@ -87,12 +87,13 @@ mod tests { fn make_envelope(slot: Slot) -> ExecutionPayloadEnvelope { ExecutionPayloadEnvelope { - payload: ExecutionPayloadGloas::default(), + payload: ExecutionPayloadGloas { + slot_number: slot, + ..ExecutionPayloadGloas::default() + }, execution_requests: ExecutionRequests::default(), builder_index: 0, beacon_block_root: Hash256::ZERO, - slot, - state_root: Hash256::ZERO, } } diff --git a/beacon_node/beacon_chain/src/state_advance_timer.rs b/beacon_node/beacon_chain/src/state_advance_timer.rs index 4c070e7ecc..cb916cb514 100644 --- a/beacon_node/beacon_chain/src/state_advance_timer.rs +++ b/beacon_node/beacon_chain/src/state_advance_timer.rs @@ -26,10 +26,7 @@ use std::sync::{ use task_executor::TaskExecutor; use tokio::time::{Instant, sleep, sleep_until}; use 
tracing::{Instrument, debug, debug_span, error, instrument, warn}; -use types::{ - AttestationShufflingId, BeaconStateError, EthSpec, Hash256, RelativeEpoch, Slot, - StatePayloadStatus, -}; +use types::{AttestationShufflingId, BeaconStateError, EthSpec, Hash256, RelativeEpoch, Slot}; /// If the head slot is more than `MAX_ADVANCE_DISTANCE` from the current slot, then don't perform /// the state advancement. @@ -280,16 +277,9 @@ fn advance_head(beacon_chain: &Arc>) -> Resu (snapshot.beacon_block_root, snapshot.beacon_state_root()) }; - // TODO(gloas): do better once we have fork choice - let payload_status = StatePayloadStatus::Pending; let (head_state_root, mut state) = beacon_chain .store - .get_advanced_hot_state( - head_block_root, - payload_status, - current_slot, - head_block_state_root, - )? + .get_advanced_hot_state(head_block_root, current_slot, head_block_state_root)? .ok_or(Error::HeadMissingFromSnapshotCache(head_block_root))?; let initial_slot = state.slot(); diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 1b03b6e10b..e84f9ad983 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -1043,6 +1043,13 @@ where assert_ne!(slot, 0, "can't produce a block at slot 0"); assert!(slot >= state.slot()); + // For Gloas forks, delegate to make_block_with_envelope and discard the envelope. + if self.spec.fork_name_at_slot::(slot).gloas_enabled() { + let (block_contents, _envelope, state) = + Box::pin(self.make_block_with_envelope(state, slot)).await; + return (block_contents, state); + } + complete_state_advance(&mut state, None, slot, &self.spec) .expect("should be able to advance state to slot"); @@ -1124,11 +1131,24 @@ where GraffitiSettings::new(Some(graffiti), Some(GraffitiPolicy::PreserveUserGraffiti)); let randao_reveal = self.sign_randao_reveal(&state, proposer_index, slot); + // Load the parent's payload envelope and status from the cached head. 
+ // TODO(gloas): we may want to pass these as arguments to support cases where we build + // on alternate chains to the head. + let (parent_payload_status, parent_envelope) = { + let head = self.chain.canonical_head.cached_head(); + ( + head.head_payload_status(), + head.snapshot.execution_envelope.clone(), + ) + }; + let (block, pending_state, _consensus_block_value) = self .chain .produce_block_on_state_gloas( state, None, + parent_payload_status, + parent_envelope, slot, randao_reveal, graffiti_settings, @@ -2681,32 +2701,27 @@ where Ok(block_hash) } - /// Process an execution payload envelope for a Gloas block. + /// Verify and process (with fork choice) an execution payload envelope for a Gloas block. pub async fn process_envelope( &self, block_root: Hash256, signed_envelope: SignedExecutionPayloadEnvelope, - pending_state: &mut BeaconState, - ) -> Hash256 { - let state_root = signed_envelope.message.state_root; + state: &BeaconState, + block_state_root: Hash256, + ) { debug!( - slot = %signed_envelope.message.slot, - ?state_root, + slot = %signed_envelope.slot(), "Processing execution payload envelope" ); - let block_state_root = pending_state - .update_tree_hash_cache() - .expect("should compute pending state root"); - state_processing::envelope_processing::process_execution_payload_envelope( - pending_state, - Some(block_state_root), + state_processing::envelope_processing::verify_execution_payload_envelope( + state, &signed_envelope, state_processing::VerifySignatures::True, - state_processing::envelope_processing::VerifyStateRoot::True, + block_state_root, &self.spec, ) - .expect("should process envelope"); + .expect("should verify envelope"); // Notify the EL of the new payload so forkchoiceUpdated can reference it. let block = self @@ -2747,16 +2762,18 @@ where // Store the envelope. 
self.chain .store - .put_payload_envelope(&block_root, signed_envelope) + .put_payload_envelope(&block_root, &signed_envelope) .expect("should store envelope"); - // Store the Full state. + // Update fork choice so it knows the payload was received. self.chain - .store - .put_state(&state_root, pending_state) - .expect("should store full state"); + .canonical_head + .fork_choice_write_lock() + .on_valid_payload_envelope_received(block_root) + .expect("should update fork choice with envelope"); - state_root + // Run fork choice because the envelope could become the head. + self.chain.recompute_head_at_current_slot().await; } /// Builds a `RangeSyncBlock` from a `SignedBeaconBlock` and blobs or data columns retrieved from @@ -2970,7 +2987,8 @@ where BlockError, > { self.set_current_slot(slot); - let (block_contents, new_state) = self.make_block(state, slot).await; + let (block_contents, opt_envelope, new_state) = + self.make_block_with_envelope(state, slot).await; let block_hash = self .process_block( @@ -2979,6 +2997,12 @@ where block_contents.clone(), ) .await?; + + if let Some(envelope) = opt_envelope { + let block_state_root = block_contents.0.state_root(); + self.process_envelope(block_hash.into(), envelope, &new_state, block_state_root) + .await; + } Ok((block_hash, block_contents, new_state)) } diff --git a/beacon_node/beacon_chain/src/validator_pubkey_cache.rs b/beacon_node/beacon_chain/src/validator_pubkey_cache.rs index 26ac02d91b..36bf5c7113 100644 --- a/beacon_node/beacon_chain/src/validator_pubkey_cache.rs +++ b/beacon_node/beacon_chain/src/validator_pubkey_cache.rs @@ -302,7 +302,8 @@ mod test { #[test] fn basic_operation() { - let (state, keypairs) = get_state(8); + // >= 32 validators required for Gloas genesis with MainnetEthSpec (32 slots/epoch). + let (state, keypairs) = get_state(32); let store = get_store(); @@ -311,21 +312,14 @@ mod test { check_cache_get(&cache, &keypairs[..]); // Try adding a state with the same number of keypairs. 
- let (state, keypairs) = get_state(8); - cache - .import_new_pubkeys(&state) - .expect("should import pubkeys"); - check_cache_get(&cache, &keypairs[..]); - - // Try adding a state with less keypairs. - let (state, _) = get_state(1); + let (state, keypairs) = get_state(32); cache .import_new_pubkeys(&state) .expect("should import pubkeys"); check_cache_get(&cache, &keypairs[..]); // Try adding a state with more keypairs. - let (state, keypairs) = get_state(12); + let (state, keypairs) = get_state(48); cache .import_new_pubkeys(&state) .expect("should import pubkeys"); @@ -334,7 +328,7 @@ mod test { #[test] fn persistence() { - let (state, keypairs) = get_state(8); + let (state, keypairs) = get_state(32); let store = get_store(); @@ -349,7 +343,7 @@ mod test { check_cache_get(&cache, &keypairs[..]); // Add some more keypairs. - let (state, keypairs) = get_state(12); + let (state, keypairs) = get_state(48); let ops = cache .import_new_pubkeys(&state) .expect("should import pubkeys"); diff --git a/beacon_node/beacon_chain/tests/attestation_production.rs b/beacon_node/beacon_chain/tests/attestation_production.rs index bca60d27cd..a3ab959d12 100644 --- a/beacon_node/beacon_chain/tests/attestation_production.rs +++ b/beacon_node/beacon_chain/tests/attestation_production.rs @@ -10,7 +10,7 @@ use std::sync::{Arc, LazyLock}; use tree_hash::TreeHash; use types::{Attestation, EthSpec, MainnetEthSpec, RelativeEpoch, Slot}; -pub const VALIDATOR_COUNT: usize = 16; +pub const VALIDATOR_COUNT: usize = 32; /// A cached set of keys. 
static KEYPAIRS: LazyLock> = diff --git a/beacon_node/beacon_chain/tests/attestation_verification.rs b/beacon_node/beacon_chain/tests/attestation_verification.rs index 91bc8e249a..da7f380e36 100644 --- a/beacon_node/beacon_chain/tests/attestation_verification.rs +++ b/beacon_node/beacon_chain/tests/attestation_verification.rs @@ -1389,13 +1389,18 @@ async fn attestation_to_finalized_block() { let earlier_block_root = earlier_block.canonical_root(); assert_ne!(earlier_block_root, finalized_checkpoint.root); + // For Gloas, `block.state_root()` returns the pending state root, but the cold DB + // may store the full state root. Use `get_cold_state_root` to get the actual stored key. + let cold_state_root = harness + .chain + .store + .get_cold_state_root(earlier_slot) + .expect("should not error getting cold state root") + .expect("cold state root should be present for finalized slot in archive store"); + let mut state = harness .chain - .get_state( - &earlier_block.state_root(), - Some(earlier_slot), - CACHE_STATE_IN_TESTS, - ) + .get_state(&cold_state_root, Some(earlier_slot), CACHE_STATE_IN_TESTS) .expect("should not error getting state") .expect("should find state"); diff --git a/beacon_node/beacon_chain/tests/block_verification.rs b/beacon_node/beacon_chain/tests/block_verification.rs index 2bb60f111a..6646fe0b1e 100644 --- a/beacon_node/beacon_chain/tests/block_verification.rs +++ b/beacon_node/beacon_chain/tests/block_verification.rs @@ -31,8 +31,8 @@ use types::{test_utils::generate_deterministic_keypair, *}; type E = MainnetEthSpec; -// Should ideally be divisible by 3. -const VALIDATOR_COUNT: usize = 24; +// Gloas requires >= 1 validator per slot for PTC committee computation, so >= 32 for MainnetEthSpec. 
+const VALIDATOR_COUNT: usize = 32; const CHAIN_SEGMENT_LENGTH: usize = 64 * 5; const BLOCK_INDICES: &[usize] = &[0, 1, 32, 64, 68 + 1, 129, CHAIN_SEGMENT_LENGTH - 1]; @@ -77,10 +77,9 @@ async fn get_chain_segment() -> (Vec>, Vec], + harness: &BeaconChainHarness>, +) { + for snapshot in chain_segment { + if let Some(ref envelope) = snapshot.execution_envelope { + harness + .chain + .store + .put_payload_envelope(&snapshot.beacon_block_root, envelope) + .expect("should store envelope"); + } + } +} + +/// Update fork choice with envelope payload status for all blocks in the chain segment. +/// +/// Must be called after the blocks have been imported into fork choice. +fn update_fork_choice_with_envelopes( + chain_segment: &[BeaconSnapshot], + harness: &BeaconChainHarness>, +) { + for snapshot in chain_segment { + if snapshot.execution_envelope.is_some() { + // Call may fail if block was invalid (it will have no fork choice node). + let _ = harness + .chain + .canonical_head + .fork_choice_write_lock() + .on_valid_payload_envelope_received(snapshot.beacon_block_root); + } + } +} + fn junk_signature() -> Signature { let kp = generate_deterministic_keypair(VALIDATOR_COUNT); let message = Hash256::from_slice(&[42; 32]); @@ -303,6 +343,7 @@ fn update_data_column_signed_header( async fn chain_segment_full_segment() { let harness = get_harness(VALIDATOR_COUNT, NodeCustodyType::Fullnode); let (chain_segment, chain_segment_blobs) = get_chain_segment().await; + store_envelopes_for_chain_segment(&chain_segment, &harness); let blocks: Vec> = chain_segment_blocks(&chain_segment, &chain_segment_blobs, harness.chain.clone()) .into_iter() @@ -328,6 +369,7 @@ async fn chain_segment_full_segment() { .into_block_error() .expect("should import chain segment"); + update_fork_choice_with_envelopes(&chain_segment, &harness); harness.chain.recompute_head_at_current_slot().await; assert_eq!( @@ -348,6 +390,7 @@ async fn chain_segment_varying_chunk_size() { for chunk_size in &[1, 2, 31, 32, 
33] { let harness = get_harness(VALIDATOR_COUNT, NodeCustodyType::Fullnode); + store_envelopes_for_chain_segment(&chain_segment, &harness); harness .chain @@ -363,6 +406,7 @@ async fn chain_segment_varying_chunk_size() { .unwrap_or_else(|_| panic!("should import chain segment of len {}", chunk_size)); } + update_fork_choice_with_envelopes(&chain_segment, &harness); harness.chain.recompute_head_at_current_slot().await; assert_eq!( @@ -514,6 +558,7 @@ async fn assert_invalid_signature( snapshots: &[BeaconSnapshot], item: &str, ) { + store_envelopes_for_chain_segment(chain_segment, harness); let blocks: Vec> = snapshots .iter() .zip(chain_segment_blobs.iter()) @@ -540,10 +585,22 @@ async fn assert_invalid_signature( harness.chain.recompute_head_at_current_slot().await; // Ensure the block will be rejected if imported on its own (without gossip checking). - let ancestor_blocks = chain_segment + // Only include blocks that haven't been imported yet (after the finalized slot) to avoid + // `WouldRevertFinalizedSlot` errors when part 1 already imported and finalized some blocks. + // Use the fork choice finalized checkpoint directly, as the cached head may not reflect + // finalization that occurred during process_chain_segment. 
+ let finalized_slot = harness + .chain + .canonical_head + .fork_choice_read_lock() + .finalized_checkpoint() + .epoch + .start_slot(E::slots_per_epoch()); + let ancestor_blocks: Vec> = chain_segment .iter() .take(block_index) .zip(chain_segment_blobs.iter()) + .filter(|(snapshot, _)| snapshot.beacon_block.slot() > finalized_slot) .map(|(snapshot, blobs)| { build_range_sync_block(snapshot.beacon_block.clone(), blobs, harness.chain.clone()) }) @@ -554,6 +611,7 @@ async fn assert_invalid_signature( .chain .process_chain_segment(ancestor_blocks, NotifyExecutionLayer::Yes) .await; + update_fork_choice_with_envelopes(chain_segment, harness); harness.chain.recompute_head_at_current_slot().await; let process_res = harness @@ -594,6 +652,7 @@ async fn get_invalid_sigs_harness( chain_segment: &[BeaconSnapshot], ) -> BeaconChainHarness> { let harness = get_harness(VALIDATOR_COUNT, NodeCustodyType::Fullnode); + store_envelopes_for_chain_segment(chain_segment, &harness); harness .chain .slot_clock @@ -1091,6 +1150,21 @@ async fn block_gossip_verification() { ) .await .expect("should import valid gossip verified block"); + // Post-Gloas, store the execution payload envelope so that subsequent blocks can look up + // the parent envelope. 
+ if let Some(ref envelope) = snapshot.execution_envelope { + harness + .chain + .store + .put_payload_envelope(&snapshot.beacon_block_root, envelope) + .expect("should store envelope"); + harness + .chain + .canonical_head + .fork_choice_write_lock() + .on_valid_payload_envelope_received(snapshot.beacon_block_root) + .expect("should update fork choice with envelope"); + } if let Some(data_sidecars) = blobs_opt { verify_and_process_gossip_data_sidecars(&harness, data_sidecars).await; } @@ -2040,7 +2114,10 @@ async fn range_sync_block_construction_fails_with_wrong_blob_count() { async fn range_sync_block_rejects_missing_custody_columns() { let spec = test_spec::(); - if !spec.fork_name_at_slot::(Slot::new(0)).fulu_enabled() { + // Gloas blocks don't have blob_kzg_commitments (blobs are in the execution payload envelope). + if !spec.fork_name_at_slot::(Slot::new(0)).fulu_enabled() + || spec.fork_name_at_slot::(Slot::new(0)).gloas_enabled() + { return; } @@ -2118,7 +2195,10 @@ async fn range_sync_block_rejects_missing_custody_columns() { async fn rpc_block_allows_construction_past_da_boundary() { let spec = test_spec::(); - if !spec.fork_name_at_slot::(Slot::new(0)).fulu_enabled() { + // Gloas blocks don't have blob_kzg_commitments (blobs are in the execution payload envelope). + if !spec.fork_name_at_slot::(Slot::new(0)).fulu_enabled() + || spec.fork_name_at_slot::(Slot::new(0)).gloas_enabled() + { return; } diff --git a/beacon_node/beacon_chain/tests/column_verification.rs b/beacon_node/beacon_chain/tests/column_verification.rs index 6114bd7f45..5846ccfd7e 100644 --- a/beacon_node/beacon_chain/tests/column_verification.rs +++ b/beacon_node/beacon_chain/tests/column_verification.rs @@ -16,8 +16,8 @@ use types::*; type E = MainnetEthSpec; -// Should ideally be divisible by 3. -const VALIDATOR_COUNT: usize = 24; +// >= 32 validators required for Gloas genesis with MainnetEthSpec (32 slots/epoch). +const VALIDATOR_COUNT: usize = 32; /// A cached set of keys. 
static KEYPAIRS: LazyLock> = @@ -52,7 +52,8 @@ async fn rpc_columns_with_invalid_header_signature() { let spec = Arc::new(test_spec::()); // Only run this test if columns are enabled. - if !spec.is_fulu_scheduled() { + // TODO(gloas): Gloas blocks don't have blob_kzg_commitments — blobs are in the envelope. + if !spec.is_fulu_scheduled() || spec.is_gloas_scheduled() { return; } diff --git a/beacon_node/beacon_chain/tests/events.rs b/beacon_node/beacon_chain/tests/events.rs index 121f8c255d..5305965f0f 100644 --- a/beacon_node/beacon_chain/tests/events.rs +++ b/beacon_node/beacon_chain/tests/events.rs @@ -170,7 +170,10 @@ async fn blob_sidecar_event_on_process_rpc_blobs() { #[tokio::test] async fn data_column_sidecar_event_on_process_rpc_columns() { - if fork_name_from_env().is_none_or(|f| !f.fulu_enabled()) { + // Gloas blocks don't have blob_kzg_commitments (blobs are in the execution payload envelope). + if fork_name_from_env().is_none_or(|f| !f.fulu_enabled()) + || fork_name_from_env().is_some_and(|f| f.gloas_enabled()) + { return; }; diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index 947024e8c2..38d4f4c47e 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -371,7 +371,7 @@ impl InvalidPayloadRig { /// Simple test of the different import types. #[tokio::test] async fn valid_invalid_syncing() { - if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) { + if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled() || f.gloas_enabled()) { return; } let mut rig = InvalidPayloadRig::new(); @@ -388,7 +388,7 @@ async fn valid_invalid_syncing() { /// `latest_valid_hash`. 
#[tokio::test] async fn invalid_payload_invalidates_parent() { - if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) { + if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled() || f.gloas_enabled()) { return; } let mut rig = InvalidPayloadRig::new().enable_attestations(); @@ -445,7 +445,7 @@ async fn immediate_forkchoice_update_invalid_test( #[tokio::test] async fn immediate_forkchoice_update_payload_invalid() { - if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) { + if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled() || f.gloas_enabled()) { return; } immediate_forkchoice_update_invalid_test(|latest_valid_hash| Payload::Invalid { @@ -456,7 +456,7 @@ async fn immediate_forkchoice_update_payload_invalid() { #[tokio::test] async fn immediate_forkchoice_update_payload_invalid_block_hash() { - if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) { + if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled() || f.gloas_enabled()) { return; } immediate_forkchoice_update_invalid_test(|_| Payload::InvalidBlockHash).await @@ -464,7 +464,7 @@ async fn immediate_forkchoice_update_payload_invalid_block_hash() { #[tokio::test] async fn immediate_forkchoice_update_payload_invalid_terminal_block() { - if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) { + if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled() || f.gloas_enabled()) { return; } immediate_forkchoice_update_invalid_test(|_| Payload::Invalid { @@ -476,7 +476,7 @@ async fn immediate_forkchoice_update_payload_invalid_terminal_block() { /// Ensure the client tries to exit when the justified checkpoint is invalidated. 
#[tokio::test] async fn justified_checkpoint_becomes_invalid() { - if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) { + if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled() || f.gloas_enabled()) { return; } let mut rig = InvalidPayloadRig::new().enable_attestations(); @@ -520,7 +520,7 @@ async fn justified_checkpoint_becomes_invalid() { /// Ensure that a `latest_valid_hash` for a pre-finality block only reverts a single block. #[tokio::test] async fn pre_finalized_latest_valid_hash() { - if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) { + if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled() || f.gloas_enabled()) { return; } let num_blocks = E::slots_per_epoch() * 4; @@ -569,7 +569,7 @@ async fn pre_finalized_latest_valid_hash() { /// - Will not validate `latest_valid_root` and its ancestors. #[tokio::test] async fn latest_valid_hash_will_not_validate() { - if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) { + if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled() || f.gloas_enabled()) { return; } const LATEST_VALID_SLOT: u64 = 3; @@ -618,7 +618,7 @@ async fn latest_valid_hash_will_not_validate() { /// Check behaviour when the `latest_valid_hash` is a junk value. #[tokio::test] async fn latest_valid_hash_is_junk() { - if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) { + if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled() || f.gloas_enabled()) { return; } let num_blocks = E::slots_per_epoch() * 5; @@ -661,7 +661,7 @@ async fn latest_valid_hash_is_junk() { /// Check that descendants of invalid blocks are also invalidated. 
#[tokio::test] async fn invalidates_all_descendants() { - if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) { + if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled() || f.gloas_enabled()) { return; } let num_blocks = E::slots_per_epoch() * 4 + E::slots_per_epoch() / 2; @@ -764,7 +764,7 @@ async fn invalidates_all_descendants() { /// Check that the head will switch after the canonical branch is invalidated. #[tokio::test] async fn switches_heads() { - if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) { + if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled() || f.gloas_enabled()) { return; } let num_blocks = E::slots_per_epoch() * 4 + E::slots_per_epoch() / 2; @@ -863,7 +863,7 @@ async fn switches_heads() { #[tokio::test] async fn invalid_during_processing() { - if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) { + if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled() || f.gloas_enabled()) { return; } let mut rig = InvalidPayloadRig::new(); @@ -897,7 +897,7 @@ async fn invalid_during_processing() { #[tokio::test] async fn invalid_after_optimistic_sync() { - if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) { + if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled() || f.gloas_enabled()) { return; } let mut rig = InvalidPayloadRig::new().enable_attestations(); @@ -937,7 +937,7 @@ async fn invalid_after_optimistic_sync() { #[tokio::test] async fn manually_validate_child() { - if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) { + if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled() || f.gloas_enabled()) { return; } let mut rig = InvalidPayloadRig::new().enable_attestations(); @@ -957,7 +957,7 @@ async fn manually_validate_child() { #[tokio::test] async fn manually_validate_parent() { - if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) { + if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled() || f.gloas_enabled()) { return; } let mut 
rig = InvalidPayloadRig::new().enable_attestations(); @@ -977,7 +977,7 @@ async fn manually_validate_parent() { #[tokio::test] async fn payload_preparation() { - if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) { + if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled() || f.gloas_enabled()) { return; } let mut rig = InvalidPayloadRig::new(); @@ -1034,13 +1034,14 @@ async fn payload_preparation() { fee_recipient, None, None, + None, ); assert_eq!(rig.previous_payload_attributes(), payload_attributes); } #[tokio::test] async fn invalid_parent() { - if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) { + if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled() || f.gloas_enabled()) { return; } let mut rig = InvalidPayloadRig::new(); @@ -1107,7 +1108,7 @@ async fn invalid_parent() { #[tokio::test] async fn attesting_to_optimistic_head() { - if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) { + if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled() || f.gloas_enabled()) { return; } let mut rig = InvalidPayloadRig::new(); @@ -1320,7 +1321,7 @@ impl InvalidHeadSetup { #[tokio::test] async fn recover_from_invalid_head_by_importing_blocks() { - if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) { + if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled() || f.gloas_enabled()) { return; } let InvalidHeadSetup { @@ -1362,7 +1363,7 @@ async fn recover_from_invalid_head_by_importing_blocks() { #[tokio::test] async fn recover_from_invalid_head_after_persist_and_reboot() { - if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) { + if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled() || f.gloas_enabled()) { return; } let InvalidHeadSetup { @@ -1407,7 +1408,7 @@ async fn recover_from_invalid_head_after_persist_and_reboot() { #[tokio::test] async fn weights_after_resetting_optimistic_status() { - if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) { + if 
fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled() || f.gloas_enabled()) { return; } let mut rig = InvalidPayloadRig::new().enable_attestations(); diff --git a/beacon_node/beacon_chain/tests/rewards.rs b/beacon_node/beacon_chain/tests/rewards.rs index 1889c1f625..bc7c98041f 100644 --- a/beacon_node/beacon_chain/tests/rewards.rs +++ b/beacon_node/beacon_chain/tests/rewards.rs @@ -845,14 +845,13 @@ async fn check_all_base_rewards_for_subset( .state_at_slot(Slot::new(slot - 1), StateSkipConfig::WithoutStateRoots) .unwrap(); - // TODO(gloas): handle payloads? let mut pre_state = BlockReplayer::>::new( parent_state, &harness.spec, ) .no_signature_verification() .minimal_block_root_verification() - .apply_blocks(vec![], vec![], Some(block.slot())) + .apply_blocks(vec![], Some(block.slot())) .unwrap() .into_state(); diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index c6e13bd160..47bda60eb8 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -27,6 +27,7 @@ use beacon_chain::{ }; use bls::{Keypair, Signature, SignatureBytes}; use fixed_bytes::FixedBytesExtended; +use fork_choice::PayloadStatus; use logging::create_test_tracing_subscriber; use maplit::hashset; use rand::Rng; @@ -53,7 +54,7 @@ use types::test_utils::{SeedableRng, XorShiftRng}; use types::*; // Should ideally be divisible by 3. -pub const LOW_VALIDATOR_COUNT: usize = 24; +pub const LOW_VALIDATOR_COUNT: usize = 32; pub const HIGH_VALIDATOR_COUNT: usize = 64; // When set to true, cache any states fetched from the db. @@ -184,6 +185,10 @@ async fn light_client_bootstrap_test() { // No-op prior to Altair. return; }; + // TODO(EIP-7732): Light client not yet implemented for Gloas. 
+ if spec.is_gloas_scheduled() { + return; + } let db_path = tempdir().unwrap(); let store = get_store_generic(&db_path, StoreConfig::default(), spec.clone()); @@ -239,6 +244,10 @@ async fn light_client_updates_test() { // No-op prior to Altair. return; }; + // TODO(EIP-7732): Light client not yet implemented for Gloas. + if spec.is_gloas_scheduled() { + return; + } let num_final_blocks = E::slots_per_epoch() * 2; let db_path = tempdir().unwrap(); @@ -568,13 +577,12 @@ async fn epoch_boundary_state_attestation_processing() { .get_blinded_block(&block_root) .unwrap() .expect("block exists"); - // Use get_state as the state may be finalized by this point + // Use get_state as the state may be finalized by this point. + let state_root = block.state_root(); let mut epoch_boundary_state = store - .get_state(&block.state_root(), None, CACHE_STATE_IN_TESTS) + .get_state(&state_root, None, CACHE_STATE_IN_TESTS) .expect("no error") - .unwrap_or_else(|| { - panic!("epoch boundary state should exist {:?}", block.state_root()) - }); + .unwrap_or_else(|| panic!("epoch boundary state should exist {:?}", state_root)); let ebs_state_root = epoch_boundary_state.update_tree_hash_cache().unwrap(); let mut ebs_of_ebs = store .get_state(&ebs_state_root, None, CACHE_STATE_IN_TESTS) @@ -673,8 +681,11 @@ async fn forwards_iter_block_and_state_roots_until() { let block_root = block_roots[slot.as_usize()]; assert_eq!(block_root_iter.next().unwrap().unwrap(), (block_root, slot)); + let (iter_state_root, iter_slot) = state_root_iter.next().unwrap().unwrap(); + assert_eq!(iter_slot, slot); + let state_root = state_roots[slot.as_usize()]; - assert_eq!(state_root_iter.next().unwrap().unwrap(), (state_root, slot)); + assert_eq!(iter_state_root, state_root); } }; @@ -708,13 +719,8 @@ async fn block_replayer_hooks() { .add_attested_blocks_at_slots(state.clone(), state_root, &block_slots, &all_validators) .await; - let (blocks, envelopes) = store - .load_blocks_to_replay( - Slot::new(0), - max_slot, 
- end_block_root.into(), - StatePayloadStatus::Pending, - ) + let blocks = store + .load_blocks_to_replay(Slot::new(0), max_slot, end_block_root.into()) .unwrap(); let mut pre_slots = vec![]; @@ -749,7 +755,7 @@ async fn block_replayer_hooks() { post_block_slots.push(block.slot()); Ok(()) })) - .apply_blocks(blocks, envelopes, None) + .apply_blocks(blocks, None) .unwrap() .into_state(); @@ -2871,12 +2877,6 @@ async fn reproduction_unaligned_checkpoint_sync_pruned_payload() { .block_root_at_slot(checkpoint_slot, WhenSlotSkipped::Prev) .unwrap() .unwrap(); - let wss_state_root = harness - .chain - .state_root_at_slot(checkpoint_slot) - .unwrap() - .unwrap(); - let wss_block = harness .chain .store @@ -2884,8 +2884,21 @@ async fn reproduction_unaligned_checkpoint_sync_pruned_payload() { .unwrap() .unwrap(); - // The test premise requires the anchor block to have a payload. - assert!(wss_block.message().execution_payload().is_ok()); + let wss_state_root = harness + .chain + .state_root_at_slot(checkpoint_slot) + .unwrap() + .unwrap(); + + // The test premise requires the anchor block to have a payload (or a payload bid in Gloas). + assert!( + wss_block.message().execution_payload().is_ok() + || wss_block + .message() + .body() + .signed_execution_payload_bid() + .is_ok() + ); let wss_blobs_opt = harness .chain @@ -2967,15 +2980,19 @@ async fn reproduction_unaligned_checkpoint_sync_pruned_payload() { chain.head_snapshot().beacon_state.slot() ); - let payload_exists = chain - .store - .execution_payload_exists(&wss_block_root) - .unwrap_or(false); + // In Gloas, the execution payload envelope is separate from the block and will be synced + // from the network. We don't check for its existence here. 
+ if !wss_block.fork_name_unchecked().gloas_enabled() { + let payload_exists = chain + .store + .execution_payload_exists(&wss_block_root) + .unwrap_or(false); - assert!( - payload_exists, - "Split block payload must exist in the new node's store after checkpoint sync" - ); + assert!( + payload_exists, + "Split block payload must exist in the new node's store after checkpoint sync" + ); + } } async fn weak_subjectivity_sync_test( @@ -3013,18 +3030,17 @@ async fn weak_subjectivity_sync_test( .block_root_at_slot(checkpoint_slot, WhenSlotSkipped::Prev) .unwrap() .unwrap(); - let wss_state_root = harness - .chain - .state_root_at_slot(checkpoint_slot) - .unwrap() - .unwrap(); - let wss_block = harness .chain .store .get_full_block(&wss_block_root) .unwrap() .unwrap(); + let wss_state_root = harness + .chain + .state_root_at_slot(checkpoint_slot) + .unwrap() + .unwrap(); let wss_blobs_opt = harness .chain .get_or_reconstruct_blobs(&wss_block_root) @@ -3101,6 +3117,20 @@ async fn weak_subjectivity_sync_test( .build() .expect("should build"); + // Store the WSS envelope to simulate it arriving from network sync. + // In production, the envelope would be synced from the network after checkpoint sync. + if let Some(envelope) = harness + .chain + .store + .get_payload_envelope(&wss_block.canonical_root()) + .unwrap_or(None) + { + beacon_chain + .store + .put_payload_envelope(&wss_block.canonical_root(), &envelope) + .unwrap(); + } + let beacon_chain = Arc::new(beacon_chain); let wss_block_root = wss_block.canonical_root(); let store_wss_block = harness @@ -3120,6 +3150,21 @@ async fn weak_subjectivity_sync_test( assert_eq!(store_wss_blobs_opt, wss_blobs_opt); } + // Store the WSS block's envelope in the new chain (required for Gloas forward sync). + // The first forward block needs the checkpoint block's envelope to determine the parent's + // Full state. 
+ if let Some(envelope) = harness + .chain + .store + .get_payload_envelope(&wss_block_root) + .unwrap() + { + beacon_chain + .store + .put_payload_envelope(&wss_block_root, &envelope) + .unwrap(); + } + // Apply blocks forward to reach head. let chain_dump = harness.chain.chain_dump().unwrap(); let new_blocks = chain_dump @@ -3154,6 +3199,21 @@ async fn weak_subjectivity_sync_test( ) .await .unwrap(); + + // Store the envelope and apply it to fork choice. + if let Some(envelope) = &snapshot.execution_envelope { + beacon_chain + .store + .put_payload_envelope(&block_root, envelope) + .unwrap(); + // Update fork choice so head selection accounts for Full payload status. + beacon_chain + .canonical_head + .fork_choice_write_lock() + .on_valid_payload_envelope_received(block_root) + .unwrap(); + } + beacon_chain.recompute_head_at_current_slot().await; // Check that the new block's state can be loaded correctly. @@ -3305,6 +3365,17 @@ async fn weak_subjectivity_sync_test( } assert_eq!(beacon_chain.store.get_oldest_block_slot(), 0); + // Store envelopes for all historic blocks (needed for dumping the chain from the new node). + for snapshot in chain_dump.iter() { + let block_root = snapshot.beacon_block_root; + if let Some(envelope) = &snapshot.execution_envelope { + beacon_chain + .store + .put_payload_envelope(&block_root, envelope) + .unwrap(); + } + } + // Sanity check for non-aligned WSS starts, to make sure the WSS block is persisted properly if wss_block_slot != wss_state_slot { let new_node_block_root_at_wss_block = beacon_chain @@ -3374,13 +3445,12 @@ async fn weak_subjectivity_sync_test( assert_eq!(state.canonical_root().unwrap(), state_root); } - // Anchor slot is still set to the slot of the checkpoint block. 
- // Note: since hot tree states the anchor slot is set to the aligned ws state slot - // https://github.com/sigp/lighthouse/pull/6750 - let wss_aligned_slot = if checkpoint_slot % E::slots_per_epoch() == 0 { - checkpoint_slot + // Anchor slot is set to the WSS state slot, which is always epoch-aligned (the state is + // advanced to an epoch boundary during checkpoint sync). + let wss_aligned_slot = if wss_state_slot % E::slots_per_epoch() == 0 { + wss_state_slot } else { - (checkpoint_slot.epoch(E::slots_per_epoch()) + Epoch::new(1)) + (wss_state_slot.epoch(E::slots_per_epoch()) + Epoch::new(1)) .start_slot(E::slots_per_epoch()) }; assert_eq!(store.get_anchor_info().anchor_slot, wss_aligned_slot); @@ -3635,6 +3705,10 @@ async fn test_import_historical_data_columns_batch_no_block_found() { if fork_name_from_env().is_some_and(|f| !f.fulu_enabled()) { return; }; + // TODO(Gloas): blocks don't have blob_kzg_commitments (blobs are in the execution payload envelope). + if fork_name_from_env().is_some_and(|f| f.gloas_enabled()) { + return; + } let spec = test_spec::(); let db_path = tempdir().unwrap(); @@ -3745,12 +3819,14 @@ async fn process_blocks_and_attestations_for_unaligned_checkpoint() { let all_validators = (0..LOW_VALIDATOR_COUNT).collect::>(); - let split_slot = Slot::new(E::slots_per_epoch() * 4); + let finalized_epoch_start_slot = Slot::new(E::slots_per_epoch() * 4); let pre_skips = 1; let post_skips = 1; - // Build the chain up to the intended split slot, with 3 skips before the split. - let slots = (1..=split_slot.as_u64() - pre_skips) + let split_slot = finalized_epoch_start_slot; + + // Build the chain up to the intended finalized epoch slot, with 1 skip before the split. 
+ let slots = (1..=finalized_epoch_start_slot.as_u64() - pre_skips) .map(Slot::new) .collect::>(); @@ -3769,20 +3845,26 @@ async fn process_blocks_and_attestations_for_unaligned_checkpoint() { // // - one that is invalid because it conflicts with finalization (slot <= finalized_slot) // - one that is valid because its slot is not finalized (slot > finalized_slot) + // + // Note: block verification uses finalized_checkpoint.epoch.start_slot() (== + // finalized_epoch_start_slot) for the finalized slot check. let (unadvanced_split_state, unadvanced_split_state_root) = harness.get_current_state_and_root(); let ((invalid_fork_block, _), _) = harness - .make_block(unadvanced_split_state.clone(), split_slot) + .make_block(unadvanced_split_state.clone(), finalized_epoch_start_slot) .await; let ((valid_fork_block, _), _) = harness - .make_block(unadvanced_split_state.clone(), split_slot + 1) + .make_block( + unadvanced_split_state.clone(), + finalized_epoch_start_slot + 1, + ) .await; // Advance the chain so that the intended split slot is finalized. // Do not attest in the epoch boundary slot, to make attestation production later easier (no // equivocations). 
- let finalizing_slot = split_slot + 2 * E::slots_per_epoch(); + let finalizing_slot = finalized_epoch_start_slot + 2 * E::slots_per_epoch(); for _ in 0..pre_skips + post_skips { harness.advance_slot(); } @@ -3834,12 +3916,7 @@ async fn process_blocks_and_attestations_for_unaligned_checkpoint() { let (split_state_root, mut advanced_split_state) = harness .chain .store - .get_advanced_hot_state( - split.block_root, - StatePayloadStatus::Pending, - split.slot, - split.state_root, - ) + .get_advanced_hot_state(split.block_root, split.slot, split.state_root) .unwrap() .unwrap(); complete_state_advance( @@ -3973,6 +4050,7 @@ async fn schema_downgrade_to_min_version(store_config: StoreConfig, archive: boo let num_blocks_produced = E::slots_per_epoch() * 4; let db_path = tempdir().unwrap(); let spec = test_spec::(); + let is_gloas = spec.is_gloas_scheduled(); let chain_config = ChainConfig { archive, @@ -3995,7 +4073,11 @@ async fn schema_downgrade_to_min_version(store_config: StoreConfig, archive: boo ) .await; - let min_version = SchemaVersion(28); + let min_version = if is_gloas { + SchemaVersion(29) + } else { + SchemaVersion(28) + }; // Save the slot clock so that the new harness doesn't revert in time. let slot_clock = harness.chain.slot_clock.clone(); @@ -4565,6 +4647,10 @@ async fn fulu_prune_data_columns_happy_case() { // No-op if PeerDAS not scheduled. return; } + // TODO(Gloas): blocks don't have blob_kzg_commitments (blobs are in the execution payload envelope). + if store.get_chain_spec().is_gloas_scheduled() { + return; + } let Some(fulu_fork_epoch) = store.get_chain_spec().fulu_fork_epoch else { // No-op prior to Fulu. return; @@ -4620,6 +4706,10 @@ async fn fulu_prune_data_columns_no_finalization() { // No-op if PeerDAS not scheduled. return; } + // TODO(Gloas): blocks don't have blob_kzg_commitments (blobs are in the execution payload envelope). 
+ if store.get_chain_spec().is_gloas_scheduled() { + return; + } let Some(fulu_fork_epoch) = store.get_chain_spec().fulu_fork_epoch else { // No-op prior to Fulu. return; @@ -4839,6 +4929,10 @@ async fn fulu_prune_data_columns_margin_test(margin: u64) { // No-op if PeerDAS not scheduled. return; } + // TODO(Gloas): blocks don't have blob_kzg_commitments (blobs are in the execution payload envelope). + if store.get_chain_spec().is_gloas_scheduled() { + return; + } let Some(fulu_fork_epoch) = store.get_chain_spec().fulu_fork_epoch else { // No-op prior to Fulu. return; @@ -5156,6 +5250,10 @@ async fn test_custody_column_filtering_regular_node() { if !test_spec::().is_peer_das_scheduled() { return; } + // TODO(Gloas): blocks don't have blob_kzg_commitments (blobs are in the execution payload envelope). + if test_spec::().is_gloas_scheduled() { + return; + } let db_path = tempdir().unwrap(); let store = get_store(&db_path); @@ -5200,6 +5298,10 @@ async fn test_custody_column_filtering_supernode() { if !test_spec::().is_peer_das_scheduled() { return; } + // TODO(Gloas): blocks don't have blob_kzg_commitments (blobs are in the execution payload envelope). 
+ if test_spec::().is_gloas_scheduled() { + return; + } let db_path = tempdir().unwrap(); let store = get_store(&db_path); @@ -5515,7 +5617,7 @@ async fn test_gloas_block_and_envelope_storage_generic( let mut state = genesis_state; let mut block_roots = vec![]; - let mut stored_states = vec![(Slot::new(0), StatePayloadStatus::Full, genesis_state_root)]; + let mut stored_states = vec![(Slot::new(0), genesis_state_root)]; for i in 1..=num_slots { let slot = Slot::new(i); @@ -5527,10 +5629,10 @@ async fn test_gloas_block_and_envelope_storage_generic( let state_root = state.canonical_root().unwrap(); store.put_state(&state_root, &state).unwrap(); - stored_states.push((slot, state.payload_status(), state_root)); + stored_states.push((slot, state_root)); } - let (block_contents, envelope, mut pending_state) = + let (block_contents, envelope, mut post_block_state) = harness.make_block_with_envelope(state, slot).await; let block_root = block_contents.0.canonical_root(); @@ -5540,21 +5642,17 @@ async fn test_gloas_block_and_envelope_storage_generic( .await .unwrap(); - let pending_state_root = pending_state.update_tree_hash_cache().unwrap(); - stored_states.push((slot, StatePayloadStatus::Pending, pending_state_root)); + let state_root = post_block_state.update_tree_hash_cache().unwrap(); + stored_states.push((slot, state_root)); // Process the envelope. let envelope = envelope.expect("Gloas block should have envelope"); - let mut full_state = pending_state.clone(); - let envelope_state_root = envelope.message.state_root; - let full_state_root = harness - .process_envelope(block_root, envelope, &mut full_state) + harness + .process_envelope(block_root, envelope, &post_block_state, state_root) .await; - assert_eq!(full_state_root, envelope_state_root); - stored_states.push((slot, StatePayloadStatus::Full, full_state_root)); block_roots.push(block_root); - state = full_state; + state = post_block_state; } // Verify block storage. 
@@ -5577,20 +5675,15 @@ async fn test_gloas_block_and_envelope_storage_generic( // Verify state storage. // Iterate in reverse order to frustrate the cache. - for (slot, payload_status, state_root) in stored_states.into_iter().rev() { + for (slot, state_root) in stored_states.into_iter().rev() { println!("{slot}: {state_root:?}"); let Some(mut loaded_state) = store .get_state(&state_root, Some(slot), CACHE_STATE_IN_TESTS) .unwrap() else { - panic!("missing {payload_status:?} state at slot {slot} with root {state_root:?}"); + panic!("missing state at slot {slot} with root {state_root:?}"); }; assert_eq!(loaded_state.slot(), slot); - assert_eq!( - loaded_state.payload_status(), - payload_status, - "slot = {slot}" - ); assert_eq!( loaded_state.canonical_root().unwrap(), state_root, @@ -5600,74 +5693,6 @@ async fn test_gloas_block_and_envelope_storage_generic( check_db_invariants(&harness); } -/// Test that Pending and Full states have the correct payload status through round-trip -/// storage and retrieval. -#[tokio::test] -async fn test_gloas_state_payload_status() { - if !fork_name_from_env().is_some_and(|f| f.gloas_enabled()) { - return; - } - - let db_path = tempdir().unwrap(); - let store = get_store(&db_path); - let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); - - let num_blocks = 6u64; - let (genesis_state, _genesis_state_root) = harness.get_current_state_and_root(); - let mut state = genesis_state; - - for i in 1..=num_blocks { - let slot = Slot::new(i); - harness.advance_slot(); - - let (block_contents, envelope, pending_state) = - harness.make_block_with_envelope(state, slot).await; - let block_root = block_contents.0.canonical_root(); - - harness - .process_block(slot, block_root, block_contents) - .await - .unwrap(); - - // Verify the pending state has correct payload status. 
- assert_eq!( - pending_state.payload_status(), - StatePayloadStatus::Pending, - "pending state at slot {} should be Pending", - i - ); - - // Process the envelope and verify the full state has correct payload status. - let envelope = envelope.expect("Gloas block should have envelope"); - let mut full_state = pending_state; - let full_state_root = harness - .process_envelope(block_root, envelope, &mut full_state) - .await; - - assert_eq!( - full_state.payload_status(), - StatePayloadStatus::Full, - "full state at slot {} should be Full", - i - ); - - // Round-trip: load the full state from DB and check status. - let loaded_full = store - .get_state(&full_state_root, None, CACHE_STATE_IN_TESTS) - .unwrap() - .expect("full state should exist in DB"); - assert_eq!( - loaded_full.payload_status(), - StatePayloadStatus::Full, - "loaded full state at slot {} should be Full after round-trip", - i - ); - - state = full_state; - } - check_db_invariants(&harness); -} - /// Test block replay with and without envelopes. #[tokio::test] async fn test_gloas_block_replay_with_envelopes() { @@ -5704,11 +5729,11 @@ async fn test_gloas_block_replay_with_envelopes() { pending_states.insert(slot, (pending_state_root, pending_state.clone())); let envelope = envelope.expect("Gloas block should have envelope"); - let mut full_state = pending_state; - let full_state_root = harness - .process_envelope(block_root, envelope, &mut full_state) + let full_state = pending_state; + harness + .process_envelope(block_root, envelope, &full_state, pending_state_root) .await; - full_states.insert(slot, (full_state_root, full_state.clone())); + full_states.insert(slot, (pending_state_root, full_state.clone())); last_block_root = block_root; state = full_state; @@ -5716,94 +5741,29 @@ async fn test_gloas_block_replay_with_envelopes() { let end_slot = Slot::new(num_blocks); - // Load blocks for Pending replay (no envelopes for the last block). 
- let (blocks_pending, envelopes_pending) = store - .load_blocks_to_replay( - Slot::new(0), - end_slot, - last_block_root, - StatePayloadStatus::Pending, - ) + // Load blocks for replay. + let blocks = store + .load_blocks_to_replay(Slot::new(0), end_slot, last_block_root) .unwrap(); - assert!( - !blocks_pending.is_empty(), - "should have blocks for pending replay" - ); - // For Pending, no envelope for the first block (slot 0) or last block; envelopes for - // intermediate blocks whose payloads are canonical. - let expected_pending_envelopes = blocks_pending.len().saturating_sub(2); + assert!(!blocks.is_empty(), "should have blocks for replay"); + + // Replay blocks and verify against the expected state. + let mut replayed = BlockReplayer::::new(genesis_state, store.get_chain_spec()) + .no_signature_verification() + .minimal_block_root_verification() + .apply_blocks(blocks, None) + .expect("should replay blocks") + .into_state(); + replayed.apply_pending_mutations().unwrap(); + + let (_, mut expected) = pending_states.get(&end_slot).unwrap().clone(); + expected.apply_pending_mutations().unwrap(); + + replayed.drop_all_caches().unwrap(); + expected.drop_all_caches().unwrap(); assert_eq!( - envelopes_pending.len(), - expected_pending_envelopes, - "pending replay should have envelopes for all blocks except the last" - ); - assert!( - blocks_pending - .iter() - .skip(1) - .take(envelopes_pending.len()) - .map(|block| block.slot()) - .eq(envelopes_pending - .iter() - .map(|envelope| envelope.message.slot)), - "block and envelope slots should match" - ); - - // Load blocks for Full replay (envelopes for all blocks including the last). 
- let (blocks_full, envelopes_full) = store - .load_blocks_to_replay( - Slot::new(0), - end_slot, - last_block_root, - StatePayloadStatus::Full, - ) - .unwrap(); - assert_eq!( - envelopes_full.len(), - expected_pending_envelopes + 1, - "full replay should have one more envelope than pending replay" - ); - - // Replay to Pending state and verify. - let mut replayed_pending = - BlockReplayer::::new(genesis_state.clone(), store.get_chain_spec()) - .no_signature_verification() - .minimal_block_root_verification() - .desired_state_payload_status(StatePayloadStatus::Pending) - .apply_blocks(blocks_pending, envelopes_pending, None) - .expect("should replay blocks to pending state") - .into_state(); - replayed_pending.apply_pending_mutations().unwrap(); - - let (_, mut expected_pending) = pending_states.get(&end_slot).unwrap().clone(); - expected_pending.apply_pending_mutations().unwrap(); - - replayed_pending.drop_all_caches().unwrap(); - expected_pending.drop_all_caches().unwrap(); - assert_eq!( - replayed_pending, expected_pending, - "replayed pending state should match stored pending state" - ); - - // Replay to Full state and verify. 
- let mut replayed_full = - BlockReplayer::::new(genesis_state, store.get_chain_spec()) - .no_signature_verification() - .minimal_block_root_verification() - .desired_state_payload_status(StatePayloadStatus::Full) - .apply_blocks(blocks_full, envelopes_full, None) - .expect("should replay blocks to full state") - .into_state(); - replayed_full.apply_pending_mutations().unwrap(); - - let (_, mut expected_full) = full_states.get(&end_slot).unwrap().clone(); - expected_full.apply_pending_mutations().unwrap(); - - replayed_full.drop_all_caches().unwrap(); - expected_full.drop_all_caches().unwrap(); - assert_eq!( - replayed_full, expected_full, - "replayed full state should match stored full state" + replayed, expected, + "replayed state should match stored state" ); check_db_invariants(&harness); } @@ -5836,40 +5796,43 @@ async fn test_gloas_hot_state_hierarchy() { let slot = Slot::new(i); harness.advance_slot(); - let (block_contents, envelope, pending_state) = + let (block_contents, envelope, mut pending_state) = harness.make_block_with_envelope(state.clone(), slot).await; let block_root = block_contents.0.canonical_root(); - - // Attest to previous block before processing next. - if i > 1 { - let state_root = state.update_tree_hash_cache().unwrap(); - harness.attest_block( - &state, - state_root, - last_block_root.into(), - &block_contents.0, - &some_validators, - ); - } + let signed_block = block_contents.0.clone(); harness .process_block(slot, block_root, block_contents) .await .unwrap(); + // Attest to the current block at its own slot (same-slot attestation). + // In Gloas, same-slot attestations have index=0 and route to Pending in + // fork choice, correctly propagating weight through the Full path. + // Use pending_state (at slot i) so the target root resolves correctly. 
+ let pending_state_root = pending_state.update_tree_hash_cache().unwrap(); + harness.attest_block( + &pending_state, + pending_state_root, + block_root.into(), + &signed_block, + &some_validators, + ); + let envelope = envelope.expect("Gloas block should have envelope"); - let mut full_state = pending_state; + let full_state = pending_state; harness - .process_envelope(block_root, envelope, &mut full_state) + .process_envelope(block_root, envelope, &full_state, pending_state_root) .await; last_block_root = block_root; state = full_state; } - // Verify states can be loaded and have correct payload status. - let _head_state = harness.get_current_state(); - let _head_slot = harness.head_slot(); + // Head should be the block at slot 40 with full payload. + let head = harness.chain.canonical_head.cached_head(); + assert_eq!(head.head_block_root(), last_block_root); + assert_eq!(head.head_payload_status(), PayloadStatus::Full); // States at all slots on the canonical chain should be retrievable. for slot_num in 1..=num_blocks { @@ -5880,7 +5843,7 @@ async fn test_gloas_hot_state_hierarchy() { let mut loaded_state = store .get_state(&state_root, Some(slot), CACHE_STATE_IN_TESTS) .unwrap() - .unwrap(); + .unwrap_or_else(|| panic!("missing state at {slot}/{state_root:?}")); assert_eq!(loaded_state.canonical_root().unwrap(), state_root); } diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index 10c0b429a9..3958ce6c6d 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -115,7 +115,18 @@ fn massive_skips() { assert!(state.slot() > 1, "the state should skip at least one slot"); - if state.fork_name_unchecked().fulu_enabled() { + if state.fork_name_unchecked().gloas_enabled() { + // Gloas uses compute_balance_weighted_selection for proposer selection, which + // returns InvalidIndicesCount (not InsufficientValidators) when the active + // validator set is empty. 
+ assert_eq!( + error, + SlotProcessingError::EpochProcessingError(EpochProcessingError::BeaconStateError( + BeaconStateError::InvalidIndicesCount + )), + "should return error indicating that validators have been slashed out" + ) + } else if state.fork_name_unchecked().fulu_enabled() { // post-fulu this is done in per_epoch_processing assert_eq!( error, @@ -1006,9 +1017,12 @@ async fn pseudo_finalize_test_generic( }; // pseudo finalize + // Post-Gloas the finalized state must be Pending (the block's state_root), not Full + // (the envelope's state_root), because the payload of the finalized block is not finalized. + let finalized_state_root = head.beacon_block.message().state_root(); harness .chain - .manually_finalize_state(head.beacon_state_root(), checkpoint) + .manually_finalize_state(finalized_state_root, checkpoint) .unwrap(); let split = harness.chain.store.get_split_info(); diff --git a/beacon_node/beacon_chain/tests/validator_monitor.rs b/beacon_node/beacon_chain/tests/validator_monitor.rs index 521fc4ac97..a37ab6458f 100644 --- a/beacon_node/beacon_chain/tests/validator_monitor.rs +++ b/beacon_node/beacon_chain/tests/validator_monitor.rs @@ -117,7 +117,8 @@ async fn missed_blocks_across_epochs() { #[tokio::test] async fn missed_blocks_basic() { - let validator_count = 16; + // >= 32 validators required for Gloas genesis with MainnetEthSpec (32 slots/epoch). 
+ let validator_count = 32; let slots_per_epoch = E::slots_per_epoch(); diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index 9c19e94c0e..236340aa29 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -1,11 +1,11 @@ use crate::engines::ForkchoiceState; use crate::http::{ ENGINE_FORKCHOICE_UPDATED_V1, ENGINE_FORKCHOICE_UPDATED_V2, ENGINE_FORKCHOICE_UPDATED_V3, - ENGINE_GET_BLOBS_V1, ENGINE_GET_BLOBS_V2, ENGINE_GET_CLIENT_VERSION_V1, - ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1, ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1, - ENGINE_GET_PAYLOAD_V1, ENGINE_GET_PAYLOAD_V2, ENGINE_GET_PAYLOAD_V3, ENGINE_GET_PAYLOAD_V4, - ENGINE_GET_PAYLOAD_V5, ENGINE_NEW_PAYLOAD_V1, ENGINE_NEW_PAYLOAD_V2, ENGINE_NEW_PAYLOAD_V3, - ENGINE_NEW_PAYLOAD_V4, ENGINE_NEW_PAYLOAD_V5, + ENGINE_FORKCHOICE_UPDATED_V4, ENGINE_GET_BLOBS_V1, ENGINE_GET_BLOBS_V2, + ENGINE_GET_CLIENT_VERSION_V1, ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1, + ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1, ENGINE_GET_PAYLOAD_V1, ENGINE_GET_PAYLOAD_V2, + ENGINE_GET_PAYLOAD_V3, ENGINE_GET_PAYLOAD_V4, ENGINE_GET_PAYLOAD_V5, ENGINE_NEW_PAYLOAD_V1, + ENGINE_NEW_PAYLOAD_V2, ENGINE_NEW_PAYLOAD_V3, ENGINE_NEW_PAYLOAD_V4, ENGINE_NEW_PAYLOAD_V5, }; use eth2::types::{ BlobsBundle, SsePayloadAttributes, SsePayloadAttributesV1, SsePayloadAttributesV2, @@ -158,7 +158,7 @@ impl ExecutionBlock { } #[superstruct( - variants(V1, V2, V3), + variants(V1, V2, V3, V4), variant_attributes(derive(Clone, Debug, Eq, Hash, PartialEq),), cast_error(ty = "Error", expr = "Error::IncorrectStateVariant"), partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant") @@ -171,10 +171,12 @@ pub struct PayloadAttributes { pub prev_randao: Hash256, #[superstruct(getter(copy))] pub suggested_fee_recipient: Address, - #[superstruct(only(V2, V3))] + #[superstruct(only(V2, V3, V4))] pub withdrawals: Vec, - #[superstruct(only(V3), partial_getter(copy))] + 
#[superstruct(only(V3, V4), partial_getter(copy))] pub parent_beacon_block_root: Hash256, + #[superstruct(only(V4), partial_getter(copy))] + pub slot_number: u64, } impl PayloadAttributes { @@ -184,24 +186,35 @@ impl PayloadAttributes { suggested_fee_recipient: Address, withdrawals: Option>, parent_beacon_block_root: Option, + slot_number: Option, ) -> Self { - match withdrawals { - Some(withdrawals) => match parent_beacon_block_root { - Some(parent_beacon_block_root) => PayloadAttributes::V3(PayloadAttributesV3 { + match (withdrawals, parent_beacon_block_root, slot_number) { + (Some(withdrawals), Some(parent_beacon_block_root), Some(slot_number)) => { + PayloadAttributes::V4(PayloadAttributesV4 { timestamp, prev_randao, suggested_fee_recipient, withdrawals, parent_beacon_block_root, - }), - None => PayloadAttributes::V2(PayloadAttributesV2 { + slot_number, + }) + } + (Some(withdrawals), Some(parent_beacon_block_root), None) => { + PayloadAttributes::V3(PayloadAttributesV3 { timestamp, prev_randao, suggested_fee_recipient, withdrawals, - }), - }, - None => PayloadAttributes::V1(PayloadAttributesV1 { + parent_beacon_block_root, + }) + } + (Some(withdrawals), None, _) => PayloadAttributes::V2(PayloadAttributesV2 { + timestamp, + prev_randao, + suggested_fee_recipient, + withdrawals, + }), + (None, _, _) => PayloadAttributes::V1(PayloadAttributesV1 { timestamp, prev_randao, suggested_fee_recipient, @@ -246,6 +259,21 @@ impl From for SsePayloadAttributes { withdrawals, parent_beacon_block_root, }), + // V4 maps to V3 for SSE (slot_number is not part of the SSE spec) + PayloadAttributes::V4(PayloadAttributesV4 { + timestamp, + prev_randao, + suggested_fee_recipient, + withdrawals, + parent_beacon_block_root, + slot_number: _, + }) => Self::V3(SsePayloadAttributesV3 { + timestamp, + prev_randao, + suggested_fee_recipient, + withdrawals, + parent_beacon_block_root, + }), } } } @@ -555,6 +583,7 @@ pub struct EngineCapabilities { pub forkchoice_updated_v1: bool, pub 
forkchoice_updated_v2: bool, pub forkchoice_updated_v3: bool, + pub forkchoice_updated_v4: bool, pub get_payload_bodies_by_hash_v1: bool, pub get_payload_bodies_by_range_v1: bool, pub get_payload_v1: bool, @@ -594,6 +623,9 @@ impl EngineCapabilities { if self.forkchoice_updated_v3 { response.push(ENGINE_FORKCHOICE_UPDATED_V3); } + if self.forkchoice_updated_v4 { + response.push(ENGINE_FORKCHOICE_UPDATED_V4); + } if self.get_payload_bodies_by_hash_v1 { response.push(ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1); } diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index bcd95d1ae4..dcf8205406 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -48,6 +48,7 @@ pub const ENGINE_GET_PAYLOAD_TIMEOUT: Duration = Duration::from_secs(2); pub const ENGINE_FORKCHOICE_UPDATED_V1: &str = "engine_forkchoiceUpdatedV1"; pub const ENGINE_FORKCHOICE_UPDATED_V2: &str = "engine_forkchoiceUpdatedV2"; pub const ENGINE_FORKCHOICE_UPDATED_V3: &str = "engine_forkchoiceUpdatedV3"; +pub const ENGINE_FORKCHOICE_UPDATED_V4: &str = "engine_forkchoiceUpdatedV4"; pub const ENGINE_FORKCHOICE_UPDATED_TIMEOUT: Duration = Duration::from_secs(8); pub const ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1: &str = "engine_getPayloadBodiesByHashV1"; @@ -84,6 +85,7 @@ pub static LIGHTHOUSE_CAPABILITIES: &[&str] = &[ ENGINE_FORKCHOICE_UPDATED_V1, ENGINE_FORKCHOICE_UPDATED_V2, ENGINE_FORKCHOICE_UPDATED_V3, + ENGINE_FORKCHOICE_UPDATED_V4, ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1, ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1, ENGINE_GET_CLIENT_VERSION_V1, @@ -1132,6 +1134,27 @@ impl HttpJsonRpc { Ok(response.into()) } + pub async fn forkchoice_updated_v4( + &self, + forkchoice_state: ForkchoiceState, + payload_attributes: Option, + ) -> Result { + let params = json!([ + JsonForkchoiceStateV1::from(forkchoice_state), + payload_attributes.map(JsonPayloadAttributes::from) + ]); + + let response: 
JsonForkchoiceUpdatedV1Response = self + .rpc_request( + ENGINE_FORKCHOICE_UPDATED_V4, + params, + ENGINE_FORKCHOICE_UPDATED_TIMEOUT * self.execution_timeout_multiplier, + ) + .await?; + + Ok(response.into()) + } + pub async fn get_payload_bodies_by_hash_v1( &self, block_hashes: Vec, @@ -1204,6 +1227,7 @@ impl HttpJsonRpc { forkchoice_updated_v1: capabilities.contains(ENGINE_FORKCHOICE_UPDATED_V1), forkchoice_updated_v2: capabilities.contains(ENGINE_FORKCHOICE_UPDATED_V2), forkchoice_updated_v3: capabilities.contains(ENGINE_FORKCHOICE_UPDATED_V3), + forkchoice_updated_v4: capabilities.contains(ENGINE_FORKCHOICE_UPDATED_V4), get_payload_bodies_by_hash_v1: capabilities .contains(ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1), get_payload_bodies_by_range_v1: capabilities @@ -1449,6 +1473,16 @@ impl HttpJsonRpc { )) } } + PayloadAttributes::V4(_) => { + if engine_capabilities.forkchoice_updated_v4 { + self.forkchoice_updated_v4(forkchoice_state, maybe_payload_attributes) + .await + } else { + Err(Error::RequiredMethodUnsupported( + "engine_forkchoiceUpdatedV4", + )) + } + } } } else if engine_capabilities.forkchoice_updated_v3 { self.forkchoice_updated_v3(forkchoice_state, maybe_payload_attributes) diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index 97c8e8a625..a77861981f 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -107,6 +107,12 @@ pub struct JsonExecutionPayload { #[superstruct(only(Deneb, Electra, Fulu, Gloas))] #[serde(with = "serde_utils::u64_hex_be")] pub excess_blob_gas: u64, + #[superstruct(only(Gloas))] + #[serde(with = "ssz_types::serde_utils::hex_var_list")] + pub block_access_list: VariableList, + #[superstruct(only(Gloas))] + #[serde(with = "serde_utils::u64_hex_be")] + pub slot_number: u64, } impl From> for JsonExecutionPayloadBellatrix { @@ -252,6 +258,8 @@ impl TryFrom> 
for JsonExecutionPayloadGloas withdrawals: withdrawals_to_json(payload.withdrawals)?, blob_gas_used: payload.blob_gas_used, excess_blob_gas: payload.excess_blob_gas, + block_access_list: payload.block_access_list, + slot_number: payload.slot_number.into(), }) } } @@ -425,6 +433,8 @@ impl TryFrom> for ExecutionPayloadGloas withdrawals: withdrawals_from_json(payload.withdrawals)?, blob_gas_used: payload.blob_gas_used, excess_blob_gas: payload.excess_blob_gas, + block_access_list: payload.block_access_list, + slot_number: payload.slot_number.into(), }) } } @@ -716,7 +726,7 @@ impl<'a> From<&'a JsonWithdrawal> for EncodableJsonWithdrawal<'a> { } #[superstruct( - variants(V1, V2, V3), + variants(V1, V2, V3, V4), variant_attributes( derive(Debug, Clone, PartialEq, Serialize, Deserialize), serde(rename_all = "camelCase") @@ -732,10 +742,13 @@ pub struct JsonPayloadAttributes { pub prev_randao: Hash256, #[serde(with = "serde_utils::address_hex")] pub suggested_fee_recipient: Address, - #[superstruct(only(V2, V3))] + #[superstruct(only(V2, V3, V4))] pub withdrawals: Vec, - #[superstruct(only(V3))] + #[superstruct(only(V3, V4))] pub parent_beacon_block_root: Hash256, + #[superstruct(only(V4))] + #[serde(with = "serde_utils::u64_hex_be")] + pub slot_number: u64, } impl From for JsonPayloadAttributes { @@ -759,6 +772,14 @@ impl From for JsonPayloadAttributes { withdrawals: pa.withdrawals.into_iter().map(Into::into).collect(), parent_beacon_block_root: pa.parent_beacon_block_root, }), + PayloadAttributes::V4(pa) => Self::V4(JsonPayloadAttributesV4 { + timestamp: pa.timestamp, + prev_randao: pa.prev_randao, + suggested_fee_recipient: pa.suggested_fee_recipient, + withdrawals: pa.withdrawals.into_iter().map(Into::into).collect(), + parent_beacon_block_root: pa.parent_beacon_block_root, + slot_number: pa.slot_number, + }), } } } @@ -784,6 +805,14 @@ impl From for PayloadAttributes { withdrawals: jpa.withdrawals.into_iter().map(Into::into).collect(), parent_beacon_block_root: 
jpa.parent_beacon_block_root, }), + JsonPayloadAttributes::V4(jpa) => Self::V4(PayloadAttributesV4 { + timestamp: jpa.timestamp, + prev_randao: jpa.prev_randao, + suggested_fee_recipient: jpa.suggested_fee_recipient, + withdrawals: jpa.withdrawals.into_iter().map(Into::into).collect(), + parent_beacon_block_root: jpa.parent_beacon_block_root, + slot_number: jpa.slot_number, + }), } } } diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index a66f7a9b55..ace6276b75 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -735,6 +735,9 @@ impl ExecutionBlockGenerator { blob_gas_used: 0, excess_blob_gas: 0, }), + _ => unreachable!(), + }, + PayloadAttributes::V4(pa) => match self.get_fork_at_timestamp(pa.timestamp) { ForkName::Gloas => ExecutionPayload::Gloas(ExecutionPayloadGloas { parent_hash: head_block_hash, fee_recipient: pa.suggested_fee_recipient, @@ -753,6 +756,8 @@ impl ExecutionBlockGenerator { withdrawals: pa.withdrawals.clone().try_into().unwrap(), blob_gas_used: 0, excess_blob_gas: 0, + block_access_list: VariableList::empty(), + slot_number: pa.slot_number.into(), }), _ => unreachable!(), }, diff --git a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs index e263e5402a..058f1e76da 100644 --- a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs +++ b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs @@ -507,7 +507,8 @@ pub async fn handle_rpc( } ENGINE_FORKCHOICE_UPDATED_V1 | ENGINE_FORKCHOICE_UPDATED_V2 - | ENGINE_FORKCHOICE_UPDATED_V3 => { + | ENGINE_FORKCHOICE_UPDATED_V3 + | ENGINE_FORKCHOICE_UPDATED_V4 => { let forkchoice_state: JsonForkchoiceStateV1 = get_param(params, 0).map_err(|s| (s, BAD_PARAMS_ERROR_CODE))?; let payload_attributes = match method { @@ 
-554,6 +555,11 @@ pub async fn handle_rpc( .map(|opt| opt.map(JsonPayloadAttributes::V3)) .map_err(|s| (s, BAD_PARAMS_ERROR_CODE))? } + ENGINE_FORKCHOICE_UPDATED_V4 => { + get_param::>(params, 1) + .map(|opt| opt.map(JsonPayloadAttributes::V4)) + .map_err(|s| (s, BAD_PARAMS_ERROR_CODE))? + } _ => unreachable!(), }; @@ -607,7 +613,7 @@ pub async fn handle_rpc( )); } } - ForkName::Deneb | ForkName::Electra | ForkName::Fulu | ForkName::Gloas => { + ForkName::Deneb | ForkName::Electra | ForkName::Fulu => { if method == ENGINE_FORKCHOICE_UPDATED_V1 { return Err(( format!("{} called after Deneb fork!", method), @@ -621,6 +627,14 @@ pub async fn handle_rpc( )); } } + ForkName::Gloas => { + if method != ENGINE_FORKCHOICE_UPDATED_V4 { + return Err(( + format!("{} called after Gloas fork! Use V4.", method), + FORK_REQUEST_MISMATCH_ERROR_CODE, + )); + } + } _ => unreachable!(), }; } diff --git a/beacon_node/execution_layer/src/test_utils/mock_builder.rs b/beacon_node/execution_layer/src/test_utils/mock_builder.rs index 7b6c4e8310..6ab6cca3f6 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_builder.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_builder.rs @@ -898,16 +898,24 @@ impl MockBuilder { fee_recipient, expected_withdrawals, None, + None, + ), + ForkName::Deneb | ForkName::Electra | ForkName::Fulu => PayloadAttributes::new( + timestamp, + *prev_randao, + fee_recipient, + expected_withdrawals, + Some(head_block_root), + None, + ), + ForkName::Gloas => PayloadAttributes::new( + timestamp, + *prev_randao, + fee_recipient, + expected_withdrawals, + Some(head_block_root), + Some(slot.as_u64()), ), - ForkName::Deneb | ForkName::Electra | ForkName::Fulu | ForkName::Gloas => { - PayloadAttributes::new( - timestamp, - *prev_randao, - fee_recipient, - expected_withdrawals, - Some(head_block_root), - ) - } ForkName::Base | ForkName::Altair => { return Err("invalid fork".to_string()); } diff --git 
a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs index 91966ff65e..288416d51e 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs @@ -96,8 +96,14 @@ impl MockExecutionLayer { justified_hash: None, finalized_hash: None, }; - let payload_attributes = - PayloadAttributes::new(timestamp, prev_randao, Address::repeat_byte(42), None, None); + let payload_attributes = PayloadAttributes::new( + timestamp, + prev_randao, + Address::repeat_byte(42), + None, + None, + None, + ); // Insert a proposer to ensure the fork choice updated command works. let slot = Slot::new(0); @@ -124,8 +130,14 @@ impl MockExecutionLayer { chain_health: ChainHealth::Healthy, }; let suggested_fee_recipient = self.el.get_suggested_fee_recipient(validator_index).await; - let payload_attributes = - PayloadAttributes::new(timestamp, prev_randao, suggested_fee_recipient, None, None); + let payload_attributes = PayloadAttributes::new( + timestamp, + prev_randao, + suggested_fee_recipient, + None, + None, + None, + ); let payload_parameters = PayloadParameters { parent_hash, @@ -171,8 +183,14 @@ impl MockExecutionLayer { chain_health: ChainHealth::Healthy, }; let suggested_fee_recipient = self.el.get_suggested_fee_recipient(validator_index).await; - let payload_attributes = - PayloadAttributes::new(timestamp, prev_randao, suggested_fee_recipient, None, None); + let payload_attributes = PayloadAttributes::new( + timestamp, + prev_randao, + suggested_fee_recipient, + None, + None, + None, + ); let payload_parameters = PayloadParameters { parent_hash, diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs index 47e3c9064c..6d8c30d316 100644 --- a/beacon_node/execution_layer/src/test_utils/mod.rs +++ b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -47,6 
+47,7 @@ pub const DEFAULT_ENGINE_CAPABILITIES: EngineCapabilities = EngineCapabilities { forkchoice_updated_v1: true, forkchoice_updated_v2: true, forkchoice_updated_v3: true, + forkchoice_updated_v4: true, get_payload_bodies_by_hash_v1: true, get_payload_bodies_by_range_v1: true, get_payload_v1: true, diff --git a/beacon_node/http_api/src/beacon/execution_payload_envelope.rs b/beacon_node/http_api/src/beacon/execution_payload_envelope.rs index 4a974c9919..382b967b43 100644 --- a/beacon_node/http_api/src/beacon/execution_payload_envelope.rs +++ b/beacon_node/http_api/src/beacon/execution_payload_envelope.rs @@ -91,7 +91,7 @@ pub async fn publish_execution_payload_envelope( chain: Arc>, network_tx: &UnboundedSender>, ) -> Result, Rejection> { - let slot = envelope.message.slot; + let slot = envelope.slot(); let beacon_block_root = envelope.message.beacon_block_root; // TODO(gloas): Replace this check once we have gossip validation. @@ -161,9 +161,7 @@ pub(crate) fn get_beacon_execution_payload_envelope( )) })?; - let fork_name = chain - .spec - .fork_name_at_slot::(envelope.message.slot); + let fork_name = chain.spec.fork_name_at_slot::(envelope.slot()); match accept_header { Some(api_types::Accept::Ssz) => Response::builder() diff --git a/beacon_node/http_api/src/sync_committee_rewards.rs b/beacon_node/http_api/src/sync_committee_rewards.rs index 8715fc2b1e..9bc1f6ead4 100644 --- a/beacon_node/http_api/src/sync_committee_rewards.rs +++ b/beacon_node/http_api/src/sync_committee_rewards.rs @@ -66,12 +66,11 @@ pub fn get_state_before_applying_block( }) .map_err(|e| custom_not_found(format!("Parent state is not available! {:?}", e)))?; - // TODO(gloas): handle payloads? 
let replayer = BlockReplayer::new(parent_state, &chain.spec) .no_signature_verification() .state_root_iter([Ok((parent_block.state_root(), parent_block.slot()))].into_iter()) .minimal_block_root_verification() - .apply_blocks(vec![], vec![], Some(block.slot())) + .apply_blocks(vec![], Some(block.slot())) .map_err(unhandled_error::)?; Ok(replayer.into_state()) diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index bf8443929c..2dd4c28040 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -3937,7 +3937,7 @@ impl ApiTester { .cloned() .expect("envelope should exist in pending cache for local building"); assert_eq!(envelope.beacon_block_root, block_root); - assert_eq!(envelope.slot, slot); + assert_eq!(envelope.slot(), slot); } /// Assert envelope fields match the expected block root and slot. @@ -3948,9 +3948,8 @@ impl ApiTester { slot: Slot, ) { assert_eq!(envelope.beacon_block_root, block_root); - assert_eq!(envelope.slot, slot); + assert_eq!(envelope.slot(), slot); assert_eq!(envelope.builder_index, BUILDER_INDEX_SELF_BUILD); - assert_ne!(envelope.state_root, Hash256::ZERO); } /// Sign an execution payload envelope. 
diff --git a/beacon_node/network/src/network_beacon_processor/tests.rs b/beacon_node/network/src/network_beacon_processor/tests.rs index d0f0557223..76c6ba812d 100644 --- a/beacon_node/network/src/network_beacon_processor/tests.rs +++ b/beacon_node/network/src/network_beacon_processor/tests.rs @@ -20,7 +20,6 @@ use beacon_chain::test_utils::{ use beacon_chain::{BeaconChain, WhenSlotSkipped}; use beacon_processor::{work_reprocessing_queue::*, *}; use bls::Signature; -use fixed_bytes::FixedBytesExtended; use itertools::Itertools; use libp2p::gossipsub::MessageAcceptance; use lighthouse_network::rpc::InboundRequestId; @@ -2125,12 +2124,13 @@ fn make_test_payload_envelope( ) -> SignedExecutionPayloadEnvelope { SignedExecutionPayloadEnvelope { message: ExecutionPayloadEnvelope { - payload: ExecutionPayloadGloas::default(), + payload: ExecutionPayloadGloas { + slot_number: slot, + ..ExecutionPayloadGloas::default() + }, execution_requests: ExecutionRequests::default(), builder_index: 0, beacon_block_root, - slot, - state_root: Hash256::zero(), }, signature: Signature::empty(), } @@ -2158,7 +2158,7 @@ async fn test_payload_envelopes_by_range() { let envelope = make_test_payload_envelope(Slot::new(slot), root); rig.chain .store - .put_payload_envelope(&root, envelope) + .put_payload_envelope(&root, &envelope) .unwrap(); expected_roots.push(root); } @@ -2208,7 +2208,7 @@ async fn test_payload_envelopes_by_root() { let envelope = make_test_payload_envelope(Slot::new(1), block_root); rig.chain .store - .put_payload_envelope(&block_root, envelope) + .put_payload_envelope(&block_root, &envelope) .unwrap(); let roots = RuntimeVariableList::new(vec![block_root], 1).unwrap(); @@ -2298,7 +2298,7 @@ async fn test_payload_envelopes_by_range_no_duplicates_with_skip_slots() { let envelope = make_test_payload_envelope(Slot::new(slot), root); rig.chain .store - .put_payload_envelope(&root, envelope) + .put_payload_envelope(&root, &envelope) .unwrap(); } } diff --git 
a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 78dd69e55a..e9b9de76e6 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -1064,7 +1064,7 @@ impl, Cold: ItemStore> HotColdDB pub fn put_payload_envelope( &self, block_root: &Hash256, - payload_envelope: SignedExecutionPayloadEnvelope, + payload_envelope: &SignedExecutionPayloadEnvelope, ) -> Result<(), Error> { self.hot_db.put_bytes( SignedExecutionPayloadEnvelope::::db_column(), @@ -1133,13 +1133,10 @@ impl, Cold: ItemStore> HotColdDB pub fn get_advanced_hot_state( &self, block_root: Hash256, - payload_status: StatePayloadStatus, max_slot: Slot, state_root: Hash256, ) -> Result)>, Error> { - if let Some(cached) = - self.get_advanced_hot_state_from_cache(block_root, payload_status, max_slot) - { + if let Some(cached) = self.get_advanced_hot_state_from_cache(block_root, max_slot) { return Ok(Some(cached)); } @@ -1161,11 +1158,7 @@ impl, Cold: ItemStore> HotColdDB .into()); } - // Split state should always be `Pending`. - let state_root = if block_root == split.block_root - && let StatePayloadStatus::Pending = payload_status - && split.slot <= max_slot - { + let state_root = if block_root == split.block_root && split.slot <= max_slot { split.state_root } else { state_root @@ -1212,12 +1205,11 @@ impl, Cold: ItemStore> HotColdDB pub fn get_advanced_hot_state_from_cache( &self, block_root: Hash256, - payload_status: StatePayloadStatus, max_slot: Slot, ) -> Option<(Hash256, BeaconState)> { self.state_cache .lock() - .get_by_block_root(block_root, payload_status, max_slot) + .get_by_block_root(block_root, max_slot) } /// Delete a state, ensuring it is removed from the LRU cache, as well as from on-disk. @@ -1857,100 +1849,6 @@ impl, Cold: ItemStore> HotColdDB } } - /// Compute the `StatePayloadStatus` for a stored state based on its summary. 
- /// - /// In future this might become a field of the summary, but this would require a whole DB - /// migration. For now we use an extra read from the DB to determine it. - fn get_hot_state_summary_payload_status( - &self, - summary: &HotStateSummary, - ) -> Result { - // Treat pre-Gloas states as `Pending`. - if !self - .spec - .fork_name_at_slot::(summary.slot) - .gloas_enabled() - { - return Ok(StatePayloadStatus::Pending); - } - - // Treat genesis state as `Pending` (`BeaconBlock` state). - let previous_state_root = summary.previous_state_root; - if previous_state_root.is_zero() { - return Ok(StatePayloadStatus::Pending); - } - - // Load the hot state summary for the previous state. - // - // If it has the same slot as this summary then we know this summary is for a `Full` state - // (payload state), because they are always diffed against their same-slot `Pending` state. - // - // If the previous summary has a different slot AND the latest block is from `summary.slot`, - // then this state *must* be `Pending` (it is the summary for latest block itself). - // - // Otherwise, we are at a skipped slot and must traverse the graph of state summaries - // backwards until we reach a summary for the latest block. This recursion could be quite - // far in the case of a long skip. We could optimise this in future using the - // `diff_base_state` (like in `get_ancestor_state_root`), or by doing a proper DB - // migration. - let previous_state_summary = self - .load_hot_state_summary(&previous_state_root)? - .ok_or(Error::MissingHotStateSummary(previous_state_root))?; - - if previous_state_summary.slot == summary.slot { - Ok(StatePayloadStatus::Full) - } else if summary.slot == summary.latest_block_slot { - Ok(StatePayloadStatus::Pending) - } else { - self.get_hot_state_summary_payload_status(&previous_state_summary) - } - } - - /// Recompute the payload status for a state at `slot` that is stored in the cold DB. 
- /// - /// This function returns an error for any `slot` that is outside the range of slots stored in - /// the freezer DB. - /// - /// For all slots prior to Gloas, it returns `Pending`. - /// - /// For post-Gloas slots the algorithm is: - /// - /// 1. Load the most recently applied block at `slot` (may not be from `slot` in case of a skip) - /// 2. Load the canonical `state_root` at the slot of the block. If this `state_root` matches - /// the one in the block then we know the state at *that* slot is canonically empty (no - /// payload). Conversely, if it is different, we know that the block's slot is full (assuming - /// no database corruption). - /// 3. The payload status of `slot` is the same as the payload status of `block.slot()`, because - /// we only care about whether a beacon block or payload was applied most recently, and - /// `block` is by definition the most-recently-applied block. - /// - /// All of this mucking around could be avoided if we do a schema migration to record the - /// payload status in the database. For now, this is simpler. - fn get_cold_state_payload_status(&self, slot: Slot) -> Result { - // Pre-Gloas states are always `Pending`. - if !self.spec.fork_name_at_slot::(slot).gloas_enabled() { - return Ok(StatePayloadStatus::Pending); - } - - let block_root = self - .get_cold_block_root(slot)? - .ok_or(HotColdDBError::MissingFrozenBlock(slot))?; - - let block = self - .get_blinded_block(&block_root)? - .ok_or(Error::MissingBlock(block_root))?; - - let state_root = self - .get_cold_state_root(block.slot())? 
- .ok_or(HotColdDBError::MissingRestorePointState(block.slot()))?; - - if block.state_root() != state_root { - Ok(StatePayloadStatus::Full) - } else { - Ok(StatePayloadStatus::Pending) - } - } - fn load_hot_hdiff_buffer(&self, state_root: Hash256) -> Result { if let Some(buffer) = self .state_cache @@ -2046,20 +1944,16 @@ impl, Cold: ItemStore> HotColdDB ) -> Result, Hash256)>, Error> { metrics::inc_counter(&metrics::BEACON_STATE_HOT_GET_COUNT); - if let Some( - summary @ HotStateSummary { - slot, - latest_block_root, - diff_base_state, - .. - }, - ) = self.load_hot_state_summary(state_root)? + if let Some(HotStateSummary { + slot, + latest_block_root, + diff_base_state, + .. + }) = self.load_hot_state_summary(state_root)? { - let payload_status = self.get_hot_state_summary_payload_status(&summary)?; debug!( %slot, ?state_root, - ?payload_status, "Loading hot state" ); let mut state = match self.hot_storage_strategy(slot)? { @@ -2113,7 +2007,6 @@ impl, Cold: ItemStore> HotColdDB base_state, slot, latest_block_root, - payload_status, update_cache, )? } @@ -2131,26 +2024,19 @@ impl, Cold: ItemStore> HotColdDB base_state: BeaconState, slot: Slot, latest_block_root: Hash256, - desired_payload_status: StatePayloadStatus, update_cache: bool, ) -> Result, Error> { - if base_state.slot() == slot && base_state.payload_status() == desired_payload_status { + if base_state.slot() == slot { return Ok(base_state); } - let (blocks, envelopes) = self.load_blocks_to_replay( - base_state.slot(), - slot, - latest_block_root, - desired_payload_status, - )?; + let blocks = self.load_blocks_to_replay(base_state.slot(), slot, latest_block_root)?; let _t = metrics::start_timer(&metrics::STORE_BEACON_REPLAY_HOT_BLOCKS_TIME); // If replaying blocks, and `update_cache` is true, also cache the epoch boundary // state that this state is based on. It may be useful as the basis of more states // in the same epoch. 
let state_cache_hook = |state_root, state: &mut BeaconState| { - // TODO(gloas): prevent caching of the payload_status=Full state? if !update_cache || state.slot() % E::slots_per_epoch() != 0 { return Ok(()); } @@ -2177,16 +2063,12 @@ impl, Cold: ItemStore> HotColdDB debug!( %slot, blocks = ?blocks.iter().map(|block| block.slot()).collect::>(), - envelopes = ?envelopes.iter().map(|e| e.message.slot).collect::>(), - payload_status = ?desired_payload_status, - "Replaying blocks and envelopes" + "Replaying blocks" ); self.replay_blocks( base_state, blocks, - envelopes, - desired_payload_status, slot, no_state_root_iter(), Some(Box::new(state_cache_hook)), @@ -2490,7 +2372,8 @@ impl, Cold: ItemStore> HotColdDB return Ok(base_state); } - let (blocks, envelopes) = self.load_cold_blocks(base_state.slot() + 1, slot)?; + let base_slot = base_state.slot(); + let blocks = self.load_cold_blocks(base_slot + 1, slot)?; // Include state root for base state as it is required by block processing to not // have to hash the state. @@ -2499,16 +2382,7 @@ impl, Cold: ItemStore> HotColdDB self.forwards_state_roots_iterator_until(base_state.slot(), slot, || { Err(Error::StateShouldNotBeRequired(slot)) })?; - let payload_status = self.get_cold_state_payload_status(slot)?; - let state = self.replay_blocks( - base_state, - blocks, - envelopes, - payload_status, - slot, - Some(state_root_iter), - None, - )?; + let state = self.replay_blocks(base_state, blocks, slot, Some(state_root_iter), None)?; debug!( target_slot = %slot, replay_time_ms = metrics::stop_timer_with_duration(replay_timer).as_millis(), @@ -2601,76 +2475,39 @@ impl, Cold: ItemStore> HotColdDB } } - /// Load cold blocks and payload envelopes between `start_slot` and `end_slot` inclusive. - #[allow(clippy::type_complexity)] + /// Load cold blocks between `start_slot` and `end_slot` inclusive. 
pub fn load_cold_blocks( &self, start_slot: Slot, end_slot: Slot, - ) -> Result< - ( - Vec>, - Vec>, - ), - Error, - > { + ) -> Result>, Error> { let _t = metrics::start_timer(&metrics::STORE_BEACON_LOAD_COLD_BLOCKS_TIME); let block_root_iter = self.forwards_block_roots_iterator_until(start_slot, end_slot, || { Err(Error::StateShouldNotBeRequired(end_slot)) })?; - let blocks = process_results(block_root_iter, |iter| { + process_results(block_root_iter, |iter| { iter.map(|(block_root, _slot)| block_root) .dedup() .map(|block_root| { self.get_blinded_block(&block_root)? .ok_or(Error::MissingBlock(block_root)) }) - .collect::, Error>>() - })??; - - // If Gloas is not enabled for any slots in the range, just return `blocks`. - if !self.spec.fork_name_at_slot::(start_slot).gloas_enabled() - && !self.spec.fork_name_at_slot::(end_slot).gloas_enabled() - { - return Ok((blocks, vec![])); - } - let end_block_root = self - .get_cold_block_root(end_slot)? - .ok_or(HotColdDBError::MissingFrozenBlock(end_slot))?; - let desired_payload_status = self.get_cold_state_payload_status(end_slot)?; - let envelopes = self.load_payload_envelopes_for_blocks( - &blocks, - end_block_root, - desired_payload_status, - )?; - - Ok((blocks, envelopes)) + .collect() + })? } - /// Load the blocks & envelopes between `start_slot` and `end_slot` by backtracking from + /// Load the blocks between `start_slot` and `end_slot` by backtracking from /// `end_block_root`. /// /// Blocks are returned in slot-ascending order, suitable for replaying on a state with slot /// equal to `start_slot`, to reach a state with slot equal to `end_slot`. - /// - /// Payloads are also returned in slot-ascending order, but only payloads forming part of - /// the chain are loaded (payloads for EMPTY slots are omitted). Prior to Gloas, an empty - /// vec of payloads will be returned. 
- #[allow(clippy::type_complexity)] pub fn load_blocks_to_replay( &self, start_slot: Slot, end_slot: Slot, end_block_root: Hash256, - desired_payload_status: StatePayloadStatus, - ) -> Result< - ( - Vec>, - Vec>, - ), - Error, - > { + ) -> Result>, Error> { let _t = metrics::start_timer(&metrics::STORE_BEACON_LOAD_HOT_BLOCKS_TIME); let mut blocks = ParentRootBlockIterator::new(self, end_block_root) .map(|result| result.map(|(_, block)| block)) @@ -2699,70 +2536,17 @@ impl, Cold: ItemStore> HotColdDB }) .collect::, _>>()?; blocks.reverse(); - - // If Gloas is not enabled for any slots in the range, just return `blocks`. - if !self.spec.fork_name_at_slot::(start_slot).gloas_enabled() - && !self.spec.fork_name_at_slot::(end_slot).gloas_enabled() - { - return Ok((blocks, vec![])); - } - - let envelopes = self.load_payload_envelopes_for_blocks( - &blocks, - end_block_root, - desired_payload_status, - )?; - - Ok((blocks, envelopes)) - } - - pub fn load_payload_envelopes_for_blocks( - &self, - blocks: &[SignedBlindedBeaconBlock], - end_block_root: Hash256, - desired_payload_status: StatePayloadStatus, - ) -> Result>, Error> { - let mut envelopes = vec![]; - - for (block, next_block) in blocks.iter().tuple_windows() { - if block.fork_name_unchecked().gloas_enabled() { - // Check next block to see if this block's payload is canonical on this chain. - let block_hash = block.payload_bid_block_hash()?; - if !next_block.is_parent_block_full(block_hash) { - // No payload at this slot (empty), nothing to load. - continue; - } - // Using `parent_root` avoids computation. - let block_root = next_block.parent_root(); - let envelope = self - .get_payload_envelope(&block_root)? - .ok_or(HotColdDBError::MissingExecutionPayloadEnvelope(block_root))?; - envelopes.push(envelope); - } - } - - // Load the payload for the last block if desired. 
- if let StatePayloadStatus::Full = desired_payload_status { - let envelope = self.get_payload_envelope(&end_block_root)?.ok_or( - HotColdDBError::MissingExecutionPayloadEnvelope(end_block_root), - )?; - envelopes.push(envelope); - } - - Ok(envelopes) + Ok(blocks) } /// Replay `blocks` on top of `state` until `target_slot` is reached. /// /// Will skip slots as necessary. The returned state is not guaranteed /// to have any caches built, beyond those immediately required by block processing. - #[allow(clippy::too_many_arguments)] pub fn replay_blocks( &self, state: BeaconState, blocks: Vec>, - envelopes: Vec>, - desired_payload_status: StatePayloadStatus, target_slot: Slot, state_root_iter: Option>>, pre_slot_hook: Option>, @@ -2771,8 +2555,7 @@ impl, Cold: ItemStore> HotColdDB let mut block_replayer = BlockReplayer::new(state, &self.spec) .no_signature_verification() - .minimal_block_root_verification() - .desired_state_payload_status(desired_payload_status); + .minimal_block_root_verification(); let have_state_root_iterator = state_root_iter.is_some(); if let Some(state_root_iter) = state_root_iter { @@ -2784,7 +2567,7 @@ impl, Cold: ItemStore> HotColdDB } block_replayer - .apply_blocks(blocks, envelopes, Some(target_slot)) + .apply_blocks(blocks, Some(target_slot)) .map(|block_replayer| { if have_state_root_iterator && block_replayer.state_root_miss() { warn!( @@ -3800,6 +3583,7 @@ pub fn migrate_database, Cold: ItemStore>( ) -> Result { debug!( slot = %finalized_state.slot(), + state_root = ?finalized_state_root, "Freezer migration started" ); @@ -4219,12 +4003,8 @@ impl HotStateSummary { // slots where there isn't a skip). let latest_block_root = state.get_latest_block_root(state_root); - // Payload status of the state determines a lot about how it is stored. 
- let payload_status = state.payload_status(); - let get_state_root = |slot| { if slot == state.slot() { - // TODO(gloas): I think we can remove this case Ok::<_, Error>(state_root) } else { Ok::<_, Error>(get_ancestor_state_root(store, state, slot).map_err(|e| { @@ -4247,12 +4027,6 @@ impl HotStateSummary { let previous_state_root = if state.slot() == 0 { // Set to 0x0 for genesis state to prevent any sort of circular reference. Hash256::zero() - } else if let StatePayloadStatus::Full = payload_status - && state.slot() == state.latest_block_header().slot - { - // A Full state at a non-skipped slot builds off the Pending state of the same slot, - // i.e. the state with the same `state_root` as its `BeaconBlock` - state.latest_block_header().state_root } else { get_state_root(state.slot().safe_sub(1_u64)?)? }; diff --git a/beacon_node/store/src/reconstruct.rs b/beacon_node/store/src/reconstruct.rs index e51543c3a2..7aca692ef9 100644 --- a/beacon_node/store/src/reconstruct.rs +++ b/beacon_node/store/src/reconstruct.rs @@ -67,7 +67,6 @@ where state.build_caches(&self.spec)?; - // TODO(gloas): handle payload envelope replay process_results(block_root_iter, |iter| -> Result<(), Error> { let mut io_batch = vec![]; diff --git a/beacon_node/store/src/state_cache.rs b/beacon_node/store/src/state_cache.rs index d016922ade..6d159c9361 100644 --- a/beacon_node/store/src/state_cache.rs +++ b/beacon_node/store/src/state_cache.rs @@ -7,7 +7,7 @@ use lru::LruCache; use std::collections::{BTreeMap, HashMap, HashSet}; use std::num::NonZeroUsize; use tracing::instrument; -use types::{BeaconState, ChainSpec, Epoch, EthSpec, Hash256, Slot, execution::StatePayloadStatus}; +use types::{BeaconState, ChainSpec, Epoch, EthSpec, Hash256, Slot}; /// Fraction of the LRU cache to leave intact during culling. const CULL_EXEMPT_NUMERATOR: usize = 1; @@ -23,10 +23,10 @@ pub struct FinalizedState { state: BeaconState, } -/// Map from (block_root, payload_status) -> slot -> state_root. 
+/// Map from block_root -> slot -> state_root. #[derive(Debug, Default)] pub struct BlockMap { - blocks: HashMap<(Hash256, StatePayloadStatus), SlotMap>, + blocks: HashMap, } /// Map from slot -> state_root. @@ -143,11 +143,8 @@ impl StateCache { return Err(Error::FinalizedStateDecreasingSlot); } - let payload_status = state.payload_status(); - // Add to block map. - self.block_map - .insert(block_root, payload_status, state.slot(), state_root); + self.block_map.insert(block_root, state.slot(), state_root); // Prune block map. let state_roots_to_prune = self.block_map.prune(state.slot()); @@ -270,9 +267,7 @@ impl StateCache { // Record the connection from block root and slot to this state. let slot = state.slot(); - let payload_status = state.payload_status(); - self.block_map - .insert(block_root, payload_status, slot, state_root); + self.block_map.insert(block_root, slot, state_root); Ok(PutStateOutcome::New(deleted_states)) } @@ -321,10 +316,9 @@ impl StateCache { pub fn get_by_block_root( &mut self, block_root: Hash256, - payload_status: StatePayloadStatus, slot: Slot, ) -> Option<(Hash256, BeaconState)> { - let slot_map = self.block_map.blocks.get(&(block_root, payload_status))?; + let slot_map = self.block_map.blocks.get(&block_root)?; // Find the state at `slot`, or failing that the most recent ancestor. 
let state_root = slot_map @@ -345,12 +339,7 @@ impl StateCache { } pub fn delete_block_states(&mut self, block_root: &Hash256) { - let (pending_state_roots, full_state_roots) = - self.block_map.delete_block_states(block_root); - for slot_map in [pending_state_roots, full_state_roots] - .into_iter() - .flatten() - { + if let Some(slot_map) = self.block_map.delete_block_states(block_root) { for state_root in slot_map.slots.values() { self.states.pop(state_root); } @@ -423,14 +412,8 @@ impl StateCache { } impl BlockMap { - fn insert( - &mut self, - block_root: Hash256, - payload_status: StatePayloadStatus, - slot: Slot, - state_root: Hash256, - ) { - let slot_map = self.blocks.entry((block_root, payload_status)).or_default(); + fn insert(&mut self, block_root: Hash256, slot: Slot, state_root: Hash256) { + let slot_map = self.blocks.entry(block_root).or_default(); slot_map.slots.insert(slot, state_root); } @@ -461,12 +444,8 @@ impl BlockMap { }); } - fn delete_block_states(&mut self, block_root: &Hash256) -> (Option, Option) { - let pending_state_roots = self - .blocks - .remove(&(*block_root, StatePayloadStatus::Pending)); - let full_state_roots = self.blocks.remove(&(*block_root, StatePayloadStatus::Full)); - (pending_state_roots, full_state_roots) + fn delete_block_states(&mut self, block_root: &Hash256) -> Option { + self.blocks.remove(block_root) } } diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index dd16f46c55..d724156f86 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -1093,7 +1093,6 @@ pub struct SseExecutionPayload { pub builder_index: u64, pub block_hash: ExecutionBlockHash, pub block_root: Hash256, - pub state_root: Hash256, pub execution_optimistic: bool, } @@ -1104,7 +1103,6 @@ pub struct SseExecutionPayloadGossip { pub builder_index: u64, pub block_hash: ExecutionBlockHash, pub block_root: Hash256, - pub state_root: Hash256, } #[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] diff --git 
a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 92fd4c1faf..21415e478a 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -560,9 +560,22 @@ where )?; // Cache some values for the next forkchoiceUpdate call to the execution layer. - let head_hash = self - .get_block(&head_root) - .and_then(|b| b.execution_status.block_hash()); + // For Gloas blocks, `execution_status` is Irrelevant (no embedded payload). + // If the payload envelope was received (Full), use the bid's block_hash as the + // execution chain head. Otherwise fall back to the parent hash (Pending) or None. + // TODO(gloas): this is a bit messy, and we probably need a similar treatment for + // justified/finalized + // Can fix as part of: https://github.com/sigp/lighthouse/issues/8957 + let head_hash = self.get_block(&head_root).and_then(|b| { + b.execution_status + .block_hash() + .or(match head_payload_status { + PayloadStatus::Full => b.execution_payload_block_hash, + PayloadStatus::Pending | PayloadStatus::Empty => { + b.execution_payload_parent_hash + } + }) + }); let justified_root = self.justified_checkpoint().root; let finalized_root = self.finalized_checkpoint().root; let justified_hash = self @@ -804,7 +817,7 @@ where })); } - let attestation_threshold = spec.get_unaggregated_attestation_due(); + let attestation_threshold = spec.get_attestation_due::(block.slot()); // Add proposer score boost if the block is timely. // TODO(gloas): the spec's `update_proposer_boost_root` additionally checks that @@ -1493,6 +1506,14 @@ where } } + /// Returns whether the proposer should extend the execution payload chain of the given block. 
+ pub fn should_extend_payload(&self, block_root: &Hash256) -> Result> { + let proposer_boost_root = self.fc_store.proposer_boost_root(); + self.proto_array + .should_extend_payload::(block_root, proposer_boost_root) + .map_err(Error::ProtoArrayStringError) + } + /// Returns an `ExecutionStatus` if the block is known **and** a descendant of the finalized root. pub fn get_block_execution_status(&self, block_root: &Hash256) -> Option { if self.is_finalized_checkpoint_or_descendant(*block_root) { diff --git a/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs b/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs index 2e792028e5..197e1102a3 100644 --- a/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs +++ b/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs @@ -109,6 +109,8 @@ pub fn get_gloas_chain_following_test_definition() -> ForkChoiceTestDefinition { pub fn get_gloas_payload_probe_test_definition() -> ForkChoiceTestDefinition { let mut ops = vec![]; + // Block 1 at slot 1: child of genesis. Genesis has execution_payload_block_hash=zero + // (no execution payload at genesis), so all children have parent_payload_status=Empty. ops.push(Operation::ProcessBlock { slot: Slot::new(1), root: get_root(1), @@ -212,8 +214,10 @@ pub fn get_gloas_payload_probe_test_definition() -> ForkChoiceTestDefinition { justified_checkpoint: get_checkpoint(0), finalized_checkpoint: get_checkpoint(0), operations: ops, - execution_payload_parent_hash: Some(get_hash(42)), - execution_payload_block_hash: Some(get_hash(0)), + // Genesis has zero execution block hash (no payload at genesis), which + // ensures all children get parent_payload_status=Empty. 
+ execution_payload_parent_hash: Some(ExecutionBlockHash::zero()), + execution_payload_block_hash: Some(ExecutionBlockHash::zero()), spec: Some(gloas_spec()), } } @@ -600,18 +604,20 @@ pub fn get_gloas_interleaved_attestations_test_definition() -> ForkChoiceTestDef /// Test interleaving of blocks, payload validation, and attestations. /// -/// Scenario: -/// - Genesis block (slot 0) -/// - Block 1 (slot 1) extends genesis, Full chain -/// - Block 2 (slot 1) extends genesis, Empty chain -/// - Before payload arrives: payload_received is false for block 1 +/// Scenario (branching at block 1 since genesis has no payload): +/// - Genesis block (slot 0) with zero execution block hash +/// - Block 1 (slot 1) child of genesis (Empty parent status since genesis hash=zero) +/// - Block 2 (slot 2) extends block 1 Full chain (parent_hash matches block 1's block_hash) +/// - Block 3 (slot 2) extends block 1 Empty chain (parent_hash doesn't match) +/// - Before payload arrives: payload_received is false for block 1, only Empty reachable /// - Process execution payload for block 1 → payload_received becomes true -/// - Payload attestations arrive voting block 1's payload as timely + available -/// - Head should follow block 1 because the PTC votes now count (payload_received = true) +/// - Both Full and Empty directions from block 1 become available +/// - With equal weight, tiebreaker prefers Full → Block 2 wins pub fn get_gloas_payload_received_interleaving_test_definition() -> ForkChoiceTestDefinition { let mut ops = vec![]; - // Block 1 at slot 1: extends genesis Full chain. + // Block 1 at slot 1: child of genesis. Genesis has zero block hash, so + // parent_payload_status = Empty regardless of block 1's execution_payload_parent_hash. 
ops.push(Operation::ProcessBlock { slot: Slot::new(1), root: get_root(1), @@ -622,83 +628,94 @@ pub fn get_gloas_payload_received_interleaving_test_definition() -> ForkChoiceTe execution_payload_block_hash: Some(get_hash(1)), }); - // Block 2 at slot 1: extends genesis Empty chain (parent_hash doesn't match genesis EL hash). + // Block 2 at slot 2: Full child of block 1 (parent_hash matches block 1's block_hash). ops.push(Operation::ProcessBlock { - slot: Slot::new(1), + slot: Slot::new(2), root: get_root(2), - parent_root: get_root(0), + parent_root: get_root(1), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: Some(get_hash(1)), + execution_payload_block_hash: Some(get_hash(2)), + }); + + // Block 3 at slot 2: Empty child of block 1 (parent_hash doesn't match block 1's block_hash). + ops.push(Operation::ProcessBlock { + slot: Slot::new(2), + root: get_root(3), + parent_root: get_root(1), justified_checkpoint: get_checkpoint(0), finalized_checkpoint: get_checkpoint(0), execution_payload_parent_hash: Some(get_hash(99)), - execution_payload_block_hash: Some(get_hash(100)), + execution_payload_block_hash: Some(get_hash(3)), }); - // Both children have parent_payload_status set correctly. + // Verify parent_payload_status is set correctly. ops.push(Operation::AssertParentPayloadStatus { block_root: get_root(1), + expected_status: PayloadStatus::Empty, + }); + ops.push(Operation::AssertParentPayloadStatus { + block_root: get_root(2), expected_status: PayloadStatus::Full, }); ops.push(Operation::AssertParentPayloadStatus { - block_root: get_root(2), + block_root: get_root(3), expected_status: PayloadStatus::Empty, }); - // Per spec `get_forkchoice_store`: genesis starts with payload_received=true - // (anchor block is in `payload_states`). + // Genesis does NOT have payload_received (no payload at genesis). 
ops.push(Operation::AssertPayloadReceived { block_root: get_root(0), - expected: true, + expected: false, }); - // Give one vote to each child so they have equal weight. + // Block 1 does not have payload_received yet. + ops.push(Operation::AssertPayloadReceived { + block_root: get_root(1), + expected: false, + }); + + // Give one vote to each competing child so they have equal weight. ops.push(Operation::ProcessAttestation { validator_index: 0, - block_root: get_root(1), - attestation_slot: Slot::new(1), + block_root: get_root(2), + attestation_slot: Slot::new(2), }); ops.push(Operation::ProcessAttestation { validator_index: 1, - block_root: get_root(2), - attestation_slot: Slot::new(1), + block_root: get_root(3), + attestation_slot: Slot::new(2), }); - // Equal weight, payload_received=true on genesis → tiebreaker uses - // payload_received (not previous slot, equal payload weights) → prefers Full. - // Block 1 (Full) wins because it matches the Full preference. + // Before payload_received on block 1: only Empty direction available. + // Block 3 (Empty child) is reachable, Block 2 (Full child) is not. ops.push(Operation::FindHead { justified_checkpoint: get_checkpoint(0), finalized_checkpoint: get_checkpoint(0), justified_state_balances: vec![1, 1], - expected_head: get_root(1), + expected_head: get_root(3), current_slot: Slot::new(100), expected_payload_status: None, }); - // ProcessExecutionPayloadEnvelope on genesis is a no-op (already received at init). + // Process execution payload envelope for block 1 → payload_received becomes true. ops.push(Operation::ProcessExecutionPayloadEnvelope { - block_root: get_root(0), + block_root: get_root(1), }); ops.push(Operation::AssertPayloadReceived { - block_root: get_root(0), + block_root: get_root(1), expected: true, }); - // Set PTC votes on genesis as timely + data available (simulates PTC voting). - // This doesn't change the preference since genesis is not the previous slot - // (slot 0 + 1 != current_slot 100). 
- ops.push(Operation::SetPayloadTiebreak { - block_root: get_root(0), - is_timely: true, - is_data_available: true, - }); - - // Still prefers Full via payload_received tiebreaker → Block 1 (Full) wins. + // After payload_received on block 1: both Full and Empty directions available. + // Equal weight, tiebreaker prefers Full → Block 2 (Full child) wins. ops.push(Operation::FindHead { justified_checkpoint: get_checkpoint(0), finalized_checkpoint: get_checkpoint(0), justified_state_balances: vec![1, 1], - expected_head: get_root(1), + expected_head: get_root(2), current_slot: Slot::new(100), expected_payload_status: None, }); @@ -708,8 +725,9 @@ pub fn get_gloas_payload_received_interleaving_test_definition() -> ForkChoiceTe justified_checkpoint: get_checkpoint(0), finalized_checkpoint: get_checkpoint(0), operations: ops, - execution_payload_parent_hash: Some(get_hash(42)), - execution_payload_block_hash: Some(get_hash(0)), + // Genesis has zero execution block hash (no payload at genesis). + execution_payload_parent_hash: Some(ExecutionBlockHash::zero()), + execution_payload_block_hash: Some(ExecutionBlockHash::zero()), spec: Some(gloas_spec()), } } diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 4946631f73..4ca7dab69c 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -568,8 +568,10 @@ impl ProtoArray { ProtoNode::V29(v29) => { // Both parent and child are Gloas blocks. The parent is full if the // block hash in the parent node matches the parent block hash in the - // child bid. - if execution_payload_parent_hash == v29.execution_payload_block_hash { + // child bid and the parent block isn't the genesis block. 
+ if v29.execution_payload_block_hash != ExecutionBlockHash::zero()
+ && execution_payload_parent_hash == v29.execution_payload_block_hash
+ {
 PayloadStatus::Full
 } else {
 PayloadStatus::Empty
@@ -582,18 +584,16 @@ impl ProtoArray {
 }
 }
 } else {
- // TODO(gloas): re-assess this assumption
- // Parent is missing (genesis or pruned due to finalization). Default to Full
- // since this path should only be hit at Gloas genesis.
- PayloadStatus::Full
+ // Parent is missing (genesis or pruned due to finalization). This code path
+ // should only be hit at Gloas genesis. Default to empty; the genesis block
+ // has no payload envelope.
+ PayloadStatus::Empty
 };
- // Per spec `get_forkchoice_store`: the anchor (genesis) block has
- // its payload state initialized (`payload_states = {anchor_root: ...}`).
- // Without `payload_received = true` on genesis, the FULL virtual
- // child doesn't exist in the spec's `get_node_children`, making all
- // Full concrete children of genesis unreachable in `get_head`.
- let is_genesis = parent_index.is_none();
+ // The spec does something slightly strange where it initialises the payload timeliness
+ // votes and payload data availability votes for the anchor block to all true, but never
+ // adds the anchor to `store.payloads`, so it is never considered full.
+ let is_anchor = parent_index.is_none();

 ProtoNode::V29(ProtoNodeV29 {
 slot: block.slot,
@@ -614,26 +614,25 @@ impl ProtoArray {
 execution_payload_block_hash,
 execution_payload_parent_hash,
 // Per spec `get_forkchoice_store`: the anchor block's PTC votes are
- // initialized to all-True, ensuring `is_payload_timely` and
- // `is_payload_data_available` return true for the anchor.
- payload_timeliness_votes: if is_genesis {
+ // initialized to all-True. 
+ payload_timeliness_votes: if is_anchor { all_true_bitvector() } else { BitVector::default() }, - payload_data_availability_votes: if is_genesis { + payload_data_availability_votes: if is_anchor { all_true_bitvector() } else { BitVector::default() }, - payload_received: is_genesis, + payload_received: false, proposer_index, // Spec: `record_block_timeliness` + `get_forkchoice_store`. // Anchor gets [True, True]. Others computed from time_into_slot. - block_timeliness_attestation_threshold: is_genesis + block_timeliness_attestation_threshold: is_anchor || (is_current_slot && time_into_slot < spec.get_attestation_due::(current_slot)), - block_timeliness_ptc_threshold: is_genesis + block_timeliness_ptc_threshold: is_anchor || (is_current_slot && time_into_slot < spec.get_payload_attestation_due()), equivocating_attestation_score: 0, }) @@ -1438,7 +1437,7 @@ impl ProtoArray { } } - fn should_extend_payload( + pub fn should_extend_payload( &self, fc_node: &IndexedForkChoiceNode, proto_node: &ProtoNode, diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index 0ecaea3971..577e89baa1 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -17,7 +17,7 @@ use std::{ }; use types::{ AttestationShufflingId, ChainSpec, Checkpoint, Epoch, EthSpec, ExecutionBlockHash, Hash256, - Slot, StatePayloadStatus, + Slot, }; pub const DEFAULT_PRUNE_THRESHOLD: usize = 256; @@ -110,19 +110,6 @@ pub enum PayloadStatus { Pending = 2, } -impl PayloadStatus { - /// Convert a `PayloadStatus` into the equivalent `StatePayloadStatus`. - /// - /// This maps `Empty` onto `StatePayloadStatus::Pending` because empty and pending fork choice - /// nodes correspond to the exact same state. 
- pub fn as_state_payload_status(self) -> StatePayloadStatus { - match self { - Self::Empty | Self::Pending => StatePayloadStatus::Pending, - Self::Full => StatePayloadStatus::Full, - } - } -} - /// Spec's `ForkChoiceNode` augmented with ProtoNode index. pub struct IndexedForkChoiceNode { pub root: Hash256, @@ -1019,6 +1006,34 @@ impl ProtoArrayForkChoice { }) } + /// Returns whether the proposer should extend the parent's execution payload chain. + /// + /// This checks timeliness, data availability, and proposer boost conditions per the spec. + pub fn should_extend_payload( + &self, + block_root: &Hash256, + proposer_boost_root: Hash256, + ) -> Result { + let block_index = self + .proto_array + .indices + .get(block_root) + .ok_or_else(|| format!("Unknown block root: {block_root:?}"))?; + let proto_node = self + .proto_array + .nodes + .get(*block_index) + .ok_or_else(|| format!("Missing node at index: {block_index}"))?; + let fc_node = IndexedForkChoiceNode { + root: proto_node.root(), + proto_node_index: *block_index, + payload_status: proto_node.get_parent_payload_status(), + }; + self.proto_array + .should_extend_payload::(&fc_node, proto_node, proposer_boost_root) + .map_err(|e| format!("{e:?}")) + } + /// Returns the `block.execution_status` field, if the block is present. 
pub fn get_block_execution_status(&self, block_root: &Hash256) -> Option { let block = self.get_proto_node(block_root)?; diff --git a/consensus/state_processing/src/block_replayer.rs b/consensus/state_processing/src/block_replayer.rs index f5f06d1cb9..56e667cdd3 100644 --- a/consensus/state_processing/src/block_replayer.rs +++ b/consensus/state_processing/src/block_replayer.rs @@ -1,11 +1,6 @@ use crate::{ BlockProcessingError, BlockSignatureStrategy, ConsensusContext, SlotProcessingError, - VerifyBlockRoot, VerifySignatures, - envelope_processing::{ - EnvelopeProcessingError, VerifyStateRoot, process_execution_payload_envelope, - }, - per_block_processing, - per_epoch_processing::EpochProcessingSummary, + VerifyBlockRoot, per_block_processing, per_epoch_processing::EpochProcessingSummary, per_slot_processing, }; use itertools::Itertools; @@ -13,7 +8,7 @@ use std::iter::Peekable; use std::marker::PhantomData; use types::{ BeaconState, BeaconStateError, BlindedPayload, ChainSpec, EthSpec, Hash256, SignedBeaconBlock, - SignedExecutionPayloadEnvelope, Slot, execution::StatePayloadStatus, + Slot, }; pub type PreBlockHook<'a, E, Error> = Box< @@ -29,7 +24,7 @@ pub type PostSlotHook<'a, E, Error> = Box< >; pub type StateRootIterDefault = std::iter::Empty>; -/// Efficiently apply blocks and payloads to a state while configuring various parameters. +/// Efficiently apply blocks to a state while configuring various parameters. /// /// Usage follows a builder pattern. pub struct BlockReplayer< @@ -46,21 +41,8 @@ pub struct BlockReplayer< post_block_hook: Option>, pre_slot_hook: Option>, post_slot_hook: Option>, - /// Iterator over state roots for all *block* states. - /// - /// Pre-Gloas, this is all states. Post-Gloas, this is *just* the states corresponding to beacon - /// blocks. For states corresponding to payloads, we read the state root from the payload - /// envelope. - // TODO(gloas): this concept might need adjusting when we implement the cold DB. 
pub(crate) state_root_iter: Option>, state_root_miss: bool, - /// The payload status of the state desired as the end result of block replay. - /// - /// This dictates whether a payload should be applied after applying the last block. - /// - /// Prior to Gloas, this should always be set to `StatePayloadStatus::Pending` to indicate - /// that no envelope needs to be applied. - desired_state_payload_status: StatePayloadStatus, _phantom: PhantomData, } @@ -68,12 +50,7 @@ pub struct BlockReplayer< pub enum BlockReplayError { SlotProcessing(SlotProcessingError), BlockProcessing(BlockProcessingError), - EnvelopeProcessing(EnvelopeProcessingError), BeaconState(BeaconStateError), - /// A payload envelope for this `slot` was required but not provided. - MissingPayloadEnvelope { - slot: Slot, - }, } impl From for BlockReplayError { @@ -88,12 +65,6 @@ impl From for BlockReplayError { } } -impl From for BlockReplayError { - fn from(e: EnvelopeProcessingError) -> Self { - Self::EnvelopeProcessing(e) - } -} - impl From for BlockReplayError { fn from(e: BeaconStateError) -> Self { Self::BeaconState(e) @@ -125,7 +96,6 @@ where post_slot_hook: None, state_root_iter: None, state_root_miss: false, - desired_state_payload_status: StatePayloadStatus::Pending, _phantom: PhantomData, } } @@ -191,14 +161,6 @@ where self } - /// Set the desired payload status of the state reached by replay. - /// - /// This determines whether to apply a payload after applying the last block. - pub fn desired_state_payload_status(mut self, payload_status: StatePayloadStatus) -> Self { - self.desired_state_payload_status = payload_status; - self - } - /// Compute the state root for `self.state` as efficiently as possible. /// /// This function MUST only be called when `self.state` is a post-state, i.e. it MUST not be @@ -246,38 +208,6 @@ where Ok(state_root) } - /// Apply an execution payload envelope to `self.state`. 
- /// - /// The `block_state_root` MUST be the `state_root` of the most recently applied block. - /// - /// Returns the `state_root` of `self.state` after payload application. - fn apply_payload_envelope( - &mut self, - envelope: &SignedExecutionPayloadEnvelope, - block_state_root: Hash256, - ) -> Result { - // TODO(gloas): bulk signature verification could be relevant here? - let verify_payload_signatures = - if let BlockSignatureStrategy::NoVerification = self.block_sig_strategy { - VerifySignatures::False - } else { - VerifySignatures::True - }; - // TODO(gloas): state root verif enabled during initial prototyping - let verify_state_root = VerifyStateRoot::True; - process_execution_payload_envelope( - &mut self.state, - Some(block_state_root), - envelope, - verify_payload_signatures, - verify_state_root, - self.spec, - ) - .map_err(BlockReplayError::from)?; - - Ok(envelope.message.state_root) - } - /// Apply `blocks` atop `self.state`, taking care of slot processing. /// /// If `target_slot` is provided then the state will be advanced through to `target_slot` @@ -285,21 +215,8 @@ where pub fn apply_blocks( mut self, blocks: Vec>>, - payload_envelopes: Vec>, target_slot: Option, ) -> Result { - let mut envelopes_iter = payload_envelopes.into_iter(); - - let mut next_envelope_at_slot = |slot| { - if let Some(envelope) = envelopes_iter.next() - && envelope.message.slot == slot - { - Ok(envelope) - } else { - Err(BlockReplayError::MissingPayloadEnvelope { slot }) - } - }; - for (i, block) in blocks.iter().enumerate() { // Allow one additional block at the start which is only used for its state root. if i == 0 && block.slot() <= self.state.slot() { @@ -307,36 +224,7 @@ where } while self.state.slot() < block.slot() { - let mut state_root = self.get_state_root(&blocks, i)?; - - // Apply the payload for the *previous* block if the bid in the current block - // indicates that the parent is full (and it hasn't already been applied). 
- state_root = if block.fork_name_unchecked().gloas_enabled() - && self.state.slot() == self.state.latest_block_header().slot - && self.state.payload_status() == StatePayloadStatus::Pending - { - let latest_bid_block_hash = self - .state - .latest_execution_payload_bid() - .map_err(BlockReplayError::from)? - .block_hash; - - // Similar to `is_parent_block_full`, but reading the block hash from the - // not-yet-applied `block`. The slot 0 case covers genesis (no block replay reqd). - if self.state.slot() != 0 && block.is_parent_block_full(latest_bid_block_hash) { - let envelope = next_envelope_at_slot(self.state.slot())?; - // State root for the next slot processing is now the envelope's state root. - self.apply_payload_envelope(&envelope, state_root)? - } else { - // Empty payload at this slot, the state root is unchanged from when the - // beacon block was applied. - state_root - } - } else { - // Pre-Gloas or at skipped slots post-Gloas, the state root of the parent state - // is always the output from `self.get_state_root`. - state_root - }; + let state_root = self.get_state_root(&blocks, i)?; if let Some(ref mut pre_slot_hook) = self.pre_slot_hook { pre_slot_hook(state_root, &mut self.state)?; @@ -380,24 +268,9 @@ where } } - // Apply the last payload if desired. - let mut opt_state_root = if let StatePayloadStatus::Full = self.desired_state_payload_status - && let Some(last_block) = blocks.last() - { - let envelope = next_envelope_at_slot(self.state.slot())?; - Some(self.apply_payload_envelope(&envelope, last_block.state_root())?) - } else { - None - }; - if let Some(target_slot) = target_slot { while self.state.slot() < target_slot { - // Read state root from `opt_state_root` if a payload was just applied. - let state_root = if let Some(root) = opt_state_root.take() { - root - } else { - self.get_state_root(&blocks, blocks.len())? 
- }; + let state_root = self.get_state_root(&blocks, blocks.len())?; if let Some(ref mut pre_slot_hook) = self.pre_slot_hook { pre_slot_hook(state_root, &mut self.state)?; diff --git a/consensus/state_processing/src/envelope_processing.rs b/consensus/state_processing/src/envelope_processing.rs index 97953b835f..8ea96390e3 100644 --- a/consensus/state_processing/src/envelope_processing.rs +++ b/consensus/state_processing/src/envelope_processing.rs @@ -1,15 +1,10 @@ -use crate::BlockProcessingError; use crate::VerifySignatures; use crate::per_block_processing::compute_timestamp_at_slot; -use crate::per_block_processing::process_operations::{ - process_consolidation_requests, process_deposit_requests_post_gloas, - process_withdrawal_requests, -}; -use safe_arith::{ArithError, SafeArith}; +use safe_arith::ArithError; use tree_hash::TreeHash; use types::{ - BeaconState, BeaconStateError, BuilderIndex, BuilderPendingPayment, ChainSpec, EthSpec, - ExecutionBlockHash, Hash256, SignedExecutionPayloadEnvelope, Slot, + BeaconState, BeaconStateError, BuilderIndex, ChainSpec, EthSpec, ExecutionBlockHash, Hash256, + SignedExecutionPayloadEnvelope, Slot, }; macro_rules! envelope_verify { @@ -20,29 +15,11 @@ macro_rules! envelope_verify { }; } -/// The strategy to be used when validating the payloads state root. -#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] -#[derive(PartialEq, Clone, Copy)] -pub enum VerifyStateRoot { - /// Validate state root. - True, - /// Do not validate state root. Use with caution. - /// This should only be used when first constructing the payload envelope. 
- False, -} - -impl VerifyStateRoot { - pub fn is_true(self) -> bool { - self == VerifyStateRoot::True - } -} - #[derive(Debug, Clone)] pub enum EnvelopeProcessingError { /// Bad Signature BadSignature, BeaconStateError(BeaconStateError), - BlockProcessingError(BlockProcessingError), ArithError(ArithError), /// Envelope doesn't match latest beacon block header LatestBlockHeaderMismatch { @@ -89,15 +66,11 @@ pub enum EnvelopeProcessingError { state: u64, envelope: u64, }, - // Invalid state root - InvalidStateRoot { - state: Hash256, + // The execution requests root doesn't match the committed bid + ExecutionRequestsRootMismatch { + committed_bid: Hash256, envelope: Hash256, }, - // BitFieldError - BitFieldError(ssz::BitfieldError), - // Some kind of error calculating the builder payment index - BuilderPaymentIndexOutOfBounds(usize), /// The envelope was deemed invalid by the execution engine. ExecutionInvalid, } @@ -108,50 +81,44 @@ impl From for EnvelopeProcessingError { } } -impl From for EnvelopeProcessingError { - fn from(e: BlockProcessingError) -> Self { - EnvelopeProcessingError::BlockProcessingError(e) - } -} - impl From for EnvelopeProcessingError { fn from(e: ArithError) -> Self { EnvelopeProcessingError::ArithError(e) } } -/// Processes a `SignedExecutionPayloadEnvelope` +/// Verifies a `SignedExecutionPayloadEnvelope` against the beacon state. /// -/// This function does all the state modifications inside `process_execution_payload()` -pub fn process_execution_payload_envelope( - state: &mut BeaconState, - parent_state_root: Option, +/// This function performs pure verification with no state mutation. The execution requests +/// from the envelope are deferred to be processed in the next block via +/// `process_parent_execution_payload`. +/// +/// `block_state_root` should be the post-block state root (used to fill in the block header +/// for beacon_block_root verification). 
If `None`, the latest_block_header must already have +/// its state_root filled in. +pub fn verify_execution_payload_envelope( + state: &BeaconState, signed_envelope: &SignedExecutionPayloadEnvelope, verify_signatures: VerifySignatures, - verify_state_root: VerifyStateRoot, + block_state_root: Hash256, spec: &ChainSpec, ) -> Result<(), EnvelopeProcessingError> { - if verify_signatures.is_true() { - // Verify Signed Envelope Signature - if !signed_envelope.verify_signature_with_state(state, spec)? { - return Err(EnvelopeProcessingError::BadSignature); - } + if verify_signatures.is_true() && !signed_envelope.verify_signature_with_state(state, spec)? { + return Err(EnvelopeProcessingError::BadSignature); } let envelope = &signed_envelope.message; let payload = &envelope.payload; - let execution_requests = &envelope.execution_requests; - // Cache latest block header state root - if state.latest_block_header().state_root == Hash256::default() { - let previous_state_root = parent_state_root - .map(Ok) - .unwrap_or_else(|| state.canonical_root())?; - state.latest_block_header_mut().state_root = previous_state_root; + // Verify consistency with the beacon block. + // Use a copy of the header with state_root filled in, matching the spec's approach. + let mut header = state.latest_block_header().clone(); + if header.state_root == Hash256::default() { + // The caller must provide the post-block state root so we can compute + // the block header root without mutating state. 
+ header.state_root = block_state_root; } - - // Verify consistency with the beacon block - let latest_block_header_root = state.latest_block_header().tree_hash_root(); + let latest_block_header_root = header.tree_hash_root(); envelope_verify!( envelope.beacon_block_root == latest_block_header_root, EnvelopeProcessingError::LatestBlockHeaderMismatch { @@ -160,9 +127,9 @@ pub fn process_execution_payload_envelope( } ); envelope_verify!( - envelope.slot == state.slot(), + envelope.slot() == state.slot(), EnvelopeProcessingError::SlotMismatch { - envelope_slot: envelope.slot, + envelope_slot: envelope.slot(), parent_state_slot: state.slot(), } ); @@ -238,59 +205,17 @@ pub fn process_execution_payload_envelope( } ); + // Verify execution requests root matches committed bid + let execution_requests_root = envelope.execution_requests.tree_hash_root(); + envelope_verify!( + execution_requests_root == committed_bid.execution_requests_root, + EnvelopeProcessingError::ExecutionRequestsRootMismatch { + committed_bid: committed_bid.execution_requests_root, + envelope: execution_requests_root, + } + ); + // TODO(gloas): newPayload happens here in the spec, ensure we wire that up correctly - process_deposit_requests_post_gloas(state, &execution_requests.deposits, spec)?; - process_withdrawal_requests(state, &execution_requests.withdrawals, spec)?; - process_consolidation_requests(state, &execution_requests.consolidations, spec)?; - - // Queue the builder payment - let payment_index = E::slots_per_epoch() - .safe_add(state.slot().as_u64().safe_rem(E::slots_per_epoch())?)? - as usize; - let payment_mut = state - .builder_pending_payments_mut()? - .get_mut(payment_index) - .ok_or(EnvelopeProcessingError::BuilderPaymentIndexOutOfBounds( - payment_index, - ))?; - - // We have re-ordered the blanking out of the pending payment to avoid a double-lookup. 
- // This is semantically equivalent to the ordering used by the spec because we have taken a - // clone of the payment prior to doing the write. - let payment_withdrawal = payment_mut.withdrawal.clone(); - *payment_mut = BuilderPendingPayment::default(); - - let amount = payment_withdrawal.amount; - if amount > 0 { - state - .builder_pending_withdrawals_mut()? - .push(payment_withdrawal) - .map_err(|e| EnvelopeProcessingError::BeaconStateError(e.into()))?; - } - - // Cache the execution payload hash - let availability_index = state - .slot() - .as_usize() - .safe_rem(E::slots_per_historical_root())?; - state - .execution_payload_availability_mut()? - .set(availability_index, true) - .map_err(EnvelopeProcessingError::BitFieldError)?; - *state.latest_block_hash_mut()? = payload.block_hash; - - if verify_state_root.is_true() { - // Verify the state root - let state_root = state.canonical_root()?; - envelope_verify!( - envelope.state_root == state_root, - EnvelopeProcessingError::InvalidStateRoot { - state: state_root, - envelope: envelope.state_root, - } - ); - } - Ok(()) } diff --git a/consensus/state_processing/src/genesis.rs b/consensus/state_processing/src/genesis.rs index 861fccb374..9dfbc87b48 100644 --- a/consensus/state_processing/src/genesis.rs +++ b/consensus/state_processing/src/genesis.rs @@ -167,9 +167,21 @@ pub fn initialize_beacon_state_from_eth1( // Remove intermediate Fulu fork from `state.fork`. state.fork_mut().previous_version = spec.gloas_fork_version; - // Override latest execution payload header. - // Here's where we *would* clone the header but there is no header here so.. - // TODO(EIP7732): check this + // The genesis block's bid must have block_hash = 0x00 per spec (empty payload). + // Retain the EL genesis hash in latest_block_hash and parent_block_hash so the + // first post-genesis proposer can build on the correct EL head. 
+ let el_genesis_hash = state.latest_execution_payload_bid()?.block_hash; + let bid = state.latest_execution_payload_bid_mut()?; + bid.parent_block_hash = el_genesis_hash; + bid.block_hash = ExecutionBlockHash::default(); + + // Update latest_block_header to reflect the Gloas genesis block body which contains + // the EL genesis hash in the signed_execution_payload_bid. This is needed because + // BeaconState::new() created the header from BeaconBlock::empty() which has zero bid + // fields, but the spec requires the genesis block's bid to contain the EL block hash + // and the tree hash root of empty ExecutionRequests. + let block = genesis_block(&state, spec)?; + state.latest_block_header_mut().body_root = block.body_root(); } // Now that we have our validators, initialize the caches (including the committees) @@ -181,6 +193,28 @@ pub fn initialize_beacon_state_from_eth1( Ok(state) } +/// Create an unsigned genesis `BeaconBlock` whose body matches the genesis state. +/// +/// For Gloas, the block's `signed_execution_payload_bid` is populated from the state's +/// `latest_execution_payload_bid` so that the body root is consistent with +/// `state.latest_block_header.body_root`. +/// +/// The returned block has `state_root == Hash256::ZERO`; callers that need the real +/// state root should set it themselves. +pub fn genesis_block( + genesis_state: &BeaconState, + spec: &ChainSpec, +) -> Result, BeaconStateError> { + let mut block = BeaconBlock::empty(spec); + if let Ok(block) = block.as_gloas_mut() { + let state_bid = genesis_state.latest_execution_payload_bid()?; + let bid = &mut block.body.signed_execution_payload_bid.message; + bid.block_hash = state_bid.block_hash; + bid.execution_requests_root = state_bid.execution_requests_root; + } + Ok(block) +} + /// Determine whether a candidate genesis state is suitable for starting the chain. 
pub fn is_valid_genesis_state(state: &BeaconState, spec: &ChainSpec) -> bool { state diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index 210e0437be..71ad394ee6 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -120,7 +120,7 @@ pub fn per_block_processing>( let block = signed_block.message(); // Verify that the `SignedBeaconBlock` instantiation matches the fork at `signed_block.slot()`. - signed_block + let fork_name = signed_block .fork_name(spec) .map_err(BlockProcessingError::InconsistentBlockFork)?; @@ -129,6 +129,11 @@ pub fn per_block_processing>( .fork_name(spec) .map_err(BlockProcessingError::InconsistentStateFork)?; + // Process deferred execution requests from the parent's envelope. + if fork_name.gloas_enabled() { + process_parent_execution_payload(state, block, spec)?; + } + // Build epoch cache if it hasn't already been built, or if it is no longer valid initialize_epoch_cache(state, spec)?; initialize_progressive_balances_cache(state, spec)?; @@ -531,6 +536,139 @@ pub fn compute_timestamp_at_slot( .and_then(|since_genesis| state.genesis_time().safe_add(since_genesis)) } +/// Process the parent block's deferred execution payload effects. +/// +/// This implements the spec's `process_parent_execution_payload` function, which validates +/// the parent execution requests and delegates to `apply_parent_execution_payload` if the +/// parent block was full. This is called at the beginning of block processing, before +/// `process_block_header`. +/// +/// `process_parent_execution_payload` must be called before `process_execution_payload_bid` +/// (which overwrites `state.latest_execution_payload_bid`). 
+pub fn process_parent_execution_payload>( + state: &mut BeaconState, + block: BeaconBlockRef<'_, E, Payload>, + spec: &ChainSpec, +) -> Result<(), BlockProcessingError> { + let bid_parent_block_hash = block + .body() + .signed_execution_payload_bid()? + .message + .parent_block_hash; + let parent_bid = state.latest_execution_payload_bid()?.clone(); + let requests = block.body().parent_execution_requests()?; + + let is_genesis_block = parent_bid.block_hash == ExecutionBlockHash::zero(); + let is_parent_block_empty = bid_parent_block_hash != parent_bid.block_hash; + + if is_genesis_block || is_parent_block_empty { + // Parent was EMPTY -- no execution requests expected + block_verify!( + *requests == ExecutionRequests::default(), + BlockProcessingError::NonEmptyParentExecutionRequests + ); + return Ok(()); + } + + // Parent was FULL -- verify the bid commitment and apply the payload + let requests_root = requests.tree_hash_root(); + block_verify!( + requests_root == parent_bid.execution_requests_root, + BlockProcessingError::ExecutionRequestsRootMismatch { + expected: parent_bid.execution_requests_root, + found: requests_root, + } + ); + + apply_parent_execution_payload(state, &parent_bid, requests, spec) +} + +/// Apply the parent execution payload's deferred effects to the state. +/// +/// This implements the spec's `apply_parent_execution_payload` function: +/// 1. Processes deposits, withdrawals, and consolidations from execution requests +/// 2. Queues the builder pending payment from the parent's committed bid +/// 3. 
Updates `execution_payload_availability` and `latest_block_hash` +pub fn apply_parent_execution_payload( + state: &mut BeaconState, + parent_bid: &ExecutionPayloadBid, + requests: &ExecutionRequests, + spec: &ChainSpec, +) -> Result<(), BlockProcessingError> { + let parent_slot = parent_bid.slot; + let parent_epoch = parent_slot.epoch(E::slots_per_epoch()); + + // Process execution requests from the parent's payload + process_operations::process_deposit_requests_post_gloas(state, &requests.deposits, spec)?; + process_operations::process_withdrawal_requests(state, &requests.withdrawals, spec)?; + process_operations::process_consolidation_requests(state, &requests.consolidations, spec)?; + + // Queue the builder payment + if parent_epoch == state.current_epoch() { + let payment_index = E::slots_per_epoch() + .safe_add(parent_slot.as_u64().safe_rem(E::slots_per_epoch())?)? + as usize; + settle_builder_payment(state, payment_index)?; + } else if parent_epoch == state.previous_epoch() { + let payment_index = parent_slot.as_u64().safe_rem(E::slots_per_epoch())? as usize; + settle_builder_payment(state, payment_index)?; + } else if parent_bid.value > 0 { + // Parent is older than previous epoch -- payment entry has already been + // settled or evicted by process_builder_pending_payments at epoch boundaries. + // Append the withdrawal directly from the bid. + state + .builder_pending_withdrawals_mut()? + .push(BuilderPendingWithdrawal { + fee_recipient: parent_bid.fee_recipient, + amount: parent_bid.value, + builder_index: parent_bid.builder_index, + }) + .map_err(|e| BlockProcessingError::BeaconStateError(e.into()))?; + } + + // Update execution payload availability for the parent slot + let availability_index = parent_slot + .as_usize() + .safe_rem(E::slots_per_historical_root())?; + state + .execution_payload_availability_mut()? 
+ .set(availability_index, true) + .map_err(BlockProcessingError::BitfieldError)?; + + // Update latest_block_hash to the parent bid's block_hash + *state.latest_block_hash_mut()? = parent_bid.block_hash; + + Ok(()) +} + +/// Spec: `settle_builder_payment`. +/// +/// Moves a pending payment from `builder_pending_payments[payment_index]` into +/// `builder_pending_withdrawals`, then clears the slot. +pub fn settle_builder_payment( + state: &mut BeaconState, + payment_index: usize, +) -> Result<(), BlockProcessingError> { + let payment_mut = state + .builder_pending_payments_mut()? + .get_mut(payment_index) + .ok_or(BlockProcessingError::BuilderPaymentIndexOutOfBounds( + payment_index, + ))?; + + let withdrawal = payment_mut.withdrawal.clone(); + *payment_mut = BuilderPendingPayment::default(); + + if withdrawal.amount > 0 { + state + .builder_pending_withdrawals_mut()? + .push(withdrawal) + .map_err(|e| BlockProcessingError::BeaconStateError(e.into()))?; + } + + Ok(()) +} + pub fn process_execution_payload_bid>( state: &mut BeaconState, block: BeaconBlockRef<'_, E, Payload>, diff --git a/consensus/state_processing/src/per_block_processing/errors.rs b/consensus/state_processing/src/per_block_processing/errors.rs index 71083378db..93d668c8c9 100644 --- a/consensus/state_processing/src/per_block_processing/errors.rs +++ b/consensus/state_processing/src/per_block_processing/errors.rs @@ -108,6 +108,13 @@ pub enum BlockProcessingError { }, /// Builder payment index out of bounds (Gloas) BuilderPaymentIndexOutOfBounds(usize), + /// The parent execution requests root doesn't match the committed bid + ExecutionRequestsRootMismatch { + expected: Hash256, + found: Hash256, + }, + /// Parent was not full but non-empty execution requests were provided + NonEmptyParentExecutionRequests, } impl From for BlockProcessingError { diff --git a/consensus/state_processing/src/per_block_processing/tests.rs b/consensus/state_processing/src/per_block_processing/tests.rs index 
0203b33e61..96610c2010 100644 --- a/consensus/state_processing/src/per_block_processing/tests.rs +++ b/consensus/state_processing/src/per_block_processing/tests.rs @@ -1014,7 +1014,7 @@ async fn block_replayer_peeking_state_roots() { let block_replayer = BlockReplayer::new(parent_state, &harness.chain.spec) .state_root_iter(state_root_iter.into_iter()) .no_signature_verification() - .apply_blocks(vec![target_block], vec![], None) + .apply_blocks(vec![target_block], None) .unwrap(); assert_eq!( diff --git a/consensus/state_processing/src/per_block_processing/withdrawals.rs b/consensus/state_processing/src/per_block_processing/withdrawals.rs index 72c3339b10..3b14e904c4 100644 --- a/consensus/state_processing/src/per_block_processing/withdrawals.rs +++ b/consensus/state_processing/src/per_block_processing/withdrawals.rs @@ -9,8 +9,8 @@ use safe_arith::{SafeArith, SafeArithIter}; use tree_hash::TreeHash; use types::{ AbstractExecPayload, BeaconState, BeaconStateError, ChainSpec, EthSpec, ExecPayload, - ExpectedWithdrawals, ExpectedWithdrawalsCapella, ExpectedWithdrawalsElectra, - ExpectedWithdrawalsGloas, Validator, Withdrawal, Withdrawals, + ExecutionBlockHash, ExpectedWithdrawals, ExpectedWithdrawalsCapella, + ExpectedWithdrawalsElectra, ExpectedWithdrawalsGloas, Validator, Withdrawal, Withdrawals, }; /// Compute the next batch of withdrawals which should be included in a block. @@ -494,7 +494,11 @@ pub mod gloas { state: &mut BeaconState, spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { - if !state.is_parent_block_full() { + // Return early if the parent block is empty. + let is_genesis_block = *state.latest_block_hash()? == ExecutionBlockHash::default(); + let is_parent_block_empty = + *state.latest_block_hash()? 
!= state.latest_execution_payload_bid()?.block_hash; + if is_genesis_block || is_parent_block_empty { return Ok(()); } diff --git a/consensus/state_processing/src/upgrade/gloas.rs b/consensus/state_processing/src/upgrade/gloas.rs index b39ee6048f..84cdbf22c2 100644 --- a/consensus/state_processing/src/upgrade/gloas.rs +++ b/consensus/state_processing/src/upgrade/gloas.rs @@ -7,10 +7,12 @@ use ssz_types::BitVector; use ssz_types::FixedVector; use std::collections::HashSet; use std::mem; +use tree_hash::TreeHash; use typenum::Unsigned; use types::{ BeaconState, BeaconStateError as Error, BeaconStateGloas, BuilderPendingPayment, ChainSpec, - DepositData, EthSpec, ExecutionPayloadBid, Fork, is_builder_withdrawal_credential, + DepositData, EthSpec, ExecutionPayloadBid, ExecutionRequests, Fork, + is_builder_withdrawal_credential, }; /// Transform a `Fulu` state into a `Gloas` state. @@ -78,6 +80,7 @@ pub fn upgrade_state_to_gloas( // Execution Bid latest_execution_payload_bid: ExecutionPayloadBid { block_hash: pre.latest_execution_payload_header.block_hash, + execution_requests_root: ExecutionRequests::::default().tree_hash_root(), ..Default::default() }, // Capella diff --git a/consensus/types/src/block/beacon_block.rs b/consensus/types/src/block/beacon_block.rs index 5634d842b6..3360728eaa 100644 --- a/consensus/types/src/block/beacon_block.rs +++ b/consensus/types/src/block/beacon_block.rs @@ -716,6 +716,7 @@ impl> EmptyBlock for BeaconBlockGloa voluntary_exits: VariableList::empty(), sync_aggregate: SyncAggregate::empty(), bls_to_execution_changes: VariableList::empty(), + parent_execution_requests: ExecutionRequests::default(), signed_execution_payload_bid: SignedExecutionPayloadBid::empty(), payload_attestations: VariableList::empty(), _phantom: PhantomData, diff --git a/consensus/types/src/block/beacon_block_body.rs b/consensus/types/src/block/beacon_block_body.rs index fd5d976c9b..cd3f4dcaba 100644 --- a/consensus/types/src/block/beacon_block_body.rs +++ 
b/consensus/types/src/block/beacon_block_body.rs @@ -170,6 +170,8 @@ pub struct BeaconBlockBody = FullPay pub signed_execution_payload_bid: SignedExecutionPayloadBid, #[superstruct(only(Gloas))] pub payload_attestations: VariableList, E::MaxPayloadAttestations>, + #[superstruct(only(Gloas))] + pub parent_execution_requests: ExecutionRequests, #[superstruct(only(Base, Altair, Gloas))] #[metastruct(exclude_from(fields))] #[ssz(skip_serializing, skip_deserializing)] @@ -564,6 +566,7 @@ impl From>> voluntary_exits, sync_aggregate, bls_to_execution_changes, + parent_execution_requests, signed_execution_payload_bid, payload_attestations, _phantom, @@ -580,6 +583,7 @@ impl From>> voluntary_exits, sync_aggregate, bls_to_execution_changes, + parent_execution_requests, signed_execution_payload_bid, payload_attestations, _phantom: PhantomData, @@ -898,6 +902,7 @@ impl From>> voluntary_exits, sync_aggregate, bls_to_execution_changes, + parent_execution_requests, signed_execution_payload_bid, payload_attestations, _phantom, @@ -915,6 +920,7 @@ impl From>> voluntary_exits, sync_aggregate, bls_to_execution_changes, + parent_execution_requests, signed_execution_payload_bid, payload_attestations, _phantom: PhantomData, diff --git a/consensus/types/src/block/signed_beacon_block.rs b/consensus/types/src/block/signed_beacon_block.rs index dd6f52426a..23b01415c8 100644 --- a/consensus/types/src/block/signed_beacon_block.rs +++ b/consensus/types/src/block/signed_beacon_block.rs @@ -394,13 +394,15 @@ impl> SignedBeaconBlock /// `block_hash` from the parent beacon block's bid. If the parent beacon state is available /// this can alternatively be fetched from `state.latest_payload_bid`. /// - /// This function returns `false` for all blocks prior to Gloas. + /// This function returns `false` for all blocks prior to Gloas and for the zero + /// `parent_block_hash`. 
pub fn is_parent_block_full(&self, parent_block_hash: ExecutionBlockHash) -> bool { let Ok(signed_payload_bid) = self.message().body().signed_execution_payload_bid() else { // Prior to Gloas. return false; }; - signed_payload_bid.message.parent_block_hash == parent_block_hash + parent_block_hash != ExecutionBlockHash::zero() + && signed_payload_bid.message.parent_block_hash == parent_block_hash } } diff --git a/consensus/types/src/execution/execution_payload.rs b/consensus/types/src/execution/execution_payload.rs index d99b8785fa..c84a46874d 100644 --- a/consensus/types/src/execution/execution_payload.rs +++ b/consensus/types/src/execution/execution_payload.rs @@ -10,7 +10,7 @@ use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; use crate::{ - core::{Address, EthSpec, ExecutionBlockHash, Hash256}, + core::{Address, EthSpec, ExecutionBlockHash, Hash256, Slot}, fork::{ForkName, ForkVersionDecode}, state::BeaconStateError, test_utils::TestRandom, @@ -109,6 +109,12 @@ pub struct ExecutionPayload { #[superstruct(only(Deneb, Electra, Fulu, Gloas), partial_getter(copy))] #[serde(with = "serde_utils::quoted_u64")] pub excess_blob_gas: u64, + /// EIP-7928: Block access list + #[superstruct(only(Gloas))] + #[serde(with = "ssz_types::serde_utils::hex_var_list")] + pub block_access_list: VariableList, + #[superstruct(only(Gloas), partial_getter(copy))] + pub slot_number: Slot, } impl<'a, E: EthSpec> ExecutionPayloadRef<'a, E> { diff --git a/consensus/types/src/execution/execution_payload_bid.rs b/consensus/types/src/execution/execution_payload_bid.rs index 5c8771993e..b2438681c1 100644 --- a/consensus/types/src/execution/execution_payload_bid.rs +++ b/consensus/types/src/execution/execution_payload_bid.rs @@ -37,6 +37,7 @@ pub struct ExecutionPayloadBid { #[serde(with = "serde_utils::quoted_u64")] pub execution_payment: u64, pub blob_kzg_commitments: KzgCommitments, + pub execution_requests_root: Hash256, } impl SignedRoot for ExecutionPayloadBid {} diff --git 
a/consensus/types/src/execution/execution_payload_envelope.rs b/consensus/types/src/execution/execution_payload_envelope.rs index 169331a884..028423d681 100644 --- a/consensus/types/src/execution/execution_payload_envelope.rs +++ b/consensus/types/src/execution/execution_payload_envelope.rs @@ -20,8 +20,6 @@ pub struct ExecutionPayloadEnvelope { #[serde(with = "serde_utils::quoted_u64")] pub builder_index: u64, pub beacon_block_root: Hash256, - pub slot: Slot, - pub state_root: Hash256, } impl ExecutionPayloadEnvelope { @@ -32,8 +30,6 @@ impl ExecutionPayloadEnvelope { execution_requests: ExecutionRequests::default(), builder_index: 0, beacon_block_root: Hash256::zero(), - slot: Slot::new(0), - state_root: Hash256::zero(), } } @@ -60,6 +56,10 @@ impl ExecutionPayloadEnvelope { + (E::max_consolidation_requests_per_payload() * ::ssz_fixed_len()) } + + pub fn slot(&self) -> Slot { + self.payload.slot_number + } } impl SignedRoot for ExecutionPayloadEnvelope {} diff --git a/consensus/types/src/execution/mod.rs b/consensus/types/src/execution/mod.rs index 591be32b24..a3d4ed8730 100644 --- a/consensus/types/src/execution/mod.rs +++ b/consensus/types/src/execution/mod.rs @@ -12,7 +12,6 @@ mod payload; mod signed_bls_to_execution_change; mod signed_execution_payload_bid; mod signed_execution_payload_envelope; -mod state_payload_status; pub use bls_to_execution_change::BlsToExecutionChange; pub use eth1_data::Eth1Data; @@ -42,4 +41,3 @@ pub use payload::{ pub use signed_bls_to_execution_change::SignedBlsToExecutionChange; pub use signed_execution_payload_bid::SignedExecutionPayloadBid; pub use signed_execution_payload_envelope::SignedExecutionPayloadEnvelope; -pub use state_payload_status::StatePayloadStatus; diff --git a/consensus/types/src/execution/signed_execution_payload_envelope.rs b/consensus/types/src/execution/signed_execution_payload_envelope.rs index 76fa841680..522c8b3f54 100644 --- a/consensus/types/src/execution/signed_execution_payload_envelope.rs +++ 
b/consensus/types/src/execution/signed_execution_payload_envelope.rs @@ -42,7 +42,7 @@ impl SignedExecutionPayloadEnvelope { } pub fn slot(&self) -> Slot { - self.message.slot + self.message.slot() } pub fn epoch(&self) -> Epoch { diff --git a/consensus/types/src/execution/state_payload_status.rs b/consensus/types/src/execution/state_payload_status.rs deleted file mode 100644 index 1661be6060..0000000000 --- a/consensus/types/src/execution/state_payload_status.rs +++ /dev/null @@ -1,18 +0,0 @@ -use serde::{Deserialize, Serialize}; - -/// Payload status as it applies to a `BeaconState` post-Gloas. -/// -/// A state can either be a post-state for a block (in which case we call it `Pending`) or a -/// payload envelope (`Full`). When handling states it is often necessary to know which of these -/// two variants is required. -/// -/// Note that states at skipped slots could be either `Pending` or `Full`, depending on whether -/// the payload for the most-recently applied block was also applied. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)] -#[serde(rename_all = "lowercase")] -pub enum StatePayloadStatus { - /// For states produced by `process_block` executed on a `BeaconBlock`. - Pending, - /// For states produced by `process_execution_payload` on a `ExecutionPayloadEnvelope`. 
- Full, -} diff --git a/consensus/types/src/state/beacon_state.rs b/consensus/types/src/state/beacon_state.rs index 8bef8816e5..7e2b3096a8 100644 --- a/consensus/types/src/state/beacon_state.rs +++ b/consensus/types/src/state/beacon_state.rs @@ -37,7 +37,7 @@ use crate::{ execution::{ Eth1Data, ExecutionPayloadHeaderBellatrix, ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderDeneb, ExecutionPayloadHeaderElectra, ExecutionPayloadHeaderFulu, - ExecutionPayloadHeaderRef, ExecutionPayloadHeaderRefMut, StatePayloadStatus, + ExecutionPayloadHeaderRef, ExecutionPayloadHeaderRefMut, }, fork::{Fork, ForkName, ForkVersionDecode, InconsistentFork, map_fork_name}, light_client::consts::{ @@ -571,9 +571,10 @@ where )] #[metastruct(exclude_from(tree_lists))] pub latest_execution_payload_header: ExecutionPayloadHeaderFulu, + #[test_random(default)] #[superstruct(only(Gloas))] #[metastruct(exclude_from(tree_lists))] - pub latest_execution_payload_bid: ExecutionPayloadBid, + pub latest_block_hash: ExecutionBlockHash, #[superstruct(only(Capella, Deneb, Electra, Fulu, Gloas), partial_getter(copy))] #[serde(with = "serde_utils::quoted_u64")] #[metastruct(exclude_from(tree_lists))] @@ -657,10 +658,9 @@ where pub builder_pending_withdrawals: List, - #[test_random(default)] #[superstruct(only(Gloas))] #[metastruct(exclude_from(tree_lists))] - pub latest_block_hash: ExecutionBlockHash, + pub latest_execution_payload_bid: ExecutionPayloadBid, #[compare_fields(as_iter)] #[test_random(default)] @@ -1273,24 +1273,6 @@ impl BeaconState { } } - /// Determine the payload status of this state. - /// - /// Prior to Gloas this is always `Pending`. - /// - /// Post-Gloas, the definition of the `StatePayloadStatus` is: - /// - /// - `Full` if this state is the result of envelope processing. - /// - `Pending` if this state is the result of block processing. 
- pub fn payload_status(&self) -> StatePayloadStatus { - if !self.fork_name_unchecked().gloas_enabled() { - StatePayloadStatus::Pending - } else if self.is_parent_block_full() { - StatePayloadStatus::Full - } else { - StatePayloadStatus::Pending - } - } - /// Return `true` if the validator who produced `slot_signature` is eligible to aggregate. /// /// Spec v0.12.1 @@ -2507,22 +2489,6 @@ impl BeaconState { } } - /// Return true if the parent block was full (both beacon block and execution payload were present). - pub fn is_parent_block_full(&self) -> bool { - match self { - BeaconState::Base(_) | BeaconState::Altair(_) => false, - // TODO(EIP-7732): check the implications of this when we get to forkchoice modifications - BeaconState::Bellatrix(_) - | BeaconState::Capella(_) - | BeaconState::Deneb(_) - | BeaconState::Electra(_) - | BeaconState::Fulu(_) => true, - BeaconState::Gloas(state) => { - state.latest_execution_payload_bid.block_hash == state.latest_block_hash - } - } - } - /// Get the committee cache for some `slot`. /// /// Return an error if the cache for the slot's epoch is not initialized. 
diff --git a/testing/ef_tests/Makefile b/testing/ef_tests/Makefile index ab24ea35a0..facc8208d9 100644 --- a/testing/ef_tests/Makefile +++ b/testing/ef_tests/Makefile @@ -1,6 +1,6 @@ # To download/extract nightly tests, run: # CONSENSUS_SPECS_TEST_VERSION=nightly make -CONSENSUS_SPECS_TEST_VERSION ?= v1.7.0-alpha.4 +CONSENSUS_SPECS_TEST_VERSION ?= v1.7.0-alpha.5 REPO_NAME := consensus-spec-tests OUTPUT_DIR := ./$(REPO_NAME) diff --git a/testing/ef_tests/check_all_files_accessed.py b/testing/ef_tests/check_all_files_accessed.py index 2daafada31..5a54e150db 100755 --- a/testing/ef_tests/check_all_files_accessed.py +++ b/testing/ef_tests/check_all_files_accessed.py @@ -49,8 +49,6 @@ excluded_paths = [ "tests/.*/eip7805", # Heze fork is not implemented "tests/.*/heze/.*", - # TODO(gloas): remove these ignores as Gloas consensus is implemented - "tests/.*/gloas/fork_choice/.*", # Ignore MatrixEntry SSZ tests for now. "tests/.*/.*/ssz_static/MatrixEntry/.*", # TODO: partial data column not implemented yet @@ -77,7 +75,9 @@ excluded_paths = [ # We don't need these manifest files at the moment. 
"tests/.*/manifest.yaml", # TODO: gossip condition tests not implemented yet - "tests/.*/.*/networking/.*" + "tests/.*/.*/networking/.*", + # TODO: fast confirmation rule not merged yet + "tests/.*/.*/fast_confirmation", ] diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index 5e9dc001c7..2af205ee47 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -19,9 +19,13 @@ use beacon_chain::{ custody_context::NodeCustodyType, test_utils::{BeaconChainHarness, EphemeralHarnessType}, }; -use execution_layer::{PayloadStatusV1, json_structures::JsonPayloadStatusV1Status}; +use execution_layer::{ + PayloadStatusV1, PayloadStatusV1Status, json_structures::JsonPayloadStatusV1Status, +}; use serde::Deserialize; use ssz_derive::Decode; +use state_processing::VerifySignatures; +use state_processing::envelope_processing::verify_execution_payload_envelope; use state_processing::state_advance::complete_state_advance; use std::future::Future; use std::sync::Arc; @@ -995,38 +999,95 @@ impl Tester { valid: bool, ) -> Result<(), Error> { let block_root = signed_envelope.message.beacon_block_root; + let block_hash = signed_envelope.message.payload.block_hash; + let store = &self.harness.chain.store; + let spec = &self.harness.chain.spec; - // Store the envelope in the database so that child blocks extending - // the FULL path can load the parent's post-payload state. + // Simulate the EL: pre-configure the mock execution engine to return VALID + // for envelopes the test expects to be valid. Invalid envelopes are left + // unconfigured so the mock EE's default (SYNCING) rejects them. 
+ let el = self.harness.mock_execution_layer.as_ref().unwrap(); if valid { - self.harness - .chain - .store - .put_payload_envelope(&block_root, signed_envelope.clone()) + el.server.set_new_payload_status( + block_hash, + PayloadStatusV1 { + status: JsonPayloadStatusV1Status::Valid.into(), + latest_valid_hash: Some(block_hash), + validation_error: None, + }, + ); + } + + // Attempt to verify the envelope against the block's post-state. + let verification_result = (|| { + let block = store + .get_blinded_block(&block_root) + .map_err(|e| Error::InternalError(format!("Failed to load block: {e:?}")))? + .ok_or_else(|| { + Error::InternalError(format!("Block not found for root {block_root:?}")) + })?; + let block_state_root = block.state_root(); + + let state = store + .get_hot_state(&block_state_root, CACHE_STATE_IN_TESTS) + .map_err(|e| Error::InternalError(format!("Failed to load state: {e:?}")))? + .ok_or_else(|| { + Error::InternalError(format!("State not found for root {block_state_root:?}")) + })?; + + verify_execution_payload_envelope( + &state, + signed_envelope, + VerifySignatures::True, + block_state_root, + spec, + ) + .map_err(|e| { + Error::InternalError(format!("Failed to process execution payload: {e:?}")) + })?; + + // Check the mock EE's response for this block hash (simulates newPayload). + let ee_valid = el + .server + .ctx + .get_new_payload_status(&block_hash) + .and_then(|r| r.ok()) + .is_some_and(|s| s.status == PayloadStatusV1Status::Valid); + if !ee_valid { + return Err(Error::InternalError(format!( + "Mock EE rejected payload with block hash {block_hash:?}", + ))); + } + + Ok(()) + })(); + + if valid { + verification_result?; + + // Store the envelope so that child blocks can load the parent's payload. 
+ store + .put_payload_envelope(&block_root, signed_envelope) .map_err(|e| { Error::InternalError(format!( "Failed to store payload envelope for {block_root:?}: {e:?}", )) })?; - } - let result = self - .harness - .chain - .canonical_head - .fork_choice_write_lock() - .on_valid_payload_envelope_received(block_root); - - if valid { - result.map_err(|e| { - Error::InternalError(format!( - "on_execution_payload for block root {} failed: {:?}", - block_root, e - )) - })?; - } else if result.is_ok() { + self.harness + .chain + .canonical_head + .fork_choice_write_lock() + .on_valid_payload_envelope_received(block_root) + .map_err(|e| { + Error::InternalError(format!( + "on_execution_payload for block root {} failed: {:?}", + block_root, e + )) + })?; + } else if verification_result.is_ok() { return Err(Error::DidntFail(format!( - "on_execution_payload for block root {} should have failed", + "on_execution_payload envelope for block root {} should have failed", block_root ))); } diff --git a/testing/ef_tests/src/cases/operations.rs b/testing/ef_tests/src/cases/operations.rs index 1399815763..f90b6f2a6e 100644 --- a/testing/ef_tests/src/cases/operations.rs +++ b/testing/ef_tests/src/cases/operations.rs @@ -5,7 +5,7 @@ use crate::decode::{ssz_decode_file, ssz_decode_file_with, ssz_decode_state, yam use serde::Deserialize; use ssz::Decode; use state_processing::common::update_progressive_balances_cache::initialize_progressive_balances_cache; -use state_processing::envelope_processing::VerifyStateRoot; +use state_processing::envelope_processing::verify_execution_payload_envelope; use state_processing::epoch_cache::initialize_epoch_cache; use state_processing::per_block_processing::process_operations::{ process_consolidation_requests, process_deposit_requests_post_gloas, @@ -13,7 +13,7 @@ use state_processing::per_block_processing::process_operations::{ }; use state_processing::{ ConsensusContext, - envelope_processing::{EnvelopeProcessingError, 
process_execution_payload_envelope}, + envelope_processing::EnvelopeProcessingError, per_block_processing::{ VerifyBlockRoot, VerifySignatures, errors::BlockProcessingError, @@ -23,7 +23,7 @@ use state_processing::{ process_bls_to_execution_changes, process_deposits, process_exits, process_payload_attestation, process_proposer_slashings, }, - process_sync_aggregate, withdrawals, + process_parent_execution_payload, process_sync_aggregate, withdrawals, }, }; use std::fmt::Debug; @@ -59,6 +59,12 @@ pub struct ExecutionPayloadBidBlock { block: BeaconBlock, } +/// Newtype for testing parent execution payload processing. +#[derive(Debug, Clone, Deserialize)] +pub struct ParentExecutionPayloadBlock { + block: BeaconBlock, +} + #[derive(Debug, Clone)] pub struct Operations> { metadata: Metadata, @@ -441,8 +447,10 @@ impl Operation for SignedExecutionPayloadEnvelope { "signed_envelope.ssz_snappy".into() } - fn is_enabled_for_fork(fork_name: ForkName) -> bool { - fork_name.gloas_enabled() + fn is_enabled_for_fork(_fork_name: ForkName) -> bool { + // TODO(gloas): re-enable this test when enabled upstream + // fork_name.gloas_enabled() + false } fn decode(path: &Path, _: ForkName, _spec: &ChainSpec) -> Result { @@ -460,12 +468,12 @@ impl Operation for SignedExecutionPayloadEnvelope { .as_ref() .is_some_and(|e| e.execution_valid); if valid { - process_execution_payload_envelope( + let block_state_root = state.update_tree_hash_cache()?; + verify_execution_payload_envelope( state, - None, self, VerifySignatures::True, - VerifyStateRoot::True, + block_state_root, spec, ) } else { @@ -505,6 +513,36 @@ impl Operation for ExecutionPayloadBidBlock { } } +impl Operation for ParentExecutionPayloadBlock { + type Error = BlockProcessingError; + + fn handler_name() -> String { + "parent_execution_payload".into() + } + + fn filename() -> String { + "block.ssz_snappy".into() + } + + fn is_enabled_for_fork(fork_name: ForkName) -> bool { + fork_name.gloas_enabled() + } + + fn decode(path: 
&Path, _fork_name: ForkName, spec: &ChainSpec) -> Result { + ssz_decode_file_with(path, |bytes| BeaconBlock::from_ssz_bytes(bytes, spec)) + .map(|block| ParentExecutionPayloadBlock { block }) + } + + fn apply_to( + &self, + state: &mut BeaconState, + spec: &ChainSpec, + _: &Operations, + ) -> Result<(), BlockProcessingError> { + process_parent_execution_payload(state, self.block.to_ref(), spec) + } +} + impl Operation for WithdrawalsPayload { type Error = BlockProcessingError; diff --git a/testing/ef_tests/src/handler.rs b/testing/ef_tests/src/handler.rs index 4373d6b7d1..96798c910c 100644 --- a/testing/ef_tests/src/handler.rs +++ b/testing/ef_tests/src/handler.rs @@ -723,8 +723,12 @@ impl Handler for ForkChoiceHandler { return false; } - // on_execution_payload tests exist only for Gloas. - if self.handler_name == "on_execution_payload" && !fork_name.gloas_enabled() { + // on_execution_payload_envelope and get_parent_payload_status tests exist only for + // Gloas and later. + if (self.handler_name == "on_execution_payload_envelope" + || self.handler_name == "get_parent_payload_status") + && !fork_name.gloas_enabled() + { return false; } diff --git a/testing/ef_tests/src/lib.rs b/testing/ef_tests/src/lib.rs index 5587bbed41..0ffedc7eb8 100644 --- a/testing/ef_tests/src/lib.rs +++ b/testing/ef_tests/src/lib.rs @@ -2,10 +2,10 @@ pub use case_result::CaseResult; pub use cases::{ BuilderPendingPayments, Case, EffectiveBalanceUpdates, Eth1DataReset, ExecutionPayloadBidBlock, FeatureName, HistoricalRootsUpdate, HistoricalSummariesUpdate, InactivityUpdates, - JustificationAndFinalization, ParticipationFlagUpdates, ParticipationRecordUpdates, - PendingBalanceDeposits, PendingConsolidations, ProposerLookahead, PtcWindow, RandaoMixesReset, - RegistryUpdates, RewardsAndPenalties, Slashings, SlashingsReset, SyncCommitteeUpdates, - WithdrawalsPayload, + JustificationAndFinalization, ParentExecutionPayloadBlock, ParticipationFlagUpdates, + ParticipationRecordUpdates, 
PendingBalanceDeposits, PendingConsolidations, ProposerLookahead, + PtcWindow, RandaoMixesReset, RegistryUpdates, RewardsAndPenalties, Slashings, SlashingsReset, + SyncCommitteeUpdates, WithdrawalsPayload, }; pub use decode::log_file_access; pub use error::Error; diff --git a/testing/ef_tests/tests/tests.rs b/testing/ef_tests/tests/tests.rs index 62eb2dd038..79a02d7e80 100644 --- a/testing/ef_tests/tests/tests.rs +++ b/testing/ef_tests/tests/tests.rs @@ -99,6 +99,12 @@ fn operations_execution_payload_bid() { OperationsHandler::>::default().run(); } +#[test] +fn operations_parent_execution_payload() { + OperationsHandler::>::default().run(); + OperationsHandler::>::default().run(); +} + #[test] fn operations_payload_attestation() { OperationsHandler::>::default().run(); @@ -1039,9 +1045,15 @@ fn fork_choice_deposit_with_reorg() { } #[test] -fn fork_choice_on_execution_payload() { - ForkChoiceHandler::::new("on_execution_payload").run(); - ForkChoiceHandler::::new("on_execution_payload").run(); +fn fork_choice_on_execution_payload_envelope() { + ForkChoiceHandler::::new("on_execution_payload_envelope").run(); + ForkChoiceHandler::::new("on_execution_payload_envelope").run(); +} + +#[test] +fn fork_choice_get_parent_payload_status() { + ForkChoiceHandler::::new("get_parent_payload_status").run(); + ForkChoiceHandler::::new("get_parent_payload_status").run(); } #[test] diff --git a/testing/execution_engine_integration/src/test_rig.rs b/testing/execution_engine_integration/src/test_rig.rs index 6bf4a1aa52..05170d907c 100644 --- a/testing/execution_engine_integration/src/test_rig.rs +++ b/testing/execution_engine_integration/src/test_rig.rs @@ -315,6 +315,7 @@ impl TestRig { Address::repeat_byte(42), Some(vec![]), None, + None, ), ) .await; @@ -359,6 +360,7 @@ impl TestRig { suggested_fee_recipient, Some(vec![]), None, + None, ); let payload_parameters = PayloadParameters { @@ -517,6 +519,7 @@ impl TestRig { suggested_fee_recipient, Some(vec![]), None, + None, ); let 
payload_parameters = PayloadParameters { @@ -577,6 +580,7 @@ impl TestRig { Address::repeat_byte(42), Some(vec![]), None, + None, ); let slot = Slot::new(42); let head_block_root = Hash256::repeat_byte(100); diff --git a/validator_client/lighthouse_validator_store/src/lib.rs b/validator_client/lighthouse_validator_store/src/lib.rs index 76f7a86aab..c5bcd88eb1 100644 --- a/validator_client/lighthouse_validator_store/src/lib.rs +++ b/validator_client/lighthouse_validator_store/src/lib.rs @@ -1432,7 +1432,7 @@ impl ValidatorStore for LighthouseValidatorS ) -> Result, Error> { let signing_context = self.signing_context( Domain::BeaconBuilder, - envelope.slot.epoch(E::slots_per_epoch()), + envelope.slot().epoch(E::slots_per_epoch()), ); // Execution payload envelope signing is not slashable, bypass doppelganger protection. From 02c2841db0832506dcd67c0258043452374c3ee6 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 21 Apr 2026 17:23:07 +1000 Subject: [PATCH 23/27] Revert Gloas API changes from 9092 (#9151) This reverts commit 2749e18d0e35e6f148642623327acac5a7066658, from: - #9092 We no longer need those changes since the abolition of pending/full states. 
Co-Authored-By: Michael Sproul --- beacon_node/http_api/src/block_id.rs | 10 +++--- beacon_node/http_api/src/state_id.rs | 51 ++++++---------------------- common/eth2/src/types.rs | 8 ----- 3 files changed, 15 insertions(+), 54 deletions(-) diff --git a/beacon_node/http_api/src/block_id.rs b/beacon_node/http_api/src/block_id.rs index f4645f1304..e6b1ed0879 100644 --- a/beacon_node/http_api/src/block_id.rs +++ b/beacon_node/http_api/src/block_id.rs @@ -1,5 +1,5 @@ use crate::version::inconsistent_fork_rejection; -use crate::{ExecutionOptimistic, state_id::checkpoint_block_and_execution_optimistic}; +use crate::{ExecutionOptimistic, state_id::checkpoint_slot_and_execution_optimistic}; use beacon_chain::kzg_utils::reconstruct_blobs; use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes, WhenSlotSkipped}; use eth2::beacon_response::{ExecutionOptimisticFinalizedMetadata, UnversionedResponse}; @@ -60,15 +60,15 @@ impl BlockId { CoreBlockId::Finalized => { let finalized_checkpoint = chain.canonical_head.cached_head().finalized_checkpoint(); - let (_block, execution_optimistic) = - checkpoint_block_and_execution_optimistic(chain, finalized_checkpoint)?; + let (_slot, execution_optimistic) = + checkpoint_slot_and_execution_optimistic(chain, finalized_checkpoint)?; Ok((finalized_checkpoint.root, execution_optimistic, true)) } CoreBlockId::Justified => { let justified_checkpoint = chain.canonical_head.cached_head().justified_checkpoint(); - let (_block, execution_optimistic) = - checkpoint_block_and_execution_optimistic(chain, justified_checkpoint)?; + let (_slot, execution_optimistic) = + checkpoint_slot_and_execution_optimistic(chain, justified_checkpoint)?; Ok((justified_checkpoint.root, execution_optimistic, false)) } CoreBlockId::Slot(slot) => { diff --git a/beacon_node/http_api/src/state_id.rs b/beacon_node/http_api/src/state_id.rs index ce18388926..13fb9b2c58 100644 --- a/beacon_node/http_api/src/state_id.rs +++ b/beacon_node/http_api/src/state_id.rs @@ 
-2,7 +2,6 @@ use crate::ExecutionOptimistic; use crate::metrics; use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; use eth2::types::StateId as CoreStateId; -use proto_array::Block; use std::fmt; use std::str::FromStr; use types::{BeaconState, Checkpoint, EthSpec, Fork, Hash256, Slot}; @@ -20,8 +19,6 @@ impl StateId { Self(CoreStateId::Slot(slot)) } - // TODO(gloas) add tests for finalized and justified checkpoint states to ensure - // we return the post block state for gloas /// Return the state root identified by `self`. pub fn root( &self, @@ -44,41 +41,15 @@ impl StateId { CoreStateId::Finalized => { let finalized_checkpoint = chain.canonical_head.cached_head().finalized_checkpoint(); - - let slot = finalized_checkpoint - .epoch - .start_slot(T::EthSpec::slots_per_epoch()); - let (block, execution_optimistic) = - checkpoint_block_and_execution_optimistic(chain, finalized_checkpoint)?; - - if chain - .spec - .fork_name_at_slot::(block.slot) - .gloas_enabled() - { - return Ok((block.state_root, execution_optimistic, true)); - } - + let (slot, execution_optimistic) = + checkpoint_slot_and_execution_optimistic(chain, finalized_checkpoint)?; (slot, execution_optimistic, true) } CoreStateId::Justified => { let justified_checkpoint = chain.canonical_head.cached_head().justified_checkpoint(); - - let slot = justified_checkpoint - .epoch - .start_slot(T::EthSpec::slots_per_epoch()); - let (block, execution_optimistic) = - checkpoint_block_and_execution_optimistic(chain, justified_checkpoint)?; - - if chain - .spec - .fork_name_at_slot::(block.slot) - .gloas_enabled() - { - return Ok((block.state_root, execution_optimistic, false)); - } - + let (slot, execution_optimistic) = + checkpoint_slot_and_execution_optimistic(chain, justified_checkpoint)?; (slot, execution_optimistic, false) } CoreStateId::Slot(slot) => ( @@ -283,11 +254,13 @@ impl fmt::Display for StateId { } } -/// Returns checkpoint block and the execution status of the checkpoint's `root`. 
-pub fn checkpoint_block_and_execution_optimistic( +/// Returns the first slot of the checkpoint's `epoch` and the execution status of the checkpoint's +/// `root`. +pub fn checkpoint_slot_and_execution_optimistic( chain: &BeaconChain, checkpoint: Checkpoint, -) -> Result<(Block, ExecutionOptimistic), warp::reject::Rejection> { +) -> Result<(Slot, ExecutionOptimistic), warp::reject::Rejection> { + let slot = checkpoint.epoch.start_slot(T::EthSpec::slots_per_epoch()); let fork_choice = chain.canonical_head.fork_choice_read_lock(); let finalized_checkpoint = fork_choice.cached_fork_choice_view().finalized_checkpoint; @@ -304,9 +277,5 @@ pub fn checkpoint_block_and_execution_optimistic( .map_err(BeaconChainError::ForkChoiceError) .map_err(warp_utils::reject::unhandled_error)?; - let block = fork_choice.get_block(&checkpoint.root).ok_or_else(|| { - warp_utils::reject::custom_not_found(format!("Block {:?} not found", checkpoint.root)) - })?; - - Ok((block, execution_optimistic)) + Ok((slot, execution_optimistic)) } diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index d724156f86..950abeadd8 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -125,15 +125,7 @@ impl fmt::Display for BlockId { pub enum StateId { Head, Genesis, - /// Pre-gloas the finalized state is the checkpoint block state - /// advanced to the epoch boundary. - /// Post-gloas this state is always the checkpoint post-block state and is not advanced - /// to the epoch boundary. Finalized, - /// Pre-gloas the justified state is the checkpoint block state - /// advanced to the epoch boundary. - /// Post-gloas this state is always the checkpoint post-block state and is not advanced - /// to the epoch boundary. 
Justified, Slot(Slot), Root(Hash256), From 7731b5f250745a8fa039ce46d90826ca91250a11 Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Tue, 21 Apr 2026 16:36:22 +0900 Subject: [PATCH 24/27] Gloas engine api updates (#9150) Co-Authored-By: Eitan Seri-Levi --- beacon_node/client/src/notifier.rs | 12 ++++---- beacon_node/execution_layer/src/engine_api.rs | 9 ++++-- .../execution_layer/src/engine_api/http.rs | 28 +++++++++++++++---- .../src/test_utils/handle_rpc.rs | 13 +++++++-- .../execution_layer/src/test_utils/mod.rs | 1 + 5 files changed, 47 insertions(+), 16 deletions(-) diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs index 4acb8c3aed..0d73a6bf7a 100644 --- a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -7,8 +7,8 @@ use execution_layer::{ EngineCapabilities, http::{ ENGINE_FORKCHOICE_UPDATED_V2, ENGINE_FORKCHOICE_UPDATED_V3, ENGINE_GET_PAYLOAD_V2, - ENGINE_GET_PAYLOAD_V3, ENGINE_GET_PAYLOAD_V4, ENGINE_GET_PAYLOAD_V5, ENGINE_NEW_PAYLOAD_V2, - ENGINE_NEW_PAYLOAD_V3, ENGINE_NEW_PAYLOAD_V4, + ENGINE_GET_PAYLOAD_V3, ENGINE_GET_PAYLOAD_V4, ENGINE_GET_PAYLOAD_V5, ENGINE_GET_PAYLOAD_V6, + ENGINE_NEW_PAYLOAD_V2, ENGINE_NEW_PAYLOAD_V3, ENGINE_NEW_PAYLOAD_V4, ENGINE_NEW_PAYLOAD_V5, }, }; use lighthouse_network::{NetworkGlobals, types::SyncState}; @@ -555,11 +555,11 @@ fn methods_required_for_fork( } } ForkName::Gloas => { - if !capabilities.get_payload_v5 { - missing_methods.push(ENGINE_GET_PAYLOAD_V5); + if !capabilities.get_payload_v6 { + missing_methods.push(ENGINE_GET_PAYLOAD_V6); } - if !capabilities.new_payload_v4 { - missing_methods.push(ENGINE_NEW_PAYLOAD_V4); + if !capabilities.new_payload_v5 { + missing_methods.push(ENGINE_NEW_PAYLOAD_V5); } } } diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index 236340aa29..6566616c04 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ 
-4,8 +4,9 @@ use crate::http::{ ENGINE_FORKCHOICE_UPDATED_V4, ENGINE_GET_BLOBS_V1, ENGINE_GET_BLOBS_V2, ENGINE_GET_CLIENT_VERSION_V1, ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1, ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1, ENGINE_GET_PAYLOAD_V1, ENGINE_GET_PAYLOAD_V2, - ENGINE_GET_PAYLOAD_V3, ENGINE_GET_PAYLOAD_V4, ENGINE_GET_PAYLOAD_V5, ENGINE_NEW_PAYLOAD_V1, - ENGINE_NEW_PAYLOAD_V2, ENGINE_NEW_PAYLOAD_V3, ENGINE_NEW_PAYLOAD_V4, ENGINE_NEW_PAYLOAD_V5, + ENGINE_GET_PAYLOAD_V3, ENGINE_GET_PAYLOAD_V4, ENGINE_GET_PAYLOAD_V5, ENGINE_GET_PAYLOAD_V6, + ENGINE_NEW_PAYLOAD_V1, ENGINE_NEW_PAYLOAD_V2, ENGINE_NEW_PAYLOAD_V3, ENGINE_NEW_PAYLOAD_V4, + ENGINE_NEW_PAYLOAD_V5, }; use eth2::types::{ BlobsBundle, SsePayloadAttributes, SsePayloadAttributesV1, SsePayloadAttributesV2, @@ -591,6 +592,7 @@ pub struct EngineCapabilities { pub get_payload_v3: bool, pub get_payload_v4: bool, pub get_payload_v5: bool, + pub get_payload_v6: bool, pub get_client_version_v1: bool, pub get_blobs_v1: bool, pub get_blobs_v2: bool, @@ -647,6 +649,9 @@ impl EngineCapabilities { if self.get_payload_v5 { response.push(ENGINE_GET_PAYLOAD_V5); } + if self.get_payload_v6 { + response.push(ENGINE_GET_PAYLOAD_V6); + } if self.get_client_version_v1 { response.push(ENGINE_GET_CLIENT_VERSION_V1); } diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index dcf8205406..b9f6289d05 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -43,6 +43,7 @@ pub const ENGINE_GET_PAYLOAD_V2: &str = "engine_getPayloadV2"; pub const ENGINE_GET_PAYLOAD_V3: &str = "engine_getPayloadV3"; pub const ENGINE_GET_PAYLOAD_V4: &str = "engine_getPayloadV4"; pub const ENGINE_GET_PAYLOAD_V5: &str = "engine_getPayloadV5"; +pub const ENGINE_GET_PAYLOAD_V6: &str = "engine_getPayloadV6"; pub const ENGINE_GET_PAYLOAD_TIMEOUT: Duration = Duration::from_secs(2); pub const ENGINE_FORKCHOICE_UPDATED_V1: &str = 
"engine_forkchoiceUpdatedV1"; @@ -82,6 +83,7 @@ pub static LIGHTHOUSE_CAPABILITIES: &[&str] = &[ ENGINE_GET_PAYLOAD_V3, ENGINE_GET_PAYLOAD_V4, ENGINE_GET_PAYLOAD_V5, + ENGINE_GET_PAYLOAD_V6, ENGINE_FORKCHOICE_UPDATED_V1, ENGINE_FORKCHOICE_UPDATED_V2, ENGINE_FORKCHOICE_UPDATED_V3, @@ -1052,10 +1054,25 @@ impl HttpJsonRpc { .try_into() .map_err(Error::BadResponse) } + _ => Err(Error::UnsupportedForkVariant(format!( + "called get_payload_v5 with {}", + fork_name + ))), + } + } + + pub async fn get_payload_v6( + &self, + fork_name: ForkName, + payload_id: PayloadId, + ) -> Result, Error> { + let params = json!([JsonPayloadIdRequest::from(payload_id)]); + + match fork_name { ForkName::Gloas => { let response: JsonGetPayloadResponseGloas = self .rpc_request( - ENGINE_GET_PAYLOAD_V5, + ENGINE_GET_PAYLOAD_V6, params, ENGINE_GET_PAYLOAD_TIMEOUT * self.execution_timeout_multiplier, ) @@ -1065,7 +1082,7 @@ impl HttpJsonRpc { .map_err(Error::BadResponse) } _ => Err(Error::UnsupportedForkVariant(format!( - "called get_payload_v5 with {}", + "called get_payload_v6 with {}", fork_name ))), } @@ -1237,6 +1254,7 @@ impl HttpJsonRpc { get_payload_v3: capabilities.contains(ENGINE_GET_PAYLOAD_V3), get_payload_v4: capabilities.contains(ENGINE_GET_PAYLOAD_V4), get_payload_v5: capabilities.contains(ENGINE_GET_PAYLOAD_V5), + get_payload_v6: capabilities.contains(ENGINE_GET_PAYLOAD_V6), get_client_version_v1: capabilities.contains(ENGINE_GET_CLIENT_VERSION_V1), get_blobs_v1: capabilities.contains(ENGINE_GET_BLOBS_V1), get_blobs_v2: capabilities.contains(ENGINE_GET_BLOBS_V2), @@ -1429,10 +1447,10 @@ impl HttpJsonRpc { } } ForkName::Gloas => { - if engine_capabilities.get_payload_v5 { - self.get_payload_v5(fork_name, payload_id).await + if engine_capabilities.get_payload_v6 { + self.get_payload_v6(fork_name, payload_id).await } else { - Err(Error::RequiredMethodUnsupported("engine_getPayloadv5")) + Err(Error::RequiredMethodUnsupported("engine_getPayloadV6")) } } ForkName::Base | 
ForkName::Altair => Err(Error::UnsupportedForkVariant(format!( diff --git a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs index 058f1e76da..3054289996 100644 --- a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs +++ b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs @@ -277,7 +277,8 @@ pub async fn handle_rpc( | ENGINE_GET_PAYLOAD_V2 | ENGINE_GET_PAYLOAD_V3 | ENGINE_GET_PAYLOAD_V4 - | ENGINE_GET_PAYLOAD_V5 => { + | ENGINE_GET_PAYLOAD_V5 + | ENGINE_GET_PAYLOAD_V6 => { let request: JsonPayloadIdRequest = get_param(params, 0).map_err(|s| (s, BAD_PARAMS_ERROR_CODE))?; let id = request.into(); @@ -363,7 +364,8 @@ pub async fn handle_rpc( && (method == ENGINE_GET_PAYLOAD_V1 || method == ENGINE_GET_PAYLOAD_V2 || method == ENGINE_GET_PAYLOAD_V3 - || method == ENGINE_GET_PAYLOAD_V4) + || method == ENGINE_GET_PAYLOAD_V4 + || method == ENGINE_GET_PAYLOAD_V5) { return Err(( format!("{} called after Gloas fork!", method), @@ -455,13 +457,18 @@ pub async fn handle_rpc( }) .unwrap() } + _ => unreachable!(), + }) + } + ENGINE_GET_PAYLOAD_V6 => { + Ok(match JsonExecutionPayload::try_from(response).unwrap() { JsonExecutionPayload::Gloas(execution_payload) => { serde_json::to_value(JsonGetPayloadResponseGloas { execution_payload, block_value: Uint256::from(DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI), blobs_bundle: maybe_blobs .ok_or(( - "No blobs returned despite V5 Payload".to_string(), + "No blobs returned despite V6 Payload".to_string(), GENERIC_ERROR_CODE, ))? 
.into(), diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs index 6d8c30d316..c382d8abf5 100644 --- a/beacon_node/execution_layer/src/test_utils/mod.rs +++ b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -55,6 +55,7 @@ pub const DEFAULT_ENGINE_CAPABILITIES: EngineCapabilities = EngineCapabilities { get_payload_v3: true, get_payload_v4: true, get_payload_v5: true, + get_payload_v6: true, get_client_version_v1: true, get_blobs_v1: true, get_blobs_v2: true, From 4de08f1b4ab6dfcea543319266eb8d2f8db0cd6f Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 22 Apr 2026 12:03:13 +1000 Subject: [PATCH 25/27] Remove more mentions of "pending"/"full" states (#9156) Just a little naming cleanup (no semantic changes) to remove mentions of pending and full states that were still lurking. This hopefully helps Claude forget about the concept (it defaults to naming variables `pending_state`s without this change). Co-Authored-By: Michael Sproul --- .../src/block_production/gloas.rs | 9 ++--- beacon_node/beacon_chain/src/test_utils.rs | 14 +++---- beacon_node/beacon_chain/tests/store_tests.rs | 38 ++++++++----------- beacon_node/http_api/src/produce_block.rs | 2 +- 4 files changed, 27 insertions(+), 36 deletions(-) diff --git a/beacon_node/beacon_chain/src/block_production/gloas.rs b/beacon_node/beacon_chain/src/block_production/gloas.rs index df8d19d214..f895120eac 100644 --- a/beacon_node/beacon_chain/src/block_production/gloas.rs +++ b/beacon_node/beacon_chain/src/block_production/gloas.rs @@ -444,9 +444,9 @@ impl BeaconChain { /// Complete a block by computing its state root, and /// - /// Return `(block, pending_state, block_value)` where: + /// Return `(block, post_block_state, block_value)` where: /// - /// - `pending_state` is the state post block application (prior to payload application) + /// - `post_block_state` is the state post block application /// - `block_value` is the consensus-layer rewards 
for `block` #[allow(clippy::type_complexity)] #[instrument(skip_all, level = "debug")] @@ -571,9 +571,6 @@ impl BeaconChain { drop(state_root_timer); - // Clone the Pending state (post-block, pre-envelope) for callers that need it. - let pending_state = state.clone(); - let (mut block, _) = signed_beacon_block.deconstruct(); *block.state_root_mut() = state_root; @@ -628,7 +625,7 @@ impl BeaconChain { "Produced beacon block" ); - Ok((block, pending_state, consensus_block_value)) + Ok((block, state, consensus_block_value)) } // TODO(gloas) introduce `ProposerPreferences` so we can build out trustless diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index e84f9ad983..00a2ed64f1 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -1102,7 +1102,7 @@ where } /// Returns a newly created block, signed by the proposer for the given slot, - /// along with the execution payload envelope (for Gloas) and the pending state. + /// along with the execution payload envelope (for Gloas) and the post-block state. /// /// For pre-Gloas forks, the envelope is `None` and this behaves like `make_block`. 
pub async fn make_block_with_envelope( @@ -1142,7 +1142,7 @@ where ) }; - let (block, pending_state, _consensus_block_value) = self + let (block, post_block_state, _consensus_block_value) = self .chain .produce_block_on_state_gloas( state, @@ -1159,8 +1159,8 @@ where let signed_block = Arc::new(block.sign( &self.validator_keypairs[proposer_index].sk, - &pending_state.fork(), - pending_state.genesis_validators_root(), + &post_block_state.fork(), + post_block_state.genesis_validators_root(), &self.spec, )); @@ -1175,8 +1175,8 @@ where let domain = self.spec.get_domain( epoch, Domain::BeaconBuilder, - &pending_state.fork(), - pending_state.genesis_validators_root(), + &post_block_state.fork(), + post_block_state.genesis_validators_root(), ); let message = envelope.signing_root(domain); let signature = self.validator_keypairs[proposer_index].sk.sign(message); @@ -1187,7 +1187,7 @@ where }); let block_contents: SignedBlockContentsTuple = (signed_block, None); - (block_contents, signed_envelope, pending_state) + (block_contents, signed_envelope, post_block_state) } else { let (block_contents, state) = self.make_block(state, slot).await; (block_contents, None, state) diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 47bda60eb8..86adf50995 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -5693,7 +5693,7 @@ async fn test_gloas_block_and_envelope_storage_generic( check_db_invariants(&harness); } -/// Test block replay with and without envelopes. +/// Test that Gloas block replay works without envelopes. 
#[tokio::test] async fn test_gloas_block_replay_with_envelopes() { if !fork_name_from_env().is_some_and(|f| f.gloas_enabled()) { @@ -5709,14 +5709,13 @@ async fn test_gloas_block_replay_with_envelopes() { let mut state = genesis_state.clone(); let mut last_block_root = Hash256::zero(); - let mut pending_states = HashMap::new(); - let mut full_states = HashMap::new(); + let mut states = HashMap::new(); for i in 1..=num_blocks { let slot = Slot::new(i); harness.advance_slot(); - let (block_contents, envelope, pending_state) = + let (block_contents, envelope, mut block_state) = harness.make_block_with_envelope(state, slot).await; let block_root = block_contents.0.canonical_root(); @@ -5725,18 +5724,16 @@ async fn test_gloas_block_replay_with_envelopes() { .await .unwrap(); - let pending_state_root = pending_state.clone().update_tree_hash_cache().unwrap(); - pending_states.insert(slot, (pending_state_root, pending_state.clone())); + let state_root = block_state.update_tree_hash_cache().unwrap(); + states.insert(slot, (state_root, block_state.clone())); let envelope = envelope.expect("Gloas block should have envelope"); - let full_state = pending_state; harness - .process_envelope(block_root, envelope, &full_state, pending_state_root) + .process_envelope(block_root, envelope, &block_state, state_root) .await; - full_states.insert(slot, (pending_state_root, full_state.clone())); last_block_root = block_root; - state = full_state; + state = block_state; } let end_slot = Slot::new(num_blocks); @@ -5756,7 +5753,7 @@ async fn test_gloas_block_replay_with_envelopes() { .into_state(); replayed.apply_pending_mutations().unwrap(); - let (_, mut expected) = pending_states.get(&end_slot).unwrap().clone(); + let (_, mut expected) = states.get(&end_slot).unwrap().clone(); expected.apply_pending_mutations().unwrap(); replayed.drop_all_caches().unwrap(); @@ -5782,8 +5779,7 @@ async fn test_gloas_hot_state_hierarchy() { // Build enough blocks to span multiple epochs. 
With MinimalEthSpec (8 slots/epoch), // 40 slots covers 5 epochs. let num_blocks = E::slots_per_epoch() * 5; - // TODO(gloas): enable finalisation by increasing this threshold - let some_validators = (0..LOW_VALIDATOR_COUNT).collect::>(); + let all_validators = (0..LOW_VALIDATOR_COUNT).collect::>(); let (genesis_state, _genesis_state_root) = harness.get_current_state_and_root(); @@ -5796,7 +5792,7 @@ async fn test_gloas_hot_state_hierarchy() { let slot = Slot::new(i); harness.advance_slot(); - let (block_contents, envelope, mut pending_state) = + let (block_contents, envelope, mut block_state) = harness.make_block_with_envelope(state.clone(), slot).await; let block_root = block_contents.0.canonical_root(); let signed_block = block_contents.0.clone(); @@ -5809,24 +5805,22 @@ async fn test_gloas_hot_state_hierarchy() { // Attest to the current block at its own slot (same-slot attestation). // In Gloas, same-slot attestations have index=0 and route to Pending in // fork choice, correctly propagating weight through the Full path. - // Use pending_state (at slot i) so the target root resolves correctly. - let pending_state_root = pending_state.update_tree_hash_cache().unwrap(); + let state_root = block_state.update_tree_hash_cache().unwrap(); harness.attest_block( - &pending_state, - pending_state_root, + &block_state, + state_root, block_root.into(), &signed_block, - &some_validators, + &all_validators, ); let envelope = envelope.expect("Gloas block should have envelope"); - let full_state = pending_state; harness - .process_envelope(block_root, envelope, &full_state, pending_state_root) + .process_envelope(block_root, envelope, &block_state, state_root) .await; last_block_root = block_root; - state = full_state; + state = block_state; } // Head should be the block at slot 40 with full payload. 
diff --git a/beacon_node/http_api/src/produce_block.rs b/beacon_node/http_api/src/produce_block.rs index 70475de130..7173eb698f 100644 --- a/beacon_node/http_api/src/produce_block.rs +++ b/beacon_node/http_api/src/produce_block.rs @@ -70,7 +70,7 @@ pub async fn produce_block_v4( let graffiti_settings = GraffitiSettings::new(query.graffiti, query.graffiti_policy); - let (block, _pending_state, consensus_block_value) = chain + let (block, _block_state, consensus_block_value) = chain .produce_block_with_verification_gloas( randao_reveal, slot, From 5a13e37456493c5d0441f27f8e51e3eae50ccd40 Mon Sep 17 00:00:00 2001 From: Mac L Date: Wed, 22 Apr 2026 15:07:59 +0300 Subject: [PATCH 26/27] Fix audit failure for `rustls-webpki` (#9161) Another `rustls-webpki` audit failure: https://rustsec.org/advisories/RUSTSEC-2026-0104 Bump `rustls-webpki` to the latest (unaffected) version. As with the previous `rustls-webpki` vulns, we add an ignore for our older version required by warp 0.3. This ignore will be resolved by https://github.com/sigp/lighthouse/pull/9001 Co-Authored-By: Mac L --- Cargo.lock | 8 ++++---- Makefile | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 329518f647..b136e7da98 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5266,7 +5266,7 @@ dependencies = [ "rcgen", "ring", "rustls 0.23.35", - "rustls-webpki 0.103.12", + "rustls-webpki 0.103.13", "thiserror 2.0.17", "x509-parser", "yasna", @@ -7678,7 +7678,7 @@ dependencies = [ "once_cell", "ring", "rustls-pki-types", - "rustls-webpki 0.103.12", + "rustls-webpki 0.103.13", "subtle", "zeroize", ] @@ -7727,9 +7727,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.103.12" +version = "0.103.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8279bb85272c9f10811ae6a6c547ff594d6a7f3c6c6b02ee9726d1d0dcfcdd06" +checksum = "61c429a8649f110dddef65e2a5ad240f747e85f7758a6bccc7e5777bd33f756e" dependencies = [ "ring", 
"rustls-pki-types", diff --git a/Makefile b/Makefile index 280e74d1d9..9246b33999 100644 --- a/Makefile +++ b/Makefile @@ -330,7 +330,7 @@ install-audit: cargo install --force cargo-audit audit-CI: - cargo audit --ignore RUSTSEC-2026-0049 --ignore RUSTSEC-2026-0098 --ignore RUSTSEC-2026-0099 + cargo audit --ignore RUSTSEC-2026-0049 --ignore RUSTSEC-2026-0098 --ignore RUSTSEC-2026-0099 --ignore RUSTSEC-2026-0104 # Runs cargo deny (check for banned crates, duplicate versions, and source restrictions) deny: install-deny deny-CI From cfc748309f55a9da5d585be646e1d425c5d9571d Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Thu, 23 Apr 2026 00:43:17 +0900 Subject: [PATCH 27/27] At the fork transition ensure we build ontop of the correct parent block hash (#9160) When producing a block at the fork, treat parent payload status as full I've been testing on kurtosis and this fixes an issue where we cant propose a block at the fork. This is a screenshot of the fix. The envelope shows missing because we are missing an SSE event, but the envelope is in fact being imported and the chain is progressing just fine image Co-Authored-By: Eitan Seri-Levi --- .../src/block_production/gloas.rs | 20 ++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/beacon_node/beacon_chain/src/block_production/gloas.rs b/beacon_node/beacon_chain/src/block_production/gloas.rs index f895120eac..9b3fc2806e 100644 --- a/beacon_node/beacon_chain/src/block_production/gloas.rs +++ b/beacon_node/beacon_chain/src/block_production/gloas.rs @@ -690,13 +690,19 @@ impl BeaconChain { let parent_bid = state.latest_execution_payload_bid()?; // TODO(gloas): need should_extend_payload check here as well - let parent_block_hash = if parent_payload_status == PayloadStatus::Full { - // Build on parent bid's payload. - parent_bid.block_hash - } else { - // Skip parent bid's payload. For genesis this is the EL genesis hash. 
- parent_bid.parent_block_hash - }; + let parent_block_slot = state.latest_block_header().slot; + let parent_is_pre_gloas = !self + .spec + .fork_name_at_slot::(parent_block_slot) + .gloas_enabled(); + let parent_block_hash = + if parent_payload_status == PayloadStatus::Full || parent_is_pre_gloas { + // Build on parent bid's payload. + parent_bid.block_hash + } else { + // Skip parent bid's payload. For genesis this is the EL genesis hash. + parent_bid.parent_block_hash + }; // TODO(gloas) this should be BlockProductionVersion::V4 // V3 is okay for now as long as we're not connected to a builder