From 9dccfb540f3cd9c5a0f0f9ac18a007f39b3a3b89 Mon Sep 17 00:00:00 2001 From: chonghe <44791194+chong-he@users.noreply.github.com> Date: Thu, 5 Mar 2026 11:48:30 +0800 Subject: [PATCH 01/43] update cargo-sort (#8933) Co-Authored-By: Tan Chee Keong --- Cargo.toml | 29 +++-------------------------- common/logging/Cargo.toml | 2 +- common/malloc_utils/Cargo.toml | 5 +---- 3 files changed, 5 insertions(+), 31 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 667ba1f803..222392bcb7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -166,20 +166,7 @@ initialized_validators = { path = "validator_client/initialized_validators" } int_to_bytes = { path = "consensus/int_to_bytes" } itertools = "0.14" kzg = { path = "crypto/kzg" } -libp2p = { git = "https://github.com/libp2p/rust-libp2p.git", default-features = false, features = [ - "identify", - "yamux", - "noise", - "dns", - "tcp", - "tokio", - "secp256k1", - "macros", - "metrics", - "quic", - "upnp", - "gossipsub", -] } +libp2p = { git = "https://github.com/libp2p/rust-libp2p.git", default-features = false, features = ["identify", "yamux", "noise", "dns", "tcp", "tokio", "secp256k1", "macros", "metrics", "quic", "upnp", "gossipsub"] } libsecp256k1 = "0.7" lighthouse_network = { path = "beacon_node/lighthouse_network" } lighthouse_validator_store = { path = "validator_client/lighthouse_validator_store" } @@ -219,12 +206,7 @@ r2d2 = "0.8" rand = "0.9.0" rayon = "1.7" regex = "1" -reqwest = { version = "0.12", default-features = false, features = [ - "blocking", - "json", - "stream", - "rustls-tls", -] } +reqwest = { version = "0.12", default-features = false, features = ["blocking", "json", "stream", "rustls-tls"] } ring = "0.17" rpds = "0.11" rusqlite = { version = "0.38", features = ["bundled"] } @@ -253,12 +235,7 @@ sysinfo = "0.26" system_health = { path = "common/system_health" } task_executor = { path = "common/task_executor" } tempfile = "3" -tokio = { version = "1", features = [ - "rt-multi-thread", - "sync", - 
"signal", - "macros", -] } +tokio = { version = "1", features = ["rt-multi-thread", "sync", "signal", "macros"] } tokio-stream = { version = "0.1", features = ["sync"] } tokio-util = { version = "0.7", features = ["codec", "compat", "time"] } tracing = "0.1.40" diff --git a/common/logging/Cargo.toml b/common/logging/Cargo.toml index 41c82dbd61..cbebd1a501 100644 --- a/common/logging/Cargo.toml +++ b/common/logging/Cargo.toml @@ -13,7 +13,7 @@ logroller = { workspace = true } metrics = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } -tokio = { workspace = true, features = [ "time" ] } +tokio = { workspace = true, features = ["time"] } tracing = { workspace = true } tracing-appender = { workspace = true } tracing-core = { workspace = true } diff --git a/common/malloc_utils/Cargo.toml b/common/malloc_utils/Cargo.toml index 1052128852..e90490bf09 100644 --- a/common/malloc_utils/Cargo.toml +++ b/common/malloc_utils/Cargo.toml @@ -35,7 +35,4 @@ tikv-jemallocator = { version = "0.6.0", optional = true, features = ["stats"] } # Jemalloc's background_threads feature requires Linux (pthreads). [target.'cfg(target_os = "linux")'.dependencies] -tikv-jemallocator = { version = "0.6.0", optional = true, features = [ - "stats", - "background_threads", -] } +tikv-jemallocator = { version = "0.6.0", optional = true, features = ["stats", "background_threads"] } From 9c4715c251ea19b2cc4c7688916b5cddfa2b1778 Mon Sep 17 00:00:00 2001 From: Mac L Date: Fri, 6 Mar 2026 09:54:43 +0200 Subject: [PATCH 02/43] Fix lints for Rust v1.94.0 (#8939) Following the release of Rust v1.94.0 there are new Clippy lints which do not pass and are blocking CI (which pulls in the latest version of Rust) This is pretty much the minimum just to get CI running again. Most of the errors involve error types being too large. For now I've added allows but later it might be worth doing a refactor to `Box` or otherwise remove the problematic error types. 
Co-Authored-By: Mac L --- beacon_node/beacon_chain/tests/attestation_verification.rs | 1 + beacon_node/beacon_chain/tests/payload_invalidation.rs | 1 + beacon_node/beacon_chain/tests/store_tests.rs | 1 + beacon_node/execution_layer/src/lib.rs | 2 +- beacon_node/http_api/src/lib.rs | 1 + slasher/service/src/lib.rs | 1 + 6 files changed, 6 insertions(+), 1 deletion(-) diff --git a/beacon_node/beacon_chain/tests/attestation_verification.rs b/beacon_node/beacon_chain/tests/attestation_verification.rs index e8ee628f28..acf326430b 100644 --- a/beacon_node/beacon_chain/tests/attestation_verification.rs +++ b/beacon_node/beacon_chain/tests/attestation_verification.rs @@ -1,4 +1,5 @@ #![cfg(not(debug_assertions))] +#![allow(clippy::result_large_err)] use beacon_chain::attestation_verification::{ Error, batch_verify_aggregated_attestations, batch_verify_unaggregated_attestations, diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index b282adecd5..bcc50990ec 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -1,4 +1,5 @@ #![cfg(not(debug_assertions))] +#![allow(clippy::result_large_err)] use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::{ diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index cfc53c8ce0..b6d729cc61 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -1,4 +1,5 @@ #![cfg(not(debug_assertions))] +#![allow(clippy::result_large_err)] use beacon_chain::attestation_verification::Error as AttnError; use beacon_chain::block_verification_types::RpcBlock; diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index d6796f6a05..90968fa213 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ 
-2048,7 +2048,7 @@ fn verify_builder_bid( .cloned() .map(|withdrawals| { Withdrawals::::try_from(withdrawals) - .map_err(InvalidBuilderPayload::SszTypesError) + .map_err(|e| Box::new(InvalidBuilderPayload::SszTypesError(e))) .map(|w| w.tree_hash_root()) }) .transpose()?; diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 74710c4ed2..e9dfa2876a 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -1,3 +1,4 @@ +#![allow(clippy::result_large_err)] //! This crate contains a HTTP server which serves the endpoints listed here: //! //! https://github.com/ethereum/beacon-APIs diff --git a/slasher/service/src/lib.rs b/slasher/service/src/lib.rs index ac15b49ee9..69ec59aa2c 100644 --- a/slasher/service/src/lib.rs +++ b/slasher/service/src/lib.rs @@ -1,3 +1,4 @@ +#![allow(clippy::result_large_err)] mod service; pub use service::SlasherService; From dbfb6fd9231f5a7c74667e5adbdaddacf4f1b768 Mon Sep 17 00:00:00 2001 From: Mac L Date: Sat, 7 Mar 2026 01:09:31 +0200 Subject: [PATCH 03/43] Remove `arbitrary-fuzz` (#8936) We have duplicated features which enable `arbitrary` throughout the codebase. These are `arbitrary` and `arbitrary-fuzz`. I think historically these were supposed to be distinct however in practice these function identically and so we can unify them into a single feature to avoid confusion. 
Co-Authored-By: Mac L --- Makefile | 4 ++-- common/eip_3076/Cargo.toml | 2 +- common/eip_3076/src/lib.rs | 10 +++++----- consensus/state_processing/Cargo.toml | 4 ++-- consensus/state_processing/src/envelope_processing.rs | 2 +- consensus/state_processing/src/per_block_processing.rs | 8 ++++---- .../per_epoch_processing/base/validator_statuses.rs | 10 +++++----- consensus/state_processing/src/verify_operation.rs | 8 ++++---- consensus/types/Cargo.toml | 1 - validator_client/slashing_protection/Cargo.toml | 2 +- .../slashing_protection/src/interchange_test.rs | 8 ++++---- 11 files changed, 29 insertions(+), 30 deletions(-) diff --git a/Makefile b/Makefile index 9786c17cc9..ad1bbbb8e8 100644 --- a/Makefile +++ b/Makefile @@ -321,8 +321,8 @@ make-ef-tests-nightly: # Verifies that crates compile with fuzzing features enabled arbitrary-fuzz: - cargo check -p state_processing --features arbitrary-fuzz,$(TEST_FEATURES) - cargo check -p slashing_protection --features arbitrary-fuzz,$(TEST_FEATURES) + cargo check -p state_processing --features arbitrary,$(TEST_FEATURES) + cargo check -p slashing_protection --features arbitrary,$(TEST_FEATURES) # Runs cargo audit (Audit Cargo.lock files for crates with security vulnerabilities reported to the RustSec Advisory Database) audit: install-audit audit-CI diff --git a/common/eip_3076/Cargo.toml b/common/eip_3076/Cargo.toml index 058e1fd1a0..157fe12cb3 100644 --- a/common/eip_3076/Cargo.toml +++ b/common/eip_3076/Cargo.toml @@ -6,7 +6,7 @@ edition = { workspace = true } [features] default = [] -arbitrary-fuzz = ["dep:arbitrary", "types/arbitrary"] +arbitrary = ["dep:arbitrary", "types/arbitrary"] json = ["dep:serde_json"] [dependencies] diff --git a/common/eip_3076/src/lib.rs b/common/eip_3076/src/lib.rs index cdd05d7b1e..0bf1a94d0e 100644 --- a/common/eip_3076/src/lib.rs +++ b/common/eip_3076/src/lib.rs @@ -13,7 +13,7 @@ pub enum Error { #[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] #[serde(deny_unknown_fields)] 
-#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] pub struct InterchangeMetadata { #[serde(with = "serde_utils::quoted_u64::require_quotes")] pub interchange_format_version: u64, @@ -22,7 +22,7 @@ pub struct InterchangeMetadata { #[derive(Debug, Clone, PartialEq, Eq, Hash, Deserialize, Serialize)] #[serde(deny_unknown_fields)] -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] pub struct InterchangeData { pub pubkey: PublicKeyBytes, pub signed_blocks: Vec, @@ -31,7 +31,7 @@ pub struct InterchangeData { #[derive(Debug, Clone, PartialEq, Eq, Hash, Deserialize, Serialize)] #[serde(deny_unknown_fields)] -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] pub struct SignedBlock { #[serde(with = "serde_utils::quoted_u64::require_quotes")] pub slot: Slot, @@ -41,7 +41,7 @@ pub struct SignedBlock { #[derive(Debug, Clone, PartialEq, Eq, Hash, Deserialize, Serialize)] #[serde(deny_unknown_fields)] -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] pub struct SignedAttestation { #[serde(with = "serde_utils::quoted_u64::require_quotes")] pub source_epoch: Epoch, @@ -52,7 +52,7 @@ pub struct SignedAttestation { } #[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] pub struct Interchange { pub metadata: InterchangeMetadata, pub data: Vec, diff --git a/consensus/state_processing/Cargo.toml b/consensus/state_processing/Cargo.toml index 7426995439..ae0af03231 100644 --- a/consensus/state_processing/Cargo.toml +++ b/consensus/state_processing/Cargo.toml @@ -7,10 +7,10 @@ edition = { workspace = true } 
[features] default = [] fake_crypto = ["bls/fake_crypto"] -arbitrary-fuzz = [ +arbitrary = [ "dep:arbitrary", "smallvec/arbitrary", - "types/arbitrary-fuzz", + "types/arbitrary", "merkle_proof/arbitrary", "ethereum_ssz/arbitrary", "ssz_types/arbitrary", diff --git a/consensus/state_processing/src/envelope_processing.rs b/consensus/state_processing/src/envelope_processing.rs index c2cfeae5d3..be6b7c1b29 100644 --- a/consensus/state_processing/src/envelope_processing.rs +++ b/consensus/state_processing/src/envelope_processing.rs @@ -21,7 +21,7 @@ macro_rules! envelope_verify { } /// The strategy to be used when validating the payloads state root. -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive(PartialEq, Clone, Copy)] pub enum VerifyStateRoot { /// Validate state root. diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index 037e1c7cc7..5aa610e98e 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -55,12 +55,12 @@ use crate::common::update_progressive_balances_cache::{ initialize_progressive_balances_cache, update_progressive_balances_metrics, }; use crate::epoch_cache::initialize_epoch_cache; -#[cfg(feature = "arbitrary-fuzz")] +#[cfg(feature = "arbitrary")] use arbitrary::Arbitrary; use tracing::instrument; /// The strategy to be used when validating the block's signatures. -#[cfg_attr(feature = "arbitrary-fuzz", derive(Arbitrary))] +#[cfg_attr(feature = "arbitrary", derive(Arbitrary))] #[derive(PartialEq, Clone, Copy, Debug)] pub enum BlockSignatureStrategy { /// Do not validate any signature. Use with caution. @@ -74,7 +74,7 @@ pub enum BlockSignatureStrategy { } /// The strategy to be used when validating the block's signatures. 
-#[cfg_attr(feature = "arbitrary-fuzz", derive(Arbitrary))] +#[cfg_attr(feature = "arbitrary", derive(Arbitrary))] #[derive(PartialEq, Clone, Copy)] pub enum VerifySignatures { /// Validate all signatures encountered. @@ -90,7 +90,7 @@ impl VerifySignatures { } /// Control verification of the latest block header. -#[cfg_attr(feature = "arbitrary-fuzz", derive(Arbitrary))] +#[cfg_attr(feature = "arbitrary", derive(Arbitrary))] #[derive(PartialEq, Clone, Copy)] pub enum VerifyBlockRoot { True, diff --git a/consensus/state_processing/src/per_epoch_processing/base/validator_statuses.rs b/consensus/state_processing/src/per_epoch_processing/base/validator_statuses.rs index c5ec80b92a..3e4f7e8189 100644 --- a/consensus/state_processing/src/per_epoch_processing/base/validator_statuses.rs +++ b/consensus/state_processing/src/per_epoch_processing/base/validator_statuses.rs @@ -2,7 +2,7 @@ use crate::common::attesting_indices_base::get_attesting_indices; use safe_arith::SafeArith; use types::{BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, PendingAttestation}; -#[cfg(feature = "arbitrary-fuzz")] +#[cfg(feature = "arbitrary")] use arbitrary::Arbitrary; /// Sets the boolean `var` on `self` to be true if it is true on `other`. Otherwise leaves `self` @@ -16,7 +16,7 @@ macro_rules! set_self_if_other_is_true { } /// The information required to reward a block producer for including an attestation in a block. -#[cfg_attr(feature = "arbitrary-fuzz", derive(Arbitrary))] +#[cfg_attr(feature = "arbitrary", derive(Arbitrary))] #[derive(Debug, Clone, Copy, PartialEq)] pub struct InclusionInfo { /// The distance between the attestation slot and the slot that attestation was included in a @@ -48,7 +48,7 @@ impl InclusionInfo { } /// Information required to reward some validator during the current and previous epoch. 
-#[cfg_attr(feature = "arbitrary-fuzz", derive(Arbitrary))] +#[cfg_attr(feature = "arbitrary", derive(Arbitrary))] #[derive(Debug, Default, Clone, PartialEq)] pub struct ValidatorStatus { /// True if the validator has been slashed, ever. @@ -118,7 +118,7 @@ impl ValidatorStatus { /// epochs. #[derive(Clone, Debug, PartialEq)] -#[cfg_attr(feature = "arbitrary-fuzz", derive(Arbitrary))] +#[cfg_attr(feature = "arbitrary", derive(Arbitrary))] pub struct TotalBalances { /// The effective balance increment from the spec. effective_balance_increment: u64, @@ -175,7 +175,7 @@ impl TotalBalances { /// Summarised information about validator participation in the _previous and _current_ epochs of /// some `BeaconState`. -#[cfg_attr(feature = "arbitrary-fuzz", derive(Arbitrary))] +#[cfg_attr(feature = "arbitrary", derive(Arbitrary))] #[derive(Debug, Clone)] pub struct ValidatorStatuses { /// Information about each individual validator from the state's validator registry. diff --git a/consensus/state_processing/src/verify_operation.rs b/consensus/state_processing/src/verify_operation.rs index a13786f9f6..1e9c3d5fe3 100644 --- a/consensus/state_processing/src/verify_operation.rs +++ b/consensus/state_processing/src/verify_operation.rs @@ -7,7 +7,7 @@ use crate::per_block_processing::{ verify_attester_slashing, verify_bls_to_execution_change, verify_exit, verify_proposer_slashing, }; -#[cfg(feature = "arbitrary-fuzz")] +#[cfg(feature = "arbitrary")] use arbitrary::Arbitrary; use educe::Educe; use smallvec::{SmallVec, smallvec}; @@ -41,14 +41,14 @@ pub trait TransformPersist { /// The inner `op` field is private, meaning instances of this type can only be constructed /// by calling `validate`. 
#[derive(Educe, Debug, Clone)] -#[cfg_attr(feature = "arbitrary-fuzz", derive(Arbitrary))] +#[cfg_attr(feature = "arbitrary", derive(Arbitrary))] #[educe( PartialEq, Eq, Hash(bound(T: TransformPersist + std::hash::Hash, E: EthSpec)) )] #[cfg_attr( - feature = "arbitrary-fuzz", + feature = "arbitrary", arbitrary(bound = "T: TransformPersist + Arbitrary<'arbitrary>, E: EthSpec") )] pub struct SigVerifiedOp { @@ -139,7 +139,7 @@ struct SigVerifiedOpDecode { /// We need to store multiple `ForkVersion`s because attester slashings contain two indexed /// attestations which may be signed using different versions. #[derive(Debug, PartialEq, Eq, Clone, Hash, Encode, Decode, TestRandom)] -#[cfg_attr(feature = "arbitrary-fuzz", derive(Arbitrary))] +#[cfg_attr(feature = "arbitrary", derive(Arbitrary))] pub struct VerifiedAgainst { fork_versions: SmallVec<[ForkVersion; MAX_FORKS_VERIFIED_AGAINST]>, } diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index e7e382714b..c5ced83320 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -22,7 +22,6 @@ arbitrary = [ "ssz_types/arbitrary", "swap_or_not_shuffle/arbitrary", ] -arbitrary-fuzz = ["arbitrary"] portable = ["bls/supranational-portable"] [dependencies] diff --git a/validator_client/slashing_protection/Cargo.toml b/validator_client/slashing_protection/Cargo.toml index 695a693385..8017941ca6 100644 --- a/validator_client/slashing_protection/Cargo.toml +++ b/validator_client/slashing_protection/Cargo.toml @@ -6,7 +6,7 @@ edition = { workspace = true } autotests = false [features] -arbitrary-fuzz = ["dep:arbitrary", "types/arbitrary-fuzz", "eip_3076/arbitrary-fuzz"] +arbitrary = ["dep:arbitrary", "types/arbitrary", "eip_3076/arbitrary"] portable = ["types/portable"] [dependencies] diff --git a/validator_client/slashing_protection/src/interchange_test.rs b/validator_client/slashing_protection/src/interchange_test.rs index c5c3df7ea4..996116dd1c 100644 --- 
a/validator_client/slashing_protection/src/interchange_test.rs +++ b/validator_client/slashing_protection/src/interchange_test.rs @@ -11,7 +11,7 @@ use tempfile::tempdir; use types::{Epoch, Hash256, Slot}; #[derive(Debug, Clone, Deserialize, Serialize)] -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] pub struct MultiTestCase { pub name: String, pub genesis_validators_root: Hash256, @@ -19,7 +19,7 @@ pub struct MultiTestCase { } #[derive(Debug, Clone, Deserialize, Serialize)] -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] pub struct TestCase { pub should_succeed: bool, pub contains_slashable_data: bool, @@ -29,7 +29,7 @@ pub struct TestCase { } #[derive(Debug, Clone, Deserialize, Serialize)] -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] pub struct TestBlock { pub pubkey: PublicKeyBytes, pub slot: Slot, @@ -39,7 +39,7 @@ pub struct TestBlock { } #[derive(Debug, Clone, Deserialize, Serialize)] -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] pub struct TestAttestation { pub pubkey: PublicKeyBytes, pub source_epoch: Epoch, From efe43f769967a971fa3006ec764e622109b04e6d Mon Sep 17 00:00:00 2001 From: Akihito Nakano Date: Sat, 7 Mar 2026 08:09:33 +0900 Subject: [PATCH 04/43] Fix cargo-sort errors (#8945) The `cargo-sort` job in CI is [failing](https://github.com/sigp/lighthouse/actions/runs/22781651620/job/66088700318?pr=8932) since [cargo-sort v2.1.1](https://github.com/DevinR528/cargo-sort/releases/tag/v2.1.1) has been released, which reports new errors for our Cargo.toml files. Ran `cargo-sort` formatter locally with the new version. 
Co-Authored-By: ackintosh --- account_manager/Cargo.toml | 5 +---- beacon_node/Cargo.toml | 13 +++++-------- beacon_node/beacon_chain/Cargo.toml | 9 ++++++--- common/logging/Cargo.toml | 3 ++- consensus/types/Cargo.toml | 5 +---- 5 files changed, 15 insertions(+), 20 deletions(-) diff --git a/account_manager/Cargo.toml b/account_manager/Cargo.toml index 8dd50cbc6e..05e6f12554 100644 --- a/account_manager/Cargo.toml +++ b/account_manager/Cargo.toml @@ -1,10 +1,7 @@ [package] name = "account_manager" version = { workspace = true } -authors = [ - "Paul Hauner ", - "Luke Anderson ", -] +authors = ["Paul Hauner ", "Luke Anderson "] edition = { workspace = true } [dependencies] diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 5352814dd5..ebefa6a451 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,10 +1,7 @@ [package] name = "beacon_node" version = { workspace = true } -authors = [ - "Paul Hauner ", - "Age Manning ", "Age Manning "] edition = { workspace = true } [features] -test_logger = [] # Print log output to stderr when running tests instead of dropping it +# Print log output to stderr when running tests instead of dropping it. +test_logger = [] [dependencies] chrono = { version = "0.4", default-features = false, features = ["clock", "std"] } diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index c5ced83320..c09e3d6931 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -1,10 +1,7 @@ [package] name = "types" version = "0.2.1" -authors = [ - "Paul Hauner ", - "Age Manning ", -] +authors = ["Paul Hauner ", "Age Manning "] edition = { workspace = true } [features] From 537c2ba8b3e49dd9e93dd5df5b67003f5bb91f42 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 9 Mar 2026 11:35:52 +1100 Subject: [PATCH 05/43] Remove `/lighthouse/analysis/block_rewards` APIs (#8935) Mark pointed out that these APIs will require updates for Gloas, so I figured we may as well get rid of them. 
As far as I know, blockprint was the only use case and it is now defunct. The consensus block value is included in getBlock API responses, so there's no reason for VCs to use the `POST` API, and there is now a standard API for the rewards of canonical blocks. The SSE event was non-standard, and likely only used by blockprint as well. Co-Authored-By: Michael Sproul --- .github/forbidden-files.txt | 3 + beacon_node/beacon_chain/src/block_reward.rs | 140 -------------- .../beacon_chain/src/block_verification.rs | 18 -- beacon_node/beacon_chain/src/events.rs | 15 -- beacon_node/beacon_chain/src/lib.rs | 1 - beacon_node/http_api/src/block_rewards.rs | 178 ------------------ beacon_node/http_api/src/lib.rs | 34 ---- book/src/api_lighthouse.md | 54 ------ common/eth2/src/lighthouse.rs | 25 +-- common/eth2/src/lighthouse/block_rewards.rs | 60 ------ common/eth2/src/types.rs | 17 -- 11 files changed, 4 insertions(+), 541 deletions(-) delete mode 100644 beacon_node/beacon_chain/src/block_reward.rs delete mode 100644 beacon_node/http_api/src/block_rewards.rs delete mode 100644 common/eth2/src/lighthouse/block_rewards.rs diff --git a/.github/forbidden-files.txt b/.github/forbidden-files.txt index ec89bd2e4b..a08a6b4e98 100644 --- a/.github/forbidden-files.txt +++ b/.github/forbidden-files.txt @@ -5,3 +5,6 @@ beacon_node/beacon_chain/src/otb_verification_service.rs beacon_node/store/src/partial_beacon_state.rs beacon_node/store/src/consensus_context.rs +beacon_node/beacon_chain/src/block_reward.rs +beacon_node/http_api/src/block_rewards.rs +common/eth2/src/lighthouse/block_rewards.rs diff --git a/beacon_node/beacon_chain/src/block_reward.rs b/beacon_node/beacon_chain/src/block_reward.rs deleted file mode 100644 index f3924bb473..0000000000 --- a/beacon_node/beacon_chain/src/block_reward.rs +++ /dev/null @@ -1,140 +0,0 @@ -use crate::{BeaconChain, BeaconChainError, BeaconChainTypes}; -use eth2::lighthouse::{AttestationRewards, BlockReward, BlockRewardMeta}; -use 
operation_pool::{ - AttMaxCover, MaxCover, PROPOSER_REWARD_DENOMINATOR, RewardCache, SplitAttestation, -}; -use state_processing::{ - common::get_attesting_indices_from_state, - per_block_processing::altair::sync_committee::compute_sync_aggregate_rewards, -}; -use types::{AbstractExecPayload, BeaconBlockRef, BeaconState, EthSpec, Hash256}; - -impl BeaconChain { - pub fn compute_block_reward>( - &self, - block: BeaconBlockRef<'_, T::EthSpec, Payload>, - block_root: Hash256, - state: &BeaconState, - reward_cache: &mut RewardCache, - include_attestations: bool, - ) -> Result { - if block.slot() != state.slot() { - return Err(BeaconChainError::BlockRewardSlotError); - } - - reward_cache.update(state)?; - - let total_active_balance = state.get_total_active_balance()?; - - let split_attestations = block - .body() - .attestations() - .map(|att| { - let attesting_indices = get_attesting_indices_from_state(state, att)?; - Ok(SplitAttestation::new( - att.clone_as_attestation(), - attesting_indices, - )) - }) - .collect::, BeaconChainError>>()?; - - let mut per_attestation_rewards = split_attestations - .iter() - .map(|att| { - AttMaxCover::new( - att.as_ref(), - state, - reward_cache, - total_active_balance, - &self.spec, - ) - .ok_or(BeaconChainError::BlockRewardAttestationError) - }) - .collect::, _>>()?; - - // Update the attestation rewards for each previous attestation included. - // This is O(n^2) in the number of attestations n. 
- for i in 0..per_attestation_rewards.len() { - let (updated, to_update) = per_attestation_rewards.split_at_mut(i + 1); - let latest_att = &updated[i]; - - for att in to_update { - att.update_covering_set(latest_att.intermediate(), latest_att.covering_set()); - } - } - - let mut prev_epoch_total = 0; - let mut curr_epoch_total = 0; - - for cover in &per_attestation_rewards { - if cover.att.data.slot.epoch(T::EthSpec::slots_per_epoch()) == state.current_epoch() { - curr_epoch_total += cover.score() as u64; - } else { - prev_epoch_total += cover.score() as u64; - } - } - - let attestation_total = prev_epoch_total + curr_epoch_total; - - // Drop the covers. - let per_attestation_rewards = per_attestation_rewards - .into_iter() - .map(|cover| { - // Divide each reward numerator by the denominator. This can lead to the total being - // less than the sum of the individual rewards due to the fact that integer division - // does not distribute over addition. - let mut rewards = cover.fresh_validators_rewards; - rewards - .values_mut() - .for_each(|reward| *reward /= PROPOSER_REWARD_DENOMINATOR); - rewards - }) - .collect(); - - // Add the attestation data if desired. - let attestations = if include_attestations { - block - .body() - .attestations() - .map(|a| a.data().clone()) - .collect() - } else { - vec![] - }; - - let attestation_rewards = AttestationRewards { - total: attestation_total, - prev_epoch_total, - curr_epoch_total, - per_attestation_rewards, - attestations, - }; - - // Sync committee rewards. 
- let sync_committee_rewards = if let Ok(sync_aggregate) = block.body().sync_aggregate() { - let (_, proposer_reward_per_bit) = compute_sync_aggregate_rewards(state, &self.spec) - .map_err(|_| BeaconChainError::BlockRewardSyncError)?; - sync_aggregate.sync_committee_bits.num_set_bits() as u64 * proposer_reward_per_bit - } else { - 0 - }; - - // Total, metadata - let total = attestation_total + sync_committee_rewards; - - let meta = BlockRewardMeta { - slot: block.slot(), - parent_slot: state.latest_block_header().slot, - proposer_index: block.proposer_index(), - graffiti: block.body().graffiti().as_utf8_lossy(), - }; - - Ok(BlockReward { - total, - block_root, - meta, - attestation_rewards, - sync_committee_rewards, - }) - } -} diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index d126c3af00..2021b0d952 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -1571,24 +1571,6 @@ impl ExecutionPendingBlock { metrics::stop_timer(committee_timer); - /* - * If we have block reward listeners, compute the block reward and push it to the - * event handler. - */ - if let Some(ref event_handler) = chain.event_handler - && event_handler.has_block_reward_subscribers() - { - let mut reward_cache = Default::default(); - let block_reward = chain.compute_block_reward( - block.message(), - block_root, - &state, - &mut reward_cache, - true, - )?; - event_handler.register(EventKind::BlockReward(block_reward)); - } - /* * Perform `per_block_processing` on the block and state, returning early if the block is * invalid. 
diff --git a/beacon_node/beacon_chain/src/events.rs b/beacon_node/beacon_chain/src/events.rs index 63be944eea..276edc3fe6 100644 --- a/beacon_node/beacon_chain/src/events.rs +++ b/beacon_node/beacon_chain/src/events.rs @@ -21,7 +21,6 @@ pub struct ServerSentEventHandler { late_head: Sender>, light_client_finality_update_tx: Sender>, light_client_optimistic_update_tx: Sender>, - block_reward_tx: Sender>, proposer_slashing_tx: Sender>, attester_slashing_tx: Sender>, bls_to_execution_change_tx: Sender>, @@ -48,7 +47,6 @@ impl ServerSentEventHandler { let (late_head, _) = broadcast::channel(capacity); let (light_client_finality_update_tx, _) = broadcast::channel(capacity); let (light_client_optimistic_update_tx, _) = broadcast::channel(capacity); - let (block_reward_tx, _) = broadcast::channel(capacity); let (proposer_slashing_tx, _) = broadcast::channel(capacity); let (attester_slashing_tx, _) = broadcast::channel(capacity); let (bls_to_execution_change_tx, _) = broadcast::channel(capacity); @@ -69,7 +67,6 @@ impl ServerSentEventHandler { late_head, light_client_finality_update_tx, light_client_optimistic_update_tx, - block_reward_tx, proposer_slashing_tx, attester_slashing_tx, bls_to_execution_change_tx, @@ -142,10 +139,6 @@ impl ServerSentEventHandler { .light_client_optimistic_update_tx .send(kind) .map(|count| log_count("light client optimistic update", count)), - EventKind::BlockReward(_) => self - .block_reward_tx - .send(kind) - .map(|count| log_count("block reward", count)), EventKind::ProposerSlashing(_) => self .proposer_slashing_tx .send(kind) @@ -224,10 +217,6 @@ impl ServerSentEventHandler { self.light_client_optimistic_update_tx.subscribe() } - pub fn subscribe_block_reward(&self) -> Receiver> { - self.block_reward_tx.subscribe() - } - pub fn subscribe_attester_slashing(&self) -> Receiver> { self.attester_slashing_tx.subscribe() } @@ -292,10 +281,6 @@ impl ServerSentEventHandler { self.late_head.receiver_count() > 0 } - pub fn 
has_block_reward_subscribers(&self) -> bool { - self.block_reward_tx.receiver_count() > 0 - } - pub fn has_proposer_slashing_subscribers(&self) -> bool { self.proposer_slashing_tx.receiver_count() > 0 } diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index e1a190ffb3..4d3c3e193e 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -10,7 +10,6 @@ mod beacon_snapshot; pub mod bellatrix_readiness; pub mod blob_verification; mod block_production; -pub mod block_reward; mod block_times_cache; mod block_verification; pub mod block_verification_types; diff --git a/beacon_node/http_api/src/block_rewards.rs b/beacon_node/http_api/src/block_rewards.rs deleted file mode 100644 index 891f024bf9..0000000000 --- a/beacon_node/http_api/src/block_rewards.rs +++ /dev/null @@ -1,178 +0,0 @@ -use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes, WhenSlotSkipped}; -use eth2::lighthouse::{BlockReward, BlockRewardsQuery}; -use lru::LruCache; -use state_processing::BlockReplayer; -use std::num::NonZeroUsize; -use std::sync::Arc; -use tracing::{debug, warn}; -use types::block::BlindedBeaconBlock; -use types::new_non_zero_usize; -use warp_utils::reject::{beacon_state_error, custom_bad_request, unhandled_error}; - -const STATE_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(2); - -/// Fetch block rewards for blocks from the canonical chain. -pub fn get_block_rewards( - query: BlockRewardsQuery, - chain: Arc>, -) -> Result, warp::Rejection> { - let start_slot = query.start_slot; - let end_slot = query.end_slot; - let prior_slot = start_slot - 1; - - if start_slot > end_slot || start_slot == 0 { - return Err(custom_bad_request(format!( - "invalid start and end: {}, {}", - start_slot, end_slot - ))); - } - - let end_block_root = chain - .block_root_at_slot(end_slot, WhenSlotSkipped::Prev) - .map_err(unhandled_error)? 
- .ok_or_else(|| custom_bad_request(format!("block at end slot {} unknown", end_slot)))?; - - let blocks = chain - .store - .load_blocks_to_replay(start_slot, end_slot, end_block_root) - .map_err(|e| unhandled_error(BeaconChainError::from(e)))?; - - let state_root = chain - .state_root_at_slot(prior_slot) - .map_err(unhandled_error)? - .ok_or_else(|| custom_bad_request(format!("prior state at slot {} unknown", prior_slot)))?; - - // This branch is reached from the HTTP API. We assume the user wants - // to cache states so that future calls are faster. - let mut state = chain - .get_state(&state_root, Some(prior_slot), true) - .and_then(|maybe_state| maybe_state.ok_or(BeaconChainError::MissingBeaconState(state_root))) - .map_err(unhandled_error)?; - - state - .build_caches(&chain.spec) - .map_err(beacon_state_error)?; - - let mut reward_cache = Default::default(); - let mut block_rewards = Vec::with_capacity(blocks.len()); - - let block_replayer = BlockReplayer::new(state, &chain.spec) - .pre_block_hook(Box::new(|state, block| { - state.build_all_committee_caches(&chain.spec)?; - - // Compute block reward. - let block_reward = chain.compute_block_reward( - block.message(), - block.canonical_root(), - state, - &mut reward_cache, - query.include_attestations, - )?; - block_rewards.push(block_reward); - Ok(()) - })) - .state_root_iter( - chain - .forwards_iter_state_roots_until(prior_slot, end_slot) - .map_err(unhandled_error)?, - ) - .no_signature_verification() - .minimal_block_root_verification() - .apply_blocks(blocks, None) - .map_err(unhandled_error)?; - - if block_replayer.state_root_miss() { - warn!(%start_slot, %end_slot, "Block reward state root miss"); - } - - drop(block_replayer); - - Ok(block_rewards) -} - -/// Compute block rewards for blocks passed in as input. 
-pub fn compute_block_rewards( - blocks: Vec>, - chain: Arc>, -) -> Result, warp::Rejection> { - let mut block_rewards = Vec::with_capacity(blocks.len()); - let mut state_cache = LruCache::new(STATE_CACHE_SIZE); - let mut reward_cache = Default::default(); - - for block in blocks { - let parent_root = block.parent_root(); - - // Check LRU cache for a constructed state from a previous iteration. - let state = if let Some(state) = state_cache.get(&(parent_root, block.slot())) { - debug!( - ?parent_root, - slot = %block.slot(), - "Re-using cached state for block rewards" - ); - state - } else { - debug!( - ?parent_root, - slot = %block.slot(), - "Fetching state for block rewards" - ); - let parent_block = chain - .get_blinded_block(&parent_root) - .map_err(unhandled_error)? - .ok_or_else(|| { - custom_bad_request(format!( - "parent block not known or not canonical: {:?}", - parent_root - )) - })?; - - // This branch is reached from the HTTP API. We assume the user wants - // to cache states so that future calls are faster. - let parent_state = chain - .get_state(&parent_block.state_root(), Some(parent_block.slot()), true) - .map_err(unhandled_error)? - .ok_or_else(|| { - custom_bad_request(format!( - "no state known for parent block: {:?}", - parent_root - )) - })?; - - let block_replayer = BlockReplayer::new(parent_state, &chain.spec) - .no_signature_verification() - .state_root_iter([Ok((parent_block.state_root(), parent_block.slot()))].into_iter()) - .minimal_block_root_verification() - .apply_blocks(vec![], Some(block.slot())) - .map_err(unhandled_error::)?; - - if block_replayer.state_root_miss() { - warn!( - parent_slot = %parent_block.slot(), - slot = %block.slot(), - "Block reward state root miss" - ); - } - - let mut state = block_replayer.into_state(); - state - .build_all_committee_caches(&chain.spec) - .map_err(beacon_state_error)?; - - state_cache.get_or_insert((parent_root, block.slot()), || state) - }; - - // Compute block reward. 
- let block_reward = chain - .compute_block_reward( - block.to_ref(), - block.canonical_root(), - state, - &mut reward_cache, - true, - ) - .map_err(unhandled_error)?; - block_rewards.push(block_reward); - } - - Ok(block_rewards) -} diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index e9dfa2876a..0a0ae683ca 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -12,7 +12,6 @@ mod attester_duties; mod beacon; mod block_id; mod block_packing_efficiency; -mod block_rewards; mod build_block_contents; mod builder_states; mod custody; @@ -3066,34 +3065,6 @@ pub fn serve( }, ); - // GET lighthouse/analysis/block_rewards - let get_lighthouse_block_rewards = warp::path("lighthouse") - .and(warp::path("analysis")) - .and(warp::path("block_rewards")) - .and(warp::query::()) - .and(warp::path::end()) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()) - .then(|query, task_spawner: TaskSpawner, chain| { - task_spawner.blocking_json_task(Priority::P1, move || { - block_rewards::get_block_rewards(query, chain) - }) - }); - - // POST lighthouse/analysis/block_rewards - let post_lighthouse_block_rewards = warp::path("lighthouse") - .and(warp::path("analysis")) - .and(warp::path("block_rewards")) - .and(warp_utils::json::json()) - .and(warp::path::end()) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()) - .then(|blocks, task_spawner: TaskSpawner, chain| { - task_spawner.blocking_json_task(Priority::P1, move || { - block_rewards::compute_block_rewards(blocks, chain) - }) - }); - // GET lighthouse/analysis/attestation_performance/{index} let get_lighthouse_attestation_performance = warp::path("lighthouse") .and(warp::path("analysis")) @@ -3184,9 +3155,6 @@ pub fn serve( api_types::EventTopic::LightClientOptimisticUpdate => { event_handler.subscribe_light_client_optimistic_update() } - api_types::EventTopic::BlockReward => { - event_handler.subscribe_block_reward() - } 
api_types::EventTopic::AttesterSlashing => { event_handler.subscribe_attester_slashing() } @@ -3363,7 +3331,6 @@ pub fn serve( .uor(get_lighthouse_staking) .uor(get_lighthouse_database_info) .uor(get_lighthouse_custody_info) - .uor(get_lighthouse_block_rewards) .uor(get_lighthouse_attestation_performance) .uor(get_beacon_light_client_optimistic_update) .uor(get_beacon_light_client_finality_update) @@ -3414,7 +3381,6 @@ pub fn serve( .uor(post_validator_liveness_epoch) .uor(post_lighthouse_liveness) .uor(post_lighthouse_database_reconstruct) - .uor(post_lighthouse_block_rewards) .uor(post_lighthouse_ui_validator_metrics) .uor(post_lighthouse_ui_validator_info) .uor(post_lighthouse_finalize) diff --git a/book/src/api_lighthouse.md b/book/src/api_lighthouse.md index 0442bf4ec0..2fd7290cb2 100644 --- a/book/src/api_lighthouse.md +++ b/book/src/api_lighthouse.md @@ -590,60 +590,6 @@ Caveats: This is because the state *prior* to the `start_epoch` needs to be loaded from the database, and loading a state on a boundary is most efficient. -## `/lighthouse/analysis/block_rewards` - -Fetch information about the block rewards paid to proposers for a range of consecutive blocks. - -Two query parameters are required: - -- `start_slot` (inclusive): the slot of the first block to compute rewards for. -- `end_slot` (inclusive): the slot of the last block to compute rewards for. 
- -Example: - -```bash -curl -X GET "http://localhost:5052/lighthouse/analysis/block_rewards?start_slot=1&end_slot=1" | jq -``` - -The first few lines of the response would look like: - -```json -[ - { - "total": 637260, - "block_root": "0x4a089c5e390bb98e66b27358f157df825128ea953cee9d191229c0bcf423a4f6", - "meta": { - "slot": "1", - "parent_slot": "0", - "proposer_index": 93, - "graffiti": "EF #vm-eth2-raw-iron-101" - }, - "attestation_rewards": { - "total": 637260, - "prev_epoch_total": 0, - "curr_epoch_total": 637260, - "per_attestation_rewards": [ - { - "50102": 780, - } - ] - } - } -] -``` - -Caveats: - -- Presently only attestation and sync committee rewards are computed. -- The output format is verbose and subject to change. Please see [`BlockReward`][block_reward_src] - in the source. -- For maximum efficiency the `start_slot` should satisfy `start_slot % slots_per_restore_point == 1`. - This is because the state *prior* to the `start_slot` needs to be loaded from the database, and - loading a state on a boundary is most efficient. 
- -[block_reward_src]: -https://github.com/sigp/lighthouse/tree/unstable/common/eth2/src/lighthouse/block_rewards.rs - ## `/lighthouse/analysis/block_packing` Fetch information about the block packing efficiency of blocks for a range of consecutive diff --git a/common/eth2/src/lighthouse.rs b/common/eth2/src/lighthouse.rs index 993c263cbf..3c039b16b3 100644 --- a/common/eth2/src/lighthouse.rs +++ b/common/eth2/src/lighthouse.rs @@ -2,12 +2,11 @@ mod attestation_performance; mod block_packing_efficiency; -mod block_rewards; mod custody; pub mod sync_state; use crate::{ - BeaconNodeHttpClient, DepositData, Error, Hash256, Slot, + BeaconNodeHttpClient, DepositData, Error, Hash256, lighthouse::sync_state::SyncState, types::{AdminPeer, Epoch, GenericResponse, ValidatorId}, }; @@ -22,7 +21,6 @@ pub use attestation_performance::{ pub use block_packing_efficiency::{ BlockPackingEfficiency, BlockPackingEfficiencyQuery, ProposerInfo, UniqueAttestation, }; -pub use block_rewards::{AttestationRewards, BlockReward, BlockRewardMeta, BlockRewardsQuery}; pub use custody::CustodyInfo; // Define "legacy" implementations of `Option` which use four bytes for encoding the union @@ -317,27 +315,6 @@ impl BeaconNodeHttpClient { Analysis endpoints. */ - /// `GET` lighthouse/analysis/block_rewards?start_slot,end_slot - pub async fn get_lighthouse_analysis_block_rewards( - &self, - start_slot: Slot, - end_slot: Slot, - ) -> Result, Error> { - let mut path = self.server.expose_full().clone(); - - path.path_segments_mut() - .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
- .push("lighthouse") - .push("analysis") - .push("block_rewards"); - - path.query_pairs_mut() - .append_pair("start_slot", &start_slot.to_string()) - .append_pair("end_slot", &end_slot.to_string()); - - self.get(path).await - } - /// `GET` lighthouse/analysis/block_packing?start_epoch,end_epoch pub async fn get_lighthouse_analysis_block_packing( &self, diff --git a/common/eth2/src/lighthouse/block_rewards.rs b/common/eth2/src/lighthouse/block_rewards.rs deleted file mode 100644 index 38070f3539..0000000000 --- a/common/eth2/src/lighthouse/block_rewards.rs +++ /dev/null @@ -1,60 +0,0 @@ -use serde::{Deserialize, Serialize}; -use std::collections::HashMap; -use types::{AttestationData, Hash256, Slot}; - -/// Details about the rewards paid to a block proposer for proposing a block. -/// -/// All rewards in GWei. -/// -/// Presently this only counts attestation rewards, but in future should be expanded -/// to include information on slashings and sync committee aggregates too. -#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] -pub struct BlockReward { - /// Sum of all reward components. - pub total: u64, - /// Block root of the block that these rewards are for. - pub block_root: Hash256, - /// Metadata about the block, particularly reward-relevant metadata. - pub meta: BlockRewardMeta, - /// Rewards due to attestations. - pub attestation_rewards: AttestationRewards, - /// Sum of rewards due to sync committee signatures. - pub sync_committee_rewards: u64, -} - -#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] -pub struct BlockRewardMeta { - pub slot: Slot, - pub parent_slot: Slot, - pub proposer_index: u64, - pub graffiti: String, -} - -#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] -pub struct AttestationRewards { - /// Total block reward from attestations included. - pub total: u64, - /// Total rewards from previous epoch attestations. - pub prev_epoch_total: u64, - /// Total rewards from current epoch attestations. 
- pub curr_epoch_total: u64, - /// Vec of attestation rewards for each attestation included. - /// - /// Each element of the vec is a map from validator index to reward. - pub per_attestation_rewards: Vec>, - /// The attestations themselves (optional). - #[serde(default, skip_serializing_if = "Vec::is_empty")] - pub attestations: Vec, -} - -/// Query parameters for the `/lighthouse/block_rewards` endpoint. -#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] -pub struct BlockRewardsQuery { - /// Lower slot limit for block rewards returned (inclusive). - pub start_slot: Slot, - /// Upper slot limit for block rewards returned (inclusive). - pub end_slot: Slot, - /// Include the full attestations themselves? - #[serde(default)] - pub include_attestations: bool, -} diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index f8376d430c..2f86170812 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -37,9 +37,6 @@ pub mod beacon_response { pub use crate::beacon_response::*; } -#[cfg(feature = "lighthouse")] -use crate::lighthouse::BlockReward; - // Re-export error types from the unified error module pub use crate::error::{ErrorMessage, Failure, IndexedErrorMessage, ResponseError as Error}; @@ -1199,8 +1196,6 @@ pub enum EventKind { LateHead(SseLateHead), LightClientFinalityUpdate(Box>>), LightClientOptimisticUpdate(Box>>), - #[cfg(feature = "lighthouse")] - BlockReward(BlockReward), PayloadAttributes(VersionedSsePayloadAttributes), ProposerSlashing(Box), AttesterSlashing(Box>), @@ -1225,8 +1220,6 @@ impl EventKind { EventKind::LateHead(_) => "late_head", EventKind::LightClientFinalityUpdate(_) => "light_client_finality_update", EventKind::LightClientOptimisticUpdate(_) => "light_client_optimistic_update", - #[cfg(feature = "lighthouse")] - EventKind::BlockReward(_) => "block_reward", EventKind::ProposerSlashing(_) => "proposer_slashing", EventKind::AttesterSlashing(_) => "attester_slashing", EventKind::BlsToExecutionChange(_) => 
"bls_to_execution_change", @@ -1302,10 +1295,6 @@ impl EventKind { })?), ))) } - #[cfg(feature = "lighthouse")] - "block_reward" => Ok(EventKind::BlockReward(serde_json::from_str(data).map_err( - |e| ServerError::InvalidServerSentEvent(format!("Block Reward: {:?}", e)), - )?)), "attester_slashing" => Ok(EventKind::AttesterSlashing( serde_json::from_str(data).map_err(|e| { ServerError::InvalidServerSentEvent(format!("Attester Slashing: {:?}", e)) @@ -1355,8 +1344,6 @@ pub enum EventTopic { PayloadAttributes, LightClientFinalityUpdate, LightClientOptimisticUpdate, - #[cfg(feature = "lighthouse")] - BlockReward, AttesterSlashing, ProposerSlashing, BlsToExecutionChange, @@ -1382,8 +1369,6 @@ impl FromStr for EventTopic { "late_head" => Ok(EventTopic::LateHead), "light_client_finality_update" => Ok(EventTopic::LightClientFinalityUpdate), "light_client_optimistic_update" => Ok(EventTopic::LightClientOptimisticUpdate), - #[cfg(feature = "lighthouse")] - "block_reward" => Ok(EventTopic::BlockReward), "attester_slashing" => Ok(EventTopic::AttesterSlashing), "proposer_slashing" => Ok(EventTopic::ProposerSlashing), "bls_to_execution_change" => Ok(EventTopic::BlsToExecutionChange), @@ -1410,8 +1395,6 @@ impl fmt::Display for EventTopic { EventTopic::LateHead => write!(f, "late_head"), EventTopic::LightClientFinalityUpdate => write!(f, "light_client_finality_update"), EventTopic::LightClientOptimisticUpdate => write!(f, "light_client_optimistic_update"), - #[cfg(feature = "lighthouse")] - EventTopic::BlockReward => write!(f, "block_reward"), EventTopic::AttesterSlashing => write!(f, "attester_slashing"), EventTopic::ProposerSlashing => write!(f, "proposer_slashing"), EventTopic::BlsToExecutionChange => write!(f, "bls_to_execution_change"), From 7dab32dd16c997f592836f9728b2a2b4bf3ba756 Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Mon, 9 Mar 2026 14:23:34 +0900 Subject: [PATCH 06/43] Gloas payload envelope processing (#8806) Co-Authored-By: Eitan Seri- Levi 
Co-Authored-By: Eitan Seri-Levi Co-Authored-By: Jimmy Chen Co-Authored-By: Michael Sproul Co-Authored-By: Michael Sproul --- beacon_node/beacon_chain/src/beacon_chain.rs | 106 +---- .../beacon_chain/src/beacon_proposer_cache.rs | 79 +++- .../beacon_chain/src/block_verification.rs | 5 +- beacon_node/beacon_chain/src/builder.rs | 1 + .../beacon_chain/src/envelope_times_cache.rs | 197 ++++++++ .../beacon_chain/src/execution_payload.rs | 37 +- .../beacon_chain/src/historical_blocks.rs | 9 +- beacon_node/beacon_chain/src/lib.rs | 2 + beacon_node/beacon_chain/src/metrics.rs | 28 ++ .../execution_pending_envelope.rs | 105 +++++ .../gossip_verified_envelope.rs | 445 ++++++++++++++++++ .../payload_envelope_verification/import.rs | 354 ++++++++++++++ .../src/payload_envelope_verification/mod.rs | 285 +++++++++++ .../payload_notifier.rs | 94 ++++ .../beacon_chain/tests/block_verification.rs | 2 +- beacon_node/network/src/metrics.rs | 10 + .../gossip_methods.rs | 172 ++++++- .../src/network_beacon_processor/mod.rs | 8 +- beacon_node/network/src/router.rs | 1 + 19 files changed, 1813 insertions(+), 127 deletions(-) create mode 100644 beacon_node/beacon_chain/src/envelope_times_cache.rs create mode 100644 beacon_node/beacon_chain/src/payload_envelope_verification/execution_pending_envelope.rs create mode 100644 beacon_node/beacon_chain/src/payload_envelope_verification/gossip_verified_envelope.rs create mode 100644 beacon_node/beacon_chain/src/payload_envelope_verification/import.rs create mode 100644 beacon_node/beacon_chain/src/payload_envelope_verification/mod.rs create mode 100644 beacon_node/beacon_chain/src/payload_envelope_verification/payload_notifier.rs diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 703ed24420..07f3bb01fa 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -4,9 +4,7 @@ use crate::attestation_verification::{ 
batch_verify_unaggregated_attestations, }; use crate::beacon_block_streamer::{BeaconBlockStreamer, CheckCaches}; -use crate::beacon_proposer_cache::{ - BeaconProposerCache, EpochBlockProposers, ensure_state_can_determine_proposers_for_epoch, -}; +use crate::beacon_proposer_cache::{BeaconProposerCache, EpochBlockProposers}; use crate::blob_verification::{GossipBlobError, GossipVerifiedBlob}; use crate::block_times_cache::BlockTimesCache; use crate::block_verification::{ @@ -26,6 +24,7 @@ use crate::data_availability_checker::{ }; use crate::data_column_verification::{GossipDataColumnError, GossipVerifiedDataColumn}; use crate::early_attester_cache::EarlyAttesterCache; +use crate::envelope_times_cache::EnvelopeTimesCache; use crate::errors::{BeaconChainError as Error, BlockProductionError}; use crate::events::ServerSentEventHandler; use crate::execution_payload::{NotifyExecutionLayer, PreparePayloadHandle, get_execution_payload}; @@ -66,7 +65,6 @@ use crate::sync_committee_verification::{ }; use crate::validator_monitor::{ HISTORIC_EPOCHS as VALIDATOR_MONITOR_HISTORIC_EPOCHS, ValidatorMonitor, get_slot_delay_ms, - timestamp_now, }; use crate::validator_pubkey_cache::ValidatorPubkeyCache; use crate::{ @@ -462,6 +460,8 @@ pub struct BeaconChain { pub early_attester_cache: EarlyAttesterCache, /// A cache used to keep track of various block timings. pub block_times_cache: Arc>, + /// A cache used to keep track of various envelope timings. + pub envelope_times_cache: Arc>, /// A cache used to track pre-finalization block roots for quick rejection. 
pub pre_finalization_block_cache: PreFinalizationBlockCache, /// A cache used to produce light_client server messages @@ -4042,23 +4042,10 @@ impl BeaconChain { // See https://github.com/sigp/lighthouse/issues/2028 let (_, signed_block, block_data) = signed_block.deconstruct(); - match self.get_blobs_or_columns_store_op(block_root, signed_block.slot(), block_data) { - Ok(Some(blobs_or_columns_store_op)) => { - ops.push(blobs_or_columns_store_op); - } - Ok(None) => {} - Err(e) => { - error!( - msg = "Restoring fork choice from disk", - error = &e, - ?block_root, - "Failed to store data columns into the database" - ); - return Err(self - .handle_import_block_db_write_error(fork_choice) - .err() - .unwrap_or(BlockError::InternalError(e))); - } + if let Some(blobs_or_columns_store_op) = + self.get_blobs_or_columns_store_op(block_root, signed_block.slot(), block_data) + { + ops.push(blobs_or_columns_store_op); } let block = signed_block.message(); @@ -4088,7 +4075,7 @@ impl BeaconChain { // We're declaring the block "imported" at this point, since fork choice and the DB know // about it. - let block_time_imported = timestamp_now(); + let block_time_imported = self.slot_clock.now_duration().unwrap_or(Duration::MAX); // compute state proofs for light client updates before inserting the state into the // snapshot cache. @@ -4157,7 +4144,7 @@ impl BeaconChain { } /// Check block's consistentency with any configured weak subjectivity checkpoint. - fn check_block_against_weak_subjectivity_checkpoint( + pub(crate) fn check_block_against_weak_subjectivity_checkpoint( &self, block: BeaconBlockRef, block_root: Hash256, @@ -6407,6 +6394,7 @@ impl BeaconChain { // sync anyway). self.naive_aggregation_pool.write().prune(slot); self.block_times_cache.write().prune(slot); + self.envelope_times_cache.write().prune(slot); // Don't run heavy-weight tasks during sync. 
if self.best_slot() + MAX_PER_SLOT_FORK_CHOICE_DISTANCE < slot { @@ -6466,62 +6454,14 @@ impl BeaconChain { accessor: impl Fn(&EpochBlockProposers) -> Result, state_provider: impl FnOnce() -> Result<(Hash256, BeaconState), E>, ) -> Result { - let cache_entry = self - .beacon_proposer_cache - .lock() - .get_or_insert_key(proposal_epoch, shuffling_decision_block); - - // If the cache entry is not initialised, run the code to initialise it inside a OnceCell. - // This prevents duplication of work across multiple threads. - // - // If it is already initialised, then `get_or_try_init` will return immediately without - // executing the initialisation code at all. - let epoch_block_proposers = cache_entry.get_or_try_init(|| { - // Fetch the state on-demand if the required epoch was missing from the cache. - // If the caller wants to not compute the state they must return an error here and then - // catch it at the call site. - let (state_root, mut state) = state_provider()?; - - // Ensure the state can compute proposer duties for `epoch`. - ensure_state_can_determine_proposers_for_epoch( - &mut state, - state_root, - proposal_epoch, - &self.spec, - )?; - - // Sanity check the state. - let latest_block_root = state.get_latest_block_root(state_root); - let state_decision_block_root = state.proposer_shuffling_decision_root_at_epoch( - proposal_epoch, - latest_block_root, - &self.spec, - )?; - if state_decision_block_root != shuffling_decision_block { - return Err(Error::ProposerCacheIncorrectState { - state_decision_block_root, - requested_decision_block_root: shuffling_decision_block, - } - .into()); - } - - let proposers = state.get_beacon_proposer_indices(proposal_epoch, &self.spec)?; - - // Use fork_at_epoch rather than the state's fork, because post-Fulu we may not have - // advanced the state completely into the new epoch. 
- let fork = self.spec.fork_at_epoch(proposal_epoch); - - debug!( - ?shuffling_decision_block, - epoch = %proposal_epoch, - "Priming proposer shuffling cache" - ); - - Ok::<_, E>(EpochBlockProposers::new(proposal_epoch, fork, proposers)) - })?; - - // Run the accessor function on the computed epoch proposers. - accessor(epoch_block_proposers).map_err(Into::into) + crate::beacon_proposer_cache::with_proposer_cache( + &self.beacon_proposer_cache, + shuffling_decision_block, + proposal_epoch, + accessor, + state_provider, + &self.spec, + ) } /// Runs the `map_fn` with the committee cache for `shuffling_epoch` from the chain with head @@ -7197,16 +7137,16 @@ impl BeaconChain { block_root: Hash256, block_slot: Slot, block_data: AvailableBlockData, - ) -> Result>, String> { + ) -> Option> { match block_data { - AvailableBlockData::NoData => Ok(None), + AvailableBlockData::NoData => None, AvailableBlockData::Blobs(blobs) => { debug!( %block_root, count = blobs.len(), "Writing blobs to store" ); - Ok(Some(StoreOp::PutBlobs(block_root, blobs))) + Some(StoreOp::PutBlobs(block_root, blobs)) } AvailableBlockData::DataColumns(mut data_columns) => { let columns_to_custody = self.custody_columns_for_epoch(Some( @@ -7222,7 +7162,7 @@ impl BeaconChain { count = data_columns.len(), "Writing data columns to store" ); - Ok(Some(StoreOp::PutDataColumns(block_root, data_columns))) + Some(StoreOp::PutDataColumns(block_root, data_columns)) } } } diff --git a/beacon_node/beacon_chain/src/beacon_proposer_cache.rs b/beacon_node/beacon_chain/src/beacon_proposer_cache.rs index 912f7f3bad..b258d7471f 100644 --- a/beacon_node/beacon_chain/src/beacon_proposer_cache.rs +++ b/beacon_node/beacon_chain/src/beacon_proposer_cache.rs @@ -12,12 +12,13 @@ use crate::{BeaconChain, BeaconChainError, BeaconChainTypes}; use fork_choice::ExecutionStatus; use lru::LruCache; use once_cell::sync::OnceCell; +use parking_lot::Mutex; use safe_arith::SafeArith; use smallvec::SmallVec; use 
state_processing::state_advance::partial_state_advance; use std::num::NonZeroUsize; use std::sync::Arc; -use tracing::instrument; +use tracing::{debug, instrument}; use typenum::Unsigned; use types::new_non_zero_usize; use types::{BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, Fork, Hash256, Slot}; @@ -164,6 +165,82 @@ impl BeaconProposerCache { } } +/// Access the proposer cache, computing and caching the proposers if necessary. +/// +/// This is a free function that operates on references to the cache and spec, decoupled from +/// `BeaconChain`. The `accessor` is called with the cached `EpochBlockProposers` for the given +/// `(proposal_epoch, shuffling_decision_block)` key. If the cache entry is missing, the +/// `state_provider` closure is called to produce a state which is then used to compute and +/// cache the proposers. +pub fn with_proposer_cache( + beacon_proposer_cache: &Mutex, + shuffling_decision_block: Hash256, + proposal_epoch: Epoch, + accessor: impl Fn(&EpochBlockProposers) -> Result, + state_provider: impl FnOnce() -> Result<(Hash256, BeaconState), Err>, + spec: &ChainSpec, +) -> Result +where + Spec: EthSpec, + Err: From + From, +{ + let cache_entry = beacon_proposer_cache + .lock() + .get_or_insert_key(proposal_epoch, shuffling_decision_block); + + // If the cache entry is not initialised, run the code to initialise it inside a OnceCell. + // This prevents duplication of work across multiple threads. + // + // If it is already initialised, then `get_or_try_init` will return immediately without + // executing the initialisation code at all. + let epoch_block_proposers = cache_entry.get_or_try_init(|| { + // Fetch the state on-demand if the required epoch was missing from the cache. + // If the caller wants to not compute the state they must return an error here and then + // catch it at the call site. + let (state_root, mut state) = state_provider()?; + + // Ensure the state can compute proposer duties for `epoch`. 
+ ensure_state_can_determine_proposers_for_epoch( + &mut state, + state_root, + proposal_epoch, + spec, + )?; + + // Sanity check the state. + let latest_block_root = state.get_latest_block_root(state_root); + let state_decision_block_root = state.proposer_shuffling_decision_root_at_epoch( + proposal_epoch, + latest_block_root, + spec, + )?; + if state_decision_block_root != shuffling_decision_block { + return Err(BeaconChainError::ProposerCacheIncorrectState { + state_decision_block_root, + requested_decision_block_root: shuffling_decision_block, + } + .into()); + } + + let proposers = state.get_beacon_proposer_indices(proposal_epoch, spec)?; + + // Use fork_at_epoch rather than the state's fork, because post-Fulu we may not have + // advanced the state completely into the new epoch. + let fork = spec.fork_at_epoch(proposal_epoch); + + debug!( + ?shuffling_decision_block, + epoch = %proposal_epoch, + "Priming proposer shuffling cache" + ); + + Ok::<_, Err>(EpochBlockProposers::new(proposal_epoch, fork, proposers)) + })?; + + // Run the accessor function on the computed epoch proposers. + accessor(epoch_block_proposers).map_err(Into::into) +} + /// Compute the proposer duties using the head state without cache. /// /// Return: diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 2021b0d952..b748bf5c6c 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -681,7 +681,8 @@ pub struct SignatureVerifiedBlock { } /// Used to await the result of executing payload with an EE. -type PayloadVerificationHandle = JoinHandle>>; +pub type PayloadVerificationHandle = + JoinHandle>>; /// A wrapper around a `SignedBeaconBlock` that indicates that this block is fully verified and /// ready to import into the `BeaconChain`. 
The validation includes: @@ -1357,7 +1358,7 @@ impl ExecutionPendingBlock { /// verification must be done upstream (e.g., via a `SignatureVerifiedBlock` /// /// Returns an error if the block is invalid, or if the block was unable to be verified. - #[instrument(skip_all, level = "debug")] + #[instrument(skip_all, level = "debug", fields(?block_root))] pub fn from_signature_verified_components( block: MaybeAvailableBlock, block_root: Hash256, diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 66a54d46e8..d5935b492a 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -1023,6 +1023,7 @@ where )), beacon_proposer_cache, block_times_cache: <_>::default(), + envelope_times_cache: <_>::default(), pre_finalization_block_cache: <_>::default(), validator_pubkey_cache: RwLock::new(validator_pubkey_cache), early_attester_cache: <_>::default(), diff --git a/beacon_node/beacon_chain/src/envelope_times_cache.rs b/beacon_node/beacon_chain/src/envelope_times_cache.rs new file mode 100644 index 0000000000..84c936c210 --- /dev/null +++ b/beacon_node/beacon_chain/src/envelope_times_cache.rs @@ -0,0 +1,197 @@ +//! This module provides the `EnvelopeTimesCache` which contains information regarding payload +//! envelope timings. +//! +//! This provides `BeaconChain` and associated functions with access to the timestamps of when a +//! payload envelope was observed, verified, executed, and imported. +//! This allows for better traceability and allows us to determine the root cause for why an +//! envelope was imported late. +//! This allows us to distinguish between the following scenarios: +//! - The envelope was observed late. +//! - Consensus verification was slow. +//! - Execution verification was slow. +//! - The DB write was slow. 
+ +use eth2::types::{Hash256, Slot}; +use std::collections::HashMap; +use std::time::Duration; + +type BlockRoot = Hash256; + +#[derive(Clone, Default)] +pub struct EnvelopeTimestamps { + /// When the envelope was first observed (gossip or RPC). + pub observed: Option, + /// When consensus verification (state transition) completed. + pub consensus_verified: Option, + /// When execution layer verification started. + pub started_execution: Option, + /// When execution layer verification completed. + pub executed: Option, + /// When the envelope was imported into the DB. + pub imported: Option, +} + +/// Delay data for envelope processing, computed relative to the slot start time. +#[derive(Debug, Default)] +pub struct EnvelopeDelays { + /// Time after start of slot we saw the envelope. + pub observed: Option, + /// The time it took to complete consensus verification of the envelope. + pub consensus_verification_time: Option, + /// The time it took to complete execution verification of the envelope. + pub execution_time: Option, + /// Time after execution until the envelope was imported. 
+ pub imported: Option, +} + +impl EnvelopeDelays { + fn new(times: EnvelopeTimestamps, slot_start_time: Duration) -> EnvelopeDelays { + let observed = times + .observed + .and_then(|observed_time| observed_time.checked_sub(slot_start_time)); + let consensus_verification_time = times + .consensus_verified + .and_then(|consensus_verified| consensus_verified.checked_sub(times.observed?)); + let execution_time = times + .executed + .and_then(|executed| executed.checked_sub(times.started_execution?)); + let imported = times + .imported + .and_then(|imported_time| imported_time.checked_sub(times.executed?)); + EnvelopeDelays { + observed, + consensus_verification_time, + execution_time, + imported, + } + } +} + +pub struct EnvelopeTimesCacheValue { + pub slot: Slot, + pub timestamps: EnvelopeTimestamps, + pub peer_id: Option, +} + +impl EnvelopeTimesCacheValue { + fn new(slot: Slot) -> Self { + EnvelopeTimesCacheValue { + slot, + timestamps: Default::default(), + peer_id: None, + } + } +} + +#[derive(Default)] +pub struct EnvelopeTimesCache { + pub cache: HashMap, +} + +impl EnvelopeTimesCache { + /// Set the observation time for `block_root` to `timestamp` if `timestamp` is less than + /// any previous timestamp at which this envelope was observed. + pub fn set_time_observed( + &mut self, + block_root: BlockRoot, + slot: Slot, + timestamp: Duration, + peer_id: Option, + ) { + let entry = self + .cache + .entry(block_root) + .or_insert_with(|| EnvelopeTimesCacheValue::new(slot)); + match entry.timestamps.observed { + Some(existing) if existing <= timestamp => { + // Existing timestamp is earlier, do nothing. + } + _ => { + entry.timestamps.observed = Some(timestamp); + entry.peer_id = peer_id; + } + } + } + + /// Set the timestamp for `field` if that timestamp is less than any previously known value. 
+ fn set_time_if_less( + &mut self, + block_root: BlockRoot, + slot: Slot, + field: impl Fn(&mut EnvelopeTimestamps) -> &mut Option, + timestamp: Duration, + ) { + let entry = self + .cache + .entry(block_root) + .or_insert_with(|| EnvelopeTimesCacheValue::new(slot)); + let existing_timestamp = field(&mut entry.timestamps); + if existing_timestamp.is_none_or(|prev| timestamp < prev) { + *existing_timestamp = Some(timestamp); + } + } + + pub fn set_time_consensus_verified( + &mut self, + block_root: BlockRoot, + slot: Slot, + timestamp: Duration, + ) { + self.set_time_if_less( + block_root, + slot, + |timestamps| &mut timestamps.consensus_verified, + timestamp, + ) + } + + pub fn set_time_started_execution( + &mut self, + block_root: BlockRoot, + slot: Slot, + timestamp: Duration, + ) { + self.set_time_if_less( + block_root, + slot, + |timestamps| &mut timestamps.started_execution, + timestamp, + ) + } + + pub fn set_time_executed(&mut self, block_root: BlockRoot, slot: Slot, timestamp: Duration) { + self.set_time_if_less( + block_root, + slot, + |timestamps| &mut timestamps.executed, + timestamp, + ) + } + + pub fn set_time_imported(&mut self, block_root: BlockRoot, slot: Slot, timestamp: Duration) { + self.set_time_if_less( + block_root, + slot, + |timestamps| &mut timestamps.imported, + timestamp, + ) + } + + pub fn get_envelope_delays( + &self, + block_root: BlockRoot, + slot_start_time: Duration, + ) -> EnvelopeDelays { + if let Some(entry) = self.cache.get(&block_root) { + EnvelopeDelays::new(entry.timestamps.clone(), slot_start_time) + } else { + EnvelopeDelays::default() + } + } + + /// Prune the cache to only store the most recent 2 epochs. 
+ pub fn prune(&mut self, current_slot: Slot) { + self.cache + .retain(|_, entry| entry.slot > current_slot.saturating_sub(64_u64)); + } +} diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index a2ebed32ee..2b03a095f1 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -25,7 +25,6 @@ use state_processing::per_block_processing::{ use std::sync::Arc; use tokio::task::JoinHandle; use tracing::{Instrument, debug_span, warn}; -use tree_hash::TreeHash; use types::execution::BlockProductionVersion; use types::*; @@ -109,12 +108,18 @@ impl PayloadNotifier { if let Some(precomputed_status) = self.payload_verification_status { Ok(precomputed_status) } else { - notify_new_payload(&self.chain, self.block.message()).await + notify_new_payload( + &self.chain, + self.block.message().slot(), + self.block.message().parent_root(), + self.block.message().try_into()?, + ) + .await } } } -/// Verify that `execution_payload` contained by `block` is considered valid by an execution +/// Verify that `execution_payload` is considered valid by an execution /// engine. 
/// /// ## Specification @@ -123,17 +128,21 @@ impl PayloadNotifier { /// contains a few extra checks by running `partially_verify_execution_payload` first: /// /// https://github.com/ethereum/consensus-specs/blob/v1.1.9/specs/bellatrix/beacon-chain.md#notify_new_payload -async fn notify_new_payload( +pub async fn notify_new_payload( chain: &Arc>, - block: BeaconBlockRef<'_, T::EthSpec>, + slot: Slot, + parent_beacon_block_root: Hash256, + new_payload_request: NewPayloadRequest<'_, T::EthSpec>, ) -> Result { let execution_layer = chain .execution_layer .as_ref() .ok_or(ExecutionPayloadError::NoExecutionConnection)?; - let execution_block_hash = block.execution_payload()?.block_hash(); - let new_payload_response = execution_layer.notify_new_payload(block.try_into()?).await; + let execution_block_hash = new_payload_request.execution_payload_ref().block_hash(); + let new_payload_response = execution_layer + .notify_new_payload(new_payload_request.clone()) + .await; match new_payload_response { Ok(status) => match status { @@ -149,10 +158,7 @@ async fn notify_new_payload( ?validation_error, ?latest_valid_hash, ?execution_block_hash, - root = ?block.tree_hash_root(), - graffiti = block.body().graffiti().as_utf8_lossy(), - proposer_index = block.proposer_index(), - slot = %block.slot(), + %slot, method = "new_payload", "Invalid execution payload" ); @@ -175,11 +181,9 @@ async fn notify_new_payload( { // This block has not yet been applied to fork choice, so the latest block that was // imported to fork choice was the parent. 
- let latest_root = block.parent_root(); - chain .process_invalid_execution_payload(&InvalidationOperation::InvalidateMany { - head_block_root: latest_root, + head_block_root: parent_beacon_block_root, always_invalidate_head: false, latest_valid_ancestor: latest_valid_hash, }) @@ -194,10 +198,7 @@ async fn notify_new_payload( warn!( ?validation_error, ?execution_block_hash, - root = ?block.tree_hash_root(), - graffiti = block.body().graffiti().as_utf8_lossy(), - proposer_index = block.proposer_index(), - slot = %block.slot(), + %slot, method = "new_payload", "Invalid execution payload block hash" ); diff --git a/beacon_node/beacon_chain/src/historical_blocks.rs b/beacon_node/beacon_chain/src/historical_blocks.rs index 1dae2258f6..bfda52558e 100644 --- a/beacon_node/beacon_chain/src/historical_blocks.rs +++ b/beacon_node/beacon_chain/src/historical_blocks.rs @@ -165,13 +165,8 @@ impl BeaconChain { } // Store the blobs or data columns too - if let Some(op) = self - .get_blobs_or_columns_store_op(block_root, block.slot(), block_data) - .map_err(|e| { - HistoricalBlockError::StoreError(StoreError::DBError { - message: format!("get_blobs_or_columns_store_op error {e:?}"), - }) - })? 
+ if let Some(op) = + self.get_blobs_or_columns_store_op(block_root, block.slot(), block_data) { blob_batch.extend(self.store.convert_to_kv_batch(vec![op])?); } diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 4d3c3e193e..4efd90bd22 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -20,6 +20,7 @@ pub mod custody_context; pub mod data_availability_checker; pub mod data_column_verification; mod early_attester_cache; +pub mod envelope_times_cache; mod errors; pub mod events; pub mod execution_payload; @@ -41,6 +42,7 @@ pub mod observed_block_producers; pub mod observed_data_sidecars; pub mod observed_operations; mod observed_slashable; +pub mod payload_envelope_verification; pub mod pending_payload_envelopes; pub mod persisted_beacon_chain; pub mod persisted_custody; diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index 9de67ca93f..786daa09da 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -21,6 +21,34 @@ pub const VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_SOURCE_ATTESTER_HIT_TOTAL: &st pub const VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_SOURCE_ATTESTER_MISS_TOTAL: &str = "validator_monitor_attestation_simulator_source_attester_miss_total"; +/* +* Execution Payload Envelope Processing +*/ + +pub static ENVELOPE_PROCESSING_REQUESTS: LazyLock> = LazyLock::new(|| { + try_create_int_counter( + "payload_envelope_processing_requests_total", + "Count of payload envelopes submitted for processing", + ) +}); +pub static ENVELOPE_PROCESSING_SUCCESSES: LazyLock> = LazyLock::new(|| { + try_create_int_counter( + "payload_envelope_processing_successes_total", + "Count of payload envelopes processed without error", + ) +}); +pub static ENVELOPE_PROCESSING_TIMES: LazyLock> = LazyLock::new(|| { + try_create_histogram( + "payload_envelope_processing_seconds", + "Full runtime of payload envelope 
processing", + ) +}); +pub static ENVELOPE_PROCESSING_DB_WRITE: LazyLock> = LazyLock::new(|| { + try_create_histogram( + "payload_envelope_processing_db_write_seconds", + "Time spent writing a newly processed payload envelope and state to DB", + ) +}); /* * Block Processing */ diff --git a/beacon_node/beacon_chain/src/payload_envelope_verification/execution_pending_envelope.rs b/beacon_node/beacon_chain/src/payload_envelope_verification/execution_pending_envelope.rs new file mode 100644 index 0000000000..86f9293c8f --- /dev/null +++ b/beacon_node/beacon_chain/src/payload_envelope_verification/execution_pending_envelope.rs @@ -0,0 +1,105 @@ +use std::sync::Arc; + +use slot_clock::SlotClock; +use state_processing::{ + VerifySignatures, + envelope_processing::{VerifyStateRoot, process_execution_payload_envelope}, +}; +use types::EthSpec; + +use crate::{ + BeaconChain, BeaconChainError, BeaconChainTypes, NotifyExecutionLayer, + PayloadVerificationOutcome, + block_verification::PayloadVerificationHandle, + payload_envelope_verification::{ + EnvelopeError, EnvelopeImportData, MaybeAvailableEnvelope, + gossip_verified_envelope::GossipVerifiedEnvelope, load_snapshot_from_state_root, + payload_notifier::PayloadNotifier, + }, +}; + +pub struct ExecutionPendingEnvelope { + pub signed_envelope: MaybeAvailableEnvelope, + pub import_data: EnvelopeImportData, + pub payload_verification_handle: PayloadVerificationHandle, +} + +impl GossipVerifiedEnvelope { + pub fn into_execution_pending_envelope( + self, + chain: &Arc>, + notify_execution_layer: NotifyExecutionLayer, + ) -> Result, EnvelopeError> { + let signed_envelope = self.signed_envelope; + let envelope = &signed_envelope.message; + let payload = &envelope.payload; + + // Define a future that will verify the execution payload with an execution engine. + // + // We do this as early as possible so that later parts of this function can run in parallel + // with the payload verification. 
+ let payload_notifier = PayloadNotifier::new( + chain.clone(), + signed_envelope.clone(), + self.block.clone(), + notify_execution_layer, + )?; + let block_root = envelope.beacon_block_root; + let slot = self.block.slot(); + + let payload_verification_future = async move { + let chain = payload_notifier.chain.clone(); + if let Some(started_execution) = chain.slot_clock.now_duration() { + chain + .envelope_times_cache + .write() + .set_time_started_execution(block_root, slot, started_execution); + } + + let payload_verification_status = payload_notifier.notify_new_payload().await?; + Ok(PayloadVerificationOutcome { + payload_verification_status, + }) + }; + // Spawn the payload verification future as a new task, but don't wait for it to complete. + // The `payload_verification_future` will be awaited later to ensure verification completed + // successfully. + let payload_verification_handle = chain + .task_executor + .spawn_handle( + payload_verification_future, + "execution_payload_verification", + ) + .ok_or(BeaconChainError::RuntimeShutdown)?; + + let snapshot = if let Some(snapshot) = self.snapshot { + *snapshot + } else { + load_snapshot_from_state_root::(block_root, self.block.state_root(), &chain.store)? 
+ }; + let mut state = snapshot.pre_state; + + // All the state modifications are done in envelope_processing + process_execution_payload_envelope( + &mut state, + Some(snapshot.state_root), + &signed_envelope, + // verify signature already done for GossipVerifiedEnvelope + VerifySignatures::False, + VerifyStateRoot::True, + &chain.spec, + )?; + + Ok(ExecutionPendingEnvelope { + signed_envelope: MaybeAvailableEnvelope::AvailabilityPending { + block_hash: payload.block_hash, + envelope: signed_envelope, + }, + import_data: EnvelopeImportData { + block_root, + post_state: Box::new(state), + }, + payload_verification_handle, + }) + } +} diff --git a/beacon_node/beacon_chain/src/payload_envelope_verification/gossip_verified_envelope.rs b/beacon_node/beacon_chain/src/payload_envelope_verification/gossip_verified_envelope.rs new file mode 100644 index 0000000000..03a3a91ac5 --- /dev/null +++ b/beacon_node/beacon_chain/src/payload_envelope_verification/gossip_verified_envelope.rs @@ -0,0 +1,445 @@ +use std::sync::Arc; + +use educe::Educe; +use parking_lot::{Mutex, RwLock}; +use store::DatabaseBlock; +use tracing::{Span, debug}; +use types::{ + ChainSpec, EthSpec, ExecutionPayloadBid, ExecutionPayloadEnvelope, Hash256, SignedBeaconBlock, + SignedExecutionPayloadEnvelope, Slot, consts::gloas::BUILDER_INDEX_SELF_BUILD, +}; + +use crate::{ + BeaconChain, BeaconChainError, BeaconChainTypes, BeaconStore, + beacon_proposer_cache::{self, BeaconProposerCache}, + canonical_head::CanonicalHead, + payload_envelope_verification::{ + EnvelopeError, EnvelopeProcessingSnapshot, load_snapshot_from_state_root, + }, + validator_pubkey_cache::ValidatorPubkeyCache, +}; + +/// Bundles only the dependencies needed for gossip verification of execution payload envelopes, +/// decoupling `GossipVerifiedEnvelope::new` from the full `BeaconChain`. 
+pub struct GossipVerificationContext<'a, T: BeaconChainTypes> { + pub canonical_head: &'a CanonicalHead, + pub store: &'a BeaconStore, + pub spec: &'a ChainSpec, + pub beacon_proposer_cache: &'a Mutex, + pub validator_pubkey_cache: &'a RwLock>, + pub genesis_validators_root: Hash256, +} + +/// Verify that an execution payload envelope is consistent with its beacon block +/// and execution bid. +pub(crate) fn verify_envelope_consistency( + envelope: &ExecutionPayloadEnvelope, + block: &SignedBeaconBlock, + execution_bid: &ExecutionPayloadBid, + latest_finalized_slot: Slot, +) -> Result<(), EnvelopeError> { + // Check that the envelope's slot isn't from a slot prior + // to the latest finalized slot. + if envelope.slot < latest_finalized_slot { + return Err(EnvelopeError::PriorToFinalization { + payload_slot: envelope.slot, + latest_finalized_slot, + }); + } + + // Check that the slot of the envelope matches the slot of the block. + if envelope.slot != block.slot() { + return Err(EnvelopeError::SlotMismatch { + block: block.slot(), + envelope: envelope.slot, + }); + } + + // Builder index matches committed bid. + if envelope.builder_index != execution_bid.builder_index { + return Err(EnvelopeError::BuilderIndexMismatch { + committed_bid: execution_bid.builder_index, + envelope: envelope.builder_index, + }); + } + + // The block hash should match the block hash of the execution bid. + if envelope.payload.block_hash != execution_bid.block_hash { + return Err(EnvelopeError::BlockHashMismatch { + committed_bid: execution_bid.block_hash, + envelope: envelope.payload.block_hash, + }); + } + + Ok(()) +} + +/// A wrapper around a `SignedExecutionPayloadEnvelope` that indicates it has been approved for re-gossiping on +/// the p2p network. 
+#[derive(Educe)] +#[educe(Debug(bound = "T: BeaconChainTypes"))] +pub struct GossipVerifiedEnvelope { + pub signed_envelope: Arc>, + pub block: Arc>, + pub snapshot: Option>>, +} + +impl GossipVerifiedEnvelope { + pub fn new( + signed_envelope: Arc>, + ctx: &GossipVerificationContext<'_, T>, + ) -> Result { + let envelope = &signed_envelope.message; + let beacon_block_root = envelope.beacon_block_root; + + // Check that we've seen the beacon block for this envelope and that it passes validation. + // TODO(EIP-7732): We might need some type of status table in order to differentiate between: + // If we have a block_processing_table, we could have a Processed(Bid, bool) state that is only + // entered post adding to fork choice. That way, we could potentially need only a single call to make + // sure the block is valid and to do all consequent checks with the bid + // + // 1. Blocks we haven't seen (IGNORE), and + // 2. Blocks we've seen that are invalid (REJECT). + // + // Presently these two cases are conflated. + let fork_choice_read_lock = ctx.canonical_head.fork_choice_read_lock(); + let Some(proto_block) = fork_choice_read_lock.get_block(&beacon_block_root) else { + return Err(EnvelopeError::BlockRootUnknown { + block_root: beacon_block_root, + }); + }; + + drop(fork_choice_read_lock); + + let latest_finalized_slot = ctx + .canonical_head + .cached_head() + .finalized_checkpoint() + .epoch + .start_slot(T::EthSpec::slots_per_epoch()); + + // TODO(EIP-7732): check that we haven't seen another valid `SignedExecutionPayloadEnvelope` + // for this block root from this builder - envelope status table check + let block = match ctx.store.try_get_full_block(&beacon_block_root)? 
{ + Some(DatabaseBlock::Full(block)) => Arc::new(block), + Some(DatabaseBlock::Blinded(_)) | None => { + return Err(EnvelopeError::from(BeaconChainError::MissingBeaconBlock( + beacon_block_root, + ))); + } + }; + let execution_bid = &block + .message() + .body() + .signed_execution_payload_bid()? + .message; + + verify_envelope_consistency(envelope, &block, execution_bid, latest_finalized_slot)?; + + // Verify the envelope signature. + // + // For self-built envelopes, we can use the proposer cache for the fork and the + // validator pubkey cache for the proposer's pubkey, avoiding a state load from disk. + // For external builder envelopes, we must load the state to access the builder registry. + let builder_index = envelope.builder_index; + let block_slot = envelope.slot; + let envelope_epoch = block_slot.epoch(T::EthSpec::slots_per_epoch()); + // Since the payload's block is already guaranteed to be imported, the associated `proto_block.current_epoch_shuffling_id` + // already carries the correct `shuffling_decision_block`. + let proposer_shuffling_decision_block = proto_block + .current_epoch_shuffling_id + .shuffling_decision_block; + + let (signature_is_valid, opt_snapshot) = if builder_index == BUILDER_INDEX_SELF_BUILD { + // Fast path: self-built envelopes can be verified without loading the state. 
+ let mut opt_snapshot = None; + let proposer = beacon_proposer_cache::with_proposer_cache( + ctx.beacon_proposer_cache, + proposer_shuffling_decision_block, + envelope_epoch, + |proposers| proposers.get_slot::(block_slot), + || { + debug!( + %beacon_block_root, + "Proposer shuffling cache miss for envelope verification" + ); + let snapshot = load_snapshot_from_state_root::( + beacon_block_root, + proto_block.state_root, + ctx.store, + )?; + opt_snapshot = Some(Box::new(snapshot.clone())); + Ok::<_, EnvelopeError>((snapshot.state_root, snapshot.pre_state)) + }, + ctx.spec, + )?; + let expected_proposer = proposer.index; + let fork = proposer.fork; + + if block.message().proposer_index() != expected_proposer as u64 { + return Err(EnvelopeError::IncorrectBlockProposer { + proposer_index: block.message().proposer_index(), + local_shuffling: expected_proposer as u64, + }); + } + + let pubkey_cache = ctx.validator_pubkey_cache.read(); + let pubkey = pubkey_cache + .get(block.message().proposer_index() as usize) + .ok_or_else(|| EnvelopeError::UnknownValidator { + proposer_index: block.message().proposer_index(), + })?; + let is_valid = signed_envelope.verify_signature( + pubkey, + &fork, + ctx.genesis_validators_root, + ctx.spec, + ); + (is_valid, opt_snapshot) + } else { + // TODO(gloas) if we implement a builder pubkey cache, we'll need to use it here. + // External builder: must load the state to get the builder pubkey. 
+ let snapshot = load_snapshot_from_state_root::( + beacon_block_root, + proto_block.state_root, + ctx.store, + )?; + let is_valid = + signed_envelope.verify_signature_with_state(&snapshot.pre_state, ctx.spec)?; + (is_valid, Some(Box::new(snapshot))) + }; + + if !signature_is_valid { + return Err(EnvelopeError::BadSignature); + } + + Ok(Self { + signed_envelope, + block, + snapshot: opt_snapshot, + }) + } + + pub fn envelope_cloned(&self) -> Arc> { + self.signed_envelope.clone() + } +} + +impl BeaconChain { + /// Build a `GossipVerificationContext` from this `BeaconChain`. + pub fn gossip_verification_context(&self) -> GossipVerificationContext<'_, T> { + GossipVerificationContext { + canonical_head: &self.canonical_head, + store: &self.store, + spec: &self.spec, + beacon_proposer_cache: &self.beacon_proposer_cache, + validator_pubkey_cache: &self.validator_pubkey_cache, + genesis_validators_root: self.genesis_validators_root, + } + } + + /// Returns `Ok(GossipVerifiedEnvelope)` if the supplied `envelope` should be forwarded onto the + /// gossip network. The envelope is not imported into the chain, it is just partially verified. + /// + /// The returned `GossipVerifiedEnvelope` should be provided to `Self::process_execution_payload_envelope` immediately + /// after it is returned, unless some other circumstance decides it should not be imported at + /// all. + /// + /// ## Errors + /// + /// Returns an `Err` if the given envelope was invalid, or an error was encountered during verification. 
+ pub async fn verify_envelope_for_gossip( + self: &Arc, + envelope: Arc>, + ) -> Result, EnvelopeError> { + let chain = self.clone(); + let span = Span::current(); + self.task_executor + .clone() + .spawn_blocking_handle( + move || { + let _guard = span.enter(); + let slot = envelope.slot(); + let beacon_block_root = envelope.message.beacon_block_root; + + let ctx = chain.gossip_verification_context(); + match GossipVerifiedEnvelope::new(envelope, &ctx) { + Ok(verified) => { + debug!( + %slot, + ?beacon_block_root, + "Successfully verified gossip envelope" + ); + + Ok(verified) + } + Err(e) => { + debug!( + error = e.to_string(), + ?beacon_block_root, + %slot, + "Rejected gossip envelope" + ); + + Err(e) + } + } + }, + "gossip_envelope_verification_handle", + ) + .ok_or(BeaconChainError::RuntimeShutdown)? + .await + .map_err(BeaconChainError::TokioJoin)? + } +} + +#[cfg(test)] +mod tests { + use std::marker::PhantomData; + + use bls::Signature; + use ssz_types::VariableList; + use types::{ + BeaconBlock, BeaconBlockBodyGloas, BeaconBlockGloas, Eth1Data, ExecutionBlockHash, + ExecutionPayloadBid, ExecutionPayloadEnvelope, ExecutionPayloadGloas, ExecutionRequests, + Graffiti, Hash256, MinimalEthSpec, SignedBeaconBlock, SignedExecutionPayloadBid, Slot, + SyncAggregate, + }; + + use super::verify_envelope_consistency; + use crate::payload_envelope_verification::EnvelopeError; + + type E = MinimalEthSpec; + + fn make_envelope( + slot: Slot, + builder_index: u64, + block_hash: ExecutionBlockHash, + ) -> ExecutionPayloadEnvelope { + ExecutionPayloadEnvelope { + payload: ExecutionPayloadGloas { + block_hash, + ..ExecutionPayloadGloas::default() + }, + execution_requests: ExecutionRequests::default(), + builder_index, + beacon_block_root: Hash256::ZERO, + slot, + state_root: Hash256::ZERO, + } + } + + fn make_block(slot: Slot) -> SignedBeaconBlock { + let block = BeaconBlock::Gloas(BeaconBlockGloas { + slot, + proposer_index: 0, + parent_root: Hash256::ZERO, + state_root: 
Hash256::ZERO, + body: BeaconBlockBodyGloas { + randao_reveal: Signature::empty(), + eth1_data: Eth1Data { + deposit_root: Hash256::ZERO, + block_hash: Hash256::ZERO, + deposit_count: 0, + }, + graffiti: Graffiti::default(), + proposer_slashings: VariableList::empty(), + attester_slashings: VariableList::empty(), + attestations: VariableList::empty(), + deposits: VariableList::empty(), + voluntary_exits: VariableList::empty(), + sync_aggregate: SyncAggregate::empty(), + bls_to_execution_changes: VariableList::empty(), + signed_execution_payload_bid: SignedExecutionPayloadBid::empty(), + payload_attestations: VariableList::empty(), + _phantom: PhantomData, + }, + }); + SignedBeaconBlock::from_block(block, Signature::empty()) + } + + fn make_bid(builder_index: u64, block_hash: ExecutionBlockHash) -> ExecutionPayloadBid { + ExecutionPayloadBid { + builder_index, + block_hash, + ..ExecutionPayloadBid::default() + } + } + + #[test] + fn test_valid_envelope() { + let slot = Slot::new(10); + let builder_index = 5; + let block_hash = ExecutionBlockHash::repeat_byte(0xaa); + + let envelope = make_envelope(slot, builder_index, block_hash); + let block = make_block(slot); + let bid = make_bid(builder_index, block_hash); + + assert!(verify_envelope_consistency::(&envelope, &block, &bid, Slot::new(0)).is_ok()); + } + + #[test] + fn test_prior_to_finalization() { + let slot = Slot::new(5); + let builder_index = 1; + let block_hash = ExecutionBlockHash::repeat_byte(0xbb); + + let envelope = make_envelope(slot, builder_index, block_hash); + let block = make_block(slot); + let bid = make_bid(builder_index, block_hash); + let latest_finalized_slot = Slot::new(10); + + let result = + verify_envelope_consistency::(&envelope, &block, &bid, latest_finalized_slot); + assert!(matches!( + result, + Err(EnvelopeError::PriorToFinalization { .. 
}) + )); + } + + #[test] + fn test_slot_mismatch() { + let builder_index = 1; + let block_hash = ExecutionBlockHash::repeat_byte(0xcc); + + let envelope = make_envelope(Slot::new(10), builder_index, block_hash); + let block = make_block(Slot::new(20)); + let bid = make_bid(builder_index, block_hash); + + let result = verify_envelope_consistency::(&envelope, &block, &bid, Slot::new(0)); + assert!(matches!(result, Err(EnvelopeError::SlotMismatch { .. }))); + } + + #[test] + fn test_builder_index_mismatch() { + let slot = Slot::new(10); + let block_hash = ExecutionBlockHash::repeat_byte(0xdd); + + let envelope = make_envelope(slot, 1, block_hash); + let block = make_block(slot); + let bid = make_bid(2, block_hash); + + let result = verify_envelope_consistency::(&envelope, &block, &bid, Slot::new(0)); + assert!(matches!( + result, + Err(EnvelopeError::BuilderIndexMismatch { .. }) + )); + } + + #[test] + fn test_block_hash_mismatch() { + let slot = Slot::new(10); + let builder_index = 1; + + let envelope = make_envelope(slot, builder_index, ExecutionBlockHash::repeat_byte(0xee)); + let block = make_block(slot); + let bid = make_bid(builder_index, ExecutionBlockHash::repeat_byte(0xff)); + + let result = verify_envelope_consistency::(&envelope, &block, &bid, Slot::new(0)); + assert!(matches!( + result, + Err(EnvelopeError::BlockHashMismatch { .. 
}) + )); + } +} diff --git a/beacon_node/beacon_chain/src/payload_envelope_verification/import.rs b/beacon_node/beacon_chain/src/payload_envelope_verification/import.rs new file mode 100644 index 0000000000..2ee315e559 --- /dev/null +++ b/beacon_node/beacon_chain/src/payload_envelope_verification/import.rs @@ -0,0 +1,354 @@ +use std::sync::Arc; +use std::time::Duration; + +use fork_choice::PayloadVerificationStatus; +use slot_clock::SlotClock; +use store::StoreOp; +use tracing::{debug, error, info, info_span, instrument, warn}; +use types::{BeaconState, BlockImportSource, Hash256, Slot}; + +use super::{ + AvailableEnvelope, AvailableExecutedEnvelope, EnvelopeError, EnvelopeImportData, + ExecutedEnvelope, gossip_verified_envelope::GossipVerifiedEnvelope, +}; +use crate::{ + AvailabilityProcessingStatus, BeaconChain, BeaconChainError, BeaconChainTypes, + NotifyExecutionLayer, block_verification_types::AvailableBlockData, metrics, + payload_envelope_verification::ExecutionPendingEnvelope, validator_monitor::get_slot_delay_ms, +}; + +const ENVELOPE_METRICS_CACHE_SLOT_LIMIT: u32 = 64; + +impl BeaconChain { + /// Returns `Ok(status)` if the given `unverified_envelope` was successfully verified and + /// imported into the chain. + /// + /// ## Errors + /// + /// Returns an `Err` if the given payload envelope was invalid, or an error was encountered during + /// verification. + #[instrument(skip_all, fields(block_root = ?block_root, block_source = %block_source))] + pub async fn process_execution_payload_envelope( + self: &Arc, + block_root: Hash256, + unverified_envelope: GossipVerifiedEnvelope, + notify_execution_layer: NotifyExecutionLayer, + block_source: BlockImportSource, + publish_fn: impl FnOnce() -> Result<(), EnvelopeError>, + ) -> Result { + let block_slot = unverified_envelope.signed_envelope.slot(); + + // Set observed time if not already set. Usually this should be set by gossip or RPC, + // but just in case we set it again here (useful for tests). 
+ if let Some(seen_timestamp) = self.slot_clock.now_duration() { + self.envelope_times_cache.write().set_time_observed( + block_root, + block_slot, + seen_timestamp, + None, + ); + } + + // TODO(gloas) insert the pre-executed envelope into some type of cache. + + let _full_timer = metrics::start_timer(&metrics::ENVELOPE_PROCESSING_TIMES); + + metrics::inc_counter(&metrics::ENVELOPE_PROCESSING_REQUESTS); + + // A small closure to group the verification and import errors. + let chain = self.clone(); + let import_envelope = async move { + let execution_pending = unverified_envelope + .into_execution_pending_envelope(&chain, notify_execution_layer)?; + publish_fn()?; + + // Record the time it took to complete consensus verification. + if let Some(timestamp) = chain.slot_clock.now_duration() { + chain + .envelope_times_cache + .write() + .set_time_consensus_verified(block_root, block_slot, timestamp); + } + + let envelope_times_cache = chain.envelope_times_cache.clone(); + let slot_clock = chain.slot_clock.clone(); + + // TODO(gloas): rename/refactor these `into_` names to be less similar and more clear + // about what the function actually does. + let executed_envelope = chain + .into_executed_payload_envelope(execution_pending) + .await + .inspect_err(|_| { + // TODO(gloas) If the envelope fails execution for whatever reason (e.g. engine offline), + // and we keep it in the cache, then the node will NOT perform lookup and + // reprocess this block until the block is evicted from DA checker, causing the + // chain to get stuck temporarily if the block is canonical. Therefore we remove + // it from the cache if execution fails. + })?; + + // Record the time it took to wait for execution layer verification. 
+ if let Some(timestamp) = slot_clock.now_duration() { + envelope_times_cache + .write() + .set_time_executed(block_root, block_slot, timestamp); + } + + match executed_envelope { + ExecutedEnvelope::Available(envelope) => { + self.import_available_execution_payload_envelope(Box::new(envelope)) + .await + } + ExecutedEnvelope::AvailabilityPending() => Err(EnvelopeError::InternalError( + "Pending payload envelope not yet implemented".to_owned(), + )), + } + }; + + // Verify and import the payload envelope. + match import_envelope.await { + // The payload envelope was successfully verified and imported. + Ok(status @ AvailabilityProcessingStatus::Imported(block_root)) => { + info!( + ?block_root, + %block_slot, + source = %block_source, + "Execution payload envelope imported" + ); + + // TODO(gloas) do we need to send a `PayloadImported` event to the reprocess queue? + // TODO(gloas) do we need to recompute head? + // should canonical_head return the block and the payload now? + self.recompute_head_at_current_slot().await; + + metrics::inc_counter(&metrics::ENVELOPE_PROCESSING_SUCCESSES); + + Ok(status) + } + Ok(status @ AvailabilityProcessingStatus::MissingComponents(slot, block_root)) => { + debug!(?block_root, %slot, "Payload envelope awaiting blobs"); + + Ok(status) + } + Err(EnvelopeError::BeaconChainError(e)) => { + if matches!(e.as_ref(), BeaconChainError::TokioJoin(_)) { + debug!(error = ?e, "Envelope processing cancelled"); + } else { + warn!(error = ?e, "Execution payload envelope rejected"); + } + Err(EnvelopeError::BeaconChainError(e)) + } + Err(other) => { + warn!( + reason = other.to_string(), + "Execution payload envelope rejected" + ); + Err(other) + } + } + } + + /// Accepts a fully-verified payload envelope and awaits on its payload verification handle to + /// get a fully `ExecutedEnvelope`. + /// + /// An error is returned if the verification handle couldn't be awaited. 
+ #[instrument(skip_all, level = "debug")] + async fn into_executed_payload_envelope( + self: Arc, + pending_envelope: ExecutionPendingEnvelope, + ) -> Result, EnvelopeError> { + let ExecutionPendingEnvelope { + signed_envelope, + import_data, + payload_verification_handle, + } = pending_envelope; + + let payload_verification_outcome = payload_verification_handle + .await + .map_err(BeaconChainError::TokioJoin)? + .ok_or(BeaconChainError::RuntimeShutdown)??; + + Ok(ExecutedEnvelope::new( + signed_envelope, + import_data, + payload_verification_outcome, + )) + } + + #[instrument(skip_all)] + pub async fn import_available_execution_payload_envelope( + self: &Arc, + envelope: Box>, + ) -> Result { + let AvailableExecutedEnvelope { + envelope, + import_data, + payload_verification_outcome, + } = *envelope; + + let EnvelopeImportData { + block_root, + post_state, + } = import_data; + + let block_root = { + // Capture the current span before moving into the blocking task + let current_span = tracing::Span::current(); + let chain = self.clone(); + self.spawn_blocking_handle( + move || { + // Enter the captured span in the blocking thread + let _guard = current_span.enter(); + chain.import_execution_payload_envelope( + envelope, + block_root, + *post_state, + payload_verification_outcome.payload_verification_status, + ) + }, + "payload_verification_handle", + ) + .await?? + }; + + Ok(AvailabilityProcessingStatus::Imported(block_root)) + } + + /// Accepts a fully-verified and available envelope and imports it into the chain without performing any + /// additional verification. + /// + /// An error is returned if the envelope was unable to be imported. It may be partially imported + /// (i.e., this function is not atomic). 
+ #[allow(clippy::too_many_arguments)] + #[instrument(skip_all)] + fn import_execution_payload_envelope( + &self, + signed_envelope: AvailableEnvelope, + block_root: Hash256, + state: BeaconState, + _payload_verification_status: PayloadVerificationStatus, + ) -> Result { + // Everything in this initial section is on the hot path for processing the envelope. + // Take an upgradable read lock on fork choice so we can check if this block has already + // been imported. We don't want to repeat work importing a block that is already imported. + let fork_choice_reader = self.canonical_head.fork_choice_upgradable_read_lock(); + if !fork_choice_reader.contains_block(&block_root) { + return Err(EnvelopeError::BlockRootUnknown { block_root }); + } + + // TODO(gloas) add defensive check to see if payload envelope is already in fork choice + // Note that a duplicate cache/payload status table should prevent this from happening + // but it doesnt hurt to be defensive. + + // TODO(gloas) when the code below is implemented we can delete this drop + drop(fork_choice_reader); + + // TODO(gloas) no fork choice logic yet + // Take an exclusive write-lock on fork choice. It's very important to prevent deadlocks by + // avoiding taking other locks whilst holding this lock. + // let fork_choice = parking_lot::RwLockUpgradableReadGuard::upgrade(fork_choice_reader); + + // TODO(gloas) Do we need this check? Do not import a block that doesn't descend from the finalized root. + // let signed_block = check_block_is_finalized_checkpoint_or_descendant(self, &fork_choice, signed_block)?; + + // TODO(gloas) emit SSE event if the payload became the new head payload + + // It is important NOT to return errors here before the database commit, because the envelope + // has already been added to fork choice and the database would be left in an inconsistent + // state if we returned early without committing. In other words, an error here would + // corrupt the node's database permanently. 
+ + // Store the envelope, its post-state, and any data columns. + // If the write fails, revert fork choice to the version from disk, else we can + // end up with envelopes in fork choice that are missing from disk. + // See https://github.com/sigp/lighthouse/issues/2028 + let (signed_envelope, columns) = signed_envelope.deconstruct(); + + let mut ops = vec![]; + + if let Some(blobs_or_columns_store_op) = self.get_blobs_or_columns_store_op( + block_root, + signed_envelope.slot(), + AvailableBlockData::DataColumns(columns), + ) { + ops.push(blobs_or_columns_store_op); + } + + let db_write_timer = metrics::start_timer(&metrics::ENVELOPE_PROCESSING_DB_WRITE); + + ops.push(StoreOp::PutPayloadEnvelope( + block_root, + signed_envelope.clone(), + )); + ops.push(StoreOp::PutState( + signed_envelope.message.state_root, + &state, + )); + + let db_span = info_span!("persist_payloads_and_blobs").entered(); + + if let Err(e) = self.store.do_atomically_with_block_and_blobs_cache(ops) { + error!( + msg = "Restoring fork choice from disk", + error = ?e, + "Database write failed!" + ); + return Err(e.into()); + // TODO(gloas) handle db write failure + // return Err(self + // .handle_import_block_db_write_error(fork_choice) + // .err() + // .unwrap_or(e.into())); + } + + drop(db_span); + + // TODO(gloas) drop fork choice lock + // The fork choice write-lock is dropped *after* the on-disk database has been updated. + // This prevents inconsistency between the two at the expense of concurrency. + // drop(fork_choice); + + // We're declaring the envelope "imported" at this point, since fork choice and the DB know + // about it. 
+ let envelope_time_imported = self.slot_clock.now_duration().unwrap_or(Duration::MAX); + + // TODO(gloas) depending on what happens with light clients + // we might need to do some light client related computations here + + metrics::stop_timer(db_write_timer); + + self.import_envelope_update_metrics_and_events( + block_root, + signed_envelope.slot(), + envelope_time_imported, + ); + + Ok(block_root) + } + + fn import_envelope_update_metrics_and_events( + &self, + block_root: Hash256, + envelope_slot: Slot, + envelope_time_imported: Duration, + ) { + let envelope_delay_total = + get_slot_delay_ms(envelope_time_imported, envelope_slot, &self.slot_clock); + + // Do not write to the cache for envelopes older than 2 epochs, this helps reduce writes + // to the cache during sync. + if envelope_delay_total + < self + .slot_clock + .slot_duration() + .saturating_mul(ENVELOPE_METRICS_CACHE_SLOT_LIMIT) + { + self.envelope_times_cache.write().set_time_imported( + block_root, + envelope_slot, + envelope_time_imported, + ); + } + + // TODO(gloas) emit SSE event for envelope import (similar to SseBlock for blocks). + } +} diff --git a/beacon_node/beacon_chain/src/payload_envelope_verification/mod.rs b/beacon_node/beacon_chain/src/payload_envelope_verification/mod.rs new file mode 100644 index 0000000000..c707d62dc7 --- /dev/null +++ b/beacon_node/beacon_chain/src/payload_envelope_verification/mod.rs @@ -0,0 +1,285 @@ +//! The incremental processing steps (e.g., signatures verified but not the state transition) is +//! represented as a sequence of wrapper-types around the envelope. There is a linear progression of +//! types, starting at a `SignedExecutionPayloadEnvelope` and finishing with an `AvailableExecutedEnvelope` (see +//! diagram below). +//! +//! ```ignore +//! SignedExecutionPayloadEnvelope +//! | +//! ▼ +//! GossipVerifiedEnvelope +//! | +//! ▼ +//! ExecutionPendingEnvelope +//! | +//! await +//! ▼ +//! ExecutedEnvelope +//! +//! 
``` + +use std::sync::Arc; + +use store::Error as DBError; + +use state_processing::{BlockProcessingError, envelope_processing::EnvelopeProcessingError}; +use tracing::instrument; +use types::{ + BeaconState, BeaconStateError, ChainSpec, DataColumnSidecarList, EthSpec, ExecutionBlockHash, + ExecutionPayloadEnvelope, Hash256, SignedExecutionPayloadEnvelope, Slot, +}; + +use crate::{ + BeaconChainError, BeaconChainTypes, BeaconStore, BlockError, ExecutionPayloadError, + PayloadVerificationOutcome, +}; + +pub mod execution_pending_envelope; +pub mod gossip_verified_envelope; +pub mod import; +mod payload_notifier; + +pub use execution_pending_envelope::ExecutionPendingEnvelope; + +#[derive(PartialEq)] +pub struct EnvelopeImportData { + pub block_root: Hash256, + pub post_state: Box>, +} + +#[derive(Debug)] +#[allow(dead_code)] +pub struct AvailableEnvelope { + execution_block_hash: ExecutionBlockHash, + envelope: Arc>, + columns: DataColumnSidecarList, + /// Timestamp at which this envelope first became available (UNIX timestamp, time since 1970). + columns_available_timestamp: Option, + pub spec: Arc, +} + +impl AvailableEnvelope { + pub fn message(&self) -> &ExecutionPayloadEnvelope { + &self.envelope.message + } + + #[allow(clippy::type_complexity)] + pub fn deconstruct( + self, + ) -> ( + Arc>, + DataColumnSidecarList, + ) { + let AvailableEnvelope { + envelope, columns, .. + } = self; + (envelope, columns) + } +} + +pub enum MaybeAvailableEnvelope { + Available(AvailableEnvelope), + AvailabilityPending { + block_hash: ExecutionBlockHash, + envelope: Arc>, + }, +} + +/// This snapshot is to be used for verifying a payload envelope. +#[derive(Debug, Clone)] +pub struct EnvelopeProcessingSnapshot { + /// This state is equivalent to the `self.beacon_block.state_root()` before applying the envelope. 
+ pub pre_state: BeaconState, + pub state_root: Hash256, + pub beacon_block_root: Hash256, +} + +/// A payload envelope that has gone through processing checks and execution by an EL client. +/// This envelope hasn't necessarily completed data availability checks. +/// +/// +/// It contains 2 variants: +/// 1. `Available`: This envelope has been executed and also contains all data to consider it +/// fully available. +/// 2. `AvailabilityPending`: This envelope hasn't received all required blobs to consider it +/// fully available. +pub enum ExecutedEnvelope { + Available(AvailableExecutedEnvelope), + // TODO(gloas) implement availability pending + AvailabilityPending(), +} + +impl ExecutedEnvelope { + pub fn new( + envelope: MaybeAvailableEnvelope, + import_data: EnvelopeImportData, + payload_verification_outcome: PayloadVerificationOutcome, + ) -> Self { + match envelope { + MaybeAvailableEnvelope::Available(available_envelope) => { + Self::Available(AvailableExecutedEnvelope::new( + available_envelope, + import_data, + payload_verification_outcome, + )) + } + // TODO(gloas) implement availability pending + MaybeAvailableEnvelope::AvailabilityPending { + block_hash: _, + envelope: _, + } => Self::AvailabilityPending(), + } + } +} + +/// A payload envelope that has completed all payload processing checks including verification +/// by an EL client **and** has all requisite blob data to be imported into fork choice. +pub struct AvailableExecutedEnvelope { + pub envelope: AvailableEnvelope, + pub import_data: EnvelopeImportData, + pub payload_verification_outcome: PayloadVerificationOutcome, +} + +impl AvailableExecutedEnvelope { + pub fn new( + envelope: AvailableEnvelope, + import_data: EnvelopeImportData, + payload_verification_outcome: PayloadVerificationOutcome, + ) -> Self { + Self { + envelope, + import_data, + payload_verification_outcome, + } + } +} + +#[derive(Debug)] +pub enum EnvelopeError { + /// The envelope's block root is unknown. 
+ BlockRootUnknown { block_root: Hash256 }, + /// The signature is invalid. + BadSignature, + /// The builder index doesn't match the committed bid + BuilderIndexMismatch { committed_bid: u64, envelope: u64 }, + /// The envelope slot doesn't match the block + SlotMismatch { block: Slot, envelope: Slot }, + /// The validator index is unknown + UnknownValidator { proposer_index: u64 }, + /// The block hash doesn't match the committed bid + BlockHashMismatch { + committed_bid: ExecutionBlockHash, + envelope: ExecutionBlockHash, + }, + /// The block's proposer_index does not match the locally computed proposer + IncorrectBlockProposer { + proposer_index: u64, + local_shuffling: u64, + }, + /// The slot belongs to a block that is from a slot prior than + /// to most recently finalized slot + PriorToFinalization { + payload_slot: Slot, + latest_finalized_slot: Slot, + }, + /// Some Beacon Chain Error + BeaconChainError(Arc), + /// Some Beacon State error + BeaconStateError(BeaconStateError), + /// Some BlockProcessingError (for electra operations) + BlockProcessingError(BlockProcessingError), + /// Some EnvelopeProcessingError + EnvelopeProcessingError(EnvelopeProcessingError), + /// Error verifying the execution payload + ExecutionPayloadError(ExecutionPayloadError), + /// An error from block-level checks reused during envelope import + BlockError(BlockError), + /// Internal error + InternalError(String), +} + +impl std::fmt::Display for EnvelopeError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{:?}", self) + } +} + +impl From for EnvelopeError { + fn from(e: BeaconChainError) -> Self { + EnvelopeError::BeaconChainError(Arc::new(e)) + } +} + +impl From for EnvelopeError { + fn from(e: ExecutionPayloadError) -> Self { + EnvelopeError::ExecutionPayloadError(e) + } +} + +impl From for EnvelopeError { + fn from(e: BeaconStateError) -> Self { + EnvelopeError::BeaconStateError(e) + } +} + +impl From for EnvelopeError { + fn from(e: 
DBError) -> Self { + EnvelopeError::BeaconChainError(Arc::new(BeaconChainError::DBError(e))) + } +} + +impl From for EnvelopeError { + fn from(e: BlockError) -> Self { + EnvelopeError::BlockError(e) + } +} + +/// Pull errors up from EnvelopeProcessingError to EnvelopeError +impl From for EnvelopeError { + fn from(e: EnvelopeProcessingError) -> Self { + match e { + EnvelopeProcessingError::BadSignature => EnvelopeError::BadSignature, + EnvelopeProcessingError::BeaconStateError(e) => EnvelopeError::BeaconStateError(e), + EnvelopeProcessingError::BlockHashMismatch { + committed_bid, + envelope, + } => EnvelopeError::BlockHashMismatch { + committed_bid, + envelope, + }, + EnvelopeProcessingError::BlockProcessingError(e) => { + EnvelopeError::BlockProcessingError(e) + } + e => EnvelopeError::EnvelopeProcessingError(e), + } + } +} + +#[instrument(skip_all, level = "debug", fields(beacon_block_root = %beacon_block_root))] +/// Load state from store given a known state root and block root. +/// Use this when the proto block has already been looked up from fork choice. +pub(crate) fn load_snapshot_from_state_root( + beacon_block_root: Hash256, + block_state_root: Hash256, + store: &BeaconStore, +) -> Result, EnvelopeError> { + // TODO(EIP-7732): add metrics here + + // We can use `get_hot_state` here rather than `get_advanced_hot_state` because the envelope + // must be from the same slot as its block (so no advance is required). + let cache_state = true; + let state = store + .get_hot_state(&block_state_root, cache_state) + .map_err(EnvelopeError::from)? 
+ .ok_or_else(|| { + BeaconChainError::DBInconsistent(format!( + "Missing state for envelope block {block_state_root:?}", + )) + })?; + + Ok(EnvelopeProcessingSnapshot { + pre_state: state, + state_root: block_state_root, + beacon_block_root, + }) +} diff --git a/beacon_node/beacon_chain/src/payload_envelope_verification/payload_notifier.rs b/beacon_node/beacon_chain/src/payload_envelope_verification/payload_notifier.rs new file mode 100644 index 0000000000..df21d33493 --- /dev/null +++ b/beacon_node/beacon_chain/src/payload_envelope_verification/payload_notifier.rs @@ -0,0 +1,94 @@ +use std::sync::Arc; + +use execution_layer::{NewPayloadRequest, NewPayloadRequestGloas}; +use fork_choice::PayloadVerificationStatus; +use state_processing::per_block_processing::deneb::kzg_commitment_to_versioned_hash; +use tracing::warn; +use types::{SignedBeaconBlock, SignedExecutionPayloadEnvelope}; + +use crate::{ + BeaconChain, BeaconChainTypes, BlockError, NotifyExecutionLayer, + execution_payload::notify_new_payload, payload_envelope_verification::EnvelopeError, +}; + +/// Used to await the result of executing payload with a remote EE. 
+pub struct PayloadNotifier { + pub chain: Arc>, + envelope: Arc>, + block: Arc>, + payload_verification_status: Option, +} + +impl PayloadNotifier { + pub fn new( + chain: Arc>, + envelope: Arc>, + block: Arc>, + notify_execution_layer: NotifyExecutionLayer, + ) -> Result { + let payload_verification_status = { + let payload_message = &envelope.message; + + match notify_execution_layer { + NotifyExecutionLayer::No if chain.config.optimistic_finalized_sync => { + let new_payload_request = Self::build_new_payload_request(&envelope, &block)?; + // TODO(gloas): check and test RLP block hash calculation post-Gloas + if let Err(e) = new_payload_request.perform_optimistic_sync_verifications() { + warn!( + block_number = ?payload_message.payload.block_number, + info = "you can silence this warning with --disable-optimistic-finalized-sync", + error = ?e, + "Falling back to slow block hash verification" + ); + None + } else { + Some(PayloadVerificationStatus::Optimistic) + } + } + _ => None, + } + }; + + Ok(Self { + chain, + envelope, + block, + payload_verification_status, + }) + } + + pub async fn notify_new_payload(self) -> Result { + if let Some(precomputed_status) = self.payload_verification_status { + Ok(precomputed_status) + } else { + let parent_root = self.block.message().parent_root(); + let request = Self::build_new_payload_request(&self.envelope, &self.block)?; + notify_new_payload(&self.chain, self.envelope.slot(), parent_root, request).await + } + } + + fn build_new_payload_request<'a>( + envelope: &'a SignedExecutionPayloadEnvelope, + block: &'a SignedBeaconBlock, + ) -> Result, BlockError> { + let bid = &block + .message() + .body() + .signed_execution_payload_bid() + .map_err(|e| BlockError::BeaconChainError(Box::new(e.into())))? 
+ .message; + + let versioned_hashes = bid + .blob_kzg_commitments + .iter() + .map(kzg_commitment_to_versioned_hash) + .collect(); + + Ok(NewPayloadRequest::Gloas(NewPayloadRequestGloas { + execution_payload: &envelope.message.payload, + versioned_hashes, + parent_beacon_block_root: block.message().parent_root(), + execution_requests: &envelope.message.execution_requests, + })) + } +} diff --git a/beacon_node/beacon_chain/tests/block_verification.rs b/beacon_node/beacon_chain/tests/block_verification.rs index e94e64e91d..e385e0dc48 100644 --- a/beacon_node/beacon_chain/tests/block_verification.rs +++ b/beacon_node/beacon_chain/tests/block_verification.rs @@ -1,5 +1,5 @@ #![cfg(not(debug_assertions))] - +// TODO(gloas) we probably need similar test for payload envelope verification use beacon_chain::block_verification_types::{AsBlock, ExecutedBlock, RpcBlock}; use beacon_chain::data_availability_checker::{AvailabilityCheckError, AvailableBlockData}; use beacon_chain::data_column_verification::CustodyDataColumn; diff --git a/beacon_node/network/src/metrics.rs b/beacon_node/network/src/metrics.rs index 240fd70e01..2119acf946 100644 --- a/beacon_node/network/src/metrics.rs +++ b/beacon_node/network/src/metrics.rs @@ -539,6 +539,16 @@ pub static SYNC_RPC_REQUEST_TIME: LazyLock> = LazyLock::new ) }); +/* + * Execution Payload Envelope Delay Metrics + */ +pub static ENVELOPE_DELAY_GOSSIP: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( + "payload_envelope_delay_gossip", + "The first time we see this payload envelope from gossip as a delay from the start of the slot", + ) +}); + /* * Block Delay Metrics */ diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index e90018c851..3335315157 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -4,7 +4,6 @@ use crate::{ 
service::NetworkMessage, sync::SyncMessage, }; -use beacon_chain::blob_verification::{GossipBlobError, GossipVerifiedBlob}; use beacon_chain::block_verification_types::AsBlock; use beacon_chain::data_column_verification::{GossipDataColumnError, GossipVerifiedDataColumn}; use beacon_chain::store::Error; @@ -19,6 +18,10 @@ use beacon_chain::{ sync_committee_verification::{self, Error as SyncCommitteeError}, validator_monitor::{get_block_delay_ms, get_slot_delay_ms}, }; +use beacon_chain::{ + blob_verification::{GossipBlobError, GossipVerifiedBlob}, + payload_envelope_verification::gossip_verified_envelope::GossipVerifiedEnvelope, +}; use beacon_processor::{Work, WorkEvent}; use lighthouse_network::{Client, MessageAcceptance, MessageId, PeerAction, PeerId, ReportSource}; use logging::crit; @@ -3248,25 +3251,166 @@ impl NetworkBeaconProcessor { } } - pub async fn process_gossip_execution_payload( + #[allow(clippy::too_many_arguments)] + #[instrument( + name = "lh_process_execution_payload_envelope", + parent = None, + level = "debug", + skip_all, + fields(beacon_block_root = tracing::field::Empty), + )] + pub async fn process_gossip_execution_payload_envelope( + self: Arc, + message_id: MessageId, + peer_id: PeerId, + envelope: Arc>, + seen_timestamp: Duration, + ) { + if let Some(gossip_verified_envelope) = self + .process_gossip_unverified_execution_payload_envelope( + message_id, + peer_id, + envelope.clone(), + seen_timestamp, + ) + .await + { + let beacon_block_root = gossip_verified_envelope.signed_envelope.beacon_block_root(); + + Span::current().record("beacon_block_root", beacon_block_root.to_string()); + + // TODO(gloas) in process_gossip_block here we check_and_insert on the duplicate cache + // before calling gossip_verified_block. We need this to ensure we dont try to execute the + // payload multiple times. 
+ + self.process_gossip_verified_execution_payload_envelope( + peer_id, + gossip_verified_envelope, + ) + .await; + } + } + + async fn process_gossip_unverified_execution_payload_envelope( self: &Arc, message_id: MessageId, peer_id: PeerId, - execution_payload: SignedExecutionPayloadEnvelope, + envelope: Arc>, + seen_duration: Duration, + ) -> Option> { + let envelope_delay = + get_slot_delay_ms(seen_duration, envelope.slot(), &self.chain.slot_clock); + + let verification_result = self + .chain + .clone() + .verify_envelope_for_gossip(envelope.clone()) + .await; + + let verified_envelope = match verification_result { + Ok(verified_envelope) => { + metrics::set_gauge( + &metrics::ENVELOPE_DELAY_GOSSIP, + envelope_delay.as_millis() as i64, + ); + + // Write the time the envelope was observed into the delay cache. + self.chain.envelope_times_cache.write().set_time_observed( + verified_envelope.signed_envelope.beacon_block_root(), + envelope.slot(), + seen_duration, + Some(peer_id.to_string()), + ); + + info!( + slot = %verified_envelope.signed_envelope.slot(), + root = ?verified_envelope.signed_envelope.beacon_block_root(), + "New envelope received" + ); + + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Accept); + + verified_envelope + } + // TODO(gloas) penalize peers accordingly + Err(_) => return None, + }; + + let envelope_slot = verified_envelope.signed_envelope.slot(); + let beacon_block_root = verified_envelope.signed_envelope.beacon_block_root(); + match self.chain.slot() { + // We only need to do a simple check about the envelope slot vs the current slot because + // `verify_envelope_for_gossip` already ensures that the envelope slot is within tolerance + // for envelope imports. 
+ Ok(current_slot) if envelope_slot > current_slot => { + warn!( + ?envelope_slot, + ?beacon_block_root, + msg = "if this happens consistently, check system clock", + "envelope arrived early" + ); + + // TODO(gloas) update metrics to note how early the envelope arrived + + let inner_self = self.clone(); + let _process_fn = Box::pin(async move { + inner_self + .process_gossip_verified_execution_payload_envelope( + peer_id, + verified_envelope, + ) + .await; + }); + + // TODO(gloas) send to reprocess queue + None + } + Ok(_) => Some(verified_envelope), + Err(e) => { + error!( + error = ?e, + %envelope_slot, + ?beacon_block_root, + location = "envelope gossip", + "Failed to defer envelope import" + ); + None + } + } + } + + async fn process_gossip_verified_execution_payload_envelope( + self: Arc, + _peer_id: PeerId, + verified_envelope: GossipVerifiedEnvelope, ) { - // TODO(EIP-7732): Implement proper execution payload envelope gossip processing. - // This should integrate with the envelope_verification.rs module once it's implemented. 
+ let _processing_start_time = Instant::now(); + let beacon_block_root = verified_envelope.signed_envelope.beacon_block_root(); - trace!( - %peer_id, - builder_index = execution_payload.message.builder_index, - slot = %execution_payload.message.slot, - beacon_block_root = %execution_payload.message.beacon_block_root, - "Processing execution payload envelope" - ); + #[allow(clippy::result_large_err)] + let result = self + .chain + .process_execution_payload_envelope( + beacon_block_root, + verified_envelope, + NotifyExecutionLayer::Yes, + BlockImportSource::Gossip, + || Ok(()), + ) + .await; - // For now, ignore all envelopes since verification is not implemented - self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); + // TODO(gloas) metrics + // register_process_result_metrics(&result, metrics::BlockSource::Gossip, "envelope"); + + match &result { + Ok(AvailabilityProcessingStatus::Imported(_)) + | Ok(AvailabilityProcessingStatus::MissingComponents(_, _)) => { + // Nothing to do + } + Err(_) => { + // TODO(gloas) implement peer penalties + } + } } pub fn process_gossip_execution_payload_bid( diff --git a/beacon_node/network/src/network_beacon_processor/mod.rs b/beacon_node/network/src/network_beacon_processor/mod.rs index e1adf860de..357d6c08fd 100644 --- a/beacon_node/network/src/network_beacon_processor/mod.rs +++ b/beacon_node/network/src/network_beacon_processor/mod.rs @@ -429,11 +429,17 @@ impl NetworkBeaconProcessor { message_id: MessageId, peer_id: PeerId, execution_payload: Box>, + seen_timestamp: Duration, ) -> Result<(), Error> { let processor = self.clone(); let process_fn = async move { processor - .process_gossip_execution_payload(message_id, peer_id, *execution_payload) + .process_gossip_execution_payload_envelope( + message_id, + peer_id, + Arc::new(*execution_payload), + seen_timestamp, + ) .await }; diff --git a/beacon_node/network/src/router.rs b/beacon_node/network/src/router.rs index 8373dec322..77d64c92e6 100644 --- 
a/beacon_node/network/src/router.rs +++ b/beacon_node/network/src/router.rs @@ -493,6 +493,7 @@ impl Router { message_id, peer_id, signed_execution_payload_envelope, + timestamp_now(), ), ) } From 71f6eab51f5c5e58afc9c3f4fbfbca4dfc605025 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 4 Mar 2026 15:50:42 +1100 Subject: [PATCH 07/43] Bump deps --- Cargo.lock | 79 ++++++++++++++++++++++++++---------------------------- Cargo.toml | 9 +++++-- 2 files changed, 45 insertions(+), 43 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 40c550f4c6..1795de0bc1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1909,7 +1909,7 @@ version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fde0e0ec90c9dfb3b4b1a0891a7dcd0e2bffde2f7efed5fe7c9bb00e5bfb915e" dependencies = [ - "windows-sys 0.59.0", + "windows-sys 0.48.0", ] [[package]] @@ -2442,7 +2442,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8d162beedaa69905488a8da94f5ac3edb4dd4788b732fadb7bd120b2625c1976" dependencies = [ "data-encoding", - "syn 2.0.111", + "syn 1.0.109", ] [[package]] @@ -3985,11 +3985,11 @@ dependencies = [ [[package]] name = "hashlink" -version = "0.10.0" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7382cf6263419f2d8df38c55d7da83da5c18aef87fc7a7fc1fb1e344edfe14c1" +checksum = "ea0b22561a9c04a7cb1a302c013e0259cd3b4bb619f145b32f72b8b4bcbed230" dependencies = [ - "hashbrown 0.15.5", + "hashbrown 0.16.1", ] [[package]] @@ -4996,7 +4996,7 @@ dependencies = [ [[package]] name = "libp2p" version = "0.56.1" -source = "git+https://github.com/libp2p/rust-libp2p.git#5e3519fb66b92c7f7c0dc744ab360fd8b669fe54" +source = "git+https://github.com/sigp/rust-libp2p.git?rev=f88e43de9eba00b416d0374b1a1fb2de47b65864#f88e43de9eba00b416d0374b1a1fb2de47b65864" dependencies = [ "bytes", "either", @@ -5027,7 +5027,7 @@ dependencies = [ [[package]] name = "libp2p-allow-block-list" version = "0.6.0" 
-source = "git+https://github.com/libp2p/rust-libp2p.git#5e3519fb66b92c7f7c0dc744ab360fd8b669fe54" +source = "git+https://github.com/sigp/rust-libp2p.git?rev=f88e43de9eba00b416d0374b1a1fb2de47b65864#f88e43de9eba00b416d0374b1a1fb2de47b65864" dependencies = [ "libp2p-core", "libp2p-identity", @@ -5037,7 +5037,7 @@ dependencies = [ [[package]] name = "libp2p-connection-limits" version = "0.6.0" -source = "git+https://github.com/libp2p/rust-libp2p.git#5e3519fb66b92c7f7c0dc744ab360fd8b669fe54" +source = "git+https://github.com/sigp/rust-libp2p.git?rev=f88e43de9eba00b416d0374b1a1fb2de47b65864#f88e43de9eba00b416d0374b1a1fb2de47b65864" dependencies = [ "libp2p-core", "libp2p-identity", @@ -5047,7 +5047,7 @@ dependencies = [ [[package]] name = "libp2p-core" version = "0.43.2" -source = "git+https://github.com/libp2p/rust-libp2p.git#5e3519fb66b92c7f7c0dc744ab360fd8b669fe54" +source = "git+https://github.com/sigp/rust-libp2p.git?rev=f88e43de9eba00b416d0374b1a1fb2de47b65864#f88e43de9eba00b416d0374b1a1fb2de47b65864" dependencies = [ "either", "fnv", @@ -5071,7 +5071,7 @@ dependencies = [ [[package]] name = "libp2p-dns" version = "0.44.0" -source = "git+https://github.com/libp2p/rust-libp2p.git#5e3519fb66b92c7f7c0dc744ab360fd8b669fe54" +source = "git+https://github.com/sigp/rust-libp2p.git?rev=f88e43de9eba00b416d0374b1a1fb2de47b65864#f88e43de9eba00b416d0374b1a1fb2de47b65864" dependencies = [ "async-trait", "futures", @@ -5086,7 +5086,7 @@ dependencies = [ [[package]] name = "libp2p-gossipsub" version = "0.50.0" -source = "git+https://github.com/libp2p/rust-libp2p.git#5e3519fb66b92c7f7c0dc744ab360fd8b669fe54" +source = "git+https://github.com/sigp/rust-libp2p.git?rev=f88e43de9eba00b416d0374b1a1fb2de47b65864#f88e43de9eba00b416d0374b1a1fb2de47b65864" dependencies = [ "async-channel 2.5.0", "asynchronous-codec", @@ -5098,7 +5098,7 @@ dependencies = [ "futures", "futures-timer", "getrandom 0.2.16", - "hashlink 0.10.0", + "hashlink 0.11.0", "hex_fmt", "libp2p-core", "libp2p-identity", 
@@ -5116,7 +5116,7 @@ dependencies = [ [[package]] name = "libp2p-identify" version = "0.47.0" -source = "git+https://github.com/libp2p/rust-libp2p.git#5e3519fb66b92c7f7c0dc744ab360fd8b669fe54" +source = "git+https://github.com/sigp/rust-libp2p.git?rev=f88e43de9eba00b416d0374b1a1fb2de47b65864#f88e43de9eba00b416d0374b1a1fb2de47b65864" dependencies = [ "asynchronous-codec", "either", @@ -5156,7 +5156,7 @@ dependencies = [ [[package]] name = "libp2p-mdns" version = "0.48.0" -source = "git+https://github.com/libp2p/rust-libp2p.git#5e3519fb66b92c7f7c0dc744ab360fd8b669fe54" +source = "git+https://github.com/sigp/rust-libp2p.git?rev=f88e43de9eba00b416d0374b1a1fb2de47b65864#f88e43de9eba00b416d0374b1a1fb2de47b65864" dependencies = [ "futures", "hickory-proto", @@ -5174,7 +5174,7 @@ dependencies = [ [[package]] name = "libp2p-metrics" version = "0.17.1" -source = "git+https://github.com/libp2p/rust-libp2p.git#5e3519fb66b92c7f7c0dc744ab360fd8b669fe54" +source = "git+https://github.com/sigp/rust-libp2p.git?rev=f88e43de9eba00b416d0374b1a1fb2de47b65864#f88e43de9eba00b416d0374b1a1fb2de47b65864" dependencies = [ "futures", "libp2p-core", @@ -5190,7 +5190,7 @@ dependencies = [ [[package]] name = "libp2p-mplex" version = "0.43.1" -source = "git+https://github.com/libp2p/rust-libp2p.git#5e3519fb66b92c7f7c0dc744ab360fd8b669fe54" +source = "git+https://github.com/sigp/rust-libp2p.git?rev=f88e43de9eba00b416d0374b1a1fb2de47b65864#f88e43de9eba00b416d0374b1a1fb2de47b65864" dependencies = [ "asynchronous-codec", "bytes", @@ -5208,7 +5208,7 @@ dependencies = [ [[package]] name = "libp2p-noise" version = "0.46.1" -source = "git+https://github.com/libp2p/rust-libp2p.git#5e3519fb66b92c7f7c0dc744ab360fd8b669fe54" +source = "git+https://github.com/sigp/rust-libp2p.git?rev=f88e43de9eba00b416d0374b1a1fb2de47b65864#f88e43de9eba00b416d0374b1a1fb2de47b65864" dependencies = [ "asynchronous-codec", "bytes", @@ -5230,7 +5230,7 @@ dependencies = [ [[package]] name = "libp2p-quic" version = "0.13.0" 
-source = "git+https://github.com/libp2p/rust-libp2p.git#5e3519fb66b92c7f7c0dc744ab360fd8b669fe54" +source = "git+https://github.com/sigp/rust-libp2p.git?rev=f88e43de9eba00b416d0374b1a1fb2de47b65864#f88e43de9eba00b416d0374b1a1fb2de47b65864" dependencies = [ "futures", "futures-timer", @@ -5250,14 +5250,14 @@ dependencies = [ [[package]] name = "libp2p-swarm" -version = "0.47.0" -source = "git+https://github.com/libp2p/rust-libp2p.git#5e3519fb66b92c7f7c0dc744ab360fd8b669fe54" +version = "0.47.1" +source = "git+https://github.com/sigp/rust-libp2p.git?rev=f88e43de9eba00b416d0374b1a1fb2de47b65864#f88e43de9eba00b416d0374b1a1fb2de47b65864" dependencies = [ "either", "fnv", "futures", "futures-timer", - "hashlink 0.10.0", + "hashlink 0.11.0", "libp2p-core", "libp2p-identity", "libp2p-swarm-derive", @@ -5272,7 +5272,7 @@ dependencies = [ [[package]] name = "libp2p-swarm-derive" version = "0.35.1" -source = "git+https://github.com/libp2p/rust-libp2p.git#5e3519fb66b92c7f7c0dc744ab360fd8b669fe54" +source = "git+https://github.com/sigp/rust-libp2p.git?rev=f88e43de9eba00b416d0374b1a1fb2de47b65864#f88e43de9eba00b416d0374b1a1fb2de47b65864" dependencies = [ "heck", "quote", @@ -5282,7 +5282,7 @@ dependencies = [ [[package]] name = "libp2p-tcp" version = "0.44.1" -source = "git+https://github.com/libp2p/rust-libp2p.git#5e3519fb66b92c7f7c0dc744ab360fd8b669fe54" +source = "git+https://github.com/sigp/rust-libp2p.git?rev=f88e43de9eba00b416d0374b1a1fb2de47b65864#f88e43de9eba00b416d0374b1a1fb2de47b65864" dependencies = [ "futures", "futures-timer", @@ -5297,7 +5297,7 @@ dependencies = [ [[package]] name = "libp2p-tls" version = "0.6.2" -source = "git+https://github.com/libp2p/rust-libp2p.git#5e3519fb66b92c7f7c0dc744ab360fd8b669fe54" +source = "git+https://github.com/sigp/rust-libp2p.git?rev=f88e43de9eba00b416d0374b1a1fb2de47b65864#f88e43de9eba00b416d0374b1a1fb2de47b65864" dependencies = [ "futures", "futures-rustls", @@ -5315,7 +5315,7 @@ dependencies = [ [[package]] name = 
"libp2p-upnp" version = "0.6.0" -source = "git+https://github.com/libp2p/rust-libp2p.git#5e3519fb66b92c7f7c0dc744ab360fd8b669fe54" +source = "git+https://github.com/sigp/rust-libp2p.git?rev=f88e43de9eba00b416d0374b1a1fb2de47b65864#f88e43de9eba00b416d0374b1a1fb2de47b65864" dependencies = [ "futures", "futures-timer", @@ -5329,7 +5329,7 @@ dependencies = [ [[package]] name = "libp2p-yamux" version = "0.47.0" -source = "git+https://github.com/libp2p/rust-libp2p.git#5e3519fb66b92c7f7c0dc744ab360fd8b669fe54" +source = "git+https://github.com/sigp/rust-libp2p.git?rev=f88e43de9eba00b416d0374b1a1fb2de47b65864#f88e43de9eba00b416d0374b1a1fb2de47b65864" dependencies = [ "either", "futures", @@ -6021,7 +6021,7 @@ dependencies = [ [[package]] name = "multistream-select" version = "0.13.0" -source = "git+https://github.com/libp2p/rust-libp2p.git#5e3519fb66b92c7f7c0dc744ab360fd8b669fe54" +source = "git+https://github.com/sigp/rust-libp2p.git?rev=f88e43de9eba00b416d0374b1a1fb2de47b65864#f88e43de9eba00b416d0374b1a1fb2de47b65864" dependencies = [ "bytes", "futures", @@ -7129,7 +7129,7 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quick-protobuf" version = "0.8.1" -source = "git+https://github.com/sigp/quick-protobuf.git?rev=681f413312404ab6e51f0b46f39b0075c6f4ebfd#681f413312404ab6e51f0b46f39b0075c6f4ebfd" +source = "git+https://github.com/sigp/quick-protobuf.git?rev=87c4ccb9bb2af494de375f5f6c62850badd26304#87c4ccb9bb2af494de375f5f6c62850badd26304" dependencies = [ "byteorder", ] @@ -7137,7 +7137,7 @@ dependencies = [ [[package]] name = "quick-protobuf-codec" version = "0.3.1" -source = "git+https://github.com/libp2p/rust-libp2p.git#5e3519fb66b92c7f7c0dc744ab360fd8b669fe54" +source = "git+https://github.com/sigp/rust-libp2p.git?rev=f88e43de9eba00b416d0374b1a1fb2de47b65864#f88e43de9eba00b416d0374b1a1fb2de47b65864" dependencies = [ "asynchronous-codec", "bytes", @@ -7149,8 +7149,7 @@ dependencies = [ [[package]] name = "quinn" 
version = "0.11.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9e20a958963c291dc322d98411f541009df2ced7b5a4f2bd52337638cfccf20" +source = "git+https://github.com/sigp/quinn?rev=59af87979c8411864c1cb68613222f54ed2930a7#59af87979c8411864c1cb68613222f54ed2930a7" dependencies = [ "bytes", "cfg_aliases", @@ -7160,7 +7159,7 @@ dependencies = [ "quinn-udp", "rustc-hash 2.1.1", "rustls 0.23.35", - "socket2 0.6.1", + "socket2 0.5.10", "thiserror 2.0.17", "tokio", "tracing", @@ -7170,8 +7169,7 @@ dependencies = [ [[package]] name = "quinn-proto" version = "0.11.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1906b49b0c3bc04b5fe5d86a77925ae6524a19b816ae38ce1e426255f1d8a31" +source = "git+https://github.com/sigp/quinn?rev=59af87979c8411864c1cb68613222f54ed2930a7#59af87979c8411864c1cb68613222f54ed2930a7" dependencies = [ "bytes", "getrandom 0.3.4", @@ -7191,15 +7189,14 @@ dependencies = [ [[package]] name = "quinn-udp" version = "0.5.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "addec6a0dcad8a8d96a771f815f0eaf55f9d1805756410b39f5fa81332574cbd" +source = "git+https://github.com/sigp/quinn?rev=59af87979c8411864c1cb68613222f54ed2930a7#59af87979c8411864c1cb68613222f54ed2930a7" dependencies = [ "cfg_aliases", "libc", "once_cell", - "socket2 0.6.1", + "socket2 0.5.10", "tracing", - "windows-sys 0.60.2", + "windows-sys 0.52.0", ] [[package]] @@ -7822,7 +7819,7 @@ dependencies = [ [[package]] name = "rw-stream-sink" version = "0.4.0" -source = "git+https://github.com/libp2p/rust-libp2p.git#5e3519fb66b92c7f7c0dc744ab360fd8b669fe54" +source = "git+https://github.com/sigp/rust-libp2p.git?rev=f88e43de9eba00b416d0374b1a1fb2de47b65864#f88e43de9eba00b416d0374b1a1fb2de47b65864" dependencies = [ "futures", "pin-project", @@ -10132,7 +10129,7 @@ version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" dependencies = [ - "windows-sys 0.61.2", + "windows-sys 0.48.0", ] [[package]] @@ -10607,7 +10604,7 @@ dependencies = [ [[package]] name = "yamux" version = "0.13.8" -source = "git+https://github.com/sigp/rust-yamux?rev=575b17c0f44f4253079a6bafaa2de74ca1d6dfaa#575b17c0f44f4253079a6bafaa2de74ca1d6dfaa" +source = "git+https://github.com/sigp/rust-yamux?rev=29efa6aebd4bdfcb16bfb21969ec0c785e570b74#29efa6aebd4bdfcb16bfb21969ec0c785e570b74" dependencies = [ "futures", "log", diff --git a/Cargo.toml b/Cargo.toml index 5f6f43d2f2..ab33cb6310 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -302,5 +302,10 @@ inherits = "release" debug = true [patch.crates-io] -quick-protobuf = { git = "https://github.com/sigp/quick-protobuf.git", rev = "681f413312404ab6e51f0b46f39b0075c6f4ebfd" } -yamux = { git = "https://github.com/sigp/rust-yamux", rev = "575b17c0f44f4253079a6bafaa2de74ca1d6dfaa" } +quick-protobuf = { git = "https://github.com/sigp/quick-protobuf.git", rev = "87c4ccb9bb2af494de375f5f6c62850badd26304" } +yamux = { git = "https://github.com/sigp/rust-yamux", rev = "29efa6aebd4bdfcb16bfb21969ec0c785e570b74" } +quinn = { git = "https://github.com/sigp/quinn", rev = "59af87979c8411864c1cb68613222f54ed2930a7" } + +[patch."https://github.com/libp2p/rust-libp2p.git"] +libp2p = { git = "https://github.com/sigp/rust-libp2p.git", rev = "f88e43de9eba00b416d0374b1a1fb2de47b65864" } +libp2p-mplex = { git = "https://github.com/sigp/rust-libp2p.git", rev = "f88e43de9eba00b416d0374b1a1fb2de47b65864" } From ac1db1d2e23f849f7937bbc38cb9c85445d837dc Mon Sep 17 00:00:00 2001 From: chonghe <44791194+chong-he@users.noreply.github.com> Date: Thu, 5 Mar 2026 11:48:30 +0800 Subject: [PATCH 08/43] update cargo-sort (#8933) Co-Authored-By: Tan Chee Keong --- Cargo.toml | 30 +++--------------------------- common/logging/Cargo.toml | 2 +- common/malloc_utils/Cargo.toml | 5 +---- 3 files changed, 5 insertions(+), 32 deletions(-) diff --git 
a/Cargo.toml b/Cargo.toml index ab33cb6310..82db6dbfc4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -166,20 +166,7 @@ initialized_validators = { path = "validator_client/initialized_validators" } int_to_bytes = { path = "consensus/int_to_bytes" } itertools = "0.10" kzg = { path = "crypto/kzg" } -libp2p = { git = "https://github.com/libp2p/rust-libp2p.git", default-features = false, features = [ - "identify", - "yamux", - "noise", - "dns", - "tcp", - "tokio", - "secp256k1", - "macros", - "metrics", - "quic", - "upnp", - "gossipsub", -] } +libp2p = { git = "https://github.com/libp2p/rust-libp2p.git", default-features = false, features = ["identify", "yamux", "noise", "dns", "tcp", "tokio", "secp256k1", "macros", "metrics", "quic", "upnp", "gossipsub"] } libsecp256k1 = "0.7" lighthouse_network = { path = "beacon_node/lighthouse_network" } lighthouse_validator_store = { path = "validator_client/lighthouse_validator_store" } @@ -219,13 +206,7 @@ r2d2 = "0.8" rand = "0.9.0" rayon = "1.7" regex = "1" -reqwest = { version = "0.12", default-features = false, features = [ - "blocking", - "json", - "stream", - "rustls-tls", - "native-tls-vendored", -] } +reqwest = { version = "0.12", default-features = false, features = ["blocking", "json", "stream", "rustls-tls", "native-tls-vendored"] } ring = "0.17" rpds = "0.11" rusqlite = { version = "0.28", features = ["bundled"] } @@ -254,12 +235,7 @@ sysinfo = "0.26" system_health = { path = "common/system_health" } task_executor = { path = "common/task_executor" } tempfile = "3" -tokio = { version = "1", features = [ - "rt-multi-thread", - "sync", - "signal", - "macros", -] } +tokio = { version = "1", features = ["rt-multi-thread", "sync", "signal", "macros"] } tokio-stream = { version = "0.1", features = ["sync"] } tokio-util = { version = "0.7", features = ["codec", "compat", "time"] } tracing = "0.1.40" diff --git a/common/logging/Cargo.toml b/common/logging/Cargo.toml index 41c82dbd61..cbebd1a501 100644 --- 
a/common/logging/Cargo.toml +++ b/common/logging/Cargo.toml @@ -13,7 +13,7 @@ logroller = { workspace = true } metrics = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } -tokio = { workspace = true, features = [ "time" ] } +tokio = { workspace = true, features = ["time"] } tracing = { workspace = true } tracing-appender = { workspace = true } tracing-core = { workspace = true } diff --git a/common/malloc_utils/Cargo.toml b/common/malloc_utils/Cargo.toml index 1052128852..e90490bf09 100644 --- a/common/malloc_utils/Cargo.toml +++ b/common/malloc_utils/Cargo.toml @@ -35,7 +35,4 @@ tikv-jemallocator = { version = "0.6.0", optional = true, features = ["stats"] } # Jemalloc's background_threads feature requires Linux (pthreads). [target.'cfg(target_os = "linux")'.dependencies] -tikv-jemallocator = { version = "0.6.0", optional = true, features = [ - "stats", - "background_threads", -] } +tikv-jemallocator = { version = "0.6.0", optional = true, features = ["stats", "background_threads"] } From 5a174f2a00b33d4905bcc241749b96965541132a Mon Sep 17 00:00:00 2001 From: Mac L Date: Fri, 6 Mar 2026 09:54:43 +0200 Subject: [PATCH 09/43] Fix lints for Rust v1.94.0 (#8939) Following the release of Rust v1.94.0 there are new Clippy lints which do not pass and are blocking CI (which pulls in the latest version of Rust) This is pretty much the minimum just to get CI running again. Most of the errors involve error types being too large. For now I've added allows but later it might be worth doing a refactor to `Box` or otherwise remove the problematic error types. 
Co-Authored-By: Mac L --- beacon_node/beacon_chain/tests/attestation_verification.rs | 1 + beacon_node/beacon_chain/tests/payload_invalidation.rs | 1 + beacon_node/beacon_chain/tests/store_tests.rs | 1 + beacon_node/execution_layer/src/lib.rs | 2 +- beacon_node/http_api/src/lib.rs | 1 + slasher/service/src/lib.rs | 1 + 6 files changed, 6 insertions(+), 1 deletion(-) diff --git a/beacon_node/beacon_chain/tests/attestation_verification.rs b/beacon_node/beacon_chain/tests/attestation_verification.rs index 208798dfdf..9553965ae6 100644 --- a/beacon_node/beacon_chain/tests/attestation_verification.rs +++ b/beacon_node/beacon_chain/tests/attestation_verification.rs @@ -1,4 +1,5 @@ #![cfg(not(debug_assertions))] +#![allow(clippy::result_large_err)] use beacon_chain::attestation_verification::{ Error, batch_verify_aggregated_attestations, batch_verify_unaggregated_attestations, diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index 1204412d65..11c916e850 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -1,4 +1,5 @@ #![cfg(not(debug_assertions))] +#![allow(clippy::result_large_err)] use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::{ diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index ea5f735bde..e618873bdd 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -1,4 +1,5 @@ #![cfg(not(debug_assertions))] +#![allow(clippy::result_large_err)] use beacon_chain::attestation_verification::Error as AttnError; use beacon_chain::block_verification_types::RpcBlock; diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 33b83aab09..024c6805b9 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ 
-2205,7 +2205,7 @@ fn verify_builder_bid( .cloned() .map(|withdrawals| { Withdrawals::::try_from(withdrawals) - .map_err(InvalidBuilderPayload::SszTypesError) + .map_err(|e| Box::new(InvalidBuilderPayload::SszTypesError(e))) .map(|w| w.tree_hash_root()) }) .transpose()?; diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 095c52fb29..69aa7cd91f 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -1,3 +1,4 @@ +#![allow(clippy::result_large_err)] //! This crate contains a HTTP server which serves the endpoints listed here: //! //! https://github.com/ethereum/beacon-APIs diff --git a/slasher/service/src/lib.rs b/slasher/service/src/lib.rs index ac15b49ee9..69ec59aa2c 100644 --- a/slasher/service/src/lib.rs +++ b/slasher/service/src/lib.rs @@ -1,3 +1,4 @@ +#![allow(clippy::result_large_err)] mod service; pub use service::SlasherService; From 6a92761f441e5a3a9169454df11025cb1a32d751 Mon Sep 17 00:00:00 2001 From: Akihito Nakano Date: Sat, 7 Mar 2026 08:09:33 +0900 Subject: [PATCH 10/43] Fix cargo-sort errors (#8945) The `cargo-sort` job in CI is [failing](https://github.com/sigp/lighthouse/actions/runs/22781651620/job/66088700318?pr=8932) since [cargo-sort v2.1.1](https://github.com/DevinR528/cargo-sort/releases/tag/v2.1.1) has been released, which reports new errors for our Cargo.toml files. Ran `cargo-sort` formatter locally with the new version. 
Co-Authored-By: ackintosh --- account_manager/Cargo.toml | 5 +---- beacon_node/Cargo.toml | 13 +++++-------- beacon_node/beacon_chain/Cargo.toml | 9 ++++++--- common/logging/Cargo.toml | 3 ++- consensus/types/Cargo.toml | 5 +---- 5 files changed, 15 insertions(+), 20 deletions(-) diff --git a/account_manager/Cargo.toml b/account_manager/Cargo.toml index 8dd50cbc6e..05e6f12554 100644 --- a/account_manager/Cargo.toml +++ b/account_manager/Cargo.toml @@ -1,10 +1,7 @@ [package] name = "account_manager" version = { workspace = true } -authors = [ - "Paul Hauner ", - "Luke Anderson ", -] +authors = ["Paul Hauner ", "Luke Anderson "] edition = { workspace = true } [dependencies] diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 5352814dd5..ebefa6a451 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,10 +1,7 @@ [package] name = "beacon_node" version = { workspace = true } -authors = [ - "Paul Hauner ", - "Age Manning ", "Age Manning "] edition = { workspace = true } [features] -test_logger = [] # Print log output to stderr when running tests instead of dropping it +# Print log output to stderr when running tests instead of dropping it. 
+test_logger = [] [dependencies] chrono = { version = "0.4", default-features = false, features = ["clock", "std"] } diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index feea855c84..e5c5662d71 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -1,10 +1,7 @@ [package] name = "types" version = "0.2.1" -authors = [ - "Paul Hauner ", - "Age Manning ", -] +authors = ["Paul Hauner ", "Age Manning "] edition = { workspace = true } [features] From 3deab9b0410233c1d57bddfaa9903cc6fbdaa958 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 9 Mar 2026 11:27:08 +1100 Subject: [PATCH 11/43] Release v8.1.2 --- Cargo.lock | 14 +++++++------- Cargo.toml | 2 +- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1795de0bc1..cba93f2fd5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4,7 +4,7 @@ version = 4 [[package]] name = "account_manager" -version = "8.1.1" +version = "8.1.2" dependencies = [ "account_utils", "bls", @@ -1276,7 +1276,7 @@ dependencies = [ [[package]] name = "beacon_node" -version = "8.1.1" +version = "8.1.2" dependencies = [ "account_utils", "beacon_chain", @@ -1513,7 +1513,7 @@ dependencies = [ [[package]] name = "boot_node" -version = "8.1.1" +version = "8.1.2" dependencies = [ "beacon_node", "bytes", @@ -4897,7 +4897,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lcli" -version = "8.1.1" +version = "8.1.2" dependencies = [ "account_utils", "beacon_chain", @@ -5383,7 +5383,7 @@ dependencies = [ [[package]] name = "lighthouse" -version = "8.1.1" +version = "8.1.2" dependencies = [ "account_manager", "account_utils", @@ -5515,7 +5515,7 @@ dependencies = [ [[package]] name = "lighthouse_version" -version = "8.1.1" +version = "8.1.2" dependencies = [ "regex", ] @@ -9619,7 +9619,7 @@ dependencies = [ [[package]] name = "validator_client" -version = "8.1.1" +version = "8.1.2" dependencies = [ "account_utils", 
"beacon_node_fallback", diff --git a/Cargo.toml b/Cargo.toml index 82db6dbfc4..f483e998c9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -91,7 +91,7 @@ resolver = "2" [workspace.package] edition = "2024" -version = "8.1.1" +version = "8.1.2" [workspace.dependencies] account_utils = { path = "common/account_utils" } From 9f3873f2bf242440d1ee8e06d078ea189b73e53b Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 10 Mar 2026 16:49:35 +1100 Subject: [PATCH 12/43] Fix syn in Cargo.lock --- Cargo.lock | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index d96021aaea..704039a175 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2461,7 +2461,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8d162beedaa69905488a8da94f5ac3edb4dd4788b732fadb7bd120b2625c1976" dependencies = [ "data-encoding", - "syn 1.0.109", + "syn 2.0.111", ] [[package]] From 081229b7488e37d472f768a3908a0c37fb320d7c Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 10 Mar 2026 18:57:51 +1100 Subject: [PATCH 13/43] Implement proposer duties v2 endpoint (#8918) Fix the issue with the `proposer_duties` endpoint using the wrong dependent root post-Fulu by implementing the new v2 endpoint: - https://github.com/ethereum/beacon-APIs/pull/563 We need to add this in time for Gloas, and then we can we can deprecate and remove v1. - Add a new API handler for the v2 endpoint - Add client code in the `eth2` crate - Update existing tests and add some new ones to confirm the different behaviour of v1 and v2 There's a bit of test duplication with v1, but this will be resolved once v1 and its tests are deleted. 
Co-Authored-By: Michael Sproul Co-Authored-By: Michael Sproul Co-Authored-By: chonghe <44791194+chong-he@users.noreply.github.com> --- beacon_node/http_api/src/lib.rs | 3 +- beacon_node/http_api/src/proposer_duties.rs | 99 ++++++-- beacon_node/http_api/src/validator/mod.rs | 17 +- .../http_api/tests/interactive_tests.rs | 234 ++++++++++++++++++ beacon_node/http_api/tests/tests.rs | 99 ++++++++ common/eth2/src/lib.rs | 18 ++ 6 files changed, 438 insertions(+), 32 deletions(-) diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 0a0ae683ca..b5b74a3840 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -263,6 +263,7 @@ pub fn prometheus_metrics() -> warp::filters::log::Log( // GET validator/duties/proposer/{epoch} let get_validator_duties_proposer = get_validator_duties_proposer( - eth_v1.clone(), + any_version.clone(), chain_filter.clone(), not_while_syncing_filter.clone(), task_spawner_filter.clone(), diff --git a/beacon_node/http_api/src/proposer_duties.rs b/beacon_node/http_api/src/proposer_duties.rs index 1ebb174785..0b0926f955 100644 --- a/beacon_node/http_api/src/proposer_duties.rs +++ b/beacon_node/http_api/src/proposer_duties.rs @@ -13,13 +13,45 @@ use slot_clock::SlotClock; use tracing::debug; use types::{Epoch, EthSpec, Hash256, Slot}; +/// Selects which dependent root to return in the API response. +/// +/// - `Legacy`: the block root at the last slot of epoch N-1 (v1 behaviour, for backwards compat). +/// - `True`: the fork-aware proposer shuffling decision root (v2 behaviour). Pre-Fulu this equals +/// the legacy root; post-Fulu it uses epoch N-2. +#[derive(Clone, Copy, PartialEq, Eq)] +enum DependentRootSelection { + Legacy, + True, +} + /// The struct that is returned to the requesting HTTP client. type ApiDuties = api_types::DutiesResponse>; -/// Handles a request from the HTTP API for proposer duties. +/// Handles a request from the HTTP API for v1 proposer duties. 
+/// +/// Returns the legacy dependent root (block root at end of epoch N-1) for backwards compatibility. pub fn proposer_duties( request_epoch: Epoch, chain: &BeaconChain, +) -> Result { + proposer_duties_internal(request_epoch, chain, DependentRootSelection::Legacy) +} + +/// Handles a request from the HTTP API for v2 proposer duties. +/// +/// Returns the true fork-aware dependent root. Pre-Fulu this equals the legacy root; post-Fulu it +/// uses epoch N-2 due to deterministic proposer lookahead with `min_seed_lookahead`. +pub fn proposer_duties_v2( + request_epoch: Epoch, + chain: &BeaconChain, +) -> Result { + proposer_duties_internal(request_epoch, chain, DependentRootSelection::True) +} + +fn proposer_duties_internal( + request_epoch: Epoch, + chain: &BeaconChain, + root_selection: DependentRootSelection, ) -> Result { let current_epoch = chain .slot_clock @@ -49,24 +81,29 @@ pub fn proposer_duties( if request_epoch == current_epoch || request_epoch == tolerant_current_epoch { // If we could consider ourselves in the `request_epoch` when allowing for clock disparity // tolerance then serve this request from the cache. - if let Some(duties) = try_proposer_duties_from_cache(request_epoch, chain)? { + if let Some(duties) = try_proposer_duties_from_cache(request_epoch, chain, root_selection)? + { Ok(duties) } else { debug!(%request_epoch, "Proposer cache miss"); - compute_and_cache_proposer_duties(request_epoch, chain) + compute_and_cache_proposer_duties(request_epoch, chain, root_selection) } } else if request_epoch == current_epoch .safe_add(1) .map_err(warp_utils::reject::arith_error)? 
{ - let (proposers, _dependent_root, legacy_dependent_root, execution_status, _fork) = + let (proposers, dependent_root, legacy_dependent_root, execution_status, _fork) = compute_proposer_duties_from_head(request_epoch, chain) .map_err(warp_utils::reject::unhandled_error)?; + let selected_root = match root_selection { + DependentRootSelection::Legacy => legacy_dependent_root, + DependentRootSelection::True => dependent_root, + }; convert_to_api_response( chain, request_epoch, - legacy_dependent_root, + selected_root, execution_status.is_optimistic_or_invalid(), proposers, ) @@ -84,7 +121,7 @@ pub fn proposer_duties( // request_epoch < current_epoch // // Queries about the past are handled with a slow path. - compute_historic_proposer_duties(request_epoch, chain) + compute_historic_proposer_duties(request_epoch, chain, root_selection) } } @@ -98,6 +135,7 @@ pub fn proposer_duties( fn try_proposer_duties_from_cache( request_epoch: Epoch, chain: &BeaconChain, + root_selection: DependentRootSelection, ) -> Result, warp::reject::Rejection> { let head = chain.canonical_head.cached_head(); let head_block = &head.snapshot.beacon_block; @@ -116,11 +154,14 @@ fn try_proposer_duties_from_cache( .beacon_state .proposer_shuffling_decision_root_at_epoch(request_epoch, head_block_root, &chain.spec) .map_err(warp_utils::reject::beacon_state_error)?; - let legacy_dependent_root = head - .snapshot - .beacon_state - .legacy_proposer_shuffling_decision_root_at_epoch(request_epoch, head_block_root) - .map_err(warp_utils::reject::beacon_state_error)?; + let selected_root = match root_selection { + DependentRootSelection::Legacy => head + .snapshot + .beacon_state + .legacy_proposer_shuffling_decision_root_at_epoch(request_epoch, head_block_root) + .map_err(warp_utils::reject::beacon_state_error)?, + DependentRootSelection::True => head_decision_root, + }; let execution_optimistic = chain .is_optimistic_or_invalid_head_block(head_block) .map_err(warp_utils::reject::unhandled_error)?; @@ 
-134,7 +175,7 @@ fn try_proposer_duties_from_cache( convert_to_api_response( chain, request_epoch, - legacy_dependent_root, + selected_root, execution_optimistic, indices.to_vec(), ) @@ -155,6 +196,7 @@ fn try_proposer_duties_from_cache( fn compute_and_cache_proposer_duties( current_epoch: Epoch, chain: &BeaconChain, + root_selection: DependentRootSelection, ) -> Result { let (indices, dependent_root, legacy_dependent_root, execution_status, fork) = compute_proposer_duties_from_head(current_epoch, chain) @@ -168,10 +210,14 @@ fn compute_and_cache_proposer_duties( .map_err(BeaconChainError::from) .map_err(warp_utils::reject::unhandled_error)?; + let selected_root = match root_selection { + DependentRootSelection::Legacy => legacy_dependent_root, + DependentRootSelection::True => dependent_root, + }; convert_to_api_response( chain, current_epoch, - legacy_dependent_root, + selected_root, execution_status.is_optimistic_or_invalid(), indices, ) @@ -182,6 +228,7 @@ fn compute_and_cache_proposer_duties( fn compute_historic_proposer_duties( epoch: Epoch, chain: &BeaconChain, + root_selection: DependentRootSelection, ) -> Result { // If the head is quite old then it might still be relevant for a historical request. // @@ -219,9 +266,9 @@ fn compute_historic_proposer_duties( }; // Ensure the state lookup was correct. - if state.current_epoch() != epoch { + if state.current_epoch() != epoch && state.current_epoch() + 1 != epoch { return Err(warp_utils::reject::custom_server_error(format!( - "state epoch {} not equal to request epoch {}", + "state from epoch {} cannot serve request epoch {}", state.current_epoch(), epoch ))); @@ -234,18 +281,18 @@ fn compute_historic_proposer_duties( // We can supply the genesis block root as the block root since we know that the only block that // decides its own root is the genesis block. 
- let legacy_dependent_root = state - .legacy_proposer_shuffling_decision_root_at_epoch(epoch, chain.genesis_block_root) - .map_err(BeaconChainError::from) - .map_err(warp_utils::reject::unhandled_error)?; + let selected_root = match root_selection { + DependentRootSelection::Legacy => state + .legacy_proposer_shuffling_decision_root_at_epoch(epoch, chain.genesis_block_root) + .map_err(BeaconChainError::from) + .map_err(warp_utils::reject::unhandled_error)?, + DependentRootSelection::True => state + .proposer_shuffling_decision_root_at_epoch(epoch, chain.genesis_block_root, &chain.spec) + .map_err(BeaconChainError::from) + .map_err(warp_utils::reject::unhandled_error)?, + }; - convert_to_api_response( - chain, - epoch, - legacy_dependent_root, - execution_optimistic, - indices, - ) + convert_to_api_response(chain, epoch, selected_root, execution_optimistic, indices) } /// Converts the internal representation of proposer duties into one that is compatible with the diff --git a/beacon_node/http_api/src/validator/mod.rs b/beacon_node/http_api/src/validator/mod.rs index a9082df715..3d96b85870 100644 --- a/beacon_node/http_api/src/validator/mod.rs +++ b/beacon_node/http_api/src/validator/mod.rs @@ -6,7 +6,7 @@ use crate::utils::{ AnyVersionFilter, ChainFilter, EthV1Filter, NetworkTxFilter, NotWhileSyncingFilter, ResponseFilter, TaskSpawnerFilter, ValidatorSubscriptionTxFilter, publish_network_message, }; -use crate::version::V3; +use crate::version::{V1, V2, V3, unsupported_version_rejection}; use crate::{StateId, attester_duties, proposer_duties, sync_committees}; use beacon_chain::attestation_verification::VerifiedAttestation; use beacon_chain::validator_monitor::timestamp_now; @@ -971,12 +971,12 @@ pub fn post_validator_aggregate_and_proofs( // GET validator/duties/proposer/{epoch} pub fn get_validator_duties_proposer( - eth_v1: EthV1Filter, + any_version: AnyVersionFilter, chain_filter: ChainFilter, not_while_syncing_filter: NotWhileSyncingFilter, 
task_spawner_filter: TaskSpawnerFilter, ) -> ResponseFilter { - eth_v1 + any_version .and(warp::path("validator")) .and(warp::path("duties")) .and(warp::path("proposer")) @@ -990,13 +990,20 @@ pub fn get_validator_duties_proposer( .and(task_spawner_filter) .and(chain_filter) .then( - |epoch: Epoch, + |endpoint_version: EndpointVersion, + epoch: Epoch, not_synced_filter: Result<(), Rejection>, task_spawner: TaskSpawner, chain: Arc>| { task_spawner.blocking_json_task(Priority::P0, move || { not_synced_filter?; - proposer_duties::proposer_duties(epoch, &chain) + if endpoint_version == V1 { + proposer_duties::proposer_duties(epoch, &chain) + } else if endpoint_version == V2 { + proposer_duties::proposer_duties_v2(epoch, &chain) + } else { + Err(unsupported_version_rejection(endpoint_version)) + } }) }, ) diff --git a/beacon_node/http_api/tests/interactive_tests.rs b/beacon_node/http_api/tests/interactive_tests.rs index a18dd10464..e0e4029875 100644 --- a/beacon_node/http_api/tests/interactive_tests.rs +++ b/beacon_node/http_api/tests/interactive_tests.rs @@ -1053,6 +1053,240 @@ async fn proposer_duties_with_gossip_tolerance() { ); } +// Test that a request for next epoch v2 proposer duties succeeds when the current slot clock is +// within gossip clock disparity (500ms) of the new epoch. 
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn proposer_duties_v2_with_gossip_tolerance() { + let validator_count = 24; + + let tester = InteractiveTester::::new(None, validator_count).await; + let harness = &tester.harness; + let spec = &harness.spec; + let client = &tester.client; + + let num_initial = 4 * E::slots_per_epoch() - 1; + let next_epoch_start_slot = Slot::new(num_initial + 1); + + harness.advance_slot(); + harness + .extend_chain_with_sync( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + SyncCommitteeStrategy::NoValidators, + LightClientStrategy::Disabled, + ) + .await; + + assert_eq!(harness.chain.slot().unwrap(), num_initial); + + // Set the clock to just before the next epoch. + harness.chain.slot_clock.advance_time( + Duration::from_secs(spec.seconds_per_slot) - spec.maximum_gossip_clock_disparity(), + ); + assert_eq!( + harness + .chain + .slot_clock + .now_with_future_tolerance(spec.maximum_gossip_clock_disparity()) + .unwrap(), + next_epoch_start_slot + ); + + let head_state = harness.get_current_state(); + let head_block_root = harness.head_block_root(); + let tolerant_current_epoch = next_epoch_start_slot.epoch(E::slots_per_epoch()); + + // Prime the proposer shuffling cache with an incorrect entry (regression test). + let wrong_decision_root = head_state + .proposer_shuffling_decision_root(head_block_root, spec) + .unwrap(); + let wrong_proposer_indices = vec![0; E::slots_per_epoch() as usize]; + harness + .chain + .beacon_proposer_cache + .lock() + .insert( + tolerant_current_epoch, + wrong_decision_root, + wrong_proposer_indices.clone(), + head_state.fork(), + ) + .unwrap(); + + // Request the v2 proposer duties. 
+ let proposer_duties_tolerant_current_epoch = client + .get_validator_duties_proposer_v2(tolerant_current_epoch) + .await + .unwrap(); + + assert_eq!( + proposer_duties_tolerant_current_epoch.dependent_root, + head_state + .proposer_shuffling_decision_root_at_epoch( + tolerant_current_epoch, + head_block_root, + spec, + ) + .unwrap() + ); + assert_ne!( + proposer_duties_tolerant_current_epoch + .data + .iter() + .map(|data| data.validator_index as usize) + .collect::>(), + wrong_proposer_indices, + ); + + // We should get the exact same result after properly advancing into the epoch. + harness + .chain + .slot_clock + .advance_time(spec.maximum_gossip_clock_disparity()); + assert_eq!(harness.chain.slot().unwrap(), next_epoch_start_slot); + let proposer_duties_current_epoch = client + .get_validator_duties_proposer_v2(tolerant_current_epoch) + .await + .unwrap(); + + assert_eq!( + proposer_duties_tolerant_current_epoch, + proposer_duties_current_epoch + ); +} + +// Test that post-Fulu, v1 and v2 proposer duties return different dependent roots. +// Post-Fulu, the true dependent root shifts to the block root at the end of epoch N-2 (due to +// `min_seed_lookahead`), while the legacy v1 root remains at the end of epoch N-1. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn proposer_duties_v2_post_fulu_dependent_root() { + type E = MinimalEthSpec; + let spec = test_spec::(); + + if !spec.is_fulu_scheduled() { + return; + } + + let validator_count = 24; + let slots_per_epoch = E::slots_per_epoch(); + + let tester = InteractiveTester::::new(Some(spec.clone()), validator_count).await; + let harness = &tester.harness; + let client = &tester.client; + let mock_el = harness.mock_execution_layer.as_ref().unwrap(); + mock_el.server.all_payloads_valid(); + + // Build 3 full epochs of chain so we're in epoch 3. 
+ let num_slots = 3 * slots_per_epoch; + harness.advance_slot(); + harness + .extend_chain_with_sync( + num_slots as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + SyncCommitteeStrategy::AllValidators, + LightClientStrategy::Disabled, + ) + .await; + + let current_epoch = harness.chain.epoch().unwrap(); + assert_eq!(current_epoch, Epoch::new(3)); + + // For epoch 3 with min_seed_lookahead=1: + // Post-Fulu decision slot: end of epoch N-2 = end of epoch 1 = slot 15 + // Legacy decision slot: end of epoch N-1 = end of epoch 2 = slot 23 + let true_decision_slot = Epoch::new(1).end_slot(slots_per_epoch); + let legacy_decision_slot = Epoch::new(2).end_slot(slots_per_epoch); + assert_eq!(true_decision_slot, Slot::new(15)); + assert_eq!(legacy_decision_slot, Slot::new(23)); + + // Fetch the block roots at these slots to compute expected dependent roots. + let expected_v2_root = harness + .chain + .block_root_at_slot(true_decision_slot, beacon_chain::WhenSlotSkipped::Prev) + .unwrap() + .unwrap(); + let expected_v1_root = harness + .chain + .block_root_at_slot(legacy_decision_slot, beacon_chain::WhenSlotSkipped::Prev) + .unwrap() + .unwrap(); + + // Sanity check: the two roots should be different since they refer to different blocks. + assert_ne!( + expected_v1_root, expected_v2_root, + "legacy and true decision roots should differ post-Fulu" + ); + + // Query v1 and v2 proposer duties for the current epoch. + let v1_result = client + .get_validator_duties_proposer(current_epoch) + .await + .unwrap(); + let v2_result = client + .get_validator_duties_proposer_v2(current_epoch) + .await + .unwrap(); + + // The proposer assignments (data) must be identical. + assert_eq!(v1_result.data, v2_result.data); + + // The dependent roots must differ. + assert_ne!( + v1_result.dependent_root, v2_result.dependent_root, + "v1 and v2 dependent roots should differ post-Fulu" + ); + + // Verify each root matches the expected value. 
+ assert_eq!( + v1_result.dependent_root, expected_v1_root, + "v1 dependent root should be block root at end of epoch N-1" + ); + assert_eq!( + v2_result.dependent_root, expected_v2_root, + "v2 dependent root should be block root at end of epoch N-2" + ); + + // Also verify the next-epoch path (epoch 4). + let next_epoch = current_epoch + 1; + let v1_next = client + .get_validator_duties_proposer(next_epoch) + .await + .unwrap(); + let v2_next = client + .get_validator_duties_proposer_v2(next_epoch) + .await + .unwrap(); + + assert_eq!(v1_next.data, v2_next.data); + assert_ne!( + v1_next.dependent_root, v2_next.dependent_root, + "v1 and v2 next-epoch dependent roots should differ post-Fulu" + ); + + // For epoch 4: true decision is end of epoch 2 (slot 23), legacy is end of epoch 3 (slot 31). + let expected_v2_next_root = harness + .chain + .block_root_at_slot( + Epoch::new(2).end_slot(slots_per_epoch), + beacon_chain::WhenSlotSkipped::Prev, + ) + .unwrap() + .unwrap(); + let expected_v1_next_root = harness + .chain + .block_root_at_slot( + Epoch::new(3).end_slot(slots_per_epoch), + beacon_chain::WhenSlotSkipped::Prev, + ) + .unwrap() + .unwrap_or(harness.head_block_root()); + assert_eq!(v1_next.dependent_root, expected_v1_next_root); + assert_eq!(v2_next.dependent_root, expected_v2_next_root); + assert_ne!(expected_v2_next_root, harness.head_block_root()); +} + // Test that a request to `lighthouse/custody/backfill` succeeds by verifying that `CustodyContext` and `DataColumnCustodyInfo` // have been updated with the correct values. 
#[tokio::test(flavor = "multi_thread", worker_threads = 2)] diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 6696e109a5..739c39aa32 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -3392,6 +3392,80 @@ impl ApiTester { self } + pub async fn test_get_validator_duties_proposer_v2(self) -> Self { + let current_epoch = self.chain.epoch().unwrap(); + + for epoch in 0..=current_epoch.as_u64() + 1 { + let epoch = Epoch::from(epoch); + + // Compute the true dependent root using the spec's decision slot. + let decision_slot = self.chain.spec.proposer_shuffling_decision_slot::(epoch); + let dependent_root = self + .chain + .block_root_at_slot(decision_slot, WhenSlotSkipped::Prev) + .unwrap() + .unwrap_or(self.chain.head_beacon_block_root()); + + let result = self + .client + .get_validator_duties_proposer_v2(epoch) + .await + .unwrap(); + + let mut state = self + .chain + .state_at_slot( + epoch.start_slot(E::slots_per_epoch()), + StateSkipConfig::WithStateRoots, + ) + .unwrap(); + + state + .build_committee_cache(RelativeEpoch::Current, &self.chain.spec) + .unwrap(); + + let expected_duties = epoch + .slot_iter(E::slots_per_epoch()) + .map(|slot| { + let index = state + .get_beacon_proposer_index(slot, &self.chain.spec) + .unwrap(); + let pubkey = state.validators().get(index).unwrap().pubkey; + + ProposerData { + pubkey, + validator_index: index as u64, + slot, + } + }) + .collect::>(); + + let expected = DutiesResponse { + data: expected_duties, + execution_optimistic: Some(false), + dependent_root, + }; + + assert_eq!(result, expected); + + // v1 and v2 should return the same data. + let v1_result = self + .client + .get_validator_duties_proposer(epoch) + .await + .unwrap(); + assert_eq!(result.data, v1_result.data); + } + + // Requests to the epochs after the next epoch should fail. 
+ self.client + .get_validator_duties_proposer_v2(current_epoch + 2) + .await + .unwrap_err(); + + self + } + pub async fn test_get_validator_duties_early(self) -> Self { let current_epoch = self.chain.epoch().unwrap(); let next_epoch = current_epoch + 1; @@ -7617,6 +7691,31 @@ async fn get_validator_duties_proposer_with_skip_slots() { .await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn get_validator_duties_proposer_v2() { + ApiTester::new_from_config(ApiTesterConfig { + spec: test_spec::(), + retain_historic_states: true, + ..ApiTesterConfig::default() + }) + .await + .test_get_validator_duties_proposer_v2() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn get_validator_duties_proposer_v2_with_skip_slots() { + ApiTester::new_from_config(ApiTesterConfig { + spec: test_spec::(), + retain_historic_states: true, + ..ApiTesterConfig::default() + }) + .await + .skip_slots(E::slots_per_epoch() * 2) + .test_get_validator_duties_proposer_v2() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn block_production() { ApiTester::new().await.test_block_production().await; diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index ac96da6173..628c12981a 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -2144,6 +2144,24 @@ impl BeaconNodeHttpClient { .await } + /// `GET v2/validator/duties/proposer/{epoch}` + pub async fn get_validator_duties_proposer_v2( + &self, + epoch: Epoch, + ) -> Result>, Error> { + let mut path = self.eth_path(V2)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
+ .push("validator") + .push("duties") + .push("proposer") + .push(&epoch.to_string()); + + self.get_with_timeout(path, self.timeouts.proposer_duties) + .await + } + /// `GET v2/validator/blocks/{slot}` pub async fn get_validator_blocks( &self, From 906400ed3435fffebaf5a56b60b778dbe3883ec1 Mon Sep 17 00:00:00 2001 From: Romeo Date: Tue, 10 Mar 2026 14:36:58 +0100 Subject: [PATCH 14/43] Implement proposer lookahead endpoint (#8815) closes #8809 Implement GET /eth/v1/beacon/states/{state_id}/proposer_lookahead ([beacon-APIs#565](https://github.com/ethereum/beacon-APIs/pull/565)). Returns the proposer lookahead from Fulu+ states; 400 for pre-Fulu. Includes integration test. Co-Authored-By: romeoscript Co-Authored-By: Tan Chee Keong --- beacon_node/http_api/src/beacon/states.rs | 68 +++++++++++++++++- beacon_node/http_api/src/lib.rs | 5 ++ beacon_node/http_api/tests/tests.rs | 85 ++++++++++++++++++++++- common/eth2/src/lib.rs | 41 +++++++++++ 4 files changed, 196 insertions(+), 3 deletions(-) diff --git a/beacon_node/http_api/src/beacon/states.rs b/beacon_node/http_api/src/beacon/states.rs index 50be7211d8..02ac3f4da7 100644 --- a/beacon_node/http_api/src/beacon/states.rs +++ b/beacon_node/http_api/src/beacon/states.rs @@ -3,17 +3,20 @@ use crate::task_spawner::{Priority, TaskSpawner}; use crate::utils::ResponseFilter; use crate::validator::pubkey_to_validator_index; use crate::version::{ - ResponseIncludesVersion, add_consensus_version_header, + ResponseIncludesVersion, add_consensus_version_header, add_ssz_content_type_header, execution_optimistic_finalized_beacon_response, }; use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes, WhenSlotSkipped}; use eth2::types::{ - ValidatorBalancesRequestBody, ValidatorId, ValidatorIdentitiesRequestBody, + self as api_types, ValidatorBalancesRequestBody, ValidatorId, ValidatorIdentitiesRequestBody, ValidatorsRequestBody, }; +use ssz::Encode; use std::sync::Arc; use types::{AttestationShufflingId, BeaconStateError, 
CommitteeCache, EthSpec, RelativeEpoch}; use warp::filters::BoxedFilter; +use warp::http::Response; +use warp::hyper::Body; use warp::{Filter, Reply}; use warp_utils::query::multi_key_query; @@ -160,6 +163,67 @@ pub fn get_beacon_state_pending_deposits( .boxed() } +// GET beacon/states/{state_id}/proposer_lookahead +pub fn get_beacon_state_proposer_lookahead( + beacon_states_path: BeaconStatesPath, +) -> ResponseFilter { + beacon_states_path + .clone() + .and(warp::path("proposer_lookahead")) + .and(warp::path::end()) + .and(warp::header::optional::("accept")) + .then( + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>, + accept_header: Option| { + task_spawner.blocking_response_task(Priority::P1, move || { + let (data, execution_optimistic, finalized, fork_name) = state_id + .map_state_and_execution_optimistic_and_finalized( + &chain, + |state, execution_optimistic, finalized| { + let Ok(lookahead) = state.proposer_lookahead() else { + return Err(warp_utils::reject::custom_bad_request( + "Proposer lookahead is not available for pre-Fulu states" + .to_string(), + )); + }; + + Ok(( + lookahead.to_vec(), + execution_optimistic, + finalized, + state.fork_name_unchecked(), + )) + }, + )?; + + match accept_header { + Some(api_types::Accept::Ssz) => Response::builder() + .status(200) + .body(data.as_ssz_bytes().into()) + .map(|res: Response| add_ssz_content_type_header(res)) + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "failed to create response: {}", + e + )) + }), + _ => execution_optimistic_finalized_beacon_response( + ResponseIncludesVersion::Yes(fork_name), + execution_optimistic, + finalized, + data, + ) + .map(|res| warp::reply::json(&res).into_response()), + } + .map(|resp| add_consensus_version_header(resp, fork_name)) + }) + }, + ) + .boxed() +} + // GET beacon/states/{state_id}/randao?epoch pub fn get_beacon_state_randao( beacon_states_path: BeaconStatesPath, diff --git a/beacon_node/http_api/src/lib.rs 
b/beacon_node/http_api/src/lib.rs index b5b74a3840..0ef8654d8d 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -650,6 +650,10 @@ pub fn serve( let get_beacon_state_pending_consolidations = states::get_beacon_state_pending_consolidations(beacon_states_path.clone()); + // GET beacon/states/{state_id}/proposer_lookahead + let get_beacon_state_proposer_lookahead = + states::get_beacon_state_proposer_lookahead(beacon_states_path.clone()); + // GET beacon/headers // // Note: this endpoint only returns information about blocks in the canonical chain. Given that @@ -3284,6 +3288,7 @@ pub fn serve( .uor(get_beacon_state_pending_deposits) .uor(get_beacon_state_pending_partial_withdrawals) .uor(get_beacon_state_pending_consolidations) + .uor(get_beacon_state_proposer_lookahead) .uor(get_beacon_headers) .uor(get_beacon_headers_block_id) .uor(get_beacon_block) diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 739c39aa32..a97ce01ac1 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -37,7 +37,7 @@ use proto_array::ExecutionStatus; use reqwest::{RequestBuilder, Response, StatusCode}; use sensitive_url::SensitiveUrl; use slot_clock::SlotClock; -use ssz::BitList; +use ssz::{BitList, Decode}; use state_processing::per_block_processing::get_expected_withdrawals; use state_processing::per_slot_processing; use state_processing::state_advance::partial_state_advance; @@ -1409,6 +1409,72 @@ impl ApiTester { self } + pub async fn test_beacon_states_proposer_lookahead(self) -> Self { + for state_id in self.interesting_state_ids() { + let mut state_opt = state_id + .state(&self.chain) + .ok() + .map(|(state, _execution_optimistic, _finalized)| state); + + let result = match self + .client + .get_beacon_states_proposer_lookahead(state_id.0) + .await + { + Ok(response) => response, + Err(e) => panic!("query failed incorrectly: {e:?}"), + }; + + if result.is_none() && 
state_opt.is_none() { + continue; + } + + let state = state_opt.as_mut().expect("result should be none"); + let expected = state.proposer_lookahead().unwrap(); + + let response = result.unwrap(); + assert_eq!(response.data(), &expected.to_vec()); + + // Check that the version header is returned in the response + let fork_name = state.fork_name(&self.chain.spec).unwrap(); + assert_eq!(response.version(), Some(fork_name),); + } + + self + } + + pub async fn test_beacon_states_proposer_lookahead_ssz(self) -> Self { + for state_id in self.interesting_state_ids() { + let mut state_opt = state_id + .state(&self.chain) + .ok() + .map(|(state, _execution_optimistic, _finalized)| state); + + let result = match self + .client + .get_beacon_states_proposer_lookahead_ssz(state_id.0) + .await + { + Ok(response) => response, + Err(e) => panic!("query failed incorrectly: {e:?}"), + }; + + if result.is_none() && state_opt.is_none() { + continue; + } + + let state = state_opt.as_mut().expect("result should be none"); + let expected = state.proposer_lookahead().unwrap(); + + let ssz_bytes = result.unwrap(); + let decoded = Vec::::from_ssz_bytes(&ssz_bytes) + .expect("should decode SSZ proposer lookahead"); + assert_eq!(decoded, expected.to_vec()); + } + + self + } + pub async fn test_beacon_headers_all_slots(self) -> Self { for slot in 0..CHAIN_LENGTH { let slot = Slot::from(slot); @@ -7360,6 +7426,23 @@ async fn beacon_get_state_info_electra() { .await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn beacon_get_state_info_fulu() { + let mut config = ApiTesterConfig::default(); + config.spec.altair_fork_epoch = Some(Epoch::new(0)); + config.spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + config.spec.capella_fork_epoch = Some(Epoch::new(0)); + config.spec.deneb_fork_epoch = Some(Epoch::new(0)); + config.spec.electra_fork_epoch = Some(Epoch::new(0)); + config.spec.fulu_fork_epoch = Some(Epoch::new(0)); + ApiTester::new_from_config(config) + .await + 
.test_beacon_states_proposer_lookahead() + .await + .test_beacon_states_proposer_lookahead_ssz() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn beacon_get_blocks() { ApiTester::new() diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 628c12981a..5547ced491 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -898,6 +898,47 @@ impl BeaconNodeHttpClient { .map(|opt| opt.map(BeaconResponse::ForkVersioned)) } + /// `GET beacon/states/{state_id}/proposer_lookahead` + /// + /// Returns `Ok(None)` on a 404 error. + pub async fn get_beacon_states_proposer_lookahead( + &self, + state_id: StateId, + ) -> Result>>, Error> { + let mut path = self.eth_path(V1)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("states") + .push(&state_id.to_string()) + .push("proposer_lookahead"); + + self.get_fork_contextual(path, |fork| fork) + .await + .map(|opt| opt.map(BeaconResponse::ForkVersioned)) + } + + /// `GET beacon/states/{state_id}/proposer_lookahead` + /// + /// Returns `Ok(None)` on a 404 error. + pub async fn get_beacon_states_proposer_lookahead_ssz( + &self, + state_id: StateId, + ) -> Result>, Error> { + let mut path = self.eth_path(V1)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("states") + .push(&state_id.to_string()) + .push("proposer_lookahead"); + + self.get_bytes_opt_accept_header(path, Accept::Ssz, self.timeouts.default) + .await + } + /// `GET beacon/light_client/updates` /// /// Returns `Ok(None)` on a 404 error. 
From 2bb79f43aadbf6e71ab2ea67efdda4585e9184ea Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Wed, 11 Mar 2026 02:43:59 +0900 Subject: [PATCH 15/43] Update contribution guidelines regarding LLM usage (#8879) Co-Authored-By: Eitan Seri-Levi --- CONTRIBUTING.md | 9 +++++++++ wordlist.txt | 1 + 2 files changed, 10 insertions(+) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 4cad219c89..f81f75cd8b 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -37,6 +37,15 @@ Requests](https://github.com/sigp/lighthouse/pulls) is where code gets reviewed. We use [discord](https://discord.gg/cyAszAh) to chat informally. +### A Note on LLM usage + +We are happy to support contributors who are genuinely engaging with the code base. Our general policy regarding LLM usage: + +- Please refrain from submissions that you haven't thoroughly understood, reviewed, and tested. +- Please disclose if a significant portion of your contribution was AI-generated. +- Descriptions and comments should be made by you. +- We reserve the right to reject any contributions we feel are violating the spirit of open source contribution. + ### General Work-Flow We recommend the following work-flow for contributors: diff --git a/wordlist.txt b/wordlist.txt index e0e1fe7d73..822e336146 100644 --- a/wordlist.txt +++ b/wordlist.txt @@ -58,6 +58,7 @@ JSON KeyManager Kurtosis LMDB +LLM LLVM LRU LTO From 815040dc3c056560c5c67a7a71a87d2bbc658f2 Mon Sep 17 00:00:00 2001 From: Mac L Date: Wed, 11 Mar 2026 07:43:26 +0200 Subject: [PATCH 16/43] Remove `c-kzg` (#8930) #7330 Removes `c-kzg` from our `kzg` crate and relies fully on the `rust_eth_kzg` crate. This removes the old `Blob` type entirely and instead handles `rust_eth_kzg::KzgBlobRef`s directly which allows us to avoid some extra stack allocations. Similarly, we make `Bytes32` and `Bytes48` type aliases rather than structs as this fits better with the new `rust_eth_kzg` API. 
Co-Authored-By: Mac L --- Cargo.lock | 1 - Cargo.toml | 3 - beacon_node/beacon_chain/src/kzg_utils.rs | 50 +++-- .../test_utils/execution_block_generator.rs | 15 +- consensus/types/src/data/blob_sidecar.rs | 11 +- consensus/types/src/kzg_ext/mod.rs | 2 +- crypto/kzg/Cargo.toml | 2 - crypto/kzg/benches/benchmark.rs | 18 +- crypto/kzg/src/kzg_commitment.rs | 10 +- crypto/kzg/src/kzg_proof.rs | 8 +- crypto/kzg/src/lib.rs | 172 ++++++++---------- crypto/kzg/src/trusted_setup.rs | 18 +- deny.toml | 1 + .../cases/kzg_verify_cell_kzg_proof_batch.rs | 6 +- 14 files changed, 129 insertions(+), 188 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 704039a175..0ca12dce46 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4825,7 +4825,6 @@ name = "kzg" version = "0.1.0" dependencies = [ "arbitrary", - "c-kzg", "criterion", "educe", "ethereum_hashing", diff --git a/Cargo.toml b/Cargo.toml index efedfe3b37..7572cc324d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -117,9 +117,6 @@ bitvec = "1" bls = { path = "crypto/bls" } byteorder = "1" bytes = "1.11.1" -# Turn off c-kzg's default features which include `blst/portable`. We can turn on blst's portable -# feature ourselves when desired. 
-c-kzg = { version = "2.1", default-features = false } cargo_metadata = "0.19" clap = { version = "4.5.4", features = ["derive", "cargo", "wrap_help"] } clap_utils = { path = "common/clap_utils" } diff --git a/beacon_node/beacon_chain/src/kzg_utils.rs b/beacon_node/beacon_chain/src/kzg_utils.rs index 33b3260361..10cb208729 100644 --- a/beacon_node/beacon_chain/src/kzg_utils.rs +++ b/beacon_node/beacon_chain/src/kzg_utils.rs @@ -1,6 +1,5 @@ use kzg::{ - Blob as KzgBlob, Bytes48, Cell as KzgCell, CellRef as KzgCellRef, CellsAndKzgProofs, - Error as KzgError, Kzg, KzgBlobRef, + Cell as KzgCell, CellRef as KzgCellRef, CellsAndKzgProofs, Error as KzgError, Kzg, KzgBlobRef, }; use rayon::prelude::*; use ssz_types::{FixedVector, VariableList}; @@ -15,18 +14,18 @@ use types::{ SignedBeaconBlock, SignedBeaconBlockHeader, SignedBlindedBeaconBlock, Slot, }; -/// Converts a blob ssz List object to an array to be used with the kzg -/// crypto library. -fn ssz_blob_to_crypto_blob(blob: &Blob) -> Result { - KzgBlob::from_bytes(blob.as_ref()).map_err(Into::into) +/// Converts a blob ssz FixedVector to a reference to a fixed-size array +/// to be used with `rust_eth_kzg`. +fn ssz_blob_to_kzg_blob_ref(blob: &Blob) -> Result, KzgError> { + blob.as_ref().try_into().map_err(|e| { + KzgError::InconsistentArrayLength(format!( + "blob should have a guaranteed size due to FixedVector: {e:?}" + )) + }) } -fn ssz_blob_to_crypto_blob_boxed(blob: &Blob) -> Result, KzgError> { - ssz_blob_to_crypto_blob::(blob).map(Box::new) -} - -/// Converts a cell ssz List object to an array to be used with the kzg -/// crypto library. +/// Converts a cell ssz FixedVector to a reference to a fixed-size array +/// to be used with `rust_eth_kzg`. 
fn ssz_cell_to_crypto_cell(cell: &Cell) -> Result, KzgError> { let cell_bytes: &[u8] = cell.as_ref(); cell_bytes @@ -42,8 +41,8 @@ pub fn validate_blob( kzg_proof: KzgProof, ) -> Result<(), KzgError> { let _timer = crate::metrics::start_timer(&crate::metrics::KZG_VERIFICATION_SINGLE_TIMES); - let kzg_blob = ssz_blob_to_crypto_blob_boxed::(blob)?; - kzg.verify_blob_kzg_proof(&kzg_blob, kzg_commitment, kzg_proof) + let kzg_blob = ssz_blob_to_kzg_blob_ref::(blob)?; + kzg.verify_blob_kzg_proof(kzg_blob, kzg_commitment, kzg_proof) } /// Validate a batch of `DataColumnSidecar`. @@ -72,7 +71,7 @@ where } for &proof in data_column.kzg_proofs() { - proofs.push(Bytes48::from(proof)); + proofs.push(proof.0); } // In Gloas, commitments come from the block's ExecutionPayloadBid, not the sidecar. @@ -90,7 +89,7 @@ where }; for &commitment in kzg_commitments.iter() { - commitments.push(Bytes48::from(commitment)); + commitments.push(commitment.0); } let expected_len = column_indices.len(); @@ -120,7 +119,7 @@ pub fn validate_blobs( let _timer = crate::metrics::start_timer(&crate::metrics::KZG_VERIFICATION_BATCH_TIMES); let blobs = blobs .into_iter() - .map(|blob| ssz_blob_to_crypto_blob::(blob)) + .map(|blob| ssz_blob_to_kzg_blob_ref::(blob)) .collect::, KzgError>>()?; kzg.verify_blob_kzg_proof_batch(&blobs, expected_kzg_commitments, kzg_proofs) @@ -132,8 +131,8 @@ pub fn compute_blob_kzg_proof( blob: &Blob, kzg_commitment: KzgCommitment, ) -> Result { - let kzg_blob = ssz_blob_to_crypto_blob_boxed::(blob)?; - kzg.compute_blob_kzg_proof(&kzg_blob, kzg_commitment) + let kzg_blob = ssz_blob_to_kzg_blob_ref::(blob)?; + kzg.compute_blob_kzg_proof(kzg_blob, kzg_commitment) } /// Compute the kzg commitment for a given blob. 
@@ -141,8 +140,8 @@ pub fn blob_to_kzg_commitment( kzg: &Kzg, blob: &Blob, ) -> Result { - let kzg_blob = ssz_blob_to_crypto_blob_boxed::(blob)?; - kzg.blob_to_kzg_commitment(&kzg_blob) + let kzg_blob = ssz_blob_to_kzg_blob_ref::(blob)?; + kzg.blob_to_kzg_commitment(kzg_blob) } /// Compute the kzg proof for a given blob and an evaluation point z. @@ -151,10 +150,9 @@ pub fn compute_kzg_proof( blob: &Blob, z: Hash256, ) -> Result<(KzgProof, Hash256), KzgError> { - let z = z.0.into(); - let kzg_blob = ssz_blob_to_crypto_blob_boxed::(blob)?; - kzg.compute_kzg_proof(&kzg_blob, &z) - .map(|(proof, z)| (proof, Hash256::from_slice(&z.to_vec()))) + let kzg_blob = ssz_blob_to_kzg_blob_ref::(blob)?; + kzg.compute_kzg_proof(kzg_blob, &z.0) + .map(|(proof, z)| (proof, Hash256::from_slice(&z))) } /// Verify a `kzg_proof` for a `kzg_commitment` that evaluating a polynomial at `z` results in `y` @@ -165,7 +163,7 @@ pub fn verify_kzg_proof( z: Hash256, y: Hash256, ) -> Result { - kzg.verify_kzg_proof(kzg_commitment, &z.0.into(), &y.0.into(), kzg_proof) + kzg.verify_kzg_proof(kzg_commitment, &z.0, &y.0, kzg_proof) } /// Build data column sidecars from a signed beacon block and its blobs. 
diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index 62a46246da..e94924d8b2 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -989,7 +989,7 @@ pub fn generate_pow_block( #[cfg(test)] mod test { use super::*; - use kzg::{Bytes48, CellRef, KzgBlobRef, trusted_setup::get_trusted_setup}; + use kzg::{CellRef, KzgBlobRef, trusted_setup::get_trusted_setup}; use types::{MainnetEthSpec, MinimalEthSpec}; #[test] @@ -1015,10 +1015,11 @@ mod test { fn validate_blob_bundle_v1() -> Result<(), String> { let kzg = load_kzg()?; let (kzg_commitment, kzg_proof, blob) = load_test_blobs_bundle_v1::()?; - let kzg_blob = kzg::Blob::from_bytes(blob.as_ref()) - .map(Box::new) - .map_err(|e| format!("Error converting blob to kzg blob: {e:?}"))?; - kzg.verify_blob_kzg_proof(&kzg_blob, kzg_commitment, kzg_proof) + let kzg_blob: KzgBlobRef = blob + .as_ref() + .try_into() + .map_err(|e| format!("Error converting blob to kzg blob ref: {e:?}"))?; + kzg.verify_blob_kzg_proof(kzg_blob, kzg_commitment, kzg_proof) .map_err(|e| format!("Invalid blobs bundle: {e:?}")) } @@ -1028,8 +1029,8 @@ mod test { load_test_blobs_bundle_v2::().map(|(commitment, proofs, blob)| { let kzg_blob: KzgBlobRef = blob.as_ref().try_into().unwrap(); ( - vec![Bytes48::from(commitment); proofs.len()], - proofs.into_iter().map(|p| p.into()).collect::>(), + vec![commitment.0; proofs.len()], + proofs.into_iter().map(|p| p.0).collect::>(), kzg.compute_cells(kzg_blob).unwrap(), ) })?; diff --git a/consensus/types/src/data/blob_sidecar.rs b/consensus/types/src/data/blob_sidecar.rs index 638491d6d7..2774176190 100644 --- a/consensus/types/src/data/blob_sidecar.rs +++ b/consensus/types/src/data/blob_sidecar.rs @@ -3,7 +3,7 @@ use std::{fmt::Debug, hash::Hash, sync::Arc}; use bls::Signature; use 
context_deserialize::context_deserialize; use educe::Educe; -use kzg::{BYTES_PER_BLOB, BYTES_PER_FIELD_ELEMENT, Blob as KzgBlob, Kzg, KzgCommitment, KzgProof}; +use kzg::{BYTES_PER_BLOB, BYTES_PER_FIELD_ELEMENT, Kzg, KzgCommitment, KzgProof}; use merkle_proof::{MerkleTreeError, merkle_root_from_branch, verify_merkle_proof}; use rand::Rng; use safe_arith::ArithError; @@ -253,14 +253,17 @@ impl BlobSidecar { let blob = Blob::::new(blob_bytes) .map_err(|e| format!("error constructing random blob: {:?}", e))?; - let kzg_blob = KzgBlob::from_bytes(&blob).unwrap(); + let kzg_blob: &[u8; BYTES_PER_BLOB] = blob + .as_ref() + .try_into() + .map_err(|e| format!("error converting blob to kzg blob ref: {:?}", e))?; let commitment = kzg - .blob_to_kzg_commitment(&kzg_blob) + .blob_to_kzg_commitment(kzg_blob) .map_err(|e| format!("error computing kzg commitment: {:?}", e))?; let proof = kzg - .compute_blob_kzg_proof(&kzg_blob, commitment) + .compute_blob_kzg_proof(kzg_blob, commitment) .map_err(|e| format!("error computing kzg proof: {:?}", e))?; Ok(Self { diff --git a/consensus/types/src/kzg_ext/mod.rs b/consensus/types/src/kzg_ext/mod.rs index 63533ec71f..e0ec9dd956 100644 --- a/consensus/types/src/kzg_ext/mod.rs +++ b/consensus/types/src/kzg_ext/mod.rs @@ -1,6 +1,6 @@ pub mod consts; -pub use kzg::{Blob as KzgBlob, Error as KzgError, Kzg, KzgCommitment, KzgProof}; +pub use kzg::{Error as KzgError, Kzg, KzgCommitment, KzgProof}; use ssz_types::VariableList; diff --git a/crypto/kzg/Cargo.toml b/crypto/kzg/Cargo.toml index 840f8cfc9c..19f39a182b 100644 --- a/crypto/kzg/Cargo.toml +++ b/crypto/kzg/Cargo.toml @@ -12,7 +12,6 @@ fake_crypto = [] [dependencies] arbitrary = { workspace = true, optional = true } -c-kzg = { workspace = true } educe = { workspace = true } ethereum_hashing = { workspace = true } ethereum_serde_utils = { workspace = true } @@ -28,7 +27,6 @@ tree_hash = { workspace = true } [dev-dependencies] criterion = { workspace = true } -serde_json = { workspace = true 
} [[bench]] name = "benchmark" diff --git a/crypto/kzg/benches/benchmark.rs b/crypto/kzg/benches/benchmark.rs index 432d84654a..d5d5596211 100644 --- a/crypto/kzg/benches/benchmark.rs +++ b/crypto/kzg/benches/benchmark.rs @@ -1,6 +1,5 @@ -use c_kzg::KzgSettings; use criterion::{criterion_group, criterion_main, Criterion}; -use kzg::{trusted_setup::get_trusted_setup, TrustedSetup, NO_PRECOMPUTE}; +use kzg::trusted_setup::get_trusted_setup; use rust_eth_kzg::{DASContext, TrustedSetup as PeerDASTrustedSetup}; pub fn bench_init_context(c: &mut Criterion) { @@ -20,21 +19,6 @@ pub fn bench_init_context(c: &mut Criterion) { ) }) }); - c.bench_function("Initialize context c-kzg (4844)", |b| { - b.iter(|| { - let trusted_setup: TrustedSetup = - serde_json::from_reader(trusted_setup_bytes.as_slice()) - .map_err(|e| format!("Unable to read trusted setup file: {}", e)) - .expect("should have trusted setup"); - KzgSettings::load_trusted_setup( - &trusted_setup.g1_monomial(), - &trusted_setup.g1_lagrange(), - &trusted_setup.g2_monomial(), - NO_PRECOMPUTE, - ) - .unwrap() - }) - }); } criterion_group!(benches, bench_init_context); diff --git a/crypto/kzg/src/kzg_commitment.rs b/crypto/kzg/src/kzg_commitment.rs index bc5fc5f5aa..d8ef4b36cf 100644 --- a/crypto/kzg/src/kzg_commitment.rs +++ b/crypto/kzg/src/kzg_commitment.rs @@ -1,4 +1,4 @@ -use c_kzg::BYTES_PER_COMMITMENT; +use crate::{Bytes48, BYTES_PER_COMMITMENT}; use educe::Educe; use ethereum_hashing::hash_fixed; use serde::de::{Deserialize, Deserializer}; @@ -14,7 +14,7 @@ pub const VERSIONED_HASH_VERSION_KZG: u8 = 0x01; #[derive(Educe, Clone, Copy, Encode, Decode)] #[educe(PartialEq, Eq, Hash)] #[ssz(struct_behaviour = "transparent")] -pub struct KzgCommitment(pub [u8; c_kzg::BYTES_PER_COMMITMENT]); +pub struct KzgCommitment(pub [u8; BYTES_PER_COMMITMENT]); impl KzgCommitment { pub fn calculate_versioned_hash(&self) -> Hash256 { @@ -24,13 +24,13 @@ impl KzgCommitment { } pub fn empty_for_testing() -> Self { - 
KzgCommitment([0; c_kzg::BYTES_PER_COMMITMENT]) + KzgCommitment([0; BYTES_PER_COMMITMENT]) } } -impl From for c_kzg::Bytes48 { +impl From for Bytes48 { fn from(value: KzgCommitment) -> Self { - value.0.into() + value.0 } } diff --git a/crypto/kzg/src/kzg_proof.rs b/crypto/kzg/src/kzg_proof.rs index aa9ed185a0..e0867520eb 100644 --- a/crypto/kzg/src/kzg_proof.rs +++ b/crypto/kzg/src/kzg_proof.rs @@ -1,4 +1,4 @@ -use c_kzg::BYTES_PER_PROOF; +use crate::BYTES_PER_PROOF; use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; use ssz_derive::{Decode, Encode}; @@ -11,12 +11,6 @@ use tree_hash::{PackedEncoding, TreeHash}; #[ssz(struct_behaviour = "transparent")] pub struct KzgProof(pub [u8; BYTES_PER_PROOF]); -impl From for c_kzg::Bytes48 { - fn from(value: KzgProof) -> Self { - value.0.into() - } -} - impl KzgProof { /// Creates a valid proof using `G1_POINT_AT_INFINITY`. pub fn empty() -> Self { diff --git a/crypto/kzg/src/lib.rs b/crypto/kzg/src/lib.rs index 66499dad8e..6ee352b0db 100644 --- a/crypto/kzg/src/lib.rs +++ b/crypto/kzg/src/lib.rs @@ -12,11 +12,12 @@ pub use crate::{ trusted_setup::TrustedSetup, }; -pub use c_kzg::{ - Blob, Bytes32, Bytes48, KzgSettings, BYTES_PER_BLOB, BYTES_PER_COMMITMENT, - BYTES_PER_FIELD_ELEMENT, BYTES_PER_PROOF, FIELD_ELEMENTS_PER_BLOB, +pub use rust_eth_kzg::constants::{ + BYTES_PER_BLOB, BYTES_PER_COMMITMENT, BYTES_PER_FIELD_ELEMENT, FIELD_ELEMENTS_PER_BLOB, }; +pub const BYTES_PER_PROOF: usize = 48; + use crate::trusted_setup::load_trusted_setup; use rayon::prelude::*; pub use rust_eth_kzg::{ @@ -25,13 +26,6 @@ pub use rust_eth_kzg::{ }; use tracing::{instrument, Span}; -/// Disables the fixed-base multi-scalar multiplication optimization for computing -/// cell KZG proofs, because `rust-eth-kzg` already handles the precomputation. 
-/// -/// Details about `precompute` parameter can be found here: -/// -pub const NO_PRECOMPUTE: u64 = 0; - // Note: Both `NUMBER_OF_COLUMNS` and `CELLS_PER_EXT_BLOB` are preset values - however this // is a constant in the KZG library - be aware that overriding `NUMBER_OF_COLUMNS` will break KZG // operations. @@ -39,14 +33,15 @@ pub type CellsAndKzgProofs = ([Cell; CELLS_PER_EXT_BLOB], [KzgProof; CELLS_PER_E pub type KzgBlobRef<'a> = &'a [u8; BYTES_PER_BLOB]; +type Bytes32 = [u8; 32]; +type Bytes48 = [u8; 48]; + #[derive(Debug)] pub enum Error { /// An error from initialising the trusted setup. TrustedSetupError(String), - /// An error from the underlying kzg library. - Kzg(c_kzg::Error), - /// A prover/verifier error from the rust-eth-kzg library. - PeerDASKZG(rust_eth_kzg::Error), + /// An error from the rust-eth-kzg library. + Kzg(rust_eth_kzg::Error), /// The kzg verification failed KzgVerificationFailed, /// Misc indexing error @@ -57,38 +52,29 @@ pub enum Error { DASContextUninitialized, } -impl From for Error { - fn from(value: c_kzg::Error) -> Self { +impl From for Error { + fn from(value: rust_eth_kzg::Error) -> Self { Error::Kzg(value) } } -/// A wrapper over a kzg library that holds the trusted setup parameters. +/// A wrapper over the rust-eth-kzg library that holds the trusted setup parameters. 
#[derive(Debug)] pub struct Kzg { - trusted_setup: KzgSettings, context: DASContext, } impl Kzg { pub fn new_from_trusted_setup_no_precomp(trusted_setup: &[u8]) -> Result { - let (ckzg_trusted_setup, rkzg_trusted_setup) = load_trusted_setup(trusted_setup)?; + let rkzg_trusted_setup = load_trusted_setup(trusted_setup)?; let context = DASContext::new(&rkzg_trusted_setup, rust_eth_kzg::UsePrecomp::No); - Ok(Self { - trusted_setup: KzgSettings::load_trusted_setup( - &ckzg_trusted_setup.g1_monomial(), - &ckzg_trusted_setup.g1_lagrange(), - &ckzg_trusted_setup.g2_monomial(), - NO_PRECOMPUTE, - )?, - context, - }) + Ok(Self { context }) } /// Load the kzg trusted setup parameters from a vec of G1 and G2 points. pub fn new_from_trusted_setup(trusted_setup: &[u8]) -> Result { - let (ckzg_trusted_setup, rkzg_trusted_setup) = load_trusted_setup(trusted_setup)?; + let rkzg_trusted_setup = load_trusted_setup(trusted_setup)?; // It's not recommended to change the config parameter for precomputation as storage // grows exponentially, but the speedup is exponential - after a while the speedup @@ -100,15 +86,7 @@ impl Kzg { }, ); - Ok(Self { - trusted_setup: KzgSettings::load_trusted_setup( - &ckzg_trusted_setup.g1_monomial(), - &ckzg_trusted_setup.g1_lagrange(), - &ckzg_trusted_setup.g2_monomial(), - NO_PRECOMPUTE, - )?, - context, - }) + Ok(Self { context }) } fn context(&self) -> &DASContext { @@ -118,34 +96,35 @@ impl Kzg { /// Compute the kzg proof given a blob and its kzg commitment. pub fn compute_blob_kzg_proof( &self, - blob: &Blob, + blob: KzgBlobRef<'_>, kzg_commitment: KzgCommitment, ) -> Result { - self.trusted_setup - .compute_blob_kzg_proof(blob, &kzg_commitment.into()) - .map(|proof| KzgProof(proof.to_bytes().into_inner())) - .map_err(Into::into) + let proof = self + .context() + .compute_blob_kzg_proof(blob, &kzg_commitment.0) + .map_err(Error::Kzg)?; + Ok(KzgProof(proof)) } /// Verify a kzg proof given the blob, kzg commitment and kzg proof. 
pub fn verify_blob_kzg_proof( &self, - blob: &Blob, + blob: KzgBlobRef<'_>, kzg_commitment: KzgCommitment, kzg_proof: KzgProof, ) -> Result<(), Error> { if cfg!(feature = "fake_crypto") { return Ok(()); } - if !self.trusted_setup.verify_blob_kzg_proof( - blob, - &kzg_commitment.into(), - &kzg_proof.into(), - )? { - Err(Error::KzgVerificationFailed) - } else { - Ok(()) - } + self.context() + .verify_blob_kzg_proof(blob, &kzg_commitment.0, &kzg_proof.0) + .map_err(|e| { + if e.is_proof_invalid() { + Error::KzgVerificationFailed + } else { + Error::Kzg(e) + } + }) } /// Verify a batch of blob commitment proof triplets. @@ -154,52 +133,48 @@ impl Kzg { /// TODO(pawan): test performance against a parallelized rayon impl. pub fn verify_blob_kzg_proof_batch( &self, - blobs: &[Blob], + blobs: &[KzgBlobRef<'_>], kzg_commitments: &[KzgCommitment], kzg_proofs: &[KzgProof], ) -> Result<(), Error> { if cfg!(feature = "fake_crypto") { return Ok(()); } - let commitments_bytes = kzg_commitments - .iter() - .map(|comm| Bytes48::from(*comm)) - .collect::>(); + let blob_refs: Vec<&[u8; BYTES_PER_BLOB]> = blobs.to_vec(); + let commitment_refs: Vec<&[u8; 48]> = kzg_commitments.iter().map(|c| &c.0).collect(); + let proof_refs: Vec<&[u8; 48]> = kzg_proofs.iter().map(|p| &p.0).collect(); - let proofs_bytes = kzg_proofs - .iter() - .map(|proof| Bytes48::from(*proof)) - .collect::>(); - - if !self.trusted_setup.verify_blob_kzg_proof_batch( - blobs, - &commitments_bytes, - &proofs_bytes, - )? { - Err(Error::KzgVerificationFailed) - } else { - Ok(()) - } + self.context() + .verify_blob_kzg_proof_batch(blob_refs, commitment_refs, proof_refs) + .map_err(|e| { + if e.is_proof_invalid() { + Error::KzgVerificationFailed + } else { + Error::Kzg(e) + } + }) } /// Converts a blob to a kzg commitment. 
- pub fn blob_to_kzg_commitment(&self, blob: &Blob) -> Result { - self.trusted_setup + pub fn blob_to_kzg_commitment(&self, blob: KzgBlobRef<'_>) -> Result { + let commitment = self + .context() .blob_to_kzg_commitment(blob) - .map(|commitment| KzgCommitment(commitment.to_bytes().into_inner())) - .map_err(Into::into) + .map_err(Error::Kzg)?; + Ok(KzgCommitment(commitment)) } /// Computes the kzg proof for a given `blob` and an evaluation point `z` pub fn compute_kzg_proof( &self, - blob: &Blob, + blob: KzgBlobRef<'_>, z: &Bytes32, ) -> Result<(KzgProof, Bytes32), Error> { - self.trusted_setup - .compute_kzg_proof(blob, z) - .map(|(proof, y)| (KzgProof(proof.to_bytes().into_inner()), y)) - .map_err(Into::into) + let (proof, y) = self + .context() + .compute_kzg_proof(blob, *z) + .map_err(Error::Kzg)?; + Ok((KzgProof(proof), y)) } /// Verifies a `kzg_proof` for a `kzg_commitment` that evaluating a polynomial at `z` results in `y` @@ -213,9 +188,14 @@ impl Kzg { if cfg!(feature = "fake_crypto") { return Ok(true); } - self.trusted_setup - .verify_kzg_proof(&kzg_commitment.into(), z, y, &kzg_proof.into()) - .map_err(Into::into) + match self + .context() + .verify_kzg_proof(&kzg_commitment.0, *z, *y, &kzg_proof.0) + { + Ok(()) => Ok(true), + Err(e) if e.is_proof_invalid() => Ok(false), + Err(e) => Err(Error::Kzg(e)), + } } /// Computes the cells and associated proofs for a given `blob`. @@ -226,18 +206,15 @@ impl Kzg { let (cells, proofs) = self .context() .compute_cells_and_kzg_proofs(blob) - .map_err(Error::PeerDASKZG)?; + .map_err(Error::Kzg)?; - // Convert the proof type to a c-kzg proof type - let c_kzg_proof = proofs.map(KzgProof); - Ok((cells, c_kzg_proof)) + let kzg_proofs = proofs.map(KzgProof); + Ok((cells, kzg_proofs)) } /// Computes the cells for a given `blob`. 
pub fn compute_cells(&self, blob: KzgBlobRef<'_>) -> Result<[Cell; CELLS_PER_EXT_BLOB], Error> { - self.context() - .compute_cells(blob) - .map_err(Error::PeerDASKZG) + self.context().compute_cells(blob).map_err(Error::Kzg) } /// Verifies a batch of cell-proof-commitment triplets. @@ -291,8 +268,8 @@ impl Kzg { for (cell, proof, commitment) in &column_data { cells.push(*cell); - proofs.push(proof.as_ref()); - commitments.push(commitment.as_ref()); + proofs.push(proof); + commitments.push(commitment); } // Create per-chunk tracing span for visualizing parallel processing. @@ -319,7 +296,7 @@ impl Kzg { Err(e) if e.is_proof_invalid() => { Err((Some(column_index), Error::KzgVerificationFailed)) } - Err(e) => Err((Some(column_index), Error::PeerDASKZG(e))), + Err(e) => Err((Some(column_index), Error::Kzg(e))), } }) .collect::, (Option, Error)>>()?; @@ -335,10 +312,9 @@ impl Kzg { let (cells, proofs) = self .context() .recover_cells_and_kzg_proofs(cell_ids.to_vec(), cells.to_vec()) - .map_err(Error::PeerDASKZG)?; + .map_err(Error::Kzg)?; - // Convert the proof type to a c-kzg proof type - let c_kzg_proof = proofs.map(KzgProof); - Ok((cells, c_kzg_proof)) + let kzg_proofs = proofs.map(KzgProof); + Ok((cells, kzg_proofs)) } } diff --git a/crypto/kzg/src/trusted_setup.rs b/crypto/kzg/src/trusted_setup.rs index 75884b8199..5c285b50f2 100644 --- a/crypto/kzg/src/trusted_setup.rs +++ b/crypto/kzg/src/trusted_setup.rs @@ -24,7 +24,7 @@ struct G1Point([u8; BYTES_PER_G1_POINT]); struct G2Point([u8; BYTES_PER_G2_POINT]); /// Contains the trusted setup parameters that are required to instantiate a -/// `c_kzg::KzgSettings` object. +/// `rust_eth_kzg::TrustedSetup` object. /// /// The serialize/deserialize implementations are written according to /// the format specified in the ethereum consensus specs trusted setup files. @@ -155,19 +155,9 @@ fn strip_prefix(s: &str) -> &str { } } -/// Loads the trusted setup from JSON. 
-/// -/// ## Note: -/// Currently we load both c-kzg and rust-eth-kzg trusted setup structs, because c-kzg is still being -/// used for 4844. Longer term we're planning to switch all KZG operations to the rust-eth-kzg -/// crate, and we'll be able to maintain a single trusted setup struct. -pub(crate) fn load_trusted_setup( - trusted_setup: &[u8], -) -> Result<(TrustedSetup, PeerDASTrustedSetup), Error> { - let ckzg_trusted_setup: TrustedSetup = serde_json::from_slice(trusted_setup) - .map_err(|e| Error::TrustedSetupError(format!("{e:?}")))?; +/// Loads the trusted setup from JSON bytes into a `rust_eth_kzg::TrustedSetup`. +pub(crate) fn load_trusted_setup(trusted_setup: &[u8]) -> Result { let trusted_setup_json = std::str::from_utf8(trusted_setup) .map_err(|e| Error::TrustedSetupError(format!("{e:?}")))?; - let rkzg_trusted_setup = PeerDASTrustedSetup::from_json(trusted_setup_json); - Ok((ckzg_trusted_setup, rkzg_trusted_setup)) + Ok(PeerDASTrustedSetup::from_json(trusted_setup_json)) } diff --git a/deny.toml b/deny.toml index 3b230155f7..cf0cd7d3cd 100644 --- a/deny.toml +++ b/deny.toml @@ -11,6 +11,7 @@ deny = [ { crate = "derivative", reason = "use educe or derive_more instead" }, { crate = "ark-ff", reason = "present in Cargo.lock but not needed by Lighthouse" }, { crate = "openssl", reason = "non-Rust dependency, use rustls instead" }, + { crate = "c-kzg", reason = "non-Rust dependency, use rust_eth_kzg instead" }, { crate = "strum", deny-multiple-versions = true, reason = "takes a long time to compile" }, { crate = "reqwest", deny-multiple-versions = true, reason = "takes a long time to compile" }, { crate = "aes", deny-multiple-versions = true, reason = "takes a long time to compile" }, diff --git a/testing/ef_tests/src/cases/kzg_verify_cell_kzg_proof_batch.rs b/testing/ef_tests/src/cases/kzg_verify_cell_kzg_proof_batch.rs index 7973af861f..200f439c28 100644 --- a/testing/ef_tests/src/cases/kzg_verify_cell_kzg_proof_batch.rs +++ 
b/testing/ef_tests/src/cases/kzg_verify_cell_kzg_proof_batch.rs @@ -1,6 +1,6 @@ use super::*; use crate::case_result::compare_result; -use kzg::{Bytes48, Error as KzgError}; +use kzg::Error as KzgError; use serde::Deserialize; use std::marker::PhantomData; @@ -47,8 +47,8 @@ impl Case for KZGVerifyCellKZGProofBatch { let result = parse_input(&self.input).and_then(|(cells, proofs, cell_indices, commitments)| { - let proofs: Vec = proofs.iter().map(|&proof| proof.into()).collect(); - let commitments: Vec = commitments.iter().map(|&c| c.into()).collect(); + let proofs = proofs.iter().map(|&proof| proof.0).collect::>(); + let commitments = commitments.iter().map(|&c| c.0).collect::>(); let cells = cells.iter().map(|c| c.as_ref()).collect::>(); let kzg = get_kzg(); match kzg.verify_cell_proof_batch(&cells, &proofs, cell_indices, &commitments) { From 6350a270319267615034475613c7ff3366b941d3 Mon Sep 17 00:00:00 2001 From: Lion - dapplion <35266934+dapplion@users.noreply.github.com> Date: Wed, 11 Mar 2026 01:20:02 -0500 Subject: [PATCH 17/43] Optionally check DB invariants at runtime (#8952) Co-Authored-By: dapplion <35266934+dapplion@users.noreply.github.com> --- beacon_node/beacon_chain/src/invariants.rs | 56 ++ beacon_node/beacon_chain/src/lib.rs | 1 + beacon_node/beacon_chain/tests/store_tests.rs | 43 + beacon_node/http_api/src/database.rs | 9 + beacon_node/http_api/src/lib.rs | 14 + beacon_node/store/src/invariants.rs | 781 ++++++++++++++++++ beacon_node/store/src/lib.rs | 1 + beacon_node/store/src/state_cache.rs | 13 + 8 files changed, 918 insertions(+) create mode 100644 beacon_node/beacon_chain/src/invariants.rs create mode 100644 beacon_node/store/src/invariants.rs diff --git a/beacon_node/beacon_chain/src/invariants.rs b/beacon_node/beacon_chain/src/invariants.rs new file mode 100644 index 0000000000..7bcec7b0b4 --- /dev/null +++ b/beacon_node/beacon_chain/src/invariants.rs @@ -0,0 +1,56 @@ +//! Beacon chain database invariant checks. +//! +//! 
Builds the `InvariantContext` from beacon chain state and delegates all checks +//! to `HotColdDB::check_invariants`. + +use crate::BeaconChain; +use crate::beacon_chain::BeaconChainTypes; +use store::invariants::{InvariantCheckResult, InvariantContext}; + +impl BeaconChain { + /// Run all database invariant checks. + /// + /// Collects context from fork choice, state cache, custody columns, and pubkey cache, + /// then delegates to the store-level `check_invariants` method. + pub fn check_database_invariants(&self) -> Result { + let fork_choice_blocks = { + let fc = self.canonical_head.fork_choice_read_lock(); + let proto_array = fc.proto_array().core_proto_array(); + proto_array + .nodes + .iter() + .filter(|node| { + // Only check blocks that are descendants of the finalized checkpoint. + // Pruned non-canonical fork blocks may linger in the proto-array but + // are legitimately absent from the database. + fc.is_finalized_checkpoint_or_descendant(node.root) + }) + .map(|node| (node.root, node.slot)) + .collect() + }; + + let custody_context = self.data_availability_checker.custody_context(); + + let ctx = InvariantContext { + fork_choice_blocks, + state_cache_roots: self.store.state_cache.lock().state_roots(), + custody_columns: custody_context + .custody_columns_for_epoch(None, &self.spec) + .to_vec(), + pubkey_cache_pubkeys: { + let cache = self.validator_pubkey_cache.read(); + (0..cache.len()) + .filter_map(|i| { + cache.get(i).map(|pk| { + use store::StoreItem; + crate::validator_pubkey_cache::DatabasePubkey::from_pubkey(pk) + .as_store_bytes() + }) + }) + .collect() + }, + }; + + self.store.check_invariants(&ctx) + } +} diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 4efd90bd22..29081fd767 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -29,6 +29,7 @@ pub mod fork_choice_signal; pub mod graffiti_calculator; pub mod historical_blocks; pub mod historical_data_columns; 
+pub mod invariants; pub mod kzg_utils; pub mod light_client_finality_update_verification; pub mod light_client_optimistic_update_verification; diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index b6d729cc61..86f4af3efc 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -148,6 +148,22 @@ fn get_harness_generic( harness } +/// Check that all database invariants hold. +/// +/// Panics with a descriptive message if any invariant is violated. +fn check_db_invariants(harness: &TestHarness) { + let result = harness + .chain + .check_database_invariants() + .expect("invariant check should not error"); + + assert!( + result.is_ok(), + "database invariant violations found:\n{:#?}", + result.violations, + ); +} + fn get_states_descendant_of_block( store: &HotColdDB, BeaconNodeBackend>, block_root: Hash256, @@ -308,6 +324,7 @@ async fn full_participation_no_skips() { check_split_slot(&harness, store); check_chain_dump(&harness, num_blocks_produced + 1); check_iterators(&harness); + check_db_invariants(&harness); } #[tokio::test] @@ -352,6 +369,7 @@ async fn randomised_skips() { check_split_slot(&harness, store.clone()); check_chain_dump(&harness, num_blocks_produced + 1); check_iterators(&harness); + check_db_invariants(&harness); } #[tokio::test] @@ -400,6 +418,7 @@ async fn long_skip() { check_split_slot(&harness, store); check_chain_dump(&harness, initial_blocks + final_blocks + 1); check_iterators(&harness); + check_db_invariants(&harness); } /// Go forward to the point where the genesis randao value is no longer part of the vector. 
@@ -1769,6 +1788,8 @@ async fn prunes_abandoned_fork_between_two_finalized_checkpoints() { } assert!(!rig.knows_head(&stray_head)); + + check_db_invariants(&rig); } #[tokio::test] @@ -1897,6 +1918,8 @@ async fn pruning_does_not_touch_abandoned_block_shared_with_canonical_chain() { assert!(!rig.knows_head(&stray_head)); let chain_dump = rig.chain.chain_dump().unwrap(); assert!(get_blocks(&chain_dump).contains(&shared_head)); + + check_db_invariants(&rig); } #[tokio::test] @@ -1988,6 +2011,8 @@ async fn pruning_does_not_touch_blocks_prior_to_finalization() { } rig.assert_knows_head(stray_head.into()); + + check_db_invariants(&rig); } #[tokio::test] @@ -2127,6 +2152,8 @@ async fn prunes_fork_growing_past_youngest_finalized_checkpoint() { } assert!(!rig.knows_head(&stray_head)); + + check_db_invariants(&rig); } // This is to check if state outside of normal block processing are pruned correctly. @@ -2377,6 +2404,8 @@ async fn finalizes_non_epoch_start_slot() { state_hash ); } + + check_db_invariants(&rig); } fn check_all_blocks_exist<'a>( @@ -2643,6 +2672,8 @@ async fn pruning_test( check_all_states_exist(&harness, all_canonical_states.iter()); check_no_states_exist(&harness, stray_states.difference(&all_canonical_states)); check_no_blocks_exist(&harness, stray_blocks.values()); + + check_db_invariants(&harness); } #[tokio::test] @@ -2707,6 +2738,8 @@ async fn garbage_collect_temp_states_from_failed_block_on_finalization() { vec![(genesis_state_root, Slot::new(0))], "get_states_descendant_of_block({bad_block_parent_root:?})" ); + + check_db_invariants(&harness); } #[tokio::test] @@ -3361,6 +3394,16 @@ async fn weak_subjectivity_sync_test( store.clone().reconstruct_historic_states(None).unwrap(); assert_eq!(store.get_anchor_info().anchor_slot, wss_aligned_slot); assert_eq!(store.get_anchor_info().state_upper_limit, Slot::new(0)); + + // Check database invariants after full checkpoint sync + backfill + reconstruction. 
+ let result = beacon_chain + .check_database_invariants() + .expect("invariant check should not error"); + assert!( + result.is_ok(), + "database invariant violations:\n{:#?}", + result.violations, + ); } // This test prunes data columns from epoch 0 and then tries to re-import them via diff --git a/beacon_node/http_api/src/database.rs b/beacon_node/http_api/src/database.rs index 8a50ec45b0..4737d92079 100644 --- a/beacon_node/http_api/src/database.rs +++ b/beacon_node/http_api/src/database.rs @@ -2,6 +2,7 @@ use beacon_chain::store::metadata::CURRENT_SCHEMA_VERSION; use beacon_chain::{BeaconChain, BeaconChainTypes}; use serde::Serialize; use std::sync::Arc; +use store::invariants::InvariantCheckResult; use store::{AnchorInfo, BlobInfo, Split, StoreConfig}; #[derive(Debug, Serialize)] @@ -30,3 +31,11 @@ pub fn info( blob_info, }) } + +pub fn check_invariants( + chain: Arc>, +) -> Result { + chain.check_database_invariants().map_err(|e| { + warp_utils::reject::custom_bad_request(format!("error checking database invariants: {e:?}")) + }) +} diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 0ef8654d8d..26bad809df 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -3007,6 +3007,19 @@ pub fn serve( }, ); + // GET lighthouse/database/invariants + let get_lighthouse_database_invariants = database_path + .and(warp::path("invariants")) + .and(warp::path::end()) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .then( + |task_spawner: TaskSpawner, chain: Arc>| { + task_spawner + .blocking_json_task(Priority::P1, move || database::check_invariants(chain)) + }, + ); + // POST lighthouse/database/reconstruct let post_lighthouse_database_reconstruct = database_path .and(warp::path("reconstruct")) @@ -3336,6 +3349,7 @@ pub fn serve( .uor(get_lighthouse_validator_inclusion) .uor(get_lighthouse_staking) .uor(get_lighthouse_database_info) + .uor(get_lighthouse_database_invariants) 
.uor(get_lighthouse_custody_info) .uor(get_lighthouse_attestation_performance) .uor(get_beacon_light_client_optimistic_update) diff --git a/beacon_node/store/src/invariants.rs b/beacon_node/store/src/invariants.rs new file mode 100644 index 0000000000..eb5232d344 --- /dev/null +++ b/beacon_node/store/src/invariants.rs @@ -0,0 +1,781 @@ +//! Database invariant checks for the hot and cold databases. +//! +//! These checks verify the consistency of data stored in the database. They are designed to be +//! called from the HTTP API and from tests to detect data corruption or bugs in the store logic. +//! +//! See the `check_invariants` and `check_database_invariants` methods for the full list. + +use crate::hdiff::StorageStrategy; +use crate::hot_cold_store::{ColdStateSummary, HotStateSummary}; +use crate::{DBColumn, Error, ItemStore}; +use crate::{HotColdDB, Split}; +use serde::Serialize; +use ssz::Decode; +use std::cmp; +use std::collections::HashSet; +use types::*; + +/// Result of running invariant checks on the database. +#[derive(Debug, Clone, Serialize)] +pub struct InvariantCheckResult { + /// List of invariant violations found. + pub violations: Vec, +} + +impl InvariantCheckResult { + pub fn new() -> Self { + Self { + violations: Vec::new(), + } + } + + pub fn is_ok(&self) -> bool { + self.violations.is_empty() + } + + pub fn add_violation(&mut self, violation: InvariantViolation) { + self.violations.push(violation); + } + + pub fn merge(&mut self, other: InvariantCheckResult) { + self.violations.extend(other.violations); + } +} + +impl Default for InvariantCheckResult { + fn default() -> Self { + Self::new() + } +} + +/// Context data from the beacon chain needed for invariant checks. +/// +/// This allows all invariant checks to live in the store crate while still checking +/// invariants that depend on fork choice, state cache, and custody context. +pub struct InvariantContext { + /// Block roots tracked by fork choice (invariant 1). 
+ pub fork_choice_blocks: Vec<(Hash256, Slot)>, + /// State roots held in the in-memory state cache (invariant 8). + pub state_cache_roots: Vec, + /// Custody columns for the current epoch (invariant 7). + pub custody_columns: Vec, + /// Compressed pubkey bytes from the in-memory validator pubkey cache, indexed by validator index + /// (invariant 9). + pub pubkey_cache_pubkeys: Vec>, +} + +/// A single invariant violation. +#[derive(Debug, Clone, Serialize)] +pub enum InvariantViolation { + /// Invariant 1: fork choice block consistency. + /// + /// ```text + /// block in fork_choice && descends_from_finalized -> block in hot_db + /// ``` + ForkChoiceBlockMissing { block_root: Hash256, slot: Slot }, + /// Invariant 2: block and state consistency. + /// + /// ```text + /// block in hot_db && block.slot >= split.slot + /// -> state_summary for block.state_root() in hot_db + /// ``` + HotBlockMissingStateSummary { + block_root: Hash256, + slot: Slot, + state_root: Hash256, + }, + /// Invariant 3: state summary diff consistency. + /// + /// ```text + /// state_summary in hot_db + /// -> state diff/snapshot/nothing in hot_db according to hierarchy rules + /// ``` + HotStateMissingSnapshot { state_root: Hash256, slot: Slot }, + /// Invariant 3: state summary diff consistency (missing diff). + /// + /// ```text + /// state_summary in hot_db + /// -> state diff/snapshot/nothing in hot_db according to hierarchy rules + /// ``` + HotStateMissingDiff { state_root: Hash256, slot: Slot }, + /// Invariant 3: DiffFrom/ReplayFrom base slot must reference an existing summary. + /// + /// ```text + /// state_summary in hot_db + /// -> state diff/snapshot/nothing in hot_db according to hierarchy rules + /// ``` + HotStateBaseSummaryMissing { + slot: Slot, + base_state_root: Hash256, + }, + /// Invariant 4: state summary chain consistency. 
+ /// + /// ```text + /// state_summary in hot_db && state_summary.slot > split.slot + /// -> state_summary for previous_state_root in hot_db + /// ``` + HotStateMissingPreviousSummary { + slot: Slot, + previous_state_root: Hash256, + }, + /// Invariant 5: block and execution payload consistency. + /// + /// ```text + /// block in hot_db && !prune_payloads -> payload for block.root in hot_db + /// ``` + ExecutionPayloadMissing { block_root: Hash256, slot: Slot }, + /// Invariant 6: block and blobs consistency. + /// + /// ```text + /// block in hot_db && num_blob_commitments > 0 + /// -> blob_list for block.root in hot_db + /// ``` + BlobSidecarMissing { block_root: Hash256, slot: Slot }, + /// Invariant 7: block and data columns consistency. + /// + /// ```text + /// block in hot_db && num_blob_commitments > 0 + /// && block.slot >= earliest_available_slot + /// && data_column_idx in custody_columns + /// -> (block_root, data_column_idx) in hot_db + /// ``` + DataColumnMissing { + block_root: Hash256, + slot: Slot, + column_index: ColumnIndex, + }, + /// Invariant 8: state cache and disk consistency. + /// + /// ```text + /// state in state_cache -> state_summary in hot_db + /// ``` + StateCacheMissingSummary { state_root: Hash256 }, + /// Invariant 9: pubkey cache consistency. + /// + /// ```text + /// state_summary in hot_db + /// -> all validator pubkeys from state.validators are in the hot_db + /// ``` + PubkeyCacheMissing { validator_index: usize }, + /// Invariant 9b: pubkey cache value mismatch. + /// + /// ```text + /// pubkey_cache[i] == hot_db(PubkeyCache)[i] + /// ``` + PubkeyCacheMismatch { validator_index: usize }, + /// Invariant 10: block root indices mapping. 
+ /// + /// ```text + /// oldest_block_slot <= i < split.slot + /// -> block_root for slot i in cold_db + /// && block for block_root in hot_db + /// ``` + ColdBlockRootMissing { + slot: Slot, + oldest_block_slot: Slot, + split_slot: Slot, + }, + /// Invariant 10: block root index references a block that must exist. + /// + /// ```text + /// oldest_block_slot <= i < split.slot + /// -> block_root for slot i in cold_db + /// && block for block_root in hot_db + /// ``` + ColdBlockRootOrphan { slot: Slot, block_root: Hash256 }, + /// Invariant 11: state root indices mapping. + /// + /// ```text + /// (i <= state_lower_limit || i >= min(split.slot, state_upper_limit)) && i < split.slot + /// -> i |-> state_root in cold_db(BeaconStateRoots) + /// && state_root |-> cold_state_summary in cold_db(BeaconColdStateSummary) + /// && cold_state_summary.slot == i + /// ``` + ColdStateRootMissing { + slot: Slot, + state_lower_limit: Slot, + state_upper_limit: Slot, + split_slot: Slot, + }, + /// Invariant 11: state root index must have a cold state summary. + /// + /// ```text + /// (i <= state_lower_limit || i >= min(split.slot, state_upper_limit)) && i < split.slot + /// -> i |-> state_root in cold_db(BeaconStateRoots) + /// && state_root |-> cold_state_summary in cold_db(BeaconColdStateSummary) + /// && cold_state_summary.slot == i + /// ``` + ColdStateRootMissingSummary { slot: Slot, state_root: Hash256 }, + /// Invariant 11: cold state summary slot must match index slot. + /// + /// ```text + /// (i <= state_lower_limit || i >= min(split.slot, state_upper_limit)) && i < split.slot + /// -> i |-> state_root in cold_db(BeaconStateRoots) + /// && state_root |-> cold_state_summary in cold_db(BeaconColdStateSummary) + /// && cold_state_summary.slot == i + /// ``` + ColdStateRootSlotMismatch { + slot: Slot, + state_root: Hash256, + summary_slot: Slot, + }, + /// Invariant 12: cold state diff consistency. 
+ /// + /// ```text + /// cold_state_summary in cold_db + /// -> slot |-> state diff/snapshot/nothing in cold_db according to diff hierarchy + /// ``` + ColdStateMissingSnapshot { state_root: Hash256, slot: Slot }, + /// Invariant 12: cold state diff consistency (missing diff). + /// + /// ```text + /// cold_state_summary in cold_db + /// -> slot |-> state diff/snapshot/nothing in cold_db according to diff hierarchy + /// ``` + ColdStateMissingDiff { state_root: Hash256, slot: Slot }, + /// Invariant 12: DiffFrom/ReplayFrom base slot must reference an existing summary. + /// + /// ```text + /// cold_state_summary in cold_db + /// -> slot |-> state diff/snapshot/nothing in cold_db according to diff hierarchy + /// ``` + ColdStateBaseSummaryMissing { slot: Slot, base_slot: Slot }, +} + +impl, Cold: ItemStore> HotColdDB { + /// Run all database invariant checks. + /// + /// The `ctx` parameter provides data from the beacon chain layer (fork choice, state cache, + /// custody columns, pubkey cache) so that all invariant checks can live in this single file. + pub fn check_invariants(&self, ctx: &InvariantContext) -> Result { + let mut result = InvariantCheckResult::new(); + let split = self.get_split_info(); + + result.merge(self.check_fork_choice_block_consistency(ctx)?); + result.merge(self.check_hot_block_invariants(&split, ctx)?); + result.merge(self.check_hot_state_summary_diff_consistency()?); + result.merge(self.check_hot_state_summary_chain_consistency(&split)?); + result.merge(self.check_state_cache_consistency(ctx)?); + result.merge(self.check_cold_block_root_indices(&split)?); + result.merge(self.check_cold_state_root_indices(&split)?); + result.merge(self.check_cold_state_diff_consistency()?); + result.merge(self.check_pubkey_cache_consistency(ctx)?); + + Ok(result) + } + + /// Invariant 1 (Hot DB): Fork choice block consistency. 
+ /// + /// ```text + /// block in fork_choice && descends_from_finalized -> block in hot_db + /// ``` + /// + /// Every canonical fork choice block (descending from finalized) must exist in the hot + /// database. Pruned non-canonical fork blocks may linger in the proto-array and are + /// excluded from this check. + fn check_fork_choice_block_consistency( + &self, + ctx: &InvariantContext, + ) -> Result { + let mut result = InvariantCheckResult::new(); + + for &(block_root, slot) in &ctx.fork_choice_blocks { + let exists = self + .hot_db + .key_exists(DBColumn::BeaconBlock, block_root.as_slice())?; + if !exists { + result + .add_violation(InvariantViolation::ForkChoiceBlockMissing { block_root, slot }); + } + } + + Ok(result) + } + + /// Invariants 2, 5, 6, 7 (Hot DB): Block-related consistency checks. + /// + /// Iterates hot DB blocks once and checks: + /// - Invariant 2: block-state summary consistency + /// - Invariant 5: execution payload consistency (when prune_payloads=false) + /// - Invariant 6: blob sidecar consistency (Deneb to Fulu) + /// - Invariant 7: data column consistency (post-Fulu, when custody_columns provided) + fn check_hot_block_invariants( + &self, + split: &Split, + ctx: &InvariantContext, + ) -> Result { + let mut result = InvariantCheckResult::new(); + + let check_payloads = !self.get_config().prune_payloads; + let bellatrix_fork_slot = self + .spec + .bellatrix_fork_epoch + .map(|epoch| epoch.start_slot(E::slots_per_epoch())); + let deneb_fork_slot = self + .spec + .deneb_fork_epoch + .map(|epoch| epoch.start_slot(E::slots_per_epoch())); + let fulu_fork_slot = self + .spec + .fulu_fork_epoch + .map(|epoch| epoch.start_slot(E::slots_per_epoch())); + let oldest_blob_slot = self.get_blob_info().oldest_blob_slot; + let oldest_data_column_slot = self.get_data_column_info().oldest_data_column_slot; + + for res in self.hot_db.iter_column::(DBColumn::BeaconBlock) { + let (block_root, block_bytes) = res?; + let block = 
SignedBlindedBeaconBlock::::from_ssz_bytes(&block_bytes, &self.spec)?; + let slot = block.slot(); + + // Invariant 2: block-state consistency. + if slot >= split.slot { + let state_root = block.state_root(); + let has_summary = self + .hot_db + .key_exists(DBColumn::BeaconStateHotSummary, state_root.as_slice())?; + if !has_summary { + result.add_violation(InvariantViolation::HotBlockMissingStateSummary { + block_root, + slot, + state_root, + }); + } + } + + // Invariant 5: execution payload consistency. + // TODO(gloas): reconsider this invariant + if check_payloads + && let Some(bellatrix_slot) = bellatrix_fork_slot + && slot >= bellatrix_slot + && !self.execution_payload_exists(&block_root)? + && !self.payload_envelope_exists(&block_root)? + { + result.add_violation(InvariantViolation::ExecutionPayloadMissing { + block_root, + slot, + }); + } + + // Invariant 6: blob sidecar consistency. + // Only check blocks that actually have blob KZG commitments — blocks with 0 + // commitments legitimately have no blob sidecars stored. + if let Some(deneb_slot) = deneb_fork_slot + && let Some(oldest_blob) = oldest_blob_slot + && slot >= deneb_slot + && slot >= oldest_blob + && fulu_fork_slot.is_none_or(|fulu_slot| slot < fulu_slot) + && block.num_expected_blobs() > 0 + { + let has_blob = self + .blobs_db + .key_exists(DBColumn::BeaconBlob, block_root.as_slice())?; + if !has_blob { + result + .add_violation(InvariantViolation::BlobSidecarMissing { block_root, slot }); + } + } + + // Invariant 7: data column consistency. + // Only check blocks that actually have blob KZG commitments. + // TODO(gloas): reconsider this invariant — non-canonical payloads won't have + // their data column sidecars stored. 
+ if !ctx.custody_columns.is_empty() + && let Some(fulu_slot) = fulu_fork_slot + && let Some(oldest_dc) = oldest_data_column_slot + && slot >= fulu_slot + && slot >= oldest_dc + && block.num_expected_blobs() > 0 + { + let stored_columns = self.get_data_column_keys(block_root)?; + for col_idx in &ctx.custody_columns { + if !stored_columns.contains(col_idx) { + result.add_violation(InvariantViolation::DataColumnMissing { + block_root, + slot, + column_index: *col_idx, + }); + } + } + } + } + + Ok(result) + } + + /// Invariant 3 (Hot DB): State summary diff/snapshot consistency. + /// + /// ```text + /// state_summary in hot_db + /// -> state diff/snapshot/nothing in hot_db per HDiff hierarchy rules + /// ``` + /// + /// Each hot state summary should have the correct storage artifact (snapshot, diff, or + /// nothing) according to the HDiff hierarchy configuration. The hierarchy uses the + /// anchor_slot as its start point for the hot DB. + fn check_hot_state_summary_diff_consistency(&self) -> Result { + let mut result = InvariantCheckResult::new(); + + let anchor_slot = self.get_anchor_info().anchor_slot; + + // Collect all summary slots and their strategies in a first pass. + let mut known_state_roots = HashSet::new(); + let mut base_state_refs: Vec<(Slot, Hash256)> = Vec::new(); + + for res in self + .hot_db + .iter_column::(DBColumn::BeaconStateHotSummary) + { + let (state_root, value) = res?; + let summary = HotStateSummary::from_ssz_bytes(&value)?; + + known_state_roots.insert(state_root); + + match self.hierarchy.storage_strategy(summary.slot, anchor_slot)? 
{ + StorageStrategy::Snapshot => { + let has_snapshot = self + .hot_db + .key_exists(DBColumn::BeaconStateHotSnapshot, state_root.as_slice())?; + if !has_snapshot { + result.add_violation(InvariantViolation::HotStateMissingSnapshot { + state_root, + slot: summary.slot, + }); + } + } + StorageStrategy::DiffFrom(base_slot) => { + let has_diff = self + .hot_db + .key_exists(DBColumn::BeaconStateHotDiff, state_root.as_slice())?; + if !has_diff { + result.add_violation(InvariantViolation::HotStateMissingDiff { + state_root, + slot: summary.slot, + }); + } + if let Ok(base_root) = summary.diff_base_state.get_root(base_slot) { + base_state_refs.push((summary.slot, base_root)); + } + } + StorageStrategy::ReplayFrom(base_slot) => { + if let Ok(base_root) = summary.diff_base_state.get_root(base_slot) { + base_state_refs.push((summary.slot, base_root)); + } + } + } + } + + // Verify that all diff base state roots reference existing summaries. + for (slot, base_state_root) in base_state_refs { + if !known_state_roots.contains(&base_state_root) { + result.add_violation(InvariantViolation::HotStateBaseSummaryMissing { + slot, + base_state_root, + }); + } + } + + Ok(result) + } + + /// Invariant 4 (Hot DB): State summary chain consistency. + /// + /// ```text + /// state_summary in hot_db && state_summary.slot > split.slot + /// -> state_summary for previous_state_root in hot_db + /// ``` + /// + /// The chain of `previous_state_root` links must be continuous back to the split state. + /// The split state itself is the boundary and does not need a predecessor in the hot DB. 
+ fn check_hot_state_summary_chain_consistency( + &self, + split: &Split, + ) -> Result { + let mut result = InvariantCheckResult::new(); + + for res in self + .hot_db + .iter_column::(DBColumn::BeaconStateHotSummary) + { + let (_state_root, value) = res?; + let summary = HotStateSummary::from_ssz_bytes(&value)?; + + if summary.slot > split.slot { + let prev_root = summary.previous_state_root; + let has_prev = self + .hot_db + .key_exists(DBColumn::BeaconStateHotSummary, prev_root.as_slice())?; + if !has_prev { + result.add_violation(InvariantViolation::HotStateMissingPreviousSummary { + slot: summary.slot, + previous_state_root: prev_root, + }); + } + } + } + + Ok(result) + } + + /// Invariant 8 (Hot DB): State cache and disk consistency. + /// + /// ```text + /// state in state_cache -> state_summary in hot_db + /// ``` + /// + /// Every state held in the in-memory state cache (including the finalized state) should + /// have a corresponding hot state summary on disk. + fn check_state_cache_consistency( + &self, + ctx: &InvariantContext, + ) -> Result { + let mut result = InvariantCheckResult::new(); + + for &state_root in &ctx.state_cache_roots { + let has_summary = self + .hot_db + .key_exists(DBColumn::BeaconStateHotSummary, state_root.as_slice())?; + if !has_summary { + result.add_violation(InvariantViolation::StateCacheMissingSummary { state_root }); + } + } + + Ok(result) + } + + /// Invariant 10 (Cold DB): Block root indices. + /// + /// ```text + /// oldest_block_slot <= i < split.slot + /// -> block_root for slot i in cold_db + /// && block for block_root in hot_db + /// ``` + /// + /// Every slot in the cold range (from `oldest_block_slot` to `split.slot`) should have a + /// block root index entry, and the referenced block should exist in the hot DB. Note that + /// skip slots store the most recent non-skipped block's root, so `block.slot()` may differ + /// from the index slot. 
+ fn check_cold_block_root_indices(&self, split: &Split) -> Result { + let mut result = InvariantCheckResult::new(); + + let anchor_info = self.get_anchor_info(); + + if anchor_info.oldest_block_slot >= split.slot { + return Ok(result); + } + + for slot_val in anchor_info.oldest_block_slot.as_u64()..split.slot.as_u64() { + let slot = Slot::new(slot_val); + + let slot_bytes = slot_val.to_be_bytes(); + let block_root_bytes = self + .cold_db + .get_bytes(DBColumn::BeaconBlockRoots, &slot_bytes)?; + + let Some(root_bytes) = block_root_bytes else { + result.add_violation(InvariantViolation::ColdBlockRootMissing { + slot, + oldest_block_slot: anchor_info.oldest_block_slot, + split_slot: split.slot, + }); + continue; + }; + + if root_bytes.len() != 32 { + return Err(Error::InvalidKey(format!( + "cold block root at slot {slot} has invalid length {}", + root_bytes.len() + ))); + } + + let block_root = Hash256::from_slice(&root_bytes); + let block_exists = self + .hot_db + .key_exists(DBColumn::BeaconBlock, block_root.as_slice())?; + if !block_exists { + result.add_violation(InvariantViolation::ColdBlockRootOrphan { slot, block_root }); + } + } + + Ok(result) + } + + /// Invariant 11 (Cold DB): State root indices. + /// + /// ```text + /// (i <= state_lower_limit || i >= min(split.slot, state_upper_limit)) && i < split.slot + /// -> i |-> state_root in cold_db(BeaconStateRoots) + /// && state_root |-> cold_state_summary in cold_db(BeaconColdStateSummary) + /// && cold_state_summary.slot == i + /// ``` + fn check_cold_state_root_indices(&self, split: &Split) -> Result { + let mut result = InvariantCheckResult::new(); + + let anchor_info = self.get_anchor_info(); + + // Expected slots are: (i <= state_lower_limit || i >= effective_upper) && i < split.slot + // where effective_upper = min(split.slot, state_upper_limit). 
+ for slot_val in 0..split.slot.as_u64() { + let slot = Slot::new(slot_val); + + if slot <= anchor_info.state_lower_limit + || slot >= cmp::min(split.slot, anchor_info.state_upper_limit) + { + let slot_bytes = slot_val.to_be_bytes(); + let Some(root_bytes) = self + .cold_db + .get_bytes(DBColumn::BeaconStateRoots, &slot_bytes)? + else { + result.add_violation(InvariantViolation::ColdStateRootMissing { + slot, + state_lower_limit: anchor_info.state_lower_limit, + state_upper_limit: anchor_info.state_upper_limit, + split_slot: split.slot, + }); + continue; + }; + + if root_bytes.len() != 32 { + return Err(Error::InvalidKey(format!( + "cold state root at slot {slot} has invalid length {}", + root_bytes.len() + ))); + } + + let state_root = Hash256::from_slice(&root_bytes); + + match self + .cold_db + .get_bytes(DBColumn::BeaconColdStateSummary, state_root.as_slice())? + { + None => { + result.add_violation(InvariantViolation::ColdStateRootMissingSummary { + slot, + state_root, + }); + } + Some(summary_bytes) => { + let summary = ColdStateSummary::from_ssz_bytes(&summary_bytes)?; + if summary.slot != slot { + result.add_violation(InvariantViolation::ColdStateRootSlotMismatch { + slot, + state_root, + summary_slot: summary.slot, + }); + } + } + } + } + } + + Ok(result) + } + + /// Invariant 12 (Cold DB): Cold state diff/snapshot consistency. + /// + /// ```text + /// cold_state_summary in cold_db + /// -> state diff/snapshot/nothing in cold_db per HDiff hierarchy rules + /// ``` + /// + /// Each cold state summary should have the correct storage artifact according to the + /// HDiff hierarchy. Cold states always use genesis (slot 0) as the hierarchy start since + /// they are finalized and have no anchor_slot dependency. 
+ fn check_cold_state_diff_consistency(&self) -> Result { + let mut result = InvariantCheckResult::new(); + + let mut summary_slots = HashSet::new(); + let mut base_slot_refs = Vec::new(); + + for res in self + .cold_db + .iter_column::(DBColumn::BeaconColdStateSummary) + { + let (state_root, value) = res?; + let summary = ColdStateSummary::from_ssz_bytes(&value)?; + + summary_slots.insert(summary.slot); + + let slot_bytes = summary.slot.as_u64().to_be_bytes(); + + match self + .hierarchy + .storage_strategy(summary.slot, Slot::new(0))? + { + StorageStrategy::Snapshot => { + let has_snapshot = self + .cold_db + .key_exists(DBColumn::BeaconStateSnapshot, &slot_bytes)?; + if !has_snapshot { + result.add_violation(InvariantViolation::ColdStateMissingSnapshot { + state_root, + slot: summary.slot, + }); + } + } + StorageStrategy::DiffFrom(base_slot) => { + let has_diff = self + .cold_db + .key_exists(DBColumn::BeaconStateDiff, &slot_bytes)?; + if !has_diff { + result.add_violation(InvariantViolation::ColdStateMissingDiff { + state_root, + slot: summary.slot, + }); + } + base_slot_refs.push((summary.slot, base_slot)); + } + StorageStrategy::ReplayFrom(base_slot) => { + base_slot_refs.push((summary.slot, base_slot)); + } + } + } + + // Verify that all DiffFrom/ReplayFrom base slots reference existing summaries. + for (slot, base_slot) in base_slot_refs { + if !summary_slots.contains(&base_slot) { + result.add_violation(InvariantViolation::ColdStateBaseSummaryMissing { + slot, + base_slot, + }); + } + } + + Ok(result) + } + + /// Invariant 9 (Hot DB): Pubkey cache consistency. + /// + /// ```text + /// all validator pubkeys from states are in hot_db(PubkeyCache) + /// ``` + /// + /// Checks that the in-memory pubkey cache and the on-disk PubkeyCache column have the same + /// number of entries AND that each pubkey matches at every validator index. 
+ fn check_pubkey_cache_consistency( + &self, + ctx: &InvariantContext, + ) -> Result { + let mut result = InvariantCheckResult::new(); + + // Read on-disk pubkeys by sequential validator index (matching how they are stored + // with Hash256::from_low_u64_be(index) as key). + // Iterate in-memory pubkeys and verify each matches on disk. + for (validator_index, in_memory_bytes) in ctx.pubkey_cache_pubkeys.iter().enumerate() { + let mut key = [0u8; 32]; + key[24..].copy_from_slice(&(validator_index as u64).to_be_bytes()); + match self.hot_db.get_bytes(DBColumn::PubkeyCache, &key)? { + Some(on_disk_bytes) if in_memory_bytes != &on_disk_bytes => { + result + .add_violation(InvariantViolation::PubkeyCacheMismatch { validator_index }); + } + None => { + result + .add_violation(InvariantViolation::PubkeyCacheMissing { validator_index }); + } + _ => {} + } + } + + Ok(result) + } +} diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs index 3363eb800c..bfa1200602 100644 --- a/beacon_node/store/src/lib.rs +++ b/beacon_node/store/src/lib.rs @@ -15,6 +15,7 @@ pub mod hdiff; pub mod historic_state_cache; pub mod hot_cold_store; mod impls; +pub mod invariants; mod memory_store; pub mod metadata; pub mod metrics; diff --git a/beacon_node/store/src/state_cache.rs b/beacon_node/store/src/state_cache.rs index 4b0d1ee016..6d159c9361 100644 --- a/beacon_node/store/src/state_cache.rs +++ b/beacon_node/store/src/state_cache.rs @@ -111,6 +111,19 @@ impl StateCache { self.hdiff_buffers.mem_usage() } + /// Return all state roots currently held in the cache, including the finalized state. 
+ pub fn state_roots(&self) -> Vec { + let mut roots: Vec = self + .states + .iter() + .map(|(&state_root, _)| state_root) + .collect(); + if let Some(ref finalized) = self.finalized_state { + roots.push(finalized.state_root); + } + roots + } + pub fn update_finalized_state( &mut self, state_root: Hash256, From bff72a920da50a2abefa44b75c98b9597200ee8a Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 12 Mar 2026 10:06:25 +1100 Subject: [PATCH 18/43] Update database and block replayer to handle payload envelopes (#8886) Closes: - https://github.com/sigp/lighthouse/issues/8869 - Update `BlockReplayer` to support replay of execution payload envelopes. - Update `HotColdDB` to load payload envelopes and feed them to the `BlockReplayer` for both hot + cold states. However the cold DB code is not fully working yet (see: https://github.com/sigp/lighthouse/issues/8958). - Add `StatePayloadStatus` to allow callers to specify whether they want a state with a payload applied, or not. - Fix the state cache to key by `StatePayloadStatus`. - Lots of fixes to block production and block processing regarding state management. 
- Initial test harness support for producing+processing Gloas blocks+envelopes - A few new tests to cover Gloas DB operations Co-Authored-By: Eitan Seri- Levi Co-Authored-By: Eitan Seri-Levi Co-Authored-By: Michael Sproul Co-Authored-By: Michael Sproul Co-Authored-By: Jimmy Chen --- beacon_node/beacon_chain/src/beacon_chain.rs | 27 +- .../beacon_chain/src/blob_verification.rs | 10 +- .../src/block_production/gloas.rs | 15 +- .../beacon_chain/src/block_production/mod.rs | 41 +- .../beacon_chain/src/block_verification.rs | 37 +- beacon_node/beacon_chain/src/builder.rs | 12 +- .../beacon_chain/src/canonical_head.rs | 13 +- .../src/data_column_verification.rs | 11 +- .../src/schema_change/migration_schema_v24.rs | 2 + .../beacon_chain/src/state_advance_timer.rs | 14 +- beacon_node/beacon_chain/src/test_utils.rs | 164 ++++++- beacon_node/beacon_chain/tests/rewards.rs | 3 +- beacon_node/beacon_chain/tests/store_tests.rs | 443 +++++++++++++++++- .../test_utils/execution_block_generator.rs | 10 +- .../http_api/src/attestation_performance.rs | 3 +- .../http_api/src/block_packing_efficiency.rs | 3 +- beacon_node/http_api/src/produce_block.rs | 2 +- .../http_api/src/sync_committee_rewards.rs | 3 +- beacon_node/store/src/hdiff.rs | 6 + beacon_node/store/src/hot_cold_store.rs | 252 ++++++++-- beacon_node/store/src/reconstruct.rs | 1 + beacon_node/store/src/state_cache.rs | 43 +- .../state_processing/src/block_replayer.rs | 136 +++++- .../src/envelope_processing.rs | 2 - .../src/per_block_processing/tests.rs | 2 +- .../state_processing/src/state_advance.rs | 5 + .../types/src/block/signed_beacon_block.rs | 27 ++ consensus/types/src/execution/mod.rs | 2 + .../src/execution/state_payload_status.rs | 18 + consensus/types/src/state/beacon_state.rs | 20 +- 30 files changed, 1243 insertions(+), 84 deletions(-) create mode 100644 consensus/types/src/execution/state_payload_status.rs diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs 
index 07f3bb01fa..ab2097e001 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -2031,9 +2031,16 @@ impl BeaconChain { // required information. (justified_checkpoint, committee_len) } else { + // We assume that the `Pending` state has the same shufflings as a `Full` state + // for the same block. Analysis: https://hackmd.io/@dapplion/gloas_dependant_root let (advanced_state_root, mut state) = self .store - .get_advanced_hot_state(beacon_block_root, request_slot, beacon_state_root)? + .get_advanced_hot_state( + beacon_block_root, + StatePayloadStatus::Pending, + request_slot, + beacon_state_root, + )? .ok_or(Error::MissingBeaconState(beacon_state_root))?; if state.current_epoch() < request_epoch { partial_state_advance( @@ -4662,12 +4669,19 @@ impl BeaconChain { if cached_head.head_block_root() == parent_block_root { (Cow::Borrowed(head_state), cached_head.head_state_root()) } else { + // TODO(gloas): this function needs updating to be envelope-aware + // See: https://github.com/sigp/lighthouse/issues/8957 let block = self .get_blinded_block(&parent_block_root)? .ok_or(Error::MissingBeaconBlock(parent_block_root))?; let (state_root, state) = self .store - .get_advanced_hot_state(parent_block_root, proposal_slot, block.state_root())? + .get_advanced_hot_state( + parent_block_root, + StatePayloadStatus::Pending, + proposal_slot, + block.state_root(), + )? .ok_or(Error::MissingBeaconState(block.state_root()))?; (Cow::Owned(state), state_root) }; @@ -6599,9 +6613,16 @@ impl BeaconChain { let (mut state, state_root) = if let Some((state, state_root)) = head_state_opt { (state, state_root) } else { + // We assume that the `Pending` state has the same shufflings as a `Full` state + // for the same block. Analysis: https://hackmd.io/@dapplion/gloas_dependant_root let (state_root, state) = self .store - .get_advanced_hot_state(head_block_root, target_slot, head_block.state_root)? 
+ .get_advanced_hot_state( + head_block_root, + StatePayloadStatus::Pending, + target_slot, + head_block.state_root, + )? .ok_or(Error::MissingBeaconState(head_block.state_root))?; (state, state_root) }; diff --git a/beacon_node/beacon_chain/src/blob_verification.rs b/beacon_node/beacon_chain/src/blob_verification.rs index fe111628db..86b385d818 100644 --- a/beacon_node/beacon_chain/src/blob_verification.rs +++ b/beacon_node/beacon_chain/src/blob_verification.rs @@ -20,6 +20,7 @@ use tree_hash::TreeHash; use types::data::BlobIdentifier; use types::{ BeaconStateError, BlobSidecar, Epoch, EthSpec, Hash256, SignedBeaconBlockHeader, Slot, + StatePayloadStatus, }; /// An error occurred while validating a gossip blob. @@ -508,9 +509,16 @@ pub fn validate_blob_sidecar_for_gossip = (BeaconBlock>, ConsensusBlockValue); +type BlockProductionResult = (BeaconBlock, BeaconState, ConsensusBlockValue); pub type PreparePayloadResult = Result, BlockProductionError>; pub type PreparePayloadHandle = JoinHandle>>; @@ -425,6 +425,12 @@ impl BeaconChain { )) } + /// Complete a block by computing its state root, and + /// + /// Return `(block, pending_state, block_value)` where: + /// + /// - `pending_state` is the state post block application (prior to payload application) + /// - `block_value` is the consensus-layer rewards for `block` #[allow(clippy::type_complexity)] fn complete_partial_beacon_block_gloas( &self, @@ -433,7 +439,7 @@ impl BeaconChain { payload_data: Option>, mut state: BeaconState, verification: ProduceBlockVerification, - ) -> Result<(BeaconBlock>, u64), BlockProductionError> { + ) -> Result, BlockProductionError> { let PartialBeaconBlock { slot, proposer_index, @@ -545,6 +551,9 @@ impl BeaconChain { drop(state_root_timer); + // Clone the Pending state (post-block, pre-envelope) for callers that need it. 
+ let pending_state = state.clone(); + let (mut block, _) = signed_beacon_block.deconstruct(); *block.state_root_mut() = state_root; @@ -605,7 +614,7 @@ impl BeaconChain { "Produced beacon block" ); - Ok((block, consensus_block_value)) + Ok((block, pending_state, consensus_block_value)) } // TODO(gloas) introduce `ProposerPreferences` so we can build out trustless diff --git a/beacon_node/beacon_chain/src/block_production/mod.rs b/beacon_node/beacon_chain/src/block_production/mod.rs index 76c8b77e93..b33323f527 100644 --- a/beacon_node/beacon_chain/src/block_production/mod.rs +++ b/beacon_node/beacon_chain/src/block_production/mod.rs @@ -3,7 +3,7 @@ use std::{sync::Arc, time::Duration}; use proto_array::ProposerHeadError; use slot_clock::SlotClock; use tracing::{debug, error, info, instrument, warn}; -use types::{BeaconState, Hash256, Slot}; +use types::{BeaconState, Hash256, Slot, StatePayloadStatus}; use crate::{ BeaconChain, BeaconChainTypes, BlockProductionError, StateSkipConfig, @@ -37,8 +37,14 @@ impl BeaconChain { }; let (state, state_root_opt) = if head_slot < slot { // Attempt an aggressive re-org if configured and the conditions are right. - if let Some((re_org_state, re_org_state_root)) = - self.get_state_for_re_org(slot, head_slot, head_block_root) + // TODO(gloas): re-enable reorgs + let gloas_enabled = self + .spec + .fork_name_at_slot::(slot) + .gloas_enabled(); + if !gloas_enabled + && let Some((re_org_state, re_org_state_root)) = + self.get_state_for_re_org(slot, head_slot, head_block_root) { info!( %slot, @@ -49,9 +55,30 @@ impl BeaconChain { } else { // Fetch the head state advanced through to `slot`, which should be present in the // state cache thanks to the state advance timer. 
+ // TODO(gloas): need to fix this once fork choice understands payloads + // for now we just use the existence of the head's payload envelope to determine + // whether we should build atop it + let (payload_status, parent_state_root) = if gloas_enabled + && let Ok(Some(envelope)) = self.store.get_payload_envelope(&head_block_root) + { + debug!( + %slot, + parent_state_root = ?envelope.message.state_root, + parent_block_root = ?head_block_root, + "Building Gloas block on full state" + ); + (StatePayloadStatus::Full, envelope.message.state_root) + } else { + (StatePayloadStatus::Pending, head_state_root) + }; let (state_root, state) = self .store - .get_advanced_hot_state(head_block_root, slot, head_state_root) + .get_advanced_hot_state( + head_block_root, + payload_status, + slot, + parent_state_root, + ) .map_err(BlockProductionError::FailedToLoadState)? .ok_or(BlockProductionError::UnableToProduceAtSlot(slot))?; (state, Some(state_root)) @@ -204,7 +231,11 @@ impl BeaconChain { let (state_root, state) = self .store - .get_advanced_hot_state_from_cache(re_org_parent_block, slot) + .get_advanced_hot_state_from_cache( + re_org_parent_block, + StatePayloadStatus::Pending, + slot, + ) .or_else(|| { warn!(reason = "no state in cache", "Not attempting re-org"); None diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index b748bf5c6c..1be9bd4181 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -99,7 +99,8 @@ use tracing::{Instrument, Span, debug, debug_span, error, info_span, instrument} use types::{ BeaconBlockRef, BeaconState, BeaconStateError, BlobsList, ChainSpec, DataColumnSidecarList, Epoch, EthSpec, FullPayload, Hash256, InconsistentFork, KzgProofs, RelativeEpoch, - SignedBeaconBlock, SignedBeaconBlockHeader, Slot, data::DataColumnSidecarError, + SignedBeaconBlock, SignedBeaconBlockHeader, Slot, StatePayloadStatus, + 
data::DataColumnSidecarError, }; /// Maximum block slot number. Block with slots bigger than this constant will NOT be processed. @@ -1491,7 +1492,11 @@ impl ExecutionPendingBlock { let distance = block.slot().as_u64().saturating_sub(state.slot().as_u64()); for _ in 0..distance { - let state_root = if parent.beacon_block.slot() == state.slot() { + // TODO(gloas): could do a similar optimisation here for Full blocks if we have access + // to the parent envelope and its `state_root`. + let state_root = if parent.beacon_block.slot() == state.slot() + && state.payload_status() == StatePayloadStatus::Pending + { // If it happens that `pre_state` has *not* already been advanced forward a single // slot, then there is no need to compute the state root for this // `per_slot_processing` call since that state root is already stored in the parent @@ -1908,9 +1913,31 @@ fn load_parent>( // Retrieve any state that is advanced through to at most `block.slot()`: this is // particularly important if `block` descends from the finalized/split block, but at a slot // prior to the finalized slot (which is invalid and inaccessible in our DB schema). + // + // Post-Gloas we must also fetch a state with the correct payload status. If the current + // block builds upon the payload of its parent block, then we know the parent block is FULL + // and we need to load the full state. 
+ let (payload_status, parent_state_root) = + if block.as_block().fork_name_unchecked().gloas_enabled() + && let Ok(parent_bid_block_hash) = parent_block.payload_bid_block_hash() + { + if block.as_block().is_parent_block_full(parent_bid_block_hash) { + // TODO(gloas): loading the envelope here is not very efficient + let envelope = chain.store.get_payload_envelope(&root)?.ok_or_else(|| { + BeaconChainError::DBInconsistent(format!( + "Missing envelope for parent block {root:?}", + )) + })?; + (StatePayloadStatus::Full, envelope.message.state_root) + } else { + (StatePayloadStatus::Pending, parent_block.state_root()) + } + } else { + (StatePayloadStatus::Pending, parent_block.state_root()) + }; let (parent_state_root, state) = chain .store - .get_advanced_hot_state(root, block.slot(), parent_block.state_root())? + .get_advanced_hot_state(root, payload_status, block.slot(), parent_state_root)? .ok_or_else(|| { BeaconChainError::DBInconsistent( format!("Missing state for parent block {root:?}",), @@ -1933,7 +1960,9 @@ fn load_parent>( ); } - let beacon_state_root = if state.slot() == parent_block.slot() { + let beacon_state_root = if state.slot() == parent_block.slot() + && let StatePayloadStatus::Pending = payload_status + { // Sanity check. if parent_state_root != parent_block.state_root() { return Err(BeaconChainError::DBInconsistent(format!( diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index d5935b492a..59fa5ec9ec 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -45,7 +45,7 @@ use tree_hash::TreeHash; use types::data::CustodyIndex; use types::{ BeaconBlock, BeaconState, BlobSidecarList, ChainSpec, ColumnIndex, DataColumnSidecarList, - Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot, + Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot, StatePayloadStatus, }; /// An empty struct used to "witness" all the `BeaconChainTypes` traits. 
It has no user-facing @@ -783,8 +783,16 @@ where .map_err(|e| descriptive_db_error("head block", &e))? .ok_or("Head block not found in store")?; + // TODO(gloas): update head loading to load Full block once fork choice works + let payload_status = StatePayloadStatus::Pending; + let (_head_state_root, head_state) = store - .get_advanced_hot_state(head_block_root, current_slot, head_block.state_root()) + .get_advanced_hot_state( + head_block_root, + payload_status, + current_slot, + head_block.state_root(), + ) .map_err(|e| descriptive_db_error("head state", &e))? .ok_or("Head state not found in store")?; diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs index 1a08ac3f88..fd060e2b59 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -305,8 +305,16 @@ impl CanonicalHead { .get_full_block(&beacon_block_root)? .ok_or(Error::MissingBeaconBlock(beacon_block_root))?; let current_slot = fork_choice.fc_store().get_current_slot(); + + // TODO(gloas): pass a better payload status once fork choice is implemented + let payload_status = StatePayloadStatus::Pending; let (_, beacon_state) = store - .get_advanced_hot_state(beacon_block_root, current_slot, beacon_block.state_root())? + .get_advanced_hot_state( + beacon_block_root, + payload_status, + current_slot, + beacon_block.state_root(), + )? .ok_or(Error::MissingBeaconState(beacon_block.state_root()))?; let snapshot = BeaconSnapshot { @@ -673,10 +681,13 @@ impl BeaconChain { .get_full_block(&new_view.head_block_root)? .ok_or(Error::MissingBeaconBlock(new_view.head_block_root))?; + // TODO(gloas): update once we have fork choice + let payload_status = StatePayloadStatus::Pending; let (_, beacon_state) = self .store .get_advanced_hot_state( new_view.head_block_root, + payload_status, current_slot, beacon_block.state_root(), )? 
diff --git a/beacon_node/beacon_chain/src/data_column_verification.rs b/beacon_node/beacon_chain/src/data_column_verification.rs index 08acfdffa4..dde9fad342 100644 --- a/beacon_node/beacon_chain/src/data_column_verification.rs +++ b/beacon_node/beacon_chain/src/data_column_verification.rs @@ -20,7 +20,7 @@ use tracing::{debug, instrument}; use types::data::ColumnIndex; use types::{ BeaconStateError, ChainSpec, DataColumnSidecar, DataColumnSidecarFulu, DataColumnSubnetId, - EthSpec, Hash256, Slot, + EthSpec, Hash256, Slot, StatePayloadStatus, }; /// An error occurred while validating a gossip data column. @@ -706,9 +706,16 @@ fn verify_proposer_and_signature( index = %column_index, "Proposer shuffling cache miss for column verification" ); + // We assume that the `Pending` state has the same shufflings as a `Full` state + // for the same block. Analysis: https://hackmd.io/@dapplion/gloas_dependant_root chain .store - .get_advanced_hot_state(block_parent_root, column_slot, parent_block.state_root) + .get_advanced_hot_state( + block_parent_root, + StatePayloadStatus::Pending, + column_slot, + parent_block.state_root, + ) .map_err(|e| GossipDataColumnError::BeaconChainError(Box::new(e.into())))? .ok_or_else(|| { GossipDataColumnError::BeaconChainError(Box::new( diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v24.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v24.rs index 1e1823a836..c8dfe1ac9b 100644 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v24.rs +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v24.rs @@ -16,6 +16,7 @@ use store::{ use tracing::{debug, info, warn}; use types::{ BeaconState, CACHED_EPOCHS, ChainSpec, Checkpoint, CommitteeCache, EthSpec, Hash256, Slot, + execution::StatePayloadStatus, }; /// We stopped using the pruning checkpoint in schema v23 but never explicitly deleted it. 
@@ -58,6 +59,7 @@ pub fn get_state_v22( base_state, summary.slot, summary.latest_block_root, + StatePayloadStatus::Pending, update_cache, ) .map(Some) diff --git a/beacon_node/beacon_chain/src/state_advance_timer.rs b/beacon_node/beacon_chain/src/state_advance_timer.rs index cb916cb514..4c070e7ecc 100644 --- a/beacon_node/beacon_chain/src/state_advance_timer.rs +++ b/beacon_node/beacon_chain/src/state_advance_timer.rs @@ -26,7 +26,10 @@ use std::sync::{ use task_executor::TaskExecutor; use tokio::time::{Instant, sleep, sleep_until}; use tracing::{Instrument, debug, debug_span, error, instrument, warn}; -use types::{AttestationShufflingId, BeaconStateError, EthSpec, Hash256, RelativeEpoch, Slot}; +use types::{ + AttestationShufflingId, BeaconStateError, EthSpec, Hash256, RelativeEpoch, Slot, + StatePayloadStatus, +}; /// If the head slot is more than `MAX_ADVANCE_DISTANCE` from the current slot, then don't perform /// the state advancement. @@ -277,9 +280,16 @@ fn advance_head(beacon_chain: &Arc>) -> Resu (snapshot.beacon_block_root, snapshot.beacon_state_root()) }; + // TODO(gloas): do better once we have fork choice + let payload_status = StatePayloadStatus::Pending; let (head_state_root, mut state) = beacon_chain .store - .get_advanced_hot_state(head_block_root, current_slot, head_block_state_root)? + .get_advanced_hot_state( + head_block_root, + payload_status, + current_slot, + head_block_state_root, + )? 
.ok_or(Error::HeadMissingFromSnapshotCache(head_block_root))?; let initial_slot = state.slot(); diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index eefb5d48b7..4bc5bb21d3 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -27,7 +27,7 @@ use bls::{ use eth2::types::{GraffitiPolicy, SignedBlockContentsTuple}; use execution_layer::test_utils::generate_genesis_header; use execution_layer::{ - ExecutionLayer, + ExecutionLayer, NewPayloadRequest, NewPayloadRequestGloas, auth::JwtKey, test_utils::{DEFAULT_JWT_SECRET, ExecutionBlockGenerator, MockBuilder, MockExecutionLayer}, }; @@ -52,7 +52,8 @@ use ssz_types::{RuntimeVariableList, VariableList}; use state_processing::ConsensusContext; use state_processing::per_block_processing::compute_timestamp_at_slot; use state_processing::per_block_processing::{ - BlockSignatureStrategy, VerifyBlockRoot, per_block_processing, + BlockSignatureStrategy, VerifyBlockRoot, deneb::kzg_commitment_to_versioned_hash, + per_block_processing, }; use state_processing::state_advance::complete_state_advance; use std::borrow::Cow; @@ -66,6 +67,7 @@ use store::database::interface::BeaconNodeBackend; use store::{HotColdDB, ItemStore, MemoryStore, config::StoreConfig}; use task_executor::TaskExecutor; use task_executor::{ShutdownReason, test_utils::TestRuntime}; +use tracing::debug; use tree_hash::TreeHash; use typenum::U4294967296; use types::attestation::IndexedAttestationBase; @@ -1092,6 +1094,86 @@ where (block_contents, block_response.state) } + /// Returns a newly created block, signed by the proposer for the given slot, + /// along with the execution payload envelope (for Gloas) and the pending state. + /// + /// For pre-Gloas forks, the envelope is `None` and this behaves like `make_block`. 
+ pub async fn make_block_with_envelope( + &self, + mut state: BeaconState, + slot: Slot, + ) -> ( + SignedBlockContentsTuple, + Option>, + BeaconState, + ) { + assert_ne!(slot, 0, "can't produce a block at slot 0"); + assert!(slot >= state.slot()); + + if state.fork_name_unchecked().gloas_enabled() + || self.spec.fork_name_at_slot::(slot).gloas_enabled() + { + complete_state_advance(&mut state, None, slot, &self.spec) + .expect("should be able to advance state to slot"); + state.build_caches(&self.spec).expect("should build caches"); + + let proposer_index = state.get_beacon_proposer_index(slot, &self.spec).unwrap(); + + let graffiti = Graffiti::from(self.rng.lock().random::<[u8; 32]>()); + let graffiti_settings = + GraffitiSettings::new(Some(graffiti), Some(GraffitiPolicy::PreserveUserGraffiti)); + let randao_reveal = self.sign_randao_reveal(&state, proposer_index, slot); + + let (block, pending_state, _consensus_block_value) = self + .chain + .produce_block_on_state_gloas( + state, + None, + slot, + randao_reveal, + graffiti_settings, + ProduceBlockVerification::VerifyRandao, + ) + .await + .unwrap(); + + let signed_block = Arc::new(block.sign( + &self.validator_keypairs[proposer_index].sk, + &pending_state.fork(), + pending_state.genesis_validators_root(), + &self.spec, + )); + + // Retrieve the cached envelope produced during block production and sign it. 
+ let signed_envelope = self + .chain + .pending_payload_envelopes + .write() + .remove(slot) + .map(|envelope| { + let epoch = slot.epoch(E::slots_per_epoch()); + let domain = self.spec.get_domain( + epoch, + Domain::BeaconBuilder, + &pending_state.fork(), + pending_state.genesis_validators_root(), + ); + let message = envelope.signing_root(domain); + let signature = self.validator_keypairs[proposer_index].sk.sign(message); + SignedExecutionPayloadEnvelope { + message: envelope, + signature, + } + }); + + let block_contents: SignedBlockContentsTuple = (signed_block, None); + (block_contents, signed_envelope, pending_state) + } else { + let (block_contents, state) = self.make_block(state, slot).await; + (block_contents, None, state) + } + } + /// Useful for the `per_block_processing` tests. Creates a block, and returns the state after /// caches are built but before the generated block is processed. pub async fn make_block_return_pre_state( @@ -2575,6 +2657,84 @@ where Ok(block_hash) } + /// Process an execution payload envelope for a Gloas block. + pub async fn process_envelope( + &self, + block_root: Hash256, + signed_envelope: SignedExecutionPayloadEnvelope, + pending_state: &mut BeaconState, + ) -> Hash256 { + let state_root = signed_envelope.message.state_root; + debug!( + slot = %signed_envelope.message.slot, + ?state_root, + "Processing execution payload envelope" + ); + let block_state_root = pending_state + .update_tree_hash_cache() + .expect("should compute pending state root"); + + state_processing::envelope_processing::process_execution_payload_envelope( + pending_state, + Some(block_state_root), + &signed_envelope, + state_processing::VerifySignatures::True, + state_processing::envelope_processing::VerifyStateRoot::True, + &self.spec, + ) + .expect("should process envelope"); + + // Notify the EL of the new payload so forkchoiceUpdated can reference it. 
+ let block = self + .chain + .store + .get_blinded_block(&block_root) + .expect("should read block from store") + .expect("block should exist in store"); + + let bid = &block + .message() + .body() + .signed_execution_payload_bid() + .expect("Gloas block should have a payload bid") + .message; + + let versioned_hashes = bid + .blob_kzg_commitments + .iter() + .map(kzg_commitment_to_versioned_hash) + .collect(); + + let request = NewPayloadRequest::Gloas(NewPayloadRequestGloas { + execution_payload: &signed_envelope.message.payload, + versioned_hashes, + parent_beacon_block_root: block.message().parent_root(), + execution_requests: &signed_envelope.message.execution_requests, + }); + + self.chain + .execution_layer + .as_ref() + .expect("harness should have execution layer") + .notify_new_payload(request) + .await + .expect("newPayload should succeed"); + + // Store the envelope. + self.chain + .store + .put_payload_envelope(&block_root, signed_envelope) + .expect("should store envelope"); + + // Store the Full state. + self.chain + .store + .put_state(&state_root, pending_state) + .expect("should store full state"); + + state_root + } + /// Builds an `Rpc` block from a `SignedBeaconBlock` and blobs or data columns retrieved from /// the database. pub fn build_rpc_block_from_store_blobs( diff --git a/beacon_node/beacon_chain/tests/rewards.rs b/beacon_node/beacon_chain/tests/rewards.rs index bc7c98041f..1889c1f625 100644 --- a/beacon_node/beacon_chain/tests/rewards.rs +++ b/beacon_node/beacon_chain/tests/rewards.rs @@ -845,13 +845,14 @@ async fn check_all_base_rewards_for_subset( .state_at_slot(Slot::new(slot - 1), StateSkipConfig::WithoutStateRoots) .unwrap(); + // TODO(gloas): handle payloads? 
let mut pre_state = BlockReplayer::>::new( parent_state, &harness.spec, ) .no_signature_verification() .minimal_block_root_verification() - .apply_blocks(vec![], Some(block.slot())) + .apply_blocks(vec![], vec![], Some(block.slot())) .unwrap() .into_state(); diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 86f4af3efc..a70ad89ca9 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -708,8 +708,13 @@ async fn block_replayer_hooks() { .add_attested_blocks_at_slots(state.clone(), state_root, &block_slots, &all_validators) .await; - let blocks = store - .load_blocks_to_replay(Slot::new(0), max_slot, end_block_root.into()) + let (blocks, envelopes) = store + .load_blocks_to_replay( + Slot::new(0), + max_slot, + end_block_root.into(), + StatePayloadStatus::Pending, + ) .unwrap(); let mut pre_slots = vec![]; @@ -744,7 +749,7 @@ async fn block_replayer_hooks() { post_block_slots.push(block.slot()); Ok(()) })) - .apply_blocks(blocks, None) + .apply_blocks(blocks, envelopes, None) .unwrap() .into_state(); @@ -3842,7 +3847,12 @@ async fn process_blocks_and_attestations_for_unaligned_checkpoint() { let (split_state_root, mut advanced_split_state) = harness .chain .store - .get_advanced_hot_state(split.block_root, split.slot, split.state_root) + .get_advanced_hot_state( + split.block_root, + StatePayloadStatus::Pending, + split.slot, + split.state_root, + ) .unwrap() .unwrap(); complete_state_advance( @@ -5470,6 +5480,427 @@ fn check_finalization(harness: &TestHarness, expected_slot: u64) { ); } +// ===================== Gloas Store Tests ===================== + +/// Test basic Gloas block + envelope storage and retrieval. 
+#[tokio::test] +async fn test_gloas_block_and_envelope_storage_no_skips() { + test_gloas_block_and_envelope_storage_generic(32, vec![], false).await +} + +#[tokio::test] +async fn test_gloas_block_and_envelope_storage_some_skips() { + test_gloas_block_and_envelope_storage_generic(32, vec![2, 4, 5, 16, 23, 24, 25], false).await +} + +#[tokio::test] +async fn test_gloas_block_and_envelope_storage_no_skips_w_cache() { + test_gloas_block_and_envelope_storage_generic(32, vec![], true).await +} + +#[tokio::test] +async fn test_gloas_block_and_envelope_storage_some_skips_w_cache() { + test_gloas_block_and_envelope_storage_generic(32, vec![2, 4, 5, 16, 23, 24, 25], true).await +} + +async fn test_gloas_block_and_envelope_storage_generic( + num_slots: u64, + skipped_slots: Vec, + use_state_cache: bool, +) { + if !fork_name_from_env().is_some_and(|f| f.gloas_enabled()) { + return; + } + + let db_path = tempdir().unwrap(); + let store_config = if !use_state_cache { + StoreConfig { + state_cache_size: new_non_zero_usize(1), + ..StoreConfig::default() + } + } else { + StoreConfig::default() + }; + let spec = test_spec::(); + let store = get_store_generic(&db_path, store_config, spec); + let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); + let spec = &harness.chain.spec; + + let (genesis_state, genesis_state_root) = harness.get_current_state_and_root(); + let mut state = genesis_state; + + let mut block_roots = vec![]; + let mut stored_states = vec![(Slot::new(0), StatePayloadStatus::Full, genesis_state_root)]; + + for i in 1..=num_slots { + let slot = Slot::new(i); + harness.advance_slot(); + + if skipped_slots.contains(&i) { + complete_state_advance(&mut state, None, slot, spec) + .expect("should be able to advance state to slot"); + + let state_root = state.canonical_root().unwrap(); + store.put_state(&state_root, &state).unwrap(); + stored_states.push((slot, state.payload_status(), state_root)); + } + + let (block_contents, envelope, mut pending_state) = + 
harness.make_block_with_envelope(state, slot).await; + let block_root = block_contents.0.canonical_root(); + + // Process the block. + harness + .process_block(slot, block_root, block_contents) + .await + .unwrap(); + + let pending_state_root = pending_state.update_tree_hash_cache().unwrap(); + stored_states.push((slot, StatePayloadStatus::Pending, pending_state_root)); + + // Process the envelope. + let envelope = envelope.expect("Gloas block should have envelope"); + let mut full_state = pending_state.clone(); + let envelope_state_root = envelope.message.state_root; + let full_state_root = harness + .process_envelope(block_root, envelope, &mut full_state) + .await; + assert_eq!(full_state_root, envelope_state_root); + stored_states.push((slot, StatePayloadStatus::Full, full_state_root)); + + block_roots.push(block_root); + state = full_state; + } + + // Verify block storage. + for (i, block_root) in block_roots.iter().enumerate() { + // Block can be loaded. + assert!( + store.get_blinded_block(block_root).unwrap().is_some(), + "block at slot {} should be in DB", + i + 1 + ); + + // Envelope can be loaded. + let loaded_envelope = store.get_payload_envelope(block_root).unwrap(); + assert!( + loaded_envelope.is_some(), + "envelope at slot {} should be in DB", + i + 1 + ); + } + + // Verify state storage. + // Iterate in reverse order to frustrate the cache. 
+ for (slot, payload_status, state_root) in stored_states.into_iter().rev() { + println!("{slot}: {state_root:?}"); + let Some(mut loaded_state) = store + .get_state(&state_root, Some(slot), CACHE_STATE_IN_TESTS) + .unwrap() + else { + panic!("missing {payload_status:?} state at slot {slot} with root {state_root:?}"); + }; + assert_eq!(loaded_state.slot(), slot); + assert_eq!( + loaded_state.payload_status(), + payload_status, + "slot = {slot}" + ); + assert_eq!( + loaded_state.canonical_root().unwrap(), + state_root, + "slot = {slot}" + ); + } +} + +/// Test that Pending and Full states have the correct payload status through round-trip +/// storage and retrieval. +#[tokio::test] +async fn test_gloas_state_payload_status() { + if !fork_name_from_env().is_some_and(|f| f.gloas_enabled()) { + return; + } + + let db_path = tempdir().unwrap(); + let store = get_store(&db_path); + let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); + + let num_blocks = 6u64; + let (genesis_state, _genesis_state_root) = harness.get_current_state_and_root(); + let mut state = genesis_state; + + for i in 1..=num_blocks { + let slot = Slot::new(i); + harness.advance_slot(); + + let (block_contents, envelope, pending_state) = + harness.make_block_with_envelope(state, slot).await; + let block_root = block_contents.0.canonical_root(); + + harness + .process_block(slot, block_root, block_contents) + .await + .unwrap(); + + // Verify the pending state has correct payload status. + assert_eq!( + pending_state.payload_status(), + StatePayloadStatus::Pending, + "pending state at slot {} should be Pending", + i + ); + + // Process the envelope and verify the full state has correct payload status. 
+ let envelope = envelope.expect("Gloas block should have envelope"); + let mut full_state = pending_state; + let full_state_root = harness + .process_envelope(block_root, envelope, &mut full_state) + .await; + + assert_eq!( + full_state.payload_status(), + StatePayloadStatus::Full, + "full state at slot {} should be Full", + i + ); + + // Round-trip: load the full state from DB and check status. + let loaded_full = store + .get_state(&full_state_root, None, CACHE_STATE_IN_TESTS) + .unwrap() + .expect("full state should exist in DB"); + assert_eq!( + loaded_full.payload_status(), + StatePayloadStatus::Full, + "loaded full state at slot {} should be Full after round-trip", + i + ); + + state = full_state; + } +} + +/// Test block replay with and without envelopes. +#[tokio::test] +async fn test_gloas_block_replay_with_envelopes() { + if !fork_name_from_env().is_some_and(|f| f.gloas_enabled()) { + return; + } + + let db_path = tempdir().unwrap(); + let store = get_store(&db_path); + let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); + + let num_blocks = 16u64; + let (genesis_state, _genesis_state_root) = harness.get_current_state_and_root(); + let mut state = genesis_state.clone(); + + let mut last_block_root = Hash256::zero(); + let mut pending_states = HashMap::new(); + let mut full_states = HashMap::new(); + + for i in 1..=num_blocks { + let slot = Slot::new(i); + harness.advance_slot(); + + let (block_contents, envelope, pending_state) = + harness.make_block_with_envelope(state, slot).await; + let block_root = block_contents.0.canonical_root(); + + harness + .process_block(slot, block_root, block_contents) + .await + .unwrap(); + + let pending_state_root = pending_state.clone().update_tree_hash_cache().unwrap(); + pending_states.insert(slot, (pending_state_root, pending_state.clone())); + + let envelope = envelope.expect("Gloas block should have envelope"); + let mut full_state = pending_state; + let full_state_root = harness + 
.process_envelope(block_root, envelope, &mut full_state) + .await; + full_states.insert(slot, (full_state_root, full_state.clone())); + + last_block_root = block_root; + state = full_state; + } + + let end_slot = Slot::new(num_blocks); + + // Load blocks for Pending replay (no envelopes for the last block). + let (blocks_pending, envelopes_pending) = store + .load_blocks_to_replay( + Slot::new(0), + end_slot, + last_block_root, + StatePayloadStatus::Pending, + ) + .unwrap(); + assert!( + !blocks_pending.is_empty(), + "should have blocks for pending replay" + ); + // For Pending, no envelope for the first block (slot 0) or last block; envelopes for + // intermediate blocks whose payloads are canonical. + let expected_pending_envelopes = blocks_pending.len().saturating_sub(2); + assert_eq!( + envelopes_pending.len(), + expected_pending_envelopes, + "pending replay should have envelopes for all blocks except the last" + ); + assert!( + blocks_pending + .iter() + .skip(1) + .take(envelopes_pending.len()) + .map(|block| block.slot()) + .eq(envelopes_pending + .iter() + .map(|envelope| envelope.message.slot)), + "block and envelope slots should match" + ); + + // Load blocks for Full replay (envelopes for all blocks including the last). + let (blocks_full, envelopes_full) = store + .load_blocks_to_replay( + Slot::new(0), + end_slot, + last_block_root, + StatePayloadStatus::Full, + ) + .unwrap(); + assert_eq!( + envelopes_full.len(), + expected_pending_envelopes + 1, + "full replay should have one more envelope than pending replay" + ); + + // Replay to Pending state and verify. 
+ let mut replayed_pending = + BlockReplayer::::new(genesis_state.clone(), store.get_chain_spec()) + .no_signature_verification() + .minimal_block_root_verification() + .desired_state_payload_status(StatePayloadStatus::Pending) + .apply_blocks(blocks_pending, envelopes_pending, None) + .expect("should replay blocks to pending state") + .into_state(); + replayed_pending.apply_pending_mutations().unwrap(); + + let (_, mut expected_pending) = pending_states.get(&end_slot).unwrap().clone(); + expected_pending.apply_pending_mutations().unwrap(); + + replayed_pending.drop_all_caches().unwrap(); + expected_pending.drop_all_caches().unwrap(); + assert_eq!( + replayed_pending, expected_pending, + "replayed pending state should match stored pending state" + ); + + // Replay to Full state and verify. + let mut replayed_full = + BlockReplayer::::new(genesis_state, store.get_chain_spec()) + .no_signature_verification() + .minimal_block_root_verification() + .desired_state_payload_status(StatePayloadStatus::Full) + .apply_blocks(blocks_full, envelopes_full, None) + .expect("should replay blocks to full state") + .into_state(); + replayed_full.apply_pending_mutations().unwrap(); + + let (_, mut expected_full) = full_states.get(&end_slot).unwrap().clone(); + expected_full.apply_pending_mutations().unwrap(); + + replayed_full.drop_all_caches().unwrap(); + expected_full.drop_all_caches().unwrap(); + assert_eq!( + replayed_full, expected_full, + "replayed full state should match stored full state" + ); +} + +/// Test the hot state hierarchy with Full states stored as ReplayFrom. +#[tokio::test] +async fn test_gloas_hot_state_hierarchy() { + if !fork_name_from_env().is_some_and(|f| f.gloas_enabled()) { + return; + } + + let db_path = tempdir().unwrap(); + let store = get_store(&db_path); + let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); + + // Build enough blocks to span multiple epochs. With MinimalEthSpec (8 slots/epoch), + // 40 slots covers 5 epochs. 
+ let num_blocks = E::slots_per_epoch() * 5; + // TODO(gloas): enable finalisation by increasing this threshold + let some_validators = (0..LOW_VALIDATOR_COUNT / 2).collect::>(); + + let (genesis_state, _genesis_state_root) = harness.get_current_state_and_root(); + + // Use manual block building with envelopes for the first few blocks, + // then use the standard attested-blocks path once we've verified envelope handling. + let mut state = genesis_state; + let mut last_block_root = Hash256::zero(); + + for i in 1..=num_blocks { + let slot = Slot::new(i); + harness.advance_slot(); + + let (block_contents, envelope, pending_state) = + harness.make_block_with_envelope(state.clone(), slot).await; + let block_root = block_contents.0.canonical_root(); + + // Attest to previous block before processing next. + if i > 1 { + let state_root = state.update_tree_hash_cache().unwrap(); + harness.attest_block( + &state, + state_root, + last_block_root.into(), + &block_contents.0, + &some_validators, + ); + } + + harness + .process_block(slot, block_root, block_contents) + .await + .unwrap(); + + let envelope = envelope.expect("Gloas block should have envelope"); + let mut full_state = pending_state; + harness + .process_envelope(block_root, envelope, &mut full_state) + .await; + + last_block_root = block_root; + state = full_state; + } + + // Verify states can be loaded and have correct payload status. + let _head_state = harness.get_current_state(); + let _head_slot = harness.head_slot(); + + // States at all slots on the canonical chain should be retrievable. + for slot_num in 1..=num_blocks { + let slot = Slot::new(slot_num); + // Get the state root from the block at this slot via the state root iterator. 
+ let state_root = harness.chain.state_root_at_slot(slot).unwrap().unwrap(); + + let mut loaded_state = store + .get_state(&state_root, Some(slot), CACHE_STATE_IN_TESTS) + .unwrap() + .unwrap(); + assert_eq!(loaded_state.canonical_root().unwrap(), state_root); + } + + // Verify chain dump and iterators work with Gloas states. + check_chain_dump(&harness, num_blocks + 1); + check_iterators(&harness); +} + /// Check that the HotColdDB's split_slot is equal to the start slot of the last finalized epoch. fn check_split_slot( harness: &TestHarness, @@ -5521,7 +5952,9 @@ fn check_chain_dump_from_slot(harness: &TestHarness, from_slot: Slot, expected_l ); // Check presence of execution payload on disk. - if harness.chain.spec.bellatrix_fork_epoch.is_some() { + if harness.chain.spec.bellatrix_fork_epoch.is_some() + && !harness.chain.spec.is_gloas_scheduled() + { assert!( harness .chain diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index e94924d8b2..a66f7a9b55 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -932,8 +932,14 @@ pub fn generate_genesis_header(spec: &ChainSpec) -> Option None, + ForkName::Gloas => { + // TODO(gloas): we are using a Fulu header for now, but this gets fixed up by the + // genesis builder anyway which translates it to bid/latest_block_hash. 
+ let mut header = ExecutionPayloadHeader::Fulu(<_>::default()); + *header.block_hash_mut() = genesis_block_hash.unwrap_or_default(); + *header.transactions_root_mut() = empty_transactions_root; + Some(header) + } } } diff --git a/beacon_node/http_api/src/attestation_performance.rs b/beacon_node/http_api/src/attestation_performance.rs index 6e285829d2..05ed36e68b 100644 --- a/beacon_node/http_api/src/attestation_performance.rs +++ b/beacon_node/http_api/src/attestation_performance.rs @@ -205,8 +205,9 @@ pub fn get_attestation_performance( }) .collect::, _>>()?; + // TODO(gloas): add payloads replayer = replayer - .apply_blocks(blocks, None) + .apply_blocks(blocks, vec![], None) .map_err(|e| custom_server_error(format!("{:?}", e)))?; } diff --git a/beacon_node/http_api/src/block_packing_efficiency.rs b/beacon_node/http_api/src/block_packing_efficiency.rs index 3772470b28..725a0648a5 100644 --- a/beacon_node/http_api/src/block_packing_efficiency.rs +++ b/beacon_node/http_api/src/block_packing_efficiency.rs @@ -398,8 +398,9 @@ pub fn get_block_packing_efficiency( }) .collect::, _>>()?; + // TODO(gloas): add payloads replayer = replayer - .apply_blocks(blocks, None) + .apply_blocks(blocks, vec![], None) .map_err(|e: PackingEfficiencyError| custom_server_error(format!("{:?}", e)))?; } diff --git a/beacon_node/http_api/src/produce_block.rs b/beacon_node/http_api/src/produce_block.rs index 607221686f..70475de130 100644 --- a/beacon_node/http_api/src/produce_block.rs +++ b/beacon_node/http_api/src/produce_block.rs @@ -70,7 +70,7 @@ pub async fn produce_block_v4( let graffiti_settings = GraffitiSettings::new(query.graffiti, query.graffiti_policy); - let (block, consensus_block_value) = chain + let (block, _pending_state, consensus_block_value) = chain .produce_block_with_verification_gloas( randao_reveal, slot, diff --git a/beacon_node/http_api/src/sync_committee_rewards.rs b/beacon_node/http_api/src/sync_committee_rewards.rs index 9bc1f6ead4..8715fc2b1e 100644 --- 
a/beacon_node/http_api/src/sync_committee_rewards.rs +++ b/beacon_node/http_api/src/sync_committee_rewards.rs @@ -66,11 +66,12 @@ pub fn get_state_before_applying_block( }) .map_err(|e| custom_not_found(format!("Parent state is not available! {:?}", e)))?; + // TODO(gloas): handle payloads? let replayer = BlockReplayer::new(parent_state, &chain.spec) .no_signature_verification() .state_root_iter([Ok((parent_block.state_root(), parent_block.slot()))].into_iter()) .minimal_block_root_verification() - .apply_blocks(vec![], Some(block.slot())) + .apply_blocks(vec![], vec![], Some(block.slot())) .map_err(unhandled_error::)?; Ok(replayer.into_state()) diff --git a/beacon_node/store/src/hdiff.rs b/beacon_node/store/src/hdiff.rs index 3777c83b60..85ac56454c 100644 --- a/beacon_node/store/src/hdiff.rs +++ b/beacon_node/store/src/hdiff.rs @@ -654,6 +654,12 @@ impl HierarchyModuli { /// layer 2 diff will point to the start snapshot instead of the layer 1 diff at /// 2998272. pub fn storage_strategy(&self, slot: Slot, start_slot: Slot) -> Result { + // Initially had the idea of using different storage strategies for full and pending states, + // but it was very complex. However without this concept we end up storing two diffs/two + // snapshots at full slots. The complexity of managing skipped slots was the main impetus + // for reverting the payload-status sensitive design: a Full skipped slot has no same-slot + // Pending state to replay from, so has to be handled differently from Full non-skipped + // slots. 
match slot.cmp(&start_slot) { Ordering::Less => return Err(Error::LessThanStart(slot, start_slot)), Ordering::Equal => return Ok(StorageStrategy::Snapshot), diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index fe3477dbfe..428086c464 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -186,6 +186,7 @@ pub enum HotColdDBError { MissingHotHDiff(Hash256), MissingHDiff(Slot), MissingExecutionPayload(Hash256), + MissingExecutionPayloadEnvelope(Hash256), MissingFullBlockExecutionPayloadPruned(Hash256, Slot), MissingAnchorInfo, MissingFrozenBlockSlot(Hash256), @@ -1132,10 +1133,13 @@ impl, Cold: ItemStore> HotColdDB pub fn get_advanced_hot_state( &self, block_root: Hash256, + payload_status: StatePayloadStatus, max_slot: Slot, state_root: Hash256, ) -> Result)>, Error> { - if let Some(cached) = self.get_advanced_hot_state_from_cache(block_root, max_slot) { + if let Some(cached) = + self.get_advanced_hot_state_from_cache(block_root, payload_status, max_slot) + { return Ok(Some(cached)); } @@ -1157,7 +1161,11 @@ impl, Cold: ItemStore> HotColdDB .into()); } - let state_root = if block_root == split.block_root && split.slot <= max_slot { + // Split state should always be `Pending`. + let state_root = if block_root == split.block_root + && let StatePayloadStatus::Pending = payload_status + && split.slot <= max_slot + { split.state_root } else { state_root @@ -1204,11 +1212,12 @@ impl, Cold: ItemStore> HotColdDB pub fn get_advanced_hot_state_from_cache( &self, block_root: Hash256, + payload_status: StatePayloadStatus, max_slot: Slot, ) -> Option<(Hash256, BeaconState)> { self.state_cache .lock() - .get_by_block_root(block_root, max_slot) + .get_by_block_root(block_root, payload_status, max_slot) } /// Delete a state, ensuring it is removed from the LRU cache, as well as from on-disk. 
@@ -1379,6 +1388,8 @@ impl, Cold: ItemStore> HotColdDB // NOTE: `hot_storage_strategy` can error if there are states in the database // prior to the `anchor_slot`. This can happen if checkpoint sync has been // botched and left some states in the database prior to completing. + // Use `Pending` status here because snapshots and diffs are only stored for + // `Pending` states. if let Some(slot) = slot && let Ok(strategy) = self.hot_storage_strategy(slot) { @@ -1846,6 +1857,55 @@ impl, Cold: ItemStore> HotColdDB } } + /// Compute the `StatePayloadStatus` for a stored state based on its summary. + /// + /// In future this might become a field of the summary, but this would require a whole DB + /// migration. For now we use an extra read from the DB to determine it. + fn get_hot_state_summary_payload_status( + &self, + summary: &HotStateSummary, + ) -> Result { + // Treat pre-Gloas states as `Pending`. + if !self + .spec + .fork_name_at_slot::(summary.slot) + .gloas_enabled() + { + return Ok(StatePayloadStatus::Pending); + } + + // Treat genesis state as `Pending` (`BeaconBlock` state). + let previous_state_root = summary.previous_state_root; + if previous_state_root.is_zero() { + return Ok(StatePayloadStatus::Pending); + } + + // Load the hot state summary for the previous state. + // + // If it has the same slot as this summary then we know this summary is for a `Full` state + // (payload state), because they are always diffed against their same-slot `Pending` state. + // + // If the previous summary has a different slot AND the latest block is from `summary.slot`, + // then this state *must* be `Pending` (it is the summary for latest block itself). + // + // Otherwise, we are at a skipped slot and must traverse the graph of state summaries + // backwards until we reach a summary for the latest block. This recursion could be quite + // far in the case of a long skip. 
We could optimise this in future using the + // `diff_base_state` (like in `get_ancestor_state_root`), or by doing a proper DB + // migration. + let previous_state_summary = self + .load_hot_state_summary(&previous_state_root)? + .ok_or(Error::MissingHotStateSummary(previous_state_root))?; + + if previous_state_summary.slot == summary.slot { + Ok(StatePayloadStatus::Full) + } else if summary.slot == summary.latest_block_slot { + Ok(StatePayloadStatus::Pending) + } else { + self.get_hot_state_summary_payload_status(&previous_state_summary) + } + } + fn load_hot_hdiff_buffer(&self, state_root: Hash256) -> Result { if let Some(buffer) = self .state_cache @@ -1941,13 +2001,22 @@ impl, Cold: ItemStore> HotColdDB ) -> Result, Hash256)>, Error> { metrics::inc_counter(&metrics::BEACON_STATE_HOT_GET_COUNT); - if let Some(HotStateSummary { - slot, - latest_block_root, - diff_base_state, - .. - }) = self.load_hot_state_summary(state_root)? + if let Some( + summary @ HotStateSummary { + slot, + latest_block_root, + diff_base_state, + .. + }, + ) = self.load_hot_state_summary(state_root)? { + let payload_status = self.get_hot_state_summary_payload_status(&summary)?; + debug!( + %slot, + ?state_root, + ?payload_status, + "Loading hot state" + ); let mut state = match self.hot_storage_strategy(slot)? { strat @ StorageStrategy::Snapshot | strat @ StorageStrategy::DiffFrom(_) => { let buffer_timer = metrics::start_timer_vec( @@ -1999,6 +2068,7 @@ impl, Cold: ItemStore> HotColdDB base_state, slot, latest_block_root, + payload_status, update_cache, )? 
} @@ -2016,19 +2086,26 @@ impl, Cold: ItemStore> HotColdDB base_state: BeaconState, slot: Slot, latest_block_root: Hash256, + desired_payload_status: StatePayloadStatus, update_cache: bool, ) -> Result, Error> { - if base_state.slot() == slot { + if base_state.slot() == slot && base_state.payload_status() == desired_payload_status { return Ok(base_state); } - let blocks = self.load_blocks_to_replay(base_state.slot(), slot, latest_block_root)?; + let (blocks, envelopes) = self.load_blocks_to_replay( + base_state.slot(), + slot, + latest_block_root, + desired_payload_status, + )?; let _t = metrics::start_timer(&metrics::STORE_BEACON_REPLAY_HOT_BLOCKS_TIME); // If replaying blocks, and `update_cache` is true, also cache the epoch boundary // state that this state is based on. It may be useful as the basis of more states // in the same epoch. let state_cache_hook = |state_root, state: &mut BeaconState| { + // TODO(gloas): prevent caching of the payload_status=Full state? if !update_cache || state.slot() % E::slots_per_epoch() != 0 { return Ok(()); } @@ -2052,9 +2129,19 @@ impl, Cold: ItemStore> HotColdDB Ok(()) }; + debug!( + %slot, + blocks = ?blocks.iter().map(|block| block.slot()).collect::>(), + envelopes = ?envelopes.iter().map(|e| e.message.slot).collect::>(), + payload_status = ?desired_payload_status, + "Replaying blocks and envelopes" + ); + self.replay_blocks( base_state, blocks, + envelopes, + desired_payload_status, slot, no_state_root_iter(), Some(Box::new(state_cache_hook)), @@ -2358,7 +2445,7 @@ impl, Cold: ItemStore> HotColdDB return Ok(base_state); } - let blocks = self.load_cold_blocks(base_state.slot() + 1, slot)?; + let (blocks, envelopes) = self.load_cold_blocks(base_state.slot() + 1, slot)?; // Include state root for base state as it is required by block processing to not // have to hash the state. 
@@ -2367,7 +2454,17 @@ impl, Cold: ItemStore> HotColdDB self.forwards_state_roots_iterator_until(base_state.slot(), slot, || { Err(Error::StateShouldNotBeRequired(slot)) })?; - let state = self.replay_blocks(base_state, blocks, slot, Some(state_root_iter), None)?; + // TODO(gloas): calculate correct payload status for cold states + let payload_status = StatePayloadStatus::Pending; + let state = self.replay_blocks( + base_state, + blocks, + envelopes, + payload_status, + slot, + Some(state_root_iter), + None, + )?; debug!( target_slot = %slot, replay_time_ms = metrics::stop_timer_with_duration(replay_timer).as_millis(), @@ -2460,40 +2557,77 @@ impl, Cold: ItemStore> HotColdDB } } - /// Load cold blocks between `start_slot` and `end_slot` inclusive. + /// Load cold blocks and payload envelopes between `start_slot` and `end_slot` inclusive. + #[allow(clippy::type_complexity)] pub fn load_cold_blocks( &self, start_slot: Slot, end_slot: Slot, - ) -> Result>, Error> { + ) -> Result< + ( + Vec>, + Vec>, + ), + Error, + > { let _t = metrics::start_timer(&metrics::STORE_BEACON_LOAD_COLD_BLOCKS_TIME); let block_root_iter = self.forwards_block_roots_iterator_until(start_slot, end_slot, || { Err(Error::StateShouldNotBeRequired(end_slot)) })?; - process_results(block_root_iter, |iter| { + let blocks = process_results(block_root_iter, |iter| { iter.map(|(block_root, _slot)| block_root) .dedup() .map(|block_root| { self.get_blinded_block(&block_root)? .ok_or(Error::MissingBlock(block_root)) }) - .collect() - })? + .collect::, Error>>() + })??; + + // If Gloas is not enabled for any slots in the range, just return `blocks`. 
+ if !self.spec.fork_name_at_slot::(start_slot).gloas_enabled() + && !self.spec.fork_name_at_slot::(end_slot).gloas_enabled() + { + return Ok((blocks, vec![])); + } + // TODO(gloas): wire this up + let end_block_root = Hash256::ZERO; + let desired_payload_status = StatePayloadStatus::Pending; + let envelopes = self.load_payload_envelopes_for_blocks( + &blocks, + end_block_root, + desired_payload_status, + )?; + + Ok((blocks, envelopes)) } - /// Load the blocks between `start_slot` and `end_slot` by backtracking from `end_block_hash`. + /// Load the blocks & envelopes between `start_slot` and `end_slot` by backtracking from + /// `end_block_root`. /// /// Blocks are returned in slot-ascending order, suitable for replaying on a state with slot /// equal to `start_slot`, to reach a state with slot equal to `end_slot`. + /// + /// Payloads are also returned in slot-ascending order, but only payloads forming part of + /// the chain are loaded (payloads for EMPTY slots are omitted). Prior to Gloas, an empty + /// vec of payloads will be returned. + #[allow(clippy::type_complexity)] pub fn load_blocks_to_replay( &self, start_slot: Slot, end_slot: Slot, - end_block_hash: Hash256, - ) -> Result>>, Error> { + end_block_root: Hash256, + desired_payload_status: StatePayloadStatus, + ) -> Result< + ( + Vec>, + Vec>, + ), + Error, + > { let _t = metrics::start_timer(&metrics::STORE_BEACON_LOAD_HOT_BLOCKS_TIME); - let mut blocks = ParentRootBlockIterator::new(self, end_block_hash) + let mut blocks = ParentRootBlockIterator::new(self, end_block_root) .map(|result| result.map(|(_, block)| block)) // Include the block at the end slot (if any), it needs to be // replayed in order to construct the canonical state at `end_slot`. @@ -2520,17 +2654,70 @@ impl, Cold: ItemStore> HotColdDB }) .collect::, _>>()?; blocks.reverse(); - Ok(blocks) + + // If Gloas is not enabled for any slots in the range, just return `blocks`. 
+ if !self.spec.fork_name_at_slot::(start_slot).gloas_enabled() + && !self.spec.fork_name_at_slot::(end_slot).gloas_enabled() + { + return Ok((blocks, vec![])); + } + + let envelopes = self.load_payload_envelopes_for_blocks( + &blocks, + end_block_root, + desired_payload_status, + )?; + + Ok((blocks, envelopes)) + } + + pub fn load_payload_envelopes_for_blocks( + &self, + blocks: &[SignedBlindedBeaconBlock], + end_block_root: Hash256, + desired_payload_status: StatePayloadStatus, + ) -> Result>, Error> { + let mut envelopes = vec![]; + + for (block, next_block) in blocks.iter().tuple_windows() { + if block.fork_name_unchecked().gloas_enabled() { + // Check next block to see if this block's payload is canonical on this chain. + let block_hash = block.payload_bid_block_hash()?; + if !next_block.is_parent_block_full(block_hash) { + // No payload at this slot (empty), nothing to load. + continue; + } + // Using `parent_root` avoids computation. + let block_root = next_block.parent_root(); + let envelope = self + .get_payload_envelope(&block_root)? + .ok_or(HotColdDBError::MissingExecutionPayloadEnvelope(block_root))?; + envelopes.push(envelope); + } + } + + // Load the payload for the last block if desired. + if let StatePayloadStatus::Full = desired_payload_status { + let envelope = self.get_payload_envelope(&end_block_root)?.ok_or( + HotColdDBError::MissingExecutionPayloadEnvelope(end_block_root), + )?; + envelopes.push(envelope); + } + + Ok(envelopes) } /// Replay `blocks` on top of `state` until `target_slot` is reached. /// /// Will skip slots as necessary. The returned state is not guaranteed /// to have any caches built, beyond those immediately required by block processing. 
+ #[allow(clippy::too_many_arguments)] pub fn replay_blocks( &self, state: BeaconState, - blocks: Vec>>, + blocks: Vec>, + envelopes: Vec>, + desired_payload_status: StatePayloadStatus, target_slot: Slot, state_root_iter: Option>>, pre_slot_hook: Option>, @@ -2539,7 +2726,8 @@ impl, Cold: ItemStore> HotColdDB let mut block_replayer = BlockReplayer::new(state, &self.spec) .no_signature_verification() - .minimal_block_root_verification(); + .minimal_block_root_verification() + .desired_state_payload_status(desired_payload_status); let have_state_root_iterator = state_root_iter.is_some(); if let Some(state_root_iter) = state_root_iter { @@ -2551,7 +2739,7 @@ impl, Cold: ItemStore> HotColdDB } block_replayer - .apply_blocks(blocks, Some(target_slot)) + .apply_blocks(blocks, envelopes, Some(target_slot)) .map(|block_replayer| { if have_state_root_iterator && block_replayer.state_root_miss() { warn!( @@ -4006,11 +4194,15 @@ impl HotStateSummary { // slots where there isn't a skip). let latest_block_root = state.get_latest_block_root(state_root); + // Payload status of the state determines a lot about how it is stored. + let payload_status = state.payload_status(); + let get_state_root = |slot| { if slot == state.slot() { + // TODO(gloas): I think we can remove this case Ok::<_, Error>(state_root) } else { - Ok(get_ancestor_state_root(store, state, slot).map_err(|e| { + Ok::<_, Error>(get_ancestor_state_root(store, state, slot).map_err(|e| { Error::StateSummaryIteratorError { error: e, from_state_root: state_root, @@ -4030,6 +4222,12 @@ impl HotStateSummary { let previous_state_root = if state.slot() == 0 { // Set to 0x0 for genesis state to prevent any sort of circular reference. Hash256::zero() + } else if let StatePayloadStatus::Full = payload_status + && state.slot() == state.latest_block_header().slot + { + // A Full state at a non-skipped slot builds off the Pending state of the same slot, + // i.e. 
the state with the same `state_root` as its `BeaconBlock` + state.latest_block_header().state_root } else { get_state_root(state.slot().safe_sub(1_u64)?)? }; diff --git a/beacon_node/store/src/reconstruct.rs b/beacon_node/store/src/reconstruct.rs index 7aca692ef9..e51543c3a2 100644 --- a/beacon_node/store/src/reconstruct.rs +++ b/beacon_node/store/src/reconstruct.rs @@ -67,6 +67,7 @@ where state.build_caches(&self.spec)?; + // TODO(gloas): handle payload envelope replay process_results(block_root_iter, |iter| -> Result<(), Error> { let mut io_batch = vec![]; diff --git a/beacon_node/store/src/state_cache.rs b/beacon_node/store/src/state_cache.rs index 6d159c9361..d016922ade 100644 --- a/beacon_node/store/src/state_cache.rs +++ b/beacon_node/store/src/state_cache.rs @@ -7,7 +7,7 @@ use lru::LruCache; use std::collections::{BTreeMap, HashMap, HashSet}; use std::num::NonZeroUsize; use tracing::instrument; -use types::{BeaconState, ChainSpec, Epoch, EthSpec, Hash256, Slot}; +use types::{BeaconState, ChainSpec, Epoch, EthSpec, Hash256, Slot, execution::StatePayloadStatus}; /// Fraction of the LRU cache to leave intact during culling. const CULL_EXEMPT_NUMERATOR: usize = 1; @@ -23,10 +23,10 @@ pub struct FinalizedState { state: BeaconState, } -/// Map from block_root -> slot -> state_root. +/// Map from (block_root, payload_status) -> slot -> state_root. #[derive(Debug, Default)] pub struct BlockMap { - blocks: HashMap, + blocks: HashMap<(Hash256, StatePayloadStatus), SlotMap>, } /// Map from slot -> state_root. @@ -143,8 +143,11 @@ impl StateCache { return Err(Error::FinalizedStateDecreasingSlot); } + let payload_status = state.payload_status(); + // Add to block map. - self.block_map.insert(block_root, state.slot(), state_root); + self.block_map + .insert(block_root, payload_status, state.slot(), state_root); // Prune block map. 
let state_roots_to_prune = self.block_map.prune(state.slot()); @@ -267,7 +270,9 @@ impl StateCache { // Record the connection from block root and slot to this state. let slot = state.slot(); - self.block_map.insert(block_root, slot, state_root); + let payload_status = state.payload_status(); + self.block_map + .insert(block_root, payload_status, slot, state_root); Ok(PutStateOutcome::New(deleted_states)) } @@ -316,9 +321,10 @@ impl StateCache { pub fn get_by_block_root( &mut self, block_root: Hash256, + payload_status: StatePayloadStatus, slot: Slot, ) -> Option<(Hash256, BeaconState)> { - let slot_map = self.block_map.blocks.get(&block_root)?; + let slot_map = self.block_map.blocks.get(&(block_root, payload_status))?; // Find the state at `slot`, or failing that the most recent ancestor. let state_root = slot_map @@ -339,7 +345,12 @@ impl StateCache { } pub fn delete_block_states(&mut self, block_root: &Hash256) { - if let Some(slot_map) = self.block_map.delete_block_states(block_root) { + let (pending_state_roots, full_state_roots) = + self.block_map.delete_block_states(block_root); + for slot_map in [pending_state_roots, full_state_roots] + .into_iter() + .flatten() + { for state_root in slot_map.slots.values() { self.states.pop(state_root); } @@ -412,8 +423,14 @@ impl StateCache { } impl BlockMap { - fn insert(&mut self, block_root: Hash256, slot: Slot, state_root: Hash256) { - let slot_map = self.blocks.entry(block_root).or_default(); + fn insert( + &mut self, + block_root: Hash256, + payload_status: StatePayloadStatus, + slot: Slot, + state_root: Hash256, + ) { + let slot_map = self.blocks.entry((block_root, payload_status)).or_default(); slot_map.slots.insert(slot, state_root); } @@ -444,8 +461,12 @@ impl BlockMap { }); } - fn delete_block_states(&mut self, block_root: &Hash256) -> Option { - self.blocks.remove(block_root) + fn delete_block_states(&mut self, block_root: &Hash256) -> (Option, Option) { + let pending_state_roots = self + .blocks + 
.remove(&(*block_root, StatePayloadStatus::Pending)); + let full_state_roots = self.blocks.remove(&(*block_root, StatePayloadStatus::Full)); + (pending_state_roots, full_state_roots) } } diff --git a/consensus/state_processing/src/block_replayer.rs b/consensus/state_processing/src/block_replayer.rs index 56e667cdd3..a10d6179fe 100644 --- a/consensus/state_processing/src/block_replayer.rs +++ b/consensus/state_processing/src/block_replayer.rs @@ -1,6 +1,11 @@ use crate::{ BlockProcessingError, BlockSignatureStrategy, ConsensusContext, SlotProcessingError, - VerifyBlockRoot, per_block_processing, per_epoch_processing::EpochProcessingSummary, + VerifyBlockRoot, VerifySignatures, + envelope_processing::{ + EnvelopeProcessingError, VerifyStateRoot, process_execution_payload_envelope, + }, + per_block_processing, + per_epoch_processing::EpochProcessingSummary, per_slot_processing, }; use itertools::Itertools; @@ -8,7 +13,7 @@ use std::iter::Peekable; use std::marker::PhantomData; use types::{ BeaconState, BeaconStateError, BlindedPayload, ChainSpec, EthSpec, Hash256, SignedBeaconBlock, - Slot, + SignedExecutionPayloadEnvelope, Slot, execution::StatePayloadStatus, }; pub type PreBlockHook<'a, E, Error> = Box< @@ -24,7 +29,7 @@ pub type PostSlotHook<'a, E, Error> = Box< >; pub type StateRootIterDefault = std::iter::Empty>; -/// Efficiently apply blocks to a state while configuring various parameters. +/// Efficiently apply blocks and payloads to a state while configuring various parameters. /// /// Usage follows a builder pattern. pub struct BlockReplayer< @@ -41,8 +46,21 @@ pub struct BlockReplayer< post_block_hook: Option>, pre_slot_hook: Option>, post_slot_hook: Option>, + /// Iterator over state roots for all *block* states. + /// + /// Pre-Gloas, this is all states. Post-Gloas, this is *just* the states corresponding to beacon + /// blocks. For states corresponding to payloads, we read the state root from the payload + /// envelope. 
+ // TODO(gloas): this concept might need adjusting when we implement the cold DB. pub(crate) state_root_iter: Option>, state_root_miss: bool, + /// The payload status of the state desired as the end result of block replay. + /// + /// This dictates whether a payload should be applied after applying the last block. + /// + /// Prior to Gloas, this should always be set to `StatePayloadStatus::Pending` to indicate + /// that no envelope needs to be applied. + desired_state_payload_status: StatePayloadStatus, _phantom: PhantomData, } @@ -50,7 +68,12 @@ pub struct BlockReplayer< pub enum BlockReplayError { SlotProcessing(SlotProcessingError), BlockProcessing(BlockProcessingError), + EnvelopeProcessing(EnvelopeProcessingError), BeaconState(BeaconStateError), + /// A payload envelope for this `slot` was required but not provided. + MissingPayloadEnvelope { + slot: Slot, + }, } impl From for BlockReplayError { @@ -65,6 +88,12 @@ impl From for BlockReplayError { } } +impl From for BlockReplayError { + fn from(e: EnvelopeProcessingError) -> Self { + Self::EnvelopeProcessing(e) + } +} + impl From for BlockReplayError { fn from(e: BeaconStateError) -> Self { Self::BeaconState(e) @@ -96,6 +125,7 @@ where post_slot_hook: None, state_root_iter: None, state_root_miss: false, + desired_state_payload_status: StatePayloadStatus::Pending, _phantom: PhantomData, } } @@ -161,6 +191,14 @@ where self } + /// Set the desired payload status of the state reached by replay. + /// + /// This determines whether to apply a payload after applying the last block. + pub fn desired_state_payload_status(mut self, payload_status: StatePayloadStatus) -> Self { + self.desired_state_payload_status = payload_status; + self + } + /// Compute the state root for `self.state` as efficiently as possible. /// /// This function MUST only be called when `self.state` is a post-state, i.e. it MUST not be @@ -208,6 +246,38 @@ where Ok(state_root) } + /// Apply an execution payload envelope to `self.state`. 
+ /// + /// The `block_state_root` MUST be the `state_root` of the most recently applied block. + /// + /// Returns the `state_root` of `self.state` after payload application. + fn apply_payload_envelope( + &mut self, + envelope: &SignedExecutionPayloadEnvelope, + block_state_root: Hash256, + ) -> Result { + // TODO(gloas): bulk signature verification could be relevant here? + let verify_payload_signatures = + if let BlockSignatureStrategy::NoVerification = self.block_sig_strategy { + VerifySignatures::False + } else { + VerifySignatures::True + }; + // TODO(gloas): state root verif enabled during initial prototyping + let verify_state_root = VerifyStateRoot::True; + process_execution_payload_envelope( + &mut self.state, + Some(block_state_root), + envelope, + verify_payload_signatures, + verify_state_root, + self.spec, + ) + .map_err(BlockReplayError::from)?; + + Ok(envelope.message.state_root) + } + /// Apply `blocks` atop `self.state`, taking care of slot processing. /// /// If `target_slot` is provided then the state will be advanced through to `target_slot` @@ -215,8 +285,21 @@ where pub fn apply_blocks( mut self, blocks: Vec>>, + payload_envelopes: Vec>, target_slot: Option, ) -> Result { + let mut envelopes_iter = payload_envelopes.into_iter(); + + let mut next_envelope_at_slot = |slot| { + if let Some(envelope) = envelopes_iter.next() + && envelope.message.slot == slot + { + Ok(envelope) + } else { + Err(BlockReplayError::MissingPayloadEnvelope { slot }) + } + }; + for (i, block) in blocks.iter().enumerate() { // Allow one additional block at the start which is only used for its state root. 
if i == 0 && block.slot() <= self.state.slot() { @@ -224,7 +307,35 @@ where } while self.state.slot() < block.slot() { - let state_root = self.get_state_root(&blocks, i)?; + let mut state_root = self.get_state_root(&blocks, i)?; + + // Apply the payload for the *previous* block if the bid in the current block + // indicates that the parent is full (and it hasn't already been applied). + state_root = if block.fork_name_unchecked().gloas_enabled() + && self.state.slot() == self.state.latest_block_header().slot + { + let latest_bid_block_hash = self + .state + .latest_execution_payload_bid() + .map_err(BlockReplayError::from)? + .block_hash; + + // Similar to `is_parent_block_full`, but reading the block hash from the + // not-yet-applied `block`. The slot 0 case covers genesis (no block replay reqd). + if self.state.slot() != 0 && block.is_parent_block_full(latest_bid_block_hash) { + let envelope = next_envelope_at_slot(self.state.slot())?; + // State root for the next slot processing is now the envelope's state root. + self.apply_payload_envelope(&envelope, state_root)? + } else { + // Empty payload at this slot, the state root is unchanged from when the + // beacon block was applied. + state_root + } + } else { + // Pre-Gloas or at skipped slots post-Gloas, the state root of the parent state + // is always the output from `self.get_state_root`. + state_root + }; if let Some(ref mut pre_slot_hook) = self.pre_slot_hook { pre_slot_hook(state_root, &mut self.state)?; @@ -268,9 +379,24 @@ where } } + // Apply the last payload if desired. + let mut opt_state_root = if let StatePayloadStatus::Full = self.desired_state_payload_status + && let Some(last_block) = blocks.last() + { + let envelope = next_envelope_at_slot(self.state.slot())?; + Some(self.apply_payload_envelope(&envelope, last_block.state_root())?) 
+ } else { + None + }; + if let Some(target_slot) = target_slot { while self.state.slot() < target_slot { - let state_root = self.get_state_root(&blocks, blocks.len())?; + // Read state root from `opt_state_root` if a payload was just applied. + let state_root = if let Some(root) = opt_state_root.take() { + root + } else { + self.get_state_root(&blocks, blocks.len())? + }; if let Some(ref mut pre_slot_hook) = self.pre_slot_hook { pre_slot_hook(state_root, &mut self.state)?; diff --git a/consensus/state_processing/src/envelope_processing.rs b/consensus/state_processing/src/envelope_processing.rs index be6b7c1b29..97953b835f 100644 --- a/consensus/state_processing/src/envelope_processing.rs +++ b/consensus/state_processing/src/envelope_processing.rs @@ -241,8 +241,6 @@ pub fn process_execution_payload_envelope( // TODO(gloas): newPayload happens here in the spec, ensure we wire that up correctly process_deposit_requests_post_gloas(state, &execution_requests.deposits, spec)?; - - // TODO(gloas): gotta update these process_withdrawal_requests(state, &execution_requests.withdrawals, spec)?; process_consolidation_requests(state, &execution_requests.consolidations, spec)?; diff --git a/consensus/state_processing/src/per_block_processing/tests.rs b/consensus/state_processing/src/per_block_processing/tests.rs index 96610c2010..0203b33e61 100644 --- a/consensus/state_processing/src/per_block_processing/tests.rs +++ b/consensus/state_processing/src/per_block_processing/tests.rs @@ -1014,7 +1014,7 @@ async fn block_replayer_peeking_state_roots() { let block_replayer = BlockReplayer::new(parent_state, &harness.chain.spec) .state_root_iter(state_root_iter.into_iter()) .no_signature_verification() - .apply_blocks(vec![target_block], None) + .apply_blocks(vec![target_block], vec![], None) .unwrap(); assert_eq!( diff --git a/consensus/state_processing/src/state_advance.rs b/consensus/state_processing/src/state_advance.rs index 11a956bc2a..1114562155 100644 --- 
a/consensus/state_processing/src/state_advance.rs +++ b/consensus/state_processing/src/state_advance.rs @@ -77,6 +77,11 @@ pub fn partial_state_advance( // (all-zeros) state root. let mut initial_state_root = Some(if state.slot() > state.latest_block_header().slot { state_root_opt.unwrap_or_else(Hash256::zero) + } else if state.slot() == state.latest_block_header().slot + && !state.latest_block_header().state_root.is_zero() + { + // Post-Gloas Full state case. + state.latest_block_header().state_root } else { state_root_opt.ok_or(Error::StateRootNotProvided)? }); diff --git a/consensus/types/src/block/signed_beacon_block.rs b/consensus/types/src/block/signed_beacon_block.rs index aeb3c18d95..b6218ba64d 100644 --- a/consensus/types/src/block/signed_beacon_block.rs +++ b/consensus/types/src/block/signed_beacon_block.rs @@ -14,6 +14,7 @@ use tree_hash::TreeHash; use tree_hash_derive::TreeHash; use crate::{ + ExecutionBlockHash, block::{ BLOB_KZG_COMMITMENTS_INDEX, BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockBellatrix, BeaconBlockBodyBellatrix, BeaconBlockBodyCapella, @@ -365,6 +366,32 @@ impl> SignedBeaconBlock format_kzg_commitments(commitments.as_ref()) } + + /// Convenience accessor for the block's bid's `block_hash`. + /// + /// This method returns an error prior to Gloas. + pub fn payload_bid_block_hash(&self) -> Result { + self.message() + .body() + .signed_execution_payload_bid() + .map(|bid| bid.message.block_hash) + } + + /// Check if the `parent_hash` in this block's `signed_payload_bid` matches `parent_block_hash`. + /// + /// This function is useful post-Gloas for determining if the parent block is full, *without* + /// necessarily needing access to a beacon state. The passed in `parent_block_hash` MUST be the + /// `block_hash` from the parent beacon block's bid. If the parent beacon state is available + /// this can alternatively be fetched from `state.latest_payload_bid`. 
+ /// + /// This function returns `false` for all blocks prior to Gloas. + pub fn is_parent_block_full(&self, parent_block_hash: ExecutionBlockHash) -> bool { + let Ok(signed_payload_bid) = self.message().body().signed_execution_payload_bid() else { + // Prior to Gloas. + return false; + }; + signed_payload_bid.message.parent_block_hash == parent_block_hash + } } // We can convert pre-Bellatrix blocks without payloads into blocks with payloads. diff --git a/consensus/types/src/execution/mod.rs b/consensus/types/src/execution/mod.rs index a3d4ed8730..591be32b24 100644 --- a/consensus/types/src/execution/mod.rs +++ b/consensus/types/src/execution/mod.rs @@ -12,6 +12,7 @@ mod payload; mod signed_bls_to_execution_change; mod signed_execution_payload_bid; mod signed_execution_payload_envelope; +mod state_payload_status; pub use bls_to_execution_change::BlsToExecutionChange; pub use eth1_data::Eth1Data; @@ -41,3 +42,4 @@ pub use payload::{ pub use signed_bls_to_execution_change::SignedBlsToExecutionChange; pub use signed_execution_payload_bid::SignedExecutionPayloadBid; pub use signed_execution_payload_envelope::SignedExecutionPayloadEnvelope; +pub use state_payload_status::StatePayloadStatus; diff --git a/consensus/types/src/execution/state_payload_status.rs b/consensus/types/src/execution/state_payload_status.rs new file mode 100644 index 0000000000..1661be6060 --- /dev/null +++ b/consensus/types/src/execution/state_payload_status.rs @@ -0,0 +1,18 @@ +use serde::{Deserialize, Serialize}; + +/// Payload status as it applies to a `BeaconState` post-Gloas. +/// +/// A state can either be a post-state for a block (in which case we call it `Pending`) or a +/// payload envelope (`Full`). When handling states it is often necessary to know which of these +/// two variants is required. +/// +/// Note that states at skipped slots could be either `Pending` or `Full`, depending on whether +/// the payload for the most-recently applied block was also applied. 
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)] +#[serde(rename_all = "lowercase")] +pub enum StatePayloadStatus { + /// For states produced by `process_block` executed on a `BeaconBlock`. + Pending, + /// For states produced by `process_execution_payload` on a `ExecutionPayloadEnvelope`. + Full, +} diff --git a/consensus/types/src/state/beacon_state.rs b/consensus/types/src/state/beacon_state.rs index bd67f469d2..34cfd0ca1c 100644 --- a/consensus/types/src/state/beacon_state.rs +++ b/consensus/types/src/state/beacon_state.rs @@ -36,7 +36,7 @@ use crate::{ execution::{ Eth1Data, ExecutionPayloadHeaderBellatrix, ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderDeneb, ExecutionPayloadHeaderElectra, ExecutionPayloadHeaderFulu, - ExecutionPayloadHeaderRef, ExecutionPayloadHeaderRefMut, + ExecutionPayloadHeaderRef, ExecutionPayloadHeaderRefMut, StatePayloadStatus, }, fork::{Fork, ForkName, ForkVersionDecode, InconsistentFork, map_fork_name}, light_client::consts::{ @@ -1266,6 +1266,24 @@ impl BeaconState { } } + /// Determine the payload status of this state. + /// + /// Prior to Gloas this is always `Pending`. + /// + /// Post-Gloas, the definition of the `StatePayloadStatus` is: + /// + /// - `Full` if this state is the result of envelope processing. + /// - `Pending` if this state is the result of block processing. + pub fn payload_status(&self) -> StatePayloadStatus { + if !self.fork_name_unchecked().gloas_enabled() { + StatePayloadStatus::Pending + } else if self.is_parent_block_full() { + StatePayloadStatus::Full + } else { + StatePayloadStatus::Pending + } + } + /// Return `true` if the validator who produced `slot_signature` is eligible to aggregate. 
/// /// Spec v0.12.1 From a36b7f3ddbf4bd8456403ef1f945403c7a23affa Mon Sep 17 00:00:00 2001 From: lystopad Date: Thu, 12 Mar 2026 00:03:05 +0000 Subject: [PATCH 19/43] Schedule Fulu fork for Chiado testnet (#8954) Co-Authored-By: Oleksandr Lystopad --- .../built_in_network_configs/chiado/config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/common/eth2_network_config/built_in_network_configs/chiado/config.yaml b/common/eth2_network_config/built_in_network_configs/chiado/config.yaml index f0c04d891a..e1eb022cc9 100644 --- a/common/eth2_network_config/built_in_network_configs/chiado/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/chiado/config.yaml @@ -49,7 +49,7 @@ ELECTRA_FORK_VERSION: 0x0500006f ELECTRA_FORK_EPOCH: 948224 # Thu Mar 6 2025 09:43:40 GMT+0000 # Fulu FULU_FORK_VERSION: 0x0600006f -FULU_FORK_EPOCH: 18446744073709551615 +FULU_FORK_EPOCH: 1353216 # Mon Mar 16 2026 09:33:00 UTC # Gloas GLOAS_FORK_VERSION: 0x0700006f GLOAS_FORK_EPOCH: 18446744073709551615 From e1e97e6df069a67bb687fd02829ac53b6950d378 Mon Sep 17 00:00:00 2001 From: chonghe <44791194+chong-he@users.noreply.github.com> Date: Thu, 12 Mar 2026 09:11:37 +0800 Subject: [PATCH 20/43] Fix proposer lookahead endpoint JSON return type (#8970) Co-Authored-By: Tan Chee Keong --- beacon_node/http_api/src/beacon/states.rs | 4 ++-- beacon_node/http_api/tests/tests.rs | 5 +++-- common/eth2/src/lib.rs | 2 +- common/eth2/src/types.rs | 9 +++++++++ 4 files changed, 15 insertions(+), 5 deletions(-) diff --git a/beacon_node/http_api/src/beacon/states.rs b/beacon_node/http_api/src/beacon/states.rs index 02ac3f4da7..84ef3c1f26 100644 --- a/beacon_node/http_api/src/beacon/states.rs +++ b/beacon_node/http_api/src/beacon/states.rs @@ -9,7 +9,7 @@ use crate::version::{ use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes, WhenSlotSkipped}; use eth2::types::{ self as api_types, ValidatorBalancesRequestBody, ValidatorId, ValidatorIdentitiesRequestBody, - 
ValidatorsRequestBody, + ValidatorIndexData, ValidatorsRequestBody, }; use ssz::Encode; use std::sync::Arc; @@ -213,7 +213,7 @@ pub fn get_beacon_state_proposer_lookahead( ResponseIncludesVersion::Yes(fork_name), execution_optimistic, finalized, - data, + ValidatorIndexData(data), ) .map(|res| warp::reply::json(&res).into_response()), } diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index a97ce01ac1..aed7a6b200 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -1430,10 +1430,11 @@ impl ApiTester { } let state = state_opt.as_mut().expect("result should be none"); - let expected = state.proposer_lookahead().unwrap(); + let expected = state.proposer_lookahead().unwrap().to_vec(); let response = result.unwrap(); - assert_eq!(response.data(), &expected.to_vec()); + // Compare Vec directly, not Vec + assert_eq!(response.data().0, expected); // Check that the version header is returned in the response let fork_name = state.fork_name(&self.chain.spec).unwrap(); diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 5547ced491..af87af14ba 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -904,7 +904,7 @@ impl BeaconNodeHttpClient { pub async fn get_beacon_states_proposer_lookahead( &self, state_id: StateId, - ) -> Result>>, Error> { + ) -> Result>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index 2f86170812..94dff95bc6 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -708,6 +708,15 @@ pub struct DataColumnIndicesQuery { #[serde(transparent)] pub struct ValidatorIndexData(#[serde(with = "serde_utils::quoted_u64_vec")] pub Vec); +impl<'de, T> ContextDeserialize<'de, T> for ValidatorIndexData { + fn context_deserialize(deserializer: D, _context: T) -> Result + where + D: Deserializer<'de>, + { + Self::deserialize(deserializer) + } +} + /// Borrowed 
variant of `ValidatorIndexData`, for serializing/sending. #[derive(Clone, Copy, Serialize)] #[serde(transparent)] From 4b3a9d3d10a6181a1a1588880de133457eb90816 Mon Sep 17 00:00:00 2001 From: Shane K Moore <41407272+shane-moore@users.noreply.github.com> Date: Thu, 12 Mar 2026 02:53:32 -0700 Subject: [PATCH 21/43] Refactor/stream vc vote publishing (#8880) Changes four `ValidatorStore` batch signing methods to return `impl Stream` instead of `Future`. Services consume the stream and publish each batch as it arrives. No behavioral change for lh since `LighthouseValidatorStore` wraps everything in `stream::once` Also replaces anonymous tuples in method signatures with named structs Co-Authored-By: shane-moore Co-Authored-By: Michael Sproul Co-Authored-By: Mac L --- Cargo.lock | 1 + testing/web3signer_tests/src/lib.rs | 66 ++- .../http_api/src/tests/keystores.rs | 43 +- .../lighthouse_validator_store/src/lib.rs | 496 ++++++++++++------ .../src/attestation_service.rs | 348 ++++++------ .../src/sync_committee_service.rs | 239 ++++----- validator_client/validator_store/Cargo.toml | 1 + validator_client/validator_store/src/lib.rs | 89 ++-- 8 files changed, 740 insertions(+), 543 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0ca12dce46..1d187d1c68 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9710,6 +9710,7 @@ version = "0.1.0" dependencies = [ "bls", "eth2", + "futures", "slashing_protection", "types", ] diff --git a/testing/web3signer_tests/src/lib.rs b/testing/web3signer_tests/src/lib.rs index 4b9432b67b..1f36f8d4ce 100644 --- a/testing/web3signer_tests/src/lib.rs +++ b/testing/web3signer_tests/src/lib.rs @@ -25,6 +25,7 @@ mod tests { use eth2_keystore::KeystoreBuilder; use eth2_network_config::Eth2NetworkConfig; use fixed_bytes::FixedBytesExtended; + use futures::StreamExt; use initialized_validators::{ InitializedValidators, load_pem_certificate, load_pkcs12_identity, }; @@ -50,7 +51,7 @@ mod tests { use types::{attestation::AttestationBase, *}; use url::Url; 
use validator_store::{ - Error as ValidatorStoreError, SignedBlock, UnsignedBlock, ValidatorStore, + AttestationToSign, Error as ValidatorStoreError, SignedBlock, UnsignedBlock, ValidatorStore, }; /// If the we are unable to reach the Web3Signer HTTP API within this time out then we will @@ -654,13 +655,14 @@ mod tests { .await .assert_signatures_match("attestation", |pubkey, validator_store| async move { let attestation = get_attestation(); - validator_store - .sign_attestations(vec![(0, pubkey, 0, attestation)]) - .await - .unwrap() - .pop() - .unwrap() - .1 + let stream = validator_store.sign_attestations(vec![AttestationToSign { + validator_index: 0, + pubkey, + validator_committee_index: 0, + attestation, + }]); + tokio::pin!(stream); + stream.next().await.unwrap().unwrap().pop().unwrap().1 }) .await .assert_signatures_match("signed_aggregate", |pubkey, validator_store| async move { @@ -879,22 +881,28 @@ mod tests { .await .assert_signatures_match("first_attestation", |pubkey, validator_store| async move { let attestation = first_attestation(); - validator_store - .sign_attestations(vec![(0, pubkey, 0, attestation)]) - .await - .unwrap() - .pop() - .unwrap() - .1 + let stream = validator_store.sign_attestations(vec![AttestationToSign { + validator_index: 0, + pubkey, + validator_committee_index: 0, + attestation, + }]); + tokio::pin!(stream); + stream.next().await.unwrap().unwrap().pop().unwrap().1 }) .await .assert_slashable_attestation_should_sign( "double_vote_attestation", move |pubkey, validator_store| async move { let attestation = double_vote_attestation(); - validator_store - .sign_attestations(vec![(0, pubkey, 0, attestation)]) - .await + let stream = validator_store.sign_attestations(vec![AttestationToSign { + validator_index: 0, + pubkey, + validator_committee_index: 0, + attestation, + }]); + tokio::pin!(stream); + stream.next().await.unwrap() }, slashable_message_should_sign, ) @@ -903,9 +911,14 @@ mod tests { "surrounding_attestation", move 
|pubkey, validator_store| async move { let attestation = surrounding_attestation(); - validator_store - .sign_attestations(vec![(0, pubkey, 0, attestation)]) - .await + let stream = validator_store.sign_attestations(vec![AttestationToSign { + validator_index: 0, + pubkey, + validator_committee_index: 0, + attestation, + }]); + tokio::pin!(stream); + stream.next().await.unwrap() }, slashable_message_should_sign, ) @@ -914,9 +927,14 @@ mod tests { "surrounded_attestation", move |pubkey, validator_store| async move { let attestation = surrounded_attestation(); - validator_store - .sign_attestations(vec![(0, pubkey, 0, attestation)]) - .await + let stream = validator_store.sign_attestations(vec![AttestationToSign { + validator_index: 0, + pubkey, + validator_committee_index: 0, + attestation, + }]); + tokio::pin!(stream); + stream.next().await.unwrap() }, slashable_message_should_sign, ) diff --git a/validator_client/http_api/src/tests/keystores.rs b/validator_client/http_api/src/tests/keystores.rs index 601b2f1666..eb35075526 100644 --- a/validator_client/http_api/src/tests/keystores.rs +++ b/validator_client/http_api/src/tests/keystores.rs @@ -9,6 +9,7 @@ use eth2::lighthouse_vc::{ types::Web3SignerValidatorRequest, }; use fixed_bytes::FixedBytesExtended; +use futures::StreamExt; use itertools::Itertools; use lighthouse_validator_store::DEFAULT_GAS_LIMIT; use rand::rngs::StdRng; @@ -19,6 +20,7 @@ use std::{collections::HashMap, path::Path}; use tokio::runtime::Handle; use typenum::Unsigned; use types::{Address, attestation::AttestationBase}; +use validator_store::AttestationToSign; use validator_store::ValidatorStore; use zeroize::Zeroizing; @@ -1101,11 +1103,16 @@ async fn generic_migration_test( // Sign attestations on VC1. 
for (validator_index, attestation) in first_vc_attestations { let public_key = keystore_pubkey(&keystores[validator_index]); - let safe_attestations = tester1 + let stream = tester1 .validator_store - .sign_attestations(vec![(0, public_key, 0, attestation.clone())]) - .await - .unwrap(); + .sign_attestations(vec![AttestationToSign { + validator_index: 0, + pubkey: public_key, + validator_committee_index: 0, + attestation: attestation.clone(), + }]); + tokio::pin!(stream); + let safe_attestations = stream.next().await.unwrap().unwrap(); assert_eq!(safe_attestations.len(), 1); // Compare data only, ignoring signatures which are added during signing. assert_eq!(safe_attestations[0].1.data(), attestation.data()); @@ -1184,10 +1191,16 @@ async fn generic_migration_test( // Sign attestations on the second VC. for (validator_index, attestation, should_succeed) in second_vc_attestations { let public_key = keystore_pubkey(&keystores[validator_index]); - let result = tester2 + let stream = tester2 .validator_store - .sign_attestations(vec![(0, public_key, 0, attestation.clone())]) - .await; + .sign_attestations(vec![AttestationToSign { + validator_index: 0, + pubkey: public_key, + validator_committee_index: 0, + attestation: attestation.clone(), + }]); + tokio::pin!(stream); + let result = stream.next().await.unwrap(); match result { Ok(safe_attestations) => { if should_succeed { @@ -1331,14 +1344,14 @@ async fn delete_concurrent_with_signing() { for j in 0..num_attestations { let att = make_attestation(j, j + 1); for (validator_index, public_key) in thread_pubkeys.iter().enumerate() { - let _ = validator_store - .sign_attestations(vec![( - validator_index as u64, - *public_key, - 0, - att.clone(), - )]) - .await; + let stream = validator_store.sign_attestations(vec![AttestationToSign { + validator_index: validator_index as u64, + pubkey: *public_key, + validator_committee_index: 0, + attestation: att.clone(), + }]); + tokio::pin!(stream); + let _ = stream.next().await; } } 
}); diff --git a/validator_client/lighthouse_validator_store/src/lib.rs b/validator_client/lighthouse_validator_store/src/lib.rs index 7806482ffb..e8c1cfbc43 100644 --- a/validator_client/lighthouse_validator_store/src/lib.rs +++ b/validator_client/lighthouse_validator_store/src/lib.rs @@ -2,7 +2,7 @@ use account_utils::validator_definitions::{PasswordStorage, ValidatorDefinition} use bls::{PublicKeyBytes, Signature}; use doppelganger_service::DoppelgangerService; use eth2::types::PublishBlockRequest; -use futures::future::join_all; +use futures::{Stream, future::join_all, stream}; use initialized_validators::InitializedValidators; use logging::crit; use parking_lot::{Mutex, RwLock}; @@ -17,7 +17,7 @@ use std::marker::PhantomData; use std::path::Path; use std::sync::Arc; use task_executor::TaskExecutor; -use tracing::{error, info, instrument, warn}; +use tracing::{Instrument, debug, error, info, info_span, instrument, warn}; use types::{ AbstractExecPayload, Address, AggregateAndProof, Attestation, BeaconBlock, BlindedPayload, ChainSpec, ContributionAndProof, Domain, Epoch, EthSpec, ExecutionPayloadEnvelope, Fork, @@ -28,7 +28,8 @@ use types::{ ValidatorRegistrationData, VoluntaryExit, graffiti::GraffitiString, }; use validator_store::{ - DoppelgangerStatus, Error as ValidatorStoreError, ProposalData, SignedBlock, UnsignedBlock, + AggregateToSign, AttestationToSign, ContributionToSign, DoppelgangerStatus, + Error as ValidatorStoreError, ProposalData, SignedBlock, SyncMessageToSign, UnsignedBlock, ValidatorStore, }; @@ -691,6 +692,119 @@ impl LighthouseValidatorStore { Ok(safe_attestations) } + + /// Signs an `AggregateAndProof` for a given validator. + /// + /// The resulting `SignedAggregateAndProof` is sent on the aggregation channel and cannot be + /// modified by actors other than the signing validator. 
+ pub async fn produce_signed_aggregate_and_proof( + &self, + validator_pubkey: PublicKeyBytes, + aggregator_index: u64, + aggregate: Attestation, + selection_proof: SelectionProof, + ) -> Result, Error> { + let signing_epoch = aggregate.data().target.epoch; + let signing_context = self.signing_context(Domain::AggregateAndProof, signing_epoch); + + let message = + AggregateAndProof::from_attestation(aggregator_index, aggregate, selection_proof); + + let signing_method = self.doppelganger_checked_signing_method(validator_pubkey)?; + let signature = signing_method + .get_signature::>( + SignableMessage::SignedAggregateAndProof(message.to_ref()), + signing_context, + &self.spec, + &self.task_executor, + ) + .await?; + + validator_metrics::inc_counter_vec( + &validator_metrics::SIGNED_AGGREGATES_TOTAL, + &[validator_metrics::SUCCESS], + ); + + Ok(SignedAggregateAndProof::from_aggregate_and_proof( + message, signature, + )) + } + + pub async fn produce_sync_committee_signature( + &self, + slot: Slot, + beacon_block_root: Hash256, + validator_index: u64, + validator_pubkey: &PublicKeyBytes, + ) -> Result { + let signing_epoch = slot.epoch(E::slots_per_epoch()); + let signing_context = self.signing_context(Domain::SyncCommittee, signing_epoch); + + // Bypass `with_validator_signing_method`: sync committee messages are not slashable. 
+ let signing_method = self.doppelganger_bypassed_signing_method(*validator_pubkey)?; + + let signature = signing_method + .get_signature::>( + SignableMessage::SyncCommitteeSignature { + beacon_block_root, + slot, + }, + signing_context, + &self.spec, + &self.task_executor, + ) + .await + .map_err(Error::SpecificError)?; + + validator_metrics::inc_counter_vec( + &validator_metrics::SIGNED_SYNC_COMMITTEE_MESSAGES_TOTAL, + &[validator_metrics::SUCCESS], + ); + + Ok(SyncCommitteeMessage { + slot, + beacon_block_root, + validator_index, + signature, + }) + } + + pub async fn produce_signed_contribution_and_proof( + &self, + aggregator_index: u64, + aggregator_pubkey: PublicKeyBytes, + contribution: SyncCommitteeContribution, + selection_proof: SyncSelectionProof, + ) -> Result, Error> { + let signing_epoch = contribution.slot.epoch(E::slots_per_epoch()); + let signing_context = self.signing_context(Domain::ContributionAndProof, signing_epoch); + + // Bypass `with_validator_signing_method`: sync committee messages are not slashable. 
+ let signing_method = self.doppelganger_bypassed_signing_method(aggregator_pubkey)?; + + let message = ContributionAndProof { + aggregator_index, + contribution, + selection_proof: selection_proof.into(), + }; + + let signature = signing_method + .get_signature::>( + SignableMessage::SignedContributionAndProof(&message), + signing_context, + &self.spec, + &self.task_executor, + ) + .await + .map_err(Error::SpecificError)?; + + validator_metrics::inc_counter_vec( + &validator_metrics::SIGNED_SYNC_COMMITTEE_CONTRIBUTIONS_TOTAL, + &[validator_metrics::SUCCESS], + ); + + Ok(SignedContributionAndProof { message, signature }) + } } impl ValidatorStore for LighthouseValidatorStore { @@ -882,72 +996,83 @@ impl ValidatorStore for LighthouseValidatorS } } - async fn sign_attestations( + fn sign_attestations( self: &Arc, - mut attestations: Vec<(u64, PublicKeyBytes, usize, Attestation)>, - ) -> Result)>, Error> { - // Sign all attestations concurrently. - let signing_futures = - attestations - .iter_mut() - .map(|(_, pubkey, validator_committee_index, attestation)| { + mut attestations: Vec>, + ) -> impl Stream)>, Error>> + Send { + let store = self.clone(); + stream::once(async move { + // Sign all attestations concurrently. + let signing_futures = attestations.iter_mut().map( + |AttestationToSign { + pubkey, + validator_committee_index, + attestation, + .. + }| { let pubkey = *pubkey; let validator_committee_index = *validator_committee_index; + let store = store.clone(); async move { - self.sign_attestation_no_slashing_protection( - pubkey, - validator_committee_index, - attestation, - ) - .await + store + .sign_attestation_no_slashing_protection( + pubkey, + validator_committee_index, + attestation, + ) + .await } - }); + }, + ); - // Execute all signing in parallel. - let results: Vec<_> = join_all(signing_futures).await; + // Execute all signing in parallel. 
+ let results: Vec<_> = join_all(signing_futures).await; - // Collect successfully signed attestations and log errors. - let mut signed_attestations = Vec::with_capacity(attestations.len()); - for (result, (validator_index, pubkey, _, attestation)) in - results.into_iter().zip(attestations.into_iter()) - { - match result { - Ok(()) => { - signed_attestations.push((validator_index, attestation, pubkey)); - } - Err(ValidatorStoreError::UnknownPubkey(pubkey)) => { - warn!( - info = "a validator may have recently been removed from this VC", - ?pubkey, - "Missing pubkey for attestation" - ); - } - Err(e) => { - crit!( - error = ?e, - "Failed to sign attestation" - ); + // Collect successfully signed attestations and log errors. + let mut signed_attestations = Vec::with_capacity(attestations.len()); + for (result, att) in results.into_iter().zip(attestations.into_iter()) { + match result { + Ok(()) => { + signed_attestations.push(( + att.validator_index, + att.attestation, + att.pubkey, + )); + } + Err(ValidatorStoreError::UnknownPubkey(pubkey)) => { + warn!( + info = "a validator may have recently been removed from this VC", + ?pubkey, + "Missing pubkey for attestation" + ); + } + Err(e) => { + crit!( + error = ?e, + "Failed to sign attestation" + ); + } } } - } - if signed_attestations.is_empty() { - return Ok(vec![]); - } + if signed_attestations.is_empty() { + return Ok(vec![]); + } - // Check slashing protection and insert into database. Use a dedicated blocking thread - // to avoid clogging the async executor with blocking database I/O. - let validator_store = self.clone(); - let safe_attestations = self - .task_executor - .spawn_blocking_handle( - move || validator_store.slashing_protect_attestations(signed_attestations), - "slashing_protect_attestations", - ) - .ok_or(Error::ExecutorError)? - .await - .map_err(|_| Error::ExecutorError)??; - Ok(safe_attestations) + // Check slashing protection and insert into database. 
Use a dedicated blocking + // thread to avoid clogging the async executor with blocking database I/O. + let validator_store = store.clone(); + let safe_attestations = store + .task_executor + .spawn_blocking_handle( + move || validator_store.slashing_protect_attestations(signed_attestations), + "slashing_protect_attestations", + ) + .ok_or(Error::ExecutorError)? + .await + .map_err(|_| Error::ExecutorError)??; + Ok(safe_attestations) + }) } async fn sign_validator_registration_data( @@ -979,43 +1104,6 @@ impl ValidatorStore for LighthouseValidatorS }) } - /// Signs an `AggregateAndProof` for a given validator. - /// - /// The resulting `SignedAggregateAndProof` is sent on the aggregation channel and cannot be - /// modified by actors other than the signing validator. - async fn produce_signed_aggregate_and_proof( - &self, - validator_pubkey: PublicKeyBytes, - aggregator_index: u64, - aggregate: Attestation, - selection_proof: SelectionProof, - ) -> Result, Error> { - let signing_epoch = aggregate.data().target.epoch; - let signing_context = self.signing_context(Domain::AggregateAndProof, signing_epoch); - - let message = - AggregateAndProof::from_attestation(aggregator_index, aggregate, selection_proof); - - let signing_method = self.doppelganger_checked_signing_method(validator_pubkey)?; - let signature = signing_method - .get_signature::>( - SignableMessage::SignedAggregateAndProof(message.to_ref()), - signing_context, - &self.spec, - &self.task_executor, - ) - .await?; - - validator_metrics::inc_counter_vec( - &validator_metrics::SIGNED_AGGREGATES_TOTAL, - &[validator_metrics::SUCCESS], - ); - - Ok(SignedAggregateAndProof::from_aggregate_and_proof( - message, signature, - )) - } - /// Produces a `SelectionProof` for the `slot`, signed by with corresponding secret key to /// `validator_pubkey`. 
async fn produce_selection_proof( @@ -1090,80 +1178,172 @@ impl ValidatorStore for LighthouseValidatorS Ok(signature.into()) } - async fn produce_sync_committee_signature( - &self, - slot: Slot, - beacon_block_root: Hash256, - validator_index: u64, - validator_pubkey: &PublicKeyBytes, - ) -> Result { - let signing_epoch = slot.epoch(E::slots_per_epoch()); - let signing_context = self.signing_context(Domain::SyncCommittee, signing_epoch); - - // Bypass `with_validator_signing_method`: sync committee messages are not slashable. - let signing_method = self.doppelganger_bypassed_signing_method(*validator_pubkey)?; - - let signature = signing_method - .get_signature::>( - SignableMessage::SyncCommitteeSignature { - beacon_block_root, - slot, + fn sign_aggregate_and_proofs( + self: &Arc, + aggregates: Vec>, + ) -> impl Stream>, Error>> + Send { + let store = self.clone(); + let count = aggregates.len(); + stream::once(async move { + let signing_futures = aggregates.into_iter().map( + |AggregateToSign { + pubkey, + aggregator_index, + aggregate, + selection_proof, + }| { + let store = store.clone(); + async move { + let result = store + .produce_signed_aggregate_and_proof( + pubkey, + aggregator_index, + aggregate, + selection_proof, + ) + .await; + (pubkey, result) + } }, - signing_context, - &self.spec, - &self.task_executor, - ) - .await - .map_err(Error::SpecificError)?; + ); - validator_metrics::inc_counter_vec( - &validator_metrics::SIGNED_SYNC_COMMITTEE_MESSAGES_TOTAL, - &[validator_metrics::SUCCESS], - ); + let results = join_all(signing_futures) + .instrument(info_span!("sign_aggregates", count)) + .await; - Ok(SyncCommitteeMessage { - slot, - beacon_block_root, - validator_index, - signature, + let mut signed = Vec::with_capacity(results.len()); + for (pubkey, result) in results { + match result { + Ok(agg) => signed.push(agg), + Err(ValidatorStoreError::UnknownPubkey(pubkey)) => { + // A pubkey can be missing when a validator was recently + // removed via the 
API. + debug!(?pubkey, "Missing pubkey for aggregate"); + } + Err(e) => { + crit!(error = ?e, pubkey = ?pubkey, "Failed to sign aggregate"); + } + } + } + Ok(signed) }) } - async fn produce_signed_contribution_and_proof( - &self, - aggregator_index: u64, - aggregator_pubkey: PublicKeyBytes, - contribution: SyncCommitteeContribution, - selection_proof: SyncSelectionProof, - ) -> Result, Error> { - let signing_epoch = contribution.slot.epoch(E::slots_per_epoch()); - let signing_context = self.signing_context(Domain::ContributionAndProof, signing_epoch); + fn sign_sync_committee_signatures( + self: &Arc, + messages: Vec, + ) -> impl Stream, Error>> + Send { + let store = self.clone(); + let count = messages.len(); + stream::once(async move { + let signing_futures = messages.into_iter().map( + |SyncMessageToSign { + slot, + beacon_block_root, + validator_index, + pubkey, + }| { + let store = store.clone(); + async move { + let result = store + .produce_sync_committee_signature( + slot, + beacon_block_root, + validator_index, + &pubkey, + ) + .await; + (pubkey, validator_index, slot, result) + } + }, + ); - // Bypass `with_validator_signing_method`: sync committee messages are not slashable. - let signing_method = self.doppelganger_bypassed_signing_method(aggregator_pubkey)?; + let results = join_all(signing_futures) + .instrument(info_span!("sign_sync_signatures", count)) + .await; - let message = ContributionAndProof { - aggregator_index, - contribution, - selection_proof: selection_proof.into(), - }; + let mut signed = Vec::with_capacity(results.len()); + for (_pubkey, validator_index, slot, result) in results { + match result { + Ok(sig) => signed.push(sig), + Err(ValidatorStoreError::UnknownPubkey(pubkey)) => { + // A pubkey can be missing when a validator was recently + // removed via the API. 
+ debug!( + ?pubkey, + validator_index, + %slot, + "Missing pubkey for sync committee signature" + ); + } + Err(e) => { + crit!( + validator_index, + %slot, + error = ?e, + "Failed to sign sync committee signature" + ); + } + } + } + Ok(signed) + }) + } - let signature = signing_method - .get_signature::>( - SignableMessage::SignedContributionAndProof(&message), - signing_context, - &self.spec, - &self.task_executor, - ) - .await - .map_err(Error::SpecificError)?; + fn sign_sync_committee_contributions( + self: &Arc, + contributions: Vec>, + ) -> impl Stream>, Error>> + Send { + let store = self.clone(); + let count = contributions.len(); + stream::once(async move { + let signing_futures = contributions.into_iter().map( + |ContributionToSign { + aggregator_index, + aggregator_pubkey, + contribution, + selection_proof, + }| { + let store = store.clone(); + let slot = contribution.slot; + async move { + let result = store + .produce_signed_contribution_and_proof( + aggregator_index, + aggregator_pubkey, + contribution, + selection_proof, + ) + .await; + (slot, result) + } + }, + ); - validator_metrics::inc_counter_vec( - &validator_metrics::SIGNED_SYNC_COMMITTEE_CONTRIBUTIONS_TOTAL, - &[validator_metrics::SUCCESS], - ); + let results = join_all(signing_futures) + .instrument(info_span!("sign_sync_contributions", count)) + .await; - Ok(SignedContributionAndProof { message, signature }) + let mut signed = Vec::with_capacity(results.len()); + for (slot, result) in results { + match result { + Ok(contribution) => signed.push(contribution), + Err(ValidatorStoreError::UnknownPubkey(pubkey)) => { + // A pubkey can be missing when a validator was recently + // removed via the API. + debug!(?pubkey, %slot, "Missing pubkey for sync contribution"); + } + Err(e) => { + crit!( + %slot, + error = ?e, + "Unable to sign sync committee contribution" + ); + } + } + } + Ok(signed) + }) } /// Prune the slashing protection database so that it remains performant. 
diff --git a/validator_client/validator_services/src/attestation_service.rs b/validator_client/validator_services/src/attestation_service.rs index a9d5283312..fe808efd88 100644 --- a/validator_client/validator_services/src/attestation_service.rs +++ b/validator_client/validator_services/src/attestation_service.rs @@ -1,6 +1,6 @@ use crate::duties_service::{DutiesService, DutyAndProof}; use beacon_node_fallback::{ApiTopic, BeaconNodeFallback, beacon_head_monitor::HeadEvent}; -use futures::future::join_all; +use futures::StreamExt; use logging::crit; use slot_clock::SlotClock; use std::collections::HashMap; @@ -13,7 +13,7 @@ use tokio::time::{Duration, Instant, sleep, sleep_until}; use tracing::{Instrument, debug, error, info, info_span, instrument, warn}; use tree_hash::TreeHash; use types::{Attestation, AttestationData, ChainSpec, CommitteeIndex, EthSpec, Hash256, Slot}; -use validator_store::{Error as ValidatorStoreError, ValidatorStore}; +use validator_store::{AggregateToSign, AttestationToSign, ValidatorStore}; /// Builds an `AttestationService`. #[derive(Default)] @@ -560,12 +560,12 @@ impl AttestationService AttestationService(attestation_data.slot); - let single_attestations = safe_attestations - .iter() - .filter_map(|(i, a)| { - match a.to_single_attestation_with_attester_index(*i) { - Ok(a) => Some(a), - Err(e) => { - // This shouldn't happen unless BN and VC are out of sync with - // respect to the Electra fork. - error!( - error = ?e, + // Publish each batch as it arrives from the stream. 
+ let mut received_non_empty_batch = false; + while let Some(result) = attestation_stream.next().await { + match result { + Ok(batch) if !batch.is_empty() => { + received_non_empty_batch = true; + + let single_attestations = batch + .iter() + .filter_map(|(attester_index, attestation)| { + match attestation + .to_single_attestation_with_attester_index(*attester_index) + { + Ok(single_attestation) => Some(single_attestation), + Err(e) => { + // This shouldn't happen unless BN and VC are out of sync with + // respect to the Electra fork. + error!( + error = ?e, + committee_index = attestation_data.index, + slot = slot.as_u64(), + "type" = "unaggregated", + "Unable to convert to SingleAttestation" + ); + None + } + } + }) + .collect::>(); + let single_attestations = &single_attestations; + let validator_indices = single_attestations + .iter() + .map(|att| att.attester_index) + .collect::>(); + let published_count = single_attestations.len(); + + // Post the attestations to the BN. + match self + .beacon_nodes + .request(ApiTopic::Attestations, |beacon_node| async move { + let _timer = validator_metrics::start_timer_vec( + &validator_metrics::ATTESTATION_SERVICE_TIMES, + &[validator_metrics::ATTESTATIONS_HTTP_POST], + ); + + beacon_node + .post_beacon_pool_attestations_v2::( + single_attestations.clone(), + fork_name, + ) + .await + }) + .instrument(info_span!("publish_attestations", count = published_count)) + .await + { + Ok(()) => info!( + count = published_count, + validator_indices = ?validator_indices, + head_block = ?attestation_data.beacon_block_root, + committee_index = attestation_data.index, + slot = attestation_data.slot.as_u64(), + "type" = "unaggregated", + "Successfully published attestations" + ), + Err(e) => error!( + error = %e, committee_index = attestation_data.index, slot = slot.as_u64(), "type" = "unaggregated", - "Unable to convert to SingleAttestation" - ); - None + "Unable to publish attestations" + ), } } - }) - .collect::>(); - let 
single_attestations = &single_attestations; - let validator_indices = single_attestations - .iter() - .map(|att| att.attester_index) - .collect::>(); - let published_count = single_attestations.len(); + Err(e) => { + crit!(error = ?e, "Failed to sign attestations"); + } + _ => {} + } + } - // Post the attestations to the BN. - match self - .beacon_nodes - .request(ApiTopic::Attestations, |beacon_node| async move { - let _timer = validator_metrics::start_timer_vec( - &validator_metrics::ATTESTATION_SERVICE_TIMES, - &[validator_metrics::ATTESTATIONS_HTTP_POST], - ); - - beacon_node - .post_beacon_pool_attestations_v2::( - single_attestations.clone(), - fork_name, - ) - .await - }) - .instrument(info_span!("publish_attestations", count = published_count)) - .await - { - Ok(()) => info!( - count = published_count, - validator_indices = ?validator_indices, - head_block = ?attestation_data.beacon_block_root, - committee_index = attestation_data.index, - slot = attestation_data.slot.as_u64(), - "type" = "unaggregated", - "Successfully published attestations" - ), - Err(e) => error!( - error = %e, - committee_index = attestation_data.index, - slot = slot.as_u64(), - "type" = "unaggregated", - "Unable to publish attestations" - ), + if !received_non_empty_batch { + warn!("No attestations were published"); } Ok(()) @@ -725,113 +737,103 @@ impl AttestationService(attestation_data, &self.chain_spec) { - crit!("Inconsistent validator duties during signing"); - return None; - } - - match self - .validator_store - .produce_signed_aggregate_and_proof( - duty.pubkey, - duty.validator_index, - aggregated_attestation.clone(), - selection_proof.clone(), - ) - .await - { - Ok(aggregate) => Some(aggregate), - Err(ValidatorStoreError::UnknownPubkey(pubkey)) => { - // A pubkey can be missing when a validator was recently - // removed via the API. 
- debug!(?pubkey, "Missing pubkey for aggregate"); - None - } - Err(e) => { - crit!( - error = ?e, - pubkey = ?duty.pubkey, - "Failed to sign aggregate" - ); - None - } - } - }); - - // Execute all the futures in parallel, collecting any successful results. - let aggregator_count = validator_duties + // Build the batch of aggregates to sign. + let aggregates_to_sign: Vec<_> = validator_duties .iter() - .filter(|d| d.selection_proof.is_some()) - .count(); - let signed_aggregate_and_proofs = join_all(signing_futures) - .instrument(info_span!("sign_aggregates", count = aggregator_count)) - .await - .into_iter() - .flatten() - .collect::>(); + .filter_map(|duty_and_proof| { + let duty = &duty_and_proof.duty; + let selection_proof = duty_and_proof.selection_proof.as_ref()?; - if !signed_aggregate_and_proofs.is_empty() { - let signed_aggregate_and_proofs_slice = signed_aggregate_and_proofs.as_slice(); - match self - .beacon_nodes - .first_success(|beacon_node| async move { - let _timer = validator_metrics::start_timer_vec( - &validator_metrics::ATTESTATION_SERVICE_TIMES, - &[validator_metrics::AGGREGATES_HTTP_POST], - ); - if fork_name.electra_enabled() { - beacon_node - .post_validator_aggregate_and_proof_v2( - signed_aggregate_and_proofs_slice, - fork_name, - ) - .await - } else { - beacon_node - .post_validator_aggregate_and_proof_v1( - signed_aggregate_and_proofs_slice, - ) - .await - } + if !duty.match_attestation_data::(attestation_data, &self.chain_spec) { + crit!("Inconsistent validator duties during signing"); + return None; + } + + Some(AggregateToSign { + pubkey: duty.pubkey, + aggregator_index: duty.validator_index, + aggregate: aggregated_attestation.clone(), + selection_proof: selection_proof.clone(), }) - .instrument(info_span!( - "publish_aggregates", - count = signed_aggregate_and_proofs.len() - )) - .await - { - Ok(()) => { - for signed_aggregate_and_proof in signed_aggregate_and_proofs { - let attestation = 
signed_aggregate_and_proof.message().aggregate(); - info!( - aggregator = signed_aggregate_and_proof.message().aggregator_index(), - signatures = attestation.num_set_aggregation_bits(), - head_block = format!("{:?}", attestation.data().beacon_block_root), - committee_index = attestation.committee_index(), - slot = attestation.data().slot.as_u64(), - "type" = "aggregated", - "Successfully published attestation" - ); + }) + .collect(); + + // Sign aggregates. Returns a stream of batches. + let aggregate_stream = self + .validator_store + .sign_aggregate_and_proofs(aggregates_to_sign); + tokio::pin!(aggregate_stream); + + // Publish each batch as it arrives from the stream. + while let Some(result) = aggregate_stream.next().await { + match result { + Ok(batch) if !batch.is_empty() => { + let signed_aggregate_and_proofs = batch.as_slice(); + match self + .beacon_nodes + .first_success(|beacon_node| async move { + let _timer = validator_metrics::start_timer_vec( + &validator_metrics::ATTESTATION_SERVICE_TIMES, + &[validator_metrics::AGGREGATES_HTTP_POST], + ); + if fork_name.electra_enabled() { + beacon_node + .post_validator_aggregate_and_proof_v2( + signed_aggregate_and_proofs, + fork_name, + ) + .await + } else { + beacon_node + .post_validator_aggregate_and_proof_v1( + signed_aggregate_and_proofs, + ) + .await + } + }) + .instrument(info_span!( + "publish_aggregates", + count = signed_aggregate_and_proofs.len() + )) + .await + { + Ok(()) => { + for signed_aggregate_and_proof in signed_aggregate_and_proofs { + let attestation = signed_aggregate_and_proof.message().aggregate(); + info!( + aggregator = + signed_aggregate_and_proof.message().aggregator_index(), + signatures = attestation.num_set_aggregation_bits(), + head_block = + format!("{:?}", attestation.data().beacon_block_root), + committee_index = attestation.committee_index(), + slot = attestation.data().slot.as_u64(), + "type" = "aggregated", + "Successfully published attestation" + ); + } + } + Err(e) => { + 
for signed_aggregate_and_proof in signed_aggregate_and_proofs { + let attestation = &signed_aggregate_and_proof.message().aggregate(); + crit!( + error = %e, + aggregator = signed_aggregate_and_proof + .message() + .aggregator_index(), + committee_index = attestation.committee_index(), + slot = attestation.data().slot.as_u64(), + "type" = "aggregated", + "Failed to publish attestation" + ); + } + } } } Err(e) => { - for signed_aggregate_and_proof in signed_aggregate_and_proofs { - let attestation = &signed_aggregate_and_proof.message().aggregate(); - crit!( - error = %e, - aggregator = signed_aggregate_and_proof.message().aggregator_index(), - committee_index = attestation.committee_index(), - slot = attestation.data().slot.as_u64(), - "type" = "aggregated", - "Failed to publish attestation" - ); - } + crit!(error = ?e, "Failed to sign aggregates"); } + _ => {} } } diff --git a/validator_client/validator_services/src/sync_committee_service.rs b/validator_client/validator_services/src/sync_committee_service.rs index 59e8524a1a..26ce052ea0 100644 --- a/validator_client/validator_services/src/sync_committee_service.rs +++ b/validator_client/validator_services/src/sync_committee_service.rs @@ -2,8 +2,8 @@ use crate::duties_service::DutiesService; use beacon_node_fallback::{ApiTopic, BeaconNodeFallback}; use bls::PublicKeyBytes; use eth2::types::BlockId; +use futures::StreamExt; use futures::future::FutureExt; -use futures::future::join_all; use logging::crit; use slot_clock::SlotClock; use std::collections::HashMap; @@ -17,7 +17,7 @@ use types::{ ChainSpec, EthSpec, Hash256, Slot, SyncCommitteeSubscription, SyncContributionData, SyncDuty, SyncSelectionProof, SyncSubnetId, }; -use validator_store::{Error as ValidatorStoreError, ValidatorStore}; +use validator_store::{ContributionToSign, SyncMessageToSign, ValidatorStore}; pub const SUBSCRIPTION_LOOKAHEAD_EPOCHS: u64 = 4; @@ -247,78 +247,57 @@ impl SyncCommitteeService, ) -> Result<(), ()> { - // Create futures to 
produce sync committee signatures. - let signature_futures = validator_duties.iter().map(|duty| async move { - match self - .validator_store - .produce_sync_committee_signature( - slot, - beacon_block_root, - duty.validator_index, - &duty.pubkey, - ) - .await - { - Ok(signature) => Some(signature), - Err(ValidatorStoreError::UnknownPubkey(pubkey)) => { - // A pubkey can be missing when a validator was recently - // removed via the API. - debug!( - ?pubkey, - validator_index = duty.validator_index, - %slot, - "Missing pubkey for sync committee signature" - ); - None + let messages_to_sign: Vec<_> = validator_duties + .iter() + .map(|duty| SyncMessageToSign { + slot, + beacon_block_root, + validator_index: duty.validator_index, + pubkey: duty.pubkey, + }) + .collect(); + + let signature_stream = self + .validator_store + .sign_sync_committee_signatures(messages_to_sign); + tokio::pin!(signature_stream); + + while let Some(result) = signature_stream.next().await { + match result { + Ok(committee_signatures) if !committee_signatures.is_empty() => { + let committee_signatures = &committee_signatures; + match self + .beacon_nodes + .request(ApiTopic::SyncCommittee, |beacon_node| async move { + beacon_node + .post_beacon_pool_sync_committee_signatures(committee_signatures) + .await + }) + .instrument(info_span!( + "publish_sync_signatures", + count = committee_signatures.len() + )) + .await + { + Ok(()) => info!( + count = committee_signatures.len(), + head_block = ?beacon_block_root, + %slot, + "Successfully published sync committee messages" + ), + Err(e) => error!( + %slot, + error = %e, + "Unable to publish sync committee messages" + ), + } } Err(e) => { - crit!( - validator_index = duty.validator_index, - %slot, - error = ?e, - "Failed to sign sync committee signature" - ); - None + crit!(%slot, error = ?e, "Failed to sign sync committee signatures"); } + _ => {} } - }); - - // Execute all the futures in parallel, collecting any successful results. 
- let committee_signatures = &join_all(signature_futures) - .instrument(info_span!( - "sign_sync_signatures", - count = validator_duties.len() - )) - .await - .into_iter() - .flatten() - .collect::>(); - - self.beacon_nodes - .request(ApiTopic::SyncCommittee, |beacon_node| async move { - beacon_node - .post_beacon_pool_sync_committee_signatures(committee_signatures) - .await - }) - .instrument(info_span!( - "publish_sync_signatures", - count = committee_signatures.len() - )) - .await - .map_err(|e| { - error!( - %slot, - error = %e, - "Unable to publish sync committee messages" - ); - })?; - - info!( - count = committee_signatures.len(), - head_block = ?beacon_block_root, - %slot, - "Successfully published sync committee messages" - ); + } Ok(()) } @@ -389,77 +368,61 @@ impl SyncCommitteeService Some(signed_contribution), - Err(ValidatorStoreError::UnknownPubkey(pubkey)) => { - // A pubkey can be missing when a validator was recently - // removed via the API. - debug!(?pubkey, %slot, "Missing pubkey for sync contribution"); - None - } - Err(e) => { - crit!( + let contributions_to_sign: Vec<_> = subnet_aggregators + .into_iter() + .map( + |(aggregator_index, aggregator_pk, selection_proof)| ContributionToSign { + aggregator_index, + aggregator_pubkey: aggregator_pk, + contribution: contribution.clone(), + selection_proof, + }, + ) + .collect(); + + let contribution_stream = self + .validator_store + .sign_sync_committee_contributions(contributions_to_sign); + tokio::pin!(contribution_stream); + + while let Some(result) = contribution_stream.next().await { + match result { + Ok(signed_contributions) if !signed_contributions.is_empty() => { + let signed_contributions = &signed_contributions; + // Publish to the beacon node. 
+ match self + .beacon_nodes + .first_success(|beacon_node| async move { + beacon_node + .post_validator_contribution_and_proofs(signed_contributions) + .await + }) + .instrument(info_span!( + "publish_sync_contributions", + count = signed_contributions.len() + )) + .await + { + Ok(()) => info!( + subnet = %subnet_id, + beacon_block_root = %beacon_block_root, + num_signers = contribution.aggregation_bits.num_set_bits(), %slot, - error = ?e, - "Unable to sign sync committee contribution" - ); - None + "Successfully published sync contributions" + ), + Err(e) => error!( + %slot, + error = %e, + "Unable to publish signed contributions and proofs" + ), } } - }, - ); - - // Execute all the futures in parallel, collecting any successful results. - let signed_contributions = &join_all(signature_futures) - .instrument(info_span!( - "sign_sync_contributions", - count = aggregator_count - )) - .await - .into_iter() - .flatten() - .collect::>(); - - // Publish to the beacon node. - self.beacon_nodes - .first_success(|beacon_node| async move { - beacon_node - .post_validator_contribution_and_proofs(signed_contributions) - .await - }) - .instrument(info_span!( - "publish_sync_contributions", - count = signed_contributions.len() - )) - .await - .map_err(|e| { - error!( - %slot, - error = %e, - "Unable to publish signed contributions and proofs" - ); - })?; - - info!( - subnet = %subnet_id, - beacon_block_root = %beacon_block_root, - num_signers = contribution.aggregation_bits.num_set_bits(), - %slot, - "Successfully published sync contributions" - ); + Err(e) => { + crit!(%slot, error = ?e, "Failed to sign sync committee contributions"); + } + _ => {} + } + } Ok(()) } diff --git a/validator_client/validator_store/Cargo.toml b/validator_client/validator_store/Cargo.toml index 8b1879c837..2c6a68d494 100644 --- a/validator_client/validator_store/Cargo.toml +++ b/validator_client/validator_store/Cargo.toml @@ -7,5 +7,6 @@ authors = ["Sigma Prime "] [dependencies] bls = { workspace = 
true } eth2 = { workspace = true } +futures = { workspace = true } slashing_protection = { workspace = true } types = { workspace = true } diff --git a/validator_client/validator_store/src/lib.rs b/validator_client/validator_store/src/lib.rs index 87ab669e8d..da0b33de18 100644 --- a/validator_client/validator_store/src/lib.rs +++ b/validator_client/validator_store/src/lib.rs @@ -1,5 +1,6 @@ use bls::{PublicKeyBytes, Signature}; use eth2::types::{FullBlockContents, PublishBlockRequest}; +use futures::Stream; use slashing_protection::NotSafe; use std::fmt::Debug; use std::future::Future; @@ -32,6 +33,38 @@ impl From for Error { } } +/// Input for batch attestation signing +pub struct AttestationToSign { + pub validator_index: u64, + pub pubkey: PublicKeyBytes, + pub validator_committee_index: usize, + pub attestation: Attestation, +} + +/// Input for batch aggregate signing +pub struct AggregateToSign { + pub pubkey: PublicKeyBytes, + pub aggregator_index: u64, + pub aggregate: Attestation, + pub selection_proof: SelectionProof, +} + +/// Input for batch sync committee message signing +pub struct SyncMessageToSign { + pub slot: Slot, + pub beacon_block_root: Hash256, + pub validator_index: u64, + pub pubkey: PublicKeyBytes, +} + +/// Input for batch sync committee contribution signing +pub struct ContributionToSign { + pub aggregator_index: u64, + pub aggregator_pubkey: PublicKeyBytes, + pub contribution: SyncCommitteeContribution, + pub selection_proof: SyncSelectionProof, +} + /// A helper struct, used for passing data from the validator store to services. pub struct ProposalData { pub validator_index: Option, @@ -106,13 +139,9 @@ pub trait ValidatorStore: Send + Sync { /// Sign a batch of `attestations` and apply slashing protection to them. /// - /// Only successfully signed attestations that pass slashing protection are returned, along with - /// the validator index of the signer. Eventually this will be replaced by `SingleAttestation` - /// use. 
- /// - /// Input: - /// - /// * Vec of (validator_index, pubkey, validator_committee_index, attestation). + /// Returns a stream of batches of successfully signed attestations. Each batch contains + /// attestations that passed slashing protection, along with the validator index of the signer. + /// Eventually this will be replaced by `SingleAttestation` use. /// /// Output: /// @@ -120,26 +149,14 @@ pub trait ValidatorStore: Send + Sync { #[allow(clippy::type_complexity)] fn sign_attestations( self: &Arc, - attestations: Vec<(u64, PublicKeyBytes, usize, Attestation)>, - ) -> impl Future)>, Error>> + Send; + attestations: Vec>, + ) -> impl Stream)>, Error>> + Send; fn sign_validator_registration_data( &self, validator_registration_data: ValidatorRegistrationData, ) -> impl Future>> + Send; - /// Signs an `AggregateAndProof` for a given validator. - /// - /// The resulting `SignedAggregateAndProof` is sent on the aggregation channel and cannot be - /// modified by actors other than the signing validator. - fn produce_signed_aggregate_and_proof( - &self, - validator_pubkey: PublicKeyBytes, - aggregator_index: u64, - aggregate: Attestation, - selection_proof: SelectionProof, - ) -> impl Future, Error>> + Send; - /// Produces a `SelectionProof` for the `slot`, signed by with corresponding secret key to /// `validator_pubkey`. fn produce_selection_proof( @@ -156,21 +173,23 @@ pub trait ValidatorStore: Send + Sync { subnet_id: SyncSubnetId, ) -> impl Future>> + Send; - fn produce_sync_committee_signature( - &self, - slot: Slot, - beacon_block_root: Hash256, - validator_index: u64, - validator_pubkey: &PublicKeyBytes, - ) -> impl Future>> + Send; + /// Sign a batch of aggregate and proofs and return results as a stream of batches. 
+ fn sign_aggregate_and_proofs( + self: &Arc, + aggregates: Vec>, + ) -> impl Stream>, Error>> + Send; - fn produce_signed_contribution_and_proof( - &self, - aggregator_index: u64, - aggregator_pubkey: PublicKeyBytes, - contribution: SyncCommitteeContribution, - selection_proof: SyncSelectionProof, - ) -> impl Future, Error>> + Send; + /// Sign a batch of sync committee messages and return results as a stream of batches. + fn sign_sync_committee_signatures( + self: &Arc, + messages: Vec, + ) -> impl Stream, Error>> + Send; + + /// Sign a batch of sync committee contributions and return results as a stream of batches. + fn sign_sync_committee_contributions( + self: &Arc, + contributions: Vec>, + ) -> impl Stream>, Error>> + Send; /// Prune the slashing protection database so that it remains performant. /// From 53a711956eb5c5ffeef277b2a13850bd4911946b Mon Sep 17 00:00:00 2001 From: Akihito Nakano Date: Sat, 14 Mar 2026 03:27:15 +0900 Subject: [PATCH 22/43] Fix flaky `test_same_subnet_unsubscription` (#8932) Co-Authored-By: figtracer <1gusredo@gmail.com> Co-Authored-By: ackintosh --- beacon_node/network/src/subnet_service/mod.rs | 7 ---- .../network/src/subnet_service/tests/mod.rs | 34 +++++++++---------- 2 files changed, 16 insertions(+), 25 deletions(-) diff --git a/beacon_node/network/src/subnet_service/mod.rs b/beacon_node/network/src/subnet_service/mod.rs index be491e56d3..008e7ab9ac 100644 --- a/beacon_node/network/src/subnet_service/mod.rs +++ b/beacon_node/network/src/subnet_service/mod.rs @@ -198,13 +198,6 @@ impl SubnetService { self.permanent_attestation_subscriptions.iter() } - /// Returns whether we are subscribed to a subnet for testing purposes. - #[cfg(test)] - pub(crate) fn is_subscribed(&self, subnet: &Subnet) -> bool { - self.subscriptions.contains_key(subnet) - || self.permanent_attestation_subscriptions.contains(subnet) - } - /// Returns whether we are subscribed to a permanent subnet for testing purposes. 
#[cfg(test)] pub(crate) fn is_subscribed_permanent(&self, subnet: &Subnet) -> bool { diff --git a/beacon_node/network/src/subnet_service/tests/mod.rs b/beacon_node/network/src/subnet_service/tests/mod.rs index bee6569b7b..619154d738 100644 --- a/beacon_node/network/src/subnet_service/tests/mod.rs +++ b/beacon_node/network/src/subnet_service/tests/mod.rs @@ -335,28 +335,26 @@ mod test { // submit the subscriptions subnet_service.validator_subscriptions(vec![sub1, sub2].into_iter()); - // Unsubscription event should happen at slot 2 (since subnet id's are the same, unsubscription event should be at higher slot + 1) - let expected = SubnetServiceMessage::Subscribe(Subnet::Attestation(subnet_id1)); + let subnet = Subnet::Attestation(subnet_id1); - if subnet_service.is_subscribed(&Subnet::Attestation(subnet_id1)) { - // If we are permanently subscribed to this subnet, we won't see a subscribe message - let _ = get_events_until_num_slots(&mut subnet_service, None, 1).await; + if subnet_service.is_subscribed_permanent(&subnet) { + // If permanently subscribed, no Subscribe/Unsubscribe events will be generated + let events = get_events_until_num_slots(&mut subnet_service, None, 3).await; + assert!(events.is_empty()); } else { - let subscription = get_events_until_num_slots(&mut subnet_service, None, 1).await; - assert_eq!(subscription, [expected]); + // Wait 1 slot: expect a single Subscribe event (no duplicate for the same subnet). + let events = get_events_until_num_slots(&mut subnet_service, None, 1).await; + assert_eq!(events, [SubnetServiceMessage::Subscribe(subnet)]); + + // Wait for the Unsubscribe event after subscription_slot2 expires. + // Use a longer timeout because the test doesn't start exactly at a slot + // boundary, so the previous 1-slot wait may end partway through slot 1, + // leaving insufficient time to catch the Unsubscribe within another 1 slot. 
+ let events = get_events_until_num_slots(&mut subnet_service, Some(1), 3).await; + assert_eq!(events, [SubnetServiceMessage::Unsubscribe(subnet)]); } - // Get event for 1 more slot duration, we should get the unsubscribe event now. - let unsubscribe_event = get_events_until_num_slots(&mut subnet_service, None, 1).await; - - // If the long lived and short lived subnets are different, we should get an unsubscription - // event. - let expected = SubnetServiceMessage::Unsubscribe(Subnet::Attestation(subnet_id1)); - if !subnet_service.is_subscribed(&Subnet::Attestation(subnet_id1)) { - assert_eq!([expected], unsubscribe_event[..]); - } - - // Should no longer be subscribed to any short lived subnets after unsubscription. + // Should no longer be subscribed to any short lived subnets after unsubscription. assert_eq!(subnet_service.subscriptions().count(), 0); } From 02137492f30276619dbb764f4fada34c9d72cd21 Mon Sep 17 00:00:00 2001 From: ethDreamer <37123614+ethDreamer@users.noreply.github.com> Date: Fri, 13 Mar 2026 14:22:25 -0500 Subject: [PATCH 23/43] Fix intermittent simulator test failures (#8983) Fixes intermittent simulator test failures with error: `Head not synced for node 2. Found 127; Should be 128` Modify the delayed node in `basic_sim` to join earlier, giving it sufficient time to discover peers and form a proper gossip mesh before the sync verification check. **Change:** Delayed node now joins at `END_EPOCH - 3` (epoch 13) instead of `END_EPOCH - 1` (epoch 15). 
Co-Authored-By: Mark Mackey Co-Authored-By: ethDreamer <37123614+ethDreamer@users.noreply.github.com> --- testing/simulator/src/basic_sim.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testing/simulator/src/basic_sim.rs b/testing/simulator/src/basic_sim.rs index a9d0a0756b..79581ee529 100644 --- a/testing/simulator/src/basic_sim.rs +++ b/testing/simulator/src/basic_sim.rs @@ -363,7 +363,7 @@ pub fn run_basic_sim(matches: &ArgMatches) -> Result<(), String> { network_1.add_beacon_node_with_delay( beacon_config.clone(), mock_execution_config.clone(), - END_EPOCH - 1, + END_EPOCH - 3, slot_duration, slots_per_epoch ), From 6ca610d918e8a12946c7c9baaeb4bcbfbc3429d5 Mon Sep 17 00:00:00 2001 From: ethDreamer <37123614+ethDreamer@users.noreply.github.com> Date: Fri, 13 Mar 2026 14:22:29 -0500 Subject: [PATCH 24/43] Breakup RPCBlock into LookupBlock & RangeSyncBlock (#8860) Co-Authored-By: Mark Mackey --- beacon_node/beacon_chain/src/beacon_chain.rs | 8 +- .../beacon_chain/src/block_verification.rs | 97 +++++---- .../src/block_verification_types.rs | 201 +++++++++--------- .../src/data_availability_checker.rs | 13 +- beacon_node/beacon_chain/src/test_utils.rs | 180 ++++++++-------- .../tests/attestation_production.rs | 37 +--- .../beacon_chain/tests/blob_verification.rs | 7 +- .../beacon_chain/tests/block_verification.rs | 169 +++++---------- .../beacon_chain/tests/column_verification.rs | 16 +- .../tests/payload_invalidation.rs | 48 ++--- beacon_node/beacon_chain/tests/store_tests.rs | 53 ++--- beacon_node/http_api/src/publish_blocks.rs | 16 +- .../src/network_beacon_processor/mod.rs | 11 +- .../network_beacon_processor/sync_methods.rs | 51 ++--- .../src/network_beacon_processor/tests.rs | 32 +-- .../network/src/sync/backfill_sync/mod.rs | 6 +- beacon_node/network/src/sync/batch.rs | 4 +- .../src/sync/block_sidecar_coupling.rs | 28 +-- .../network/src/sync/network_context.rs | 19 +- .../network/src/sync/range_sync/chain.rs | 6 +- 
.../network/src/sync/range_sync/range.rs | 4 +- beacon_node/network/src/sync/tests/lookups.rs | 96 ++++----- beacon_node/network/src/sync/tests/mod.rs | 6 +- beacon_node/network/src/sync/tests/range.rs | 20 +- testing/ef_tests/src/cases/fork_choice.rs | 46 ++-- 25 files changed, 505 insertions(+), 669 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index ab2097e001..20af7b4630 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -13,7 +13,7 @@ use crate::block_verification::{ signature_verify_chain_segment, verify_header_signature, }; use crate::block_verification_types::{ - AsBlock, AvailableExecutedBlock, BlockImportData, ExecutedBlock, RpcBlock, + AsBlock, AvailableExecutedBlock, BlockImportData, ExecutedBlock, RangeSyncBlock, }; pub use crate::canonical_head::CanonicalHead; use crate::chain_config::ChainConfig; @@ -137,7 +137,7 @@ use types::*; pub type ForkChoiceError = fork_choice::Error; /// Alias to appease clippy. -type HashBlockTuple = (Hash256, RpcBlock); +type HashBlockTuple = (Hash256, RangeSyncBlock); // These keys are all zero because they get stored in different columns, see `DBColumn` type. pub const BEACON_CHAIN_DB_KEY: Hash256 = Hash256::ZERO; @@ -2746,7 +2746,7 @@ impl BeaconChain { /// This method is potentially long-running and should not run on the core executor. pub fn filter_chain_segment( self: &Arc, - chain_segment: Vec>, + chain_segment: Vec>, ) -> Result>, Box> { // This function will never import any blocks. let imported_blocks = vec![]; @@ -2855,7 +2855,7 @@ impl BeaconChain { /// `Self::process_block`. 
pub async fn process_chain_segment( self: &Arc, - chain_segment: Vec>, + chain_segment: Vec>, notify_execution_layer: NotifyExecutionLayer, ) -> ChainSegmentResult { for block in chain_segment.iter() { diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 1be9bd4181..06ec26185f 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -50,7 +50,7 @@ use crate::beacon_snapshot::PreProcessingSnapshot; use crate::blob_verification::GossipBlobError; -use crate::block_verification_types::{AsBlock, BlockImportData, RpcBlock}; +use crate::block_verification_types::{AsBlock, BlockImportData, LookupBlock, RangeSyncBlock}; use crate::data_availability_checker::{ AvailabilityCheckError, AvailableBlock, AvailableBlockData, MaybeAvailableBlock, }; @@ -585,7 +585,7 @@ pub(crate) fn process_block_slash_info( - mut chain_segment: Vec<(Hash256, RpcBlock)>, + mut chain_segment: Vec<(Hash256, RangeSyncBlock)>, chain: &BeaconChain, ) -> Result>, BlockError> { if chain_segment.is_empty() { @@ -616,24 +616,14 @@ pub fn signature_verify_chain_segment( let consensus_context = ConsensusContext::new(block.slot()).set_current_block_root(block_root); - match block { - RpcBlock::FullyAvailable(available_block) => { - available_blocks.push(available_block.clone()); - signature_verified_blocks.push(SignatureVerifiedBlock { - block: MaybeAvailableBlock::Available(available_block), - block_root, - parent: None, - consensus_context, - }); - } - RpcBlock::BlockOnly { .. } => { - // RangeSync and BackfillSync already ensure that the chain segment is fully available - // so this shouldn't be possible in practice. 
- return Err(BlockError::InternalError( - "Chain segment is not fully available".to_string(), - )); - } - } + let available_block = block.into_available_block(); + available_blocks.push(available_block.clone()); + signature_verified_blocks.push(SignatureVerifiedBlock { + block: MaybeAvailableBlock::Available(available_block), + block_root, + parent: None, + consensus_context, + }); } chain @@ -1300,11 +1290,11 @@ impl IntoExecutionPendingBlock for SignatureVerifiedBloc } } -impl IntoExecutionPendingBlock for RpcBlock { +impl IntoExecutionPendingBlock for RangeSyncBlock { /// Verifies the `SignedBeaconBlock` by first transforming it into a `SignatureVerifiedBlock` /// and then using that implementation of `IntoExecutionPendingBlock` to complete verification. #[instrument( - name = "rpc_block_into_execution_pending_block_slashable", + name = "range_sync_block_into_execution_pending_block_slashable", level = "debug" skip_all, )] @@ -1318,24 +1308,51 @@ impl IntoExecutionPendingBlock for RpcBlock let block_root = check_block_relevancy(self.as_block(), block_root, chain) .map_err(|e| BlockSlashInfo::SignatureNotChecked(self.signed_block_header(), e))?; - let maybe_available_block = match &self { - RpcBlock::FullyAvailable(available_block) => { - chain - .data_availability_checker - .verify_kzg_for_available_block(available_block) - .map_err(|e| { - BlockSlashInfo::SignatureNotChecked( - self.signed_block_header(), - BlockError::AvailabilityCheck(e), - ) - })?; - MaybeAvailableBlock::Available(available_block.clone()) - } - // No need to perform KZG verification unless we have a fully available block - RpcBlock::BlockOnly { block, block_root } => MaybeAvailableBlock::AvailabilityPending { - block_root: *block_root, - block: block.clone(), - }, + let available_block = self.into_available_block(); + chain + .data_availability_checker + .verify_kzg_for_available_block(&available_block) + .map_err(|e| { + BlockSlashInfo::SignatureNotChecked( + 
available_block.as_block().signed_block_header(), + BlockError::AvailabilityCheck(e), + ) + })?; + let maybe_available_block = MaybeAvailableBlock::Available(available_block); + SignatureVerifiedBlock::check_slashable(maybe_available_block, block_root, chain)? + .into_execution_pending_block_slashable(block_root, chain, notify_execution_layer) + } + + fn block(&self) -> &SignedBeaconBlock { + self.as_block() + } + + fn block_cloned(&self) -> Arc> { + self.block_cloned() + } +} + +impl IntoExecutionPendingBlock for LookupBlock { + /// Verifies the `SignedBeaconBlock` by first transforming it into a `SignatureVerifiedBlock` + /// and then using that implementation of `IntoExecutionPendingBlock` to complete verification. + #[instrument( + name = "lookup_block_into_execution_pending_block_slashable", + level = "debug" + skip_all, + )] + fn into_execution_pending_block_slashable( + self, + block_root: Hash256, + chain: &Arc>, + notify_execution_layer: NotifyExecutionLayer, + ) -> Result, BlockSlashInfo> { + // Perform an early check to prevent wasting time on irrelevant blocks. + let block_root = check_block_relevancy(self.as_block(), block_root, chain) + .map_err(|e| BlockSlashInfo::SignatureNotChecked(self.signed_block_header(), e))?; + + let maybe_available_block = MaybeAvailableBlock::AvailabilityPending { + block_root, + block: self.block_cloned(), }; SignatureVerifiedBlock::check_slashable(maybe_available_block, block_root, chain)? diff --git a/beacon_node/beacon_chain/src/block_verification_types.rs b/beacon_node/beacon_chain/src/block_verification_types.rs index f98cd40d08..be73ef15d7 100644 --- a/beacon_node/beacon_chain/src/block_verification_types.rs +++ b/beacon_node/beacon_chain/src/block_verification_types.rs @@ -13,76 +13,70 @@ use types::{ SignedBeaconBlock, SignedBeaconBlockHeader, Slot, }; -/// A block that has been received over RPC. It has 2 internal variants: -/// -/// 1. `FullyAvailable`: A fully available block. 
This can either be a pre-deneb block, a
-/// post-Deneb block with blobs, a post-Fulu block with the columns the node is required to custody,
-/// or a post-Deneb block that doesn't require blobs/columns. Hence, it is fully self contained w.r.t
-/// verification. i.e. this block has all the required data to get verified and imported into fork choice.
-///
-/// 2. `BlockOnly`: This is a post-deneb block that requires blobs to be considered fully available.
-#[derive(Clone, Educe)]
-#[educe(Hash(bound(E: EthSpec)))]
-pub enum RpcBlock {
-    FullyAvailable(AvailableBlock),
-    BlockOnly {
-        block: Arc>,
-        block_root: Hash256,
-    },
+/// A wrapper around a `SignedBeaconBlock`. This variant is constructed
+/// when lookup sync only fetches a single block. It does not contain
+/// any blobs or data columns.
+pub struct LookupBlock {
+    block: Arc>,
+    block_root: Hash256,
+}
+
+impl LookupBlock {
+    pub fn new(block: Arc>) -> Self {
+        let block_root = block.canonical_root();
+        Self { block, block_root }
+    }
+
+    pub fn block(&self) -> &SignedBeaconBlock {
+        &self.block
+    }
+
+    pub fn block_root(&self) -> Hash256 {
+        self.block_root
+    }
+
+    pub fn block_cloned(&self) -> Arc> {
+        self.block.clone()
+    }
+}
+
+/// A fully available block that has been constructed by range sync.
+/// The block contains all the data required to import into fork choice.
+/// This includes any and all blobs/columns required, including zero if
+/// none are required. This can happen if the block is pre-deneb or if
+/// it's simply past the DA boundary. 
+#[derive(Clone, Educe)] +#[educe(Hash(bound(E: EthSpec)))] +pub struct RangeSyncBlock { + block: AvailableBlock, +} + +impl Debug for RangeSyncBlock { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { write!(f, "RpcBlock({:?})", self.block_root()) } } -impl RpcBlock { +impl RangeSyncBlock { pub fn block_root(&self) -> Hash256 { - match self { - RpcBlock::FullyAvailable(available_block) => available_block.block_root(), - RpcBlock::BlockOnly { block_root, .. } => *block_root, - } + self.block.block_root() } pub fn as_block(&self) -> &SignedBeaconBlock { - match self { - RpcBlock::FullyAvailable(available_block) => available_block.block(), - RpcBlock::BlockOnly { block, .. } => block, - } + self.block.block() } pub fn block_cloned(&self) -> Arc> { - match self { - RpcBlock::FullyAvailable(available_block) => available_block.block_cloned(), - RpcBlock::BlockOnly { block, .. } => block.clone(), - } + self.block.block_cloned() } - pub fn block_data(&self) -> Option<&AvailableBlockData> { - match self { - RpcBlock::FullyAvailable(available_block) => Some(available_block.data()), - RpcBlock::BlockOnly { .. } => None, - } + pub fn block_data(&self) -> &AvailableBlockData { + self.block.data() } } -impl RpcBlock { - /// Constructs an `RpcBlock` from a block and optional availability data. - /// - /// This function creates an RpcBlock which can be in one of two states: - /// - `FullyAvailable`: When `block_data` is provided, the block contains all required - /// data for verification. - /// - `BlockOnly`: When `block_data` is `None`, the block may still need additional - /// data to be considered fully available (used during block lookups or when blobs - /// will arrive separately). - /// - /// # Validation - /// - /// When `block_data` is provided, this function validates that: - /// - Block data is not provided when not required. - /// - Required blobs are present and match the expected count. 
- /// - Required custody columns are included based on the nodes custody requirements. +impl RangeSyncBlock { + /// Constructs an `RangeSyncBlock` from a block and availability data. /// /// # Errors /// @@ -92,62 +86,41 @@ impl RpcBlock { /// - `MissingCustodyColumns`: Block requires custody columns but they are incomplete. pub fn new( block: Arc>, - block_data: Option>, + block_data: AvailableBlockData, da_checker: &DataAvailabilityChecker, spec: Arc, ) -> Result where T: BeaconChainTypes, { - match block_data { - Some(block_data) => Ok(RpcBlock::FullyAvailable(AvailableBlock::new( - block, block_data, da_checker, spec, - )?)), - None => Ok(RpcBlock::BlockOnly { - block_root: block.canonical_root(), - block, - }), - } + let available_block = AvailableBlock::new(block, block_data, da_checker, spec)?; + Ok(Self { + block: available_block, + }) } #[allow(clippy::type_complexity)] - pub fn deconstruct( - self, - ) -> ( - Hash256, - Arc>, - Option>, - ) { - match self { - RpcBlock::FullyAvailable(available_block) => { - let (block_root, block, block_data) = available_block.deconstruct(); - (block_root, block, Some(block_data)) - } - RpcBlock::BlockOnly { block, block_root } => (block_root, block, None), - } + pub fn deconstruct(self) -> (Hash256, Arc>, AvailableBlockData) { + self.block.deconstruct() } pub fn n_blobs(&self) -> usize { - if let Some(block_data) = self.block_data() { - match block_data { - AvailableBlockData::NoData | AvailableBlockData::DataColumns(_) => 0, - AvailableBlockData::Blobs(blobs) => blobs.len(), - } - } else { - 0 + match self.block_data() { + AvailableBlockData::NoData | AvailableBlockData::DataColumns(_) => 0, + AvailableBlockData::Blobs(blobs) => blobs.len(), } } pub fn n_data_columns(&self) -> usize { - if let Some(block_data) = self.block_data() { - match block_data { - AvailableBlockData::NoData | AvailableBlockData::Blobs(_) => 0, - AvailableBlockData::DataColumns(columns) => columns.len(), - } - } else { - 0 + match 
self.block_data() { + AvailableBlockData::NoData | AvailableBlockData::Blobs(_) => 0, + AvailableBlockData::DataColumns(columns) => columns.len(), } } + + pub fn into_available_block(self) -> AvailableBlock { + self.block + } } /// A block that has gone through all pre-deneb block processing checks including block processing @@ -412,7 +385,7 @@ impl AsBlock for AvailableBlock { } } -impl AsBlock for RpcBlock { +impl AsBlock for RangeSyncBlock { fn slot(&self) -> Slot { self.as_block().slot() } @@ -432,24 +405,42 @@ impl AsBlock for RpcBlock { self.as_block().message() } fn as_block(&self) -> &SignedBeaconBlock { - match self { - Self::BlockOnly { - block, - block_root: _, - } => block, - Self::FullyAvailable(available_block) => available_block.block(), - } + self.block.as_block() } fn block_cloned(&self) -> Arc> { - match self { - RpcBlock::FullyAvailable(available_block) => available_block.block_cloned(), - RpcBlock::BlockOnly { - block, - block_root: _, - } => block.clone(), - } + self.block.block_cloned() } fn canonical_root(&self) -> Hash256 { - self.as_block().canonical_root() + self.block.block_root() + } +} + +impl AsBlock for LookupBlock { + fn slot(&self) -> Slot { + self.block().slot() + } + fn epoch(&self) -> Epoch { + self.block().epoch() + } + fn parent_root(&self) -> Hash256 { + self.block().parent_root() + } + fn state_root(&self) -> Hash256 { + self.block().state_root() + } + fn signed_block_header(&self) -> SignedBeaconBlockHeader { + self.block().signed_block_header() + } + fn message(&self) -> BeaconBlockRef<'_, E> { + self.block().message() + } + fn as_block(&self) -> &SignedBeaconBlock { + self.block() + } + fn block_cloned(&self) -> Arc> { + self.block_cloned() + } + fn canonical_root(&self) -> Hash256 { + self.block_root } } diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs index e266e02f7f..4372efa809 100644 --- 
a/beacon_node/beacon_chain/src/data_availability_checker.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -891,7 +891,7 @@ impl MaybeAvailableBlock { mod test { use super::*; use crate::CustodyContext; - use crate::block_verification_types::RpcBlock; + use crate::block_verification_types::RangeSyncBlock; use crate::custody_context::NodeCustodyType; use crate::data_column_verification::CustodyDataColumn; use crate::test_utils::{ @@ -1085,7 +1085,7 @@ mod test { /// Regression test for KZG verification truncation bug (https://github.com/sigp/lighthouse/pull/7927) #[test] - fn verify_kzg_for_rpc_blocks_should_not_truncate_data_columns_fulu() { + fn verify_kzg_for_range_sync_blocks_should_not_truncate_data_columns_fulu() { let spec = Arc::new(ForkName::Fulu.make_genesis_spec(E::default_spec())); let mut rng = StdRng::seed_from_u64(0xDEADBEEF0BAD5EEDu64); let da_checker = new_da_checker(spec.clone()); @@ -1128,17 +1128,14 @@ mod test { let block_data = AvailableBlockData::new_with_data_columns(custody_columns); let da_checker = Arc::new(new_da_checker(spec.clone())); - RpcBlock::new(Arc::new(block), Some(block_data), &da_checker, spec.clone()) + RangeSyncBlock::new(Arc::new(block), block_data, &da_checker, spec.clone()) .expect("should create RPC block with custody columns") }) .collect::>(); let available_blocks = blocks_with_columns - .iter() - .filter_map(|block| match block { - RpcBlock::FullyAvailable(available_block) => Some(available_block.clone()), - RpcBlock::BlockOnly { .. 
} => None, - }) + .into_iter() + .map(|block| block.into_available_block()) .collect::>(); // WHEN verifying all blocks together (totalling 256 data columns) diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 4bc5bb21d3..c53c29438e 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -1,5 +1,5 @@ use crate::blob_verification::GossipVerifiedBlob; -use crate::block_verification_types::{AsBlock, AvailableBlockData, RpcBlock}; +use crate::block_verification_types::{AsBlock, AvailableBlockData, LookupBlock, RangeSyncBlock}; use crate::custody_context::NodeCustodyType; use crate::data_availability_checker::DataAvailabilityChecker; use crate::graffiti_calculator::GraffitiSettings; @@ -823,20 +823,20 @@ where mock_builder_server } - pub fn get_head_block(&self) -> RpcBlock { + pub fn get_head_block(&self) -> RangeSyncBlock { let block = self.chain.head_beacon_block(); let block_root = block.canonical_root(); - self.build_rpc_block_from_store_blobs(Some(block_root), block) + self.build_range_sync_block_from_store_blobs(Some(block_root), block) } - pub fn get_full_block(&self, block_root: &Hash256) -> RpcBlock { + pub fn get_full_block(&self, block_root: &Hash256) -> RangeSyncBlock { let block = self .chain .get_blinded_block(block_root) .unwrap() .unwrap_or_else(|| panic!("block root does not exist in harness {block_root:?}")); let full_block = self.chain.store.make_full_block(block_root, block).unwrap(); - self.build_rpc_block_from_store_blobs(Some(*block_root), Arc::new(full_block)) + self.build_range_sync_block_from_store_blobs(Some(*block_root), Arc::new(full_block)) } pub fn get_all_validators(&self) -> Vec { @@ -1340,15 +1340,12 @@ where let signed_block = self.sign_beacon_block(block, state); let block_root = signed_block.canonical_root(); - let rpc_block = RpcBlock::BlockOnly { - block_root, - block: Arc::new(signed_block), - }; + let lookup_block = 
LookupBlock::new(Arc::new(signed_block)); self.chain.slot_clock.set_slot(slot.as_u64()); self.chain .process_block( block_root, - rpc_block, + lookup_block, NotifyExecutionLayer::No, BlockImportSource::Lookup, || Ok(()), @@ -2607,20 +2604,33 @@ where .blob_kzg_commitments() .is_ok_and(|c| !c.is_empty()); let is_available = !has_blob_commitments || blob_items.is_some(); + let block_hash: SignedBeaconBlockHash = if !is_available { + self.chain + .process_block( + block_root, + LookupBlock::new(block), + NotifyExecutionLayer::Yes, + BlockImportSource::Lookup, + || Ok(()), + ) + .await? + .try_into() + .expect("block blobs are available") + } else { + let range_sync_block = self.build_range_sync_block_from_blobs(block, blob_items)?; + self.chain + .process_block( + block_root, + range_sync_block, + NotifyExecutionLayer::Yes, + BlockImportSource::RangeSync, + || Ok(()), + ) + .await? + .try_into() + .expect("block blobs are available") + }; - let rpc_block = self.build_rpc_block_from_blobs(block, blob_items, is_available)?; - let block_hash: SignedBeaconBlockHash = self - .chain - .process_block( - block_root, - rpc_block, - NotifyExecutionLayer::Yes, - BlockImportSource::RangeSync, - || Ok(()), - ) - .await? - .try_into() - .expect("block blobs are available"); self.chain.recompute_head_at_current_slot().await; Ok(block_hash) } @@ -2640,19 +2650,33 @@ where .blob_kzg_commitments() .is_ok_and(|c| !c.is_empty()); let is_available = !has_blob_commitments || blob_items.is_some(); - let rpc_block = self.build_rpc_block_from_blobs(block, blob_items, is_available)?; - let block_hash: SignedBeaconBlockHash = self - .chain - .process_block( - block_root, - rpc_block, - NotifyExecutionLayer::Yes, - BlockImportSource::RangeSync, - || Ok(()), - ) - .await? 
- .try_into() - .expect("block blobs are available"); + let block_hash: SignedBeaconBlockHash = if is_available { + let range_sync_block = self.build_range_sync_block_from_blobs(block, blob_items)?; + self.chain + .process_block( + block_root, + range_sync_block, + NotifyExecutionLayer::Yes, + BlockImportSource::RangeSync, + || Ok(()), + ) + .await? + .try_into() + .expect("block blobs are available") + } else { + self.chain + .process_block( + block_root, + LookupBlock::new(block), + NotifyExecutionLayer::Yes, + BlockImportSource::Lookup, + || Ok(()), + ) + .await? + .try_into() + .expect("block blobs are available") + }; + self.chain.recompute_head_at_current_slot().await; Ok(block_hash) } @@ -2735,13 +2759,13 @@ where state_root } - /// Builds an `Rpc` block from a `SignedBeaconBlock` and blobs or data columns retrieved from + /// Builds a `RangeSyncBlock` from a `SignedBeaconBlock` and blobs or data columns retrieved from /// the database. - pub fn build_rpc_block_from_store_blobs( + pub fn build_range_sync_block_from_store_blobs( &self, block_root: Option, block: Arc>, - ) -> RpcBlock { + ) -> RangeSyncBlock { let block_root = block_root.unwrap_or_else(|| get_block_root(&block)); let has_blobs = block .message() @@ -2749,9 +2773,9 @@ where .blob_kzg_commitments() .is_ok_and(|c| !c.is_empty()); if !has_blobs { - return RpcBlock::new( + return RangeSyncBlock::new( block, - Some(AvailableBlockData::NoData), + AvailableBlockData::NoData, &self.chain.data_availability_checker, self.chain.spec.clone(), ) @@ -2768,9 +2792,9 @@ where .unwrap(); let custody_columns = columns.into_iter().collect::>(); let block_data = AvailableBlockData::new_with_data_columns(custody_columns); - RpcBlock::new( + RangeSyncBlock::new( block, - Some(block_data), + block_data, &self.chain.data_availability_checker, self.chain.spec.clone(), ) @@ -2783,9 +2807,9 @@ where AvailableBlockData::NoData }; - RpcBlock::new( + RangeSyncBlock::new( block, - Some(block_data), + block_data, 
&self.chain.data_availability_checker, self.chain.spec.clone(), ) @@ -2793,18 +2817,17 @@ where } } - /// Builds an `RpcBlock` from a `SignedBeaconBlock` and `BlobsList`. - pub fn build_rpc_block_from_blobs( + /// Builds a `RangeSyncBlock` from a `SignedBeaconBlock` and `BlobsList`. + pub fn build_range_sync_block_from_blobs( &self, block: Arc>>, blob_items: Option<(KzgProofs, BlobsList)>, - is_available: bool, - ) -> Result, BlockError> { + ) -> Result, BlockError> { Ok(if self.spec.is_peer_das_enabled_for_epoch(block.epoch()) { let epoch = block.slot().epoch(E::slots_per_epoch()); let sampling_columns = self.chain.sampling_columns_for_epoch(epoch); - if blob_items.is_some_and(|(_, blobs)| !blobs.is_empty()) { + if blob_items.is_some_and(|(kzg_proofs, _)| !kzg_proofs.is_empty()) { // Note: this method ignores the actual custody columns and just take the first // `sampling_column_count` for testing purpose only, because the chain does not // currently have any knowledge of the columns being custodied. @@ -2812,33 +2835,17 @@ where .into_iter() .filter(|d| sampling_columns.contains(d.index())) .collect::>(); - if is_available { - let block_data = AvailableBlockData::new_with_data_columns(columns); - RpcBlock::new( - block, - Some(block_data), - &self.chain.data_availability_checker, - self.chain.spec.clone(), - )? - } else { - RpcBlock::new( - block, - None, - &self.chain.data_availability_checker, - self.chain.spec.clone(), - )? - } - } else if is_available { - RpcBlock::new( + let block_data = AvailableBlockData::new_with_data_columns(columns); + RangeSyncBlock::new( block, - Some(AvailableBlockData::NoData), + block_data, &self.chain.data_availability_checker, self.chain.spec.clone(), )? } else { - RpcBlock::new( + RangeSyncBlock::new( block, - None, + AvailableBlockData::NoData, &self.chain.data_availability_checker, self.chain.spec.clone(), )? 
@@ -2850,27 +2857,18 @@ where }) .transpose() .unwrap(); - if is_available { - let block_data = if let Some(blobs) = blobs { - AvailableBlockData::new_with_blobs(blobs) - } else { - AvailableBlockData::NoData - }; - - RpcBlock::new( - block, - Some(block_data), - &self.chain.data_availability_checker, - self.chain.spec.clone(), - )? + let block_data = if let Some(blobs) = blobs { + AvailableBlockData::new_with_blobs(blobs) } else { - RpcBlock::new( - block, - None, - &self.chain.data_availability_checker, - self.chain.spec.clone(), - )? - } + AvailableBlockData::NoData + }; + + RangeSyncBlock::new( + block, + block_data, + &self.chain.data_availability_checker, + self.chain.spec.clone(), + )? }) } diff --git a/beacon_node/beacon_chain/tests/attestation_production.rs b/beacon_node/beacon_chain/tests/attestation_production.rs index a1922f32a4..bca60d27cd 100644 --- a/beacon_node/beacon_chain/tests/attestation_production.rs +++ b/beacon_node/beacon_chain/tests/attestation_production.rs @@ -1,7 +1,6 @@ #![cfg(not(debug_assertions))] use beacon_chain::attestation_simulator::produce_unaggregated_attestation; -use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::custody_context::NodeCustodyType; use beacon_chain::test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy}; use beacon_chain::validator_monitor::UNAGGREGATED_ATTESTATION_LAG_SLOTS; @@ -223,19 +222,9 @@ async fn produces_attestations() { assert_eq!(data.target.epoch, state.current_epoch(), "bad target epoch"); assert_eq!(data.target.root, target_root, "bad target root"); - let rpc_block = - harness.build_rpc_block_from_store_blobs(Some(block_root), Arc::new(block.clone())); - - let available_block = match rpc_block { - RpcBlock::FullyAvailable(available_block) => { - chain - .data_availability_checker - .verify_kzg_for_available_block(&available_block) - .unwrap(); - available_block - } - RpcBlock::BlockOnly { .. 
} => panic!("block should be available"), - }; + let range_sync_block = harness + .build_range_sync_block_from_store_blobs(Some(block_root), Arc::new(block.clone())); + let available_block = range_sync_block.into_available_block(); let early_attestation = { let proto_block = chain @@ -292,20 +281,12 @@ async fn early_attester_cache_old_request() { .get_block(&head.beacon_block_root) .unwrap(); - let rpc_block = harness - .build_rpc_block_from_store_blobs(Some(head.beacon_block_root), head.beacon_block.clone()); - - let available_block = match rpc_block { - RpcBlock::FullyAvailable(available_block) => { - harness - .chain - .data_availability_checker - .verify_kzg_for_available_block(&available_block) - .unwrap(); - available_block - } - RpcBlock::BlockOnly { .. } => panic!("block should be available"), - }; + let available_block = harness + .build_range_sync_block_from_store_blobs( + Some(head.beacon_block_root), + head.beacon_block.clone(), + ) + .into_available_block(); harness .chain diff --git a/beacon_node/beacon_chain/tests/blob_verification.rs b/beacon_node/beacon_chain/tests/blob_verification.rs index ee61177b2a..0ee9a7dba6 100644 --- a/beacon_node/beacon_chain/tests/blob_verification.rs +++ b/beacon_node/beacon_chain/tests/blob_verification.rs @@ -5,7 +5,7 @@ use beacon_chain::test_utils::{ }; use beacon_chain::{ AvailabilityProcessingStatus, BlockError, ChainConfig, InvalidSignature, NotifyExecutionLayer, - block_verification_types::AsBlock, + block_verification_types::{AsBlock, LookupBlock}, }; use bls::{Keypair, Signature}; use logging::create_test_tracing_subscriber; @@ -76,14 +76,11 @@ async fn rpc_blobs_with_invalid_header_signature() { // Process the block without blobs so that it doesn't become available. 
harness.advance_slot(); - let rpc_block = harness - .build_rpc_block_from_blobs(signed_block.clone(), None, false) - .unwrap(); let availability = harness .chain .process_block( block_root, - rpc_block, + LookupBlock::new(signed_block.clone()), NotifyExecutionLayer::Yes, BlockImportSource::Lookup, || Ok(()), diff --git a/beacon_node/beacon_chain/tests/block_verification.rs b/beacon_node/beacon_chain/tests/block_verification.rs index e385e0dc48..8981b20a55 100644 --- a/beacon_node/beacon_chain/tests/block_verification.rs +++ b/beacon_node/beacon_chain/tests/block_verification.rs @@ -1,6 +1,6 @@ #![cfg(not(debug_assertions))] // TODO(gloas) we probably need similar test for payload envelope verification -use beacon_chain::block_verification_types::{AsBlock, ExecutedBlock, RpcBlock}; +use beacon_chain::block_verification_types::{AsBlock, ExecutedBlock, LookupBlock, RangeSyncBlock}; use beacon_chain::data_availability_checker::{AvailabilityCheckError, AvailableBlockData}; use beacon_chain::data_column_verification::CustodyDataColumn; use beacon_chain::{ @@ -13,7 +13,7 @@ use beacon_chain::{ }; use beacon_chain::{ BeaconSnapshot, BlockError, ChainConfig, ChainSegmentResult, IntoExecutionPendingBlock, - InvalidSignature, NotifyExecutionLayer, signature_verify_chain_segment, + InvalidSignature, NotifyExecutionLayer, }; use bls::{AggregateSignature, Keypair, Signature}; use fixed_bytes::FixedBytesExtended; @@ -136,7 +136,7 @@ fn chain_segment_blocks( chain_segment: &[BeaconSnapshot], chain_segment_sidecars: &[Option>], chain: Arc>, -) -> Vec> +) -> Vec> where T: BeaconChainTypes, { @@ -145,25 +145,25 @@ where .zip(chain_segment_sidecars.iter()) .map(|(snapshot, data_sidecars)| { let block = snapshot.beacon_block.clone(); - build_rpc_block(block, data_sidecars, chain.clone()) + build_range_sync_block(block, data_sidecars, chain.clone()) }) .collect() } -fn build_rpc_block( +fn build_range_sync_block( block: Arc>, data_sidecars: &Option>, chain: Arc>, -) -> RpcBlock +) -> 
RangeSyncBlock where T: BeaconChainTypes, { match data_sidecars { Some(DataSidecars::Blobs(blobs)) => { let block_data = AvailableBlockData::new_with_blobs(blobs.clone()); - RpcBlock::new( + RangeSyncBlock::new( block, - Some(block_data), + block_data, &chain.data_availability_checker, chain.spec.clone(), ) @@ -176,17 +176,17 @@ where .map(|c| c.as_data_column().clone()) .collect::>(), ); - RpcBlock::new( + RangeSyncBlock::new( block, - Some(block_data), + block_data, &chain.data_availability_checker, chain.spec.clone(), ) .unwrap() } - None => RpcBlock::new( + None => RangeSyncBlock::new( block, - Some(AvailableBlockData::NoData), + AvailableBlockData::NoData, &chain.data_availability_checker, chain.spec.clone(), ) @@ -301,7 +301,7 @@ fn update_data_column_signed_header( async fn chain_segment_full_segment() { let harness = get_harness(VALIDATOR_COUNT, NodeCustodyType::Fullnode); let (chain_segment, chain_segment_blobs) = get_chain_segment().await; - let blocks: Vec> = + let blocks: Vec> = chain_segment_blocks(&chain_segment, &chain_segment_blobs, harness.chain.clone()) .into_iter() .collect(); @@ -339,7 +339,7 @@ async fn chain_segment_full_segment() { async fn chain_segment_varying_chunk_size() { let (chain_segment, chain_segment_blobs) = get_chain_segment().await; let harness = get_harness(VALIDATOR_COUNT, NodeCustodyType::Fullnode); - let blocks: Vec> = + let blocks: Vec> = chain_segment_blocks(&chain_segment, &chain_segment_blobs, harness.chain.clone()) .into_iter() .collect(); @@ -384,7 +384,7 @@ async fn chain_segment_non_linear_parent_roots() { /* * Test with a block removed. */ - let mut blocks: Vec> = + let mut blocks: Vec> = chain_segment_blocks(&chain_segment, &chain_segment_blobs, harness.chain.clone()) .into_iter() .collect(); @@ -405,7 +405,7 @@ async fn chain_segment_non_linear_parent_roots() { /* * Test with a modified parent root. 
*/ - let mut blocks: Vec> = + let mut blocks: Vec> = chain_segment_blocks(&chain_segment, &chain_segment_blobs, harness.chain.clone()) .into_iter() .collect(); @@ -413,9 +413,9 @@ async fn chain_segment_non_linear_parent_roots() { let (mut block, signature) = blocks[3].as_block().clone().deconstruct(); *block.parent_root_mut() = Hash256::zero(); - blocks[3] = RpcBlock::new( + blocks[3] = RangeSyncBlock::new( Arc::new(SignedBeaconBlock::from_block(block, signature)), - blocks[3].block_data().cloned(), + blocks[3].block_data().clone(), &harness.chain.data_availability_checker, harness.spec.clone(), ) @@ -447,15 +447,15 @@ async fn chain_segment_non_linear_slots() { * Test where a child is lower than the parent. */ - let mut blocks: Vec> = + let mut blocks: Vec> = chain_segment_blocks(&chain_segment, &chain_segment_blobs, harness.chain.clone()) .into_iter() .collect(); let (mut block, signature) = blocks[3].as_block().clone().deconstruct(); *block.slot_mut() = Slot::new(0); - blocks[3] = RpcBlock::new( + blocks[3] = RangeSyncBlock::new( Arc::new(SignedBeaconBlock::from_block(block, signature)), - blocks[3].block_data().cloned(), + blocks[3].block_data().clone(), &harness.chain.data_availability_checker, harness.spec.clone(), ) @@ -477,15 +477,15 @@ async fn chain_segment_non_linear_slots() { * Test where a child is equal to the parent. 
*/ - let mut blocks: Vec> = + let mut blocks: Vec> = chain_segment_blocks(&chain_segment, &chain_segment_blobs, harness.chain.clone()) .into_iter() .collect(); let (mut block, signature) = blocks[3].as_block().clone().deconstruct(); *block.slot_mut() = blocks[2].slot(); - blocks[3] = RpcBlock::new( + blocks[3] = RangeSyncBlock::new( Arc::new(SignedBeaconBlock::from_block(block, signature)), - blocks[3].block_data().cloned(), + blocks[3].block_data().clone(), &harness.chain.data_availability_checker, harness.chain.spec.clone(), ) @@ -512,11 +512,11 @@ async fn assert_invalid_signature( snapshots: &[BeaconSnapshot], item: &str, ) { - let blocks: Vec> = snapshots + let blocks: Vec> = snapshots .iter() .zip(chain_segment_blobs.iter()) .map(|(snapshot, blobs)| { - build_rpc_block(snapshot.beacon_block.clone(), blobs, harness.chain.clone()) + build_range_sync_block(snapshot.beacon_block.clone(), blobs, harness.chain.clone()) }) .collect(); @@ -543,7 +543,7 @@ async fn assert_invalid_signature( .take(block_index) .zip(chain_segment_blobs.iter()) .map(|(snapshot, blobs)| { - build_rpc_block(snapshot.beacon_block.clone(), blobs, harness.chain.clone()) + build_range_sync_block(snapshot.beacon_block.clone(), blobs, harness.chain.clone()) }) .collect(); // We don't care if this fails, we just call this to ensure that all prior blocks have been @@ -558,7 +558,7 @@ async fn assert_invalid_signature( .chain .process_block( snapshots[block_index].beacon_block.canonical_root(), - build_rpc_block( + build_range_sync_block( snapshots[block_index].beacon_block.clone(), &chain_segment_blobs[block_index], harness.chain.clone(), @@ -620,7 +620,7 @@ async fn invalid_signature_gossip_block() { .take(block_index) .zip(chain_segment_blobs.iter()) .map(|(snapshot, blobs)| { - build_rpc_block(snapshot.beacon_block.clone(), blobs, harness.chain.clone()) + build_range_sync_block(snapshot.beacon_block.clone(), blobs, harness.chain.clone()) }) .collect(); harness @@ -630,18 +630,12 @@ async fn 
invalid_signature_gossip_block() { .into_block_error() .expect("should import all blocks prior to the one being tested"); let signed_block = SignedBeaconBlock::from_block(block, junk_signature()); - let rpc_block = RpcBlock::new( - Arc::new(signed_block), - None, - &harness.chain.data_availability_checker, - harness.spec.clone(), - ) - .unwrap(); + let lookup_block = LookupBlock::new(Arc::new(signed_block)); let process_res = harness .chain .process_block( - rpc_block.block_root(), - rpc_block, + lookup_block.block_root(), + lookup_block, NotifyExecutionLayer::Yes, BlockImportSource::Lookup, || Ok(()), @@ -675,11 +669,11 @@ async fn invalid_signature_block_proposal() { block.clone(), junk_signature(), )); - let blocks: Vec> = snapshots + let blocks: Vec> = snapshots .iter() .zip(chain_segment_blobs.iter()) .map(|(snapshot, blobs)| { - build_rpc_block(snapshot.beacon_block.clone(), blobs, harness.chain.clone()) + build_range_sync_block(snapshot.beacon_block.clone(), blobs, harness.chain.clone()) }) .collect::>(); // Ensure the block will be rejected if imported in a chain segment. @@ -994,11 +988,11 @@ async fn invalid_signature_deposit() { Arc::new(SignedBeaconBlock::from_block(block, signature)); update_parent_roots(&mut snapshots, &mut chain_segment_blobs); update_proposal_signatures(&mut snapshots, &harness); - let blocks: Vec> = snapshots + let blocks: Vec> = snapshots .iter() .zip(chain_segment_blobs.iter()) .map(|(snapshot, blobs)| { - build_rpc_block(snapshot.beacon_block.clone(), blobs, harness.chain.clone()) + build_range_sync_block(snapshot.beacon_block.clone(), blobs, harness.chain.clone()) }) .collect(); assert!( @@ -1641,9 +1635,9 @@ async fn add_base_block_to_altair_chain() { )); // Ensure that it would be impossible to import via `BeaconChain::process_block`. 
- let base_rpc_block = RpcBlock::new( + let base_range_sync_block = RangeSyncBlock::new( Arc::new(base_block.clone()), - None, + AvailableBlockData::NoData, &harness.chain.data_availability_checker, harness.spec.clone(), ) @@ -1652,8 +1646,8 @@ async fn add_base_block_to_altair_chain() { harness .chain .process_block( - base_rpc_block.block_root(), - base_rpc_block, + base_range_sync_block.block_root(), + base_range_sync_block, NotifyExecutionLayer::Yes, BlockImportSource::Lookup, || Ok(()), @@ -1672,9 +1666,9 @@ async fn add_base_block_to_altair_chain() { .chain .process_chain_segment( vec![ - RpcBlock::new( + RangeSyncBlock::new( Arc::new(base_block), - None, + AvailableBlockData::NoData, &harness.chain.data_availability_checker, harness.spec.clone() ) @@ -1792,19 +1786,13 @@ async fn add_altair_block_to_base_chain() { )); // Ensure that it would be impossible to import via `BeaconChain::process_block`. - let altair_rpc_block = RpcBlock::new( - Arc::new(altair_block.clone()), - None, - &harness.chain.data_availability_checker, - harness.spec.clone(), - ) - .unwrap(); + let altair_lookup_block = LookupBlock::new(Arc::new(altair_block.clone())); assert!(matches!( harness .chain .process_block( - altair_rpc_block.block_root(), - altair_rpc_block, + altair_lookup_block.block_root(), + altair_lookup_block, NotifyExecutionLayer::Yes, BlockImportSource::Lookup, || Ok(()), @@ -1823,9 +1811,9 @@ async fn add_altair_block_to_base_chain() { .chain .process_chain_segment( vec![ - RpcBlock::new( + RangeSyncBlock::new( Arc::new(altair_block), - None, + AvailableBlockData::NoData, &harness.chain.data_availability_checker, harness.spec.clone() ) @@ -1891,18 +1879,18 @@ async fn import_duplicate_block_unrealized_justification() { // Create two verified variants of the block, representing the same block being processed in // parallel. 
let notify_execution_layer = NotifyExecutionLayer::Yes; - let rpc_block = RpcBlock::new( + let range_sync_block = RangeSyncBlock::new( block.clone(), - Some(AvailableBlockData::NoData), + AvailableBlockData::NoData, &harness.chain.data_availability_checker, harness.spec.clone(), ) .unwrap(); - let verified_block1 = rpc_block + let verified_block1 = range_sync_block .clone() .into_execution_pending_block(block_root, chain, notify_execution_layer) .unwrap(); - let verified_block2 = rpc_block + let verified_block2 = range_sync_block .into_execution_pending_block(block_root, chain, notify_execution_layer) .unwrap(); @@ -1972,48 +1960,9 @@ async fn import_execution_pending_block( } } -// Test that `signature_verify_chain_segment` errors with a chain segment of mixed `FullyAvailable` -// and `BlockOnly` RpcBlocks. This situation should never happen in production. -#[tokio::test] -async fn signature_verify_mixed_rpc_block_variants() { - let (snapshots, data_sidecars) = get_chain_segment().await; - let snapshots: Vec<_> = snapshots.into_iter().take(10).collect(); - let data_sidecars: Vec<_> = data_sidecars.into_iter().take(10).collect(); - - let harness = get_harness(VALIDATOR_COUNT, NodeCustodyType::Fullnode); - - let mut chain_segment = Vec::new(); - - for (i, (snapshot, blobs)) in snapshots.iter().zip(data_sidecars.iter()).enumerate() { - let block = snapshot.beacon_block.clone(); - let block_root = snapshot.beacon_block_root; - - // Alternate between FullyAvailable and BlockOnly - let rpc_block = if i % 2 == 0 { - // FullyAvailable - with blobs/columns if needed - build_rpc_block(block, blobs, harness.chain.clone()) - } else { - // BlockOnly - no data - RpcBlock::new( - block, - None, - &harness.chain.data_availability_checker, - harness.chain.spec.clone(), - ) - .unwrap() - }; - - chain_segment.push((block_root, rpc_block)); - } - - // This should error because `signature_verify_chain_segment` expects a list - // of `RpcBlock::FullyAvailable`. 
- assert!(signature_verify_chain_segment(chain_segment.clone(), &harness.chain).is_err()); -} - // Test that RpcBlock::new() rejects blocks when blob count doesn't match expected. #[tokio::test] -async fn rpc_block_construction_fails_with_wrong_blob_count() { +async fn range_sync_block_construction_fails_with_wrong_blob_count() { let spec = test_spec::(); if !spec.fork_name_at_slot::(Slot::new(0)).deneb_enabled() @@ -2064,9 +2013,9 @@ async fn rpc_block_construction_fails_with_wrong_blob_count() { let block_data = AvailableBlockData::new_with_blobs(wrong_blobs); // Try to create RpcBlock with wrong blob count - let result = RpcBlock::new( + let result = RangeSyncBlock::new( Arc::new(block), - Some(block_data), + block_data, &harness.chain.data_availability_checker, harness.chain.spec.clone(), ); @@ -2086,7 +2035,7 @@ async fn rpc_block_construction_fails_with_wrong_blob_count() { // Test that RpcBlock::new() rejects blocks when custody columns are incomplete. #[tokio::test] -async fn rpc_block_rejects_missing_custody_columns() { +async fn range_sync_block_rejects_missing_custody_columns() { let spec = test_spec::(); if !spec.fork_name_at_slot::(Slot::new(0)).fulu_enabled() { @@ -2139,9 +2088,9 @@ async fn rpc_block_rejects_missing_custody_columns() { let block_data = AvailableBlockData::new_with_data_columns(incomplete_columns); // Try to create RpcBlock with incomplete custody columns - let result = RpcBlock::new( + let result = RangeSyncBlock::new( Arc::new(block), - Some(block_data), + block_data, &harness.chain.data_availability_checker, harness.chain.spec.clone(), ); @@ -2227,9 +2176,9 @@ async fn rpc_block_allows_construction_past_da_boundary() { // Try to create RpcBlock with NoData for a block past DA boundary // This should succeed since columns are not expected for blocks past DA boundary - let result = RpcBlock::new( + let result = RangeSyncBlock::new( Arc::new(block), - Some(AvailableBlockData::NoData), + AvailableBlockData::NoData, 
&harness.chain.data_availability_checker, harness.chain.spec.clone(), ); diff --git a/beacon_node/beacon_chain/tests/column_verification.rs b/beacon_node/beacon_chain/tests/column_verification.rs index 9941c957e2..6114bd7f45 100644 --- a/beacon_node/beacon_chain/tests/column_verification.rs +++ b/beacon_node/beacon_chain/tests/column_verification.rs @@ -7,7 +7,7 @@ use beacon_chain::test_utils::{ }; use beacon_chain::{ AvailabilityProcessingStatus, BlockError, ChainConfig, InvalidSignature, NotifyExecutionLayer, - block_verification_types::AsBlock, + block_verification_types::{AsBlock, LookupBlock}, }; use bls::{Keypair, Signature}; use logging::create_test_tracing_subscriber; @@ -80,16 +80,13 @@ async fn rpc_columns_with_invalid_header_signature() { // Process the block without blobs so that it doesn't become available. harness.advance_slot(); - let rpc_block = harness - .build_rpc_block_from_blobs(signed_block.clone(), None, false) - .unwrap(); let availability = harness .chain .process_block( block_root, - rpc_block, + LookupBlock::new(signed_block.clone()), NotifyExecutionLayer::Yes, - BlockImportSource::RangeSync, + BlockImportSource::Lookup, || Ok(()), ) .await @@ -169,16 +166,13 @@ async fn verify_header_signature_fork_block_bug() { // The block will be accepted but won't become the head because it's not fully available. // This keeps the head at the pre-fork state (Electra). 
harness.advance_slot(); - let rpc_block = harness - .build_rpc_block_from_blobs(signed_block.clone(), None, false) - .expect("Should build RPC block"); let availability = harness .chain .process_block( block_root, - rpc_block, + LookupBlock::new(signed_block.clone()), NotifyExecutionLayer::Yes, - BlockImportSource::RangeSync, + BlockImportSource::Lookup, || Ok(()), ) .await diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index bcc50990ec..3ed8f59838 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -1,7 +1,7 @@ #![cfg(not(debug_assertions))] #![allow(clippy::result_large_err)] -use beacon_chain::block_verification_types::RpcBlock; +use beacon_chain::block_verification_types::LookupBlock; use beacon_chain::{ BeaconChainError, BlockError, ChainConfig, ExecutionPayloadError, INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, NotifyExecutionLayer, StateSkipConfig, @@ -686,19 +686,13 @@ async fn invalidates_all_descendants() { assert_eq!(fork_parent_state.slot(), fork_parent_slot); let ((fork_block, _), _fork_post_state) = rig.harness.make_block(fork_parent_state, fork_slot).await; - let fork_rpc_block = RpcBlock::new( - fork_block.clone(), - None, - &rig.harness.chain.data_availability_checker, - rig.harness.chain.spec.clone(), - ) - .unwrap(); + let fork_lookup_block = LookupBlock::new(fork_block.clone()); let fork_block_root = rig .harness .chain .process_block( - fork_rpc_block.block_root(), - fork_rpc_block, + fork_lookup_block.block_root(), + fork_lookup_block, NotifyExecutionLayer::Yes, BlockImportSource::Lookup, || Ok(()), @@ -796,19 +790,13 @@ async fn switches_heads() { let ((fork_block, _), _fork_post_state) = rig.harness.make_block(fork_parent_state, fork_slot).await; let fork_parent_root = fork_block.parent_root(); - let fork_rpc_block = RpcBlock::new( - fork_block.clone(), - None, - 
&rig.harness.chain.data_availability_checker, - rig.harness.chain.spec.clone(), - ) - .unwrap(); + let fork_lookup_block = LookupBlock::new(fork_block.clone()); let fork_block_root = rig .harness .chain .process_block( - fork_rpc_block.block_root(), - fork_rpc_block, + fork_lookup_block.block_root(), + fork_lookup_block, NotifyExecutionLayer::Yes, BlockImportSource::Lookup, || Ok(()), @@ -1086,15 +1074,9 @@ async fn invalid_parent() { )); // Ensure the block built atop an invalid payload is invalid for import. - let rpc_block = RpcBlock::new( - block.clone(), - None, - &rig.harness.chain.data_availability_checker, - rig.harness.chain.spec.clone(), - ) - .unwrap(); + let lookup_block = LookupBlock::new(block.clone()); assert!(matches!( - rig.harness.chain.process_block(rpc_block.block_root(), rpc_block, NotifyExecutionLayer::Yes, BlockImportSource::Lookup, + rig.harness.chain.process_block(lookup_block.block_root(), lookup_block, NotifyExecutionLayer::Yes, BlockImportSource::Lookup, || Ok(()), ).await, Err(BlockError::ParentExecutionPayloadInvalid { parent_root: invalid_root }) @@ -1348,18 +1330,12 @@ async fn recover_from_invalid_head_by_importing_blocks() { } = InvalidHeadSetup::new().await; // Import the fork block, it should become the head. 
- let fork_rpc_block = RpcBlock::new( - fork_block.clone(), - None, - &rig.harness.chain.data_availability_checker, - rig.harness.chain.spec.clone(), - ) - .unwrap(); + let fork_lookup_block = LookupBlock::new(fork_block.clone()); rig.harness .chain .process_block( - fork_rpc_block.block_root(), - fork_rpc_block, + fork_lookup_block.block_root(), + fork_lookup_block, NotifyExecutionLayer::Yes, BlockImportSource::Lookup, || Ok(()), diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index a70ad89ca9..89c28cca37 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -2,7 +2,7 @@ #![allow(clippy::result_large_err)] use beacon_chain::attestation_verification::Error as AttnError; -use beacon_chain::block_verification_types::RpcBlock; +use beacon_chain::block_verification_types::LookupBlock; use beacon_chain::builder::BeaconChainBuilder; use beacon_chain::custody_context::CUSTODY_CHANGE_DA_EFFECTIVE_DELAY_SECONDS; use beacon_chain::data_availability_checker::AvailableBlock; @@ -3144,7 +3144,10 @@ async fn weak_subjectivity_sync_test( beacon_chain .process_block( full_block_root, - harness.build_rpc_block_from_store_blobs(Some(block_root), Arc::new(full_block)), + harness.build_range_sync_block_from_store_blobs( + Some(block_root), + Arc::new(full_block), + ), NotifyExecutionLayer::Yes, BlockImportSource::Lookup, || Ok(()), @@ -3214,20 +3217,16 @@ async fn weak_subjectivity_sync_test( .expect("should get block") .expect("should get block"); - let rpc_block = - harness.build_rpc_block_from_store_blobs(Some(block_root), Arc::new(full_block)); + let range_sync_block = harness + .build_range_sync_block_from_store_blobs(Some(block_root), Arc::new(full_block)); - match rpc_block { - RpcBlock::FullyAvailable(available_block) => { - harness - .chain - .data_availability_checker - .verify_kzg_for_available_block(&available_block) - .expect("should verify kzg"); - 
available_blocks.push(available_block); - } - RpcBlock::BlockOnly { .. } => panic!("Should be an available block"), - } + let fully_available_block = range_sync_block.into_available_block(); + harness + .chain + .data_availability_checker + .verify_kzg_for_available_block(&fully_available_block) + .expect("should verify kzg"); + available_blocks.push(fully_available_block); } // Corrupt the signature on the 1st block to ensure that the backfill processor is checking @@ -3798,19 +3797,13 @@ async fn process_blocks_and_attestations_for_unaligned_checkpoint() { assert_eq!(split.block_root, valid_fork_block.parent_root()); assert_ne!(split.state_root, unadvanced_split_state_root); - let invalid_fork_rpc_block = RpcBlock::new( - invalid_fork_block.clone(), - None, - &harness.chain.data_availability_checker, - harness.spec.clone(), - ) - .unwrap(); + let invalid_fork_lookup_block = LookupBlock::new(invalid_fork_block.clone()); // Applying the invalid block should fail. let err = harness .chain .process_block( - invalid_fork_rpc_block.block_root(), - invalid_fork_rpc_block, + invalid_fork_lookup_block.block_root(), + invalid_fork_lookup_block, NotifyExecutionLayer::Yes, BlockImportSource::Lookup, || Ok(()), @@ -3820,18 +3813,12 @@ async fn process_blocks_and_attestations_for_unaligned_checkpoint() { assert!(matches!(err, BlockError::WouldRevertFinalizedSlot { .. })); // Applying the valid block should succeed, but it should not become head. 
- let valid_fork_rpc_block = RpcBlock::new( - valid_fork_block.clone(), - None, - &harness.chain.data_availability_checker, - harness.spec.clone(), - ) - .unwrap(); + let valid_fork_lookup_block = LookupBlock::new(valid_fork_block.clone()); harness .chain .process_block( - valid_fork_rpc_block.block_root(), - valid_fork_rpc_block, + valid_fork_lookup_block.block_root(), + valid_fork_lookup_block, NotifyExecutionLayer::Yes, BlockImportSource::Lookup, || Ok(()), diff --git a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs index bbf92a4dda..43dfbeb836 100644 --- a/beacon_node/http_api/src/publish_blocks.rs +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -2,7 +2,7 @@ use crate::metrics; use std::future::Future; use beacon_chain::blob_verification::{GossipBlobError, GossipVerifiedBlob}; -use beacon_chain::block_verification_types::{AsBlock, RpcBlock}; +use beacon_chain::block_verification_types::{AsBlock, LookupBlock}; use beacon_chain::data_column_verification::GossipVerifiedDataColumn; use beacon_chain::validator_monitor::{get_block_delay_ms, timestamp_now}; use beacon_chain::{ @@ -311,19 +311,11 @@ pub async fn publish_block>( slot = %block.slot(), "Block previously seen" ); - let Ok(rpc_block) = RpcBlock::new( - block.clone(), - None, - &chain.data_availability_checker, - chain.spec.clone(), - ) else { - return Err(warp_utils::reject::custom_bad_request( - "Unable to construct rpc block".to_string(), - )); - }; + // try to reprocess as a lookup (single) block and let sync take care of missing components + let lookup_block = LookupBlock::new(block.clone()); let import_result = Box::pin(chain.process_block( block_root, - rpc_block, + lookup_block, NotifyExecutionLayer::Yes, BlockImportSource::HttpApi, publish_fn, diff --git a/beacon_node/network/src/network_beacon_processor/mod.rs b/beacon_node/network/src/network_beacon_processor/mod.rs index 357d6c08fd..e40eacce08 100644 --- 
a/beacon_node/network/src/network_beacon_processor/mod.rs +++ b/beacon_node/network/src/network_beacon_processor/mod.rs @@ -1,7 +1,8 @@ use crate::sync::manager::BlockProcessType; use crate::{service::NetworkMessage, sync::manager::SyncMessage}; use beacon_chain::blob_verification::{GossipBlobError, observe_gossip_blob}; -use beacon_chain::block_verification_types::RpcBlock; +use beacon_chain::block_verification_types::LookupBlock; +use beacon_chain::block_verification_types::RangeSyncBlock; use beacon_chain::data_column_verification::{GossipDataColumnError, observe_gossip_data_column}; use beacon_chain::fetch_blobs::{ EngineGetBlobsOutput, FetchEngineBlobError, fetch_and_process_engine_blobs, @@ -517,14 +518,14 @@ impl NetworkBeaconProcessor { /// Create a new `Work` event for some block, where the result from computation (if any) is /// sent to the other side of `result_tx`. - pub fn send_rpc_beacon_block( + pub fn send_lookup_beacon_block( self: &Arc, block_root: Hash256, - block: RpcBlock, + block: LookupBlock, seen_timestamp: Duration, process_type: BlockProcessType, ) -> Result<(), Error> { - let process_fn = self.clone().generate_rpc_beacon_block_process_fn( + let process_fn = self.clone().generate_lookup_beacon_block_process_fn( block_root, block, seen_timestamp, @@ -610,7 +611,7 @@ impl NetworkBeaconProcessor { pub fn send_chain_segment( self: &Arc, process_id: ChainSegmentProcessId, - blocks: Vec>, + blocks: Vec>, ) -> Result<(), Error> { debug!(blocks = blocks.len(), id = ?process_id, "Batch sending for process"); let processor = self.clone(); diff --git a/beacon_node/network/src/network_beacon_processor/sync_methods.rs b/beacon_node/network/src/network_beacon_processor/sync_methods.rs index 629a42c688..f7fbce8e56 100644 --- a/beacon_node/network/src/network_beacon_processor/sync_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/sync_methods.rs @@ -6,7 +6,8 @@ use crate::sync::{ ChainId, manager::{BlockProcessType, SyncMessage}, }; -use 
beacon_chain::block_verification_types::{AsBlock, RpcBlock}; +use beacon_chain::block_verification_types::LookupBlock; +use beacon_chain::block_verification_types::{AsBlock, RangeSyncBlock}; use beacon_chain::data_availability_checker::AvailabilityCheckError; use beacon_chain::historical_data_columns::HistoricalDataColumnError; use beacon_chain::{ @@ -51,16 +52,16 @@ impl NetworkBeaconProcessor { /// /// This separate function was required to prevent a cycle during compiler /// type checking. - pub fn generate_rpc_beacon_block_process_fn( + pub fn generate_lookup_beacon_block_process_fn( self: Arc, block_root: Hash256, - block: RpcBlock, + block: LookupBlock, seen_timestamp: Duration, process_type: BlockProcessType, ) -> AsyncFn { let process_fn = async move { let duplicate_cache = self.duplicate_cache.clone(); - self.process_rpc_block( + self.process_lookup_block( block_root, block, seen_timestamp, @@ -73,15 +74,15 @@ impl NetworkBeaconProcessor { } /// Returns the `process_fn` and `ignore_fn` required when requeuing an RPC block. - pub fn generate_rpc_beacon_block_fns( + pub fn generate_lookup_beacon_block_fns( self: Arc, block_root: Hash256, - block: RpcBlock, + block: LookupBlock, seen_timestamp: Duration, process_type: BlockProcessType, ) -> (AsyncFn, BlockingFn) { // An async closure which will import the block. 
- let process_fn = self.clone().generate_rpc_beacon_block_process_fn( + let process_fn = self.clone().generate_lookup_beacon_block_process_fn( block_root, block, seen_timestamp, @@ -107,10 +108,10 @@ impl NetworkBeaconProcessor { skip_all, fields(?block_root), )] - pub async fn process_rpc_block( + pub async fn process_lookup_block( self: Arc>, block_root: Hash256, - block: RpcBlock, + block: LookupBlock, seen_timestamp: Duration, process_type: BlockProcessType, duplicate_cache: DuplicateCache, @@ -118,14 +119,14 @@ impl NetworkBeaconProcessor { // Check if the block is already being imported through another source let Some(handle) = duplicate_cache.check_and_insert(block_root) else { debug!( - action = "sending rpc block to reprocessing queue", + action = "sending lookup block to reprocessing queue", %block_root, ?process_type, "Gossip block is being processed" ); // Send message to work reprocess queue to retry the block - let (process_fn, ignore_fn) = self.clone().generate_rpc_beacon_block_fns( + let (process_fn, ignore_fn) = self.clone().generate_lookup_beacon_block_fns( block_root, block, seen_timestamp, @@ -160,7 +161,7 @@ impl NetworkBeaconProcessor { slot = %block.slot(), commitments_formatted, ?process_type, - "Processing RPC block" + "Processing Lookup block" ); let signed_beacon_block = block.block_cloned(); @@ -530,7 +531,7 @@ impl NetworkBeaconProcessor { pub async fn process_chain_segment( &self, process_id: ChainSegmentProcessId, - downloaded_blocks: Vec>, + downloaded_blocks: Vec>, ) { let ChainSegmentProcessId::RangeBatchId(chain_id, epoch) = process_id else { // This is a request from range sync, this should _never_ happen @@ -611,7 +612,7 @@ impl NetworkBeaconProcessor { pub fn process_chain_segment_backfill( &self, process_id: ChainSegmentProcessId, - downloaded_blocks: Vec>, + downloaded_blocks: Vec>, ) { let ChainSegmentProcessId::BackSyncBatchId(epoch) = process_id else { // this a request from RangeSync, this should _never_ happen @@ -682,7 
+683,7 @@ impl NetworkBeaconProcessor { #[instrument(skip_all)] async fn process_blocks<'a>( &self, - downloaded_blocks: impl Iterator>, + downloaded_blocks: impl Iterator>, notify_execution_layer: NotifyExecutionLayer, ) -> (usize, Result<(), ChainSegmentFailed>) { let blocks: Vec<_> = downloaded_blocks.cloned().collect(); @@ -716,23 +717,13 @@ impl NetworkBeaconProcessor { #[instrument(skip_all)] fn process_backfill_blocks( &self, - downloaded_blocks: Vec>, + downloaded_blocks: Vec>, ) -> (usize, Result<(), ChainSegmentFailed>) { let total_blocks = downloaded_blocks.len(); - let mut available_blocks = vec![]; - - for downloaded_block in downloaded_blocks { - match downloaded_block { - RpcBlock::FullyAvailable(available_block) => available_blocks.push(available_block), - RpcBlock::BlockOnly { .. } => return ( - 0, - Err(ChainSegmentFailed { - peer_action: None, - message: "Invalid downloaded_blocks segment. All downloaded blocks must be fully available".to_string() - }) - ), - } - } + let available_blocks = downloaded_blocks + .into_iter() + .map(|block| block.into_available_block()) + .collect::>(); match self .chain diff --git a/beacon_node/network/src/network_beacon_processor/tests.rs b/beacon_node/network/src/network_beacon_processor/tests.rs index 4b0ca0d46c..5fa8c729cb 100644 --- a/beacon_node/network/src/network_beacon_processor/tests.rs +++ b/beacon_node/network/src/network_beacon_processor/tests.rs @@ -8,7 +8,7 @@ use crate::{ service::NetworkMessage, sync::{SyncMessage, manager::BlockProcessType}, }; -use beacon_chain::block_verification_types::RpcBlock; +use beacon_chain::block_verification_types::LookupBlock; use beacon_chain::custody_context::NodeCustodyType; use beacon_chain::data_column_verification::validate_data_column_sidecar_for_gossip_fulu; use beacon_chain::kzg_utils::blobs_to_data_column_sidecars; @@ -437,36 +437,24 @@ impl TestRig { } } - pub fn enqueue_rpc_block(&self) { + pub fn enqueue_lookup_block(&self) { let block_root = 
self.next_block.canonical_root(); self.network_beacon_processor - .send_rpc_beacon_block( + .send_lookup_beacon_block( block_root, - RpcBlock::new( - self.next_block.clone(), - None, - &self._harness.chain.data_availability_checker, - self._harness.spec.clone(), - ) - .unwrap(), + LookupBlock::new(self.next_block.clone()), std::time::Duration::default(), BlockProcessType::SingleBlock { id: 0 }, ) .unwrap(); } - pub fn enqueue_single_lookup_rpc_block(&self) { + pub fn enqueue_single_lookup_block(&self) { let block_root = self.next_block.canonical_root(); self.network_beacon_processor - .send_rpc_beacon_block( + .send_lookup_beacon_block( block_root, - RpcBlock::new( - self.next_block.clone(), - None, - &self._harness.chain.data_availability_checker, - self._harness.spec.clone(), - ) - .unwrap(), + LookupBlock::new(self.next_block.clone()), std::time::Duration::default(), BlockProcessType::SingleBlock { id: 1 }, ) @@ -1305,7 +1293,7 @@ async fn attestation_to_unknown_block_processed(import_method: BlockImportMethod } } BlockImportMethod::Rpc => { - rig.enqueue_rpc_block(); + rig.enqueue_lookup_block(); events.push(WorkType::RpcBlock); if num_blobs > 0 { rig.enqueue_single_lookup_rpc_blobs(); @@ -1391,7 +1379,7 @@ async fn aggregate_attestation_to_unknown_block(import_method: BlockImportMethod } } BlockImportMethod::Rpc => { - rig.enqueue_rpc_block(); + rig.enqueue_lookup_block(); events.push(WorkType::RpcBlock); if num_blobs > 0 { rig.enqueue_single_lookup_rpc_blobs(); @@ -1585,7 +1573,7 @@ async fn test_rpc_block_reprocessing() { let next_block_root = rig.next_block.canonical_root(); // Insert the next block into the duplicate cache manually let handle = rig.duplicate_cache.check_and_insert(next_block_root); - rig.enqueue_single_lookup_rpc_block(); + rig.enqueue_single_lookup_block(); rig.assert_event_journal_completes(&[WorkType::RpcBlock]) .await; diff --git a/beacon_node/network/src/sync/backfill_sync/mod.rs b/beacon_node/network/src/sync/backfill_sync/mod.rs 
index 801c9eca4d..0f80138d24 100644 --- a/beacon_node/network/src/sync/backfill_sync/mod.rs +++ b/beacon_node/network/src/sync/backfill_sync/mod.rs @@ -19,7 +19,7 @@ use crate::sync::manager::BatchProcessResult; use crate::sync::network_context::{ RangeRequestId, RpcRequestSendError, RpcResponseError, SyncNetworkContext, }; -use beacon_chain::block_verification_types::RpcBlock; +use beacon_chain::block_verification_types::RangeSyncBlock; use beacon_chain::{BeaconChain, BeaconChainTypes}; use lighthouse_network::service::api_types::Id; use lighthouse_network::types::{BackFillState, NetworkGlobals}; @@ -55,7 +55,7 @@ const MAX_BATCH_DOWNLOAD_ATTEMPTS: u8 = 10; /// after `MAX_BATCH_PROCESSING_ATTEMPTS` times, it is considered faulty. const MAX_BATCH_PROCESSING_ATTEMPTS: u8 = 10; -type RpcBlocks = Vec>; +type RpcBlocks = Vec>; type BackFillBatchInfo = BatchInfo, RpcBlocks>; @@ -390,7 +390,7 @@ impl BackFillSync { batch_id: BatchId, peer_id: &PeerId, request_id: Id, - blocks: Vec>, + blocks: Vec>, ) -> Result { // check if we have this batch let Some(batch) = self.batches.get_mut(&batch_id) else { diff --git a/beacon_node/network/src/sync/batch.rs b/beacon_node/network/src/sync/batch.rs index e87ffd119e..10af1bf503 100644 --- a/beacon_node/network/src/sync/batch.rs +++ b/beacon_node/network/src/sync/batch.rs @@ -1,4 +1,4 @@ -use beacon_chain::block_verification_types::RpcBlock; +use beacon_chain::block_verification_types::RangeSyncBlock; use educe::Educe; use lighthouse_network::PeerId; use lighthouse_network::rpc::methods::BlocksByRangeRequest; @@ -449,7 +449,7 @@ impl BatchInfo { } // BatchInfo implementations for RangeSync -impl BatchInfo>> { +impl BatchInfo>> { /// Returns a BlocksByRange request associated with the batch. 
pub fn to_blocks_by_range_request(&self) -> (BlocksByRangeRequest, ByRangeRequestType) { ( diff --git a/beacon_node/network/src/sync/block_sidecar_coupling.rs b/beacon_node/network/src/sync/block_sidecar_coupling.rs index a287771854..98cf3e0a1f 100644 --- a/beacon_node/network/src/sync/block_sidecar_coupling.rs +++ b/beacon_node/network/src/sync/block_sidecar_coupling.rs @@ -1,6 +1,6 @@ use beacon_chain::{ BeaconChainTypes, - block_verification_types::{AvailableBlockData, RpcBlock}, + block_verification_types::{AvailableBlockData, RangeSyncBlock}, data_availability_checker::DataAvailabilityChecker, data_column_verification::CustodyDataColumn, get_block_root, @@ -200,7 +200,7 @@ impl RangeBlockComponentsRequest { &mut self, da_checker: Arc>, spec: Arc, - ) -> Option>, CouplingError>> + ) -> Option>, CouplingError>> where T: BeaconChainTypes, { @@ -288,7 +288,7 @@ impl RangeBlockComponentsRequest { blobs: Vec>>, da_checker: Arc>, spec: Arc, - ) -> Result>, CouplingError> + ) -> Result>, CouplingError> where T: BeaconChainTypes, { @@ -335,7 +335,7 @@ impl RangeBlockComponentsRequest { })?; let block_data = AvailableBlockData::new_with_blobs(blobs); responses.push( - RpcBlock::new(block, Some(block_data), &da_checker, spec.clone()) + RangeSyncBlock::new(block, block_data, &da_checker, spec.clone()) .map_err(|e| CouplingError::BlobPeerFailure(format!("{e:?}")))?, ) } @@ -360,7 +360,7 @@ impl RangeBlockComponentsRequest { attempt: usize, da_checker: Arc>, spec: Arc, - ) -> Result>, CouplingError> + ) -> Result>, CouplingError> where T: BeaconChainTypes, { @@ -388,12 +388,12 @@ impl RangeBlockComponentsRequest { // Now iterate all blocks ensuring that the block roots of each block and data column match, // plus we have columns for our custody requirements - let mut rpc_blocks = Vec::with_capacity(blocks.len()); + let mut range_sync_blocks = Vec::with_capacity(blocks.len()); let exceeded_retries = attempt >= MAX_COLUMN_RETRIES; for block in blocks { let block_root = 
get_block_root(&block); - rpc_blocks.push(if block.num_expected_blobs() > 0 { + range_sync_blocks.push(if block.num_expected_blobs() > 0 { let Some(mut data_columns_by_index) = data_columns_by_block.remove(&block_root) else { let responsible_peers = column_to_peer.iter().map(|c| (*c.0, *c.1)).collect(); @@ -441,11 +441,11 @@ impl RangeBlockComponentsRequest { let block_data = AvailableBlockData::new_with_data_columns(custody_columns.iter().map(|c| c.as_data_column().clone()).collect::>()); - RpcBlock::new(block, Some(block_data), &da_checker, spec.clone()) + RangeSyncBlock::new(block, block_data, &da_checker, spec.clone()) .map_err(|e| CouplingError::InternalError(format!("{:?}", e)))? } else { // Block has no data, expects zero columns - RpcBlock::new(block, Some(AvailableBlockData::NoData), &da_checker, spec.clone()) + RangeSyncBlock::new(block, AvailableBlockData::NoData, &da_checker, spec.clone()) .map_err(|e| CouplingError::InternalError(format!("{:?}", e)))? }); } @@ -458,7 +458,7 @@ impl RangeBlockComponentsRequest { debug!(?remaining_roots, "Not all columns consumed for block"); } - Ok(rpc_blocks) + Ok(range_sync_blocks) } } @@ -947,7 +947,7 @@ mod tests { } let result: Result< - Vec>, + Vec>, crate::sync::block_sidecar_coupling::CouplingError, > = info.responses(da_checker.clone(), spec.clone()).unwrap(); assert!(result.is_err()); @@ -981,10 +981,10 @@ mod tests { // WHEN: Attempting to get responses again let result = info.responses(da_checker, spec).unwrap(); - // THEN: Should succeed with complete RPC blocks + // THEN: Should succeed with complete RangeSync blocks assert!(result.is_ok()); - let rpc_blocks = result.unwrap(); - assert_eq!(rpc_blocks.len(), 2); + let range_sync_blocks = result.unwrap(); + assert_eq!(range_sync_blocks.len(), 2); } #[test] diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index 7e2c0d9a94..ff630bb470 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ 
b/beacon_node/network/src/sync/network_context.rs @@ -17,7 +17,8 @@ use crate::sync::block_lookups::SingleLookupId; use crate::sync::block_sidecar_coupling::CouplingError; use crate::sync::network_context::requests::BlobsByRootSingleBlockRequest; use crate::sync::range_data_column_batch_request::RangeDataColumnBatchRequest; -use beacon_chain::block_verification_types::{AsBlock, RpcBlock}; +use beacon_chain::block_verification_types::LookupBlock; +use beacon_chain::block_verification_types::{AsBlock, RangeSyncBlock}; use beacon_chain::{BeaconChain, BeaconChainTypes, BlockProcessStatus, EngineState}; use custody::CustodyRequestResult; use fnv::FnvHashMap; @@ -735,7 +736,7 @@ impl SyncNetworkContext { &mut self, id: ComponentsByRangeRequestId, range_block_component: RangeBlockComponent, - ) -> Option>, RpcResponseError>> { + ) -> Option>, RpcResponseError>> { let Entry::Occupied(mut entry) = self.components_by_range_requests.entry(id) else { metrics::inc_counter_vec(&metrics::SYNC_UNKNOWN_NETWORK_REQUESTS, &["range_blocks"]); return None; @@ -1588,21 +1589,15 @@ impl SyncNetworkContext { .beacon_processor_if_enabled() .ok_or(SendErrorProcessor::ProcessorNotAvailable)?; - let block = RpcBlock::new( - block, - None, - &self.chain.data_availability_checker, - self.chain.spec.clone(), - ) - .map_err(|_| SendErrorProcessor::SendError)?; + let lookup_block = LookupBlock::new(block); - debug!(block = ?block_root, block_slot = %block.slot(), id, "Sending block for processing"); + debug!(block = ?block_root, block_slot = %lookup_block.slot(), id, "Sending block for processing"); // Lookup sync event safety: If `beacon_processor.send_rpc_beacon_block` returns Ok() sync // must receive a single `SyncMessage::BlockComponentProcessed` with this process type beacon_processor - .send_rpc_beacon_block( + .send_lookup_beacon_block( block_root, - block, + lookup_block, seen_timestamp, BlockProcessType::SingleBlock { id }, ) diff --git a/beacon_node/network/src/sync/range_sync/chain.rs 
b/beacon_node/network/src/sync/range_sync/chain.rs index e3ff638121..d533d8ed0d 100644 --- a/beacon_node/network/src/sync/range_sync/chain.rs +++ b/beacon_node/network/src/sync/range_sync/chain.rs @@ -10,7 +10,7 @@ use crate::sync::block_sidecar_coupling::CouplingError; use crate::sync::network_context::{RangeRequestId, RpcRequestSendError, RpcResponseError}; use crate::sync::{BatchProcessResult, network_context::SyncNetworkContext}; use beacon_chain::BeaconChainTypes; -use beacon_chain::block_verification_types::RpcBlock; +use beacon_chain::block_verification_types::RangeSyncBlock; use lighthouse_network::service::api_types::Id; use lighthouse_network::{PeerAction, PeerId}; use logging::crit; @@ -40,7 +40,7 @@ const BATCH_BUFFER_SIZE: u8 = 5; /// and continued is now in an inconsistent state. pub type ProcessingResult = Result; -type RpcBlocks = Vec>; +type RpcBlocks = Vec>; type RangeSyncBatchInfo = BatchInfo, RpcBlocks>; type RangeSyncBatches = BTreeMap>; @@ -273,7 +273,7 @@ impl SyncingChain { batch_id: BatchId, peer_id: &PeerId, request_id: Id, - blocks: Vec>, + blocks: Vec>, ) -> ProcessingResult { let _guard = self.span.clone().entered(); // check if we have this batch diff --git a/beacon_node/network/src/sync/range_sync/range.rs b/beacon_node/network/src/sync/range_sync/range.rs index 9fd72ac98a..6509ac3cb3 100644 --- a/beacon_node/network/src/sync/range_sync/range.rs +++ b/beacon_node/network/src/sync/range_sync/range.rs @@ -47,7 +47,7 @@ use crate::status::ToStatusMessage; use crate::sync::BatchProcessResult; use crate::sync::batch::BatchId; use crate::sync::network_context::{RpcResponseError, SyncNetworkContext}; -use beacon_chain::block_verification_types::RpcBlock; +use beacon_chain::block_verification_types::RangeSyncBlock; use beacon_chain::{BeaconChain, BeaconChainTypes}; use lighthouse_network::rpc::GoodbyeReason; use lighthouse_network::service::api_types::Id; @@ -213,7 +213,7 @@ where chain_id: ChainId, batch_id: BatchId, request_id: Id, - 
blocks: Vec>, + blocks: Vec>, ) { // check if this chunk removes the chain match self.chains.call_by_id(chain_id, |chain| { diff --git a/beacon_node/network/src/sync/tests/lookups.rs b/beacon_node/network/src/sync/tests/lookups.rs index 769a11d976..cd872df887 100644 --- a/beacon_node/network/src/sync/tests/lookups.rs +++ b/beacon_node/network/src/sync/tests/lookups.rs @@ -7,6 +7,7 @@ use crate::sync::{ manager::{BlockProcessType, BlockProcessingResult, SyncManager}, }; use beacon_chain::blob_verification::KzgVerifiedBlob; +use beacon_chain::block_verification_types::LookupBlock; use beacon_chain::custody_context::NodeCustodyType; use beacon_chain::{ AvailabilityProcessingStatus, BlockError, NotifyExecutionLayer, @@ -464,7 +465,7 @@ impl TestRig { panic!("Test consumer requested unknown block: {id:?}") }) .block_data() - .and_then(|d| d.blobs()) + .blobs() .unwrap_or_else(|| panic!("Block {id:?} has no blobs")) .iter() .find(|blob| blob.index == id.index) @@ -528,7 +529,7 @@ impl TestRig { panic!("Test consumer requested unknown block: {id:?}") }) .block_data() - .and_then(|d| d.data_columns()) + .data_columns() .unwrap_or_else(|| panic!("Block id {id:?} has no columns")); id.columns .iter() @@ -594,7 +595,7 @@ impl TestRig { // - Some blocks may not have blobs as the blob count is random let blobs = (req.start_slot..req.start_slot + req.count) .filter_map(|slot| self.network_blocks_by_slot.get(&Slot::new(slot))) - .filter_map(|block| block.block_data().and_then(|d| d.blobs())) + .filter_map(|block| block.block_data().blobs()) .flat_map(|blobs| blobs.into_iter()) .collect::>(); self.send_rpc_blobs_response(req_id, peer_id, &blobs); @@ -610,7 +611,7 @@ impl TestRig { // - Some blocks may not have columns as the blob count is random let columns = (req.start_slot..req.start_slot + req.count) .filter_map(|slot| self.network_blocks_by_slot.get(&Slot::new(slot))) - .filter_map(|block| block.block_data().and_then(|d| d.data_columns())) + .filter_map(|block| 
block.block_data().data_columns()) .flat_map(|columns| { columns .into_iter() @@ -786,10 +787,10 @@ impl TestRig { } fn corrupt_last_block_signature(&mut self) { - let rpc_block = self.get_last_block().clone(); - let mut block = (*rpc_block.block_cloned()).clone(); - let blobs = rpc_block.block_data().and_then(|d| d.blobs()); - let columns = rpc_block.block_data().and_then(|d| d.data_columns()); + let range_sync_block = self.get_last_block().clone(); + let mut block = (*range_sync_block.block_cloned()).clone(); + let blobs = range_sync_block.block_data().blobs(); + let columns = range_sync_block.block_data().data_columns(); *block.signature_mut() = self.valid_signature(); self.re_insert_block(Arc::new(block), blobs, columns); } @@ -801,15 +802,15 @@ impl TestRig { } fn corrupt_last_blob_proposer_signature(&mut self) { - let rpc_block = self.get_last_block().clone(); - let block = rpc_block.block_cloned(); - let mut blobs = rpc_block + let range_sync_block = self.get_last_block().clone(); + let block = range_sync_block.block_cloned(); + let mut blobs = range_sync_block .block_data() - .and_then(|d| d.blobs()) + .blobs() .expect("no blobs") .into_iter() .collect::>(); - let columns = rpc_block.block_data().and_then(|d| d.data_columns()); + let columns = range_sync_block.block_data().data_columns(); let first = blobs.first_mut().expect("empty blobs"); Arc::make_mut(first).signed_block_header.signature = self.valid_signature(); let max_blobs = @@ -822,15 +823,15 @@ impl TestRig { } fn corrupt_last_blob_kzg_proof(&mut self) { - let rpc_block = self.get_last_block().clone(); - let block = rpc_block.block_cloned(); - let mut blobs = rpc_block + let range_sync_block = self.get_last_block().clone(); + let block = range_sync_block.block_cloned(); + let mut blobs = range_sync_block .block_data() - .and_then(|d| d.blobs()) + .blobs() .expect("no blobs") .into_iter() .collect::>(); - let columns = rpc_block.block_data().and_then(|d| d.data_columns()); + let columns = 
range_sync_block.block_data().data_columns(); let first = blobs.first_mut().expect("empty blobs"); Arc::make_mut(first).kzg_proof = kzg::KzgProof::empty(); let max_blobs = @@ -843,12 +844,12 @@ impl TestRig { } fn corrupt_last_column_proposer_signature(&mut self) { - let rpc_block = self.get_last_block().clone(); - let block = rpc_block.block_cloned(); - let blobs = rpc_block.block_data().and_then(|d| d.blobs()); - let mut columns = rpc_block + let range_sync_block = self.get_last_block().clone(); + let block = range_sync_block.block_cloned(); + let blobs = range_sync_block.block_data().blobs(); + let mut columns = range_sync_block .block_data() - .and_then(|d| d.data_columns()) + .data_columns() .expect("no columns"); let first = columns.first_mut().expect("empty columns"); Arc::make_mut(first) @@ -859,12 +860,12 @@ impl TestRig { } fn corrupt_last_column_kzg_proof(&mut self) { - let rpc_block = self.get_last_block().clone(); - let block = rpc_block.block_cloned(); - let blobs = rpc_block.block_data().and_then(|d| d.blobs()); - let mut columns = rpc_block + let range_sync_block = self.get_last_block().clone(); + let block = range_sync_block.block_cloned(); + let blobs = range_sync_block.block_data().blobs(); + let mut columns = range_sync_block .block_data() - .and_then(|d| d.data_columns()) + .data_columns() .expect("no columns"); let first = columns.first_mut().expect("empty columns"); let column = Arc::make_mut(first); @@ -873,7 +874,7 @@ impl TestRig { self.re_insert_block(block, blobs, Some(columns)); } - fn get_last_block(&self) -> &RpcBlock { + fn get_last_block(&self) -> &RangeSyncBlock { let (_, last_block) = self .network_blocks_by_root .iter() @@ -893,13 +894,13 @@ impl TestRig { let block_root = block.canonical_root(); let block_slot = block.slot(); let block_data = if let Some(columns) = columns { - Some(AvailableBlockData::new_with_data_columns(columns)) + AvailableBlockData::new_with_data_columns(columns) } else if let Some(blobs) = blobs { - 
Some(AvailableBlockData::new_with_blobs(blobs)) + AvailableBlockData::new_with_blobs(blobs) } else { - Some(AvailableBlockData::NoData) + AvailableBlockData::NoData }; - let rpc_block = RpcBlock::new( + let range_sync_block = RangeSyncBlock::new( block, block_data, &self.harness.chain.data_availability_checker, @@ -907,8 +908,9 @@ impl TestRig { ) .unwrap(); self.network_blocks_by_slot - .insert(block_slot, rpc_block.clone()); - self.network_blocks_by_root.insert(block_root, rpc_block); + .insert(block_slot, range_sync_block.clone()); + self.network_blocks_by_root + .insert(block_root, range_sync_block); } /// Trigger a lookup with the last created block @@ -947,7 +949,7 @@ impl TestRig { /// Import a block directly into the chain without going through lookup sync async fn import_block_by_root(&mut self, block_root: Hash256) { - let rpc_block = self + let range_sync_block = self .network_blocks_by_root .get(&block_root) .unwrap_or_else(|| panic!("No block for root {block_root}")) @@ -957,9 +959,9 @@ impl TestRig { .chain .process_block( block_root, - rpc_block, + range_sync_block, NotifyExecutionLayer::Yes, - BlockImportSource::Gossip, + BlockImportSource::RangeSync, || Ok(()), ) .await @@ -979,7 +981,7 @@ impl TestRig { let blobs = self .get_last_block() .block_data() - .and_then(|d| d.blobs()) + .blobs() .expect("no blobs"); let blob = blobs.first().expect("empty blobs"); self.trigger_unknown_parent_blob(peer_id, blob.clone()); @@ -990,7 +992,7 @@ impl TestRig { let columns = self .get_last_block() .block_data() - .and_then(|d| d.data_columns()) + .data_columns() .expect("No data columns"); let column = columns.first().expect("empty columns"); self.trigger_unknown_parent_column(peer_id, column.clone()); @@ -1475,15 +1477,14 @@ impl TestRig { ) -> AvailabilityProcessingStatus { // Simulate importing block from another source. Don't use GossipVerified as it checks with // the clock, which does not match the timestamp in the payload. 
- let block_root = block.canonical_root(); - let rpc_block = RpcBlock::BlockOnly { block_root, block }; + let lookup_block = LookupBlock::new(block); self.harness .chain .process_block( - block_root, - rpc_block, + lookup_block.block_root(), + lookup_block, NotifyExecutionLayer::Yes, - BlockImportSource::Gossip, + BlockImportSource::Lookup, || Ok(()), ) .await @@ -2196,10 +2197,7 @@ async fn blobs_in_da_checker_skip_download() { }; r.build_chain(1).await; let block = r.get_last_block().clone(); - let blobs = block - .block_data() - .and_then(|d| d.blobs()) - .expect("block with no blobs"); + let blobs = block.block_data().blobs().expect("block with no blobs"); for blob in &blobs { r.insert_blob_to_da_checker(blob.clone()); } diff --git a/beacon_node/network/src/sync/tests/mod.rs b/beacon_node/network/src/sync/tests/mod.rs index f00cf5841d..6e948e4726 100644 --- a/beacon_node/network/src/sync/tests/mod.rs +++ b/beacon_node/network/src/sync/tests/mod.rs @@ -3,7 +3,7 @@ use crate::sync::SyncMessage; use crate::sync::block_lookups::BlockLookupsMetrics; use crate::sync::manager::SyncManager; use crate::sync::tests::lookups::SimulateConfig; -use beacon_chain::block_verification_types::RpcBlock; +use beacon_chain::block_verification_types::RangeSyncBlock; use beacon_chain::builder::Witness; use beacon_chain::custody_context::NodeCustodyType; use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; @@ -77,8 +77,8 @@ struct TestRig { rng: ChaCha20Rng, fork_name: ForkName, /// Blocks that will be used in the test but may not be known to `harness` yet. 
- network_blocks_by_root: HashMap>, - network_blocks_by_slot: HashMap>, + network_blocks_by_root: HashMap>, + network_blocks_by_slot: HashMap>, penalties: Vec, /// All seen lookups through the test run seen_lookups: HashMap, diff --git a/beacon_node/network/src/sync/tests/range.rs b/beacon_node/network/src/sync/tests/range.rs index 67395ccd25..c19ee8eb6d 100644 --- a/beacon_node/network/src/sync/tests/range.rs +++ b/beacon_node/network/src/sync/tests/range.rs @@ -10,7 +10,7 @@ use beacon_chain::block_verification_types::AvailableBlockData; use beacon_chain::custody_context::NodeCustodyType; use beacon_chain::data_column_verification::CustodyDataColumn; use beacon_chain::test_utils::{AttestationStrategy, BlockStrategy}; -use beacon_chain::{EngineState, NotifyExecutionLayer, block_verification_types::RpcBlock}; +use beacon_chain::{EngineState, NotifyExecutionLayer, block_verification_types::RangeSyncBlock}; use beacon_processor::WorkType; use lighthouse_network::rpc::RequestType; use lighthouse_network::rpc::methods::{ @@ -430,7 +430,7 @@ impl TestRig { .chain .process_block( block_root, - build_rpc_block(block.into(), &data_sidecars, self.harness.chain.clone()), + build_range_sync_block(block.into(), &data_sidecars, self.harness.chain.clone()), NotifyExecutionLayer::Yes, BlockImportSource::RangeSync, || Ok(()), @@ -443,17 +443,17 @@ impl TestRig { } } -fn build_rpc_block( +fn build_range_sync_block( block: Arc>, data_sidecars: &Option>, chain: Arc>, -) -> RpcBlock { +) -> RangeSyncBlock { match data_sidecars { Some(DataSidecars::Blobs(blobs)) => { let block_data = AvailableBlockData::new_with_blobs(blobs.clone()); - RpcBlock::new( + RangeSyncBlock::new( block, - Some(block_data), + block_data, &chain.data_availability_checker, chain.spec.clone(), ) @@ -466,18 +466,18 @@ fn build_rpc_block( .map(|c| c.as_data_column().clone()) .collect::>(), ); - RpcBlock::new( + RangeSyncBlock::new( block, - Some(block_data), + block_data, &chain.data_availability_checker, 
chain.spec.clone(), ) .unwrap() } // Block has no data, expects zero columns - None => RpcBlock::new( + None => RangeSyncBlock::new( block, - Some(AvailableBlockData::NoData), + AvailableBlockData::NoData, &chain.data_availability_checker, chain.spec.clone(), ) diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index ca77dc8d79..07a7d4c6b6 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -3,7 +3,7 @@ use crate::decode::{ssz_decode_file, ssz_decode_file_with, ssz_decode_state, yam use ::fork_choice::{PayloadVerificationStatus, ProposerHeadError}; use beacon_chain::beacon_proposer_cache::compute_proposer_duties_from_head; use beacon_chain::blob_verification::GossipBlobError; -use beacon_chain::block_verification_types::RpcBlock; +use beacon_chain::block_verification_types::LookupBlock; use beacon_chain::chain_config::{ DEFAULT_RE_ORG_HEAD_THRESHOLD, DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION, DEFAULT_RE_ORG_PARENT_THRESHOLD, DisallowedReOrgOffsets, @@ -561,21 +561,13 @@ impl Tester { let block = Arc::new(block); let result: Result, _> = self - .block_on_dangerous( - self.harness.chain.process_block( - block_root, - RpcBlock::new( - block.clone(), - None, - &self.harness.chain.data_availability_checker, - self.harness.chain.spec.clone(), - ) - .map_err(|e| Error::InternalError(format!("{:?}", e)))?, - NotifyExecutionLayer::Yes, - BlockImportSource::Lookup, - || Ok(()), - ), - )? + .block_on_dangerous(self.harness.chain.process_block( + block_root, + LookupBlock::new(block.clone()), + NotifyExecutionLayer::Yes, + BlockImportSource::Lookup, + || Ok(()), + ))? 
.map(|avail: AvailabilityProcessingStatus| avail.try_into()); let success = data_column_success && result.as_ref().is_ok_and(|inner| inner.is_ok()); if success != valid { @@ -659,21 +651,13 @@ impl Tester { let block = Arc::new(block); let result: Result, _> = self - .block_on_dangerous( - self.harness.chain.process_block( - block_root, - RpcBlock::new( - block.clone(), - None, - &self.harness.chain.data_availability_checker, - self.harness.chain.spec.clone(), - ) - .map_err(|e| Error::InternalError(format!("{:?}", e)))?, - NotifyExecutionLayer::Yes, - BlockImportSource::Lookup, - || Ok(()), - ), - )? + .block_on_dangerous(self.harness.chain.process_block( + block_root, + LookupBlock::new(block.clone()), + NotifyExecutionLayer::Yes, + BlockImportSource::Lookup, + || Ok(()), + ))? .map(|avail: AvailabilityProcessingStatus| avail.try_into()); let success = blob_success && result.as_ref().is_ok_and(|inner| inner.is_ok()); if success != valid { From 4eecca6da737e922c973516edb502772eba2b204 Mon Sep 17 00:00:00 2001 From: Mac L Date: Mon, 16 Mar 2026 07:53:22 +0300 Subject: [PATCH 25/43] Update `/rewards` endpoints to match spec (#8967) I believe one of our rewards endpoints is slightly out of spec. We do not return the `finalized` status for `post_beacon_rewards_attestations`. Additionally, the `eth2` client doesn't expect the correct wrapper types for some other endpoints. - Update `post_beacon_rewards_attestations` server implementation to match spec. - Update all three client functions in `eth2` to the correct wrapper type. - Add missing tests for `http_api` to detect any regressions. 
Co-Authored-By: Mac L --- beacon_node/http_api/src/lib.rs | 12 ++- beacon_node/http_api/tests/tests.rs | 118 ++++++++++++++++++++++++++-- common/eth2/src/lib.rs | 6 +- 3 files changed, 124 insertions(+), 12 deletions(-) diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 26bad809df..fc92128c91 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -1801,8 +1801,16 @@ pub fn serve( let execution_optimistic = chain.is_optimistic_or_invalid_head().unwrap_or_default(); - Ok(api_types::GenericResponse::from(attestation_rewards)) - .map(|resp| resp.add_execution_optimistic(execution_optimistic)) + let finalized = epoch + 2 + <= chain + .canonical_head + .cached_head() + .finalized_checkpoint() + .epoch; + + Ok(api_types::GenericResponse::from(attestation_rewards)).map(|resp| { + resp.add_execution_optimistic_finalized(execution_optimistic, finalized) + }) }) }, ); diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index aed7a6b200..c9086dd876 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -7195,15 +7195,16 @@ impl ApiTester { assert_eq!(result.execution_optimistic, Some(true)); } - async fn test_get_beacon_rewards_blocks_at_head(&self) -> StandardBlockReward { + async fn test_get_beacon_rewards_blocks_at_head( + &self, + ) -> ExecutionOptimisticFinalizedResponse { self.client .get_beacon_rewards_blocks(CoreBlockId::Head) .await .unwrap() - .data } - async fn test_beacon_block_rewards_electra(self) -> Self { + async fn test_beacon_block_rewards_fulu(self) -> Self { for _ in 0..E::slots_per_epoch() { let state = self.harness.get_current_state(); let slot = state.slot() + Slot::new(1); @@ -7217,8 +7218,80 @@ impl ApiTester { .compute_beacon_block_reward(signed_block.message(), &mut state) .unwrap(); self.harness.extend_slots(1).await; - let api_beacon_block_reward = self.test_get_beacon_rewards_blocks_at_head().await; - 
assert_eq!(beacon_block_reward, api_beacon_block_reward); + let response = self.test_get_beacon_rewards_blocks_at_head().await; + assert_eq!(response.execution_optimistic, Some(false)); + assert_eq!(response.finalized, Some(false)); + assert_eq!(beacon_block_reward, response.data); + } + self + } + + async fn test_get_beacon_rewards_sync_committee_at_head( + &self, + ) -> ExecutionOptimisticFinalizedResponse> { + self.client + .post_beacon_rewards_sync_committee(CoreBlockId::Head, &[]) + .await + .unwrap() + } + + async fn test_beacon_sync_committee_rewards_fulu(self) -> Self { + for _ in 0..E::slots_per_epoch() { + let state = self.harness.get_current_state(); + let slot = state.slot() + Slot::new(1); + + let ((signed_block, _maybe_blob_sidecars), mut state) = + self.harness.make_block_return_pre_state(state, slot).await; + + let mut expected_rewards = self + .harness + .chain + .compute_sync_committee_rewards(signed_block.message(), &mut state) + .unwrap(); + expected_rewards.sort_by_key(|r| r.validator_index); + + self.harness.extend_slots(1).await; + + let response = self.test_get_beacon_rewards_sync_committee_at_head().await; + assert_eq!(response.execution_optimistic, Some(false)); + assert_eq!(response.finalized, Some(false)); + let mut api_rewards = response.data; + api_rewards.sort_by_key(|r| r.validator_index); + assert_eq!(expected_rewards, api_rewards); + } + self + } + + async fn test_get_beacon_rewards_attestations( + &self, + epoch: Epoch, + ) -> ExecutionOptimisticFinalizedResponse { + self.client + .post_beacon_rewards_attestations(epoch, &[]) + .await + .unwrap() + } + + async fn test_beacon_attestation_rewards_fulu(self) -> Self { + // Check 3 epochs. 
+ let num_epochs = 3; + for _ in 0..num_epochs { + self.harness + .extend_slots(E::slots_per_epoch() as usize) + .await; + + let epoch = self.chain.epoch().unwrap() - 1; + + let expected_rewards = self + .harness + .chain + .compute_attestation_rewards(epoch, vec![]) + .unwrap(); + + let response = self.test_get_beacon_rewards_attestations(epoch).await; + assert_eq!(response.execution_optimistic, Some(false)); + assert_eq!(response.finalized, Some(false)); + assert_eq!(expected_rewards, response.data); } self } @@ -8534,16 +8607,47 @@ async fn expected_withdrawals_valid_capella() { } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] -async fn get_beacon_rewards_blocks_electra() { +async fn get_beacon_rewards_blocks_fulu() { let mut config = ApiTesterConfig::default(); config.spec.altair_fork_epoch = Some(Epoch::new(0)); config.spec.bellatrix_fork_epoch = Some(Epoch::new(0)); config.spec.capella_fork_epoch = Some(Epoch::new(0)); config.spec.deneb_fork_epoch = Some(Epoch::new(0)); config.spec.electra_fork_epoch = Some(Epoch::new(0)); + config.spec.fulu_fork_epoch = Some(Epoch::new(0)); ApiTester::new_from_config(config) .await - .test_beacon_block_rewards_electra() + .test_beacon_block_rewards_fulu() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn get_beacon_rewards_sync_committee_fulu() { + let mut config = ApiTesterConfig::default(); + config.spec.altair_fork_epoch = Some(Epoch::new(0)); + config.spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + config.spec.capella_fork_epoch = Some(Epoch::new(0)); + config.spec.deneb_fork_epoch = Some(Epoch::new(0)); + config.spec.electra_fork_epoch = Some(Epoch::new(0)); + config.spec.fulu_fork_epoch = Some(Epoch::new(0)); + ApiTester::new_from_config(config) + .await + .test_beacon_sync_committee_rewards_fulu() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn get_beacon_rewards_attestations_fulu() { + let mut config = 
ApiTesterConfig::default(); + config.spec.altair_fork_epoch = Some(Epoch::new(0)); + config.spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + config.spec.capella_fork_epoch = Some(Epoch::new(0)); + config.spec.deneb_fork_epoch = Some(Epoch::new(0)); + config.spec.electra_fork_epoch = Some(Epoch::new(0)); + config.spec.fulu_fork_epoch = Some(Epoch::new(0)); + ApiTester::new_from_config(config) + .await + .test_beacon_attestation_rewards_fulu() .await; } diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index af87af14ba..40c5ef58a6 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -1802,7 +1802,7 @@ impl BeaconNodeHttpClient { &self, block_id: BlockId, validators: &[ValidatorId], - ) -> Result>, Error> { + ) -> Result>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -1819,7 +1819,7 @@ impl BeaconNodeHttpClient { pub async fn get_beacon_rewards_blocks( &self, block_id: BlockId, - ) -> Result, Error> { + ) -> Result, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -1837,7 +1837,7 @@ impl BeaconNodeHttpClient { &self, epoch: Epoch, validators: &[ValidatorId], - ) -> Result { + ) -> Result, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() From 95b99ee7247aa966aac48dc5e9bfb3d73db25b39 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 16 Mar 2026 22:40:22 +1100 Subject: [PATCH 26/43] Spec v1.7.0 alpha.3 (#8988) Update spec code for compliance with spec v1.7.0-alpha.3: https://github.com/ethereum/consensus-specs/releases/tag/v1.7.0-alpha.3 The actual consensus changes are minimal. There are few more changes that are only relevant to fork choice or P2P validation that we will pick up in future PRs. The change "Ignore beacon block if parent payload unknown" is currently covered in a hacky way by `load_parent` and can be improved once we have fork choice. 
The change "Add parent_block_root to bid filtering key" is relevant to bid gossip validation, which we don't have at all in unstable yet. Co-Authored-By: Michael Sproul --- .../beacon_chain/src/block_verification.rs | 1 + .../process_operations.rs | 139 +++++++++++++++++- consensus/types/src/core/consts.rs | 6 +- testing/ef_tests/Makefile | 2 +- testing/ef_tests/check_all_files_accessed.py | 2 + testing/ef_tests/download_test_vectors.sh | 7 +- testing/ef_tests/src/cases/operations.rs | 9 +- testing/ef_tests/src/handler.rs | 5 - 8 files changed, 151 insertions(+), 20 deletions(-) diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 06ec26185f..802b090f6a 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -1940,6 +1940,7 @@ fn load_parent>( { if block.as_block().is_parent_block_full(parent_bid_block_hash) { // TODO(gloas): loading the envelope here is not very efficient + // TODO(gloas): check parent payload existence prior to this point? 
let envelope = chain.store.get_payload_envelope(&root)?.ok_or_else(|| { BeaconChainError::DBInconsistent(format!( "Missing envelope for parent block {root:?}", diff --git a/consensus/state_processing/src/per_block_processing/process_operations.rs b/consensus/state_processing/src/per_block_processing/process_operations.rs index 9743812632..ac64398655 100644 --- a/consensus/state_processing/src/per_block_processing/process_operations.rs +++ b/consensus/state_processing/src/per_block_processing/process_operations.rs @@ -4,7 +4,10 @@ use crate::common::{ get_attestation_participation_flag_indices, increase_balance, initiate_validator_exit, slash_validator, }; -use crate::per_block_processing::errors::{BlockProcessingError, IntoWithIndex}; +use crate::per_block_processing::builder::{ + convert_validator_index_to_builder_index, is_builder_index, +}; +use crate::per_block_processing::errors::{BlockProcessingError, ExitInvalid, IntoWithIndex}; use crate::per_block_processing::verify_payload_attestation::verify_payload_attestation; use bls::{PublicKeyBytes, SignatureBytes}; use ssz_types::FixedVector; @@ -507,7 +510,26 @@ pub fn process_exits( // Verify and apply each exit in series. We iterate in series because higher-index exits may // become invalid due to the application of lower-index ones. for (i, exit) in voluntary_exits.iter().enumerate() { - verify_exit(state, None, exit, verify_signatures, spec) + // Exits must specify an epoch when they become valid; they are not valid before then. 
+ let current_epoch = state.current_epoch(); + if current_epoch < exit.message.epoch { + return Err(BlockOperationError::invalid(ExitInvalid::FutureEpoch { + state: current_epoch, + exit: exit.message.epoch, + }) + .into_with_index(i)); + } + + // [New in Gloas:EIP7732] + if state.fork_name_unchecked().gloas_enabled() + && is_builder_index(exit.message.validator_index) + { + process_builder_voluntary_exit(state, exit, verify_signatures, spec) + .map_err(|e| e.into_with_index(i))?; + continue; + } + + verify_exit(state, Some(current_epoch), exit, verify_signatures, spec) .map_err(|e| e.into_with_index(i))?; initiate_validator_exit(state, exit.message.validator_index as usize, spec)?; @@ -515,6 +537,87 @@ pub fn process_exits( Ok(()) } +/// Process a builder voluntary exit. [New in Gloas:EIP7732] +fn process_builder_voluntary_exit( + state: &mut BeaconState, + signed_exit: &SignedVoluntaryExit, + verify_signatures: VerifySignatures, + spec: &ChainSpec, +) -> Result<(), BlockOperationError> { + let builder_index = + convert_validator_index_to_builder_index(signed_exit.message.validator_index); + + let builder = state + .builders()? 
+ .get(builder_index as usize) + .cloned() + .ok_or(BlockOperationError::invalid(ExitInvalid::ValidatorUnknown( + signed_exit.message.validator_index, + )))?; + + // Verify the builder is active + let finalized_epoch = state.finalized_checkpoint().epoch; + if !builder.is_active_at_finalized_epoch(finalized_epoch, spec) { + return Err(BlockOperationError::invalid(ExitInvalid::NotActive( + signed_exit.message.validator_index, + ))); + } + + // Only exit builder if it has no pending withdrawals in the queue + let pending_balance = state.get_pending_balance_to_withdraw_for_builder(builder_index)?; + if pending_balance != 0 { + return Err(BlockOperationError::invalid( + ExitInvalid::PendingWithdrawalInQueue(signed_exit.message.validator_index), + )); + } + + // Verify signature (using EIP-7044 domain: capella_fork_version for Deneb+) + if verify_signatures.is_true() { + let pubkey = builder.pubkey; + let domain = spec.compute_domain( + Domain::VoluntaryExit, + spec.capella_fork_version, + state.genesis_validators_root(), + ); + let message = signed_exit.message.signing_root(domain); + // TODO(gloas): use builder pubkey cache once available + let bls_pubkey = pubkey + .decompress() + .map_err(|_| BlockOperationError::invalid(ExitInvalid::BadSignature))?; + if !signed_exit.signature.verify(&bls_pubkey, message) { + return Err(BlockOperationError::invalid(ExitInvalid::BadSignature)); + } + } + + // Initiate builder exit + initiate_builder_exit(state, builder_index, spec)?; + + Ok(()) +} + +/// Initiate the exit of a builder. [New in Gloas:EIP7732] +fn initiate_builder_exit( + state: &mut BeaconState, + builder_index: u64, + spec: &ChainSpec, +) -> Result<(), BeaconStateError> { + let current_epoch = state.current_epoch(); + let builder = state + .builders_mut()? 
+ .get_mut(builder_index as usize) + .ok_or(BeaconStateError::UnknownBuilder(builder_index))?; + + // Return if builder already initiated exit + if builder.withdrawable_epoch != spec.far_future_epoch { + return Ok(()); + } + + // Set builder exit epoch + builder.withdrawable_epoch = current_epoch.safe_add(spec.min_builder_withdrawability_delay)?; + + Ok(()) +} + /// Validates each `bls_to_execution_change` and updates the state /// /// Returns `Ok(())` if the validation and state updates completed successfully. Otherwise returns @@ -814,6 +917,30 @@ pub fn process_deposit_requests_post_gloas( Ok(()) } +/// Check if there is a pending deposit for a new validator with the given pubkey. +// TODO(gloas): cache the deposit signature validation or remove this loop entirely if possible, +// it is `O(n * m)` where `n` is max 8192 and `m` is max 128M. +fn is_pending_validator( + state: &BeaconState, + pubkey: &PublicKeyBytes, + spec: &ChainSpec, +) -> Result { + for deposit in state.pending_deposits()?.iter() { + if deposit.pubkey == *pubkey { + let deposit_data = DepositData { + pubkey: deposit.pubkey, + withdrawal_credentials: deposit.withdrawal_credentials, + amount: deposit.amount, + signature: deposit.signature.clone(), + }; + if is_valid_deposit_signature(&deposit_data, spec).is_ok() { + return Ok(true); + } + } + } + Ok(false) +} + pub fn process_deposit_request_post_gloas( state: &mut BeaconState, deposit_request: &DepositRequest, @@ -835,10 +962,14 @@ pub fn process_deposit_request_post_gloas( let validator_index = state.get_validator_index(&deposit_request.pubkey)?; let is_validator = validator_index.is_some(); - let is_builder_prefix = + let has_builder_prefix = is_builder_withdrawal_credential(deposit_request.withdrawal_credentials, spec); - if is_builder || (is_builder_prefix && !is_validator) { + if is_builder + || (has_builder_prefix + && !is_validator + && !is_pending_validator(state, &deposit_request.pubkey, spec)?) 
+ { // Apply builder deposits immediately apply_deposit_for_builder( state, diff --git a/consensus/types/src/core/consts.rs b/consensus/types/src/core/consts.rs index 0d4c0591cb..049094da76 100644 --- a/consensus/types/src/core/consts.rs +++ b/consensus/types/src/core/consts.rs @@ -31,9 +31,9 @@ pub mod gloas { // Fork choice constants pub type PayloadStatus = u8; - pub const PAYLOAD_STATUS_PENDING: PayloadStatus = 0; - pub const PAYLOAD_STATUS_EMPTY: PayloadStatus = 1; - pub const PAYLOAD_STATUS_FULL: PayloadStatus = 2; + pub const PAYLOAD_STATUS_EMPTY: PayloadStatus = 0; + pub const PAYLOAD_STATUS_FULL: PayloadStatus = 1; + pub const PAYLOAD_STATUS_PENDING: PayloadStatus = 2; pub const ATTESTATION_TIMELINESS_INDEX: usize = 0; pub const PTC_TIMELINESS_INDEX: usize = 1; diff --git a/testing/ef_tests/Makefile b/testing/ef_tests/Makefile index fd8a3f6da0..48378a4c95 100644 --- a/testing/ef_tests/Makefile +++ b/testing/ef_tests/Makefile @@ -1,6 +1,6 @@ # To download/extract nightly tests, run: # CONSENSUS_SPECS_TEST_VERSION=nightly make -CONSENSUS_SPECS_TEST_VERSION ?= v1.7.0-alpha.2 +CONSENSUS_SPECS_TEST_VERSION ?= v1.7.0-alpha.3 REPO_NAME := consensus-spec-tests OUTPUT_DIR := ./$(REPO_NAME) diff --git a/testing/ef_tests/check_all_files_accessed.py b/testing/ef_tests/check_all_files_accessed.py index 782b554ff1..dd6be14306 100755 --- a/testing/ef_tests/check_all_files_accessed.py +++ b/testing/ef_tests/check_all_files_accessed.py @@ -47,6 +47,8 @@ excluded_paths = [ "bls12-381-tests/hash_to_G2", "tests/.*/eip7732", "tests/.*/eip7805", + # Heze fork is not implemented + "tests/.*/heze/.*", # TODO(gloas): remove these ignores as Gloas consensus is implemented "tests/.*/gloas/fork_choice/.*", # Ignore MatrixEntry SSZ tests for now. 
diff --git a/testing/ef_tests/download_test_vectors.sh b/testing/ef_tests/download_test_vectors.sh index ff5b61bb47..f91b2d1c38 100755 --- a/testing/ef_tests/download_test_vectors.sh +++ b/testing/ef_tests/download_test_vectors.sh @@ -10,7 +10,7 @@ if [[ "$version" == "nightly" || "$version" =~ ^nightly-[0-9]+$ ]]; then exit 1 fi - for cmd in unzip jq; do + for cmd in jq; do if ! command -v "${cmd}" >/dev/null 2>&1; then echo "Error ${cmd} is not installed" exit 1 @@ -48,13 +48,10 @@ if [[ "$version" == "nightly" || "$version" =~ ^nightly-[0-9]+$ ]]; then echo "Downloading artifact: ${name}" curl --progress-bar --location --show-error --retry 3 --retry-all-errors --fail \ -H "${auth_header}" -H "Accept: application/vnd.github+json" \ - --output "${name}.zip" "${url}" || { + --output "${name}" "${url}" || { echo "Failed to download ${name}" exit 1 } - - unzip -qo "${name}.zip" - rm -f "${name}.zip" done else for test in "${TESTS[@]}"; do diff --git a/testing/ef_tests/src/cases/operations.rs b/testing/ef_tests/src/cases/operations.rs index ca0124e1aa..798c66b666 100644 --- a/testing/ef_tests/src/cases/operations.rs +++ b/testing/ef_tests/src/cases/operations.rs @@ -716,8 +716,13 @@ impl> LoadCase for Operations { // Check BLS setting here before SSZ deserialization, as most types require signatures // to be valid. - let (operation, bls_error) = if metadata.bls_setting.unwrap_or_default().check().is_ok() { - match O::decode(&path.join(O::filename()), fork_name, spec) { + let operation_path = path.join(O::filename()); + let (operation, bls_error) = if !operation_path.is_file() { + // Some test cases (e.g. builder_voluntary_exit__success) have no operation file. 
+ // TODO(gloas): remove this once the test vectors are fixed + (None, None) + } else if metadata.bls_setting.unwrap_or_default().check().is_ok() { + match O::decode(&operation_path, fork_name, spec) { Ok(op) => (Some(op), None), Err(Error::InvalidBLSInput(error)) => (None, Some(error)), Err(e) => return Err(e), diff --git a/testing/ef_tests/src/handler.rs b/testing/ef_tests/src/handler.rs index da3c5533b6..f8c16aec0b 100644 --- a/testing/ef_tests/src/handler.rs +++ b/testing/ef_tests/src/handler.rs @@ -537,11 +537,6 @@ impl Handler for RandomHandler { fn handler_name(&self) -> String { "random".into() } - - fn disabled_forks(&self) -> Vec { - // TODO(gloas): remove once we have Gloas random tests - vec![ForkName::Gloas] - } } #[derive(Educe)] From 17d183eb5bf1718054598d4fc91efd2c8ef33431 Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Tue, 17 Mar 2026 16:35:05 +0900 Subject: [PATCH 27/43] Unknown block for envelope (#8992) Add a queue that allows us to reprocess an envelope when it arrives over gossip references a unknown block root. When the block is finally imported, we immediately reprocess the queued envelope. Note that we don't trigger a block lookup sync. Incoming attestations for this block root will already trigger a lookup for us. 
I think thats good enough Co-Authored-By: Eitan Seri- Levi --- beacon_node/beacon_processor/src/lib.rs | 33 +- .../src/scheduler/work_queue.rs | 5 + .../src/scheduler/work_reprocessing_queue.rs | 281 ++++++++++++++++++ .../gossip_methods.rs | 63 +++- .../src/network_beacon_processor/tests.rs | 5 + 5 files changed, 383 insertions(+), 4 deletions(-) diff --git a/beacon_node/beacon_processor/src/lib.rs b/beacon_node/beacon_processor/src/lib.rs index 33a00bfa49..c33f4840e0 100644 --- a/beacon_node/beacon_processor/src/lib.rs +++ b/beacon_node/beacon_processor/src/lib.rs @@ -41,7 +41,8 @@ pub use crate::scheduler::BeaconProcessorQueueLengths; use crate::scheduler::work_queue::WorkQueues; use crate::work_reprocessing_queue::{ - QueuedBackfillBatch, QueuedColumnReconstruction, QueuedGossipBlock, ReprocessQueueMessage, + QueuedBackfillBatch, QueuedColumnReconstruction, QueuedGossipBlock, QueuedGossipEnvelope, + ReprocessQueueMessage, }; use futures::stream::{Stream, StreamExt}; use futures::task::Poll; @@ -242,6 +243,18 @@ impl From for WorkEvent { process_fn, }, }, + ReadyWork::Envelope(QueuedGossipEnvelope { + beacon_block_slot, + beacon_block_root, + process_fn, + }) => Self { + drop_during_sync: false, + work: Work::DelayedImportEnvelope { + beacon_block_slot, + beacon_block_root, + process_fn, + }, + }, ReadyWork::RpcBlock(QueuedRpcBlock { beacon_block_root, process_fn, @@ -384,6 +397,11 @@ pub enum Work { beacon_block_root: Hash256, process_fn: AsyncFn, }, + DelayedImportEnvelope { + beacon_block_slot: Slot, + beacon_block_root: Hash256, + process_fn: AsyncFn, + }, GossipVoluntaryExit(BlockingFn), GossipProposerSlashing(BlockingFn), GossipAttesterSlashing(BlockingFn), @@ -447,6 +465,7 @@ pub enum WorkType { GossipBlobSidecar, GossipDataColumnSidecar, DelayedImportBlock, + DelayedImportEnvelope, GossipVoluntaryExit, GossipProposerSlashing, GossipAttesterSlashing, @@ -498,6 +517,7 @@ impl Work { Work::GossipBlobSidecar(_) => WorkType::GossipBlobSidecar, 
Work::GossipDataColumnSidecar(_) => WorkType::GossipDataColumnSidecar, Work::DelayedImportBlock { .. } => WorkType::DelayedImportBlock, + Work::DelayedImportEnvelope { .. } => WorkType::DelayedImportEnvelope, Work::GossipVoluntaryExit(_) => WorkType::GossipVoluntaryExit, Work::GossipProposerSlashing(_) => WorkType::GossipProposerSlashing, Work::GossipAttesterSlashing(_) => WorkType::GossipAttesterSlashing, @@ -793,6 +813,8 @@ impl BeaconProcessor { // on the delayed ones. } else if let Some(item) = work_queues.delayed_block_queue.pop() { Some(item) + } else if let Some(item) = work_queues.delayed_envelope_queue.pop() { + Some(item) // Check gossip blocks and payloads before gossip attestations, since a block might be // required to verify some attestations. } else if let Some(item) = work_queues.gossip_block_queue.pop() { @@ -1111,6 +1133,9 @@ impl BeaconProcessor { Work::DelayedImportBlock { .. } => { work_queues.delayed_block_queue.push(work, work_id) } + Work::DelayedImportEnvelope { .. } => { + work_queues.delayed_envelope_queue.push(work, work_id) + } Work::GossipVoluntaryExit { .. 
} => { work_queues.gossip_voluntary_exit_queue.push(work, work_id) } @@ -1238,6 +1263,7 @@ impl BeaconProcessor { work_queues.gossip_data_column_queue.len() } WorkType::DelayedImportBlock => work_queues.delayed_block_queue.len(), + WorkType::DelayedImportEnvelope => work_queues.delayed_envelope_queue.len(), WorkType::GossipVoluntaryExit => { work_queues.gossip_voluntary_exit_queue.len() } @@ -1435,6 +1461,11 @@ impl BeaconProcessor { beacon_block_slot: _, beacon_block_root: _, process_fn, + } + | Work::DelayedImportEnvelope { + beacon_block_slot: _, + beacon_block_root: _, + process_fn, } => task_spawner.spawn_async(process_fn), Work::RpcBlock { process_fn, diff --git a/beacon_node/beacon_processor/src/scheduler/work_queue.rs b/beacon_node/beacon_processor/src/scheduler/work_queue.rs index 934659b304..e48c776b6d 100644 --- a/beacon_node/beacon_processor/src/scheduler/work_queue.rs +++ b/beacon_node/beacon_processor/src/scheduler/work_queue.rs @@ -127,6 +127,7 @@ pub struct BeaconProcessorQueueLengths { gossip_blob_queue: usize, gossip_data_column_queue: usize, delayed_block_queue: usize, + delayed_envelope_queue: usize, status_queue: usize, block_brange_queue: usize, block_broots_queue: usize, @@ -197,6 +198,7 @@ impl BeaconProcessorQueueLengths { gossip_blob_queue: 1024, gossip_data_column_queue: 1024, delayed_block_queue: 1024, + delayed_envelope_queue: 1024, status_queue: 1024, block_brange_queue: 1024, block_broots_queue: 1024, @@ -250,6 +252,7 @@ pub struct WorkQueues { pub gossip_blob_queue: FifoQueue>, pub gossip_data_column_queue: FifoQueue>, pub delayed_block_queue: FifoQueue>, + pub delayed_envelope_queue: FifoQueue>, pub status_queue: FifoQueue>, pub block_brange_queue: FifoQueue>, pub block_broots_queue: FifoQueue>, @@ -315,6 +318,7 @@ impl WorkQueues { let gossip_blob_queue = FifoQueue::new(queue_lengths.gossip_blob_queue); let gossip_data_column_queue = FifoQueue::new(queue_lengths.gossip_data_column_queue); let delayed_block_queue = 
FifoQueue::new(queue_lengths.delayed_block_queue); + let delayed_envelope_queue = FifoQueue::new(queue_lengths.delayed_envelope_queue); let status_queue = FifoQueue::new(queue_lengths.status_queue); let block_brange_queue = FifoQueue::new(queue_lengths.block_brange_queue); @@ -375,6 +379,7 @@ impl WorkQueues { gossip_blob_queue, gossip_data_column_queue, delayed_block_queue, + delayed_envelope_queue, status_queue, block_brange_queue, block_broots_queue, diff --git a/beacon_node/beacon_processor/src/scheduler/work_reprocessing_queue.rs b/beacon_node/beacon_processor/src/scheduler/work_reprocessing_queue.rs index c99388287c..38306b3bb6 100644 --- a/beacon_node/beacon_processor/src/scheduler/work_reprocessing_queue.rs +++ b/beacon_node/beacon_processor/src/scheduler/work_reprocessing_queue.rs @@ -35,6 +35,7 @@ use types::{EthSpec, Hash256, Slot}; const TASK_NAME: &str = "beacon_processor_reprocess_queue"; const GOSSIP_BLOCKS: &str = "gossip_blocks"; +const GOSSIP_ENVELOPES: &str = "gossip_envelopes"; const RPC_BLOCKS: &str = "rpc_blocks"; const ATTESTATIONS: &str = "attestations"; const ATTESTATIONS_PER_ROOT: &str = "attestations_per_root"; @@ -51,6 +52,10 @@ pub const QUEUED_ATTESTATION_DELAY: Duration = Duration::from_secs(12); /// For how long to queue light client updates for re-processing. pub const QUEUED_LIGHT_CLIENT_UPDATE_DELAY: Duration = Duration::from_secs(12); +/// Envelope timeout as a multiplier of slot duration. Envelopes waiting for their block will be +/// sent for processing after this many slots worth of time, even if the block hasn't arrived. +const QUEUED_ENVELOPE_DELAY_SLOTS: u32 = 1; + /// For how long to queue rpc blocks before sending them back for reprocessing. pub const QUEUED_RPC_BLOCK_DELAY: Duration = Duration::from_secs(4); @@ -65,6 +70,9 @@ pub const QUEUED_RECONSTRUCTION_DELAY: Duration = Duration::from_millis(150); /// it's nice to have extra protection. 
const MAXIMUM_QUEUED_BLOCKS: usize = 16; +/// Set an arbitrary upper-bound on the number of queued envelopes to avoid DoS attacks. +const MAXIMUM_QUEUED_ENVELOPES: usize = 16; + /// How many attestations we keep before new ones get dropped. const MAXIMUM_QUEUED_ATTESTATIONS: usize = 16_384; @@ -93,6 +101,8 @@ pub const RECONSTRUCTION_DEADLINE: (u64, u64) = (1, 4); pub enum ReprocessQueueMessage { /// A block that has been received early and we should queue for later processing. EarlyBlock(QueuedGossipBlock), + /// An execution payload envelope that references a block not yet in fork choice. + UnknownBlockForEnvelope(QueuedGossipEnvelope), /// A gossip block for hash `X` is being imported, we should queue the rpc block for the same /// hash until the gossip block is imported. RpcBlock(QueuedRpcBlock), @@ -120,6 +130,7 @@ pub enum ReprocessQueueMessage { /// Events sent by the scheduler once they are ready for re-processing. pub enum ReadyWork { Block(QueuedGossipBlock), + Envelope(QueuedGossipEnvelope), RpcBlock(QueuedRpcBlock), IgnoredRpcBlock(IgnoredRpcBlock), Unaggregate(QueuedUnaggregate), @@ -157,6 +168,13 @@ pub struct QueuedGossipBlock { pub process_fn: AsyncFn, } +/// An execution payload envelope that arrived early and has been queued for later import. +pub struct QueuedGossipEnvelope { + pub beacon_block_slot: Slot, + pub beacon_block_root: Hash256, + pub process_fn: AsyncFn, +} + /// A block that arrived for processing when the same block was being imported over gossip. /// It is queued for later import. pub struct QueuedRpcBlock { @@ -209,6 +227,8 @@ impl From for WorkEvent { enum InboundEvent { /// A gossip block that was queued for later processing and is ready for import. ReadyGossipBlock(QueuedGossipBlock), + /// An envelope whose block has been imported and is now ready for processing. + ReadyEnvelope(Hash256), /// A rpc block that was queued because the same gossip block was being imported /// will now be retried for import. 
ReadyRpcBlock(QueuedRpcBlock), @@ -234,6 +254,8 @@ struct ReprocessQueue { /* Queues */ /// Queue to manage scheduled early blocks. gossip_block_delay_queue: DelayQueue, + /// Queue to manage envelope timeouts (keyed by block root). + envelope_delay_queue: DelayQueue, /// Queue to manage scheduled early blocks. rpc_block_delay_queue: DelayQueue, /// Queue to manage scheduled attestations. @@ -246,6 +268,8 @@ struct ReprocessQueue { /* Queued items */ /// Queued blocks. queued_gossip_block_roots: HashSet, + /// Queued envelopes awaiting their block, keyed by block root. + awaiting_envelopes_per_root: HashMap, /// Queued aggregated attestations. queued_aggregates: FnvHashMap, /// Queued attestations. @@ -266,6 +290,7 @@ struct ReprocessQueue { next_attestation: usize, next_lc_update: usize, early_block_debounce: TimeLatch, + envelope_delay_debounce: TimeLatch, rpc_block_debounce: TimeLatch, attestation_delay_debounce: TimeLatch, lc_update_delay_debounce: TimeLatch, @@ -315,6 +340,13 @@ impl Stream for ReprocessQueue { Poll::Ready(None) | Poll::Pending => (), } + match self.envelope_delay_queue.poll_expired(cx) { + Poll::Ready(Some(block_root)) => { + return Poll::Ready(Some(InboundEvent::ReadyEnvelope(block_root.into_inner()))); + } + Poll::Ready(None) | Poll::Pending => (), + } + match self.rpc_block_delay_queue.poll_expired(cx) { Poll::Ready(Some(queued_block)) => { return Poll::Ready(Some(InboundEvent::ReadyRpcBlock(queued_block.into_inner()))); @@ -418,11 +450,13 @@ impl ReprocessQueue { work_reprocessing_rx, ready_work_tx, gossip_block_delay_queue: DelayQueue::new(), + envelope_delay_queue: DelayQueue::new(), rpc_block_delay_queue: DelayQueue::new(), attestations_delay_queue: DelayQueue::new(), lc_updates_delay_queue: DelayQueue::new(), column_reconstructions_delay_queue: DelayQueue::new(), queued_gossip_block_roots: HashSet::new(), + awaiting_envelopes_per_root: HashMap::new(), queued_lc_updates: FnvHashMap::default(), queued_aggregates: FnvHashMap::default(), 
queued_unaggregates: FnvHashMap::default(), @@ -433,6 +467,7 @@ impl ReprocessQueue { next_attestation: 0, next_lc_update: 0, early_block_debounce: TimeLatch::default(), + envelope_delay_debounce: TimeLatch::default(), rpc_block_debounce: TimeLatch::default(), attestation_delay_debounce: TimeLatch::default(), lc_update_delay_debounce: TimeLatch::default(), @@ -498,6 +533,52 @@ impl ReprocessQueue { } } } + // An envelope that references an unknown block. Queue it until the block is + // imported, or until the timeout expires. + InboundEvent::Msg(UnknownBlockForEnvelope(queued_envelope)) => { + let block_root = queued_envelope.beacon_block_root; + + // TODO(gloas): Perform lightweight pre-validation before queuing + // (e.g. verify builder signature) to prevent unsigned garbage from + // consuming queue slots. + + // Don't add the same envelope to the queue twice. This prevents DoS attacks. + if self.awaiting_envelopes_per_root.contains_key(&block_root) { + trace!( + ?block_root, + "Duplicate envelope for same block root, dropping" + ); + return; + } + + // When the queue is full, evict the oldest entry to make room for newer envelopes. + if self.awaiting_envelopes_per_root.len() >= MAXIMUM_QUEUED_ENVELOPES { + if self.envelope_delay_debounce.elapsed() { + warn!( + queue_size = MAXIMUM_QUEUED_ENVELOPES, + msg = "system resources may be saturated", + "Envelope delay queue is full, evicting oldest entry" + ); + } + if let Some(oldest_root) = + self.awaiting_envelopes_per_root.keys().next().copied() + && let Some((_envelope, delay_key)) = + self.awaiting_envelopes_per_root.remove(&oldest_root) + { + self.envelope_delay_queue.remove(&delay_key); + } + } + + // Register the timeout. + let delay_key = self.envelope_delay_queue.insert( + block_root, + self.slot_clock.slot_duration() * QUEUED_ENVELOPE_DELAY_SLOTS, + ); + + // Store the envelope keyed by block root. 
+ self.awaiting_envelopes_per_root + .insert(block_root, (queued_envelope, delay_key)); + } // A rpc block arrived for processing at the same time when a gossip block // for the same block hash is being imported. We wait for `QUEUED_RPC_BLOCK_DELAY` // and then send the rpc block back for processing assuming the gossip import @@ -647,6 +728,23 @@ impl ReprocessQueue { block_root, parent_root, }) => { + // Unqueue the envelope we have for this root, if any. + if let Some((envelope, delay_key)) = + self.awaiting_envelopes_per_root.remove(&block_root) + { + self.envelope_delay_queue.remove(&delay_key); + if self + .ready_work_tx + .try_send(ReadyWork::Envelope(envelope)) + .is_err() + { + error!( + ?block_root, + "Failed to send envelope for reprocessing after block import" + ); + } + } + // Unqueue the attestations we have for this root, if any. if let Some(queued_ids) = self.awaiting_attestations_per_root.remove(&block_root) { let mut sent_count = 0; @@ -802,6 +900,25 @@ impl ReprocessQueue { error!("Failed to pop queued block"); } } + // An envelope's timeout has expired. Send it for processing regardless of + // whether the block has been imported. 
+ InboundEvent::ReadyEnvelope(block_root) => { + if let Some((envelope, _delay_key)) = + self.awaiting_envelopes_per_root.remove(&block_root) + { + debug!( + ?block_root, + "Envelope timed out waiting for block, sending for processing" + ); + if self + .ready_work_tx + .try_send(ReadyWork::Envelope(envelope)) + .is_err() + { + error!(?block_root, "Failed to send envelope after timeout"); + } + } + } InboundEvent::ReadyAttestation(queued_id) => { metrics::inc_counter( &metrics::BEACON_PROCESSOR_REPROCESSING_QUEUE_EXPIRED_ATTESTATIONS, @@ -941,6 +1058,11 @@ impl ReprocessQueue { &[GOSSIP_BLOCKS], self.gossip_block_delay_queue.len() as i64, ); + metrics::set_gauge_vec( + &metrics::BEACON_PROCESSOR_REPROCESSING_QUEUE_TOTAL, + &[GOSSIP_ENVELOPES], + self.awaiting_envelopes_per_root.len() as i64, + ); metrics::set_gauge_vec( &metrics::BEACON_PROCESSOR_REPROCESSING_QUEUE_TOTAL, &[RPC_BLOCKS], @@ -1339,4 +1461,163 @@ mod tests { assert_eq!(reconstruction.block_root, block_root); } } + + // Test that envelopes are properly cleaned up from `awaiting_envelopes_per_root` on timeout. + #[tokio::test] + async fn prune_awaiting_envelopes_per_root() { + create_test_tracing_subscriber(); + + let mut queue = test_queue(); + + // Pause time so it only advances manually + tokio::time::pause(); + + let beacon_block_root = Hash256::repeat_byte(0xaf); + + // Insert an envelope. + let msg = ReprocessQueueMessage::UnknownBlockForEnvelope(QueuedGossipEnvelope { + beacon_block_slot: Slot::new(1), + beacon_block_root, + process_fn: Box::pin(async {}), + }); + + // Process the event to enter it into the delay queue. + queue.handle_message(InboundEvent::Msg(msg)); + + // Check that it is queued. + assert_eq!(queue.awaiting_envelopes_per_root.len(), 1); + assert!( + queue + .awaiting_envelopes_per_root + .contains_key(&beacon_block_root) + ); + + // Advance time to expire the envelope. 
+ advance_time( + &queue.slot_clock, + queue.slot_clock.slot_duration() * QUEUED_ENVELOPE_DELAY_SLOTS * 2, + ) + .await; + let ready_msg = queue.next().await.unwrap(); + assert!(matches!(ready_msg, InboundEvent::ReadyEnvelope(_))); + queue.handle_message(ready_msg); + + // The entry for the block root should be gone. + assert!(queue.awaiting_envelopes_per_root.is_empty()); + } + + #[tokio::test] + async fn envelope_released_on_block_imported() { + create_test_tracing_subscriber(); + + let mut queue = test_queue(); + + // Pause time so it only advances manually + tokio::time::pause(); + + let beacon_block_root = Hash256::repeat_byte(0xaf); + let parent_root = Hash256::repeat_byte(0xab); + + // Insert an envelope. + let msg = ReprocessQueueMessage::UnknownBlockForEnvelope(QueuedGossipEnvelope { + beacon_block_slot: Slot::new(1), + beacon_block_root, + process_fn: Box::pin(async {}), + }); + + // Process the event to enter it into the delay queue. + queue.handle_message(InboundEvent::Msg(msg)); + + // Check that it is queued. + assert_eq!(queue.awaiting_envelopes_per_root.len(), 1); + + // Simulate block import. + let imported = ReprocessQueueMessage::BlockImported { + block_root: beacon_block_root, + parent_root, + }; + queue.handle_message(InboundEvent::Msg(imported)); + + // The entry for the block root should be gone. + assert!(queue.awaiting_envelopes_per_root.is_empty()); + // Delay queue entry should also be cancelled. + assert_eq!(queue.envelope_delay_queue.len(), 0); + } + + #[tokio::test] + async fn envelope_dedup_drops_second() { + create_test_tracing_subscriber(); + + let mut queue = test_queue(); + + // Pause time so it only advances manually + tokio::time::pause(); + + let beacon_block_root = Hash256::repeat_byte(0xaf); + + // Insert an envelope. 
+ let msg1 = ReprocessQueueMessage::UnknownBlockForEnvelope(QueuedGossipEnvelope { + beacon_block_slot: Slot::new(1), + beacon_block_root, + process_fn: Box::pin(async {}), + }); + let msg2 = ReprocessQueueMessage::UnknownBlockForEnvelope(QueuedGossipEnvelope { + beacon_block_slot: Slot::new(1), + beacon_block_root, + process_fn: Box::pin(async {}), + }); + + // Process both events. + queue.handle_message(InboundEvent::Msg(msg1)); + queue.handle_message(InboundEvent::Msg(msg2)); + + // Only one should be queued. + assert_eq!(queue.awaiting_envelopes_per_root.len(), 1); + assert_eq!(queue.envelope_delay_queue.len(), 1); + } + + #[tokio::test] + async fn envelope_capacity_evicts_oldest() { + create_test_tracing_subscriber(); + + let mut queue = test_queue(); + + // Pause time so it only advances manually + tokio::time::pause(); + + // Fill the queue to capacity. + for i in 0..MAXIMUM_QUEUED_ENVELOPES { + let block_root = Hash256::repeat_byte(i as u8); + let msg = ReprocessQueueMessage::UnknownBlockForEnvelope(QueuedGossipEnvelope { + beacon_block_slot: Slot::new(1), + beacon_block_root: block_root, + process_fn: Box::pin(async {}), + }); + queue.handle_message(InboundEvent::Msg(msg)); + } + assert_eq!( + queue.awaiting_envelopes_per_root.len(), + MAXIMUM_QUEUED_ENVELOPES + ); + + // One more should evict the oldest and insert the new one. + let overflow_root = Hash256::repeat_byte(0xff); + let msg = ReprocessQueueMessage::UnknownBlockForEnvelope(QueuedGossipEnvelope { + beacon_block_slot: Slot::new(1), + beacon_block_root: overflow_root, + process_fn: Box::pin(async {}), + }); + queue.handle_message(InboundEvent::Msg(msg)); + + // Queue should still be at capacity, with the new root present. 
+ assert_eq!( + queue.awaiting_envelopes_per_root.len(), + MAXIMUM_QUEUED_ENVELOPES + ); + assert!( + queue + .awaiting_envelopes_per_root + .contains_key(&overflow_root) + ); + } } diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index 3335315157..1f55d9a878 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -20,7 +20,9 @@ use beacon_chain::{ }; use beacon_chain::{ blob_verification::{GossipBlobError, GossipVerifiedBlob}, - payload_envelope_verification::gossip_verified_envelope::GossipVerifiedEnvelope, + payload_envelope_verification::{ + EnvelopeError, gossip_verified_envelope::GossipVerifiedEnvelope, + }, }; use beacon_processor::{Work, WorkEvent}; use lighthouse_network::{Client, MessageAcceptance, MessageId, PeerAction, PeerId, ReportSource}; @@ -49,8 +51,8 @@ use beacon_processor::work_reprocessing_queue::QueuedColumnReconstruction; use beacon_processor::{ DuplicateCache, GossipAggregatePackage, GossipAttestationBatch, work_reprocessing_queue::{ - QueuedAggregate, QueuedGossipBlock, QueuedLightClientUpdate, QueuedUnaggregate, - ReprocessQueueMessage, + QueuedAggregate, QueuedGossipBlock, QueuedGossipEnvelope, QueuedLightClientUpdate, + QueuedUnaggregate, ReprocessQueueMessage, }, }; @@ -3332,6 +3334,61 @@ impl NetworkBeaconProcessor { verified_envelope } + + Err(EnvelopeError::BlockRootUnknown { block_root }) => { + let envelope_slot = envelope.slot(); + + debug!( + ?block_root, + %envelope_slot, + "Envelope references unknown block, deferring to reprocess queue" + ); + + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); + + let inner_self = self.clone(); + let chain = self.chain.clone(); + let process_fn = Box::pin(async move { + match chain.verify_envelope_for_gossip(envelope).await { + Ok(verified_envelope) => { + 
inner_self + .process_gossip_verified_execution_payload_envelope( + peer_id, + verified_envelope, + ) + .await; + } + Err(e) => { + debug!( + error = ?e, + "Deferred envelope failed verification" + ); + } + } + }); + + if self + .beacon_processor_send + .try_send(WorkEvent { + drop_during_sync: false, + work: Work::Reprocess(ReprocessQueueMessage::UnknownBlockForEnvelope( + QueuedGossipEnvelope { + beacon_block_slot: envelope_slot, + beacon_block_root: block_root, + process_fn, + }, + )), + }) + .is_err() + { + error!( + %envelope_slot, + ?block_root, + "Failed to defer envelope import" + ); + } + return None; + } // TODO(gloas) penalize peers accordingly Err(_) => return None, }; diff --git a/beacon_node/network/src/network_beacon_processor/tests.rs b/beacon_node/network/src/network_beacon_processor/tests.rs index 5fa8c729cb..c5ccbc2ae6 100644 --- a/beacon_node/network/src/network_beacon_processor/tests.rs +++ b/beacon_node/network/src/network_beacon_processor/tests.rs @@ -2090,3 +2090,8 @@ async fn test_data_columns_by_range_no_duplicates_with_skip_slots() { unique_roots.len(), ); } + +// TODO(ePBS): Add integration tests for envelope deferral (UnknownBlockForEnvelope): +// 1. Gossip envelope arrives before its block → queued via UnknownBlockForEnvelope +// 2. Block imported → envelope released and processed successfully +// 3. Timeout path → envelope released and re-verified From a965bfdf77a0b1a3cb2471b9df787edbe99779e8 Mon Sep 17 00:00:00 2001 From: Mac L Date: Wed, 18 Mar 2026 04:24:58 +0300 Subject: [PATCH 28/43] Remove `lighthouse/analysis` endpoints (#8968) Some of our custom `lighthouse/analysis` endpoints will require maintenance for the Gloas hard fork. We have decided instead to remove those endpoints. We don't utilize them internally and they have pretty limited utility and so we feel they are not worth maintaining. Remove `lighthouse/analysis/attestation_performance` and `lighthouse/analysis/block_packing_efficiency` endpoints. 
Co-Authored-By: Mac L --- .github/forbidden-files.txt | 4 + .../http_api/src/attestation_performance.rs | 217 --------- .../http_api/src/block_packing_efficiency.rs | 410 ------------------ beacon_node/http_api/src/lib.rs | 37 -- book/src/api_lighthouse.md | 120 ----- common/eth2/src/lighthouse.rs | 56 --- .../src/lighthouse/attestation_performance.rs | 39 -- .../lighthouse/block_packing_efficiency.rs | 34 -- testing/simulator/src/checks.rs | 46 +- 9 files changed, 26 insertions(+), 937 deletions(-) delete mode 100644 beacon_node/http_api/src/attestation_performance.rs delete mode 100644 beacon_node/http_api/src/block_packing_efficiency.rs delete mode 100644 common/eth2/src/lighthouse/attestation_performance.rs delete mode 100644 common/eth2/src/lighthouse/block_packing_efficiency.rs diff --git a/.github/forbidden-files.txt b/.github/forbidden-files.txt index a08a6b4e98..b070067350 100644 --- a/.github/forbidden-files.txt +++ b/.github/forbidden-files.txt @@ -6,5 +6,9 @@ beacon_node/beacon_chain/src/otb_verification_service.rs beacon_node/store/src/partial_beacon_state.rs beacon_node/store/src/consensus_context.rs beacon_node/beacon_chain/src/block_reward.rs +beacon_node/http_api/src/attestation_performance.rs +beacon_node/http_api/src/block_packing_efficiency.rs beacon_node/http_api/src/block_rewards.rs +common/eth2/src/lighthouse/attestation_performance.rs +common/eth2/src/lighthouse/block_packing_efficiency.rs common/eth2/src/lighthouse/block_rewards.rs diff --git a/beacon_node/http_api/src/attestation_performance.rs b/beacon_node/http_api/src/attestation_performance.rs deleted file mode 100644 index 05ed36e68b..0000000000 --- a/beacon_node/http_api/src/attestation_performance.rs +++ /dev/null @@ -1,217 +0,0 @@ -use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; -use eth2::lighthouse::{ - AttestationPerformance, AttestationPerformanceQuery, AttestationPerformanceStatistics, -}; -use state_processing::{ - BlockReplayError, BlockReplayer, 
per_epoch_processing::EpochProcessingSummary, -}; -use std::sync::Arc; -use types::{BeaconState, BeaconStateError, EthSpec, Hash256}; -use warp_utils::reject::{custom_bad_request, custom_server_error, unhandled_error}; - -const MAX_REQUEST_RANGE_EPOCHS: usize = 100; -const BLOCK_ROOT_CHUNK_SIZE: usize = 100; - -#[derive(Debug)] -// We don't use the inner values directly, but they're used in the Debug impl. -enum AttestationPerformanceError { - BlockReplay(#[allow(dead_code)] BlockReplayError), - BeaconState(#[allow(dead_code)] BeaconStateError), - UnableToFindValidator(#[allow(dead_code)] usize), -} - -impl From for AttestationPerformanceError { - fn from(e: BlockReplayError) -> Self { - Self::BlockReplay(e) - } -} - -impl From for AttestationPerformanceError { - fn from(e: BeaconStateError) -> Self { - Self::BeaconState(e) - } -} - -pub fn get_attestation_performance( - target: String, - query: AttestationPerformanceQuery, - chain: Arc>, -) -> Result, warp::Rejection> { - let spec = &chain.spec; - // We increment by 2 here so that when we build the state from the `prior_slot` it is - // still 1 epoch ahead of the first epoch we want to analyse. - // This ensures the `.is_previous_epoch_X` functions on `EpochProcessingSummary` return results - // for the correct epoch. - let start_epoch = query.start_epoch + 2; - let start_slot = start_epoch.start_slot(T::EthSpec::slots_per_epoch()); - let prior_slot = start_slot - 1; - - let end_epoch = query.end_epoch + 2; - let end_slot = end_epoch.end_slot(T::EthSpec::slots_per_epoch()); - - // Ensure end_epoch is smaller than the current epoch - 1. - let current_epoch = chain.epoch().map_err(unhandled_error)?; - if query.end_epoch >= current_epoch - 1 { - return Err(custom_bad_request(format!( - "end_epoch must be less than the current epoch - 1. current: {}, end: {}", - current_epoch, query.end_epoch - ))); - } - - // Check query is valid. 
- if start_epoch > end_epoch { - return Err(custom_bad_request(format!( - "start_epoch must not be larger than end_epoch. start: {}, end: {}", - query.start_epoch, query.end_epoch - ))); - } - - // The response size can grow exceptionally large therefore we should check that the - // query is within permitted bounds to prevent potential OOM errors. - if (end_epoch - start_epoch).as_usize() > MAX_REQUEST_RANGE_EPOCHS { - return Err(custom_bad_request(format!( - "end_epoch must not exceed start_epoch by more than {} epochs. start: {}, end: {}", - MAX_REQUEST_RANGE_EPOCHS, query.start_epoch, query.end_epoch - ))); - } - - // Either use the global validator set, or the specified index. - // - // Does no further validation of the indices, so in the event an index has not yet been - // activated or does not yet exist (according to the head state), it will return all fields as - // `false`. - let index_range = if target.to_lowercase() == "global" { - chain - .with_head(|head| Ok((0..head.beacon_state.validators().len() as u64).collect())) - .map_err(unhandled_error::)? - } else { - vec![target.parse::().map_err(|_| { - custom_bad_request(format!( - "Invalid validator index: {:?}", - target.to_lowercase() - )) - })?] - }; - - // Load block roots. - let mut block_roots: Vec = chain - .forwards_iter_block_roots_until(start_slot, end_slot) - .map_err(unhandled_error)? - .map(|res| res.map(|(root, _)| root)) - .collect::, _>>() - .map_err(unhandled_error)?; - block_roots.dedup(); - - // Load first block so we can get its parent. - let first_block_root = block_roots.first().ok_or_else(|| { - custom_server_error( - "No blocks roots could be loaded. 
Ensure the beacon node is synced.".to_string(), - ) - })?; - let first_block = chain - .get_blinded_block(first_block_root) - .and_then(|maybe_block| { - maybe_block.ok_or(BeaconChainError::MissingBeaconBlock(*first_block_root)) - }) - .map_err(unhandled_error)?; - - // Load the block of the prior slot which will be used to build the starting state. - let prior_block = chain - .get_blinded_block(&first_block.parent_root()) - .and_then(|maybe_block| { - maybe_block - .ok_or_else(|| BeaconChainError::MissingBeaconBlock(first_block.parent_root())) - }) - .map_err(unhandled_error)?; - - // Load state for block replay. - let state_root = prior_block.state_root(); - - // This branch is reached from the HTTP API. We assume the user wants - // to cache states so that future calls are faster. - let state = chain - .get_state(&state_root, Some(prior_slot), true) - .and_then(|maybe_state| maybe_state.ok_or(BeaconChainError::MissingBeaconState(state_root))) - .map_err(unhandled_error)?; - - // Allocate an AttestationPerformance vector for each validator in the range. - let mut perfs: Vec = - AttestationPerformance::initialize(index_range.clone()); - - let post_slot_hook = |state: &mut BeaconState, - summary: Option>, - _is_skip_slot: bool| - -> Result<(), AttestationPerformanceError> { - // If a `summary` was not output then an epoch boundary was not crossed - // so we move onto the next slot. - if let Some(summary) = summary { - for (position, i) in index_range.iter().enumerate() { - let index = *i as usize; - - let val = perfs - .get_mut(position) - .ok_or(AttestationPerformanceError::UnableToFindValidator(index))?; - - // We are two epochs ahead since the summary is generated for - // `state.previous_epoch()` then `summary.is_previous_epoch_X` functions return - // data for the epoch before that. 
- let epoch = state.previous_epoch().as_u64() - 1; - - let is_active = summary.is_active_unslashed_in_previous_epoch(index); - - let received_source_reward = summary.is_previous_epoch_source_attester(index)?; - - let received_head_reward = summary.is_previous_epoch_head_attester(index)?; - - let received_target_reward = summary.is_previous_epoch_target_attester(index)?; - - let inclusion_delay = summary - .previous_epoch_inclusion_info(index) - .map(|info| info.delay); - - let perf = AttestationPerformanceStatistics { - active: is_active, - head: received_head_reward, - target: received_target_reward, - source: received_source_reward, - delay: inclusion_delay, - }; - - val.epochs.insert(epoch, perf); - } - } - Ok(()) - }; - - // Initialize block replayer - let mut replayer = BlockReplayer::new(state, spec) - .no_state_root_iter() - .no_signature_verification() - .minimal_block_root_verification() - .post_slot_hook(Box::new(post_slot_hook)); - - // Iterate through block roots in chunks to reduce load on memory. - for block_root_chunks in block_roots.chunks(BLOCK_ROOT_CHUNK_SIZE) { - // Load blocks from the block root chunks. 
- let blocks = block_root_chunks - .iter() - .map(|root| { - chain - .get_blinded_block(root) - .and_then(|maybe_block| { - maybe_block.ok_or(BeaconChainError::MissingBeaconBlock(*root)) - }) - .map_err(unhandled_error) - }) - .collect::, _>>()?; - - // TODO(gloas): add payloads - replayer = replayer - .apply_blocks(blocks, vec![], None) - .map_err(|e| custom_server_error(format!("{:?}", e)))?; - } - - drop(replayer); - - Ok(perfs) -} diff --git a/beacon_node/http_api/src/block_packing_efficiency.rs b/beacon_node/http_api/src/block_packing_efficiency.rs deleted file mode 100644 index 725a0648a5..0000000000 --- a/beacon_node/http_api/src/block_packing_efficiency.rs +++ /dev/null @@ -1,410 +0,0 @@ -use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; -use eth2::lighthouse::{ - BlockPackingEfficiency, BlockPackingEfficiencyQuery, ProposerInfo, UniqueAttestation, -}; -use parking_lot::Mutex; -use state_processing::{ - BlockReplayError, BlockReplayer, per_epoch_processing::EpochProcessingSummary, -}; -use std::collections::{HashMap, HashSet}; -use std::marker::PhantomData; -use std::sync::Arc; -use types::{ - AttestationRef, BeaconCommittee, BeaconState, BeaconStateError, BlindedPayload, ChainSpec, - Epoch, EthSpec, Hash256, OwnedBeaconCommittee, RelativeEpoch, SignedBeaconBlock, Slot, -}; -use warp_utils::reject::{custom_bad_request, custom_server_error, unhandled_error}; - -/// Load blocks from block roots in chunks to reduce load on memory. -const BLOCK_ROOT_CHUNK_SIZE: usize = 100; - -#[derive(Debug)] -// We don't use the inner values directly, but they're used in the Debug impl. 
-enum PackingEfficiencyError { - BlockReplay(#[allow(dead_code)] BlockReplayError), - BeaconState(#[allow(dead_code)] BeaconStateError), - CommitteeStoreError(#[allow(dead_code)] Slot), - InvalidAttestationError, -} - -impl From for PackingEfficiencyError { - fn from(e: BlockReplayError) -> Self { - Self::BlockReplay(e) - } -} - -impl From for PackingEfficiencyError { - fn from(e: BeaconStateError) -> Self { - Self::BeaconState(e) - } -} - -struct CommitteeStore { - current_epoch_committees: Vec, - previous_epoch_committees: Vec, -} - -impl CommitteeStore { - fn new() -> Self { - CommitteeStore { - current_epoch_committees: Vec::new(), - previous_epoch_committees: Vec::new(), - } - } -} - -struct PackingEfficiencyHandler { - current_slot: Slot, - current_epoch: Epoch, - prior_skip_slots: u64, - available_attestations: HashSet, - included_attestations: HashMap, - committee_store: CommitteeStore, - _phantom: PhantomData, -} - -impl PackingEfficiencyHandler { - fn new( - start_epoch: Epoch, - starting_state: BeaconState, - spec: &ChainSpec, - ) -> Result { - let mut handler = PackingEfficiencyHandler { - current_slot: start_epoch.start_slot(E::slots_per_epoch()), - current_epoch: start_epoch, - prior_skip_slots: 0, - available_attestations: HashSet::new(), - included_attestations: HashMap::new(), - committee_store: CommitteeStore::new(), - _phantom: PhantomData, - }; - - handler.compute_epoch(start_epoch, &starting_state, spec)?; - Ok(handler) - } - - fn update_slot(&mut self, slot: Slot) { - self.current_slot = slot; - if slot % E::slots_per_epoch() == 0 { - self.current_epoch = Epoch::new(slot.as_u64() / E::slots_per_epoch()); - } - } - - fn prune_included_attestations(&mut self) { - let epoch = self.current_epoch; - self.included_attestations.retain(|x, _| { - x.slot >= Epoch::new(epoch.as_u64().saturating_sub(2)).start_slot(E::slots_per_epoch()) - }); - } - - fn prune_available_attestations(&mut self) { - let slot = self.current_slot; - self.available_attestations 
- .retain(|x| x.slot >= (slot.as_u64().saturating_sub(E::slots_per_epoch()))); - } - - fn apply_block( - &mut self, - block: &SignedBeaconBlock>, - ) -> Result { - let block_body = block.message().body(); - let attestations = block_body.attestations(); - - let mut attestations_in_block = HashMap::new(); - for attestation in attestations { - match attestation { - AttestationRef::Base(attn) => { - for (position, voted) in attn.aggregation_bits.iter().enumerate() { - if voted { - let unique_attestation = UniqueAttestation { - slot: attn.data.slot, - committee_index: attn.data.index, - committee_position: position, - }; - let inclusion_distance: u64 = block - .slot() - .as_u64() - .checked_sub(attn.data.slot.as_u64()) - .ok_or(PackingEfficiencyError::InvalidAttestationError)?; - - self.available_attestations.remove(&unique_attestation); - attestations_in_block.insert(unique_attestation, inclusion_distance); - } - } - } - AttestationRef::Electra(attn) => { - for (position, voted) in attn.aggregation_bits.iter().enumerate() { - if voted { - let unique_attestation = UniqueAttestation { - slot: attn.data.slot, - committee_index: attn.data.index, - committee_position: position, - }; - let inclusion_distance: u64 = block - .slot() - .as_u64() - .checked_sub(attn.data.slot.as_u64()) - .ok_or(PackingEfficiencyError::InvalidAttestationError)?; - - self.available_attestations.remove(&unique_attestation); - attestations_in_block.insert(unique_attestation, inclusion_distance); - } - } - } - } - } - - // Remove duplicate attestations as these yield no reward. 
- attestations_in_block.retain(|x, _| !self.included_attestations.contains_key(x)); - self.included_attestations - .extend(attestations_in_block.clone()); - - Ok(attestations_in_block.len()) - } - - fn add_attestations(&mut self, slot: Slot) -> Result<(), PackingEfficiencyError> { - let committees = self.get_committees_at_slot(slot)?; - for committee in committees { - for position in 0..committee.committee.len() { - let unique_attestation = UniqueAttestation { - slot, - committee_index: committee.index, - committee_position: position, - }; - self.available_attestations.insert(unique_attestation); - } - } - - Ok(()) - } - - fn compute_epoch( - &mut self, - epoch: Epoch, - state: &BeaconState, - spec: &ChainSpec, - ) -> Result<(), PackingEfficiencyError> { - // Free some memory by pruning old attestations from the included set. - self.prune_included_attestations(); - - let new_committees = if state.committee_cache_is_initialized(RelativeEpoch::Current) { - state - .get_beacon_committees_at_epoch(RelativeEpoch::Current)? - .into_iter() - .map(BeaconCommittee::into_owned) - .collect::>() - } else { - state - .initialize_committee_cache(epoch, spec)? - .get_all_beacon_committees()? 
- .into_iter() - .map(BeaconCommittee::into_owned) - .collect::>() - }; - - self.committee_store - .previous_epoch_committees - .clone_from(&self.committee_store.current_epoch_committees); - - self.committee_store.current_epoch_committees = new_committees; - - Ok(()) - } - - fn get_committees_at_slot( - &self, - slot: Slot, - ) -> Result, PackingEfficiencyError> { - let mut committees = Vec::new(); - - for committee in &self.committee_store.current_epoch_committees { - if committee.slot == slot { - committees.push(committee.clone()); - } - } - for committee in &self.committee_store.previous_epoch_committees { - if committee.slot == slot { - committees.push(committee.clone()); - } - } - - if committees.is_empty() { - return Err(PackingEfficiencyError::CommitteeStoreError(slot)); - } - - Ok(committees) - } -} - -pub fn get_block_packing_efficiency( - query: BlockPackingEfficiencyQuery, - chain: Arc>, -) -> Result, warp::Rejection> { - let spec = &chain.spec; - - let start_epoch = query.start_epoch; - let start_slot = start_epoch.start_slot(T::EthSpec::slots_per_epoch()); - let prior_slot = start_slot - 1; - - let end_epoch = query.end_epoch; - let end_slot = end_epoch.end_slot(T::EthSpec::slots_per_epoch()); - - // Check query is valid. - if start_epoch > end_epoch || start_epoch == 0 { - return Err(custom_bad_request(format!( - "invalid start and end epochs: {}, {}", - start_epoch, end_epoch - ))); - } - - let prior_epoch = start_epoch - 1; - let start_slot_of_prior_epoch = prior_epoch.start_slot(T::EthSpec::slots_per_epoch()); - - // Load block roots. - let mut block_roots: Vec = chain - .forwards_iter_block_roots_until(start_slot_of_prior_epoch, end_slot) - .map_err(unhandled_error)? - .collect::, _>>() - .map_err(unhandled_error)? 
- .iter() - .map(|(root, _)| *root) - .collect(); - block_roots.dedup(); - - let first_block_root = block_roots - .first() - .ok_or_else(|| custom_server_error("no blocks were loaded".to_string()))?; - - let first_block = chain - .get_blinded_block(first_block_root) - .and_then(|maybe_block| { - maybe_block.ok_or(BeaconChainError::MissingBeaconBlock(*first_block_root)) - }) - .map_err(unhandled_error)?; - - // Load state for block replay. - let starting_state_root = first_block.state_root(); - - // This branch is reached from the HTTP API. We assume the user wants - // to cache states so that future calls are faster. - let starting_state = chain - .get_state(&starting_state_root, Some(prior_slot), true) - .and_then(|maybe_state| { - maybe_state.ok_or(BeaconChainError::MissingBeaconState(starting_state_root)) - }) - .map_err(unhandled_error)?; - - // Initialize response vector. - let mut response = Vec::new(); - - // Initialize handler. - let handler = Arc::new(Mutex::new( - PackingEfficiencyHandler::new(prior_epoch, starting_state.clone(), spec) - .map_err(|e| custom_server_error(format!("{:?}", e)))?, - )); - - let pre_slot_hook = - |_, state: &mut BeaconState| -> Result<(), PackingEfficiencyError> { - // Add attestations to `available_attestations`. - handler.lock().add_attestations(state.slot())?; - Ok(()) - }; - - let post_slot_hook = |state: &mut BeaconState, - _summary: Option>, - is_skip_slot: bool| - -> Result<(), PackingEfficiencyError> { - handler.lock().update_slot(state.slot()); - - // Check if this a new epoch. - if state.slot() % T::EthSpec::slots_per_epoch() == 0 { - handler.lock().compute_epoch( - state.slot().epoch(T::EthSpec::slots_per_epoch()), - state, - spec, - )?; - } - - if is_skip_slot { - handler.lock().prior_skip_slots += 1; - } - - // Remove expired attestations. 
- handler.lock().prune_available_attestations(); - - Ok(()) - }; - - let pre_block_hook = |_state: &mut BeaconState, - block: &SignedBeaconBlock<_, BlindedPayload<_>>| - -> Result<(), PackingEfficiencyError> { - let slot = block.slot(); - - let block_message = block.message(); - // Get block proposer info. - let proposer_info = ProposerInfo { - validator_index: block_message.proposer_index(), - graffiti: block_message.body().graffiti().as_utf8_lossy(), - }; - - // Store the count of available attestations at this point. - // In the future it may be desirable to check that the number of available attestations - // does not exceed the maximum possible amount given the length of available committees. - let available_count = handler.lock().available_attestations.len(); - - // Get all attestations included in the block. - let included = handler.lock().apply_block(block)?; - - let efficiency = BlockPackingEfficiency { - slot, - block_hash: block.canonical_root(), - proposer_info, - available_attestations: available_count, - included_attestations: included, - prior_skip_slots: handler.lock().prior_skip_slots, - }; - - // Write to response. - if slot >= start_slot { - response.push(efficiency); - } - - handler.lock().prior_skip_slots = 0; - - Ok(()) - }; - - // Build BlockReplayer. - let mut replayer = BlockReplayer::new(starting_state, spec) - .no_state_root_iter() - .no_signature_verification() - .minimal_block_root_verification() - .pre_slot_hook(Box::new(pre_slot_hook)) - .post_slot_hook(Box::new(post_slot_hook)) - .pre_block_hook(Box::new(pre_block_hook)); - - // Iterate through the block roots, loading blocks in chunks to reduce load on memory. - for block_root_chunks in block_roots.chunks(BLOCK_ROOT_CHUNK_SIZE) { - // Load blocks from the block root chunks. 
- let blocks = block_root_chunks - .iter() - .map(|root| { - chain - .get_blinded_block(root) - .and_then(|maybe_block| { - maybe_block.ok_or(BeaconChainError::MissingBeaconBlock(*root)) - }) - .map_err(unhandled_error) - }) - .collect::, _>>()?; - - // TODO(gloas): add payloads - replayer = replayer - .apply_blocks(blocks, vec![], None) - .map_err(|e: PackingEfficiencyError| custom_server_error(format!("{:?}", e)))?; - } - - drop(replayer); - - Ok(response) -} diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index fc92128c91..29e2d39aee 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -7,11 +7,9 @@ //! used for development. mod aggregate_attestation; -mod attestation_performance; mod attester_duties; mod beacon; mod block_id; -mod block_packing_efficiency; mod build_block_contents; mod builder_states; mod custody; @@ -3091,39 +3089,6 @@ pub fn serve( }, ); - // GET lighthouse/analysis/attestation_performance/{index} - let get_lighthouse_attestation_performance = warp::path("lighthouse") - .and(warp::path("analysis")) - .and(warp::path("attestation_performance")) - .and(warp::path::param::()) - .and(warp::query::()) - .and(warp::path::end()) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()) - .then( - |target, query, task_spawner: TaskSpawner, chain: Arc>| { - task_spawner.blocking_json_task(Priority::P1, move || { - attestation_performance::get_attestation_performance(target, query, chain) - }) - }, - ); - - // GET lighthouse/analysis/block_packing_efficiency - let get_lighthouse_block_packing_efficiency = warp::path("lighthouse") - .and(warp::path("analysis")) - .and(warp::path("block_packing_efficiency")) - .and(warp::query::()) - .and(warp::path::end()) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()) - .then( - |query, task_spawner: TaskSpawner, chain: Arc>| { - task_spawner.blocking_json_task(Priority::P1, move || { - 
block_packing_efficiency::get_block_packing_efficiency(query, chain) - }) - }, - ); - let get_events = eth_v1 .clone() .and(warp::path("events")) @@ -3359,12 +3324,10 @@ pub fn serve( .uor(get_lighthouse_database_info) .uor(get_lighthouse_database_invariants) .uor(get_lighthouse_custody_info) - .uor(get_lighthouse_attestation_performance) .uor(get_beacon_light_client_optimistic_update) .uor(get_beacon_light_client_finality_update) .uor(get_beacon_light_client_bootstrap) .uor(get_beacon_light_client_updates) - .uor(get_lighthouse_block_packing_efficiency) .uor(get_events) .uor(get_expected_withdrawals) .uor(lighthouse_log_events.boxed()) diff --git a/book/src/api_lighthouse.md b/book/src/api_lighthouse.md index 2fd7290cb2..c2e4fbdd5a 100644 --- a/book/src/api_lighthouse.md +++ b/book/src/api_lighthouse.md @@ -512,126 +512,6 @@ As all testnets and Mainnet have been merged, both values will be the same after } ``` -## `/lighthouse/analysis/attestation_performance/{index}` - -Fetch information about the attestation performance of a validator index or all validators for a -range of consecutive epochs. - -Two query parameters are required: - -- `start_epoch` (inclusive): the first epoch to compute attestation performance for. -- `end_epoch` (inclusive): the final epoch to compute attestation performance for. 
- -Example: - -```bash -curl -X GET "http://localhost:5052/lighthouse/analysis/attestation_performance/1?start_epoch=1&end_epoch=1" | jq -``` - -```json -[ - { - "index": 1, - "epochs": { - "1": { - "active": true, - "head": true, - "target": true, - "source": true, - "delay": 1 - } - } - } -] -``` - -Instead of specifying a validator index, you can specify the entire validator set by using `global`: - -```bash -curl -X GET "http://localhost:5052/lighthouse/analysis/attestation_performance/global?start_epoch=1&end_epoch=1" | jq -``` - -```json -[ - { - "index": 0, - "epochs": { - "1": { - "active": true, - "head": true, - "target": true, - "source": true, - "delay": 1 - } - } - }, - { - "index": 1, - "epochs": { - "1": { - "active": true, - "head": true, - "target": true, - "source": true, - "delay": 1 - } - } - }, - { - .. - } -] - -``` - -Caveats: - -- For maximum efficiency the start_epoch should satisfy `(start_epoch * slots_per_epoch) % slots_per_restore_point == 1`. - This is because the state *prior* to the `start_epoch` needs to be loaded from the database, - and loading a state on a boundary is most efficient. - -## `/lighthouse/analysis/block_packing` - -Fetch information about the block packing efficiency of blocks for a range of consecutive -epochs. - -Two query parameters are required: - -- `start_epoch` (inclusive): the epoch of the first block to compute packing efficiency for. -- `end_epoch` (inclusive): the epoch of the last block to compute packing efficiency for. 
- -```bash -curl -X GET "http://localhost:5052/lighthouse/analysis/block_packing_efficiency?start_epoch=1&end_epoch=1" | jq -``` - -An excerpt of the response looks like: - -```json -[ - { - "slot": "33", - "block_hash": "0xb20970bb97c6c6de6b1e2b689d6381dd15b3d3518fbaee032229495f963bd5da", - "proposer_info": { - "validator_index": 855, - "graffiti": "poapZoJ7zWNfK7F3nWjEausWVBvKa6gA" - }, - "available_attestations": 3805, - "included_attestations": 1143, - "prior_skip_slots": 1 - }, - { - .. - } -] -``` - -Caveats: - -- `start_epoch` must not be `0`. -- For maximum efficiency the `start_epoch` should satisfy `(start_epoch * slots_per_epoch) % slots_per_restore_point == 1`. - This is because the state *prior* to the `start_epoch` needs to be loaded from the database, and - loading a state on a boundary is most efficient. - ## `/lighthouse/logs` This is a Server Side Event subscription endpoint. This allows a user to read diff --git a/common/eth2/src/lighthouse.rs b/common/eth2/src/lighthouse.rs index 3c039b16b3..5ff7a7e0f0 100644 --- a/common/eth2/src/lighthouse.rs +++ b/common/eth2/src/lighthouse.rs @@ -1,7 +1,5 @@ //! This module contains endpoints that are non-standard and only available on Lighthouse servers. -mod attestation_performance; -mod block_packing_efficiency; mod custody; pub mod sync_state; @@ -15,12 +13,6 @@ use serde::{Deserialize, Serialize}; use ssz::four_byte_option_impl; use ssz_derive::{Decode, Encode}; -pub use attestation_performance::{ - AttestationPerformance, AttestationPerformanceQuery, AttestationPerformanceStatistics, -}; -pub use block_packing_efficiency::{ - BlockPackingEfficiency, BlockPackingEfficiencyQuery, ProposerInfo, UniqueAttestation, -}; pub use custody::CustodyInfo; // Define "legacy" implementations of `Option` which use four bytes for encoding the union @@ -310,52 +302,4 @@ impl BeaconNodeHttpClient { self.post_with_response(path, &req).await } - - /* - Analysis endpoints. 
- */ - - /// `GET` lighthouse/analysis/block_packing?start_epoch,end_epoch - pub async fn get_lighthouse_analysis_block_packing( - &self, - start_epoch: Epoch, - end_epoch: Epoch, - ) -> Result, Error> { - let mut path = self.server.expose_full().clone(); - - path.path_segments_mut() - .map_err(|()| Error::InvalidUrl(self.server.clone()))? - .push("lighthouse") - .push("analysis") - .push("block_packing_efficiency"); - - path.query_pairs_mut() - .append_pair("start_epoch", &start_epoch.to_string()) - .append_pair("end_epoch", &end_epoch.to_string()); - - self.get(path).await - } - - /// `GET` lighthouse/analysis/attestation_performance/{index}?start_epoch,end_epoch - pub async fn get_lighthouse_analysis_attestation_performance( - &self, - start_epoch: Epoch, - end_epoch: Epoch, - target: String, - ) -> Result, Error> { - let mut path = self.server.expose_full().clone(); - - path.path_segments_mut() - .map_err(|()| Error::InvalidUrl(self.server.clone()))? - .push("lighthouse") - .push("analysis") - .push("attestation_performance") - .push(&target); - - path.query_pairs_mut() - .append_pair("start_epoch", &start_epoch.to_string()) - .append_pair("end_epoch", &end_epoch.to_string()); - - self.get(path).await - } } diff --git a/common/eth2/src/lighthouse/attestation_performance.rs b/common/eth2/src/lighthouse/attestation_performance.rs deleted file mode 100644 index 5ce1d90a38..0000000000 --- a/common/eth2/src/lighthouse/attestation_performance.rs +++ /dev/null @@ -1,39 +0,0 @@ -use serde::{Deserialize, Serialize}; -use std::collections::HashMap; -use types::Epoch; - -#[derive(Debug, Default, PartialEq, Clone, Serialize, Deserialize)] -pub struct AttestationPerformanceStatistics { - pub active: bool, - pub head: bool, - pub target: bool, - pub source: bool, - #[serde(skip_serializing_if = "Option::is_none")] - pub delay: Option, -} - -#[derive(Debug, Default, PartialEq, Clone, Serialize, Deserialize)] -pub struct AttestationPerformance { - pub index: u64, - pub epochs: 
HashMap, -} - -impl AttestationPerformance { - pub fn initialize(indices: Vec) -> Vec { - let mut vec = Vec::with_capacity(indices.len()); - for index in indices { - vec.push(Self { - index, - ..Default::default() - }) - } - vec - } -} - -/// Query parameters for the `/lighthouse/analysis/attestation_performance` endpoint. -#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] -pub struct AttestationPerformanceQuery { - pub start_epoch: Epoch, - pub end_epoch: Epoch, -} diff --git a/common/eth2/src/lighthouse/block_packing_efficiency.rs b/common/eth2/src/lighthouse/block_packing_efficiency.rs deleted file mode 100644 index 0ad6f46031..0000000000 --- a/common/eth2/src/lighthouse/block_packing_efficiency.rs +++ /dev/null @@ -1,34 +0,0 @@ -use serde::{Deserialize, Serialize}; -use types::{Epoch, Hash256, Slot}; - -type CommitteePosition = usize; -type Committee = u64; -type ValidatorIndex = u64; - -#[derive(Debug, Default, PartialEq, Eq, Hash, Clone, Serialize, Deserialize)] -pub struct UniqueAttestation { - pub slot: Slot, - pub committee_index: Committee, - pub committee_position: CommitteePosition, -} -#[derive(Debug, Default, PartialEq, Clone, Serialize, Deserialize)] -pub struct ProposerInfo { - pub validator_index: ValidatorIndex, - pub graffiti: String, -} - -#[derive(Debug, Default, PartialEq, Clone, Serialize, Deserialize)] -pub struct BlockPackingEfficiency { - pub slot: Slot, - pub block_hash: Hash256, - pub proposer_info: ProposerInfo, - pub available_attestations: usize, - pub included_attestations: usize, - pub prior_skip_slots: u64, -} - -#[derive(Debug, Default, PartialEq, Clone, Serialize, Deserialize)] -pub struct BlockPackingEfficiencyQuery { - pub start_epoch: Epoch, - pub end_epoch: Epoch, -} diff --git a/testing/simulator/src/checks.rs b/testing/simulator/src/checks.rs index 35200692c3..de202e5812 100644 --- a/testing/simulator/src/checks.rs +++ b/testing/simulator/src/checks.rs @@ -463,6 +463,9 @@ pub async fn reconnect_to_execution_layer( 
} /// Ensure all validators have attested correctly. +/// +/// Checks attestation rewards for head, target, and source. +/// A positive reward indicates a correct vote. pub async fn check_attestation_correctness( network: LocalNetwork, start_epoch: u64, @@ -476,54 +479,49 @@ pub async fn check_attestation_correctness( let remote_node = &network.remote_nodes()?[node_index]; - let results = remote_node - .get_lighthouse_analysis_attestation_performance( - Epoch::new(start_epoch), - Epoch::new(upto_epoch - 2), - "global".to_string(), - ) - .await - .map_err(|e| format!("Unable to get attestation performance: {e}"))?; - - let mut active_successes: f64 = 0.0; let mut head_successes: f64 = 0.0; let mut target_successes: f64 = 0.0; let mut source_successes: f64 = 0.0; - let mut total: f64 = 0.0; - for result in results { - for epochs in result.epochs.values() { + let end_epoch = upto_epoch + .checked_sub(2) + .ok_or_else(|| "upto_epoch must be >= 2 to have attestation rewards".to_string())?; + for epoch in start_epoch..=end_epoch { + let response = remote_node + .post_beacon_rewards_attestations(Epoch::new(epoch), &[]) + .await + .map_err(|e| format!("Unable to get attestation rewards for epoch {epoch}: {e}"))?; + + for reward in &response.data.total_rewards { total += 1.0; - if epochs.active { - active_successes += 1.0; - } - if epochs.head { + // A positive reward means the validator made a correct vote. 
+ if reward.head > 0 { head_successes += 1.0; } - if epochs.target { + if reward.target > 0 { target_successes += 1.0; } - if epochs.source { + if reward.source > 0 { source_successes += 1.0; } } } - let active_percent = active_successes / total * 100.0; + + if total == 0.0 { + return Err("No attestation rewards data found".to_string()); + } + let head_percent = head_successes / total * 100.0; let target_percent = target_successes / total * 100.0; let source_percent = source_successes / total * 100.0; eprintln!("Total Attestations: {}", total); - eprintln!("Active: {}: {}%", active_successes, active_percent); eprintln!("Head: {}: {}%", head_successes, head_percent); eprintln!("Target: {}: {}%", target_successes, target_percent); eprintln!("Source: {}: {}%", source_successes, source_percent); - if active_percent < acceptable_attestation_performance { - return Err("Active percent was below required level".to_string()); - } if head_percent < acceptable_attestation_performance { return Err("Head percent was below required level".to_string()); } From 06025228ae489ac55137a238d0c16e9c76005da2 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 19 Mar 2026 20:09:13 +1100 Subject: [PATCH 29/43] Gloas cold DB (#8991) Closes: - https://github.com/sigp/lighthouse/issues/8958 - Update the `HotColdStore` to handle storage of cold states. - Update `BeaconSnapshot` to hold the execution envelope. This is required to make `chain_dump`-related checks sane, and will be generally useful (see: https://github.com/sigp/lighthouse/issues/8956). - Bug fix in the `BlockReplayer` for the case where the starting state is already `Full` (we should not try to apply another payload). This happens on the cold DB path because we try to replay from the closest cached state (which is often full). - Update `test_gloas_hot_state_hierarchy` to cover the cold DB migration. 
Co-Authored-By: Michael Sproul Co-Authored-By: Michael Sproul --- beacon_node/beacon_chain/src/beacon_chain.rs | 49 ++++++++++++++--- .../beacon_chain/src/beacon_snapshot.rs | 16 +++++- beacon_node/beacon_chain/src/builder.rs | 4 ++ .../beacon_chain/src/canonical_head.rs | 2 + .../beacon_chain/tests/block_verification.rs | 2 + beacon_node/beacon_chain/tests/store_tests.rs | 6 +- beacon_node/store/src/hot_cold_store.rs | 55 +++++++++++++++++-- beacon_node/store/src/invariants.rs | 29 +++++++--- .../state_processing/src/block_replayer.rs | 1 + 9 files changed, 139 insertions(+), 25 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 20af7b4630..c7009fc6dc 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -6689,6 +6689,9 @@ impl BeaconChain { let mut prev_block_root = None; let mut prev_beacon_state = None; + // Collect all blocks. + let mut blocks = vec![]; + for res in self.forwards_iter_block_roots(from_slot)? { let (beacon_block_root, _) = res?; @@ -6704,16 +6707,42 @@ impl BeaconChain { .ok_or_else(|| { Error::DBInconsistent(format!("Missing block {}", beacon_block_root)) })?; - let beacon_state_root = beacon_block.state_root(); + blocks.push((beacon_block_root, Arc::new(beacon_block))); + } + + // Collect states, using the next blocks to determine if states are full (have Gloas + // payloads). 
+ for (i, (block_root, block)) in blocks.iter().enumerate() { + let (opt_envelope, state_root) = if block.fork_name_unchecked().gloas_enabled() { + let opt_envelope = self.store.get_payload_envelope(block_root)?.map(Arc::new); + + if let Some((_, next_block)) = blocks.get(i + 1) { + let block_hash = block.payload_bid_block_hash()?; + if next_block.is_parent_block_full(block_hash) { + let envelope = opt_envelope.ok_or_else(|| { + Error::DBInconsistent(format!("Missing envelope {block_root:?}")) + })?; + let state_root = envelope.message.state_root; + (Some(envelope), state_root) + } else { + (None, block.state_root()) + } + } else { + // TODO(gloas): should use fork choice/cached head for last block in sequence + opt_envelope + .as_ref() + .map_or((None, block.state_root()), |envelope| { + (Some(envelope.clone()), envelope.message.state_root) + }) + } + } else { + (None, block.state_root()) + }; - // This branch is reached from the HTTP API. We assume the user wants - // to cache states so that future calls are faster. let mut beacon_state = self .store - .get_state(&beacon_state_root, Some(beacon_block.slot()), true)? - .ok_or_else(|| { - Error::DBInconsistent(format!("Missing state {:?}", beacon_state_root)) - })?; + .get_state(&state_root, Some(block.slot()), true)? + .ok_or_else(|| Error::DBInconsistent(format!("Missing state {:?}", state_root)))?; // This beacon state might come from the freezer DB, which means it could have pending // updates or lots of untethered memory. 
We rebase it on the previous state in order to @@ -6726,12 +6755,14 @@ impl BeaconChain { prev_beacon_state = Some(beacon_state.clone()); let snapshot = BeaconSnapshot { - beacon_block: Arc::new(beacon_block), - beacon_block_root, + beacon_block: block.clone(), + execution_envelope: opt_envelope, + beacon_block_root: *block_root, beacon_state, }; dump.push(snapshot); } + Ok(dump) } diff --git a/beacon_node/beacon_chain/src/beacon_snapshot.rs b/beacon_node/beacon_chain/src/beacon_snapshot.rs index e9fde48ac6..566713e3f3 100644 --- a/beacon_node/beacon_chain/src/beacon_snapshot.rs +++ b/beacon_node/beacon_chain/src/beacon_snapshot.rs @@ -2,7 +2,7 @@ use serde::Serialize; use std::sync::Arc; use types::{ AbstractExecPayload, BeaconState, EthSpec, FullPayload, Hash256, SignedBeaconBlock, - SignedBlindedBeaconBlock, + SignedBlindedBeaconBlock, SignedExecutionPayloadEnvelope, }; /// Represents some block and its associated state. Generally, this will be used for tracking the @@ -10,6 +10,7 @@ use types::{ #[derive(Clone, Serialize, PartialEq, Debug)] pub struct BeaconSnapshot = FullPayload> { pub beacon_block: Arc>, + pub execution_envelope: Option>>, pub beacon_block_root: Hash256, pub beacon_state: BeaconState, } @@ -31,33 +32,42 @@ impl> BeaconSnapshot { /// Create a new checkpoint. pub fn new( beacon_block: Arc>, + execution_envelope: Option>>, beacon_block_root: Hash256, beacon_state: BeaconState, ) -> Self { Self { beacon_block, + execution_envelope, beacon_block_root, beacon_state, } } - /// Returns the state root from `self.beacon_block`. + /// Returns the state root from `self.beacon_block` or `self.execution_envelope` as + /// appropriate. /// /// ## Caution /// /// It is not strictly enforced that `root(self.beacon_state) == self.beacon_state_root()`. 
pub fn beacon_state_root(&self) -> Hash256 { - self.beacon_block.message().state_root() + if let Some(ref envelope) = self.execution_envelope { + envelope.message.state_root + } else { + self.beacon_block.message().state_root() + } } /// Update all fields of the checkpoint. pub fn update( &mut self, beacon_block: Arc>, + execution_envelope: Option>>, beacon_block_root: Hash256, beacon_state: BeaconState, ) { self.beacon_block = beacon_block; + self.execution_envelope = execution_envelope; self.beacon_block_root = beacon_block_root; self.beacon_state = beacon_state; } diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 59fa5ec9ec..7eb92060a2 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -358,6 +358,7 @@ where Ok(( BeaconSnapshot { beacon_block_root, + execution_envelope: None, beacon_block: Arc::new(beacon_block), beacon_state, }, @@ -616,8 +617,10 @@ where .map_err(|e| format!("Failed to initialize data column info: {:?}", e))?, ); + // TODO(gloas): add check that checkpoint state is Pending let snapshot = BeaconSnapshot { beacon_block_root: weak_subj_block_root, + execution_envelope: None, beacon_block: Arc::new(weak_subj_block), beacon_state: weak_subj_state, }; @@ -800,6 +803,7 @@ where let mut head_snapshot = BeaconSnapshot { beacon_block_root: head_block_root, + execution_envelope: None, beacon_block: Arc::new(head_block), beacon_state: head_state, }; diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs index fd060e2b59..0faddd1792 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -319,6 +319,7 @@ impl CanonicalHead { let snapshot = BeaconSnapshot { beacon_block_root, + execution_envelope: None, beacon_block: Arc::new(beacon_block), beacon_state, }; @@ -695,6 +696,7 @@ impl BeaconChain { BeaconSnapshot { beacon_block: 
Arc::new(beacon_block), + execution_envelope: None, beacon_block_root: new_view.head_block_root, beacon_state, } diff --git a/beacon_node/beacon_chain/tests/block_verification.rs b/beacon_node/beacon_chain/tests/block_verification.rs index 8981b20a55..2bb60f111a 100644 --- a/beacon_node/beacon_chain/tests/block_verification.rs +++ b/beacon_node/beacon_chain/tests/block_verification.rs @@ -77,8 +77,10 @@ async fn get_chain_segment() -> (Vec>, Vec>(); + let some_validators = (0..LOW_VALIDATOR_COUNT).collect::>(); let (genesis_state, _genesis_state_root) = harness.get_current_state_and_root(); @@ -5886,6 +5889,7 @@ async fn test_gloas_hot_state_hierarchy() { // Verify chain dump and iterators work with Gloas states. check_chain_dump(&harness, num_blocks + 1); check_iterators(&harness); + check_db_invariants(&harness); } /// Check that the HotColdDB's split_slot is equal to the start slot of the last finalized epoch. diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 428086c464..8ef91b3c74 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -1906,6 +1906,51 @@ impl, Cold: ItemStore> HotColdDB } } + /// Recompute the payload status for a state at `slot` that is stored in the cold DB. + /// + /// This function returns an error for any `slot` that is outside the range of slots stored in + /// the freezer DB. + /// + /// For all slots prior to Gloas, it returns `Pending`. + /// + /// For post-Gloas slots the algorithm is: + /// + /// 1. Load the most recently applied block at `slot` (may not be from `slot` in case of a skip) + /// 2. Load the canonical `state_root` at the slot of the block. If this `state_root` matches + /// the one in the block then we know the state at *that* slot is canonically empty (no + /// payload). Conversely, if it is different, we know that the block's slot is full (assuming + /// no database corruption). + /// 3. 
The payload status of `slot` is the same as the payload status of `block.slot()`, because + /// we only care about whether a beacon block or payload was applied most recently, and + /// `block` is by definition the most-recently-applied block. + /// + /// All of this mucking around could be avoided if we do a schema migration to record the + /// payload status in the database. For now, this is simpler. + fn get_cold_state_payload_status(&self, slot: Slot) -> Result { + // Pre-Gloas states are always `Pending`. + if !self.spec.fork_name_at_slot::(slot).gloas_enabled() { + return Ok(StatePayloadStatus::Pending); + } + + let block_root = self + .get_cold_block_root(slot)? + .ok_or(HotColdDBError::MissingFrozenBlock(slot))?; + + let block = self + .get_blinded_block(&block_root)? + .ok_or(Error::MissingBlock(block_root))?; + + let state_root = self + .get_cold_state_root(block.slot())? + .ok_or(HotColdDBError::MissingRestorePointState(block.slot()))?; + + if block.state_root() != state_root { + Ok(StatePayloadStatus::Full) + } else { + Ok(StatePayloadStatus::Pending) + } + } + fn load_hot_hdiff_buffer(&self, state_root: Hash256) -> Result { if let Some(buffer) = self .state_cache @@ -2454,8 +2499,7 @@ impl, Cold: ItemStore> HotColdDB self.forwards_state_roots_iterator_until(base_state.slot(), slot, || { Err(Error::StateShouldNotBeRequired(slot)) })?; - // TODO(gloas): calculate correct payload status for cold states - let payload_status = StatePayloadStatus::Pending; + let payload_status = self.get_cold_state_payload_status(slot)?; let state = self.replay_blocks( base_state, blocks, @@ -2591,9 +2635,10 @@ impl, Cold: ItemStore> HotColdDB { return Ok((blocks, vec![])); } - // TODO(gloas): wire this up - let end_block_root = Hash256::ZERO; - let desired_payload_status = StatePayloadStatus::Pending; + let end_block_root = self + .get_cold_block_root(end_slot)? 
+ .ok_or(HotColdDBError::MissingFrozenBlock(end_slot))?; + let desired_payload_status = self.get_cold_state_payload_status(end_slot)?; let envelopes = self.load_payload_envelopes_for_blocks( &blocks, end_block_root, diff --git a/beacon_node/store/src/invariants.rs b/beacon_node/store/src/invariants.rs index eb5232d344..d251fb8800 100644 --- a/beacon_node/store/src/invariants.rs +++ b/beacon_node/store/src/invariants.rs @@ -319,6 +319,10 @@ impl, Cold: ItemStore> HotColdDB .spec .fulu_fork_epoch .map(|epoch| epoch.start_slot(E::slots_per_epoch())); + let gloas_fork_slot = self + .spec + .gloas_fork_epoch + .map(|epoch| epoch.start_slot(E::slots_per_epoch())); let oldest_blob_slot = self.get_blob_info().oldest_blob_slot; let oldest_data_column_slot = self.get_data_column_info().oldest_data_column_slot; @@ -343,17 +347,28 @@ impl, Cold: ItemStore> HotColdDB } // Invariant 5: execution payload consistency. - // TODO(gloas): reconsider this invariant if check_payloads && let Some(bellatrix_slot) = bellatrix_fork_slot && slot >= bellatrix_slot - && !self.execution_payload_exists(&block_root)? - && !self.payload_envelope_exists(&block_root)? { - result.add_violation(InvariantViolation::ExecutionPayloadMissing { - block_root, - slot, - }); + if let Some(gloas_slot) = gloas_fork_slot + && slot >= gloas_slot + { + // For Gloas there is never a true payload stored at slot 0. + // TODO(gloas): still need to account for non-canonical payloads once pruning + // is implemented. + if slot != 0 && !self.payload_envelope_exists(&block_root)? { + result.add_violation(InvariantViolation::ExecutionPayloadMissing { + block_root, + slot, + }); + } + } else if !self.execution_payload_exists(&block_root)? { + result.add_violation(InvariantViolation::ExecutionPayloadMissing { + block_root, + slot, + }); + } } // Invariant 6: blob sidecar consistency. 
diff --git a/consensus/state_processing/src/block_replayer.rs b/consensus/state_processing/src/block_replayer.rs index a10d6179fe..f5f06d1cb9 100644 --- a/consensus/state_processing/src/block_replayer.rs +++ b/consensus/state_processing/src/block_replayer.rs @@ -313,6 +313,7 @@ where // indicates that the parent is full (and it hasn't already been applied). state_root = if block.fork_name_unchecked().gloas_enabled() && self.state.slot() == self.state.latest_block_header().slot + && self.state.payload_status() == StatePayloadStatus::Pending { let latest_bid_block_hash = self .state From 54d62d0017c772e58bc752dea325f43c96a2571d Mon Sep 17 00:00:00 2001 From: Barnabas Busa Date: Thu, 19 Mar 2026 12:36:36 +0100 Subject: [PATCH 30/43] fix: update kurtosis apt source to sdk.kurtosis.com (#9000) Co-Authored-By: Barnabas Busa --- .github/workflows/local-testnet.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/local-testnet.yml b/.github/workflows/local-testnet.yml index 9992273e0a..308ddcf819 100644 --- a/.github/workflows/local-testnet.yml +++ b/.github/workflows/local-testnet.yml @@ -38,7 +38,7 @@ jobs: - name: Install Kurtosis run: | - echo "deb [trusted=yes] https://apt.fury.io/kurtosis-tech/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list + echo "deb [trusted=yes] https://sdk.kurtosis.com/kurtosis-cli-release-artifacts/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list sudo apt update sudo apt install -y kurtosis-cli kurtosis analytics disable @@ -106,7 +106,7 @@ jobs: - name: Install Kurtosis run: | - echo "deb [trusted=yes] https://apt.fury.io/kurtosis-tech/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list + echo "deb [trusted=yes] https://sdk.kurtosis.com/kurtosis-cli-release-artifacts/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list sudo apt update sudo apt install -y kurtosis-cli kurtosis analytics disable @@ -142,7 +142,7 @@ jobs: - name: Install Kurtosis run: | - echo "deb [trusted=yes] 
https://apt.fury.io/kurtosis-tech/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list + echo "deb [trusted=yes] https://sdk.kurtosis.com/kurtosis-cli-release-artifacts/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list sudo apt update sudo apt install -y kurtosis-cli kurtosis analytics disable @@ -185,7 +185,7 @@ jobs: - name: Install Kurtosis run: | - echo "deb [trusted=yes] https://apt.fury.io/kurtosis-tech/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list + echo "deb [trusted=yes] https://sdk.kurtosis.com/kurtosis-cli-release-artifacts/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list sudo apt update sudo apt install -y kurtosis-cli kurtosis analytics disable @@ -227,7 +227,7 @@ jobs: - name: Install Kurtosis run: | - echo "deb [trusted=yes] https://apt.fury.io/kurtosis-tech/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list + echo "deb [trusted=yes] https://sdk.kurtosis.com/kurtosis-cli-release-artifacts/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list sudo apt update sudo apt install -y kurtosis-cli kurtosis analytics disable From 8f9c1ca9ca2e6bbfdadaa8f70f842dcc55cbc08e Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Sat, 21 Mar 2026 20:45:20 +1100 Subject: [PATCH 31/43] Bump rustls and ignore unpatched version due to Warp (#9010) Fix the cargo-audit failure caused by: - https://rustsec.org/advisories/RUSTSEC-2026-0049 We can't fix it completely yet because `warp 0.3` is keeping us on an old version of `rustls`. 
Mac's PR here will fix it: - https://github.com/sigp/lighthouse/pull/9001 Co-Authored-By: Michael Sproul --- Cargo.lock | 10 +++++----- Makefile | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index cba93f2fd5..72ec9c6e4e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5306,7 +5306,7 @@ dependencies = [ "rcgen", "ring", "rustls 0.23.35", - "rustls-webpki 0.103.8", + "rustls-webpki 0.103.10", "thiserror 2.0.17", "x509-parser", "yasna", @@ -7196,7 +7196,7 @@ dependencies = [ "once_cell", "socket2 0.5.10", "tracing", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -7740,7 +7740,7 @@ dependencies = [ "once_cell", "ring", "rustls-pki-types", - "rustls-webpki 0.103.8", + "rustls-webpki 0.103.10", "subtle", "zeroize", ] @@ -7789,9 +7789,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.103.8" +version = "0.103.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ffdfa2f5286e2247234e03f680868ac2815974dc39e00ea15adc445d0aafe52" +checksum = "df33b2b81ac578cabaf06b89b0631153a3f416b0a886e8a7a1707fb51abbd1ef" dependencies = [ "ring", "rustls-pki-types", diff --git a/Makefile b/Makefile index 9d08c3ebe1..d55fcd7e87 100644 --- a/Makefile +++ b/Makefile @@ -324,7 +324,7 @@ install-audit: cargo install --force cargo-audit audit-CI: - cargo audit + cargo audit --ignore RUSTSEC-2026-0049 # Runs cargo deny (check for banned crates, duplicate versions, and source restrictions) deny: install-deny deny-CI From b3d51858938283604651bdda1a41482586faeee9 Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Mon, 23 Mar 2026 06:46:39 +0900 Subject: [PATCH 32/43] Carry forward withdrawals from the current `BeaconState` when a parent envelope is missed (#9014) Co-Authored-By: Eitan Seri- Levi --- beacon_node/beacon_chain/src/block_production/gloas.rs | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/beacon_node/beacon_chain/src/block_production/gloas.rs 
b/beacon_node/beacon_chain/src/block_production/gloas.rs index 5d7d99b5bd..2fc4fb51f7 100644 --- a/beacon_node/beacon_chain/src/block_production/gloas.rs +++ b/beacon_node/beacon_chain/src/block_production/gloas.rs @@ -763,8 +763,12 @@ fn get_execution_payload_gloas( let latest_execution_block_hash = *state.latest_block_hash()?; let latest_gas_limit = state.latest_execution_payload_bid()?.gas_limit; - let withdrawals = - Withdrawals::::from(get_expected_withdrawals(state, spec)?).into(); + let withdrawals = if state.is_parent_block_full() { + Withdrawals::::from(get_expected_withdrawals(state, spec)?).into() + } else { + // If the previous payload was missed, carry forward the withdrawals from the state. + state.payload_expected_withdrawals()?.to_vec() + }; // Spawn a task to obtain the execution payload from the EL via a series of async calls. The // `join_handle` can be used to await the result of the function. From e21053311d2b1aa38508fe7adedf886b264c88ef Mon Sep 17 00:00:00 2001 From: antondlr Date: Mon, 23 Mar 2026 07:25:06 +0100 Subject: [PATCH 33/43] Scrap redundant docker builds on releases (#8999) Our release workflow is pretty inefficient and slow. This PR aims to consolidate and cut down on duplicate tasks. 1) We now run the whole build process both on pushing to the `stable` branch and pushing a version tag. A quick win is to not fire off separate builds. ~~2) The Docker release workflow could re-use the binaries being built instead of doing its own cross-compilation. 
~~ we won't take this on _right now_ Co-Authored-By: antondlr Co-Authored-By: Michael Sproul Co-Authored-By: Michael Sproul --- .github/workflows/docker-reproducible.yml | 15 +++++++++++---- .github/workflows/docker.yml | 15 +++++++++------ 2 files changed, 20 insertions(+), 10 deletions(-) diff --git a/.github/workflows/docker-reproducible.yml b/.github/workflows/docker-reproducible.yml index f3479e9468..7e46fc691b 100644 --- a/.github/workflows/docker-reproducible.yml +++ b/.github/workflows/docker-reproducible.yml @@ -4,7 +4,6 @@ on: push: branches: - unstable - - stable tags: - v* workflow_dispatch: # allows manual triggering for testing purposes and skips publishing an image @@ -25,9 +24,6 @@ jobs: if [[ "${{ github.ref }}" == refs/tags/* ]]; then # It's a tag (e.g., v1.2.3) VERSION="${GITHUB_REF#refs/tags/}" - elif [[ "${{ github.ref }}" == refs/heads/stable ]]; then - # stable branch -> latest - VERSION="latest" elif [[ "${{ github.ref }}" == refs/heads/unstable ]]; then # unstable branch -> latest-unstable VERSION="latest-unstable" @@ -174,3 +170,14 @@ jobs: ${IMAGE_NAME}:${VERSION}-arm64 docker manifest push ${IMAGE_NAME}:${VERSION} + + # For version tags, also create/update the latest tag to keep stable up to date + # Only create latest tag for proper release versions (e.g. 
v1.2.3, not v1.2.3-alpha) + if [[ "${GITHUB_REF}" == refs/tags/* ]] && [[ "${VERSION}" =~ ^v[0-9]{1,2}\.[0-9]{1,2}\.[0-9]{1,2}$ ]]; then + docker manifest create \ + ${IMAGE_NAME}:latest \ + ${IMAGE_NAME}:${VERSION}-amd64 \ + ${IMAGE_NAME}:${VERSION}-arm64 + + docker manifest push ${IMAGE_NAME}:latest + fi diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 415f4db0e6..e3f6e5d8b8 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -4,7 +4,6 @@ on: push: branches: - unstable - - stable tags: - v* @@ -28,11 +27,6 @@ jobs: extract-version: runs-on: ubuntu-22.04 steps: - - name: Extract version (if stable) - if: github.event.ref == 'refs/heads/stable' - run: | - echo "VERSION=latest" >> $GITHUB_ENV - echo "VERSION_SUFFIX=" >> $GITHUB_ENV - name: Extract version (if unstable) if: github.event.ref == 'refs/heads/unstable' run: | @@ -159,7 +153,16 @@ jobs: - name: Create and push multiarch manifests run: | + # Create the main tag (versioned for releases, latest-unstable for unstable) docker buildx imagetools create -t ${{ github.repository_owner}}/${{ matrix.binary }}:${VERSION}${VERSION_SUFFIX} \ ${{ github.repository_owner}}/${{ matrix.binary }}:${VERSION}-arm64${VERSION_SUFFIX} \ ${{ github.repository_owner}}/${{ matrix.binary }}:${VERSION}-amd64${VERSION_SUFFIX}; + # For version tags, also create/update the latest tag to keep stable up to date + # Only create latest tag for proper release versions (e.g. 
v1.2.3, not v1.2.3-alpha) + if [[ "${GITHUB_REF}" == refs/tags/* ]] && [[ "${VERSION}" =~ ^v[0-9]{1,2}\.[0-9]{1,2}\.[0-9]{1,2}$ ]]; then + docker buildx imagetools create -t ${{ github.repository_owner}}/${{ matrix.binary }}:latest \ + ${{ github.repository_owner}}/${{ matrix.binary }}:${VERSION}-arm64${VERSION_SUFFIX} \ + ${{ github.repository_owner}}/${{ matrix.binary }}:${VERSION}-amd64${VERSION_SUFFIX}; + fi + From 7ffc637eefd38622e7940010a0e557d3520f9326 Mon Sep 17 00:00:00 2001 From: Alleysira <56925051+Alleysira@users.noreply.github.com> Date: Tue, 24 Mar 2026 06:07:37 +0800 Subject: [PATCH 34/43] fix(network): set ENR nfd to zero bytes when next fork is unknown (#9009) Fixes #8996 When no next fork is scheduled, the `nfd` field in the ENR was set to the current fork digest via `.unwrap_or_else(|| ctx.fork_context.current_fork_digest())`. According to the [spec](https://github.com/ethereum/consensus-specs/blob/1baa05e71148b0975e28918ac6022d2256b56f4a/specs/fulu/p2p-interface.md?plain=1#L636-L637), `nfd` should be zero-valued bytes when the next fork is unknown. 
Co-Authored-By: Alleysira <1367108378@qq.com> Co-Authored-By: Alleysira <56925051+Alleysira@users.noreply.github.com> Co-Authored-By: Pawan Dhananjay --- beacon_node/lighthouse_network/src/service/mod.rs | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index 94e0ad0710..184a334591 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -187,10 +187,9 @@ impl Network { // set up a collection of variables accessible outside of the network crate // Create an ENR or load from disk if appropriate - let next_fork_digest = ctx - .fork_context - .next_fork_digest() - .unwrap_or_else(|| ctx.fork_context.current_fork_digest()); + // Per [spec](https://github.com/ethereum/consensus-specs/blob/1baa05e71148b0975e28918ac6022d2256b56f4a/specs/fulu/p2p-interface.md?plain=1#L636-L637) + // `nfd` must be zero-valued when no next fork is scheduled. + let next_fork_digest = ctx.fork_context.next_fork_digest().unwrap_or_default(); let advertised_cgc = config .advertise_false_custody_group_count From c451ae763c975058cde26f4a50b5e1d1c9665163 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 24 Mar 2026 12:43:19 +1100 Subject: [PATCH 35/43] Use BTreeMap for state.validators pending updates (#9017) Closes: - https://github.com/sigp/lighthouse/issues/9003 Milhouse `List`s use a map in front of the binary tree to cache updates. Ever since we adopted Milhouse, we've been using `VecMap`, which is essentially `Vec>`. Turns out, when you've got 2M indices and only 2 non-`None` entries (changes), this is inefficient. Milhouse is generic in the choice of map (`U: UpdateMap`) and has always supported `BTreeMap`, so this PR switches us over to `BTreeMap`. In previous benchmarks (years ago) it had been slower than `VecMap`, but now it is vastly superior. 
Co-Authored-By: Michael Sproul --- .../src/per_epoch_processing/epoch_processing_summary.rs | 6 +++--- consensus/types/src/state/beacon_state.rs | 6 ++++-- consensus/types/src/state/mod.rs | 2 +- 3 files changed, 8 insertions(+), 6 deletions(-) diff --git a/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs b/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs index a818e08775..3c043a65f2 100644 --- a/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs +++ b/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs @@ -4,8 +4,8 @@ use milhouse::List; use std::sync::Arc; use types::{ BeaconStateError, Epoch, EthSpec, ParticipationFlags, ProgressiveBalancesCache, SyncCommittee, - Validator, consts::altair::{TIMELY_HEAD_FLAG_INDEX, TIMELY_SOURCE_FLAG_INDEX, TIMELY_TARGET_FLAG_INDEX}, + state::Validators, }; /// Provides a summary of validator participation during the epoch. @@ -26,7 +26,7 @@ pub enum EpochProcessingSummary { #[derive(PartialEq, Debug)] pub struct ParticipationEpochSummary { /// Copy of the validator registry prior to mutation. - validators: List, + validators: Validators, /// Copy of the participation flags for the previous epoch. previous_epoch_participation: List, /// Copy of the participation flags for the current epoch. 
@@ -37,7 +37,7 @@ pub struct ParticipationEpochSummary { impl ParticipationEpochSummary { pub fn new( - validators: List, + validators: Validators, previous_epoch_participation: List, current_epoch_participation: List, previous_epoch: Epoch, diff --git a/consensus/types/src/state/beacon_state.rs b/consensus/types/src/state/beacon_state.rs index 3f8fa4cfff..9c7b8285d4 100644 --- a/consensus/types/src/state/beacon_state.rs +++ b/consensus/types/src/state/beacon_state.rs @@ -14,6 +14,7 @@ use serde::{Deserialize, Deserializer, Serialize}; use ssz::{Decode, DecodeError, Encode, ssz_encode}; use ssz_derive::{Decode, Encode}; use ssz_types::{BitVector, FixedVector}; +use std::collections::BTreeMap; use superstruct::superstruct; use swap_or_not_shuffle::compute_shuffled_index; use test_random_derive::TestRandom; @@ -58,7 +59,8 @@ pub const CACHED_EPOCHS: usize = 3; const MAX_RANDOM_BYTE: u64 = (1 << 8) - 1; const MAX_RANDOM_VALUE: u64 = (1 << 16) - 1; -pub type Validators = List::ValidatorRegistryLimit>; +pub type Validators = + List::ValidatorRegistryLimit, BTreeMap>; pub type Balances = List::ValidatorRegistryLimit>; #[derive(Debug, PartialEq, Clone)] @@ -453,7 +455,7 @@ where // Registry #[compare_fields(as_iter)] #[test_random(default)] - pub validators: List, + pub validators: Validators, #[serde(with = "ssz_types::serde_utils::quoted_u64_var_list")] #[compare_fields(as_iter)] #[test_random(default)] diff --git a/consensus/types/src/state/mod.rs b/consensus/types/src/state/mod.rs index 309796d359..321c66671a 100644 --- a/consensus/types/src/state/mod.rs +++ b/consensus/types/src/state/mod.rs @@ -17,7 +17,7 @@ pub use balance::Balance; pub use beacon_state::{ BeaconState, BeaconStateAltair, BeaconStateBase, BeaconStateBellatrix, BeaconStateCapella, BeaconStateDeneb, BeaconStateElectra, BeaconStateError, BeaconStateFulu, BeaconStateGloas, - BeaconStateHash, BeaconStateRef, CACHED_EPOCHS, + BeaconStateHash, BeaconStateRef, CACHED_EPOCHS, Validators, }; pub use 
committee_cache::{ CommitteeCache, compute_committee_index_in_epoch, compute_committee_range_in_epoch, From 91c25794fe15af6b3097a0670c77cb218fb8cff4 Mon Sep 17 00:00:00 2001 From: Lion - dapplion <35266934+dapplion@users.noreply.github.com> Date: Mon, 23 Mar 2026 22:50:14 -0500 Subject: [PATCH 36/43] Schedule Fulu fork for Gnosis mainnet (#9007) Co-Authored-By: dapplion <35266934+dapplion@users.noreply.github.com> Co-Authored-By: Michael Sproul --- .../built_in_network_configs/gnosis/config.yaml | 7 ++++++- consensus/types/src/core/chain_spec.rs | 5 ++--- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml b/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml index 34313aa393..d27f7a09e8 100644 --- a/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml @@ -46,7 +46,7 @@ ELECTRA_FORK_VERSION: 0x05000064 ELECTRA_FORK_EPOCH: 1337856 # 2025-04-30T14:03:40.000Z # Fulu FULU_FORK_VERSION: 0x06000064 -FULU_FORK_EPOCH: 18446744073709551615 +FULU_FORK_EPOCH: 1714688 # Tue Apr 14 2026 12:06:20 GMT+0000 # Gloas GLOAS_FORK_VERSION: 0x07000064 GLOAS_FORK_EPOCH: 18446744073709551615 @@ -156,6 +156,11 @@ NUMBER_OF_CUSTODY_GROUPS: 128 DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 SAMPLES_PER_SLOT: 8 CUSTODY_REQUIREMENT: 4 +VALIDATOR_CUSTODY_REQUIREMENT: 8 +BALANCE_PER_ADDITIONAL_CUSTODY_GROUP: 32000000000 +MAX_REQUEST_DATA_COLUMN_SIDECARS: 16384 +# `2**14` (= 16384 epochs, ~15 days) +MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS: 16384 MAX_BLOBS_PER_BLOCK_FULU: 12 # Gloas \ No newline at end of file diff --git a/consensus/types/src/core/chain_spec.rs b/consensus/types/src/core/chain_spec.rs index 6d25e3baf4..f505c9f0d9 100644 --- a/consensus/types/src/core/chain_spec.rs +++ b/consensus/types/src/core/chain_spec.rs @@ -1604,7 +1604,7 @@ impl ChainSpec { * Fulu hard fork params */ fulu_fork_version: 
[0x06, 0x00, 0x00, 0x64], - fulu_fork_epoch: None, + fulu_fork_epoch: Some(Epoch::new(1714688)), custody_requirement: 4, number_of_custody_groups: 128, data_column_sidecar_subnet_count: 128, @@ -1673,8 +1673,7 @@ impl ChainSpec { * Networking Fulu specific */ blob_schedule: BlobSchedule::default(), - min_epochs_for_data_column_sidecars_requests: - default_min_epochs_for_data_column_sidecars_requests(), + min_epochs_for_data_column_sidecars_requests: 16384, max_data_columns_by_root_request: default_data_columns_by_root_request(), /* From 5d6f787a06c3fb5c51205a91eef94afb0f0157f6 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 23 Mar 2026 12:44:43 +1100 Subject: [PATCH 37/43] Bump quinn --- Cargo.lock | 17 ++++++++++------- Cargo.toml | 1 - 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 72ec9c6e4e..96e84ed73e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7149,7 +7149,8 @@ dependencies = [ [[package]] name = "quinn" version = "0.11.9" -source = "git+https://github.com/sigp/quinn?rev=59af87979c8411864c1cb68613222f54ed2930a7#59af87979c8411864c1cb68613222f54ed2930a7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9e20a958963c291dc322d98411f541009df2ced7b5a4f2bd52337638cfccf20" dependencies = [ "bytes", "cfg_aliases", @@ -7159,7 +7160,7 @@ dependencies = [ "quinn-udp", "rustc-hash 2.1.1", "rustls 0.23.35", - "socket2 0.5.10", + "socket2 0.6.1", "thiserror 2.0.17", "tokio", "tracing", @@ -7168,8 +7169,9 @@ dependencies = [ [[package]] name = "quinn-proto" -version = "0.11.13" -source = "git+https://github.com/sigp/quinn?rev=59af87979c8411864c1cb68613222f54ed2930a7#59af87979c8411864c1cb68613222f54ed2930a7" +version = "0.11.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "434b42fec591c96ef50e21e886936e66d3cc3f737104fdb9b737c40ffb94c098" dependencies = [ "bytes", "getrandom 0.3.4", @@ -7189,14 +7191,15 @@ dependencies = [ [[package]] name = "quinn-udp" 
version = "0.5.14" -source = "git+https://github.com/sigp/quinn?rev=59af87979c8411864c1cb68613222f54ed2930a7#59af87979c8411864c1cb68613222f54ed2930a7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "addec6a0dcad8a8d96a771f815f0eaf55f9d1805756410b39f5fa81332574cbd" dependencies = [ "cfg_aliases", "libc", "once_cell", - "socket2 0.5.10", + "socket2 0.6.1", "tracing", - "windows-sys 0.59.0", + "windows-sys 0.60.2", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index f483e998c9..63cfb39ba4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -280,7 +280,6 @@ debug = true [patch.crates-io] quick-protobuf = { git = "https://github.com/sigp/quick-protobuf.git", rev = "87c4ccb9bb2af494de375f5f6c62850badd26304" } yamux = { git = "https://github.com/sigp/rust-yamux", rev = "29efa6aebd4bdfcb16bfb21969ec0c785e570b74" } -quinn = { git = "https://github.com/sigp/quinn", rev = "59af87979c8411864c1cb68613222f54ed2930a7" } [patch."https://github.com/libp2p/rust-libp2p.git"] libp2p = { git = "https://github.com/sigp/rust-libp2p.git", rev = "f88e43de9eba00b416d0374b1a1fb2de47b65864" } From e5facc2faf31ae1f3d9923f397bcea4c239ec5f0 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 23 Mar 2026 12:52:10 +1100 Subject: [PATCH 38/43] Bump yamux --- Cargo.lock | 7 ++++--- Cargo.toml | 1 - 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 96e84ed73e..4043cb9e12 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5337,7 +5337,7 @@ dependencies = [ "thiserror 2.0.17", "tracing", "yamux 0.12.1", - "yamux 0.13.8", + "yamux 0.13.10", ] [[package]] @@ -10606,8 +10606,9 @@ dependencies = [ [[package]] name = "yamux" -version = "0.13.8" -source = "git+https://github.com/sigp/rust-yamux?rev=29efa6aebd4bdfcb16bfb21969ec0c785e570b74#29efa6aebd4bdfcb16bfb21969ec0c785e570b74" +version = "0.13.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"1991f6690292030e31b0144d73f5e8368936c58e45e7068254f7138b23b00672" dependencies = [ "futures", "log", diff --git a/Cargo.toml b/Cargo.toml index 63cfb39ba4..6910d02427 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -279,7 +279,6 @@ debug = true [patch.crates-io] quick-protobuf = { git = "https://github.com/sigp/quick-protobuf.git", rev = "87c4ccb9bb2af494de375f5f6c62850badd26304" } -yamux = { git = "https://github.com/sigp/rust-yamux", rev = "29efa6aebd4bdfcb16bfb21969ec0c785e570b74" } [patch."https://github.com/libp2p/rust-libp2p.git"] libp2p = { git = "https://github.com/sigp/rust-libp2p.git", rev = "f88e43de9eba00b416d0374b1a1fb2de47b65864" } From c7055b604f9958db410b2e42023763cb19dd7138 Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Wed, 25 Mar 2026 15:45:24 +0900 Subject: [PATCH 39/43] Gloas serve envelope rpc (#8896) Serves envelope by range and by root requests. Added PayloadEnvelopeStreamer so that we dont need to alter upstream code when we introduce blinded payload envelopes. 
Co-Authored-By: Eitan Seri- Levi Co-Authored-By: Eitan Seri-Levi Co-Authored-By: dapplion <35266934+dapplion@users.noreply.github.com> --- beacon_node/beacon_chain/src/beacon_chain.rs | 17 + .../beacon_chain/src/canonical_head.rs | 7 + beacon_node/beacon_chain/src/errors.rs | 2 + beacon_node/beacon_chain/src/lib.rs | 1 + .../beacon_chain_adapter.rs | 42 ++ .../src/payload_envelope_streamer/mod.rs | 219 ++++++++++ .../src/payload_envelope_streamer/tests.rs | 386 ++++++++++++++++++ beacon_node/beacon_processor/src/lib.rs | 31 +- .../src/scheduler/work_queue.rs | 12 + .../src/peer_manager/mod.rs | 6 + .../lighthouse_network/src/rpc/codec.rs | 66 +++ .../lighthouse_network/src/rpc/config.rs | 28 ++ .../lighthouse_network/src/rpc/handler.rs | 29 ++ .../lighthouse_network/src/rpc/methods.rs | 68 ++- .../lighthouse_network/src/rpc/protocol.rs | 92 ++++- .../src/rpc/rate_limiter.rs | 38 +- .../src/service/api_types.rs | 15 + .../lighthouse_network/src/service/mod.rs | 38 ++ .../src/network_beacon_processor/mod.rs | 43 +- .../network_beacon_processor/rpc_methods.rs | 285 ++++++++++++- .../src/network_beacon_processor/tests.rs | 254 +++++++++++- beacon_node/network/src/router.rs | 23 ++ .../types/src/block/signed_beacon_block.rs | 10 + consensus/types/src/core/chain_spec.rs | 20 + .../execution/execution_payload_envelope.rs | 40 ++ .../signed_execution_payload_envelope.rs | 19 + 26 files changed, 1778 insertions(+), 13 deletions(-) create mode 100644 beacon_node/beacon_chain/src/payload_envelope_streamer/beacon_chain_adapter.rs create mode 100644 beacon_node/beacon_chain/src/payload_envelope_streamer/mod.rs create mode 100644 beacon_node/beacon_chain/src/payload_envelope_streamer/tests.rs diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index c7009fc6dc..81735bdd9d 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -54,6 +54,8 @@ use 
crate::observed_block_producers::ObservedBlockProducers; use crate::observed_data_sidecars::ObservedDataSidecars; use crate::observed_operations::{ObservationOutcome, ObservedOperations}; use crate::observed_slashable::ObservedSlashable; +#[cfg(not(test))] +use crate::payload_envelope_streamer::{EnvelopeRequestSource, launch_payload_envelope_stream}; use crate::pending_payload_envelopes::PendingPayloadEnvelopes; use crate::persisted_beacon_chain::PersistedBeaconChain; use crate::persisted_custody::persist_custody_context; @@ -1135,6 +1137,21 @@ impl BeaconChain { .map_or_else(|| self.get_blobs(block_root), Ok) } + #[cfg(not(test))] + #[allow(clippy::type_complexity)] + pub fn get_payload_envelopes( + self: &Arc, + block_roots: Vec, + request_source: EnvelopeRequestSource, + ) -> impl Stream< + Item = ( + Hash256, + Arc>>, Error>>, + ), + > { + launch_payload_envelope_stream(self.clone(), block_roots, request_source) + } + pub fn get_data_columns_checking_all_caches( &self, block_root: Hash256, diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs index 0faddd1792..3a429bdb8a 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -371,6 +371,13 @@ impl CanonicalHead { Ok((head, execution_status)) } + // TODO(gloas) just a stub for now, implement this once we have fork choice. + /// Returns true if the payload for this block is canonical according to fork choice + /// Returns an error if the block root doesn't exist in fork choice. + pub fn block_has_canonical_payload(&self, _root: &Hash256) -> Result { + Ok(true) + } + /// Returns a clone of `self.cached_head`. /// /// Takes a read-lock on `self.cached_head` for a short time (just long enough to clone it). 
diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 6c8f0d2794..210c4a4482 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -8,6 +8,7 @@ use crate::observed_aggregates::Error as ObservedAttestationsError; use crate::observed_attesters::Error as ObservedAttestersError; use crate::observed_block_producers::Error as ObservedBlockProducersError; use crate::observed_data_sidecars::Error as ObservedDataSidecarsError; +use crate::payload_envelope_streamer::Error as EnvelopeStreamerError; use bls::PublicKeyBytes; use execution_layer::PayloadStatus; use fork_choice::ExecutionStatus; @@ -157,6 +158,7 @@ pub enum BeaconChainError { reconstructed_transactions_root: Hash256, }, BlockStreamerError(BlockStreamerError), + EnvelopeStreamerError(EnvelopeStreamerError), AddPayloadLogicError, ExecutionForkChoiceUpdateFailed(execution_layer::Error), PrepareProposerFailed(BlockProcessingError), diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 29081fd767..cf427d1a40 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -43,6 +43,7 @@ pub mod observed_block_producers; pub mod observed_data_sidecars; pub mod observed_operations; mod observed_slashable; +pub mod payload_envelope_streamer; pub mod payload_envelope_verification; pub mod pending_payload_envelopes; pub mod persisted_beacon_chain; diff --git a/beacon_node/beacon_chain/src/payload_envelope_streamer/beacon_chain_adapter.rs b/beacon_node/beacon_chain/src/payload_envelope_streamer/beacon_chain_adapter.rs new file mode 100644 index 0000000000..47c58f07b9 --- /dev/null +++ b/beacon_node/beacon_chain/src/payload_envelope_streamer/beacon_chain_adapter.rs @@ -0,0 +1,42 @@ +use std::sync::Arc; + +#[cfg(test)] +use mockall::automock; +use task_executor::TaskExecutor; +use types::{Hash256, SignedExecutionPayloadEnvelope, Slot}; + +use crate::{BeaconChain, 
BeaconChainError, BeaconChainTypes}; + +/// An adapter to the `BeaconChain` functionalities to remove `BeaconChain` from direct dependency to enable testing envelope streamer logic. +pub(crate) struct EnvelopeStreamerBeaconAdapter { + chain: Arc>, +} + +#[cfg_attr(test, automock, allow(dead_code))] +impl EnvelopeStreamerBeaconAdapter { + pub(crate) fn new(chain: Arc>) -> Self { + Self { chain } + } + + pub(crate) fn executor(&self) -> &TaskExecutor { + &self.chain.task_executor + } + + pub(crate) fn get_payload_envelope( + &self, + root: &Hash256, + ) -> Result>, store::Error> { + self.chain.store.get_payload_envelope(root) + } + + pub(crate) fn get_split_slot(&self) -> Slot { + self.chain.store.get_split_info().slot + } + + pub(crate) fn block_has_canonical_payload( + &self, + root: &Hash256, + ) -> Result { + self.chain.canonical_head.block_has_canonical_payload(root) + } +} diff --git a/beacon_node/beacon_chain/src/payload_envelope_streamer/mod.rs b/beacon_node/beacon_chain/src/payload_envelope_streamer/mod.rs new file mode 100644 index 0000000000..d10e3762a4 --- /dev/null +++ b/beacon_node/beacon_chain/src/payload_envelope_streamer/mod.rs @@ -0,0 +1,219 @@ +mod beacon_chain_adapter; +#[cfg(test)] +mod tests; + +use std::sync::Arc; + +#[cfg_attr(test, double)] +use crate::payload_envelope_streamer::beacon_chain_adapter::EnvelopeStreamerBeaconAdapter; +use futures::Stream; +#[cfg(test)] +use mockall_double::double; +use tokio::sync::mpsc::{self, UnboundedSender}; +use tokio_stream::wrappers::UnboundedReceiverStream; +use tracing::{debug, error, warn}; +use types::{EthSpec, Hash256, SignedExecutionPayloadEnvelope}; + +#[cfg(not(test))] +use crate::BeaconChain; +use crate::{BeaconChainError, BeaconChainTypes}; + +type PayloadEnvelopeResult = + Result>>, BeaconChainError>; + +#[derive(Debug)] +pub enum Error { + BlockMissingFromForkChoice, +} + +#[derive(Debug, PartialEq)] +pub enum EnvelopeRequestSource { + ByRoot, + ByRange, +} + +pub struct 
PayloadEnvelopeStreamer { + adapter: EnvelopeStreamerBeaconAdapter, + request_source: EnvelopeRequestSource, +} + +// TODO(gloas) eventually we'll need to expand this to support loading blinded payload envelopes from the db +// and fetching the execution payload from the EL. See BlockStreamer impl as an example +impl PayloadEnvelopeStreamer { + pub(crate) fn new( + adapter: EnvelopeStreamerBeaconAdapter, + request_source: EnvelopeRequestSource, + ) -> Arc { + Arc::new(Self { + adapter, + request_source, + }) + } + + // TODO(gloas) simply a stub impl for now. Should check some exec payload envelope cache + // and return the envelope if it exists in the cache + fn check_payload_envelope_cache( + &self, + _beacon_block_root: &Hash256, + ) -> Option>> { + // if self.check_caches == CheckCaches::Yes + None + } + + fn load_envelope( + self: &Arc, + beacon_block_root: &Hash256, + ) -> Result>>, BeaconChainError> { + if let Some(cached_envelope) = self.check_payload_envelope_cache(beacon_block_root) { + Ok(Some(cached_envelope)) + } else { + // TODO(gloas) we'll want to use the execution layer directly to call + // the engine api method eth_getPayloadBodiesByRange() + match self.adapter.get_payload_envelope(beacon_block_root) { + Ok(opt_envelope) => Ok(opt_envelope.map(Arc::new)), + Err(e) => Err(BeaconChainError::DBError(e)), + } + } + } + + async fn load_envelopes( + self: &Arc, + block_roots: &[Hash256], + ) -> Result)>, BeaconChainError> { + let streamer = self.clone(); + let block_roots = block_roots.to_vec(); + let split_slot = streamer.adapter.get_split_slot(); + // Loading from the DB is slow -> spawn a blocking task + self.adapter + .executor() + .spawn_blocking_handle( + move || { + let mut results: Vec<(Hash256, PayloadEnvelopeResult)> = Vec::new(); + for root in block_roots.iter() { + // TODO(gloas) we are loading the full envelope from the db. + // in a future PR we will only be storing the blinded envelope. 
+ // When that happens we'll need to use the EL here to fetch + // the payload and reconstruct the non-blinded envelope. + let opt_envelope = match streamer.load_envelope(root) { + Ok(opt_envelope) => opt_envelope, + Err(e) => { + results.push((*root, Err(e))); + continue; + } + }; + + if streamer.request_source == EnvelopeRequestSource::ByRoot { + // No envelope verification required for `ENVELOPE_BY_ROOT` requests. + // If we only served envelopes that match our canonical view, nodes + // wouldn't be able to sync other branches. + results.push((*root, Ok(opt_envelope))); + continue; + } + + // When loading envelopes on or after the split slot, we must cross reference the bid from the child beacon block. + // There can be payloads that have been imported into the hot db but don't match our current view + // of the canonical chain. + + if let Some(envelope) = opt_envelope { + // Ensure that the envelopes we're serving match our view of the canonical chain. + + // When loading envelopes before the split slot, there is no need to check. + // Non-canonical payload envelopes will have already been pruned. + if split_slot > envelope.slot() { + results.push((*root, Ok(Some(envelope)))); + continue; + } + + match streamer.adapter.block_has_canonical_payload(root) { + Ok(is_envelope_canonical) => { + if is_envelope_canonical { + results.push((*root, Ok(Some(envelope)))); + } else { + results.push((*root, Ok(None))); + } + } + Err(_) => { + results.push(( + *root, + Err(BeaconChainError::EnvelopeStreamerError( + Error::BlockMissingFromForkChoice, + )), + )); + } + } + } else { + results.push((*root, Ok(None))); + } + } + results + }, + "load_execution_payload_envelopes", + ) + .ok_or(BeaconChainError::RuntimeShutdown)? 
+ .await + .map_err(BeaconChainError::TokioJoin) + } + + async fn stream_payload_envelopes( + self: Arc, + beacon_block_roots: Vec, + sender: UnboundedSender<(Hash256, Arc>)>, + ) { + let results = match self.load_envelopes(&beacon_block_roots).await { + Ok(results) => results, + Err(e) => { + warn!(error = ?e, "Failed to load payload envelopes"); + send_errors(&beacon_block_roots, sender, e).await; + return; + } + }; + + for (root, result) in results { + if sender.send((root, Arc::new(result))).is_err() { + break; + } + } + } + + pub fn launch_stream( + self: Arc, + block_roots: Vec, + ) -> impl Stream>)> { + let (envelope_tx, envelope_rx) = mpsc::unbounded_channel(); + debug!( + envelopes = block_roots.len(), + "Launching a PayloadEnvelopeStreamer" + ); + let executor = self.adapter.executor().clone(); + executor.spawn( + self.stream_payload_envelopes(block_roots, envelope_tx), + "get_payload_envelopes_sender", + ); + UnboundedReceiverStream::new(envelope_rx) + } +} + +/// Create a `PayloadEnvelopeStreamer` from a `BeaconChain` and launch a stream. 
+#[cfg(not(test))] +pub fn launch_payload_envelope_stream( + chain: Arc>, + block_roots: Vec, + request_source: EnvelopeRequestSource, +) -> impl Stream>)> { + let adapter = beacon_chain_adapter::EnvelopeStreamerBeaconAdapter::new(chain); + PayloadEnvelopeStreamer::new(adapter, request_source).launch_stream(block_roots) +} + +async fn send_errors( + block_roots: &[Hash256], + sender: UnboundedSender<(Hash256, Arc>)>, + beacon_chain_error: BeaconChainError, +) { + let result = Arc::new(Err(beacon_chain_error)); + for beacon_block_root in block_roots { + if sender.send((*beacon_block_root, result.clone())).is_err() { + error!("EnvelopeStreamer channel closed unexpectedly"); + break; + } + } +} diff --git a/beacon_node/beacon_chain/src/payload_envelope_streamer/tests.rs b/beacon_node/beacon_chain/src/payload_envelope_streamer/tests.rs new file mode 100644 index 0000000000..9e869a59b8 --- /dev/null +++ b/beacon_node/beacon_chain/src/payload_envelope_streamer/tests.rs @@ -0,0 +1,386 @@ +use super::*; +use crate::payload_envelope_streamer::beacon_chain_adapter::MockEnvelopeStreamerBeaconAdapter; +use crate::test_utils::EphemeralHarnessType; +use bls::{FixedBytesExtended, Signature}; +use futures::StreamExt; +use std::collections::HashMap; +use task_executor::test_utils::TestRuntime; +use types::{ + ExecutionBlockHash, ExecutionPayloadEnvelope, ExecutionPayloadGloas, Hash256, MinimalEthSpec, + SignedExecutionPayloadEnvelope, Slot, +}; + +type E = MinimalEthSpec; +type T = EphemeralHarnessType; + +struct SlotEntry { + block_root: Hash256, + slot: Slot, + envelope: Option>, + non_canonical_envelope: bool, +} + +impl SlotEntry { + fn expect_envelope(&self, split_slot: Option) -> bool { + if self.envelope.is_none() { + return false; + } + if !self.non_canonical_envelope { + return true; + } + // Non-canonical envelopes before the split slot are returned + // (in production they would have been pruned). 
+ split_slot.is_some_and(|s| self.slot < s) + } +} + +fn roots(chain: &[SlotEntry]) -> Vec { + chain.iter().map(|s| s.block_root).collect() +} + +/// Build test chain data. +fn build_chain( + num_slots: u64, + skipped_slots: &[u64], + missing_envelope_slots: &[u64], + non_canonical_envelope_slots: &[u64], +) -> Vec { + let mut chain = Vec::new(); + for i in 1..=num_slots { + if skipped_slots.contains(&i) { + continue; + } + let slot = Slot::new(i); + let block_root = Hash256::from_low_u64_be(i); + let has_envelope = !missing_envelope_slots.contains(&i); + let is_non_canonical = non_canonical_envelope_slots.contains(&i); + + let envelope = if has_envelope { + let block_hash = if is_non_canonical { + ExecutionBlockHash::from_root(Hash256::repeat_byte(0xFF)) + } else { + ExecutionBlockHash::from_root(Hash256::from_low_u64_be(i)) + }; + Some(SignedExecutionPayloadEnvelope { + message: ExecutionPayloadEnvelope { + payload: ExecutionPayloadGloas { + block_hash, + ..Default::default() + }, + execution_requests: Default::default(), + builder_index: 0, + beacon_block_root: block_root, + slot, + state_root: Hash256::zero(), + }, + signature: Signature::empty(), + }) + } else { + None + }; + + chain.push(SlotEntry { + block_root, + slot, + envelope, + non_canonical_envelope: is_non_canonical, + }); + } + chain +} + +fn mock_adapter() -> (MockEnvelopeStreamerBeaconAdapter, TestRuntime) { + let runtime = TestRuntime::default(); + let mut mock = MockEnvelopeStreamerBeaconAdapter::default(); + mock.expect_executor() + .return_const(runtime.task_executor.clone()); + (mock, runtime) +} + +/// Configure `get_payload_envelope` to return envelopes from chain data. 
+fn mock_envelopes(mock: &mut MockEnvelopeStreamerBeaconAdapter, chain: &[SlotEntry]) { + let envelope_map: HashMap>> = chain + .iter() + .map(|entry| (entry.block_root, entry.envelope.clone())) + .collect(); + mock.expect_get_payload_envelope() + .returning(move |root| Ok(envelope_map.get(root).cloned().flatten())); +} + +/// Configure `block_has_canonical_payload` based on chain's non-canonical entries. +fn mock_canonical_head(mock: &mut MockEnvelopeStreamerBeaconAdapter, chain: &[SlotEntry]) { + let non_canonical: Vec = chain + .iter() + .filter(|e| e.non_canonical_envelope) + .map(|e| e.block_root) + .collect(); + mock.expect_block_has_canonical_payload() + .returning(move |root| Ok(!non_canonical.contains(root))); +} + +fn unwrap_result( + result: &Arc>, +) -> &Option>> { + result + .as_ref() + .as_ref() + .expect("unexpected error in stream result") +} + +async fn assert_stream_matches( + stream: &mut (impl Stream>)> + Unpin), + chain: &[SlotEntry], + split_slot: Option, +) { + for (i, entry) in chain.iter().enumerate() { + let (root, result) = stream + .next() + .await + .unwrap_or_else(|| panic!("stream ended early at index {i}")); + assert_eq!(root, entry.block_root, "root mismatch at index {i}"); + + let result = unwrap_result(&result); + + if entry.expect_envelope(split_slot) { + let envelope = result + .as_ref() + .unwrap_or_else(|| panic!("expected Some at index {i} but got None")); + let expected_envelope = entry.envelope.as_ref().unwrap(); + assert_eq!( + envelope.block_hash(), + expected_envelope.block_hash(), + "block_hash mismatch at index {i}" + ); + } else { + assert!( + result.is_none(), + "expected None at index {i} (missing or non-canonical), got Some" + ); + } + } + + assert!(stream.next().await.is_none(), "stream should be exhausted"); +} + +/// Happy path: all envelopes exist and are canonical. 
+#[tokio::test] +async fn stream_envelopes_by_range() { + let chain = build_chain(8, &[], &[], &[]); + let (mut mock, _runtime) = mock_adapter(); + mock.expect_get_split_slot().return_const(Slot::new(0)); + mock_envelopes(&mut mock, &chain); + mock_canonical_head(&mut mock, &chain); + + let streamer = PayloadEnvelopeStreamer::new(mock, EnvelopeRequestSource::ByRange); + let mut stream = streamer.launch_stream(roots(&chain)); + assert_stream_matches(&mut stream, &chain, None).await; +} + +/// Mixed chain: skipped slots, missing envelopes, and non-canonical envelopes. +#[tokio::test] +async fn stream_envelopes_by_range_mixed() { + let chain = build_chain(12, &[3, 8], &[5], &[7, 11]); + let (mut mock, _runtime) = mock_adapter(); + mock.expect_get_split_slot().return_const(Slot::new(0)); + mock_envelopes(&mut mock, &chain); + mock_canonical_head(&mut mock, &chain); + + let streamer = PayloadEnvelopeStreamer::new(mock, EnvelopeRequestSource::ByRange); + let mut stream = streamer.launch_stream(roots(&chain)); + assert_stream_matches(&mut stream, &chain, None).await; +} + +/// Non-canonical envelopes before the split slot bypass canonical verification +/// and are returned. Non-canonical envelopes after the split slot are filtered out. +#[tokio::test] +async fn stream_envelopes_by_range_before_split() { + // Non-canonical envelopes at slots 2 and 4 (before split), slot 8 (after split). 
+ let chain = build_chain(10, &[], &[], &[2, 4, 8]); + let split_slot = Slot::new(6); + let (mut mock, _runtime) = mock_adapter(); + mock.expect_get_split_slot().return_const(split_slot); + mock_envelopes(&mut mock, &chain); + mock_canonical_head(&mut mock, &chain); + + let streamer = PayloadEnvelopeStreamer::new(mock, EnvelopeRequestSource::ByRange); + let mut stream = streamer.launch_stream(roots(&chain)); + assert_stream_matches(&mut stream, &chain, Some(split_slot)).await; +} + +#[tokio::test] +async fn stream_envelopes_empty_roots() { + let (mut mock, _runtime) = mock_adapter(); + mock.expect_get_split_slot().return_const(Slot::new(0)); + + let streamer = PayloadEnvelopeStreamer::new(mock, EnvelopeRequestSource::ByRange); + let mut stream = streamer.launch_stream(vec![]); + assert!( + stream.next().await.is_none(), + "empty roots should produce no results" + ); +} + +#[tokio::test] +async fn stream_envelopes_single_root() { + let chain = build_chain(3, &[], &[], &[]); + let (mut mock, _runtime) = mock_adapter(); + mock.expect_get_split_slot().return_const(Slot::new(0)); + mock_envelopes(&mut mock, &chain); + mock_canonical_head(&mut mock, &chain); + + let streamer = PayloadEnvelopeStreamer::new(mock, EnvelopeRequestSource::ByRange); + let mut stream = streamer.launch_stream(vec![chain[1].block_root]); + + let (root, result) = stream.next().await.expect("should get one result"); + assert_eq!(root, chain[1].block_root); + let envelope = unwrap_result(&result) + .as_ref() + .expect("should have envelope"); + assert_eq!( + envelope.block_hash(), + chain[1].envelope.as_ref().unwrap().block_hash(), + ); + + assert!(stream.next().await.is_none(), "stream should be exhausted"); +} + +/// ByRoot requests skip canonical verification, so non-canonical envelopes +/// should still be returned. `block_has_canonical_payload` should never be called. 
+#[tokio::test] +async fn stream_envelopes_by_root() { + let chain = build_chain(8, &[], &[], &[3, 5, 7]); + let (mut mock, _runtime) = mock_adapter(); + mock.expect_get_split_slot().return_const(Slot::new(0)); + mock_envelopes(&mut mock, &chain); + mock.expect_block_has_canonical_payload().times(0); + + let streamer = PayloadEnvelopeStreamer::new(mock, EnvelopeRequestSource::ByRoot); + let mut stream = streamer.launch_stream(roots(&chain)); + + // Every envelope should come back as Some, even the non-canonical ones. + for (i, entry) in chain.iter().enumerate() { + let (root, result) = stream + .next() + .await + .unwrap_or_else(|| panic!("stream ended early at index {i}")); + assert_eq!(root, entry.block_root, "root mismatch at index {i}"); + + let envelope = unwrap_result(&result) + .as_ref() + .unwrap_or_else(|| panic!("expected Some at index {i} for ByRoot request")); + let expected_envelope = entry.envelope.as_ref().unwrap(); + assert_eq!( + envelope.block_hash(), + expected_envelope.block_hash(), + "block_hash mismatch at index {i}" + ); + } + + assert!(stream.next().await.is_none(), "stream should be exhausted"); +} + +/// When `block_has_canonical_payload` returns an error, the streamer should +/// yield `Err(EnvelopeStreamerError(BlockMissingFromForkChoice))` for those roots. 
+#[tokio::test] +async fn stream_envelopes_error() { + let chain = build_chain(4, &[], &[], &[]); + let (mut mock, _runtime) = mock_adapter(); + mock.expect_get_split_slot().return_const(Slot::new(0)); + mock_envelopes(&mut mock, &chain); + mock.expect_block_has_canonical_payload() + .returning(|_| Err(BeaconChainError::CanonicalHeadLockTimeout)); + + let streamer = PayloadEnvelopeStreamer::new(mock, EnvelopeRequestSource::ByRange); + let mut stream = streamer.launch_stream(roots(&chain)); + + for (i, entry) in chain.iter().enumerate() { + let (root, result) = stream + .next() + .await + .unwrap_or_else(|| panic!("stream ended early at index {i}")); + assert_eq!(root, entry.block_root, "root mismatch at index {i}"); + assert!( + matches!( + result.as_ref(), + Err(BeaconChainError::EnvelopeStreamerError( + Error::BlockMissingFromForkChoice + )) + ), + "expected BlockMissingFromForkChoice error at index {i}, got {:?}", + result + ); + } + + assert!(stream.next().await.is_none(), "stream should be exhausted"); +} + +/// Requesting unknown roots (not in the store) via ByRange should return Ok(None). 
+#[tokio::test] +async fn stream_envelopes_by_range_unknown_roots() { + let (mut mock, _runtime) = mock_adapter(); + mock.expect_get_split_slot().return_const(Slot::new(0)); + mock.expect_get_payload_envelope().returning(|_| Ok(None)); + + let unknown_roots: Vec = (1..=4) + .map(|i| Hash256::from_low_u64_be(i * 1000)) + .collect(); + + let streamer = PayloadEnvelopeStreamer::new(mock, EnvelopeRequestSource::ByRange); + let mut stream = streamer.launch_stream(unknown_roots.clone()); + + for (i, expected_root) in unknown_roots.iter().enumerate() { + let (root, result) = stream + .next() + .await + .unwrap_or_else(|| panic!("stream ended early at index {i}")); + assert_eq!(root, *expected_root, "root mismatch at index {i}"); + let envelope = unwrap_result(&result); + assert!( + envelope.is_none(), + "expected None for unknown root at index {i}" + ); + } + + assert!(stream.next().await.is_none(), "stream should be exhausted"); +} + +/// Requesting roots via ByRoot where some envelopes are missing should +/// return Ok(None) for those roots. 
+#[tokio::test] +async fn stream_envelopes_by_root_missing_envelopes() { + let chain = build_chain(6, &[], &[2, 4], &[]); + let (mut mock, _runtime) = mock_adapter(); + mock.expect_get_split_slot().return_const(Slot::new(0)); + mock_envelopes(&mut mock, &chain); + mock.expect_block_has_canonical_payload().times(0); + + let streamer = PayloadEnvelopeStreamer::new(mock, EnvelopeRequestSource::ByRoot); + let mut stream = streamer.launch_stream(roots(&chain)); + + for (i, entry) in chain.iter().enumerate() { + let (root, result) = stream + .next() + .await + .unwrap_or_else(|| panic!("stream ended early at index {i}")); + assert_eq!(root, entry.block_root, "root mismatch at index {i}"); + + let envelope_opt = unwrap_result(&result); + if let Some(entry_envelope) = &entry.envelope { + let envelope = envelope_opt + .as_ref() + .unwrap_or_else(|| panic!("expected Some at index {i}")); + assert_eq!( + envelope.block_hash(), + entry_envelope.block_hash(), + "block_hash mismatch at index {i}" + ); + } else { + assert!( + envelope_opt.is_none(), + "expected None for missing envelope at index {i}" + ); + } + } + + assert!(stream.next().await.is_none(), "stream should be exhausted"); +} diff --git a/beacon_node/beacon_processor/src/lib.rs b/beacon_node/beacon_processor/src/lib.rs index c33f4840e0..724c41cfc9 100644 --- a/beacon_node/beacon_processor/src/lib.rs +++ b/beacon_node/beacon_processor/src/lib.rs @@ -426,6 +426,8 @@ pub enum Work { Status(BlockingFn), BlocksByRangeRequest(AsyncFn), BlocksByRootsRequest(AsyncFn), + PayloadEnvelopesByRangeRequest(AsyncFn), + PayloadEnvelopesByRootRequest(AsyncFn), BlobsByRangeRequest(BlockingFn), BlobsByRootsRequest(BlockingFn), DataColumnsByRootsRequest(BlockingFn), @@ -483,6 +485,8 @@ pub enum WorkType { Status, BlocksByRangeRequest, BlocksByRootsRequest, + PayloadEnvelopesByRangeRequest, + PayloadEnvelopesByRootRequest, BlobsByRangeRequest, BlobsByRootsRequest, DataColumnsByRootsRequest, @@ -542,6 +546,8 @@ impl Work { Work::Status(_) 
=> WorkType::Status, Work::BlocksByRangeRequest(_) => WorkType::BlocksByRangeRequest, Work::BlocksByRootsRequest(_) => WorkType::BlocksByRootsRequest, + Work::PayloadEnvelopesByRangeRequest(_) => WorkType::PayloadEnvelopesByRangeRequest, + Work::PayloadEnvelopesByRootRequest(_) => WorkType::PayloadEnvelopesByRootRequest, Work::BlobsByRangeRequest(_) => WorkType::BlobsByRangeRequest, Work::BlobsByRootsRequest(_) => WorkType::BlobsByRootsRequest, Work::DataColumnsByRootsRequest(_) => WorkType::DataColumnsByRootsRequest, @@ -991,6 +997,12 @@ impl BeaconProcessor { Some(item) } else if let Some(item) = work_queues.dcbrange_queue.pop() { Some(item) + } else if let Some(item) = work_queues.payload_envelopes_brange_queue.pop() + { + Some(item) + } else if let Some(item) = work_queues.payload_envelopes_broots_queue.pop() + { + Some(item) // Check slashings after all other consensus messages so we prioritize // following head. // @@ -1180,6 +1192,12 @@ impl BeaconProcessor { Work::BlocksByRootsRequest { .. } => { work_queues.block_broots_queue.push(work, work_id) } + Work::PayloadEnvelopesByRangeRequest { .. } => work_queues + .payload_envelopes_brange_queue + .push(work, work_id), + Work::PayloadEnvelopesByRootRequest { .. } => work_queues + .payload_envelopes_broots_queue + .push(work, work_id), Work::BlobsByRangeRequest { .. 
} => { work_queues.blob_brange_queue.push(work, work_id) } @@ -1296,6 +1314,12 @@ impl BeaconProcessor { WorkType::Status => work_queues.status_queue.len(), WorkType::BlocksByRangeRequest => work_queues.block_brange_queue.len(), WorkType::BlocksByRootsRequest => work_queues.block_broots_queue.len(), + WorkType::PayloadEnvelopesByRangeRequest => { + work_queues.payload_envelopes_brange_queue.len() + } + WorkType::PayloadEnvelopesByRootRequest => { + work_queues.payload_envelopes_broots_queue.len() + } WorkType::BlobsByRangeRequest => work_queues.blob_brange_queue.len(), WorkType::BlobsByRootsRequest => work_queues.blob_broots_queue.len(), WorkType::DataColumnsByRootsRequest => work_queues.dcbroots_queue.len(), @@ -1487,9 +1511,10 @@ impl BeaconProcessor { | Work::DataColumnsByRangeRequest(process_fn) => { task_spawner.spawn_blocking(process_fn) } - Work::BlocksByRangeRequest(work) | Work::BlocksByRootsRequest(work) => { - task_spawner.spawn_async(work) - } + Work::BlocksByRangeRequest(work) + | Work::BlocksByRootsRequest(work) + | Work::PayloadEnvelopesByRangeRequest(work) + | Work::PayloadEnvelopesByRootRequest(work) => task_spawner.spawn_async(work), Work::ChainSegmentBackfill(process_fn) => { if self.config.enable_backfill_rate_limiting { task_spawner.spawn_blocking_with_rayon(RayonPoolType::LowPriority, process_fn) diff --git a/beacon_node/beacon_processor/src/scheduler/work_queue.rs b/beacon_node/beacon_processor/src/scheduler/work_queue.rs index e48c776b6d..363ec06097 100644 --- a/beacon_node/beacon_processor/src/scheduler/work_queue.rs +++ b/beacon_node/beacon_processor/src/scheduler/work_queue.rs @@ -135,6 +135,8 @@ pub struct BeaconProcessorQueueLengths { blob_brange_queue: usize, dcbroots_queue: usize, dcbrange_queue: usize, + payload_envelopes_brange_queue: usize, + payload_envelopes_broots_queue: usize, gossip_bls_to_execution_change_queue: usize, gossip_execution_payload_queue: usize, gossip_execution_payload_bid_queue: usize, @@ -206,6 +208,8 @@ impl 
BeaconProcessorQueueLengths { blob_brange_queue: 1024, dcbroots_queue: 1024, dcbrange_queue: 1024, + payload_envelopes_brange_queue: 1024, + payload_envelopes_broots_queue: 1024, gossip_bls_to_execution_change_queue: 16384, // TODO(EIP-7732): verify 1024 is preferable. I used same value as `gossip_block_queue` and `gossip_blob_queue` gossip_execution_payload_queue: 1024, @@ -256,6 +260,8 @@ pub struct WorkQueues { pub status_queue: FifoQueue>, pub block_brange_queue: FifoQueue>, pub block_broots_queue: FifoQueue>, + pub payload_envelopes_brange_queue: FifoQueue>, + pub payload_envelopes_broots_queue: FifoQueue>, pub blob_broots_queue: FifoQueue>, pub blob_brange_queue: FifoQueue>, pub dcbroots_queue: FifoQueue>, @@ -327,6 +333,10 @@ impl WorkQueues { let blob_brange_queue = FifoQueue::new(queue_lengths.blob_brange_queue); let dcbroots_queue = FifoQueue::new(queue_lengths.dcbroots_queue); let dcbrange_queue = FifoQueue::new(queue_lengths.dcbrange_queue); + let payload_envelopes_brange_queue = + FifoQueue::new(queue_lengths.payload_envelopes_brange_queue); + let payload_envelopes_broots_queue = + FifoQueue::new(queue_lengths.payload_envelopes_broots_queue); let gossip_bls_to_execution_change_queue = FifoQueue::new(queue_lengths.gossip_bls_to_execution_change_queue); @@ -387,6 +397,8 @@ impl WorkQueues { blob_brange_queue, dcbroots_queue, dcbrange_queue, + payload_envelopes_brange_queue, + payload_envelopes_broots_queue, gossip_bls_to_execution_change_queue, gossip_execution_payload_queue, gossip_execution_payload_bid_queue, diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index 43a44c85fc..2edd9de2d9 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -590,6 +590,8 @@ impl PeerManager { Protocol::BlocksByRange => PeerAction::MidToleranceError, Protocol::BlocksByRoot => PeerAction::MidToleranceError, 
Protocol::BlobsByRange => PeerAction::MidToleranceError, + Protocol::PayloadEnvelopesByRange => PeerAction::MidToleranceError, + Protocol::PayloadEnvelopesByRoot => PeerAction::MidToleranceError, // Lighthouse does not currently make light client requests; therefore, this // is an unexpected scenario. We do not ban the peer for rate limiting. Protocol::LightClientBootstrap => return, @@ -615,6 +617,8 @@ impl PeerManager { Protocol::Ping => PeerAction::Fatal, Protocol::BlocksByRange => return, Protocol::BlocksByRoot => return, + Protocol::PayloadEnvelopesByRange => return, + Protocol::PayloadEnvelopesByRoot => return, Protocol::BlobsByRange => return, Protocol::BlobsByRoot => return, Protocol::DataColumnsByRoot => return, @@ -638,6 +642,8 @@ impl PeerManager { Protocol::Ping => PeerAction::LowToleranceError, Protocol::BlocksByRange => PeerAction::MidToleranceError, Protocol::BlocksByRoot => PeerAction::MidToleranceError, + Protocol::PayloadEnvelopesByRange => PeerAction::MidToleranceError, + Protocol::PayloadEnvelopesByRoot => PeerAction::MidToleranceError, Protocol::BlobsByRange => PeerAction::MidToleranceError, Protocol::BlobsByRoot => PeerAction::MidToleranceError, Protocol::DataColumnsByRoot => PeerAction::MidToleranceError, diff --git a/beacon_node/lighthouse_network/src/rpc/codec.rs b/beacon_node/lighthouse_network/src/rpc/codec.rs index d1a3182fad..346e350825 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec.rs @@ -15,6 +15,7 @@ use std::io::{Read, Write}; use std::marker::PhantomData; use std::sync::Arc; use tokio_util::codec::{Decoder, Encoder}; +use types::SignedExecutionPayloadEnvelope; use types::{ BlobSidecar, ChainSpec, DataColumnSidecar, DataColumnsByRootIdentifier, EthSpec, ForkContext, ForkName, Hash256, LightClientBootstrap, LightClientFinalityUpdate, @@ -76,6 +77,8 @@ impl SSZSnappyInboundCodec { }, RpcSuccessResponse::BlocksByRange(res) => res.as_ssz_bytes(), 
RpcSuccessResponse::BlocksByRoot(res) => res.as_ssz_bytes(), + RpcSuccessResponse::PayloadEnvelopesByRange(res) => res.as_ssz_bytes(), + RpcSuccessResponse::PayloadEnvelopesByRoot(res) => res.as_ssz_bytes(), RpcSuccessResponse::BlobsByRange(res) => res.as_ssz_bytes(), RpcSuccessResponse::BlobsByRoot(res) => res.as_ssz_bytes(), RpcSuccessResponse::DataColumnsByRoot(res) => res.as_ssz_bytes(), @@ -356,6 +359,8 @@ impl Encoder> for SSZSnappyOutboundCodec { BlocksByRootRequest::V1(req) => req.block_roots.as_ssz_bytes(), BlocksByRootRequest::V2(req) => req.block_roots.as_ssz_bytes(), }, + RequestType::PayloadEnvelopesByRange(req) => req.as_ssz_bytes(), + RequestType::PayloadEnvelopesByRoot(req) => req.beacon_block_roots.as_ssz_bytes(), RequestType::BlobsByRange(req) => req.as_ssz_bytes(), RequestType::BlobsByRoot(req) => req.blob_ids.as_ssz_bytes(), RequestType::DataColumnsByRange(req) => req.as_ssz_bytes(), @@ -548,6 +553,19 @@ fn handle_rpc_request( )?, }), ))), + SupportedProtocol::PayloadEnvelopesByRangeV1 => { + Ok(Some(RequestType::PayloadEnvelopesByRange( + PayloadEnvelopesByRangeRequest::from_ssz_bytes(decoded_buffer)?, + ))) + } + SupportedProtocol::PayloadEnvelopesByRootV1 => Ok(Some( + RequestType::PayloadEnvelopesByRoot(PayloadEnvelopesByRootRequest { + beacon_block_roots: RuntimeVariableList::from_ssz_bytes( + decoded_buffer, + spec.max_request_payloads(), + )?, + }), + )), SupportedProtocol::BlobsByRangeV1 => Ok(Some(RequestType::BlobsByRange( BlobsByRangeRequest::from_ssz_bytes(decoded_buffer)?, ))), @@ -650,6 +668,48 @@ fn handle_rpc_response( SupportedProtocol::BlocksByRootV1 => Ok(Some(RpcSuccessResponse::BlocksByRoot(Arc::new( SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes(decoded_buffer)?), )))), + SupportedProtocol::PayloadEnvelopesByRangeV1 => match fork_name { + Some(fork_name) => { + if fork_name.gloas_enabled() { + Ok(Some(RpcSuccessResponse::PayloadEnvelopesByRange(Arc::new( + 
SignedExecutionPayloadEnvelope::from_ssz_bytes(decoded_buffer)?, + )))) + } else { + Err(RPCError::ErrorResponse( + RpcErrorResponse::InvalidRequest, + "Invalid fork name for payload envelopes by range".to_string(), + )) + } + } + None => Err(RPCError::ErrorResponse( + RpcErrorResponse::InvalidRequest, + format!( + "No context bytes provided for {:?} response", + versioned_protocol + ), + )), + }, + SupportedProtocol::PayloadEnvelopesByRootV1 => match fork_name { + Some(fork_name) => { + if fork_name.gloas_enabled() { + Ok(Some(RpcSuccessResponse::PayloadEnvelopesByRoot(Arc::new( + SignedExecutionPayloadEnvelope::from_ssz_bytes(decoded_buffer)?, + )))) + } else { + Err(RPCError::ErrorResponse( + RpcErrorResponse::InvalidRequest, + "Invalid fork name for payload envelopes by root".to_string(), + )) + } + } + None => Err(RPCError::ErrorResponse( + RpcErrorResponse::InvalidRequest, + format!( + "No context bytes provided for {:?} response", + versioned_protocol + ), + )), + }, SupportedProtocol::BlobsByRangeV1 => match fork_name { Some(fork_name) => { if fork_name.deneb_enabled() { @@ -1260,6 +1320,12 @@ mod tests { RequestType::BlobsByRange(blbrange) => { assert_eq!(decoded, RequestType::BlobsByRange(blbrange)) } + RequestType::PayloadEnvelopesByRange(perange) => { + assert_eq!(decoded, RequestType::PayloadEnvelopesByRange(perange)) + } + RequestType::PayloadEnvelopesByRoot(peroot) => { + assert_eq!(decoded, RequestType::PayloadEnvelopesByRoot(peroot)) + } RequestType::BlobsByRoot(bbroot) => { assert_eq!(decoded, RequestType::BlobsByRoot(bbroot)) } diff --git a/beacon_node/lighthouse_network/src/rpc/config.rs b/beacon_node/lighthouse_network/src/rpc/config.rs index b0ee6fea64..9e1c6541ec 100644 --- a/beacon_node/lighthouse_network/src/rpc/config.rs +++ b/beacon_node/lighthouse_network/src/rpc/config.rs @@ -89,6 +89,8 @@ pub struct RateLimiterConfig { pub(super) goodbye_quota: Quota, pub(super) blocks_by_range_quota: Quota, pub(super) blocks_by_root_quota: Quota, + 
pub(super) payload_envelopes_by_range_quota: Quota, + pub(super) payload_envelopes_by_root_quota: Quota, pub(super) blobs_by_range_quota: Quota, pub(super) blobs_by_root_quota: Quota, pub(super) data_columns_by_root_quota: Quota, @@ -111,6 +113,10 @@ impl RateLimiterConfig { Quota::n_every(NonZeroU64::new(128).unwrap(), 10); pub const DEFAULT_BLOCKS_BY_ROOT_QUOTA: Quota = Quota::n_every(NonZeroU64::new(128).unwrap(), 10); + pub const DEFAULT_PAYLOAD_ENVELOPES_BY_RANGE_QUOTA: Quota = + Quota::n_every(NonZeroU64::new(128).unwrap(), 10); + pub const DEFAULT_PAYLOAD_ENVELOPES_BY_ROOT_QUOTA: Quota = + Quota::n_every(NonZeroU64::new(128).unwrap(), 10); // `DEFAULT_BLOCKS_BY_RANGE_QUOTA` * (target + 1) to account for high usage pub const DEFAULT_BLOBS_BY_RANGE_QUOTA: Quota = Quota::n_every(NonZeroU64::new(896).unwrap(), 10); @@ -137,6 +143,8 @@ impl Default for RateLimiterConfig { goodbye_quota: Self::DEFAULT_GOODBYE_QUOTA, blocks_by_range_quota: Self::DEFAULT_BLOCKS_BY_RANGE_QUOTA, blocks_by_root_quota: Self::DEFAULT_BLOCKS_BY_ROOT_QUOTA, + payload_envelopes_by_range_quota: Self::DEFAULT_PAYLOAD_ENVELOPES_BY_RANGE_QUOTA, + payload_envelopes_by_root_quota: Self::DEFAULT_PAYLOAD_ENVELOPES_BY_ROOT_QUOTA, blobs_by_range_quota: Self::DEFAULT_BLOBS_BY_RANGE_QUOTA, blobs_by_root_quota: Self::DEFAULT_BLOBS_BY_ROOT_QUOTA, data_columns_by_root_quota: Self::DEFAULT_DATA_COLUMNS_BY_ROOT_QUOTA, @@ -169,6 +177,14 @@ impl Debug for RateLimiterConfig { .field("goodbye", fmt_q!(&self.goodbye_quota)) .field("blocks_by_range", fmt_q!(&self.blocks_by_range_quota)) .field("blocks_by_root", fmt_q!(&self.blocks_by_root_quota)) + .field( + "payload_envelopes_by_range", + fmt_q!(&self.payload_envelopes_by_range_quota), + ) + .field( + "payload_envelopes_by_root", + fmt_q!(&self.payload_envelopes_by_root_quota), + ) .field("blobs_by_range", fmt_q!(&self.blobs_by_range_quota)) .field("blobs_by_root", fmt_q!(&self.blobs_by_root_quota)) .field( @@ -197,6 +213,8 @@ impl FromStr for RateLimiterConfig 
{ let mut goodbye_quota = None; let mut blocks_by_range_quota = None; let mut blocks_by_root_quota = None; + let mut payload_envelopes_by_range_quota = None; + let mut payload_envelopes_by_root_quota = None; let mut blobs_by_range_quota = None; let mut blobs_by_root_quota = None; let mut data_columns_by_root_quota = None; @@ -214,6 +232,12 @@ impl FromStr for RateLimiterConfig { Protocol::Goodbye => goodbye_quota = goodbye_quota.or(quota), Protocol::BlocksByRange => blocks_by_range_quota = blocks_by_range_quota.or(quota), Protocol::BlocksByRoot => blocks_by_root_quota = blocks_by_root_quota.or(quota), + Protocol::PayloadEnvelopesByRange => { + payload_envelopes_by_range_quota = payload_envelopes_by_range_quota.or(quota) + } + Protocol::PayloadEnvelopesByRoot => { + payload_envelopes_by_root_quota = payload_envelopes_by_root_quota.or(quota) + } Protocol::BlobsByRange => blobs_by_range_quota = blobs_by_range_quota.or(quota), Protocol::BlobsByRoot => blobs_by_root_quota = blobs_by_root_quota.or(quota), Protocol::DataColumnsByRoot => { @@ -250,6 +274,10 @@ impl FromStr for RateLimiterConfig { .unwrap_or(Self::DEFAULT_BLOCKS_BY_RANGE_QUOTA), blocks_by_root_quota: blocks_by_root_quota .unwrap_or(Self::DEFAULT_BLOCKS_BY_ROOT_QUOTA), + payload_envelopes_by_range_quota: payload_envelopes_by_range_quota + .unwrap_or(Self::DEFAULT_PAYLOAD_ENVELOPES_BY_RANGE_QUOTA), + payload_envelopes_by_root_quota: payload_envelopes_by_root_quota + .unwrap_or(Self::DEFAULT_PAYLOAD_ENVELOPES_BY_ROOT_QUOTA), blobs_by_range_quota: blobs_by_range_quota .unwrap_or(Self::DEFAULT_BLOBS_BY_RANGE_QUOTA), blobs_by_root_quota: blobs_by_root_quota.unwrap_or(Self::DEFAULT_BLOBS_BY_ROOT_QUOTA), diff --git a/beacon_node/lighthouse_network/src/rpc/handler.rs b/beacon_node/lighthouse_network/src/rpc/handler.rs index 9861119ac1..336747fb83 100644 --- a/beacon_node/lighthouse_network/src/rpc/handler.rs +++ b/beacon_node/lighthouse_network/src/rpc/handler.rs @@ -954,6 +954,35 @@ where return; } } + 
RequestType::PayloadEnvelopesByRange(request) => { + let max_allowed = spec.max_request_payloads; + if request.count > max_allowed { + self.events_out.push(HandlerEvent::Err(HandlerErr::Inbound { + id: self.current_inbound_substream_id, + proto: Protocol::PayloadEnvelopesByRange, + error: RPCError::InvalidData(format!( + "requested exceeded limit. allowed: {}, requested: {}", + max_allowed, request.count + )), + })); + return; + } + } + RequestType::DataColumnsByRange(request) => { + let max_requested = request.max_requested::(); + let max_allowed = spec.max_request_data_column_sidecars; + if max_requested > max_allowed { + self.events_out.push(HandlerEvent::Err(HandlerErr::Inbound { + id: self.current_inbound_substream_id, + proto: Protocol::DataColumnsByRange, + error: RPCError::InvalidData(format!( + "requested exceeded limit. allowed: {}, requested: {}", + max_allowed, max_requested + )), + })); + return; + } + } _ => {} }; diff --git a/beacon_node/lighthouse_network/src/rpc/methods.rs b/beacon_node/lighthouse_network/src/rpc/methods.rs index 5a9a683b75..baabf48683 100644 --- a/beacon_node/lighthouse_network/src/rpc/methods.rs +++ b/beacon_node/lighthouse_network/src/rpc/methods.rs @@ -17,7 +17,8 @@ use types::light_client::consts::MAX_REQUEST_LIGHT_CLIENT_UPDATES; use types::{ BlobSidecar, ChainSpec, ColumnIndex, DataColumnSidecar, DataColumnsByRootIdentifier, Epoch, EthSpec, ForkContext, Hash256, LightClientBootstrap, LightClientFinalityUpdate, - LightClientOptimisticUpdate, LightClientUpdate, SignedBeaconBlock, Slot, + LightClientOptimisticUpdate, LightClientUpdate, SignedBeaconBlock, + SignedExecutionPayloadEnvelope, Slot, }; /// Maximum length of error message. @@ -362,6 +363,16 @@ impl BlocksByRangeRequest { } } +/// Request a number of execution payload envelopes from a peer. +#[derive(Encode, Decode, Clone, Debug, PartialEq)] +pub struct PayloadEnvelopesByRangeRequest { + /// The starting slot to request execution payload envelopes. 
+ pub start_slot: u64, + + /// The number of slots from the start slot. + pub count: u64, +} + /// Request a number of beacon blobs from a peer. #[derive(Encode, Decode, Clone, Debug, PartialEq)] pub struct BlobsByRangeRequest { @@ -505,6 +516,29 @@ impl BlocksByRootRequest { } } +/// Request a number of execution payload envelopes from a peer. +#[derive(Clone, Debug, PartialEq)] +pub struct PayloadEnvelopesByRootRequest { + /// The list of beacon block roots used to request execution payload envelopes. + pub beacon_block_roots: RuntimeVariableList, +} + +impl PayloadEnvelopesByRootRequest { + pub fn new( + beacon_block_roots: Vec, + fork_context: &ForkContext, + ) -> Result { + let max_requests_envelopes = fork_context.spec.max_request_payloads(); + + let beacon_block_roots = + RuntimeVariableList::new(beacon_block_roots, max_requests_envelopes).map_err(|e| { + format!("ExecutionPayloadEnvelopesByRootRequest too many beacon block roots: {e:?}") + })?; + + Ok(Self { beacon_block_roots }) + } +} + /// Request a number of beacon blocks and blobs from a peer. #[derive(Clone, Debug, PartialEq)] pub struct BlobsByRootRequest { @@ -588,6 +622,13 @@ pub enum RpcSuccessResponse { /// A response to a get BLOCKS_BY_ROOT request. BlocksByRoot(Arc>), + /// A response to a get EXECUTION_PAYLOAD_ENVELOPES_BY_RANGE request. A None response signifies + /// the end of the batch. + PayloadEnvelopesByRange(Arc>), + + /// A response to a get EXECUTION_PAYLOAD_ENVELOPES_BY_ROOT request. + PayloadEnvelopesByRoot(Arc>), + /// A response to a get BLOBS_BY_RANGE request BlobsByRange(Arc>), @@ -628,6 +669,12 @@ pub enum ResponseTermination { /// Blocks by root stream termination. BlocksByRoot, + /// Execution payload envelopes by range stream termination. + PayloadEnvelopesByRange, + + /// Execution payload envelopes by root stream termination. + PayloadEnvelopesByRoot, + /// Blobs by range stream termination. 
BlobsByRange, @@ -649,6 +696,8 @@ impl ResponseTermination { match self { ResponseTermination::BlocksByRange => Protocol::BlocksByRange, ResponseTermination::BlocksByRoot => Protocol::BlocksByRoot, + ResponseTermination::PayloadEnvelopesByRange => Protocol::PayloadEnvelopesByRange, + ResponseTermination::PayloadEnvelopesByRoot => Protocol::PayloadEnvelopesByRoot, ResponseTermination::BlobsByRange => Protocol::BlobsByRange, ResponseTermination::BlobsByRoot => Protocol::BlobsByRoot, ResponseTermination::DataColumnsByRoot => Protocol::DataColumnsByRoot, @@ -744,6 +793,8 @@ impl RpcSuccessResponse { RpcSuccessResponse::Status(_) => Protocol::Status, RpcSuccessResponse::BlocksByRange(_) => Protocol::BlocksByRange, RpcSuccessResponse::BlocksByRoot(_) => Protocol::BlocksByRoot, + RpcSuccessResponse::PayloadEnvelopesByRange(_) => Protocol::PayloadEnvelopesByRange, + RpcSuccessResponse::PayloadEnvelopesByRoot(_) => Protocol::PayloadEnvelopesByRoot, RpcSuccessResponse::BlobsByRange(_) => Protocol::BlobsByRange, RpcSuccessResponse::BlobsByRoot(_) => Protocol::BlobsByRoot, RpcSuccessResponse::DataColumnsByRoot(_) => Protocol::DataColumnsByRoot, @@ -762,6 +813,7 @@ impl RpcSuccessResponse { pub fn slot(&self) -> Option { match self { Self::BlocksByRange(r) | Self::BlocksByRoot(r) => Some(r.slot()), + Self::PayloadEnvelopesByRoot(r) | Self::PayloadEnvelopesByRange(r) => Some(r.slot()), Self::BlobsByRange(r) | Self::BlobsByRoot(r) => Some(r.slot()), Self::DataColumnsByRange(r) | Self::DataColumnsByRoot(r) => Some(r.slot()), Self::LightClientBootstrap(r) => Some(r.get_slot()), @@ -812,6 +864,20 @@ impl std::fmt::Display for RpcSuccessResponse { RpcSuccessResponse::BlocksByRoot(block) => { write!(f, "BlocksByRoot: Block slot: {}", block.slot()) } + RpcSuccessResponse::PayloadEnvelopesByRange(envelope) => { + write!( + f, + "ExecutionPayloadEnvelopesByRange: Envelope slot: {}", + envelope.slot() + ) + } + RpcSuccessResponse::PayloadEnvelopesByRoot(envelope) => { + write!( + f, + 
"ExecutionPayloadEnvelopesByRoot: Envelope slot: {}", + envelope.slot() + ) + } RpcSuccessResponse::BlobsByRange(blob) => { write!(f, "BlobsByRange: Blob slot: {}", blob.slot()) } diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index b75ca72eda..2c92e17c44 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -22,7 +22,7 @@ use types::{ LightClientBootstrap, LightClientBootstrapAltair, LightClientFinalityUpdate, LightClientFinalityUpdateAltair, LightClientOptimisticUpdate, LightClientOptimisticUpdateAltair, LightClientUpdate, MainnetEthSpec, MinimalEthSpec, - SignedBeaconBlock, + SignedBeaconBlock, SignedExecutionPayloadEnvelope, }; // Note: Hardcoding the `EthSpec` type for `SignedBeaconBlock` as min/max values is @@ -65,6 +65,12 @@ pub static SIGNED_BEACON_BLOCK_BELLATRIX_MAX: LazyLock = + types::ExecutionPayload::::max_execution_payload_bellatrix_size() // adding max size of execution payload (~16gb) + ssz::BYTES_PER_LENGTH_OFFSET); // Adding the additional ssz offset for the `ExecutionPayload` field +pub static SIGNED_EXECUTION_PAYLOAD_ENVELOPE_MIN: LazyLock = + LazyLock::new(SignedExecutionPayloadEnvelope::::min_size); + +pub static SIGNED_EXECUTION_PAYLOAD_ENVELOPE_MAX: LazyLock = + LazyLock::new(SignedExecutionPayloadEnvelope::::max_size); + pub static BLOB_SIDECAR_SIZE: LazyLock = LazyLock::new(BlobSidecar::::max_size); @@ -140,13 +146,30 @@ pub fn rpc_block_limits_by_fork(current_fork: ForkName) -> RpcLimits { ), // After the merge the max SSZ size of a block is absurdly big. 
The size is actually // bound by other constants, so here we default to the bellatrix's max value - _ => RpcLimits::new( - *SIGNED_BEACON_BLOCK_BASE_MIN, // Base block is smaller than altair and bellatrix blocks - *SIGNED_BEACON_BLOCK_BELLATRIX_MAX, // Bellatrix block is larger than base and altair blocks + // After the merge the max SSZ size includes the execution payload. + // Gloas blocks no longer contain the execution payload, but we must + // still accept pre-Gloas blocks during historical sync, so we keep the + // Bellatrix max as the upper bound. + ForkName::Bellatrix + | ForkName::Capella + | ForkName::Deneb + | ForkName::Electra + | ForkName::Fulu + | ForkName::Gloas => RpcLimits::new( + *SIGNED_BEACON_BLOCK_BASE_MIN, + *SIGNED_BEACON_BLOCK_BELLATRIX_MAX, ), } } +/// Returns the rpc limits for payload_envelope_by_range and payload_envelope_by_root responses. +pub fn rpc_payload_limits() -> RpcLimits { + RpcLimits::new( + *SIGNED_EXECUTION_PAYLOAD_ENVELOPE_MIN, + *SIGNED_EXECUTION_PAYLOAD_ENVELOPE_MAX, + ) +} + fn rpc_light_client_updates_by_range_limits_by_fork(current_fork: ForkName) -> RpcLimits { let altair_fixed_len = LightClientFinalityUpdateAltair::::ssz_fixed_len(); @@ -242,6 +265,12 @@ pub enum Protocol { /// The `BlobsByRange` protocol name. #[strum(serialize = "blob_sidecars_by_range")] BlobsByRange, + /// The `ExecutionPayloadEnvelopesByRoot` protocol name. + #[strum(serialize = "execution_payload_envelopes_by_root")] + PayloadEnvelopesByRoot, + /// The `ExecutionPayloadEnvelopesByRange` protocol name. + #[strum(serialize = "execution_payload_envelopes_by_range")] + PayloadEnvelopesByRange, /// The `BlobsByRoot` protocol name. 
#[strum(serialize = "blob_sidecars_by_root")] BlobsByRoot, @@ -277,6 +306,8 @@ impl Protocol { Protocol::Goodbye => None, Protocol::BlocksByRange => Some(ResponseTermination::BlocksByRange), Protocol::BlocksByRoot => Some(ResponseTermination::BlocksByRoot), + Protocol::PayloadEnvelopesByRange => Some(ResponseTermination::PayloadEnvelopesByRange), + Protocol::PayloadEnvelopesByRoot => Some(ResponseTermination::PayloadEnvelopesByRoot), Protocol::BlobsByRange => Some(ResponseTermination::BlobsByRange), Protocol::BlobsByRoot => Some(ResponseTermination::BlobsByRoot), Protocol::DataColumnsByRoot => Some(ResponseTermination::DataColumnsByRoot), @@ -307,6 +338,8 @@ pub enum SupportedProtocol { BlocksByRangeV2, BlocksByRootV1, BlocksByRootV2, + PayloadEnvelopesByRangeV1, + PayloadEnvelopesByRootV1, BlobsByRangeV1, BlobsByRootV1, DataColumnsByRootV1, @@ -329,6 +362,8 @@ impl SupportedProtocol { SupportedProtocol::GoodbyeV1 => "1", SupportedProtocol::BlocksByRangeV1 => "1", SupportedProtocol::BlocksByRangeV2 => "2", + SupportedProtocol::PayloadEnvelopesByRangeV1 => "1", + SupportedProtocol::PayloadEnvelopesByRootV1 => "1", SupportedProtocol::BlocksByRootV1 => "1", SupportedProtocol::BlocksByRootV2 => "2", SupportedProtocol::BlobsByRangeV1 => "1", @@ -355,6 +390,8 @@ impl SupportedProtocol { SupportedProtocol::BlocksByRangeV2 => Protocol::BlocksByRange, SupportedProtocol::BlocksByRootV1 => Protocol::BlocksByRoot, SupportedProtocol::BlocksByRootV2 => Protocol::BlocksByRoot, + SupportedProtocol::PayloadEnvelopesByRangeV1 => Protocol::PayloadEnvelopesByRange, + SupportedProtocol::PayloadEnvelopesByRootV1 => Protocol::PayloadEnvelopesByRoot, SupportedProtocol::BlobsByRangeV1 => Protocol::BlobsByRange, SupportedProtocol::BlobsByRootV1 => Protocol::BlobsByRoot, SupportedProtocol::DataColumnsByRootV1 => Protocol::DataColumnsByRoot, @@ -409,6 +446,18 @@ impl SupportedProtocol { ProtocolId::new(SupportedProtocol::DataColumnsByRangeV1, Encoding::SSZSnappy), ]); } + if 
fork_context.fork_exists(ForkName::Gloas) { + supported.extend_from_slice(&[ + ProtocolId::new( + SupportedProtocol::PayloadEnvelopesByRangeV1, + Encoding::SSZSnappy, + ), + ProtocolId::new( + SupportedProtocol::PayloadEnvelopesByRootV1, + Encoding::SSZSnappy, + ), + ]); + } supported } } @@ -511,6 +560,13 @@ impl ProtocolId { ::ssz_fixed_len(), ), Protocol::BlocksByRoot => RpcLimits::new(0, spec.max_blocks_by_root_request), + Protocol::PayloadEnvelopesByRange => RpcLimits::new( + ::ssz_fixed_len(), + ::ssz_fixed_len(), + ), + Protocol::PayloadEnvelopesByRoot => { + RpcLimits::new(0, spec.max_payload_envelopes_by_root_request) + } Protocol::BlobsByRange => RpcLimits::new( ::ssz_fixed_len(), ::ssz_fixed_len(), @@ -549,6 +605,8 @@ impl ProtocolId { Protocol::Goodbye => RpcLimits::new(0, 0), // Goodbye request has no response Protocol::BlocksByRange => rpc_block_limits_by_fork(fork_context.current_fork_name()), Protocol::BlocksByRoot => rpc_block_limits_by_fork(fork_context.current_fork_name()), + Protocol::PayloadEnvelopesByRange => rpc_payload_limits(), + Protocol::PayloadEnvelopesByRoot => rpc_payload_limits(), Protocol::BlobsByRange => rpc_blob_limits::(), Protocol::BlobsByRoot => rpc_blob_limits::(), Protocol::DataColumnsByRoot => { @@ -586,6 +644,8 @@ impl ProtocolId { match self.versioned_protocol { SupportedProtocol::BlocksByRangeV2 | SupportedProtocol::BlocksByRootV2 + | SupportedProtocol::PayloadEnvelopesByRangeV1 + | SupportedProtocol::PayloadEnvelopesByRootV1 | SupportedProtocol::BlobsByRangeV1 | SupportedProtocol::BlobsByRootV1 | SupportedProtocol::DataColumnsByRootV1 @@ -737,6 +797,8 @@ pub enum RequestType { Goodbye(GoodbyeReason), BlocksByRange(OldBlocksByRangeRequest), BlocksByRoot(BlocksByRootRequest), + PayloadEnvelopesByRange(PayloadEnvelopesByRangeRequest), + PayloadEnvelopesByRoot(PayloadEnvelopesByRootRequest), BlobsByRange(BlobsByRangeRequest), BlobsByRoot(BlobsByRootRequest), DataColumnsByRoot(DataColumnsByRootRequest), @@ -760,6 +822,8 @@ 
impl RequestType { RequestType::Goodbye(_) => 0, RequestType::BlocksByRange(req) => *req.count(), RequestType::BlocksByRoot(req) => req.block_roots().len() as u64, + RequestType::PayloadEnvelopesByRange(req) => req.count, + RequestType::PayloadEnvelopesByRoot(req) => req.beacon_block_roots.len() as u64, RequestType::BlobsByRange(req) => req.max_blobs_requested(digest_epoch, spec), RequestType::BlobsByRoot(req) => req.blob_ids.len() as u64, RequestType::DataColumnsByRoot(req) => req.max_requested() as u64, @@ -789,6 +853,8 @@ impl RequestType { BlocksByRootRequest::V1(_) => SupportedProtocol::BlocksByRootV1, BlocksByRootRequest::V2(_) => SupportedProtocol::BlocksByRootV2, }, + RequestType::PayloadEnvelopesByRange(_) => SupportedProtocol::PayloadEnvelopesByRangeV1, + RequestType::PayloadEnvelopesByRoot(_) => SupportedProtocol::PayloadEnvelopesByRootV1, RequestType::BlobsByRange(_) => SupportedProtocol::BlobsByRangeV1, RequestType::BlobsByRoot(_) => SupportedProtocol::BlobsByRootV1, RequestType::DataColumnsByRoot(_) => SupportedProtocol::DataColumnsByRootV1, @@ -820,6 +886,8 @@ impl RequestType { // variants that have `multiple_responses()` can have values. 
RequestType::BlocksByRange(_) => ResponseTermination::BlocksByRange, RequestType::BlocksByRoot(_) => ResponseTermination::BlocksByRoot, + RequestType::PayloadEnvelopesByRange(_) => ResponseTermination::PayloadEnvelopesByRange, + RequestType::PayloadEnvelopesByRoot(_) => ResponseTermination::PayloadEnvelopesByRoot, RequestType::BlobsByRange(_) => ResponseTermination::BlobsByRange, RequestType::BlobsByRoot(_) => ResponseTermination::BlobsByRoot, RequestType::DataColumnsByRoot(_) => ResponseTermination::DataColumnsByRoot, @@ -854,6 +922,14 @@ impl RequestType { ProtocolId::new(SupportedProtocol::BlocksByRootV2, Encoding::SSZSnappy), ProtocolId::new(SupportedProtocol::BlocksByRootV1, Encoding::SSZSnappy), ], + RequestType::PayloadEnvelopesByRange(_) => vec![ProtocolId::new( + SupportedProtocol::PayloadEnvelopesByRangeV1, + Encoding::SSZSnappy, + )], + RequestType::PayloadEnvelopesByRoot(_) => vec![ProtocolId::new( + SupportedProtocol::PayloadEnvelopesByRootV1, + Encoding::SSZSnappy, + )], RequestType::BlobsByRange(_) => vec![ProtocolId::new( SupportedProtocol::BlobsByRangeV1, Encoding::SSZSnappy, @@ -905,6 +981,8 @@ impl RequestType { RequestType::BlocksByRange(_) => false, RequestType::BlocksByRoot(_) => false, RequestType::BlobsByRange(_) => false, + RequestType::PayloadEnvelopesByRange(_) => false, + RequestType::PayloadEnvelopesByRoot(_) => false, RequestType::BlobsByRoot(_) => false, RequestType::DataColumnsByRoot(_) => false, RequestType::DataColumnsByRange(_) => false, @@ -1015,6 +1093,12 @@ impl std::fmt::Display for RequestType { RequestType::Goodbye(reason) => write!(f, "Goodbye: {}", reason), RequestType::BlocksByRange(req) => write!(f, "Blocks by range: {}", req), RequestType::BlocksByRoot(req) => write!(f, "Blocks by root: {:?}", req), + RequestType::PayloadEnvelopesByRange(req) => { + write!(f, "Payload envelopes by range: {:?}", req) + } + RequestType::PayloadEnvelopesByRoot(req) => { + write!(f, "Payload envelopes by root: {:?}", req) + } 
RequestType::BlobsByRange(req) => write!(f, "Blobs by range: {:?}", req), RequestType::BlobsByRoot(req) => write!(f, "Blobs by root: {:?}", req), RequestType::DataColumnsByRoot(req) => write!(f, "Data columns by root: {:?}", req), diff --git a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs index 2407038bc3..ebdca386d8 100644 --- a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs +++ b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs @@ -109,7 +109,11 @@ pub struct RPCRateLimiter { blbrange_rl: Limiter, /// BlobsByRoot rate limiter. blbroot_rl: Limiter, - /// DataColumnssByRoot rate limiter. + /// PayloadEnvelopesByRange rate limiter. + envrange_rl: Limiter, + /// PayloadEnvelopesByRoot rate limiter. + envroots_rl: Limiter, + /// DataColumnsByRoot rate limiter. dcbroot_rl: Limiter, /// DataColumnsByRange rate limiter. dcbrange_rl: Limiter, @@ -148,6 +152,10 @@ pub struct RPCRateLimiterBuilder { bbrange_quota: Option, /// Quota for the BlocksByRoot protocol. bbroots_quota: Option, + /// Quota for the ExecutionPayloadEnvelopesByRange protocol. + perange_quota: Option, + /// Quota for the ExecutionPayloadEnvelopesByRoot protocol. + peroots_quota: Option, /// Quota for the BlobsByRange protocol. blbrange_quota: Option, /// Quota for the BlobsByRoot protocol. 
@@ -177,6 +185,8 @@ impl RPCRateLimiterBuilder { Protocol::Goodbye => self.goodbye_quota = q, Protocol::BlocksByRange => self.bbrange_quota = q, Protocol::BlocksByRoot => self.bbroots_quota = q, + Protocol::PayloadEnvelopesByRange => self.perange_quota = q, + Protocol::PayloadEnvelopesByRoot => self.peroots_quota = q, Protocol::BlobsByRange => self.blbrange_quota = q, Protocol::BlobsByRoot => self.blbroot_quota = q, Protocol::DataColumnsByRoot => self.dcbroot_quota = q, @@ -201,6 +211,12 @@ impl RPCRateLimiterBuilder { let bbrange_quota = self .bbrange_quota .ok_or("BlocksByRange quota not specified")?; + let perange_quota = self + .perange_quota + .ok_or("PayloadEnvelopesByRange quota not specified")?; + let peroots_quota = self + .peroots_quota + .ok_or("PayloadEnvelopesByRoot quota not specified")?; let lc_bootstrap_quota = self .lcbootstrap_quota .ok_or("LightClientBootstrap quota not specified")?; @@ -236,6 +252,8 @@ impl RPCRateLimiterBuilder { let goodbye_rl = Limiter::from_quota(goodbye_quota)?; let bbroots_rl = Limiter::from_quota(bbroots_quota)?; let bbrange_rl = Limiter::from_quota(bbrange_quota)?; + let envrange_rl = Limiter::from_quota(perange_quota)?; + let envroots_rl = Limiter::from_quota(peroots_quota)?; let blbrange_rl = Limiter::from_quota(blbrange_quota)?; let blbroot_rl = Limiter::from_quota(blbroots_quota)?; let dcbroot_rl = Limiter::from_quota(dcbroot_quota)?; @@ -259,6 +277,8 @@ impl RPCRateLimiterBuilder { goodbye_rl, bbroots_rl, bbrange_rl, + envrange_rl, + envroots_rl, blbrange_rl, blbroot_rl, dcbroot_rl, @@ -312,6 +332,8 @@ impl RPCRateLimiter { goodbye_quota, blocks_by_range_quota, blocks_by_root_quota, + payload_envelopes_by_range_quota, + payload_envelopes_by_root_quota, blobs_by_range_quota, blobs_by_root_quota, data_columns_by_root_quota, @@ -329,6 +351,14 @@ impl RPCRateLimiter { .set_quota(Protocol::Goodbye, goodbye_quota) .set_quota(Protocol::BlocksByRange, blocks_by_range_quota) .set_quota(Protocol::BlocksByRoot, 
blocks_by_root_quota) + .set_quota( + Protocol::PayloadEnvelopesByRange, + payload_envelopes_by_range_quota, + ) + .set_quota( + Protocol::PayloadEnvelopesByRoot, + payload_envelopes_by_root_quota, + ) .set_quota(Protocol::BlobsByRange, blobs_by_range_quota) .set_quota(Protocol::BlobsByRoot, blobs_by_root_quota) .set_quota(Protocol::DataColumnsByRoot, data_columns_by_root_quota) @@ -376,6 +406,8 @@ impl RPCRateLimiter { Protocol::Goodbye => &mut self.goodbye_rl, Protocol::BlocksByRange => &mut self.bbrange_rl, Protocol::BlocksByRoot => &mut self.bbroots_rl, + Protocol::PayloadEnvelopesByRange => &mut self.envrange_rl, + Protocol::PayloadEnvelopesByRoot => &mut self.envroots_rl, Protocol::BlobsByRange => &mut self.blbrange_rl, Protocol::BlobsByRoot => &mut self.blbroot_rl, Protocol::DataColumnsByRoot => &mut self.dcbroot_rl, @@ -400,6 +432,8 @@ impl RPCRateLimiter { status_rl, bbrange_rl, bbroots_rl, + envrange_rl, + envroots_rl, blbrange_rl, blbroot_rl, dcbroot_rl, @@ -417,6 +451,8 @@ impl RPCRateLimiter { status_rl.prune(time_since_start); bbrange_rl.prune(time_since_start); bbroots_rl.prune(time_since_start); + envrange_rl.prune(time_since_start); + envroots_rl.prune(time_since_start); blbrange_rl.prune(time_since_start); blbroot_rl.prune(time_since_start); dcbrange_rl.prune(time_since_start); diff --git a/beacon_node/lighthouse_network/src/service/api_types.rs b/beacon_node/lighthouse_network/src/service/api_types.rs index d0323bab52..486a443857 100644 --- a/beacon_node/lighthouse_network/src/service/api_types.rs +++ b/beacon_node/lighthouse_network/src/service/api_types.rs @@ -5,6 +5,7 @@ use std::sync::Arc; use types::{ BlobSidecar, DataColumnSidecar, Epoch, EthSpec, LightClientBootstrap, LightClientFinalityUpdate, LightClientOptimisticUpdate, LightClientUpdate, SignedBeaconBlock, + SignedExecutionPayloadEnvelope, }; pub type Id = u32; @@ -160,6 +161,10 @@ pub enum Response { DataColumnsByRange(Option>>), /// A response to a get BLOCKS_BY_ROOT request. 
BlocksByRoot(Option>>), + /// A response to a get `EXECUTION_PAYLOAD_ENVELOPES_BY_ROOT` request. + PayloadEnvelopesByRoot(Option>>), + /// A response to a get `EXECUTION_PAYLOAD_ENVELOPES_BY_RANGE` request. + PayloadEnvelopesByRange(Option>>), /// A response to a get BLOBS_BY_ROOT request. BlobsByRoot(Option>>), /// A response to a get DATA_COLUMN_SIDECARS_BY_ROOT request. @@ -185,6 +190,16 @@ impl std::convert::From> for RpcResponse { Some(b) => RpcResponse::Success(RpcSuccessResponse::BlocksByRange(b)), None => RpcResponse::StreamTermination(ResponseTermination::BlocksByRange), }, + Response::PayloadEnvelopesByRoot(r) => match r { + Some(p) => RpcResponse::Success(RpcSuccessResponse::PayloadEnvelopesByRoot(p)), + None => RpcResponse::StreamTermination(ResponseTermination::PayloadEnvelopesByRoot), + }, + Response::PayloadEnvelopesByRange(r) => match r { + Some(p) => RpcResponse::Success(RpcSuccessResponse::PayloadEnvelopesByRange(p)), + None => { + RpcResponse::StreamTermination(ResponseTermination::PayloadEnvelopesByRange) + } + }, Response::BlobsByRoot(r) => match r { Some(b) => RpcResponse::Success(RpcSuccessResponse::BlobsByRoot(b)), None => RpcResponse::StreamTermination(ResponseTermination::BlobsByRoot), diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index 184a334591..56fcbb3bb6 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -1524,6 +1524,28 @@ impl Network { request_type, }) } + RequestType::PayloadEnvelopesByRange(_) => { + metrics::inc_counter_vec( + &metrics::TOTAL_RPC_REQUESTS, + &["payload_envelopes_by_range"], + ); + Some(NetworkEvent::RequestReceived { + peer_id, + inbound_request_id, + request_type, + }) + } + RequestType::PayloadEnvelopesByRoot(_) => { + metrics::inc_counter_vec( + &metrics::TOTAL_RPC_REQUESTS, + &["payload_envelopes_by_root"], + ); + Some(NetworkEvent::RequestReceived { + peer_id, + 
inbound_request_id, + request_type, + }) + } RequestType::BlobsByRange(_) => { metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["blobs_by_range"]); Some(NetworkEvent::RequestReceived { @@ -1638,6 +1660,16 @@ impl Network { RpcSuccessResponse::BlocksByRoot(resp) => { self.build_response(id, peer_id, Response::BlocksByRoot(Some(resp))) } + RpcSuccessResponse::PayloadEnvelopesByRange(resp) => self.build_response( + id, + peer_id, + Response::PayloadEnvelopesByRange(Some(resp)), + ), + RpcSuccessResponse::PayloadEnvelopesByRoot(resp) => self.build_response( + id, + peer_id, + Response::PayloadEnvelopesByRoot(Some(resp)), + ), RpcSuccessResponse::BlobsByRoot(resp) => { self.build_response(id, peer_id, Response::BlobsByRoot(Some(resp))) } @@ -1672,6 +1704,12 @@ impl Network { let response = match termination { ResponseTermination::BlocksByRange => Response::BlocksByRange(None), ResponseTermination::BlocksByRoot => Response::BlocksByRoot(None), + ResponseTermination::PayloadEnvelopesByRange => { + Response::PayloadEnvelopesByRange(None) + } + ResponseTermination::PayloadEnvelopesByRoot => { + Response::PayloadEnvelopesByRoot(None) + } ResponseTermination::BlobsByRange => Response::BlobsByRange(None), ResponseTermination::BlobsByRoot => Response::BlobsByRoot(None), ResponseTermination::DataColumnsByRoot => Response::DataColumnsByRoot(None), diff --git a/beacon_node/network/src/network_beacon_processor/mod.rs b/beacon_node/network/src/network_beacon_processor/mod.rs index e40eacce08..f74e7dacfb 100644 --- a/beacon_node/network/src/network_beacon_processor/mod.rs +++ b/beacon_node/network/src/network_beacon_processor/mod.rs @@ -15,7 +15,8 @@ use beacon_processor::{ use lighthouse_network::rpc::InboundRequestId; use lighthouse_network::rpc::methods::{ BlobsByRangeRequest, BlobsByRootRequest, DataColumnsByRangeRequest, DataColumnsByRootRequest, - LightClientUpdatesByRangeRequest, + LightClientUpdatesByRangeRequest, PayloadEnvelopesByRangeRequest, + 
PayloadEnvelopesByRootRequest, }; use lighthouse_network::service::api_types::CustodyBackfillBatchId; use lighthouse_network::{ @@ -693,6 +694,46 @@ impl NetworkBeaconProcessor { }) } + /// Create a new work event to process `PayloadEnvelopesByRootRequest`s from the RPC network. + pub fn send_payload_envelopes_by_roots_request( + self: &Arc, + peer_id: PeerId, + inbound_request_id: InboundRequestId, // Use ResponseId here + request: PayloadEnvelopesByRootRequest, + ) -> Result<(), Error> { + let processor = self.clone(); + let process_fn = async move { + processor + .handle_payload_envelopes_by_root_request(peer_id, inbound_request_id, request) + .await; + }; + + self.try_send(BeaconWorkEvent { + drop_during_sync: false, + work: Work::PayloadEnvelopesByRootRequest(Box::pin(process_fn)), + }) + } + + /// Create a new work event to process `PayloadEnvelopesByRangeRequest`s from the RPC network. + pub fn send_payload_envelopes_by_range_request( + self: &Arc, + peer_id: PeerId, + inbound_request_id: InboundRequestId, + request: PayloadEnvelopesByRangeRequest, + ) -> Result<(), Error> { + let processor = self.clone(); + let process_fn = async move { + processor + .handle_payload_envelopes_by_range_request(peer_id, inbound_request_id, request) + .await; + }; + + self.try_send(BeaconWorkEvent { + drop_during_sync: false, + work: Work::PayloadEnvelopesByRangeRequest(Box::pin(process_fn)), + }) + } + /// Create a new work event to process `BlobsByRangeRequest`s from the RPC network. 
pub fn send_blobs_by_range_request( self: &Arc, diff --git a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs index 279870d444..8b31b67acb 100644 --- a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs @@ -3,10 +3,12 @@ use crate::network_beacon_processor::{FUTURE_SLOT_TOLERANCE, NetworkBeaconProces use crate::service::NetworkMessage; use crate::status::ToStatusMessage; use crate::sync::SyncMessage; +use beacon_chain::payload_envelope_streamer::EnvelopeRequestSource; use beacon_chain::{BeaconChainError, BeaconChainTypes, BlockProcessStatus, WhenSlotSkipped}; use itertools::{Itertools, process_results}; use lighthouse_network::rpc::methods::{ BlobsByRangeRequest, BlobsByRootRequest, DataColumnsByRangeRequest, DataColumnsByRootRequest, + PayloadEnvelopesByRangeRequest, PayloadEnvelopesByRootRequest, }; use lighthouse_network::rpc::*; use lighthouse_network::{PeerId, ReportSource, Response, SyncInfo}; @@ -15,7 +17,7 @@ use slot_clock::SlotClock; use std::collections::{HashMap, HashSet, hash_map::Entry}; use std::sync::Arc; use tokio_stream::StreamExt; -use tracing::{Span, debug, error, field, instrument, warn}; +use tracing::{Span, debug, error, field, instrument, trace, warn}; use types::data::BlobIdentifier; use types::{ColumnIndex, Epoch, EthSpec, Hash256, Slot}; @@ -254,6 +256,104 @@ impl NetworkBeaconProcessor { Ok(()) } + /// Handle a `ExecutionPayloadEnvelopesByRoot` request from the peer. 
+ #[instrument( + name = "lh_handle_payload_envelopes_by_root_request", + parent = None, + level = "debug", + skip_all, + fields(peer_id = %peer_id, client = tracing::field::Empty) + )] + pub async fn handle_payload_envelopes_by_root_request( + self: Arc, + peer_id: PeerId, + inbound_request_id: InboundRequestId, + request: PayloadEnvelopesByRootRequest, + ) { + let client = self.network_globals.client(&peer_id); + Span::current().record("client", field::display(client.kind)); + + self.terminate_response_stream( + peer_id, + inbound_request_id, + self.clone() + .handle_payload_envelopes_by_root_request_inner( + peer_id, + inbound_request_id, + request, + ) + .await, + Response::PayloadEnvelopesByRoot, + ); + } + + /// Handle a `ExecutionPayloadEnvelopesByRoot` request from the peer. + async fn handle_payload_envelopes_by_root_request_inner( + self: Arc, + peer_id: PeerId, + inbound_request_id: InboundRequestId, + request: PayloadEnvelopesByRootRequest, + ) -> Result<(), (RpcErrorResponse, &'static str)> { + let log_results = |peer_id, requested_envelopes, send_envelope_count| { + debug!( + %peer_id, + requested = requested_envelopes, + returned = %send_envelope_count, + "ExecutionPayloadEnvelopes outgoing response processed" + ); + }; + + let requested_envelopes = request.beacon_block_roots.len(); + let mut envelope_stream = self.chain.get_payload_envelopes( + request.beacon_block_roots.to_vec(), + EnvelopeRequestSource::ByRoot, + ); + // Fetching payload envelopes is async because it may have to hit the execution layer for payloads. 
+ let mut send_envelope_count = 0; + while let Some((root, result)) = envelope_stream.next().await { + match result.as_ref() { + Ok(Some(envelope)) => { + self.send_response( + peer_id, + inbound_request_id, + Response::PayloadEnvelopesByRoot(Some(envelope.clone())), + ); + send_envelope_count += 1; + } + Ok(None) => { + debug!( + %peer_id, + request_root = ?root, + "Peer requested unknown payload envelope" + ); + } + Err(BeaconChainError::BlockHashMissingFromExecutionLayer(_)) => { + debug!( + block_root = ?root, + reason = "execution layer not synced", + "Failed to fetch execution payload for payload envelopes by root request" + ); + log_results(peer_id, requested_envelopes, send_envelope_count); + return Err(( + RpcErrorResponse::ResourceUnavailable, + "Execution layer not synced", + )); + } + Err(e) => { + debug!( + ?peer_id, + request_root = ?root, + error = ?e, + "Error fetching payload envelope for peer" + ); + } + } + } + log_results(peer_id, requested_envelopes, send_envelope_count); + + Ok(()) + } + /// Handle a `BlobsByRoot` request from the peer. #[instrument( name = "lh_handle_blobs_by_root_request", @@ -983,6 +1083,189 @@ impl NetworkBeaconProcessor { .collect::>()) } + /// Handle a `ExecutionPayloadEnvelopesByRange` request from the peer. 
+ #[instrument( + name = "lh_handle_payload_envelopes_by_range_request", + parent = None, + level = "debug", + skip_all, + fields(peer_id = %peer_id, client = tracing::field::Empty) + )] + pub async fn handle_payload_envelopes_by_range_request( + self: Arc, + peer_id: PeerId, + inbound_request_id: InboundRequestId, + req: PayloadEnvelopesByRangeRequest, + ) { + let client = self.network_globals.client(&peer_id); + Span::current().record("client", field::display(client.kind)); + + self.terminate_response_stream( + peer_id, + inbound_request_id, + self.clone() + .handle_payload_envelopes_by_range_request_inner(peer_id, inbound_request_id, req) + .await, + Response::PayloadEnvelopesByRange, + ); + } + + /// Handle a `ExecutionPayloadEnvelopesByRange` request from the peer. + async fn handle_payload_envelopes_by_range_request_inner( + self: Arc, + peer_id: PeerId, + inbound_request_id: InboundRequestId, + req: PayloadEnvelopesByRangeRequest, + ) -> Result<(), (RpcErrorResponse, &'static str)> { + let req_start_slot = req.start_slot; + let req_count = req.count; + + debug!( + %peer_id, + count = req_count, + start_slot = %req_start_slot, + "Received ExecutionPayloadEnvelopesByRange Request" + ); + + let request_start_slot = Slot::from(req_start_slot); + let fork_name = self + .chain + .spec + .fork_name_at_slot::(request_start_slot); + + if !fork_name.gloas_enabled() { + return Err(( + RpcErrorResponse::InvalidRequest, + "Requested envelopes for pre-gloas slots", + )); + } + + // Spawn a blocking handle since get_block_roots_for_slot_range takes a sync lock on the + // fork-choice. + let network_beacon_processor = self.clone(); + let block_roots = self + .executor + .spawn_blocking_handle( + move || { + network_beacon_processor.get_block_roots_for_slot_range( + req_start_slot, + req_count, + "ExecutionPayloadEnvelopesByRange", + ) + }, + "get_block_roots_for_slot_range", + ) + .ok_or((RpcErrorResponse::ServerError, "shutting down"))? 
+ .await + .map_err(|_| (RpcErrorResponse::ServerError, "tokio join"))?? + .iter() + .map(|(root, _)| *root) + .collect::>(); + + let current_slot = self + .chain + .slot() + .unwrap_or_else(|_| self.chain.slot_clock.genesis_slot()); + + let log_results = |peer_id, payloads_sent| { + if payloads_sent < (req_count as usize) { + debug!( + %peer_id, + msg = "Failed to return all requested payload envelopes", + start_slot = %req_start_slot, + %current_slot, + requested = req_count, + returned = payloads_sent, + "ExecutionPayloadEnvelopesByRange outgoing response processed" + ); + } else { + debug!( + %peer_id, + start_slot = %req_start_slot, + %current_slot, + requested = req_count, + returned = payloads_sent, + "ExecutionPayloadEnvelopesByRange outgoing response processed" + ); + } + }; + + let mut envelope_stream = self + .chain + .get_payload_envelopes(block_roots, EnvelopeRequestSource::ByRange); + + // Fetching payload envelopes is async because it may have to hit the execution layer for payloads. 
+ let mut envelopes_sent = 0; + while let Some((root, result)) = envelope_stream.next().await { + match result.as_ref() { + Ok(Some(envelope)) => { + // Due to skip slots, blocks could be out of the range, we ensure they + // are in the range before sending + if envelope.slot() >= req_start_slot + && envelope.slot() < req_start_slot.saturating_add(req.count) + { + envelopes_sent += 1; + self.send_network_message(NetworkMessage::SendResponse { + peer_id, + inbound_request_id, + response: Response::PayloadEnvelopesByRange(Some(envelope.clone())), + }); + } + } + Ok(None) => { + trace!( + request = ?req, + %peer_id, + request_root = ?root, + "No envelope for block root" + ); + } + Err(BeaconChainError::BlockHashMissingFromExecutionLayer(_)) => { + debug!( + block_root = ?root, + reason = "execution layer not synced", + "Failed to fetch execution payload for envelope by range request" + ); + log_results(peer_id, envelopes_sent); + // send the stream terminator + return Err(( + RpcErrorResponse::ResourceUnavailable, + "Execution layer not synced", + )); + } + Err(e) => { + if matches!( + e, + BeaconChainError::ExecutionLayerErrorPayloadReconstruction(_block_hash, boxed_error) + if matches!(**boxed_error, execution_layer::Error::EngineError(_)) + ) { + warn!( + info = "this may occur occasionally when the EE is busy", + block_root = ?root, + error = ?e, + "Error rebuilding payload for peer" + ); + } else { + error!( + block_root = ?root, + error = ?e, + "Error fetching payload envelope for peer" + ); + } + log_results(peer_id, envelopes_sent); + // send the stream terminator + return Err(( + RpcErrorResponse::ServerError, + "Failed fetching payload envelopes", + )); + } + } + } + + log_results(peer_id, envelopes_sent); + Ok(()) + } + /// Handle a `BlobsByRange` request from the peer. 
#[instrument( name = "lh_handle_blobs_by_range_request", diff --git a/beacon_node/network/src/network_beacon_processor/tests.rs b/beacon_node/network/src/network_beacon_processor/tests.rs index c5ccbc2ae6..d0f0557223 100644 --- a/beacon_node/network/src/network_beacon_processor/tests.rs +++ b/beacon_node/network/src/network_beacon_processor/tests.rs @@ -19,11 +19,14 @@ use beacon_chain::test_utils::{ }; use beacon_chain::{BeaconChain, WhenSlotSkipped}; use beacon_processor::{work_reprocessing_queue::*, *}; +use bls::Signature; +use fixed_bytes::FixedBytesExtended; use itertools::Itertools; use libp2p::gossipsub::MessageAcceptance; use lighthouse_network::rpc::InboundRequestId; use lighthouse_network::rpc::methods::{ BlobsByRangeRequest, BlobsByRootRequest, DataColumnsByRangeRequest, MetaDataV3, + PayloadEnvelopesByRangeRequest, PayloadEnvelopesByRootRequest, }; use lighthouse_network::{ Client, MessageId, NetworkConfig, NetworkGlobals, PeerId, Response, @@ -41,8 +44,9 @@ use std::time::Duration; use tokio::sync::mpsc; use types::{ AttesterSlashing, BlobSidecar, ChainSpec, DataColumnSidecarList, DataColumnSubnetId, Epoch, - EthSpec, Hash256, MainnetEthSpec, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, - SignedVoluntaryExit, SingleAttestation, Slot, SubnetId, + EthSpec, ExecutionPayloadEnvelope, ExecutionPayloadGloas, ExecutionRequests, Hash256, + MainnetEthSpec, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, + SignedExecutionPayloadEnvelope, SignedVoluntaryExit, SingleAttestation, Slot, SubnetId, }; use types::{ BlobSidecarList, @@ -522,6 +526,29 @@ impl TestRig { .unwrap(); } + pub fn enqueue_payload_envelopes_by_range_request(&self, start_slot: u64, count: u64) { + self.network_beacon_processor + .send_payload_envelopes_by_range_request( + PeerId::random(), + InboundRequestId::new_unchecked(42, 24), + PayloadEnvelopesByRangeRequest { start_slot, count }, + ) + .unwrap(); + } + + pub fn enqueue_payload_envelopes_by_root_request( + 
&self, + beacon_block_roots: RuntimeVariableList, + ) { + self.network_beacon_processor + .send_payload_envelopes_by_roots_request( + PeerId::random(), + InboundRequestId::new_unchecked(42, 24), + PayloadEnvelopesByRootRequest { beacon_block_roots }, + ) + .unwrap(); + } + pub fn enqueue_backfill_batch(&self, epoch: Epoch) { self.network_beacon_processor .send_chain_segment( @@ -2091,6 +2118,229 @@ async fn test_data_columns_by_range_no_duplicates_with_skip_slots() { ); } +/// Create a test `SignedExecutionPayloadEnvelope` with the given slot and beacon block root. +fn make_test_payload_envelope( + slot: Slot, + beacon_block_root: Hash256, +) -> SignedExecutionPayloadEnvelope { + SignedExecutionPayloadEnvelope { + message: ExecutionPayloadEnvelope { + payload: ExecutionPayloadGloas::default(), + execution_requests: ExecutionRequests::default(), + builder_index: 0, + beacon_block_root, + slot, + state_root: Hash256::zero(), + }, + signature: Signature::empty(), + } +} + +#[tokio::test] +async fn test_payload_envelopes_by_range() { + // Only test when Gloas fork is scheduled + if test_spec::().gloas_fork_epoch.is_none() { + return; + }; + + let mut rig = TestRig::new(64).await; + let start_slot = 0; + let slot_count = 32; + + // Manually store payload envelopes for each block in the range + let mut expected_roots = Vec::new(); + for slot in start_slot..slot_count { + if let Some(root) = rig + .chain + .block_root_at_slot(Slot::new(slot), WhenSlotSkipped::None) + .unwrap() + { + let envelope = make_test_payload_envelope(Slot::new(slot), root); + rig.chain + .store + .put_payload_envelope(&root, envelope) + .unwrap(); + expected_roots.push(root); + } + } + + rig.enqueue_payload_envelopes_by_range_request(start_slot, slot_count); + + let mut actual_roots = Vec::new(); + while let Some(next) = rig.network_rx.recv().await { + if let NetworkMessage::SendResponse { + peer_id: _, + response: Response::PayloadEnvelopesByRange(envelope), + inbound_request_id: _, + } = next + { 
+ if let Some(env) = envelope { + actual_roots.push(env.beacon_block_root()); + } else { + break; + } + } else if let NetworkMessage::SendErrorResponse { .. } = next { + // Error response terminates the stream + break; + } else { + panic!("unexpected message {:?}", next); + } + } + assert_eq!(expected_roots, actual_roots); +} + +#[tokio::test] +async fn test_payload_envelopes_by_root() { + // Only test when Gloas fork is scheduled + if test_spec::().gloas_fork_epoch.is_none() { + return; + }; + + let mut rig = TestRig::new(64).await; + + let block_root = rig + .chain + .block_root_at_slot(Slot::new(1), WhenSlotSkipped::None) + .unwrap() + .unwrap(); + + // Manually store a payload envelope for this block + let envelope = make_test_payload_envelope(Slot::new(1), block_root); + rig.chain + .store + .put_payload_envelope(&block_root, envelope) + .unwrap(); + + let roots = RuntimeVariableList::new(vec![block_root], 1).unwrap(); + rig.enqueue_payload_envelopes_by_root_request(roots); + + let mut actual_roots = Vec::new(); + while let Some(next) = rig.network_rx.recv().await { + if let NetworkMessage::SendResponse { + peer_id: _, + response: Response::PayloadEnvelopesByRoot(envelope), + inbound_request_id: _, + } = next + { + if let Some(env) = envelope { + actual_roots.push(env.beacon_block_root()); + } else { + break; + } + } else { + panic!("unexpected message {:?}", next); + } + } + assert_eq!(vec![block_root], actual_roots); +} + +#[tokio::test] +async fn test_payload_envelopes_by_root_unknown_root_returns_empty() { + // Only test when Gloas fork is scheduled + if test_spec::().gloas_fork_epoch.is_none() { + return; + }; + + let mut rig = TestRig::new(64).await; + + // Request envelope for a root that has no stored envelope + let block_root = rig + .chain + .block_root_at_slot(Slot::new(1), WhenSlotSkipped::None) + .unwrap() + .unwrap(); + + // Don't store any envelope — the handler should return 0 envelopes + let roots = RuntimeVariableList::new(vec![block_root], 
1).unwrap(); + rig.enqueue_payload_envelopes_by_root_request(roots); + + let mut actual_count = 0; + while let Some(next) = rig.network_rx.recv().await { + if let NetworkMessage::SendResponse { + peer_id: _, + response: Response::PayloadEnvelopesByRoot(envelope), + inbound_request_id: _, + } = next + { + if envelope.is_some() { + actual_count += 1; + } else { + break; + } + } else { + panic!("unexpected message {:?}", next); + } + } + assert_eq!(0, actual_count); +} + +#[tokio::test] +async fn test_payload_envelopes_by_range_no_duplicates_with_skip_slots() { + // Only test when Gloas fork is scheduled + if test_spec::().gloas_fork_epoch.is_none() { + return; + }; + + // Build a chain of 128 slots (4 epochs) with skip slots at positions 5 and 6. + let skip_slots: HashSet = [5, 6].into_iter().collect(); + let mut rig = TestRig::new_with_skip_slots(128, &skip_slots).await; + + let start_slot = 0u64; + let slot_count = 10u64; + + // Store payload envelopes for all blocks in the range (skipping the skip slots) + for slot in start_slot..slot_count { + if let Some(root) = rig + .chain + .block_root_at_slot(Slot::new(slot), WhenSlotSkipped::None) + .unwrap() + { + let envelope = make_test_payload_envelope(Slot::new(slot), root); + rig.chain + .store + .put_payload_envelope(&root, envelope) + .unwrap(); + } + } + + rig.enqueue_payload_envelopes_by_range_request(start_slot, slot_count); + + let mut beacon_block_roots: Vec = Vec::new(); + while let Some(next) = rig.network_rx.recv().await { + if let NetworkMessage::SendResponse { + peer_id: _, + response: Response::PayloadEnvelopesByRange(envelope), + inbound_request_id: _, + } = next + { + if let Some(env) = envelope { + beacon_block_roots.push(env.beacon_block_root()); + } else { + break; + } + } else if let NetworkMessage::SendErrorResponse { .. 
} = next { + break; + } else { + panic!("unexpected message {:?}", next); + } + } + + assert!( + !beacon_block_roots.is_empty(), + "Should have received at least some payload envelopes" + ); + + // Skip slots should not cause duplicate envelopes for the same block root + let unique_roots: HashSet<_> = beacon_block_roots.iter().collect(); + assert_eq!( + beacon_block_roots.len(), + unique_roots.len(), + "Response contained duplicate block roots: got {} envelopes but only {} unique roots", + beacon_block_roots.len(), + unique_roots.len(), + ); +} + // TODO(ePBS): Add integration tests for envelope deferral (UnknownBlockForEnvelope): // 1. Gossip envelope arrives before its block → queued via UnknownBlockForEnvelope // 2. Block imported → envelope released and processed successfully diff --git a/beacon_node/network/src/router.rs b/beacon_node/network/src/router.rs index 77d64c92e6..e6982e6a84 100644 --- a/beacon_node/network/src/router.rs +++ b/beacon_node/network/src/router.rs @@ -229,6 +229,24 @@ impl Router { request, ), ), + RequestType::PayloadEnvelopesByRoot(request) => self + .handle_beacon_processor_send_result( + self.network_beacon_processor + .send_payload_envelopes_by_roots_request( + peer_id, + inbound_request_id, + request, + ), + ), + RequestType::PayloadEnvelopesByRange(request) => self + .handle_beacon_processor_send_result( + self.network_beacon_processor + .send_payload_envelopes_by_range_request( + peer_id, + inbound_request_id, + request, + ), + ), RequestType::BlobsByRange(request) => self.handle_beacon_processor_send_result( self.network_beacon_processor.send_blobs_by_range_request( peer_id, @@ -309,6 +327,11 @@ impl Router { Response::DataColumnsByRange(data_column) => { self.on_data_columns_by_range_response(peer_id, app_request_id, data_column); } + // TODO(EIP-7732): implement outgoing payload envelopes by range and root + // responses once sync manager requests them. 
+ Response::PayloadEnvelopesByRoot(_) | Response::PayloadEnvelopesByRange(_) => { + debug!("Requesting envelopes by root and by range not supported yet"); + } // Light client responses should not be received Response::LightClientBootstrap(_) | Response::LightClientOptimisticUpdate(_) diff --git a/consensus/types/src/block/signed_beacon_block.rs b/consensus/types/src/block/signed_beacon_block.rs index b6218ba64d..dd6f52426a 100644 --- a/consensus/types/src/block/signed_beacon_block.rs +++ b/consensus/types/src/block/signed_beacon_block.rs @@ -377,6 +377,16 @@ impl> SignedBeaconBlock .map(|bid| bid.message.block_hash) } + /// Convenience accessor for the block's bid's `parent_block_hash`. + /// + /// This method returns an error prior to Gloas. + pub fn payload_bid_parent_block_hash(&self) -> Result { + self.message() + .body() + .signed_execution_payload_bid() + .map(|bid| bid.message.parent_block_hash) + } + /// Check if the `parent_hash` in this block's `signed_payload_bid` matches `parent_block_hash`. 
/// /// This function is useful post-Gloas for determining if the parent block is full, *without* diff --git a/consensus/types/src/core/chain_spec.rs b/consensus/types/src/core/chain_spec.rs index adf87dee94..2f3b5da956 100644 --- a/consensus/types/src/core/chain_spec.rs +++ b/consensus/types/src/core/chain_spec.rs @@ -295,6 +295,7 @@ pub struct ChainSpec { /* * Networking Gloas */ + pub max_request_payloads: u64, /* * Networking Derived @@ -305,6 +306,7 @@ pub struct ChainSpec { pub max_blocks_by_root_request_deneb: usize, pub max_blobs_by_root_request: usize, pub max_data_columns_by_root_request: usize, + pub max_payload_envelopes_by_root_request: usize, /* * Application params @@ -700,6 +702,10 @@ impl ChainSpec { } } + pub fn max_request_payloads(&self) -> usize { + self.max_request_payloads as usize + } + pub fn max_request_blob_sidecars(&self, fork_name: ForkName) -> usize { if fork_name.electra_enabled() { self.max_request_blob_sidecars_electra as usize @@ -964,6 +970,8 @@ impl ChainSpec { max_blobs_by_root_request_common(self.max_request_blob_sidecars); self.max_data_columns_by_root_request = max_data_columns_by_root_request_common::(self.max_request_blocks_deneb); + self.max_payload_envelopes_by_root_request = + max_blocks_by_root_request_common(self.max_request_payloads); self } @@ -1228,6 +1236,7 @@ impl ChainSpec { builder_payment_threshold_numerator: 6, builder_payment_threshold_denominator: 10, min_builder_withdrawability_delay: Epoch::new(4096), + max_request_payloads: 128, /* * Network specific @@ -1293,6 +1302,7 @@ impl ChainSpec { min_epochs_for_data_column_sidecars_requests: default_min_epochs_for_data_column_sidecars_requests(), max_data_columns_by_root_request: default_data_columns_by_root_request(), + max_payload_envelopes_by_root_request: default_max_payload_envelopes_by_root_request(), /* * Application specific @@ -1622,6 +1632,7 @@ impl ChainSpec { builder_payment_threshold_numerator: 6, builder_payment_threshold_denominator: 10, 
min_builder_withdrawability_delay: Epoch::new(4096), + max_request_payloads: 128, /* * Network specific @@ -1678,6 +1689,7 @@ impl ChainSpec { min_epochs_for_data_column_sidecars_requests: default_min_epochs_for_data_column_sidecars_requests(), max_data_columns_by_root_request: default_data_columns_by_root_request(), + max_payload_envelopes_by_root_request: default_max_payload_envelopes_by_root_request(), /* * Application specific @@ -2342,6 +2354,14 @@ fn default_data_columns_by_root_request() -> usize { max_data_columns_by_root_request_common::(default_max_request_blocks_deneb()) } +fn default_max_payload_envelopes_by_root_request() -> usize { + max_blocks_by_root_request_common(default_max_request_payloads()) +} + +fn default_max_request_payloads() -> u64 { + 128 +} + impl Default for Config { fn default() -> Self { let chain_spec = MainnetEthSpec::default_spec(); diff --git a/consensus/types/src/execution/execution_payload_envelope.rs b/consensus/types/src/execution/execution_payload_envelope.rs index 7f68dae037..169331a884 100644 --- a/consensus/types/src/execution/execution_payload_envelope.rs +++ b/consensus/types/src/execution/execution_payload_envelope.rs @@ -3,7 +3,9 @@ use crate::test_utils::TestRandom; use crate::{EthSpec, ForkName, Hash256, SignedRoot, Slot}; use context_deserialize::context_deserialize; use educe::Educe; +use fixed_bytes::FixedBytesExtended; use serde::{Deserialize, Serialize}; +use ssz::{BYTES_PER_LENGTH_OFFSET, Encode as SszEncode}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; @@ -22,6 +24,44 @@ pub struct ExecutionPayloadEnvelope { pub state_root: Hash256, } +impl ExecutionPayloadEnvelope { + /// Returns an empty envelope with all fields zeroed. Used for SSZ size calculations. 
+ pub fn empty() -> Self { + Self { + payload: ExecutionPayloadGloas::default(), + execution_requests: ExecutionRequests::default(), + builder_index: 0, + beacon_block_root: Hash256::zero(), + slot: Slot::new(0), + state_root: Hash256::zero(), + } + } + + /// Returns the minimum SSZ-encoded size (all variable-length fields empty). + pub fn min_size() -> usize { + Self::empty().as_ssz_bytes().len() + } + + /// Returns the maximum SSZ-encoded size. + #[allow(clippy::arithmetic_side_effects)] + pub fn max_size() -> usize { + Self::min_size() + // ExecutionPayloadGloas variable-length fields: + + (E::max_extra_data_bytes() * ::ssz_fixed_len()) + + (E::max_transactions_per_payload() + * (BYTES_PER_LENGTH_OFFSET + E::max_bytes_per_transaction())) + + (E::max_withdrawals_per_payload() + * ::ssz_fixed_len()) + // ExecutionRequests variable-length fields: + + (E::max_deposit_requests_per_payload() + * ::ssz_fixed_len()) + + (E::max_withdrawal_requests_per_payload() + * ::ssz_fixed_len()) + + (E::max_consolidation_requests_per_payload() + * ::ssz_fixed_len()) + } +} + impl SignedRoot for ExecutionPayloadEnvelope {} #[cfg(test)] diff --git a/consensus/types/src/execution/signed_execution_payload_envelope.rs b/consensus/types/src/execution/signed_execution_payload_envelope.rs index b1d949f863..76fa841680 100644 --- a/consensus/types/src/execution/signed_execution_payload_envelope.rs +++ b/consensus/types/src/execution/signed_execution_payload_envelope.rs @@ -8,6 +8,7 @@ use bls::{PublicKey, Signature}; use context_deserialize::context_deserialize; use educe::Educe; use serde::{Deserialize, Serialize}; +use ssz::Encode; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; @@ -22,6 +23,24 @@ pub struct SignedExecutionPayloadEnvelope { } impl SignedExecutionPayloadEnvelope { + /// Returns the minimum SSZ-encoded size (all variable-length fields empty). 
+ pub fn min_size() -> usize { + Self { + message: ExecutionPayloadEnvelope::empty(), + signature: Signature::empty(), + } + .as_ssz_bytes() + .len() + } + + /// Returns the maximum SSZ-encoded size. + #[allow(clippy::arithmetic_side_effects)] + pub fn max_size() -> usize { + // Signature is fixed-size, so the variable-length delta is entirely from the envelope. + Self::min_size() + ExecutionPayloadEnvelope::::max_size() + - ExecutionPayloadEnvelope::::min_size() + } + pub fn slot(&self) -> Slot { self.message.slot } From dfd715b65ee9ee30f73115ff1e24f4cfadef7aca Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 23 Mar 2026 12:56:45 +1100 Subject: [PATCH 40/43] Bump libp2p --- Cargo.lock | 215 ++++++++++++++++++++++++++--------------------------- Cargo.toml | 4 +- 2 files changed, 109 insertions(+), 110 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4043cb9e12..87f1d08d9c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -399,7 +399,7 @@ checksum = "64b728d511962dda67c1bc7ea7c03736ec275ed2cf4c35d9585298ac9ccf3b73" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -510,7 +510,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -526,7 +526,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", "syn-solidity", "tiny-keccak", ] @@ -543,7 +543,7 @@ dependencies = [ "macro-string", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", "syn-solidity", ] @@ -632,7 +632,7 @@ dependencies = [ "darling 0.21.3", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -815,7 +815,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "62945a2f7e6de02a31fe400aa489f0e0f5b2502e69f95f853adb82a96c7a6b60" dependencies = [ "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -853,7 +853,7 @@ dependencies = [ "num-traits", "proc-macro2", "quote", - "syn 2.0.111", + "syn 
2.0.117", ] [[package]] @@ -964,7 +964,7 @@ checksum = "3109e49b1e4909e9db6515a30c633684d68cdeaa252f215214cb4fa1a5bfee2c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", "synstructure", ] @@ -976,7 +976,7 @@ checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -1055,7 +1055,7 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -1066,7 +1066,7 @@ checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -1108,7 +1108,7 @@ checksum = "ffdcb70bdbc4d478427380519163274ac86e52916e10f0a8889adf0f96d3fee7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -1378,7 +1378,7 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.111", + "syn 2.0.117", "which", ] @@ -1553,7 +1553,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -1820,7 +1820,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -1909,7 +1909,7 @@ version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fde0e0ec90c9dfb3b4b1a0891a7dcd0e2bffde2f7efed5fe7c9bb00e5bfb915e" dependencies = [ - "windows-sys 0.48.0", + "windows-sys 0.59.0", ] [[package]] @@ -2276,7 +2276,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -2334,7 +2334,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.11.1", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -2349,7 +2349,7 @@ dependencies = [ "quote", "serde", "strsim 0.11.1", - 
"syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -2371,7 +2371,7 @@ checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead" dependencies = [ "darling_core 0.20.11", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -2382,7 +2382,7 @@ checksum = "d38308df82d1080de0afee5d069fa14b0326a88c14f15c5ccda35b4a6c414c81" dependencies = [ "darling_core 0.21.3", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -2549,7 +2549,7 @@ checksum = "1e567bd82dcff979e4b03460c307b3cdc9e96fde3d73bed1496d2bc75d9dd62a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -2570,7 +2570,7 @@ dependencies = [ "darling 0.20.11", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -2580,7 +2580,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab63b0e2bf4d5928aff72e83a7dace85d7bba5fe12dcc3c5a572d78caffd3f3c" dependencies = [ "derive_builder_core", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -2593,7 +2593,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version 0.4.1", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -2615,7 +2615,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version 0.4.1", - "syn 2.0.111", + "syn 2.0.117", "unicode-xid", ] @@ -2722,7 +2722,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -2817,7 +2817,7 @@ dependencies = [ "enum-ordinalize", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -3040,7 +3040,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -3060,7 +3060,7 @@ checksum = "8ca9601fb2d62598ee17836250842873a413586e5d7ed88b356e38ddbb0ec631" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -3098,7 +3098,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" dependencies = [ "libc", - "windows-sys 0.61.2", + "windows-sys 0.59.0", ] [[package]] @@ -3287,7 +3287,7 @@ dependencies = [ "darling 0.20.11", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -3700,7 +3700,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -3819,7 +3819,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -4613,7 +4613,7 @@ checksum = "a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -4729,7 +4729,7 @@ checksum = "3640c1c38b8e4e43584d8df18be5fc6b0aa314ce6ebf51b53313d4306cca8e46" dependencies = [ "hermit-abi", "libc", - "windows-sys 0.61.2", + "windows-sys 0.59.0", ] [[package]] @@ -4958,9 +4958,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.178" +version = "0.2.183" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37c93d8daa9d8a012fd8ab92f088405fb202ea0b6ab73ee2482ae66af4f42091" +checksum = "b5b646652bf6661599e1da8901b3b9522896f01e736bad5f723fe7a3a27f899d" [[package]] name = "libloading" @@ -4996,7 +4996,7 @@ dependencies = [ [[package]] name = "libp2p" version = "0.56.1" -source = "git+https://github.com/sigp/rust-libp2p.git?rev=f88e43de9eba00b416d0374b1a1fb2de47b65864#f88e43de9eba00b416d0374b1a1fb2de47b65864" +source = "git+https://github.com/sigp/rust-libp2p.git?rev=defcaf1a78cf5b70a723b3fee0e0be051c1dbd88#defcaf1a78cf5b70a723b3fee0e0be051c1dbd88" dependencies = [ "bytes", "either", @@ -5027,7 +5027,7 @@ dependencies = [ [[package]] name = "libp2p-allow-block-list" version = "0.6.0" -source = 
"git+https://github.com/sigp/rust-libp2p.git?rev=f88e43de9eba00b416d0374b1a1fb2de47b65864#f88e43de9eba00b416d0374b1a1fb2de47b65864" +source = "git+https://github.com/sigp/rust-libp2p.git?rev=defcaf1a78cf5b70a723b3fee0e0be051c1dbd88#defcaf1a78cf5b70a723b3fee0e0be051c1dbd88" dependencies = [ "libp2p-core", "libp2p-identity", @@ -5037,7 +5037,7 @@ dependencies = [ [[package]] name = "libp2p-connection-limits" version = "0.6.0" -source = "git+https://github.com/sigp/rust-libp2p.git?rev=f88e43de9eba00b416d0374b1a1fb2de47b65864#f88e43de9eba00b416d0374b1a1fb2de47b65864" +source = "git+https://github.com/sigp/rust-libp2p.git?rev=defcaf1a78cf5b70a723b3fee0e0be051c1dbd88#defcaf1a78cf5b70a723b3fee0e0be051c1dbd88" dependencies = [ "libp2p-core", "libp2p-identity", @@ -5047,7 +5047,7 @@ dependencies = [ [[package]] name = "libp2p-core" version = "0.43.2" -source = "git+https://github.com/sigp/rust-libp2p.git?rev=f88e43de9eba00b416d0374b1a1fb2de47b65864#f88e43de9eba00b416d0374b1a1fb2de47b65864" +source = "git+https://github.com/sigp/rust-libp2p.git?rev=defcaf1a78cf5b70a723b3fee0e0be051c1dbd88#defcaf1a78cf5b70a723b3fee0e0be051c1dbd88" dependencies = [ "either", "fnv", @@ -5070,10 +5070,9 @@ dependencies = [ [[package]] name = "libp2p-dns" -version = "0.44.0" -source = "git+https://github.com/sigp/rust-libp2p.git?rev=f88e43de9eba00b416d0374b1a1fb2de47b65864#f88e43de9eba00b416d0374b1a1fb2de47b65864" +version = "0.45.0" +source = "git+https://github.com/sigp/rust-libp2p.git?rev=defcaf1a78cf5b70a723b3fee0e0be051c1dbd88#defcaf1a78cf5b70a723b3fee0e0be051c1dbd88" dependencies = [ - "async-trait", "futures", "hickory-resolver", "libp2p-core", @@ -5086,7 +5085,7 @@ dependencies = [ [[package]] name = "libp2p-gossipsub" version = "0.50.0" -source = "git+https://github.com/sigp/rust-libp2p.git?rev=f88e43de9eba00b416d0374b1a1fb2de47b65864#f88e43de9eba00b416d0374b1a1fb2de47b65864" +source = 
"git+https://github.com/sigp/rust-libp2p.git?rev=defcaf1a78cf5b70a723b3fee0e0be051c1dbd88#defcaf1a78cf5b70a723b3fee0e0be051c1dbd88" dependencies = [ "async-channel 2.5.0", "asynchronous-codec", @@ -5116,7 +5115,7 @@ dependencies = [ [[package]] name = "libp2p-identify" version = "0.47.0" -source = "git+https://github.com/sigp/rust-libp2p.git?rev=f88e43de9eba00b416d0374b1a1fb2de47b65864#f88e43de9eba00b416d0374b1a1fb2de47b65864" +source = "git+https://github.com/sigp/rust-libp2p.git?rev=defcaf1a78cf5b70a723b3fee0e0be051c1dbd88#defcaf1a78cf5b70a723b3fee0e0be051c1dbd88" dependencies = [ "asynchronous-codec", "either", @@ -5156,7 +5155,7 @@ dependencies = [ [[package]] name = "libp2p-mdns" version = "0.48.0" -source = "git+https://github.com/sigp/rust-libp2p.git?rev=f88e43de9eba00b416d0374b1a1fb2de47b65864#f88e43de9eba00b416d0374b1a1fb2de47b65864" +source = "git+https://github.com/sigp/rust-libp2p.git?rev=defcaf1a78cf5b70a723b3fee0e0be051c1dbd88#defcaf1a78cf5b70a723b3fee0e0be051c1dbd88" dependencies = [ "futures", "hickory-proto", @@ -5174,7 +5173,7 @@ dependencies = [ [[package]] name = "libp2p-metrics" version = "0.17.1" -source = "git+https://github.com/sigp/rust-libp2p.git?rev=f88e43de9eba00b416d0374b1a1fb2de47b65864#f88e43de9eba00b416d0374b1a1fb2de47b65864" +source = "git+https://github.com/sigp/rust-libp2p.git?rev=defcaf1a78cf5b70a723b3fee0e0be051c1dbd88#defcaf1a78cf5b70a723b3fee0e0be051c1dbd88" dependencies = [ "futures", "libp2p-core", @@ -5190,7 +5189,7 @@ dependencies = [ [[package]] name = "libp2p-mplex" version = "0.43.1" -source = "git+https://github.com/sigp/rust-libp2p.git?rev=f88e43de9eba00b416d0374b1a1fb2de47b65864#f88e43de9eba00b416d0374b1a1fb2de47b65864" +source = "git+https://github.com/sigp/rust-libp2p.git?rev=defcaf1a78cf5b70a723b3fee0e0be051c1dbd88#defcaf1a78cf5b70a723b3fee0e0be051c1dbd88" dependencies = [ "asynchronous-codec", "bytes", @@ -5208,7 +5207,7 @@ dependencies = [ [[package]] name = "libp2p-noise" version = "0.46.1" -source = 
"git+https://github.com/sigp/rust-libp2p.git?rev=f88e43de9eba00b416d0374b1a1fb2de47b65864#f88e43de9eba00b416d0374b1a1fb2de47b65864" +source = "git+https://github.com/sigp/rust-libp2p.git?rev=defcaf1a78cf5b70a723b3fee0e0be051c1dbd88#defcaf1a78cf5b70a723b3fee0e0be051c1dbd88" dependencies = [ "asynchronous-codec", "bytes", @@ -5230,7 +5229,7 @@ dependencies = [ [[package]] name = "libp2p-quic" version = "0.13.0" -source = "git+https://github.com/sigp/rust-libp2p.git?rev=f88e43de9eba00b416d0374b1a1fb2de47b65864#f88e43de9eba00b416d0374b1a1fb2de47b65864" +source = "git+https://github.com/sigp/rust-libp2p.git?rev=defcaf1a78cf5b70a723b3fee0e0be051c1dbd88#defcaf1a78cf5b70a723b3fee0e0be051c1dbd88" dependencies = [ "futures", "futures-timer", @@ -5251,7 +5250,7 @@ dependencies = [ [[package]] name = "libp2p-swarm" version = "0.47.1" -source = "git+https://github.com/sigp/rust-libp2p.git?rev=f88e43de9eba00b416d0374b1a1fb2de47b65864#f88e43de9eba00b416d0374b1a1fb2de47b65864" +source = "git+https://github.com/sigp/rust-libp2p.git?rev=defcaf1a78cf5b70a723b3fee0e0be051c1dbd88#defcaf1a78cf5b70a723b3fee0e0be051c1dbd88" dependencies = [ "either", "fnv", @@ -5272,17 +5271,17 @@ dependencies = [ [[package]] name = "libp2p-swarm-derive" version = "0.35.1" -source = "git+https://github.com/sigp/rust-libp2p.git?rev=f88e43de9eba00b416d0374b1a1fb2de47b65864#f88e43de9eba00b416d0374b1a1fb2de47b65864" +source = "git+https://github.com/sigp/rust-libp2p.git?rev=defcaf1a78cf5b70a723b3fee0e0be051c1dbd88#defcaf1a78cf5b70a723b3fee0e0be051c1dbd88" dependencies = [ "heck", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] name = "libp2p-tcp" version = "0.44.1" -source = "git+https://github.com/sigp/rust-libp2p.git?rev=f88e43de9eba00b416d0374b1a1fb2de47b65864#f88e43de9eba00b416d0374b1a1fb2de47b65864" +source = "git+https://github.com/sigp/rust-libp2p.git?rev=defcaf1a78cf5b70a723b3fee0e0be051c1dbd88#defcaf1a78cf5b70a723b3fee0e0be051c1dbd88" dependencies = [ "futures", "futures-timer", @@ -5297,7 
+5296,7 @@ dependencies = [ [[package]] name = "libp2p-tls" version = "0.6.2" -source = "git+https://github.com/sigp/rust-libp2p.git?rev=f88e43de9eba00b416d0374b1a1fb2de47b65864#f88e43de9eba00b416d0374b1a1fb2de47b65864" +source = "git+https://github.com/sigp/rust-libp2p.git?rev=defcaf1a78cf5b70a723b3fee0e0be051c1dbd88#defcaf1a78cf5b70a723b3fee0e0be051c1dbd88" dependencies = [ "futures", "futures-rustls", @@ -5315,7 +5314,7 @@ dependencies = [ [[package]] name = "libp2p-upnp" version = "0.6.0" -source = "git+https://github.com/sigp/rust-libp2p.git?rev=f88e43de9eba00b416d0374b1a1fb2de47b65864#f88e43de9eba00b416d0374b1a1fb2de47b65864" +source = "git+https://github.com/sigp/rust-libp2p.git?rev=defcaf1a78cf5b70a723b3fee0e0be051c1dbd88#defcaf1a78cf5b70a723b3fee0e0be051c1dbd88" dependencies = [ "futures", "futures-timer", @@ -5329,7 +5328,7 @@ dependencies = [ [[package]] name = "libp2p-yamux" version = "0.47.0" -source = "git+https://github.com/sigp/rust-libp2p.git?rev=f88e43de9eba00b416d0374b1a1fb2de47b65864#f88e43de9eba00b416d0374b1a1fb2de47b65864" +source = "git+https://github.com/sigp/rust-libp2p.git?rev=defcaf1a78cf5b70a723b3fee0e0be051c1dbd88#defcaf1a78cf5b70a723b3fee0e0be051c1dbd88" dependencies = [ "either", "futures", @@ -5673,7 +5672,7 @@ checksum = "1b27834086c65ec3f9387b096d66e99f221cf081c2b738042aa252bcd41204e3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -5894,7 +5893,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -5906,7 +5905,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -6021,7 +6020,7 @@ dependencies = [ [[package]] name = "multistream-select" version = "0.13.0" -source = "git+https://github.com/sigp/rust-libp2p.git?rev=f88e43de9eba00b416d0374b1a1fb2de47b65864#f88e43de9eba00b416d0374b1a1fb2de47b65864" +source = 
"git+https://github.com/sigp/rust-libp2p.git?rev=defcaf1a78cf5b70a723b3fee0e0be051c1dbd88#defcaf1a78cf5b70a723b3fee0e0be051c1dbd88" dependencies = [ "bytes", "futures", @@ -6074,7 +6073,7 @@ dependencies = [ "proc-macro2", "quote", "serde", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -6291,7 +6290,7 @@ version = "0.50.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7957b9740744892f114936ab4a57b3f487491bbeafaf8083688b16841a4240e5" dependencies = [ - "windows-sys 0.61.2", + "windows-sys 0.59.0", ] [[package]] @@ -6385,7 +6384,7 @@ checksum = "ff32365de1b6743cb203b710788263c44a03de03802daf96092f2da4fe6ba4d7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -6484,7 +6483,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -6649,7 +6648,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -6740,7 +6739,7 @@ checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -6913,7 +6912,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b" dependencies = [ "proc-macro2", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -6955,7 +6954,7 @@ dependencies = [ "proc-macro-error-attr2", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -7022,7 +7021,7 @@ checksum = "9adf1691c04c0a5ff46ff8f262b58beb07b0dbb61f96f9f54f6cbd82106ed87f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -7052,7 +7051,7 @@ checksum = "095a99f75c69734802359b682be8daaf8980296731f6470434ea2c652af1dd30" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] 
[[package]] @@ -7075,7 +7074,7 @@ dependencies = [ "itertools 0.14.0", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -7137,7 +7136,7 @@ dependencies = [ [[package]] name = "quick-protobuf-codec" version = "0.3.1" -source = "git+https://github.com/sigp/rust-libp2p.git?rev=f88e43de9eba00b416d0374b1a1fb2de47b65864#f88e43de9eba00b416d0374b1a1fb2de47b65864" +source = "git+https://github.com/sigp/rust-libp2p.git?rev=defcaf1a78cf5b70a723b3fee0e0be051c1dbd88#defcaf1a78cf5b70a723b3fee0e0be051c1dbd88" dependencies = [ "asynchronous-codec", "bytes", @@ -7199,7 +7198,7 @@ dependencies = [ "once_cell", "socket2 0.6.1", "tracing", - "windows-sys 0.60.2", + "windows-sys 0.59.0", ] [[package]] @@ -7412,14 +7411,14 @@ checksum = "b7186006dcb21920990093f30e3dea63b7d6e977bf1256be20c3563a5db070da" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] name = "regex" -version = "1.12.2" +version = "1.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "843bc0191f75f3e22651ae5f1e72939ab2f72a4bc30fa80a066bd66edefc24d4" +checksum = "e10754a14b9137dd7b1e3e5b0493cc9171fdd105e0ab477f51b72e7f3ac0e276" dependencies = [ "aho-corasick", "memchr", @@ -7716,7 +7715,7 @@ dependencies = [ "errno", "libc", "linux-raw-sys 0.11.0", - "windows-sys 0.61.2", + "windows-sys 0.59.0", ] [[package]] @@ -7822,7 +7821,7 @@ dependencies = [ [[package]] name = "rw-stream-sink" version = "0.4.0" -source = "git+https://github.com/sigp/rust-libp2p.git?rev=f88e43de9eba00b416d0374b1a1fb2de47b65864#f88e43de9eba00b416d0374b1a1fb2de47b65864" +source = "git+https://github.com/sigp/rust-libp2p.git?rev=defcaf1a78cf5b70a723b3fee0e0be051c1dbd88#defcaf1a78cf5b70a723b3fee0e0be051c1dbd88" dependencies = [ "futures", "pin-project", @@ -8071,7 +8070,7 @@ checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -8095,7 +8094,7 
@@ checksum = "175ee3e80ae9982737ca543e96133087cbd9a485eecc3bc4de9c1a37b47ea59c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -8138,7 +8137,7 @@ dependencies = [ "darling 0.21.3", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -8603,7 +8602,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -8623,7 +8622,7 @@ dependencies = [ "proc-macro2", "quote", "smallvec", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -8649,9 +8648,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.111" +version = "2.0.117" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "390cc9a294ab71bdb1aa2e99d13be9c753cd2d7bd6560c77118597410c4d2e87" +checksum = "e665b8803e7b1d2a727f4023456bbbbe74da67099c585258af0ad9c5013b9b99" dependencies = [ "proc-macro2", "quote", @@ -8667,7 +8666,7 @@ dependencies = [ "paste", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -8687,7 +8686,7 @@ checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -8781,7 +8780,7 @@ dependencies = [ "getrandom 0.3.4", "once_cell", "rustix 1.1.2", - "windows-sys 0.61.2", + "windows-sys 0.59.0", ] [[package]] @@ -8805,7 +8804,7 @@ name = "test_random_derive" version = "0.2.0" dependencies = [ "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -8834,7 +8833,7 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -8845,7 +8844,7 @@ checksum = "3ff15c8ecd7de3849db632e14d18d2571fa09dfc5ed93479bc4485c7a517c913" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -9002,9 +9001,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" 
[[package]] name = "tokio" -version = "1.48.0" +version = "1.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff360e02eab121e0bc37a2d3b4d4dc622e6eda3a8e5253d5435ecf5bd4c68408" +checksum = "27ad5e34374e03cfffefc301becb44e9dc3c17584f414349ebe29ed26661822d" dependencies = [ "bytes", "libc", @@ -9026,7 +9025,7 @@ checksum = "af407857209536a95c8e56f8231ef2c2e2aff839b22e07a1ffcbc617e9db9fa5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -9276,7 +9275,7 @@ checksum = "7490cfa5ec963746568740651ac6781f701c9c5ea257c58e057f3ba8cf69e8da" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -9379,7 +9378,7 @@ dependencies = [ "darling 0.21.3", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -9984,7 +9983,7 @@ dependencies = [ "bumpalo", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", "wasm-bindgen-shared", ] @@ -10132,7 +10131,7 @@ version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" dependencies = [ - "windows-sys 0.48.0", + "windows-sys 0.59.0", ] [[package]] @@ -10194,7 +10193,7 @@ checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -10205,7 +10204,7 @@ checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -10648,7 +10647,7 @@ checksum = "b659052874eb698efe5b9e8cf382204678a0086ebf46982b79d6ca3182927e5d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", "synstructure", ] @@ -10669,7 +10668,7 @@ checksum = "d8a8d209fdf45cf5138cbb5a506f6b52522a25afccc534d1475dad8e31105c6a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ 
-10689,7 +10688,7 @@ checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", "synstructure", ] @@ -10711,7 +10710,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] @@ -10744,7 +10743,7 @@ checksum = "eadce39539ca5cb3985590102671f2567e659fca9666581ad3411d59207951f3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.117", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 6910d02427..4cd1dfcea2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -281,5 +281,5 @@ debug = true quick-protobuf = { git = "https://github.com/sigp/quick-protobuf.git", rev = "87c4ccb9bb2af494de375f5f6c62850badd26304" } [patch."https://github.com/libp2p/rust-libp2p.git"] -libp2p = { git = "https://github.com/sigp/rust-libp2p.git", rev = "f88e43de9eba00b416d0374b1a1fb2de47b65864" } -libp2p-mplex = { git = "https://github.com/sigp/rust-libp2p.git", rev = "f88e43de9eba00b416d0374b1a1fb2de47b65864" } +libp2p = { git = "https://github.com/sigp/rust-libp2p.git", rev = "defcaf1a78cf5b70a723b3fee0e0be051c1dbd88" } +libp2p-mplex = { git = "https://github.com/sigp/rust-libp2p.git", rev = "defcaf1a78cf5b70a723b3fee0e0be051c1dbd88" } From 7ca91b8ef43311768241c4c4252e0eb9c1264de5 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 25 Mar 2026 10:14:09 +1100 Subject: [PATCH 41/43] Bump c-kzg --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 87f1d08d9c..8efa6897cd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1617,9 +1617,9 @@ dependencies = [ [[package]] name = "c-kzg" -version = "2.1.5" +version = "2.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e00bf4b112b07b505472dbefd19e37e53307e2bfed5a79e0cc161d58ccd0e687" +checksum = 
"6648ed1e4ea8e8a1a4a2c78e1cda29a3fd500bc622899c340d8525ea9a76b24a" dependencies = [ "blst", "cc", From 176cce585c1ba979a6210ed79b6b6528596cdb8c Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 26 Mar 2026 12:21:13 +1100 Subject: [PATCH 42/43] Release v8.1.3 --- Cargo.lock | 14 +++++++------- Cargo.toml | 2 +- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8efa6897cd..26730562c1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4,7 +4,7 @@ version = 4 [[package]] name = "account_manager" -version = "8.1.2" +version = "8.1.3" dependencies = [ "account_utils", "bls", @@ -1276,7 +1276,7 @@ dependencies = [ [[package]] name = "beacon_node" -version = "8.1.2" +version = "8.1.3" dependencies = [ "account_utils", "beacon_chain", @@ -1513,7 +1513,7 @@ dependencies = [ [[package]] name = "boot_node" -version = "8.1.2" +version = "8.1.3" dependencies = [ "beacon_node", "bytes", @@ -4897,7 +4897,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lcli" -version = "8.1.2" +version = "8.1.3" dependencies = [ "account_utils", "beacon_chain", @@ -5382,7 +5382,7 @@ dependencies = [ [[package]] name = "lighthouse" -version = "8.1.2" +version = "8.1.3" dependencies = [ "account_manager", "account_utils", @@ -5514,7 +5514,7 @@ dependencies = [ [[package]] name = "lighthouse_version" -version = "8.1.2" +version = "8.1.3" dependencies = [ "regex", ] @@ -9621,7 +9621,7 @@ dependencies = [ [[package]] name = "validator_client" -version = "8.1.2" +version = "8.1.3" dependencies = [ "account_utils", "beacon_node_fallback", diff --git a/Cargo.toml b/Cargo.toml index 4cd1dfcea2..065741d117 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -91,7 +91,7 @@ resolver = "2" [workspace.package] edition = "2024" -version = "8.1.2" +version = "8.1.3" [workspace.dependencies] account_utils = { path = "common/account_utils" } From bd34bb14305b11af087447df2d53c03f69685d18 Mon Sep 17 00:00:00 2001 From: Michael 
Sproul Date: Thu, 26 Mar 2026 13:10:34 +1100 Subject: [PATCH 43/43] Remove schema migrations for v28 and earlier (#9031) With LH v8.1.3 supporting Fulu-on-Gnosis, we no longer need these DB migrations. All Lighthouse nodes running in prod will soon be updated to LH v8.0.0+ and schema v28+. This PR helps with Gloas fork choice changes, by allowing us to avoid updating old schema migrations when adding V29 for Gloas: - https://github.com/sigp/lighthouse/pull/9025 Co-Authored-By: Michael Sproul --- .../src/beacon_fork_choice_store.rs | 61 +- beacon_node/beacon_chain/src/lib.rs | 2 +- .../beacon_chain/src/persisted_fork_choice.rs | 39 +- beacon_node/beacon_chain/src/schema_change.rs | 79 +-- .../src/schema_change/migration_schema_v23.rs | 180 ------ .../src/schema_change/migration_schema_v24.rs | 607 ------------------ .../src/schema_change/migration_schema_v25.rs | 20 - .../src/schema_change/migration_schema_v26.rs | 91 --- .../src/schema_change/migration_schema_v27.rs | 26 - .../src/schema_change/migration_schema_v28.rs | 152 ----- beacon_node/beacon_chain/src/summaries_dag.rs | 198 ------ beacon_node/beacon_chain/tests/store_tests.rs | 6 +- .../store/src/database/leveldb_impl.rs | 6 +- beacon_node/store/src/hot_cold_store.rs | 56 +- beacon_node/store/src/lib.rs | 6 +- consensus/fork_choice/src/fork_choice.rs | 33 +- consensus/fork_choice/src/lib.rs | 2 +- consensus/proto_array/src/lib.rs | 2 +- consensus/proto_array/src/ssz_container.rs | 35 +- 19 files changed, 23 insertions(+), 1578 deletions(-) delete mode 100644 beacon_node/beacon_chain/src/schema_change/migration_schema_v23.rs delete mode 100644 beacon_node/beacon_chain/src/schema_change/migration_schema_v24.rs delete mode 100644 beacon_node/beacon_chain/src/schema_change/migration_schema_v25.rs delete mode 100644 beacon_node/beacon_chain/src/schema_change/migration_schema_v26.rs delete mode 100644 beacon_node/beacon_chain/src/schema_change/migration_schema_v27.rs delete mode 100644 
beacon_node/beacon_chain/src/schema_change/migration_schema_v28.rs diff --git a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs index 60487f9c46..95fde28f5b 100644 --- a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs +++ b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs @@ -231,35 +231,6 @@ where } } - /// Restore `Self` from a previously-generated `PersistedForkChoiceStore`. - /// - /// DEPRECATED. Can be deleted once migrations no longer require it. - pub fn from_persisted_v17( - persisted: PersistedForkChoiceStoreV17, - justified_state_root: Hash256, - unrealized_justified_state_root: Hash256, - store: Arc>, - ) -> Result { - let justified_balances = - JustifiedBalances::from_effective_balances(persisted.justified_balances)?; - - Ok(Self { - store, - balances_cache: <_>::default(), - time: persisted.time, - finalized_checkpoint: persisted.finalized_checkpoint, - justified_checkpoint: persisted.justified_checkpoint, - justified_balances, - justified_state_root, - unrealized_justified_checkpoint: persisted.unrealized_justified_checkpoint, - unrealized_justified_state_root, - unrealized_finalized_checkpoint: persisted.unrealized_finalized_checkpoint, - proposer_boost_root: persisted.proposer_boost_root, - equivocating_indices: persisted.equivocating_indices, - _phantom: PhantomData, - }) - } - /// Restore `Self` from a previously-generated `PersistedForkChoiceStore`. pub fn from_persisted( persisted: PersistedForkChoiceStore, @@ -411,45 +382,15 @@ where pub type PersistedForkChoiceStore = PersistedForkChoiceStoreV28; /// A container which allows persisting the `BeaconForkChoiceStore` to the on-disk database. 
-#[superstruct( - variants(V17, V28), - variant_attributes(derive(Encode, Decode)), - no_enum -)] +#[superstruct(variants(V28), variant_attributes(derive(Encode, Decode)), no_enum)] pub struct PersistedForkChoiceStore { - /// The balances cache was removed from disk storage in schema V28. - #[superstruct(only(V17))] - pub balances_cache: BalancesCacheV8, pub time: Slot, pub finalized_checkpoint: Checkpoint, pub justified_checkpoint: Checkpoint, - /// The justified balances were removed from disk storage in schema V28. - #[superstruct(only(V17))] - pub justified_balances: Vec, - /// The justified state root is stored so that it can be used to load the justified balances. - #[superstruct(only(V28))] pub justified_state_root: Hash256, pub unrealized_justified_checkpoint: Checkpoint, - #[superstruct(only(V28))] pub unrealized_justified_state_root: Hash256, pub unrealized_finalized_checkpoint: Checkpoint, pub proposer_boost_root: Hash256, pub equivocating_indices: BTreeSet, } - -// Convert V28 to V17 by adding balances and removing justified state roots. 
-impl From<(PersistedForkChoiceStoreV28, JustifiedBalances)> for PersistedForkChoiceStoreV17 { - fn from((v28, balances): (PersistedForkChoiceStoreV28, JustifiedBalances)) -> Self { - Self { - balances_cache: Default::default(), - time: v28.time, - finalized_checkpoint: v28.finalized_checkpoint, - justified_checkpoint: v28.justified_checkpoint, - justified_balances: balances.effective_balances, - unrealized_justified_checkpoint: v28.unrealized_justified_checkpoint, - unrealized_finalized_checkpoint: v28.unrealized_finalized_checkpoint, - proposer_boost_root: v28.proposer_boost_root, - equivocating_indices: v28.equivocating_indices, - } - } -} diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index cf427d1a40..d71aec6987 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -75,7 +75,7 @@ pub use self::errors::{BeaconChainError, BlockProductionError}; pub use self::historical_blocks::HistoricalBlockError; pub use attestation_verification::Error as AttestationError; pub use beacon_fork_choice_store::{ - BeaconForkChoiceStore, Error as ForkChoiceStoreError, PersistedForkChoiceStoreV17, + BeaconForkChoiceStore, Error as ForkChoiceStoreError, PersistedForkChoiceStore, PersistedForkChoiceStoreV28, }; pub use block_verification::{ diff --git a/beacon_node/beacon_chain/src/persisted_fork_choice.rs b/beacon_node/beacon_chain/src/persisted_fork_choice.rs index d8fcc0901b..6229544e81 100644 --- a/beacon_node/beacon_chain/src/persisted_fork_choice.rs +++ b/beacon_node/beacon_chain/src/persisted_fork_choice.rs @@ -1,52 +1,19 @@ -use crate::{ - beacon_fork_choice_store::{PersistedForkChoiceStoreV17, PersistedForkChoiceStoreV28}, - metrics, -}; +use crate::{beacon_fork_choice_store::PersistedForkChoiceStoreV28, metrics}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; -use store::{DBColumn, Error, KeyValueStoreOp, StoreConfig, StoreItem}; +use store::{DBColumn, Error, KeyValueStoreOp, 
StoreConfig}; use superstruct::superstruct; use types::Hash256; // If adding a new version you should update this type alias and fix the breakages. pub type PersistedForkChoice = PersistedForkChoiceV28; -#[superstruct( - variants(V17, V28), - variant_attributes(derive(Encode, Decode)), - no_enum -)] +#[superstruct(variants(V28), variant_attributes(derive(Encode, Decode)), no_enum)] pub struct PersistedForkChoice { - #[superstruct(only(V17))] - pub fork_choice_v17: fork_choice::PersistedForkChoiceV17, - #[superstruct(only(V28))] pub fork_choice: fork_choice::PersistedForkChoiceV28, - #[superstruct(only(V17))] - pub fork_choice_store_v17: PersistedForkChoiceStoreV17, - #[superstruct(only(V28))] pub fork_choice_store: PersistedForkChoiceStoreV28, } -macro_rules! impl_store_item { - ($type:ty) => { - impl StoreItem for $type { - fn db_column() -> DBColumn { - DBColumn::ForkChoice - } - - fn as_store_bytes(&self) -> Vec { - self.as_ssz_bytes() - } - - fn from_store_bytes(bytes: &[u8]) -> std::result::Result { - Self::from_ssz_bytes(bytes).map_err(Into::into) - } - } - }; -} - -impl_store_item!(PersistedForkChoiceV17); - impl PersistedForkChoiceV28 { pub fn from_bytes(bytes: &[u8], store_config: &StoreConfig) -> Result { let decompressed_bytes = store_config diff --git a/beacon_node/beacon_chain/src/schema_change.rs b/beacon_node/beacon_chain/src/schema_change.rs index ddc5978339..ed82143c38 100644 --- a/beacon_node/beacon_chain/src/schema_change.rs +++ b/beacon_node/beacon_chain/src/schema_change.rs @@ -1,11 +1,4 @@ //! Utilities for managing database schema changes. 
-mod migration_schema_v23; -mod migration_schema_v24; -mod migration_schema_v25; -mod migration_schema_v26; -mod migration_schema_v27; -mod migration_schema_v28; - use crate::beacon_chain::BeaconChainTypes; use std::sync::Arc; use store::Error as StoreError; @@ -13,81 +6,17 @@ use store::hot_cold_store::{HotColdDB, HotColdDBError}; use store::metadata::{CURRENT_SCHEMA_VERSION, SchemaVersion}; /// Migrate the database from one schema version to another, applying all requisite mutations. +/// +/// All migrations for schema versions up to and including v28 have been removed. Nodes on live +/// networks are already running v28, so only the current version check remains. pub fn migrate_schema( - db: Arc>, + _db: Arc>, from: SchemaVersion, to: SchemaVersion, ) -> Result<(), StoreError> { match (from, to) { // Migrating from the current schema version to itself is always OK, a no-op. (_, _) if from == to && to == CURRENT_SCHEMA_VERSION => Ok(()), - // Upgrade across multiple versions by recursively migrating one step at a time. - (_, _) if from.as_u64() + 1 < to.as_u64() => { - let next = SchemaVersion(from.as_u64() + 1); - migrate_schema::(db.clone(), from, next)?; - migrate_schema::(db, next, to) - } - // Downgrade across multiple versions by recursively migrating one step at a time. - (_, _) if to.as_u64() + 1 < from.as_u64() => { - let next = SchemaVersion(from.as_u64() - 1); - migrate_schema::(db.clone(), from, next)?; - migrate_schema::(db, next, to) - } - - // - // Migrations from before SchemaVersion(22) are deprecated. 
- // - (SchemaVersion(22), SchemaVersion(23)) => { - let ops = migration_schema_v23::upgrade_to_v23::(db.clone())?; - db.store_schema_version_atomically(to, ops) - } - (SchemaVersion(23), SchemaVersion(22)) => { - let ops = migration_schema_v23::downgrade_from_v23::(db.clone())?; - db.store_schema_version_atomically(to, ops) - } - (SchemaVersion(23), SchemaVersion(24)) => { - let ops = migration_schema_v24::upgrade_to_v24::(db.clone())?; - db.store_schema_version_atomically(to, ops) - } - (SchemaVersion(24), SchemaVersion(23)) => { - let ops = migration_schema_v24::downgrade_from_v24::(db.clone())?; - db.store_schema_version_atomically(to, ops) - } - (SchemaVersion(24), SchemaVersion(25)) => { - let ops = migration_schema_v25::upgrade_to_v25()?; - db.store_schema_version_atomically(to, ops) - } - (SchemaVersion(25), SchemaVersion(24)) => { - let ops = migration_schema_v25::downgrade_from_v25()?; - db.store_schema_version_atomically(to, ops) - } - (SchemaVersion(25), SchemaVersion(26)) => { - let ops = migration_schema_v26::upgrade_to_v26::(db.clone())?; - db.store_schema_version_atomically(to, ops) - } - (SchemaVersion(26), SchemaVersion(25)) => { - let ops = migration_schema_v26::downgrade_from_v26::(db.clone())?; - db.store_schema_version_atomically(to, ops) - } - (SchemaVersion(26), SchemaVersion(27)) => { - // This migration updates the blobs db. The schema version - // is bumped inside upgrade_to_v27. - migration_schema_v27::upgrade_to_v27::(db.clone()) - } - (SchemaVersion(27), SchemaVersion(26)) => { - // Downgrading is essentially a no-op and is only possible - // if peer das isn't scheduled. 
- migration_schema_v27::downgrade_from_v27::(db.clone())?; - db.store_schema_version_atomically(to, vec![]) - } - (SchemaVersion(27), SchemaVersion(28)) => { - let ops = migration_schema_v28::upgrade_to_v28::(db.clone())?; - db.store_schema_version_atomically(to, ops) - } - (SchemaVersion(28), SchemaVersion(27)) => { - let ops = migration_schema_v28::downgrade_from_v28::(db.clone())?; - db.store_schema_version_atomically(to, ops) - } // Anything else is an error. (_, _) => Err(HotColdDBError::UnsupportedSchemaVersion { target_version: to, diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v23.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v23.rs deleted file mode 100644 index e238e1efb6..0000000000 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v23.rs +++ /dev/null @@ -1,180 +0,0 @@ -use crate::BeaconForkChoiceStore; -use crate::beacon_chain::BeaconChainTypes; -use crate::persisted_fork_choice::PersistedForkChoiceV17; -use crate::schema_change::StoreError; -use crate::test_utils::{BEACON_CHAIN_DB_KEY, FORK_CHOICE_DB_KEY, PersistedBeaconChain}; -use fork_choice::{ForkChoice, ResetPayloadStatuses}; -use ssz::{Decode, Encode}; -use ssz_derive::{Decode, Encode}; -use std::sync::Arc; -use store::{DBColumn, Error, HotColdDB, KeyValueStore, KeyValueStoreOp, StoreItem}; -use tracing::{debug, info}; -use types::{Hash256, Slot}; - -/// Dummy value to use for the canonical head block root, see below. -pub const DUMMY_CANONICAL_HEAD_BLOCK_ROOT: Hash256 = Hash256::repeat_byte(0xff); - -pub fn upgrade_to_v23( - db: Arc>, -) -> Result, Error> { - info!("Upgrading DB schema from v22 to v23"); - - // 1) Set the head-tracker to empty - let Some(persisted_beacon_chain_v22) = - db.get_item::(&BEACON_CHAIN_DB_KEY)? - else { - return Err(Error::MigrationError( - "No persisted beacon chain found in DB. 
Datadir could be incorrect or DB could be corrupt".to_string() - )); - }; - - let persisted_beacon_chain = PersistedBeaconChain { - genesis_block_root: persisted_beacon_chain_v22.genesis_block_root, - }; - - let mut ops = vec![persisted_beacon_chain.as_kv_store_op(BEACON_CHAIN_DB_KEY)]; - - // 2) Wipe out all state temporary flags. While un-used in V23, if there's a rollback we could - // end-up with an inconsistent DB. - for state_root_result in db - .hot_db - .iter_column_keys::(DBColumn::BeaconStateTemporary) - { - let state_root = state_root_result?; - debug!( - ?state_root, - "Deleting temporary state on v23 schema migration" - ); - ops.push(KeyValueStoreOp::DeleteKey( - DBColumn::BeaconStateTemporary, - state_root.as_slice().to_vec(), - )); - - // We also delete the temporary states themselves. Although there are known issue with - // temporary states and this could lead to DB corruption, we will only corrupt the DB in - // cases where the DB would be corrupted by restarting on v7.0.x. We consider these DBs - // "too far gone". Deleting here has the advantage of not generating warnings about - // disjoint state DAGs in the v24 upgrade, or the first pruning after migration. - ops.push(KeyValueStoreOp::DeleteKey( - DBColumn::BeaconState, - state_root.as_slice().to_vec(), - )); - ops.push(KeyValueStoreOp::DeleteKey( - DBColumn::BeaconStateSummary, - state_root.as_slice().to_vec(), - )); - } - - Ok(ops) -} - -pub fn downgrade_from_v23( - db: Arc>, -) -> Result, Error> { - let Some(persisted_beacon_chain) = db.get_item::(&BEACON_CHAIN_DB_KEY)? - else { - // The `PersistedBeaconChain` must exist if fork choice exists. - return Err(Error::MigrationError( - "No persisted beacon chain found in DB. Datadir could be incorrect or DB could be corrupt".to_string(), - )); - }; - - // Recreate head-tracker from fork choice. - let Some(persisted_fork_choice) = db.get_item::(&FORK_CHOICE_DB_KEY)? - else { - // Fork choice should exist if the database exists. 
- return Err(Error::MigrationError( - "No fork choice found in DB".to_string(), - )); - }; - - // We use dummy roots for the justified states because we can source the balances from the v17 - // persited fork choice. The justified state root isn't required to look up the justified state's - // balances (as it would be in V28). This fork choice object with corrupt state roots SHOULD NOT - // be written to disk. - let dummy_justified_state_root = Hash256::repeat_byte(0x66); - let dummy_unrealized_justified_state_root = Hash256::repeat_byte(0x77); - - let fc_store = BeaconForkChoiceStore::from_persisted_v17( - persisted_fork_choice.fork_choice_store_v17, - dummy_justified_state_root, - dummy_unrealized_justified_state_root, - db.clone(), - ) - .map_err(|e| { - Error::MigrationError(format!( - "Error loading fork choice store from persisted: {e:?}" - )) - })?; - - // Doesn't matter what policy we use for invalid payloads, as our head calculation just - // considers descent from finalization. 
- let reset_payload_statuses = ResetPayloadStatuses::OnlyWithInvalidPayload; - let fork_choice = ForkChoice::from_persisted( - persisted_fork_choice.fork_choice_v17.try_into()?, - reset_payload_statuses, - fc_store, - &db.spec, - ) - .map_err(|e| { - Error::MigrationError(format!("Error loading fork choice from persisted: {e:?}")) - })?; - - let heads = fork_choice - .proto_array() - .heads_descended_from_finalization::(fork_choice.finalized_checkpoint()); - - let head_roots = heads.iter().map(|node| node.root).collect(); - let head_slots = heads.iter().map(|node| node.slot).collect(); - - let persisted_beacon_chain_v22 = PersistedBeaconChainV22 { - _canonical_head_block_root: DUMMY_CANONICAL_HEAD_BLOCK_ROOT, - genesis_block_root: persisted_beacon_chain.genesis_block_root, - ssz_head_tracker: SszHeadTracker { - roots: head_roots, - slots: head_slots, - }, - }; - - let ops = vec![persisted_beacon_chain_v22.as_kv_store_op(BEACON_CHAIN_DB_KEY)]; - - Ok(ops) -} - -/// Helper struct that is used to encode/decode the state of the `HeadTracker` as SSZ bytes. -/// -/// This is used when persisting the state of the `BeaconChain` to disk. 
-#[derive(Encode, Decode, Clone)] -pub struct SszHeadTracker { - roots: Vec, - slots: Vec, -} - -#[derive(Clone, Encode, Decode)] -pub struct PersistedBeaconChainV22 { - /// This value is ignored to resolve the issue described here: - /// - /// https://github.com/sigp/lighthouse/pull/1639 - /// - /// Its removal is tracked here: - /// - /// https://github.com/sigp/lighthouse/issues/1784 - pub _canonical_head_block_root: Hash256, - pub genesis_block_root: Hash256, - /// DEPRECATED - pub ssz_head_tracker: SszHeadTracker, -} - -impl StoreItem for PersistedBeaconChainV22 { - fn db_column() -> DBColumn { - DBColumn::BeaconChain - } - - fn as_store_bytes(&self) -> Vec { - self.as_ssz_bytes() - } - - fn from_store_bytes(bytes: &[u8]) -> Result { - Self::from_ssz_bytes(bytes).map_err(Into::into) - } -} diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v24.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v24.rs deleted file mode 100644 index c8dfe1ac9b..0000000000 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v24.rs +++ /dev/null @@ -1,607 +0,0 @@ -use crate::{ - beacon_chain::BeaconChainTypes, - summaries_dag::{DAGStateSummary, DAGStateSummaryV22, StateSummariesDAG}, -}; -use ssz::{Decode, DecodeError, Encode}; -use ssz_derive::Encode; -use std::{ - sync::Arc, - time::{Duration, Instant}, -}; -use store::{ - DBColumn, Error, HotColdDB, HotStateSummary, KeyValueStore, KeyValueStoreOp, StoreItem, - hdiff::StorageStrategy, - hot_cold_store::{HotStateSummaryV22, OptionalDiffBaseState}, -}; -use tracing::{debug, info, warn}; -use types::{ - BeaconState, CACHED_EPOCHS, ChainSpec, Checkpoint, CommitteeCache, EthSpec, Hash256, Slot, - execution::StatePayloadStatus, -}; - -/// We stopped using the pruning checkpoint in schema v23 but never explicitly deleted it. -/// -/// We delete it as part of the v24 migration. 
-pub const PRUNING_CHECKPOINT_KEY: Hash256 = Hash256::repeat_byte(3); - -pub fn store_full_state_v22( - state_root: &Hash256, - state: &BeaconState, - ops: &mut Vec, -) -> Result<(), Error> { - let bytes = StorageContainer::new(state).as_ssz_bytes(); - ops.push(KeyValueStoreOp::PutKeyValue( - DBColumn::BeaconState, - state_root.as_slice().to_vec(), - bytes, - )); - Ok(()) -} - -/// Fetch a V22 state from the database either as a full state or using block replay. -pub fn get_state_v22( - db: &Arc>, - state_root: &Hash256, - spec: &ChainSpec, -) -> Result>, Error> { - let Some(summary) = db.get_item::(state_root)? else { - return Ok(None); - }; - let Some(base_state) = - get_full_state_v22(&db.hot_db, &summary.epoch_boundary_state_root, spec)? - else { - return Ok(None); - }; - // Loading hot states via block replay doesn't care about the schema version, so we can use - // the DB's current method for this. - let update_cache = false; - db.load_hot_state_using_replay( - base_state, - summary.slot, - summary.latest_block_root, - StatePayloadStatus::Pending, - update_cache, - ) - .map(Some) -} - -pub fn get_full_state_v22, E: EthSpec>( - db: &KV, - state_root: &Hash256, - spec: &ChainSpec, -) -> Result>, Error> { - match db.get_bytes(DBColumn::BeaconState, state_root.as_slice())? { - Some(bytes) => { - let container = StorageContainer::from_ssz_bytes(&bytes, spec)?; - Ok(Some(container.try_into()?)) - } - None => Ok(None), - } -} - -/// A container for storing `BeaconState` components. -/// -/// DEPRECATED. -#[derive(Encode)] -pub struct StorageContainer { - state: BeaconState, - committee_caches: Vec>, -} - -impl StorageContainer { - /// Create a new instance for storing a `BeaconState`. 
- pub fn new(state: &BeaconState) -> Self { - Self { - state: state.clone(), - committee_caches: state.committee_caches().to_vec(), - } - } - - pub fn from_ssz_bytes(bytes: &[u8], spec: &ChainSpec) -> Result { - // We need to use the slot-switching `from_ssz_bytes` of `BeaconState`, which doesn't - // compose with the other SSZ utils, so we duplicate some parts of `ssz_derive` here. - let mut builder = ssz::SszDecoderBuilder::new(bytes); - - builder.register_anonymous_variable_length_item()?; - builder.register_type::>()?; - - let mut decoder = builder.build()?; - - let state = decoder.decode_next_with(|bytes| BeaconState::from_ssz_bytes(bytes, spec))?; - let committee_caches = decoder.decode_next()?; - - Ok(Self { - state, - committee_caches, - }) - } -} - -impl TryInto> for StorageContainer { - type Error = Error; - - fn try_into(mut self) -> Result, Error> { - let mut state = self.state; - - for i in (0..CACHED_EPOCHS).rev() { - if i >= self.committee_caches.len() { - return Err(Error::SszDecodeError(DecodeError::BytesInvalid( - "Insufficient committees for BeaconState".to_string(), - ))); - }; - - state.committee_caches_mut()[i] = self.committee_caches.remove(i); - } - - Ok(state) - } -} - -/// The checkpoint used for pruning the database. -/// -/// Updated whenever pruning is successful. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub struct PruningCheckpoint { - pub checkpoint: Checkpoint, -} - -impl StoreItem for PruningCheckpoint { - fn db_column() -> DBColumn { - DBColumn::BeaconMeta - } - - fn as_store_bytes(&self) -> Vec { - self.checkpoint.as_ssz_bytes() - } - - fn from_store_bytes(bytes: &[u8]) -> Result { - Ok(PruningCheckpoint { - checkpoint: Checkpoint::from_ssz_bytes(bytes)?, - }) - } -} - -pub fn upgrade_to_v24( - db: Arc>, -) -> Result, Error> { - let mut migrate_ops = vec![]; - let split = db.get_split_info(); - let hot_hdiff_start_slot = split.slot; - - // Delete the `PruningCheckpoint` (no longer used). 
- migrate_ops.push(KeyValueStoreOp::DeleteKey( - DBColumn::BeaconMeta, - PRUNING_CHECKPOINT_KEY.as_slice().to_vec(), - )); - - // Sanity check to make sure the HDiff grid is aligned with the epoch start - if hot_hdiff_start_slot % T::EthSpec::slots_per_epoch() != 0 { - return Err(Error::MigrationError(format!( - "hot_hdiff_start_slot is not first slot in epoch {hot_hdiff_start_slot}" - ))); - } - - // After V24 hot tree states, the in-memory `anchor_info.anchor_slot` is the start slot of the - // hot HDiff grid. Before the migration, it's set to the slot of the anchor state in the DB: - // - the genesis state on a genesis sync, or - // - the checkpoint state on a checkpoint sync. - // - // If the node has been running for a while the `anchor_slot` might be less than the finalized - // checkpoint. This upgrade constructs a grid only with unfinalized states, rooted in the - // current finalized state. So we set the `anchor_slot` to `split.slot` to root the grid in the - // current finalized state. Each migration sets the split to - // ``` - // Split { slot: finalized_state.slot(), state_root: finalized_state_root } - // ``` - { - let anchor_info = db.get_anchor_info(); - - // If the node is already an archive node, we can set the anchor slot to 0 and copy - // snapshots and diffs from the freezer DB to the hot DB in order to establish an initial - // hot grid that is aligned/"perfect" (no `start_slot`/`anchor_slot` to worry about). - // - // This only works if all of the following are true: - // - // - We have the previous snapshot for the split state stored in the freezer DB, i.e. - // if `previous_snapshot_slot >= state_upper_limit`. - // - The split state itself will be stored as a diff or snapshot in the new grid. We choose - // not to support a split state that requires block replay, because computing its previous - // state root from the DAG is not straight-forward. 
- let dummy_start_slot = Slot::new(0); - let closest_layer_points = db - .hierarchy - .closest_layer_points(split.slot, dummy_start_slot); - - let previous_snapshot_slot = - closest_layer_points - .iter() - .copied() - .min() - .ok_or(Error::MigrationError( - "closest_layer_points must not be empty".to_string(), - ))?; - - if previous_snapshot_slot >= anchor_info.state_upper_limit - && db - .hierarchy - .storage_strategy(split.slot, dummy_start_slot) - .is_ok_and(|strategy| !strategy.is_replay_from()) - { - info!( - %previous_snapshot_slot, - split_slot = %split.slot, - "Aligning hot diff grid to freezer" - ); - - // Set anchor slot to 0 in case it was set to something else by a previous checkpoint - // sync. - let mut new_anchor_info = anchor_info.clone(); - new_anchor_info.anchor_slot = Slot::new(0); - - // Update the anchor on disk atomically if migration is successful - migrate_ops.push(db.compare_and_set_anchor_info(anchor_info, new_anchor_info)?); - - // Copy each of the freezer layers to the hot DB in slot ascending order. - for layer_slot in closest_layer_points.into_iter().rev() { - // Do not try to load the split state itself from the freezer, it won't be there. - // It will be migrated in the main loop below. - if layer_slot == split.slot { - continue; - } - - let mut freezer_state = db.load_cold_state_by_slot(layer_slot)?; - - let state_root = freezer_state.canonical_root()?; - - let mut state_ops = vec![]; - db.store_hot_state(&state_root, &freezer_state, &mut state_ops)?; - db.hot_db.do_atomically(state_ops)?; - } - } else { - // Otherwise for non-archive nodes, set the anchor slot for the hot grid to the current - // split slot (the oldest slot available). 
- let mut new_anchor_info = anchor_info.clone(); - new_anchor_info.anchor_slot = hot_hdiff_start_slot; - - // Update the anchor in disk atomically if migration is successful - migrate_ops.push(db.compare_and_set_anchor_info(anchor_info, new_anchor_info)?); - } - } - - let state_summaries_dag = new_dag::(&db)?; - - // We compute the state summaries DAG outside of a DB migration. Therefore if the DB is properly - // prunned, it should have a single root equal to the split. - let state_summaries_dag_roots = state_summaries_dag.tree_roots(); - if state_summaries_dag_roots.len() == 1 { - let (root_summary_state_root, root_summary) = - state_summaries_dag_roots.first().expect("len == 1"); - if *root_summary_state_root != split.state_root { - warn!( - ?root_summary_state_root, - ?root_summary, - ?split, - "State summaries DAG root is not the split" - ); - } - } else { - warn!( - location = "migration", - state_summaries_dag_roots = ?state_summaries_dag_roots, - "State summaries DAG found more than one root" - ); - } - - // Sort summaries by slot so we have their ancestor diffs already stored when we store them. - // If the summaries are sorted topologically we can insert them into the DB like if they were a - // new state, re-using existing code. As states are likely to be sequential the diff cache - // should kick in making the migration more efficient. If we just iterate the column of - // summaries we may get distance state of each iteration. 
- let summaries_by_slot = state_summaries_dag.summaries_by_slot_ascending(); - debug!( - summaries_count = state_summaries_dag.summaries_count(), - slots_count = summaries_by_slot.len(), - min_slot = ?summaries_by_slot.first_key_value().map(|(slot, _)| slot), - max_slot = ?summaries_by_slot.last_key_value().map(|(slot, _)| slot), - ?state_summaries_dag_roots, - %hot_hdiff_start_slot, - split_state_root = ?split.state_root, - "Starting hot states migration" - ); - - // Upgrade all hot DB state summaries to the new type: - // - Set all summaries of boundary states to `Snapshot` type - // - Set all others to `Replay` pointing to `epoch_boundary_state_root` - - let mut diffs_written = 0; - let mut summaries_written = 0; - let mut last_log_time = Instant::now(); - - for (slot, old_hot_state_summaries) in summaries_by_slot { - for (state_root, old_summary) in old_hot_state_summaries { - if slot < hot_hdiff_start_slot { - // To reach here, there must be some pruning issue with the DB where we still have - // hot states below the split slot. This states can't be migrated as we can't compute - // a storage strategy for them. After this if else block, the summary and state are - // scheduled for deletion. - debug!( - %slot, - ?state_root, - "Ignoring state summary prior to split slot" - ); - } else { - // 1. Store snapshot or diff at this slot (if required). - let storage_strategy = db.hot_storage_strategy(slot)?; - debug!( - %slot, - ?state_root, - ?storage_strategy, - "Migrating state summary" - ); - - match storage_strategy { - StorageStrategy::DiffFrom(_) | StorageStrategy::Snapshot => { - // Load the state and re-store it as a snapshot or diff. - let state = get_state_v22::(&db, &state_root, &db.spec)? - .ok_or(Error::MissingState(state_root))?; - - // Store immediately so that future diffs can load and diff from it. - let mut ops = vec![]; - // We must commit the hot state summary immediately, otherwise we can't diff - // against it and future writes will fail. 
That's why we write the new hot - // summaries in a different column to have both new and old data present at - // once. Otherwise if the process crashes during the migration the database will - // be broken. - db.store_hot_state_summary(&state_root, &state, &mut ops)?; - db.store_hot_state_diffs(&state_root, &state, &mut ops)?; - db.hot_db.do_atomically(ops)?; - diffs_written += 1; - } - StorageStrategy::ReplayFrom(diff_base_slot) => { - // Optimization: instead of having to load the state of each summary we load x32 - // less states by manually computing the HotStateSummary roots using the - // computed state dag. - // - // No need to store diffs for states that will be reconstructed by replaying - // blocks. - // - // 2. Convert the summary to the new format. - if state_root == split.state_root { - return Err(Error::MigrationError( - "unreachable: split state should be stored as a snapshot or diff" - .to_string(), - )); - } - let previous_state_root = state_summaries_dag - .previous_state_root(state_root) - .map_err(|e| { - Error::MigrationError(format!( - "error computing previous_state_root {e:?}" - )) - })?; - - let diff_base_state = OptionalDiffBaseState::new( - diff_base_slot, - state_summaries_dag - .ancestor_state_root_at_slot(state_root, diff_base_slot) - .map_err(|e| { - Error::MigrationError(format!( - "error computing ancestor_state_root_at_slot \ - ({state_root:?}, {diff_base_slot}): {e:?}" - )) - })?, - ); - - let new_summary = HotStateSummary { - slot, - latest_block_root: old_summary.latest_block_root, - latest_block_slot: old_summary.latest_block_slot, - previous_state_root, - diff_base_state, - }; - let op = new_summary.as_kv_store_op(state_root); - // It's not necessary to immediately commit the summaries of states that are - // ReplayFrom. However we do so for simplicity. - db.hot_db.do_atomically(vec![op])?; - } - } - } - - // 3. Stage old data for deletion. 
- if slot % T::EthSpec::slots_per_epoch() == 0 { - migrate_ops.push(KeyValueStoreOp::DeleteKey( - DBColumn::BeaconState, - state_root.as_slice().to_vec(), - )); - } - - // Delete previous summaries - migrate_ops.push(KeyValueStoreOp::DeleteKey( - DBColumn::BeaconStateSummary, - state_root.as_slice().to_vec(), - )); - - summaries_written += 1; - if last_log_time.elapsed() > Duration::from_secs(5) { - last_log_time = Instant::now(); - info!( - diffs_written, - summaries_written, - summaries_count = state_summaries_dag.summaries_count(), - "Hot states migration in progress" - ); - } - } - } - - info!( - diffs_written, - summaries_written, - summaries_count = state_summaries_dag.summaries_count(), - "Hot states migration complete" - ); - - Ok(migrate_ops) -} - -pub fn downgrade_from_v24( - db: Arc>, -) -> Result, Error> { - let state_summaries = db - .load_hot_state_summaries()? - .into_iter() - .map(|(state_root, summary)| (state_root, summary.into())) - .collect::>(); - - info!( - summaries_count = state_summaries.len(), - "DB downgrade of v24 state summaries started" - ); - - let state_summaries_dag = StateSummariesDAG::new(state_summaries) - .map_err(|e| Error::MigrationError(format!("Error on new StateSumariesDAG {e:?}")))?; - - let mut migrate_ops = vec![]; - let mut states_written = 0; - let mut summaries_written = 0; - let mut summaries_skipped = 0; - let mut last_log_time = Instant::now(); - - // Rebuild the PruningCheckpoint from the split. - let split = db.get_split_info(); - let pruning_checkpoint = PruningCheckpoint { - checkpoint: Checkpoint { - epoch: split.slot.epoch(T::EthSpec::slots_per_epoch()), - root: split.block_root, - }, - }; - migrate_ops.push(pruning_checkpoint.as_kv_store_op(PRUNING_CHECKPOINT_KEY)); - - // Convert state summaries back to the old format. 
- for (state_root, summary) in state_summaries_dag - .summaries_by_slot_ascending() - .into_iter() - .flat_map(|(_, summaries)| summaries) - { - // No need to migrate any states prior to the split. The v22 schema does not need them, and - // they would generate warnings about a disjoint DAG when re-upgrading to V24. - if summary.slot < split.slot { - debug!( - slot = %summary.slot, - ?state_root, - "Skipping migration of pre-split state" - ); - summaries_skipped += 1; - continue; - } - - // If boundary state: persist. - // Do not cache these states as they are unlikely to be relevant later. - let update_cache = false; - if summary.slot % T::EthSpec::slots_per_epoch() == 0 { - let (state, _) = db - .load_hot_state(&state_root, update_cache)? - .ok_or(Error::MissingState(state_root))?; - - // Immediately commit the state, so we don't OOM. It's stored in a different - // column so if the migration crashes we'll just store extra harmless junk in the DB. - let mut state_write_ops = vec![]; - store_full_state_v22(&state_root, &state, &mut state_write_ops)?; - db.hot_db.do_atomically(state_write_ops)?; - states_written += 1; - } - - // Persist old summary. 
- let epoch_boundary_state_slot = summary.slot - summary.slot % T::EthSpec::slots_per_epoch(); - let old_summary = HotStateSummaryV22 { - slot: summary.slot, - latest_block_root: summary.latest_block_root, - epoch_boundary_state_root: state_summaries_dag - .ancestor_state_root_at_slot(state_root, epoch_boundary_state_slot) - .map_err(|e| { - Error::MigrationError(format!( - "error computing ancestor_state_root_at_slot({state_root:?}, {epoch_boundary_state_slot}) {e:?}" - )) - })?, - }; - migrate_ops.push(KeyValueStoreOp::PutKeyValue( - DBColumn::BeaconStateSummary, - state_root.as_slice().to_vec(), - old_summary.as_ssz_bytes(), - )); - summaries_written += 1; - - if last_log_time.elapsed() > Duration::from_secs(5) { - last_log_time = Instant::now(); - info!( - states_written, - summaries_written, - summaries_count = state_summaries_dag.summaries_count(), - "DB downgrade of v24 state summaries in progress" - ); - } - } - - // Delete all V24 schema data. We do this outside the loop over summaries to ensure we cover - // every piece of data and to simplify logic around skipping certain summaries that do not get - // migrated. 
- for db_column in [ - DBColumn::BeaconStateHotSummary, - DBColumn::BeaconStateHotDiff, - DBColumn::BeaconStateHotSnapshot, - ] { - for key in db.hot_db.iter_column_keys::(db_column) { - let state_root = key?; - migrate_ops.push(KeyValueStoreOp::DeleteKey( - db_column, - state_root.as_slice().to_vec(), - )); - } - } - - info!( - states_written, - summaries_written, - summaries_skipped, - summaries_count = state_summaries_dag.summaries_count(), - "DB downgrade of v24 state summaries completed" - ); - - Ok(migrate_ops) -} - -fn new_dag( - db: &HotColdDB, -) -> Result { - // Collect all sumaries for unfinalized states - let state_summaries_v22 = db - .hot_db - // Collect summaries from the legacy V22 column BeaconStateSummary - .iter_column::(DBColumn::BeaconStateSummary) - .map(|res| { - let (key, value) = res?; - let state_root: Hash256 = key; - let summary = HotStateSummaryV22::from_ssz_bytes(&value)?; - let block_root = summary.latest_block_root; - // Read blocks to get the block slot and parent root. In Holesky forced finalization it - // took 5100 ms to read 15072 state summaries, so it's not really necessary to - // de-duplicate block reads. - let block = db - .get_blinded_block(&block_root)? 
- .ok_or(Error::MissingBlock(block_root))?; - - Ok(( - state_root, - DAGStateSummaryV22 { - slot: summary.slot, - latest_block_root: summary.latest_block_root, - block_slot: block.slot(), - block_parent_root: block.parent_root(), - }, - )) - }) - .collect::, Error>>()?; - - StateSummariesDAG::new_from_v22(state_summaries_v22) - .map_err(|e| Error::MigrationError(format!("error computing states summaries dag {e:?}"))) -} diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v25.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v25.rs deleted file mode 100644 index 44e8894d6f..0000000000 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v25.rs +++ /dev/null @@ -1,20 +0,0 @@ -use store::{DBColumn, Error, KeyValueStoreOp}; -use tracing::info; -use types::Hash256; - -pub const ETH1_CACHE_DB_KEY: Hash256 = Hash256::ZERO; - -/// Delete the on-disk eth1 data. -pub fn upgrade_to_v25() -> Result, Error> { - info!("Deleting eth1 data from disk for v25 DB upgrade"); - Ok(vec![KeyValueStoreOp::DeleteKey( - DBColumn::Eth1Cache, - ETH1_CACHE_DB_KEY.as_slice().to_vec(), - )]) -} - -/// No-op: we don't need to recreate on-disk eth1 data, as previous versions gracefully handle -/// data missing from disk. 
-pub fn downgrade_from_v25() -> Result, Error> { - Ok(vec![]) -} diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v26.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v26.rs deleted file mode 100644 index 38714ea060..0000000000 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v26.rs +++ /dev/null @@ -1,91 +0,0 @@ -use crate::BeaconChainTypes; -use crate::custody_context::CustodyContextSsz; -use crate::persisted_custody::{CUSTODY_DB_KEY, PersistedCustody}; -use ssz::{Decode, Encode}; -use ssz_derive::{Decode, Encode}; -use std::sync::Arc; -use store::{DBColumn, Error, HotColdDB, KeyValueStoreOp, StoreItem}; -use tracing::info; - -#[derive(Debug, Encode, Decode, Clone)] -pub(crate) struct CustodyContextSszV24 { - pub(crate) validator_custody_at_head: u64, - pub(crate) persisted_is_supernode: bool, -} - -pub(crate) struct PersistedCustodyV24(CustodyContextSszV24); - -impl StoreItem for PersistedCustodyV24 { - fn db_column() -> DBColumn { - DBColumn::CustodyContext - } - - fn as_store_bytes(&self) -> Vec { - self.0.as_ssz_bytes() - } - - fn from_store_bytes(bytes: &[u8]) -> Result { - let custody_context = CustodyContextSszV24::from_ssz_bytes(bytes)?; - Ok(PersistedCustodyV24(custody_context)) - } -} - -/// Upgrade the `CustodyContext` entry to v26. 
-pub fn upgrade_to_v26( - db: Arc>, -) -> Result, Error> { - let ops = if db.spec.is_peer_das_scheduled() { - match db.get_item::(&CUSTODY_DB_KEY) { - Ok(Some(PersistedCustodyV24(ssz_v24))) => { - info!("Migrating `CustodyContext` to v26 schema"); - let custody_context_v2 = CustodyContextSsz { - validator_custody_at_head: ssz_v24.validator_custody_at_head, - persisted_is_supernode: ssz_v24.persisted_is_supernode, - epoch_validator_custody_requirements: vec![], - }; - vec![KeyValueStoreOp::PutKeyValue( - DBColumn::CustodyContext, - CUSTODY_DB_KEY.as_slice().to_vec(), - PersistedCustody(custody_context_v2).as_store_bytes(), - )] - } - _ => { - vec![] - } - } - } else { - // Delete it from db if PeerDAS hasn't been scheduled - vec![KeyValueStoreOp::DeleteKey( - DBColumn::CustodyContext, - CUSTODY_DB_KEY.as_slice().to_vec(), - )] - }; - - Ok(ops) -} - -pub fn downgrade_from_v26( - db: Arc>, -) -> Result, Error> { - let res = db.get_item::(&CUSTODY_DB_KEY); - let ops = match res { - Ok(Some(PersistedCustody(ssz_v26))) => { - info!("Migrating `CustodyContext` back from v26 schema"); - let custody_context_v24 = CustodyContextSszV24 { - validator_custody_at_head: ssz_v26.validator_custody_at_head, - persisted_is_supernode: ssz_v26.persisted_is_supernode, - }; - vec![KeyValueStoreOp::PutKeyValue( - DBColumn::CustodyContext, - CUSTODY_DB_KEY.as_slice().to_vec(), - PersistedCustodyV24(custody_context_v24).as_store_bytes(), - )] - } - _ => { - // no op if it's not on the db, as previous versions gracefully handle data missing from disk. 
- vec![] - } - }; - - Ok(ops) -} diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v27.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v27.rs deleted file mode 100644 index fbe865ee27..0000000000 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v27.rs +++ /dev/null @@ -1,26 +0,0 @@ -use crate::BeaconChainTypes; -use std::sync::Arc; -use store::{Error, HotColdDB, metadata::SchemaVersion}; - -/// Add `DataColumnCustodyInfo` entry to v27. -pub fn upgrade_to_v27( - db: Arc>, -) -> Result<(), Error> { - if db.spec.is_peer_das_scheduled() { - db.put_data_column_custody_info(None)?; - db.store_schema_version_atomically(SchemaVersion(27), vec![])?; - } - - Ok(()) -} - -pub fn downgrade_from_v27( - db: Arc>, -) -> Result<(), Error> { - if db.spec.is_peer_das_scheduled() { - return Err(Error::MigrationError( - "Cannot downgrade from v27 if peerDAS is scheduled".to_string(), - )); - } - Ok(()) -} diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v28.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v28.rs deleted file mode 100644 index 5885eaabc0..0000000000 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v28.rs +++ /dev/null @@ -1,152 +0,0 @@ -use crate::{ - BeaconChain, BeaconChainTypes, BeaconForkChoiceStore, PersistedForkChoiceStoreV17, - beacon_chain::FORK_CHOICE_DB_KEY, - persisted_fork_choice::{PersistedForkChoiceV17, PersistedForkChoiceV28}, - summaries_dag::{DAGStateSummary, StateSummariesDAG}, -}; -use fork_choice::{ForkChoice, ForkChoiceStore, ResetPayloadStatuses}; -use std::sync::Arc; -use store::{Error, HotColdDB, KeyValueStoreOp, StoreItem}; -use tracing::{info, warn}; -use types::{EthSpec, Hash256}; - -/// Upgrade `PersistedForkChoice` from V17 to V28. -pub fn upgrade_to_v28( - db: Arc>, -) -> Result, Error> { - let Some(persisted_fork_choice_v17) = - db.get_item::(&FORK_CHOICE_DB_KEY)? 
- else { - warn!("No fork choice found to upgrade to v28"); - return Ok(vec![]); - }; - - // Load state DAG in order to compute justified checkpoint roots. - let state_summaries_dag = { - let state_summaries = db - .load_hot_state_summaries()? - .into_iter() - .map(|(state_root, summary)| (state_root, summary.into())) - .collect::>(); - - StateSummariesDAG::new(state_summaries).map_err(|e| { - Error::MigrationError(format!("Error loading state summaries DAG: {e:?}")) - })? - }; - - // Determine the justified state roots. - let justified_checkpoint = persisted_fork_choice_v17 - .fork_choice_store_v17 - .justified_checkpoint; - let justified_block_root = justified_checkpoint.root; - let justified_slot = justified_checkpoint - .epoch - .start_slot(T::EthSpec::slots_per_epoch()); - let justified_state_root = state_summaries_dag - .state_root_at_slot(justified_block_root, justified_slot) - .ok_or_else(|| { - Error::MigrationError(format!( - "Missing state root for justified slot {justified_slot} with latest_block_root \ - {justified_block_root:?}" - )) - })?; - - let unrealized_justified_checkpoint = persisted_fork_choice_v17 - .fork_choice_store_v17 - .unrealized_justified_checkpoint; - let unrealized_justified_block_root = unrealized_justified_checkpoint.root; - let unrealized_justified_slot = unrealized_justified_checkpoint - .epoch - .start_slot(T::EthSpec::slots_per_epoch()); - let unrealized_justified_state_root = state_summaries_dag - .state_root_at_slot(unrealized_justified_block_root, unrealized_justified_slot) - .ok_or_else(|| { - Error::MigrationError(format!( - "Missing state root for unrealized justified slot {unrealized_justified_slot} \ - with latest_block_root {unrealized_justified_block_root:?}" - )) - })?; - - let fc_store = BeaconForkChoiceStore::from_persisted_v17( - persisted_fork_choice_v17.fork_choice_store_v17, - justified_state_root, - unrealized_justified_state_root, - db.clone(), - ) - .map_err(|e| { - Error::MigrationError(format!( - "Error 
loading fork choice store from persisted: {e:?}" - )) - })?; - - info!( - ?justified_state_root, - %justified_slot, - "Added justified state root to fork choice" - ); - - // Construct top-level ForkChoice struct using the patched fork choice store, and the converted - // proto array. - let reset_payload_statuses = ResetPayloadStatuses::OnlyWithInvalidPayload; - let fork_choice = ForkChoice::from_persisted( - persisted_fork_choice_v17.fork_choice_v17.try_into()?, - reset_payload_statuses, - fc_store, - db.get_chain_spec(), - ) - .map_err(|e| Error::MigrationError(format!("Unable to build ForkChoice: {e:?}")))?; - - let ops = vec![BeaconChain::::persist_fork_choice_in_batch_standalone( - &fork_choice, - db.get_config(), - )?]; - - info!("Upgraded fork choice for DB schema v28"); - - Ok(ops) -} - -pub fn downgrade_from_v28( - db: Arc>, -) -> Result, Error> { - let reset_payload_statuses = ResetPayloadStatuses::OnlyWithInvalidPayload; - let Some(fork_choice) = - BeaconChain::::load_fork_choice(db.clone(), reset_payload_statuses, db.get_chain_spec()) - .map_err(|e| Error::MigrationError(format!("Unable to load fork choice: {e:?}")))? - else { - warn!("No fork choice to downgrade"); - return Ok(vec![]); - }; - - // Recreate V28 persisted fork choice, then convert each field back to its V17 version. - let persisted_fork_choice = PersistedForkChoiceV28 { - fork_choice: fork_choice.to_persisted(), - fork_choice_store: fork_choice.fc_store().to_persisted(), - }; - - let justified_balances = fork_choice.fc_store().justified_balances(); - - // 1. Create `proto_array::PersistedForkChoiceV17`. 
- let fork_choice_v17: fork_choice::PersistedForkChoiceV17 = ( - persisted_fork_choice.fork_choice, - justified_balances.clone(), - ) - .into(); - - let fork_choice_store_v17: PersistedForkChoiceStoreV17 = ( - persisted_fork_choice.fork_choice_store, - justified_balances.clone(), - ) - .into(); - - let persisted_fork_choice_v17 = PersistedForkChoiceV17 { - fork_choice_v17, - fork_choice_store_v17, - }; - - let ops = vec![persisted_fork_choice_v17.as_kv_store_op(FORK_CHOICE_DB_KEY)]; - - info!("Downgraded fork choice for DB schema v28"); - - Ok(ops) -} diff --git a/beacon_node/beacon_chain/src/summaries_dag.rs b/beacon_node/beacon_chain/src/summaries_dag.rs index 4ddcdaab5a..50fc0b3820 100644 --- a/beacon_node/beacon_chain/src/summaries_dag.rs +++ b/beacon_node/beacon_chain/src/summaries_dag.rs @@ -14,14 +14,6 @@ pub struct DAGStateSummary { pub previous_state_root: Hash256, } -#[derive(Debug, Clone, Copy)] -pub struct DAGStateSummaryV22 { - pub slot: Slot, - pub latest_block_root: Hash256, - pub block_slot: Slot, - pub block_parent_root: Hash256, -} - pub struct StateSummariesDAG { // state_root -> state_summary state_summaries_by_state_root: HashMap, @@ -40,10 +32,6 @@ pub enum Error { new_state_summary: (Slot, Hash256), }, MissingStateSummary(Hash256), - MissingStateSummaryByBlockRoot { - state_root: Hash256, - latest_block_root: Hash256, - }, MissingChildStateRoot(Hash256), RequestedSlotAboveSummary { starting_state_root: Hash256, @@ -109,89 +97,6 @@ impl StateSummariesDAG { }) } - /// Computes a DAG from a sequence of state summaries, including their parent block - /// relationships. - /// - /// - Expects summaries to be contiguous per slot: there must exist a summary at every slot - /// of each tree branch - /// - Maybe include multiple disjoint trees. The root of each tree will have a ZERO parent state - /// root, which will error later when calling `previous_state_root`. 
- pub fn new_from_v22( - state_summaries_v22: Vec<(Hash256, DAGStateSummaryV22)>, - ) -> Result { - // Group them by latest block root, and sorted state slot - let mut state_summaries_by_block_root = HashMap::<_, BTreeMap<_, _>>::new(); - for (state_root, summary) in state_summaries_v22.iter() { - let summaries = state_summaries_by_block_root - .entry(summary.latest_block_root) - .or_default(); - - // Sanity check to ensure no duplicate summaries for the tuple (block_root, state_slot) - match summaries.entry(summary.slot) { - Entry::Vacant(entry) => { - entry.insert((state_root, summary)); - } - Entry::Occupied(existing) => { - return Err(Error::DuplicateStateSummary { - block_root: summary.latest_block_root, - existing_state_summary: (summary.slot, *state_root).into(), - new_state_summary: (*existing.key(), *existing.get().0), - }); - } - } - } - - let state_summaries = state_summaries_v22 - .iter() - .map(|(state_root, summary)| { - let previous_state_root = if summary.slot == 0 { - Hash256::ZERO - } else { - let previous_slot = summary.slot - 1; - - // Check the set of states in the same state's block root - let same_block_root_summaries = state_summaries_by_block_root - .get(&summary.latest_block_root) - // Should never error: we construct the HashMap here and must have at least - // one entry per block root - .ok_or(Error::MissingStateSummaryByBlockRoot { - state_root: *state_root, - latest_block_root: summary.latest_block_root, - })?; - if let Some((state_root, _)) = same_block_root_summaries.get(&previous_slot) { - // Skipped slot: block root at previous slot is the same as latest block root. - **state_root - } else { - // Common case: not a skipped slot. - // - // If we can't find a state summmary for the parent block and previous slot, - // then there is some amount of disjointedness in the DAG. We set the parent - // state root to 0x0 in this case, and will prune any dangling states. 
- let parent_block_root = summary.block_parent_root; - state_summaries_by_block_root - .get(&parent_block_root) - .and_then(|parent_block_summaries| { - parent_block_summaries.get(&previous_slot) - }) - .map_or(Hash256::ZERO, |(parent_state_root, _)| **parent_state_root) - } - }; - - Ok(( - *state_root, - DAGStateSummary { - slot: summary.slot, - latest_block_root: summary.latest_block_root, - latest_block_slot: summary.block_slot, - previous_state_root, - }, - )) - }) - .collect::, _>>()?; - - Self::new(state_summaries) - } - // Returns all non-unique latest block roots of a given set of states pub fn blocks_of_states<'a, I: Iterator>( &self, @@ -379,106 +284,3 @@ impl From for DAGStateSummary { } } } - -#[cfg(test)] -mod tests { - use super::{DAGStateSummaryV22, Error, StateSummariesDAG}; - use bls::FixedBytesExtended; - use types::{Hash256, Slot}; - - fn root(n: u64) -> Hash256 { - Hash256::from_low_u64_le(n) - } - - #[test] - fn new_from_v22_empty() { - StateSummariesDAG::new_from_v22(vec![]).unwrap(); - } - - fn assert_previous_state_root_is_zero(dag: &StateSummariesDAG, root: Hash256) { - assert!(matches!( - dag.previous_state_root(root).unwrap_err(), - Error::RootUnknownPreviousStateRoot { .. 
} - )); - } - - #[test] - fn new_from_v22_one_state() { - let root_a = root(0xa); - let root_1 = root(1); - let root_2 = root(2); - let summary_1 = DAGStateSummaryV22 { - slot: Slot::new(1), - latest_block_root: root_1, - block_parent_root: root_2, - block_slot: Slot::new(1), - }; - - let dag = StateSummariesDAG::new_from_v22(vec![(root_a, summary_1)]).unwrap(); - - // The parent of the root summary is ZERO - assert_previous_state_root_is_zero(&dag, root_a); - } - - #[test] - fn new_from_v22_multiple_states() { - let dag = StateSummariesDAG::new_from_v22(vec![ - ( - root(0xa), - DAGStateSummaryV22 { - slot: Slot::new(3), - latest_block_root: root(3), - block_parent_root: root(1), - block_slot: Slot::new(3), - }, - ), - ( - root(0xb), - DAGStateSummaryV22 { - slot: Slot::new(4), - latest_block_root: root(4), - block_parent_root: root(3), - block_slot: Slot::new(4), - }, - ), - // fork 1 - ( - root(0xc), - DAGStateSummaryV22 { - slot: Slot::new(5), - latest_block_root: root(5), - block_parent_root: root(4), - block_slot: Slot::new(5), - }, - ), - // fork 2 - // skipped slot - ( - root(0xd), - DAGStateSummaryV22 { - slot: Slot::new(5), - latest_block_root: root(4), - block_parent_root: root(3), - block_slot: Slot::new(4), - }, - ), - // normal slot - ( - root(0xe), - DAGStateSummaryV22 { - slot: Slot::new(6), - latest_block_root: root(6), - block_parent_root: root(4), - block_slot: Slot::new(6), - }, - ), - ]) - .unwrap(); - - // The parent of the root summary is ZERO - assert_previous_state_root_is_zero(&dag, root(0xa)); - assert_eq!(dag.previous_state_root(root(0xc)).unwrap(), root(0xb)); - assert_eq!(dag.previous_state_root(root(0xd)).unwrap(), root(0xb)); - assert_eq!(dag.previous_state_root(root(0xe)).unwrap(), root(0xd)); - } -} diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 0e187a8f4b..2b4152b550 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ 
b/beacon_node/beacon_chain/tests/store_tests.rs @@ -3995,11 +3995,7 @@ async fn schema_downgrade_to_min_version(store_config: StoreConfig, archive: boo ) .await; - let min_version = if spec.is_fulu_scheduled() { - SchemaVersion(27) - } else { - SchemaVersion(22) - }; + let min_version = CURRENT_SCHEMA_VERSION; // Save the slot clock so that the new harness doesn't revert in time. let slot_clock = harness.chain.slot_clock.clone(); diff --git a/beacon_node/store/src/database/leveldb_impl.rs b/beacon_node/store/src/database/leveldb_impl.rs index 6b8c615631..6e01648263 100644 --- a/beacon_node/store/src/database/leveldb_impl.rs +++ b/beacon_node/store/src/database/leveldb_impl.rs @@ -186,10 +186,8 @@ impl LevelDB { ) }; - for (start_key, end_key) in [ - endpoints(DBColumn::BeaconState), - endpoints(DBColumn::BeaconStateSummary), - ] { + { + let (start_key, end_key) = endpoints(DBColumn::BeaconStateHotSummary); self.db.compact(&start_key, &end_key); } diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 8ef91b3c74..78dd69e55a 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -3270,12 +3270,10 @@ impl, Cold: ItemStore> HotColdDB Some(mut split) => { debug!(?split, "Loaded split partial"); // Load the hot state summary to get the block root. - let latest_block_root = self - .load_block_root_from_summary_any_version(&split.state_root) - .ok_or(HotColdDBError::MissingSplitState( - split.state_root, - split.slot, - ))?; + let latest_block_root = + self.load_block_root_from_summary(&split.state_root).ok_or( + HotColdDBError::MissingSplitState(split.state_root, split.slot), + )?; split.block_root = latest_block_root; Ok(Some(split)) } @@ -3306,29 +3304,11 @@ impl, Cold: ItemStore> HotColdDB .map_err(|e| Error::LoadHotStateSummary(*state_root, e.into())) } - /// Load a hot state's summary in V22 format, given its root. 
- pub fn load_hot_state_summary_v22( - &self, - state_root: &Hash256, - ) -> Result, Error> { - self.hot_db - .get(state_root) - .map_err(|e| Error::LoadHotStateSummary(*state_root, e.into())) - } - - /// Load the latest block root for a hot state summary either in modern form, or V22 form. - /// - /// This function is required to open a V22 database for migration to V24, or vice versa. - pub fn load_block_root_from_summary_any_version( - &self, - state_root: &Hash256, - ) -> Option { + /// Load the latest block root for a hot state summary. + pub fn load_block_root_from_summary(&self, state_root: &Hash256) -> Option { if let Ok(Some(summary)) = self.load_hot_state_summary(state_root) { return Some(summary.latest_block_root); } - if let Ok(Some(summary)) = self.load_hot_state_summary_v22(state_root) { - return Some(summary.latest_block_root); - } None } @@ -4287,30 +4267,6 @@ impl HotStateSummary { } } -/// Legacy hot state summary used in schema V22 and before. -/// -/// This can be deleted when we remove V22 support. -#[derive(Debug, Clone, Copy, Encode, Decode)] -pub struct HotStateSummaryV22 { - pub slot: Slot, - pub latest_block_root: Hash256, - pub epoch_boundary_state_root: Hash256, -} - -impl StoreItem for HotStateSummaryV22 { - fn db_column() -> DBColumn { - DBColumn::BeaconStateSummary - } - - fn as_store_bytes(&self) -> Vec { - self.as_ssz_bytes() - } - - fn from_store_bytes(bytes: &[u8]) -> Result { - Ok(Self::from_ssz_bytes(bytes)?) - } -} - /// Struct for summarising a state in the freezer database. 
#[derive(Debug, Clone, Copy, Default, Encode, Decode)] pub(crate) struct ColdStateSummary { diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs index bfa1200602..bd8caa3ad5 100644 --- a/beacon_node/store/src/lib.rs +++ b/beacon_node/store/src/lib.rs @@ -77,11 +77,7 @@ pub trait KeyValueStore: Sync + Send + Sized + 'static { fn compact(&self) -> Result<(), Error> { // Compact state and block related columns as they are likely to have the most churn, // i.e. entries being created and deleted. - for column in [ - DBColumn::BeaconState, - DBColumn::BeaconStateHotSummary, - DBColumn::BeaconBlock, - ] { + for column in [DBColumn::BeaconStateHotSummary, DBColumn::BeaconBlock] { self.compact_column(column)?; } Ok(()) diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 9744b9fa08..74b287975e 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -6,7 +6,6 @@ use proto_array::{ Block as ProtoBlock, DisallowedReOrgOffsets, ExecutionStatus, JustifiedBalances, ProposerHeadError, ProposerHeadInfo, ProtoArrayForkChoice, ReOrgThreshold, }; -use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use state_processing::{ per_block_processing::errors::AttesterSlashingValidationError, per_epoch_processing, @@ -1529,47 +1528,17 @@ where /// /// This is used when persisting the state of the fork choice to disk. 
#[superstruct( - variants(V17, V28), + variants(V28), variant_attributes(derive(Encode, Decode, Clone)), no_enum )] pub struct PersistedForkChoice { - #[superstruct(only(V17))] - pub proto_array_bytes: Vec, - #[superstruct(only(V28))] pub proto_array: proto_array::core::SszContainerV28, pub queued_attestations: Vec, } pub type PersistedForkChoice = PersistedForkChoiceV28; -impl TryFrom for PersistedForkChoiceV28 { - type Error = ssz::DecodeError; - - fn try_from(v17: PersistedForkChoiceV17) -> Result { - let container_v17 = - proto_array::core::SszContainerV17::from_ssz_bytes(&v17.proto_array_bytes)?; - let container_v28 = container_v17.into(); - - Ok(Self { - proto_array: container_v28, - queued_attestations: v17.queued_attestations, - }) - } -} - -impl From<(PersistedForkChoiceV28, JustifiedBalances)> for PersistedForkChoiceV17 { - fn from((v28, balances): (PersistedForkChoiceV28, JustifiedBalances)) -> Self { - let container_v17 = proto_array::core::SszContainerV17::from((v28.proto_array, balances)); - let proto_array_bytes = container_v17.as_ssz_bytes(); - - Self { - proto_array_bytes, - queued_attestations: v28.queued_attestations, - } - } -} - #[cfg(test)] mod tests { use types::MainnetEthSpec; diff --git a/consensus/fork_choice/src/lib.rs b/consensus/fork_choice/src/lib.rs index afe06dee1b..8cf2936db4 100644 --- a/consensus/fork_choice/src/lib.rs +++ b/consensus/fork_choice/src/lib.rs @@ -5,7 +5,7 @@ mod metrics; pub use crate::fork_choice::{ AttestationFromBlock, Error, ForkChoice, ForkChoiceView, ForkchoiceUpdateParameters, InvalidAttestation, InvalidBlock, PayloadVerificationStatus, PersistedForkChoice, - PersistedForkChoiceV17, PersistedForkChoiceV28, QueuedAttestation, ResetPayloadStatuses, + PersistedForkChoiceV28, QueuedAttestation, ResetPayloadStatuses, }; pub use fork_choice_store::ForkChoiceStore; pub use proto_array::{ diff --git a/consensus/proto_array/src/lib.rs b/consensus/proto_array/src/lib.rs index 964e836d91..04e57d791b 100644 --- 
a/consensus/proto_array/src/lib.rs +++ b/consensus/proto_array/src/lib.rs @@ -16,5 +16,5 @@ pub use error::Error; pub mod core { pub use super::proto_array::{ProposerBoost, ProtoArray, ProtoNode}; pub use super::proto_array_fork_choice::VoteTracker; - pub use super::ssz_container::{SszContainer, SszContainerV17, SszContainerV28}; + pub use super::ssz_container::{SszContainer, SszContainerV28}; } diff --git a/consensus/proto_array/src/ssz_container.rs b/consensus/proto_array/src/ssz_container.rs index 1e01b74c8c..42696256f7 100644 --- a/consensus/proto_array/src/ssz_container.rs +++ b/consensus/proto_array/src/ssz_container.rs @@ -17,14 +17,12 @@ four_byte_option_impl!(four_byte_option_checkpoint, Checkpoint); pub type SszContainer = SszContainerV28; #[superstruct( - variants(V17, V28), + variants(V28), variant_attributes(derive(Encode, Decode, Clone)), no_enum )] pub struct SszContainer { pub votes: Vec, - #[superstruct(only(V17))] - pub balances: Vec, pub prune_threshold: usize, // Deprecated, remove in a future schema migration justified_checkpoint: Checkpoint, @@ -73,34 +71,3 @@ impl TryFrom<(SszContainer, JustifiedBalances)> for ProtoArrayForkChoice { }) } } - -// Convert V17 to V28 by dropping balances. -impl From for SszContainerV28 { - fn from(v17: SszContainerV17) -> Self { - Self { - votes: v17.votes, - prune_threshold: v17.prune_threshold, - justified_checkpoint: v17.justified_checkpoint, - finalized_checkpoint: v17.finalized_checkpoint, - nodes: v17.nodes, - indices: v17.indices, - previous_proposer_boost: v17.previous_proposer_boost, - } - } -} - -// Convert V28 to V17 by re-adding balances. 
-impl From<(SszContainerV28, JustifiedBalances)> for SszContainerV17 { - fn from((v28, balances): (SszContainerV28, JustifiedBalances)) -> Self { - Self { - votes: v28.votes, - balances: balances.effective_balances.clone(), - prune_threshold: v28.prune_threshold, - justified_checkpoint: v28.justified_checkpoint, - finalized_checkpoint: v28.finalized_checkpoint, - nodes: v28.nodes, - indices: v28.indices, - previous_proposer_boost: v28.previous_proposer_boost, - } - } -}