From 9065e4a56e7327d7c084e996d5ee071a0eec038b Mon Sep 17 00:00:00 2001 From: 0xMushow <105550256+0xMushow@users.noreply.github.com> Date: Thu, 4 Dec 2025 14:35:12 +0100 Subject: [PATCH 01/35] fix(beacon_node): add pruning of observed_column_sidecars (#8531) None I noticed that `observed_column_sidecars` is missing its prune call in the finalization handler, which results in a memory leak on long-running nodes (very slow (**7MB/day**)) : https://github.com/sigp/lighthouse/blob/13dfa9200f822c41ccd81b95a3f052df54c888e9/beacon_node/beacon_chain/src/canonical_head.rs#L940-L959 Both caches use the same generic type `ObservedDataSidecars:` https://github.com/sigp/lighthouse/blob/22ec4b327186c4a4a87d2c8c745caf3b36cb6dd6/beacon_node/beacon_chain/src/beacon_chain.rs#L413-L416 The type's documentation explicitly requires manual pruning: > "*The cache supports pruning based upon the finalized epoch. It does not automatically prune, you must call Self::prune manually.*" https://github.com/sigp/lighthouse/blob/b4704eab4ac8edf0ea0282ed9a5758b784038dd2/beacon_node/beacon_chain/src/observed_data_sidecars.rs#L66-L74 Currently: - `observed_blob_sidecars` => pruned - `observed_column_sidecars` => **NOT** pruned Without pruning, the underlying HashMap accumulates entries indefinitely, causing continuous memory growth until the node restarts. Co-Authored-By: Antoine James --- beacon_node/beacon_chain/src/canonical_head.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs index db071db166..1a08ac3f88 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -918,6 +918,13 @@ impl BeaconChain { .start_slot(T::EthSpec::slots_per_epoch()), ); + self.observed_column_sidecars.write().prune( + new_view + .finalized_checkpoint + .epoch + .start_slot(T::EthSpec::slots_per_epoch()), + ); + self.observed_slashable.write().prune( new_view .finalized_checkpoint From d4ec006a3419f15041a02792b1981e68645c501d Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Wed, 18 Feb 2026 14:01:22 +1100 Subject: [PATCH 02/35] Update `time` to fix `cargo audit` failure (#8764) --- Cargo.lock | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8748be726c..7d75f5c197 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6323,9 +6323,9 @@ dependencies = [ [[package]] name = "num-conv" -version = "0.1.0" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" +checksum = "cf97ec579c3c42f953ef76dbf8d55ac91fb219dde70e49aa4a6b7d74e9919050" [[package]] name = "num-integer" @@ -8899,30 +8899,30 @@ dependencies = [ [[package]] name = "time" -version = "0.3.44" +version = "0.3.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91e7d9e3bb61134e77bde20dd4825b97c010155709965fedf0f49bb138e52a9d" +checksum = "743bd48c283afc0388f9b8827b976905fb217ad9e647fae3a379a9283c4def2c" dependencies = [ "deranged", "itoa", "num-conv", "powerfmt", - "serde", + "serde_core", "time-core", "time-macros", ] [[package]] name = "time-core" -version = "0.1.6" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40868e7c1d2f0b8d73e4a8c7f0ff63af4f6d19be117e90bd73eb1d62cf831c6b" +checksum = "7694e1cfe791f8d31026952abf09c69ca6f6fa4e1a1229e18988f06a04a12dca" [[package]] name = "time-macros" -version = "0.2.24" +version = "0.2.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30cfb0125f12d9c277f35663a0a33f8c30190f4e4574868a330595412d34ebf3" +checksum = "2e70e4c5a0e0a8a4823ad65dfe1a6930e4f4d756dcd9dd7939022b5e8c501215" dependencies = [ "num-conv", "time-core", From c4ff9b137c9a2cb8daf7a1cf6b708dc4b0011659 Mon Sep 17 00:00:00 2001 From: Lion - dapplion <35266934+dapplion@users.noreply.github.com> Date: Tue, 17 Feb 2026 20:26:06 -0700 Subject: [PATCH 03/35] Add critical instructions and hooks for Claude Code (#8715) Co-Authored-By: dapplion <35266934+dapplion@users.noreply.github.com> --- .claude/settings.json | 15 +++++++++++++++ .githooks/pre-commit | 5 +++++ CLAUDE.md | 8 ++++++++ Makefile | 6 ++++++ 4 files changed, 34 insertions(+) create mode 100644 .claude/settings.json create mode 100755 .githooks/pre-commit diff --git a/.claude/settings.json b/.claude/settings.json new file mode 100644 index 0000000000..ae426dd254 --- /dev/null +++ b/.claude/settings.json @@ -0,0 +1,15 @@ +{ + "hooks": { + "PostToolUse": [ + { + "matcher": "Edit|Write", + "hooks": [ + { + "type": "command", + "command": "echo '\n[Reminder] Run: cargo fmt --all && make lint-fix'" + } + ] + } + ] + } +} diff --git a/.githooks/pre-commit b/.githooks/pre-commit new file mode 100755 index 0000000000..42a5ca79e0 --- /dev/null +++ b/.githooks/pre-commit @@ -0,0 +1,5 @@ +#!/bin/sh +# Pre-commit hook: runs cargo fmt --check +# Install with: make install-hooks + +exec cargo fmt --check diff --git a/CLAUDE.md b/CLAUDE.md index 441c8e4274..79ed344e35 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -2,6 +2,14 @@ This file provides guidance for AI assistants (Claude Code, Codex, etc.) working with Lighthouse. +## CRITICAL - Always Follow + +After completing ANY code changes: +1. **MUST** run `cargo fmt --all && make lint-fix` to format and fix linting issues +2. **MUST** run `cargo check` to verify compilation before considering task complete + +Run `make install-hooks` if you have not already to install git hooks. Never skip git hooks. If cargo is not available install the toolchain. + ## Quick Reference ```bash diff --git a/Makefile b/Makefile index 0995a869f4..9786c17cc9 100644 --- a/Makefile +++ b/Makefile @@ -361,3 +361,9 @@ clean: cargo clean make -C $(EF_TESTS) clean make -C $(STATE_TRANSITION_VECTORS) clean + +# Installs git hooks from .githooks/ directory +install-hooks: + @ln -sf ../../.githooks/pre-commit .git/hooks/pre-commit + @chmod +x .githooks/pre-commit + @echo "Git hooks installed. Pre-commit hook runs 'cargo fmt --check'." From c61665b3a1efc6b353b57be37816e69825f2bab6 Mon Sep 17 00:00:00 2001 From: Akihito Nakano Date: Wed, 11 Feb 2026 07:49:20 +0900 Subject: [PATCH 04/35] Penalize peers that send an invalid rpc request (#6986) Since https://github.com/sigp/lighthouse/pull/6847, invalid `BlocksByRange`/`BlobsByRange` requests, which do not comply with the spec, are [handled in the Handler](https://github.com/sigp/lighthouse/blob/3d16d1080f5b93193404967dcb5525fa68840ea0/beacon_node/lighthouse_network/src/rpc/handler.rs#L880-L911). Any peer that sends an invalid request is penalized and disconnected. However, other kinds of invalid rpc request, which result in decoding errors, are just dropped. No penalty is applied and the connection with the peer remains. I have added handling for the `ListenUpgradeError` event to notify the application of an `RPCError:InvalidData` error and disconnect to the peer that sent the invalid rpc request. I also added tests for handling invalid rpc requests. Co-Authored-By: ackintosh --- .../lighthouse_network/src/rpc/handler.rs | 17 +- .../lighthouse_network/src/rpc/protocol.rs | 10 +- .../lighthouse_network/tests/rpc_tests.rs | 160 +++++++++++++++++- 3 files changed, 179 insertions(+), 8 deletions(-) diff --git a/beacon_node/lighthouse_network/src/rpc/handler.rs b/beacon_node/lighthouse_network/src/rpc/handler.rs index 720895bbe7..9861119ac1 100644 --- a/beacon_node/lighthouse_network/src/rpc/handler.rs +++ b/beacon_node/lighthouse_network/src/rpc/handler.rs @@ -13,7 +13,8 @@ use futures::prelude::*; use libp2p::PeerId; use libp2p::swarm::handler::{ ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, DialUpgradeError, - FullyNegotiatedInbound, FullyNegotiatedOutbound, StreamUpgradeError, SubstreamProtocol, + FullyNegotiatedInbound, FullyNegotiatedOutbound, ListenUpgradeError, StreamUpgradeError, + SubstreamProtocol, }; use libp2p::swarm::{ConnectionId, Stream}; use logging::crit; @@ -888,6 +889,16 @@ where ConnectionEvent::DialUpgradeError(DialUpgradeError { info, error }) => { self.on_dial_upgrade_error(info, error) } + ConnectionEvent::ListenUpgradeError(ListenUpgradeError { + error: (proto, error), + .. + }) => { + self.events_out.push(HandlerEvent::Err(HandlerErr::Inbound { + id: self.current_inbound_substream_id, + proto, + error, + })); + } _ => { // NOTE: ConnectionEvent is a non exhaustive enum so updates should be based on // release notes more than compiler feedback @@ -924,7 +935,7 @@ where request.count() )), })); - return self.shutdown(None); + return; } } RequestType::BlobsByRange(request) => { @@ -940,7 +951,7 @@ where max_allowed, max_requested_blobs )), })); - return self.shutdown(None); + return; } } _ => {} diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index f0ac9d00f9..34d8efccd1 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -675,7 +675,7 @@ where E: EthSpec, { type Output = InboundOutput; - type Error = RPCError; + type Error = (Protocol, RPCError); type Future = BoxFuture<'static, Result>; fn upgrade_inbound(self, socket: TSocket, protocol: ProtocolId) -> Self::Future { @@ -717,10 +717,12 @@ where ) .await { - Err(e) => Err(RPCError::from(e)), + Err(e) => Err((versioned_protocol.protocol(), RPCError::from(e))), Ok((Some(Ok(request)), stream)) => Ok((request, stream)), - Ok((Some(Err(e)), _)) => Err(e), - Ok((None, _)) => Err(RPCError::IncompleteStream), + Ok((Some(Err(e)), _)) => Err((versioned_protocol.protocol(), e)), + Ok((None, _)) => { + Err((versioned_protocol.protocol(), RPCError::IncompleteStream)) + } } } } diff --git a/beacon_node/lighthouse_network/tests/rpc_tests.rs b/beacon_node/lighthouse_network/tests/rpc_tests.rs index 553cfa6f0d..137136e97e 100644 --- a/beacon_node/lighthouse_network/tests/rpc_tests.rs +++ b/beacon_node/lighthouse_network/tests/rpc_tests.rs @@ -5,8 +5,12 @@ use crate::common::spec_with_all_forks_enabled; use crate::common::{Protocol, build_tracing_subscriber}; use bls::Signature; use fixed_bytes::FixedBytesExtended; +use libp2p::PeerId; use lighthouse_network::rpc::{RequestType, methods::*}; -use lighthouse_network::service::api_types::AppRequestId; +use lighthouse_network::service::api_types::{ + AppRequestId, BlobsByRangeRequestId, BlocksByRangeRequestId, ComponentsByRangeRequestId, + DataColumnsByRangeRequestId, DataColumnsByRangeRequester, RangeRequestId, SyncRequestId, +}; use lighthouse_network::{NetworkEvent, ReportSource, Response}; use ssz::Encode; use ssz_types::{RuntimeVariableList, VariableList}; @@ -1785,3 +1789,157 @@ fn test_active_requests() { } }) } + +// Test that when a node receives an invalid BlocksByRange request exceeding the maximum count, +// it bans the sender. +#[test] +fn test_request_too_large_blocks_by_range() { + let spec = Arc::new(spec_with_all_forks_enabled()); + + test_request_too_large( + AppRequestId::Sync(SyncRequestId::BlocksByRange(BlocksByRangeRequestId { + id: 1, + parent_request_id: ComponentsByRangeRequestId { + id: 1, + requester: RangeRequestId::RangeSync { + chain_id: 1, + batch_id: Epoch::new(1), + }, + }, + })), + RequestType::BlocksByRange(OldBlocksByRangeRequest::new( + 0, + spec.max_request_blocks(ForkName::Base) as u64 + 1, // exceeds the max request defined in the spec. + 1, + )), + ); +} + +// Test that when a node receives an invalid BlobsByRange request exceeding the maximum count, +// it bans the sender. +#[test] +fn test_request_too_large_blobs_by_range() { + let spec = Arc::new(spec_with_all_forks_enabled()); + + let max_request_blobs_count = spec.max_request_blob_sidecars(ForkName::Base) as u64 + / spec.max_blobs_per_block_within_fork(ForkName::Base); + test_request_too_large( + AppRequestId::Sync(SyncRequestId::BlobsByRange(BlobsByRangeRequestId { + id: 1, + parent_request_id: ComponentsByRangeRequestId { + id: 1, + requester: RangeRequestId::RangeSync { + chain_id: 1, + batch_id: Epoch::new(1), + }, + }, + })), + RequestType::BlobsByRange(BlobsByRangeRequest { + start_slot: 0, + count: max_request_blobs_count + 1, // exceeds the max request defined in the spec. + }), + ); +} + +// Test that when a node receives an invalid DataColumnsByRange request exceeding the columns count, +// it bans the sender. +#[test] +fn test_request_too_large_data_columns_by_range() { + test_request_too_large( + AppRequestId::Sync(SyncRequestId::DataColumnsByRange( + DataColumnsByRangeRequestId { + id: 1, + parent_request_id: DataColumnsByRangeRequester::ComponentsByRange( + ComponentsByRangeRequestId { + id: 1, + requester: RangeRequestId::RangeSync { + chain_id: 1, + batch_id: Epoch::new(1), + }, + }, + ), + peer: PeerId::random(), + }, + )), + RequestType::DataColumnsByRange(DataColumnsByRangeRequest { + start_slot: 0, + count: 0, + // exceeds the max request defined in the spec. + columns: vec![0; E::number_of_columns() + 1], + }), + ); +} + +fn test_request_too_large(app_request_id: AppRequestId, request: RequestType) { + // Set up the logging. + let log_level = "debug"; + let enable_logging = true; + let _subscriber = build_tracing_subscriber(log_level, enable_logging); + let rt = Arc::new(Runtime::new().unwrap()); + let spec = Arc::new(spec_with_all_forks_enabled()); + + rt.block_on(async { + let (mut sender, mut receiver) = common::build_node_pair( + Arc::downgrade(&rt), + ForkName::Base, + spec, + Protocol::Tcp, + false, + None, + ) + .await; + + // Build the sender future + let sender_future = async { + loop { + match sender.next_event().await { + NetworkEvent::PeerConnectedOutgoing(peer_id) => { + debug!(?request, %peer_id, "Sending RPC request"); + sender + .send_request(peer_id, app_request_id, request.clone()) + .unwrap(); + } + NetworkEvent::ResponseReceived { + app_request_id, + response, + .. + } => { + debug!(?app_request_id, ?response, "Received response"); + } + NetworkEvent::RPCFailed { error, .. } => { + // This variant should be unreachable, as the receiver doesn't respond with an error when a request exceeds the limit. + debug!(?error, "RPC failed"); + unreachable!(); + } + NetworkEvent::PeerDisconnected(peer_id) => { + // The receiver should disconnect as a result of the invalid request. + debug!(%peer_id, "Peer disconnected"); + // End the test. + return; + } + _ => {} + } + } + } + .instrument(info_span!("Sender")); + + // Build the receiver future + let receiver_future = async { + loop { + if let NetworkEvent::RequestReceived { .. } = receiver.next_event().await { + // This event should be unreachable, as the handler drops the invalid request. + unreachable!(); + } + } + } + .instrument(info_span!("Receiver")); + + tokio::select! { + _ = sender_future => {} + _ = receiver_future => {} + _ = sleep(Duration::from_secs(30)) => { + panic!("Future timed out"); + } + } + }); +} From 691c8cf8e69d4d40c4969c2bc493dc1eed9af99f Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Wed, 18 Feb 2026 15:16:57 +1100 Subject: [PATCH 05/35] Fix duplicate data columns in DataColumnsByRange responses (#8843) Co-Authored-By: Jimmy Chen --- .../network_beacon_processor/rpc_methods.rs | 5 +- .../src/network_beacon_processor/tests.rs | 116 ++++++++++++++++++ 2 files changed, 120 insertions(+), 1 deletion(-) diff --git a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs index 5edd661bb6..279870d444 100644 --- a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs @@ -977,7 +977,10 @@ impl NetworkBeaconProcessor { }; // remove all skip slots i.e. duplicated roots - Ok(block_roots.into_iter().unique().collect::>()) + Ok(block_roots + .into_iter() + .unique_by(|(root, _)| *root) + .collect::>()) } /// Handle a `BlobsByRange` request from the peer. diff --git a/beacon_node/network/src/network_beacon_processor/tests.rs b/beacon_node/network/src/network_beacon_processor/tests.rs index 49b1c0c262..32ca84453a 100644 --- a/beacon_node/network/src/network_beacon_processor/tests.rs +++ b/beacon_node/network/src/network_beacon_processor/tests.rs @@ -120,6 +120,39 @@ impl TestRig { .await } + pub async fn new_with_skip_slots(chain_length: u64, skip_slots: &HashSet) -> Self { + let mut spec = test_spec::(); + spec.shard_committee_period = 2; + let spec = Arc::new(spec); + let beacon_processor_config = BeaconProcessorConfig::default(); + let harness = BeaconChainHarness::builder(MainnetEthSpec) + .spec(spec.clone()) + .deterministic_keypairs(VALIDATOR_COUNT) + .fresh_ephemeral_store() + .mock_execution_layer() + .node_custody_type(NodeCustodyType::Fullnode) + .chain_config(<_>::default()) + .build(); + + harness.advance_slot(); + + for slot in 1..=chain_length { + if !skip_slots.contains(&slot) { + harness + .extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + } + + harness.advance_slot(); + } + + Self::from_harness(harness, beacon_processor_config, spec).await + } + pub async fn new_parametric( chain_length: u64, beacon_processor_config: BeaconProcessorConfig, @@ -150,6 +183,14 @@ impl TestRig { harness.advance_slot(); } + Self::from_harness(harness, beacon_processor_config, spec).await + } + + async fn from_harness( + harness: BeaconChainHarness, + beacon_processor_config: BeaconProcessorConfig, + spec: Arc, + ) -> Self { let head = harness.chain.head_snapshot(); assert_eq!( @@ -1986,3 +2027,78 @@ async fn test_data_columns_by_range_request_only_returns_requested_columns() { "Should have received at least some data columns" ); } + +/// Test that DataColumnsByRange does not return duplicate data columns for skip slots. +/// +/// When skip slots occur, `forwards_iter_block_roots` returns the same block root for +/// consecutive slots. The deduplication in `get_block_roots_from_store` must use +/// `unique_by` on the root (not the full `(root, slot)` tuple) to avoid serving +/// duplicate data columns for the same block. +#[tokio::test] +async fn test_data_columns_by_range_no_duplicates_with_skip_slots() { + if test_spec::().fulu_fork_epoch.is_none() { + return; + }; + + // Build a chain of 128 slots (4 epochs) with skip slots at positions 5 and 6. + // After 4 epochs, finalized_epoch=2 (finalized_slot=64). Requesting slots 0-9 + // satisfies req_start_slot + req_count <= finalized_slot (10 <= 64), which routes + // through `get_block_roots_from_store` — the code path with the bug. + let skip_slots: HashSet = [5, 6].into_iter().collect(); + let mut rig = TestRig::new_with_skip_slots(128, &skip_slots).await; + + let all_custody_columns = rig.chain.custody_columns_for_epoch(Some(Epoch::new(0))); + let requested_column = vec![all_custody_columns[0]]; + + // Request a range that spans the skip slots (slots 0 through 9). + let start_slot = 0; + let slot_count = 10; + + rig.network_beacon_processor + .send_data_columns_by_range_request( + PeerId::random(), + InboundRequestId::new_unchecked(42, 24), + DataColumnsByRangeRequest { + start_slot, + count: slot_count, + columns: requested_column.clone(), + }, + ) + .unwrap(); + + // Collect block roots from all data column responses. + let mut block_roots: Vec = Vec::new(); + + while let Some(next) = rig.network_rx.recv().await { + if let NetworkMessage::SendResponse { + peer_id: _, + response: Response::DataColumnsByRange(data_column), + inbound_request_id: _, + } = next + { + if let Some(column) = data_column { + block_roots.push(column.block_root()); + } else { + break; + } + } else { + panic!("unexpected message {:?}", next); + } + } + + assert!( + !block_roots.is_empty(), + "Should have received at least some data columns" + ); + + // Before the fix, skip slots caused the same block root to appear multiple times + // (once per skip slot) because .unique() on (Hash256, Slot) tuples didn't deduplicate. + let unique_roots: HashSet<_> = block_roots.iter().collect(); + assert_eq!( + block_roots.len(), + unique_roots.len(), + "Response contained duplicate block roots: got {} columns but only {} unique roots", + block_roots.len(), + unique_roots.len(), + ); +} From c5b4580e37b44a57605a07a9acdd5057c1b06010 Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Wed, 18 Feb 2026 09:47:07 +0530 Subject: [PATCH 06/35] Return correct variant for snappy errors (#8841) N/A Handle snappy crate errors as InvalidData instead of IoError. Co-Authored-By: Pawan Dhananjay --- .../lighthouse_network/src/rpc/codec.rs | 42 +++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/beacon_node/lighthouse_network/src/rpc/codec.rs b/beacon_node/lighthouse_network/src/rpc/codec.rs index 36d9726dd9..d1a3182fad 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec.rs @@ -457,6 +457,9 @@ fn handle_error( Ok(None) } } + // All snappy errors from the snap crate bubble up as `Other` kind errors + // that imply invalid response + ErrorKind::Other => Err(RPCError::InvalidData(err.to_string())), _ => Err(RPCError::from(err)), } } @@ -2317,4 +2320,43 @@ mod tests { RPCError::InvalidData(_) )); } + + /// Test invalid snappy response. + #[test] + fn test_invalid_snappy_response() { + let spec = spec_with_all_forks_enabled(); + let fork_ctx = Arc::new(fork_context(ForkName::latest(), &spec)); + let max_packet_size = spec.max_payload_size as usize; // 10 MiB. + + let protocol = ProtocolId::new(SupportedProtocol::BlocksByRangeV2, Encoding::SSZSnappy); + + let mut codec = SSZSnappyOutboundCodec::::new( + protocol.clone(), + max_packet_size, + fork_ctx.clone(), + ); + + let mut payload = BytesMut::new(); + payload.extend_from_slice(&[0u8]); + let deneb_epoch = spec.deneb_fork_epoch.unwrap(); + payload.extend_from_slice(&fork_ctx.context_bytes(deneb_epoch)); + + // Claim the MAXIMUM allowed size (10 MiB) + let claimed_size = max_packet_size; + let mut uvi_codec: Uvi = Uvi::default(); + uvi_codec.encode(claimed_size, &mut payload).unwrap(); + payload.extend_from_slice(&[0xBB; 16]); // Junk snappy. + + let result = codec.decode(&mut payload); + + assert!(result.is_err(), "Expected decode to fail"); + + // IoError = reached snappy decode (allocation happened). + let err = result.unwrap_err(); + assert!( + matches!(err, RPCError::InvalidData(_)), + "Should return invalid data variant {}", + err + ); + } } From be799cb2ad2fb0243cdc2a2f368091ffee29fe8e Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 18 Feb 2026 16:28:17 +1100 Subject: [PATCH 07/35] Validator client head monitor timeout fix (#8846) Fix a bug in v8.1.0 whereby the VC times out continuously with: > Feb 18 02:03:48.030 WARN Head service failed retrying starting next slot error: "Head monitoring stream error, node: 0, error: SseClient(Transport(reqwest::Error { kind: Decode, source: reqwest::Error { kind: Body, source: TimedOut } }))" - Remove the existing timeout for the events API by using `Duration::MAX`. This is necessary as the client is configured with a default timeout. This is the only way to override/remove it. - DO NOT add a `read_timeout` (yet), as this would need to be configured on a per-client basis. We do not want to create a new Client for every call as the early commits on this branch were doing, as this would bypass the TLS cert config, and is also wasteful. Co-Authored-By: hopinheimer Co-Authored-By: Michael Sproul Co-Authored-By: Michael Sproul --- common/eth2/src/lib.rs | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 10382b028a..76b05130d7 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -76,8 +76,6 @@ const HTTP_GET_BEACON_BLOCK_SSZ_TIMEOUT_QUOTIENT: u32 = 4; const HTTP_GET_DEBUG_BEACON_STATE_QUOTIENT: u32 = 4; const HTTP_GET_DEPOSIT_SNAPSHOT_QUOTIENT: u32 = 4; const HTTP_GET_VALIDATOR_BLOCK_TIMEOUT_QUOTIENT: u32 = 4; -// Generally the timeout for events should be longer than a slot. -const HTTP_GET_EVENTS_TIMEOUT_MULTIPLIER: u32 = 50; const HTTP_DEFAULT_TIMEOUT_QUOTIENT: u32 = 4; /// A struct to define a variety of different timeouts for different validator tasks to ensure @@ -98,7 +96,6 @@ pub struct Timeouts { pub get_debug_beacon_states: Duration, pub get_deposit_snapshot: Duration, pub get_validator_block: Duration, - pub events: Duration, pub default: Duration, } @@ -119,7 +116,6 @@ impl Timeouts { get_debug_beacon_states: timeout, get_deposit_snapshot: timeout, get_validator_block: timeout, - events: HTTP_GET_EVENTS_TIMEOUT_MULTIPLIER * timeout, default: timeout, } } @@ -142,7 +138,6 @@ impl Timeouts { get_debug_beacon_states: base_timeout / HTTP_GET_DEBUG_BEACON_STATE_QUOTIENT, get_deposit_snapshot: base_timeout / HTTP_GET_DEPOSIT_SNAPSHOT_QUOTIENT, get_validator_block: base_timeout / HTTP_GET_VALIDATOR_BLOCK_TIMEOUT_QUOTIENT, - events: HTTP_GET_EVENTS_TIMEOUT_MULTIPLIER * base_timeout, default: base_timeout / HTTP_DEFAULT_TIMEOUT_QUOTIENT, } } @@ -2805,10 +2800,14 @@ impl BeaconNodeHttpClient { .join(","); path.query_pairs_mut().append_pair("topics", &topic_string); + // Do not use a timeout for the events endpoint. Using a regular timeout will trigger a + // timeout every `timeout` seconds, regardless of any data streamed from the endpoint. + // In future we could add a read_timeout, but that can only be configured globally on the + // Client. let mut es = self .client .get(path) - .timeout(self.timeouts.events) + .timeout(Duration::MAX) .eventsource() .map_err(Error::SseEventSource)?; // If we don't await `Event::Open` here, then the consumer From 54b35761452d73147a80665614230dd2b5dc2951 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Wed, 18 Feb 2026 20:31:57 +1100 Subject: [PATCH 08/35] Update agent review instructions on large PRs (#8845) Co-Authored-By: Jimmy Chen --- .ai/CODE_REVIEW.md | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/.ai/CODE_REVIEW.md b/.ai/CODE_REVIEW.md index e4da3b22d5..2ce60c80fd 100644 --- a/.ai/CODE_REVIEW.md +++ b/.ai/CODE_REVIEW.md @@ -190,6 +190,14 @@ we typically try to avoid runtime panics outside of startup." - Edge cases handled? - Context provided with errors? +## Large PR Strategy + +Large PRs (10+ files) make it easy to miss subtle bugs in individual files. + +- **Group files by subsystem** (networking, store, types, etc.) and review each group, but pay extra attention to changes that cross subsystem boundaries. +- **Review shared type/interface changes first** — changes to function signatures, return types, or struct definitions ripple through all callers. When reviewing a large PR, identify these first and trace their impact across the codebase. Downstream code may silently change behavior even if it looks untouched. +- **Flag missing test coverage for changed behavior** — if a code path's semantics change (even subtly), check that tests exercise it. If not, flag the gap. + ## Deep Review Techniques ### Verify Against Specifications @@ -275,3 +283,4 @@ Group related state and behavior together. If two fields are always set together - [ ] Tests present: Non-trivial changes have tests - [ ] Lock safety: Lock ordering is safe and documented - [ ] No blocking: Async code doesn't block runtime + From fab77f4fc9fc8fc5d9e9b9d82999cdd015d14859 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 19 Feb 2026 08:55:13 +1100 Subject: [PATCH 09/35] Skip payload_invalidation tests prior to Bellatrix (#8856) Fix the failure of the beacon-chain tests for phase0/altair, which now only runs nightly. Just skip the payload invalidation tests, they don't make any sense prior to Bellatrix anyway. Co-Authored-By: Michael Sproul --- .../tests/payload_invalidation.rs | 68 ++++++++++++++++++- 1 file changed, 67 insertions(+), 1 deletion(-) diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index eb8e57a5d5..7fd70f0e77 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -6,7 +6,7 @@ use beacon_chain::{ INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, NotifyExecutionLayer, OverrideForkchoiceUpdate, StateSkipConfig, WhenSlotSkipped, canonical_head::{CachedHead, CanonicalHead}, - test_utils::{BeaconChainHarness, EphemeralHarnessType, test_spec}, + test_utils::{BeaconChainHarness, EphemeralHarnessType, fork_name_from_env, test_spec}, }; use execution_layer::{ ExecutionLayer, ForkchoiceState, PayloadAttributes, @@ -389,6 +389,9 @@ impl InvalidPayloadRig { /// Simple test of the different import types. #[tokio::test] async fn valid_invalid_syncing() { + if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) { + return; + } let mut rig = InvalidPayloadRig::new(); rig.move_to_terminal_block(); @@ -404,6 +407,9 @@ async fn valid_invalid_syncing() { /// `latest_valid_hash`. #[tokio::test] async fn invalid_payload_invalidates_parent() { + if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) { + return; + } let mut rig = InvalidPayloadRig::new().enable_attestations(); rig.move_to_terminal_block(); rig.import_block(Payload::Valid).await; // Import a valid transition block. @@ -460,6 +466,9 @@ async fn immediate_forkchoice_update_invalid_test( #[tokio::test] async fn immediate_forkchoice_update_payload_invalid() { + if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) { + return; + } immediate_forkchoice_update_invalid_test(|latest_valid_hash| Payload::Invalid { latest_valid_hash, }) @@ -468,11 +477,17 @@ async fn immediate_forkchoice_update_payload_invalid() { #[tokio::test] async fn immediate_forkchoice_update_payload_invalid_block_hash() { + if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) { + return; + } immediate_forkchoice_update_invalid_test(|_| Payload::InvalidBlockHash).await } #[tokio::test] async fn immediate_forkchoice_update_payload_invalid_terminal_block() { + if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) { + return; + } immediate_forkchoice_update_invalid_test(|_| Payload::Invalid { latest_valid_hash: Some(ExecutionBlockHash::zero()), }) @@ -482,6 +497,9 @@ async fn immediate_forkchoice_update_payload_invalid_terminal_block() { /// Ensure the client tries to exit when the justified checkpoint is invalidated. #[tokio::test] async fn justified_checkpoint_becomes_invalid() { + if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) { + return; + } let mut rig = InvalidPayloadRig::new().enable_attestations(); rig.move_to_terminal_block(); rig.import_block(Payload::Valid).await; // Import a valid transition block. @@ -524,6 +542,9 @@ async fn justified_checkpoint_becomes_invalid() { /// Ensure that a `latest_valid_hash` for a pre-finality block only reverts a single block. #[tokio::test] async fn pre_finalized_latest_valid_hash() { + if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) { + return; + } let num_blocks = E::slots_per_epoch() * 4; let finalized_epoch = 2; @@ -571,6 +592,9 @@ async fn pre_finalized_latest_valid_hash() { /// - Will not validate `latest_valid_root` and its ancestors. #[tokio::test] async fn latest_valid_hash_will_not_validate() { + if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) { + return; + } const LATEST_VALID_SLOT: u64 = 3; let mut rig = InvalidPayloadRig::new().enable_attestations(); @@ -618,6 +642,9 @@ async fn latest_valid_hash_will_not_validate() { /// Check behaviour when the `latest_valid_hash` is a junk value. #[tokio::test] async fn latest_valid_hash_is_junk() { + if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) { + return; + } let num_blocks = E::slots_per_epoch() * 5; let finalized_epoch = 3; @@ -659,6 +686,9 @@ async fn latest_valid_hash_is_junk() { /// Check that descendants of invalid blocks are also invalidated. #[tokio::test] async fn invalidates_all_descendants() { + if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) { + return; + } let num_blocks = E::slots_per_epoch() * 4 + E::slots_per_epoch() / 2; let finalized_epoch = 2; let finalized_slot = E::slots_per_epoch() * 2; @@ -766,6 +796,9 @@ async fn invalidates_all_descendants() { /// Check that the head will switch after the canonical branch is invalidated. #[tokio::test] async fn switches_heads() { + if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) { + return; + } let num_blocks = E::slots_per_epoch() * 4 + E::slots_per_epoch() / 2; let finalized_epoch = 2; let finalized_slot = E::slots_per_epoch() * 2; @@ -869,6 +902,9 @@ async fn switches_heads() { #[tokio::test] async fn invalid_during_processing() { + if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) { + return; + } let mut rig = InvalidPayloadRig::new(); rig.move_to_terminal_block(); @@ -901,6 +937,9 @@ async fn invalid_during_processing() { #[tokio::test] async fn invalid_after_optimistic_sync() { + if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) { + return; + } let mut rig = InvalidPayloadRig::new().enable_attestations(); rig.move_to_terminal_block(); rig.import_block(Payload::Valid).await; // Import a valid transition block. @@ -939,6 +978,9 @@ async fn invalid_after_optimistic_sync() { #[tokio::test] async fn manually_validate_child() { + if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) { + return; + } let mut rig = InvalidPayloadRig::new().enable_attestations(); rig.move_to_terminal_block(); rig.import_block(Payload::Valid).await; // Import a valid transition block. @@ -957,6 +999,9 @@ async fn manually_validate_child() { #[tokio::test] async fn manually_validate_parent() { + if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) { + return; + } let mut rig = InvalidPayloadRig::new().enable_attestations(); rig.move_to_terminal_block(); rig.import_block(Payload::Valid).await; // Import a valid transition block. @@ -975,6 +1020,9 @@ async fn manually_validate_parent() { #[tokio::test] async fn payload_preparation() { + if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) { + return; + } let mut rig = InvalidPayloadRig::new(); rig.move_to_terminal_block(); rig.import_block(Payload::Valid).await; @@ -1036,6 +1084,9 @@ async fn payload_preparation() { #[tokio::test] async fn invalid_parent() { + if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) { + return; + } let mut rig = InvalidPayloadRig::new(); rig.move_to_terminal_block(); rig.import_block(Payload::Valid).await; // Import a valid transition block. @@ -1108,6 +1159,9 @@ async fn invalid_parent() { /// Tests to ensure that we will still send a proposer preparation #[tokio::test] async fn payload_preparation_before_transition_block() { + if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) { + return; + } let rig = InvalidPayloadRig::new(); let el = rig.execution_layer(); @@ -1180,6 +1234,9 @@ async fn payload_preparation_before_transition_block() { #[tokio::test] async fn attesting_to_optimistic_head() { + if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) { + return; + } let mut rig = InvalidPayloadRig::new(); rig.move_to_terminal_block(); rig.import_block(Payload::Valid).await; // Import a valid transition block. @@ -1392,6 +1449,9 @@ impl InvalidHeadSetup { #[tokio::test] async fn recover_from_invalid_head_by_importing_blocks() { + if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) { + return; + } let InvalidHeadSetup { rig, fork_block, @@ -1437,6 +1497,9 @@ async fn recover_from_invalid_head_by_importing_blocks() { #[tokio::test] async fn recover_from_invalid_head_after_persist_and_reboot() { + if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) { + return; + } let InvalidHeadSetup { rig, fork_block: _, @@ -1479,6 +1542,9 @@ async fn recover_from_invalid_head_after_persist_and_reboot() { #[tokio::test] async fn weights_after_resetting_optimistic_status() { + if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) { + return; + } let mut rig = InvalidPayloadRig::new().enable_attestations(); rig.move_to_terminal_block(); rig.import_block(Payload::Valid).await; // Import a valid transition block. From 5e2d296de619d582ceff214584d8056776c05fd7 Mon Sep 17 00:00:00 2001 From: chonghe <44791194+chong-he@users.noreply.github.com> Date: Thu, 19 Feb 2026 05:55:16 +0800 Subject: [PATCH 10/35] Validator manager import to allow overriding fields with CLI flag (#7684) * #7651 Co-Authored-By: Tan Chee Keong Co-Authored-By: chonghe <44791194+chong-he@users.noreply.github.com> Co-Authored-By: Lion - dapplion <35266934+dapplion@users.noreply.github.com> --- book/src/help_vm_import.md | 3 + lighthouse/tests/validator_manager.rs | 65 +++++++++ validator_manager/src/import_validators.rs | 161 +++++++++++++++------ 3 files changed, 183 insertions(+), 46 deletions(-) diff --git a/book/src/help_vm_import.md b/book/src/help_vm_import.md index 3c768f6705..09c1b74f4d 100644 --- a/book/src/help_vm_import.md +++ b/book/src/help_vm_import.md @@ -24,6 +24,9 @@ Options: --debug-level Specifies the verbosity level used when emitting logs to the terminal. [default: info] [possible values: info, debug, trace, warn, error] + --enabled + When provided, the imported validator will be enabled or disabled. + [possible values: true, false] --gas-limit When provided, the imported validator will use this gas limit. It is recommended to leave this as the default value by not specifying this diff --git a/lighthouse/tests/validator_manager.rs b/lighthouse/tests/validator_manager.rs index d6d720a561..9bad1cdc91 100644 --- a/lighthouse/tests/validator_manager.rs +++ b/lighthouse/tests/validator_manager.rs @@ -16,6 +16,7 @@ use validator_manager::{ list_validators::ListConfig, move_validators::{MoveConfig, PasswordSource, Validators}, }; +use zeroize::Zeroizing; const EXAMPLE_ETH1_ADDRESS: &str = "0x00000000219ab540356cBB839Cbe05303d7705Fa"; @@ -280,6 +281,40 @@ pub fn validator_import_using_both_file_flags() { .assert_failed(); } +#[test] +pub fn validator_import_keystore_file_without_password_flag_should_fail() { + CommandLineTest::validators_import() + .flag("--vc-token", Some("./token.json")) + .flag("--keystore-file", Some("./keystore.json")) + .assert_failed(); +} + +#[test] +pub fn validator_import_keystore_file_with_password_flag_should_pass() { + CommandLineTest::validators_import() + .flag("--vc-token", Some("./token.json")) + .flag("--keystore-file", Some("./keystore.json")) + .flag("--password", Some("abcd")) + .assert_success(|config| { + let expected = ImportConfig { + validators_file_path: None, + keystore_file_path: Some(PathBuf::from("./keystore.json")), + vc_url: SensitiveUrl::parse("http://localhost:5062").unwrap(), + vc_token_path: PathBuf::from("./token.json"), + ignore_duplicates: false, + password: Some(Zeroizing::new("abcd".into())), + fee_recipient: None, + builder_boost_factor: None, + gas_limit: None, + builder_proposals: None, + enabled: None, + prefer_builder_proposals: None, + }; + assert_eq!(expected, config); + println!("{:?}", expected); + }); +} + #[test] pub fn validator_import_missing_both_file_flags() { CommandLineTest::validators_import() @@ -287,6 +322,36 @@ pub fn validator_import_missing_both_file_flags() { .assert_failed(); } +#[test] +pub fn validator_import_fee_recipient_override() { + CommandLineTest::validators_import() + .flag("--validators-file", Some("./vals.json")) + .flag("--vc-token", Some("./token.json")) + .flag("--suggested-fee-recipient", Some(EXAMPLE_ETH1_ADDRESS)) + .flag("--gas-limit", Some("1337")) + .flag("--builder-proposals", Some("true")) + .flag("--builder-boost-factor", Some("150")) + .flag("--prefer-builder-proposals", Some("true")) + .flag("--enabled", Some("false")) + .assert_success(|config| { + let expected = ImportConfig { + validators_file_path: Some(PathBuf::from("./vals.json")), + keystore_file_path: None, + vc_url: SensitiveUrl::parse("http://localhost:5062").unwrap(), + vc_token_path: PathBuf::from("./token.json"), + ignore_duplicates: false, + password: None, + fee_recipient: Some(Address::from_str(EXAMPLE_ETH1_ADDRESS).unwrap()), + builder_boost_factor: Some(150), + gas_limit: Some(1337), + builder_proposals: Some(true), + enabled: Some(false), + prefer_builder_proposals: Some(true), + }; + assert_eq!(expected, config); + }); +} + #[test] pub fn validator_move_defaults() { CommandLineTest::validators_move() diff --git a/validator_manager/src/import_validators.rs b/validator_manager/src/import_validators.rs index 24917f7d1b..0d6d358edb 100644 --- a/validator_manager/src/import_validators.rs +++ b/validator_manager/src/import_validators.rs @@ -112,8 +112,7 @@ pub fn cli_app() -> Command { .value_name("ETH1_ADDRESS") .help("When provided, the imported validator will use the suggested fee recipient. Omit this flag to use the default value from the VC.") .action(ArgAction::Set) - .display_order(0) - .requires(KEYSTORE_FILE_FLAG), + .display_order(0), ) .arg( Arg::new(GAS_LIMIT) @@ -122,8 +121,7 @@ pub fn cli_app() -> Command { .help("When provided, the imported validator will use this gas limit. It is recommended \ to leave this as the default value by not specifying this flag.",) .action(ArgAction::Set) - .display_order(0) - .requires(KEYSTORE_FILE_FLAG), + .display_order(0), ) .arg( Arg::new(BUILDER_PROPOSALS) @@ -132,8 +130,7 @@ pub fn cli_app() -> Command { blocks via builder rather than the local EL.",) .value_parser(["true","false"]) .action(ArgAction::Set) - .display_order(0) - .requires(KEYSTORE_FILE_FLAG), + .display_order(0), ) .arg( Arg::new(BUILDER_BOOST_FACTOR) @@ -144,8 +141,7 @@ pub fn cli_app() -> Command { when choosing between a builder payload header and payload from \ the local execution node.",) .action(ArgAction::Set) - .display_order(0) - .requires(KEYSTORE_FILE_FLAG), + .display_order(0), ) .arg( Arg::new(PREFER_BUILDER_PROPOSALS) @@ -154,8 +150,16 @@ pub fn cli_app() -> Command { constructed by builders, regardless of payload value.",) .value_parser(["true","false"]) .action(ArgAction::Set) - .display_order(0) - .requires(KEYSTORE_FILE_FLAG), + .display_order(0), + ) + .arg( + Arg::new(ENABLED) + .long(ENABLED) + .help("When provided, the imported validator will be \ + enabled or disabled.",) + .value_parser(["true","false"]) + .action(ArgAction::Set) + .display_order(0), ) } @@ -225,48 +229,113 @@ async fn run(config: ImportConfig) -> Result<(), String> { enabled, } = config; - let validators: Vec = - if let Some(validators_format_path) = &validators_file_path { - if !validators_format_path.exists() { - return Err(format!( - "Unable to find file at {:?}", - validators_format_path - )); - } + let validators: Vec = if let Some(validators_format_path) = + &validators_file_path + { + if !validators_format_path.exists() { + return Err(format!( + "Unable to find file at {:?}", + validators_format_path + )); + } - let validators_file = fs::OpenOptions::new() - .read(true) - .create(false) - .open(validators_format_path) - .map_err(|e| format!("Unable to open {:?}: {:?}", validators_format_path, e))?; + let validators_file = fs::OpenOptions::new() + .read(true) + .create(false) + .open(validators_format_path) + .map_err(|e| format!("Unable to open {:?}: {:?}", validators_format_path, e))?; - serde_json::from_reader(&validators_file).map_err(|e| { + // Define validators as mutable so that if a relevant flag is supplied, the fields can be overridden. + let mut validators: Vec = serde_json::from_reader(&validators_file) + .map_err(|e| { format!( "Unable to parse JSON in {:?}: {:?}", validators_format_path, e ) - })? - } else if let Some(keystore_format_path) = &keystore_file_path { - vec![ValidatorSpecification { - voting_keystore: KeystoreJsonStr( - Keystore::from_json_file(keystore_format_path).map_err(|e| format!("{e:?}"))?, - ), - voting_keystore_password: password.ok_or_else(|| { - "The --password flag is required to supply the keystore password".to_string() - })?, - slashing_protection: None, - fee_recipient, - gas_limit, - builder_proposals, - builder_boost_factor, - prefer_builder_proposals, - enabled, - }] - } else { - return Err(format!( - "One of the flag --{VALIDATORS_FILE_FLAG} or --{KEYSTORE_FILE_FLAG} is required." - )); - }; + })?; + + // Log the overridden note when one or more flags is supplied + if let Some(override_fee_recipient) = fee_recipient { + eprintln!( + "Please note! --suggested-fee-recipient is provided. This will override existing fee recipient defined in validators.json with: {:?}", + override_fee_recipient + ); + } + if let Some(override_gas_limit) = gas_limit { + eprintln!( + "Please note! --gas-limit is provided. This will override existing gas limit defined in validators.json with: {}", + override_gas_limit + ); + } + if let Some(override_builder_proposals) = builder_proposals { + eprintln!( + "Please note! --builder-proposals is provided. This will override existing builder proposal setting defined in validators.json with: {}", + override_builder_proposals + ); + } + if let Some(override_builder_boost_factor) = builder_boost_factor { + eprintln!( + "Please note! --builder-boost-factor is provided. This will override existing builder boost factor defined in validators.json with: {}", + override_builder_boost_factor + ); + } + if let Some(override_prefer_builder_proposals) = prefer_builder_proposals { + eprintln!( + "Please note! --prefer-builder-proposals is provided. This will override existing prefer builder proposal setting defined in validators.json with: {}", + override_prefer_builder_proposals + ); + } + if let Some(override_enabled) = enabled { + eprintln!( + "Please note! --enabled flag is provided. This will override existing setting defined in validators.json with: {}", + override_enabled + ); + } + + // Override the fields in validators.json file if the flag is supplied + for validator in &mut validators { + if let Some(override_fee_recipient) = fee_recipient { + validator.fee_recipient = Some(override_fee_recipient); + } + if let Some(override_gas_limit) = gas_limit { + validator.gas_limit = Some(override_gas_limit); + } + if let Some(override_builder_proposals) = builder_proposals { + validator.builder_proposals = Some(override_builder_proposals); + } + if let Some(override_builder_boost_factor) = builder_boost_factor { + validator.builder_boost_factor = Some(override_builder_boost_factor); + } + if let Some(override_prefer_builder_proposals) = prefer_builder_proposals { + validator.prefer_builder_proposals = Some(override_prefer_builder_proposals); + } + if let Some(override_enabled) = enabled { + validator.enabled = Some(override_enabled); + } + } + + validators + } else if let Some(keystore_format_path) = &keystore_file_path { + vec![ValidatorSpecification { + voting_keystore: KeystoreJsonStr( + Keystore::from_json_file(keystore_format_path).map_err(|e| format!("{e:?}"))?, + ), + voting_keystore_password: password.ok_or_else(|| { + "The --password flag is required to supply the keystore password".to_string() + })?, + slashing_protection: None, + fee_recipient, + gas_limit, + builder_proposals, + builder_boost_factor, + prefer_builder_proposals, + enabled, + }] + } else { + return Err(format!( + "One of the flag --{VALIDATORS_FILE_FLAG} or --{KEYSTORE_FILE_FLAG} is required." + )); + }; let count = validators.len(); From 2d91009ab4b6452b50371aa759e40a3a7dc9be4a Mon Sep 17 00:00:00 2001 From: Mac L Date: Thu, 19 Feb 2026 23:32:42 +0400 Subject: [PATCH 11/35] Bump sqlite deps to remove `hashlink 0.8` (#8866) #8547 Bump the following crates to remove `hashlink 0.8`: - `rusqlite` - `r2d2-sqlite` - `yaml-rust2` Co-Authored-By: Mac L --- Cargo.lock | 70 +++++++++++++------ Cargo.toml | 2 +- consensus/int_to_bytes/Cargo.toml | 2 +- .../slashing_protection/Cargo.toml | 2 +- 4 files changed, 50 insertions(+), 26 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5a8e76a8a8..419ba679db 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3427,9 +3427,9 @@ dependencies = [ [[package]] name = "fallible-iterator" -version = "0.2.0" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7" +checksum = "2acce4a10f12dc2fb14a218589d4f1f62ef011b2d0cc4b3cb1bba8e94da14649" [[package]] name = "fallible-streaming-iterator" @@ -3933,7 +3933,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" dependencies = [ "ahash", - "allocator-api2", ] [[package]] @@ -3958,15 +3957,6 @@ dependencies = [ "serde_core", ] -[[package]] -name = "hashlink" -version = "0.8.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8094feaf31ff591f651a2664fb9cfd92bba7a60ce3197265e9482ebe753c8f7" -dependencies = [ - "hashbrown 0.14.5", -] - [[package]] name = "hashlink" version = "0.9.1" @@ -3985,6 +3975,15 @@ dependencies = [ "hashbrown 0.15.5", ] +[[package]] +name = "hashlink" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea0b22561a9c04a7cb1a302c013e0259cd3b4bb619f145b32f72b8b4bcbed230" +dependencies = [ + "hashbrown 0.16.1", +] + [[package]] name = "hdrhistogram" version = "7.5.4" @@ -5323,9 +5322,9 @@ dependencies = [ [[package]] name = "libsqlite3-sys" -version = "0.25.2" +version = "0.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29f835d03d717946d28b1d1ed632eb6f0e24a299388ee623d0c23118d3e8a7fa" +checksum = "95b4103cffefa72eb8428cb6b47d6627161e51c2739fc5e3b734584157bc642a" dependencies = [ "cc", "pkg-config", @@ -7163,12 +7162,13 @@ dependencies = [ [[package]] name = "r2d2_sqlite" -version = "0.21.0" +version = "0.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4f5d0337e99cd5cacd91ffc326c6cc9d8078def459df560c4f9bf9ba4a51034" +checksum = "a2ebd03c29250cdf191da93a35118b4567c2ef0eacab54f65e058d6f4c9965f6" dependencies = [ "r2d2", "rusqlite", + "uuid 1.19.0", ] [[package]] @@ -7503,6 +7503,16 @@ dependencies = [ "archery", ] +[[package]] +name = "rsqlite-vfs" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8a1f2315036ef6b1fbacd1972e8ee7688030b0a2121edfc2a6550febd41574d" +dependencies = [ + "hashbrown 0.16.1", + "thiserror 2.0.17", +] + [[package]] name = "rtnetlink" version = "0.13.1" @@ -7558,16 +7568,17 @@ checksum = "48fd7bd8a6377e15ad9d42a8ec25371b94ddc67abe7c8b9127bec79bebaaae18" [[package]] name = "rusqlite" -version = "0.28.0" +version = "0.38.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01e213bc3ecb39ac32e81e51ebe31fd888a940515173e3a18a35f8c6e896422a" +checksum = "f1c93dd1c9683b438c392c492109cb702b8090b2bfc8fed6f6e4eb4523f17af3" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.10.0", "fallible-iterator", "fallible-streaming-iterator", - "hashlink 0.8.4", + "hashlink 0.11.0", "libsqlite3-sys", "smallvec", + "sqlite-wasm-rs", ] [[package]] @@ -8374,6 +8385,18 @@ dependencies = [ "der", ] +[[package]] +name = "sqlite-wasm-rs" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f4206ed3a67690b9c29b77d728f6acc3ce78f16bf846d83c94f76400320181b" +dependencies = [ + "cc", + "js-sys", + "rsqlite-vfs", + "wasm-bindgen", +] + [[package]] name = "ssz_types" version = "0.14.0" @@ -9514,6 +9537,7 @@ checksum = "e2e054861b4bd027cd373e18e8d8d8e6548085000e41290d95ce0c373a654b4a" dependencies = [ "getrandom 0.3.4", "js-sys", + "rand 0.9.2", "wasm-bindgen", ] @@ -10479,13 +10503,13 @@ dependencies = [ [[package]] name = "yaml-rust2" -version = "0.8.1" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8902160c4e6f2fb145dbe9d6760a75e3c9522d8bf796ed7047c85919ac7115f8" +checksum = "631a50d867fafb7093e709d75aaee9e0e0d5deb934021fcea25ac2fe09edc51e" dependencies = [ "arraydeque", "encoding_rs", - "hashlink 0.8.4", + "hashlink 0.11.0", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 98e8c057b5..44f3a60b2f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -227,7 +227,7 @@ reqwest = { version = "0.12", default-features = false, features = [ ] } ring = "0.17" rpds = "0.11" -rusqlite = { version = "0.28", features = ["bundled"] } +rusqlite = { version = "0.38", features = ["bundled"] } rust_eth_kzg = "0.9" safe_arith = "0.1" sensitive_url = { version = "0.1", features = ["serde"] } diff --git a/consensus/int_to_bytes/Cargo.toml b/consensus/int_to_bytes/Cargo.toml index c639dfce8d..75196d7437 100644 --- a/consensus/int_to_bytes/Cargo.toml +++ b/consensus/int_to_bytes/Cargo.toml @@ -9,4 +9,4 @@ bytes = { workspace = true } [dev-dependencies] hex = { workspace = true } -yaml-rust2 = "0.8" +yaml-rust2 = "0.11" diff --git a/validator_client/slashing_protection/Cargo.toml b/validator_client/slashing_protection/Cargo.toml index 86df6d01fe..45244c2e62 100644 --- a/validator_client/slashing_protection/Cargo.toml +++ b/validator_client/slashing_protection/Cargo.toml @@ -17,7 +17,7 @@ ethereum_serde_utils = { workspace = true } filesystem = { workspace = true } fixed_bytes = { workspace = true } r2d2 = { workspace = true } -r2d2_sqlite = "0.21.0" +r2d2_sqlite = "0.32" rusqlite = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } From 9cb72100d4c6f008ee5f2ac7274bd7f12128b4eb Mon Sep 17 00:00:00 2001 From: Mac L Date: Thu, 19 Feb 2026 23:32:46 +0400 Subject: [PATCH 12/35] Feature-gate all uses of `arbitrary` (#8867) Feature gate all uses of `arbitrary` so it is not compiled during release builds. Co-Authored-By: Mac L --- Cargo.toml | 2 +- consensus/state_processing/Cargo.toml | 4 +++- consensus/state_processing/src/verify_operation.rs | 12 +++++++++--- consensus/types/Cargo.toml | 1 + crypto/bls/Cargo.toml | 4 ++-- crypto/kzg/Cargo.toml | 3 ++- crypto/kzg/src/kzg_commitment.rs | 1 + crypto/kzg/src/kzg_proof.rs | 1 + validator_client/slashing_protection/Cargo.toml | 4 ++-- 9 files changed, 22 insertions(+), 10 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 44f3a60b2f..3b5a7dd6ba 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -240,7 +240,7 @@ signing_method = { path = "validator_client/signing_method" } slasher = { path = "slasher", default-features = false } slashing_protection = { path = "validator_client/slashing_protection" } slot_clock = { path = "common/slot_clock" } -smallvec = { version = "1.11.2", features = ["arbitrary"] } +smallvec = "1" snap = "1" ssz_types = { version = "0.14.0", features = ["context_deserialize", "runtime_types"] } state_processing = { path = "consensus/state_processing" } diff --git a/consensus/state_processing/Cargo.toml b/consensus/state_processing/Cargo.toml index a83e443e80..7426995439 100644 --- a/consensus/state_processing/Cargo.toml +++ b/consensus/state_processing/Cargo.toml @@ -8,6 +8,8 @@ edition = { workspace = true } default = [] fake_crypto = ["bls/fake_crypto"] arbitrary-fuzz = [ + "dep:arbitrary", + "smallvec/arbitrary", "types/arbitrary-fuzz", "merkle_proof/arbitrary", "ethereum_ssz/arbitrary", @@ -17,7 +19,7 @@ arbitrary-fuzz = [ portable = ["bls/supranational-portable"] [dependencies] -arbitrary = { workspace = true } +arbitrary = { workspace = true, optional = true } bls = { workspace = true } educe = { workspace = true } ethereum_hashing = { workspace = true } diff --git a/consensus/state_processing/src/verify_operation.rs b/consensus/state_processing/src/verify_operation.rs index 1f76f19586..a13786f9f6 100644 --- a/consensus/state_processing/src/verify_operation.rs +++ b/consensus/state_processing/src/verify_operation.rs @@ -7,6 +7,7 @@ use crate::per_block_processing::{ verify_attester_slashing, verify_bls_to_execution_change, verify_exit, verify_proposer_slashing, }; +#[cfg(feature = "arbitrary-fuzz")] use arbitrary::Arbitrary; use educe::Educe; use smallvec::{SmallVec, smallvec}; @@ -39,13 +40,17 @@ pub trait TransformPersist { /// /// The inner `op` field is private, meaning instances of this type can only be constructed /// by calling `validate`. -#[derive(Educe, Debug, Clone, Arbitrary)] +#[derive(Educe, Debug, Clone)] +#[cfg_attr(feature = "arbitrary-fuzz", derive(Arbitrary))] #[educe( PartialEq, Eq, Hash(bound(T: TransformPersist + std::hash::Hash, E: EthSpec)) )] -#[arbitrary(bound = "T: TransformPersist + Arbitrary<'arbitrary>, E: EthSpec")] +#[cfg_attr( + feature = "arbitrary-fuzz", + arbitrary(bound = "T: TransformPersist + Arbitrary<'arbitrary>, E: EthSpec") +)] pub struct SigVerifiedOp { op: T, verified_against: VerifiedAgainst, @@ -133,7 +138,8 @@ struct SigVerifiedOpDecode { /// /// We need to store multiple `ForkVersion`s because attester slashings contain two indexed /// attestations which may be signed using different versions. -#[derive(Debug, PartialEq, Eq, Clone, Hash, Encode, Decode, TestRandom, Arbitrary)] +#[derive(Debug, PartialEq, Eq, Clone, Hash, Encode, Decode, TestRandom)] +#[cfg_attr(feature = "arbitrary-fuzz", derive(Arbitrary))] pub struct VerifiedAgainst { fork_versions: SmallVec<[ForkVersion; MAX_FORKS_VERIFIED_AGAINST]>, } diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index a4b879ddb2..e7e382714b 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -16,6 +16,7 @@ sqlite = ["dep:rusqlite"] arbitrary = [ "dep:arbitrary", "bls/arbitrary", + "kzg/arbitrary", "ethereum_ssz/arbitrary", "milhouse/arbitrary", "ssz_types/arbitrary", diff --git a/crypto/bls/Cargo.toml b/crypto/bls/Cargo.toml index 4661288679..ac04e1fecf 100644 --- a/crypto/bls/Cargo.toml +++ b/crypto/bls/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Paul Hauner "] edition = { workspace = true } [features] -arbitrary = [] +arbitrary = ["dep:arbitrary"] default = ["supranational"] fake_crypto = [] supranational = ["blst"] @@ -14,7 +14,7 @@ supranational-force-adx = ["supranational", "blst/force-adx"] [dependencies] alloy-primitives = { workspace = true } -arbitrary = { workspace = true } +arbitrary = { workspace = true, optional = true } blst = { version = "0.3.3", optional = true } ethereum_hashing = { workspace = true } ethereum_serde_utils = { workspace = true } diff --git a/crypto/kzg/Cargo.toml b/crypto/kzg/Cargo.toml index d2558663d5..840f8cfc9c 100644 --- a/crypto/kzg/Cargo.toml +++ b/crypto/kzg/Cargo.toml @@ -7,10 +7,11 @@ edition = "2021" [features] default = [] +arbitrary = ["dep:arbitrary"] fake_crypto = [] [dependencies] -arbitrary = { workspace = true } +arbitrary = { workspace = true, optional = true } c-kzg = { workspace = true } educe = { workspace = true } ethereum_hashing = { workspace = true } diff --git a/crypto/kzg/src/kzg_commitment.rs b/crypto/kzg/src/kzg_commitment.rs index 5a5e689429..bc5fc5f5aa 100644 --- a/crypto/kzg/src/kzg_commitment.rs +++ b/crypto/kzg/src/kzg_commitment.rs @@ -114,6 +114,7 @@ impl Debug for KzgCommitment { } } +#[cfg(feature = "arbitrary")] impl arbitrary::Arbitrary<'_> for KzgCommitment { fn arbitrary(u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result { let mut bytes = [0u8; BYTES_PER_COMMITMENT]; diff --git a/crypto/kzg/src/kzg_proof.rs b/crypto/kzg/src/kzg_proof.rs index 5a83466d0c..aa9ed185a0 100644 --- a/crypto/kzg/src/kzg_proof.rs +++ b/crypto/kzg/src/kzg_proof.rs @@ -110,6 +110,7 @@ impl Debug for KzgProof { } } +#[cfg(feature = "arbitrary")] impl arbitrary::Arbitrary<'_> for KzgProof { fn arbitrary(u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result { let mut bytes = [0u8; BYTES_PER_PROOF]; diff --git a/validator_client/slashing_protection/Cargo.toml b/validator_client/slashing_protection/Cargo.toml index 45244c2e62..695a693385 100644 --- a/validator_client/slashing_protection/Cargo.toml +++ b/validator_client/slashing_protection/Cargo.toml @@ -6,11 +6,11 @@ edition = { workspace = true } autotests = false [features] -arbitrary-fuzz = ["types/arbitrary-fuzz", "eip_3076/arbitrary-fuzz"] +arbitrary-fuzz = ["dep:arbitrary", "types/arbitrary-fuzz", "eip_3076/arbitrary-fuzz"] portable = ["types/portable"] [dependencies] -arbitrary = { workspace = true, features = ["derive"] } +arbitrary = { workspace = true, features = ["derive"], optional = true } bls = { workspace = true } eip_3076 = { workspace = true, features = ["json"] } ethereum_serde_utils = { workspace = true } From 8d4af658bd5f33be3ac1c3d41443938c3808ddef Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Fri, 20 Feb 2026 15:27:33 +1100 Subject: [PATCH 13/35] Remove unreachable void pattern for ConnectionLimits (#8871) Co-Authored-By: Jimmy Chen --- beacon_node/lighthouse_network/src/service/mod.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index 3d709ed9b5..94e0ad0710 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -1861,8 +1861,6 @@ impl Network { self.inject_upnp_event(e); None } - #[allow(unreachable_patterns)] - BehaviourEvent::ConnectionLimits(le) => libp2p::core::util::unreachable(le), }, SwarmEvent::ConnectionEstablished { .. } => None, SwarmEvent::ConnectionClosed { .. } => None, From 48071b7ae722ac915c678fe518110aee988e6d74 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Sat, 21 Feb 2026 01:22:13 +1100 Subject: [PATCH 14/35] Add --jwt-secret-path to lcli mock-el (#8864) Co-Authored-By: Jimmy Chen --- lcli/src/main.rs | 12 +++++++++++- lcli/src/mock_el.rs | 24 ++++++++++++++++++++---- 2 files changed, 31 insertions(+), 5 deletions(-) diff --git a/lcli/src/main.rs b/lcli/src/main.rs index a21dfd4386..63dd0f2c5b 100644 --- a/lcli/src/main.rs +++ b/lcli/src/main.rs @@ -492,10 +492,20 @@ fn main() { .long("jwt-output-path") .value_name("PATH") .action(ArgAction::Set) - .required(true) + .required_unless_present("jwt-secret-path") + .conflicts_with("jwt-secret-path") .help("Path to write the JWT secret.") .display_order(0) ) + .arg( + Arg::new("jwt-secret-path") + .long("jwt-secret-path") + .value_name("PATH") + .action(ArgAction::Set) + .help("Path to an existing hex-encoded JWT secret file. \ + When provided, this secret is used instead of the default.") + .display_order(0) + ) .arg( Arg::new("listen-address") .long("listen-address") diff --git a/lcli/src/mock_el.rs b/lcli/src/mock_el.rs index d6bdfb0d71..544010b6a2 100644 --- a/lcli/src/mock_el.rs +++ b/lcli/src/mock_el.rs @@ -2,7 +2,7 @@ use clap::ArgMatches; use clap_utils::{parse_optional, parse_required}; use environment::Environment; use execution_layer::{ - auth::JwtKey, + auth::{JwtKey, strip_prefix}, test_utils::{ Config, DEFAULT_JWT_SECRET, DEFAULT_TERMINAL_BLOCK, MockExecutionConfig, MockServer, }, @@ -13,7 +13,8 @@ use std::sync::Arc; use types::*; pub fn run(mut env: Environment, matches: &ArgMatches) -> Result<(), String> { - let jwt_path: PathBuf = parse_required(matches, "jwt-output-path")?; + let jwt_output_path: Option = parse_optional(matches, "jwt-output-path")?; + let jwt_secret_path: Option = parse_optional(matches, "jwt-secret-path")?; let listen_addr: Ipv4Addr = parse_required(matches, "listen-address")?; let listen_port: u16 = parse_required(matches, "listen-port")?; let all_payloads_valid: bool = parse_required(matches, "all-payloads-valid")?; @@ -25,8 +26,23 @@ pub fn run(mut env: Environment, matches: &ArgMatches) -> Result< let handle = env.core_context().executor.handle().unwrap(); let spec = Arc::new(E::default_spec()); - let jwt_key = JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap(); - std::fs::write(jwt_path, hex::encode(DEFAULT_JWT_SECRET)).unwrap(); + + let jwt_key = if let Some(secret_path) = jwt_secret_path { + let hex_str = std::fs::read_to_string(&secret_path) + .map_err(|e| format!("Failed to read JWT secret file: {}", e))?; + let secret_bytes = hex::decode(strip_prefix(hex_str.trim())) + .map_err(|e| format!("Invalid hex in JWT secret file: {}", e))?; + JwtKey::from_slice(&secret_bytes) + .map_err(|e| format!("Invalid JWT secret length (expected 32 bytes): {}", e))? + } else if let Some(jwt_path) = jwt_output_path { + let jwt_key = JwtKey::from_slice(&DEFAULT_JWT_SECRET) + .map_err(|e| format!("Default JWT secret invalid: {}", e))?; + std::fs::write(jwt_path, hex::encode(jwt_key.as_bytes())) + .map_err(|e| format!("Failed to write JWT secret to output path: {}", e))?; + jwt_key + } else { + return Err("either --jwt-secret-path or --jwt-output-path must be provided".to_string()); + }; let config = MockExecutionConfig { server_config: Config { From 9452d5186729aab2d24460d4a293606f875409f6 Mon Sep 17 00:00:00 2001 From: Mac L Date: Sun, 22 Feb 2026 02:03:59 +0400 Subject: [PATCH 15/35] Bump `uuid` to remove duplicate (#8874) #8547 Bump the version of `uuid` in our Cargo.toml to version `1` which removes `uuid 0.8` and unifies it across the workspace to version `1.19.0`. Co-Authored-By: Mac L --- Cargo.lock | 19 +++++-------------- Cargo.toml | 2 +- deny.toml | 1 + 3 files changed, 7 insertions(+), 15 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 419ba679db..eccdc8b29c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3199,7 +3199,7 @@ dependencies = [ "sha2", "tempfile", "unicode-normalization", - "uuid 0.8.2", + "uuid", "zeroize", ] @@ -3239,7 +3239,7 @@ dependencies = [ "serde_repr", "tempfile", "tiny-bip39", - "uuid 0.8.2", + "uuid", ] [[package]] @@ -5901,7 +5901,7 @@ dependencies = [ "rustc_version 0.4.1", "smallvec", "tagptr", - "uuid 1.19.0", + "uuid", ] [[package]] @@ -7168,7 +7168,7 @@ checksum = "a2ebd03c29250cdf191da93a35118b4567c2ef0eacab54f65e058d6f4c9965f6" dependencies = [ "r2d2", "rusqlite", - "uuid 1.19.0", + "uuid", ] [[package]] @@ -9519,16 +9519,6 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" -[[package]] -name = "uuid" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" -dependencies = [ - "getrandom 0.2.16", - "serde", -] - [[package]] name = "uuid" version = "1.19.0" @@ -9538,6 +9528,7 @@ dependencies = [ "getrandom 0.3.4", "js-sys", "rand 0.9.2", + "serde_core", "wasm-bindgen", ] diff --git a/Cargo.toml b/Cargo.toml index 3b5a7dd6ba..f735b97540 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -273,7 +273,7 @@ tree_hash_derive = "0.12.0" typenum = "1" types = { path = "consensus/types", features = ["saturating-arith"] } url = "2" -uuid = { version = "0.8", features = ["serde", "v4"] } +uuid = { version = "1", features = ["serde", "v4"] } validator_client = { path = "validator_client" } validator_dir = { path = "common/validator_dir" } validator_http_api = { path = "validator_client/http_api" } diff --git a/deny.toml b/deny.toml index e6c30f6a48..3b230155f7 100644 --- a/deny.toml +++ b/deny.toml @@ -18,6 +18,7 @@ deny = [ { crate = "pbkdf2", deny-multiple-versions = true, reason = "takes a long time to compile" }, { crate = "scrypt", deny-multiple-versions = true, reason = "takes a long time to compile" }, { crate = "syn", deny-multiple-versions = true, reason = "takes a long time to compile" }, + { crate = "uuid", deny-multiple-versions = true, reason = "dependency hygiene" }, ] [sources] From de2362a8202572c59a5c81ba372de77aa34263d8 Mon Sep 17 00:00:00 2001 From: Eitan Seri- Levi Date: Sun, 22 Feb 2026 10:17:47 -0800 Subject: [PATCH 16/35] Fix compilation error --- .../gossip_verified_envelope.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/beacon_node/beacon_chain/src/payload_envelope_verification/gossip_verified_envelope.rs b/beacon_node/beacon_chain/src/payload_envelope_verification/gossip_verified_envelope.rs index c9bef630aa..504a1d2c70 100644 --- a/beacon_node/beacon_chain/src/payload_envelope_verification/gossip_verified_envelope.rs +++ b/beacon_node/beacon_chain/src/payload_envelope_verification/gossip_verified_envelope.rs @@ -2,7 +2,10 @@ use std::sync::Arc; use educe::Educe; use slot_clock::SlotClock; -use state_processing::{VerifySignatures, envelope_processing::process_execution_payload_envelope}; +use state_processing::{ + VerifySignatures, + envelope_processing::{VerifyStateRoot, process_execution_payload_envelope}, +}; use tracing::{Span, debug}; use types::{ EthSpec, SignedBeaconBlock, SignedExecutionPayloadEnvelope, @@ -236,6 +239,7 @@ impl IntoExecutionPendingEnvelope for GossipVerifiedEnve &signed_envelope, // verify signature already done for GossipVerifiedEnvelope VerifySignatures::False, + VerifyStateRoot::True, &chain.spec, )?; From b525fe055fc854f591c6c5a01f33cf9aef48aa10 Mon Sep 17 00:00:00 2001 From: Eitan Seri- Levi Date: Sun, 22 Feb 2026 16:07:48 -0800 Subject: [PATCH 17/35] Fix --- beacon_node/beacon_chain/src/beacon_chain.rs | 30 ------------------- .../beacon_chain/src/execution_payload.rs | 2 +- .../payload_envelope_verification/import.rs | 28 +++++++++++++++++ 3 files changed, 29 insertions(+), 31 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 1ed5579fea..a491e8559b 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -57,9 +57,6 @@ use crate::observed_block_producers::ObservedBlockProducers; use crate::observed_data_sidecars::ObservedDataSidecars; use crate::observed_operations::{ObservationOutcome, ObservedOperations}; use crate::observed_slashable::ObservedSlashable; -use crate::payload_envelope_verification::{ - EnvelopeError, ExecutedEnvelope, ExecutionPendingEnvelope, -}; use crate::pending_payload_envelopes::PendingPayloadEnvelopes; use crate::persisted_beacon_chain::PersistedBeaconChain; use crate::persisted_custody::persist_custody_context; @@ -3557,33 +3554,6 @@ impl BeaconChain { )) } - /// Accepts a fully-verified payload envelope and awaits on its payload verification handle to - /// get a fully `ExecutedEnvelope`. - /// - /// An error is returned if the verification handle couldn't be awaited. - #[instrument(skip_all, level = "debug")] - pub async fn into_executed_payload_envelope( - self: Arc, - pending_envelope: ExecutionPendingEnvelope, - ) -> Result, EnvelopeError> { - let ExecutionPendingEnvelope { - signed_envelope, - import_data, - payload_verification_handle, - } = pending_envelope; - - let payload_verification_outcome = payload_verification_handle - .await - .map_err(BeaconChainError::TokioJoin)? - .ok_or(BeaconChainError::RuntimeShutdown)??; - - Ok(ExecutedEnvelope::new( - signed_envelope, - import_data, - payload_verification_outcome, - )) - } - /* Import methods */ /// Checks if the block is available, and imports immediately if so, otherwise caches the block diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index b0644ac8aa..a5c2ead427 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -122,7 +122,7 @@ impl PayloadNotifier { } } -/// Verify that `execution_payload` contained by `block` is considered valid by an execution +/// Verify that `execution_payload` associated with `beacon_block_root` is considered valid by an execution /// engine. /// /// ## Specification diff --git a/beacon_node/beacon_chain/src/payload_envelope_verification/import.rs b/beacon_node/beacon_chain/src/payload_envelope_verification/import.rs index f2633e8d5f..603e14446a 100644 --- a/beacon_node/beacon_chain/src/payload_envelope_verification/import.rs +++ b/beacon_node/beacon_chain/src/payload_envelope_verification/import.rs @@ -17,6 +17,7 @@ use crate::{ NotifyExecutionLayer, block_verification_types::{AsBlock, AvailableBlockData}, metrics, + payload_envelope_verification::ExecutionPendingEnvelope, validator_monitor::{get_slot_delay_ms, timestamp_now}, }; @@ -157,6 +158,33 @@ impl BeaconChain { } } + /// Accepts a fully-verified payload envelope and awaits on its payload verification handle to + /// get a fully `ExecutedEnvelope`. + /// + /// An error is returned if the verification handle couldn't be awaited. + #[instrument(skip_all, level = "debug")] + pub async fn into_executed_payload_envelope( + self: Arc, + pending_envelope: ExecutionPendingEnvelope, + ) -> Result, EnvelopeError> { + let ExecutionPendingEnvelope { + signed_envelope, + import_data, + payload_verification_handle, + } = pending_envelope; + + let payload_verification_outcome = payload_verification_handle + .await + .map_err(BeaconChainError::TokioJoin)? + .ok_or(BeaconChainError::RuntimeShutdown)??; + + Ok(ExecutedEnvelope::new( + signed_envelope, + import_data, + payload_verification_outcome, + )) + } + #[instrument(skip_all)] pub async fn import_available_execution_payload_envelope( self: &Arc, From 2b214175d5001b3022321cb0bfcacb13a4ab0d0d Mon Sep 17 00:00:00 2001 From: 0xMushow <105550256+0xMushow@users.noreply.github.com> Date: Mon, 23 Feb 2026 06:02:56 +0400 Subject: [PATCH 18/35] Enforce stricter checks on certain constants (#8500) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Which issue # does this PR address? None All of these are performing a check, and adding a batch, or creating a new lookup, or a new query, etc.. Hence all of these limits would be off by one. Example: ```rust // BACKFILL_BATCH_BUFFER_SIZE = 5 if self.batches.iter().filter(...).count() >= BACKFILL_BATCH_BUFFER_SIZE { return None; // ← REJECT } // ... later adds batch via Entry::Vacant(entry).insert(...) ``` Without the `>` being changed to a `>=` , we would allow 6. The same idea applies to all changes proposed. Co-Authored-By: Antoine James Co-Authored-By: Jimmy Chen Co-Authored-By: Jimmy Chen --- beacon_node/lighthouse_network/src/discovery/mod.rs | 2 +- beacon_node/network/src/sync/backfill_sync/mod.rs | 2 +- beacon_node/network/src/sync/block_lookups/mod.rs | 2 +- beacon_node/network/src/sync/custody_backfill_sync/mod.rs | 2 +- beacon_node/network/src/sync/network_context/custody.rs | 2 +- beacon_node/network/src/sync/range_sync/chain.rs | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index 38a6a84b44..21b1146aff 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -674,7 +674,7 @@ impl Discovery { /// updates the min_ttl field. fn add_subnet_query(&mut self, subnet: Subnet, min_ttl: Option, retries: usize) { // remove the entry and complete the query if greater than the maximum search count - if retries > MAX_DISCOVERY_RETRY { + if retries >= MAX_DISCOVERY_RETRY { debug!("Subnet peer discovery did not find sufficient peers. Reached max retry limit"); return; } diff --git a/beacon_node/network/src/sync/backfill_sync/mod.rs b/beacon_node/network/src/sync/backfill_sync/mod.rs index 9802ec56a1..7ef72c7f3a 100644 --- a/beacon_node/network/src/sync/backfill_sync/mod.rs +++ b/beacon_node/network/src/sync/backfill_sync/mod.rs @@ -1071,7 +1071,7 @@ impl BackFillSync { .iter() .filter(|&(_epoch, batch)| in_buffer(batch)) .count() - > BACKFILL_BATCH_BUFFER_SIZE as usize + >= BACKFILL_BATCH_BUFFER_SIZE as usize { return None; } diff --git a/beacon_node/network/src/sync/block_lookups/mod.rs b/beacon_node/network/src/sync/block_lookups/mod.rs index cbf65505ef..394f2fc37d 100644 --- a/beacon_node/network/src/sync/block_lookups/mod.rs +++ b/beacon_node/network/src/sync/block_lookups/mod.rs @@ -398,7 +398,7 @@ impl BlockLookups { // Lookups contain untrusted data, bound the total count of lookups hold in memory to reduce // the risk of OOM in case of bugs of malicious activity. - if self.single_block_lookups.len() > MAX_LOOKUPS { + if self.single_block_lookups.len() >= MAX_LOOKUPS { warn!(?block_root, "Dropping lookup reached max"); return false; } diff --git a/beacon_node/network/src/sync/custody_backfill_sync/mod.rs b/beacon_node/network/src/sync/custody_backfill_sync/mod.rs index fa8b70c8b4..a964ad9a3c 100644 --- a/beacon_node/network/src/sync/custody_backfill_sync/mod.rs +++ b/beacon_node/network/src/sync/custody_backfill_sync/mod.rs @@ -422,7 +422,7 @@ impl CustodyBackFillSync { .iter() .filter(|&(_epoch, batch)| in_buffer(batch)) .count() - > BACKFILL_BATCH_BUFFER_SIZE as usize + >= BACKFILL_BATCH_BUFFER_SIZE as usize { return None; } diff --git a/beacon_node/network/src/sync/network_context/custody.rs b/beacon_node/network/src/sync/network_context/custody.rs index de5d9b6e0b..ae0eee9964 100644 --- a/beacon_node/network/src/sync/network_context/custody.rs +++ b/beacon_node/network/src/sync/network_context/custody.rs @@ -239,7 +239,7 @@ impl ActiveCustodyRequest { if let Some(wait_duration) = request.is_awaiting_download() { // Note: an empty response is considered a successful response, so we may end up // retrying many more times than `MAX_CUSTODY_COLUMN_DOWNLOAD_ATTEMPTS`. - if request.download_failures > MAX_CUSTODY_COLUMN_DOWNLOAD_ATTEMPTS { + if request.download_failures >= MAX_CUSTODY_COLUMN_DOWNLOAD_ATTEMPTS { return Err(Error::TooManyFailures); } diff --git a/beacon_node/network/src/sync/range_sync/chain.rs b/beacon_node/network/src/sync/range_sync/chain.rs index d67d6468a9..25ea1af76a 100644 --- a/beacon_node/network/src/sync/range_sync/chain.rs +++ b/beacon_node/network/src/sync/range_sync/chain.rs @@ -1277,7 +1277,7 @@ impl SyncingChain { .iter() .filter(|&(_epoch, batch)| in_buffer(batch)) .count() - > BATCH_BUFFER_SIZE as usize + >= BATCH_BUFFER_SIZE as usize { return None; } From dcc43e3d20f44146963aa880fd46cda9e53bda04 Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Sun, 22 Feb 2026 22:17:24 -0800 Subject: [PATCH 19/35] Implement gloas block gossip verification changes (#8878) Co-Authored-By: Eitan Seri-Levi Co-Authored-By: Eitan Seri- Levi Co-Authored-By: Michael Sproul --- beacon_node/beacon_chain/src/beacon_chain.rs | 18 ++-- .../beacon_chain/src/block_verification.rs | 84 +++++++++++++++---- .../beacon_chain/src/execution_payload.rs | 11 ++- .../gossip_methods.rs | 33 ++++---- consensus/types/src/block/beacon_block.rs | 20 +++++ 5 files changed, 132 insertions(+), 34 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 9f62bf11f5..26ad2e714b 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -3378,11 +3378,19 @@ impl BeaconChain { ); } - self.data_availability_checker.put_pre_execution_block( - block_root, - unverified_block.block_cloned(), - block_source, - )?; + // Gloas blocks dont need to be inserted into the DA cache + // they are always available. + if !unverified_block + .block() + .fork_name_unchecked() + .gloas_enabled() + { + self.data_availability_checker.put_pre_execution_block( + block_root, + unverified_block.block_cloned(), + block_source, + )?; + } // Start the Prometheus timer. let _full_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_TIMES); diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index e0943d5d93..292560d6a7 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -51,7 +51,9 @@ use crate::beacon_snapshot::PreProcessingSnapshot; use crate::blob_verification::GossipBlobError; use crate::block_verification_types::{AsBlock, BlockImportData, RpcBlock}; -use crate::data_availability_checker::{AvailabilityCheckError, MaybeAvailableBlock}; +use crate::data_availability_checker::{ + AvailabilityCheckError, AvailableBlock, AvailableBlockData, MaybeAvailableBlock, +}; use crate::data_column_verification::GossipDataColumnError; use crate::execution_payload::{ AllowOptimisticImport, NotifyExecutionLayer, PayloadNotifier, @@ -334,6 +336,15 @@ pub enum BlockError { max_blobs_at_epoch: usize, block: usize, }, + /// The bid's parent_block_root does not match the block's parent_root. + /// + /// ## Peer scoring + /// + /// The block is invalid and the peer should be penalized. + BidParentRootMismatch { + bid_parent_root: Hash256, + block_parent_root: Hash256, + }, } /// Which specific signature(s) are invalid in a SignedBeaconBlock @@ -887,15 +898,15 @@ impl GossipVerifiedBlock { // Do not gossip blocks that claim to contain more blobs than the max allowed // at the given block epoch. - if let Ok(commitments) = block.message().body().blob_kzg_commitments() { + if let Some(blob_kzg_commitments_len) = block.message().blob_kzg_commitments_len() { let max_blobs_at_epoch = chain .spec .max_blobs_per_block(block.slot().epoch(T::EthSpec::slots_per_epoch())) as usize; - if commitments.len() > max_blobs_at_epoch { + if blob_kzg_commitments_len > max_blobs_at_epoch { return Err(BlockError::InvalidBlobCount { max_blobs_at_epoch, - block: commitments.len(), + block: blob_kzg_commitments_len, }); } } @@ -932,6 +943,24 @@ impl GossipVerifiedBlock { let block_epoch = block.slot().epoch(T::EthSpec::slots_per_epoch()); let (parent_block, block) = verify_parent_block_is_known::(&fork_choice_read_lock, block)?; + + // [New in Gloas]: Verify bid.parent_block_root matches block.parent_root. + if let Ok(bid) = block.message().body().signed_execution_payload_bid() + && bid.message.parent_block_root != block.message().parent_root() + { + return Err(BlockError::BidParentRootMismatch { + bid_parent_root: bid.message.parent_block_root, + block_parent_root: block.message().parent_root(), + }); + } + + // TODO(gloas) The following validation can only be completed once fork choice has been implemented: + // The block's parent execution payload (defined by bid.parent_block_hash) has been seen + // (via gossip or non-gossip sources) (a client MAY queue blocks for processing + // once the parent payload is retrieved). If execution_payload verification of block's execution + // payload parent by an execution node is complete, verify the block's execution payload + // parent (defined by bid.parent_block_hash) passes all validation. + drop(fork_choice_read_lock); // Track the number of skip slots between the block and its parent. @@ -1038,8 +1067,15 @@ impl GossipVerifiedBlock { }); } - // Validate the block's execution_payload (if any). - validate_execution_payload_for_gossip(&parent_block, block.message(), chain)?; + // [New in Gloas]: Skip payload validation checks. The payload now arrives separately + // via `ExecutionPayloadEnvelope`. + if !chain + .spec + .fork_name_at_slot::(block.slot()) + .gloas_enabled() + { + validate_execution_payload_for_gossip(&parent_block, block.message(), chain)?; + } // Beacon API block_gossip events if let Some(event_handler) = chain.event_handler.as_ref() @@ -1211,15 +1247,35 @@ impl SignatureVerifiedBlock { let result = info_span!("signature_verify").in_scope(|| signature_verifier.verify()); match result { - Ok(_) => Ok(Self { - block: MaybeAvailableBlock::AvailabilityPending { + Ok(_) => { + // gloas blocks are always available. + let maybe_available = if chain + .spec + .fork_name_at_slot::(block.slot()) + .gloas_enabled() + { + MaybeAvailableBlock::Available( + AvailableBlock::new( + block, + AvailableBlockData::NoData, + &chain.data_availability_checker, + chain.spec.clone(), + ) + .map_err(BlockError::AvailabilityCheck)?, + ) + } else { + MaybeAvailableBlock::AvailabilityPending { + block_root: from.block_root, + block, + } + }; + Ok(Self { + block: maybe_available, block_root: from.block_root, - block, - }, - block_root: from.block_root, - parent: Some(parent), - consensus_context, - }), + parent: Some(parent), + consensus_context, + }) + } Err(_) => Err(BlockError::InvalidSignature( InvalidSignature::BlockBodySignatures, )), diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index bdf3ab9594..f32a3ba2a3 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -62,7 +62,10 @@ impl PayloadNotifier { state: &BeaconState, notify_execution_layer: NotifyExecutionLayer, ) -> Result { - let payload_verification_status = if is_execution_enabled(state, block.message().body()) { + let payload_verification_status = if block.fork_name_unchecked().gloas_enabled() { + // Gloas blocks don't contain an execution payload. + Some(PayloadVerificationStatus::Irrelevant) + } else if is_execution_enabled(state, block.message().body()) { // Perform the initial stages of payload verification. // // We will duplicate these checks again during `per_block_processing`, however these @@ -294,6 +297,12 @@ pub fn validate_execution_payload_for_gossip( block: BeaconBlockRef<'_, T::EthSpec>, chain: &BeaconChain, ) -> Result<(), BlockError> { + // Gloas blocks don't have an execution payload in the block body. + // Bid-related validations are handled in gossip block verification. + if block.fork_name_unchecked().gloas_enabled() { + return Ok(()); + } + // Only apply this validation if this is a Bellatrix beacon block. if let Ok(execution_payload) = block.body().execution_payload() { // This logic should match `is_execution_enabled`. We use only the execution block hash of diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index a9198f1943..e90018c851 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -1356,7 +1356,8 @@ impl NetworkBeaconProcessor { | Err(e @ BlockError::ParentExecutionPayloadInvalid { .. }) | Err(e @ BlockError::KnownInvalidExecutionPayload(_)) | Err(e @ BlockError::GenesisBlock) - | Err(e @ BlockError::InvalidBlobCount { .. }) => { + | Err(e @ BlockError::InvalidBlobCount { .. }) + | Err(e @ BlockError::BidParentRootMismatch { .. }) => { warn!(error = %e, "Could not verify block for gossip. Rejecting the block"); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); self.gossip_penalize_peer( @@ -1490,19 +1491,23 @@ impl NetworkBeaconProcessor { // Block is gossip valid. Attempt to fetch blobs from the EL using versioned hashes derived // from kzg commitments, without having to wait for all blobs to be sent from the peers. - let publish_blobs = true; - let self_clone = self.clone(); - let block_clone = block.clone(); - let current_span = Span::current(); - self.executor.spawn( - async move { - self_clone - .fetch_engine_blobs_and_publish(block_clone, block_root, publish_blobs) - .await - } - .instrument(current_span), - "fetch_blobs_gossip", - ); + // TODO(gloas) we'll want to use this same optimization, but we need to refactor the + // `fetch_and_process_engine_blobs` flow to support gloas. + if !block.fork_name_unchecked().gloas_enabled() { + let publish_blobs = true; + let self_clone = self.clone(); + let block_clone = block.clone(); + let current_span = Span::current(); + self.executor.spawn( + async move { + self_clone + .fetch_engine_blobs_and_publish(block_clone, block_root, publish_blobs) + .await + } + .instrument(current_span), + "fetch_blobs_gossip", + ); + } let result = self .chain diff --git a/consensus/types/src/block/beacon_block.rs b/consensus/types/src/block/beacon_block.rs index bee3cdb274..5634d842b6 100644 --- a/consensus/types/src/block/beacon_block.rs +++ b/consensus/types/src/block/beacon_block.rs @@ -309,6 +309,26 @@ impl<'a, E: EthSpec, Payload: AbstractExecPayload> BeaconBlockRef<'a, E, Payl pub fn execution_payload(&self) -> Result, BeaconStateError> { self.body().execution_payload() } + + pub fn blob_kzg_commitments_len(&self) -> Option { + match self { + BeaconBlockRef::Base(_) => None, + BeaconBlockRef::Altair(_) => None, + BeaconBlockRef::Bellatrix(_) => None, + BeaconBlockRef::Capella(_) => None, + BeaconBlockRef::Deneb(block) => Some(block.body.blob_kzg_commitments.len()), + BeaconBlockRef::Electra(block) => Some(block.body.blob_kzg_commitments.len()), + BeaconBlockRef::Fulu(block) => Some(block.body.blob_kzg_commitments.len()), + BeaconBlockRef::Gloas(block) => Some( + block + .body + .signed_execution_payload_bid + .message + .blob_kzg_commitments + .len(), + ), + } + } } impl<'a, E: EthSpec, Payload: AbstractExecPayload> BeaconBlockRefMut<'a, E, Payload> { From d12bb4d712602fe2df0d3101863f4e33895fdb88 Mon Sep 17 00:00:00 2001 From: Eitan Seri- Levi Date: Mon, 23 Feb 2026 12:06:15 -0800 Subject: [PATCH 20/35] Trying something out --- beacon_node/beacon_chain/src/beacon_chain.rs | 68 +-- .../beacon_chain/src/beacon_proposer_cache.rs | 79 ++- .../gossip_verified_envelope.rs | 79 ++- .../src/payload_envelope_verification/mod.rs | 19 +- .../payload_envelope_verification/tests.rs | 524 ++++++++++++++++++ 5 files changed, 680 insertions(+), 89 deletions(-) create mode 100644 beacon_node/beacon_chain/src/payload_envelope_verification/tests.rs diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index a491e8559b..4894bdaee9 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -4,9 +4,7 @@ use crate::attestation_verification::{ batch_verify_unaggregated_attestations, }; use crate::beacon_block_streamer::{BeaconBlockStreamer, CheckCaches}; -use crate::beacon_proposer_cache::{ - BeaconProposerCache, EpochBlockProposers, ensure_state_can_determine_proposers_for_epoch, -}; +use crate::beacon_proposer_cache::{BeaconProposerCache, EpochBlockProposers}; use crate::blob_verification::{GossipBlobError, GossipVerifiedBlob}; use crate::block_times_cache::BlockTimesCache; use crate::block_verification::POS_PANDA_BANNER; @@ -6548,62 +6546,14 @@ impl BeaconChain { accessor: impl Fn(&EpochBlockProposers) -> Result, state_provider: impl FnOnce() -> Result<(Hash256, BeaconState), E>, ) -> Result { - let cache_entry = self - .beacon_proposer_cache - .lock() - .get_or_insert_key(proposal_epoch, shuffling_decision_block); - - // If the cache entry is not initialised, run the code to initialise it inside a OnceCell. - // This prevents duplication of work across multiple threads. - // - // If it is already initialised, then `get_or_try_init` will return immediately without - // executing the initialisation code at all. - let epoch_block_proposers = cache_entry.get_or_try_init(|| { - // Fetch the state on-demand if the required epoch was missing from the cache. - // If the caller wants to not compute the state they must return an error here and then - // catch it at the call site. - let (state_root, mut state) = state_provider()?; - - // Ensure the state can compute proposer duties for `epoch`. - ensure_state_can_determine_proposers_for_epoch( - &mut state, - state_root, - proposal_epoch, - &self.spec, - )?; - - // Sanity check the state. - let latest_block_root = state.get_latest_block_root(state_root); - let state_decision_block_root = state.proposer_shuffling_decision_root_at_epoch( - proposal_epoch, - latest_block_root, - &self.spec, - )?; - if state_decision_block_root != shuffling_decision_block { - return Err(Error::ProposerCacheIncorrectState { - state_decision_block_root, - requested_decision_block_root: shuffling_decision_block, - } - .into()); - } - - let proposers = state.get_beacon_proposer_indices(proposal_epoch, &self.spec)?; - - // Use fork_at_epoch rather than the state's fork, because post-Fulu we may not have - // advanced the state completely into the new epoch. - let fork = self.spec.fork_at_epoch(proposal_epoch); - - debug!( - ?shuffling_decision_block, - epoch = %proposal_epoch, - "Priming proposer shuffling cache" - ); - - Ok::<_, E>(EpochBlockProposers::new(proposal_epoch, fork, proposers)) - })?; - - // Run the accessor function on the computed epoch proposers. - accessor(epoch_block_proposers).map_err(Into::into) + crate::beacon_proposer_cache::with_proposer_cache( + &self.beacon_proposer_cache, + &self.spec, + shuffling_decision_block, + proposal_epoch, + accessor, + state_provider, + ) } /// Runs the `map_fn` with the committee cache for `shuffling_epoch` from the chain with head diff --git a/beacon_node/beacon_chain/src/beacon_proposer_cache.rs b/beacon_node/beacon_chain/src/beacon_proposer_cache.rs index 912f7f3bad..141a79b202 100644 --- a/beacon_node/beacon_chain/src/beacon_proposer_cache.rs +++ b/beacon_node/beacon_chain/src/beacon_proposer_cache.rs @@ -12,12 +12,13 @@ use crate::{BeaconChain, BeaconChainError, BeaconChainTypes}; use fork_choice::ExecutionStatus; use lru::LruCache; use once_cell::sync::OnceCell; +use parking_lot::Mutex; use safe_arith::SafeArith; use smallvec::SmallVec; use state_processing::state_advance::partial_state_advance; use std::num::NonZeroUsize; use std::sync::Arc; -use tracing::instrument; +use tracing::{debug, instrument}; use typenum::Unsigned; use types::new_non_zero_usize; use types::{BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, Fork, Hash256, Slot}; @@ -164,6 +165,82 @@ impl BeaconProposerCache { } } +/// Access the proposer cache, computing and caching the proposers if necessary. +/// +/// This is a free function that operates on references to the cache and spec, decoupled from +/// `BeaconChain`. The `accessor` is called with the cached `EpochBlockProposers` for the given +/// `(proposal_epoch, shuffling_decision_block)` key. If the cache entry is missing, the +/// `state_provider` closure is called to produce a state which is then used to compute and +/// cache the proposers. +pub fn with_proposer_cache( + beacon_proposer_cache: &Mutex, + spec: &ChainSpec, + shuffling_decision_block: Hash256, + proposal_epoch: Epoch, + accessor: impl Fn(&EpochBlockProposers) -> Result, + state_provider: impl FnOnce() -> Result<(Hash256, BeaconState), Err>, +) -> Result +where + Spec: EthSpec, + Err: From + From, +{ + let cache_entry = beacon_proposer_cache + .lock() + .get_or_insert_key(proposal_epoch, shuffling_decision_block); + + // If the cache entry is not initialised, run the code to initialise it inside a OnceCell. + // This prevents duplication of work across multiple threads. + // + // If it is already initialised, then `get_or_try_init` will return immediately without + // executing the initialisation code at all. + let epoch_block_proposers = cache_entry.get_or_try_init(|| { + // Fetch the state on-demand if the required epoch was missing from the cache. + // If the caller wants to not compute the state they must return an error here and then + // catch it at the call site. + let (state_root, mut state) = state_provider()?; + + // Ensure the state can compute proposer duties for `epoch`. + ensure_state_can_determine_proposers_for_epoch( + &mut state, + state_root, + proposal_epoch, + spec, + )?; + + // Sanity check the state. + let latest_block_root = state.get_latest_block_root(state_root); + let state_decision_block_root = state.proposer_shuffling_decision_root_at_epoch( + proposal_epoch, + latest_block_root, + spec, + )?; + if state_decision_block_root != shuffling_decision_block { + return Err(BeaconChainError::ProposerCacheIncorrectState { + state_decision_block_root, + requested_decision_block_root: shuffling_decision_block, + } + .into()); + } + + let proposers = state.get_beacon_proposer_indices(proposal_epoch, spec)?; + + // Use fork_at_epoch rather than the state's fork, because post-Fulu we may not have + // advanced the state completely into the new epoch. + let fork = spec.fork_at_epoch(proposal_epoch); + + debug!( + ?shuffling_decision_block, + epoch = %proposal_epoch, + "Priming proposer shuffling cache" + ); + + Ok::<_, Err>(EpochBlockProposers::new(proposal_epoch, fork, proposers)) + })?; + + // Run the accessor function on the computed epoch proposers. + accessor(epoch_block_proposers).map_err(Into::into) +} + /// Compute the proposer duties using the head state without cache. /// /// Return: diff --git a/beacon_node/beacon_chain/src/payload_envelope_verification/gossip_verified_envelope.rs b/beacon_node/beacon_chain/src/payload_envelope_verification/gossip_verified_envelope.rs index 504a1d2c70..492b265fd0 100644 --- a/beacon_node/beacon_chain/src/payload_envelope_verification/gossip_verified_envelope.rs +++ b/beacon_node/beacon_chain/src/payload_envelope_verification/gossip_verified_envelope.rs @@ -1,27 +1,43 @@ use std::sync::Arc; use educe::Educe; +use parking_lot::{Mutex, RwLock}; use slot_clock::SlotClock; use state_processing::{ VerifySignatures, envelope_processing::{VerifyStateRoot, process_execution_payload_envelope}, }; +use store::DatabaseBlock; use tracing::{Span, debug}; use types::{ - EthSpec, SignedBeaconBlock, SignedExecutionPayloadEnvelope, + ChainSpec, EthSpec, Hash256, SignedBeaconBlock, SignedExecutionPayloadEnvelope, consts::gloas::BUILDER_INDEX_SELF_BUILD, }; use crate::{ - BeaconChain, BeaconChainError, BeaconChainTypes, NotifyExecutionLayer, + BeaconChain, BeaconChainError, BeaconChainTypes, BeaconStore, NotifyExecutionLayer, PayloadVerificationOutcome, + beacon_proposer_cache::{self, BeaconProposerCache}, + canonical_head::CanonicalHead, payload_envelope_verification::{ EnvelopeError, EnvelopeImportData, EnvelopeProcessingSnapshot, ExecutionPendingEnvelope, IntoExecutionPendingEnvelope, MaybeAvailableEnvelope, load_snapshot, payload_notifier::PayloadNotifier, }, + validator_pubkey_cache::ValidatorPubkeyCache, }; +/// Bundles only the dependencies needed for gossip verification of execution payload envelopes, +/// decoupling `GossipVerifiedEnvelope::new` from the full `BeaconChain`. +pub struct GossipVerificationContext<'a, T: BeaconChainTypes> { + pub canonical_head: &'a CanonicalHead, + pub store: &'a BeaconStore, + pub spec: &'a ChainSpec, + pub beacon_proposer_cache: &'a Mutex, + pub validator_pubkey_cache: &'a RwLock>, + pub genesis_validators_root: Hash256, +} + /// A wrapper around a `SignedExecutionPayloadEnvelope` that indicates it has been approved for re-gossiping on /// the p2p network. #[derive(Educe)] @@ -35,7 +51,7 @@ pub struct GossipVerifiedEnvelope { impl GossipVerifiedEnvelope { pub fn new( signed_envelope: Arc>, - chain: &BeaconChain, + ctx: &GossipVerificationContext<'_, T>, ) -> Result { let envelope = &signed_envelope.message; let payload = &envelope.payload; @@ -48,7 +64,7 @@ impl GossipVerifiedEnvelope { // 2. Blocks we've seen that are invalid (REJECT). // // Presently these two cases are conflated. - let fork_choice_read_lock = chain.canonical_head.fork_choice_read_lock(); + let fork_choice_read_lock = ctx.canonical_head.fork_choice_read_lock(); let Some(proto_block) = fork_choice_read_lock.get_block(&beacon_block_root) else { return Err(EnvelopeError::BlockRootUnknown { block_root: beacon_block_root, @@ -64,12 +80,14 @@ impl GossipVerifiedEnvelope { // TODO(EIP-7732): check that we haven't seen another valid `SignedExecutionPayloadEnvelope` // for this block root from this builder - envelope status table check - let block = chain - .get_full_block(&beacon_block_root)? - .ok_or_else(|| { - EnvelopeError::from(BeaconChainError::MissingBeaconBlock(beacon_block_root)) - }) - .map(Arc::new)?; + let block = match ctx.store.try_get_full_block(&beacon_block_root)? { + Some(DatabaseBlock::Full(block)) => Arc::new(block), + Some(DatabaseBlock::Blinded(_)) | None => { + return Err(EnvelopeError::from(BeaconChainError::MissingBeaconBlock( + beacon_block_root, + ))); + } + }; let execution_bid = &block .message() .body() @@ -118,13 +136,15 @@ impl GossipVerifiedEnvelope { let block_slot = envelope.slot; let block_epoch = block_slot.epoch(T::EthSpec::slots_per_epoch()); let proposer_shuffling_decision_block = - proto_block.proposer_shuffling_root_for_child_block(block_epoch, &chain.spec); + proto_block.proposer_shuffling_root_for_child_block(block_epoch, ctx.spec); let (signature_is_valid, opt_snapshot) = if builder_index == BUILDER_INDEX_SELF_BUILD { // Fast path: self-build envelopes can be verified without loading the state. let envelope_ref = signed_envelope.as_ref(); let mut opt_snapshot = None; - let proposer = chain.with_proposer_cache::<_, EnvelopeError>( + let proposer = beacon_proposer_cache::with_proposer_cache( + ctx.beacon_proposer_cache, + ctx.spec, proposer_shuffling_decision_block, block_epoch, |proposers| proposers.get_slot::(block_slot), @@ -133,14 +153,14 @@ impl GossipVerifiedEnvelope { %beacon_block_root, "Proposer shuffling cache miss for envelope verification" ); - let snapshot = load_snapshot(envelope_ref, chain)?; + let snapshot = load_snapshot(envelope_ref, ctx.canonical_head, ctx.store)?; opt_snapshot = Some(Box::new(snapshot.clone())); - Ok((snapshot.state_root, snapshot.pre_state)) + Ok::<_, EnvelopeError>((snapshot.state_root, snapshot.pre_state)) }, )?; let fork = proposer.fork; - let pubkey_cache = chain.validator_pubkey_cache.read(); + let pubkey_cache = ctx.validator_pubkey_cache.read(); let pubkey = pubkey_cache .get(block.message().proposer_index() as usize) .ok_or_else(|| EnvelopeError::UnknownValidator { @@ -149,16 +169,16 @@ impl GossipVerifiedEnvelope { let is_valid = signed_envelope.verify_signature( pubkey, &fork, - chain.genesis_validators_root, - &chain.spec, + ctx.genesis_validators_root, + ctx.spec, ); (is_valid, opt_snapshot) } else { // TODO(gloas) if we implement a builder pubkey cache, we'll need to use it here. // External builder: must load the state to get the builder pubkey. - let snapshot = load_snapshot(signed_envelope.as_ref(), chain)?; + let snapshot = load_snapshot(signed_envelope.as_ref(), ctx.canonical_head, ctx.store)?; let is_valid = - signed_envelope.verify_signature_with_state(&snapshot.pre_state, &chain.spec)?; + signed_envelope.verify_signature_with_state(&snapshot.pre_state, ctx.spec)?; (is_valid, Some(Box::new(snapshot))) }; @@ -228,7 +248,11 @@ impl IntoExecutionPendingEnvelope for GossipVerifiedEnve let snapshot = if let Some(snapshot) = self.snapshot { *snapshot } else { - load_snapshot(signed_envelope.as_ref(), chain)? + load_snapshot( + signed_envelope.as_ref(), + &chain.canonical_head, + &chain.store, + )? }; let mut state = snapshot.pre_state; @@ -263,6 +287,18 @@ impl IntoExecutionPendingEnvelope for GossipVerifiedEnve } impl BeaconChain { + /// Build a `GossipVerificationContext` from this `BeaconChain`. + pub fn gossip_verification_context(&self) -> GossipVerificationContext<'_, T> { + GossipVerificationContext { + canonical_head: &self.canonical_head, + store: &self.store, + spec: &self.spec, + beacon_proposer_cache: &self.beacon_proposer_cache, + validator_pubkey_cache: &self.validator_pubkey_cache, + genesis_validators_root: self.genesis_validators_root, + } + } + /// Returns `Ok(GossipVerifiedEnvelope)` if the supplied `envelope` should be forwarded onto the /// gossip network. The envelope is not imported into the chain, it is just partially verified. /// @@ -287,7 +323,8 @@ impl BeaconChain { let slot = envelope.slot(); let beacon_block_root = envelope.message.beacon_block_root; - match GossipVerifiedEnvelope::new(envelope, &chain) { + let ctx = chain.gossip_verification_context(); + match GossipVerifiedEnvelope::new(envelope, &ctx) { Ok(verified) => { debug!( %slot, diff --git a/beacon_node/beacon_chain/src/payload_envelope_verification/mod.rs b/beacon_node/beacon_chain/src/payload_envelope_verification/mod.rs index 80e62f93b7..38fdd9f425 100644 --- a/beacon_node/beacon_chain/src/payload_envelope_verification/mod.rs +++ b/beacon_node/beacon_chain/src/payload_envelope_verification/mod.rs @@ -39,9 +39,9 @@ use types::{ }; use crate::{ - BeaconChain, BeaconChainError, BeaconChainTypes, BlockError, ExecutionPayloadError, - NotifyExecutionLayer, PayloadVerificationOutcome, - block_verification::PayloadVerificationHandle, + BeaconChain, BeaconChainError, BeaconChainTypes, BeaconStore, BlockError, + ExecutionPayloadError, NotifyExecutionLayer, PayloadVerificationOutcome, + block_verification::PayloadVerificationHandle, canonical_head::CanonicalHead, payload_envelope_verification::gossip_verified_envelope::GossipVerifiedEnvelope, }; @@ -49,6 +49,9 @@ pub mod gossip_verified_envelope; pub mod import; mod payload_notifier; +#[cfg(test)] +mod tests; + pub trait IntoExecutionPendingEnvelope: Sized { fn into_execution_pending_envelope( self, @@ -289,7 +292,8 @@ impl From for EnvelopeError { #[instrument(skip_all, level = "debug", fields(beacon_block_root = %envelope.beacon_block_root()))] pub(crate) fn load_snapshot( envelope: &SignedExecutionPayloadEnvelope, - chain: &BeaconChain, + canonical_head: &CanonicalHead, + store: &BeaconStore, ) -> Result, EnvelopeError> { // Reject any envelope if its block is not known to fork choice. // @@ -302,7 +306,7 @@ pub(crate) fn load_snapshot( // choice, so we will not reject any child of the finalized block (this is relevant during // genesis). - let fork_choice_read_lock = chain.canonical_head.fork_choice_read_lock(); + let fork_choice_read_lock = canonical_head.fork_choice_read_lock(); let beacon_block_root = envelope.beacon_block_root(); let Some(proto_beacon_block) = fork_choice_read_lock.get_block(&beacon_block_root) else { return Err(EnvelopeError::BlockRootUnknown { @@ -317,8 +321,7 @@ pub(crate) fn load_snapshot( // We can use `get_hot_state` here rather than `get_advanced_hot_state` because the envelope // must be from the same slot as its block (so no advance is required). let cache_state = true; - let state = chain - .store + let state = store .get_hot_state(&block_state_root, cache_state) .map_err(EnvelopeError::from)? .ok_or_else(|| { @@ -342,7 +345,7 @@ impl IntoExecutionPendingEnvelope chain: &Arc>, notify_execution_layer: NotifyExecutionLayer, ) -> Result, EnvelopeError> { - GossipVerifiedEnvelope::new(self, chain)? + GossipVerifiedEnvelope::new(self, &chain.gossip_verification_context())? .into_execution_pending_envelope(chain, notify_execution_layer) } diff --git a/beacon_node/beacon_chain/src/payload_envelope_verification/tests.rs b/beacon_node/beacon_chain/src/payload_envelope_verification/tests.rs new file mode 100644 index 0000000000..c362bc6180 --- /dev/null +++ b/beacon_node/beacon_chain/src/payload_envelope_verification/tests.rs @@ -0,0 +1,524 @@ +use std::sync::Arc; +use std::time::Duration; + +use bls::{FixedBytesExtended, Keypair, Signature}; +use fork_choice::ForkChoice; +use parking_lot::{Mutex, RwLock}; +use ssz_types::VariableList; +use store::{HotColdDB, KeyValueStore, MemoryStore, StoreConfig}; +use types::consts::gloas::BUILDER_INDEX_SELF_BUILD; +use types::test_utils::generate_deterministic_keypairs; +use types::*; + +use crate::BeaconStore; +use crate::beacon_fork_choice_store::BeaconForkChoiceStore; +use crate::beacon_proposer_cache::BeaconProposerCache; +use crate::builder::Witness; +use crate::canonical_head::CanonicalHead; +use crate::payload_envelope_verification::EnvelopeError; +use crate::payload_envelope_verification::gossip_verified_envelope::{ + GossipVerificationContext, GossipVerifiedEnvelope, +}; +use crate::validator_pubkey_cache::ValidatorPubkeyCache; + +type TestEthSpec = MinimalEthSpec; +type TestTypes = Witness< + slot_clock::TestingSlotClock, + TestEthSpec, + MemoryStore, + MemoryStore, +>; + +/// Test context that holds the minimal state needed for gossip verification. +struct TestContext { + store: BeaconStore, + canonical_head: CanonicalHead, + beacon_proposer_cache: Mutex, + validator_pubkey_cache: RwLock>, + spec: Arc, + keypairs: Vec, + genesis_state: BeaconState, + genesis_block_root: Hash256, + genesis_validators_root: Hash256, +} + +impl TestContext { + fn new(validator_count: usize) -> Self { + let spec = Arc::new(ForkName::Gloas.make_genesis_spec(ChainSpec::minimal())); + let keypairs = generate_deterministic_keypairs(validator_count); + + let mut genesis_state = genesis::interop_genesis_state::( + &keypairs, + 0, // genesis_time + Hash256::from_slice(&[0x42; 32]), + None, // no execution payload header + &spec, + ) + .expect("should create genesis state"); + + let genesis_validators_root = genesis_state.genesis_validators_root(); + + let store: BeaconStore = Arc::new( + HotColdDB::open_ephemeral(StoreConfig::default(), spec.clone()) + .expect("should create ephemeral store"), + ); + + // Initialize store metadata. + let genesis_block = BeaconBlock::::empty(&spec); + let genesis_block_root = genesis_block.canonical_root(); + let signed_genesis_block = SignedBeaconBlock::from_block(genesis_block, Signature::empty()); + + // Build caches and compute state root before storing. + genesis_state + .build_caches(&spec) + .expect("should build caches"); + + // Initialize store metadata ops (must be done before put_state). + let ops = vec![ + store + .init_anchor_info( + signed_genesis_block.parent_root(), + signed_genesis_block.slot(), + Slot::new(0), + false, + ) + .expect("should init anchor info"), + store + .init_blob_info(signed_genesis_block.slot()) + .expect("should init blob info"), + store + .init_data_column_info(signed_genesis_block.slot()) + .expect("should init data column info"), + ]; + store + .hot_db + .do_atomically(ops) + .expect("should store metadata"); + + // Store the genesis block and state. + store + .put_block(&genesis_block_root, signed_genesis_block.clone()) + .expect("should store genesis block"); + let state_root = genesis_state + .update_tree_hash_cache() + .expect("should compute state root"); + store + .put_state(&state_root, &genesis_state) + .expect("should store genesis state"); + + // Create BeaconSnapshot and fork choice. + let snapshot = crate::BeaconSnapshot { + beacon_block: Arc::new(signed_genesis_block), + beacon_block_root: genesis_block_root, + beacon_state: genesis_state.clone(), + }; + + let fc_store = BeaconForkChoiceStore::get_forkchoice_store(store.clone(), snapshot.clone()) + .expect("should create fork choice store"); + + let fork_choice = ForkChoice::from_anchor( + fc_store, + genesis_block_root, + &snapshot.beacon_block, + &snapshot.beacon_state, + None, + &spec, + ) + .expect("should create fork choice from anchor"); + + let canonical_head = CanonicalHead::new(fork_choice, Arc::new(snapshot)); + + let validator_pubkey_cache = ValidatorPubkeyCache::new(&genesis_state, store.clone()) + .expect("should create validator pubkey cache"); + + TestContext { + store, + canonical_head, + beacon_proposer_cache: Mutex::new(BeaconProposerCache::default()), + validator_pubkey_cache: RwLock::new(validator_pubkey_cache), + spec, + keypairs, + genesis_state, + genesis_block_root, + genesis_validators_root, + } + } + + fn gossip_verification_context(&self) -> GossipVerificationContext<'_, TestTypes> { + GossipVerificationContext { + canonical_head: &self.canonical_head, + store: &self.store, + spec: &self.spec, + beacon_proposer_cache: &self.beacon_proposer_cache, + validator_pubkey_cache: &self.validator_pubkey_cache, + genesis_validators_root: self.genesis_validators_root, + } + } + + /// Build a gloas block at `slot` with the given proposer, store it, add it to fork choice, + /// and return the signed block, block root, and post-state. + fn build_and_import_block( + &self, + slot: Slot, + proposer_index: usize, + execution_bid: ExecutionPayloadBid, + ) -> ( + Arc>, + Hash256, + BeaconState, + ) { + let mut state = self.genesis_state.clone(); + + // Advance the state to the target slot. + if slot > state.slot() { + state_processing::state_advance::complete_state_advance( + &mut state, None, slot, &self.spec, + ) + .expect("should advance state"); + } + + state.build_caches(&self.spec).expect("should build caches"); + + // Compute the state root so we can embed it in the block. + let state_root = state + .update_tree_hash_cache() + .expect("should compute state root"); + + let signed_bid = SignedExecutionPayloadBid { + message: execution_bid, + signature: Signature::infinity().expect("should create infinity signature"), + }; + + // Create the block body with the actual state root. + let block = BeaconBlock::Gloas(BeaconBlockGloas { + slot, + proposer_index: proposer_index as u64, + parent_root: self.genesis_block_root, + state_root, + body: BeaconBlockBodyGloas { + randao_reveal: Signature::empty(), + eth1_data: state.eth1_data().clone(), + graffiti: Graffiti::default(), + proposer_slashings: VariableList::empty(), + attester_slashings: VariableList::empty(), + attestations: VariableList::empty(), + deposits: VariableList::empty(), + voluntary_exits: VariableList::empty(), + sync_aggregate: SyncAggregate::empty(), + bls_to_execution_changes: VariableList::empty(), + signed_execution_payload_bid: signed_bid, + payload_attestations: VariableList::empty(), + _phantom: std::marker::PhantomData, + }, + }); + + let block_root = block.canonical_root(); + let proposer_sk = &self.keypairs[proposer_index].sk; + let fork = self + .spec + .fork_at_epoch(slot.epoch(TestEthSpec::slots_per_epoch())); + let signed_block = block.sign(proposer_sk, &fork, self.genesis_validators_root, &self.spec); + + // Store block and state. + self.store + .put_block(&block_root, signed_block.clone()) + .expect("should store block"); + self.store + .put_state(&state_root, &state) + .expect("should store state"); + + // Add block to fork choice. + let mut fork_choice = self.canonical_head.fork_choice_write_lock(); + fork_choice + .on_block( + slot, + signed_block.message(), + block_root, + Duration::from_secs(0), + &state, + crate::PayloadVerificationStatus::Verified, + &self.spec, + ) + .expect("should add block to fork choice"); + drop(fork_choice); + + (Arc::new(signed_block), block_root, state) + } + + /// Build a signed execution payload envelope for the given block. + fn build_signed_envelope( + &self, + block_root: Hash256, + slot: Slot, + builder_index: u64, + block_hash: ExecutionBlockHash, + signing_key: &bls::SecretKey, + ) -> Arc> { + let mut payload = ExecutionPayloadGloas::default(); + payload.block_hash = block_hash; + + let envelope = ExecutionPayloadEnvelope { + payload, + execution_requests: ExecutionRequests::default(), + builder_index, + beacon_block_root: block_root, + slot, + state_root: Hash256::zero(), + }; + + let fork = self + .spec + .fork_at_epoch(slot.epoch(TestEthSpec::slots_per_epoch())); + let domain = self.spec.get_domain( + slot.epoch(TestEthSpec::slots_per_epoch()), + Domain::BeaconBuilder, + &fork, + self.genesis_validators_root, + ); + let message = envelope.signing_root(domain); + let signature = signing_key.sign(message); + + Arc::new(SignedExecutionPayloadEnvelope { + message: envelope, + signature, + }) + } + + /// Helper: build a block and matching self-build envelope. + fn build_block_and_envelope( + &self, + ) -> ( + Arc>, + Hash256, + Arc>, + ) { + let slot = Slot::new(1); + let block_hash = ExecutionBlockHash::from_root(Hash256::from_slice(&[0xaa; 32])); + + // Get proposer for slot 1. + let mut state = self.genesis_state.clone(); + state_processing::state_advance::complete_state_advance(&mut state, None, slot, &self.spec) + .expect("should advance state"); + state.build_caches(&self.spec).expect("should build caches"); + let proposer_index = state + .get_beacon_proposer_index(slot, &self.spec) + .expect("should get proposer index"); + + let bid = ExecutionPayloadBid { + builder_index: BUILDER_INDEX_SELF_BUILD, + block_hash, + slot, + ..Default::default() + }; + + let (signed_block, block_root, _post_state) = + self.build_and_import_block(slot, proposer_index, bid); + + let proposer_sk = &self.keypairs[proposer_index].sk; + let envelope = self.build_signed_envelope( + block_root, + slot, + BUILDER_INDEX_SELF_BUILD, + block_hash, + proposer_sk, + ); + + (signed_block, block_root, envelope) + } +} + +#[test] +fn test_valid_self_build_envelope() { + let ctx = TestContext::new(32); + let (_block, _block_root, envelope) = ctx.build_block_and_envelope(); + let gossip_ctx = ctx.gossip_verification_context(); + + let result = GossipVerifiedEnvelope::new(envelope, &gossip_ctx); + assert!( + result.is_ok(), + "valid self-build envelope should pass verification, got: {:?}", + result.err() + ); +} + +#[test] +fn test_unknown_block_root() { + let ctx = TestContext::new(32); + let gossip_ctx = ctx.gossip_verification_context(); + + // Build an envelope referencing a block root not in fork choice. + let unknown_root = Hash256::from_slice(&[0xff; 32]); + let envelope = ctx.build_signed_envelope( + unknown_root, + Slot::new(1), + BUILDER_INDEX_SELF_BUILD, + ExecutionBlockHash::from_root(Hash256::zero()), + &ctx.keypairs[0].sk, + ); + + let result = GossipVerifiedEnvelope::new(envelope, &gossip_ctx); + assert!( + matches!(result, Err(EnvelopeError::BlockRootUnknown { .. })), + "should reject envelope with unknown block root, got: {:?}", + result + ); +} + +#[test] +fn test_slot_mismatch() { + let ctx = TestContext::new(32); + let (_block, block_root, _good_envelope) = ctx.build_block_and_envelope(); + let gossip_ctx = ctx.gossip_verification_context(); + + // Build an envelope with a different slot than the block. + let wrong_slot = Slot::new(2); + let envelope = ctx.build_signed_envelope( + block_root, + wrong_slot, + BUILDER_INDEX_SELF_BUILD, + ExecutionBlockHash::from_root(Hash256::from_slice(&[0xaa; 32])), + &ctx.keypairs[0].sk, + ); + + let result = GossipVerifiedEnvelope::new(envelope, &gossip_ctx); + assert!( + matches!(result, Err(EnvelopeError::SlotMismatch { .. })), + "should reject envelope with slot mismatch, got: {:?}", + result + ); +} + +#[test] +fn test_builder_index_mismatch() { + let ctx = TestContext::new(32); + let gossip_ctx = ctx.gossip_verification_context(); + + let slot = Slot::new(1); + let block_hash = ExecutionBlockHash::from_root(Hash256::from_slice(&[0xaa; 32])); + + // Get proposer for slot 1. + let mut state = ctx.genesis_state.clone(); + state_processing::state_advance::complete_state_advance(&mut state, None, slot, &ctx.spec) + .expect("should advance state"); + state.build_caches(&ctx.spec).expect("should build caches"); + let proposer_index = state + .get_beacon_proposer_index(slot, &ctx.spec) + .expect("should get proposer index"); + + // Block has builder_index = BUILDER_INDEX_SELF_BUILD + let bid = ExecutionPayloadBid { + builder_index: BUILDER_INDEX_SELF_BUILD, + block_hash, + slot, + ..Default::default() + }; + let (_block, block_root, _post_state) = ctx.build_and_import_block(slot, proposer_index, bid); + + // Envelope has a different builder_index. + let wrong_builder_index = 999; + let envelope = ctx.build_signed_envelope( + block_root, + slot, + wrong_builder_index, + block_hash, + &ctx.keypairs[proposer_index].sk, + ); + + let result = GossipVerifiedEnvelope::new(envelope, &gossip_ctx); + assert!( + matches!(result, Err(EnvelopeError::BuilderIndexMismatch { .. })), + "should reject envelope with builder index mismatch, got: {:?}", + result + ); +} + +#[test] +fn test_block_hash_mismatch() { + let ctx = TestContext::new(32); + let gossip_ctx = ctx.gossip_verification_context(); + + let slot = Slot::new(1); + let block_hash = ExecutionBlockHash::from_root(Hash256::from_slice(&[0xaa; 32])); + let wrong_block_hash = ExecutionBlockHash::from_root(Hash256::from_slice(&[0xbb; 32])); + + // Get proposer for slot 1. + let mut state = ctx.genesis_state.clone(); + state_processing::state_advance::complete_state_advance(&mut state, None, slot, &ctx.spec) + .expect("should advance state"); + state.build_caches(&ctx.spec).expect("should build caches"); + let proposer_index = state + .get_beacon_proposer_index(slot, &ctx.spec) + .expect("should get proposer index"); + + // Block has block_hash = 0xaa + let bid = ExecutionPayloadBid { + builder_index: BUILDER_INDEX_SELF_BUILD, + block_hash, + slot, + ..Default::default() + }; + let (_block, block_root, _post_state) = ctx.build_and_import_block(slot, proposer_index, bid); + + // Envelope has a different block_hash. + let envelope = ctx.build_signed_envelope( + block_root, + slot, + BUILDER_INDEX_SELF_BUILD, + wrong_block_hash, + &ctx.keypairs[proposer_index].sk, + ); + + let result = GossipVerifiedEnvelope::new(envelope, &gossip_ctx); + assert!( + matches!(result, Err(EnvelopeError::BlockHashMismatch { .. })), + "should reject envelope with block hash mismatch, got: {:?}", + result + ); +} + +#[test] +fn test_bad_signature() { + let ctx = TestContext::new(32); + let gossip_ctx = ctx.gossip_verification_context(); + + let slot = Slot::new(1); + let block_hash = ExecutionBlockHash::from_root(Hash256::from_slice(&[0xaa; 32])); + + // Get proposer for slot 1. + let mut state = ctx.genesis_state.clone(); + state_processing::state_advance::complete_state_advance(&mut state, None, slot, &ctx.spec) + .expect("should advance state"); + state.build_caches(&ctx.spec).expect("should build caches"); + let proposer_index = state + .get_beacon_proposer_index(slot, &ctx.spec) + .expect("should get proposer index"); + + let bid = ExecutionPayloadBid { + builder_index: BUILDER_INDEX_SELF_BUILD, + block_hash, + slot, + ..Default::default() + }; + let (_block, block_root, _post_state) = ctx.build_and_import_block(slot, proposer_index, bid); + + // Sign the envelope with the wrong key (some other validator's key). + let wrong_key_index = if proposer_index == 0 { 1 } else { 0 }; + let envelope = ctx.build_signed_envelope( + block_root, + slot, + BUILDER_INDEX_SELF_BUILD, + block_hash, + &ctx.keypairs[wrong_key_index].sk, + ); + + let result = GossipVerifiedEnvelope::new(envelope, &gossip_ctx); + assert!( + matches!(result, Err(EnvelopeError::BadSignature)), + "should reject envelope with bad signature, got: {:?}", + result + ); +} + +// NOTE: `test_prior_to_finalization` is omitted here because advancing finalization requires +// attestation-based justification which needs the full `BeaconChainHarness`. The +// `PriorToFinalization` code path is tested in the integration tests. From 341682e7196a598d2e767e655d37ce370d27a350 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Tue, 24 Feb 2026 11:15:39 +1100 Subject: [PATCH 21/35] Add unit tests for BatchInfo and fix doc comments (#8873) Co-Authored-By: Jimmy Chen --- beacon_node/network/src/sync/batch.rs | 203 +++++++++++++++++- .../network/src/sync/range_sync/mod.rs | 2 + 2 files changed, 202 insertions(+), 3 deletions(-) diff --git a/beacon_node/network/src/sync/batch.rs b/beacon_node/network/src/sync/batch.rs index 8de386f5be..f9a1fcce39 100644 --- a/beacon_node/network/src/sync/batch.rs +++ b/beacon_node/network/src/sync/batch.rs @@ -213,6 +213,9 @@ impl BatchInfo { /// After different operations over a batch, this could be in a state that allows it to /// continue, or in failed state. When the batch has failed, we check if it did mainly due to /// processing failures. In this case the batch is considered failed and faulty. + /// + /// When failure counts are equal, `blacklist` is `false` — we assume network issues over + /// peer fault when the evidence is ambiguous. pub fn outcome(&self) -> BatchOperationOutcome { match self.state { BatchState::Poisoned => unreachable!("Poisoned batch"), @@ -255,8 +258,10 @@ impl BatchInfo { /// Mark the batch as failed and return whether we can attempt a re-download. /// /// This can happen if a peer disconnects or some error occurred that was not the peers fault. - /// The `peer` parameter, when set to None, does not increment the failed attempts of - /// this batch and register the peer, rather attempts a re-download. + /// The `peer` parameter, when set to `None`, still counts toward + /// `max_batch_download_attempts` (to prevent infinite retries on persistent failures) + /// but does not register a peer in `failed_peers()`. Use + /// [`Self::downloading_to_awaiting_download`] to retry without counting a failed attempt. #[must_use = "Batch may have failed"] pub fn download_failed( &mut self, @@ -272,7 +277,6 @@ impl BatchInfo { { BatchState::Failed } else { - // drop the blocks BatchState::AwaitingDownload }; Ok(self.outcome()) @@ -524,3 +528,196 @@ impl BatchState { } } } + +#[cfg(test)] +mod tests { + use super::*; + use crate::sync::range_sync::RangeSyncBatchConfig; + use types::MinimalEthSpec; + + type Cfg = RangeSyncBatchConfig; + type TestBatch = BatchInfo>; + + fn max_dl() -> u8 { + Cfg::max_batch_download_attempts() + } + + fn max_proc() -> u8 { + Cfg::max_batch_processing_attempts() + } + + fn new_batch() -> TestBatch { + BatchInfo::new(&Epoch::new(0), 1, ByRangeRequestType::Blocks) + } + + fn peer() -> PeerId { + PeerId::random() + } + + fn advance_to_processing(batch: &mut TestBatch, req_id: Id, peer_id: PeerId) { + batch.start_downloading(req_id).unwrap(); + batch.download_completed(vec![1, 2, 3], peer_id).unwrap(); + batch.start_processing().unwrap(); + } + + fn advance_to_awaiting_validation(batch: &mut TestBatch, req_id: Id, peer_id: PeerId) { + advance_to_processing(batch, req_id, peer_id); + batch + .processing_completed(BatchProcessingResult::Success) + .unwrap(); + } + + #[test] + fn happy_path_lifecycle() { + let mut batch = new_batch(); + let p = peer(); + + assert!(matches!(batch.state(), BatchState::AwaitingDownload)); + + batch.start_downloading(1).unwrap(); + assert!(matches!(batch.state(), BatchState::Downloading(1))); + + batch.download_completed(vec![10, 20], p).unwrap(); + assert!(matches!(batch.state(), BatchState::AwaitingProcessing(..))); + + let (data, _duration) = batch.start_processing().unwrap(); + assert_eq!(data, vec![10, 20]); + assert!(matches!(batch.state(), BatchState::Processing(..))); + + let outcome = batch + .processing_completed(BatchProcessingResult::Success) + .unwrap(); + assert!(matches!(outcome, BatchOperationOutcome::Continue)); + assert!(matches!(batch.state(), BatchState::AwaitingValidation(..))); + } + + #[test] + fn download_failures_count_toward_limit() { + let mut batch = new_batch(); + + for i in 1..max_dl() as Id { + batch.start_downloading(i).unwrap(); + let outcome = batch.download_failed(Some(peer())).unwrap(); + assert!(matches!(outcome, BatchOperationOutcome::Continue)); + } + + // Next failure hits the limit + batch.start_downloading(max_dl() as Id).unwrap(); + let outcome = batch.download_failed(Some(peer())).unwrap(); + assert!(matches!( + outcome, + BatchOperationOutcome::Failed { blacklist: false } + )); + } + + #[test] + fn download_failed_none_counts_but_does_not_blame_peer() { + let mut batch = new_batch(); + + // None still counts toward the limit (prevents infinite retry on persistent + // network failures), but doesn't register a peer in failed_peers(). + for i in 0..max_dl() as Id { + batch.start_downloading(i).unwrap(); + batch.download_failed(None).unwrap(); + } + assert!(matches!(batch.state(), BatchState::Failed)); + assert!(batch.failed_peers().is_empty()); + } + + #[test] + fn faulty_processing_failures_count_toward_limit() { + let mut batch = new_batch(); + + for i in 1..max_proc() as Id { + advance_to_processing(&mut batch, i, peer()); + let outcome = batch + .processing_completed(BatchProcessingResult::FaultyFailure) + .unwrap(); + assert!(matches!(outcome, BatchOperationOutcome::Continue)); + } + + // Next faulty failure: limit reached + advance_to_processing(&mut batch, max_proc() as Id, peer()); + let outcome = batch + .processing_completed(BatchProcessingResult::FaultyFailure) + .unwrap(); + assert!(matches!( + outcome, + BatchOperationOutcome::Failed { blacklist: true } + )); + } + + #[test] + fn non_faulty_processing_failures_never_exhaust_batch() { + let mut batch = new_batch(); + + // Well past both limits — non-faulty failures should never cause failure + let iterations = (max_dl() + max_proc()) as Id * 2; + for i in 0..iterations { + advance_to_processing(&mut batch, i, peer()); + let outcome = batch + .processing_completed(BatchProcessingResult::NonFaultyFailure) + .unwrap(); + assert!(matches!(outcome, BatchOperationOutcome::Continue)); + } + // Non-faulty failures also don't register peers as failed + assert!(batch.failed_peers().is_empty()); + } + + #[test] + fn validation_failures_count_toward_processing_limit() { + let mut batch = new_batch(); + + for i in 1..max_proc() as Id { + advance_to_awaiting_validation(&mut batch, i, peer()); + let outcome = batch.validation_failed().unwrap(); + assert!(matches!(outcome, BatchOperationOutcome::Continue)); + } + + advance_to_awaiting_validation(&mut batch, max_proc() as Id, peer()); + let outcome = batch.validation_failed().unwrap(); + assert!(matches!( + outcome, + BatchOperationOutcome::Failed { blacklist: true } + )); + } + + #[test] + fn mixed_failure_types_interact_correctly() { + let mut batch = new_batch(); + let mut req_id: Id = 0; + let mut next_id = || { + req_id += 1; + req_id + }; + + // One download failure + batch.start_downloading(next_id()).unwrap(); + batch.download_failed(Some(peer())).unwrap(); + + // One faulty processing failure (requires a successful download first) + advance_to_processing(&mut batch, next_id(), peer()); + batch + .processing_completed(BatchProcessingResult::FaultyFailure) + .unwrap(); + + // One non-faulty processing failure + advance_to_processing(&mut batch, next_id(), peer()); + batch + .processing_completed(BatchProcessingResult::NonFaultyFailure) + .unwrap(); + assert!(matches!(batch.state(), BatchState::AwaitingDownload)); + + // Fill remaining download failures to hit the limit + for _ in 1..max_dl() { + batch.start_downloading(next_id()).unwrap(); + batch.download_failed(Some(peer())).unwrap(); + } + + // Download failures > processing failures → blacklist: false + assert!(matches!( + batch.outcome(), + BatchOperationOutcome::Failed { blacklist: false } + )); + } +} diff --git a/beacon_node/network/src/sync/range_sync/mod.rs b/beacon_node/network/src/sync/range_sync/mod.rs index dd9f17bfd1..3b65e1c84a 100644 --- a/beacon_node/network/src/sync/range_sync/mod.rs +++ b/beacon_node/network/src/sync/range_sync/mod.rs @@ -5,6 +5,8 @@ mod chain_collection; mod range; mod sync_type; +#[cfg(test)] +pub use chain::RangeSyncBatchConfig; pub use chain::{ChainId, EPOCHS_PER_BATCH}; #[cfg(test)] pub use chain_collection::SyncChainStatus; From 886d31fe7e1b6aff7ae81c8b2c35d17061b0b1fd Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 24 Feb 2026 17:27:16 +1100 Subject: [PATCH 22/35] Delete dysfunctional fork_revert feature (#8891) I found myself having to update this code for Gloas, and figured we may as well delete it seeing as it doesn't work. See: - https://github.com/sigp/lighthouse/issues/4198 Delete all `fork_revert` logic and the accompanying test. Co-Authored-By: Michael Sproul --- beacon_node/beacon_chain/src/builder.rs | 43 +--- beacon_node/beacon_chain/src/fork_revert.rs | 204 ------------------ beacon_node/beacon_chain/src/lib.rs | 1 - beacon_node/beacon_chain/tests/store_tests.rs | 182 ---------------- beacon_node/store/src/hot_cold_store.rs | 8 - beacon_node/store/src/iter.rs | 22 +- consensus/types/src/state/beacon_state.rs | 7 +- 7 files changed, 13 insertions(+), 454 deletions(-) delete mode 100644 beacon_node/beacon_chain/src/fork_revert.rs diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 4c82c93ba3..2c1dae9215 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -7,7 +7,6 @@ use crate::beacon_proposer_cache::BeaconProposerCache; use crate::custody_context::NodeCustodyType; use crate::data_availability_checker::DataAvailabilityChecker; use crate::fork_choice_signal::ForkChoiceSignalTx; -use crate::fork_revert::{reset_fork_choice_to_finalization, revert_to_fork_boundary}; use crate::graffiti_calculator::{GraffitiCalculator, GraffitiOrigin}; use crate::kzg_utils::{build_data_column_sidecars_fulu, build_data_column_sidecars_gloas}; use crate::light_client_server_cache::LightClientServerCache; @@ -778,49 +777,17 @@ where .get_head(current_slot, &self.spec) .map_err(|e| format!("Unable to get fork choice head: {:?}", e))?; - // Try to decode the head block according to the current fork, if that fails, try - // to backtrack to before the most recent fork. - let (head_block_root, head_block, head_reverted) = - match store.get_full_block(&initial_head_block_root) { - Ok(Some(block)) => (initial_head_block_root, block, false), - Ok(None) => return Err("Head block not found in store".into()), - Err(StoreError::SszDecodeError(_)) => { - error!( - message = "This node has likely missed a hard fork. \ - It will try to revert the invalid blocks and keep running, \ - but any stray blocks and states will not be deleted. \ - Long-term you should consider re-syncing this node.", - "Error decoding head block" - ); - let (block_root, block) = revert_to_fork_boundary( - current_slot, - initial_head_block_root, - store.clone(), - &self.spec, - )?; - - (block_root, block, true) - } - Err(e) => return Err(descriptive_db_error("head block", &e)), - }; + let head_block_root = initial_head_block_root; + let head_block = store + .get_full_block(&initial_head_block_root) + .map_err(|e| descriptive_db_error("head block", &e))? + .ok_or("Head block not found in store")?; let (_head_state_root, head_state) = store .get_advanced_hot_state(head_block_root, current_slot, head_block.state_root()) .map_err(|e| descriptive_db_error("head state", &e))? .ok_or("Head state not found in store")?; - // If the head reverted then we need to reset fork choice using the new head's finalized - // checkpoint. - if head_reverted { - fork_choice = reset_fork_choice_to_finalization( - head_block_root, - &head_state, - store.clone(), - Some(current_slot), - &self.spec, - )?; - } - let head_shuffling_ids = BlockShufflingIds::try_from_head(head_block_root, &head_state)?; let mut head_snapshot = BeaconSnapshot { diff --git a/beacon_node/beacon_chain/src/fork_revert.rs b/beacon_node/beacon_chain/src/fork_revert.rs deleted file mode 100644 index 4db79790d3..0000000000 --- a/beacon_node/beacon_chain/src/fork_revert.rs +++ /dev/null @@ -1,204 +0,0 @@ -use crate::{BeaconForkChoiceStore, BeaconSnapshot}; -use fork_choice::{ForkChoice, PayloadVerificationStatus}; -use itertools::process_results; -use state_processing::state_advance::complete_state_advance; -use state_processing::{ - ConsensusContext, VerifyBlockRoot, per_block_processing, - per_block_processing::BlockSignatureStrategy, -}; -use std::sync::Arc; -use std::time::Duration; -use store::{HotColdDB, ItemStore, iter::ParentRootBlockIterator}; -use tracing::{info, warn}; -use types::{BeaconState, ChainSpec, EthSpec, ForkName, Hash256, SignedBeaconBlock, Slot}; - -const CORRUPT_DB_MESSAGE: &str = "The database could be corrupt. Check its file permissions or \ - consider deleting it by running with the --purge-db flag."; - -/// Revert the head to the last block before the most recent hard fork. -/// -/// This function is destructive and should only be used if there is no viable alternative. It will -/// cause the reverted blocks and states to be completely forgotten, lying dormant in the database -/// forever. -/// -/// Return the `(head_block_root, head_block)` that should be used post-reversion. -pub fn revert_to_fork_boundary, Cold: ItemStore>( - current_slot: Slot, - head_block_root: Hash256, - store: Arc>, - spec: &ChainSpec, -) -> Result<(Hash256, SignedBeaconBlock), String> { - let current_fork = spec.fork_name_at_slot::(current_slot); - let fork_epoch = spec - .fork_epoch(current_fork) - .ok_or_else(|| format!("Current fork '{}' never activates", current_fork))?; - - if current_fork == ForkName::Base { - return Err(format!( - "Cannot revert to before phase0 hard fork. {}", - CORRUPT_DB_MESSAGE - )); - } - - warn!( - target_fork = %current_fork, - %fork_epoch, - "Reverting invalid head block" - ); - let block_iter = ParentRootBlockIterator::fork_tolerant(&store, head_block_root); - - let (block_root, blinded_block) = process_results(block_iter, |mut iter| { - iter.find_map(|(block_root, block)| { - if block.slot() < fork_epoch.start_slot(E::slots_per_epoch()) { - Some((block_root, block)) - } else { - info!( - ?block_root, - slot = %block.slot(), - "Reverting block" - ); - None - } - }) - }) - .map_err(|e| { - format!( - "Error fetching blocks to revert: {:?}. {}", - e, CORRUPT_DB_MESSAGE - ) - })? - .ok_or_else(|| format!("No pre-fork blocks found. {}", CORRUPT_DB_MESSAGE))?; - - let block = store - .make_full_block(&block_root, blinded_block) - .map_err(|e| format!("Unable to add payload to new head block: {:?}", e))?; - - Ok((block_root, block)) -} - -/// Reset fork choice to the finalized checkpoint of the supplied head state. -/// -/// The supplied `head_block_root` should correspond to the most recently applied block on -/// `head_state`. -/// -/// This function avoids quirks of fork choice initialization by replaying all of the blocks from -/// the checkpoint to the head. -/// -/// See this issue for details: https://github.com/ethereum/consensus-specs/issues/2566 -/// -/// It will fail if the finalized state or any of the blocks to replay are unavailable. -/// -/// WARNING: this function is destructive and causes fork choice to permanently forget all -/// chains other than the chain leading to `head_block_root`. It should only be used in extreme -/// circumstances when there is no better alternative. -pub fn reset_fork_choice_to_finalization, Cold: ItemStore>( - head_block_root: Hash256, - head_state: &BeaconState, - store: Arc>, - current_slot: Option, - spec: &ChainSpec, -) -> Result, E>, String> { - // Fetch finalized block. - let finalized_checkpoint = head_state.finalized_checkpoint(); - let finalized_block_root = finalized_checkpoint.root; - let finalized_block = store - .get_full_block(&finalized_block_root) - .map_err(|e| format!("Error loading finalized block: {:?}", e))? - .ok_or_else(|| { - format!( - "Finalized block missing for revert: {:?}", - finalized_block_root - ) - })?; - - // Advance finalized state to finalized epoch (to handle skipped slots). - let finalized_state_root = finalized_block.state_root(); - // The enshrined finalized state should be in the state cache. - let mut finalized_state = store - .get_state(&finalized_state_root, Some(finalized_block.slot()), true) - .map_err(|e| format!("Error loading finalized state: {:?}", e))? - .ok_or_else(|| { - format!( - "Finalized block state missing from database: {:?}", - finalized_state_root - ) - })?; - let finalized_slot = finalized_checkpoint.epoch.start_slot(E::slots_per_epoch()); - complete_state_advance( - &mut finalized_state, - Some(finalized_state_root), - finalized_slot, - spec, - ) - .map_err(|e| { - format!( - "Error advancing finalized state to finalized epoch: {:?}", - e - ) - })?; - let finalized_snapshot = BeaconSnapshot { - beacon_block_root: finalized_block_root, - beacon_block: Arc::new(finalized_block), - beacon_state: finalized_state, - }; - - let fc_store = - BeaconForkChoiceStore::get_forkchoice_store(store.clone(), finalized_snapshot.clone()) - .map_err(|e| format!("Unable to reset fork choice store for revert: {e:?}"))?; - - let mut fork_choice = ForkChoice::from_anchor( - fc_store, - finalized_block_root, - &finalized_snapshot.beacon_block, - &finalized_snapshot.beacon_state, - current_slot, - spec, - ) - .map_err(|e| format!("Unable to reset fork choice for revert: {:?}", e))?; - - // Replay blocks from finalized checkpoint back to head. - // We do not replay attestations presently, relying on the absence of other blocks - // to guarantee `head_block_root` as the head. - let blocks = store - .load_blocks_to_replay(finalized_slot + 1, head_state.slot(), head_block_root) - .map_err(|e| format!("Error loading blocks to replay for fork choice: {:?}", e))?; - - let mut state = finalized_snapshot.beacon_state; - for block in blocks { - complete_state_advance(&mut state, None, block.slot(), spec) - .map_err(|e| format!("State advance failed: {:?}", e))?; - - let mut ctxt = ConsensusContext::new(block.slot()) - .set_proposer_index(block.message().proposer_index()); - per_block_processing( - &mut state, - &block, - BlockSignatureStrategy::NoVerification, - VerifyBlockRoot::True, - &mut ctxt, - spec, - ) - .map_err(|e| format!("Error replaying block: {:?}", e))?; - - // Setting this to unverified is the safest solution, since we don't have a way to - // retro-actively determine if they were valid or not. - // - // This scenario is so rare that it seems OK to double-verify some blocks. - let payload_verification_status = PayloadVerificationStatus::Optimistic; - - fork_choice - .on_block( - block.slot(), - block.message(), - block.canonical_root(), - // Reward proposer boost. We are reinforcing the canonical chain. - Duration::from_secs(0), - &state, - payload_verification_status, - spec, - ) - .map_err(|e| format!("Error applying replayed block to fork choice: {:?}", e))?; - } - - Ok(fork_choice) -} diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 3b03395a66..e1a190ffb3 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -26,7 +26,6 @@ pub mod events; pub mod execution_payload; pub mod fetch_blobs; pub mod fork_choice_signal; -pub mod fork_revert; pub mod graffiti_calculator; pub mod historical_blocks; pub mod historical_data_columns; diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 6bea5f6013..ff20e999bb 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -3924,188 +3924,6 @@ async fn finalizes_after_resuming_from_db() { ); } -#[allow(clippy::large_stack_frames)] -#[tokio::test] -async fn revert_minority_fork_on_resume() { - let validator_count = 16; - let slots_per_epoch = MinimalEthSpec::slots_per_epoch(); - - let fork_epoch = Epoch::new(4); - let fork_slot = fork_epoch.start_slot(slots_per_epoch); - let initial_blocks = slots_per_epoch * fork_epoch.as_u64() - 1; - let post_fork_blocks = slots_per_epoch * 3; - - let mut spec1 = MinimalEthSpec::default_spec(); - spec1.altair_fork_epoch = None; - let mut spec2 = MinimalEthSpec::default_spec(); - spec2.altair_fork_epoch = Some(fork_epoch); - - let all_validators = (0..validator_count).collect::>(); - - // Chain with no fork epoch configured. - let db_path1 = tempdir().unwrap(); - let store1 = get_store_generic(&db_path1, StoreConfig::default(), spec1.clone()); - let harness1 = BeaconChainHarness::builder(MinimalEthSpec) - .spec(spec1.clone().into()) - .keypairs(KEYPAIRS[0..validator_count].to_vec()) - .fresh_disk_store(store1) - .mock_execution_layer() - .build(); - - // Chain with fork epoch configured. - let db_path2 = tempdir().unwrap(); - let store2 = get_store_generic(&db_path2, StoreConfig::default(), spec2.clone()); - let harness2 = BeaconChainHarness::builder(MinimalEthSpec) - .spec(spec2.clone().into()) - .keypairs(KEYPAIRS[0..validator_count].to_vec()) - .fresh_disk_store(store2) - .mock_execution_layer() - .build(); - - // Apply the same blocks to both chains initially. - let mut state = harness1.get_current_state(); - let mut block_root = harness1.chain.genesis_block_root; - for slot in (1..=initial_blocks).map(Slot::new) { - let state_root = state.update_tree_hash_cache().unwrap(); - - let attestations = harness1.make_attestations( - &all_validators, - &state, - state_root, - block_root.into(), - slot, - ); - harness1.set_current_slot(slot); - harness2.set_current_slot(slot); - harness1.process_attestations(attestations.clone(), &state); - harness2.process_attestations(attestations, &state); - - let ((block, blobs), new_state) = harness1.make_block(state, slot).await; - - harness1 - .process_block(slot, block.canonical_root(), (block.clone(), blobs.clone())) - .await - .unwrap(); - harness2 - .process_block(slot, block.canonical_root(), (block.clone(), blobs.clone())) - .await - .unwrap(); - - state = new_state; - block_root = block.canonical_root(); - } - - assert_eq!(harness1.head_slot(), fork_slot - 1); - assert_eq!(harness2.head_slot(), fork_slot - 1); - - // Fork the two chains. - let mut state1 = state.clone(); - let mut state2 = state.clone(); - - let mut majority_blocks = vec![]; - - for i in 0..post_fork_blocks { - let slot = fork_slot + i; - - // Attestations on majority chain. - let state_root = state.update_tree_hash_cache().unwrap(); - - let attestations = harness2.make_attestations( - &all_validators, - &state2, - state_root, - block_root.into(), - slot, - ); - harness2.set_current_slot(slot); - harness2.process_attestations(attestations, &state2); - - // Minority chain block (no attesters). - let ((block1, blobs1), new_state1) = harness1.make_block(state1, slot).await; - harness1 - .process_block(slot, block1.canonical_root(), (block1, blobs1)) - .await - .unwrap(); - state1 = new_state1; - - // Majority chain block (all attesters). - let ((block2, blobs2), new_state2) = harness2.make_block(state2, slot).await; - harness2 - .process_block(slot, block2.canonical_root(), (block2.clone(), blobs2)) - .await - .unwrap(); - - state2 = new_state2; - block_root = block2.canonical_root(); - - majority_blocks.push(block2); - } - - let end_slot = fork_slot + post_fork_blocks - 1; - assert_eq!(harness1.head_slot(), end_slot); - assert_eq!(harness2.head_slot(), end_slot); - - // Resume from disk with the hard-fork activated: this should revert the post-fork blocks. - // We have to do some hackery with the `slot_clock` so that the correct slot is set when - // the beacon chain builder loads the head block. - drop(harness1); - let resume_store = get_store_generic(&db_path1, StoreConfig::default(), spec2.clone()); - - let resumed_harness = TestHarness::builder(MinimalEthSpec) - .spec(spec2.clone().into()) - .keypairs(KEYPAIRS[0..validator_count].to_vec()) - .resumed_disk_store(resume_store) - .override_store_mutator(Box::new(move |mut builder| { - builder = builder - .resume_from_db() - .unwrap() - .testing_slot_clock(spec2.get_slot_duration()) - .unwrap(); - builder - .get_slot_clock() - .unwrap() - .set_slot(end_slot.as_u64()); - builder - })) - .mock_execution_layer() - .build(); - - // Head should now be just before the fork. - resumed_harness.chain.recompute_head_at_current_slot().await; - assert_eq!(resumed_harness.head_slot(), fork_slot - 1); - - // Fork choice should only know the canonical head. When we reverted the head we also should - // have called `reset_fork_choice_to_finalization` which rebuilds fork choice from scratch - // without the reverted block. - assert_eq!( - resumed_harness.chain.heads(), - vec![(resumed_harness.head_block_root(), fork_slot - 1)] - ); - - // Apply blocks from the majority chain and trigger finalization. - let initial_split_slot = resumed_harness.chain.store.get_split_slot(); - for block in &majority_blocks { - resumed_harness - .process_block_result((block.clone(), None)) - .await - .unwrap(); - - // The canonical head should be the block from the majority chain. - resumed_harness.chain.recompute_head_at_current_slot().await; - assert_eq!(resumed_harness.head_slot(), block.slot()); - assert_eq!(resumed_harness.head_block_root(), block.canonical_root()); - } - let advanced_split_slot = resumed_harness.chain.store.get_split_slot(); - - // Check that the migration ran successfully. - assert!(advanced_split_slot > initial_split_slot); - - // Check that there is only a single head now matching harness2 (the minority chain is gone). - let heads = resumed_harness.chain.heads(); - assert_eq!(heads, harness2.chain.heads()); - assert_eq!(heads.len(), 1); -} - // This test checks whether the schema downgrade from the latest version to some minimum supported // version is correct. This is the easiest schema test to write without historic versions of // Lighthouse on-hand, but has the disadvantage that the min version needs to be adjusted manually diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 6e165702a2..4d00ed1c4a 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -721,14 +721,6 @@ impl, Cold: ItemStore> HotColdDB }) } - /// Fetch a block from the store, ignoring which fork variant it *should* be for. - pub fn get_block_any_variant>( - &self, - block_root: &Hash256, - ) -> Result>, Error> { - self.get_block_with(block_root, SignedBeaconBlock::any_from_ssz_bytes) - } - /// Fetch a block from the store using a custom decode function. /// /// This is useful for e.g. ignoring the slot-indicated fork to forcefully load a block as if it diff --git a/beacon_node/store/src/iter.rs b/beacon_node/store/src/iter.rs index e2b666e597..0cb803d1ed 100644 --- a/beacon_node/store/src/iter.rs +++ b/beacon_node/store/src/iter.rs @@ -249,7 +249,6 @@ impl, Cold: ItemStore> Iterator pub struct ParentRootBlockIterator<'a, E: EthSpec, Hot: ItemStore, Cold: ItemStore> { store: &'a HotColdDB, next_block_root: Hash256, - decode_any_variant: bool, _phantom: PhantomData, } @@ -260,17 +259,6 @@ impl<'a, E: EthSpec, Hot: ItemStore, Cold: ItemStore> Self { store, next_block_root: start_block_root, - decode_any_variant: false, - _phantom: PhantomData, - } - } - - /// Block iterator that is tolerant of blocks that have the wrong fork for their slot. - pub fn fork_tolerant(store: &'a HotColdDB, start_block_root: Hash256) -> Self { - Self { - store, - next_block_root: start_block_root, - decode_any_variant: true, _phantom: PhantomData, } } @@ -285,12 +273,10 @@ impl<'a, E: EthSpec, Hot: ItemStore, Cold: ItemStore> Ok(None) } else { let block_root = self.next_block_root; - let block = if self.decode_any_variant { - self.store.get_block_any_variant(&block_root) - } else { - self.store.get_blinded_block(&block_root) - }? - .ok_or(Error::BlockNotFound(block_root))?; + let block = self + .store + .get_blinded_block(&block_root)? + .ok_or(Error::BlockNotFound(block_root))?; self.next_block_root = block.message().parent_root(); Ok(Some((block_root, block))) } diff --git a/consensus/types/src/state/beacon_state.rs b/consensus/types/src/state/beacon_state.rs index 6228e40ef8..bd67f469d2 100644 --- a/consensus/types/src/state/beacon_state.rs +++ b/consensus/types/src/state/beacon_state.rs @@ -56,9 +56,10 @@ use crate::{ pub const CACHED_EPOCHS: usize = 3; -// Pre-electra WS calculations are not supported. On mainnet, pre-electra epochs are outside the weak subjectivity -// period. The default pre-electra WS value is set to 256 to allow for `basic-sim``, `fallback-sim`` test case `revert_minority_fork_on_resume` -// to pass. 256 is a small enough number to trigger the WS safety check pre-electra on mainnet. +// Pre-electra WS calculations are not supported. On mainnet, pre-electra epochs are outside the +// weak subjectivity period. The default pre-electra WS value is set to 256 to allow for `basic-sim` +// and `fallback-sim` tests to pass. 256 is a small enough number to trigger the WS safety check +// pre-electra on mainnet. pub const DEFAULT_PRE_ELECTRA_WS_PERIOD: u64 = 256; const MAX_RANDOM_BYTE: u64 = (1 << 8) - 1; From 8ce81578b7ee61aad396fd2a62a82497139a2570 Mon Sep 17 00:00:00 2001 From: Eitan Seri- Levi Date: Tue, 24 Feb 2026 10:46:46 -0800 Subject: [PATCH 23/35] introduce a smalll refactor and unit test --- .../gossip_verified_envelope.rs | 237 ++++++-- .../src/payload_envelope_verification/mod.rs | 3 - .../payload_envelope_verification/tests.rs | 524 ------------------ 3 files changed, 202 insertions(+), 562 deletions(-) delete mode 100644 beacon_node/beacon_chain/src/payload_envelope_verification/tests.rs diff --git a/beacon_node/beacon_chain/src/payload_envelope_verification/gossip_verified_envelope.rs b/beacon_node/beacon_chain/src/payload_envelope_verification/gossip_verified_envelope.rs index 492b265fd0..9d555e8ad2 100644 --- a/beacon_node/beacon_chain/src/payload_envelope_verification/gossip_verified_envelope.rs +++ b/beacon_node/beacon_chain/src/payload_envelope_verification/gossip_verified_envelope.rs @@ -10,8 +10,8 @@ use state_processing::{ use store::DatabaseBlock; use tracing::{Span, debug}; use types::{ - ChainSpec, EthSpec, Hash256, SignedBeaconBlock, SignedExecutionPayloadEnvelope, - consts::gloas::BUILDER_INDEX_SELF_BUILD, + ChainSpec, EthSpec, ExecutionPayloadBid, ExecutionPayloadEnvelope, Hash256, SignedBeaconBlock, + SignedExecutionPayloadEnvelope, Slot, consts::gloas::BUILDER_INDEX_SELF_BUILD, }; use crate::{ @@ -38,6 +38,54 @@ pub struct GossipVerificationContext<'a, T: BeaconChainTypes> { pub genesis_validators_root: Hash256, } +/// Verify that an execution payload envelope is consistent with its beacon block +/// and execution bid. This checks: +/// - The envelope slot is not prior to finalization +/// - The envelope slot matches the block slot +/// - The builder index matches the committed bid +/// - The payload block hash matches the committed bid +pub(crate) fn verify_envelope_consistency( + envelope: &ExecutionPayloadEnvelope, + block: &SignedBeaconBlock, + execution_bid: &ExecutionPayloadBid, + latest_finalized_slot: Slot, +) -> Result<(), EnvelopeError> { + // Check that the envelope's slot isn't from a slot prior + // to the latest finalized slot. + if envelope.slot < latest_finalized_slot { + return Err(EnvelopeError::PriorToFinalization { + payload_slot: envelope.slot, + latest_finalized_slot, + }); + } + + // Check that the slot of the envelope matches the slot of the parent block. + if envelope.slot != block.slot() { + return Err(EnvelopeError::SlotMismatch { + block: block.slot(), + envelope: envelope.slot, + }); + } + + // Builder index matches committed bid. + if envelope.builder_index != execution_bid.builder_index { + return Err(EnvelopeError::BuilderIndexMismatch { + committed_bid: execution_bid.builder_index, + envelope: envelope.builder_index, + }); + } + + // The block hash should match the block hash of the execution bid. + if envelope.payload.block_hash != execution_bid.block_hash { + return Err(EnvelopeError::BlockHashMismatch { + committed_bid: execution_bid.block_hash, + envelope: envelope.payload.block_hash, + }); + } + + Ok(()) +} + /// A wrapper around a `SignedExecutionPayloadEnvelope` that indicates it has been approved for re-gossiping on /// the p2p network. #[derive(Educe)] @@ -54,7 +102,6 @@ impl GossipVerifiedEnvelope { ctx: &GossipVerificationContext<'_, T>, ) -> Result { let envelope = &signed_envelope.message; - let payload = &envelope.payload; let beacon_block_root = envelope.beacon_block_root; // Check that we've seen the beacon block for this envelope and that it passes validation. @@ -94,38 +141,7 @@ impl GossipVerifiedEnvelope { .signed_execution_payload_bid()? .message; - // check that the envelopes slot isnt from a slot prior - // to the latest finalized slot. - if envelope.slot < latest_finalized_slot { - return Err(EnvelopeError::PriorToFinalization { - payload_slot: envelope.slot, - latest_finalized_slot, - }); - } - - // check that the slot of the envelope matches the slot of the parent block - if envelope.slot != block.slot() { - return Err(EnvelopeError::SlotMismatch { - block: block.slot(), - envelope: envelope.slot, - }); - } - - // builder index matches committed bid - if envelope.builder_index != execution_bid.builder_index { - return Err(EnvelopeError::BuilderIndexMismatch { - committed_bid: execution_bid.builder_index, - envelope: envelope.builder_index, - }); - } - - // the block hash should match the block hash of the execution bid - if payload.block_hash != execution_bid.block_hash { - return Err(EnvelopeError::BlockHashMismatch { - committed_bid: execution_bid.block_hash, - envelope: payload.block_hash, - }); - } + verify_envelope_consistency(envelope, &block, execution_bid, latest_finalized_slot)?; // Verify the envelope signature. // @@ -353,3 +369,154 @@ impl BeaconChain { .map_err(BeaconChainError::TokioJoin)? } } + +#[cfg(test)] +mod tests { + use std::marker::PhantomData; + + use bls::Signature; + use ssz_types::VariableList; + use types::{ + BeaconBlock, BeaconBlockBodyGloas, BeaconBlockGloas, Eth1Data, ExecutionBlockHash, + ExecutionPayloadBid, ExecutionPayloadEnvelope, ExecutionPayloadGloas, ExecutionRequests, + Graffiti, Hash256, MinimalEthSpec, SignedBeaconBlock, SignedExecutionPayloadBid, Slot, + SyncAggregate, + }; + + use super::verify_envelope_consistency; + use crate::payload_envelope_verification::EnvelopeError; + + type E = MinimalEthSpec; + + fn make_envelope( + slot: Slot, + builder_index: u64, + block_hash: ExecutionBlockHash, + ) -> ExecutionPayloadEnvelope { + ExecutionPayloadEnvelope { + payload: ExecutionPayloadGloas { + block_hash, + ..ExecutionPayloadGloas::default() + }, + execution_requests: ExecutionRequests::default(), + builder_index, + beacon_block_root: Hash256::ZERO, + slot, + state_root: Hash256::ZERO, + } + } + + fn make_block(slot: Slot) -> SignedBeaconBlock { + let block = BeaconBlock::Gloas(BeaconBlockGloas { + slot, + proposer_index: 0, + parent_root: Hash256::ZERO, + state_root: Hash256::ZERO, + body: BeaconBlockBodyGloas { + randao_reveal: Signature::empty(), + eth1_data: Eth1Data { + deposit_root: Hash256::ZERO, + block_hash: Hash256::ZERO, + deposit_count: 0, + }, + graffiti: Graffiti::default(), + proposer_slashings: VariableList::empty(), + attester_slashings: VariableList::empty(), + attestations: VariableList::empty(), + deposits: VariableList::empty(), + voluntary_exits: VariableList::empty(), + sync_aggregate: SyncAggregate::empty(), + bls_to_execution_changes: VariableList::empty(), + signed_execution_payload_bid: SignedExecutionPayloadBid::empty(), + payload_attestations: VariableList::empty(), + _phantom: PhantomData, + }, + }); + SignedBeaconBlock::from_block(block, Signature::empty()) + } + + fn make_bid(builder_index: u64, block_hash: ExecutionBlockHash) -> ExecutionPayloadBid { + ExecutionPayloadBid { + builder_index, + block_hash, + ..ExecutionPayloadBid::default() + } + } + + #[test] + fn test_valid_envelope() { + let slot = Slot::new(10); + let builder_index = 5; + let block_hash = ExecutionBlockHash::repeat_byte(0xaa); + + let envelope = make_envelope(slot, builder_index, block_hash); + let block = make_block(slot); + let bid = make_bid(builder_index, block_hash); + + assert!(verify_envelope_consistency::(&envelope, &block, &bid, Slot::new(0)).is_ok()); + } + + #[test] + fn test_prior_to_finalization() { + let slot = Slot::new(5); + let builder_index = 1; + let block_hash = ExecutionBlockHash::repeat_byte(0xbb); + + let envelope = make_envelope(slot, builder_index, block_hash); + let block = make_block(slot); + let bid = make_bid(builder_index, block_hash); + let latest_finalized_slot = Slot::new(10); + + let result = + verify_envelope_consistency::(&envelope, &block, &bid, latest_finalized_slot); + assert!(matches!( + result, + Err(EnvelopeError::PriorToFinalization { .. }) + )); + } + + #[test] + fn test_slot_mismatch() { + let builder_index = 1; + let block_hash = ExecutionBlockHash::repeat_byte(0xcc); + + let envelope = make_envelope(Slot::new(10), builder_index, block_hash); + let block = make_block(Slot::new(20)); + let bid = make_bid(builder_index, block_hash); + + let result = verify_envelope_consistency::(&envelope, &block, &bid, Slot::new(0)); + assert!(matches!(result, Err(EnvelopeError::SlotMismatch { .. }))); + } + + #[test] + fn test_builder_index_mismatch() { + let slot = Slot::new(10); + let block_hash = ExecutionBlockHash::repeat_byte(0xdd); + + let envelope = make_envelope(slot, 1, block_hash); + let block = make_block(slot); + let bid = make_bid(2, block_hash); + + let result = verify_envelope_consistency::(&envelope, &block, &bid, Slot::new(0)); + assert!(matches!( + result, + Err(EnvelopeError::BuilderIndexMismatch { .. }) + )); + } + + #[test] + fn test_block_hash_mismatch() { + let slot = Slot::new(10); + let builder_index = 1; + + let envelope = make_envelope(slot, builder_index, ExecutionBlockHash::repeat_byte(0xee)); + let block = make_block(slot); + let bid = make_bid(builder_index, ExecutionBlockHash::repeat_byte(0xff)); + + let result = verify_envelope_consistency::(&envelope, &block, &bid, Slot::new(0)); + assert!(matches!( + result, + Err(EnvelopeError::BlockHashMismatch { .. }) + )); + } +} diff --git a/beacon_node/beacon_chain/src/payload_envelope_verification/mod.rs b/beacon_node/beacon_chain/src/payload_envelope_verification/mod.rs index 38fdd9f425..ae5dbfccc0 100644 --- a/beacon_node/beacon_chain/src/payload_envelope_verification/mod.rs +++ b/beacon_node/beacon_chain/src/payload_envelope_verification/mod.rs @@ -49,9 +49,6 @@ pub mod gossip_verified_envelope; pub mod import; mod payload_notifier; -#[cfg(test)] -mod tests; - pub trait IntoExecutionPendingEnvelope: Sized { fn into_execution_pending_envelope( self, diff --git a/beacon_node/beacon_chain/src/payload_envelope_verification/tests.rs b/beacon_node/beacon_chain/src/payload_envelope_verification/tests.rs deleted file mode 100644 index c362bc6180..0000000000 --- a/beacon_node/beacon_chain/src/payload_envelope_verification/tests.rs +++ /dev/null @@ -1,524 +0,0 @@ -use std::sync::Arc; -use std::time::Duration; - -use bls::{FixedBytesExtended, Keypair, Signature}; -use fork_choice::ForkChoice; -use parking_lot::{Mutex, RwLock}; -use ssz_types::VariableList; -use store::{HotColdDB, KeyValueStore, MemoryStore, StoreConfig}; -use types::consts::gloas::BUILDER_INDEX_SELF_BUILD; -use types::test_utils::generate_deterministic_keypairs; -use types::*; - -use crate::BeaconStore; -use crate::beacon_fork_choice_store::BeaconForkChoiceStore; -use crate::beacon_proposer_cache::BeaconProposerCache; -use crate::builder::Witness; -use crate::canonical_head::CanonicalHead; -use crate::payload_envelope_verification::EnvelopeError; -use crate::payload_envelope_verification::gossip_verified_envelope::{ - GossipVerificationContext, GossipVerifiedEnvelope, -}; -use crate::validator_pubkey_cache::ValidatorPubkeyCache; - -type TestEthSpec = MinimalEthSpec; -type TestTypes = Witness< - slot_clock::TestingSlotClock, - TestEthSpec, - MemoryStore, - MemoryStore, ->; - -/// Test context that holds the minimal state needed for gossip verification. -struct TestContext { - store: BeaconStore, - canonical_head: CanonicalHead, - beacon_proposer_cache: Mutex, - validator_pubkey_cache: RwLock>, - spec: Arc, - keypairs: Vec, - genesis_state: BeaconState, - genesis_block_root: Hash256, - genesis_validators_root: Hash256, -} - -impl TestContext { - fn new(validator_count: usize) -> Self { - let spec = Arc::new(ForkName::Gloas.make_genesis_spec(ChainSpec::minimal())); - let keypairs = generate_deterministic_keypairs(validator_count); - - let mut genesis_state = genesis::interop_genesis_state::( - &keypairs, - 0, // genesis_time - Hash256::from_slice(&[0x42; 32]), - None, // no execution payload header - &spec, - ) - .expect("should create genesis state"); - - let genesis_validators_root = genesis_state.genesis_validators_root(); - - let store: BeaconStore = Arc::new( - HotColdDB::open_ephemeral(StoreConfig::default(), spec.clone()) - .expect("should create ephemeral store"), - ); - - // Initialize store metadata. - let genesis_block = BeaconBlock::::empty(&spec); - let genesis_block_root = genesis_block.canonical_root(); - let signed_genesis_block = SignedBeaconBlock::from_block(genesis_block, Signature::empty()); - - // Build caches and compute state root before storing. - genesis_state - .build_caches(&spec) - .expect("should build caches"); - - // Initialize store metadata ops (must be done before put_state). - let ops = vec![ - store - .init_anchor_info( - signed_genesis_block.parent_root(), - signed_genesis_block.slot(), - Slot::new(0), - false, - ) - .expect("should init anchor info"), - store - .init_blob_info(signed_genesis_block.slot()) - .expect("should init blob info"), - store - .init_data_column_info(signed_genesis_block.slot()) - .expect("should init data column info"), - ]; - store - .hot_db - .do_atomically(ops) - .expect("should store metadata"); - - // Store the genesis block and state. - store - .put_block(&genesis_block_root, signed_genesis_block.clone()) - .expect("should store genesis block"); - let state_root = genesis_state - .update_tree_hash_cache() - .expect("should compute state root"); - store - .put_state(&state_root, &genesis_state) - .expect("should store genesis state"); - - // Create BeaconSnapshot and fork choice. - let snapshot = crate::BeaconSnapshot { - beacon_block: Arc::new(signed_genesis_block), - beacon_block_root: genesis_block_root, - beacon_state: genesis_state.clone(), - }; - - let fc_store = BeaconForkChoiceStore::get_forkchoice_store(store.clone(), snapshot.clone()) - .expect("should create fork choice store"); - - let fork_choice = ForkChoice::from_anchor( - fc_store, - genesis_block_root, - &snapshot.beacon_block, - &snapshot.beacon_state, - None, - &spec, - ) - .expect("should create fork choice from anchor"); - - let canonical_head = CanonicalHead::new(fork_choice, Arc::new(snapshot)); - - let validator_pubkey_cache = ValidatorPubkeyCache::new(&genesis_state, store.clone()) - .expect("should create validator pubkey cache"); - - TestContext { - store, - canonical_head, - beacon_proposer_cache: Mutex::new(BeaconProposerCache::default()), - validator_pubkey_cache: RwLock::new(validator_pubkey_cache), - spec, - keypairs, - genesis_state, - genesis_block_root, - genesis_validators_root, - } - } - - fn gossip_verification_context(&self) -> GossipVerificationContext<'_, TestTypes> { - GossipVerificationContext { - canonical_head: &self.canonical_head, - store: &self.store, - spec: &self.spec, - beacon_proposer_cache: &self.beacon_proposer_cache, - validator_pubkey_cache: &self.validator_pubkey_cache, - genesis_validators_root: self.genesis_validators_root, - } - } - - /// Build a gloas block at `slot` with the given proposer, store it, add it to fork choice, - /// and return the signed block, block root, and post-state. - fn build_and_import_block( - &self, - slot: Slot, - proposer_index: usize, - execution_bid: ExecutionPayloadBid, - ) -> ( - Arc>, - Hash256, - BeaconState, - ) { - let mut state = self.genesis_state.clone(); - - // Advance the state to the target slot. - if slot > state.slot() { - state_processing::state_advance::complete_state_advance( - &mut state, None, slot, &self.spec, - ) - .expect("should advance state"); - } - - state.build_caches(&self.spec).expect("should build caches"); - - // Compute the state root so we can embed it in the block. - let state_root = state - .update_tree_hash_cache() - .expect("should compute state root"); - - let signed_bid = SignedExecutionPayloadBid { - message: execution_bid, - signature: Signature::infinity().expect("should create infinity signature"), - }; - - // Create the block body with the actual state root. - let block = BeaconBlock::Gloas(BeaconBlockGloas { - slot, - proposer_index: proposer_index as u64, - parent_root: self.genesis_block_root, - state_root, - body: BeaconBlockBodyGloas { - randao_reveal: Signature::empty(), - eth1_data: state.eth1_data().clone(), - graffiti: Graffiti::default(), - proposer_slashings: VariableList::empty(), - attester_slashings: VariableList::empty(), - attestations: VariableList::empty(), - deposits: VariableList::empty(), - voluntary_exits: VariableList::empty(), - sync_aggregate: SyncAggregate::empty(), - bls_to_execution_changes: VariableList::empty(), - signed_execution_payload_bid: signed_bid, - payload_attestations: VariableList::empty(), - _phantom: std::marker::PhantomData, - }, - }); - - let block_root = block.canonical_root(); - let proposer_sk = &self.keypairs[proposer_index].sk; - let fork = self - .spec - .fork_at_epoch(slot.epoch(TestEthSpec::slots_per_epoch())); - let signed_block = block.sign(proposer_sk, &fork, self.genesis_validators_root, &self.spec); - - // Store block and state. - self.store - .put_block(&block_root, signed_block.clone()) - .expect("should store block"); - self.store - .put_state(&state_root, &state) - .expect("should store state"); - - // Add block to fork choice. - let mut fork_choice = self.canonical_head.fork_choice_write_lock(); - fork_choice - .on_block( - slot, - signed_block.message(), - block_root, - Duration::from_secs(0), - &state, - crate::PayloadVerificationStatus::Verified, - &self.spec, - ) - .expect("should add block to fork choice"); - drop(fork_choice); - - (Arc::new(signed_block), block_root, state) - } - - /// Build a signed execution payload envelope for the given block. - fn build_signed_envelope( - &self, - block_root: Hash256, - slot: Slot, - builder_index: u64, - block_hash: ExecutionBlockHash, - signing_key: &bls::SecretKey, - ) -> Arc> { - let mut payload = ExecutionPayloadGloas::default(); - payload.block_hash = block_hash; - - let envelope = ExecutionPayloadEnvelope { - payload, - execution_requests: ExecutionRequests::default(), - builder_index, - beacon_block_root: block_root, - slot, - state_root: Hash256::zero(), - }; - - let fork = self - .spec - .fork_at_epoch(slot.epoch(TestEthSpec::slots_per_epoch())); - let domain = self.spec.get_domain( - slot.epoch(TestEthSpec::slots_per_epoch()), - Domain::BeaconBuilder, - &fork, - self.genesis_validators_root, - ); - let message = envelope.signing_root(domain); - let signature = signing_key.sign(message); - - Arc::new(SignedExecutionPayloadEnvelope { - message: envelope, - signature, - }) - } - - /// Helper: build a block and matching self-build envelope. - fn build_block_and_envelope( - &self, - ) -> ( - Arc>, - Hash256, - Arc>, - ) { - let slot = Slot::new(1); - let block_hash = ExecutionBlockHash::from_root(Hash256::from_slice(&[0xaa; 32])); - - // Get proposer for slot 1. - let mut state = self.genesis_state.clone(); - state_processing::state_advance::complete_state_advance(&mut state, None, slot, &self.spec) - .expect("should advance state"); - state.build_caches(&self.spec).expect("should build caches"); - let proposer_index = state - .get_beacon_proposer_index(slot, &self.spec) - .expect("should get proposer index"); - - let bid = ExecutionPayloadBid { - builder_index: BUILDER_INDEX_SELF_BUILD, - block_hash, - slot, - ..Default::default() - }; - - let (signed_block, block_root, _post_state) = - self.build_and_import_block(slot, proposer_index, bid); - - let proposer_sk = &self.keypairs[proposer_index].sk; - let envelope = self.build_signed_envelope( - block_root, - slot, - BUILDER_INDEX_SELF_BUILD, - block_hash, - proposer_sk, - ); - - (signed_block, block_root, envelope) - } -} - -#[test] -fn test_valid_self_build_envelope() { - let ctx = TestContext::new(32); - let (_block, _block_root, envelope) = ctx.build_block_and_envelope(); - let gossip_ctx = ctx.gossip_verification_context(); - - let result = GossipVerifiedEnvelope::new(envelope, &gossip_ctx); - assert!( - result.is_ok(), - "valid self-build envelope should pass verification, got: {:?}", - result.err() - ); -} - -#[test] -fn test_unknown_block_root() { - let ctx = TestContext::new(32); - let gossip_ctx = ctx.gossip_verification_context(); - - // Build an envelope referencing a block root not in fork choice. - let unknown_root = Hash256::from_slice(&[0xff; 32]); - let envelope = ctx.build_signed_envelope( - unknown_root, - Slot::new(1), - BUILDER_INDEX_SELF_BUILD, - ExecutionBlockHash::from_root(Hash256::zero()), - &ctx.keypairs[0].sk, - ); - - let result = GossipVerifiedEnvelope::new(envelope, &gossip_ctx); - assert!( - matches!(result, Err(EnvelopeError::BlockRootUnknown { .. })), - "should reject envelope with unknown block root, got: {:?}", - result - ); -} - -#[test] -fn test_slot_mismatch() { - let ctx = TestContext::new(32); - let (_block, block_root, _good_envelope) = ctx.build_block_and_envelope(); - let gossip_ctx = ctx.gossip_verification_context(); - - // Build an envelope with a different slot than the block. - let wrong_slot = Slot::new(2); - let envelope = ctx.build_signed_envelope( - block_root, - wrong_slot, - BUILDER_INDEX_SELF_BUILD, - ExecutionBlockHash::from_root(Hash256::from_slice(&[0xaa; 32])), - &ctx.keypairs[0].sk, - ); - - let result = GossipVerifiedEnvelope::new(envelope, &gossip_ctx); - assert!( - matches!(result, Err(EnvelopeError::SlotMismatch { .. })), - "should reject envelope with slot mismatch, got: {:?}", - result - ); -} - -#[test] -fn test_builder_index_mismatch() { - let ctx = TestContext::new(32); - let gossip_ctx = ctx.gossip_verification_context(); - - let slot = Slot::new(1); - let block_hash = ExecutionBlockHash::from_root(Hash256::from_slice(&[0xaa; 32])); - - // Get proposer for slot 1. - let mut state = ctx.genesis_state.clone(); - state_processing::state_advance::complete_state_advance(&mut state, None, slot, &ctx.spec) - .expect("should advance state"); - state.build_caches(&ctx.spec).expect("should build caches"); - let proposer_index = state - .get_beacon_proposer_index(slot, &ctx.spec) - .expect("should get proposer index"); - - // Block has builder_index = BUILDER_INDEX_SELF_BUILD - let bid = ExecutionPayloadBid { - builder_index: BUILDER_INDEX_SELF_BUILD, - block_hash, - slot, - ..Default::default() - }; - let (_block, block_root, _post_state) = ctx.build_and_import_block(slot, proposer_index, bid); - - // Envelope has a different builder_index. - let wrong_builder_index = 999; - let envelope = ctx.build_signed_envelope( - block_root, - slot, - wrong_builder_index, - block_hash, - &ctx.keypairs[proposer_index].sk, - ); - - let result = GossipVerifiedEnvelope::new(envelope, &gossip_ctx); - assert!( - matches!(result, Err(EnvelopeError::BuilderIndexMismatch { .. })), - "should reject envelope with builder index mismatch, got: {:?}", - result - ); -} - -#[test] -fn test_block_hash_mismatch() { - let ctx = TestContext::new(32); - let gossip_ctx = ctx.gossip_verification_context(); - - let slot = Slot::new(1); - let block_hash = ExecutionBlockHash::from_root(Hash256::from_slice(&[0xaa; 32])); - let wrong_block_hash = ExecutionBlockHash::from_root(Hash256::from_slice(&[0xbb; 32])); - - // Get proposer for slot 1. - let mut state = ctx.genesis_state.clone(); - state_processing::state_advance::complete_state_advance(&mut state, None, slot, &ctx.spec) - .expect("should advance state"); - state.build_caches(&ctx.spec).expect("should build caches"); - let proposer_index = state - .get_beacon_proposer_index(slot, &ctx.spec) - .expect("should get proposer index"); - - // Block has block_hash = 0xaa - let bid = ExecutionPayloadBid { - builder_index: BUILDER_INDEX_SELF_BUILD, - block_hash, - slot, - ..Default::default() - }; - let (_block, block_root, _post_state) = ctx.build_and_import_block(slot, proposer_index, bid); - - // Envelope has a different block_hash. - let envelope = ctx.build_signed_envelope( - block_root, - slot, - BUILDER_INDEX_SELF_BUILD, - wrong_block_hash, - &ctx.keypairs[proposer_index].sk, - ); - - let result = GossipVerifiedEnvelope::new(envelope, &gossip_ctx); - assert!( - matches!(result, Err(EnvelopeError::BlockHashMismatch { .. })), - "should reject envelope with block hash mismatch, got: {:?}", - result - ); -} - -#[test] -fn test_bad_signature() { - let ctx = TestContext::new(32); - let gossip_ctx = ctx.gossip_verification_context(); - - let slot = Slot::new(1); - let block_hash = ExecutionBlockHash::from_root(Hash256::from_slice(&[0xaa; 32])); - - // Get proposer for slot 1. - let mut state = ctx.genesis_state.clone(); - state_processing::state_advance::complete_state_advance(&mut state, None, slot, &ctx.spec) - .expect("should advance state"); - state.build_caches(&ctx.spec).expect("should build caches"); - let proposer_index = state - .get_beacon_proposer_index(slot, &ctx.spec) - .expect("should get proposer index"); - - let bid = ExecutionPayloadBid { - builder_index: BUILDER_INDEX_SELF_BUILD, - block_hash, - slot, - ..Default::default() - }; - let (_block, block_root, _post_state) = ctx.build_and_import_block(slot, proposer_index, bid); - - // Sign the envelope with the wrong key (some other validator's key). - let wrong_key_index = if proposer_index == 0 { 1 } else { 0 }; - let envelope = ctx.build_signed_envelope( - block_root, - slot, - BUILDER_INDEX_SELF_BUILD, - block_hash, - &ctx.keypairs[wrong_key_index].sk, - ); - - let result = GossipVerifiedEnvelope::new(envelope, &gossip_ctx); - assert!( - matches!(result, Err(EnvelopeError::BadSignature)), - "should reject envelope with bad signature, got: {:?}", - result - ); -} - -// NOTE: `test_prior_to_finalization` is omitted here because advancing finalization requires -// attestation-based justification which needs the full `BeaconChainHarness`. The -// `PriorToFinalization` code path is tested in the integration tests. From 6e89ba63be49ff34e4ab70a9ba82b2aee4841f98 Mon Sep 17 00:00:00 2001 From: Eitan Seri- Levi Date: Tue, 24 Feb 2026 10:51:09 -0800 Subject: [PATCH 24/35] Added slot to logs --- beacon_node/beacon_chain/src/execution_payload.rs | 12 ++++-------- .../payload_notifier.rs | 2 +- 2 files changed, 5 insertions(+), 9 deletions(-) diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index b2d00a530a..cf6c5d83b4 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -118,6 +118,7 @@ impl PayloadNotifier { notify_new_payload( &self.chain, self.block.message().tree_hash_root(), + self.block.message().slot(), self.block.message().try_into()?, ) .await @@ -137,6 +138,7 @@ impl PayloadNotifier { pub async fn notify_new_payload( chain: &Arc>, beacon_block_root: Hash256, + slot: Slot, new_payload_request: NewPayloadRequest<'_, T::EthSpec>, ) -> Result { let execution_layer = chain @@ -163,11 +165,8 @@ pub async fn notify_new_payload( ?validation_error, ?latest_valid_hash, ?execution_block_hash, - // TODO(gloas) are these other logs important? root = ?beacon_block_root, - // graffiti = block.body().graffiti().as_utf8_lossy(), - // proposer_index = block.proposer_index(), - // slot = %block.slot(), + %slot, method = "new_payload", "Invalid execution payload" ); @@ -209,11 +208,8 @@ pub async fn notify_new_payload( warn!( ?validation_error, ?execution_block_hash, - // TODO(gloas) are these other logs important? root = ?beacon_block_root, - // graffiti = block.body().graffiti().as_utf8_lossy(), - // proposer_index = block.proposer_index(), - // slot = %block.slot(), + %slot, method = "new_payload", "Invalid execution payload block hash" ); diff --git a/beacon_node/beacon_chain/src/payload_envelope_verification/payload_notifier.rs b/beacon_node/beacon_chain/src/payload_envelope_verification/payload_notifier.rs index 5b1f332b5a..a468bc5bc4 100644 --- a/beacon_node/beacon_chain/src/payload_envelope_verification/payload_notifier.rs +++ b/beacon_node/beacon_chain/src/payload_envelope_verification/payload_notifier.rs @@ -62,7 +62,7 @@ impl PayloadNotifier { } else { let block_root = self.envelope.message.beacon_block_root; let request = Self::build_new_payload_request(&self.envelope, &self.block)?; - notify_new_payload(&self.chain, block_root, request).await + notify_new_payload(&self.chain, block_root, self.envelope.slot(), request).await } } From 147f2e22e02eadbbec21d532543ae5293086a9d3 Mon Sep 17 00:00:00 2001 From: Eitan Seri- Levi Date: Tue, 24 Feb 2026 10:59:03 -0800 Subject: [PATCH 25/35] use cached head and drop fork choice read lock earlier --- .../gossip_verified_envelope.rs | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/beacon_node/beacon_chain/src/payload_envelope_verification/gossip_verified_envelope.rs b/beacon_node/beacon_chain/src/payload_envelope_verification/gossip_verified_envelope.rs index 9d555e8ad2..0c2ae6dd56 100644 --- a/beacon_node/beacon_chain/src/payload_envelope_verification/gossip_verified_envelope.rs +++ b/beacon_node/beacon_chain/src/payload_envelope_verification/gossip_verified_envelope.rs @@ -118,13 +118,15 @@ impl GossipVerifiedEnvelope { }); }; - let latest_finalized_slot = fork_choice_read_lock + drop(fork_choice_read_lock); + + let latest_finalized_slot = ctx + .canonical_head + .cached_head() .finalized_checkpoint() .epoch .start_slot(T::EthSpec::slots_per_epoch()); - drop(fork_choice_read_lock); - // TODO(EIP-7732): check that we haven't seen another valid `SignedExecutionPayloadEnvelope` // for this block root from this builder - envelope status table check let block = match ctx.store.try_get_full_block(&beacon_block_root)? { From fc7d6c9d24fc9a59c528b37295b0c2a763172514 Mon Sep 17 00:00:00 2001 From: Eitan Seri- Levi Date: Tue, 24 Feb 2026 11:20:07 -0800 Subject: [PATCH 26/35] Add an additional defensive expected proposer check --- .../gossip_verified_envelope.rs | 15 +++++++++++++-- .../src/payload_envelope_verification/mod.rs | 5 +++++ 2 files changed, 18 insertions(+), 2 deletions(-) diff --git a/beacon_node/beacon_chain/src/payload_envelope_verification/gossip_verified_envelope.rs b/beacon_node/beacon_chain/src/payload_envelope_verification/gossip_verified_envelope.rs index 0c2ae6dd56..877dcc2c2b 100644 --- a/beacon_node/beacon_chain/src/payload_envelope_verification/gossip_verified_envelope.rs +++ b/beacon_node/beacon_chain/src/payload_envelope_verification/gossip_verified_envelope.rs @@ -106,6 +106,9 @@ impl GossipVerifiedEnvelope { // Check that we've seen the beacon block for this envelope and that it passes validation. // TODO(EIP-7732): We might need some type of status table in order to differentiate between: + // If we have a block_processing_table, we could have a Processed(Bid, bool) state that is only + // entered post adding to fork choice. That way, we could potentially need only a single call to make + // sure the block is valid and to do all consequent checks with the bid // // 1. Blocks we haven't seen (IGNORE), and // 2. Blocks we've seen that are invalid (REJECT). @@ -147,7 +150,7 @@ impl GossipVerifiedEnvelope { // Verify the envelope signature. // - // For self-build envelopes, we can use the proposer cache for the fork and the + // For self-built envelopes, we can use the proposer cache for the fork and the // validator pubkey cache for the proposer's pubkey, avoiding a state load from disk. // For external builder envelopes, we must load the state to access the builder registry. let builder_index = envelope.builder_index; @@ -157,7 +160,7 @@ impl GossipVerifiedEnvelope { proto_block.proposer_shuffling_root_for_child_block(block_epoch, ctx.spec); let (signature_is_valid, opt_snapshot) = if builder_index == BUILDER_INDEX_SELF_BUILD { - // Fast path: self-build envelopes can be verified without loading the state. + // Fast path: self-built envelopes can be verified without loading the state. let envelope_ref = signed_envelope.as_ref(); let mut opt_snapshot = None; let proposer = beacon_proposer_cache::with_proposer_cache( @@ -176,8 +179,16 @@ impl GossipVerifiedEnvelope { Ok::<_, EnvelopeError>((snapshot.state_root, snapshot.pre_state)) }, )?; + let expected_proposer = proposer.index; let fork = proposer.fork; + if block.message().proposer_index() != expected_proposer as u64 { + return Err(EnvelopeError::IncorrectBlockProposer { + block: block.message().proposer_index(), + local_shuffling: expected_proposer as u64, + }); + } + let pubkey_cache = ctx.validator_pubkey_cache.read(); let pubkey = pubkey_cache .get(block.message().proposer_index() as usize) diff --git a/beacon_node/beacon_chain/src/payload_envelope_verification/mod.rs b/beacon_node/beacon_chain/src/payload_envelope_verification/mod.rs index ae5dbfccc0..9caff959d0 100644 --- a/beacon_node/beacon_chain/src/payload_envelope_verification/mod.rs +++ b/beacon_node/beacon_chain/src/payload_envelope_verification/mod.rs @@ -206,6 +206,11 @@ pub enum EnvelopeError { committed_bid: ExecutionBlockHash, envelope: ExecutionBlockHash, }, + // The block's proposer_index does not match the locally computed proposer + IncorrectBlockProposer { + block: u64, + local_shuffling: u64, + }, // The slot belongs to a block that is from a slot prior than // the most recently finalized slot PriorToFinalization { From 30241f54c4883653bf99166907ea4523a99abfb7 Mon Sep 17 00:00:00 2001 From: Eitan Seri- Levi Date: Tue, 24 Feb 2026 11:35:01 -0800 Subject: [PATCH 27/35] add load_snapshot_from_state_root that can be used when we've already aquired a --- .../gossip_verified_envelope.rs | 15 +++-- .../src/payload_envelope_verification/mod.rs | 60 +++++++++++-------- 2 files changed, 46 insertions(+), 29 deletions(-) diff --git a/beacon_node/beacon_chain/src/payload_envelope_verification/gossip_verified_envelope.rs b/beacon_node/beacon_chain/src/payload_envelope_verification/gossip_verified_envelope.rs index 877dcc2c2b..68d6e8605e 100644 --- a/beacon_node/beacon_chain/src/payload_envelope_verification/gossip_verified_envelope.rs +++ b/beacon_node/beacon_chain/src/payload_envelope_verification/gossip_verified_envelope.rs @@ -22,7 +22,7 @@ use crate::{ payload_envelope_verification::{ EnvelopeError, EnvelopeImportData, EnvelopeProcessingSnapshot, ExecutionPendingEnvelope, IntoExecutionPendingEnvelope, MaybeAvailableEnvelope, load_snapshot, - payload_notifier::PayloadNotifier, + load_snapshot_from_state_root, payload_notifier::PayloadNotifier, }, validator_pubkey_cache::ValidatorPubkeyCache, }; @@ -161,7 +161,6 @@ impl GossipVerifiedEnvelope { let (signature_is_valid, opt_snapshot) = if builder_index == BUILDER_INDEX_SELF_BUILD { // Fast path: self-built envelopes can be verified without loading the state. - let envelope_ref = signed_envelope.as_ref(); let mut opt_snapshot = None; let proposer = beacon_proposer_cache::with_proposer_cache( ctx.beacon_proposer_cache, @@ -174,7 +173,11 @@ impl GossipVerifiedEnvelope { %beacon_block_root, "Proposer shuffling cache miss for envelope verification" ); - let snapshot = load_snapshot(envelope_ref, ctx.canonical_head, ctx.store)?; + let snapshot = load_snapshot_from_state_root::( + beacon_block_root, + proto_block.state_root, + ctx.store, + )?; opt_snapshot = Some(Box::new(snapshot.clone())); Ok::<_, EnvelopeError>((snapshot.state_root, snapshot.pre_state)) }, @@ -205,7 +208,11 @@ impl GossipVerifiedEnvelope { } else { // TODO(gloas) if we implement a builder pubkey cache, we'll need to use it here. // External builder: must load the state to get the builder pubkey. - let snapshot = load_snapshot(signed_envelope.as_ref(), ctx.canonical_head, ctx.store)?; + let snapshot = load_snapshot_from_state_root::( + beacon_block_root, + proto_block.state_root, + ctx.store, + )?; let is_valid = signed_envelope.verify_signature_with_state(&snapshot.pre_state, ctx.spec)?; (is_valid, Some(Box::new(snapshot))) diff --git a/beacon_node/beacon_chain/src/payload_envelope_verification/mod.rs b/beacon_node/beacon_chain/src/payload_envelope_verification/mod.rs index 9caff959d0..5e88d62ec1 100644 --- a/beacon_node/beacon_chain/src/payload_envelope_verification/mod.rs +++ b/beacon_node/beacon_chain/src/payload_envelope_verification/mod.rs @@ -291,35 +291,16 @@ impl From for EnvelopeError { } #[allow(clippy::type_complexity)] -#[instrument(skip_all, level = "debug", fields(beacon_block_root = %envelope.beacon_block_root()))] -pub(crate) fn load_snapshot( - envelope: &SignedExecutionPayloadEnvelope, - canonical_head: &CanonicalHead, +#[instrument(skip_all, level = "debug", fields(beacon_block_root = %beacon_block_root))] +/// Load state from store given a known state root and block root. +/// Use this when the proto block has already been looked up from fork choice. +pub(crate) fn load_snapshot_from_state_root( + beacon_block_root: Hash256, + block_state_root: Hash256, store: &BeaconStore, ) -> Result, EnvelopeError> { - // Reject any envelope if its block is not known to fork choice. - // - // A block that is not in fork choice is either: - // - // - Not yet imported: we should reject this envelope because we should only import it after its parent block - // has been fully imported. - // - Pre-finalized: if the parent block is _prior_ to finalization, we should ignore the envelope - // because it will revert finalization. Note that the finalized block is stored in fork - // choice, so we will not reject any child of the finalized block (this is relevant during - // genesis). - - let fork_choice_read_lock = canonical_head.fork_choice_read_lock(); - let beacon_block_root = envelope.beacon_block_root(); - let Some(proto_beacon_block) = fork_choice_read_lock.get_block(&beacon_block_root) else { - return Err(EnvelopeError::BlockRootUnknown { - block_root: beacon_block_root, - }); - }; - drop(fork_choice_read_lock); - // TODO(EIP-7732): add metrics here - let block_state_root = proto_beacon_block.state_root; // We can use `get_hot_state` here rather than `get_advanced_hot_state` because the envelope // must be from the same slot as its block (so no advance is required). let cache_state = true; @@ -339,6 +320,35 @@ pub(crate) fn load_snapshot( }) } +#[instrument(skip_all, level = "debug", fields(beacon_block_root = %envelope.beacon_block_root()))] +pub(crate) fn load_snapshot( + envelope: &SignedExecutionPayloadEnvelope, + canonical_head: &CanonicalHead, + store: &BeaconStore, +) -> Result, EnvelopeError> { + // Reject any envelope if its block is not known to fork choice. + // + // A block that is not in fork choice is either: + // + // - Not yet imported: we should reject this envelope because we should only import it after + // its parent block has been fully imported. + // - Pre-finalized: if the parent block is _prior_ to finalization, we should ignore the + // envelope because it will revert finalization. Note that the finalized block is stored in + // fork choice, so we will not reject any child of the finalized block (this is relevant + // during genesis). + + let fork_choice_read_lock = canonical_head.fork_choice_read_lock(); + let beacon_block_root = envelope.beacon_block_root(); + let Some(proto_beacon_block) = fork_choice_read_lock.get_block(&beacon_block_root) else { + return Err(EnvelopeError::BlockRootUnknown { + block_root: beacon_block_root, + }); + }; + drop(fork_choice_read_lock); + + load_snapshot_from_state_root::(beacon_block_root, proto_beacon_block.state_root, store) +} + impl IntoExecutionPendingEnvelope for Arc> { From 2093dc1f39c213d136cee90845bd0fe6911bb5b4 Mon Sep 17 00:00:00 2001 From: Eitan Seri- Levi Date: Tue, 24 Feb 2026 12:08:07 -0800 Subject: [PATCH 28/35] move execution pending envelolpe logic to its own file --- .../execution_pending_envelope.rs | 140 ++++++++++++++++++ .../gossip_verified_envelope.rs | 100 +------------ .../src/payload_envelope_verification/mod.rs | 40 +---- 3 files changed, 146 insertions(+), 134 deletions(-) create mode 100644 beacon_node/beacon_chain/src/payload_envelope_verification/execution_pending_envelope.rs diff --git a/beacon_node/beacon_chain/src/payload_envelope_verification/execution_pending_envelope.rs b/beacon_node/beacon_chain/src/payload_envelope_verification/execution_pending_envelope.rs new file mode 100644 index 0000000000..dbd7478568 --- /dev/null +++ b/beacon_node/beacon_chain/src/payload_envelope_verification/execution_pending_envelope.rs @@ -0,0 +1,140 @@ +use std::sync::Arc; + +use slot_clock::SlotClock; +use state_processing::{ + VerifySignatures, + envelope_processing::{VerifyStateRoot, process_execution_payload_envelope}, +}; +use types::{EthSpec, SignedExecutionPayloadEnvelope}; + +use crate::{ + BeaconChain, BeaconChainError, BeaconChainTypes, NotifyExecutionLayer, + PayloadVerificationOutcome, + block_verification::PayloadVerificationHandle, + payload_envelope_verification::{ + EnvelopeError, EnvelopeImportData, MaybeAvailableEnvelope, + gossip_verified_envelope::GossipVerifiedEnvelope, load_snapshot, + payload_notifier::PayloadNotifier, + }, +}; + +pub trait IntoExecutionPendingEnvelope: Sized { + fn into_execution_pending_envelope( + self, + chain: &Arc>, + notify_execution_layer: NotifyExecutionLayer, + ) -> Result, EnvelopeError>; + + fn envelope(&self) -> &Arc>; +} + +pub struct ExecutionPendingEnvelope { + pub signed_envelope: MaybeAvailableEnvelope, + pub import_data: EnvelopeImportData, + pub payload_verification_handle: PayloadVerificationHandle, +} + +impl IntoExecutionPendingEnvelope for GossipVerifiedEnvelope { + fn into_execution_pending_envelope( + self, + chain: &Arc>, + notify_execution_layer: NotifyExecutionLayer, + ) -> Result, EnvelopeError> { + let signed_envelope = self.signed_envelope; + let envelope = &signed_envelope.message; + let payload = &envelope.payload; + + // Verify the execution payload is valid + let payload_notifier = PayloadNotifier::new( + chain.clone(), + signed_envelope.clone(), + self.block.clone(), + notify_execution_layer, + )?; + let block_root = envelope.beacon_block_root; + let slot = self.block.slot(); + + let payload_verification_future = async move { + let chain = payload_notifier.chain.clone(); + if let Some(started_execution) = chain.slot_clock.now_duration() { + chain + .envelope_times_cache + .write() + .set_time_started_execution(block_root, slot, started_execution); + } + + let payload_verification_status = payload_notifier.notify_new_payload().await?; + Ok(PayloadVerificationOutcome { + payload_verification_status, + // This fork is after the merge so it'll never be the merge transition block + is_valid_merge_transition_block: false, + }) + }; + // Spawn the payload verification future as a new task, but don't wait for it to complete. + // The `payload_verification_future` will be awaited later to ensure verification completed + // successfully. + let payload_verification_handle = chain + .task_executor + .spawn_handle( + payload_verification_future, + "execution_payload_verification", + ) + .ok_or(BeaconChainError::RuntimeShutdown)?; + + let snapshot = if let Some(snapshot) = self.snapshot { + *snapshot + } else { + load_snapshot( + signed_envelope.as_ref(), + &chain.canonical_head, + &chain.store, + )? + }; + let mut state = snapshot.pre_state; + + // All the state modifications are done in envelope_processing + process_execution_payload_envelope( + &mut state, + Some(snapshot.state_root), + &signed_envelope, + // verify signature already done for GossipVerifiedEnvelope + VerifySignatures::False, + VerifyStateRoot::True, + &chain.spec, + )?; + + Ok(ExecutionPendingEnvelope { + signed_envelope: MaybeAvailableEnvelope::AvailabilityPending { + block_hash: payload.block_hash, + envelope: signed_envelope, + }, + import_data: EnvelopeImportData { + block_root, + block: self.block, + post_state: Box::new(state), + }, + payload_verification_handle, + }) + } + + fn envelope(&self) -> &Arc> { + &self.signed_envelope + } +} + +impl IntoExecutionPendingEnvelope + for Arc> +{ + fn into_execution_pending_envelope( + self, + chain: &Arc>, + notify_execution_layer: NotifyExecutionLayer, + ) -> Result, EnvelopeError> { + GossipVerifiedEnvelope::new(self, &chain.gossip_verification_context())? + .into_execution_pending_envelope(chain, notify_execution_layer) + } + + fn envelope(&self) -> &Arc> { + self + } +} diff --git a/beacon_node/beacon_chain/src/payload_envelope_verification/gossip_verified_envelope.rs b/beacon_node/beacon_chain/src/payload_envelope_verification/gossip_verified_envelope.rs index 68d6e8605e..8c8ee57fb4 100644 --- a/beacon_node/beacon_chain/src/payload_envelope_verification/gossip_verified_envelope.rs +++ b/beacon_node/beacon_chain/src/payload_envelope_verification/gossip_verified_envelope.rs @@ -2,11 +2,6 @@ use std::sync::Arc; use educe::Educe; use parking_lot::{Mutex, RwLock}; -use slot_clock::SlotClock; -use state_processing::{ - VerifySignatures, - envelope_processing::{VerifyStateRoot, process_execution_payload_envelope}, -}; use store::DatabaseBlock; use tracing::{Span, debug}; use types::{ @@ -15,14 +10,11 @@ use types::{ }; use crate::{ - BeaconChain, BeaconChainError, BeaconChainTypes, BeaconStore, NotifyExecutionLayer, - PayloadVerificationOutcome, + BeaconChain, BeaconChainError, BeaconChainTypes, BeaconStore, beacon_proposer_cache::{self, BeaconProposerCache}, canonical_head::CanonicalHead, payload_envelope_verification::{ - EnvelopeError, EnvelopeImportData, EnvelopeProcessingSnapshot, ExecutionPendingEnvelope, - IntoExecutionPendingEnvelope, MaybeAvailableEnvelope, load_snapshot, - load_snapshot_from_state_root, payload_notifier::PayloadNotifier, + EnvelopeError, EnvelopeProcessingSnapshot, load_snapshot_from_state_root, }, validator_pubkey_cache::ValidatorPubkeyCache, }; @@ -234,94 +226,6 @@ impl GossipVerifiedEnvelope { } } -impl IntoExecutionPendingEnvelope for GossipVerifiedEnvelope { - fn into_execution_pending_envelope( - self, - chain: &Arc>, - notify_execution_layer: NotifyExecutionLayer, - ) -> Result, EnvelopeError> { - let signed_envelope = self.signed_envelope; - let envelope = &signed_envelope.message; - let payload = &envelope.payload; - - // Verify the execution payload is valid - let payload_notifier = PayloadNotifier::new( - chain.clone(), - signed_envelope.clone(), - self.block.clone(), - notify_execution_layer, - )?; - let block_root = envelope.beacon_block_root; - let slot = self.block.slot(); - - let payload_verification_future = async move { - let chain = payload_notifier.chain.clone(); - if let Some(started_execution) = chain.slot_clock.now_duration() { - chain - .envelope_times_cache - .write() - .set_time_started_execution(block_root, slot, started_execution); - } - - let payload_verification_status = payload_notifier.notify_new_payload().await?; - Ok(PayloadVerificationOutcome { - payload_verification_status, - // This fork is after the merge so it'll never be the merge transition block - is_valid_merge_transition_block: false, - }) - }; - // Spawn the payload verification future as a new task, but don't wait for it to complete. - // The `payload_verification_future` will be awaited later to ensure verification completed - // successfully. - let payload_verification_handle = chain - .task_executor - .spawn_handle( - payload_verification_future, - "execution_payload_verification", - ) - .ok_or(BeaconChainError::RuntimeShutdown)?; - - let snapshot = if let Some(snapshot) = self.snapshot { - *snapshot - } else { - load_snapshot( - signed_envelope.as_ref(), - &chain.canonical_head, - &chain.store, - )? - }; - let mut state = snapshot.pre_state; - - // All the state modifications are done in envelope_processing - process_execution_payload_envelope( - &mut state, - Some(snapshot.state_root), - &signed_envelope, - // verify signature already done for GossipVerifiedEnvelope - VerifySignatures::False, - VerifyStateRoot::True, - &chain.spec, - )?; - - Ok(ExecutionPendingEnvelope { - signed_envelope: MaybeAvailableEnvelope::AvailabilityPending { - block_hash: payload.block_hash, - envelope: signed_envelope, - }, - import_data: EnvelopeImportData { - block_root, - block: self.block, - post_state: Box::new(state), - }, - payload_verification_handle, - }) - } - - fn envelope(&self) -> &Arc> { - &self.signed_envelope - } -} - impl BeaconChain { /// Build a `GossipVerificationContext` from this `BeaconChain`. pub fn gossip_verification_context(&self) -> GossipVerificationContext<'_, T> { diff --git a/beacon_node/beacon_chain/src/payload_envelope_verification/mod.rs b/beacon_node/beacon_chain/src/payload_envelope_verification/mod.rs index 5e88d62ec1..b5193f8e8c 100644 --- a/beacon_node/beacon_chain/src/payload_envelope_verification/mod.rs +++ b/beacon_node/beacon_chain/src/payload_envelope_verification/mod.rs @@ -39,31 +39,16 @@ use types::{ }; use crate::{ - BeaconChain, BeaconChainError, BeaconChainTypes, BeaconStore, BlockError, - ExecutionPayloadError, NotifyExecutionLayer, PayloadVerificationOutcome, - block_verification::PayloadVerificationHandle, canonical_head::CanonicalHead, - payload_envelope_verification::gossip_verified_envelope::GossipVerifiedEnvelope, + BeaconChainError, BeaconChainTypes, BeaconStore, BlockError, ExecutionPayloadError, + PayloadVerificationOutcome, canonical_head::CanonicalHead, }; +pub mod execution_pending_envelope; pub mod gossip_verified_envelope; pub mod import; mod payload_notifier; -pub trait IntoExecutionPendingEnvelope: Sized { - fn into_execution_pending_envelope( - self, - chain: &Arc>, - notify_execution_layer: NotifyExecutionLayer, - ) -> Result, EnvelopeError>; - - fn envelope(&self) -> &Arc>; -} - -pub struct ExecutionPendingEnvelope { - pub signed_envelope: MaybeAvailableEnvelope, - pub import_data: EnvelopeImportData, - pub payload_verification_handle: PayloadVerificationHandle, -} +pub use execution_pending_envelope::{ExecutionPendingEnvelope, IntoExecutionPendingEnvelope}; #[derive(PartialEq)] pub struct EnvelopeImportData { @@ -348,20 +333,3 @@ pub(crate) fn load_snapshot( load_snapshot_from_state_root::(beacon_block_root, proto_beacon_block.state_root, store) } - -impl IntoExecutionPendingEnvelope - for Arc> -{ - fn into_execution_pending_envelope( - self, - chain: &Arc>, - notify_execution_layer: NotifyExecutionLayer, - ) -> Result, EnvelopeError> { - GossipVerifiedEnvelope::new(self, &chain.gossip_verification_context())? - .into_execution_pending_envelope(chain, notify_execution_layer) - } - - fn envelope(&self) -> &Arc> { - self - } -} From 876e6899cd10c7a68374c63d0827dedddb5df5bc Mon Sep 17 00:00:00 2001 From: Eitan Seri- Levi Date: Tue, 24 Feb 2026 12:14:30 -0800 Subject: [PATCH 29/35] Some more TODOs --- .../execution_pending_envelope.rs | 2 ++ .../src/payload_envelope_verification/payload_notifier.rs | 1 + .../network/src/network_beacon_processor/gossip_methods.rs | 3 ++- 3 files changed, 5 insertions(+), 1 deletion(-) diff --git a/beacon_node/beacon_chain/src/payload_envelope_verification/execution_pending_envelope.rs b/beacon_node/beacon_chain/src/payload_envelope_verification/execution_pending_envelope.rs index dbd7478568..eea50d9fe1 100644 --- a/beacon_node/beacon_chain/src/payload_envelope_verification/execution_pending_envelope.rs +++ b/beacon_node/beacon_chain/src/payload_envelope_verification/execution_pending_envelope.rs @@ -44,6 +44,8 @@ impl IntoExecutionPendingEnvelope for GossipVerifiedEnve let envelope = &signed_envelope.message; let payload = &envelope.payload; + // TODO(gloas) + // Verify the execution payload is valid let payload_notifier = PayloadNotifier::new( chain.clone(), diff --git a/beacon_node/beacon_chain/src/payload_envelope_verification/payload_notifier.rs b/beacon_node/beacon_chain/src/payload_envelope_verification/payload_notifier.rs index a468bc5bc4..592d46022a 100644 --- a/beacon_node/beacon_chain/src/payload_envelope_verification/payload_notifier.rs +++ b/beacon_node/beacon_chain/src/payload_envelope_verification/payload_notifier.rs @@ -29,6 +29,7 @@ impl PayloadNotifier { let payload_verification_status = { let payload_message = &envelope.message; + // TODO(gloas) re-asses if optimistic syncing works similarly post-gloas match notify_execution_layer { NotifyExecutionLayer::No if chain.config.optimistic_finalized_sync => { let new_payload_request = Self::build_new_payload_request(&envelope, &block)?; diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index 89563c2ec3..f8636f5429 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -3272,7 +3272,8 @@ impl NetworkBeaconProcessor { Span::current().record("beacon_block_root", beacon_block_root.to_string()); // TODO(gloas) in process_gossip_block here we check_and_insert on the duplicate cache - // before calling gossip_verified_block + // before calling gossip_verified_block. We need this to ensure we dont try to execute the + // payload multiple times. self.process_gossip_verified_execution_payload_envelope( peer_id, From 0761da770df8383d54bdcb37866e5805d92e3065 Mon Sep 17 00:00:00 2001 From: Eitan Seri- Levi Date: Tue, 24 Feb 2026 12:15:52 -0800 Subject: [PATCH 30/35] Clean up comments --- .../src/payload_envelope_verification/import.rs | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/beacon_node/beacon_chain/src/payload_envelope_verification/import.rs b/beacon_node/beacon_chain/src/payload_envelope_verification/import.rs index 603e14446a..4ed9bc973b 100644 --- a/beacon_node/beacon_chain/src/payload_envelope_verification/import.rs +++ b/beacon_node/beacon_chain/src/payload_envelope_verification/import.rs @@ -241,11 +241,7 @@ impl BeaconChain { _payload_verification_status: PayloadVerificationStatus, parent_block: Arc>, ) -> Result { - // ----------------------------- ENVELOPE NOT YET ATTESTABLE ---------------------------------- - // Everything in this initial section is on the hot path between processing the envelope and - // being able to attest to it. DO NOT add any extra processing in this initial section - // unless it must run before fork choice. - // ----------------------------------------------------------------------------------------- + // Everything in this initial section is on the hot path for processing the envelope. let post_exec_timer = metrics::start_timer(&metrics::ENVELOPE_PROCESSING_POST_EXEC_PROCESSING); @@ -276,12 +272,10 @@ impl BeaconChain { // TODO(gloas) emit SSE event if the payload became the new head payload drop(post_exec_timer); - // ---------------------------- ENVELOPE PROBABLY ATTESTABLE ---------------------------------- // It is important NOT to return errors here before the database commit, because the envelope // has already been added to fork choice and the database would be left in an inconsistent // state if we returned early without committing. In other words, an error here would // corrupt the node's database permanently. - // ----------------------------------------------------------------------------------------- // Store the envelope and its state, and execute the confirmation batch for the intermediate // states, which will delete their temporary flags. From 38ef0d07e548c4eb6d6597e3f3bd713828b35f6f Mon Sep 17 00:00:00 2001 From: Eitan Seri- Levi Date: Tue, 24 Feb 2026 12:17:12 -0800 Subject: [PATCH 31/35] Update TODO --- .../beacon_chain/src/payload_envelope_verification/import.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/beacon_node/beacon_chain/src/payload_envelope_verification/import.rs b/beacon_node/beacon_chain/src/payload_envelope_verification/import.rs index 4ed9bc973b..05a18a6d18 100644 --- a/beacon_node/beacon_chain/src/payload_envelope_verification/import.rs +++ b/beacon_node/beacon_chain/src/payload_envelope_verification/import.rs @@ -28,6 +28,7 @@ impl BeaconChain { /// Items that implement `IntoExecutionPendingEnvelope` include: /// /// - `GossipVerifiedEnvelope` + /// - TODO(gloas) implement for envelopes recieved over RPC /// /// ## Errors /// From e59f1f03effdef50b4b2fcdbe8918ad1d2e34f87 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Wed, 25 Feb 2026 07:53:33 +1100 Subject: [PATCH 32/35] Add debug spans to DB write paths (#8895) Co-Authored-By: Jimmy Chen --- .../beacon_chain/src/historical_blocks.rs | 17 ++++++++++---- beacon_node/store/src/hot_cold_store.rs | 22 ++++++++++++++----- 2 files changed, 29 insertions(+), 10 deletions(-) diff --git a/beacon_node/beacon_chain/src/historical_blocks.rs b/beacon_node/beacon_chain/src/historical_blocks.rs index 3a3c3739c7..1dae2258f6 100644 --- a/beacon_node/beacon_chain/src/historical_blocks.rs +++ b/beacon_node/beacon_chain/src/historical_blocks.rs @@ -12,7 +12,7 @@ use std::time::Duration; use store::metadata::DataColumnInfo; use store::{AnchorInfo, BlobInfo, DBColumn, Error as StoreError, KeyValueStore, KeyValueStoreOp}; use strum::IntoStaticStr; -use tracing::{debug, instrument}; +use tracing::{debug, debug_span, instrument}; use types::{Hash256, Slot}; /// Use a longer timeout on the pubkey cache. @@ -256,9 +256,18 @@ impl BeaconChain { // Write the I/O batches to disk, writing the blocks themselves first, as it's better // for the hot DB to contain extra blocks than for the cold DB to point to blocks that // do not exist. - self.store.blobs_db.do_atomically(blob_batch)?; - self.store.hot_db.do_atomically(hot_batch)?; - self.store.cold_db.do_atomically(cold_batch)?; + { + let _span = debug_span!("backfill_write_blobs_db").entered(); + self.store.blobs_db.do_atomically(blob_batch)?; + } + { + let _span = debug_span!("backfill_write_hot_db").entered(); + self.store.hot_db.do_atomically(hot_batch)?; + } + { + let _span = debug_span!("backfill_write_cold_db").entered(); + self.store.cold_db.do_atomically(cold_batch)?; + } let mut anchor_and_blob_batch = Vec::with_capacity(3); diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 4d00ed1c4a..fe3477dbfe 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -38,7 +38,7 @@ use std::num::NonZeroUsize; use std::path::Path; use std::sync::Arc; use std::time::Duration; -use tracing::{debug, error, info, instrument, warn}; +use tracing::{debug, debug_span, error, info, instrument, warn}; use typenum::Unsigned; use types::data::{ColumnIndex, DataColumnSidecar, DataColumnSidecarList}; use types::*; @@ -1510,14 +1510,24 @@ impl, Cold: ItemStore> HotColdDB let blob_cache_ops = blobs_ops.clone(); // Try to execute blobs store ops. - self.blobs_db - .do_atomically(self.convert_to_kv_batch(blobs_ops)?)?; + let kv_blob_ops = self.convert_to_kv_batch(blobs_ops)?; + { + let _span = debug_span!("write_blobs_db").entered(); + self.blobs_db.do_atomically(kv_blob_ops)?; + } let hot_db_cache_ops = hot_db_ops.clone(); // Try to execute hot db store ops. - let tx_res = match self.convert_to_kv_batch(hot_db_ops) { - Ok(kv_store_ops) => self.hot_db.do_atomically(kv_store_ops), - Err(e) => Err(e), + let tx_res = { + let _convert_span = debug_span!("convert_hot_db_ops").entered(); + match self.convert_to_kv_batch(hot_db_ops) { + Ok(kv_store_ops) => { + drop(_convert_span); + let _span = debug_span!("write_hot_db").entered(); + self.hot_db.do_atomically(kv_store_ops) + } + Err(e) => Err(e), + } }; // Rollback on failure if let Err(e) = tx_res { From d6bf53834f2646c640bda9838b3033850cd484cc Mon Sep 17 00:00:00 2001 From: Lion - dapplion <35266934+dapplion@users.noreply.github.com> Date: Tue, 24 Feb 2026 20:20:28 -0700 Subject: [PATCH 33/35] Remove merge transition code (#8761) Co-Authored-By: dapplion <35266934+dapplion@users.noreply.github.com> --- .../beacon_chain/src/beacon_block_streamer.rs | 29 +- beacon_node/beacon_chain/src/beacon_chain.rs | 84 +--- .../beacon_chain/src/bellatrix_readiness.rs | 171 +------- .../beacon_chain/src/block_verification.rs | 74 +--- .../overflow_lru_cache.rs | 28 +- .../beacon_chain/src/execution_payload.rs | 143 +------ .../src/otb_verification_service.rs | 369 ------------------ beacon_node/beacon_chain/src/test_utils.rs | 120 +++++- .../tests/attestation_verification.rs | 10 +- beacon_node/beacon_chain/tests/bellatrix.rs | 212 ---------- .../beacon_chain/tests/block_verification.rs | 9 +- beacon_node/beacon_chain/tests/capella.rs | 156 -------- beacon_node/beacon_chain/tests/events.rs | 4 +- beacon_node/beacon_chain/tests/main.rs | 2 - .../tests/payload_invalidation.rs | 117 +----- beacon_node/beacon_chain/tests/store_tests.rs | 220 +++++++++++ beacon_node/client/src/builder.rs | 2 +- beacon_node/client/src/notifier.rs | 90 +---- beacon_node/execution_layer/src/lib.rs | 353 +---------------- beacon_node/execution_layer/src/metrics.rs | 2 - .../test_utils/execution_block_generator.rs | 123 +----- .../src/test_utils/mock_execution_layer.rs | 62 +-- .../execution_layer/src/test_utils/mod.rs | 23 -- beacon_node/http_api/src/lib.rs | 22 +- .../tests/broadcast_validation_tests.rs | 64 ++- beacon_node/http_api/tests/fork_tests.rs | 2 +- .../http_api/tests/interactive_tests.rs | 6 - beacon_node/http_api/tests/status_tests.rs | 7 - beacon_node/http_api/tests/tests.rs | 9 - beacon_node/operation_pool/src/lib.rs | 225 ++++------- .../src/per_block_processing/tests.rs | 226 +++-------- .../src/per_epoch_processing/tests.rs | 4 +- consensus/types/tests/committee_cache.rs | 1 + consensus/types/tests/state.rs | 1 + lcli/src/mock_el.rs | 9 +- .../src/test_rig.rs | 20 +- testing/state_transition_vectors/src/exit.rs | 5 +- testing/state_transition_vectors/src/main.rs | 1 + validator_manager/src/exit_validators.rs | 9 +- 39 files changed, 581 insertions(+), 2433 deletions(-) delete mode 100644 beacon_node/beacon_chain/src/otb_verification_service.rs delete mode 100644 beacon_node/beacon_chain/tests/bellatrix.rs delete mode 100644 beacon_node/beacon_chain/tests/capella.rs diff --git a/beacon_node/beacon_chain/src/beacon_block_streamer.rs b/beacon_node/beacon_chain/src/beacon_block_streamer.rs index edbdd6d4d9..9ddc50a9f7 100644 --- a/beacon_node/beacon_chain/src/beacon_block_streamer.rs +++ b/beacon_node/beacon_chain/src/beacon_block_streamer.rs @@ -686,7 +686,6 @@ mod tests { use crate::beacon_block_streamer::{BeaconBlockStreamer, CheckCaches}; use crate::test_utils::{BeaconChainHarness, EphemeralHarnessType, test_spec}; use bls::Keypair; - use execution_layer::test_utils::Block; use fixed_bytes::FixedBytesExtended; use std::sync::Arc; use std::sync::LazyLock; @@ -720,7 +719,7 @@ mod tests { async fn check_all_blocks_from_altair_to_fulu() { let slots_per_epoch = MinimalEthSpec::slots_per_epoch() as usize; let num_epochs = 12; - let bellatrix_fork_epoch = 2usize; + let bellatrix_fork_epoch = 0usize; let capella_fork_epoch = 4usize; let deneb_fork_epoch = 6usize; let electra_fork_epoch = 8usize; @@ -737,32 +736,8 @@ mod tests { let spec = Arc::new(spec); let harness = get_harness(VALIDATOR_COUNT, spec.clone()); - // go to bellatrix fork - harness - .extend_slots(bellatrix_fork_epoch * slots_per_epoch) - .await; - // extend half an epoch - harness.extend_slots(slots_per_epoch / 2).await; - // trigger merge - harness - .execution_block_generator() - .move_to_terminal_block() - .expect("should move to terminal block"); - let timestamp = - harness.get_timestamp_at_slot() + harness.spec.get_slot_duration().as_secs(); - harness - .execution_block_generator() - .modify_last_block(|block| { - if let Block::PoW(terminal_block) = block { - terminal_block.timestamp = timestamp; - } - }); - // finish out merge epoch - harness.extend_slots(slots_per_epoch / 2).await; // finish rest of epochs - harness - .extend_slots((num_epochs - 1 - bellatrix_fork_epoch) * slots_per_epoch) - .await; + harness.extend_slots(num_epochs * slots_per_epoch).await; let head = harness.chain.head_snapshot(); let state = &head.beacon_state; diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 26ad2e714b..9d204ac7f2 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -9,7 +9,6 @@ use crate::beacon_proposer_cache::{ }; use crate::blob_verification::{GossipBlobError, GossipVerifiedBlob}; use crate::block_times_cache::BlockTimesCache; -use crate::block_verification::POS_PANDA_BANNER; use crate::block_verification::{ BlockError, ExecutionPendingBlock, GossipVerifiedBlock, IntoExecutionPendingBlock, check_block_is_finalized_checkpoint_or_descendant, check_block_relevancy, @@ -3513,28 +3512,6 @@ impl BeaconChain { .map_err(BeaconChainError::TokioJoin)? .ok_or(BeaconChainError::RuntimeShutdown)??; - // Log the PoS pandas if a merge transition just occurred. - if payload_verification_outcome.is_valid_merge_transition_block { - info!("{}", POS_PANDA_BANNER); - info!(slot = %block.slot(), "Proof of Stake Activated"); - info!( - terminal_pow_block_hash = ?block - .message() - .execution_payload()? - .parent_hash() - .into_root(), - ); - info!( - merge_transition_block_root = ?block.message().tree_hash_root(), - ); - info!( - merge_transition_execution_hash = ?block - .message() - .execution_payload()? - .block_hash() - .into_root(), - ); - } Ok(ExecutedBlock::new( block, import_data, @@ -6078,21 +6055,6 @@ impl BeaconChain { input_params: ForkchoiceUpdateParameters, override_forkchoice_update: OverrideForkchoiceUpdate, ) -> Result<(), Error> { - let next_slot = current_slot + 1; - - // There is no need to issue a `forkchoiceUpdated` (fcU) message unless the Bellatrix fork - // has: - // - // 1. Already happened. - // 2. Will happen in the next slot. - // - // The reason for a fcU message in the slot prior to the Bellatrix fork is in case the - // terminal difficulty has already been reached and a payload preparation message needs to - // be issued. - if self.slot_is_prior_to_bellatrix(next_slot) { - return Ok(()); - } - let execution_layer = self .execution_layer .as_ref() @@ -6140,50 +6102,8 @@ impl BeaconChain { .unwrap_or_else(ExecutionBlockHash::zero), ) } else { - // The head block does not have an execution block hash. We must check to see if we - // happen to be the proposer of the transition block, in which case we still need to - // send forkchoice_updated. - if self - .spec - .fork_name_at_slot::(next_slot) - .bellatrix_enabled() - { - // We are post-bellatrix - if let Some(payload_attributes) = execution_layer - .payload_attributes(next_slot, params.head_root) - .await - { - // We are a proposer, check for terminal_pow_block_hash - if let Some(terminal_pow_block_hash) = execution_layer - .get_terminal_pow_block_hash(&self.spec, payload_attributes.timestamp()) - .await - .map_err(Error::ForkchoiceUpdate)? - { - info!( - slot = %next_slot, - "Prepared POS transition block proposer" - ); - ( - params.head_root, - terminal_pow_block_hash, - params - .justified_hash - .unwrap_or_else(ExecutionBlockHash::zero), - params - .finalized_hash - .unwrap_or_else(ExecutionBlockHash::zero), - ) - } else { - // TTD hasn't been reached yet, no need to update the EL. - return Ok(()); - } - } else { - // We are not a proposer, no need to update the EL. - return Ok(()); - } - } else { - return Ok(()); - } + // Proposing the block for the merge is no longer supported. + return Ok(()); }; let forkchoice_updated_response = execution_layer diff --git a/beacon_node/beacon_chain/src/bellatrix_readiness.rs b/beacon_node/beacon_chain/src/bellatrix_readiness.rs index 88ccc21b85..34d9795b84 100644 --- a/beacon_node/beacon_chain/src/bellatrix_readiness.rs +++ b/beacon_node/beacon_chain/src/bellatrix_readiness.rs @@ -1,126 +1,9 @@ -//! Provides tools for checking if a node is ready for the Bellatrix upgrade and following merge -//! transition. +//! Provides tools for checking genesis execution payload consistency. use crate::{BeaconChain, BeaconChainError as Error, BeaconChainTypes}; use execution_layer::BlockByNumberQuery; -use serde::{Deserialize, Serialize, Serializer}; -use std::fmt; -use std::fmt::Write; use types::*; -/// The time before the Bellatrix fork when we will start issuing warnings about preparation. -pub const SECONDS_IN_A_WEEK: u64 = 604800; -pub const BELLATRIX_READINESS_PREPARATION_SECONDS: u64 = SECONDS_IN_A_WEEK * 2; - -#[derive(Default, Debug, Serialize, Deserialize)] -pub struct MergeConfig { - #[serde(serialize_with = "serialize_uint256")] - pub terminal_total_difficulty: Option, - #[serde(skip_serializing_if = "Option::is_none")] - pub terminal_block_hash: Option, - #[serde(skip_serializing_if = "Option::is_none")] - pub terminal_block_hash_epoch: Option, -} - -impl fmt::Display for MergeConfig { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - if self.terminal_block_hash.is_none() - && self.terminal_block_hash_epoch.is_none() - && self.terminal_total_difficulty.is_none() - { - return write!( - f, - "Merge terminal difficulty parameters not configured, check your config" - ); - } - let mut display_string = String::new(); - if let Some(terminal_total_difficulty) = self.terminal_total_difficulty { - write!( - display_string, - "terminal_total_difficulty: {},", - terminal_total_difficulty - )?; - } - if let Some(terminal_block_hash) = self.terminal_block_hash { - write!( - display_string, - "terminal_block_hash: {},", - terminal_block_hash - )?; - } - if let Some(terminal_block_hash_epoch) = self.terminal_block_hash_epoch { - write!( - display_string, - "terminal_block_hash_epoch: {},", - terminal_block_hash_epoch - )?; - } - write!(f, "{}", display_string.trim_end_matches(','))?; - Ok(()) - } -} -impl MergeConfig { - /// Instantiate `self` from the values in a `ChainSpec`. - pub fn from_chainspec(spec: &ChainSpec) -> Self { - let mut params = MergeConfig::default(); - if spec.terminal_total_difficulty != Uint256::MAX { - params.terminal_total_difficulty = Some(spec.terminal_total_difficulty); - } - if spec.terminal_block_hash != ExecutionBlockHash::zero() { - params.terminal_block_hash = Some(spec.terminal_block_hash); - } - if spec.terminal_block_hash_activation_epoch != Epoch::max_value() { - params.terminal_block_hash_epoch = Some(spec.terminal_block_hash_activation_epoch); - } - params - } -} - -/// Indicates if a node is ready for the Bellatrix upgrade and subsequent merge transition. -#[derive(Debug, Serialize, Deserialize)] -#[serde(rename_all = "snake_case")] -#[serde(tag = "type")] -pub enum BellatrixReadiness { - /// The node is ready, as far as we can tell. - Ready { - config: MergeConfig, - #[serde(serialize_with = "serialize_uint256")] - current_difficulty: Option, - }, - /// The EL can be reached and has the correct configuration, however it's not yet synced. - NotSynced, - /// The user has not configured this node to use an execution endpoint. - NoExecutionEndpoint, -} - -impl fmt::Display for BellatrixReadiness { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - BellatrixReadiness::Ready { - config: params, - current_difficulty, - } => { - write!( - f, - "This node appears ready for Bellatrix \ - Params: {}, current_difficulty: {:?}", - params, current_difficulty - ) - } - BellatrixReadiness::NotSynced => write!( - f, - "The execution endpoint is connected and configured, \ - however it is not yet synced" - ), - BellatrixReadiness::NoExecutionEndpoint => write!( - f, - "The --execution-endpoint flag is not specified, this is a \ - requirement for Bellatrix" - ), - } - } -} - pub enum GenesisExecutionPayloadStatus { Correct(ExecutionBlockHash), BlockHashMismatch { @@ -141,47 +24,6 @@ pub enum GenesisExecutionPayloadStatus { } impl BeaconChain { - /// Returns `true` if user has an EL configured, or if the Bellatrix fork has occurred or will - /// occur within `BELLATRIX_READINESS_PREPARATION_SECONDS`. - pub fn is_time_to_prepare_for_bellatrix(&self, current_slot: Slot) -> bool { - if let Some(bellatrix_epoch) = self.spec.bellatrix_fork_epoch { - let bellatrix_slot = bellatrix_epoch.start_slot(T::EthSpec::slots_per_epoch()); - let bellatrix_readiness_preparation_slots = - BELLATRIX_READINESS_PREPARATION_SECONDS / self.spec.get_slot_duration().as_secs(); - - if self.execution_layer.is_some() { - // The user has already configured an execution layer, start checking for readiness - // right away. - true - } else { - // Return `true` if Bellatrix has happened or is within the preparation time. - current_slot + bellatrix_readiness_preparation_slots > bellatrix_slot - } - } else { - // The Bellatrix fork epoch has not been defined yet, no need to prepare. - false - } - } - - /// Attempts to connect to the EL and confirm that it is ready for Bellatrix. - pub async fn check_bellatrix_readiness(&self, current_slot: Slot) -> BellatrixReadiness { - if let Some(el) = self.execution_layer.as_ref() { - if !el.is_synced_for_notifier(current_slot).await { - // The EL is not synced. - return BellatrixReadiness::NotSynced; - } - let params = MergeConfig::from_chainspec(&self.spec); - let current_difficulty = el.get_current_difficulty().await.ok().flatten(); - BellatrixReadiness::Ready { - config: params, - current_difficulty, - } - } else { - // There is no EL configured. - BellatrixReadiness::NoExecutionEndpoint - } - } - /// Check that the execution payload embedded in the genesis state matches the EL's genesis /// block. pub async fn check_genesis_execution_payload_is_correct( @@ -223,14 +65,3 @@ impl BeaconChain { Ok(GenesisExecutionPayloadStatus::Correct(exec_block_hash)) } } - -/// Utility function to serialize a Uint256 as a decimal string. -fn serialize_uint256(val: &Option, s: S) -> Result -where - S: Serializer, -{ - match val { - Some(v) => v.to_string().serialize(s), - None => s.serialize_none(), - } -} diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 292560d6a7..d126c3af00 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -56,8 +56,7 @@ use crate::data_availability_checker::{ }; use crate::data_column_verification::GossipDataColumnError; use crate::execution_payload::{ - AllowOptimisticImport, NotifyExecutionLayer, PayloadNotifier, - validate_execution_payload_for_gossip, validate_merge_block, + NotifyExecutionLayer, PayloadNotifier, validate_execution_payload_for_gossip, }; use crate::kzg_utils::blobs_to_data_column_sidecars; use crate::observed_block_producers::SeenBlock; @@ -80,7 +79,7 @@ use safe_arith::ArithError; use slot_clock::SlotClock; use ssz::Encode; use ssz_derive::{Decode, Encode}; -use state_processing::per_block_processing::{errors::IntoWithIndex, is_merge_transition_block}; +use state_processing::per_block_processing::errors::IntoWithIndex; use state_processing::{ AllCaches, BlockProcessingError, BlockSignatureStrategy, ConsensusContext, SlotProcessingError, VerifyBlockRoot, @@ -99,34 +98,10 @@ use task_executor::JoinHandle; use tracing::{Instrument, Span, debug, debug_span, error, info_span, instrument}; use types::{ BeaconBlockRef, BeaconState, BeaconStateError, BlobsList, ChainSpec, DataColumnSidecarList, - Epoch, EthSpec, ExecutionBlockHash, FullPayload, Hash256, InconsistentFork, KzgProofs, - RelativeEpoch, SignedBeaconBlock, SignedBeaconBlockHeader, Slot, data::DataColumnSidecarError, + Epoch, EthSpec, FullPayload, Hash256, InconsistentFork, KzgProofs, RelativeEpoch, + SignedBeaconBlock, SignedBeaconBlockHeader, Slot, data::DataColumnSidecarError, }; -pub const POS_PANDA_BANNER: &str = r#" - ,,, ,,, ,,, ,,, - ;" ^; ;' ", ;" ^; ;' ", - ; s$$$$$$$s ; ; s$$$$$$$s ; - , ss$$$$$$$$$$s ,' ooooooooo. .oooooo. .oooooo..o , ss$$$$$$$$$$s ,' - ;s$$$$$$$$$$$$$$$ `888 `Y88. d8P' `Y8b d8P' `Y8 ;s$$$$$$$$$$$$$$$ - $$$$$$$$$$$$$$$$$$ 888 .d88'888 888Y88bo. $$$$$$$$$$$$$$$$$$ - $$$$P""Y$$$Y""W$$$$$ 888ooo88P' 888 888 `"Y8888o. $$$$P""Y$$$Y""W$$$$$ - $$$$ p"LFG"q $$$$$ 888 888 888 `"Y88b $$$$ p"LFG"q $$$$$ - $$$$ .$$$$$. $$$$ 888 `88b d88'oo .d8P $$$$ .$$$$$. $$$$ - $$DcaU$$$$$$$$$$ o888o `Y8bood8P' 8""88888P' $$DcaU$$$$$$$$$$ - "Y$$$"*"$$$Y" "Y$$$"*"$$$Y" - "$b.$$" "$b.$$" - - .o. . o8o . .o8 - .888. .o8 `"' .o8 "888 - .8"888. .ooooo. .o888oooooo oooo ooo .oooo. .o888oo .ooooo. .oooo888 - .8' `888. d88' `"Y8 888 `888 `88. .8' `P )88b 888 d88' `88bd88' `888 - .88ooo8888. 888 888 888 `88..8' .oP"888 888 888ooo888888 888 - .8' `888. 888 .o8 888 . 888 `888' d8( 888 888 .888 .o888 888 - o88o o8888o`Y8bod8P' "888"o888o `8' `Y888""8o "888"`Y8bod8P'`Y8bod88P" - -"#; - /// Maximum block slot number. Block with slots bigger than this constant will NOT be processed. const MAXIMUM_BLOCK_SLOT_NUMBER: u64 = 4_294_967_296; // 2^32 @@ -392,13 +367,6 @@ pub enum ExecutionPayloadError { /// /// The block is invalid and the peer is faulty InvalidPayloadTimestamp { expected: u64, found: u64 }, - /// The execution payload references an execution block that cannot trigger the merge. - /// - /// ## Peer scoring - /// - /// The block is invalid and the peer sent us a block that passes gossip propagation conditions, - /// but is invalid upon further verification. - InvalidTerminalPoWBlock { parent_hash: ExecutionBlockHash }, /// The `TERMINAL_BLOCK_HASH` is set, but the block has not reached the /// `TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH`. /// @@ -410,16 +378,6 @@ pub enum ExecutionPayloadError { activation_epoch: Epoch, epoch: Epoch, }, - /// The `TERMINAL_BLOCK_HASH` is set, but does not match the value specified by the block. - /// - /// ## Peer scoring - /// - /// The block is invalid and the peer sent us a block that passes gossip propagation conditions, - /// but is invalid upon further verification. - InvalidTerminalBlockHash { - terminal_block_hash: ExecutionBlockHash, - payload_parent_hash: ExecutionBlockHash, - }, /// The execution node is syncing but we fail the conditions for optimistic sync /// /// ## Peer scoring @@ -444,16 +402,11 @@ impl ExecutionPayloadError { // This is a trivial gossip validation condition, there is no reason for an honest peer // to propagate a block with an invalid payload time stamp. ExecutionPayloadError::InvalidPayloadTimestamp { .. } => true, - // An honest optimistic node may propagate blocks with an invalid terminal PoW block, we - // should not penalized them. - ExecutionPayloadError::InvalidTerminalPoWBlock { .. } => false, // This condition is checked *after* gossip propagation, therefore penalizing gossip // peers for this block would be unfair. There may be an argument to penalize RPC // blocks, since even an optimistic node shouldn't verify this block. We will remove the // penalties for all block imports to keep things simple. ExecutionPayloadError::InvalidActivationEpoch { .. } => false, - // As per `Self::InvalidActivationEpoch`. - ExecutionPayloadError::InvalidTerminalBlockHash { .. } => false, // Do not penalize the peer since it's not their fault that *we're* optimistic. ExecutionPayloadError::UnverifiedNonOptimisticCandidate => false, } @@ -537,7 +490,6 @@ impl From for BlockError { #[derive(Debug, PartialEq, Clone, Encode, Decode)] pub struct PayloadVerificationOutcome { pub payload_verification_status: PayloadVerificationStatus, - pub is_valid_merge_transition_block: bool, } /// Information about invalid blocks which might still be slashable despite being invalid. @@ -1469,27 +1421,10 @@ impl ExecutionPendingBlock { &parent.pre_state, notify_execution_layer, )?; - let is_valid_merge_transition_block = - is_merge_transition_block(&parent.pre_state, block.message().body()); - let payload_verification_future = async move { let chain = payload_notifier.chain.clone(); let block = payload_notifier.block.clone(); - // If this block triggers the merge, check to ensure that it references valid execution - // blocks. - // - // The specification defines this check inside `on_block` in the fork-choice specification, - // however we perform the check here for two reasons: - // - // - There's no point in importing a block that will fail fork choice, so it's best to fail - // early. - // - Doing the check here means we can keep our fork-choice implementation "pure". I.e., no - // calls to remote servers. - if is_valid_merge_transition_block { - validate_merge_block(&chain, block.message(), AllowOptimisticImport::Yes).await?; - }; - // The specification declares that this should be run *inside* `per_block_processing`, // however we run it here to keep `per_block_processing` pure (i.e., no calls to external // servers). @@ -1504,7 +1439,6 @@ impl ExecutionPendingBlock { Ok(PayloadVerificationOutcome { payload_verification_status, - is_valid_merge_transition_block, }) }; // Spawn the payload verification future as a new task, but don't wait for it to complete. diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index 7260a4aca0..c0403595ee 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -791,8 +791,8 @@ mod test { use store::{HotColdDB, ItemStore, StoreConfig, database::interface::BeaconNodeBackend}; use tempfile::{TempDir, tempdir}; use tracing::info; + use types::MinimalEthSpec; use types::new_non_zero_usize; - use types::{ExecPayload, MinimalEthSpec}; const LOW_VALIDATOR_COUNT: usize = 32; @@ -820,9 +820,8 @@ mod test { async fn get_deneb_chain( db_path: &TempDir, ) -> BeaconChainHarness> { - let altair_fork_epoch = Epoch::new(1); - let bellatrix_fork_epoch = Epoch::new(2); - let bellatrix_fork_slot = bellatrix_fork_epoch.start_slot(E::slots_per_epoch()); + let altair_fork_epoch = Epoch::new(0); + let bellatrix_fork_epoch = Epoch::new(0); let capella_fork_epoch = Epoch::new(3); let deneb_fork_epoch = Epoch::new(4); let deneb_fork_slot = deneb_fork_epoch.start_slot(E::slots_per_epoch()); @@ -844,25 +843,6 @@ mod test { .mock_execution_layer() .build(); - // go to bellatrix slot - harness.extend_to_slot(bellatrix_fork_slot).await; - let bellatrix_head = &harness.chain.head_snapshot().beacon_block; - assert!(bellatrix_head.as_bellatrix().is_ok()); - assert_eq!(bellatrix_head.slot(), bellatrix_fork_slot); - assert!( - bellatrix_head - .message() - .body() - .execution_payload() - .unwrap() - .is_default_with_empty_roots(), - "Bellatrix head is default payload" - ); - // Trigger the terminal PoW block. - harness - .execution_block_generator() - .move_to_terminal_block() - .unwrap(); // go right before deneb slot harness.extend_to_slot(deneb_fork_slot - 1).await; @@ -942,7 +922,6 @@ mod test { let payload_verification_outcome = PayloadVerificationOutcome { payload_verification_status: PayloadVerificationStatus::Verified, - is_valid_merge_transition_block: false, }; let availability_pending_block = AvailabilityPendingExecutedBlock { @@ -1183,7 +1162,6 @@ mod pending_components_tests { }, payload_verification_outcome: PayloadVerificationOutcome { payload_verification_status: PayloadVerificationStatus::Verified, - is_valid_merge_transition_block: false, }, }; (block, blobs, invalid_blobs) diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index f32a3ba2a3..a2ebed32ee 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -12,19 +12,19 @@ use crate::{ ExecutionPayloadError, }; use execution_layer::{ - BlockProposalContents, BlockProposalContentsType, BuilderParams, NewPayloadRequest, - PayloadAttributes, PayloadParameters, PayloadStatus, + BlockProposalContentsType, BuilderParams, NewPayloadRequest, PayloadAttributes, + PayloadParameters, PayloadStatus, }; use fork_choice::{InvalidationOperation, PayloadVerificationStatus}; use proto_array::{Block as ProtoBlock, ExecutionStatus}; use slot_clock::SlotClock; use state_processing::per_block_processing::{ compute_timestamp_at_slot, get_expected_withdrawals, is_execution_enabled, - is_merge_transition_complete, partially_verify_execution_payload, + partially_verify_execution_payload, }; use std::sync::Arc; use tokio::task::JoinHandle; -use tracing::{Instrument, debug, debug_span, warn}; +use tracing::{Instrument, debug_span, warn}; use tree_hash::TreeHash; use types::execution::BlockProductionVersion; use types::*; @@ -32,12 +32,6 @@ use types::*; pub type PreparePayloadResult = Result, BlockProductionError>; pub type PreparePayloadHandle = JoinHandle>>; -#[derive(PartialEq)] -pub enum AllowOptimisticImport { - Yes, - No, -} - /// Signal whether the execution payloads of new blocks should be /// immediately verified with the EL or imported optimistically without /// any EL communication. @@ -218,78 +212,6 @@ async fn notify_new_payload( } } -/// Verify that the block which triggers the merge is valid to be imported to fork choice. -/// -/// ## Errors -/// -/// Will return an error when using a pre-merge fork `state`. Ensure to only run this function -/// after the merge fork. -/// -/// ## Specification -/// -/// Equivalent to the `validate_merge_block` function in the merge Fork Choice Changes: -/// -/// https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/merge/fork-choice.md#validate_merge_block -pub async fn validate_merge_block( - chain: &Arc>, - block: BeaconBlockRef<'_, T::EthSpec>, - allow_optimistic_import: AllowOptimisticImport, -) -> Result<(), BlockError> { - let spec = &chain.spec; - let block_epoch = block.slot().epoch(T::EthSpec::slots_per_epoch()); - let execution_payload = block.execution_payload()?; - - if spec.terminal_block_hash != ExecutionBlockHash::zero() { - if block_epoch < spec.terminal_block_hash_activation_epoch { - return Err(ExecutionPayloadError::InvalidActivationEpoch { - activation_epoch: spec.terminal_block_hash_activation_epoch, - epoch: block_epoch, - } - .into()); - } - - if execution_payload.parent_hash() != spec.terminal_block_hash { - return Err(ExecutionPayloadError::InvalidTerminalBlockHash { - terminal_block_hash: spec.terminal_block_hash, - payload_parent_hash: execution_payload.parent_hash(), - } - .into()); - } - - return Ok(()); - } - - let execution_layer = chain - .execution_layer - .as_ref() - .ok_or(ExecutionPayloadError::NoExecutionConnection)?; - - let is_valid_terminal_pow_block = execution_layer - .is_valid_terminal_pow_block_hash(execution_payload.parent_hash(), spec) - .await - .map_err(ExecutionPayloadError::from)?; - - match is_valid_terminal_pow_block { - Some(true) => Ok(()), - Some(false) => Err(ExecutionPayloadError::InvalidTerminalPoWBlock { - parent_hash: execution_payload.parent_hash(), - } - .into()), - None => { - if allow_optimistic_import == AllowOptimisticImport::Yes { - debug!( - block_hash = ?execution_payload.parent_hash(), - msg = "the terminal block/parent was unavailable", - "Optimistically importing merge transition block" - ); - Ok(()) - } else { - Err(ExecutionPayloadError::UnverifiedNonOptimisticCandidate.into()) - } - } - } -} - /// Validate the gossip block's execution_payload according to the checks described here: /// https://github.com/ethereum/consensus-specs/blob/dev/specs/merge/p2p-interface.md#beacon_block pub fn validate_execution_payload_for_gossip( @@ -305,14 +227,14 @@ pub fn validate_execution_payload_for_gossip( // Only apply this validation if this is a Bellatrix beacon block. if let Ok(execution_payload) = block.body().execution_payload() { - // This logic should match `is_execution_enabled`. We use only the execution block hash of - // the parent here in order to avoid loading the parent state during gossip verification. + // Check parent execution status to determine if we should validate the payload. + // We use only the execution status of the parent here to avoid loading the parent state + // during gossip verification. - let is_merge_transition_complete = match parent_block.execution_status { - // Optimistically declare that an "unknown" status block has completed the merge. + let parent_has_execution = match parent_block.execution_status { + // Parent has valid or optimistic execution status. ExecutionStatus::Valid(_) | ExecutionStatus::Optimistic(_) => true, - // It's impossible for an irrelevant block to have completed the merge. It is pre-merge - // by definition. + // Pre-merge blocks have irrelevant execution status. ExecutionStatus::Irrelevant(_) => false, // If the parent has an invalid payload then it's impossible to build a valid block upon // it. Reject the block. @@ -323,7 +245,7 @@ pub fn validate_execution_payload_for_gossip( } }; - if is_merge_transition_complete || !execution_payload.is_default_with_empty_roots() { + if parent_has_execution || !execution_payload.is_default_with_empty_roots() { let expected_timestamp = chain .slot_clock .start_of(block.slot()) @@ -372,7 +294,6 @@ pub fn get_execution_payload( // task. let spec = &chain.spec; let current_epoch = state.current_epoch(); - let is_merge_transition_complete = is_merge_transition_complete(state); let timestamp = compute_timestamp_at_slot(state, state.slot(), spec).map_err(BeaconStateError::from)?; let random = *state.get_randao_mix(current_epoch)?; @@ -399,7 +320,6 @@ pub fn get_execution_payload( async move { prepare_execution_payload::( &chain, - is_merge_transition_complete, timestamp, random, proposer_index, @@ -423,8 +343,6 @@ pub fn get_execution_payload( /// Prepares an execution payload for inclusion in a block. /// -/// Will return `Ok(None)` if the Bellatrix fork has occurred, but a terminal block has not been found. -/// /// ## Errors /// /// Will return an error when using a pre-Bellatrix fork `state`. Ensure to only run this function @@ -438,7 +356,6 @@ pub fn get_execution_payload( #[allow(clippy::too_many_arguments)] pub async fn prepare_execution_payload( chain: &Arc>, - is_merge_transition_complete: bool, timestamp: u64, random: Hash256, proposer_index: u64, @@ -453,7 +370,6 @@ pub async fn prepare_execution_payload( where T: BeaconChainTypes, { - let current_epoch = builder_params.slot.epoch(T::EthSpec::slots_per_epoch()); let spec = &chain.spec; let fork = spec.fork_name_at_slot::(builder_params.slot); let execution_layer = chain @@ -461,42 +377,7 @@ where .as_ref() .ok_or(BlockProductionError::ExecutionLayerMissing)?; - let parent_hash = if !is_merge_transition_complete { - let is_terminal_block_hash_set = spec.terminal_block_hash != ExecutionBlockHash::zero(); - let is_activation_epoch_reached = - current_epoch >= spec.terminal_block_hash_activation_epoch; - - if is_terminal_block_hash_set && !is_activation_epoch_reached { - // Use the "empty" payload if there's a terminal block hash, but we haven't reached the - // terminal block epoch yet. - return Ok(BlockProposalContentsType::Full( - BlockProposalContents::Payload { - payload: FullPayload::default_at_fork(fork)?, - block_value: Uint256::ZERO, - }, - )); - } - - let terminal_pow_block_hash = execution_layer - .get_terminal_pow_block_hash(spec, timestamp) - .await - .map_err(BlockProductionError::TerminalPoWBlockLookupFailed)?; - - if let Some(terminal_pow_block_hash) = terminal_pow_block_hash { - terminal_pow_block_hash - } else { - // If the merge transition hasn't occurred yet and the EL hasn't found the terminal - // block, return an "empty" payload. - return Ok(BlockProposalContentsType::Full( - BlockProposalContents::Payload { - payload: FullPayload::default_at_fork(fork)?, - block_value: Uint256::ZERO, - }, - )); - } - } else { - latest_execution_payload_header_block_hash - }; + let parent_hash = latest_execution_payload_header_block_hash; // Try to obtain the fork choice update parameters from the cached head. // diff --git a/beacon_node/beacon_chain/src/otb_verification_service.rs b/beacon_node/beacon_chain/src/otb_verification_service.rs deleted file mode 100644 index e02705f5da..0000000000 --- a/beacon_node/beacon_chain/src/otb_verification_service.rs +++ /dev/null @@ -1,369 +0,0 @@ -use crate::execution_payload::{validate_merge_block, AllowOptimisticImport}; -use crate::{ - BeaconChain, BeaconChainError, BeaconChainTypes, BlockError, ExecutionPayloadError, - INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, -}; -use itertools::process_results; -use logging::crit; -use proto_array::InvalidationOperation; -use slot_clock::SlotClock; -use ssz::{Decode, Encode}; -use ssz_derive::{Decode, Encode}; -use state_processing::per_block_processing::is_merge_transition_complete; -use std::sync::Arc; -use store::{DBColumn, Error as StoreError, HotColdDB, KeyValueStore, StoreItem}; -use task_executor::{ShutdownReason, TaskExecutor}; -use tokio::time::sleep; -use tracing::{debug, error, info, warn}; -use tree_hash::TreeHash; -use types::{BeaconBlockRef, EthSpec, Hash256, Slot}; -use DBColumn::OptimisticTransitionBlock as OTBColumn; - -#[derive(Clone, Debug, Decode, Encode, PartialEq)] -pub struct OptimisticTransitionBlock { - root: Hash256, - slot: Slot, -} - -impl OptimisticTransitionBlock { - // types::BeaconBlockRef<'_, ::EthSpec> - pub fn from_block(block: BeaconBlockRef) -> Self { - Self { - root: block.tree_hash_root(), - slot: block.slot(), - } - } - - pub fn root(&self) -> &Hash256 { - &self.root - } - - pub fn slot(&self) -> &Slot { - &self.slot - } - - pub fn persist_in_store(&self, store: A) -> Result<(), StoreError> - where - T: BeaconChainTypes, - A: AsRef>, - { - if store - .as_ref() - .item_exists::(&self.root)? - { - Ok(()) - } else { - store.as_ref().put_item(&self.root, self) - } - } - - pub fn remove_from_store(&self, store: A) -> Result<(), StoreError> - where - T: BeaconChainTypes, - A: AsRef>, - { - store - .as_ref() - .hot_db - .key_delete(OTBColumn.into(), self.root.as_slice()) - } - - fn is_canonical( - &self, - chain: &BeaconChain, - ) -> Result { - Ok(chain - .forwards_iter_block_roots_until(self.slot, self.slot)? - .next() - .transpose()? - .map(|(root, _)| root) - == Some(self.root)) - } -} - -impl StoreItem for OptimisticTransitionBlock { - fn db_column() -> DBColumn { - OTBColumn - } - - fn as_store_bytes(&self) -> Vec { - self.as_ssz_bytes() - } - - fn from_store_bytes(bytes: &[u8]) -> Result { - Ok(Self::from_ssz_bytes(bytes)?) - } -} - -/// The routine is expected to run once per epoch, 1/4th through the epoch. -pub const EPOCH_DELAY_FACTOR: u32 = 4; - -/// Spawns a routine which checks the validity of any optimistically imported transition blocks -/// -/// This routine will run once per epoch, at `epoch_duration / EPOCH_DELAY_FACTOR` after -/// the start of each epoch. -/// -/// The service will not be started if there is no `execution_layer` on the `chain`. -pub fn start_otb_verification_service( - executor: TaskExecutor, - chain: Arc>, -) { - // Avoid spawning the service if there's no EL, it'll just error anyway. - if chain.execution_layer.is_some() { - executor.spawn( - async move { otb_verification_service(chain).await }, - "otb_verification_service", - ); - } -} - -pub fn load_optimistic_transition_blocks( - chain: &BeaconChain, -) -> Result, StoreError> { - process_results( - chain.store.hot_db.iter_column::(OTBColumn), - |iter| { - iter.map(|(_, bytes)| OptimisticTransitionBlock::from_store_bytes(&bytes)) - .collect() - }, - )? -} - -#[derive(Debug)] -pub enum Error { - ForkChoice(String), - BeaconChain(BeaconChainError), - StoreError(StoreError), - NoBlockFound(OptimisticTransitionBlock), -} - -pub async fn validate_optimistic_transition_blocks( - chain: &Arc>, - otbs: Vec, -) -> Result<(), Error> { - let finalized_slot = chain - .canonical_head - .fork_choice_read_lock() - .get_finalized_block() - .map_err(|e| Error::ForkChoice(format!("{:?}", e)))? - .slot; - - // separate otbs into - // non-canonical - // finalized canonical - // unfinalized canonical - let mut non_canonical_otbs = vec![]; - let (finalized_canonical_otbs, unfinalized_canonical_otbs) = process_results( - otbs.into_iter().map(|otb| { - otb.is_canonical(chain) - .map(|is_canonical| (otb, is_canonical)) - }), - |pair_iter| { - pair_iter - .filter_map(|(otb, is_canonical)| { - if is_canonical { - Some(otb) - } else { - non_canonical_otbs.push(otb); - None - } - }) - .partition::, _>(|otb| *otb.slot() <= finalized_slot) - }, - ) - .map_err(Error::BeaconChain)?; - - // remove non-canonical blocks that conflict with finalized checkpoint from the database - for otb in non_canonical_otbs { - if *otb.slot() <= finalized_slot { - otb.remove_from_store::(&chain.store) - .map_err(Error::StoreError)?; - } - } - - // ensure finalized canonical otb are valid, otherwise kill client - for otb in finalized_canonical_otbs { - match chain.get_block(otb.root()).await { - Ok(Some(block)) => { - match validate_merge_block(chain, block.message(), AllowOptimisticImport::No).await - { - Ok(()) => { - // merge transition block is valid, remove it from OTB - otb.remove_from_store::(&chain.store) - .map_err(Error::StoreError)?; - info!( - block_root = %otb.root(), - "type" = "finalized", - "Validated merge transition block" - ); - } - // The block was not able to be verified by the EL. Leave the OTB in the - // database since the EL is likely still syncing and may verify the block - // later. - Err(BlockError::ExecutionPayloadError( - ExecutionPayloadError::UnverifiedNonOptimisticCandidate, - )) => (), - Err(BlockError::ExecutionPayloadError( - ExecutionPayloadError::InvalidTerminalPoWBlock { .. }, - )) => { - // Finalized Merge Transition Block is Invalid! Kill the Client! - crit!( - msg = "You must use the `--purge-db` flag to clear the database and restart sync. \ - You may be on a hostile network.", - block_hash = ?block.canonical_root(), - "Finalized merge transition block is invalid!" - ); - let mut shutdown_sender = chain.shutdown_sender(); - if let Err(e) = shutdown_sender.try_send(ShutdownReason::Failure( - INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, - )) { - crit!( - error = ?e, - shutdown_reason = INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, - "Failed to shut down client" - ); - } - } - _ => {} - } - } - Ok(None) => return Err(Error::NoBlockFound(otb)), - // Our database has pruned the payload and the payload was unavailable on the EL since - // the EL is still syncing or the payload is non-canonical. - Err(BeaconChainError::BlockHashMissingFromExecutionLayer(_)) => (), - Err(e) => return Err(Error::BeaconChain(e)), - } - } - - // attempt to validate any non-finalized canonical otb blocks - for otb in unfinalized_canonical_otbs { - match chain.get_block(otb.root()).await { - Ok(Some(block)) => { - match validate_merge_block(chain, block.message(), AllowOptimisticImport::No).await - { - Ok(()) => { - // merge transition block is valid, remove it from OTB - otb.remove_from_store::(&chain.store) - .map_err(Error::StoreError)?; - info!( - block_root = ?otb.root(), - "type" = "not finalized", - "Validated merge transition block" - ); - } - // The block was not able to be verified by the EL. Leave the OTB in the - // database since the EL is likely still syncing and may verify the block - // later. - Err(BlockError::ExecutionPayloadError( - ExecutionPayloadError::UnverifiedNonOptimisticCandidate, - )) => (), - Err(BlockError::ExecutionPayloadError( - ExecutionPayloadError::InvalidTerminalPoWBlock { .. }, - )) => { - // Unfinalized Merge Transition Block is Invalid -> Run process_invalid_execution_payload - warn!( - block_root = ?otb.root(), - "Merge transition block invalid" - ); - chain - .process_invalid_execution_payload( - &InvalidationOperation::InvalidateOne { - block_root: *otb.root(), - }, - ) - .await - .map_err(|e| { - warn!( - error = ?e, - location = "process_invalid_execution_payload", - "Error checking merge transition block" - ); - Error::BeaconChain(e) - })?; - } - _ => {} - } - } - Ok(None) => return Err(Error::NoBlockFound(otb)), - // Our database has pruned the payload and the payload was unavailable on the EL since - // the EL is still syncing or the payload is non-canonical. - Err(BeaconChainError::BlockHashMissingFromExecutionLayer(_)) => (), - Err(e) => return Err(Error::BeaconChain(e)), - } - } - - Ok(()) -} - -/// Loop until any optimistically imported merge transition blocks have been verified and -/// the merge has been finalized. -async fn otb_verification_service(chain: Arc>) { - let epoch_duration = chain.slot_clock.slot_duration() * T::EthSpec::slots_per_epoch() as u32; - loop { - match chain - .slot_clock - .duration_to_next_epoch(T::EthSpec::slots_per_epoch()) - { - Some(duration) => { - let additional_delay = epoch_duration / EPOCH_DELAY_FACTOR; - sleep(duration + additional_delay).await; - - debug!("OTB verification service firing"); - - if !is_merge_transition_complete( - &chain.canonical_head.cached_head().snapshot.beacon_state, - ) { - // We are pre-merge. Nothing to do yet. - continue; - } - - // load all optimistically imported transition blocks from the database - match load_optimistic_transition_blocks(chain.as_ref()) { - Ok(otbs) => { - if otbs.is_empty() { - if chain - .canonical_head - .fork_choice_read_lock() - .get_finalized_block() - .map_or(false, |block| { - block.execution_status.is_execution_enabled() - }) - { - // there are no optimistic blocks in the database, we can exit - // the service since the merge transition is finalized and we'll - // never see another transition block - break; - } else { - debug!( - info = "waiting for the merge transition to finalize", - "No optimistic transition blocks" - ) - } - } - if let Err(e) = validate_optimistic_transition_blocks(&chain, otbs).await { - warn!( - error = ?e, - "Error while validating optimistic transition blocks" - ); - } - } - Err(e) => { - error!( - error = ?e, - "Error loading optimistic transition blocks" - ); - } - }; - } - None => { - error!("Failed to read slot clock"); - // If we can't read the slot clock, just wait another slot. - sleep(chain.slot_clock.slot_duration()).await; - } - }; - } - debug!( - msg = "shutting down OTB verification service", - "No optimistic transition blocks in database" - ); -} diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 096a0516fc..eefb5d48b7 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -29,10 +29,7 @@ use execution_layer::test_utils::generate_genesis_header; use execution_layer::{ ExecutionLayer, auth::JwtKey, - test_utils::{ - DEFAULT_JWT_SECRET, DEFAULT_TERMINAL_BLOCK, ExecutionBlockGenerator, MockBuilder, - MockExecutionLayer, - }, + test_utils::{DEFAULT_JWT_SECRET, ExecutionBlockGenerator, MockBuilder, MockExecutionLayer}, }; use fixed_bytes::FixedBytesExtended; use futures::channel::mpsc::Receiver; @@ -52,7 +49,11 @@ use rayon::prelude::*; use sensitive_url::SensitiveUrl; use slot_clock::{SlotClock, TestingSlotClock}; use ssz_types::{RuntimeVariableList, VariableList}; +use state_processing::ConsensusContext; use state_processing::per_block_processing::compute_timestamp_at_slot; +use state_processing::per_block_processing::{ + BlockSignatureStrategy, VerifyBlockRoot, per_block_processing, +}; use state_processing::state_advance::complete_state_advance; use std::borrow::Cow; use std::collections::{HashMap, HashSet}; @@ -202,11 +203,12 @@ pub fn fork_name_from_env() -> Option { /// Return a `ChainSpec` suitable for test usage. /// /// If the `fork_from_env` feature is enabled, read the fork to use from the FORK_NAME environment -/// variable. Otherwise use the default spec. +/// variable. Otherwise we default to Bellatrix as the minimum fork (we no longer support +/// starting test networks prior to Bellatrix). pub fn test_spec() -> ChainSpec { let mut spec = fork_name_from_env() .map(|fork| fork.make_genesis_spec(E::default_spec())) - .unwrap_or_else(|| E::default_spec()); + .unwrap_or_else(|| ForkName::Bellatrix.make_genesis_spec(E::default_spec())); // Set target aggregators to a high value by default. spec.target_aggregators_per_committee = DEFAULT_TARGET_AGGREGATORS; @@ -277,16 +279,25 @@ impl Builder> { }); let mutator = move |builder: BeaconChainBuilder<_>| { - let header = generate_genesis_header::(builder.get_spec(), false); + let spec = builder.get_spec(); + let header = generate_genesis_header::(spec); let genesis_state = genesis_state_builder - .set_opt_execution_payload_header(header) + .set_opt_execution_payload_header(header.clone()) .build_genesis_state( &validator_keypairs, HARNESS_GENESIS_TIME, Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH), - builder.get_spec(), + spec, ) .expect("should generate interop state"); + // For post-Bellatrix forks, verify the merge is complete at genesis + if header.is_some() { + assert!( + state_processing::per_block_processing::is_merge_transition_complete( + &genesis_state + ) + ); + } builder .genesis_state(genesis_state) .expect("should build state using recent genesis") @@ -344,7 +355,7 @@ impl Builder> { }); let mutator = move |builder: BeaconChainBuilder<_>| { - let header = generate_genesis_header::(builder.get_spec(), false); + let header = generate_genesis_header::(builder.get_spec()); let genesis_state = genesis_state_builder .set_opt_execution_payload_header(header) .build_genesis_state( @@ -688,7 +699,6 @@ pub fn mock_execution_layer_from_parts( MockExecutionLayer::new( task_executor, - DEFAULT_TERMINAL_BLOCK, shanghai_time, cancun_time, prague_time, @@ -1178,6 +1188,94 @@ where ) } + /// Build a Bellatrix block with the given execution payload, compute the + /// correct state root, sign it, and import it into the chain. + /// + /// This bypasses the normal block production pipeline, which always requests + /// a payload from the execution layer. That makes it possible to construct + /// blocks with **default (zeroed) payloads** — something the EL-backed flow + /// cannot do — which is needed to simulate the pre-merge portion of a chain + /// that starts at Bellatrix genesis with `is_merge_transition_complete = false`. + /// + /// `state` is expected to be the head state *before* `slot`. It will be + /// advanced to `slot` in-place via `complete_state_advance`, then used to + /// derive the proposer, RANDAO reveal, and parent root. After processing, + /// the caller should typically replace `state` with the chain's new head + /// state (`self.get_current_state()`). + pub async fn build_and_import_block_with_payload( + &self, + state: &mut BeaconState, + slot: Slot, + execution_payload: ExecutionPayloadBellatrix, + ) { + complete_state_advance(state, None, slot, &self.spec).expect("should advance state"); + state.build_caches(&self.spec).expect("should build caches"); + + let proposer_index = state.get_beacon_proposer_index(slot, &self.spec).unwrap(); + let randao_reveal = self.sign_randao_reveal(state, proposer_index, slot); + let parent_root = state.latest_block_header().canonical_root(); + + let mut block = BeaconBlock::Bellatrix(BeaconBlockBellatrix { + slot, + proposer_index: proposer_index as u64, + parent_root, + state_root: Hash256::zero(), + body: BeaconBlockBodyBellatrix { + randao_reveal, + eth1_data: state.eth1_data().clone(), + graffiti: Graffiti::default(), + proposer_slashings: VariableList::empty(), + attester_slashings: VariableList::empty(), + attestations: VariableList::empty(), + deposits: VariableList::empty(), + voluntary_exits: VariableList::empty(), + sync_aggregate: SyncAggregate::new(), + execution_payload: FullPayloadBellatrix { execution_payload }, + }, + }); + + // Run per_block_processing on a clone to compute the post-state root. + let signed_tmp = block.clone().sign( + &self.validator_keypairs[proposer_index].sk, + &state.fork(), + state.genesis_validators_root(), + &self.spec, + ); + let mut ctxt = ConsensusContext::new(slot).set_proposer_index(proposer_index as u64); + let mut post_state = state.clone(); + per_block_processing( + &mut post_state, + &signed_tmp, + BlockSignatureStrategy::NoVerification, + VerifyBlockRoot::False, + &mut ctxt, + &self.spec, + ) + .unwrap_or_else(|e| panic!("per_block_processing failed at slot {}: {e:?}", slot)); + + let state_root = post_state.update_tree_hash_cache().unwrap(); + *block.state_root_mut() = state_root; + + let signed_block = self.sign_beacon_block(block, state); + let block_root = signed_block.canonical_root(); + let rpc_block = RpcBlock::BlockOnly { + block_root, + block: Arc::new(signed_block), + }; + self.chain.slot_clock.set_slot(slot.as_u64()); + self.chain + .process_block( + block_root, + rpc_block, + NotifyExecutionLayer::No, + BlockImportSource::Lookup, + || Ok(()), + ) + .await + .unwrap_or_else(|e| panic!("import failed at slot {}: {e:?}", slot)); + self.chain.recompute_head_at_current_slot().await; + } + #[allow(clippy::too_many_arguments)] pub fn produce_single_attestation_for_block( &self, diff --git a/beacon_node/beacon_chain/tests/attestation_verification.rs b/beacon_node/beacon_chain/tests/attestation_verification.rs index 96071be89f..e8ee628f28 100644 --- a/beacon_node/beacon_chain/tests/attestation_verification.rs +++ b/beacon_node/beacon_chain/tests/attestation_verification.rs @@ -14,6 +14,7 @@ use beacon_chain::{ }, }; use bls::{AggregateSignature, Keypair, SecretKey}; +use execution_layer::test_utils::generate_genesis_header; use fixed_bytes::FixedBytesExtended; use genesis::{DEFAULT_ETH1_BLOCK_HASH, interop_genesis_state}; use int_to_bytes::int_to_bytes32; @@ -79,11 +80,13 @@ fn get_harness_capella_spec( let spec = Arc::new(spec); let validator_keypairs = KEYPAIRS[0..validator_count].to_vec(); + // Use the proper genesis execution payload header that matches the mock execution layer + let execution_payload_header = generate_genesis_header(&spec); let genesis_state = interop_genesis_state( &validator_keypairs, HARNESS_GENESIS_TIME, Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH), - None, + execution_payload_header, &spec, ) .unwrap(); @@ -106,11 +109,6 @@ fn get_harness_capella_spec( .mock_execution_layer() .build(); - harness - .execution_block_generator() - .move_to_terminal_block() - .unwrap(); - harness.advance_slot(); (harness, spec) diff --git a/beacon_node/beacon_chain/tests/bellatrix.rs b/beacon_node/beacon_chain/tests/bellatrix.rs deleted file mode 100644 index fc0f96ef88..0000000000 --- a/beacon_node/beacon_chain/tests/bellatrix.rs +++ /dev/null @@ -1,212 +0,0 @@ -#![cfg(not(debug_assertions))] // Tests run too slow in debug. - -use beacon_chain::test_utils::BeaconChainHarness; -use execution_layer::test_utils::{Block, DEFAULT_TERMINAL_BLOCK, generate_pow_block}; -use types::*; - -const VALIDATOR_COUNT: usize = 32; - -type E = MainnetEthSpec; - -fn verify_execution_payload_chain(chain: &[FullPayload]) { - let mut prev_ep: Option> = None; - - for ep in chain { - assert!(!ep.is_default_with_empty_roots()); - assert!(ep.block_hash() != ExecutionBlockHash::zero()); - - // Check against previous `ExecutionPayload`. - if let Some(prev_ep) = prev_ep { - assert_eq!(prev_ep.block_hash(), ep.parent_hash()); - assert_eq!(prev_ep.block_number() + 1, ep.block_number()); - assert!(ep.timestamp() > prev_ep.timestamp()); - } - prev_ep = Some(ep.clone()); - } -} - -#[tokio::test] -// TODO(merge): This isn't working cause the non-zero values in `initialize_beacon_state_from_eth1` -// are causing failed lookups to the execution node. I need to come back to this. -#[should_panic] -async fn merge_with_terminal_block_hash_override() { - let altair_fork_epoch = Epoch::new(0); - let bellatrix_fork_epoch = Epoch::new(0); - - let mut spec = E::default_spec(); - spec.altair_fork_epoch = Some(altair_fork_epoch); - spec.bellatrix_fork_epoch = Some(bellatrix_fork_epoch); - - let genesis_pow_block_hash = generate_pow_block( - spec.terminal_total_difficulty, - DEFAULT_TERMINAL_BLOCK, - 0, - ExecutionBlockHash::zero(), - ) - .unwrap() - .block_hash; - - spec.terminal_block_hash = genesis_pow_block_hash; - - let harness = BeaconChainHarness::builder(E::default()) - .spec(spec.into()) - .deterministic_keypairs(VALIDATOR_COUNT) - .fresh_ephemeral_store() - .mock_execution_layer() - .build(); - - assert_eq!( - harness - .execution_block_generator() - .latest_block() - .unwrap() - .block_hash(), - genesis_pow_block_hash, - "pre-condition" - ); - - assert!( - harness - .chain - .head_snapshot() - .beacon_block - .as_bellatrix() - .is_ok(), - "genesis block should be a bellatrix block" - ); - - let mut execution_payloads = vec![]; - for i in 0..E::slots_per_epoch() * 3 { - harness.extend_slots(1).await; - - let block = &harness.chain.head_snapshot().beacon_block; - - let execution_payload = block.message().body().execution_payload().unwrap(); - if i == 0 { - assert_eq!(execution_payload.block_hash(), genesis_pow_block_hash); - } - execution_payloads.push(execution_payload.into()); - } - - verify_execution_payload_chain(execution_payloads.as_slice()); -} - -#[tokio::test] -async fn base_altair_bellatrix_with_terminal_block_after_fork() { - let altair_fork_epoch = Epoch::new(4); - let altair_fork_slot = altair_fork_epoch.start_slot(E::slots_per_epoch()); - let bellatrix_fork_epoch = Epoch::new(8); - let bellatrix_fork_slot = bellatrix_fork_epoch.start_slot(E::slots_per_epoch()); - - let mut spec = E::default_spec(); - spec.altair_fork_epoch = Some(altair_fork_epoch); - spec.bellatrix_fork_epoch = Some(bellatrix_fork_epoch); - - let mut execution_payloads = vec![]; - - let harness = BeaconChainHarness::builder(E::default()) - .spec(spec.into()) - .deterministic_keypairs(VALIDATOR_COUNT) - .fresh_ephemeral_store() - .mock_execution_layer() - .build(); - - /* - * Start with the base fork. - */ - - assert!(harness.chain.head_snapshot().beacon_block.as_base().is_ok()); - - /* - * Do the Altair fork. - */ - - harness.extend_to_slot(altair_fork_slot).await; - - let altair_head = &harness.chain.head_snapshot().beacon_block; - assert!(altair_head.as_altair().is_ok()); - assert_eq!(altair_head.slot(), altair_fork_slot); - - /* - * Do the Bellatrix fork, without a terminal PoW block. - */ - - Box::pin(harness.extend_to_slot(bellatrix_fork_slot)).await; - - let bellatrix_head = &harness.chain.head_snapshot().beacon_block; - assert!(bellatrix_head.as_bellatrix().is_ok()); - assert_eq!(bellatrix_head.slot(), bellatrix_fork_slot); - assert!( - bellatrix_head - .message() - .body() - .execution_payload() - .unwrap() - .is_default_with_empty_roots(), - "Bellatrix head is default payload" - ); - - /* - * Next Bellatrix block shouldn't include an exec payload. - */ - - harness.extend_slots(1).await; - - let one_after_bellatrix_head = &harness.chain.head_snapshot().beacon_block; - assert!( - one_after_bellatrix_head - .message() - .body() - .execution_payload() - .unwrap() - .is_default_with_empty_roots(), - "One after bellatrix head is default payload" - ); - assert_eq!(one_after_bellatrix_head.slot(), bellatrix_fork_slot + 1); - - /* - * Trigger the terminal PoW block. - */ - - harness - .execution_block_generator() - .move_to_terminal_block() - .unwrap(); - - // Add a slot duration to get to the next slot - let timestamp = harness.get_timestamp_at_slot() + harness.spec.get_slot_duration().as_secs(); - - harness - .execution_block_generator() - .modify_last_block(|block| { - if let Block::PoW(terminal_block) = block { - terminal_block.timestamp = timestamp; - } - }); - - harness.extend_slots(1).await; - - let two_after_bellatrix_head = &harness.chain.head_snapshot().beacon_block; - assert!( - two_after_bellatrix_head - .message() - .body() - .execution_payload() - .unwrap() - .is_default_with_empty_roots(), - "Two after bellatrix head is default payload" - ); - assert_eq!(two_after_bellatrix_head.slot(), bellatrix_fork_slot + 2); - - /* - * Next Bellatrix block should include an exec payload. - */ - for _ in 0..4 { - harness.extend_slots(1).await; - - let block = &harness.chain.head_snapshot().beacon_block; - execution_payloads.push(block.message().body().execution_payload().unwrap().into()); - } - - verify_execution_payload_chain(execution_payloads.as_slice()); -} diff --git a/beacon_node/beacon_chain/tests/block_verification.rs b/beacon_node/beacon_chain/tests/block_verification.rs index d214ea6b15..e94e64e91d 100644 --- a/beacon_node/beacon_chain/tests/block_verification.rs +++ b/beacon_node/beacon_chain/tests/block_verification.rs @@ -20,10 +20,9 @@ use fixed_bytes::FixedBytesExtended; use logging::create_test_tracing_subscriber; use slasher::{Config as SlasherConfig, Slasher}; use state_processing::{ - BlockProcessingError, ConsensusContext, VerifyBlockRoot, + BlockProcessingError, BlockSignatureStrategy, ConsensusContext, VerifyBlockRoot, common::{attesting_indices_base, attesting_indices_electra}, - per_block_processing::{BlockSignatureStrategy, per_block_processing}, - per_slot_processing, + per_block_processing, per_slot_processing, }; use std::marker::PhantomData; use std::sync::{Arc, LazyLock}; @@ -1849,10 +1848,8 @@ async fn add_altair_block_to_base_chain() { // https://github.com/sigp/lighthouse/issues/4332#issuecomment-1565092279 #[tokio::test] async fn import_duplicate_block_unrealized_justification() { - let spec = MainnetEthSpec::default_spec(); - let harness = BeaconChainHarness::builder(MainnetEthSpec) - .spec(spec.into()) + .default_spec() .keypairs(KEYPAIRS[..].to_vec()) .fresh_ephemeral_store() .mock_execution_layer() diff --git a/beacon_node/beacon_chain/tests/capella.rs b/beacon_node/beacon_chain/tests/capella.rs deleted file mode 100644 index e8ab795366..0000000000 --- a/beacon_node/beacon_chain/tests/capella.rs +++ /dev/null @@ -1,156 +0,0 @@ -#![cfg(not(debug_assertions))] // Tests run too slow in debug. - -use beacon_chain::test_utils::BeaconChainHarness; -use execution_layer::test_utils::Block; -use types::*; - -const VALIDATOR_COUNT: usize = 32; -type E = MainnetEthSpec; - -fn verify_execution_payload_chain(chain: &[FullPayload]) { - let mut prev_ep: Option> = None; - - for ep in chain { - assert!(!ep.is_default_with_empty_roots()); - assert!(ep.block_hash() != ExecutionBlockHash::zero()); - - // Check against previous `ExecutionPayload`. - if let Some(prev_ep) = prev_ep { - assert_eq!(prev_ep.block_hash(), ep.parent_hash()); - assert_eq!(prev_ep.block_number() + 1, ep.block_number()); - assert!(ep.timestamp() > prev_ep.timestamp()); - } - prev_ep = Some(ep.clone()); - } -} - -#[tokio::test] -async fn base_altair_bellatrix_capella() { - let altair_fork_epoch = Epoch::new(4); - let altair_fork_slot = altair_fork_epoch.start_slot(E::slots_per_epoch()); - let bellatrix_fork_epoch = Epoch::new(8); - let bellatrix_fork_slot = bellatrix_fork_epoch.start_slot(E::slots_per_epoch()); - let capella_fork_epoch = Epoch::new(12); - let capella_fork_slot = capella_fork_epoch.start_slot(E::slots_per_epoch()); - - let mut spec = E::default_spec(); - spec.altair_fork_epoch = Some(altair_fork_epoch); - spec.bellatrix_fork_epoch = Some(bellatrix_fork_epoch); - spec.capella_fork_epoch = Some(capella_fork_epoch); - - let harness = BeaconChainHarness::builder(E::default()) - .spec(spec.into()) - .deterministic_keypairs(VALIDATOR_COUNT) - .fresh_ephemeral_store() - .mock_execution_layer() - .build(); - - /* - * Start with the base fork. - */ - assert!(harness.chain.head_snapshot().beacon_block.as_base().is_ok()); - - /* - * Do the Altair fork. - */ - Box::pin(harness.extend_to_slot(altair_fork_slot)).await; - - let altair_head = &harness.chain.head_snapshot().beacon_block; - assert!(altair_head.as_altair().is_ok()); - assert_eq!(altair_head.slot(), altair_fork_slot); - - /* - * Do the Bellatrix fork, without a terminal PoW block. - */ - Box::pin(harness.extend_to_slot(bellatrix_fork_slot)).await; - - let bellatrix_head = &harness.chain.head_snapshot().beacon_block; - assert!(bellatrix_head.as_bellatrix().is_ok()); - assert_eq!(bellatrix_head.slot(), bellatrix_fork_slot); - assert!( - bellatrix_head - .message() - .body() - .execution_payload() - .unwrap() - .is_default_with_empty_roots(), - "Bellatrix head is default payload" - ); - - /* - * Next Bellatrix block shouldn't include an exec payload. - */ - Box::pin(harness.extend_slots(1)).await; - - let one_after_bellatrix_head = &harness.chain.head_snapshot().beacon_block; - assert!( - one_after_bellatrix_head - .message() - .body() - .execution_payload() - .unwrap() - .is_default_with_empty_roots(), - "One after bellatrix head is default payload" - ); - assert_eq!(one_after_bellatrix_head.slot(), bellatrix_fork_slot + 1); - - /* - * Trigger the terminal PoW block. - */ - harness - .execution_block_generator() - .move_to_terminal_block() - .unwrap(); - - // Add a slot duration to get to the next slot - let timestamp = harness.get_timestamp_at_slot() + harness.spec.get_slot_duration().as_secs(); - harness - .execution_block_generator() - .modify_last_block(|block| { - if let Block::PoW(terminal_block) = block { - terminal_block.timestamp = timestamp; - } - }); - Box::pin(harness.extend_slots(1)).await; - - let two_after_bellatrix_head = &harness.chain.head_snapshot().beacon_block; - assert!( - two_after_bellatrix_head - .message() - .body() - .execution_payload() - .unwrap() - .is_default_with_empty_roots(), - "Two after bellatrix head is default payload" - ); - assert_eq!(two_after_bellatrix_head.slot(), bellatrix_fork_slot + 2); - - /* - * Next Bellatrix block should include an exec payload. - */ - let mut execution_payloads = vec![]; - for _ in (bellatrix_fork_slot.as_u64() + 3)..capella_fork_slot.as_u64() { - harness.extend_slots(1).await; - let block = &harness.chain.head_snapshot().beacon_block; - let full_payload: FullPayload = - block.message().body().execution_payload().unwrap().into(); - // pre-capella shouldn't have withdrawals - assert!(full_payload.withdrawals_root().is_err()); - execution_payloads.push(full_payload); - } - - /* - * Should enter capella fork now. - */ - for _ in 0..16 { - harness.extend_slots(1).await; - let block = &harness.chain.head_snapshot().beacon_block; - let full_payload: FullPayload = - block.message().body().execution_payload().unwrap().into(); - // post-capella should have withdrawals - assert!(full_payload.withdrawals_root().is_ok()); - execution_payloads.push(full_payload); - } - - verify_execution_payload_chain(execution_payloads.as_slice()); -} diff --git a/beacon_node/beacon_chain/tests/events.rs b/beacon_node/beacon_chain/tests/events.rs index 92727ffd76..121f8c255d 100644 --- a/beacon_node/beacon_chain/tests/events.rs +++ b/beacon_node/beacon_chain/tests/events.rs @@ -115,7 +115,7 @@ async fn data_column_sidecar_event_on_process_gossip_data_column() { /// Verifies that a blob event is emitted when blobs are received via RPC. #[tokio::test] async fn blob_sidecar_event_on_process_rpc_blobs() { - if fork_name_from_env().is_some_and(|f| !f.deneb_enabled() || f.fulu_enabled()) { + if fork_name_from_env().is_none_or(|f| !f.deneb_enabled() || f.fulu_enabled()) { return; }; @@ -170,7 +170,7 @@ async fn blob_sidecar_event_on_process_rpc_blobs() { #[tokio::test] async fn data_column_sidecar_event_on_process_rpc_columns() { - if fork_name_from_env().is_some_and(|f| !f.fulu_enabled()) { + if fork_name_from_env().is_none_or(|f| !f.fulu_enabled()) { return; }; diff --git a/beacon_node/beacon_chain/tests/main.rs b/beacon_node/beacon_chain/tests/main.rs index aec4416419..e02c488ac6 100644 --- a/beacon_node/beacon_chain/tests/main.rs +++ b/beacon_node/beacon_chain/tests/main.rs @@ -1,9 +1,7 @@ mod attestation_production; mod attestation_verification; -mod bellatrix; mod blob_verification; mod block_verification; -mod capella; mod column_verification; mod events; mod op_verification; diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index 7fd70f0e77..b282adecd5 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -3,8 +3,8 @@ use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::{ BeaconChainError, BlockError, ChainConfig, ExecutionPayloadError, - INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, NotifyExecutionLayer, OverrideForkchoiceUpdate, - StateSkipConfig, WhenSlotSkipped, + INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, NotifyExecutionLayer, StateSkipConfig, + WhenSlotSkipped, canonical_head::{CachedHead, CanonicalHead}, test_utils::{BeaconChainHarness, EphemeralHarnessType, fork_name_from_env, test_spec}, }; @@ -138,25 +138,6 @@ impl InvalidPayloadRig { payload_attributes } - fn move_to_terminal_block(&self) { - let mock_execution_layer = self.harness.mock_execution_layer.as_ref().unwrap(); - mock_execution_layer - .server - .execution_block_generator() - .move_to_terminal_block() - .unwrap(); - } - - fn latest_execution_block_hash(&self) -> ExecutionBlockHash { - let mock_execution_layer = self.harness.mock_execution_layer.as_ref().unwrap(); - mock_execution_layer - .server - .execution_block_generator() - .latest_execution_block() - .unwrap() - .block_hash - } - async fn build_blocks(&mut self, num_blocks: u64, is_valid: Payload) -> Vec { let mut roots = Vec::with_capacity(num_blocks as usize); for _ in 0..num_blocks { @@ -393,7 +374,6 @@ async fn valid_invalid_syncing() { return; } let mut rig = InvalidPayloadRig::new(); - rig.move_to_terminal_block(); rig.import_block(Payload::Valid).await; rig.import_block(Payload::Invalid { @@ -411,7 +391,6 @@ async fn invalid_payload_invalidates_parent() { return; } let mut rig = InvalidPayloadRig::new().enable_attestations(); - rig.move_to_terminal_block(); rig.import_block(Payload::Valid).await; // Import a valid transition block. rig.move_to_first_justification(Payload::Syncing).await; @@ -443,7 +422,6 @@ async fn immediate_forkchoice_update_invalid_test( invalid_payload: impl FnOnce(Option) -> Payload, ) { let mut rig = InvalidPayloadRig::new().enable_attestations(); - rig.move_to_terminal_block(); rig.import_block(Payload::Valid).await; // Import a valid transition block. rig.move_to_first_justification(Payload::Syncing).await; @@ -501,7 +479,6 @@ async fn justified_checkpoint_becomes_invalid() { return; } let mut rig = InvalidPayloadRig::new().enable_attestations(); - rig.move_to_terminal_block(); rig.import_block(Payload::Valid).await; // Import a valid transition block. rig.move_to_first_justification(Payload::Syncing).await; @@ -549,7 +526,6 @@ async fn pre_finalized_latest_valid_hash() { let finalized_epoch = 2; let mut rig = InvalidPayloadRig::new().enable_attestations(); - rig.move_to_terminal_block(); let mut blocks = vec![]; blocks.push(rig.import_block(Payload::Valid).await); // Import a valid transition block. blocks.extend(rig.build_blocks(num_blocks - 1, Payload::Syncing).await); @@ -598,7 +574,6 @@ async fn latest_valid_hash_will_not_validate() { const LATEST_VALID_SLOT: u64 = 3; let mut rig = InvalidPayloadRig::new().enable_attestations(); - rig.move_to_terminal_block(); let mut blocks = vec![]; blocks.push(rig.import_block(Payload::Valid).await); // Import a valid transition block. @@ -649,7 +624,6 @@ async fn latest_valid_hash_is_junk() { let finalized_epoch = 3; let mut rig = InvalidPayloadRig::new().enable_attestations(); - rig.move_to_terminal_block(); let mut blocks = vec![]; blocks.push(rig.import_block(Payload::Valid).await); // Import a valid transition block. blocks.extend(rig.build_blocks(num_blocks, Payload::Syncing).await); @@ -694,7 +668,6 @@ async fn invalidates_all_descendants() { let finalized_slot = E::slots_per_epoch() * 2; let mut rig = InvalidPayloadRig::new().enable_attestations(); - rig.move_to_terminal_block(); rig.import_block(Payload::Valid).await; // Import a valid transition block. let blocks = rig.build_blocks(num_blocks, Payload::Syncing).await; @@ -804,7 +777,6 @@ async fn switches_heads() { let finalized_slot = E::slots_per_epoch() * 2; let mut rig = InvalidPayloadRig::new().enable_attestations(); - rig.move_to_terminal_block(); rig.import_block(Payload::Valid).await; // Import a valid transition block. let blocks = rig.build_blocks(num_blocks, Payload::Syncing).await; @@ -906,7 +878,6 @@ async fn invalid_during_processing() { return; } let mut rig = InvalidPayloadRig::new(); - rig.move_to_terminal_block(); let roots = &[ rig.import_block(Payload::Valid).await, @@ -941,7 +912,6 @@ async fn invalid_after_optimistic_sync() { return; } let mut rig = InvalidPayloadRig::new().enable_attestations(); - rig.move_to_terminal_block(); rig.import_block(Payload::Valid).await; // Import a valid transition block. let mut roots = vec![ @@ -982,7 +952,6 @@ async fn manually_validate_child() { return; } let mut rig = InvalidPayloadRig::new().enable_attestations(); - rig.move_to_terminal_block(); rig.import_block(Payload::Valid).await; // Import a valid transition block. let parent = rig.import_block(Payload::Syncing).await; @@ -1003,7 +972,6 @@ async fn manually_validate_parent() { return; } let mut rig = InvalidPayloadRig::new().enable_attestations(); - rig.move_to_terminal_block(); rig.import_block(Payload::Valid).await; // Import a valid transition block. let parent = rig.import_block(Payload::Syncing).await; @@ -1024,7 +992,6 @@ async fn payload_preparation() { return; } let mut rig = InvalidPayloadRig::new(); - rig.move_to_terminal_block(); rig.import_block(Payload::Valid).await; let el = rig.execution_layer(); @@ -1088,7 +1055,6 @@ async fn invalid_parent() { return; } let mut rig = InvalidPayloadRig::new(); - rig.move_to_terminal_block(); rig.import_block(Payload::Valid).await; // Import a valid transition block. // Import a syncing block atop the transition block (we'll call this the "parent block" since we @@ -1156,89 +1122,12 @@ async fn invalid_parent() { )); } -/// Tests to ensure that we will still send a proposer preparation -#[tokio::test] -async fn payload_preparation_before_transition_block() { - if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) { - return; - } - let rig = InvalidPayloadRig::new(); - let el = rig.execution_layer(); - - // Run the watchdog routine so that the status of the execution engine is set. This ensures - // that we don't end up with `eth_syncing` requests later in this function that will impede - // testing. - el.watchdog_task().await; - - let head = rig.harness.chain.head_snapshot(); - assert_eq!( - head.beacon_block - .message() - .body() - .execution_payload() - .unwrap() - .block_hash(), - ExecutionBlockHash::zero(), - "the head block is post-bellatrix but pre-transition" - ); - - let current_slot = rig.harness.chain.slot().unwrap(); - let next_slot = current_slot + 1; - let proposer = head - .beacon_state - .get_beacon_proposer_index(next_slot, &rig.harness.chain.spec) - .unwrap(); - let fee_recipient = Address::repeat_byte(99); - - // Provide preparation data to the EL for `proposer`. - el.update_proposer_preparation( - Epoch::new(0), - [( - &ProposerPreparationData { - validator_index: proposer as u64, - fee_recipient, - }, - &None, - )], - ) - .await; - - rig.move_to_terminal_block(); - - rig.harness - .chain - .prepare_beacon_proposer(current_slot) - .await - .unwrap(); - let forkchoice_update_params = rig - .harness - .chain - .canonical_head - .fork_choice_read_lock() - .get_forkchoice_update_parameters(); - rig.harness - .chain - .update_execution_engine_forkchoice( - current_slot, - forkchoice_update_params, - OverrideForkchoiceUpdate::Yes, - ) - .await - .unwrap(); - - let (fork_choice_state, payload_attributes) = rig.previous_forkchoice_update_params(); - let latest_block_hash = rig.latest_execution_block_hash(); - assert_eq!(payload_attributes.suggested_fee_recipient(), fee_recipient); - assert_eq!(fork_choice_state.head_block_hash, latest_block_hash); -} - #[tokio::test] async fn attesting_to_optimistic_head() { if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) { return; } let mut rig = InvalidPayloadRig::new(); - rig.move_to_terminal_block(); rig.import_block(Payload::Valid).await; // Import a valid transition block. let root = rig.import_block(Payload::Syncing).await; @@ -1361,7 +1250,6 @@ impl InvalidHeadSetup { async fn new() -> InvalidHeadSetup { let slots_per_epoch = E::slots_per_epoch(); let mut rig = InvalidPayloadRig::new().enable_attestations(); - rig.move_to_terminal_block(); rig.import_block(Payload::Valid).await; // Import a valid transition block. // Import blocks until the first time the chain finalizes. This avoids @@ -1546,7 +1434,6 @@ async fn weights_after_resetting_optimistic_status() { return; } let mut rig = InvalidPayloadRig::new().enable_attestations(); - rig.move_to_terminal_block(); rig.import_block(Payload::Valid).await; // Import a valid transition block. let mut roots = vec![]; diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index ff20e999bb..cfc53c8ce0 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -5558,6 +5558,226 @@ fn check_iterators_from_slot(harness: &TestHarness, slot: Slot) { ); } +/// Test that blocks with default (pre-merge) execution payloads and non-default (post-merge) +/// execution payloads can be produced, stored, and retrieved correctly through a merge transition. +/// +/// Spec (see .claude/plans/8658.md): +/// - Bellatrix at epoch 0 (genesis), genesis has default execution payload header +/// - Slots 1-9: blocks have default (zeroed) execution payloads +/// - Slot 10: first block with a non-default execution payload (merge transition block) +/// - Slots 11-32+: non-default payloads, each with parent_hash == prev payload block_hash +/// - Chain must finalize past genesis +#[tokio::test] +async fn bellatrix_produce_and_store_payloads() { + use beacon_chain::test_utils::{ + DEFAULT_ETH1_BLOCK_HASH, HARNESS_GENESIS_TIME, InteropGenesisBuilder, + }; + use safe_arith::SafeArith; + use state_processing::per_block_processing::is_merge_transition_complete; + use tree_hash::TreeHash; + + let merge_slot = 10u64; + let total_slots = 48u64; + let spec = ForkName::Bellatrix.make_genesis_spec(E::default_spec()); + + // Build genesis state with a default (zeroed) execution payload header so that + // is_merge_transition_complete = false at genesis. + let keypairs = KEYPAIRS[0..LOW_VALIDATOR_COUNT].to_vec(); + let genesis_state = InteropGenesisBuilder::default() + .set_alternating_eth1_withdrawal_credentials() + .set_opt_execution_payload_header(None) + .build_genesis_state( + &keypairs, + HARNESS_GENESIS_TIME, + Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH), + &spec, + ) + .unwrap(); + + assert!( + !is_merge_transition_complete(&genesis_state), + "genesis should NOT have merge complete" + ); + + let db_path = tempdir().unwrap(); + let store = get_store_generic( + &db_path, + StoreConfig { + prune_payloads: false, + ..StoreConfig::default() + }, + spec.clone(), + ); + + let chain_config = ChainConfig { + archive: true, + ..ChainConfig::default() + }; + let harness = TestHarness::builder(MinimalEthSpec) + .spec(store.get_chain_spec().clone()) + .keypairs(keypairs.clone()) + .fresh_disk_store(store.clone()) + .override_store_mutator(Box::new(move |builder: BeaconChainBuilder<_>| { + builder + .genesis_state(genesis_state) + .expect("should set genesis state") + })) + .mock_execution_layer() + .chain_config(chain_config) + .build(); + + harness + .mock_execution_layer + .as_ref() + .unwrap() + .server + .all_payloads_valid(); + + harness.advance_slot(); + + // Phase 1: slots 1 to merge_slot-1 — blocks with default execution payloads. + let mut state = harness.get_current_state(); + for slot_num in 1..merge_slot { + let slot = Slot::new(slot_num); + harness.advance_slot(); + harness + .build_and_import_block_with_payload( + &mut state, + slot, + ExecutionPayloadBellatrix::default(), + ) + .await; + state = harness.get_current_state(); + } + + // Phase 2: slot merge_slot — the merge transition block with a real payload. + { + let slot = Slot::new(merge_slot); + harness.advance_slot(); + + // Advance state to compute correct timestamp and randao. + let mut pre_state = state.clone(); + complete_state_advance(&mut pre_state, None, slot, &harness.spec) + .expect("should advance state"); + pre_state + .build_caches(&harness.spec) + .expect("should build caches"); + + let timestamp = pre_state + .genesis_time() + .safe_add( + slot.as_u64() + .safe_mul(harness.spec.seconds_per_slot) + .unwrap(), + ) + .unwrap(); + let prev_randao = *pre_state.get_randao_mix(pre_state.current_epoch()).unwrap(); + + let mut transition_payload = ExecutionPayloadBellatrix { + parent_hash: ExecutionBlockHash::zero(), + fee_recipient: Address::repeat_byte(42), + receipts_root: Hash256::repeat_byte(42), + state_root: Hash256::repeat_byte(43), + logs_bloom: vec![0; 256].try_into().unwrap(), + prev_randao, + block_number: 1, + gas_limit: 30_000_000, + gas_used: 0, + timestamp, + extra_data: VariableList::empty(), + base_fee_per_gas: Uint256::from(1u64), + block_hash: ExecutionBlockHash::zero(), + transactions: VariableList::empty(), + }; + transition_payload.block_hash = + ExecutionBlockHash::from_root(transition_payload.tree_hash_root()); + + // Insert the transition payload into the mock EL so subsequent blocks can chain. + { + let mock_el = harness.mock_execution_layer.as_ref().unwrap(); + let mut block_gen = mock_el.server.execution_block_generator(); + block_gen.insert_block_without_checks(execution_layer::test_utils::Block::PoS( + ExecutionPayload::Bellatrix(transition_payload.clone()), + )); + } + + harness + .build_and_import_block_with_payload(&mut state, slot, transition_payload) + .await; + state = harness.get_current_state(); + + assert!( + is_merge_transition_complete(&state), + "merge should be complete after slot {merge_slot}" + ); + } + + // Phase 3: slots merge_slot+1 to total_slots — use harness with attestations. + let post_merge_slots = (total_slots - merge_slot) as usize; + harness.extend_slots(post_merge_slots).await; + + // ---- Verification: check all blocks in the store against plan invariants ---- + + let mut prev_payload_block_hash: Option = None; + + for slot_num in 1..=total_slots { + let slot = Slot::new(slot_num); + let block_root = harness + .chain + .block_root_at_slot(slot, WhenSlotSkipped::Prev) + .unwrap() + .unwrap_or_else(|| panic!("missing block at slot {slot_num}")); + let block = store + .get_blinded_block(&block_root) + .unwrap() + .unwrap_or_else(|| panic!("block not in store at slot {slot_num}")); + let payload = block + .message() + .body() + .execution_payload() + .expect("bellatrix block should have execution payload"); + + if slot_num < merge_slot { + // Slots 1 to merge_slot-1: payload must be default. + assert!( + payload.is_default_with_empty_roots(), + "slot {slot_num} should have default payload" + ); + } else if slot_num == merge_slot { + // Merge transition block: first non-default payload. + assert!( + !payload.is_default_with_empty_roots(), + "slot {slot_num} (merge) should have non-default payload" + ); + prev_payload_block_hash = Some(payload.block_hash()); + } else { + // Post-merge: non-default payload with valid parent_hash chain. + assert!( + !payload.is_default_with_empty_roots(), + "slot {slot_num} should have non-default payload" + ); + assert_eq!( + payload.parent_hash(), + prev_payload_block_hash.unwrap(), + "slot {slot_num} payload parent_hash should chain from previous payload" + ); + prev_payload_block_hash = Some(payload.block_hash()); + } + } + + // Verify finalization. + let finalized_epoch = harness + .chain + .canonical_head + .cached_head() + .finalized_checkpoint() + .epoch; + assert!( + finalized_epoch > 0, + "chain should have finalized past genesis" + ); +} + fn get_finalized_epoch_boundary_blocks( dump: &[BeaconSnapshot>], ) -> HashSet { diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 1b395ac8da..865599b9bd 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -281,7 +281,7 @@ where validator_count, genesis_time, } => { - let execution_payload_header = generate_genesis_header(&spec, true); + let execution_payload_header = generate_genesis_header(&spec); let keypairs = generate_deterministic_keypairs(validator_count); let genesis_state = interop_genesis_state( &keypairs, diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs index 21a5abeb6c..c1d8cae573 100644 --- a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -1,9 +1,7 @@ use crate::metrics; use beacon_chain::{ BeaconChain, BeaconChainTypes, ExecutionStatus, - bellatrix_readiness::{ - BellatrixReadiness, GenesisExecutionPayloadStatus, MergeConfig, SECONDS_IN_A_WEEK, - }, + bellatrix_readiness::GenesisExecutionPayloadStatus, }; use execution_layer::{ EngineCapabilities, @@ -36,6 +34,7 @@ const SPEEDO_OBSERVATIONS: usize = 4; /// The number of slots between logs that give detail about backfill process. const BACKFILL_LOG_INTERVAL: u64 = 5; +const SECONDS_IN_A_WEEK: u64 = 604800; pub const FORK_READINESS_PREPARATION_SECONDS: u64 = SECONDS_IN_A_WEEK * 2; pub const ENGINE_CAPABILITIES_REFRESH_INTERVAL: u64 = 300; @@ -70,7 +69,6 @@ pub fn spawn_notifier( wait_time = estimated_time_pretty(Some(next_slot.as_secs() as f64)), "Waiting for genesis" ); - bellatrix_readiness_logging(Slot::new(0), &beacon_chain).await; post_bellatrix_readiness_logging(Slot::new(0), &beacon_chain).await; genesis_execution_payload_logging(&beacon_chain).await; sleep(slot_duration).await; @@ -414,7 +412,6 @@ pub fn spawn_notifier( ); } - bellatrix_readiness_logging(current_slot, &beacon_chain).await; post_bellatrix_readiness_logging(current_slot, &beacon_chain).await; } }; @@ -425,88 +422,7 @@ pub fn spawn_notifier( Ok(()) } -/// Provides some helpful logging to users to indicate if their node is ready for the Bellatrix -/// fork and subsequent merge transition. -async fn bellatrix_readiness_logging( - current_slot: Slot, - beacon_chain: &BeaconChain, -) { - // There is no execution payload in gloas blocks, so this will trigger - // bellatrix readiness logging in gloas if we dont skip the check below - if beacon_chain - .spec - .fork_name_at_slot::(current_slot) - .gloas_enabled() - { - return; - } - - let merge_completed = beacon_chain - .canonical_head - .cached_head() - .snapshot - .beacon_block - .message() - .body() - .execution_payload() - .is_ok_and(|payload| payload.parent_hash() != ExecutionBlockHash::zero()); - - let has_execution_layer = beacon_chain.execution_layer.is_some(); - - if merge_completed && has_execution_layer - || !beacon_chain.is_time_to_prepare_for_bellatrix(current_slot) - { - return; - } - - match beacon_chain.check_bellatrix_readiness(current_slot).await { - BellatrixReadiness::Ready { - config, - current_difficulty, - } => match config { - MergeConfig { - terminal_total_difficulty: Some(ttd), - terminal_block_hash: None, - terminal_block_hash_epoch: None, - } => { - info!( - terminal_total_difficulty = %ttd, - current_difficulty = current_difficulty - .map(|d| d.to_string()) - .unwrap_or_else(|| "??".into()), - "Ready for Bellatrix" - ) - } - MergeConfig { - terminal_total_difficulty: _, - terminal_block_hash: Some(terminal_block_hash), - terminal_block_hash_epoch: Some(terminal_block_hash_epoch), - } => { - info!( - info = "you are using override parameters, please ensure that you \ - understand these parameters and their implications.", - ?terminal_block_hash, - ?terminal_block_hash_epoch, - "Ready for Bellatrix" - ) - } - other => error!( - config = ?other, - "Inconsistent merge configuration" - ), - }, - readiness @ BellatrixReadiness::NotSynced => warn!( - info = %readiness, - "Not ready Bellatrix" - ), - readiness @ BellatrixReadiness::NoExecutionEndpoint => warn!( - info = %readiness, - "Not ready for Bellatrix" - ), - } -} - -/// Provides some helpful logging to users to indicate if their node is ready for Capella +/// Provides some helpful logging to users to indicate if their node is ready for upcoming forks async fn post_bellatrix_readiness_logging( current_slot: Slot, beacon_chain: &BeaconChain, diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 157fe152ef..d6796f6a05 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -22,7 +22,6 @@ use eth2::types::{ForkVersionedResponse, builder::SignedBuilderBid}; use fixed_bytes::UintExtended; use fork_choice::ForkchoiceUpdateParameters; use logging::crit; -use lru::LruCache; pub use payload_status::PayloadStatus; use payload_status::process_payload_status; use sensitive_url::SensitiveUrl; @@ -32,7 +31,6 @@ use std::collections::{HashMap, hash_map::Entry}; use std::fmt; use std::future::Future; use std::io::Write; -use std::num::NonZeroUsize; use std::path::PathBuf; use std::sync::Arc; use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; @@ -45,6 +43,7 @@ use tokio::{ use tokio_stream::wrappers::WatchStream; use tracing::{Instrument, debug, debug_span, error, info, instrument, warn}; use tree_hash::TreeHash; +use types::ExecutionPayloadGloas; use types::builder::BuilderBid; use types::execution::BlockProductionVersion; use types::kzg_ext::KzgCommitments; @@ -57,7 +56,6 @@ use types::{ ExecutionPayloadCapella, ExecutionPayloadElectra, ExecutionPayloadFulu, FullPayload, ProposerPreparationData, Slot, }; -use types::{ExecutionPayloadGloas, new_non_zero_usize}; mod block_hash; mod engine_api; @@ -75,10 +73,6 @@ pub const DEFAULT_EXECUTION_ENDPOINT: &str = "http://localhost:8551/"; /// Name for the default file used for the jwt secret. pub const DEFAULT_JWT_FILE: &str = "jwt.hex"; -/// Each time the `ExecutionLayer` retrieves a block from an execution node, it stores that block -/// in an LRU cache to avoid redundant lookups. This is the size of that cache. -const EXECUTION_BLOCKS_LRU_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(128); - /// A fee recipient address for use during block production. Only used as a very last resort if /// there is no address provided by the user. /// @@ -452,7 +446,6 @@ struct Inner { execution_engine_forkchoice_lock: Mutex<()>, suggested_fee_recipient: Option
, proposer_preparation_data: Mutex>, - execution_blocks: Mutex>, proposers: RwLock>, executor: TaskExecutor, payload_cache: PayloadCache, @@ -563,7 +556,6 @@ impl ExecutionLayer { suggested_fee_recipient, proposer_preparation_data: Mutex::new(HashMap::new()), proposers: RwLock::new(HashMap::new()), - execution_blocks: Mutex::new(LruCache::new(EXECUTION_BLOCKS_LRU_CACHE_SIZE)), executor, payload_cache: PayloadCache::default(), last_new_payload_errored: RwLock::new(false), @@ -655,12 +647,6 @@ impl ExecutionLayer { .ok_or(ApiError::ExecutionHeadBlockNotFound)?; Ok(block.total_difficulty) } - /// Note: this function returns a mutex guard, be careful to avoid deadlocks. - async fn execution_blocks( - &self, - ) -> MutexGuard<'_, LruCache> { - self.inner.execution_blocks.lock().await - } /// Gives access to a channel containing if the last engine state is online or not. /// @@ -1641,208 +1627,6 @@ impl ExecutionLayer { Ok(versions) } - /// Used during block production to determine if the merge has been triggered. - /// - /// ## Specification - /// - /// `get_terminal_pow_block_hash` - /// - /// https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/merge/validator.md - pub async fn get_terminal_pow_block_hash( - &self, - spec: &ChainSpec, - timestamp: u64, - ) -> Result, Error> { - let _timer = metrics::start_timer_vec( - &metrics::EXECUTION_LAYER_REQUEST_TIMES, - &[metrics::GET_TERMINAL_POW_BLOCK_HASH], - ); - - let hash_opt = self - .engine() - .request(|engine| async move { - let terminal_block_hash = spec.terminal_block_hash; - if terminal_block_hash != ExecutionBlockHash::zero() { - if self - .get_pow_block(engine, terminal_block_hash) - .await? - .is_some() - { - return Ok(Some(terminal_block_hash)); - } else { - return Ok(None); - } - } - - let block = self.get_pow_block_at_total_difficulty(engine, spec).await?; - if let Some(pow_block) = block { - // If `terminal_block.timestamp == transition_block.timestamp`, - // we violate the invariant that a block's timestamp must be - // strictly greater than its parent's timestamp. - // The execution layer will reject a fcu call with such payload - // attributes leading to a missed block. - // Hence, we return `None` in such a case. - if pow_block.timestamp >= timestamp { - return Ok(None); - } - } - Ok(block.map(|b| b.block_hash)) - }) - .await - .map_err(Box::new) - .map_err(Error::EngineError)?; - - if let Some(hash) = &hash_opt { - info!( - terminal_block_hash_override = ?spec.terminal_block_hash, - terminal_total_difficulty = ?spec.terminal_total_difficulty, - block_hash = ?hash, - "Found terminal block hash" - ); - } - - Ok(hash_opt) - } - - /// This function should remain internal. External users should use - /// `self.get_terminal_pow_block` instead, since it checks against the terminal block hash - /// override. - /// - /// ## Specification - /// - /// `get_pow_block_at_terminal_total_difficulty` - /// - /// https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/merge/validator.md - async fn get_pow_block_at_total_difficulty( - &self, - engine: &Engine, - spec: &ChainSpec, - ) -> Result, ApiError> { - let mut block = engine - .api - .get_block_by_number(BlockByNumberQuery::Tag(LATEST_TAG)) - .await? - .ok_or(ApiError::ExecutionHeadBlockNotFound)?; - - self.execution_blocks().await.put(block.block_hash, block); - - loop { - let block_reached_ttd = - block.terminal_total_difficulty_reached(spec.terminal_total_difficulty); - if block_reached_ttd { - if block.parent_hash == ExecutionBlockHash::zero() { - return Ok(Some(block)); - } - let parent = self - .get_pow_block(engine, block.parent_hash) - .await? - .ok_or(ApiError::ExecutionBlockNotFound(block.parent_hash))?; - let parent_reached_ttd = - parent.terminal_total_difficulty_reached(spec.terminal_total_difficulty); - - if block_reached_ttd && !parent_reached_ttd { - return Ok(Some(block)); - } else { - block = parent; - } - } else { - return Ok(None); - } - } - } - - /// Used during block verification to check that a block correctly triggers the merge. - /// - /// ## Returns - /// - /// - `Some(true)` if the given `block_hash` is the terminal proof-of-work block. - /// - `Some(false)` if the given `block_hash` is certainly *not* the terminal proof-of-work - /// block. - /// - `None` if the `block_hash` or its parent were not present on the execution engine. - /// - `Err(_)` if there was an error connecting to the execution engine. - /// - /// ## Fallback Behaviour - /// - /// The request will be broadcast to all nodes, simultaneously. It will await a response (or - /// failure) from all nodes and then return based on the first of these conditions which - /// returns true: - /// - /// - Terminal, if any node indicates it is terminal. - /// - Not terminal, if any node indicates it is non-terminal. - /// - Block not found, if any node cannot find the block. - /// - An error, if all nodes return an error. - /// - /// ## Specification - /// - /// `is_valid_terminal_pow_block` - /// - /// https://github.com/ethereum/consensus-specs/blob/v1.1.0/specs/merge/fork-choice.md - pub async fn is_valid_terminal_pow_block_hash( - &self, - block_hash: ExecutionBlockHash, - spec: &ChainSpec, - ) -> Result, Error> { - let _timer = metrics::start_timer_vec( - &metrics::EXECUTION_LAYER_REQUEST_TIMES, - &[metrics::IS_VALID_TERMINAL_POW_BLOCK_HASH], - ); - - self.engine() - .request(|engine| async move { - if let Some(pow_block) = self.get_pow_block(engine, block_hash).await? - && let Some(pow_parent) = - self.get_pow_block(engine, pow_block.parent_hash).await? - { - return Ok(Some( - self.is_valid_terminal_pow_block(pow_block, pow_parent, spec), - )); - } - Ok(None) - }) - .await - .map_err(Box::new) - .map_err(Error::EngineError) - } - - /// This function should remain internal. - /// - /// External users should use `self.is_valid_terminal_pow_block_hash`. - fn is_valid_terminal_pow_block( - &self, - block: ExecutionBlock, - parent: ExecutionBlock, - spec: &ChainSpec, - ) -> bool { - let is_total_difficulty_reached = - block.terminal_total_difficulty_reached(spec.terminal_total_difficulty); - let is_parent_total_difficulty_valid = parent - .total_difficulty - .is_some_and(|td| td < spec.terminal_total_difficulty); - is_total_difficulty_reached && is_parent_total_difficulty_valid - } - - /// Maps to the `eth_getBlockByHash` JSON-RPC call. - async fn get_pow_block( - &self, - engine: &Engine, - hash: ExecutionBlockHash, - ) -> Result, ApiError> { - if let Some(cached) = self.execution_blocks().await.get(&hash).copied() { - // The block was in the cache, no need to request it from the execution - // engine. - return Ok(Some(cached)); - } - - // The block was *not* in the cache, request it from the execution - // engine and cache it for future reference. - if let Some(block) = engine.api.get_block_by_hash(hash).await? { - self.execution_blocks().await.put(hash, block); - Ok(Some(block)) - } else { - Ok(None) - } - } - pub async fn get_payload_bodies_by_hash( &self, hashes: Vec, @@ -2330,15 +2114,6 @@ async fn timed_future, T>(metric: &str, future: F) -> (T, (result, duration) } -#[cfg(test)] -/// Returns the duration since the unix epoch. -fn timestamp_now() -> u64 { - SystemTime::now() - .duration_since(UNIX_EPOCH) - .unwrap_or_else(|_| Duration::from_secs(0)) - .as_secs() -} - fn noop( _: &ExecutionLayer, _: PayloadContentsRefTuple, @@ -2359,7 +2134,6 @@ mod test { async fn produce_three_valid_pos_execution_blocks() { let runtime = TestRuntime::default(); MockExecutionLayer::default_params(runtime.task_executor.clone()) - .move_to_terminal_block() .produce_valid_execution_payload_on_head() .await .produce_valid_execution_payload_on_head() @@ -2388,129 +2162,4 @@ mod test { Some(30_029_266) ); } - - #[tokio::test] - async fn test_forked_terminal_block() { - let runtime = TestRuntime::default(); - let (mock, block_hash) = MockExecutionLayer::default_params(runtime.task_executor.clone()) - .move_to_terminal_block() - .produce_forked_pow_block(); - assert!( - mock.el - .is_valid_terminal_pow_block_hash(block_hash, &mock.spec) - .await - .unwrap() - .unwrap() - ); - } - - #[tokio::test] - async fn finds_valid_terminal_block_hash() { - let runtime = TestRuntime::default(); - MockExecutionLayer::default_params(runtime.task_executor.clone()) - .move_to_block_prior_to_terminal_block() - .with_terminal_block(|spec, el, _| async move { - el.engine().upcheck().await; - assert_eq!( - el.get_terminal_pow_block_hash(&spec, timestamp_now()) - .await - .unwrap(), - None - ) - }) - .await - .move_to_terminal_block() - .with_terminal_block(|spec, el, terminal_block| async move { - assert_eq!( - el.get_terminal_pow_block_hash(&spec, timestamp_now()) - .await - .unwrap(), - Some(terminal_block.unwrap().block_hash) - ) - }) - .await; - } - - #[tokio::test] - async fn rejects_terminal_block_with_equal_timestamp() { - let runtime = TestRuntime::default(); - MockExecutionLayer::default_params(runtime.task_executor.clone()) - .move_to_block_prior_to_terminal_block() - .with_terminal_block(|spec, el, _| async move { - el.engine().upcheck().await; - assert_eq!( - el.get_terminal_pow_block_hash(&spec, timestamp_now()) - .await - .unwrap(), - None - ) - }) - .await - .move_to_terminal_block() - .with_terminal_block(|spec, el, terminal_block| async move { - let timestamp = terminal_block.as_ref().map(|b| b.timestamp).unwrap(); - assert_eq!( - el.get_terminal_pow_block_hash(&spec, timestamp) - .await - .unwrap(), - None - ) - }) - .await; - } - - #[tokio::test] - async fn verifies_valid_terminal_block_hash() { - let runtime = TestRuntime::default(); - MockExecutionLayer::default_params(runtime.task_executor.clone()) - .move_to_terminal_block() - .with_terminal_block(|spec, el, terminal_block| async move { - el.engine().upcheck().await; - assert_eq!( - el.is_valid_terminal_pow_block_hash(terminal_block.unwrap().block_hash, &spec) - .await - .unwrap(), - Some(true) - ) - }) - .await; - } - - #[tokio::test] - async fn rejects_invalid_terminal_block_hash() { - let runtime = TestRuntime::default(); - MockExecutionLayer::default_params(runtime.task_executor.clone()) - .move_to_terminal_block() - .with_terminal_block(|spec, el, terminal_block| async move { - el.engine().upcheck().await; - let invalid_terminal_block = terminal_block.unwrap().parent_hash; - - assert_eq!( - el.is_valid_terminal_pow_block_hash(invalid_terminal_block, &spec) - .await - .unwrap(), - Some(false) - ) - }) - .await; - } - - #[tokio::test] - async fn rejects_unknown_terminal_block_hash() { - let runtime = TestRuntime::default(); - MockExecutionLayer::default_params(runtime.task_executor.clone()) - .move_to_terminal_block() - .with_terminal_block(|spec, el, _| async move { - el.engine().upcheck().await; - let missing_terminal_block = ExecutionBlockHash::repeat_byte(42); - - assert_eq!( - el.is_valid_terminal_pow_block_hash(missing_terminal_block, &spec) - .await - .unwrap(), - None - ) - }) - .await; - } } diff --git a/beacon_node/execution_layer/src/metrics.rs b/beacon_node/execution_layer/src/metrics.rs index 859f33bc81..79bdc37aea 100644 --- a/beacon_node/execution_layer/src/metrics.rs +++ b/beacon_node/execution_layer/src/metrics.rs @@ -10,8 +10,6 @@ pub const GET_BLINDED_PAYLOAD_BUILDER: &str = "get_blinded_payload_builder"; pub const POST_BLINDED_PAYLOAD_BUILDER: &str = "post_blinded_payload_builder"; pub const NEW_PAYLOAD: &str = "new_payload"; pub const FORKCHOICE_UPDATED: &str = "forkchoice_updated"; -pub const GET_TERMINAL_POW_BLOCK_HASH: &str = "get_terminal_pow_block_hash"; -pub const IS_VALID_TERMINAL_POW_BLOCK_HASH: &str = "is_valid_terminal_pow_block_hash"; pub const LOCAL: &str = "local"; pub const BUILDER: &str = "builder"; pub const SUCCESS: &str = "success"; diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index 8591359f15..1743b340ab 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -28,8 +28,6 @@ use types::{ Transactions, Uint256, }; -use super::DEFAULT_TERMINAL_BLOCK; - const TEST_BLOB_BUNDLE: &[u8] = include_bytes!("fixtures/mainnet/test_blobs_bundle.ssz"); const TEST_BLOB_BUNDLE_V2: &[u8] = include_bytes!("fixtures/mainnet/test_blobs_bundle_v2.ssz"); @@ -172,9 +170,6 @@ fn make_rng() -> Arc> { impl ExecutionBlockGenerator { #[allow(clippy::too_many_arguments)] pub fn new( - terminal_total_difficulty: Uint256, - terminal_block_number: u64, - terminal_block_hash: ExecutionBlockHash, shanghai_time: Option, cancun_time: Option, prague_time: Option, @@ -187,9 +182,9 @@ impl ExecutionBlockGenerator { finalized_block_hash: <_>::default(), blocks: <_>::default(), block_hashes: <_>::default(), - terminal_total_difficulty, - terminal_block_number, - terminal_block_hash, + terminal_total_difficulty: Default::default(), + terminal_block_number: 0, + terminal_block_hash: Default::default(), pending_payloads: <_>::default(), next_payload_id: 0, payload_ids: <_>::default(), @@ -293,25 +288,6 @@ impl ExecutionBlockGenerator { .and_then(|block| block.as_execution_payload()) } - pub fn move_to_block_prior_to_terminal_block(&mut self) -> Result<(), String> { - let target_block = self - .terminal_block_number - .checked_sub(1) - .ok_or("terminal pow block is 0")?; - self.move_to_pow_block(target_block) - } - - pub fn move_to_terminal_block(&mut self) -> Result<(), String> { - self.move_to_pow_block(self.terminal_block_number) - } - - pub fn move_to_pow_block(&mut self, target_block: u64) -> Result<(), String> { - let next_block = self.latest_block().unwrap().block_number() + 1; - assert!(target_block >= next_block); - - self.insert_pow_blocks(next_block..=target_block) - } - pub fn drop_all_blocks(&mut self) { self.blocks = <_>::default(); self.block_hashes = <_>::default(); @@ -879,27 +855,22 @@ fn payload_id_from_u64(n: u64) -> PayloadId { n.to_le_bytes() } -pub fn generate_genesis_header( - spec: &ChainSpec, - post_transition_merge: bool, -) -> Option> { +pub fn generate_genesis_header(spec: &ChainSpec) -> Option> { let genesis_fork = spec.fork_name_at_slot::(spec.genesis_slot); - let genesis_block_hash = - generate_genesis_block(spec.terminal_total_difficulty, DEFAULT_TERMINAL_BLOCK) - .ok() - .map(|block| block.block_hash); + let genesis_block_hash = generate_genesis_block(Default::default(), 0) + .ok() + .map(|block| block.block_hash); let empty_transactions_root = Transactions::::empty().tree_hash_root(); match genesis_fork { - ForkName::Base | ForkName::Altair => None, + ForkName::Base | ForkName::Altair => { + // Pre-Bellatrix forks have no execution payload + None + } ForkName::Bellatrix => { - if post_transition_merge { - let mut header = ExecutionPayloadHeader::Bellatrix(<_>::default()); - *header.block_hash_mut() = genesis_block_hash.unwrap_or_default(); - *header.transactions_root_mut() = empty_transactions_root; - Some(header) - } else { - Some(ExecutionPayloadHeader::::Bellatrix(<_>::default())) - } + let mut header = ExecutionPayloadHeader::Bellatrix(<_>::default()); + *header.block_hash_mut() = genesis_block_hash.unwrap_or_default(); + *header.transactions_root_mut() = empty_transactions_root; + Some(header) } ForkName::Capella => { let mut header = ExecutionPayloadHeader::Capella(<_>::default()); @@ -985,70 +956,6 @@ mod test { use kzg::{Bytes48, CellRef, KzgBlobRef, trusted_setup::get_trusted_setup}; use types::{MainnetEthSpec, MinimalEthSpec}; - #[test] - fn pow_chain_only() { - const TERMINAL_DIFFICULTY: u64 = 10; - const TERMINAL_BLOCK: u64 = 10; - const DIFFICULTY_INCREMENT: u64 = 1; - - let mut generator: ExecutionBlockGenerator = ExecutionBlockGenerator::new( - Uint256::from(TERMINAL_DIFFICULTY), - TERMINAL_BLOCK, - ExecutionBlockHash::zero(), - None, - None, - None, - None, - None, - None, - ); - - for i in 0..=TERMINAL_BLOCK { - if i > 0 { - generator.insert_pow_block(i).unwrap(); - } - - /* - * Generate a block, inspect it. - */ - - let block = generator.latest_block().unwrap(); - assert_eq!(block.block_number(), i); - - let expected_parent = i - .checked_sub(1) - .map(|i| generator.block_by_number(i).unwrap().block_hash()) - .unwrap_or_else(ExecutionBlockHash::zero); - assert_eq!(block.parent_hash(), expected_parent); - - assert_eq!( - block.total_difficulty().unwrap(), - Uint256::from(i * DIFFICULTY_INCREMENT) - ); - - assert_eq!(generator.block_by_hash(block.block_hash()).unwrap(), block); - assert_eq!(generator.block_by_number(i).unwrap(), block); - - /* - * Check the parent is accessible. - */ - - if let Some(prev_i) = i.checked_sub(1) { - assert_eq!( - generator.block_by_number(prev_i).unwrap(), - generator.block_by_hash(block.parent_hash()).unwrap() - ); - } - - /* - * Check the next block is inaccessible. - */ - - let next_i = i + 1; - assert!(generator.block_by_number(next_i).is_none()); - } - } - #[test] fn valid_test_blobs_bundle_v1() { assert!( diff --git a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs index c69edb8f39..91966ff65e 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs @@ -1,9 +1,4 @@ -use crate::{ - test_utils::{ - DEFAULT_JWT_SECRET, DEFAULT_TERMINAL_BLOCK, DEFAULT_TERMINAL_DIFFICULTY, MockServer, - }, - *, -}; +use crate::{test_utils::DEFAULT_JWT_SECRET, test_utils::MockServer, *}; use alloy_primitives::B256 as H256; use fixed_bytes::FixedBytesExtended; use kzg::Kzg; @@ -20,12 +15,10 @@ pub struct MockExecutionLayer { impl MockExecutionLayer { pub fn default_params(executor: TaskExecutor) -> Self { let mut spec = MainnetEthSpec::default_spec(); - spec.terminal_total_difficulty = Uint256::from(DEFAULT_TERMINAL_DIFFICULTY); spec.terminal_block_hash = ExecutionBlockHash::zero(); spec.terminal_block_hash_activation_epoch = Epoch::new(0); Self::new( executor, - DEFAULT_TERMINAL_BLOCK, None, None, None, @@ -40,7 +33,6 @@ impl MockExecutionLayer { #[allow(clippy::too_many_arguments)] pub fn new( executor: TaskExecutor, - terminal_block: u64, shanghai_time: Option, cancun_time: Option, prague_time: Option, @@ -56,9 +48,6 @@ impl MockExecutionLayer { let server = MockServer::new( &handle, jwt_key, - spec.terminal_total_difficulty, - terminal_block, - spec.terminal_block_hash, shanghai_time, cancun_time, prague_time, @@ -293,53 +282,4 @@ impl MockExecutionLayer { assert_eq!(head_execution_block.block_hash(), block_hash); assert_eq!(head_execution_block.parent_hash(), parent_hash); } - - pub fn move_to_block_prior_to_terminal_block(self) -> Self { - self.server - .execution_block_generator() - .move_to_block_prior_to_terminal_block() - .unwrap(); - self - } - - pub fn move_to_terminal_block(self) -> Self { - self.server - .execution_block_generator() - .move_to_terminal_block() - .unwrap(); - self - } - - pub fn produce_forked_pow_block(self) -> (Self, ExecutionBlockHash) { - let head_block = self - .server - .execution_block_generator() - .latest_block() - .unwrap(); - - let block_hash = self - .server - .execution_block_generator() - .insert_pow_block_by_hash(head_block.parent_hash(), 1) - .unwrap(); - (self, block_hash) - } - - pub async fn with_terminal_block(self, func: U) -> Self - where - U: Fn(Arc, ExecutionLayer, Option) -> V, - V: Future, - { - let terminal_block_number = self - .server - .execution_block_generator() - .terminal_block_number; - let terminal_block = self - .server - .execution_block_generator() - .execution_block_by_number(terminal_block_number); - - func(self.spec.clone(), self.el.clone(), terminal_block).await; - self - } } diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs index 2465a41d8b..d8e1e70e49 100644 --- a/beacon_node/execution_layer/src/test_utils/mod.rs +++ b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -35,8 +35,6 @@ pub use hook::Hook; pub use mock_builder::{MockBuilder, Operation, mock_builder_extra_data}; pub use mock_execution_layer::MockExecutionLayer; -pub const DEFAULT_TERMINAL_DIFFICULTY: u64 = 6400; -pub const DEFAULT_TERMINAL_BLOCK: u64 = 64; pub const DEFAULT_JWT_SECRET: [u8; 32] = [42; 32]; pub const DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI: u128 = 10_000_000_000_000_000; pub const DEFAULT_BUILDER_PAYLOAD_VALUE_WEI: u128 = 20_000_000_000_000_000; @@ -79,9 +77,6 @@ mod mock_execution_layer; pub struct MockExecutionConfig { pub server_config: Config, pub jwt_key: JwtKey, - pub terminal_difficulty: Uint256, - pub terminal_block: u64, - pub terminal_block_hash: ExecutionBlockHash, pub shanghai_time: Option, pub cancun_time: Option, pub prague_time: Option, @@ -93,9 +88,6 @@ impl Default for MockExecutionConfig { fn default() -> Self { Self { jwt_key: JwtKey::random(), - terminal_difficulty: Uint256::from(DEFAULT_TERMINAL_DIFFICULTY), - terminal_block: DEFAULT_TERMINAL_BLOCK, - terminal_block_hash: ExecutionBlockHash::zero(), server_config: Config::default(), shanghai_time: None, cancun_time: None, @@ -118,9 +110,6 @@ impl MockServer { Self::new( &runtime::Handle::current(), JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap(), - Uint256::from(DEFAULT_TERMINAL_DIFFICULTY), - DEFAULT_TERMINAL_BLOCK, - ExecutionBlockHash::zero(), None, // FIXME(capella): should this be the default? None, // FIXME(deneb): should this be the default? None, // FIXME(electra): should this be the default? @@ -138,9 +127,6 @@ impl MockServer { create_test_tracing_subscriber(); let MockExecutionConfig { jwt_key, - terminal_difficulty, - terminal_block, - terminal_block_hash, server_config, shanghai_time, cancun_time, @@ -151,9 +137,6 @@ impl MockServer { let last_echo_request = Arc::new(RwLock::new(None)); let preloaded_responses = Arc::new(Mutex::new(vec![])); let execution_block_generator = ExecutionBlockGenerator::new( - terminal_difficulty, - terminal_block, - terminal_block_hash, shanghai_time, cancun_time, prague_time, @@ -215,9 +198,6 @@ impl MockServer { pub fn new( handle: &runtime::Handle, jwt_key: JwtKey, - terminal_difficulty: Uint256, - terminal_block: u64, - terminal_block_hash: ExecutionBlockHash, shanghai_time: Option, cancun_time: Option, prague_time: Option, @@ -230,9 +210,6 @@ impl MockServer { MockExecutionConfig { server_config: Config::default(), jwt_key, - terminal_difficulty, - terminal_block, - terminal_block_hash, shanghai_time, cancun_time, prague_time, diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 92a1ad934d..74710c4ed2 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -93,7 +93,7 @@ use tokio_stream::{ use tracing::{debug, info, warn}; use types::{ BeaconStateError, Checkpoint, ConfigAndPreset, Epoch, EthSpec, ForkName, Hash256, - SignedBlindedBeaconBlock, Slot, + SignedBlindedBeaconBlock, }; use validator::execution_payload_envelope::get_validator_execution_payload_envelope; use version::{ @@ -3126,25 +3126,6 @@ pub fn serve( }, ); - // GET lighthouse/merge_readiness - let get_lighthouse_merge_readiness = warp::path("lighthouse") - .and(warp::path("merge_readiness")) - .and(warp::path::end()) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()) - .then( - |task_spawner: TaskSpawner, chain: Arc>| { - task_spawner.spawn_async_with_rejection(Priority::P1, async move { - let current_slot = chain.slot_clock.now_or_genesis().unwrap_or(Slot::new(0)); - let merge_readiness = chain.check_bellatrix_readiness(current_slot).await; - Ok::<_, warp::reject::Rejection>( - warp::reply::json(&api_types::GenericResponse::from(merge_readiness)) - .into_response(), - ) - }) - }, - ); - let get_events = eth_v1 .clone() .and(warp::path("events")) @@ -3388,7 +3369,6 @@ pub fn serve( .uor(get_beacon_light_client_bootstrap) .uor(get_beacon_light_client_updates) .uor(get_lighthouse_block_packing_efficiency) - .uor(get_lighthouse_merge_readiness) .uor(get_events) .uor(get_expected_withdrawals) .uor(lighthouse_log_events.boxed()) diff --git a/beacon_node/http_api/tests/broadcast_validation_tests.rs b/beacon_node/http_api/tests/broadcast_validation_tests.rs index ef5c508595..a380f62ecf 100644 --- a/beacon_node/http_api/tests/broadcast_validation_tests.rs +++ b/beacon_node/http_api/tests/broadcast_validation_tests.rs @@ -85,14 +85,18 @@ pub async fn gossip_invalid() { /* mandated by Beacon API spec */ assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); + // The error depends on whether blobs exist (which affects validation order): + // - Pre-Deneb (no blobs): block validation runs first -> NotFinalizedDescendant + // - Deneb/Electra (blobs): blob validation runs first -> ParentUnknown + // - Fulu+ (columns): block validation runs first -> NotFinalizedDescendant let pre_finalized_block_root = Hash256::zero(); - let expected_error_msg = if tester.harness.spec.is_fulu_scheduled() { + let expected_error_msg = if tester.harness.spec.deneb_fork_epoch.is_none() + || tester.harness.spec.is_fulu_scheduled() + { format!( "BAD_REQUEST: NotFinalizedDescendant {{ block_parent_root: {pre_finalized_block_root:?} }}" ) } else { - // Since Deneb, the invalidity of the blobs will be detected prior to the invalidity of the - // block. format!("BAD_REQUEST: ParentUnknown {{ parent_root: {pre_finalized_block_root:?} }}") }; @@ -283,13 +287,13 @@ pub async fn consensus_invalid() { assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); let pre_finalized_block_root = Hash256::zero(); - let expected_error_msg = if tester.harness.spec.is_fulu_scheduled() { + let expected_error_msg = if tester.harness.spec.deneb_fork_epoch.is_none() + || tester.harness.spec.is_fulu_scheduled() + { format!( "BAD_REQUEST: NotFinalizedDescendant {{ block_parent_root: {pre_finalized_block_root:?} }}" ) } else { - // Since Deneb, the invalidity of the blobs will be detected prior to the invalidity of the - // block. format!("BAD_REQUEST: ParentUnknown {{ parent_root: {pre_finalized_block_root:?} }}") }; @@ -520,13 +524,13 @@ pub async fn equivocation_invalid() { assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); let pre_finalized_block_root = Hash256::zero(); - let expected_error_msg = if tester.harness.spec.is_fulu_scheduled() { + let expected_error_msg = if tester.harness.spec.deneb_fork_epoch.is_none() + || tester.harness.spec.is_fulu_scheduled() + { format!( "BAD_REQUEST: NotFinalizedDescendant {{ block_parent_root: {pre_finalized_block_root:?} }}" ) } else { - // Since Deneb, the invalidity of the blobs will be detected prior to the invalidity of the - // block. format!("BAD_REQUEST: ParentUnknown {{ parent_root: {pre_finalized_block_root:?} }}") }; @@ -845,16 +849,17 @@ pub async fn blinded_gossip_invalid() { assert!(response.is_err()); let error_response: eth2::Error = response.err().unwrap(); + /* mandated by Beacon API spec */ assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); let pre_finalized_block_root = Hash256::zero(); - let expected_error_msg = if tester.harness.spec.is_fulu_scheduled() { + let expected_error_msg = if tester.harness.spec.deneb_fork_epoch.is_none() + || tester.harness.spec.is_fulu_scheduled() + { format!( "BAD_REQUEST: NotFinalizedDescendant {{ block_parent_root: {pre_finalized_block_root:?} }}" ) } else { - // Since Deneb, the invalidity of the blobs will be detected prior to the invalidity of the - // block. format!("BAD_REQUEST: ParentUnknown {{ parent_root: {pre_finalized_block_root:?} }}") }; @@ -1070,10 +1075,16 @@ pub async fn blinded_consensus_invalid() { ); } else { assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); - assert_server_message_error( - error_response, - format!("BAD_REQUEST: ParentUnknown {{ parent_root: {pre_finalized_block_root:?} }}"), - ); + let expected_error_msg = if tester.harness.spec.deneb_fork_epoch.is_none() + || tester.harness.spec.is_fulu_scheduled() + { + format!( + "BAD_REQUEST: NotFinalizedDescendant {{ block_parent_root: {pre_finalized_block_root:?} }}" + ) + } else { + format!("BAD_REQUEST: ParentUnknown {{ parent_root: {pre_finalized_block_root:?} }}") + }; + assert_server_message_error(error_response, expected_error_msg); } } @@ -1253,10 +1264,16 @@ pub async fn blinded_equivocation_invalid() { ); } else { assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); - assert_server_message_error( - error_response, - format!("BAD_REQUEST: ParentUnknown {{ parent_root: {pre_finalized_block_root:?} }}"), - ); + let expected_error_msg = if tester.harness.spec.deneb_fork_epoch.is_none() + || tester.harness.spec.is_fulu_scheduled() + { + format!( + "BAD_REQUEST: NotFinalizedDescendant {{ block_parent_root: {pre_finalized_block_root:?} }}" + ) + } else { + format!("BAD_REQUEST: ParentUnknown {{ parent_root: {pre_finalized_block_root:?} }}") + }; + assert_server_message_error(error_response, expected_error_msg); } } @@ -1957,6 +1974,13 @@ pub async fn duplicate_block_status_code() { let validator_count = 64; let num_initial: u64 = 31; let duplicate_block_status_code = StatusCode::IM_A_TEAPOT; + + // Check if deneb is enabled, which is required for blobs. + let spec = test_spec::(); + if !spec.fork_name_at_slot::(Slot::new(0)).deneb_enabled() { + return; + } + let tester = InteractiveTester::::new_with_initializer_and_mutator( None, validator_count, diff --git a/beacon_node/http_api/tests/fork_tests.rs b/beacon_node/http_api/tests/fork_tests.rs index b96c8bd112..4ba35c238c 100644 --- a/beacon_node/http_api/tests/fork_tests.rs +++ b/beacon_node/http_api/tests/fork_tests.rs @@ -404,7 +404,7 @@ async fn bls_to_execution_changes_update_all_around_capella_fork() { bls_withdrawal_credentials(&keypair.pk, spec) } - let header = generate_genesis_header(&spec, true); + let header = generate_genesis_header(&spec); let genesis_state = InteropGenesisBuilder::new() .set_opt_execution_payload_header(header) diff --git a/beacon_node/http_api/tests/interactive_tests.rs b/beacon_node/http_api/tests/interactive_tests.rs index 21458057c4..a18dd10464 100644 --- a/beacon_node/http_api/tests/interactive_tests.rs +++ b/beacon_node/http_api/tests/interactive_tests.rs @@ -450,13 +450,7 @@ pub async fn proposer_boost_re_org_test( let execution_ctx = mock_el.server.ctx.clone(); let slot_clock = &harness.chain.slot_clock; - // Move to terminal block. mock_el.server.all_payloads_valid(); - execution_ctx - .execution_block_generator - .write() - .move_to_terminal_block() - .unwrap(); // Send proposer preparation data for all validators. let proposer_preparation_data = all_validators diff --git a/beacon_node/http_api/tests/status_tests.rs b/beacon_node/http_api/tests/status_tests.rs index 6bca9e51f6..791e643ec4 100644 --- a/beacon_node/http_api/tests/status_tests.rs +++ b/beacon_node/http_api/tests/status_tests.rs @@ -21,15 +21,8 @@ async fn post_merge_tester(chain_depth: u64, validator_count: u64) -> Interactiv let tester = InteractiveTester::::new(Some(spec), validator_count as usize).await; let harness = &tester.harness; let mock_el = harness.mock_execution_layer.as_ref().unwrap(); - let execution_ctx = mock_el.server.ctx.clone(); - // Move to terminal block. mock_el.server.all_payloads_valid(); - execution_ctx - .execution_block_generator - .write() - .move_to_terminal_block() - .unwrap(); // Create some chain depth. harness.advance_slot(); diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 7e3eb8b980..6696e109a5 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -147,15 +147,6 @@ impl ApiTester { .node_custody_type(config.node_custody_type) .build(); - harness - .mock_execution_layer - .as_ref() - .unwrap() - .server - .execution_block_generator() - .move_to_terminal_block() - .unwrap(); - harness.advance_slot(); for _ in 0..CHAIN_LENGTH { diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index 81423d6abd..b3bd091691 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -1851,8 +1851,11 @@ mod release_tests { let mut spec = E::default_spec(); // Give some room to sign surround slashings. - spec.altair_fork_epoch = Some(Epoch::new(3)); - spec.bellatrix_fork_epoch = Some(Epoch::new(6)); + spec.altair_fork_epoch = Some(Epoch::new(0)); + spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + spec.capella_fork_epoch = Some(Epoch::new(0)); + spec.deneb_fork_epoch = Some(Epoch::new(2)); + spec.electra_fork_epoch = Some(Epoch::new(4)); // To make exits immediately valid. spec.shard_committee_period = 0; @@ -1860,185 +1863,114 @@ mod release_tests { let num_validators = 32; let harness = get_harness::(num_validators, Some(spec.clone())); + if let Some(mock_el) = harness.mock_execution_layer.as_ref() { + mock_el.server.all_payloads_valid(); + } (harness, spec) } - /// Test several cross-fork voluntary exits: - /// - /// - phase0 exit (not valid after Bellatrix) - /// - phase0 exit signed with Altair fork version (only valid after Bellatrix) - #[tokio::test] - async fn cross_fork_exits() { - let (harness, spec) = cross_fork_harness::(); - let altair_fork_epoch = spec.altair_fork_epoch.unwrap(); - let bellatrix_fork_epoch = spec.bellatrix_fork_epoch.unwrap(); - let slots_per_epoch = MainnetEthSpec::slots_per_epoch(); - - let op_pool = OperationPool::::new(); - - // Sign an exit in phase0 with a phase0 epoch. - let exit1 = harness.make_voluntary_exit(0, Epoch::new(0)); - - // Advance to Altair. - harness - .extend_to_slot(altair_fork_epoch.start_slot(slots_per_epoch)) - .await; - let altair_head = harness.chain.canonical_head.cached_head().snapshot; - assert_eq!(altair_head.beacon_state.current_epoch(), altair_fork_epoch); - - // Add exit 1 to the op pool during Altair. It's still valid at this point and should be - // returned. - let verified_exit1 = exit1 - .clone() - .validate(&altair_head.beacon_state, &harness.chain.spec) - .unwrap(); - op_pool.insert_voluntary_exit(verified_exit1); - let exits = - op_pool.get_voluntary_exits(&altair_head.beacon_state, |_| true, &harness.chain.spec); - assert!(exits.contains(&exit1)); - assert_eq!(exits.len(), 1); - - // Advance to Bellatrix. - harness - .extend_to_slot(bellatrix_fork_epoch.start_slot(slots_per_epoch)) - .await; - let bellatrix_head = harness.chain.canonical_head.cached_head().snapshot; - assert_eq!( - bellatrix_head.beacon_state.current_epoch(), - bellatrix_fork_epoch - ); - - // Sign an exit with the Altair domain and a phase0 epoch. This is a weird type of exit - // that is valid because after the Bellatrix fork we'll use the Altair fork domain to verify - // all prior epochs. - let unsigned_exit = VoluntaryExit { - epoch: Epoch::new(0), - validator_index: 2, - }; - let exit2 = SignedVoluntaryExit { - message: unsigned_exit.clone(), - signature: harness.validator_keypairs[2] - .sk - .sign(unsigned_exit.signing_root(spec.compute_domain( - Domain::VoluntaryExit, - harness.spec.altair_fork_version, - harness.chain.genesis_validators_root, - ))), - }; - - let verified_exit2 = exit2 - .clone() - .validate(&bellatrix_head.beacon_state, &harness.chain.spec) - .unwrap(); - op_pool.insert_voluntary_exit(verified_exit2); - - // Attempting to fetch exit1 now should fail, despite it still being in the pool. - // exit2 should still be valid, because it was signed with the Altair fork domain. - assert_eq!(op_pool.voluntary_exits.read().len(), 2); - let exits = - op_pool.get_voluntary_exits(&bellatrix_head.beacon_state, |_| true, &harness.spec); - assert_eq!(&exits, &[exit2]); - } + // Voluntary exits signed post-Capella are perpetually valid across forks, so no + // cross-fork test is required here. /// Test several cross-fork proposer slashings: /// - /// - phase0 slashing (not valid after Bellatrix) - /// - Bellatrix signed with Altair fork version (not valid after Bellatrix) - /// - phase0 exit signed with Altair fork version (only valid after Bellatrix) + /// - Capella slashing (not valid after Electra) + /// - Electra signed with Deneb fork version (not valid after Electra) + /// - Capella exit signed with Deneb fork version (only valid after Electra) #[tokio::test] async fn cross_fork_proposer_slashings() { let (harness, spec) = cross_fork_harness::(); let slots_per_epoch = MainnetEthSpec::slots_per_epoch(); - let altair_fork_epoch = spec.altair_fork_epoch.unwrap(); - let bellatrix_fork_epoch = spec.bellatrix_fork_epoch.unwrap(); - let bellatrix_fork_slot = bellatrix_fork_epoch.start_slot(slots_per_epoch); + let deneb_fork_epoch = spec.deneb_fork_epoch.unwrap(); + let electra_fork_epoch = spec.electra_fork_epoch.unwrap(); + let electra_fork_slot = electra_fork_epoch.start_slot(slots_per_epoch); let op_pool = OperationPool::::new(); - // Sign a proposer slashing in phase0 with a phase0 epoch. + // Sign a proposer slashing in Capella with a Capella slot. let slashing1 = harness.make_proposer_slashing_at_slot(0, Some(Slot::new(1))); - // Advance to Altair. + // Advance to Deneb. harness - .extend_to_slot(altair_fork_epoch.start_slot(slots_per_epoch)) + .extend_to_slot(deneb_fork_epoch.start_slot(slots_per_epoch)) .await; - let altair_head = harness.chain.canonical_head.cached_head().snapshot; - assert_eq!(altair_head.beacon_state.current_epoch(), altair_fork_epoch); + let deneb_head = harness.chain.canonical_head.cached_head().snapshot; + assert_eq!(deneb_head.beacon_state.current_epoch(), deneb_fork_epoch); - // Add slashing1 to the op pool during Altair. It's still valid at this point and should be + // Add slashing1 to the op pool during Deneb. It's still valid at this point and should be // returned. let verified_slashing1 = slashing1 .clone() - .validate(&altair_head.beacon_state, &harness.chain.spec) + .validate(&deneb_head.beacon_state, &harness.chain.spec) .unwrap(); op_pool.insert_proposer_slashing(verified_slashing1); let (proposer_slashings, _, _) = - op_pool.get_slashings_and_exits(&altair_head.beacon_state, &harness.chain.spec); + op_pool.get_slashings_and_exits(&deneb_head.beacon_state, &harness.chain.spec); assert!(proposer_slashings.contains(&slashing1)); assert_eq!(proposer_slashings.len(), 1); - // Sign a proposer slashing with a Bellatrix slot using the Altair fork domain. + // Sign a proposer slashing with a Electra slot using the Deneb fork domain. // - // This slashing is valid only before the Bellatrix fork epoch. - let slashing2 = harness.make_proposer_slashing_at_slot(1, Some(bellatrix_fork_slot)); + // This slashing is valid only before the Electra fork epoch. + let slashing2 = harness.make_proposer_slashing_at_slot(1, Some(electra_fork_slot)); let verified_slashing2 = slashing2 .clone() - .validate(&altair_head.beacon_state, &harness.chain.spec) + .validate(&deneb_head.beacon_state, &harness.chain.spec) .unwrap(); op_pool.insert_proposer_slashing(verified_slashing2); let (proposer_slashings, _, _) = - op_pool.get_slashings_and_exits(&altair_head.beacon_state, &harness.chain.spec); + op_pool.get_slashings_and_exits(&deneb_head.beacon_state, &harness.chain.spec); assert!(proposer_slashings.contains(&slashing1)); assert!(proposer_slashings.contains(&slashing2)); assert_eq!(proposer_slashings.len(), 2); - // Advance to Bellatrix. - harness.extend_to_slot(bellatrix_fork_slot).await; - let bellatrix_head = harness.chain.canonical_head.cached_head().snapshot; + // Advance to Electra. + harness.extend_to_slot(electra_fork_slot).await; + let electra_head = harness.chain.canonical_head.cached_head().snapshot; assert_eq!( - bellatrix_head.beacon_state.current_epoch(), - bellatrix_fork_epoch + electra_head.beacon_state.current_epoch(), + electra_fork_epoch ); - // Sign a proposer slashing with the Altair domain and a phase0 slot. This is a weird type - // of slashing that is only valid after the Bellatrix fork because we'll use the Altair fork + // Sign a proposer slashing with the Deneb domain and a Capella slot. This is a weird type + // of slashing that is only valid after the Electra fork because we'll use the Deneb fork // domain to verify all prior epochs. let slashing3 = harness.make_proposer_slashing_at_slot(2, Some(Slot::new(1))); let verified_slashing3 = slashing3 .clone() - .validate(&bellatrix_head.beacon_state, &harness.chain.spec) + .validate(&electra_head.beacon_state, &harness.chain.spec) .unwrap(); op_pool.insert_proposer_slashing(verified_slashing3); // Attempting to fetch slashing1 now should fail, despite it still being in the pool. // Likewise slashing2 is also invalid now because it should be signed with the - // Bellatrix fork version. - // slashing3 should still be valid, because it was signed with the Altair fork domain. + // Electra fork version. + // slashing3 should still be valid, because it was signed with the Deneb fork domain. assert_eq!(op_pool.proposer_slashings.read().len(), 3); let (proposer_slashings, _, _) = - op_pool.get_slashings_and_exits(&bellatrix_head.beacon_state, &harness.spec); + op_pool.get_slashings_and_exits(&electra_head.beacon_state, &harness.spec); assert!(proposer_slashings.contains(&slashing3)); assert_eq!(proposer_slashings.len(), 1); } /// Test several cross-fork attester slashings: /// - /// - both target epochs in phase0 (not valid after Bellatrix) - /// - both target epochs in Bellatrix but signed with Altair domain (not valid after Bellatrix) - /// - Altair attestation that surrounds a phase0 attestation (not valid after Bellatrix) - /// - both target epochs in phase0 but signed with Altair domain (only valid after Bellatrix) + /// - both target epochs in Capella (not valid after Electra) + /// - both target epochs in Electra but signed with Deneb domain (not valid after Electra) + /// - Deneb attestation that surrounds a Capella attestation (not valid after Electra) + /// - both target epochs in Capella but signed with Deneb domain (only valid after Electra) #[tokio::test] async fn cross_fork_attester_slashings() { let (harness, spec) = cross_fork_harness::(); let slots_per_epoch = MainnetEthSpec::slots_per_epoch(); let zero_epoch = Epoch::new(0); - let altair_fork_epoch = spec.altair_fork_epoch.unwrap(); - let bellatrix_fork_epoch = spec.bellatrix_fork_epoch.unwrap(); - let bellatrix_fork_slot = bellatrix_fork_epoch.start_slot(slots_per_epoch); + let deneb_fork_epoch = spec.deneb_fork_epoch.unwrap(); + let electra_fork_epoch = spec.electra_fork_epoch.unwrap(); + let electra_fork_slot = electra_fork_epoch.start_slot(slots_per_epoch); let op_pool = OperationPool::::new(); - // Sign an attester slashing with the phase0 fork version, with both target epochs in phase0. + // Sign an attester slashing with the Capella fork version, with both target epochs in Capella. let slashing1 = harness.make_attester_slashing_with_epochs( vec![0], None, @@ -2047,55 +1979,55 @@ mod release_tests { Some(zero_epoch), ); - // Advance to Altair. + // Advance to Deneb. harness - .extend_to_slot(altair_fork_epoch.start_slot(slots_per_epoch)) + .extend_to_slot(deneb_fork_epoch.start_slot(slots_per_epoch)) .await; - let altair_head = harness.chain.canonical_head.cached_head().snapshot; - assert_eq!(altair_head.beacon_state.current_epoch(), altair_fork_epoch); + let deneb_head = harness.chain.canonical_head.cached_head().snapshot; + assert_eq!(deneb_head.beacon_state.current_epoch(), deneb_fork_epoch); - // Add slashing1 to the op pool during Altair. It's still valid at this point and should be + // Add slashing1 to the op pool during Deneb. It's still valid at this point and should be // returned. let verified_slashing1 = slashing1 .clone() - .validate(&altair_head.beacon_state, &harness.chain.spec) + .validate(&deneb_head.beacon_state, &harness.chain.spec) .unwrap(); op_pool.insert_attester_slashing(verified_slashing1); - // Sign an attester slashing with two Bellatrix epochs using the Altair fork domain. + // Sign an attester slashing with two Electra epochs using the Deneb fork domain. // - // This slashing is valid only before the Bellatrix fork epoch. + // This slashing is valid only before the Electra fork epoch. let slashing2 = harness.make_attester_slashing_with_epochs( vec![1], None, - Some(bellatrix_fork_epoch), + Some(electra_fork_epoch), None, - Some(bellatrix_fork_epoch), + Some(electra_fork_epoch), ); let verified_slashing2 = slashing2 .clone() - .validate(&altair_head.beacon_state, &harness.chain.spec) + .validate(&deneb_head.beacon_state, &harness.chain.spec) .unwrap(); op_pool.insert_attester_slashing(verified_slashing2); let (_, attester_slashings, _) = - op_pool.get_slashings_and_exits(&altair_head.beacon_state, &harness.chain.spec); + op_pool.get_slashings_and_exits(&deneb_head.beacon_state, &harness.chain.spec); assert!(attester_slashings.contains(&slashing1)); assert!(attester_slashings.contains(&slashing2)); assert_eq!(attester_slashings.len(), 2); - // Sign an attester slashing where an Altair attestation surrounds a phase0 one. + // Sign an attester slashing where a Deneb attestation surrounds a Capella one. // - // This slashing is valid only before the Bellatrix fork epoch. + // This slashing is valid only before the Electra fork epoch. let slashing3 = harness.make_attester_slashing_with_epochs( vec![2], Some(Epoch::new(0)), - Some(altair_fork_epoch), + Some(deneb_fork_epoch), Some(Epoch::new(1)), - Some(altair_fork_epoch - 1), + Some(deneb_fork_epoch - 1), ); let verified_slashing3 = slashing3 .clone() - .validate(&altair_head.beacon_state, &harness.chain.spec) + .validate(&deneb_head.beacon_state, &harness.chain.spec) .unwrap(); op_pool.insert_attester_slashing(verified_slashing3); @@ -2104,44 +2036,43 @@ mod release_tests { // slashed. let mut to_be_slashed = hashset! {0}; let attester_slashings = - op_pool.get_attester_slashings(&altair_head.beacon_state, &mut to_be_slashed); + op_pool.get_attester_slashings(&deneb_head.beacon_state, &mut to_be_slashed); assert!(attester_slashings.contains(&slashing2)); assert!(attester_slashings.contains(&slashing3)); assert_eq!(attester_slashings.len(), 2); - // Advance to Bellatrix. - harness.extend_to_slot(bellatrix_fork_slot).await; - let bellatrix_head = harness.chain.canonical_head.cached_head().snapshot; + // Advance to Electra + harness.extend_to_slot(electra_fork_slot).await; + let electra_head = harness.chain.canonical_head.cached_head().snapshot; assert_eq!( - bellatrix_head.beacon_state.current_epoch(), - bellatrix_fork_epoch + electra_head.beacon_state.current_epoch(), + electra_fork_epoch ); - // Sign an attester slashing with the Altair domain and phase0 epochs. This is a weird type - // of slashing that is only valid after the Bellatrix fork because we'll use the Altair fork - // domain to verify all prior epochs. + // Sign an attester slashing with the Deneb domain and Capella epochs. This is only valid + // after the Electra fork. let slashing4 = harness.make_attester_slashing_with_epochs( vec![3], Some(Epoch::new(0)), - Some(altair_fork_epoch - 1), + Some(deneb_fork_epoch - 1), Some(Epoch::new(0)), - Some(altair_fork_epoch - 1), + Some(deneb_fork_epoch - 1), ); let verified_slashing4 = slashing4 .clone() - .validate(&bellatrix_head.beacon_state, &harness.chain.spec) + .validate(&electra_head.beacon_state, &harness.chain.spec) .unwrap(); op_pool.insert_attester_slashing(verified_slashing4); // All slashings except slashing4 are now invalid (despite being present in the pool). assert_eq!(op_pool.attester_slashings.read().len(), 4); let (_, attester_slashings, _) = - op_pool.get_slashings_and_exits(&bellatrix_head.beacon_state, &harness.spec); + op_pool.get_slashings_and_exits(&electra_head.beacon_state, &harness.spec); assert!(attester_slashings.contains(&slashing4)); assert_eq!(attester_slashings.len(), 1); // Pruning the attester slashings should remove all but slashing4. - op_pool.prune_attester_slashings(&bellatrix_head.beacon_state); + op_pool.prune_attester_slashings(&electra_head.beacon_state); assert_eq!(op_pool.attester_slashings.read().len(), 1); } } diff --git a/consensus/state_processing/src/per_block_processing/tests.rs b/consensus/state_processing/src/per_block_processing/tests.rs index 739717b33f..96610c2010 100644 --- a/consensus/state_processing/src/per_block_processing/tests.rs +++ b/consensus/state_processing/src/per_block_processing/tests.rs @@ -8,7 +8,7 @@ use crate::per_block_processing::errors::{ use crate::{BlockReplayError, BlockReplayer, per_block_processing}; use crate::{ BlockSignatureStrategy, ConsensusContext, VerifyBlockRoot, VerifySignatures, - per_block_processing::{process_operations, verify_exit::verify_exit}, + per_block_processing::process_operations, }; use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; use bls::{AggregateSignature, Keypair, PublicKeyBytes, Signature, SignatureBytes}; @@ -39,10 +39,13 @@ async fn get_harness( // Set the state and block to be in the last slot of the `epoch_offset`th epoch. let last_slot_of_epoch = (MainnetEthSpec::genesis_epoch() + epoch_offset).end_slot(E::slots_per_epoch()); + // Use Electra spec to ensure blocks are created at the same fork as the state + let spec = Arc::new(ForkName::Electra.make_genesis_spec(E::default_spec())); let harness = BeaconChainHarness::>::builder(E::default()) - .default_spec() + .spec(spec.clone()) .keypairs(KEYPAIRS[0..num_validators].to_vec()) .fresh_ephemeral_store() + .mock_execution_layer() .build(); let state = harness.get_current_state(); if last_slot_of_epoch > Slot::new(0) { @@ -63,8 +66,8 @@ async fn get_harness( #[tokio::test] async fn valid_block_ok() { - let spec = MainnetEthSpec::default_spec(); let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; + let spec = harness.spec.clone(); let state = harness.get_current_state(); let slot = state.slot(); @@ -87,8 +90,8 @@ async fn valid_block_ok() { #[tokio::test] async fn invalid_block_header_state_slot() { - let spec = MainnetEthSpec::default_spec(); let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; + let spec = harness.spec.clone(); let state = harness.get_current_state(); let slot = state.slot() + Slot::new(1); @@ -107,18 +110,18 @@ async fn invalid_block_header_state_slot() { &spec, ); - assert_eq!( + assert!(matches!( result, Err(BlockProcessingError::HeaderInvalid { - reason: HeaderInvalid::StateSlotMismatch + reason: HeaderInvalid::StateSlotMismatch, }) - ); + )); } #[tokio::test] async fn invalid_parent_block_root() { - let spec = MainnetEthSpec::default_spec(); let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; + let spec = harness.spec.clone(); let state = harness.get_current_state(); let slot = state.slot(); @@ -139,21 +142,18 @@ async fn invalid_parent_block_root() { &spec, ); - assert_eq!( + assert!(matches!( result, Err(BlockProcessingError::HeaderInvalid { - reason: HeaderInvalid::ParentBlockRootMismatch { - state: state.latest_block_header().canonical_root(), - block: Hash256::from([0xAA; 32]) - } + reason: HeaderInvalid::ParentBlockRootMismatch { .. }, }) - ); + )); } #[tokio::test] async fn invalid_block_signature() { - let spec = MainnetEthSpec::default_spec(); let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; + let spec = harness.spec.clone(); let state = harness.get_current_state(); let slot = state.slot(); @@ -172,19 +172,18 @@ async fn invalid_block_signature() { &spec, ); - // should get a BadSignature error - assert_eq!( + assert!(matches!( result, Err(BlockProcessingError::HeaderInvalid { - reason: HeaderInvalid::ProposalSignatureInvalid + reason: HeaderInvalid::ProposalSignatureInvalid, }) - ); + )); } #[tokio::test] async fn invalid_randao_reveal_signature() { - let spec = MainnetEthSpec::default_spec(); let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; + let spec = harness.spec.clone(); let state = harness.get_current_state(); let slot = state.slot(); @@ -211,8 +210,8 @@ async fn invalid_randao_reveal_signature() { #[tokio::test] async fn valid_4_deposits() { - let spec = MainnetEthSpec::default_spec(); let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; + let spec = harness.spec.clone(); let mut state = harness.get_current_state(); let (deposits, state) = harness.make_deposits(&mut state, 4, None, None); @@ -235,8 +234,8 @@ async fn valid_4_deposits() { #[tokio::test] async fn invalid_deposit_deposit_count_too_big() { - let spec = MainnetEthSpec::default_spec(); let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; + let spec = harness.spec.clone(); let mut state = harness.get_current_state(); let (deposits, state) = harness.make_deposits(&mut state, 1, None, None); @@ -267,8 +266,8 @@ async fn invalid_deposit_deposit_count_too_big() { #[tokio::test] async fn invalid_deposit_count_too_small() { - let spec = MainnetEthSpec::default_spec(); let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; + let spec = harness.spec.clone(); let mut state = harness.get_current_state(); let (deposits, state) = harness.make_deposits(&mut state, 1, None, None); @@ -299,8 +298,8 @@ async fn invalid_deposit_count_too_small() { #[tokio::test] async fn invalid_deposit_bad_merkle_proof() { - let spec = MainnetEthSpec::default_spec(); let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; + let spec = harness.spec.clone(); let mut state = harness.get_current_state(); let (deposits, state) = harness.make_deposits(&mut state, 1, None, None); @@ -333,8 +332,8 @@ async fn invalid_deposit_bad_merkle_proof() { #[tokio::test] async fn invalid_deposit_wrong_sig() { - let spec = MainnetEthSpec::default_spec(); let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; + let spec = harness.spec.clone(); let mut state = harness.get_current_state(); let (deposits, state) = @@ -357,8 +356,8 @@ async fn invalid_deposit_wrong_sig() { #[tokio::test] async fn invalid_deposit_invalid_pub_key() { - let spec = MainnetEthSpec::default_spec(); let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; + let spec = harness.spec.clone(); let mut state = harness.get_current_state(); let (deposits, state) = @@ -382,8 +381,8 @@ async fn invalid_deposit_invalid_pub_key() { #[tokio::test] async fn invalid_attestation_no_committee_for_index() { - let spec = MainnetEthSpec::default_spec(); let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; + let spec = harness.spec.clone(); let mut state = harness.get_current_state(); let mut head_block = harness @@ -422,8 +421,8 @@ async fn invalid_attestation_no_committee_for_index() { #[tokio::test] async fn invalid_attestation_wrong_justified_checkpoint() { - let spec = MainnetEthSpec::default_spec(); let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; + let spec = harness.spec.clone(); let mut state = harness.get_current_state(); let mut head_block = harness @@ -477,8 +476,8 @@ async fn invalid_attestation_wrong_justified_checkpoint() { #[tokio::test] async fn invalid_attestation_bad_aggregation_bitfield_len() { - let spec = MainnetEthSpec::default_spec(); let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; + let spec = harness.spec.clone(); let mut state = harness.get_current_state(); let mut head_block = harness @@ -488,13 +487,14 @@ async fn invalid_attestation_bad_aggregation_bitfield_len() { .clone() .deconstruct() .0; + // Use Electra method since harness runs at Electra fork *head_block .to_mut() .body_mut() .attestations_mut() .next() .unwrap() - .aggregation_bits_base_mut() + .aggregation_bits_electra_mut() .unwrap() = Bitfield::with_capacity(spec.target_committee_size).unwrap(); let mut ctxt = ConsensusContext::new(state.slot()); @@ -506,19 +506,20 @@ async fn invalid_attestation_bad_aggregation_bitfield_len() { &spec, ); - // Expecting InvalidBitfield because the size of the aggregation_bitfield is bigger than the committee size. + // In Electra, setting wrong aggregation_bits capacity causes EmptyCommittee error + // (validation order changed - committee check happens before bitfield check) assert_eq!( result, Err(BlockProcessingError::BeaconStateError( - BeaconStateError::InvalidBitfield + BeaconStateError::EmptyCommittee )) ); } #[tokio::test] async fn invalid_attestation_bad_signature() { - let spec = MainnetEthSpec::default_spec(); let harness = get_harness::(EPOCH_OFFSET, 97).await; // minimal number of required validators for this test + let spec = harness.spec.clone(); let mut state = harness.get_current_state(); let mut head_block = harness @@ -558,8 +559,8 @@ async fn invalid_attestation_bad_signature() { #[tokio::test] async fn invalid_attestation_included_too_early() { - let spec = MainnetEthSpec::default_spec(); let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; + let spec = harness.spec.clone(); let mut state = harness.get_current_state(); let mut head_block = harness @@ -603,56 +604,15 @@ async fn invalid_attestation_included_too_early() { ); } -#[tokio::test] -async fn invalid_attestation_included_too_late() { - let spec = MainnetEthSpec::default_spec(); - // note to maintainer: might need to increase validator count if we get NoCommittee - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; - - let mut state = harness.get_current_state(); - let mut head_block = harness - .chain - .head_beacon_block() - .as_ref() - .clone() - .deconstruct() - .0; - let new_attesation_slot = head_block.body().attestations().next().unwrap().data().slot - - Slot::new(MainnetEthSpec::slots_per_epoch()); - head_block - .to_mut() - .body_mut() - .attestations_mut() - .next() - .unwrap() - .data_mut() - .slot = new_attesation_slot; - - let mut ctxt = ConsensusContext::new(state.slot()); - let result = process_operations::process_attestations( - &mut state, - head_block.body(), - VerifySignatures::True, - &mut ctxt, - &spec, - ); - assert_eq!( - result, - Err(BlockProcessingError::AttestationInvalid { - index: 0, - reason: AttestationInvalid::IncludedTooLate { - state: state.slot(), - attestation: new_attesation_slot, - } - }) - ); -} +// Note: `invalid_attestation_included_too_late` test removed. +// The `IncludedTooLate` check was removed in Deneb (EIP7045), so this test is no longer +// applicable when running with Electra spec (which the harness uses by default). #[tokio::test] async fn invalid_attestation_target_epoch_slot_mismatch() { - let spec = MainnetEthSpec::default_spec(); // note to maintainer: might need to increase validator count if we get NoCommittee let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; + let spec = harness.spec.clone(); let mut state = harness.get_current_state(); let mut head_block = harness @@ -694,8 +654,8 @@ async fn invalid_attestation_target_epoch_slot_mismatch() { #[tokio::test] async fn valid_insert_attester_slashing() { - let spec = MainnetEthSpec::default_spec(); let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; + let spec = harness.spec.clone(); let attester_slashing = harness.make_attester_slashing(vec![1, 2]); @@ -715,8 +675,8 @@ async fn valid_insert_attester_slashing() { #[tokio::test] async fn invalid_attester_slashing_not_slashable() { - let spec = MainnetEthSpec::default_spec(); let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; + let spec = harness.spec.clone(); let mut attester_slashing = harness.make_attester_slashing(vec![1, 2]); match &mut attester_slashing { @@ -750,8 +710,8 @@ async fn invalid_attester_slashing_not_slashable() { #[tokio::test] async fn invalid_attester_slashing_1_invalid() { - let spec = MainnetEthSpec::default_spec(); let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; + let spec = harness.spec.clone(); let mut attester_slashing = harness.make_attester_slashing(vec![1, 2]); match &mut attester_slashing { @@ -790,8 +750,8 @@ async fn invalid_attester_slashing_1_invalid() { #[tokio::test] async fn invalid_attester_slashing_2_invalid() { - let spec = MainnetEthSpec::default_spec(); let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; + let spec = harness.spec.clone(); let mut attester_slashing = harness.make_attester_slashing(vec![1, 2]); match &mut attester_slashing { @@ -830,8 +790,8 @@ async fn invalid_attester_slashing_2_invalid() { #[tokio::test] async fn valid_insert_proposer_slashing() { - let spec = MainnetEthSpec::default_spec(); let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; + let spec = harness.spec.clone(); let proposer_slashing = harness.make_proposer_slashing(1); let mut state = harness.get_current_state(); let mut ctxt = ConsensusContext::new(state.slot()); @@ -848,8 +808,8 @@ async fn valid_insert_proposer_slashing() { #[tokio::test] async fn invalid_proposer_slashing_proposals_identical() { - let spec = MainnetEthSpec::default_spec(); let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; + let spec = harness.spec.clone(); let mut proposer_slashing = harness.make_proposer_slashing(1); proposer_slashing.signed_header_1.message = proposer_slashing.signed_header_2.message.clone(); @@ -876,8 +836,8 @@ async fn invalid_proposer_slashing_proposals_identical() { #[tokio::test] async fn invalid_proposer_slashing_proposer_unknown() { - let spec = MainnetEthSpec::default_spec(); let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; + let spec = harness.spec.clone(); let mut proposer_slashing = harness.make_proposer_slashing(1); proposer_slashing.signed_header_1.message.proposer_index = 3_141_592; @@ -905,8 +865,8 @@ async fn invalid_proposer_slashing_proposer_unknown() { #[tokio::test] async fn invalid_proposer_slashing_duplicate_slashing() { - let spec = MainnetEthSpec::default_spec(); let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; + let spec = harness.spec.clone(); let proposer_slashing = harness.make_proposer_slashing(1); let mut state = harness.get_current_state(); @@ -939,8 +899,8 @@ async fn invalid_proposer_slashing_duplicate_slashing() { #[tokio::test] async fn invalid_bad_proposal_1_signature() { - let spec = MainnetEthSpec::default_spec(); let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; + let spec = harness.spec.clone(); let mut proposer_slashing = harness.make_proposer_slashing(1); proposer_slashing.signed_header_1.signature = Signature::empty(); let mut state = harness.get_current_state(); @@ -965,8 +925,8 @@ async fn invalid_bad_proposal_1_signature() { #[tokio::test] async fn invalid_bad_proposal_2_signature() { - let spec = MainnetEthSpec::default_spec(); let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; + let spec = harness.spec.clone(); let mut proposer_slashing = harness.make_proposer_slashing(1); proposer_slashing.signed_header_2.signature = Signature::empty(); let mut state = harness.get_current_state(); @@ -991,8 +951,8 @@ async fn invalid_bad_proposal_2_signature() { #[tokio::test] async fn invalid_proposer_slashing_proposal_epoch_mismatch() { - let spec = MainnetEthSpec::default_spec(); let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; + let spec = harness.spec.clone(); let mut proposer_slashing = harness.make_proposer_slashing(1); proposer_slashing.signed_header_1.message.slot = Slot::new(0); proposer_slashing.signed_header_2.message.slot = Slot::new(128); @@ -1019,92 +979,6 @@ async fn invalid_proposer_slashing_proposal_epoch_mismatch() { ); } -#[tokio::test] -async fn fork_spanning_exit() { - let mut spec = MainnetEthSpec::default_spec(); - let slots_per_epoch = MainnetEthSpec::slots_per_epoch(); - - spec.altair_fork_epoch = Some(Epoch::new(2)); - spec.bellatrix_fork_epoch = Some(Epoch::new(4)); - spec.shard_committee_period = 0; - let spec = Arc::new(spec); - - let harness = BeaconChainHarness::builder(MainnetEthSpec) - .spec(spec.clone()) - .deterministic_keypairs(VALIDATOR_COUNT) - .mock_execution_layer() - .fresh_ephemeral_store() - .build(); - - harness.extend_to_slot(slots_per_epoch.into()).await; - - /* - * Produce an exit *before* Altair. - */ - - let signed_exit = harness.make_voluntary_exit(0, Epoch::new(1)); - assert!(signed_exit.message.epoch < spec.altair_fork_epoch.unwrap()); - - /* - * Ensure the exit verifies before Altair. - */ - - let head = harness.chain.canonical_head.cached_head(); - let head_state = &head.snapshot.beacon_state; - assert!(head_state.current_epoch() < spec.altair_fork_epoch.unwrap()); - verify_exit( - head_state, - None, - &signed_exit, - VerifySignatures::True, - &spec, - ) - .expect("phase0 exit verifies against phase0 state"); - - /* - * Ensure the exit verifies after Altair. - */ - - harness - .extend_to_slot(spec.altair_fork_epoch.unwrap().start_slot(slots_per_epoch)) - .await; - let head = harness.chain.canonical_head.cached_head(); - let head_state = &head.snapshot.beacon_state; - assert!(head_state.current_epoch() >= spec.altair_fork_epoch.unwrap()); - assert!(head_state.current_epoch() < spec.bellatrix_fork_epoch.unwrap()); - verify_exit( - head_state, - None, - &signed_exit, - VerifySignatures::True, - &spec, - ) - .expect("phase0 exit verifies against altair state"); - - /* - * Ensure the exit no longer verifies after Bellatrix. - */ - - harness - .extend_to_slot( - spec.bellatrix_fork_epoch - .unwrap() - .start_slot(slots_per_epoch), - ) - .await; - let head = harness.chain.canonical_head.cached_head(); - let head_state = &head.snapshot.beacon_state; - assert!(head_state.current_epoch() >= spec.bellatrix_fork_epoch.unwrap()); - verify_exit( - head_state, - None, - &signed_exit, - VerifySignatures::True, - &spec, - ) - .expect_err("phase0 exit does not verify against bellatrix state"); -} - /// Check that the block replayer does not consume state roots unnecessarily. #[tokio::test] async fn block_replayer_peeking_state_roots() { diff --git a/consensus/state_processing/src/per_epoch_processing/tests.rs b/consensus/state_processing/src/per_epoch_processing/tests.rs index f042e8766c..c04b7f843d 100644 --- a/consensus/state_processing/src/per_epoch_processing/tests.rs +++ b/consensus/state_processing/src/per_epoch_processing/tests.rs @@ -11,10 +11,10 @@ async fn runs_without_error() { .default_spec() .deterministic_keypairs(8) .fresh_ephemeral_store() + .mock_execution_layer() .build(); harness.advance_slot(); - let spec = MinimalEthSpec::default_spec(); let target_slot = (MinimalEthSpec::genesis_epoch() + 4).end_slot(MinimalEthSpec::slots_per_epoch()); @@ -32,7 +32,7 @@ async fn runs_without_error() { .await; let mut new_head_state = harness.get_current_state(); - process_epoch(&mut new_head_state, &spec).unwrap(); + process_epoch(&mut new_head_state, &harness.spec).unwrap(); } #[cfg(not(debug_assertions))] diff --git a/consensus/types/tests/committee_cache.rs b/consensus/types/tests/committee_cache.rs index 0bb8aa1da2..5c1962276f 100644 --- a/consensus/types/tests/committee_cache.rs +++ b/consensus/types/tests/committee_cache.rs @@ -21,6 +21,7 @@ fn get_harness(validator_count: usize) -> BeaconChainHarness( .default_spec() .keypairs(KEYPAIRS[0..validator_count].to_vec()) .fresh_ephemeral_store() + .mock_execution_layer() .build(); let skip_to_slot = slot - SLOT_OFFSET; diff --git a/lcli/src/mock_el.rs b/lcli/src/mock_el.rs index 544010b6a2..6086067a47 100644 --- a/lcli/src/mock_el.rs +++ b/lcli/src/mock_el.rs @@ -3,13 +3,10 @@ use clap_utils::{parse_optional, parse_required}; use environment::Environment; use execution_layer::{ auth::{JwtKey, strip_prefix}, - test_utils::{ - Config, DEFAULT_JWT_SECRET, DEFAULT_TERMINAL_BLOCK, MockExecutionConfig, MockServer, - }, + test_utils::{Config, DEFAULT_JWT_SECRET, MockExecutionConfig, MockServer}, }; use std::net::Ipv4Addr; use std::path::PathBuf; -use std::sync::Arc; use types::*; pub fn run(mut env: Environment, matches: &ArgMatches) -> Result<(), String> { @@ -25,7 +22,6 @@ pub fn run(mut env: Environment, matches: &ArgMatches) -> Result< let amsterdam_time = parse_optional(matches, "amsterdam-time")?; let handle = env.core_context().executor.handle().unwrap(); - let spec = Arc::new(E::default_spec()); let jwt_key = if let Some(secret_path) = jwt_secret_path { let hex_str = std::fs::read_to_string(&secret_path) @@ -50,9 +46,6 @@ pub fn run(mut env: Environment, matches: &ArgMatches) -> Result< listen_port, }, jwt_key, - terminal_difficulty: spec.terminal_total_difficulty, - terminal_block: DEFAULT_TERMINAL_BLOCK, - terminal_block_hash: spec.terminal_block_hash, shanghai_time: Some(shanghai_time), cancun_time, prague_time, diff --git a/testing/execution_engine_integration/src/test_rig.rs b/testing/execution_engine_integration/src/test_rig.rs index 5c3061166e..6bf4a1aa52 100644 --- a/testing/execution_engine_integration/src/test_rig.rs +++ b/testing/execution_engine_integration/src/test_rig.rs @@ -9,8 +9,8 @@ use alloy_signer_local::PrivateKeySigner; use bls::PublicKeyBytes; use execution_layer::test_utils::DEFAULT_GAS_LIMIT; use execution_layer::{ - BlockProposalContentsType, BuilderParams, ChainHealth, ExecutionLayer, PayloadAttributes, - PayloadParameters, PayloadStatus, + BlockByNumberQuery, BlockProposalContentsType, BuilderParams, ChainHealth, ExecutionLayer, + LATEST_TAG, PayloadAttributes, PayloadParameters, PayloadStatus, }; use fixed_bytes::FixedBytesExtended; use fork_choice::ForkchoiceUpdateParameters; @@ -210,25 +210,29 @@ impl TestRig { let account2 = AlloyAddress::from_slice(&hex::decode(ACCOUNT2).unwrap()); /* - * Read the terminal block hash from both pairs, check it's equal. + * Read the genesis block hash from both pairs, check it's equal. + * Since TTD=0, the genesis block is the terminal PoW block. */ - let terminal_pow_block_hash = self + let genesis_block = self .ee_a .execution_layer - .get_terminal_pow_block_hash(&self.spec, timestamp_now()) + .get_block_by_number(BlockByNumberQuery::Tag(LATEST_TAG)) .await .unwrap() - .unwrap(); + .expect("should have genesis block"); + + let terminal_pow_block_hash = genesis_block.block_hash; assert_eq!( terminal_pow_block_hash, self.ee_b .execution_layer - .get_terminal_pow_block_hash(&self.spec, timestamp_now()) + .get_block_by_number(BlockByNumberQuery::Tag(LATEST_TAG)) .await .unwrap() - .unwrap() + .expect("should have genesis block") + .block_hash ); // Submit transactions before getting payload diff --git a/testing/state_transition_vectors/src/exit.rs b/testing/state_transition_vectors/src/exit.rs index f8ece0218f..3b0fe7d8ec 100644 --- a/testing/state_transition_vectors/src/exit.rs +++ b/testing/state_transition_vectors/src/exit.rs @@ -1,4 +1,5 @@ use super::*; +use beacon_chain::test_utils::test_spec; use state_processing::{ BlockProcessingError, BlockSignatureStrategy, ConsensusContext, VerifyBlockRoot, per_block_processing, per_block_processing::errors::ExitInvalid, @@ -70,13 +71,13 @@ impl ExitTest { BlockSignatureStrategy::VerifyIndividual, VerifyBlockRoot::True, &mut ctxt, - &E::default_spec(), + &test_spec::(), ) } #[cfg(all(test, not(debug_assertions)))] async fn run(self) -> BeaconState { - let spec = &E::default_spec(); + let spec = &test_spec::(); let expected = self.expected.clone(); assert_eq!(STATE_EPOCH, spec.shard_committee_period); diff --git a/testing/state_transition_vectors/src/main.rs b/testing/state_transition_vectors/src/main.rs index 80c30489b7..6a212f034d 100644 --- a/testing/state_transition_vectors/src/main.rs +++ b/testing/state_transition_vectors/src/main.rs @@ -57,6 +57,7 @@ async fn get_harness( .default_spec() .keypairs(KEYPAIRS[0..validator_count].to_vec()) .fresh_ephemeral_store() + .mock_execution_layer() .build(); let skip_to_slot = slot - SLOT_OFFSET; if skip_to_slot > Slot::new(0) { diff --git a/validator_manager/src/exit_validators.rs b/validator_manager/src/exit_validators.rs index 8ddcc7e419..00bcb36e80 100644 --- a/validator_manager/src/exit_validators.rs +++ b/validator_manager/src/exit_validators.rs @@ -322,7 +322,7 @@ mod test { let mut spec = ChainSpec::mainnet(); spec.shard_committee_period = 1; spec.altair_fork_epoch = Some(Epoch::new(0)); - spec.bellatrix_fork_epoch = Some(Epoch::new(1)); + spec.bellatrix_fork_epoch = Some(Epoch::new(0)); spec.capella_fork_epoch = Some(Epoch::new(2)); spec.deneb_fork_epoch = Some(Epoch::new(3)); @@ -330,15 +330,8 @@ mod test { let harness = &beacon_node.harness; let mock_el = harness.mock_execution_layer.as_ref().unwrap(); - let execution_ctx = mock_el.server.ctx.clone(); - // Move to terminal block. mock_el.server.all_payloads_valid(); - execution_ctx - .execution_block_generator - .write() - .move_to_terminal_block() - .unwrap(); Self { exit_config: None, From 12eddb561ce8c1b85c9f4e777687078b03e20f1b Mon Sep 17 00:00:00 2001 From: Eitan Seri- Levi Date: Tue, 24 Feb 2026 22:49:57 -0800 Subject: [PATCH 34/35] fix new payload notifier --- beacon_node/beacon_chain/src/execution_payload.rs | 6 +++--- .../payload_envelope_verification/payload_notifier.rs | 10 +++++++++- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index cf6c5d83b4..db221712d4 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -119,6 +119,7 @@ impl PayloadNotifier { &self.chain, self.block.message().tree_hash_root(), self.block.message().slot(), + self.block.message().parent_root(), self.block.message().try_into()?, ) .await @@ -139,6 +140,7 @@ pub async fn notify_new_payload( chain: &Arc>, beacon_block_root: Hash256, slot: Slot, + parent_beacon_block_root: Hash256, new_payload_request: NewPayloadRequest<'_, T::EthSpec>, ) -> Result { let execution_layer = chain @@ -189,11 +191,9 @@ pub async fn notify_new_payload( { // This block has not yet been applied to fork choice, so the latest block that was // imported to fork choice was the parent. - let latest_root = new_payload_request.parent_beacon_block_root()?; - chain .process_invalid_execution_payload(&InvalidationOperation::InvalidateMany { - head_block_root: *latest_root, + head_block_root: parent_beacon_block_root, always_invalidate_head: false, latest_valid_ancestor: latest_valid_hash, }) diff --git a/beacon_node/beacon_chain/src/payload_envelope_verification/payload_notifier.rs b/beacon_node/beacon_chain/src/payload_envelope_verification/payload_notifier.rs index 592d46022a..f3e4f6990b 100644 --- a/beacon_node/beacon_chain/src/payload_envelope_verification/payload_notifier.rs +++ b/beacon_node/beacon_chain/src/payload_envelope_verification/payload_notifier.rs @@ -62,8 +62,16 @@ impl PayloadNotifier { Ok(precomputed_status) } else { let block_root = self.envelope.message.beacon_block_root; + let parent_root = self.block.message().parent_root(); let request = Self::build_new_payload_request(&self.envelope, &self.block)?; - notify_new_payload(&self.chain, block_root, self.envelope.slot(), request).await + notify_new_payload( + &self.chain, + block_root, + self.envelope.slot(), + parent_root, + request, + ) + .await } } From a1ef265c9e612f15060f23f43a4832032e4589dd Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Wed, 25 Feb 2026 23:17:49 +1100 Subject: [PATCH 35/35] Add getBlobsV1 and getBlobsV2 support to mock EL server (#8870) Co-Authored-By: Jimmy Chen --- .../test_utils/execution_block_generator.rs | 38 ++++++++++++++++++- .../src/test_utils/handle_rpc.rs | 29 ++++++++++++++ 2 files changed, 66 insertions(+), 1 deletion(-) diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index 1743b340ab..62a46246da 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -1,7 +1,8 @@ use crate::engine_api::{ ExecutionBlock, PayloadAttributes, PayloadId, PayloadStatusV1, PayloadStatusV1Status, json_structures::{ - JsonForkchoiceUpdatedV1Response, JsonPayloadStatusV1, JsonPayloadStatusV1Status, + BlobAndProof, BlobAndProofV1, BlobAndProofV2, JsonForkchoiceUpdatedV1Response, + JsonPayloadStatusV1, JsonPayloadStatusV1Status, }, }; use crate::engines::ForkchoiceState; @@ -15,6 +16,7 @@ use rand::{Rng, SeedableRng, rngs::StdRng}; use serde::{Deserialize, Serialize}; use ssz::Decode; use ssz_types::VariableList; +use state_processing::per_block_processing::deneb::kzg_commitment_to_versioned_hash; use std::cmp::max; use std::collections::HashMap; use std::sync::Arc; @@ -456,6 +458,40 @@ impl ExecutionBlockGenerator { self.blobs_bundles.get(id).cloned() } + /// Look up a blob and proof by versioned hash across all stored bundles. + pub fn get_blob_and_proof(&self, versioned_hash: &Hash256) -> Option> { + self.blobs_bundles + .iter() + .find_map(|(payload_id, blobs_bundle)| { + let (blob_idx, _) = + blobs_bundle + .commitments + .iter() + .enumerate() + .find(|(_, commitment)| { + &kzg_commitment_to_versioned_hash(commitment) == versioned_hash + })?; + let is_fulu = self.payload_ids.get(payload_id)?.fork_name().fulu_enabled(); + let blob = blobs_bundle.blobs.get(blob_idx)?.clone(); + if is_fulu { + let start = blob_idx * E::cells_per_ext_blob(); + let end = start + E::cells_per_ext_blob(); + let proofs = blobs_bundle + .proofs + .get(start..end)? + .to_vec() + .try_into() + .ok()?; + Some(BlobAndProof::V2(BlobAndProofV2 { blob, proofs })) + } else { + Some(BlobAndProof::V1(BlobAndProofV1 { + blob, + proof: *blobs_bundle.proofs.get(blob_idx)?, + })) + } + }) + } + pub fn new_payload(&mut self, payload: ExecutionPayload) -> PayloadStatusV1 { let Some(parent) = self.blocks.get(&payload.parent_hash()) else { return PayloadStatusV1 { diff --git a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs index 53eb3b5166..7a81017b3f 100644 --- a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs +++ b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs @@ -468,6 +468,35 @@ pub async fn handle_rpc( _ => unreachable!(), } } + ENGINE_GET_BLOBS_V1 => { + let versioned_hashes = + get_param::>(params, 0).map_err(|s| (s, BAD_PARAMS_ERROR_CODE))?; + let generator = ctx.execution_block_generator.read(); + // V1: per-element nullable array, positionally matching the request. + let response: Vec>> = versioned_hashes + .iter() + .map(|hash| match generator.get_blob_and_proof(hash) { + Some(BlobAndProof::V1(v1)) => Some(v1), + _ => None, + }) + .collect(); + Ok(serde_json::to_value(response).unwrap()) + } + ENGINE_GET_BLOBS_V2 => { + let versioned_hashes = + get_param::>(params, 0).map_err(|s| (s, BAD_PARAMS_ERROR_CODE))?; + let generator = ctx.execution_block_generator.read(); + // V2: all-or-nothing — null if any blob is missing. + let results: Vec>> = versioned_hashes + .iter() + .map(|hash| match generator.get_blob_and_proof(hash) { + Some(BlobAndProof::V2(v2)) => Some(v2), + _ => None, + }) + .collect(); + let response: Option>> = results.into_iter().collect(); + Ok(serde_json::to_value(response).unwrap()) + } ENGINE_FORKCHOICE_UPDATED_V1 | ENGINE_FORKCHOICE_UPDATED_V2 | ENGINE_FORKCHOICE_UPDATED_V3 => {