From 82dc8b4edc859469647e07d475d4b68466beb498 Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Thu, 23 Apr 2026 20:32:26 +0900 Subject: [PATCH 1/7] Ensure payload envelope streamer always serves canonical envelopes after the split slot (#9085) Co-Authored-By: Eitan Seri- Levi Co-Authored-By: Eitan Seri-Levi --- .../beacon_chain/src/canonical_head.rs | 23 +- .../beacon_chain_adapter.rs | 4 +- .../src/payload_envelope_streamer/mod.rs | 9 +- .../src/payload_envelope_streamer/tests.rs | 19 +- consensus/fork_choice/src/fork_choice.rs | 24 ++ .../src/fork_choice_test_definition.rs | 113 +++++++- .../gloas_payload.rs | 273 +++++++++++++++++- consensus/proto_array/src/proto_array.rs | 86 +++++- .../src/proto_array_fork_choice.rs | 18 ++ 9 files changed, 533 insertions(+), 36 deletions(-) diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs index 1e5e1300ab..74670b02d7 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -383,11 +383,24 @@ impl CanonicalHead { Ok((head, execution_status)) } - // TODO(gloas) just a stub for now, implement this once we have fork choice. - /// Returns true if the payload for this block is canonical according to fork choice - /// Returns an error if the block root doesn't exist in fork choice. - pub fn block_has_canonical_payload(&self, _root: &Hash256) -> Result { - Ok(true) + /// Returns `true` if the payload for this block is canonical (Full) according to fork choice. 
+ pub fn block_has_canonical_payload( + &self, + root: &Hash256, + spec: &ChainSpec, + ) -> Result { + let cached_head = self.cached_head(); + let head_root = cached_head.head_block_root(); + let head_payload_status = cached_head.head_payload_status(); + + if *root == head_root { + return Ok(head_payload_status == PayloadStatus::Full); + } + + self.fork_choice_read_lock() + .get_canonical_payload_status(root, spec) + .map(|status| status == PayloadStatus::Full) + .map_err(Error::ForkChoiceError) } /// Returns a clone of `self.cached_head`. diff --git a/beacon_node/beacon_chain/src/payload_envelope_streamer/beacon_chain_adapter.rs b/beacon_node/beacon_chain/src/payload_envelope_streamer/beacon_chain_adapter.rs index 47c58f07b9..4e36cf7895 100644 --- a/beacon_node/beacon_chain/src/payload_envelope_streamer/beacon_chain_adapter.rs +++ b/beacon_node/beacon_chain/src/payload_envelope_streamer/beacon_chain_adapter.rs @@ -37,6 +37,8 @@ impl EnvelopeStreamerBeaconAdapter { &self, root: &Hash256, ) -> Result { - self.chain.canonical_head.block_has_canonical_payload(root) + self.chain + .canonical_head + .block_has_canonical_payload(root, &self.chain.spec) } } diff --git a/beacon_node/beacon_chain/src/payload_envelope_streamer/mod.rs b/beacon_node/beacon_chain/src/payload_envelope_streamer/mod.rs index d10e3762a4..5b1bda5dd5 100644 --- a/beacon_node/beacon_chain/src/payload_envelope_streamer/mod.rs +++ b/beacon_node/beacon_chain/src/payload_envelope_streamer/mod.rs @@ -132,13 +132,8 @@ impl PayloadEnvelopeStreamer { results.push((*root, Ok(None))); } } - Err(_) => { - results.push(( - *root, - Err(BeaconChainError::EnvelopeStreamerError( - Error::BlockMissingFromForkChoice, - )), - )); + Err(e) => { + results.push((*root, Err(e))); } } } else { diff --git a/beacon_node/beacon_chain/src/payload_envelope_streamer/tests.rs b/beacon_node/beacon_chain/src/payload_envelope_streamer/tests.rs index 0db6d57ed6..be3dbf33ce 100644 --- 
a/beacon_node/beacon_chain/src/payload_envelope_streamer/tests.rs +++ b/beacon_node/beacon_chain/src/payload_envelope_streamer/tests.rs @@ -1,4 +1,5 @@ use super::*; +use crate::beacon_chain::ForkChoiceError; use crate::payload_envelope_streamer::beacon_chain_adapter::MockEnvelopeStreamerBeaconAdapter; use crate::test_utils::EphemeralHarnessType; use bls::{FixedBytesExtended, Signature}; @@ -279,15 +280,18 @@ async fn stream_envelopes_by_root() { } /// When `block_has_canonical_payload` returns an error, the streamer should -/// yield `Err(EnvelopeStreamerError(BlockMissingFromForkChoice))` for those roots. +/// propagate that error for those roots. #[tokio::test] async fn stream_envelopes_error() { let chain = build_chain(4, &[], &[], &[]); let (mut mock, _runtime) = mock_adapter(); mock.expect_get_split_slot().return_const(Slot::new(0)); mock_envelopes(&mut mock, &chain); - mock.expect_block_has_canonical_payload() - .returning(|_| Err(BeaconChainError::CanonicalHeadLockTimeout)); + mock.expect_block_has_canonical_payload().returning(|_| { + Err(BeaconChainError::ForkChoiceError( + ForkChoiceError::DoesNotDescendFromFinalizedCheckpoint, + )) + }); let streamer = PayloadEnvelopeStreamer::new(mock, EnvelopeRequestSource::ByRange); let mut stream = streamer.launch_stream(roots(&chain)); @@ -299,13 +303,8 @@ async fn stream_envelopes_error() { .unwrap_or_else(|| panic!("stream ended early at index {i}")); assert_eq!(root, entry.block_root, "root mismatch at index {i}"); assert!( - matches!( - result.as_ref(), - Err(BeaconChainError::EnvelopeStreamerError( - Error::BlockMissingFromForkChoice - )) - ), - "expected BlockMissingFromForkChoice error at index {i}, got {:?}", + result.as_ref().is_err(), + "expected error at index {i}, got {:?}", result ); } diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 21415e478a..f9d779fd24 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ 
b/consensus/fork_choice/src/fork_choice.rs @@ -78,6 +78,7 @@ pub enum Error { UnrealizedVoteProcessing(state_processing::EpochProcessingError), ValidatorStatuses(BeaconStateError), ChainSpecError(String), + DoesNotDescendFromFinalizedCheckpoint, } impl From for Error { @@ -1523,6 +1524,29 @@ where } } + /// Returns the canonical payload status of a block. See + /// `ProtoArrayForkChoice::get_canonical_payload_status`. + pub fn get_canonical_payload_status( + &self, + block_root: &Hash256, + spec: &ChainSpec, + ) -> Result> { + if self.is_finalized_checkpoint_or_descendant(*block_root) { + let current_slot = self.fc_store.get_current_slot(); + let proposer_boost_root = self.fc_store.proposer_boost_root(); + self.proto_array + .get_canonical_payload_status::( + block_root, + current_slot, + proposer_boost_root, + spec, + ) + .map_err(Error::ProtoArrayError) + } else { + Err(Error::DoesNotDescendFromFinalizedCheckpoint) + } + } + /// Returns the weight for the given block root. pub fn get_block_weight(&self, block_root: &Hash256) -> Option { self.proto_array.get_weight(block_root) diff --git a/consensus/proto_array/src/fork_choice_test_definition.rs b/consensus/proto_array/src/fork_choice_test_definition.rs index c9764d3e44..d537f16bb2 100644 --- a/consensus/proto_array/src/fork_choice_test_definition.rs +++ b/consensus/proto_array/src/fork_choice_test_definition.rs @@ -4,6 +4,7 @@ mod gloas_payload; mod no_votes; mod votes; +use crate::error::Error; use crate::proto_array_fork_choice::{Block, ExecutionStatus, PayloadStatus, ProtoArrayForkChoice}; use crate::{InvalidationOperation, JustifiedBalances}; use fixed_bytes::FixedBytesExtended; @@ -30,6 +31,8 @@ pub enum Operation { justified_state_balances: Vec, expected_head: Hash256, current_slot: Slot, + // TODO(gloas): Make this non-optional. `find_head` always returns a `PayloadStatus` + // (Empty for pre-GLOAS), so every test should assert on it explicitly. 
#[serde(default)] expected_payload_status: Option, }, @@ -61,6 +64,12 @@ pub enum Operation { block_root: Hash256, attestation_slot: Slot, }, + ProcessGloasAttestation { + validator_index: usize, + block_root: Hash256, + attestation_slot: Slot, + payload_present: bool, + }, ProcessPayloadAttestation { validator_index: usize, block_root: Hash256, @@ -105,6 +114,16 @@ pub enum Operation { block_root: Hash256, expected: bool, }, + AssertPayloadStatusByWeight { + block_root: Hash256, + expected_status: PayloadStatus, + /// Override `current_slot`. Defaults to the `current_slot` of the last `FindHead`. + #[serde(default)] + current_slot: Option, + /// Override the proposer boost root. Defaults to `Hash256::zero()`. + #[serde(default)] + proposer_boost_root: Option, + }, } #[derive(Debug, Clone, Serialize, Deserialize)] @@ -149,6 +168,7 @@ impl ForkChoiceTestDefinition { ) .expect("should create fork choice struct"); let equivocating_indices = BTreeSet::new(); + let mut last_current_slot = Slot::new(0); for (op_index, op) in self.operations.into_iter().enumerate() { match op.clone() { @@ -189,6 +209,16 @@ impl ForkChoiceTestDefinition { op_index, op ); } + assert_canonical_payload_status_matches_find_head( + &fork_choice, + &head, + current_slot, + Hash256::zero(), + &spec, + payload_status, + op_index, + ); + last_current_slot = current_slot; check_bytes_round_trip(&fork_choice); } Operation::ProposerBoostFindHead { @@ -201,7 +231,7 @@ impl ForkChoiceTestDefinition { let justified_balances = JustifiedBalances::from_effective_balances(justified_state_balances) .unwrap(); - let (head, _payload_status) = fork_choice + let (head, payload_status) = fork_choice .find_head::( justified_checkpoint, finalized_checkpoint, @@ -220,6 +250,15 @@ impl ForkChoiceTestDefinition { "Operation at index {} failed head check. 
Operation: {:?}", op_index, op ); + assert_canonical_payload_status_matches_find_head( + &fork_choice, + &head, + Slot::new(0), + proposer_boost_root, + &spec, + payload_status, + op_index, + ); check_bytes_round_trip(&fork_choice); } Operation::InvalidFindHead { @@ -308,6 +347,27 @@ impl ForkChoiceTestDefinition { }); check_bytes_round_trip(&fork_choice); } + Operation::ProcessGloasAttestation { + validator_index, + block_root, + attestation_slot, + payload_present, + } => { + fork_choice + .process_attestation( + validator_index, + block_root, + attestation_slot, + payload_present, + ) + .unwrap_or_else(|_| { + panic!( + "process_attestation op at index {} returned error", + op_index + ) + }); + check_bytes_round_trip(&fork_choice); + } Operation::ProcessPayloadAttestation { validator_index, block_root, @@ -522,6 +582,26 @@ impl ForkChoiceTestDefinition { op_index ); } + Operation::AssertPayloadStatusByWeight { + block_root, + expected_status, + current_slot, + proposer_boost_root, + } => { + let actual = fork_choice + .get_canonical_payload_status::( + &block_root, + current_slot.unwrap_or(last_current_slot), + proposer_boost_root.unwrap_or_else(Hash256::zero), + &spec, + ) + .unwrap(); + assert_eq!( + actual, expected_status, + "canonical payload status mismatch at op index {}", + op_index + ); + } } } } @@ -546,6 +626,37 @@ fn get_checkpoint(i: u64) -> Checkpoint { } } +/// Checks that `get_canonical_payload_status` agrees with the `payload_status` +/// returned by `find_head` for the head block. 
+fn assert_canonical_payload_status_matches_find_head( + fork_choice: &ProtoArrayForkChoice, + head: &Hash256, + current_slot: Slot, + proposer_boost_root: Hash256, + spec: &ChainSpec, + expected: PayloadStatus, + op_index: usize, +) { + match fork_choice.get_canonical_payload_status::( + head, + current_slot, + proposer_boost_root, + spec, + ) { + Ok(actual) => assert_eq!( + actual, expected, + "get_canonical_payload_status disagreed with find_head for head {:?} at op index {}", + head, op_index + ), + // Skip the check for pre-gloas nodes + Err(Error::InvalidNodeVariant { .. }) => {} + Err(e) => panic!( + "get_canonical_payload_status failed at op index {}: {:?}", + op_index, e + ), + } +} + fn check_bytes_round_trip(original: &ProtoArrayForkChoice) { let bytes = original.as_bytes(); let decoded = ProtoArrayForkChoice::from_bytes(&bytes, original.balances.clone()) diff --git a/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs b/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs index 197e1102a3..ac4f8992c4 100644 --- a/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs +++ b/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs @@ -81,20 +81,88 @@ pub fn get_gloas_chain_following_test_definition() -> ForkChoiceTestDefinition { expected_payload_status: None, }); - ops.push(Operation::SetPayloadTiebreak { - block_root: get_root(0), - is_timely: false, - is_data_available: false, + // Cross-slot attestation with payload_present=true to Full branch (root 3, slot 2). + // vote_slot=3 differs from block_slot=2 and payload_present=true, so it counts as Full weight. 
+ ops.push(Operation::ProcessGloasAttestation { + validator_index: 0, + block_root: get_root(3), + attestation_slot: Slot::new(3), + payload_present: true, }); ops.push(Operation::FindHead { justified_checkpoint: get_checkpoint(0), finalized_checkpoint: get_checkpoint(0), justified_state_balances: vec![1], + expected_head: get_root(3), + current_slot: Slot::new(0), + expected_payload_status: None, + }); + + // Full weight propagated up: root 0 and root 1 should show Full. + ops.push(Operation::AssertPayloadStatusByWeight { + block_root: get_root(0), + expected_status: PayloadStatus::Full, + current_slot: None, + proposer_boost_root: None, + }); + ops.push(Operation::AssertPayloadStatusByWeight { + block_root: get_root(1), + expected_status: PayloadStatus::Full, + current_slot: None, + proposer_boost_root: None, + }); + // Root 2 has no payload received, so it's always Empty. + ops.push(Operation::AssertPayloadStatusByWeight { + block_root: get_root(2), + expected_status: PayloadStatus::Empty, + current_slot: None, + proposer_boost_root: None, + }); + + // Cross-slot attestations with payload_present=false to Empty branch (root 4, slot 2). + // Two validators so Empty branch outweighs Full branch. + ops.push(Operation::ProcessGloasAttestation { + validator_index: 1, + block_root: get_root(4), + attestation_slot: Slot::new(3), + payload_present: false, + }); + ops.push(Operation::ProcessGloasAttestation { + validator_index: 2, + block_root: get_root(4), + attestation_slot: Slot::new(3), + payload_present: false, + }); + ops.push(Operation::FindHead { + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + justified_state_balances: vec![1, 1, 1], expected_head: get_root(4), current_slot: Slot::new(0), expected_payload_status: None, }); + // Empty weight now dominates, so root 0 flips to Empty. 
+ ops.push(Operation::AssertPayloadStatusByWeight { + block_root: get_root(0), + expected_status: PayloadStatus::Empty, + current_slot: None, + proposer_boost_root: None, + }); + ops.push(Operation::AssertPayloadStatusByWeight { + block_root: get_root(2), + expected_status: PayloadStatus::Empty, + current_slot: None, + proposer_boost_root: None, + }); + // Root 1 (Full branch) still has 1 Full vote and 0 Empty, so it stays Full. + ops.push(Operation::AssertPayloadStatusByWeight { + block_root: get_root(1), + expected_status: PayloadStatus::Full, + current_slot: None, + proposer_boost_root: None, + }); + ForkChoiceTestDefinition { finalized_block_slot: Slot::new(0), justified_checkpoint: get_checkpoint(0), @@ -143,7 +211,7 @@ pub fn get_gloas_payload_probe_test_definition() -> ForkChoiceTestDefinition { justified_state_balances: vec![1, 1], expected_head: get_root(1), current_slot: Slot::new(0), - // With MainnetEthSpec PTC_SIZE=512, 1 bit set out of 256 threshold → not timely → Empty. + // With MainnetEthSpec PTC_SIZE=512 and a 256-bit threshold, 1 bit set is not timely, so Empty. expected_payload_status: Some(PayloadStatus::Empty), }); // PTC votes write to bitfields only, not to full/empty weight. @@ -286,7 +354,7 @@ pub fn get_gloas_find_head_vote_transition_test_definition() -> ForkChoiceTestDe expected_payload_status: None, }); - // CL attestation to Empty branch (root 4) from validator 0 → head flips to 4. + // CL attestation to Empty branch (root 4) from validator 0 flips the head to 4. ops.push(Operation::ProcessAttestation { validator_index: 0, block_root: get_root(4), @@ -301,7 +369,7 @@ pub fn get_gloas_find_head_vote_transition_test_definition() -> ForkChoiceTestDe expected_payload_status: None, }); - // CL attestation back to Full branch (root 3) → head returns to 3. + // CL attestation back to Full branch (root 3) returns the head to 3. 
ops.push(Operation::ProcessAttestation { validator_index: 0, block_root: get_root(3), @@ -546,7 +614,7 @@ pub fn get_gloas_interleaved_attestations_test_definition() -> ForkChoiceTestDef block_root: get_root(1), }); - // Step 4: Set tiebreaker to Empty on genesis → Empty branch wins. + // Step 4: Set tiebreaker to Empty on genesis so the Empty branch wins. ops.push(Operation::SetPayloadTiebreak { block_root: get_root(0), is_timely: false, @@ -560,8 +628,15 @@ pub fn get_gloas_interleaved_attestations_test_definition() -> ForkChoiceTestDef current_slot: Slot::new(1), expected_payload_status: None, }); + // Weights are tied (1 vote each branch), tiebreaker is Empty. + ops.push(Operation::AssertPayloadStatusByWeight { + block_root: get_root(0), + expected_status: PayloadStatus::Empty, + current_slot: None, + proposer_boost_root: None, + }); - // Step 5: Flip tiebreaker to Full → Full branch wins. + // Step 5: Flip tiebreaker to Full so the Full branch wins. ops.push(Operation::SetPayloadTiebreak { block_root: get_root(0), is_timely: true, @@ -575,8 +650,15 @@ pub fn get_gloas_interleaved_attestations_test_definition() -> ForkChoiceTestDef current_slot: Slot::new(100), expected_payload_status: None, }); + // Weights still tied, tiebreaker flipped to Full. + ops.push(Operation::AssertPayloadStatusByWeight { + block_root: get_root(0), + expected_status: PayloadStatus::Full, + current_slot: None, + proposer_boost_root: None, + }); - // Step 6: Add extra CL weight to Empty branch → overrides Full tiebreaker. + // Step 6: Add extra CL weight to the Empty branch; this overrides the Full tiebreaker. ops.push(Operation::ProcessAttestation { validator_index: 2, block_root: get_root(4), @@ -732,6 +814,163 @@ pub fn get_gloas_payload_received_interleaving_test_definition() -> ForkChoiceTe } } +/// When `current_slot == node.slot + 1`, spec `get_weight` zeroes out Full and Empty +/// weights so the tiebreaker decides. 
Tests that the zero-out is applied and +/// doesn't just compare raw payload weights. +pub fn get_gloas_previous_slot_tiebreaker_test_definition() -> ForkChoiceTestDefinition { + let mut ops = vec![]; + + // Block 1 at slot 1 with its payload received. + // Genesis has zero block hash so all its children are Empty (genesis never has + // payload_received). Block 1's parent_hash doesn't match zero → Empty child. + ops.push(Operation::ProcessBlock { + slot: Slot::new(1), + root: get_root(1), + parent_root: get_root(0), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: Some(get_hash(0)), + execution_payload_block_hash: Some(get_hash(1)), + }); + ops.push(Operation::ProcessExecutionPayloadEnvelope { + block_root: get_root(1), + }); + + // Block 2 at slot 2 with a mismatched EL parent hash, giving it an Empty parent payload status. + ops.push(Operation::ProcessBlock { + slot: Slot::new(2), + root: get_root(2), + parent_root: get_root(1), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: Some(get_hash(99)), + execution_payload_block_hash: Some(get_hash(2)), + }); + + // More Full weight than Empty on block 1. + ops.push(Operation::ProcessGloasAttestation { + validator_index: 0, + block_root: get_root(1), + attestation_slot: Slot::new(2), + payload_present: true, + }); + + // Materialize the attestation into `full_payload_weight`. 
+ ops.push(Operation::FindHead { + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + justified_state_balances: vec![1], + expected_head: get_root(1), + current_slot: Slot::new(1), + expected_payload_status: Some(PayloadStatus::Full), + }); + + // Before zero-out (current_slot == block 1's slot), raw weights decide payload status (Full) + ops.push(Operation::AssertPayloadStatusByWeight { + block_root: get_root(1), + expected_status: PayloadStatus::Full, + current_slot: Some(Slot::new(1)), + proposer_boost_root: None, + }); + + // At current_slot == block 1's slot + 1, both weights zero out and the + // tiebreaker picks Empty (block 2 extends block 1 with an Empty parent + // payload status). + ops.push(Operation::AssertPayloadStatusByWeight { + block_root: get_root(1), + expected_status: PayloadStatus::Empty, + current_slot: Some(Slot::new(2)), + proposer_boost_root: Some(get_root(2)), + }); + + ForkChoiceTestDefinition { + finalized_block_slot: Slot::new(0), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + operations: ops, + execution_payload_parent_hash: Some(ExecutionBlockHash::zero()), + execution_payload_block_hash: Some(ExecutionBlockHash::zero()), + spec: Some(gloas_spec()), + } +} + +/// Proposer boost on a descendant can flip an ancestor's canonical payload status. +/// Boost supports the ancestor's Full variant (via the descendant's Full parent +/// payload status) but not Empty, so a large enough boost overrides raw Empty weight. +pub fn get_gloas_proposer_boost_flips_ancestor_test_definition() -> ForkChoiceTestDefinition { + let mut ops = vec![]; + + // Block 1 at slot 1 with payload received. 
+ ops.push(Operation::ProcessBlock { + slot: Slot::new(1), + root: get_root(1), + parent_root: get_root(0), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: Some(get_hash(0)), + execution_payload_block_hash: Some(get_hash(1)), + }); + ops.push(Operation::ProcessExecutionPayloadEnvelope { + block_root: get_root(1), + }); + + // Block 2 at slot 3 with a Full parent payload status (skip slot 2 so + // block 1's previous-slot zero-out doesn't fire at current_slot 3). + ops.push(Operation::ProcessBlock { + slot: Slot::new(3), + root: get_root(2), + parent_root: get_root(1), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: Some(get_hash(1)), + execution_payload_block_hash: Some(get_hash(2)), + }); + + // One Empty vote on block 1. Balance totals are chosen so the proposer + // boost score exceeds the single Empty voter's balance. + ops.push(Operation::ProcessGloasAttestation { + validator_index: 0, + block_root: get_root(1), + attestation_slot: Slot::new(2), + payload_present: false, + }); + + ops.push(Operation::FindHead { + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + justified_state_balances: vec![100, 10000], + expected_head: get_root(1), + current_slot: Slot::new(3), + expected_payload_status: Some(PayloadStatus::Empty), + }); + + // Without boost the raw weights decide and Empty wins. + ops.push(Operation::AssertPayloadStatusByWeight { + block_root: get_root(1), + expected_status: PayloadStatus::Empty, + current_slot: Some(Slot::new(3)), + proposer_boost_root: None, + }); + + // With boost on block 2 the boost supports block 1's Full variant, so Full wins. 
+ ops.push(Operation::AssertPayloadStatusByWeight { + block_root: get_root(1), + expected_status: PayloadStatus::Full, + current_slot: Some(Slot::new(3)), + proposer_boost_root: Some(get_root(2)), + }); + + ForkChoiceTestDefinition { + finalized_block_slot: Slot::new(0), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + operations: ops, + execution_payload_parent_hash: Some(ExecutionBlockHash::zero()), + execution_payload_block_hash: Some(ExecutionBlockHash::zero()), + spec: Some(gloas_spec()), + } +} + #[cfg(test)] mod tests { use super::*; @@ -758,7 +997,7 @@ mod tests { let mut ops = vec![]; // Block at slot 31 — last pre-Gloas slot. Created as a V17 node because - // gloas_fork_epoch = 1 → Gloas starts at slot 32. + // gloas_fork_epoch = 1 means Gloas starts at slot 32. // // The test harness sets execution_status = Optimistic(ExecutionBlockHash::from_root(root)), // so this V17 node's EL block hash = ExecutionBlockHash::from_root(get_root(1)). @@ -909,6 +1148,18 @@ mod tests { test.run(); } + #[test] + fn previous_slot_tiebreaker() { + let test = get_gloas_previous_slot_tiebreaker_test_definition(); + test.run(); + } + + #[test] + fn proposer_boost_flips_ancestor() { + let test = get_gloas_proposer_boost_flips_ancestor_test_definition(); + test.run(); + } + /// Test that execution payload invalidation propagates across the V17→V29 fork /// boundary: after invalidating a V17 parent, head must not select any descendant. /// diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 4ca7dab69c..8548974054 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -1262,6 +1262,90 @@ impl ProtoArray { } } + /// Returns the canonical payload status of a block, matching the decision + /// `get_head` would make between `(root, FULL)` and `(root, EMPTY)`. 
+ pub(crate) fn get_canonical_payload_status( + &self, + root: Hash256, + current_slot: Slot, + proposer_boost_root: Hash256, + justified_balances: &JustifiedBalances, + spec: &ChainSpec, + ) -> Result { + let proto_node_index = *self.indices.get(&root).ok_or(Error::NodeUnknown(root))?; + let proto_node = self + .nodes + .get(proto_node_index) + .ok_or(Error::InvalidNodeIndex(proto_node_index))?; + + if !proto_node + .payload_received() + .map_err(|_| Error::InvalidNodeVariant { block_root: root })? + { + return Ok(PayloadStatus::Empty); + } + + let full_fc = IndexedForkChoiceNode { + root, + proto_node_index, + payload_status: PayloadStatus::Full, + }; + let empty_fc = IndexedForkChoiceNode { + root, + proto_node_index, + payload_status: PayloadStatus::Empty, + }; + + // Matches the hoisting optimization in `find_head`: `get_weight`'s spec-level + // `should_apply_proposer_boost` check is precomputed once. + let apply_proposer_boost = + self.should_apply_proposer_boost::(proposer_boost_root, justified_balances, spec)?; + + let full_weight = self.get_weight::( + &full_fc, + proto_node, + apply_proposer_boost, + proposer_boost_root, + current_slot, + justified_balances, + spec, + )?; + + let empty_weight = self.get_weight::( + &empty_fc, + proto_node, + apply_proposer_boost, + proposer_boost_root, + current_slot, + justified_balances, + spec, + )?; + + match full_weight.cmp(&empty_weight) { + std::cmp::Ordering::Greater => Ok(PayloadStatus::Full), + std::cmp::Ordering::Less => Ok(PayloadStatus::Empty), + std::cmp::Ordering::Equal => { + let full_tb = self.get_payload_status_tiebreaker::( + &full_fc, + proto_node, + current_slot, + proposer_boost_root, + )?; + let empty_tb = self.get_payload_status_tiebreaker::( + &empty_fc, + proto_node, + current_slot, + proposer_boost_root, + )?; + if full_tb >= empty_tb { + Ok(PayloadStatus::Full) + } else { + Ok(PayloadStatus::Empty) + } + } + } + } + /// Spec: `get_weight`. 
#[allow(clippy::too_many_arguments)] fn get_weight( @@ -1417,7 +1501,7 @@ impl ProtoArray { } } - fn get_payload_status_tiebreaker( + pub(crate) fn get_payload_status_tiebreaker( &self, fc_node: &IndexedForkChoiceNode, proto_node: &ProtoNode, diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index 577e89baa1..1c6d3f3201 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -1053,6 +1053,24 @@ impl ProtoArrayForkChoice { .unwrap_or(false) } + /// Returns the canonical payload status of a block, matching the decision + /// `get_head` would make between `(root, FULL)` and `(root, EMPTY)`. + pub fn get_canonical_payload_status( + &self, + block_root: &Hash256, + current_slot: Slot, + proposer_boost_root: Hash256, + spec: &ChainSpec, + ) -> Result { + self.proto_array.get_canonical_payload_status::( + *block_root, + current_slot, + proposer_boost_root, + &self.balances, + spec, + ) + } + /// Returns the weight of a given block. pub fn get_weight(&self, block_root: &Hash256) -> Option { let block_index = self.proto_array.indices.get(block_root)?; From e086628efe572aee7a91016a304fb443266857d3 Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Thu, 23 Apr 2026 18:20:15 +0530 Subject: [PATCH 2/7] Avoid lint and fmt for agents (#9166) N/A Do not make the AI agent always fmt and lint. This takes way too long and the agents I work with take this too literally sometimes and run lint after incomplete changes just wasting time. I feel its not a big ask to run fmt and lint yourself and/or run it in some local configs instead of global ones. Co-Authored-By: Pawan Dhananjay --- CLAUDE.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/CLAUDE.md b/CLAUDE.md index 79ed344e35..34a895f464 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -5,8 +5,7 @@ This file provides guidance for AI assistants (Claude Code, Codex, etc.) 
working ## CRITICAL - Always Follow After completing ANY code changes: -1. **MUST** run `cargo fmt --all && make lint-fix` to format and fix linting issues -2. **MUST** run `cargo check` to verify compilation before considering task complete +1. **MUST** run `cargo check` to verify compilation before considering task complete Run `make install-hooks` if you have not already to install git hooks. Never skip git hooks. If cargo is not available install the toolchain. From 8a384ff4454bfb1061b1c4fd51cb947b26fa6803 Mon Sep 17 00:00:00 2001 From: Daniel Knopik <107140945+dknopik@users.noreply.github.com> Date: Thu, 23 Apr 2026 20:52:28 +0200 Subject: [PATCH 3/7] Cell Dissemination (Partial messages) (#8314) - https://github.com/ethereum/consensus-specs/pull/4558 - https://eips.ethereum.org/EIPS/eip-8136 Co-Authored-By: Daniel Knopik Co-Authored-By: Pawan Dhananjay Co-Authored-By: Jimmy Chen --- Cargo.lock | 316 ++--- Cargo.toml | 3 - beacon_node/beacon_chain/src/beacon_chain.rs | 160 ++- beacon_node/beacon_chain/src/builder.rs | 2 + beacon_node/beacon_chain/src/chain_config.rs | 3 + .../src/data_availability_checker.rs | 176 ++- .../src/data_column_verification.rs | 1062 +++++++++++++++-- .../fetch_blobs/fetch_blobs_beacon_adapter.rs | 40 +- .../beacon_chain/src/fetch_blobs/mod.rs | 266 +++-- .../beacon_chain/src/fetch_blobs/tests.rs | 69 +- beacon_node/beacon_chain/src/kzg_utils.rs | 215 +++- beacon_node/beacon_chain/src/lib.rs | 1 + beacon_node/beacon_chain/src/metrics.rs | 114 ++ .../src/observed_data_sidecars.rs | 12 +- .../src/partial_data_column_assembler.rs | 569 +++++++++ beacon_node/beacon_chain/src/test_utils.rs | 1 + beacon_node/beacon_processor/src/lib.rs | 14 + .../src/scheduler/work_queue.rs | 6 + beacon_node/execution_layer/src/engine_api.rs | 1 + .../execution_layer/src/engine_api/http.rs | 16 + .../src/engine_api/json_structures.rs | 3 + beacon_node/execution_layer/src/lib.rs | 19 +- .../execution_layer/src/test_utils/mod.rs | 1 + 
beacon_node/http_api/src/publish_blocks.rs | 62 +- beacon_node/lighthouse_network/Cargo.toml | 2 + beacon_node/lighthouse_network/src/config.rs | 4 + beacon_node/lighthouse_network/src/lib.rs | 2 +- beacon_node/lighthouse_network/src/metrics.rs | 8 + .../lighthouse_network/src/service/mod.rs | 183 ++- .../service/partial_column_header_tracker.rs | 28 + .../lighthouse_network/src/types/mod.rs | 5 +- .../lighthouse_network/src/types/partial.rs | 503 ++++++++ .../lighthouse_network/src/types/pubsub.rs | 51 +- .../lighthouse_network/src/types/topics.rs | 11 +- beacon_node/network/src/metrics.rs | 48 + .../gossip_methods.rs | 572 ++++++++- .../src/network_beacon_processor/mod.rs | 51 +- .../network_beacon_processor/sync_methods.rs | 10 +- beacon_node/network/src/router.rs | 18 +- beacon_node/network/src/service.rs | 42 +- .../network/src/sync/block_lookups/mod.rs | 18 +- .../sync/block_lookups/single_block_lookup.rs | 4 +- beacon_node/network/src/sync/manager.rs | 32 +- beacon_node/src/cli.rs | 9 + beacon_node/src/config.rs | 15 + book/src/help_bn.md | 3 + .../types/src/block/beacon_block_body.rs | 50 +- consensus/types/src/data/blob_sidecar.rs | 30 +- .../types/src/data/data_column_sidecar.rs | 77 ++ consensus/types/src/data/mod.rs | 5 + .../src/data/partial_data_column_sidecar.rs | 429 +++++++ consensus/types/src/kzg_ext/mod.rs | 52 +- .../generate_random_block_and_blobs.rs | 16 +- lighthouse/tests/beacon_node.rs | 18 + 54 files changed, 4797 insertions(+), 630 deletions(-) create mode 100644 beacon_node/beacon_chain/src/partial_data_column_assembler.rs create mode 100644 beacon_node/lighthouse_network/src/service/partial_column_header_tracker.rs create mode 100644 beacon_node/lighthouse_network/src/types/partial.rs create mode 100644 consensus/types/src/data/partial_data_column_sidecar.rs diff --git a/Cargo.lock b/Cargo.lock index b136e7da98..aefd51a950 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -695,7 +695,7 @@ version = "1.1.5" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "40c48f72fd53cd289104fc64099abca73db4166ad86ea0b4341abe65af83dadc" dependencies = [ - "windows-sys 0.61.2", + "windows-sys 0.60.2", ] [[package]] @@ -706,7 +706,7 @@ checksum = "291e6a250ff86cd4a820112fb8898808a366d8f9f58ce16d1f538353ad55747d" dependencies = [ "anstyle", "once_cell_polyfill", - "windows-sys 0.61.2", + "windows-sys 0.60.2", ] [[package]] @@ -1397,7 +1397,7 @@ dependencies = [ "bitflags 2.10.0", "cexpr", "clang-sys", - "itertools 0.13.0", + "itertools 0.12.1", "log", "prettyplease", "proc-macro2", @@ -3109,7 +3109,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" dependencies = [ "libc", - "windows-sys 0.61.2", + "windows-sys 0.60.2", ] [[package]] @@ -3646,12 +3646,12 @@ dependencies = [ [[package]] name = "futures-bounded" -version = "0.2.4" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91f328e7fb845fc832912fb6a34f40cf6d1888c92f974d1893a54e97b5ff542e" +checksum = "b604752cefc5aa3ab98992a107a8bd99465d2825c1584e0b60cb6957b21e19d7" dependencies = [ - "futures-timer", "futures-util", + "tokio", ] [[package]] @@ -3737,6 +3737,10 @@ name = "futures-timer" version = "3.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f288b0a4f20f9a56b5d1da57e2227c661b7b16168e2f72365f57b63326e29b24" +dependencies = [ + "gloo-timers", + "send_wrapper", +] [[package]] name = "futures-util" @@ -3832,6 +3836,18 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0cc23270f6e1808e30a928bdc84dea0b9b4136a8bc82338574f23baf47bbd280" +[[package]] +name = "gloo-timers" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b995a66bb87bebce9a0f4a95aed01daca4872c050bfcb21653361c03bc35e5c" +dependencies = [ + "futures-channel", + "futures-core", + 
"js-sys", + "wasm-bindgen", +] + [[package]] name = "graffiti_file" version = "0.1.0" @@ -4364,7 +4380,7 @@ dependencies = [ "libc", "percent-encoding", "pin-project-lite", - "socket2 0.6.1", + "socket2 0.6.3", "tokio", "tower-service", "tracing", @@ -4382,7 +4398,7 @@ dependencies = [ "js-sys", "log", "wasm-bindgen", - "windows-core 0.62.2", + "windows-core", ] [[package]] @@ -4502,16 +4518,6 @@ dependencies = [ "icu_properties", ] -[[package]] -name = "if-addrs" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cabb0019d51a643781ff15c9c8a3e5dedc365c47211270f4e8f82812fedd8f0a" -dependencies = [ - "libc", - "windows-sys 0.48.0", -] - [[package]] name = "if-addrs" version = "0.14.0" @@ -4523,16 +4529,26 @@ dependencies = [ ] [[package]] -name = "if-watch" -version = "3.2.1" +name = "if-addrs" +version = "0.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdf9d64cfcf380606e64f9a0bcf493616b65331199f984151a6fa11a7b3cde38" +checksum = "c0a05c691e1fae256cf7013d99dad472dc52d5543322761f83ec8d47eab40d2b" +dependencies = [ + "libc", + "windows-sys 0.61.2", +] + +[[package]] +name = "if-watch" +version = "3.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "71c02a5161c313f0cbdbadc511611893584a10a7b6153cb554bdf83ddce99ec2" dependencies = [ "async-io", "core-foundation 0.9.4", "fnv", "futures", - "if-addrs 0.10.2", + "if-addrs 0.15.0", "ipnet", "log", "netlink-packet-core", @@ -4919,9 +4935,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.183" +version = "0.2.185" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5b646652bf6661599e1da8901b3b9522896f01e736bad5f723fe7a3a27f899d" +checksum = "52ff2c0fe9bc6cb6b14a0592c2ff4fa9ceb83eea9db979b0487cd054946a2b8f" [[package]] name = "libloading" @@ -4956,8 +4972,8 @@ dependencies = [ [[package]] name = "libp2p" -version = "0.56.1" -source = 
"git+https://github.com/sigp/rust-libp2p.git?rev=defcaf1a78cf5b70a723b3fee0e0be051c1dbd88#defcaf1a78cf5b70a723b3fee0e0be051c1dbd88" +version = "0.57.0" +source = "git+https://github.com/libp2p/rust-libp2p.git#f4cf4bf79b710c7502969eeab8343191ec63c956" dependencies = [ "bytes", "either", @@ -4987,8 +5003,8 @@ dependencies = [ [[package]] name = "libp2p-allow-block-list" -version = "0.6.0" -source = "git+https://github.com/sigp/rust-libp2p.git?rev=defcaf1a78cf5b70a723b3fee0e0be051c1dbd88#defcaf1a78cf5b70a723b3fee0e0be051c1dbd88" +version = "0.7.0" +source = "git+https://github.com/libp2p/rust-libp2p.git#f4cf4bf79b710c7502969eeab8343191ec63c956" dependencies = [ "libp2p-core", "libp2p-identity", @@ -4997,8 +5013,8 @@ dependencies = [ [[package]] name = "libp2p-connection-limits" -version = "0.6.0" -source = "git+https://github.com/sigp/rust-libp2p.git?rev=defcaf1a78cf5b70a723b3fee0e0be051c1dbd88#defcaf1a78cf5b70a723b3fee0e0be051c1dbd88" +version = "0.7.0" +source = "git+https://github.com/libp2p/rust-libp2p.git#f4cf4bf79b710c7502969eeab8343191ec63c956" dependencies = [ "libp2p-core", "libp2p-identity", @@ -5007,8 +5023,8 @@ dependencies = [ [[package]] name = "libp2p-core" -version = "0.43.2" -source = "git+https://github.com/sigp/rust-libp2p.git?rev=defcaf1a78cf5b70a723b3fee0e0be051c1dbd88#defcaf1a78cf5b70a723b3fee0e0be051c1dbd88" +version = "0.44.0" +source = "git+https://github.com/libp2p/rust-libp2p.git#f4cf4bf79b710c7502969eeab8343191ec63c956" dependencies = [ "either", "fnv", @@ -5032,7 +5048,7 @@ dependencies = [ [[package]] name = "libp2p-dns" version = "0.45.0" -source = "git+https://github.com/sigp/rust-libp2p.git?rev=defcaf1a78cf5b70a723b3fee0e0be051c1dbd88#defcaf1a78cf5b70a723b3fee0e0be051c1dbd88" +source = "git+https://github.com/libp2p/rust-libp2p.git#f4cf4bf79b710c7502969eeab8343191ec63c956" dependencies = [ "futures", "hickory-resolver", @@ -5046,7 +5062,7 @@ dependencies = [ [[package]] name = "libp2p-gossipsub" version = "0.50.0" -source = 
"git+https://github.com/sigp/rust-libp2p.git?rev=defcaf1a78cf5b70a723b3fee0e0be051c1dbd88#defcaf1a78cf5b70a723b3fee0e0be051c1dbd88" +source = "git+https://github.com/libp2p/rust-libp2p.git#f4cf4bf79b710c7502969eeab8343191ec63c956" dependencies = [ "async-channel 2.5.0", "asynchronous-codec", @@ -5075,8 +5091,8 @@ dependencies = [ [[package]] name = "libp2p-identify" -version = "0.47.0" -source = "git+https://github.com/sigp/rust-libp2p.git?rev=defcaf1a78cf5b70a723b3fee0e0be051c1dbd88#defcaf1a78cf5b70a723b3fee0e0be051c1dbd88" +version = "0.48.0" +source = "git+https://github.com/libp2p/rust-libp2p.git#f4cf4bf79b710c7502969eeab8343191ec63c956" dependencies = [ "asynchronous-codec", "either", @@ -5115,8 +5131,8 @@ dependencies = [ [[package]] name = "libp2p-mdns" -version = "0.48.0" -source = "git+https://github.com/sigp/rust-libp2p.git?rev=defcaf1a78cf5b70a723b3fee0e0be051c1dbd88#defcaf1a78cf5b70a723b3fee0e0be051c1dbd88" +version = "0.49.0" +source = "git+https://github.com/libp2p/rust-libp2p.git#f4cf4bf79b710c7502969eeab8343191ec63c956" dependencies = [ "futures", "hickory-proto", @@ -5126,15 +5142,15 @@ dependencies = [ "libp2p-swarm", "rand 0.8.5", "smallvec", - "socket2 0.6.1", + "socket2 0.6.3", "tokio", "tracing", ] [[package]] name = "libp2p-metrics" -version = "0.17.1" -source = "git+https://github.com/sigp/rust-libp2p.git?rev=defcaf1a78cf5b70a723b3fee0e0be051c1dbd88#defcaf1a78cf5b70a723b3fee0e0be051c1dbd88" +version = "0.18.0" +source = "git+https://github.com/libp2p/rust-libp2p.git#f4cf4bf79b710c7502969eeab8343191ec63c956" dependencies = [ "futures", "libp2p-core", @@ -5149,8 +5165,8 @@ dependencies = [ [[package]] name = "libp2p-mplex" -version = "0.43.1" -source = "git+https://github.com/sigp/rust-libp2p.git?rev=defcaf1a78cf5b70a723b3fee0e0be051c1dbd88#defcaf1a78cf5b70a723b3fee0e0be051c1dbd88" +version = "0.44.0" +source = "git+https://github.com/libp2p/rust-libp2p.git#f4cf4bf79b710c7502969eeab8343191ec63c956" dependencies = [ "asynchronous-codec", 
"bytes", @@ -5167,8 +5183,8 @@ dependencies = [ [[package]] name = "libp2p-noise" -version = "0.46.1" -source = "git+https://github.com/sigp/rust-libp2p.git?rev=defcaf1a78cf5b70a723b3fee0e0be051c1dbd88#defcaf1a78cf5b70a723b3fee0e0be051c1dbd88" +version = "0.47.0" +source = "git+https://github.com/libp2p/rust-libp2p.git#f4cf4bf79b710c7502969eeab8343191ec63c956" dependencies = [ "asynchronous-codec", "bytes", @@ -5189,8 +5205,8 @@ dependencies = [ [[package]] name = "libp2p-quic" -version = "0.13.0" -source = "git+https://github.com/sigp/rust-libp2p.git?rev=defcaf1a78cf5b70a723b3fee0e0be051c1dbd88#defcaf1a78cf5b70a723b3fee0e0be051c1dbd88" +version = "0.14.0" +source = "git+https://github.com/libp2p/rust-libp2p.git#f4cf4bf79b710c7502969eeab8343191ec63c956" dependencies = [ "futures", "futures-timer", @@ -5202,7 +5218,7 @@ dependencies = [ "rand 0.8.5", "ring", "rustls 0.23.35", - "socket2 0.6.1", + "socket2 0.6.3", "thiserror 2.0.17", "tokio", "tracing", @@ -5210,13 +5226,14 @@ dependencies = [ [[package]] name = "libp2p-swarm" -version = "0.47.1" -source = "git+https://github.com/sigp/rust-libp2p.git?rev=defcaf1a78cf5b70a723b3fee0e0be051c1dbd88#defcaf1a78cf5b70a723b3fee0e0be051c1dbd88" +version = "0.48.0" +source = "git+https://github.com/libp2p/rust-libp2p.git#f4cf4bf79b710c7502969eeab8343191ec63c956" dependencies = [ "either", "fnv", "futures", "futures-timer", + "getrandom 0.2.16", "hashlink 0.11.0", "libp2p-core", "libp2p-identity", @@ -5226,13 +5243,14 @@ dependencies = [ "smallvec", "tokio", "tracing", + "wasm-bindgen-futures", "web-time", ] [[package]] name = "libp2p-swarm-derive" -version = "0.35.1" -source = "git+https://github.com/sigp/rust-libp2p.git?rev=defcaf1a78cf5b70a723b3fee0e0be051c1dbd88#defcaf1a78cf5b70a723b3fee0e0be051c1dbd88" +version = "0.36.0" +source = "git+https://github.com/libp2p/rust-libp2p.git#f4cf4bf79b710c7502969eeab8343191ec63c956" dependencies = [ "heck", "quote", @@ -5241,23 +5259,23 @@ dependencies = [ [[package]] name = 
"libp2p-tcp" -version = "0.44.1" -source = "git+https://github.com/sigp/rust-libp2p.git?rev=defcaf1a78cf5b70a723b3fee0e0be051c1dbd88#defcaf1a78cf5b70a723b3fee0e0be051c1dbd88" +version = "0.45.0" +source = "git+https://github.com/libp2p/rust-libp2p.git#f4cf4bf79b710c7502969eeab8343191ec63c956" dependencies = [ "futures", "futures-timer", "if-watch", "libc", "libp2p-core", - "socket2 0.6.1", + "socket2 0.6.3", "tokio", "tracing", ] [[package]] name = "libp2p-tls" -version = "0.6.2" -source = "git+https://github.com/sigp/rust-libp2p.git?rev=defcaf1a78cf5b70a723b3fee0e0be051c1dbd88#defcaf1a78cf5b70a723b3fee0e0be051c1dbd88" +version = "0.7.0" +source = "git+https://github.com/libp2p/rust-libp2p.git#f4cf4bf79b710c7502969eeab8343191ec63c956" dependencies = [ "futures", "futures-rustls", @@ -5274,8 +5292,8 @@ dependencies = [ [[package]] name = "libp2p-upnp" -version = "0.6.0" -source = "git+https://github.com/sigp/rust-libp2p.git?rev=defcaf1a78cf5b70a723b3fee0e0be051c1dbd88#defcaf1a78cf5b70a723b3fee0e0be051c1dbd88" +version = "0.7.0" +source = "git+https://github.com/libp2p/rust-libp2p.git#f4cf4bf79b710c7502969eeab8343191ec63c956" dependencies = [ "futures", "futures-timer", @@ -5288,8 +5306,8 @@ dependencies = [ [[package]] name = "libp2p-yamux" -version = "0.47.0" -source = "git+https://github.com/sigp/rust-libp2p.git?rev=defcaf1a78cf5b70a723b3fee0e0be051c1dbd88#defcaf1a78cf5b70a723b3fee0e0be051c1dbd88" +version = "0.48.0" +source = "git+https://github.com/libp2p/rust-libp2p.git#f4cf4bf79b710c7502969eeab8343191ec63c956" dependencies = [ "either", "futures", @@ -5422,6 +5440,7 @@ dependencies = [ "if-addrs 0.14.0", "itertools 0.14.0", "libp2p", + "libp2p-gossipsub", "libp2p-mplex", "lighthouse_version", "logging", @@ -5968,8 +5987,8 @@ dependencies = [ [[package]] name = "multistream-select" -version = "0.13.0" -source = "git+https://github.com/sigp/rust-libp2p.git?rev=defcaf1a78cf5b70a723b3fee0e0be051c1dbd88#defcaf1a78cf5b70a723b3fee0e0be051c1dbd88" +version = "0.14.0" 
+source = "git+https://github.com/libp2p/rust-libp2p.git#f4cf4bf79b710c7502969eeab8343191ec63c956" dependencies = [ "bytes", "futures", @@ -5981,46 +6000,30 @@ dependencies = [ [[package]] name = "netlink-packet-core" -version = "0.7.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72724faf704479d67b388da142b186f916188505e7e0b26719019c525882eda4" +checksum = "3463cbb78394cb0141e2c926b93fc2197e473394b761986eca3b9da2c63ae0f4" dependencies = [ - "anyhow", - "byteorder", - "netlink-packet-utils", + "paste", ] [[package]] name = "netlink-packet-route" -version = "0.17.1" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "053998cea5a306971f88580d0829e90f270f940befd7cf928da179d4187a5a66" +checksum = "4ce3636fa715e988114552619582b530481fd5ef176a1e5c1bf024077c2c9445" dependencies = [ - "anyhow", - "bitflags 1.3.2", - "byteorder", + "bitflags 2.10.0", "libc", + "log", "netlink-packet-core", - "netlink-packet-utils", -] - -[[package]] -name = "netlink-packet-utils" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ede8a08c71ad5a95cdd0e4e52facd37190977039a4704eb82a283f713747d34" -dependencies = [ - "anyhow", - "byteorder", - "paste", - "thiserror 1.0.69", ] [[package]] name = "netlink-proto" -version = "0.11.5" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72452e012c2f8d612410d89eea01e2d9b56205274abb35d53f60200b2ec41d60" +checksum = "b65d130ee111430e47eed7896ea43ca693c387f097dd97376bffafbf25812128" dependencies = [ "bytes", "futures", @@ -6032,12 +6035,12 @@ dependencies = [ [[package]] name = "netlink-sys" -version = "0.8.7" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16c903aa70590cb93691bf97a767c8d1d6122d2cc9070433deb3bbf36ce8bd23" +checksum = "cd6c30ed10fa69cc491d491b85cc971f6bdeb8e7367b7cde2ee6cc878d583fae" dependencies = [ 
"bytes", - "futures", + "futures-util", "libc", "log", "tokio", @@ -6123,17 +6126,6 @@ dependencies = [ "libc", ] -[[package]] -name = "nix" -version = "0.26.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "598beaf3cc6fdd9a5dfb1630c2800c7acd31df7aaf0f565796fba2b53ca1af1b" -dependencies = [ - "bitflags 1.3.2", - "cfg-if", - "libc", -] - [[package]] name = "nix" version = "0.30.1" @@ -6195,7 +6187,7 @@ version = "0.50.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7957b9740744892f114936ab4a57b3f487491bbeafaf8083688b16841a4240e5" dependencies = [ - "windows-sys 0.61.2", + "windows-sys 0.60.2", ] [[package]] @@ -6623,18 +6615,18 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.1.10" +version = "1.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "677f1add503faace112b9f1373e43e9e054bfdd22ff1a63c1bc485eaec6a6a8a" +checksum = "f1749c7ed4bcaf4c3d0a3efc28538844fb29bcdd7d2b67b2be7e20ba861ff517" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.10" +version = "1.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" +checksum = "d9b20ed30f105399776b9c883e68e536ef602a16ae6f596d2c473591d6ad64c6" dependencies = [ "proc-macro2", "quote", @@ -7000,7 +6992,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a56d757972c98b346a9b766e3f02746cde6dd1cd1d1d563472929fdd74bec4d" dependencies = [ "anyhow", - "itertools 0.14.0", + "itertools 0.12.1", "proc-macro2", "quote", "syn 2.0.117", @@ -7066,8 +7058,8 @@ dependencies = [ [[package]] name = "quick-protobuf-codec" -version = "0.3.1" -source = "git+https://github.com/sigp/rust-libp2p.git?rev=defcaf1a78cf5b70a723b3fee0e0be051c1dbd88#defcaf1a78cf5b70a723b3fee0e0be051c1dbd88" +version = "0.4.0" +source = 
"git+https://github.com/libp2p/rust-libp2p.git#f4cf4bf79b710c7502969eeab8343191ec63c956" dependencies = [ "asynchronous-codec", "bytes", @@ -7090,7 +7082,7 @@ dependencies = [ "quinn-udp", "rustc-hash 2.1.1", "rustls 0.23.35", - "socket2 0.6.1", + "socket2 0.6.3", "thiserror 2.0.17", "tokio", "tracing", @@ -7127,7 +7119,7 @@ dependencies = [ "cfg_aliases", "libc", "once_cell", - "socket2 0.6.1", + "socket2 0.6.3", "tracing", "windows-sys 0.60.2", ] @@ -7513,18 +7505,18 @@ dependencies = [ [[package]] name = "rtnetlink" -version = "0.13.1" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a552eb82d19f38c3beed3f786bd23aa434ceb9ac43ab44419ca6d67a7e186c0" +checksum = "4b960d5d873a75b5be9761b1e73b146f52dddcd27bac75263f40fba686d4d7b5" dependencies = [ - "futures", + "futures-channel", + "futures-util", "log", "netlink-packet-core", "netlink-packet-route", - "netlink-packet-utils", "netlink-proto", "netlink-sys", - "nix 0.26.4", + "nix 0.30.1", "thiserror 1.0.69", "tokio", ] @@ -7651,7 +7643,7 @@ dependencies = [ "errno", "libc", "linux-raw-sys", - "windows-sys 0.61.2", + "windows-sys 0.60.2", ] [[package]] @@ -7756,8 +7748,8 @@ dependencies = [ [[package]] name = "rw-stream-sink" -version = "0.4.0" -source = "git+https://github.com/sigp/rust-libp2p.git?rev=defcaf1a78cf5b70a723b3fee0e0be051c1dbd88#defcaf1a78cf5b70a723b3fee0e0be051c1dbd88" +version = "0.5.0" +source = "git+https://github.com/libp2p/rust-libp2p.git#f4cf4bf79b710c7502969eeab8343191ec63c956" dependencies = [ "futures", "pin-project", @@ -7946,6 +7938,12 @@ dependencies = [ "pest", ] +[[package]] +name = "send_wrapper" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f638d531eccd6e23b980caf34876660d38e265409d8e99b397ab71eb3612fad0" + [[package]] name = "sensitive_url" version = "0.1.0" @@ -8346,9 +8344,9 @@ dependencies = [ [[package]] name = "socket2" -version = "0.6.1" +version = "0.6.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "17129e116933cf371d018bb80ae557e889637989d8638274fb25622827b03881" +checksum = "3a766e1110788c36f4fa1c2b71b387a7815aa65f88ce0229841826633d93723e" dependencies = [ "libc", "windows-sys 0.60.2", @@ -8384,9 +8382,9 @@ dependencies = [ [[package]] name = "ssz_types" -version = "0.14.0" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fc20a89bab2dabeee65e9c9eb96892dc222c23254b401e1319b85efd852fa31" +checksum = "d625e4de8e0057eefe7e0b1510ba1dd7adf10cd375fad6cc7fcceac7c39623c9" dependencies = [ "arbitrary", "context_deserialize", @@ -8622,9 +8620,9 @@ dependencies = [ [[package]] name = "system-configuration" -version = "0.6.1" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" +checksum = "a13f3d0daba03132c0aa9767f98351b3488edc2c100cda2d2ec2b04f3d8d3c8b" dependencies = [ "bitflags 2.10.0", "core-foundation 0.9.4", @@ -8696,7 +8694,7 @@ dependencies = [ "getrandom 0.3.4", "once_cell", "rustix", - "windows-sys 0.61.2", + "windows-sys 0.60.2", ] [[package]] @@ -8927,7 +8925,7 @@ dependencies = [ "parking_lot", "pin-project-lite", "signal-hook-registry", - "socket2 0.6.1", + "socket2 0.6.3", "tokio-macros", "tracing", "windows-sys 0.61.2", @@ -9151,9 +9149,9 @@ checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" [[package]] name = "tracing" -version = "0.1.43" +version = "0.1.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d15d90a0b5c19378952d479dc858407149d7bb45a14de0142f6c534b16fc647" +checksum = "63e71662fa4b2a2c3a26f570f037eb95bb1f85397f3cd8076caed2f026a6d100" dependencies = [ "log", "pin-project-lite", @@ -9186,9 +9184,9 @@ dependencies = [ [[package]] name = "tracing-core" -version = "0.1.35" +version = "0.1.36" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "7a04e24fab5c89c6a36eb8558c9656f30d81de51dfa4d3b45f26b21d61fa0a6c" +checksum = "db97caf9d906fbde555dd62fa95ddba9eecfd14cb388e4f491a66d74cd5fb79a" dependencies = [ "once_cell", "valuable", @@ -10015,7 +10013,7 @@ version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" dependencies = [ - "windows-sys 0.61.2", + "windows-sys 0.60.2", ] [[package]] @@ -10026,12 +10024,14 @@ checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows" -version = "0.53.0" +version = "0.62.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efc5cf48f83140dcaab716eeaea345f9e93d0018fb81162753a3f76c3397b538" +checksum = "527fadee13e0c05939a6a05d5bd6eec6cd2e3dbd648b9f8e447c6518133d8580" dependencies = [ - "windows-core 0.53.0", - "windows-targets 0.52.6", + "windows-collections", + "windows-core", + "windows-future", + "windows-numerics", ] [[package]] @@ -10047,13 +10047,12 @@ dependencies = [ ] [[package]] -name = "windows-core" -version = "0.53.0" +name = "windows-collections" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9dcc5b895a6377f1ab9fa55acedab1fd5ac0db66ad1e6c7f47e28a22e446a5dd" +checksum = "23b2d95af1a8a14a3c7367e1ed4fc9c20e0a26e79551b1454d72583c97cc6610" dependencies = [ - "windows-result 0.1.2", - "windows-targets 0.52.6", + "windows-core", ] [[package]] @@ -10065,10 +10064,21 @@ dependencies = [ "windows-implement", "windows-interface", "windows-link", - "windows-result 0.4.1", + "windows-result", "windows-strings", ] +[[package]] +name = "windows-future" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1d6f90251fe18a279739e78025bd6ddc52a7e22f921070ccdc67dde84c605cb" +dependencies = [ + "windows-core", + "windows-link", + "windows-threading", +] + [[package]] name = "windows-implement" 
version = "0.60.2" @@ -10098,12 +10108,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" [[package]] -name = "windows-result" -version = "0.1.2" +name = "windows-numerics" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e383302e8ec8515204254685643de10811af0ed97ea37210dc26fb0032647f8" +checksum = "6e2e40844ac143cdb44aead537bbf727de9b044e107a0f1220392177d15b0f26" dependencies = [ - "windows-targets 0.52.6", + "windows-core", + "windows-link", ] [[package]] @@ -10217,6 +10228,15 @@ dependencies = [ "windows_x86_64_msvc 0.53.1", ] +[[package]] +name = "windows-threading" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3949bd5b99cafdf1c7ca86b43ca564028dfe27d66958f2470940f73d86d75b37" +dependencies = [ + "windows-link", +] + [[package]] name = "windows_aarch64_gnullvm" version = "0.48.5" diff --git a/Cargo.toml b/Cargo.toml index db6853d44d..1f58c322f1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -276,6 +276,3 @@ debug = true [patch.crates-io] quick-protobuf = { git = "https://github.com/sigp/quick-protobuf.git", rev = "87c4ccb9bb2af494de375f5f6c62850badd26304" } -[patch."https://github.com/libp2p/rust-libp2p.git"] -libp2p = { git = "https://github.com/sigp/rust-libp2p.git", rev = "defcaf1a78cf5b70a723b3fee0e0be051c1dbd88" } -libp2p-mplex = { git = "https://github.com/sigp/rust-libp2p.git", rev = "defcaf1a78cf5b70a723b3fee0e0be051c1dbd88" } diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index e14c7c047f..f3861ac727 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -22,7 +22,12 @@ use crate::data_availability_checker::{ Availability, AvailabilityCheckError, AvailableBlock, AvailableBlockData, DataAvailabilityChecker, DataColumnReconstructionResult, }; -use 
crate::data_column_verification::{GossipDataColumnError, GossipVerifiedDataColumn}; +use crate::data_column_verification::{ + GossipDataColumnError, GossipPartialDataColumnError, GossipVerifiedDataColumn, + GossipVerifiedPartialDataColumnHeader, KzgVerifiedCustodyPartialDataColumn, + KzgVerifiedPartialDataColumn, PartialColumnVerificationResult, + validate_partial_data_column_sidecar_for_gossip, +}; use crate::early_attester_cache::EarlyAttesterCache; use crate::envelope_times_cache::EnvelopeTimesCache; use crate::errors::{BeaconChainError as Error, BlockProductionError}; @@ -54,6 +59,7 @@ use crate::observed_block_producers::ObservedBlockProducers; use crate::observed_data_sidecars::ObservedDataSidecars; use crate::observed_operations::{ObservationOutcome, ObservedOperations}; use crate::observed_slashable::ObservedSlashable; +use crate::partial_data_column_assembler::PartialMergeResult; use crate::payload_bid_verification::payload_bid_cache::GossipVerifiedPayloadBidCache; #[cfg(not(test))] use crate::payload_envelope_streamer::{EnvelopeRequestSource, launch_payload_envelope_stream}; @@ -552,6 +558,9 @@ impl FinalizationAndCanonicity { } } +type ProcessedPartialColumnStatus = + Option<(AvailabilityProcessingStatus, PartialMergeResult)>; + impl BeaconChain { /// Checks if a block is finalized. /// The finalization check is done with the block slot. 
The block root is used to verify that @@ -2297,6 +2306,59 @@ impl BeaconChain { }) } + pub fn verify_partial_data_column_header_for_gossip( + &self, + block_root: Hash256, + data_column_header: PartialDataColumnHeader, + ) -> Result, GossipPartialDataColumnError> + { + metrics::inc_counter(&metrics::PARTIAL_DATA_COLUMN_SIDECAR_HEADER_PROCESSING_REQUESTS); + let _timer = metrics::start_timer( + &metrics::PARTIAL_DATA_COLUMN_SIDECAR_HEADER_GOSSIP_VERIFICATION_TIMES, + ); + let Some(assembler) = self.data_availability_checker.partial_assembler() else { + return Err(GossipPartialDataColumnError::PartialColumnsDisabled); + }; + if let Some(cached_header) = assembler.get_header(&block_root) { + return if *cached_header == data_column_header { + metrics::inc_counter(&metrics::PARTIAL_DATA_COLUMN_SIDECAR_HEADER_PROCESSING_DUPES); + Ok(GossipVerifiedPartialDataColumnHeader::new_from_cached( + cached_header, + )) + } else { + Err(GossipPartialDataColumnError::HeaderMismatches) + }; + } + + GossipVerifiedPartialDataColumnHeader::new(block_root, data_column_header, self).inspect( + |_| { + metrics::inc_counter( + &metrics::PARTIAL_DATA_COLUMN_SIDECAR_HEADER_PROCESSING_SUCCESSES, + ); + }, + ) + } + + #[instrument(skip_all, level = "trace")] + pub fn verify_partial_data_column_sidecar_for_gossip( + self: &Arc, + data_column_sidecar: Box>, + seen_timestamp: Duration, + ) -> PartialColumnVerificationResult { + metrics::inc_counter(&metrics::PARTIAL_DATA_COLUMN_SIDECAR_PROCESSING_REQUESTS); + let _timer = + metrics::start_timer(&metrics::PARTIAL_DATA_COLUMN_SIDECAR_GOSSIP_VERIFICATION_TIMES); + let ret = validate_partial_data_column_sidecar_for_gossip( + data_column_sidecar, + self, + seen_timestamp, + ); + if matches!(ret, PartialColumnVerificationResult::Ok { .. 
}) { + metrics::inc_counter(&metrics::PARTIAL_DATA_COLUMN_SIDECAR_PROCESSING_SUCCESSES); + } + ret + } + #[instrument(skip_all, level = "trace")] pub fn verify_blob_sidecar_for_gossip( self: &Arc, @@ -3128,6 +3190,7 @@ impl BeaconChain { /// Cache the data columns in the processing cache, process it, then evict it from the cache if it was /// imported or errors. + /// Only accepts full columns. Partials are handled via PartialDataColumnAssembler. #[instrument(skip_all, level = "debug")] pub async fn process_gossip_data_columns( self: &Arc, @@ -3169,6 +3232,93 @@ impl BeaconChain { .await } + /// Process a gossip-verified partial data column by attempting to merge it in the assembler. + /// Returns the merge result which indicates if a column was completed. + #[instrument(skip_all, level = "debug")] + pub async fn process_gossip_partial_data_column( + self: &Arc, + verified_partial: KzgVerifiedPartialDataColumn, + verified_header: GossipVerifiedPartialDataColumnHeader, + slot: Slot, + ) -> Result, BlockError> { + let block_root = verified_partial.block_root(); + let partial = verified_partial.as_data_column(); + let index_str = partial.index.to_string(); + metrics::inc_counter_vec_by( + &metrics::BEACON_PARTIAL_MESSAGE_CELLS_RECEIVED_TOTAL, + &[index_str.as_str()], + partial.sidecar.column.len() as u64, + ); + + // Check if we have custody of this column + let sampling_columns = + self.sampling_columns_for_epoch(slot.epoch(T::EthSpec::slots_per_epoch())); + let verified_partial = if sampling_columns.contains(&partial.index) { + KzgVerifiedCustodyPartialDataColumn::from_asserted_custody(verified_partial) + } else { + return Ok(None); + }; + + // If this block has already been imported to forkchoice it must have been available + if self + .canonical_head + .fork_choice_read_lock() + .contains_block(&block_root) + { + return Err(BlockError::DuplicateFullyImported(block_root)); + } + + let Some(assembler) = self.data_availability_checker.partial_assembler() else { + // 
Partial messages are apparently not activated + return Ok(None); + }; + + // Merge the partial into the assembler + let merge_result = assembler + .merge_partials( + block_root, + vec![verified_partial], + verified_header.into_header(), + ) + .ok_or_else(|| BlockError::InternalError("No assembly found for block".to_string()))?; + + metrics::inc_counter_vec_by( + &metrics::BEACON_PARTIAL_MESSAGE_USEFUL_CELLS_TOTAL, + &[index_str.as_str()], + merge_result.added_cells as u64, + ); + + let availability = if !merge_result.full_columns.is_empty() { + metrics::inc_counter_vec_by( + &metrics::BEACON_PARTIAL_MESSAGE_COLUMN_COMPLETIONS_TOTAL, + &[index_str.as_str()], + merge_result.full_columns.len() as u64, + ); + + self.emit_sse_data_column_sidecar_events( + &block_root, + merge_result + .full_columns + .iter() + .map(|column| column.as_data_column()), + ); + + let availability = self + .data_availability_checker + .put_kzg_verified_custody_data_columns( + block_root, + merge_result.full_columns.clone(), + )?; + + self.process_availability(slot, availability, || Ok(())) + .await? + } else { + AvailabilityProcessingStatus::MissingComponents(slot, block_root) + }; + + Ok(Some((availability, merge_result))) + } + /// Cache the blobs in the processing cache, process it, then evict it from the cache if it was /// imported or errors. #[instrument(skip_all, level = "debug")] @@ -3624,6 +3774,8 @@ impl BeaconChain { /// Checks if the provided data column can make any cached blocks available, and imports immediately /// if so, otherwise caches the data column in the data availability checker. + /// Check gossip data columns for availability and import. Only accepts full columns. + /// Partials are handled separately via PartialDataColumnAssembler. async fn check_gossip_data_columns_availability_and_import( self: &Arc, slot: Slot, @@ -3774,13 +3926,13 @@ impl BeaconChain { // from RPC. 
for header in custody_columns .into_iter() - .map(|c| c.signed_block_header.clone()) + .map(|c| &c.signed_block_header) .unique() { // Return an error if *any* header signature is invalid, we do not want to import this // list of blobs into the DA checker. However, we will process any valid headers prior // to the first invalid header in the slashable cache & slasher. - verify_header_signature::(self, &header)?; + verify_header_signature::(self, header)?; slashable_cache .observe_slashable( @@ -3790,7 +3942,7 @@ impl BeaconChain { ) .map_err(|e| BlockError::BeaconChainError(Box::new(e.into())))?; if let Some(slasher) = self.slasher.as_ref() { - slasher.accept_block_header(header); + slasher.accept_block_header(header.clone()); } } Ok(()) diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 74141dc64a..19eb1aa877 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -930,6 +930,7 @@ where CanonicalHead::new(fork_choice, Arc::new(head_snapshot), head_payload_status); let shuffling_cache_size = self.chain_config.shuffling_cache_size; let complete_blob_backfill = self.chain_config.complete_blob_backfill; + let enable_partial_columns = self.chain_config.enable_partial_columns; // Calculate the weak subjectivity point in which to backfill blocks to. 
let genesis_backfill_slot = if self.chain_config.genesis_backfill { @@ -1063,6 +1064,7 @@ where self.kzg.clone(), Arc::new(custody_context), self.spec, + enable_partial_columns, ) .map_err(|e| format!("Error initializing DataAvailabilityChecker: {:?}", e))?, ), diff --git a/beacon_node/beacon_chain/src/chain_config.rs b/beacon_node/beacon_chain/src/chain_config.rs index e9cc4f24e9..b2c017a469 100644 --- a/beacon_node/beacon_chain/src/chain_config.rs +++ b/beacon_node/beacon_chain/src/chain_config.rs @@ -121,6 +121,8 @@ pub struct ChainConfig { pub ignore_ws_check: bool, /// Disable the getBlobs optimisation to fetch blobs from the EL mempool. pub disable_get_blobs: bool, + /// Whether to enable partial data column support. + pub enable_partial_columns: bool, /// The node's custody type, determining how many data columns to custody and sample. pub node_custody_type: NodeCustodyType, } @@ -164,6 +166,7 @@ impl Default for ChainConfig { invalid_block_roots: HashSet::new(), ignore_ws_check: false, disable_get_blobs: false, + enable_partial_columns: false, node_custody_type: NodeCustodyType::Fullnode, } } diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs index 4372efa809..9d8b76aaed 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -5,6 +5,7 @@ use crate::block_verification_types::{AvailabilityPendingExecutedBlock, Availabl use crate::data_availability_checker::overflow_lru_cache::{ DataAvailabilityCheckerInner, ReconstructColumnsDecision, }; +use crate::partial_data_column_assembler::{AssemblyColumn, PartialDataColumnAssembler}; use crate::{BeaconChain, BeaconChainTypes, BlockProcessStatus, CustodyContext, metrics}; use educe::Educe; use kzg::Kzg; @@ -17,10 +18,11 @@ use std::sync::Arc; use std::time::Duration; use task_executor::TaskExecutor; use tracing::{debug, error, instrument}; -use 
types::data::{BlobIdentifier, FixedBlobSidecarList}; +use types::data::{BlobIdentifier, FixedBlobSidecarList, PartialDataColumn}; use types::{ BlobSidecar, BlobSidecarList, BlockImportSource, ChainSpec, DataColumnSidecar, - DataColumnSidecarList, Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot, + DataColumnSidecarList, Epoch, EthSpec, Hash256, PartialDataColumnSidecarError, + PartialDataColumnSidecarRef, SignedBeaconBlock, Slot, new_non_zero_usize, }; mod error; @@ -36,7 +38,6 @@ use crate::metrics::{ }; use crate::observed_data_sidecars::ObservationStrategy; pub use error::{Error as AvailabilityCheckError, ErrorCategory as AvailabilityCheckErrorCategory}; -use types::new_non_zero_usize; /// The LRU Cache stores `PendingComponents`, which store block and its associated blob data: /// @@ -78,6 +79,7 @@ const OVERFLOW_LRU_CAPACITY_NON_ZERO: NonZeroUsize = new_non_zero_usize(32); pub struct DataAvailabilityChecker { complete_blob_backfill: bool, availability_cache: Arc>, + partial_assembler: Option>>, slot_clock: T::SlotClock, kzg: Arc, custody_context: Arc>, @@ -120,14 +122,23 @@ impl DataAvailabilityChecker { kzg: Arc, custody_context: Arc>, spec: Arc, + enable_partial_columns: bool, ) -> Result { let inner = DataAvailabilityCheckerInner::new( OVERFLOW_LRU_CAPACITY_NON_ZERO, custody_context.clone(), spec.clone(), )?; + let partial_assembler = if enable_partial_columns { + Some(Arc::new(PartialDataColumnAssembler::new( + OVERFLOW_LRU_CAPACITY_NON_ZERO, + ))) + } else { + None + }; Ok(Self { complete_blob_backfill, + partial_assembler, availability_cache: Arc::new(inner), slot_clock, kzg, @@ -140,6 +151,10 @@ impl DataAvailabilityChecker { &self.custody_context } + pub fn partial_assembler(&self) -> Option<&Arc>> { + self.partial_assembler.as_ref() + } + /// Checks if the block root is currently in the availability cache awaiting import because /// of missing components. 
/// @@ -172,19 +187,104 @@ impl DataAvailabilityChecker { }) } - /// Check if the exact data column is in the availability cache. - pub fn is_data_column_cached( - &self, - block_root: &Hash256, - data_column: &DataColumnSidecar, - ) -> bool { - self.availability_cache - .peek_pending_components(block_root, |components| { - components.is_some_and(|components| { - let cached_column_opt = components.get_cached_data_column(*data_column.index()); - cached_column_opt.is_some_and(|cached| *cached == *data_column) + /// Filter out all cells that are already cached for the given `block_root`. + /// Returns None if all cells are already cached. + /// Returns an error if any cells or proofs mismatch the cached cells. + pub fn missing_cells_for_column_sidecar<'a>( + &'_ self, + data_column: &'a DataColumnSidecar, + ) -> Result>, MissingCellsError> { + let block_root = data_column.block_root(); + let column_index = *data_column.index(); + + // Check DA checker cache first - if we have a full column cached, nothing is missing. + // We return Some(true) from the peek if it exists and matches, Some(false) if it exists but + // does not match, and None if it doesn't exist. 
+ if let Some(matches) = + self.availability_cache + .peek_pending_components(&block_root, |components| { + components + .and_then(|c| c.get_cached_data_column(column_index)) + .map(|cached| *cached == *data_column) }) + { + return if matches { + Ok(None) + } else { + Err(MissingCellsError::MismatchesCachedColumn) + }; + } + + // Check assembler for partial columns + if let Some(assembler) = &self.partial_assembler { + match assembler.get_partial(&block_root, column_index) { + Some(AssemblyColumn::Incomplete(cached_partial)) => { + return data_column.try_filter_to_partial_ref(|idx, cell, proof| { + match cached_partial.as_data_column().sidecar.get(idx) { + None => Ok(true), + Some((cached_cell, cached_proof)) => { + if cell == cached_cell && proof == cached_proof { + Ok(false) + } else { + Err(MissingCellsError::MismatchesCachedColumn) + } + } + } + }); + } + // This can happen if the column has been marked as completed already but has not + // reached the availability cache yet. + Some(AssemblyColumn::Complete(_)) => { + return Ok(None); + } + None => { + // No cached data, all cells are "missing" (new data we want) + } + } + } + // No cached data, all cells are "missing" (new data we want) + data_column.try_filter_to_partial_ref(|_, _, _| Ok(true)) + } + + /// Filter out all cells that are already cached for the given `block_root`. + /// Returns input for kzg verification, or None if all cells are already cached. + pub fn missing_cells_for_partial_column_sidecar<'a>( + &'_ self, + partial_data_column: &'a PartialDataColumn, + ) -> Result>, MissingCellsError> { + let column_index = partial_data_column.index; + let block_root = partial_data_column.block_root; + + // Check DA checker cache first - if we have a full column cached, nothing is missing. 
+ if self + .availability_cache + .peek_pending_components(&block_root, |components| { + components.is_some_and(|c| c.get_cached_data_column(column_index).is_some()) }) + { + return Ok(None); + } + + // Check assembler for partial columns + if let Some(assembler) = &self.partial_assembler { + match assembler.get_partial(&block_root, column_index) { + Some(AssemblyColumn::Incomplete(cached_partial)) => { + return Ok(partial_data_column.sidecar.filter(|idx| { + cached_partial.as_data_column().sidecar.get(idx).is_none() + })?); + } + // This can happen if the column has been marked as completed already but has not + // reached the availability cache yet. + Some(AssemblyColumn::Complete(_)) => { + return Ok(None); + } + None => { + // No cached data, all cells are "missing" (new data we want) + } + } + } + // No cached data, all cells are "missing" (new data we want) + Ok(partial_data_column.sidecar.filter(|_| true)?) } /// Get a blob from the availability cache. @@ -295,7 +395,8 @@ impl DataAvailabilityChecker { /// have a block cached, return the `Availability` variant triggering block import. /// Otherwise cache the data column sidecar. /// - /// This should only accept gossip verified data columns, so we should not have to worry about dupes. + /// This should only accept gossip verified full data columns (not partials). + /// Partials are assembled in PartialDataColumnAssembler. #[instrument(skip_all, level = "trace")] pub fn put_gossip_verified_data_columns< O: ObservationStrategy, @@ -316,10 +417,18 @@ impl DataAvailabilityChecker { .map(|c| KzgVerifiedCustodyDataColumn::from_asserted_custody(c.into_inner())) .collect::>(); + if let Some(assembler) = &self.partial_assembler { + for column in &custody_columns { + assembler.mark_as_complete(block_root, column); + } + } + self.availability_cache .put_kzg_verified_data_columns(block_root, custody_columns) } + /// Put KZG-verified full custody data columns. + /// Only accepts full columns. 
Partials are assembled in PartialDataColumnAssembler. #[instrument(skip_all, level = "trace")] pub fn put_kzg_verified_custody_data_columns< I: IntoIterator>, @@ -338,6 +447,12 @@ impl DataAvailabilityChecker { &self, executed_block: AvailabilityPendingExecutedBlock, ) -> Result, AvailabilityCheckError> { + let block = executed_block.as_block(); + if let Some(assembler) = &self.partial_assembler + && let Ok(header) = block.try_into() + { + assembler.init(executed_block.import_data.block_root, Arc::new(header)); + } self.availability_cache.put_executed_block(executed_block) } @@ -349,6 +464,11 @@ impl DataAvailabilityChecker { block: Arc>, source: BlockImportSource, ) -> Result<(), Error> { + if let Some(assembler) = &self.partial_assembler + && let Ok(header) = block.as_ref().try_into() + { + assembler.init(block_root, Arc::new(header)); + } self.availability_cache .put_pre_execution_block(block_root, block, source) } @@ -568,8 +688,12 @@ pub fn start_availability_cache_maintenance_service( // this cache only needs to be maintained if deneb is configured if chain.spec.deneb_fork_epoch.is_some() { let overflow_cache = chain.data_availability_checker.availability_cache.clone(); + let partial_assembler = chain.data_availability_checker.partial_assembler.clone(); executor.spawn( - async move { availability_cache_maintenance_service(chain, overflow_cache).await }, + async move { + availability_cache_maintenance_service(chain, overflow_cache, partial_assembler) + .await + }, "availability_cache_service", ); } else { @@ -580,6 +704,7 @@ pub fn start_availability_cache_maintenance_service( async fn availability_cache_maintenance_service( chain: Arc>, overflow_cache: Arc>, + partial_assembler: Option>>, ) { let epoch_duration = chain.slot_clock.slot_duration() * T::EthSpec::slots_per_epoch() as u32; loop { @@ -631,6 +756,9 @@ async fn availability_cache_maintenance_service( if let Err(e) = overflow_cache.do_maintenance(cutoff_epoch) { error!(error = ?e,"Failed to maintain 
availability cache"); } + if let Some(assembler) = &partial_assembler { + assembler.do_maintenance(cutoff_epoch); + } } None => { error!("Failed to read slot clock"); @@ -887,6 +1015,21 @@ impl MaybeAvailableBlock { } } +pub enum MissingCellsError { + /// The provided column is not matching with the existing cached column. + /// This is to be treated as a KZG verification failure. + MismatchesCachedColumn, + /// An error occurred while operating on the column. It is possibly malformed. + /// This is not expected to happen for columns passing basic validation. + UnexpectedError(PartialDataColumnSidecarError), +} + +impl From for MissingCellsError { + fn from(e: PartialDataColumnSidecarError) -> Self { + Self::UnexpectedError(e) + } +} + #[cfg(test)] mod test { use super::*; @@ -1254,6 +1397,7 @@ mod test { kzg, custody_context, spec, + true, ) .expect("should initialise data availability checker") } diff --git a/beacon_node/beacon_chain/src/data_column_verification.rs b/beacon_node/beacon_chain/src/data_column_verification.rs index a24dbd8942..8ea3c792f4 100644 --- a/beacon_node/beacon_chain/src/data_column_verification.rs +++ b/beacon_node/beacon_chain/src/data_column_verification.rs @@ -1,7 +1,10 @@ use crate::block_verification::{ BlockSlashInfo, get_validator_pubkey_cache, process_block_slash_info, }; -use crate::kzg_utils::{reconstruct_data_columns, validate_data_columns}; +use crate::data_availability_checker::MissingCellsError; +use crate::kzg_utils::{ + reconstruct_data_columns, validate_full_data_columns, validate_partial_data_columns, +}; use crate::observed_data_sidecars::{ Error as ObservedDataSidecarsError, ObservationKey, ObservationStrategy, Observe, }; @@ -18,10 +21,14 @@ use std::marker::PhantomData; use std::sync::Arc; use std::time::Duration; use tracing::{debug, instrument}; -use types::data::ColumnIndex; +use tree_hash::TreeHash; +use types::data::{ + ColumnIndex, PartialDataColumn, PartialDataColumnHeader, PartialDataColumnSidecar, + 
PartialDataColumnSidecarError, +}; use types::{ BeaconStateError, ChainSpec, DataColumnSidecar, DataColumnSidecarFulu, DataColumnSubnetId, - EthSpec, Hash256, Slot, + EthSpec, Hash256, PartialDataColumnSidecarRef, SignedBeaconBlockHeader, Slot, }; /// An error occurred while validating a gossip data column. @@ -63,6 +70,13 @@ pub enum GossipDataColumnError { /// /// The data column sidecar is invalid and the peer is faulty. InvalidKzgProof(kzg::Error), + /// The column mismatches the cached (possibly partial) column. + /// This is equivalent to failed kzg verification. + /// + /// ## Peer scoring + /// + /// The data column sidecar is invalid and the peer is faulty. + MismatchesCachedColumn, /// The column was gossiped over an incorrect subnet. /// /// ## Peer scoring @@ -115,6 +129,7 @@ pub enum GossipDataColumnError { /// We cannot process the columns without validating its parent, the peer isn't necessarily faulty. ParentUnknown { parent_root: Hash256, + slot: Slot, }, /// The column conflicts with finalization, no need to propagate. /// @@ -199,25 +214,88 @@ impl From for GossipDataColumnError { } } +#[derive(Debug)] +pub enum GossipPartialDataColumnError { + GossipDataColumnError(GossipDataColumnError), + /// Partial messages are disabled and we can not validate them. + /// + /// ## Peer scoring + /// A peer sent us a partial message even though we did not advertize support for it, penalize + /// it + PartialColumnsDisabled, + /// There was an unexpected error while performing an operation on the partial data column. + InternalError(PartialDataColumnSidecarError), + /// The partial data column does not contain a header, and we do not have it cached. + /// + /// ## Peer scoring + /// The peer SHOULD send us the header on the first partial message, but is not required to. + /// Still, the peer incorrectly assumed that we have the header, and sent us data we can not + /// process due to that. Penalize it slightly. 
+ MissingHeader, + /// The partial data column header does not match the valid one we have already cached. + /// + /// ## Peer scoring + /// The column sidecar is invalid and the peer is faulty + HeaderMismatches, + /// The partial data column header block root does not match the group id. + /// + /// ## Peer scoring + /// The column sidecar is invalid and the peer is faulty + HeaderIncorrectRoot { + group_id: Hash256, + header_hash: Hash256, + }, + /// The partial message has neither a header nor cells. + /// + /// ## Peer scoring + /// The column sidecar is invalid and the peer is faulty + EmptyMessage, + /// The partial message has a count of proofs and/or cells that is inconsistent with the bitmap. + /// + /// ## Peer scoring + /// The column sidecar is invalid and the peer is faulty + InconsistentPresentCount { + bitmap_popcount: usize, + cells_len: usize, + proofs_len: usize, + }, + /// The partial message has a bitmap length that is inconsistent with the number of commitments. + /// + /// ## Peer scoring + /// The column sidecar is invalid and the peer is faulty + InconsistentCommitmentsLength { + bitmap_len: usize, + commitments_len: usize, + }, +} + +impl From for GossipPartialDataColumnError { + fn from(e: GossipDataColumnError) -> Self { + GossipPartialDataColumnError::GossipDataColumnError(e) + } +} + +impl From for GossipPartialDataColumnError { + fn from(e: BeaconChainError) -> Self { + GossipDataColumnError::from(e).into() + } +} + +impl From for GossipPartialDataColumnError { + fn from(e: BeaconStateError) -> Self { + GossipDataColumnError::from(e).into() + } +} + /// A wrapper around a `DataColumnSidecar` that indicates it has been approved for re-gossiping on /// the p2p network. 
-#[derive(Debug)] +#[derive(Debug, Clone)] pub struct GossipVerifiedDataColumn { block_root: Hash256, data_column: KzgVerifiedDataColumn, _phantom: PhantomData, } -impl Clone for GossipVerifiedDataColumn { - fn clone(&self) -> Self { - Self { - block_root: self.block_root, - data_column: self.data_column.clone(), - _phantom: PhantomData, - } - } -} - impl GossipVerifiedDataColumn { pub fn new( column_sidecar: Arc>, @@ -262,22 +340,29 @@ impl GossipVerifiedDataColumn // In this case, we should accept it for gossip propagation. verify_is_unknown_sidecar(chain, &column_sidecar)?; - if chain + match chain .data_availability_checker - .is_data_column_cached(&column_sidecar.block_root(), &column_sidecar) + .missing_cells_for_column_sidecar(&column_sidecar) { - // Observe this data column so we don't process it again. - if O::observe() { - observe_gossip_data_column(&column_sidecar, chain)?; + Ok(Some(_)) => Ok(Self { + block_root: column_sidecar.block_root(), + data_column: KzgVerifiedDataColumn::from_execution_verified(column_sidecar), + _phantom: Default::default(), + }), + Ok(None) => { + // Observe this data column so we don't process it again. + if O::observe() { + observe_gossip_data_column(&column_sidecar, chain)?; + } + Err(GossipDataColumnError::PriorKnownUnpublished) + } + Err(MissingCellsError::MismatchesCachedColumn) => { + Err(GossipDataColumnError::MismatchesCachedColumn) + } + Err(MissingCellsError::UnexpectedError(_)) => { + todo!("handle unexpected error") } - return Err(GossipDataColumnError::PriorKnownUnpublished); } - - Ok(Self { - block_root: column_sidecar.block_root(), - data_column: KzgVerifiedDataColumn::from_execution_verified(column_sidecar), - _phantom: Default::default(), - }) } /// Create a `GossipVerifiedDataColumn` from `DataColumnSidecar` for testing ONLY. @@ -316,24 +401,14 @@ impl GossipVerifiedDataColumn } /// Wrapper over a `DataColumnSidecar` for which we have completed kzg verification. 
-#[derive(Debug, Educe, Clone, Encode)] +#[derive(Debug, Educe, Clone)] #[educe(PartialEq, Eq)] -#[ssz(struct_behaviour = "transparent")] pub struct KzgVerifiedDataColumn { data: Arc>, - #[ssz(skip_serializing, skip_deserializing)] seen_timestamp: Duration, } impl KzgVerifiedDataColumn { - pub fn new( - data_column: Arc>, - kzg: &Kzg, - seen_timestamp: Duration, - ) -> Result, KzgError)> { - verify_kzg_for_data_column(data_column, kzg, seen_timestamp) - } - /// Mark a data column as KZG verified. Caller must ONLY use this on columns constructed /// from EL blobs. pub fn from_execution_verified(data_column: Arc>) -> Self { @@ -381,6 +456,131 @@ impl KzgVerifiedDataColumn { } } +/// Wrapper over a `VerifiablePartialDataColumn` for which we have completed kzg verification. +#[derive(Debug, Educe, Clone)] +#[educe(PartialEq, Eq)] +pub struct KzgVerifiedPartialDataColumn { + data: Arc>, + latest_cell_timestamp: Duration, +} + +impl KzgVerifiedPartialDataColumn { + /// Create a `KzgVerifiedPartialDataColumn` for testing ONLY. + pub(crate) fn __new_for_testing(data_column: Arc>) -> Self { + Self { + data: data_column, + latest_cell_timestamp: timestamp_now(), + } + } + + /// Mark a partial data column as KZG verified. Caller must ONLY use this on columns constructed + /// from EL blobs. + pub fn from_execution_verified(data_column: Arc>) -> Self { + Self { + data: data_column, + latest_cell_timestamp: timestamp_now(), + } + } + + pub fn to_data_column(self) -> Arc> { + self.data + } + + pub fn as_data_column(&self) -> &PartialDataColumn { + &self.data + } + + pub fn index(&self) -> ColumnIndex { + self.data.index + } + + pub fn block_root(&self) -> Hash256 { + self.data.block_root + } +} + +/// Wrapper over a `PartialDataColumnHeader` for which we have completed gossip verification. 
+#[derive(Debug, Educe, Clone)] +#[educe(PartialEq, Eq)] +pub struct GossipVerifiedPartialDataColumnHeader { + header: Arc>, + previously_cached: bool, +} + +impl GossipVerifiedPartialDataColumnHeader { + pub fn new>( + group_id: Hash256, + header: PartialDataColumnHeader, + chain: &BeaconChain, + ) -> Result { + let column_slot = header.slot(); + if header.kzg_commitments.is_empty() { + return Err(GossipDataColumnError::UnexpectedDataColumn.into()); + } + + let header_hash = header.signed_block_header.message.canonical_root(); + if group_id != header_hash { + return Err(GossipPartialDataColumnError::HeaderIncorrectRoot { + group_id, + header_hash, + }); + } + + verify_sidecar_not_from_future_slot(chain, column_slot)?; + verify_slot_greater_than_latest_finalized_slot(chain, column_slot)?; + verify_partial_column_header_inclusion_proof(&header)?; + let parent_block = verify_parent_block_and_finalized_descendant( + header.signed_block_header.message.parent_root, + column_slot, + chain, + )?; + verify_slot_higher_than_parent(&parent_block, column_slot)?; + verify_proposer_and_signature(&header.signed_block_header, &parent_block, chain)?; + + let header = Arc::new(header); + + // Cache the valid header + let Some(assembler) = chain.data_availability_checker.partial_assembler() else { + return Err(GossipPartialDataColumnError::PartialColumnsDisabled); + }; + let newly_cached = assembler.init(group_id, header.clone()); + + chain + .observed_slashable + .write() + .observe_slashable( + column_slot, + header.signed_block_header.message.proposer_index, + header_hash, + ) + .map_err(BeaconChainError::from)?; + + Ok(Self { + header, + previously_cached: !newly_cached, + }) + } + + pub fn new_from_cached(header: Arc>) -> Self { + Self { + header, + previously_cached: true, + } + } + + pub fn was_cached(&self) -> bool { + self.previously_cached + } + + pub fn as_header(&self) -> &PartialDataColumnHeader { + &self.header + } + + pub fn into_header(self) -> Arc> { + self.header + 
} +} + pub type CustodyDataColumnList = VariableList, ::NumberOfColumns>; @@ -414,13 +614,12 @@ impl CustodyDataColumn { } } -/// Data column that we must custody and has completed kzg verification -#[derive(Debug, Educe, Clone, Encode)] +/// Data column that we must custody and has completed kzg verification. +/// Wraps a full `DataColumnSidecar`. +#[derive(Debug, Educe, Clone)] #[educe(PartialEq, Eq)] -#[ssz(struct_behaviour = "transparent")] pub struct KzgVerifiedCustodyDataColumn { data: Arc>, - #[ssz(skip_serializing, skip_deserializing)] seen_timestamp: Duration, } @@ -434,19 +633,6 @@ impl KzgVerifiedCustodyDataColumn { } } - /// Verify a column already marked as custody column - pub fn new( - data_column: CustodyDataColumn, - kzg: &Kzg, - seen_timestamp: Duration, - ) -> Result, KzgError)> { - verify_kzg_for_data_column(data_column.clone_arc(), kzg, seen_timestamp)?; - Ok(Self { - data: data_column.data, - seen_timestamp, - }) - } - pub fn reconstruct_columns( kzg: &Kzg, partial_set_of_columns: &[Self], @@ -493,23 +679,211 @@ impl KzgVerifiedCustodyDataColumn { } } +/// Partial data column that we must custody and has completed kzg verification. +/// Wraps a `VerifiablePartialDataColumn`. +#[derive(Debug, Educe, Clone)] +#[educe(PartialEq, Eq)] +pub struct KzgVerifiedCustodyPartialDataColumn { + data: Arc>, + latest_cell_timestamp: Duration, +} + +impl KzgVerifiedCustodyPartialDataColumn { + /// Mark a partial column as custody column. Caller must ensure that our current custody requirements + /// include this column + pub fn from_asserted_custody(kzg_verified: KzgVerifiedPartialDataColumn) -> Self { + Self { + latest_cell_timestamp: kzg_verified.latest_cell_timestamp, + data: kzg_verified.to_data_column(), + } + } + + pub fn into_inner(self) -> Arc> { + self.data + } + + pub fn as_data_column(&self) -> &PartialDataColumn { + &self.data + } + + pub fn index(&self) -> ColumnIndex { + self.data.index + } + + /// Merge two verified partial data columns. 
+ /// + /// Each column must be internally consistent. Additionally, the columns to be merged must have + /// the same block root and index. + /// An error is returned if the columns are internally inconsistent or incompatible for merging. + /// + /// If both columns contain the same cell, the cell from `self` is used - however, as they are + /// KZG verified, they will be the same. + pub fn merge(&self, other: &Self) -> Result { + let self_sidecar = &self.data.sidecar; + let other_sidecar = &other.data.sidecar; + + // Check that each sidecar is internally consistent by checking the lengths. + self_sidecar.verify_len()?; + other_sidecar.verify_len()?; + if self.data.block_root != other.data.block_root || self.data.index != other.data.index { + return Err(PartialDataColumnSidecarError::ConflictingData); + } + if self_sidecar.cells_present_bitmap.len() != other_sidecar.cells_present_bitmap.len() { + return Err(PartialDataColumnSidecarError::DifferingLengths { + lhs_len: self_sidecar.cells_present_bitmap.len(), + rhs_len: other_sidecar.cells_present_bitmap.len(), + }); + } + + let new_bitmap = self_sidecar + .cells_present_bitmap + .union(&other_sidecar.cells_present_bitmap); + let len = new_bitmap.num_set_bits(); + let mut new_column = Vec::with_capacity(len); + let mut new_proofs = Vec::with_capacity(len); + let mut self_iter = self_sidecar + .column + .iter() + .zip(self_sidecar.kzg_proofs.iter()); + let mut other_iter = other_sidecar + .column + .iter() + .zip(other_sidecar.kzg_proofs.iter()); + + for presence_bits in self_sidecar + .cells_present_bitmap + .iter() + .zip(other_sidecar.cells_present_bitmap.iter()) + { + match presence_bits { + (false, false) => {} + (true, other) => { + let (cell, proof) = self_iter + .next() + .ok_or(PartialDataColumnSidecarError::UnexpectedBounds)?; + new_column.push(cell.clone()); + new_proofs.push(*proof); + if other { + other_iter + .next() + .ok_or(PartialDataColumnSidecarError::UnexpectedBounds)?; + } + } + (false, true) => 
{ + let (cell, proof) = other_iter + .next() + .ok_or(PartialDataColumnSidecarError::UnexpectedBounds)?; + new_column.push(cell.clone()); + new_proofs.push(*proof); + } + } + } + + Ok(Self { + data: Arc::new(PartialDataColumn { + block_root: self.data.block_root, + index: self.data.index, + sidecar: PartialDataColumnSidecar { + cells_present_bitmap: new_bitmap, + column: new_column + .try_into() + .map_err(|_| PartialDataColumnSidecarError::UnexpectedBounds)?, + kzg_proofs: new_proofs + .try_into() + .map_err(|_| PartialDataColumnSidecarError::UnexpectedBounds)?, + header: if self_sidecar.header.is_some() { + self_sidecar.header.clone() + } else { + other_sidecar.header.clone() + }, + }, + }), + latest_cell_timestamp: self.latest_cell_timestamp.max(other.latest_cell_timestamp), + }) + } + + pub fn try_clone_full( + &self, + header: &PartialDataColumnHeader, + ) -> Option> { + self.data + .try_clone_full(header) + .map(|data| KzgVerifiedCustodyDataColumn { + data: Arc::new(data), + seen_timestamp: self.latest_cell_timestamp, + }) + } + + /// Try to convert the partial data column into a full one, returning None if the conversion + /// fails. + /// May clone the column if the Arc cannot be unwrapped. + pub fn try_into_full( + self, + header: &PartialDataColumnHeader, + ) -> Option> { + match Arc::try_unwrap(self.data) { + Ok(data) => data.try_into_full(header), + Err(data) => data.try_clone_full(header), + } + .map(|data| KzgVerifiedCustodyDataColumn { + data: Arc::new(data), + seen_timestamp: self.latest_cell_timestamp, + }) + } +} + /// Complete kzg verification for a `DataColumnSidecar`. /// /// Returns an error if the kzg verification check fails. 
#[instrument(skip_all, level = "debug")] pub fn verify_kzg_for_data_column( data_column: Arc>, + cells_to_verify: PartialDataColumnSidecarRef, kzg: &Kzg, seen_timestamp: Duration, ) -> Result, (Option, KzgError)> { let _timer = metrics::start_timer(&metrics::KZG_VERIFICATION_DATA_COLUMN_SINGLE_TIMES); - validate_data_columns(kzg, iter::once(&data_column))?; + let Ok(kzg_commitments) = data_column.kzg_commitments() else { + return Err(( + Some(*data_column.index()), + KzgError::InconsistentArrayLength("todo(gloas)".to_string()), + )); + }; + validate_partial_data_columns( + kzg, + iter::once((*data_column.index(), cells_to_verify)), + kzg_commitments, + )?; Ok(KzgVerifiedDataColumn { data: data_column, seen_timestamp, }) } +/// Complete kzg verification for a `VerifiablePartialDataColumn`. +/// +/// Returns an error if the kzg verification check fails. +#[instrument(skip_all, level = "debug")] +pub fn verify_kzg_for_partial_data_column( + data_column: Arc>, + cells_to_verify: PartialDataColumnSidecarRef, + header: &GossipVerifiedPartialDataColumnHeader, + kzg: &Kzg, + seen_timestamp: Duration, +) -> Result, GossipPartialDataColumnError> { + let _timer = metrics::start_timer(&metrics::KZG_VERIFICATION_DATA_COLUMN_SINGLE_TIMES); + validate_partial_data_columns( + kzg, + iter::once((data_column.index, cells_to_verify)), + header.header.kzg_commitments.as_ref(), + ) + .map_err(|(_, e)| GossipDataColumnError::InvalidKzgProof(e))?; + Ok(KzgVerifiedPartialDataColumn { + data: data_column, + latest_cell_timestamp: seen_timestamp, + }) +} + /// Complete kzg verification for a list of `DataColumnSidecar`s. /// Returns an error for the first `DataColumnSidecar`s that fails kzg verification. 
/// @@ -523,7 +897,7 @@ where I: Iterator>> + Clone, { let _timer = metrics::start_timer(&metrics::KZG_VERIFICATION_DATA_COLUMN_BATCH_TIMES); - validate_data_columns(kzg, data_column_iter)?; + validate_full_data_columns(kzg, data_column_iter)?; Ok(()) } @@ -549,30 +923,45 @@ pub fn validate_data_column_sidecar_for_gossip_fulu { + GossipDataColumnError::MismatchesCachedColumn + } + MissingCellsError::UnexpectedError(_) => todo!("handle unexpected error"), + })? + else { // Observe this data column so we don't process it again. if O::observe() { observe_gossip_data_column(&data_column, chain)?; } return Err(GossipDataColumnError::PriorKnownUnpublished); - } + }; verify_column_inclusion_proof(data_column_fulu)?; - let parent_block = verify_parent_block_and_finalized_descendant(data_column_fulu, chain)?; + let parent_block = verify_parent_block_and_finalized_descendant( + data_column_fulu.block_parent_root(), + column_slot, + chain, + )?; verify_slot_higher_than_parent(&parent_block, column_slot)?; - verify_proposer_and_signature(data_column_fulu, &parent_block, chain)?; + verify_proposer_and_signature(&data_column_fulu.signed_block_header, &parent_block, chain)?; let kzg = &chain.kzg; let seen_timestamp = chain.slot_clock.now_duration().unwrap_or_default(); - let kzg_verified_data_column = - verify_kzg_for_data_column(data_column.clone(), kzg, seen_timestamp) - .map_err(|(_, e)| GossipDataColumnError::InvalidKzgProof(e))?; + let kzg_verified_data_column = verify_kzg_for_data_column( + data_column.clone(), + cells_to_kzg_verify, + kzg, + seen_timestamp, + ) + .map_err(|(_, e)| GossipDataColumnError::InvalidKzgProof(e))?; chain .observed_slashable @@ -595,6 +984,137 @@ pub fn validate_data_column_sidecar_for_gossip_fulu( + mut column: Box>, + chain: &BeaconChain, + seen_timestamp: Duration, +) -> PartialColumnVerificationResult { + let block_root = column.block_root; + + // Remove the header (if any) to avoid wasted memory. 
+ let header = column.sidecar.header.take();
+
+ let header = if let Some(header) = header {
+ // Header was sent, so it is required to be valid
+ match chain.verify_partial_data_column_header_for_gossip(block_root, header) {
+ Ok(verified) => verified,
+ Err(err) => {
+ return PartialColumnVerificationResult::Err(err);
+ }
+ }
+ } else {
+ let Some(assembler) = chain.data_availability_checker.partial_assembler() else {
+ return PartialColumnVerificationResult::Err(
+ GossipPartialDataColumnError::PartialColumnsDisabled,
+ );
+ };
+
+ // There is no header, so we check if we have a cached one to use
+ let Some(header) = assembler
+ .get_header(&column.block_root)
+ .map(GossipVerifiedPartialDataColumnHeader::new_from_cached)
+ else {
+ return PartialColumnVerificationResult::Err(
+ GossipPartialDataColumnError::MissingHeader,
+ );
+ };
+
+ // If there was no header, there must be at least one cell.
+ if column.sidecar.column.is_empty() {
+ return PartialColumnVerificationResult::ErrWithValidHeader {
+ err: GossipPartialDataColumnError::EmptyMessage,
+ header,
+ };
+ }
+
+ header
+ };
+
+ // The number of cells and proofs must match the population count of the bitmap. 
+ let bitmap_popcount = column.sidecar.cells_present_bitmap.num_set_bits(); + let cells_len = column.sidecar.column.len(); + let proofs_len = column.sidecar.kzg_proofs.len(); + if bitmap_popcount != cells_len || bitmap_popcount != proofs_len { + return PartialColumnVerificationResult::ErrWithValidHeader { + err: GossipPartialDataColumnError::InconsistentPresentCount { + bitmap_popcount, + cells_len, + proofs_len, + }, + header, + }; + } + + let bitmap_len = column.sidecar.cells_present_bitmap.len(); + let commitments_len = header.as_header().kzg_commitments.len(); + if bitmap_len != commitments_len { + return PartialColumnVerificationResult::ErrWithValidHeader { + err: GossipPartialDataColumnError::InconsistentCommitmentsLength { + bitmap_len, + commitments_len, + }, + header, + }; + } + + let column = Arc::from(column); + let cells_to_kzg_verify = match chain + .data_availability_checker + .missing_cells_for_partial_column_sidecar(&column) + { + Ok(Some(cells_to_kzg_verify)) => cells_to_kzg_verify, + Ok(None) => { + return PartialColumnVerificationResult::ErrWithValidHeader { + err: GossipDataColumnError::PriorKnownUnpublished.into(), + header, + }; + } + Err(MissingCellsError::MismatchesCachedColumn) => { + return PartialColumnVerificationResult::ErrWithValidHeader { + err: GossipDataColumnError::MismatchesCachedColumn.into(), + header, + }; + } + Err(MissingCellsError::UnexpectedError(e)) => todo!("handle unexpected error {:?}", e), + }; + + // We do not have to check block related data here, as we create the verifiable column from + // gossip accepted block + let kzg = &chain.kzg; + let column = match verify_kzg_for_partial_data_column( + column.clone(), + cells_to_kzg_verify, + &header, + kzg, + seen_timestamp, + ) { + Ok(column) => column, + Err(err) => { + return PartialColumnVerificationResult::ErrWithValidHeader { err, header }; + } + }; + + PartialColumnVerificationResult::Ok { column, header } +} + +/// The result of a 
`validate_partial_data_column_sidecar_for_gossip` call. Any headers returned +/// herein were cached during this call or previously cached. +pub enum PartialColumnVerificationResult { + /// Verification succeeded fully. + Ok { + column: KzgVerifiedPartialDataColumn, + header: GossipVerifiedPartialDataColumnHeader, + }, + /// Verification of the column failed, but the header is valid. + ErrWithValidHeader { + err: GossipPartialDataColumnError, + header: GossipVerifiedPartialDataColumnHeader, + }, + /// Verification of the column or header failed, and no valid header was cached previously. + Err(GossipPartialDataColumnError), +} + /// Verify if the data column sidecar is valid. fn verify_data_column_sidecar( data_column: &DataColumnSidecar, @@ -677,6 +1197,17 @@ fn verify_column_inclusion_proof( Ok(()) } +fn verify_partial_column_header_inclusion_proof( + header: &PartialDataColumnHeader, +) -> Result<(), GossipDataColumnError> { + let _timer = metrics::start_timer(&metrics::DATA_COLUMN_SIDECAR_INCLUSION_PROOF_VERIFICATION); + if !header.verify_inclusion_proof() { + return Err(GossipDataColumnError::InvalidInclusionProof); + } + + Ok(()) +} + fn verify_slot_higher_than_parent( parent_block: &Block, data_column_slot: Slot, @@ -691,17 +1222,18 @@ fn verify_slot_higher_than_parent( } fn verify_parent_block_and_finalized_descendant( - data_column: &DataColumnSidecarFulu, + block_parent_root: Hash256, + slot: Slot, chain: &BeaconChain, ) -> Result { let fork_choice = chain.canonical_head.fork_choice_read_lock(); // We have already verified that the column is past finalization, so we can // just check fork choice for the block's parent. 
- let block_parent_root = data_column.block_parent_root(); let Some(parent_block) = fork_choice.get_block(&block_parent_root) else { return Err(GossipDataColumnError::ParentUnknown { parent_root: block_parent_root, + slot, }); }; @@ -715,16 +1247,15 @@ fn verify_parent_block_and_finalized_descendant( } fn verify_proposer_and_signature( - data_column: &DataColumnSidecarFulu, + signed_block_header: &SignedBeaconBlockHeader, parent_block: &ProtoBlock, chain: &BeaconChain, ) -> Result<(), GossipDataColumnError> { - let column_slot = data_column.slot(); + let column_slot = signed_block_header.message.slot; let slots_per_epoch = T::EthSpec::slots_per_epoch(); let column_epoch = column_slot.epoch(slots_per_epoch); - let column_index = data_column.index; - let block_root = data_column.block_root(); - let block_parent_root = data_column.block_parent_root(); + let block_root = signed_block_header.message.tree_hash_root(); + let block_parent_root = signed_block_header.message.parent_root; let proposer_shuffling_root = parent_block.proposer_shuffling_root_for_child_block(column_epoch, &chain.spec); @@ -736,7 +1267,6 @@ fn verify_proposer_and_signature( || { debug!( %block_root, - index = %column_index, "Proposer shuffling cache miss for column verification" ); // We assume that the `Pending` state has the same shufflings as a `Full` state @@ -765,7 +1295,6 @@ fn verify_proposer_and_signature( let pubkey = pubkey_cache .get(proposer_index) .ok_or_else(|| GossipDataColumnError::UnknownValidator(proposer_index as u64))?; - let signed_block_header = &data_column.signed_block_header; signed_block_header.verify_signature::( pubkey, &fork, @@ -778,7 +1307,7 @@ fn verify_proposer_and_signature( return Err(GossipDataColumnError::ProposalSignatureInvalid); } - let column_proposer_index = data_column.block_proposer_index(); + let column_proposer_index = signed_block_header.message.proposer_index; if proposer_index != column_proposer_index as usize { return 
Err(GossipDataColumnError::ProposerIndexMismatch { sidecar: column_proposer_index as usize, @@ -875,20 +1404,29 @@ pub fn observe_gossip_data_column( #[cfg(test)] mod test { + use crate::ChainConfig; use crate::data_column_verification::{ - GossipDataColumnError, GossipVerifiedDataColumn, - validate_data_column_sidecar_for_gossip_fulu, + GossipDataColumnError, GossipPartialDataColumnError, GossipVerifiedDataColumn, + GossipVerifiedPartialDataColumnHeader, KzgVerifiedCustodyPartialDataColumn, + PartialColumnVerificationResult, validate_data_column_sidecar_for_gossip_fulu, + validate_partial_data_column_sidecar_for_gossip, }; use crate::observed_data_sidecars::Observe; use crate::test_utils::{ - BeaconChainHarness, EphemeralHarnessType, generate_data_column_sidecars_from_block, + BeaconChainHarness, EphemeralHarnessType, fork_name_from_env, + generate_data_column_sidecars_from_block, test_spec, }; use eth2::types::BlobsBundle; use execution_layer::test_utils::generate_blobs; + use kzg::KzgProof; + use ssz::BitList; + use ssz_types::VariableList; use std::sync::Arc; + use std::time::UNIX_EPOCH; use types::{ - DataColumnSidecar, DataColumnSidecarFulu, DataColumnSubnetId, EthSpec, ForkName, - MainnetEthSpec, + Cell, CellBitmap, DataColumnSidecar, DataColumnSidecarFulu, DataColumnSubnetId, EthSpec, + ForkName, MainnetEthSpec, PartialDataColumn, PartialDataColumnHeader, + PartialDataColumnSidecar, }; type E = MainnetEthSpec; @@ -1013,4 +1551,360 @@ mod test { Some(GossipDataColumnError::MaxBlobsPerBlockExceeded { .. }) )); } + + #[tokio::test] + async fn test_partial_message_verification_fulu() { + let spec = if fork_name_from_env().is_some() { + Arc::new(test_spec::()) + } else { + Arc::new(ForkName::Fulu.make_genesis_spec(E::default_spec())) + }; + + // Only run these tests if columns are enabled. + if !spec.is_fulu_scheduled() { + return; + } + // Gloas is not supported yet. 
+ if spec.is_gloas_scheduled() { + return; + } + + let chain_config = ChainConfig { + enable_partial_columns: true, + ..Default::default() + }; + let harness = BeaconChainHarness::builder(E::default()) + .spec(spec) + .deterministic_keypairs(64) + .fresh_ephemeral_store() + .mock_execution_layer() + .chain_config(chain_config) + .build(); + + partial_empty_message_without_cells_returns_error(&harness).await; + partial_inconsistent_present_count_returns_error(&harness).await; + partial_inconsistent_max_count_returns_error(&harness).await; + partial_header_with_empty_commitments_fails(&harness).await; + partial_header_root_mismatch_fails(&harness).await; + partial_header_with_invalid_inclusion_proof_fails(&harness).await; + } + + /// Build a block containing 1 blob and pre-cache the header in the partial assembler. + async fn add_block_and_header( + harness: &BeaconChainHarness>, + ) -> (types::Hash256, Arc>) { + harness.advance_slot(); + // Generate a block with 1 blob so we have valid data columns. + let fork = harness + .spec + .fork_name_at_epoch(harness.get_current_slot().epoch(E::slots_per_epoch())); + let BlobsBundle:: { + commitments, + proofs: _, + blobs: _, + } = generate_blobs(1, fork).unwrap().0; + + let slot = harness.get_current_slot(); + let state = harness.get_current_state(); + let ((block, _blobs_opt), _state) = harness + .make_block_with_modifier(state, slot, |block| { + *block.body_mut().blob_kzg_commitments_mut().unwrap() = + vec![commitments[0]].try_into().unwrap(); + }) + .await; + + let block_root = block.canonical_root(); + let header: PartialDataColumnHeader = block.as_ref().try_into().unwrap(); + let header = Arc::new(header); + + // Pre-cache the header in the partial assembler so headerless partials can be verified. 
+ harness + .chain + .data_availability_checker + .partial_assembler() + .unwrap() + .init(block_root, header.clone()); + + (block_root, header) + } + + async fn partial_empty_message_without_cells_returns_error( + harness: &BeaconChainHarness>, + ) { + let (block_root, header) = add_block_and_header(harness).await; + + // Create a headerless partial with no cells — should trigger EmptyMessage. + let num_commitments = header.kzg_commitments.len(); + let empty_bitmap = + BitList::<::MaxBlobCommitmentsPerBlock>::with_capacity(num_commitments) + .unwrap(); + + let column = PartialDataColumn { + block_root, + index: 0, + sidecar: PartialDataColumnSidecar { + cells_present_bitmap: empty_bitmap, + column: vec![].try_into().unwrap(), + kzg_proofs: vec![].try_into().unwrap(), + header: None.into(), + }, + }; + + let result = validate_partial_data_column_sidecar_for_gossip( + Box::new(column), + &harness.chain, + UNIX_EPOCH.elapsed().unwrap(), + ); + assert!( + matches!( + result, + PartialColumnVerificationResult::ErrWithValidHeader { + err: GossipPartialDataColumnError::EmptyMessage, + .. + } + ), + "Expected EmptyMessage" + ); + } + + async fn partial_inconsistent_present_count_returns_error( + harness: &BeaconChainHarness>, + ) { + let (block_root, header) = add_block_and_header(harness).await; + + // Create a bitmap that says 2 bits are set, but only provide 1 cell/proof. 
+ let num_commitments = header.kzg_commitments.len(); + let mut bitmap = + BitList::<::MaxBlobCommitmentsPerBlock>::with_capacity(num_commitments) + .unwrap(); + bitmap.set(0, true).unwrap(); + + let column = PartialDataColumn { + block_root, + index: 0, + sidecar: PartialDataColumnSidecar { + cells_present_bitmap: bitmap, + column: vec![types::Cell::::default()].try_into().unwrap(), + // Provide 2 proofs but only 1 cell ← mismatch with popcount=1 + kzg_proofs: vec![types::KzgProof::empty(), types::KzgProof::empty()] + .try_into() + .unwrap(), + header: None.into(), + }, + }; + + let result = validate_partial_data_column_sidecar_for_gossip( + Box::new(column), + &harness.chain, + UNIX_EPOCH.elapsed().unwrap(), + ); + assert!( + matches!( + result, + PartialColumnVerificationResult::ErrWithValidHeader { + err: GossipPartialDataColumnError::InconsistentPresentCount { .. }, + .. + } + ), + "Expected InconsistentPresentCount" + ); + } + + async fn partial_inconsistent_max_count_returns_error( + harness: &BeaconChainHarness>, + ) { + let (block_root, _header) = add_block_and_header(harness).await; + + // Create a bitmap with length different from the number of commitments in the header. + // Header has 1 commitment, but we use a bitmap with capacity 3. + let mut bitmap = + BitList::<::MaxBlobCommitmentsPerBlock>::with_capacity(3).unwrap(); + bitmap.set(0, true).unwrap(); + + let column = PartialDataColumn { + block_root, + index: 0, + sidecar: PartialDataColumnSidecar { + cells_present_bitmap: bitmap, + column: vec![types::Cell::::default()].try_into().unwrap(), + kzg_proofs: vec![types::KzgProof::empty()].try_into().unwrap(), + header: None.into(), + }, + }; + + let result = validate_partial_data_column_sidecar_for_gossip( + Box::new(column), + &harness.chain, + UNIX_EPOCH.elapsed().unwrap(), + ); + assert!( + matches!( + result, + PartialColumnVerificationResult::ErrWithValidHeader { + err: GossipPartialDataColumnError::InconsistentCommitmentsLength { .. }, + .. 
+ } + ), + "Expected InconsistentMaxCount" + ); + } + + async fn partial_header_with_empty_commitments_fails( + harness: &BeaconChainHarness>, + ) { + let slot = harness.get_current_slot(); + let state = harness.get_current_state(); + let ((block, _), _) = harness + .make_block_with_modifier(state, slot, |block| { + *block.body_mut().blob_kzg_commitments_mut().unwrap() = vec![].try_into().unwrap(); + }) + .await; + + let block_root = block.canonical_root(); + let header: PartialDataColumnHeader = block.as_ref().try_into().unwrap(); + assert!(header.kzg_commitments.is_empty()); + + let result = + GossipVerifiedPartialDataColumnHeader::new(block_root, header, &*harness.chain); + assert!( + matches!( + result, + Err(GossipPartialDataColumnError::GossipDataColumnError( + GossipDataColumnError::UnexpectedDataColumn + )) + ), + "Expected UnexpectedDataColumn, got: {result:?}" + ); + } + + async fn partial_header_root_mismatch_fails( + harness: &BeaconChainHarness>, + ) { + let (_block_root, header) = add_block_and_header(harness).await; + + // Use a wrong group_id (not matching the header's block root) + let wrong_root = types::Hash256::repeat_byte(0xff); + let header = PartialDataColumnHeader::clone(&header); + + let result = + GossipVerifiedPartialDataColumnHeader::new(wrong_root, header, &*harness.chain); + assert!( + matches!( + result, + Err(GossipPartialDataColumnError::HeaderIncorrectRoot { .. 
}) + ), + "Expected HeaderIncorrectRoot, got: {result:?}" + ); + } + + async fn partial_header_with_invalid_inclusion_proof_fails( + harness: &BeaconChainHarness>, + ) { + let (block_root, header) = add_block_and_header(harness).await; + + // Corrupt the inclusion proof + let mut header = PartialDataColumnHeader::clone(&header); + header.kzg_commitments_inclusion_proof[0] = types::Hash256::repeat_byte(0xaa); + + let result = + GossipVerifiedPartialDataColumnHeader::new(block_root, header, &*harness.chain); + assert!( + matches!( + result, + Err(GossipPartialDataColumnError::GossipDataColumnError( + GossipDataColumnError::InvalidInclusionProof + )) + ), + "Expected InvalidInclusionProof, got: {result:?}" + ); + } + + // -- merge tests -- + + fn make_cell(marker: u8) -> Cell { + let mut cell = Cell::::default(); + cell[0] = marker; + cell + } + + fn make_partial_with_marker( + total_blobs: usize, + present_indices: &[usize], + marker_base: u8, + ) -> KzgVerifiedCustodyPartialDataColumn { + let mut bitmap = CellBitmap::::with_capacity(total_blobs).unwrap(); + for &idx in present_indices { + bitmap.set(idx, true).unwrap(); + } + + let column: VariableList<_, _> = present_indices + .iter() + .map(|&idx| make_cell(marker_base.wrapping_add(idx as u8))) + .collect::>() + .try_into() + .unwrap(); + let proofs: VariableList<_, _> = present_indices + .iter() + .map(|_| KzgProof::empty()) + .collect::>() + .try_into() + .unwrap(); + + KzgVerifiedCustodyPartialDataColumn { + data: Arc::new(PartialDataColumn { + block_root: Default::default(), + index: 0, + sidecar: PartialDataColumnSidecar { + cells_present_bitmap: bitmap, + column, + kzg_proofs: proofs, + header: None.into(), + }, + }), + latest_cell_timestamp: Default::default(), + } + } + + fn make_partial( + total_blobs: usize, + present_indices: &[usize], + ) -> KzgVerifiedCustodyPartialDataColumn { + make_partial_with_marker(total_blobs, present_indices, 0) + } + + #[test] + fn merge_disjoint_partials() { + let a = 
make_partial(6, &[0, 2]); + let b = make_partial(6, &[1, 3]); + let merged = a.merge(&b).unwrap(); + assert_eq!(merged.data.sidecar.column.len(), 4); + assert_eq!(merged.data.sidecar.kzg_proofs.len(), 4); + for i in 0..4 { + assert!(merged.data.sidecar.cells_present_bitmap.get(i).unwrap()); + } + assert!(!merged.data.sidecar.cells_present_bitmap.get(4).unwrap()); + } + + #[test] + fn merge_overlapping_partials_prefers_self() { + let a = make_partial_with_marker(4, &[0, 1], 0); + let b = make_partial_with_marker(4, &[1, 2], 100); + let merged = a.merge(&b).unwrap(); + assert_eq!(merged.data.sidecar.column.len(), 3); + // Cell at bitmap index 1 is the second cell in the merged column. + // It should come from `a` (marker_base=0, so marker=0+1=1), not `b` (marker=100+1=101). + assert_eq!(merged.data.sidecar.column[1][0], 1); + } + + #[test] + fn merge_with_empty_other() { + let a = make_partial(4, &[0, 2]); + let b = make_partial(4, &[]); + let merged = a.merge(&b).unwrap(); + assert_eq!(merged.data.sidecar.column.len(), 2); + assert_eq!( + merged.data.sidecar.cells_present_bitmap, + a.data.sidecar.cells_present_bitmap + ); + } } diff --git a/beacon_node/beacon_chain/src/fetch_blobs/fetch_blobs_beacon_adapter.rs b/beacon_node/beacon_chain/src/fetch_blobs/fetch_blobs_beacon_adapter.rs index a5dc7d7f8b..c94fb036f8 100644 --- a/beacon_node/beacon_chain/src/fetch_blobs/fetch_blobs_beacon_adapter.rs +++ b/beacon_node/beacon_chain/src/fetch_blobs/fetch_blobs_beacon_adapter.rs @@ -1,7 +1,8 @@ use crate::fetch_blobs::{EngineGetBlobsOutput, FetchEngineBlobError}; use crate::observed_data_sidecars::ObservationKey; +use crate::partial_data_column_assembler::PartialDataColumnAssembler; use crate::{AvailabilityProcessingStatus, BeaconChain, BeaconChainTypes}; -use execution_layer::json_structures::{BlobAndProofV1, BlobAndProofV2}; +use execution_layer::json_structures::{BlobAndProofV1, BlobAndProofV2, BlobAndProofV3}; use kzg::Kzg; #[cfg(test)] use mockall::automock; @@ -35,6 
+36,13 @@ impl FetchBlobsBeaconAdapter { &self.chain.task_executor } + pub(crate) fn partial_assembler(&self) -> Option>> { + self.chain + .data_availability_checker + .partial_assembler() + .cloned() + } + pub(crate) async fn get_blobs_v1( &self, versioned_hashes: Vec, @@ -67,6 +75,22 @@ impl FetchBlobsBeaconAdapter { .map_err(FetchEngineBlobError::RequestFailed) } + pub(crate) async fn get_blobs_v3( + &self, + versioned_hashes: Vec, + ) -> Result>>, FetchEngineBlobError> { + let execution_layer = self + .chain + .execution_layer + .as_ref() + .ok_or(FetchEngineBlobError::ExecutionLayerMissing)?; + + execution_layer + .get_blobs_v3(versioned_hashes) + .await + .map_err(FetchEngineBlobError::RequestFailed) + } + pub(crate) fn blobs_known_for_observation_key( &self, observation_key: ObservationKey, @@ -119,4 +143,18 @@ impl FetchBlobsBeaconAdapter { .fork_choice_read_lock() .contains_block(block_root) } + + pub(crate) async fn supports_get_blobs_v3(&self) -> Result { + let execution_layer = self + .chain + .execution_layer + .as_ref() + .ok_or(FetchEngineBlobError::ExecutionLayerMissing)?; + + execution_layer + .get_engine_capabilities(None) + .await + .map_err(FetchEngineBlobError::RequestFailed) + .map(|caps| caps.get_blobs_v3) + } } diff --git a/beacon_node/beacon_chain/src/fetch_blobs/mod.rs b/beacon_node/beacon_chain/src/fetch_blobs/mod.rs index ffc308f3d1..f7b4b8a29e 100644 --- a/beacon_node/beacon_chain/src/fetch_blobs/mod.rs +++ b/beacon_node/beacon_chain/src/fetch_blobs/mod.rs @@ -13,31 +13,28 @@ mod fetch_blobs_beacon_adapter; mod tests; use crate::blob_verification::{GossipBlobError, KzgVerifiedBlob}; -use crate::block_verification_types::AsBlock; -use crate::data_column_verification::{KzgVerifiedCustodyDataColumn, KzgVerifiedDataColumn}; +use crate::data_column_verification::{ + KzgVerifiedCustodyDataColumn, KzgVerifiedCustodyPartialDataColumn, KzgVerifiedPartialDataColumn, +}; #[cfg_attr(test, double)] use 
crate::fetch_blobs::fetch_blobs_beacon_adapter::FetchBlobsBeaconAdapter; -use crate::kzg_utils::blobs_to_data_column_sidecars; +use crate::kzg_utils::blobs_to_partial_data_columns; use crate::observed_data_sidecars::ObservationKey; use crate::{ AvailabilityProcessingStatus, BeaconChain, BeaconChainError, BeaconChainTypes, BlockError, metrics, }; use execution_layer::Error as ExecutionLayerError; -use execution_layer::json_structures::{BlobAndProofV1, BlobAndProofV2}; +use execution_layer::json_structures::{BlobAndProofV1, BlobAndProofV2, BlobAndProofV3}; use metrics::{TryExt, inc_counter}; #[cfg(test)] use mockall_double::double; use slot_clock::timestamp_now; -use ssz_types::FixedVector; use state_processing::per_block_processing::deneb::kzg_commitment_to_versioned_hash; use std::sync::Arc; use tracing::{debug, instrument, warn}; -use types::data::{BlobSidecarError, DataColumnSidecarError}; -use types::{ - BeaconStateError, Blob, BlobSidecar, ColumnIndex, EthSpec, FullPayload, Hash256, KzgProofs, - SignedBeaconBlock, SignedBeaconBlockHeader, VersionedHash, -}; +use types::data::{BlobSidecarError, ColumnIndex, DataColumnSidecarError, PartialDataColumnHeader}; +use types::{BeaconStateError, BlobSidecar, EthSpec, Hash256, VersionedHash}; /// Result from engine get blobs to be passed onto `DataAvailabilityChecker` and published to the /// gossip network. 
The blobs / data columns have not been marked as observed yet, as they may not @@ -71,14 +68,14 @@ pub enum FetchEngineBlobError { pub async fn fetch_and_process_engine_blobs( chain: Arc>, block_root: Hash256, - block: Arc>>, + header: Arc>, custody_columns: &[ColumnIndex], publish_fn: impl Fn(EngineGetBlobsOutput) + Send + 'static, ) -> Result, FetchEngineBlobError> { fetch_and_process_engine_blobs_inner( FetchBlobsBeaconAdapter::new(chain), block_root, - block, + header, custody_columns, publish_fn, ) @@ -90,22 +87,16 @@ pub async fn fetch_and_process_engine_blobs( async fn fetch_and_process_engine_blobs_inner( chain_adapter: FetchBlobsBeaconAdapter, block_root: Hash256, - block: Arc>>, + header: Arc>, custody_columns: &[ColumnIndex], publish_fn: impl Fn(EngineGetBlobsOutput) + Send + 'static, ) -> Result, FetchEngineBlobError> { - let versioned_hashes = if let Some(kzg_commitments) = block - .message() - .body() - .blob_kzg_commitments() - .ok() - .filter(|blobs| !blobs.is_empty()) - { - kzg_commitments - .iter() - .map(kzg_commitment_to_versioned_hash) - .collect::>() - } else { + let versioned_hashes = header + .kzg_commitments + .iter() + .map(kzg_commitment_to_versioned_hash) + .collect::>(); + if versioned_hashes.is_empty() { debug!("Fetch blobs not triggered - none required"); return Ok(None); }; @@ -117,12 +108,12 @@ async fn fetch_and_process_engine_blobs_inner( if chain_adapter .spec() - .is_peer_das_enabled_for_epoch(block.epoch()) + .is_peer_das_enabled_for_epoch(header.slot().epoch(T::EthSpec::slots_per_epoch())) { - fetch_and_process_blobs_v2( + fetch_and_process_blobs_v2_or_v3( chain_adapter, block_root, - block, + header, versioned_hashes, custody_columns, publish_fn, @@ -132,7 +123,7 @@ async fn fetch_and_process_engine_blobs_inner( fetch_and_process_blobs_v1( chain_adapter, block_root, - block, + &header, versioned_hashes, publish_fn, ) @@ -144,7 +135,7 @@ async fn fetch_and_process_engine_blobs_inner( async fn fetch_and_process_blobs_v1( 
chain_adapter: FetchBlobsBeaconAdapter, block_root: Hash256, - block: Arc>, + header: &PartialDataColumnHeader, versioned_hashes: Vec, publish_fn: impl Fn(EngineGetBlobsOutput) + Send + Sized, ) -> Result, FetchEngineBlobError> { @@ -182,19 +173,12 @@ async fn fetch_and_process_blobs_v1( return Ok(None); } - let (signed_block_header, kzg_commitments_proof) = block - .signed_block_header_and_kzg_commitments_proof() - .map_err(FetchEngineBlobError::BeaconStateError)?; + let mut blob_sidecar_list = build_blob_sidecars(header, response)?; - let mut blob_sidecar_list = build_blob_sidecars( - &block, - response, - signed_block_header, - &kzg_commitments_proof, - )?; - - let observation_key = - ObservationKey::new_proposer_key(block.message().proposer_index(), block.slot()); + let observation_key = ObservationKey::new_proposer_key( + header.signed_block_header.message.proposer_index, + header.slot(), + ); if let Some(observed_blobs) = chain_adapter.blobs_known_for_observation_key(observation_key) { blob_sidecar_list.retain(|blob| !observed_blobs.contains(&blob.blob_index())); @@ -225,7 +209,7 @@ async fn fetch_and_process_blobs_v1( let availability_processing_status = chain_adapter .process_engine_blobs( - block.slot(), + header.slot(), block_root, EngineGetBlobsOutput::Blobs(blob_sidecar_list), ) @@ -235,35 +219,53 @@ async fn fetch_and_process_blobs_v1( } #[instrument(skip_all, level = "debug")] -async fn fetch_and_process_blobs_v2( +async fn fetch_and_process_blobs_v2_or_v3( chain_adapter: FetchBlobsBeaconAdapter, block_root: Hash256, - block: Arc>, + header: Arc>, versioned_hashes: Vec, custody_columns_indices: &[ColumnIndex], publish_fn: impl Fn(EngineGetBlobsOutput) + Send + 'static, ) -> Result, FetchEngineBlobError> { let num_expected_blobs = versioned_hashes.len(); + let slot = header.slot(); metrics::observe(&metrics::BLOBS_FROM_EL_EXPECTED, num_expected_blobs as f64); - debug!(num_expected_blobs, "Fetching blobs from the EL"); - // Track request count and 
duration for standardized metrics - inc_counter(&metrics::BEACON_ENGINE_GET_BLOBS_V2_REQUESTS_TOTAL); - let _timer = - metrics::start_timer(&metrics::BEACON_ENGINE_GET_BLOBS_V2_REQUEST_DURATION_SECONDS); + let get_blobs_v3 = chain_adapter.supports_get_blobs_v3().await?; + let response = if get_blobs_v3 { + debug!(num_expected_blobs, "Fetching available blobs from the EL"); + // Track request count and duration for standardized metrics + inc_counter(&metrics::BEACON_ENGINE_GET_BLOBS_V3_REQUESTS_TOTAL); + let _timer = + metrics::start_timer(&metrics::BEACON_ENGINE_GET_BLOBS_V3_REQUEST_DURATION_SECONDS); - let response = chain_adapter - .get_blobs_v2(versioned_hashes) - .await - .inspect_err(|_| { - inc_counter(&metrics::BLOBS_FROM_EL_ERROR_TOTAL); - })?; + chain_adapter + .get_blobs_v3(versioned_hashes) + .await + .inspect_err(|_| { + inc_counter(&metrics::BLOBS_FROM_EL_ERROR_TOTAL); + })? + } else { + debug!(num_expected_blobs, "Fetching all blobs from the EL"); - drop(_timer); + // Track request count and duration for standardized metrics + inc_counter(&metrics::BEACON_ENGINE_GET_BLOBS_V2_REQUESTS_TOTAL); + let _timer = + metrics::start_timer(&metrics::BEACON_ENGINE_GET_BLOBS_V2_REQUEST_DURATION_SECONDS); - // Track successful response - inc_counter(&metrics::BEACON_ENGINE_GET_BLOBS_V2_RESPONSES_TOTAL); + let response = chain_adapter + .get_blobs_v2(versioned_hashes) + .await + .inspect_err(|_| { + inc_counter(&metrics::BLOBS_FROM_EL_ERROR_TOTAL); + })?; + + // Track successful response + inc_counter(&metrics::BEACON_ENGINE_GET_BLOBS_V2_RESPONSES_TOTAL); + + response.map(|vec| vec.into_iter().map(Some).collect()) + }; let Some(blobs_and_proofs) = response else { debug!(num_expected_blobs, "No blobs fetched from the EL"); @@ -271,32 +273,35 @@ async fn fetch_and_process_blobs_v2( return Ok(None); }; - let (blobs, proofs): (Vec<_>, Vec<_>) = blobs_and_proofs - .into_iter() - .map(|blob_and_proof| { - let BlobAndProofV2 { blob, proofs } = blob_and_proof; - (blob, 
proofs) - }) - .unzip(); - - let num_fetched_blobs = blobs.len(); + let num_fetched_blobs = blobs_and_proofs.iter().filter(|opt| opt.is_some()).count(); metrics::observe(&metrics::BLOBS_FROM_EL_RECEIVED, num_fetched_blobs as f64); if num_fetched_blobs != num_expected_blobs { - // This scenario is not supposed to happen if the EL is spec compliant. - // It should either return all requested blobs or none, but NOT partial responses. - // If we attempt to compute columns with partial blobs, we'd end up with invalid columns. - warn!( - num_fetched_blobs, - num_expected_blobs, "The EL did not return all requested blobs" - ); - inc_counter(&metrics::BLOBS_FROM_EL_MISS_TOTAL); - return Ok(None); + if !get_blobs_v3 { + // This scenario is not supposed to happen if the EL is spec compliant. + // It should either return all requested blobs or none, but NOT partial responses. + // If we attempt to compute columns with partial blobs, we'd end up with invalid columns. + warn!( + num_fetched_blobs, + num_expected_blobs, "The EL did not return all requested blobs" + ); + inc_counter(&metrics::BLOBS_FROM_EL_MISS_TOTAL); + return Ok(None); + } else { + inc_counter(&metrics::BEACON_ENGINE_GET_BLOBS_V3_PARTIAL_RESPONSES_TOTAL); + debug!( + num_fetched_blobs, + num_expected_blobs, "Blobs partially received from the EL" + ); + } + } else { + debug!(num_fetched_blobs, "All blobs received from the EL"); + inc_counter(&metrics::BLOBS_FROM_EL_HIT_TOTAL); + if get_blobs_v3 { + inc_counter(&metrics::BEACON_ENGINE_GET_BLOBS_V3_COMPLETE_RESPONSES_TOTAL); + } } - debug!(num_fetched_blobs, "All expected blobs received from the EL"); - inc_counter(&metrics::BLOBS_FROM_EL_HIT_TOTAL); - if chain_adapter.fork_choice_contains_block(&block_root) { // Avoid computing columns if the block has already been imported. 
debug!( @@ -310,9 +315,8 @@ async fn fetch_and_process_blobs_v2( let custody_columns_to_import = compute_custody_columns_to_import( &chain_adapter, block_root, - block.clone(), - blobs, - proofs, + &header, + blobs_and_proofs, custody_columns_indices, ) .await?; @@ -325,20 +329,49 @@ async fn fetch_and_process_blobs_v2( return Ok(None); } - // Up until this point we have not observed the data columns in the gossip cache, which allows - // them to arrive independently while this function is running. In publish_fn we will observe - // them and then publish any columns that had not already been observed. - publish_fn(EngineGetBlobsOutput::CustodyColumns( - custody_columns_to_import.clone(), - )); + let full_columns = match chain_adapter.partial_assembler() { + Some(assembler) => { + // Initialize the partial assembler with the columns from the engine and return any full + // columns for publishing + assembler + .merge_partials(block_root, custody_columns_to_import, header) + .ok_or_else(|| { + FetchEngineBlobError::InternalError( + "Failed to merge partials into assembler".to_string(), + ) + })? + .full_columns + } + None => { + // Partial columns are disabled, so let's try to directly convert the columns we got + // from the EL into full columns. 
+ custody_columns_to_import
+ .into_iter()
+ .filter_map(|col| col.try_into_full(&header))
+ .collect()
+ }
+ };
- let availability_processing_status = chain_adapter
- .process_engine_blobs(
- block.slot(),
- block_root,
- EngineGetBlobsOutput::CustodyColumns(custody_columns_to_import),
- )
- .await?;
+ // Publish complete columns
+ if !full_columns.is_empty() {
+ publish_fn(EngineGetBlobsOutput::CustodyColumns(full_columns.clone()));
+ }
+ // We publish all partials at the calling site, regardless of result, as previous publishes
+ // have been blocked, waiting for the results of this call
+
+ // Process complete columns through DA checker
+ let availability_processing_status = if !full_columns.is_empty() {
+ chain_adapter
+ .process_engine_blobs(
+ slot,
+ block_root,
+ EngineGetBlobsOutput::CustodyColumns(full_columns),
+ )
+ .await?
+ } else {
+ // No complete columns yet, still missing components
+ AvailabilityProcessingStatus::MissingComponents(slot, block_root)
+ };
 Ok(Some(availability_processing_status))
 }
@@ -347,28 +380,34 @@ async fn fetch_and_process_blobs_v2(
 async fn compute_custody_columns_to_import(
 chain_adapter: &Arc>,
 block_root: Hash256,
- block: Arc>>,
- blobs: Vec>,
- proofs: Vec>,
+ header: &PartialDataColumnHeader,
+ blobs_and_proofs: Vec>,
 custody_columns_indices: &[ColumnIndex],
-) -> Result>, FetchEngineBlobError> {
+) -> Result>, FetchEngineBlobError> {
 let kzg = chain_adapter.kzg().clone();
 let spec = chain_adapter.spec().clone();
 let chain_adapter_cloned = chain_adapter.clone();
 let custody_columns_indices = custody_columns_indices.to_vec();
+ let header = header.clone();
 chain_adapter
 .executor()
 .spawn_blocking_handle(
 move || {
 let mut timer = metrics::start_timer_vec(
 &metrics::DATA_COLUMN_SIDECAR_COMPUTATION,
- &[&blobs.len().to_string()],
+ &[&blobs_and_proofs.len().to_string()],
 );
- let blob_refs = blobs.iter().collect::>();
- let cell_proofs = proofs.into_iter().flatten().collect();
+ let blob_and_proof_refs = blobs_and_proofs
+ .iter() + .map(|option| { + option + .as_ref() + .map(|BlobAndProofV2 { blob, proofs }| (blob, proofs.as_ref())) + }) + .collect::>(); let data_columns_result = - blobs_to_data_column_sidecars(&blob_refs, cell_proofs, &block, &kzg, &spec) + blobs_to_partial_data_columns(blob_and_proof_refs, &header, &kzg, &spec) .discard_timer_on_break(&mut timer); drop(timer); @@ -379,10 +418,12 @@ async fn compute_custody_columns_to_import( .map(|data_columns| { data_columns .into_iter() - .filter(|col| custody_columns_indices.contains(col.index())) + .filter(|col| custody_columns_indices.contains(&col.index)) .map(|col| { - KzgVerifiedCustodyDataColumn::from_asserted_custody( - KzgVerifiedDataColumn::from_execution_verified(col), + KzgVerifiedCustodyPartialDataColumn::from_asserted_custody( + KzgVerifiedPartialDataColumn::from_execution_verified( + Arc::new(col), + ), ) }) .collect::>() @@ -390,7 +431,8 @@ async fn compute_custody_columns_to_import( .map_err(FetchEngineBlobError::DataColumnSidecarError)?; // Only consider columns that are not already observed on gossip. 
- let observation_key = ObservationKey::from_block(&block, block_root, &spec); + let observation_key = + ObservationKey::from_partial_column_header(&header, block_root, &spec); if let Some(observed_columns) = chain_adapter_cloned.data_column_known_for_observation_key(observation_key) @@ -421,10 +463,8 @@ async fn compute_custody_columns_to_import( } fn build_blob_sidecars( - block: &Arc>>, + header: &PartialDataColumnHeader, response: Vec>>, - signed_block_header: SignedBeaconBlockHeader, - kzg_commitments_inclusion_proof: &FixedVector, ) -> Result>, FetchEngineBlobError> { let mut sidecars = vec![]; for (index, blob_and_proof) in response @@ -435,9 +475,7 @@ fn build_blob_sidecars( let blob_sidecar = BlobSidecar::new_with_existing_proof( index, blob_and_proof.blob, - block, - signed_block_header.clone(), - kzg_commitments_inclusion_proof, + header.clone(), blob_and_proof.proof, ) .map_err(FetchEngineBlobError::BlobSidecarError)?; diff --git a/beacon_node/beacon_chain/src/fetch_blobs/tests.rs b/beacon_node/beacon_chain/src/fetch_blobs/tests.rs index b3deffa4d7..ef282a3eaa 100644 --- a/beacon_node/beacon_chain/src/fetch_blobs/tests.rs +++ b/beacon_node/beacon_chain/src/fetch_blobs/tests.rs @@ -3,12 +3,14 @@ use crate::fetch_blobs::fetch_blobs_beacon_adapter::MockFetchBlobsBeaconAdapter; use crate::fetch_blobs::{ EngineGetBlobsOutput, FetchEngineBlobError, fetch_and_process_engine_blobs_inner, }; +use crate::partial_data_column_assembler::PartialDataColumnAssembler; use crate::test_utils::{EphemeralHarnessType, get_kzg}; use bls::Signature; use eth2::types::BlobsBundle; use execution_layer::json_structures::{BlobAndProof, BlobAndProofV1, BlobAndProofV2}; use execution_layer::test_utils::generate_blobs; use maplit::hashset; +use std::num::NonZeroUsize; use std::sync::{Arc, Mutex}; use task_executor::test_utils::TestRuntime; use types::{ @@ -21,11 +23,11 @@ type T = EphemeralHarnessType; mod get_blobs_v2 { use super::*; - use types::ColumnIndex; + use 
types::{ColumnIndex, PartialDataColumnHeader}; #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn test_fetch_blobs_v2_no_blobs_in_block() { - let mut mock_adapter = mock_beacon_adapter(ForkName::Fulu); + let mut mock_adapter = mock_beacon_adapter(ForkName::Fulu, false); let (publish_fn, _s) = mock_publish_fn(); let block = SignedBeaconBlock::::Fulu(SignedBeaconBlockFulu { message: BeaconBlockFulu::empty(mock_adapter.spec()), @@ -41,7 +43,7 @@ mod get_blobs_v2 { let processing_status = fetch_and_process_engine_blobs_inner( mock_adapter, block_root, - Arc::new(block), + Arc::new((&block).try_into().unwrap()), &custody_columns, publish_fn, ) @@ -53,7 +55,7 @@ mod get_blobs_v2 { #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn test_fetch_blobs_v2_no_blobs_returned() { - let mut mock_adapter = mock_beacon_adapter(ForkName::Fulu); + let mut mock_adapter = mock_beacon_adapter(ForkName::Fulu, false); let (publish_fn, _) = mock_publish_fn(); let (block, _blobs_and_proofs) = create_test_block_and_blobs(&mock_adapter, 2); let block_root = block.canonical_root(); @@ -66,7 +68,7 @@ mod get_blobs_v2 { let processing_status = fetch_and_process_engine_blobs_inner( mock_adapter, block_root, - block, + Arc::new(PartialDataColumnHeader::try_from(block.as_ref()).unwrap()), &custody_columns, publish_fn, ) @@ -78,7 +80,7 @@ mod get_blobs_v2 { #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn test_fetch_blobs_v2_partial_blobs_returned() { - let mut mock_adapter = mock_beacon_adapter(ForkName::Fulu); + let mut mock_adapter = mock_beacon_adapter(ForkName::Fulu, false); let (publish_fn, publish_fn_args) = mock_publish_fn(); let (block, mut blobs_and_proofs) = create_test_block_and_blobs(&mock_adapter, 2); let block_root = block.canonical_root(); @@ -94,7 +96,7 @@ mod get_blobs_v2 { let processing_status = fetch_and_process_engine_blobs_inner( mock_adapter, block_root, - block, + 
Arc::new(PartialDataColumnHeader::try_from(block.as_ref()).unwrap()), &custody_columns, publish_fn, ) @@ -111,7 +113,7 @@ mod get_blobs_v2 { #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn test_fetch_blobs_v2_block_imported_after_el_response() { - let mut mock_adapter = mock_beacon_adapter(ForkName::Fulu); + let mut mock_adapter = mock_beacon_adapter(ForkName::Fulu, false); let (publish_fn, publish_fn_args) = mock_publish_fn(); let (block, blobs_and_proofs) = create_test_block_and_blobs(&mock_adapter, 2); let block_root = block.canonical_root(); @@ -127,7 +129,7 @@ mod get_blobs_v2 { let processing_status = fetch_and_process_engine_blobs_inner( mock_adapter, block_root, - block, + Arc::new(PartialDataColumnHeader::try_from(block.as_ref()).unwrap()), &custody_columns, publish_fn, ) @@ -144,7 +146,7 @@ mod get_blobs_v2 { #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn test_fetch_blobs_v2_no_new_columns_to_import() { - let mut mock_adapter = mock_beacon_adapter(ForkName::Fulu); + let mut mock_adapter = mock_beacon_adapter(ForkName::Fulu, false); let (publish_fn, publish_fn_args) = mock_publish_fn(); let (block, blobs_and_proofs) = create_test_block_and_blobs(&mock_adapter, 2); let block_root = block.canonical_root(); @@ -166,7 +168,7 @@ mod get_blobs_v2 { let processing_status = fetch_and_process_engine_blobs_inner( mock_adapter, block_root, - block, + Arc::new(PartialDataColumnHeader::try_from(block.as_ref()).unwrap()), &custody_columns, publish_fn, ) @@ -184,7 +186,7 @@ mod get_blobs_v2 { #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn test_fetch_blobs_v2_success() { - let mut mock_adapter = mock_beacon_adapter(ForkName::Fulu); + let mut mock_adapter = mock_beacon_adapter(ForkName::Fulu, false); let (publish_fn, publish_fn_args) = mock_publish_fn(); let (block, blobs_and_proofs) = create_test_block_and_blobs(&mock_adapter, 2); let block_root = block.canonical_root(); @@ -208,7 +210,7 @@ mod get_blobs_v2 
{ let processing_status = fetch_and_process_engine_blobs_inner( mock_adapter, block_root, - block, + Arc::new(PartialDataColumnHeader::try_from(block.as_ref()).unwrap()), &custody_columns, publish_fn, ) @@ -253,17 +255,19 @@ mod get_blobs_v1 { use super::*; use crate::block_verification_types::AsBlock; use std::collections::HashSet; - use types::ColumnIndex; + use types::{ColumnIndex, FullPayload, PartialDataColumnHeader}; const ELECTRA_FORK: ForkName = ForkName::Electra; #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn test_fetch_blobs_v1_no_blobs_in_block() { - let mut mock_adapter = mock_beacon_adapter(ELECTRA_FORK); + let mut mock_adapter = mock_beacon_adapter(ELECTRA_FORK, false); let spec = mock_adapter.spec(); let (publish_fn, _s) = mock_publish_fn(); - let block_no_blobs = - SignedBeaconBlock::from_block(BeaconBlock::empty(spec), Signature::empty()); + let block_no_blobs = SignedBeaconBlock::>::from_block( + BeaconBlock::empty(spec), + Signature::empty(), + ); let block_root = block_no_blobs.canonical_root(); // Expectations: engine fetch blobs should not be triggered @@ -274,7 +278,7 @@ mod get_blobs_v1 { let processing_status = fetch_and_process_engine_blobs_inner( mock_adapter, block_root, - Arc::new(block_no_blobs), + Arc::new(PartialDataColumnHeader::try_from(&block_no_blobs).unwrap()), &custody_columns, publish_fn, ) @@ -287,7 +291,7 @@ mod get_blobs_v1 { #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn test_fetch_blobs_v1_no_blobs_returned() { - let mut mock_adapter = mock_beacon_adapter(ELECTRA_FORK); + let mut mock_adapter = mock_beacon_adapter(ELECTRA_FORK, false); let (publish_fn, _) = mock_publish_fn(); let (block, _blobs_and_proofs) = create_test_block_and_blobs(&mock_adapter, 2); let block_root = block.canonical_root(); @@ -301,7 +305,7 @@ mod get_blobs_v1 { let processing_status = fetch_and_process_engine_blobs_inner( mock_adapter, block_root, - block, + 
Arc::new(PartialDataColumnHeader::try_from(block.as_ref()).unwrap()), &custody_columns, publish_fn, ) @@ -314,7 +318,7 @@ mod get_blobs_v1 { #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn test_fetch_blobs_v1_partial_blobs_returned() { - let mut mock_adapter = mock_beacon_adapter(ELECTRA_FORK); + let mut mock_adapter = mock_beacon_adapter(ELECTRA_FORK, false); let (publish_fn, publish_fn_args) = mock_publish_fn(); let blob_count = 2; let (block, blobs_and_proofs) = create_test_block_and_blobs(&mock_adapter, blob_count); @@ -347,7 +351,7 @@ mod get_blobs_v1 { let processing_status = fetch_and_process_engine_blobs_inner( mock_adapter, block_root, - block, + Arc::new(PartialDataColumnHeader::try_from(block.as_ref()).unwrap()), &custody_columns, publish_fn, ) @@ -372,7 +376,7 @@ mod get_blobs_v1 { #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn test_fetch_blobs_v1_block_imported_after_el_response() { - let mut mock_adapter = mock_beacon_adapter(ELECTRA_FORK); + let mut mock_adapter = mock_beacon_adapter(ELECTRA_FORK, false); let (publish_fn, publish_fn_args) = mock_publish_fn(); let (block, blobs_and_proofs) = create_test_block_and_blobs(&mock_adapter, 2); let block_root = block.canonical_root(); @@ -387,7 +391,7 @@ mod get_blobs_v1 { let processing_status = fetch_and_process_engine_blobs_inner( mock_adapter, block_root, - block, + Arc::new(PartialDataColumnHeader::try_from(block.as_ref()).unwrap()), &custody_columns, publish_fn, ) @@ -405,7 +409,7 @@ mod get_blobs_v1 { #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn test_fetch_blobs_v1_no_new_blobs_to_import() { - let mut mock_adapter = mock_beacon_adapter(ELECTRA_FORK); + let mut mock_adapter = mock_beacon_adapter(ELECTRA_FORK, false); let (publish_fn, publish_fn_args) = mock_publish_fn(); let (block, blobs_and_proofs) = create_test_block_and_blobs(&mock_adapter, 2); let block_root = block.canonical_root(); @@ -435,7 +439,7 @@ mod get_blobs_v1 { let 
processing_status = fetch_and_process_engine_blobs_inner( mock_adapter, block_root, - block, + Arc::new(PartialDataColumnHeader::try_from(block.as_ref()).unwrap()), &custody_columns, publish_fn, ) @@ -453,7 +457,7 @@ mod get_blobs_v1 { #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn test_fetch_blobs_v1_success() { - let mut mock_adapter = mock_beacon_adapter(ELECTRA_FORK); + let mut mock_adapter = mock_beacon_adapter(ELECTRA_FORK, false); let (publish_fn, publish_fn_args) = mock_publish_fn(); let blob_count = 2; let (block, blobs_and_proofs) = create_test_block_and_blobs(&mock_adapter, blob_count); @@ -479,7 +483,7 @@ mod get_blobs_v1 { let processing_status = fetch_and_process_engine_blobs_inner( mock_adapter, block_root, - block, + Arc::new(PartialDataColumnHeader::try_from(block.as_ref()).unwrap()), &custody_columns, publish_fn, ) @@ -606,10 +610,11 @@ fn mock_publish_fn() -> ( (publish_fn, captured_args) } -fn mock_beacon_adapter(fork_name: ForkName) -> MockFetchBlobsBeaconAdapter { +fn mock_beacon_adapter(fork_name: ForkName, get_blobs_v3: bool) -> MockFetchBlobsBeaconAdapter { let test_runtime = TestRuntime::default(); let spec = Arc::new(fork_name.make_genesis_spec(E::default_spec())); let kzg = get_kzg(&spec); + let partial_assembler = PartialDataColumnAssembler::new(NonZeroUsize::new(32).unwrap()); let mut mock_adapter = MockFetchBlobsBeaconAdapter::default(); mock_adapter.expect_spec().return_const(spec.clone()); @@ -618,4 +623,10 @@ fn mock_beacon_adapter(fork_name: ForkName) -> MockFetchBlobsBeaconAdapter { .expect_executor() .return_const(test_runtime.task_executor.clone()); mock_adapter + .expect_supports_get_blobs_v3() + .returning(move || Ok(get_blobs_v3)); + mock_adapter + .expect_partial_assembler() + .return_const(Some(Arc::new(partial_assembler))); + mock_adapter } diff --git a/beacon_node/beacon_chain/src/kzg_utils.rs b/beacon_node/beacon_chain/src/kzg_utils.rs index 10cb208729..9641aec47d 100644 --- 
a/beacon_node/beacon_chain/src/kzg_utils.rs +++ b/beacon_node/beacon_chain/src/kzg_utils.rs @@ -6,7 +6,10 @@ use ssz_types::{FixedVector, VariableList}; use std::sync::Arc; use tracing::instrument; use tree_hash::TreeHash; -use types::data::{Cell, DataColumn, DataColumnSidecarError}; +use types::data::{ + Cell, CellBitmap, ColumnIndex, DataColumn, DataColumnSidecarError, PartialDataColumn, + PartialDataColumnHeader, PartialDataColumnSidecarRef, +}; use types::kzg_ext::KzgCommitments; use types::{ Blob, BlobSidecar, BlobSidecarList, ChainSpec, DataColumnSidecar, DataColumnSidecarFulu, @@ -45,14 +48,13 @@ pub fn validate_blob( kzg.verify_blob_kzg_proof(kzg_blob, kzg_commitment, kzg_proof) } -/// Validate a batch of `DataColumnSidecar`. -pub fn validate_data_columns<'a, E: EthSpec, I>( +/// Validate a batch of full `DataColumnSidecar`s. +/// +/// Full columns have all cells present, so we iterate over all cells directly. +pub fn validate_full_data_columns<'a, E: EthSpec>( kzg: &Kzg, - data_column_iter: I, -) -> Result<(), (Option, KzgError)> -where - I: Iterator>> + Clone, -{ + data_column_iter: impl Iterator>>, +) -> Result<(), (Option, KzgError)> { let mut cells = Vec::new(); let mut proofs = Vec::new(); let mut column_indices = Vec::new(); @@ -109,6 +111,59 @@ where kzg.verify_cell_proof_batch(&cells, &proofs, column_indices, &commitments) } +/// Validate a batch of partial `VerifiablePartialDataColumn`s. +/// +/// Partial columns may have missing cells, indicated by a bitmap. We only verify present cells. 
+pub fn validate_partial_data_columns<'a, E: EthSpec>( + kzg: &Kzg, + data_column_iter: impl Iterator)>, + kzg_commitments: &[KzgCommitment], +) -> Result<(), (Option, KzgError)> { + let mut cells = Vec::new(); + let mut proofs = Vec::new(); + let mut column_indices = Vec::new(); + let mut commitments = Vec::new(); + + for (col_index, sidecar) in data_column_iter { + if sidecar.column.is_empty() { + return Err((Some(col_index), KzgError::KzgVerificationFailed)); + } + + // Partial columns have a bitmap indicating present cells + // We iterate over the bitmap and only process present cells + let mut present_iterator = sidecar.column.iter().zip(sidecar.kzg_proofs.iter()); + for (present, commitment) in sidecar.cells_present_bitmap.iter().zip(kzg_commitments) { + if present { + let (cell, proof) = present_iterator.next().ok_or(( + Some(col_index), + KzgError::InconsistentArrayLength( + "Partial column has fewer cells than bitmap indicates".to_string(), + ), + ))?; + cells.push(ssz_cell_to_crypto_cell::(cell).map_err(|e| (Some(col_index), e))?); + column_indices.push(col_index); + proofs.push(proof.0); + commitments.push(commitment.0); + } + } + + let expected_len = column_indices.len(); + + // We make this check at each iteration so that the error is attributable to a specific column + if cells.len() != expected_len + || proofs.len() != expected_len + || commitments.len() != expected_len + { + return Err(( + Some(col_index), + KzgError::InconsistentArrayLength("Invalid data column".to_string()), + )); + } + } + + kzg.verify_cell_proof_batch(&cells, &proofs, column_indices, &commitments) +} + /// Validate a batch of blob-commitment-proof triplets from multiple `BlobSidecars`. pub fn validate_blobs( kzg: &Kzg, @@ -241,6 +296,46 @@ pub fn blobs_to_data_column_sidecars( } } +/// Build data column sidecars from a signed beacon block and its blobs. 
+#[instrument(skip_all, level = "debug", fields(blob_count = blobs_and_proofs.len()))]
+pub fn blobs_to_partial_data_columns(
+ blobs_and_proofs: Vec, &[KzgProof])>>,
+ header: &PartialDataColumnHeader,
+ kzg: &Kzg,
+ spec: &ChainSpec,
+) -> Result>, DataColumnSidecarError> {
+ if blobs_and_proofs.is_empty() {
+ return Ok(vec![]);
+ }
+
+ let blob_cells_and_proofs_vec = blobs_and_proofs
+ .into_par_iter()
+ .map(|maybe_blob_and_proofs| {
+ let Some((blob, proofs)) = maybe_blob_and_proofs else {
+ return Ok(None);
+ };
+
+ let blob = blob.as_ref().try_into().map_err(|e| {
+ KzgError::InconsistentArrayLength(format!(
+ "blob should have a guaranteed size due to FixedVector: {e:?}"
+ ))
+ })?;
+
+ kzg.compute_cells(blob).and_then(|cells| {
+ let proofs = proofs.try_into().map_err(|e| {
+ KzgError::InconsistentArrayLength(format!(
+ "proof chunks should have exactly `number_of_columns` proofs: {e:?}"
+ ))
+ })?;
+ Ok(Some((cells, proofs)))
+ })
+ })
+ .collect::, KzgError>>()?;
+
+ build_partial_data_columns(header, blob_cells_and_proofs_vec, spec)
+ .map_err(DataColumnSidecarError::BuildSidecarFailed)
+}
+
 pub fn compute_cells(blobs: &[&Blob], kzg: &Kzg) -> Result, KzgError> {
 let cells_vec = blobs
 .into_par_iter()
@@ -330,7 +425,6 @@ pub(crate) fn build_data_column_sidecars_fulu(
 sidecars
 }
-
 pub(crate) fn build_data_column_sidecars_gloas(
 beacon_block_root: Hash256,
 slot: Slot,
@@ -396,6 +490,87 @@ pub(crate) fn build_data_column_sidecars_gloas(
 sidecars
 }
+pub(crate) fn build_partial_data_columns(
+ header: &PartialDataColumnHeader,
+ blob_cells_and_proofs_vec: Vec>,
+ spec: &ChainSpec,
+) -> Result>, String> {
+ let number_of_columns = E::number_of_columns();
+ let max_blobs_per_block =
+ spec.max_blobs_per_block(header.slot().epoch(E::slots_per_epoch())) as usize;
+ let mut bitmap =
+ CellBitmap::::with_capacity(blob_cells_and_proofs_vec.len()).map_err(|_| {
+ format!(
+ "Exceeded max commitment count: {} (got {})",
+ E::max_blob_commitments_per_block(),
blob_cells_and_proofs_vec.len() + ) + })?; + let mut columns = vec![Vec::with_capacity(max_blobs_per_block); number_of_columns]; + let mut column_kzg_proofs = vec![Vec::with_capacity(max_blobs_per_block); number_of_columns]; + + for (idx, maybe_cells_and_proofs) in blob_cells_and_proofs_vec.into_iter().enumerate() { + let Some((blob_cells, blob_cell_proofs)) = maybe_cells_and_proofs else { + continue; + }; + + bitmap + .set(idx, true) + .expect("bitmap constructed from iterator length above"); + + // we iterate over each column, and we construct the column from "top to bottom", + // pushing on the cell and the corresponding proof at each column index. we do this for + // each blob (i.e. the outer loop). + for col in 0..number_of_columns { + let cell = blob_cells + .get(col) + .ok_or(format!("Missing blob cell at index {col}"))?; + let cell: Vec = cell.to_vec(); + let cell = + Cell::::try_from(cell).map_err(|e| format!("BytesPerCell exceeded: {e:?}"))?; + + let proof = blob_cell_proofs + .get(col) + .ok_or(format!("Missing blob cell KZG proof at index {col}"))?; + + let column = columns + .get_mut(col) + .ok_or(format!("Missing data column at index {col}"))?; + let column_proofs = column_kzg_proofs + .get_mut(col) + .ok_or(format!("Missing data column proofs at index {col}"))?; + + column.push(cell); + column_proofs.push(*proof); + } + } + + let block_root = header.signed_block_header.message.canonical_root(); + + let sidecars: Result>, String> = columns + .into_iter() + .zip(column_kzg_proofs) + .enumerate() + .map(|(index, (col, proofs))| { + let column = PartialDataColumn { + block_root, + index: index as u64, + sidecar: types::data::PartialDataColumnSidecar { + cells_present_bitmap: bitmap.clone(), + column: VariableList::try_from(col) + .map_err(|e| format!("MaxBlobCommitmentsPerBlock exceeded: {e:?}"))?, + kzg_proofs: VariableList::try_from(proofs) + .map_err(|e| format!("MaxBlobCommitmentsPerBlock exceeded: {e:?}"))?, + header: None.into(), + }, + }; + 
Ok(column) + }) + .collect(); + + sidecars +} + // TODO(gloas) blob reconstruction will fail post gloas. We should just return `Blob`s // instead of a `BlobSidecar`. This might require a beacon api spec change as well. /// Reconstruct blobs from a subset of data column sidecars (requires at least 50%). @@ -473,21 +648,9 @@ pub fn reconstruct_blobs( let blob = Blob::::new(blob_bytes).map_err(|e| format!("{e:?}"))?; let kzg_proof = KzgProof::empty(); - BlobSidecar::::new_with_existing_proof( - row_index, - blob, - signed_block, - first_data_column - .signed_block_header() - .map_err(|e| format!("{e:?}"))? - .clone(), - first_data_column - .kzg_commitments_inclusion_proof() - .map_err(|e| format!("{e:?}"))?, - kzg_proof, - ) - .map(Arc::new) - .map_err(|e| format!("{e:?}")) + BlobSidecar::::new_with_existing_proof(row_index, blob, signed_block, kzg_proof) + .map(Arc::new) + .map_err(|e| format!("{e:?}")) }) .collect::, _>>()?; @@ -566,7 +729,7 @@ pub fn reconstruct_data_columns( mod test { use crate::kzg_utils::{ blobs_to_data_column_sidecars, reconstruct_blobs, reconstruct_data_columns, - validate_data_columns, + validate_full_data_columns, }; use bls::Signature; use eth2::types::BlobsBundle; @@ -605,7 +768,7 @@ mod test { blobs_to_data_column_sidecars(&blob_refs, proofs.to_vec(), &signed_block, kzg, spec) .unwrap(); - let result = validate_data_columns::(kzg, column_sidecars.iter()); + let result = validate_full_data_columns(kzg, column_sidecars.iter()); assert!(result.is_ok()); } diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index a8a706d8bc..7631e6b904 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -43,6 +43,7 @@ pub mod observed_block_producers; pub mod observed_data_sidecars; pub mod observed_operations; mod observed_slashable; +pub mod partial_data_column_assembler; pub mod payload_bid_verification; pub mod payload_envelope_streamer; pub mod payload_envelope_verification; 
diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index 5485f0a9e3..ce136ef3fc 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -1686,6 +1686,56 @@ pub static DATA_COLUMN_SIDECAR_GOSSIP_VERIFICATION_TIMES: LazyLock> = + LazyLock::new(|| { + try_create_int_counter( + "beacon_partial_data_column_sidecar_header_processing_requests_total", + "Count of all partial data column sidecars submitted for processing", + ) + }); +pub static PARTIAL_DATA_COLUMN_SIDECAR_HEADER_PROCESSING_DUPES: LazyLock> = + LazyLock::new(|| { + try_create_int_counter( + "beacon_partial_data_column_sidecar_header_processing_dupes_total", + "Number of partial data column sidecars verified for gossip (excluding dupes)", + ) + }); +pub static PARTIAL_DATA_COLUMN_SIDECAR_HEADER_PROCESSING_SUCCESSES: LazyLock> = + LazyLock::new(|| { + try_create_int_counter( + "beacon_partial_data_column_sidecar_header_processing_successes_total", + "Number of partial data column sidecar headers verified for gossip (excluding dupes)", + ) + }); +pub static PARTIAL_DATA_COLUMN_SIDECAR_HEADER_GOSSIP_VERIFICATION_TIMES: LazyLock< + Result, +> = LazyLock::new(|| { + try_create_histogram( + "beacon_partial_data_column_sidecar_header_gossip_verification_seconds", + "Full runtime of partial data column sidecar headers gossip verification", + ) +}); +pub static PARTIAL_DATA_COLUMN_SIDECAR_PROCESSING_REQUESTS: LazyLock> = + LazyLock::new(|| { + try_create_int_counter( + "beacon_partial_data_column_sidecar_processing_requests_total", + "Count of all partial data column sidecars submitted for processing", + ) + }); +pub static PARTIAL_DATA_COLUMN_SIDECAR_PROCESSING_SUCCESSES: LazyLock> = + LazyLock::new(|| { + try_create_int_counter( + "beacon_partial_data_column_sidecar_processing_successes_total", + "Number of partial data column sidecars verified for gossip", + ) + }); +pub static 
PARTIAL_DATA_COLUMN_SIDECAR_GOSSIP_VERIFICATION_TIMES: LazyLock> = + LazyLock::new(|| { + try_create_histogram( + "beacon_partial_data_column_sidecar_gossip_verification_seconds", + "Full runtime of partial data column sidecars gossip verification", + ) + }); pub static BLOBS_FROM_EL_HIT_TOTAL: LazyLock> = LazyLock::new(|| { try_create_int_counter( @@ -1755,6 +1805,70 @@ pub static BEACON_ENGINE_GET_BLOBS_V2_REQUEST_DURATION_SECONDS: LazyLock> = + LazyLock::new(|| { + try_create_int_counter( + "beacon_engine_getBlobsV3_requests_total", + "Total number of engine_getBlobsV3 requests made to the execution layer", + ) + }); + +pub static BEACON_ENGINE_GET_BLOBS_V3_COMPLETE_RESPONSES_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_counter( + "beacon_engine_getBlobsV3_complete_responses_total", + "Total number of successful engine_getBlobsV3 responses from the execution layer \ + with all blobs", + ) + }); + +pub static BEACON_ENGINE_GET_BLOBS_V3_PARTIAL_RESPONSES_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_counter( + "beacon_engine_getBlobsV3_partial_responses_total", + "Total number of successful engine_getBlobsV3 responses from the execution layer \ + with at least one blob missing", + ) + }); + +pub static BEACON_ENGINE_GET_BLOBS_V3_REQUEST_DURATION_SECONDS: LazyLock> = + LazyLock::new(|| { + try_create_histogram( + "beacon_engine_getBlobsV3_request_duration_seconds", + "Duration of engine_getBlobsV3 requests to the execution layer in seconds", + ) + }); + +/* + * Standardized metrics for partial column efficiency + */ +pub static BEACON_PARTIAL_MESSAGE_USEFUL_CELLS_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_counter_vec( + "beacon_partial_message_useful_cells_total", + "Number of useful cells received via a partial message", + &["column_index"], + ) + }); + +pub static BEACON_PARTIAL_MESSAGE_CELLS_RECEIVED_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_counter_vec( + "beacon_partial_message_cells_received_total", + 
"Number of total cells received via a partial message", + &["column_index"], + ) + }); + +pub static BEACON_PARTIAL_MESSAGE_COLUMN_COMPLETIONS_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_counter_vec( + "beacon_partial_message_column_completions_total", + "How often the partial message first completed the column", + &["column_index"], + ) + }); + /* * Light server message verification */ diff --git a/beacon_node/beacon_chain/src/observed_data_sidecars.rs b/beacon_node/beacon_chain/src/observed_data_sidecars.rs index 894b8d3444..2461c8115d 100644 --- a/beacon_node/beacon_chain/src/observed_data_sidecars.rs +++ b/beacon_node/beacon_chain/src/observed_data_sidecars.rs @@ -6,7 +6,9 @@ use std::collections::{HashMap, HashSet}; use std::marker::PhantomData; use std::sync::Arc; -use types::{BlobSidecar, ChainSpec, DataColumnSidecar, EthSpec, Hash256, SignedBeaconBlock, Slot}; +use types::{ + BlobSidecar, ChainSpec, DataColumnSidecar, EthSpec, Hash256, PartialDataColumnHeader, Slot, +}; type ValidatorIndex = u64; type BeaconBlockRoot = Hash256; @@ -102,17 +104,17 @@ impl ObservationKey { } } - pub fn from_block( - block: &SignedBeaconBlock, + pub fn from_partial_column_header( + header: &PartialDataColumnHeader, block_root: Hash256, spec: &ChainSpec, ) -> Self { - let slot = block.slot(); + let slot = header.slot(); if spec.fork_name_at_slot::(slot).gloas_enabled() { Self::new_block_root_key(block_root, slot) } else { - Self::new_proposer_key(block.message().proposer_index(), slot) + Self::new_proposer_key(header.signed_block_header.message.proposer_index, slot) } } diff --git a/beacon_node/beacon_chain/src/partial_data_column_assembler.rs b/beacon_node/beacon_chain/src/partial_data_column_assembler.rs new file mode 100644 index 0000000000..0ce754c8a0 --- /dev/null +++ b/beacon_node/beacon_chain/src/partial_data_column_assembler.rs @@ -0,0 +1,569 @@ +use crate::data_column_verification::{ + KzgVerifiedCustodyDataColumn, KzgVerifiedCustodyPartialDataColumn, +}; 
+use lru::LruCache; +use parking_lot::RwLock; +use std::collections::HashMap; +use std::num::NonZeroUsize; +use std::sync::Arc; +use tracing::error; +use types::core::{Epoch, EthSpec, Hash256}; +use types::data::{ColumnIndex, PartialDataColumnHeader}; + +/// Assembles partial data columns into complete columns +pub struct PartialDataColumnAssembler { + /// Cache of assemblies keyed by block root + assemblies: RwLock>>, +} + +/// Tracks partial columns being assembled for a single block +struct PartialAssembly { + header: Arc>, + has_local_blobs: bool, + /// Map of column_index -> partial column being assembled + columns: HashMap>, +} + +#[derive(Clone, Debug)] +pub enum AssemblyColumn { + // As the actual column is Arc'd inside, storing it redundantly here will not increase memory usage. + Complete(KzgVerifiedCustodyDataColumn), + Incomplete(KzgVerifiedCustodyPartialDataColumn), +} + +/// Result of merging a partial column +pub struct PartialMergeResult { + /// How many cells were added to the store + pub added_cells: usize, + /// Have local blobs been added yet + pub local_blobs: bool, + /// Merge that completed the column + pub full_columns: Vec>, + /// The updated partials for publishing + pub updated_partials: Vec>, +} + +impl PartialDataColumnAssembler { + pub fn new(capacity: NonZeroUsize) -> Self { + Self { + assemblies: RwLock::new(LruCache::new(capacity)), + } + } + + /// Insert a `header` for the given `block_root` into the assembler. + /// Returns true unless there already is a header for the block root. + pub fn init(&self, block_root: Hash256, header: Arc>) -> bool { + let mut assemblies = self.assemblies.write(); + + if assemblies.contains(&block_root) { + return false; + } + + let assembly = PartialAssembly { + header, + has_local_blobs: false, + columns: HashMap::new(), + }; + + assemblies.put(block_root, assembly); + + true + } + + /// Merge one or more received partial columns into the assembly. 
+ /// Returns the merge result indicating if the columns are now complete. + pub fn merge_partials( + &self, + block_root: Hash256, + partials: Vec>, + header: Arc>, + ) -> Option> { + let mut assemblies = self.assemblies.write(); + let assembly = assemblies.get_or_insert_mut(block_root, || PartialAssembly { + header: header.clone(), + has_local_blobs: false, + columns: HashMap::new(), + }); + + let mut full_columns = Vec::new(); + let mut updated_partials = Vec::new(); + let mut added_cells = 0; + + for partial in partials { + let partial_column = partial.as_data_column(); + let column_index = partial_column.index; + + let merged = if let Some(existing) = assembly.columns.get(&column_index) { + let AssemblyColumn::Incomplete(existing) = existing else { + // Already complete. + continue; + }; + let column = existing.as_data_column(); + + let old_len = column.sidecar.column.len(); + + // Merge with existing partial + let merged = match existing.merge(&partial) { + Ok(merged) => merged, + Err(err) => { + error!("Unexpected error merging partial data column: {:?}", err); + continue; + } + }; + + let adding_cells = merged + .as_data_column() + .sidecar + .column + .len() + .saturating_sub(old_len); + + added_cells += adding_cells; + + if adding_cells == 0 { + continue; + } + + merged + } else { + added_cells += partial_column.sidecar.column.len(); + // First time seeing this column index for this block + partial + }; + + // Check if merged column is now complete by trying to convert into full + let column = if let Some(full_column) = merged.try_clone_full(&header) { + full_columns.push(full_column.clone()); + AssemblyColumn::Complete(full_column) + } else { + AssemblyColumn::Incomplete(merged.clone()) + }; + + // Update assembly with merged partial + assembly.columns.insert(column_index, column); + updated_partials.push(merged); + } + + Some(PartialMergeResult { + added_cells, + local_blobs: assembly.has_local_blobs, + full_columns, + updated_partials, + }) + } + + /// 
Mark a column as assembled. Returns true if the column was previously incomplete or not + /// in the assembly at all. + pub fn mark_as_complete( + &self, + block_root: Hash256, + column: &KzgVerifiedCustodyDataColumn, + ) -> bool { + // TODO(gloas): support partial messages + let Ok(fulu) = column.as_data_column().as_fulu() else { + return false; + }; + + let mut assemblies = self.assemblies.write(); + let assembly = assemblies.get_or_insert_mut(block_root, || PartialAssembly { + header: Arc::new(PartialDataColumnHeader { + kzg_commitments: fulu.kzg_commitments.clone(), + signed_block_header: fulu.signed_block_header.clone(), + kzg_commitments_inclusion_proof: fulu.kzg_commitments_inclusion_proof.clone(), + }), + has_local_blobs: false, + columns: Default::default(), + }); + let prev = assembly + .columns + .insert(column.index(), AssemblyColumn::Complete(column.clone())); + !matches!(prev, Some(AssemblyColumn::Complete(_))) + } + + /// Returns true if the given column is complete. + pub fn is_complete(&self, block_root: Hash256, column_index: ColumnIndex) -> bool { + self.assemblies.read().peek(&block_root).is_some_and(|a| { + matches!( + a.columns.get(&column_index), + Some(AssemblyColumn::Complete(_)) + ) + }) + } + + /// Get the current partial for a specific column if it exists in assembly + pub fn get_partial( + &self, + block_root: &Hash256, + column_index: ColumnIndex, + ) -> Option> { + self.assemblies + .read() + .peek(block_root)? + .columns + .get(&column_index) + .cloned() + } + + /// Get all current partials for a block for publishing after fetching local blobs. + /// To unlock future publishing, mark blobs as fetched locally. + /// We do this within one write lock to avoid useless double publishes. 
+ pub fn get_partials_and_mark_as_local_fetched( + &self, + block_root: Hash256, + header: &Arc>, + ) -> Vec> { + let mut assemblies = self.assemblies.write(); + let assembly = assemblies.get_or_insert_mut(block_root, || PartialAssembly { + header: header.clone(), + has_local_blobs: true, + columns: Default::default(), + }); + + assembly.has_local_blobs = true; + + assembly + .columns + .values() + .filter_map(|value| { + if let AssemblyColumn::Incomplete(partial) = value { + Some(partial.clone()) + } else { + None + } + }) + .collect() + } + + /// Get header for a block if we have an active assembly + pub fn get_header(&self, block_root: &Hash256) -> Option>> { + self.assemblies + .read() + .peek(block_root) + .map(|a| a.header.clone()) + } + + /// Maintenance: remove assemblies older than cutoff epoch + pub fn do_maintenance(&self, cutoff_epoch: Epoch) { + let mut assemblies = self.assemblies.write(); + let mut to_remove = vec![]; + + for (root, assembly) in assemblies.iter() { + if assembly + .header + .signed_block_header + .message + .slot + .epoch(E::slots_per_epoch()) + < cutoff_epoch + { + to_remove.push(*root); + } + } + + for root in to_remove { + assemblies.pop(&root); + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::data_column_verification::{ + KzgVerifiedCustodyPartialDataColumn, KzgVerifiedDataColumn, KzgVerifiedPartialDataColumn, + }; + use bls::{FixedBytesExtended, Signature}; + use kzg::{KzgCommitment, KzgProof}; + use ssz_types::{FixedVector, VariableList}; + use types::block::{BeaconBlockHeader, SignedBeaconBlockHeader}; + use types::core::{EthSpec, Hash256, MinimalEthSpec, Slot}; + use types::data::{ + Cell, CellBitmap, DataColumnSidecar, DataColumnSidecarFulu, PartialDataColumn, + PartialDataColumnSidecar, + }; + + type E = MinimalEthSpec; + + fn make_cell(marker: u8) -> Cell { + let mut cell = Cell::::default(); + cell[0] = marker; + cell + } + + fn make_header(num_commitments: usize) -> PartialDataColumnHeader { + 
PartialDataColumnHeader { + kzg_commitments: vec![KzgCommitment([0u8; 48]); num_commitments] + .try_into() + .unwrap(), + signed_block_header: SignedBeaconBlockHeader { + message: BeaconBlockHeader { + slot: Slot::new(1), + proposer_index: 0, + parent_root: Hash256::zero(), + state_root: Hash256::zero(), + body_root: Hash256::zero(), + }, + signature: Signature::empty(), + }, + kzg_commitments_inclusion_proof: FixedVector::new( + vec![Hash256::zero(); E::kzg_commitments_inclusion_proof_depth()], + ) + .unwrap(), + } + } + + fn make_partial( + block_root: Hash256, + column_index: ColumnIndex, + total_blobs: usize, + present_indices: &[usize], + ) -> KzgVerifiedCustodyPartialDataColumn { + make_partial_with_header(block_root, column_index, total_blobs, present_indices, true) + } + + fn make_partial_with_header( + block_root: Hash256, + column_index: ColumnIndex, + total_blobs: usize, + present_indices: &[usize], + include_header: bool, + ) -> KzgVerifiedCustodyPartialDataColumn { + let mut bitmap = CellBitmap::::with_capacity(total_blobs).unwrap(); + for &idx in present_indices { + bitmap.set(idx, true).unwrap(); + } + + let column: VariableList<_, _> = present_indices + .iter() + .map(|&idx| make_cell(idx as u8)) + .collect::>() + .try_into() + .unwrap(); + let proofs: VariableList<_, _> = present_indices + .iter() + .map(|_| KzgProof::empty()) + .collect::>() + .try_into() + .unwrap(); + + let header = include_header.then(|| make_header(total_blobs)).into(); + + let partial = PartialDataColumn { + block_root, + index: column_index, + sidecar: PartialDataColumnSidecar { + cells_present_bitmap: bitmap, + column, + kzg_proofs: proofs, + header, + }, + }; + KzgVerifiedCustodyPartialDataColumn::from_asserted_custody( + KzgVerifiedPartialDataColumn::__new_for_testing(Arc::new(partial)), + ) + } + + fn make_full_column(fulu: DataColumnSidecarFulu) -> KzgVerifiedCustodyDataColumn { + KzgVerifiedCustodyDataColumn::from_asserted_custody( + 
KzgVerifiedDataColumn::__new_for_testing(Arc::new(DataColumnSidecar::Fulu(fulu))), + ) + } + + fn make_assembler() -> PartialDataColumnAssembler { + PartialDataColumnAssembler::new(NonZeroUsize::new(16).unwrap()) + } + + // -- init and get_header tests -- + + #[test] + fn init_stores_header() { + let assembler = make_assembler(); + let root = Hash256::repeat_byte(1); + let header = make_header(4); + assert!(assembler.init(root, Arc::new(header.clone()))); + let retrieved = assembler.get_header(&root).unwrap(); + assert_eq!(*retrieved, header); + } + + #[test] + fn init_returns_false_if_already_exists() { + let assembler = make_assembler(); + let root = Hash256::repeat_byte(1); + let header = Arc::new(make_header(4)); + assert!(assembler.init(root, header.clone())); + assert!(!assembler.init(root, header)); + } + + // -- merge_partials tests -- + + #[test] + fn merge_partials_tracks_added_cells() { + let assembler = make_assembler(); + let root = Hash256::repeat_byte(1); + let header = Arc::new(make_header(4)); + + let partial = make_partial(root, 0, 4, &[0, 1, 2]); + let result = assembler + .merge_partials(root, vec![partial], header.clone()) + .unwrap(); + assert_eq!(result.added_cells, 3); + + // Merge more cells for the same column + let partial2 = make_partial(root, 0, 4, &[2, 3]); + let result2 = assembler + .merge_partials(root, vec![partial2], header) + .unwrap(); + // Only cell 3 is new (cell 2 was already present) + assert_eq!(result2.added_cells, 1); + } + + #[test] + fn merge_partials_ignores_already_complete_column() { + let assembler = make_assembler(); + let root = Hash256::repeat_byte(1); + let header = Arc::new(make_header(4)); + + // Complete the column + let partial = make_partial(root, 0, 4, &[0, 1, 2, 3]); + let result = assembler + .merge_partials(root, vec![partial], header.clone()) + .unwrap(); + assert_eq!(result.added_cells, 4); + assert_eq!(result.full_columns.len(), 1); + + // Try to merge more — should be ignored + let partial2 = 
make_partial(root, 0, 4, &[0, 1]); + let result2 = assembler + .merge_partials(root, vec![partial2], header) + .unwrap(); + assert_eq!(result2.added_cells, 0); + assert!(result2.full_columns.is_empty()); + } + + #[test] + fn merge_partials_completes_column_progressively() { + let assembler = make_assembler(); + let root = Hash256::repeat_byte(1); + let header = Arc::new(make_header(4)); + + let partial1 = make_partial(root, 0, 4, &[0, 1]); + let result1 = assembler + .merge_partials(root, vec![partial1], header.clone()) + .unwrap(); + assert!(result1.full_columns.is_empty()); + + let partial2 = make_partial(root, 0, 4, &[2, 3]); + let result2 = assembler + .merge_partials(root, vec![partial2], header) + .unwrap(); + assert_eq!(result2.full_columns.len(), 1); + } + + #[test] + fn merge_partials_returns_updated_partials() { + let assembler = make_assembler(); + let root = Hash256::repeat_byte(1); + let header = Arc::new(make_header(4)); + + let partial = make_partial(root, 0, 4, &[0, 2]); + let result = assembler + .merge_partials(root, vec![partial], header) + .unwrap(); + assert_eq!(result.updated_partials.len(), 1); + assert_eq!(result.updated_partials[0].index(), 0); + } + + // -- mark_as_complete tests -- + + #[test] + fn mark_as_complete_replaces_incomplete() { + let assembler = make_assembler(); + let root = Hash256::repeat_byte(1); + let header = Arc::new(make_header(4)); + + // Merge an incomplete partial first + let partial = make_partial(root, 0, 4, &[0, 1]); + assembler.merge_partials(root, vec![partial], header); + + let full_column = make_full_column(DataColumnSidecarFulu:: { + index: 0, + column: vec![Cell::::default(); 4].try_into().unwrap(), + kzg_commitments: vec![KzgCommitment([0u8; 48]); 4].try_into().unwrap(), + kzg_proofs: vec![KzgProof::empty(); 4].try_into().unwrap(), + signed_block_header: SignedBeaconBlockHeader { + message: BeaconBlockHeader { + slot: Slot::new(1), + proposer_index: 0, + parent_root: Hash256::zero(), + state_root: 
Hash256::zero(), + body_root: Hash256::zero(), + }, + signature: Signature::empty(), + }, + kzg_commitments_inclusion_proof: FixedVector::new( + vec![Hash256::zero(); E::kzg_commitments_inclusion_proof_depth()], + ) + .unwrap(), + }); + assert!(assembler.mark_as_complete(root, &full_column)); + } + + #[test] + fn mark_as_complete_returns_false_if_already_complete() { + let assembler = make_assembler(); + let root = Hash256::repeat_byte(1); + + let full_column = make_full_column(DataColumnSidecarFulu:: { + index: 0, + column: vec![Cell::::default(); 4].try_into().unwrap(), + kzg_commitments: vec![KzgCommitment([0u8; 48]); 4].try_into().unwrap(), + kzg_proofs: vec![KzgProof::empty(); 4].try_into().unwrap(), + signed_block_header: SignedBeaconBlockHeader { + message: BeaconBlockHeader { + slot: Slot::new(1), + proposer_index: 0, + parent_root: Hash256::zero(), + state_root: Hash256::zero(), + body_root: Hash256::zero(), + }, + signature: Signature::empty(), + }, + kzg_commitments_inclusion_proof: FixedVector::new( + vec![Hash256::zero(); E::kzg_commitments_inclusion_proof_depth()], + ) + .unwrap(), + }); + assert!(assembler.mark_as_complete(root, &full_column)); + assert!(!assembler.mark_as_complete(root, &full_column)); + } + + // -- do_maintenance tests -- + + #[test] + fn do_maintenance_removes_old_assemblies() { + let assembler = make_assembler(); + let root = Hash256::repeat_byte(1); + // Header at slot 0 → epoch 0 + let header = Arc::new(make_header(4)); + assembler.init(root, header); + assert!(assembler.get_header(&root).is_some()); + + // Cutoff epoch 1 removes epoch 0 + assembler.do_maintenance(Epoch::new(1)); + assert!(assembler.get_header(&root).is_none()); + } + + #[test] + fn do_maintenance_keeps_recent_assemblies() { + let assembler = make_assembler(); + let root = Hash256::repeat_byte(1); + // Header at slot 100 → epoch 100/8 = 12 for MinimalEthSpec (8 slots/epoch) + let mut header = make_header(4); + header.signed_block_header.message.slot = 
Slot::new(100); + let header = Arc::new(header); + assembler.init(root, header); + + assembler.do_maintenance(Epoch::new(1)); + assert!(assembler.get_header(&root).is_some()); + } +} diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 00a2ed64f1..e628a81459 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -239,6 +239,7 @@ pub fn test_da_checker( kzg, custody_context, spec, + true, ) .expect("should initialise data availability checker") } diff --git a/beacon_node/beacon_processor/src/lib.rs b/beacon_node/beacon_processor/src/lib.rs index a6c76beb31..ea87e9bc71 100644 --- a/beacon_node/beacon_processor/src/lib.rs +++ b/beacon_node/beacon_processor/src/lib.rs @@ -392,6 +392,7 @@ pub enum Work { GossipBlock(AsyncFn), GossipBlobSidecar(AsyncFn), GossipDataColumnSidecar(AsyncFn), + GossipPartialDataColumnSidecar(AsyncFn), DelayedImportBlock { beacon_block_slot: Slot, beacon_block_root: Hash256, @@ -470,6 +471,7 @@ pub enum WorkType { GossipBlock, GossipBlobSidecar, GossipDataColumnSidecar, + GossipPartialDataColumnSidecar, DelayedImportBlock, DelayedImportEnvelope, GossipVoluntaryExit, @@ -524,6 +526,7 @@ impl Work { Work::GossipBlock(_) => WorkType::GossipBlock, Work::GossipBlobSidecar(_) => WorkType::GossipBlobSidecar, Work::GossipDataColumnSidecar(_) => WorkType::GossipDataColumnSidecar, + Work::GossipPartialDataColumnSidecar(_) => WorkType::GossipPartialDataColumnSidecar, Work::DelayedImportBlock { .. } => WorkType::DelayedImportBlock, Work::DelayedImportEnvelope { .. 
} => WorkType::DelayedImportEnvelope, Work::GossipVoluntaryExit(_) => WorkType::GossipVoluntaryExit, @@ -836,6 +839,10 @@ impl BeaconProcessor { Some(item) } else if let Some(item) = work_queues.gossip_data_column_queue.pop() { Some(item) + } else if let Some(item) = + work_queues.gossip_partial_data_column_queue.pop() + { + Some(item) } else if let Some(item) = work_queues.column_reconstruction_queue.pop() { Some(item) // Check the priority 0 API requests after blocks and blobs, but before attestations. @@ -1146,6 +1153,9 @@ impl BeaconProcessor { Work::GossipDataColumnSidecar { .. } => { work_queues.gossip_data_column_queue.push(work, work_id) } + Work::GossipPartialDataColumnSidecar { .. } => work_queues + .gossip_partial_data_column_queue + .push(work, work_id), Work::DelayedImportBlock { .. } => { work_queues.delayed_block_queue.push(work, work_id) } @@ -1284,6 +1294,9 @@ impl BeaconProcessor { WorkType::GossipDataColumnSidecar => { work_queues.gossip_data_column_queue.len() } + WorkType::GossipPartialDataColumnSidecar => { + work_queues.gossip_partial_data_column_queue.len() + } WorkType::DelayedImportBlock => work_queues.delayed_block_queue.len(), WorkType::DelayedImportEnvelope => work_queues.delayed_envelope_queue.len(), WorkType::GossipVoluntaryExit => { @@ -1506,6 +1519,7 @@ impl BeaconProcessor { Work::GossipBlock(work) | Work::GossipBlobSidecar(work) | Work::GossipDataColumnSidecar(work) + | Work::GossipPartialDataColumnSidecar(work) | Work::GossipExecutionPayload(work) => task_spawner.spawn_async(async move { work.await; }), diff --git a/beacon_node/beacon_processor/src/scheduler/work_queue.rs b/beacon_node/beacon_processor/src/scheduler/work_queue.rs index 363ec06097..f7163d538b 100644 --- a/beacon_node/beacon_processor/src/scheduler/work_queue.rs +++ b/beacon_node/beacon_processor/src/scheduler/work_queue.rs @@ -126,6 +126,7 @@ pub struct BeaconProcessorQueueLengths { gossip_block_queue: usize, gossip_blob_queue: usize, gossip_data_column_queue: 
usize, + gossip_partial_data_column_queue: usize, delayed_block_queue: usize, delayed_envelope_queue: usize, status_queue: usize, @@ -199,6 +200,7 @@ impl BeaconProcessorQueueLengths { gossip_block_queue: 1024, gossip_blob_queue: 1024, gossip_data_column_queue: 1024, + gossip_partial_data_column_queue: 1024, delayed_block_queue: 1024, delayed_envelope_queue: 1024, status_queue: 1024, @@ -255,6 +257,7 @@ pub struct WorkQueues { pub gossip_block_queue: FifoQueue>, pub gossip_blob_queue: FifoQueue>, pub gossip_data_column_queue: FifoQueue>, + pub gossip_partial_data_column_queue: FifoQueue>, pub delayed_block_queue: FifoQueue>, pub delayed_envelope_queue: FifoQueue>, pub status_queue: FifoQueue>, @@ -323,6 +326,8 @@ impl WorkQueues { let gossip_block_queue = FifoQueue::new(queue_lengths.gossip_block_queue); let gossip_blob_queue = FifoQueue::new(queue_lengths.gossip_blob_queue); let gossip_data_column_queue = FifoQueue::new(queue_lengths.gossip_data_column_queue); + let gossip_partial_data_column_queue = + FifoQueue::new(queue_lengths.gossip_partial_data_column_queue); let delayed_block_queue = FifoQueue::new(queue_lengths.delayed_block_queue); let delayed_envelope_queue = FifoQueue::new(queue_lengths.delayed_envelope_queue); @@ -388,6 +393,7 @@ impl WorkQueues { gossip_block_queue, gossip_blob_queue, gossip_data_column_queue, + gossip_partial_data_column_queue, delayed_block_queue, delayed_envelope_queue, status_queue, diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index 6566616c04..acf5f2778b 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -596,6 +596,7 @@ pub struct EngineCapabilities { pub get_client_version_v1: bool, pub get_blobs_v1: bool, pub get_blobs_v2: bool, + pub get_blobs_v3: bool, } impl EngineCapabilities { diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index 
b9f6289d05..110e155c77 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -64,6 +64,7 @@ pub const ENGINE_GET_CLIENT_VERSION_TIMEOUT: Duration = Duration::from_secs(1); pub const ENGINE_GET_BLOBS_V1: &str = "engine_getBlobsV1"; pub const ENGINE_GET_BLOBS_V2: &str = "engine_getBlobsV2"; +pub const ENGINE_GET_BLOBS_V3: &str = "engine_getBlobsV3"; pub const ENGINE_GET_BLOBS_TIMEOUT: Duration = Duration::from_secs(1); /// This error is returned during a `chainId` call by Geth. @@ -743,6 +744,20 @@ impl HttpJsonRpc { .await } + pub async fn get_blobs_v3( + &self, + versioned_hashes: Vec, + ) -> Result>>, Error> { + let params = json!([versioned_hashes]); + + self.rpc_request( + ENGINE_GET_BLOBS_V3, + params, + ENGINE_GET_BLOBS_TIMEOUT * self.execution_timeout_multiplier, + ) + .await + } + pub async fn get_block_by_number( &self, query: BlockByNumberQuery<'_>, @@ -1258,6 +1273,7 @@ impl HttpJsonRpc { get_client_version_v1: capabilities.contains(ENGINE_GET_CLIENT_VERSION_V1), get_blobs_v1: capabilities.contains(ENGINE_GET_BLOBS_V1), get_blobs_v2: capabilities.contains(ENGINE_GET_BLOBS_V2), + get_blobs_v3: capabilities.contains(ENGINE_GET_BLOBS_V3), }) } diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index a77861981f..cfff0b4d9f 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -864,6 +864,9 @@ pub struct BlobAndProof { pub proofs: KzgProofs, } +/// A BlobAndProofV3 is just a BlobAndProofV2 that may also be `null` if unknown by the EL. 
+pub type BlobAndProofV3 = Option>; + #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct JsonForkchoiceStateV1 { diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 90968fa213..4e4fe20e14 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -4,7 +4,7 @@ //! This crate only provides useful functionality for "The Merge", it does not provide any of the //! deposit-contract functionality that the `beacon_node/eth1` crate already provides. -use crate::json_structures::{BlobAndProofV1, BlobAndProofV2}; +use crate::json_structures::{BlobAndProofV1, BlobAndProofV2, BlobAndProofV3}; use crate::payload_cache::PayloadCache; use arc_swap::ArcSwapOption; use auth::{Auth, JwtKey, strip_prefix}; @@ -1741,6 +1741,23 @@ impl ExecutionLayer { } } + pub async fn get_blobs_v3( + &self, + query: Vec, + ) -> Result>>, Error> { + let capabilities = self.get_engine_capabilities(None).await?; + + if capabilities.get_blobs_v3 { + self.engine() + .request(|engine| async move { engine.api.get_blobs_v3(query).await }) + .await + .map_err(Box::new) + .map_err(Error::EngineError) + } else { + Err(Error::GetBlobsNotSupported) + } + } + pub async fn get_block_by_number( &self, query: BlockByNumberQuery<'_>, diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs index c382d8abf5..4eb03778f8 100644 --- a/beacon_node/execution_layer/src/test_utils/mod.rs +++ b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -59,6 +59,7 @@ pub const DEFAULT_ENGINE_CAPABILITIES: EngineCapabilities = EngineCapabilities { get_client_version_v1: true, get_blobs_v1: true, get_blobs_v2: true, + get_blobs_v3: true, }; pub static DEFAULT_CLIENT_VERSION: LazyLock = diff --git a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs index 340b0bbbed..6b65995a73 100644 --- 
a/beacon_node/http_api/src/publish_blocks.rs +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -16,6 +16,7 @@ use eth2::types::{ use execution_layer::{ProvenancedPayload, SubmitBlindedBlockResponse}; use futures::TryFutureExt; use lighthouse_network::PubsubMessage; +use logging::crit; use network::NetworkMessage; use rand::prelude::SliceRandom; use reqwest::StatusCode; @@ -29,8 +30,9 @@ use tracing::{Span, debug, debug_span, error, field, info, instrument, warn}; use tree_hash::TreeHash; use types::{ AbstractExecPayload, BeaconBlockRef, BlobSidecar, BlobsList, BlockImportSource, - DataColumnSubnetId, EthSpec, ExecPayload, ExecutionBlockHash, ForkName, FullPayload, - FullPayloadBellatrix, Hash256, KzgProofs, SignedBeaconBlock, SignedBlindedBeaconBlock, + DataColumnSidecar, DataColumnSubnetId, EthSpec, ExecPayload, ExecutionBlockHash, ForkName, + FullPayload, FullPayloadBellatrix, Hash256, KzgProofs, SignedBeaconBlock, + SignedBlindedBeaconBlock, }; use warp::{Rejection, Reply, reply::Response}; @@ -514,15 +516,53 @@ fn publish_column_sidecars( .collect::>(); debug!(indices = ?dropped_indices, "Dropping data columns from publishing"); } - let pubsub_messages = data_column_sidecars - .into_iter() - .map(|data_col| { - let subnet = DataColumnSubnetId::from_column_index(*data_col.index(), &chain.spec); - PubsubMessage::DataColumnSidecar(Box::new((subnet, data_col))) - }) - .collect::>(); - crate::utils::publish_pubsub_messages(sender_clone, pubsub_messages) - .map_err(|_| BlockError::BeaconChainError(Box::new(BeaconChainError::UnableToPublish))) + let mut full_messages = Vec::new(); + let mut partial_columns = Vec::new(); + let mut partial_header = None; + + for data_col in data_column_sidecars { + if chain.config.enable_partial_columns + && let DataColumnSidecar::Fulu(fulu_data_col) = data_col.as_ref() + { + let mut partial = fulu_data_col.to_partial(); + if let Some(header) = partial.sidecar.header.take() { + partial_header = Some(header); + } + 
partial_columns.push(Arc::new(partial)); + } + + let subnet = DataColumnSubnetId::from_column_index(*data_col.index(), &chain.spec); + full_messages.push(PubsubMessage::DataColumnSidecar(Box::new(( + subnet, data_col, + )))); + } + + // Publish full messages + if !full_messages.is_empty() { + crate::utils::publish_pubsub_messages(sender_clone, full_messages).map_err(|_| { + BlockError::BeaconChainError(Box::new(BeaconChainError::UnableToPublish)) + })?; + } + + // Publish partial messages + if !partial_columns.is_empty() { + if let Some(header) = partial_header { + crate::utils::publish_network_message( + sender_clone, + NetworkMessage::PublishPartialColumns { + columns: partial_columns, + header: Arc::new(header), + }, + ) + .map_err(|_| { + BlockError::BeaconChainError(Box::new(BeaconChainError::UnableToPublish)) + })?; + } else { + crit!("Unable to extract header from full columns") + } + } + + Ok(()) } async fn post_block_import_logging_and_response( diff --git a/beacon_node/lighthouse_network/Cargo.toml b/beacon_node/lighthouse_network/Cargo.toml index 659886f0f1..44af8d7006 100644 --- a/beacon_node/lighthouse_network/Cargo.toml +++ b/beacon_node/lighthouse_network/Cargo.toml @@ -21,6 +21,8 @@ ethereum_ssz_derive = { workspace = true } fixed_bytes = { workspace = true } fnv = { workspace = true } futures = { workspace = true } +# Enable partial messages feature +gossipsub = { package = "libp2p-gossipsub", git = "https://github.com/libp2p/rust-libp2p.git", features = ["partial_messages"] } hex = { workspace = true } if-addrs = "0.14" itertools = { workspace = true } diff --git a/beacon_node/lighthouse_network/src/config.rs b/beacon_node/lighthouse_network/src/config.rs index cb94bfff22..db42d0cfa8 100644 --- a/beacon_node/lighthouse_network/src/config.rs +++ b/beacon_node/lighthouse_network/src/config.rs @@ -140,6 +140,9 @@ pub struct Config { /// Flag for advertising a fake CGC to peers for testing ONLY. 
pub advertise_false_custody_group_count: Option, + + /// Whether to enable partial data column support. + pub enable_partial_columns: bool, } impl Config { @@ -364,6 +367,7 @@ impl Default for Config { inbound_rate_limiter_config: None, idontwant_message_size_threshold: DEFAULT_IDONTWANT_MESSAGE_SIZE_THRESHOLD, advertise_false_custody_group_count: None, + enable_partial_columns: false, } } } diff --git a/beacon_node/lighthouse_network/src/lib.rs b/beacon_node/lighthouse_network/src/lib.rs index 863a7a4a43..fdb6ff095e 100644 --- a/beacon_node/lighthouse_network/src/lib.rs +++ b/beacon_node/lighthouse_network/src/lib.rs @@ -99,7 +99,7 @@ impl std::fmt::Display for ClearDialError<'_> { pub use crate::types::{ Enr, EnrSyncCommitteeBitfield, GossipTopic, NetworkGlobals, PubsubMessage, Subnet, - SubnetDiscovery, + SubnetDiscovery, decode_partial, }; pub use prometheus_client; diff --git a/beacon_node/lighthouse_network/src/metrics.rs b/beacon_node/lighthouse_network/src/metrics.rs index 623d43a727..d5d1ed5053 100644 --- a/beacon_node/lighthouse_network/src/metrics.rs +++ b/beacon_node/lighthouse_network/src/metrics.rs @@ -83,6 +83,14 @@ pub static FAILED_PUBLISHES_PER_MAIN_TOPIC: LazyLock> = Lazy &["topic_hash"], ) }); +pub static FAILED_PARTIAL_PUBLISHES_PER_MAIN_TOPIC: LazyLock> = + LazyLock::new(|| { + try_create_int_gauge_vec( + "gossipsub_failed_partial_publishes_per_main_topic", + "Failed gossip partial message publishes", + &["topic_hash"], + ) + }); pub static TOTAL_RPC_ERRORS_PER_CLIENT: LazyLock> = LazyLock::new(|| { try_create_int_counter_vec( "libp2p_rpc_errors_per_client", diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index 56fcbb3bb6..f0c1567cb0 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -14,17 +14,19 @@ use crate::rpc::{ GoodbyeReason, HandlerErr, InboundRequestId, Protocol, RPC, RPCError, RPCMessage, 
RPCReceived, RequestType, ResponseTermination, RpcResponse, RpcSuccessResponse, }; +use crate::service::partial_column_header_tracker::PartialColumnHeaderTracker; use crate::types::{ - GossipEncoding, GossipKind, GossipTopic, SnappyTransform, Subnet, SubnetDiscovery, - all_topics_at_fork, core_topics_to_subscribe, is_fork_non_core_topic, subnet_from_topic_hash, + GossipEncoding, GossipKind, GossipTopic, OutgoingPartialColumn, SnappyTransform, Subnet, + SubnetDiscovery, all_topics_at_fork, core_topics_to_subscribe, is_fork_non_core_topic, + subnet_from_topic_hash, }; -use crate::{Enr, NetworkGlobals, PubsubMessage, TopicHash, metrics}; +use crate::{Enr, NetworkGlobals, PubsubMessage, TopicHash, decode_partial, metrics}; use api_types::{AppRequestId, Response}; use futures::stream::StreamExt; use gossipsub_scoring_parameters::{PeerScoreSettings, lighthouse_gossip_thresholds}; use libp2p::gossipsub::{ - self, IdentTopic as Topic, MessageAcceptance, MessageAuthenticity, MessageId, PublishError, - TopicScoreParams, + self, Event, IdentTopic as Topic, MessageAcceptance, MessageAuthenticity, MessageId, + PublishError, TopicScoreParams, }; use libp2p::identity::Keypair; use libp2p::multiaddr::{self, Multiaddr, Protocol as MProtocol}; @@ -40,16 +42,18 @@ use std::pin::Pin; use std::sync::Arc; use std::time::Duration; use tracing::{debug, error, info, trace, warn}; -use types::{ChainSpec, ForkName}; use types::{ - EnrForkId, EthSpec, ForkContext, Slot, SubnetId, consts::altair::SYNC_COMMITTEE_SUBNET_COUNT, + ChainSpec, DataColumnSubnetId, EnrForkId, EthSpec, ForkContext, ForkName, PartialDataColumn, + PartialDataColumnHeader, Slot, SubnetId, consts::altair::SYNC_COMMITTEE_SUBNET_COUNT, }; use utils::{Context as ServiceContext, build_transport, strip_peer_id}; pub mod api_types; mod gossip_cache; pub mod gossipsub_scoring_parameters; +mod partial_column_header_tracker; pub mod utils; + /// The number of peers we target per subnet for discovery queries. 
pub const TARGET_SUBNET_PEERS: usize = 3; @@ -99,6 +103,15 @@ pub enum NetworkEvent { /// The message itself. message: PubsubMessage, }, + /// A partial data column sidecar received via gossipsub partial protocol. + PartialDataColumnSidecar { + /// The peer from which we received this message. + source: PeerId, + /// The partial column data. + column: Box>, + /// The topic that this message was sent on. + topic: GossipTopic, + }, /// Inform the network to send a Status to this peer. StatusPeer(PeerId), NewListenAddr(Multiaddr), @@ -162,6 +175,7 @@ pub struct Network { /// The interval for updating gossipsub scores update_gossipsub_scores: tokio::time::Interval, gossip_cache: GossipCache, + partial_column_header_tracker: PartialColumnHeaderTracker, /// This node's PeerId. pub local_peer_id: PeerId, } @@ -505,6 +519,7 @@ impl Network { score_settings, update_gossipsub_scores, gossip_cache, + partial_column_header_tracker: PartialColumnHeaderTracker::new(), local_peer_id, }; @@ -804,9 +819,18 @@ impl Network { .write() .insert(topic.clone()); + let partial = topic + .kind() + .use_partial_messages(self.network_globals.config.as_ref()); let topic: Topic = topic.into(); - match self.gossipsub_mut().subscribe(&topic) { + let subscribe_result = if partial { + self.gossipsub_mut().subscribe_partial(&topic, true) + } else { + self.gossipsub_mut().subscribe(&topic) + }; + + match subscribe_result { Err(e) => { warn!(%topic, error = ?e, "Failed to subscribe to topic"); false @@ -849,6 +873,16 @@ impl Network { "Attempted to publish duplicate message" ); } + PublishError::NoPeersSubscribedToTopic + if topic + .kind() + .use_partial_messages(self.network_globals.config.as_ref()) => + { + debug!( + kind = %topic.kind(), + "No peers supporting full messages" + ); + } ref e => { warn!( error = ?e, @@ -886,6 +920,66 @@ impl Network { } } + /// Publishes partial data column sidecars to the gossipsub network. 
+ pub fn publish_partial( + &mut self, + columns: Vec>>, + header: Arc>, + ) { + if !self.network_globals.config.enable_partial_columns { + return; + } + + debug!( + count = columns.len(), + "Sending partial data column sidecars" + ); + + for column in columns { + let subnet = + DataColumnSubnetId::from_column_index(column.index, &self.fork_context.spec); + let topic = GossipTopic::new( + GossipKind::DataColumnSidecar(subnet), + GossipEncoding::default(), + self.enr_fork_id.fork_digest, + ); + let header_sent_set = self + .partial_column_header_tracker + .get_for_block(column.block_root); + let partial_message = OutgoingPartialColumn::new(column, &header, header_sent_set); + let publish_topic: Topic = topic.clone().into(); + + if let Err(e) = self + .gossipsub_mut() + .publish_partial(publish_topic, partial_message) + { + match e { + PublishError::NoPeersSubscribedToTopic => { + debug!( + kind = %topic.kind(), + "No peers supporting partial messages" + ); + } + ref e => { + warn!( + error = ?e, + kind = %topic.kind(), + "Could not publish partial message" + ); + } + } + + // add to metrics + if let Some(v) = metrics::get_int_gauge( + &metrics::FAILED_PARTIAL_PUBLISHES_PER_MAIN_TOPIC, + &[&format!("{:?}", topic.kind())], + ) { + v.inc() + }; + } + } + } + /// Informs the gossipsub about the result of a message validation. /// If the message is valid it will get propagated by gossipsub. pub fn report_message_validation_result( @@ -918,6 +1012,29 @@ impl Network { ); } + /// Informs the gossipsub about the failure of a partial message validation. 
+ pub fn report_partial_message_validation_failure( + &mut self, + propagation_source: PeerId, + topic: GossipTopic, + ) { + if let Some(client) = self + .network_globals + .peers + .read() + .peer_info(&propagation_source) + .map(|info| info.client().kind.as_ref()) + { + metrics::inc_counter_vec( + &metrics::GOSSIP_UNACCEPTED_MESSAGES_PER_CLIENT, + &[client, "reject"], + ) + } + + self.gossipsub_mut() + .report_invalid_partial(propagation_source, &TopicHash::from(Topic::from(topic))); + } + /// Updates the current gossipsub scoring parameters based on the validator count and current /// slot. pub fn update_gossipsub_parameters( @@ -1290,6 +1407,56 @@ impl Network { } } } + Event::Partial { + topic_hash, + peer_id, + group_id, + message, + .. + } => { + let topic = GossipTopic::decode(topic_hash.as_str()) + .inspect_err(|error| { + debug!( + topic = ?topic_hash, + error, + "Could not decode gossipsub partial message topic" + ); + // punish the peer + self.gossipsub_mut() + .report_invalid_partial(peer_id, &topic_hash); + }) + .ok()?; + + if let Some(message) = message { + match decode_partial::(&topic, &group_id, &message) { + Err(error) => { + debug!( + topic = ?topic_hash, + error, + "Could not decode gossipsub partial message" + ); + //reject the message + self.gossipsub_mut() + .report_invalid_partial(peer_id, &topic_hash); + } + Ok(column) => { + debug!( + block_root = %column.block_root, + index = column.index, + %peer_id, + cells_present = %column.sidecar.cells_present_bitmap, + "Decoded partial message" + ); + // Notify the network + return Some(NetworkEvent::PartialDataColumnSidecar { + source: peer_id, + column: Box::new(column), + topic, + }); + } + } + } + } gossipsub::Event::Subscribed { peer_id, topic } => { if let Ok(topic) = GossipTopic::decode(topic.as_str()) { if let Some(subnet_id) = topic.subnet_id() { diff --git a/beacon_node/lighthouse_network/src/service/partial_column_header_tracker.rs 
b/beacon_node/lighthouse_network/src/service/partial_column_header_tracker.rs new file mode 100644 index 0000000000..bb588fe3d8 --- /dev/null +++ b/beacon_node/lighthouse_network/src/service/partial_column_header_tracker.rs @@ -0,0 +1,28 @@ +use crate::types::HeaderSentSet; +use lru::LruCache; +use parking_lot::Mutex; +use std::collections::HashSet; +use std::num::NonZeroUsize; +use std::sync::Arc; +use types::core::Hash256; + +const MAX_BLOCKS: NonZeroUsize = NonZeroUsize::new(4).unwrap(); + +pub struct PartialColumnHeaderTracker { + blocks: LruCache, +} + +impl PartialColumnHeaderTracker { + pub fn new() -> Self { + PartialColumnHeaderTracker { + blocks: LruCache::new(MAX_BLOCKS), + } + } + + pub fn get_for_block(&mut self, hash: Hash256) -> HeaderSentSet { + Arc::clone( + self.blocks + .get_or_insert(hash, || Arc::new(Mutex::new(HashSet::new()))), + ) + } +} diff --git a/beacon_node/lighthouse_network/src/types/mod.rs b/beacon_node/lighthouse_network/src/types/mod.rs index eea8782b2d..d0173e5b9a 100644 --- a/beacon_node/lighthouse_network/src/types/mod.rs +++ b/beacon_node/lighthouse_network/src/types/mod.rs @@ -1,4 +1,5 @@ mod globals; +mod partial; mod pubsub; mod subnet; mod topics; @@ -13,7 +14,9 @@ pub type Enr = discv5::enr::Enr; pub use eth2::lighthouse::sync_state::{BackFillState, CustodyBackFillState, SyncState}; pub use globals::NetworkGlobals; -pub use pubsub::{PubsubMessage, SnappyTransform}; +pub use partial::HeaderSentSet; +pub use partial::OutgoingPartialColumn; +pub use pubsub::{PubsubMessage, SnappyTransform, decode_partial}; pub use subnet::{Subnet, SubnetDiscovery}; pub use topics::{ GossipEncoding, GossipKind, GossipTopic, TopicConfig, all_topics_at_fork, diff --git a/beacon_node/lighthouse_network/src/types/partial.rs b/beacon_node/lighthouse_network/src/types/partial.rs new file mode 100644 index 0000000000..f25ce9ec36 --- /dev/null +++ b/beacon_node/lighthouse_network/src/types/partial.rs @@ -0,0 +1,503 @@ +use crate::PeerId; +use 
itertools::Itertools; +use libp2p::gossipsub::partial_messages::{Metadata, Partial, PartialAction, PartialError}; +use parking_lot::Mutex; +use ssz::{Decode, Encode}; +use std::collections::HashSet; +use std::fmt::Debug; +use std::sync::Arc; +use tracing::{debug, error}; +use types::core::{EthSpec, Hash256}; +use types::data::{ + CellBitmap, PartialDataColumn, PartialDataColumnHeader, PartialDataColumnPartsMetadata, + PartialDataColumnSidecar, PartialDataColumnSidecarRef, +}; + +const PARTIAL_COLUMNS_VERSION_BYTE: u8 = 0; + +pub type HeaderSentSet = Arc>>; + +#[derive(Debug, Clone)] +pub struct OutgoingPartialColumn { + partial_column: Arc>, + metadata: MaybeKnownMetadata, + header_message: Vec, + header_sent_set: HeaderSentSet, +} + +impl OutgoingPartialColumn { + pub fn new( + partial_column: Arc>, + header: &PartialDataColumnHeader, + header_sent_set: HeaderSentSet, + ) -> Self { + // For now, always request all cells + let mut requests = partial_column.sidecar.cells_present_bitmap.clone(); + for idx in 0..requests.len() { + requests + .set(idx, true) + .expect("Bound asserted via `len` above"); + } + let metadata = PartialDataColumnPartsMetadata:: { + available: partial_column.sidecar.cells_present_bitmap.clone(), + requests, + } + .into(); + + let header_message = PartialDataColumnSidecarRef { + cells_present_bitmap: CellBitmap::::with_capacity( + partial_column.sidecar.cells_present_bitmap.len(), + ) + .expect("Taking length from bitmap with same bound"), + column: vec![], + kzg_proofs: vec![], + header: Some(header).into(), + } + .as_ssz_bytes(); + + OutgoingPartialColumn { + partial_column, + metadata, + header_message, + header_sent_set, + } + } +} + +#[derive(Debug, Clone, PartialEq, Eq)] +enum MaybeKnownMetadata { + Unknown, + Known { + metadata: Box>, + encoded: Vec, + }, +} + +impl MaybeKnownMetadata { + fn do_update( + &mut self, + received: PartialDataColumnPartsMetadata, + ) -> Result { + let MaybeKnownMetadata::Known { metadata, encoded } = self 
else { + *self = MaybeKnownMetadata::Known { + encoded: received.as_ssz_bytes(), + metadata: Box::new(received), + }; + return Ok(true); + }; + + if ![ + received.available.len(), + received.requests.len(), + metadata.available.len(), + metadata.requests.len(), + ] + .into_iter() + .all_equal() + { + return Err(PartialError::OutOfRange); + } + let new_available = metadata.available.union(&received.available); + let new_request = metadata.requests.union(&received.requests); + if metadata.available == new_available && metadata.requests == new_request { + return Ok(false); + } + metadata.available = new_available; + metadata.requests = new_request; + *encoded = metadata.as_ssz_bytes(); + Ok(true) + } +} + +impl Metadata for MaybeKnownMetadata { + fn as_slice(&self) -> &[u8] { + match self { + MaybeKnownMetadata::Unknown => &[], + MaybeKnownMetadata::Known { encoded, .. } => encoded, + } + } + + fn update(&mut self, data: &[u8]) -> Result { + let received = PartialDataColumnPartsMetadata::from_ssz_bytes(data) + .map_err(|_| PartialError::InvalidFormat)?; + + self.do_update(received) + } + + fn update_from_data(&mut self, data: &[u8]) -> Result<(), PartialError> { + if data.is_empty() { + return Ok(()); + } + + let sidecar = PartialDataColumnSidecar::::from_ssz_bytes(data) + .map_err(|_| PartialError::InvalidFormat)?; + + self.do_update(PartialDataColumnPartsMetadata { + available: sidecar.cells_present_bitmap.clone(), + requests: sidecar.cells_present_bitmap, + }) + .map(|_| ()) + } +} + +impl From> for MaybeKnownMetadata { + fn from(metadata: PartialDataColumnPartsMetadata) -> Self { + Self::Known { + encoded: metadata.as_ssz_bytes(), + metadata: Box::new(metadata), + } + } +} + +impl Partial for OutgoingPartialColumn { + fn group_id(&self) -> Vec { + let mut group_id = Vec::with_capacity(Hash256::len_bytes() + 1); + group_id.push(PARTIAL_COLUMNS_VERSION_BYTE); + group_id.extend_from_slice(self.partial_column.block_root.as_slice()); + group_id + } + + fn 
metadata(&self) -> Box { + Box::new(self.metadata.clone()) + } + + fn partial_action_from_metadata( + &self, + peer_id: PeerId, + metadata: Option<&[u8]>, + ) -> Result { + match metadata { + None => { + // send the header-only message to the peer if we have not yet + let send = self.header_sent_set.lock().insert(peer_id).then(|| { + ( + self.header_message.clone(), + Box::new(MaybeKnownMetadata::::Unknown) as Box, + ) + }); + debug!( + peer=%peer_id, + group_id=%self.partial_column.block_root, + column_index=self.partial_column.index, + sending_header=send.is_some(), + "Partial send: No metadata" + ); + + Ok(PartialAction { need: false, send }) + } + Some([]) => Ok(PartialAction { + need: false, + send: None, + }), + Some(metadata) => { + // The peer is apparently aware of the header, make sure we track that: + self.header_sent_set.lock().insert(peer_id); + + let peer_metadata = PartialDataColumnPartsMetadata::::from_ssz_bytes(metadata) + .map_err(|_| PartialError::InvalidFormat)?; + let expected_len = self.partial_column.sidecar.cells_present_bitmap.len(); + if peer_metadata.available.len() != expected_len + || peer_metadata.requests.len() != expected_len + { + return Err(PartialError::InvalidFormat); + } + + let need = !peer_metadata + .available + .is_subset(&self.partial_column.sidecar.cells_present_bitmap); + let want = peer_metadata.requests.difference(&peer_metadata.available); + + let send = self + .partial_column + .sidecar + .filter(|idx| want.get(idx).expect("Bound checked above")) + .map_err(|err| { + error!(?err, "Unexpected error filtering sidecar"); + PartialError::InvalidFormat + })?
+ .map(|sidecar| { + debug!( + peer=%peer_id, + group_id=%self.partial_column.block_root, + column_index=self.partial_column.index, + metadata=%peer_metadata, + sending=%sidecar.cells_present_bitmap, + "Partial send: Sending" + ); + ( + sidecar.as_ssz_bytes(), + Box::new(MaybeKnownMetadata::::from( + PartialDataColumnPartsMetadata { + available: peer_metadata + .available + .union(&sidecar.cells_present_bitmap), + requests: peer_metadata + .requests + .union(&sidecar.cells_present_bitmap), + }, + )) as Box, + ) + }); + + if send.is_none() { + debug!( + peer=%peer_id, + group_id=%self.partial_column.block_root, + column_index=self.partial_column.index, + metadata=%peer_metadata, + "Partial send: Nothing to send" + ); + } + + Ok(PartialAction { need, send }) + } + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use bls::Signature; + use fixed_bytes::FixedBytesExtended; + use libp2p::identity::Keypair; + use ssz_types::FixedVector; + use types::block::{BeaconBlockHeader, SignedBeaconBlockHeader}; + use types::core::{MinimalEthSpec, Slot}; + use types::data::PartialDataColumnHeader; + + type E = MinimalEthSpec; + + fn make_cell(marker: u8) -> types::Cell { + let mut cell = types::Cell::::default(); + cell[0] = marker; + cell + } + + fn make_header(num_commitments: usize) -> PartialDataColumnHeader { + PartialDataColumnHeader { + kzg_commitments: vec![types::KzgCommitment([0u8; 48]); num_commitments] + .try_into() + .unwrap(), + signed_block_header: SignedBeaconBlockHeader { + message: BeaconBlockHeader { + slot: Slot::new(1), + proposer_index: 0, + parent_root: Hash256::zero(), + state_root: Hash256::zero(), + body_root: Hash256::zero(), + }, + signature: Signature::empty(), + }, + kzg_commitments_inclusion_proof: FixedVector::new( + vec![Hash256::zero(); E::kzg_commitments_inclusion_proof_depth()], + ) + .unwrap(), + } + } + + fn make_partial_column( + block_root: Hash256, + total_blobs: usize, + present_indices: &[usize], + ) -> Arc> { + let mut bitmap = 
CellBitmap::::with_capacity(total_blobs).unwrap(); + for &idx in present_indices { + bitmap.set(idx, true).unwrap(); + } + + Arc::new(PartialDataColumn { + block_root, + index: 0, + sidecar: PartialDataColumnSidecar { + cells_present_bitmap: bitmap, + column: present_indices + .iter() + .map(|&idx| make_cell(idx as u8)) + .collect::>() + .try_into() + .unwrap(), + kzg_proofs: present_indices + .iter() + .map(|_| types::KzgProof::empty()) + .collect::>() + .try_into() + .unwrap(), + header: None.into(), + }, + }) + } + + fn random_peer_id() -> PeerId { + let keypair = Keypair::generate_ed25519(); + PeerId::from(keypair.public()) + } + + // -- MaybeKnownMetadata tests -- + + #[test] + fn update_from_unknown_initializes() { + let mut meta = MaybeKnownMetadata::::Unknown; + let mut bitmap = CellBitmap::::with_capacity(4).unwrap(); + bitmap.set(0, true).unwrap(); + let received = PartialDataColumnPartsMetadata { + available: bitmap.clone(), + requests: bitmap, + }; + let changed = meta.do_update(received).unwrap(); + assert!(changed); + assert!(matches!(meta, MaybeKnownMetadata::Known { .. })); + } + + #[test] + fn update_unions_bitmaps() { + let mut bitmap1 = CellBitmap::::with_capacity(4).unwrap(); + bitmap1.set(0, true).unwrap(); + let mut meta: MaybeKnownMetadata = PartialDataColumnPartsMetadata { + available: bitmap1.clone(), + requests: bitmap1, + } + .into(); + + let mut bitmap2 = CellBitmap::::with_capacity(4).unwrap(); + bitmap2.set(1, true).unwrap(); + let changed = meta + .do_update(PartialDataColumnPartsMetadata { + available: bitmap2.clone(), + requests: bitmap2, + }) + .unwrap(); + assert!(changed); + + if let MaybeKnownMetadata::Known { metadata, .. 
} = &meta { + assert!(metadata.available.get(0).unwrap()); + assert!(metadata.available.get(1).unwrap()); + assert!(!metadata.available.get(2).unwrap()); + } else { + panic!("Expected Known metadata"); + } + } + + #[test] + fn update_returns_false_when_no_change() { + let mut bitmap = CellBitmap::::with_capacity(4).unwrap(); + bitmap.set(0, true).unwrap(); + bitmap.set(1, true).unwrap(); + let mut meta: MaybeKnownMetadata = PartialDataColumnPartsMetadata { + available: bitmap.clone(), + requests: bitmap.clone(), + } + .into(); + + // Update with a subset + let mut subset = CellBitmap::::with_capacity(4).unwrap(); + subset.set(0, true).unwrap(); + let changed = meta + .do_update(PartialDataColumnPartsMetadata { + available: subset.clone(), + requests: subset, + }) + .unwrap(); + assert!(!changed); + } + + #[test] + fn update_rejects_mismatched_lengths() { + let mut bitmap4 = CellBitmap::::with_capacity(4).unwrap(); + bitmap4.set(0, true).unwrap(); + let mut meta: MaybeKnownMetadata = PartialDataColumnPartsMetadata { + available: bitmap4.clone(), + requests: bitmap4, + } + .into(); + + let mut bitmap6 = CellBitmap::::with_capacity(6).unwrap(); + bitmap6.set(0, true).unwrap(); + let result = meta.do_update(PartialDataColumnPartsMetadata { + available: bitmap6.clone(), + requests: bitmap6, + }); + assert!(result.is_err()); + } + + // -- OutgoingPartialColumn::partial_action_from_metadata tests -- + + #[test] + fn no_metadata_sends_header_once() { + let root = Hash256::repeat_byte(1); + let header = make_header(4); + let partial = make_partial_column(root, 4, &[0, 1]); + let header_sent_set: HeaderSentSet = Arc::new(Mutex::new(HashSet::new())); + let outgoing = OutgoingPartialColumn::new(partial, &header, header_sent_set); + + let peer = random_peer_id(); + + // First call with no metadata → sends header + let action = outgoing.partial_action_from_metadata(peer, None).unwrap(); + assert!(action.send.is_some()); + + // Second call for same peer → no send + let action2 = 
outgoing.partial_action_from_metadata(peer, None).unwrap(); + assert!(action2.send.is_none()); + } + + #[test] + fn metadata_filters_cells_to_send() { + let root = Hash256::repeat_byte(1); + let header = make_header(4); + // We have cells [0, 2, 3] + let partial = make_partial_column(root, 4, &[0, 2, 3]); + let header_sent_set: HeaderSentSet = Arc::new(Mutex::new(HashSet::new())); + let outgoing = OutgoingPartialColumn::new(partial, &header, header_sent_set); + + let peer = random_peer_id(); + + // Peer has [0, 1], wants [0, 1, 2, 3] + let mut peer_available = CellBitmap::::with_capacity(4).unwrap(); + peer_available.set(0, true).unwrap(); + peer_available.set(1, true).unwrap(); + let mut peer_request = CellBitmap::::with_capacity(4).unwrap(); + for i in 0..4 { + peer_request.set(i, true).unwrap(); + } + let peer_meta = PartialDataColumnPartsMetadata:: { + available: peer_available, + requests: peer_request, + }; + let encoded = peer_meta.as_ssz_bytes(); + + let action = outgoing + .partial_action_from_metadata(peer, Some(&encoded)) + .unwrap(); + // We should send cells [2, 3] (want = request - available = [2,3], and we have [0,2,3]) + assert!(action.send.is_some()); + } + + #[test] + fn metadata_sets_need_when_peer_has_unknown_cells() { + let root = Hash256::repeat_byte(1); + let header = make_header(4); + // We have cells [0] + let partial = make_partial_column(root, 4, &[0]); + let header_sent_set: HeaderSentSet = Arc::new(Mutex::new(HashSet::new())); + let outgoing = OutgoingPartialColumn::new(partial, &header, header_sent_set); + + let peer = random_peer_id(); + + // Peer has [0, 1, 2] — cells [1, 2] are unknown to us + let mut peer_available = CellBitmap::::with_capacity(4).unwrap(); + peer_available.set(0, true).unwrap(); + peer_available.set(1, true).unwrap(); + peer_available.set(2, true).unwrap(); + let peer_meta = PartialDataColumnPartsMetadata:: { + available: peer_available.clone(), + requests: peer_available, + }; + let encoded = 
peer_meta.as_ssz_bytes(); + + let action = outgoing + .partial_action_from_metadata(peer, Some(&encoded)) + .unwrap(); + assert!(action.need); + } +} diff --git a/beacon_node/lighthouse_network/src/types/pubsub.rs b/beacon_node/lighthouse_network/src/types/pubsub.rs index 12567907f6..9875d4b0c4 100644 --- a/beacon_node/lighthouse_network/src/types/pubsub.rs +++ b/beacon_node/lighthouse_network/src/types/pubsub.rs @@ -1,23 +1,23 @@ //! Handles the encoding and decoding of pubsub messages. -use crate::TopicHash; use crate::types::{GossipEncoding, GossipKind, GossipTopic}; -use libp2p::gossipsub; +use gossipsub::TopicHash; use snap::raw::{Decoder, Encoder, decompress_len}; use ssz::{Decode, Encode}; use std::io::{Error, ErrorKind}; use std::sync::Arc; use types::{ AttesterSlashing, AttesterSlashingBase, AttesterSlashingElectra, BlobSidecar, - DataColumnSidecar, DataColumnSubnetId, EthSpec, ForkContext, ForkName, - LightClientFinalityUpdate, LightClientOptimisticUpdate, PayloadAttestationMessage, - ProposerSlashing, SignedAggregateAndProof, SignedAggregateAndProofBase, - SignedAggregateAndProofElectra, SignedBeaconBlock, SignedBeaconBlockAltair, - SignedBeaconBlockBase, SignedBeaconBlockBellatrix, SignedBeaconBlockCapella, - SignedBeaconBlockDeneb, SignedBeaconBlockElectra, SignedBeaconBlockFulu, - SignedBeaconBlockGloas, SignedBlsToExecutionChange, SignedContributionAndProof, - SignedExecutionPayloadBid, SignedExecutionPayloadEnvelope, SignedProposerPreferences, - SignedVoluntaryExit, SingleAttestation, SubnetId, SyncCommitteeMessage, SyncSubnetId, + DataColumnSidecar, DataColumnSubnetId, EthSpec, ForkContext, ForkName, Hash256, + LightClientFinalityUpdate, LightClientOptimisticUpdate, PartialDataColumn, + PartialDataColumnSidecar, PayloadAttestationMessage, ProposerSlashing, SignedAggregateAndProof, + SignedAggregateAndProofBase, SignedAggregateAndProofElectra, SignedBeaconBlock, + SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockBellatrix, + 
SignedBeaconBlockCapella, SignedBeaconBlockDeneb, SignedBeaconBlockElectra, + SignedBeaconBlockFulu, SignedBeaconBlockGloas, SignedBlsToExecutionChange, + SignedContributionAndProof, SignedExecutionPayloadBid, SignedExecutionPayloadEnvelope, + SignedProposerPreferences, SignedVoluntaryExit, SingleAttestation, SubnetId, + SyncCommitteeMessage, SyncSubnetId, }; #[derive(Debug, Clone, PartialEq)] @@ -464,6 +464,35 @@ impl PubsubMessage { } } +/// Decodes incoming partial data column sidecar from gossipsub partial protocol. +/// Note: Currently, data columns are the only supported partial messages. In future this could +/// return an enum. +pub fn decode_partial( + topic: &GossipTopic, + group: &[u8], + data: &[u8], +) -> Result, String> { + match topic.kind() { + GossipKind::DataColumnSidecar(id) => { + if group.first() != Some(&0) { + return Err(format!("Unknown data column format: {:?}", group.first())); + } + let block_root = Hash256::from_ssz_bytes(&group[1..]) + .map_err(|e| format!("Error decoding group: {:?}", e))?; + let sidecar = PartialDataColumnSidecar::from_ssz_bytes(data) + .map_err(|e| format!("Error decoding sidecar: {:?}", e))?; + let data_column = PartialDataColumn { + block_root, + // Partial messages are spec'd under the assumption that there is one column per subnet. + index: **id, + sidecar, + }; + Ok(data_column) + } + other => Err(format!("Partial message unsupported for topic: {other}")), + } +} + impl std::fmt::Display for PubsubMessage { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { diff --git a/beacon_node/lighthouse_network/src/types/topics.rs b/beacon_node/lighthouse_network/src/types/topics.rs index a3ea4babce..b51c459a80 100644 --- a/beacon_node/lighthouse_network/src/types/topics.rs +++ b/beacon_node/lighthouse_network/src/types/topics.rs @@ -11,7 +11,7 @@ use types::{ sync_committee::SyncSubnetId, }; -use crate::Subnet; +use crate::{NetworkConfig, Subnet}; /// The gossipsub topic names. 
// These constants form a topic name of the form /TOPIC_PREFIX/TOPIC/ENCODING_POSTFIX @@ -200,6 +200,15 @@ pub enum GossipKind { LightClientOptimisticUpdate, } +impl GossipKind { + pub fn use_partial_messages(&self, config: &NetworkConfig) -> bool { + match self { + GossipKind::DataColumnSidecar(_) => config.enable_partial_columns, + _ => false, + } + } +} + impl std::fmt::Display for GossipKind { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { diff --git a/beacon_node/network/src/metrics.rs b/beacon_node/network/src/metrics.rs index 2119acf946..b09dc95db4 100644 --- a/beacon_node/network/src/metrics.rs +++ b/beacon_node/network/src/metrics.rs @@ -143,6 +143,22 @@ pub static BEACON_PROCESSOR_GOSSIP_DATA_COLUMN_SIDECAR_VERIFIED_TOTAL: LazyLock< "Total number of gossip data column sidecar verified for propagation.", ) }); +pub static BEACON_PROCESSOR_GOSSIP_PARTIAL_DATA_COLUMN_SIDECAR_VERIFIED_TOTAL: LazyLock< + Result, +> = LazyLock::new(|| { + try_create_int_counter( + "beacon_processor_gossip_partial_data_column_verified_total", + "Total number of gossip partial data column sidecar verified for propagation.", + ) +}); +pub static BEACON_PROCESSOR_GOSSIP_PARTIAL_DATA_COLUMN_SIDECAR_MISSING_HEADER_TOTAL: LazyLock< + Result, +> = LazyLock::new(|| { + try_create_int_counter( + "beacon_processor_gossip_partial_data_column_missing_header_total", + "Total number of gossip partial data column sidecar received without a (cached) header.", + ) +}); // Gossip Exits. 
pub static BEACON_PROCESSOR_EXIT_VERIFIED_TOTAL: LazyLock> = LazyLock::new(|| { @@ -601,6 +617,16 @@ pub static BEACON_DATA_COLUMN_GOSSIP_PROPAGATION_VERIFICATION_DELAY_TIME: LazyLo decimal_buckets(-3, -1), ) }); +pub static BEACON_PARTIAL_DATA_COLUMN_GOSSIP_PROPAGATION_VERIFICATION_DELAY_TIME: LazyLock< + Result, +> = LazyLock::new(|| { + try_create_histogram_with_buckets( + "beacon_partial_data_column_gossip_propagation_verification_delay_time", + "Duration between when the partial data column sidecar is received over gossip and when it is verified for propagation.", + // [0.001, 0.002, 0.005, 0.01, 0.02, 0.05, 0.1, 0.2, 0.5] + decimal_buckets(-3, -1), + ) +}); pub static BEACON_DATA_COLUMN_GOSSIP_SLOT_START_DELAY_TIME: LazyLock> = LazyLock::new(|| { try_create_histogram_with_buckets( @@ -615,6 +641,28 @@ pub static BEACON_DATA_COLUMN_GOSSIP_SLOT_START_DELAY_TIME: LazyLock> = + LazyLock::new(|| { + try_create_histogram_with_buckets( + "beacon_partial_data_column_gossip_slot_start_delay_time", + "Duration between when the partial data column sidecar is received over gossip and the start of the slot it belongs to.", + // Create a custom bucket list for greater granularity in block delay + Ok(vec![ + 0.1, 0.2, 0.3, 0.4, 0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0, 2.5, 3.0, 3.5, 4.0, 5.0, + 6.0, 7.0, 8.0, 9.0, 10.0, 15.0, 20.0, + ]), // NOTE: Previous values, which we may want to switch back to. 
+ // [0.1, 0.2, 0.5, 1, 2, 5, 10, 20, 50] + //decimal_buckets(-1,2) + ) + }); +pub static BEACON_USEFUL_FULL_COLUMNS_RECEIVED_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_counter_vec( + "beacon_useful_full_columns_received_total", + "Number of useful full columns (any cell being useful) received", + &["column_index"], + ) + }); pub static BEACON_BLOB_DELAY_GOSSIP_VERIFICATION: LazyLock> = LazyLock::new( || { diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index 2fe5aec347..ea1a2286a0 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -4,6 +4,14 @@ use crate::{ service::NetworkMessage, sync::SyncMessage, }; +use beacon_chain::block_verification_types::AsBlock; +use beacon_chain::data_column_verification::{ + GossipDataColumnError, GossipPartialDataColumnError, GossipVerifiedDataColumn, + GossipVerifiedPartialDataColumnHeader, KzgVerifiedPartialDataColumn, + PartialColumnVerificationResult, +}; +use beacon_chain::payload_bid_verification::PayloadBidError; +use beacon_chain::proposer_preferences_verification::ProposerPreferencesError; use beacon_chain::store::Error; use beacon_chain::{ AvailabilityProcessingStatus, BeaconChainError, BeaconChainTypes, BlockError, ForkChoiceError, @@ -22,13 +30,11 @@ use beacon_chain::{ EnvelopeError, gossip_verified_envelope::GossipVerifiedEnvelope, }, }; -use beacon_chain::{block_verification_types::AsBlock, payload_bid_verification::PayloadBidError}; -use beacon_chain::{ - data_column_verification::{GossipDataColumnError, GossipVerifiedDataColumn}, - proposer_preferences_verification::ProposerPreferencesError, -}; use beacon_processor::{Work, WorkEvent}; -use lighthouse_network::{Client, MessageAcceptance, MessageId, PeerAction, PeerId, ReportSource}; +use lighthouse_network::{ + Client, GossipTopic, MessageAcceptance, 
MessageId, PeerAction, PeerId, PubsubMessage, + ReportSource, +}; use logging::crit; use operation_pool::ReceivedPreCapella; use slot_clock::SlotClock; @@ -41,13 +47,14 @@ use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; use store::hot_cold_store::HotColdDBError; use tracing::{Instrument, Span, debug, error, info, instrument, trace, warn}; use types::{ - Attestation, AttestationData, AttestationRef, AttesterSlashing, BlobSidecar, DataColumnSidecar, - DataColumnSubnetId, EthSpec, Hash256, IndexedAttestation, LightClientFinalityUpdate, - LightClientOptimisticUpdate, PayloadAttestationMessage, ProposerSlashing, - SignedAggregateAndProof, SignedBeaconBlock, SignedBlsToExecutionChange, - SignedContributionAndProof, SignedExecutionPayloadBid, SignedExecutionPayloadEnvelope, - SignedProposerPreferences, SignedVoluntaryExit, SingleAttestation, Slot, SubnetId, - SyncCommitteeMessage, SyncSubnetId, block::BlockImportSource, + Attestation, AttestationData, AttestationRef, AttesterSlashing, BlobSidecar, ColumnIndex, + DataColumnSidecar, DataColumnSubnetId, EthSpec, Hash256, IndexedAttestation, + LightClientFinalityUpdate, LightClientOptimisticUpdate, PartialDataColumn, + PartialDataColumnHeader, PayloadAttestationMessage, ProposerSlashing, SignedAggregateAndProof, + SignedBeaconBlock, SignedBlsToExecutionChange, SignedContributionAndProof, + SignedExecutionPayloadBid, SignedExecutionPayloadEnvelope, SignedProposerPreferences, + SignedVoluntaryExit, SingleAttestation, Slot, SubnetId, SyncCommitteeMessage, SyncSubnetId, + block::BlockImportSource, }; use beacon_processor::work_reprocessing_queue::QueuedColumnReconstruction; @@ -196,6 +203,19 @@ impl NetworkBeaconProcessor { }) } + /// Send a message on `message_tx` that `peer_id` has sent an invalid partial message and should + /// be penalized. 
+ pub(crate) fn propagate_partial_validation_failure( + &self, + propagation_source: PeerId, + gossip_topic: GossipTopic, + ) { + self.send_network_message(NetworkMessage::PartialValidationFailure { + propagation_source, + gossip_topic, + }) + } + /* Processing functions */ /// Process the unaggregated attestation received from the gossip network and: @@ -697,7 +717,7 @@ impl NetworkBeaconProcessor { MessageAcceptance::Accept, ); } - GossipDataColumnError::ParentUnknown { parent_root } => { + GossipDataColumnError::ParentUnknown { parent_root, .. } => { debug!( action = "requesting parent", %block_root, @@ -723,6 +743,7 @@ impl NetworkBeaconProcessor { | GossipDataColumnError::InvalidSubnetId { .. } | GossipDataColumnError::InvalidInclusionProof | GossipDataColumnError::InvalidKzgProof { .. } + | GossipDataColumnError::MismatchesCachedColumn | GossipDataColumnError::UnexpectedDataColumn | GossipDataColumnError::InvalidColumnIndex(_) | GossipDataColumnError::MaxBlobsPerBlockExceeded { .. } @@ -784,6 +805,261 @@ impl NetworkBeaconProcessor { } } + #[instrument( + name = "lh_process_gossip_partial_data_column", + parent = None, + level = "debug", + skip_all, + fields(block_root = ?column.block_root, index = column.index), + )] + pub async fn process_gossip_partial_data_column_sidecar( + self: &Arc, + peer_id: PeerId, + column: Box>, + seen_duration: Duration, + topic: GossipTopic, + ) { + let block_root = column.block_root; + let index = column.index; + + let result = self + .chain + .verify_partial_data_column_sidecar_for_gossip(column, seen_duration); + + let header = match result { + PartialColumnVerificationResult::Ok { header, column } => { + metrics::inc_counter( + &metrics::BEACON_PROCESSOR_GOSSIP_PARTIAL_DATA_COLUMN_SIDECAR_VERIFIED_TOTAL, + ); + + let slot = header.as_header().slot(); + + debug!( + %slot, + %block_root, + %index, + "Successfully verified gossip partial data column sidecar" + ); + + // Log metrics to keep track of propagation delay times. 
+ if let Some(duration) = UNIX_EPOCH + .elapsed() + .ok() + .and_then(|now| now.checked_sub(seen_duration)) + { + metrics::observe_duration( + &metrics::BEACON_PARTIAL_DATA_COLUMN_GOSSIP_PROPAGATION_VERIFICATION_DELAY_TIME, + duration, + ); + } + + self.process_gossip_verified_partial_data_column( + peer_id, + column, + header.clone(), + slot, + ) + .await; + Some(header) + } + PartialColumnVerificationResult::ErrWithValidHeader { header, err } => { + self.handle_partial_verification_error(peer_id, err, block_root, index, topic); + Some(header) + } + PartialColumnVerificationResult::Err(err) => { + self.handle_partial_verification_error(peer_id, err, block_root, index, topic); + None + } + }; + + if let Some(header) = header { + let slot = header.as_header().slot(); + let delay = get_slot_delay_ms(seen_duration, slot, &self.chain.slot_clock); + // Log metrics to track delay from other nodes on the network. + metrics::observe_duration( + &metrics::BEACON_PARTIAL_DATA_COLUMN_GOSSIP_SLOT_START_DELAY_TIME, + delay, + ); + + if !header.was_cached() { + debug!(block = %block_root, "Triggering getBlobs after receiving partial header"); + // We want to publish immediately when this finishes + let publish_blobs = true; + self.fetch_engine_blobs_and_publish(header.into_header(), block_root, publish_blobs) + .await + } + } + } + + fn handle_partial_verification_error( + self: &Arc, + peer_id: PeerId, + err: GossipPartialDataColumnError, + block_root: Hash256, + index: ColumnIndex, + topic: GossipTopic, + ) { + match err { + GossipPartialDataColumnError::GossipDataColumnError(err) => match err { + GossipDataColumnError::InvalidVariant => { + // TODO(gloas) we should probably penalize the peer here + debug!( + %block_root, + %index, + "Invalid gossip partial data column variant." + ) + } + GossipDataColumnError::PriorKnownUnpublished => { + debug!( + %block_root, + %index, + "Gossip partial data column already processed via the EL." 
+ ); + } + GossipDataColumnError::ParentUnknown { parent_root, slot } => { + debug!( + action = "requesting parent", + %block_root, + %parent_root, + "Unknown parent hash for partial column" + ); + self.send_sync_message(SyncMessage::UnknownParentPartialDataColumn { + peer_id, + block_root, + parent_root, + slot, + }); + } + GossipDataColumnError::PubkeyCacheTimeout + | GossipDataColumnError::BeaconChainError(_) => { + crit!( + error = ?err, + "Internal error when verifying partial column sidecar" + ) + } + GossipDataColumnError::ProposalSignatureInvalid + | GossipDataColumnError::UnknownValidator(_) + | GossipDataColumnError::ProposerIndexMismatch { .. } + | GossipDataColumnError::IsNotLaterThanParent { .. } + | GossipDataColumnError::InvalidSubnetId { .. } + | GossipDataColumnError::InvalidInclusionProof + | GossipDataColumnError::InvalidKzgProof { .. } + | GossipDataColumnError::MismatchesCachedColumn + | GossipDataColumnError::UnexpectedDataColumn + | GossipDataColumnError::InvalidColumnIndex(_) + | GossipDataColumnError::MaxBlobsPerBlockExceeded { .. } + | GossipDataColumnError::InconsistentCommitmentsLength { .. } + | GossipDataColumnError::InconsistentProofsLength { .. } + | GossipDataColumnError::NotFinalizedDescendant { .. } => { + debug!( + error = ?err, + %block_root, + %index, + "Could not verify partial column for gossip. Rejecting the column sidecar" + ); + // Prevent recurring behaviour by penalizing the peer slightly. + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "gossip_partial_data_column_low", + ); + self.propagate_partial_validation_failure(peer_id, topic); + } + GossipDataColumnError::PriorKnown { .. } => { + // Data column is available via either the EL or reconstruction. + // Do not penalise the peer. + // Gossip filter should filter any duplicates received after this. + debug!( + %block_root, + %index, + "Received already available column sidecar. 
Ignoring the partial column sidecar" + ) + } + GossipDataColumnError::FutureSlot { .. } + | GossipDataColumnError::PastFinalizedSlot { .. } => { + debug!( + error = ?err, + %block_root, + %index, + "Could not verify column sidecar for gossip. Ignoring the partial column sidecar" + ); + // Prevent recurring behaviour by penalizing the peer slightly. + self.gossip_penalize_peer( + peer_id, + PeerAction::HighToleranceError, + "gossip_partial_data_column_high", + ); + } + }, + GossipPartialDataColumnError::MissingHeader => { + metrics::inc_counter( + &metrics::BEACON_PROCESSOR_GOSSIP_PARTIAL_DATA_COLUMN_SIDECAR_MISSING_HEADER_TOTAL, + ); + warn!( + error = ?err, + %block_root, + %index, + "Received partial column while not having header stored" + ); + self.gossip_penalize_peer( + peer_id, + PeerAction::HighToleranceError, + "gossip_partial_data_column_high", + ); + } + GossipPartialDataColumnError::HeaderMismatches + | GossipPartialDataColumnError::HeaderIncorrectRoot { .. } => { + debug!( + error = ?err, + %block_root, + %index, + "Could not verify partial column header" + ); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "gossip_partial_data_column_low", + ); + } + GossipPartialDataColumnError::EmptyMessage + | GossipPartialDataColumnError::InconsistentPresentCount { .. } + | GossipPartialDataColumnError::InconsistentCommitmentsLength { .. 
} => { + debug!( + error = ?err, + %block_root, + %index, + "Could not verify partial column" + ); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "gossip_partial_data_column_low", + ); + } + GossipPartialDataColumnError::PartialColumnsDisabled => { + error!( + error = ?err, + %block_root, + %index, + "Received partial column while disabled" + ); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "gossip_partial_data_column_low", + ); + } + GossipPartialDataColumnError::InternalError(_) => { + error!( + error = ?err, + %block_root, + %index, + "Internal error while processing partial column" + ); + } + } + } + #[allow(clippy::too_many_arguments)] #[instrument( name = "lh_process_gossip_blob", @@ -1030,6 +1306,8 @@ impl NetworkBeaconProcessor { } } + /// Process a gossip-verified full data column (not partial). + /// Partials are handled by process_gossip_verified_partial_data_column. async fn process_gossip_verified_data_column( self: &Arc, peer_id: PeerId, @@ -1042,6 +1320,30 @@ impl NetworkBeaconProcessor { let data_column_slot = verified_data_column.slot(); let data_column_index = verified_data_column.index(); + if let DataColumnSidecar::Fulu(col) = verified_data_column.as_data_column() + && self + .chain + .data_availability_checker + .partial_assembler() + .is_some_and(|a| !a.is_complete(block_root, verified_data_column.index())) + { + metrics::inc_counter_vec( + &metrics::BEACON_USEFUL_FULL_COLUMNS_RECEIVED_TOTAL, + &[&data_column_index.to_string()], + ); + + let mut column = col.to_partial(); + let header = column.sidecar.header.take(); + if let Some(header) = header { + self.send_network_message(NetworkMessage::PublishPartialColumns { + columns: vec![Arc::new(column)], + header: Arc::new(header), + }); + } else { + crit!("Converting from full to partial yielded headerless partial") + }; + } + let result = self .chain .process_gossip_data_columns(vec![verified_data_column], || Ok(())) @@ -1070,44 +1372,7 @@ 
impl NetworkBeaconProcessor { "Processed data column, waiting for other components" ); - if self - .chain - .data_availability_checker - .custody_context() - .should_attempt_reconstruction( - slot.epoch(T::EthSpec::slots_per_epoch()), - &self.chain.spec, - ) - { - // Instead of triggering reconstruction immediately, schedule it to be run. If - // another column arrives, it either completes availability or pushes - // reconstruction back a bit. - let cloned_self = Arc::clone(self); - let block_root = *block_root; - - if self - .beacon_processor_send - .try_send(WorkEvent { - drop_during_sync: false, - work: Work::Reprocess( - ReprocessQueueMessage::DelayColumnReconstruction( - QueuedColumnReconstruction { - block_root, - slot: *slot, - process_fn: Box::pin(async move { - cloned_self - .attempt_data_column_reconstruction(block_root) - .await; - }), - }, - ), - ), - }) - .is_err() - { - warn!("Unable to send reconstruction to reprocessing"); - } - } + self.check_reconstruction_trigger(*slot, block_root).await; } }, Err(BlockError::DuplicateFullyImported(_)) => { @@ -1143,6 +1408,183 @@ impl NetworkBeaconProcessor { } } + /// Process a gossip-verified partial data column by merging it in the assembler + async fn process_gossip_verified_partial_data_column( + self: &Arc, + _peer_id: PeerId, + verified_partial: KzgVerifiedPartialDataColumn, + verified_header: GossipVerifiedPartialDataColumnHeader, + slot: Slot, + ) { + let processing_start_time = Instant::now(); + let block_root = verified_partial.block_root(); + let data_column_index = verified_partial.index(); + + let result = self + .chain + .process_gossip_partial_data_column(verified_partial, verified_header.clone(), slot) + .await; + + // First, handle merge results (if any) + let result = match result { + Ok(Some((avail, merge_result))) => { + if !merge_result.full_columns.is_empty() { + debug!( + %block_root, + index = data_column_index, + "Partial data column completed to full column" + ); + + 
self.send_network_message(NetworkMessage::Publish { + messages: merge_result + .full_columns + .into_iter() + .map(|col| { + let subnet = DataColumnSubnetId::from_column_index( + col.index(), + &self.chain.spec, + ); + PubsubMessage::DataColumnSidecar(Box::new(( + subnet, + col.into_inner(), + ))) + }) + .collect(), + }); + } + + let only_send_completed_partials = + merge_result.local_blobs || self.chain.config.disable_get_blobs; + let columns = merge_result + .updated_partials + .into_iter() + .map(|partial| partial.into_inner()) + .filter(|partial| { + !only_send_completed_partials || partial.sidecar.is_complete() + }) + .collect::>(); + + if !columns.is_empty() { + if only_send_completed_partials { + debug!( + block = %block_root, + "Not publishing incomplete partials before getBlobs" + ); + } + self.send_network_message(NetworkMessage::PublishPartialColumns { + columns, + header: verified_header.into_header(), + }); + } + Ok(avail) + } + Ok(None) => { + // Column was not merged because it is not a custody column. 
+ return;
+ }
+ Err(err) => Err(err),
+ };
+
+ register_process_result_metrics(
+ &result,
+ metrics::BlockSource::Gossip,
+ "partial_data_column",
+ );
+
+ match &result {
+ Ok(availability) => match availability {
+ AvailabilityProcessingStatus::Imported(block_root) => {
+ debug!(
+ %block_root,
+ "Data column from partial processed, imported fully available block"
+ );
+ self.chain.recompute_head_at_current_slot().await;
+
+ metrics::set_gauge(
+ &metrics::BEACON_BLOB_DELAY_FULL_VERIFICATION,
+ processing_start_time.elapsed().as_millis() as i64,
+ );
+ }
+ AvailabilityProcessingStatus::MissingComponents(slot, block_root) => {
+ trace!(
+ %slot,
+ %data_column_index,
+ %block_root,
+ "Processed data column from partial, waiting for other components"
+ );
+
+ self.check_reconstruction_trigger(*slot, block_root).await;
+ }
+ },
+ Err(BlockError::DuplicateFullyImported(_)) => {
+ debug!(
+ ?block_root,
+ data_column_index, "Ignoring completed gossip column already imported"
+ );
+ }
+ Err(err) => {
+ debug!(
+ outcome = ?err,
+ ?block_root,
+ block_slot = %slot,
+ data_column_index,
+ "Invalid completed gossip data column"
+ );
+ // We can't really penalize here, as the error might be the fault of another peer
+ // contributing to the partial.
+ }
+ }
+
+ // If a block is in the da_checker, sync may be awaiting an event when the block is finally
+ // imported. A block can become imported both after processing a block or data column. If
+ // importing a block results in `Imported`, notify. Do not notify of data column errors. 
+ if matches!(result, Ok(AvailabilityProcessingStatus::Imported(_))) { + self.send_sync_message(SyncMessage::GossipBlockProcessResult { + block_root, + imported: true, + }); + } + } + + async fn check_reconstruction_trigger(self: &Arc, slot: Slot, block_root: &Hash256) { + if self + .chain + .data_availability_checker + .custody_context() + .should_attempt_reconstruction( + slot.epoch(T::EthSpec::slots_per_epoch()), + &self.chain.spec, + ) + { + // Instead of triggering reconstruction immediately, schedule it to be run. If + // another column arrives, it either completes availability or pushes + // reconstruction back a bit. + let cloned_self = Arc::clone(self); + let block_root = *block_root; + + if self + .beacon_processor_send + .try_send(WorkEvent { + drop_during_sync: false, + work: Work::Reprocess(ReprocessQueueMessage::DelayColumnReconstruction( + QueuedColumnReconstruction { + block_root, + slot, + process_fn: Box::pin(async move { + cloned_self + .attempt_data_column_reconstruction(block_root) + .await; + }), + }, + )), + }) + .is_err() + { + warn!("Unable to send reconstruction to reprocessing"); + } + } + } + /// Process the beacon block received from the gossip network and: /// /// - If it passes gossip propagation criteria, tell the network thread to forward it. @@ -1499,23 +1941,21 @@ impl NetworkBeaconProcessor { // Block is gossip valid. Attempt to fetch blobs from the EL using versioned hashes derived // from kzg commitments, without having to wait for all blobs to be sent from the peers. - // TODO(gloas) we'll want to use this same optimization, but we need to refactor the - // `fetch_and_process_engine_blobs` flow to support gloas. 
- if !block.fork_name_unchecked().gloas_enabled() { - let publish_blobs = true; - let self_clone = self.clone(); - let block_clone = block.clone(); - let current_span = Span::current(); - self.executor.spawn( - async move { + let publish_blobs = true; + let self_clone = self.clone(); + let block_clone = block.clone(); + let current_span = Span::current(); + self.executor.spawn( + async move { + if let Ok(header) = PartialDataColumnHeader::try_from(block_clone.as_ref()) { self_clone - .fetch_engine_blobs_and_publish(block_clone, block_root, publish_blobs) + .fetch_engine_blobs_and_publish(Arc::new(header), block_root, publish_blobs) .await } - .instrument(current_span), - "fetch_blobs_gossip", - ); - } + } + .instrument(current_span), + "fetch_blobs_gossip", + ); let result = self .chain diff --git a/beacon_node/network/src/network_beacon_processor/mod.rs b/beacon_node/network/src/network_beacon_processor/mod.rs index 2b354aaa20..015b6a616e 100644 --- a/beacon_node/network/src/network_beacon_processor/mod.rs +++ b/beacon_node/network/src/network_beacon_processor/mod.rs @@ -20,7 +20,7 @@ use lighthouse_network::rpc::methods::{ }; use lighthouse_network::service::api_types::CustodyBackfillBatchId; use lighthouse_network::{ - Client, MessageId, NetworkGlobals, PeerId, PubsubMessage, + Client, GossipTopic, MessageId, NetworkGlobals, PeerId, PubsubMessage, rpc::{BlocksByRangeRequest, BlocksByRootRequest, LightClientBootstrapRequest, StatusMessage}, }; use rand::prelude::SliceRandom; @@ -251,6 +251,32 @@ impl NetworkBeaconProcessor { }) } + /// Create a new `Work` event for some partial data column sidecar. 
+ pub fn send_gossip_partial_data_column_sidecar( + self: &Arc, + peer_id: PeerId, + column_sidecar: Box>, + seen_timestamp: Duration, + topic: GossipTopic, + ) -> Result<(), Error> { + let processor = self.clone(); + let process_fn = async move { + processor + .process_gossip_partial_data_column_sidecar( + peer_id, + column_sidecar, + seen_timestamp, + topic, + ) + .await + }; + + self.try_send(BeaconWorkEvent { + drop_during_sync: false, + work: Work::GossipPartialDataColumnSidecar(Box::pin(process_fn)), + }) + } + /// Create a new `Work` event for some sync committee signature. pub fn send_gossip_sync_signature( self: &Arc, @@ -894,14 +920,14 @@ impl NetworkBeaconProcessor { pub async fn fetch_engine_blobs_and_publish( self: &Arc, - block: Arc>>, + header: Arc>, block_root: Hash256, publish_blobs: bool, ) { if self.chain.config.disable_get_blobs { return; } - let epoch = block.slot().epoch(T::EthSpec::slots_per_epoch()); + let epoch = header.slot().epoch(T::EthSpec::slots_per_epoch()); let custody_columns = self.chain.sampling_columns_for_epoch(epoch); let self_cloned = self.clone(); let publish_fn = move |blobs_or_data_column| { @@ -926,7 +952,7 @@ impl NetworkBeaconProcessor { match fetch_and_process_engine_blobs( self.chain.clone(), block_root, - block.clone(), + header.clone(), custody_columns, publish_fn, ) @@ -970,6 +996,23 @@ impl NetworkBeaconProcessor { ); } } + + // Publish partial columns without eager send + if let Some(assembler) = self.chain.data_availability_checker.partial_assembler() { + let columns = assembler.get_partials_and_mark_as_local_fetched(block_root, &header); + if !columns.is_empty() { + debug!(block = %block_root, "Publishing all partials after getBlobs"); + self.send_network_message(NetworkMessage::PublishPartialColumns { + columns: columns + .into_iter() + .map(|partial| partial.into_inner()) + .collect(), + header, + }); + } else { + debug!(block = %block_root, "No partials to publish after getBlobs"); + } + } } /// Attempts to 
reconstruct all data columns if the conditions checked in diff --git a/beacon_node/network/src/network_beacon_processor/sync_methods.rs b/beacon_node/network/src/network_beacon_processor/sync_methods.rs index f7fbce8e56..8f89b66948 100644 --- a/beacon_node/network/src/network_beacon_processor/sync_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/sync_methods.rs @@ -218,9 +218,15 @@ impl NetworkBeaconProcessor { // Block is valid, we can now attempt fetching blobs from EL using version hashes // derived from kzg commitments from the block, without having to wait for all blobs // to be sent from the peers if we already have them. - let publish_blobs = false; - self.fetch_engine_blobs_and_publish(signed_beacon_block, block_root, publish_blobs) + if let Ok(header) = signed_beacon_block.as_ref().try_into() { + let publish_blobs = false; + self.fetch_engine_blobs_and_publish( + Arc::new(header), + block_root, + publish_blobs, + ) .await; + } } _ => {} } diff --git a/beacon_node/network/src/router.rs b/beacon_node/network/src/router.rs index 3f0e329e91..443fa51cc6 100644 --- a/beacon_node/network/src/router.rs +++ b/beacon_node/network/src/router.rs @@ -14,7 +14,7 @@ use beacon_processor::{BeaconProcessorSend, DuplicateCache}; use futures::prelude::*; use lighthouse_network::rpc::*; use lighthouse_network::{ - MessageId, NetworkGlobals, PeerId, PubsubMessage, Response, + GossipTopic, MessageId, NetworkGlobals, PeerId, PubsubMessage, Response, service::api_types::{AppRequestId, SyncRequestId}, }; use logging::TimeLatch; @@ -24,7 +24,9 @@ use std::sync::Arc; use tokio::sync::mpsc; use tokio_stream::wrappers::UnboundedReceiverStream; use tracing::{debug, error, trace, warn}; -use types::{BlobSidecar, DataColumnSidecar, EthSpec, ForkContext, SignedBeaconBlock}; +use types::{ + BlobSidecar, DataColumnSidecar, EthSpec, ForkContext, PartialDataColumn, SignedBeaconBlock, +}; /// Handles messages from the network and routes them to the appropriate service to be 
handled. pub struct Router { @@ -69,6 +71,8 @@ pub enum RouterMessage { /// message, the message itself and a bool which indicates if the message should be processed /// by the beacon chain after successful verification. PubsubMessage(MessageId, PeerId, PubsubMessage, bool), + /// A partial data column sidecar has been received via gossipsub partial protocol. + PartialDataColumnSidecar(PeerId, Box>, GossipTopic), /// The peer manager has requested we re-status a peer. StatusPeer(PeerId), /// The peer has an updated custody group count from METADATA. @@ -180,6 +184,16 @@ impl Router { RouterMessage::PubsubMessage(id, peer_id, gossip, should_process) => { self.handle_gossip(id, peer_id, gossip, should_process); } + RouterMessage::PartialDataColumnSidecar(peer_id, column, topic) => self + .handle_beacon_processor_send_result( + self.network_beacon_processor + .send_gossip_partial_data_column_sidecar( + peer_id, + column, + self.chain.slot_clock.now_duration().unwrap_or_default(), + topic, + ), + ), } } diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index af56b80822..ce54ffc38f 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -39,8 +39,8 @@ use tokio::time::Sleep; use tracing::{debug, error, info, trace, warn}; use typenum::Unsigned; use types::{ - EthSpec, ForkContext, Slot, SubnetId, SyncCommitteeSubscription, SyncSubnetId, - ValidatorSubscription, + EthSpec, ForkContext, PartialDataColumn, PartialDataColumnHeader, Slot, SubnetId, + SyncCommitteeSubscription, SyncSubnetId, ValidatorSubscription, }; mod tests; @@ -83,6 +83,11 @@ pub enum NetworkMessage { }, /// Publish a list of messages to the gossipsub protocol. Publish { messages: Vec> }, + /// Publish partial data column sidecars via the partial gossipsub protocol. + PublishPartialColumns { + columns: Vec>>, + header: Arc>, + }, /// Validates a received gossipsub message. This will propagate the message on the network. 
ValidationResult { /// The peer that sent us the message. We don't send back to this peer. @@ -92,6 +97,13 @@ pub enum NetworkMessage { /// The result of the validation validation_result: MessageAcceptance, }, + /// Reports validation failure of a partial message. + PartialValidationFailure { + /// The peer that sent us the message. + propagation_source: PeerId, + /// The topic of the message. + gossip_topic: GossipTopic, + }, /// Reports a peer to the peer manager for performing an action. ReportPeer { peer_id: PeerId, @@ -540,7 +552,7 @@ impl NetworkService { let subnet_id = subnet_and_attestation.0; let attestation = &subnet_and_attestation.1; // checks if we have an aggregator for the slot. If so, we should process - // the attestation, else we just just propagate the Attestation. + // the attestation, else we just propagate the Attestation. let should_process = self.subnet_service.should_process_attestation( Subnet::Attestation(subnet_id), &attestation.data, @@ -560,6 +572,15 @@ impl NetworkService { } } } + NetworkEvent::PartialDataColumnSidecar { + source, + column, + topic, + } => { + self.send_to_router(RouterMessage::PartialDataColumnSidecar( + source, column, topic, + )); + } NetworkEvent::NewListenAddr(multiaddr) => { self.network_globals .listen_multiaddrs @@ -640,11 +661,19 @@ impl NetworkService { validation_result, ); } + NetworkMessage::PartialValidationFailure { + propagation_source, + gossip_topic, + } => { + self.libp2p + .report_partial_message_validation_failure(propagation_source, gossip_topic); + } NetworkMessage::Publish { messages } => { let mut topic_kinds = Vec::new(); for message in &messages { - if !topic_kinds.contains(&message.kind()) { - topic_kinds.push(message.kind()); + let kind = message.kind(); + if !topic_kinds.contains(&kind) { + topic_kinds.push(kind); } } debug!( @@ -654,6 +683,9 @@ impl NetworkService { ); self.libp2p.publish(messages); } + NetworkMessage::PublishPartialColumns { columns, header } => { + 
self.libp2p.publish_partial(columns, header);
+ }
 NetworkMessage::ReportPeer {
 peer_id,
 action,
diff --git a/beacon_node/network/src/sync/block_lookups/mod.rs b/beacon_node/network/src/sync/block_lookups/mod.rs
index 394f2fc37d..3929f74aa0 100644
--- a/beacon_node/network/src/sync/block_lookups/mod.rs
+++ b/beacon_node/network/src/sync/block_lookups/mod.rs
@@ -45,7 +45,7 @@ use std::sync::Arc;
 use std::time::Duration;
 use store::Hash256;
 use tracing::{debug, error, warn};
-use types::{BlobSidecar, DataColumnSidecar, EthSpec, SignedBeaconBlock};
+use types::{EthSpec, SignedBeaconBlock};
 pub mod common;
 pub mod parent_chain;
@@ -77,22 +77,21 @@ const LOOKUP_MAX_DURATION_NO_PEERS_SECS: u64 = 10;
 /// take at most 2 GB. 200 lookups allow 3 parallel chains of depth 64 (current maximum).
 const MAX_LOOKUPS: usize = 200;
+/// The value for `Blob`, `DataColumn` and `PartialDataColumn` is the parent root of the column.
 pub enum BlockComponent {
 Block(DownloadResult>>),
- Blob(DownloadResult>>),
- DataColumn(DownloadResult>>),
+ Blob(DownloadResult),
+ DataColumn(DownloadResult),
+ PartialDataColumn(DownloadResult),
 }
 impl BlockComponent {
 fn parent_root(&self) -> Hash256 {
 match self {
 BlockComponent::Block(block) => block.value.parent_root(),
- BlockComponent::Blob(blob) => blob.value.block_parent_root(),
- BlockComponent::DataColumn(column) => match column.value.as_ref() {
- DataColumnSidecar::Fulu(column) => column.block_parent_root(),
- // TODO(gloas) we don't have a parent root post gloas, not sure what to do here
- DataColumnSidecar::Gloas(column) => column.beacon_block_root,
- },
+ BlockComponent::Blob(parent_root)
+ | BlockComponent::DataColumn(parent_root)
+ | BlockComponent::PartialDataColumn(parent_root) => parent_root.value,
 }
 }
 fn get_type(&self) -> &'static str {
 match self {
 BlockComponent::Block(_) => "block",
 BlockComponent::Blob(_) => "blob",
 BlockComponent::DataColumn(_) => "data_column",
+ BlockComponent::PartialDataColumn(_) 
=> "partial_data_column", } } } diff --git a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs index 919526c238..23bfd531f0 100644 --- a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs +++ b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs @@ -156,7 +156,9 @@ impl SingleBlockLookup { .block_request_state .state .insert_verified_response(block), - BlockComponent::Blob(_) | BlockComponent::DataColumn(_) => { + BlockComponent::Blob(_) + | BlockComponent::DataColumn(_) + | BlockComponent::PartialDataColumn(_) => { // For now ignore single blobs and columns, as the blob request state assumes all blobs are // attributed to the same peer = the peer serving the remaining blobs. Ignoring this // block component has a minor effect, causing the node to re-request this blob diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 60dcc3efc7..734295ac1d 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -141,6 +141,14 @@ pub enum SyncMessage { /// A data column with an unknown parent has been received. UnknownParentDataColumn(PeerId, Arc>), + /// A partial data column with an unknown parent has been received. + UnknownParentPartialDataColumn { + peer_id: PeerId, + block_root: Hash256, + parent_root: Hash256, + slot: Slot, + }, + /// A peer has sent an attestation that references a block that is unknown. This triggers the /// manager to attempt to find the block matching the unknown hash. 
UnknownBlockHashFromAttestation(PeerId, Hash256), @@ -866,7 +874,7 @@ impl SyncManager { parent_root, blob_slot, BlockComponent::Blob(DownloadResult { - value: blob, + value: parent_root, block_root, seen_timestamp: self.chain.slot_clock.now_duration().unwrap_or_default(), peer_group: PeerGroup::from_single(peer_id), @@ -886,7 +894,7 @@ impl SyncManager { parent_root, data_column_slot, BlockComponent::DataColumn(DownloadResult { - value: data_column, + value: parent_root, block_root, seen_timestamp: self .chain @@ -903,6 +911,26 @@ impl SyncManager { } } } + SyncMessage::UnknownParentPartialDataColumn { + peer_id, + block_root, + parent_root, + slot, + } => { + debug!(%block_root, %parent_root, "Received unknown parent partial column message"); + self.handle_unknown_parent( + peer_id, + block_root, + parent_root, + slot, + BlockComponent::PartialDataColumn(DownloadResult { + value: parent_root, + block_root, + seen_timestamp: self.chain.slot_clock.now_duration().unwrap_or_default(), + peer_group: PeerGroup::from_single(peer_id), + }), + ); + } SyncMessage::UnknownBlockHashFromAttestation(peer_id, block_root) => { if !self.notified_unknown_roots.contains(&(peer_id, block_root)) { self.notified_unknown_roots.insert((peer_id, block_root)); diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 61dccc9674..51cda0fac3 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -670,6 +670,15 @@ pub fn cli_app() -> Command { .hide(true) .display_order(0) ) + .arg( + Arg::new("enable-partial-columns") + .long("enable-partial-columns") + .help("Enable partial messages for data columns. 
This can reduce the amount of \ + data sent over the network.") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) + ) /* * Monitoring metrics */ diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 0a52bcef06..8ba2c0f321 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -110,6 +110,21 @@ pub fn get_config( set_network_config(&mut client_config.network, cli_args, &data_dir_ref)?; + if parse_flag(cli_args, "enable-partial-columns") { + // Partial messages assume that each subnet maps to exactly one column. + // Check this here to avoid weird issues on networks where this is not the case. + if spec.data_column_sidecar_subnet_count == E::number_of_columns() as u64 { + client_config.network.enable_partial_columns = true; + client_config.chain.enable_partial_columns = true; + } else { + warn!( + subnets = spec.data_column_sidecar_subnet_count, + columns = E::number_of_columns(), + "Not enabling partial columns on networks with multiple columns per subnet" + ) + } + } + // Parse custody mode from CLI flags let is_supernode = parse_flag(cli_args, "supernode"); let is_semi_supernode = parse_flag(cli_args, "semi-supernode"); diff --git a/book/src/help_bn.md b/book/src/help_bn.md index cad21a3e78..b580bcae52 100644 --- a/book/src/help_bn.md +++ b/book/src/help_bn.md @@ -497,6 +497,9 @@ Flags: Sets the local ENR IP address and port to match those set for lighthouse. Specifically, the IP address will be the value of --listen-address and the UDP port will be --discovery-port. + --enable-partial-columns + Enable partial messages for data columns. This can reduce the amount + of data sent over the network. --enable-private-discovery Lighthouse by default does not discover private IP addresses. Set this flag to enable connection attempts to local addresses. 
diff --git a/consensus/types/src/block/beacon_block_body.rs b/consensus/types/src/block/beacon_block_body.rs index cd3f4dcaba..25695dbdda 100644 --- a/consensus/types/src/block/beacon_block_body.rs +++ b/consensus/types/src/block/beacon_block_body.rs @@ -3,14 +3,14 @@ use std::marker::PhantomData; use bls::Signature; use context_deserialize::{ContextDeserialize, context_deserialize}; use educe::Educe; -use merkle_proof::{MerkleTree, MerkleTreeError}; +use merkle_proof::MerkleTree; use metastruct::metastruct; use serde::{Deserialize, Deserializer, Serialize}; use ssz_derive::{Decode, Encode}; use ssz_types::{FixedVector, VariableList}; use superstruct::superstruct; use test_random_derive::TestRandom; -use tree_hash::{BYTES_PER_CHUNK, TreeHash}; +use tree_hash::TreeHash; use tree_hash_derive::TreeHash; use crate::{ @@ -18,6 +18,7 @@ use crate::{ attestation::{ AttestationBase, AttestationElectra, AttestationRef, AttestationRefMut, PayloadAttestation, }, + complete_kzg_commitment_merkle_proof, core::{EthSpec, Graffiti, Hash256}, deposit::Deposit, execution::{ @@ -272,46 +273,11 @@ impl<'a, E: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRef<'a, E, | Self::Capella(_) | Self::Gloas(_) => Err(BeaconStateError::IncorrectStateVariant), Self::Deneb(_) | Self::Electra(_) | Self::Fulu(_) => { - // We compute the branches by generating 2 merkle trees: - // 1. Merkle tree for the `blob_kzg_commitments` List object - // 2. Merkle tree for the `BeaconBlockBody` container - // We then merge the branches for both the trees all the way up to the root. - - // Part1 (Branches for the subtree rooted at `blob_kzg_commitments`) - // - // Branches for `blob_kzg_commitments` without length mix-in - let blob_leaves = self - .blob_kzg_commitments()? 
- .iter() - .map(|commitment| commitment.tree_hash_root()) - .collect::>(); - let depth = E::max_blob_commitments_per_block() - .next_power_of_two() - .ilog2(); - let tree = MerkleTree::create(&blob_leaves, depth as usize); - let (_, mut proof) = tree - .generate_proof(index, depth as usize) - .map_err(BeaconStateError::MerkleTreeError)?; - - // Add the branch corresponding to the length mix-in. - let length = blob_leaves.len(); - let usize_len = std::mem::size_of::(); - let mut length_bytes = [0; BYTES_PER_CHUNK]; - length_bytes - .get_mut(0..usize_len) - .ok_or(BeaconStateError::MerkleTreeError( - MerkleTreeError::PleaseNotifyTheDevs, - ))? - .copy_from_slice(&length.to_le_bytes()); - let length_root = Hash256::from_slice(length_bytes.as_slice()); - proof.push(length_root); - - // Part 2 - // Branches for `BeaconBlockBody` container - // Join the proofs for the subtree and the main tree - proof.extend_from_slice(kzg_commitments_proof); - - Ok(FixedVector::new(proof)?) + complete_kzg_commitment_merkle_proof::( + self.blob_kzg_commitments()?, + index, + kzg_commitments_proof, + ) } } } diff --git a/consensus/types/src/data/blob_sidecar.rs b/consensus/types/src/data/blob_sidecar.rs index 2774176190..70b95615e5 100644 --- a/consensus/types/src/data/blob_sidecar.rs +++ b/consensus/types/src/data/blob_sidecar.rs @@ -19,9 +19,9 @@ use crate::{ block::{ BLOB_KZG_COMMITMENTS_INDEX, BeaconBlockHeader, SignedBeaconBlock, SignedBeaconBlockHeader, }, + complete_kzg_commitment_merkle_proof, core::{ChainSpec, Epoch, EthSpec, Hash256, Slot}, - data::Blob, - execution::AbstractExecPayload, + data::{Blob, PartialDataColumnHeader}, fork::ForkName, kzg_ext::KzgProofs, state::BeaconStateError, @@ -140,33 +140,29 @@ impl BlobSidecar { }) } - pub fn new_with_existing_proof>( + pub fn new_with_existing_proof>>( index: usize, blob: Blob, - signed_block: &SignedBeaconBlock, - signed_block_header: SignedBeaconBlockHeader, - kzg_commitments_inclusion_proof: &[Hash256], + header: T, 
kzg_proof: KzgProof, ) -> Result { - let expected_kzg_commitments = signed_block - .message() - .body() - .blob_kzg_commitments() - .map_err(|_e| BlobSidecarError::PreDeneb)?; - let kzg_commitment = *expected_kzg_commitments + let header = header.try_into().map_err(|_| BlobSidecarError::PreDeneb)?; + let kzg_commitment = *header + .kzg_commitments .get(index) .ok_or(BlobSidecarError::MissingKzgCommitment)?; - let kzg_commitment_inclusion_proof = signed_block - .message() - .body() - .complete_kzg_commitment_merkle_proof(index, kzg_commitments_inclusion_proof)?; + let kzg_commitment_inclusion_proof = complete_kzg_commitment_merkle_proof::( + &header.kzg_commitments, + index, + &header.kzg_commitments_inclusion_proof, + )?; Ok(Self { index: index as u64, blob, kzg_commitment, kzg_proof, - signed_block_header, + signed_block_header: header.signed_block_header, kzg_commitment_inclusion_proof, }) } diff --git a/consensus/types/src/data/data_column_sidecar.rs b/consensus/types/src/data/data_column_sidecar.rs index c8a49e346a..109c9472a5 100644 --- a/consensus/types/src/data/data_column_sidecar.rs +++ b/consensus/types/src/data/data_column_sidecar.rs @@ -19,6 +19,10 @@ use tree_hash_derive::TreeHash; use crate::{ block::{BLOB_KZG_COMMITMENTS_INDEX, BeaconBlockHeader, SignedBeaconBlockHeader}, core::{Epoch, EthSpec, Hash256, Slot}, + data::{ + CellBitmap, PartialDataColumn, PartialDataColumnHeader, PartialDataColumnSidecar, + PartialDataColumnSidecarError, PartialDataColumnSidecarRef, + }, fork::ForkName, kzg_ext::{KzgCommitments, KzgError}, state::BeaconStateError, @@ -136,6 +140,49 @@ impl DataColumnSidecar { )), } } + + /// Convert this full data column into a partial data column reference for KZG verification. + /// The header will NOT be set. + /// + /// Uses the supplied filter to determine which cells to include in the partial sidecar. 
+ pub fn try_filter_to_partial_ref( + &self, + filter: F, + ) -> Result>, Err> + where + F: Fn(usize, &Cell, &KzgProof) -> Result, + Err: From, + { + let len = self.column().len(); + let mut new_bitmap = CellBitmap::::with_capacity(len) + .map_err(|_| PartialDataColumnSidecarError::UnexpectedBounds)?; + let mut new_column = Vec::with_capacity(len); + let mut new_proofs = Vec::with_capacity(len); + let iter = self.column().iter().zip(self.kzg_proofs().iter()); + + for (blob_idx, (cell, proof)) in iter.enumerate() { + if filter(blob_idx, cell, proof)? { + // Keep this cell + new_column.push(cell); + new_proofs.push(proof); + // Mark as present + new_bitmap + .set(blob_idx, true) + .map_err(|_| PartialDataColumnSidecarError::UnexpectedBounds)?; + } + } + + if new_column.is_empty() { + return Ok(None); + } + + Ok(Some(PartialDataColumnSidecarRef { + cells_present_bitmap: new_bitmap, + column: new_column, + kzg_proofs: new_proofs, + header: None.into(), + })) + } } impl DataColumnSidecarFulu { @@ -204,6 +251,36 @@ impl DataColumnSidecarFulu { .as_ssz_bytes() .len() } + + /// Convert this full data column into a verifiable partial data column. 
+ pub fn to_partial(&self) -> PartialDataColumn { + let cell_count = self.column.len(); + let mut bitmap = + CellBitmap::::with_capacity(cell_count).expect("our column has the same bound"); + for idx in 0..cell_count { + bitmap + .set(idx, true) + .expect("The correct size is initialized right above"); + } + + let block_root = self.block_root(); + + PartialDataColumn { + block_root, + index: self.index, + sidecar: PartialDataColumnSidecar { + cells_present_bitmap: bitmap, + column: self.column.clone(), + kzg_proofs: self.kzg_proofs.clone(), + header: Some(PartialDataColumnHeader { + kzg_commitments: self.kzg_commitments.clone(), + signed_block_header: self.signed_block_header.clone(), + kzg_commitments_inclusion_proof: self.kzg_commitments_inclusion_proof.clone(), + }) + .into(), + }, + } + } } impl DataColumnSidecarGloas { diff --git a/consensus/types/src/data/mod.rs b/consensus/types/src/data/mod.rs index 4125b6072b..9c7eb42626 100644 --- a/consensus/types/src/data/mod.rs +++ b/consensus/types/src/data/mod.rs @@ -2,6 +2,7 @@ mod blob_sidecar; mod data_column_custody_group; mod data_column_sidecar; mod data_column_subnet_id; +mod partial_data_column_sidecar; pub use blob_sidecar::{ BlobIdentifier, BlobSidecar, BlobSidecarError, BlobSidecarList, BlobsList, FixedBlobSidecarList, @@ -17,6 +18,10 @@ pub use data_column_sidecar::{ DataColumnsByRootIdentifier, }; pub use data_column_subnet_id::{DataColumnSubnetId, all_data_column_sidecar_subnets_from_spec}; +pub use partial_data_column_sidecar::{ + CellBitmap, PartialDataColumn, PartialDataColumnHeader, PartialDataColumnPartsMetadata, + PartialDataColumnSidecar, PartialDataColumnSidecarError, PartialDataColumnSidecarRef, +}; use crate::core::EthSpec; use ssz_types::FixedVector; diff --git a/consensus/types/src/data/partial_data_column_sidecar.rs b/consensus/types/src/data/partial_data_column_sidecar.rs new file mode 100644 index 0000000000..df65be1ae3 --- /dev/null +++ 
b/consensus/types/src/data/partial_data_column_sidecar.rs @@ -0,0 +1,429 @@ +use crate::{ + block::{BLOB_KZG_COMMITMENTS_INDEX, SignedBeaconBlock, SignedBeaconBlockHeader}, + core::{EthSpec, Hash256, Slot}, + data::{Cell, ColumnIndex, DataColumnSidecar, DataColumnSidecarFulu}, + execution::AbstractExecPayload, + kzg_ext::KzgCommitments, + state::BeaconStateError, + test_utils::TestRandom, +}; +use educe::Educe; +use kzg::KzgProof; +use merkle_proof::verify_merkle_proof; +use ssz::BitList; +use ssz_derive::{Decode, Encode}; +use ssz_types::{FixedVector, ListEncodedOption, VariableList}; +use std::fmt::Display; +use test_random_derive::TestRandom; +use tree_hash::TreeHash; +use tree_hash_derive::TreeHash; + +pub type CellBitmap = BitList<::MaxBlobCommitmentsPerBlock>; + +#[cfg_attr( + feature = "arbitrary", + derive(arbitrary::Arbitrary), + arbitrary(bound = "E: EthSpec") +)] +#[derive(Debug, Clone, Encode, Decode, TreeHash, Educe)] +#[educe(PartialEq, Eq, Hash(bound = "E: EthSpec"))] +pub struct PartialDataColumnSidecar { + pub cells_present_bitmap: CellBitmap, + pub column: VariableList, E::MaxBlobCommitmentsPerBlock>, + pub kzg_proofs: VariableList, + pub header: ListEncodedOption>, +} + +/// Equivalent to `PartialDataColumnSidecar`, but containing references to the cells. This is done +/// so that we can get a part of a sidecar without expensively cloning all the contents. +#[derive(Debug, Clone, Encode)] +pub struct PartialDataColumnSidecarRef<'a, E: EthSpec> { + pub cells_present_bitmap: CellBitmap, + // It is fine to use `Vec` here as we never decode directly into this type, and only create + // this from the `PartialDataColumnSidecar` type above. This avoids a few ugly `expect` calls. 
+ pub column: Vec<&'a Cell>, + pub kzg_proofs: Vec<&'a KzgProof>, + pub header: ListEncodedOption<&'a PartialDataColumnHeader>, +} + +#[derive(Debug, Clone, Copy)] +pub enum PartialDataColumnSidecarError { + UnexpectedBounds, + InternallyInconsistent, + DifferingLengths { lhs_len: usize, rhs_len: usize }, + ConflictingData, +} + +impl PartialDataColumnSidecar { + pub fn is_complete(&self) -> bool { + self.cells_present_bitmap.num_set_bits() == self.cells_present_bitmap.len() + } + + pub fn get(&self, idx: usize) -> Option<(&Cell, &KzgProof)> { + if !self.cells_present_bitmap.get(idx).unwrap_or(false) { + return None; + } + let storage_idx = self + .cells_present_bitmap + .iter() + .take(idx) + .filter(|b| *b) + .count(); + self.column + .get(storage_idx) + .and_then(|cell| self.kzg_proofs.get(storage_idx).map(|proof| (cell, proof))) + } + + /// Creates a reference to this sidecar containing only the blob indices for which the passed + /// closure returns `true` and is present in `self`. Will return `None` if there is no overlap. 
+ pub fn filter( + &self, + filter: F, + ) -> Result>, PartialDataColumnSidecarError> + where + F: Fn(usize) -> bool, + { + let len = self.verify_len()?; + + let mut new_bitmap = self.cells_present_bitmap.clone(); + let mut new_column = Vec::with_capacity(len); + let mut new_proofs = Vec::with_capacity(len); + let mut iter = self.column.iter().zip(self.kzg_proofs.iter()); + + for (blob_idx, present) in self.cells_present_bitmap.iter().enumerate() { + if present { + let (cell, proof) = iter + .next() + .ok_or(PartialDataColumnSidecarError::UnexpectedBounds)?; + if filter(blob_idx) { + // Keep this cell + new_column.push(cell); + new_proofs.push(proof); + } else { + // Mark as not present + new_bitmap + .set(blob_idx, false) + .map_err(|_| PartialDataColumnSidecarError::UnexpectedBounds)?; + } + } + } + + if new_column.is_empty() { + return Ok(None); + } + + Ok(Some(PartialDataColumnSidecarRef { + cells_present_bitmap: new_bitmap, + column: new_column, + kzg_proofs: new_proofs, + header: self.header.as_ref().into(), + })) + } + + pub fn verify_len(&self) -> Result { + let len = self.cells_present_bitmap.num_set_bits(); + if len != self.kzg_proofs.len() || len != self.column.len() { + return Err(PartialDataColumnSidecarError::InternallyInconsistent); + } + Ok(len) + } +} + +#[cfg_attr( + feature = "arbitrary", + derive(arbitrary::Arbitrary), + arbitrary(bound = "E: EthSpec") +)] +#[derive(Debug, Clone, Encode, Decode, TreeHash, TestRandom, Educe)] +#[educe(PartialEq, Eq, Hash(bound = "E: EthSpec"))] +pub struct PartialDataColumnHeader { + pub kzg_commitments: KzgCommitments, + pub signed_block_header: SignedBeaconBlockHeader, + pub kzg_commitments_inclusion_proof: FixedVector, +} + +impl PartialDataColumnHeader { + pub fn slot(&self) -> Slot { + self.signed_block_header.message.slot + } + + pub fn verify_inclusion_proof(&self) -> bool { + let blob_kzg_commitments_root = self.kzg_commitments.tree_hash_root(); + + verify_merkle_proof( + blob_kzg_commitments_root, + 
&self.kzg_commitments_inclusion_proof, + E::kzg_commitments_inclusion_proof_depth(), + BLOB_KZG_COMMITMENTS_INDEX, + self.signed_block_header.message.body_root, + ) + } +} + +impl> TryFrom<&SignedBeaconBlock> + for PartialDataColumnHeader +{ + type Error = BeaconStateError; + + fn try_from(block: &SignedBeaconBlock) -> Result { + Ok(Self { + kzg_commitments: block.message().body().blob_kzg_commitments()?.clone(), + signed_block_header: block.signed_block_header(), + kzg_commitments_inclusion_proof: block + .message() + .body() + .kzg_commitments_merkle_proof()?, + }) + } +} + +#[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)] +pub struct PartialDataColumnPartsMetadata { + pub available: CellBitmap, + pub requests: CellBitmap, +} + +impl Display for PartialDataColumnPartsMetadata { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "(available: {}, requested: {})", + self.available, self.requests + ) + } +} + +#[derive(Debug, Clone, PartialEq)] +pub struct PartialDataColumn { + pub block_root: Hash256, + pub index: ColumnIndex, + pub sidecar: PartialDataColumnSidecar, +} + +impl PartialDataColumn { + /// Equivalent to a call to `clone` followed by `try_into_full`, but returns early if conversion + /// is not possible. 
+ pub fn try_clone_full( + &self, + header: &PartialDataColumnHeader, + ) -> Option> { + if !self.sidecar.is_complete() { + return None; + } + + Some(DataColumnSidecar::Fulu(DataColumnSidecarFulu { + index: self.index, + column: self.sidecar.column.clone(), + kzg_commitments: header.kzg_commitments.clone(), + kzg_proofs: self.sidecar.kzg_proofs.clone(), + signed_block_header: header.signed_block_header.clone(), + kzg_commitments_inclusion_proof: header.kzg_commitments_inclusion_proof.clone(), + })) + } + + pub fn try_into_full( + self, + header: &PartialDataColumnHeader, + ) -> Option> { + if !self.sidecar.is_complete() { + return None; + } + + Some(DataColumnSidecar::Fulu(DataColumnSidecarFulu { + index: self.index, + column: self.sidecar.column, + kzg_commitments: header.kzg_commitments.clone(), + kzg_proofs: self.sidecar.kzg_proofs, + signed_block_header: header.signed_block_header.clone(), + kzg_commitments_inclusion_proof: header.kzg_commitments_inclusion_proof.clone(), + })) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::MinimalEthSpec; + use bls::Signature; + use fixed_bytes::FixedBytesExtended; + use kzg::KzgCommitment; + use ssz::Encode; + + type E = MinimalEthSpec; + + fn make_cell(marker: u8) -> Cell { + let mut cell = Cell::::default(); + cell[0] = marker; + cell + } + + fn make_sidecar_with_marker( + total_blobs: usize, + present_indices: &[usize], + marker_base: u8, + ) -> PartialDataColumnSidecar { + let mut bitmap = CellBitmap::::with_capacity(total_blobs).unwrap(); + for &idx in present_indices { + bitmap.set(idx, true).unwrap(); + } + + let column: VariableList<_, _> = present_indices + .iter() + .map(|&idx| make_cell(marker_base.wrapping_add(idx as u8))) + .collect::>() + .try_into() + .unwrap(); + let proofs: VariableList<_, _> = present_indices + .iter() + .map(|_| KzgProof::empty()) + .collect::>() + .try_into() + .unwrap(); + + PartialDataColumnSidecar { + cells_present_bitmap: bitmap, + column, + kzg_proofs: proofs, + 
header: None.into(), + } + } + + fn make_sidecar(total_blobs: usize, present_indices: &[usize]) -> PartialDataColumnSidecar { + make_sidecar_with_marker(total_blobs, present_indices, 0) + } + + fn make_header(num_commitments: usize) -> PartialDataColumnHeader { + PartialDataColumnHeader { + kzg_commitments: vec![KzgCommitment([0u8; 48]); num_commitments] + .try_into() + .unwrap(), + signed_block_header: SignedBeaconBlockHeader { + message: crate::BeaconBlockHeader { + slot: Slot::new(0), + proposer_index: 0, + parent_root: Hash256::zero(), + state_root: Hash256::zero(), + body_root: Hash256::zero(), + }, + signature: Signature::empty(), + }, + kzg_commitments_inclusion_proof: FixedVector::new( + vec![Hash256::zero(); E::kzg_commitments_inclusion_proof_depth()], + ) + .unwrap(), + } + } + + // -- filter tests -- + + #[test] + fn filter_keeps_matching_cells() { + let sidecar = make_sidecar(6, &[0, 2, 4]); + let filtered = sidecar.filter(|idx| idx == 0 || idx == 4).unwrap().unwrap(); + assert_eq!(filtered.column.len(), 2); + assert_eq!(filtered.kzg_proofs.len(), 2); + assert!(filtered.cells_present_bitmap.get(0).unwrap()); + assert!(!filtered.cells_present_bitmap.get(2).unwrap()); + assert!(filtered.cells_present_bitmap.get(4).unwrap()); + } + + #[test] + fn filter_returns_none_when_no_overlap() { + let sidecar = make_sidecar(6, &[0, 2, 4]); + assert!( + sidecar + .filter(|idx| idx == 1 || idx == 3) + .unwrap() + .is_none() + ); + } + + #[test] + fn filter_preserves_all_when_all_match() { + let sidecar = make_sidecar(6, &[0, 2, 4]); + let filtered = sidecar.filter(|_| true).unwrap().unwrap(); + assert_eq!(filtered.column.len(), 3); + assert_eq!(filtered.kzg_proofs.len(), 3); + assert_eq!(filtered.cells_present_bitmap, sidecar.cells_present_bitmap); + + // Also, check that the encoded version matches + assert_eq!(filtered.as_ssz_bytes(), sidecar.as_ssz_bytes()); + } + + // -- is_complete tests -- + + #[test] + fn is_complete_true_when_all_bits_set() { + let sidecar = 
make_sidecar(4, &[0, 1, 2, 3]); + assert!(sidecar.is_complete()); + } + + #[test] + fn is_complete_false_when_partial() { + let sidecar = make_sidecar(4, &[0, 2]); + assert!(!sidecar.is_complete()); + } + + // -- try_clone_full tests (on PartialDataColumn) -- + + #[test] + fn try_clone_full_succeeds_when_complete() { + let sidecar = make_sidecar(3, &[0, 1, 2]); + let header = make_header(3); + let partial = PartialDataColumn { + block_root: Hash256::zero(), + index: 5, + sidecar, + }; + let full = partial.try_clone_full(&header).unwrap(); + assert_eq!(*full.index(), 5); + assert_eq!(full.column().len(), 3); + } + + #[test] + fn try_clone_full_returns_none_when_incomplete() { + let sidecar = make_sidecar(4, &[0, 2]); + let header = make_header(4); + let partial = PartialDataColumn { + block_root: Hash256::zero(), + index: 0, + sidecar, + }; + assert!(partial.try_clone_full(&header).is_none()); + } + + // -- get tests -- + + #[test] + fn get_sparse_bitmap_maps_to_correct_storage_position() { + // bitmap: [false, true, false, true] → column: [cell_1, cell_3] + let sidecar = make_sidecar_with_marker(4, &[1, 3], 0); + let (cell, _) = sidecar.get(1).expect("cell at blob index 1 should exist"); + assert_eq!(cell[0], 1); + let (cell, _) = sidecar.get(3).expect("cell at blob index 3 should exist"); + assert_eq!(cell[0], 3); + } + + #[test] + fn get_absent_blob_index_returns_none() { + let sidecar = make_sidecar(4, &[1, 3]); + assert!(sidecar.get(0).is_none()); + assert!(sidecar.get(2).is_none()); + } + + #[test] + fn get_out_of_range_returns_none() { + let sidecar = make_sidecar(4, &[0, 2]); + assert!(sidecar.get(4).is_none()); + assert!(sidecar.get(100).is_none()); + } + + #[test] + fn get_dense_bitmap_matches_direct_index() { + let sidecar = make_sidecar_with_marker(4, &[0, 1, 2, 3], 10); + for i in 0..4 { + let (cell, _) = sidecar.get(i).expect("all cells should be present"); + assert_eq!(cell[0], 10 + i as u8); + } + } +} diff --git a/consensus/types/src/kzg_ext/mod.rs 
b/consensus/types/src/kzg_ext/mod.rs index e0ec9dd956..09305716ab 100644 --- a/consensus/types/src/kzg_ext/mod.rs +++ b/consensus/types/src/kzg_ext/mod.rs @@ -2,9 +2,11 @@ pub mod consts; pub use kzg::{Error as KzgError, Kzg, KzgCommitment, KzgProof}; -use ssz_types::VariableList; - use crate::core::EthSpec; +use crate::{BeaconStateError, Hash256}; +use merkle_proof::{MerkleTree, MerkleTreeError}; +use ssz_types::{FixedVector, VariableList}; +use tree_hash::{BYTES_PER_CHUNK, TreeHash}; // Note on List limit: // - Deneb to Electra: `MaxBlobCommitmentsPerBlock` @@ -25,3 +27,49 @@ pub fn format_kzg_commitments(commitments: &[KzgCommitment]) -> String { let surrounded_commitments = format!("[{}]", commitments_joined); surrounded_commitments } + +pub fn complete_kzg_commitment_merkle_proof( + kzg_commitments: &KzgCommitments, + index: usize, + kzg_commitments_proof: &[Hash256], +) -> Result, BeaconStateError> { + // We compute the branches by generating 2 merkle trees: + // 1. Merkle tree for the `blob_kzg_commitments` List object + // 2. Merkle tree for the `BeaconBlockBody` container + // We then merge the branches for both the trees all the way up to the root. + + // Part1 (Branches for the subtree rooted at `blob_kzg_commitments`) + // + // Branches for `blob_kzg_commitments` without length mix-in + let blob_leaves = kzg_commitments + .iter() + .map(|commitment| commitment.tree_hash_root()) + .collect::>(); + let depth = E::max_blob_commitments_per_block() + .next_power_of_two() + .ilog2(); + let tree = MerkleTree::create(&blob_leaves, depth as usize); + let (_, mut proof) = tree + .generate_proof(index, depth as usize) + .map_err(BeaconStateError::MerkleTreeError)?; + + // Add the branch corresponding to the length mix-in. 
+ let length = blob_leaves.len(); + let usize_len = std::mem::size_of::(); + let mut length_bytes = [0; BYTES_PER_CHUNK]; + length_bytes + .get_mut(0..usize_len) + .ok_or(BeaconStateError::MerkleTreeError( + MerkleTreeError::PleaseNotifyTheDevs, + ))? + .copy_from_slice(&length.to_le_bytes()); + let length_root = Hash256::from_slice(length_bytes.as_slice()); + proof.push(length_root); + + // Part 2 + // Branches for `BeaconBlockBody` container + // Join the proofs for the subtree and the main tree + proof.extend_from_slice(kzg_commitments_proof); + + Ok(FixedVector::new(proof)?) +} diff --git a/consensus/types/src/test_utils/generate_random_block_and_blobs.rs b/consensus/types/src/test_utils/generate_random_block_and_blobs.rs index 4e875341a0..2a38b5be1f 100644 --- a/consensus/types/src/test_utils/generate_random_block_and_blobs.rs +++ b/consensus/types/src/test_utils/generate_random_block_and_blobs.rs @@ -97,20 +97,8 @@ mod test { .. } = blob_sidecars.pop().unwrap(); - // Compute the commitments inclusion proof and use it for building blob sidecar. 
- let (signed_block_header, kzg_commitments_inclusion_proof) = block - .signed_block_header_and_kzg_commitments_proof() - .unwrap(); - - let blob_sidecar = BlobSidecar::new_with_existing_proof( - index as usize, - blob, - &block, - signed_block_header, - &kzg_commitments_inclusion_proof, - kzg_proof, - ) - .unwrap(); + let blob_sidecar = + BlobSidecar::new_with_existing_proof(index as usize, blob, &block, kzg_proof).unwrap(); assert!(blob_sidecar.verify_blob_sidecar_inclusion_proof()); } diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index ded1f2b765..0c5d9a5933 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -2864,3 +2864,21 @@ fn invalid_block_roots_default_mainnet() { assert!(config.chain.invalid_block_roots.is_empty()); }) } + +#[test] +fn partial_columns() { + CommandLineTest::new() + .flag("enable-partial-columns", None) + .run_with_zero_port() + .with_config(|config| { + assert!(config.network.enable_partial_columns); + assert!(config.chain.enable_partial_columns); + }); + // And disabled by default: + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| { + assert!(!config.network.enable_partial_columns); + assert!(!config.chain.enable_partial_columns); + }) +} From df764ffa9aa794bb5b12901123c8acdf38fb407f Mon Sep 17 00:00:00 2001 From: hopinheimer <48147533+hopinheimer@users.noreply.github.com> Date: Sat, 25 Apr 2026 04:04:09 -0400 Subject: [PATCH 4/7] Re-issue `ForkchoiceUpdate` based on updated `PayloadStatus` (#9102) Co-Authored-By: hopinheimer Co-Authored-By: Michael Sproul Co-Authored-By: Michael Sproul --- beacon_node/beacon_chain/src/beacon_chain.rs | 89 ++- .../beacon_chain/src/canonical_head.rs | 9 +- beacon_node/beacon_chain/src/test_utils.rs | 30 + beacon_node/beacon_chain/tests/main.rs | 1 + .../beacon_chain/tests/prepare_payload.rs | 575 ++++++++++++++++++ beacon_node/client/src/builder.rs | 8 +- .../src/engine_api/json_structures.rs | 30 +- 
beacon_node/execution_layer/src/lib.rs | 14 +- .../test_utils/execution_block_generator.rs | 28 +- .../src/test_utils/handle_rpc.rs | 19 +- .../src/test_utils/mock_builder.rs | 13 +- .../src/test_utils/mock_execution_layer.rs | 13 +- .../src/proto_array_fork_choice.rs | 2 +- .../src/test_rig.rs | 18 +- 14 files changed, 808 insertions(+), 41 deletions(-) create mode 100644 beacon_node/beacon_chain/tests/prepare_payload.rs diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index f3861ac727..98dc9cd7fd 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -117,8 +117,8 @@ use state_processing::{ epoch_cache::initialize_epoch_cache, per_block_processing, per_block_processing::{ - VerifySignatures, errors::AttestationValidationError, get_expected_withdrawals, - verify_attestation_for_block_inclusion, + VerifySignatures, apply_parent_execution_payload, errors::AttestationValidationError, + get_expected_withdrawals, verify_attestation_for_block_inclusion, }, per_slot_processing, state_advance::{complete_state_advance, partial_state_advance}, @@ -4858,16 +4858,20 @@ impl BeaconChain { proposal_slot: Slot, ) -> Result, Error> { let cached_head = self.canonical_head.cached_head(); + let head_block = &cached_head.snapshot.beacon_block; + let head_block_root = cached_head.head_block_root(); let head_state = &cached_head.snapshot.beacon_state; let parent_block_root = forkchoice_update_params.head_root; - let (unadvanced_state, unadvanced_state_root) = - if cached_head.head_block_root() == parent_block_root { - (Cow::Borrowed(head_state), cached_head.head_state_root()) + let (unadvanced_state, unadvanced_state_root, parent_bid_block_hash) = + if parent_block_root == head_block_root { + ( + Cow::Borrowed(head_state), + cached_head.head_state_root(), + head_block.payload_bid_block_hash().ok(), + ) } else { - // TODO(gloas): this function needs updating to be 
envelope-aware - // See: https://github.com/sigp/lighthouse/issues/8957 let block = self .get_blinded_block(&parent_block_root)? .ok_or(Error::MissingBeaconBlock(parent_block_root))?; @@ -4875,20 +4879,27 @@ impl BeaconChain { .store .get_advanced_hot_state(parent_block_root, proposal_slot, block.state_root())? .ok_or(Error::MissingBeaconState(block.state_root()))?; - (Cow::Owned(state), state_root) + ( + Cow::Owned(state), + state_root, + block.payload_bid_block_hash().ok(), + ) }; - // Parent state epoch is the same as the proposal, we don't need to advance because the - // list of expected withdrawals can only change after an epoch advance or a - // block application. - let proposal_epoch = proposal_slot.epoch(T::EthSpec::slots_per_epoch()); - if head_state.current_epoch() == proposal_epoch { - return get_expected_withdrawals(&unadvanced_state, &self.spec) - .map(Into::into) - .map_err(Error::PrepareProposerFailed); - } + let parent_payload_status = if let Some(block_hash) = parent_bid_block_hash + && block_hash != ExecutionBlockHash::default() + && forkchoice_update_params.head_hash == Some(block_hash) + { + fork_choice::PayloadStatus::Full + } else { + fork_choice::PayloadStatus::Empty + }; // Advance the state using the partial method. + // TODO(gloas): we might want to optimise this further by using: + // - `get_advanced_hot_state` instead of the cached head + // - restoring the pre-Gloas optimisation to avoid advancing further than the epoch + // boundary debug!( %proposal_slot, ?parent_block_root, @@ -4898,9 +4909,33 @@ impl BeaconChain { partial_state_advance( &mut advanced_state, Some(unadvanced_state_root), - proposal_epoch.start_slot(T::EthSpec::slots_per_epoch()), + proposal_slot, &self.spec, )?; + + // For Gloas, when the head payload is Full, we need to apply the parent's + // execution requests to the state to get the correct withdrawals. 
+ if parent_payload_status == fork_choice::PayloadStatus::Full { + let envelope = if parent_block_root == head_block_root { + cached_head.snapshot.execution_envelope.clone() + } else { + self.store + .get_payload_envelope(&parent_block_root)? + .map(Arc::new) + } + .ok_or(Error::MissingExecutionPayloadEnvelope(parent_block_root))?; + + let parent_bid = advanced_state.latest_execution_payload_bid()?.clone(); + + apply_parent_execution_payload( + &mut advanced_state, + &parent_bid, + &envelope.message.execution_requests, + &self.spec, + ) + .map_err(Error::PrepareProposerFailed)?; + } + get_expected_withdrawals(&advanced_state, &self.spec) .map(Into::into) .map_err(Error::PrepareProposerFailed) @@ -6112,13 +6147,20 @@ impl BeaconChain { fcu_params.head_root, &cached_head, )?; - Ok::<_, Error>(Some((fcu_params, pre_payload_attributes))) + let head_payload_status = cached_head.head_payload_status(); + Ok::<_, Error>(Some(( + fcu_params, + pre_payload_attributes, + head_payload_status, + ))) }, "prepare_beacon_proposer_head_read", ) .await??; - let Some((forkchoice_update_params, Some(pre_payload_attributes))) = maybe_prep_data else { + let Some((forkchoice_update_params, Some(pre_payload_attributes), head_payload_status)) = + maybe_prep_data + else { // Appropriate log messages have already been logged above and in // `get_pre_payload_attributes`. return Ok(None); @@ -6140,7 +6182,7 @@ impl BeaconChain { // considerable time to compute if a state load is required. 
let head_root = forkchoice_update_params.head_root; let payload_attributes = if let Some(payload_attributes) = execution_layer - .payload_attributes(prepare_slot, head_root) + .payload_attributes(prepare_slot, head_root, head_payload_status) .await { payload_attributes @@ -6187,6 +6229,7 @@ impl BeaconChain { .insert_proposer( prepare_slot, head_root, + head_payload_status, proposer, payload_attributes.clone(), ) @@ -6198,6 +6241,7 @@ impl BeaconChain { %prepare_slot, validator = proposer, parent_root = ?head_root, + payload_status = ?head_payload_status, "Prepared beacon proposer" ); payload_attributes @@ -6250,6 +6294,7 @@ impl BeaconChain { self.update_execution_engine_forkchoice( current_slot, forkchoice_update_params, + head_payload_status, OverrideForkchoiceUpdate::AlreadyApplied, ) .await?; @@ -6262,6 +6307,7 @@ impl BeaconChain { self: &Arc, current_slot: Slot, input_params: ForkchoiceUpdateParameters, + head_payload_status: fork_choice::PayloadStatus, override_forkchoice_update: OverrideForkchoiceUpdate, ) -> Result<(), Error> { let execution_layer = self @@ -6322,6 +6368,7 @@ impl BeaconChain { finalized_hash, current_slot, head_block_root, + head_payload_status, ) .await .map_err(Error::ExecutionForkChoiceUpdateFailed); diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs index 74670b02d7..04c18c88e0 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -827,8 +827,11 @@ impl BeaconChain { // The execution layer updates might attempt to take a write-lock on fork choice, so it's // important to ensure the fork-choice lock isn't being held. 
- let el_update_handle = - spawn_execution_layer_updates(self.clone(), new_forkchoice_update_parameters)?; + let el_update_handle = spawn_execution_layer_updates( + self.clone(), + new_forkchoice_update_parameters, + new_payload_status, + )?; // We have completed recomputing the head and it's now valid for another process to do the // same. @@ -1186,6 +1189,7 @@ fn perform_debug_logging( fn spawn_execution_layer_updates( chain: Arc>, forkchoice_update_params: ForkchoiceUpdateParameters, + head_payload_status: PayloadStatus, ) -> Result>, Error> { let current_slot = chain .slot_clock @@ -1208,6 +1212,7 @@ fn spawn_execution_layer_updates( .update_execution_engine_forkchoice( current_slot, forkchoice_update_params, + head_payload_status, OverrideForkchoiceUpdate::Yes, ) .await diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index e628a81459..b657f81b1f 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -771,6 +771,36 @@ where .execution_block_generator() } + /// Create a switch-to-compounding `ConsolidationRequest` for the given validator. + /// + /// Panics if the validator doesn't exist, doesn't have eth1 withdrawal credentials, + /// or doesn't have an execution withdrawal address. 
+ pub fn make_switch_to_compounding_request( + &self, + validator_index: usize, + ) -> ConsolidationRequest { + let head = self.chain.canonical_head.cached_head(); + let head_state = &head.snapshot.beacon_state; + let validator = head_state + .get_validator(validator_index) + .expect("validator should exist"); + + assert!( + validator.has_eth1_withdrawal_credential(&self.spec), + "validator {validator_index} should have eth1 withdrawal credentials" + ); + + let source_address = validator + .get_execution_withdrawal_address(&self.spec) + .expect("validator should have execution withdrawal address"); + + ConsolidationRequest { + source_address, + source_pubkey: validator.pubkey, + target_pubkey: validator.pubkey, + } + } + pub fn set_mock_builder( &mut self, beacon_url: SensitiveUrl, diff --git a/beacon_node/beacon_chain/tests/main.rs b/beacon_node/beacon_chain/tests/main.rs index e02c488ac6..d31db128c5 100644 --- a/beacon_node/beacon_chain/tests/main.rs +++ b/beacon_node/beacon_chain/tests/main.rs @@ -6,6 +6,7 @@ mod column_verification; mod events; mod op_verification; mod payload_invalidation; +mod prepare_payload; mod rewards; mod schema_stability; mod store_tests; diff --git a/beacon_node/beacon_chain/tests/prepare_payload.rs b/beacon_node/beacon_chain/tests/prepare_payload.rs new file mode 100644 index 0000000000..dc4f999eb2 --- /dev/null +++ b/beacon_node/beacon_chain/tests/prepare_payload.rs @@ -0,0 +1,575 @@ +#![cfg(not(debug_assertions))] +#![allow(clippy::result_large_err)] + +use beacon_chain::test_utils::{ + AttestationStrategy, BeaconChainHarness, BlockStrategy, DiskHarnessType, test_spec, +}; +use beacon_chain::{ChainConfig, custody_context::NodeCustodyType}; +use bls::Keypair; +use eth2::types::ProposerPreparationData; +use fork_choice::PayloadStatus; +use logging::create_test_tracing_subscriber; +use ssz_types::VariableList; +use state_processing::{ + per_block_processing::{apply_parent_execution_payload, withdrawals::get_expected_withdrawals}, + 
state_advance::complete_state_advance, +}; +use std::sync::{Arc, LazyLock}; +use store::database::interface::BeaconNodeBackend; +use store::{HotColdDB, StoreConfig}; +use tempfile::{TempDir, tempdir}; +use types::*; + +// Should ideally be divisible by 3. +pub const LOW_VALIDATOR_COUNT: usize = 32; +pub const HIGH_VALIDATOR_COUNT: usize = 64; + +/// A cached set of keys. +static KEYPAIRS: LazyLock> = + LazyLock::new(|| types::test_utils::generate_deterministic_keypairs(HIGH_VALIDATOR_COUNT)); + +type E = MinimalEthSpec; +type TestHarness = BeaconChainHarness>; + +fn get_store( + db_path: &TempDir, + spec: Arc, +) -> Arc, BeaconNodeBackend>> { + let store_config = StoreConfig { + prune_payloads: false, + ..StoreConfig::default() + }; + get_store_generic(db_path, store_config, spec) +} + +fn get_store_generic( + db_path: &TempDir, + config: StoreConfig, + spec: Arc, +) -> Arc, BeaconNodeBackend>> { + create_test_tracing_subscriber(); + let hot_path = db_path.path().join("chain_db"); + let cold_path = db_path.path().join("freezer_db"); + let blobs_path = db_path.path().join("blobs_db"); + + HotColdDB::open( + &hot_path, + &cold_path, + &blobs_path, + |_, _, _| Ok(()), + config, + spec, + ) + .expect("disk store should initialize") +} + +fn get_harness( + store: Arc, BeaconNodeBackend>>, + validator_count: usize, +) -> TestHarness { + // Most tests expect to retain historic states, so we use this as the default. 
+ let chain_config = ChainConfig { + archive: true, + ..ChainConfig::default() + }; + get_harness_generic( + store, + validator_count, + chain_config, + NodeCustodyType::Fullnode, + ) +} + +fn get_harness_generic( + store: Arc, BeaconNodeBackend>>, + validator_count: usize, + chain_config: ChainConfig, + node_custody_type: NodeCustodyType, +) -> TestHarness { + let harness = TestHarness::builder(MinimalEthSpec) + .spec(store.get_chain_spec().clone()) + .keypairs(KEYPAIRS[0..validator_count].to_vec()) + .fresh_disk_store(store) + .mock_execution_layer() + .chain_config(chain_config) + .node_custody_type(node_custody_type) + .build(); + harness.advance_slot(); + harness +} + +#[tokio::test] +async fn prepare_payload_on_full_parent_next_slot() { + prepare_payload_generic( + PayloadStatus::Full, + Slot::new(3 * E::slots_per_epoch() + 1), + Slot::new(3 * E::slots_per_epoch() + 2), + ) + .await; +} + +#[tokio::test] +async fn prepare_payload_on_full_parent_one_epoch_skip() { + prepare_payload_generic( + PayloadStatus::Full, + Slot::new(3 * E::slots_per_epoch() + 1), + Slot::new(4 * E::slots_per_epoch()), + ) + .await; +} + +#[tokio::test] +async fn prepare_payload_on_full_parent_uneven_one_epoch_skip() { + prepare_payload_generic( + PayloadStatus::Full, + Slot::new(3 * E::slots_per_epoch() + 1), + Slot::new(5 * E::slots_per_epoch() - 1), + ) + .await; +} + +#[tokio::test] +async fn prepare_payload_on_empty_parent_next_slot() { + prepare_payload_generic( + PayloadStatus::Empty, + Slot::new(3 * E::slots_per_epoch() + 1), + Slot::new(3 * E::slots_per_epoch() + 2), + ) + .await; +} + +#[tokio::test] +async fn prepare_payload_on_empty_parent_one_epoch_skip() { + prepare_payload_generic( + PayloadStatus::Empty, + Slot::new(3 * E::slots_per_epoch() + 1), + Slot::new(4 * E::slots_per_epoch()), + ) + .await; +} + +async fn prepare_payload_generic( + parent_payload_status: PayloadStatus, + parent_block_slot: Slot, + prepare_slot: Slot, +) { + assert!(parent_block_slot > 0); + + // 
Post-Gloas test. + let spec = Arc::new(test_spec::()); + if !spec.fork_name_at_slot::(Slot::new(0)).gloas_enabled() { + return; + } + + let num_blocks_produced = parent_block_slot.as_u64() - 1; + let db_path = tempdir().unwrap(); + let store = get_store(&db_path, spec.clone()); + let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); + + harness + .extend_chain( + num_blocks_produced as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + // Advance the slot so the next extend_chain produces at a fresh slot. + harness.advance_slot(); + + // Produce a block with a payload that affects withdrawals for the next slot. + // A switch-to-compounding consolidation changes withdrawal credentials from 0x01 to 0x02, + // which queues the validator's excess balance as a pending deposit and removes it from the + // partial withdrawal sweep. We target an odd-indexed validator since odd validators are + // created with eth1 withdrawal credentials in the interop genesis builder. + let consolidation_request = harness.make_switch_to_compounding_request(1); + + let execution_requests = ExecutionRequests:: { + deposits: VariableList::empty(), + withdrawals: VariableList::empty(), + consolidations: VariableList::new(vec![consolidation_request]).unwrap(), + }; + + // Inject the execution requests into the mock EL so the next payload includes them. + harness + .execution_block_generator() + .set_next_execution_requests(execution_requests); + + // Produce and import one more block. Its envelope will contain the consolidation request. 
+ // TODO(gloas): all this ugly plumbing could be avoided with some more "implicit" context + // methods + let state = harness.get_current_state(); + let (block_contents, opt_envelope, parent_block_state) = harness + .make_block_with_envelope(state, parent_block_slot) + .await; + let envelope = opt_envelope.unwrap(); + let block_root = harness + .process_block( + parent_block_slot, + block_contents.0.canonical_root(), + block_contents.clone(), + ) + .await + .unwrap(); + + // TODO(gloas): try a case where head is empty even though envelope is processed + if parent_payload_status == PayloadStatus::Full { + harness + .process_envelope( + block_root.into(), + envelope.clone(), + &parent_block_state, + block_contents.0.state_root(), + ) + .await; + } + + // Verify that the withdrawals computed from the block's state differ from the withdrawals + // computed from the block's state with its payload applied by + // `apply_parent_execution_payload`. + let cached_head = harness.chain.canonical_head.cached_head(); + let unadvanced_empty_state = &cached_head.snapshot.beacon_state; + let parent_bid = unadvanced_empty_state + .latest_execution_payload_bid() + .unwrap(); + + let mut advanced_empty_state = unadvanced_empty_state.clone(); + complete_state_advance(&mut advanced_empty_state, None, prepare_slot, &spec).unwrap(); + + let mut unadvanced_full_state = unadvanced_empty_state.clone(); + apply_parent_execution_payload( + &mut unadvanced_full_state, + parent_bid, + &envelope.message.execution_requests, + &spec, + ) + .unwrap(); + + let mut advanced_full_state = advanced_empty_state.clone(); + apply_parent_execution_payload( + &mut advanced_full_state, + parent_bid, + &envelope.message.execution_requests, + &spec, + ) + .unwrap(); + + let withdrawals_unadvanced_empty: Withdrawals = + get_expected_withdrawals(unadvanced_empty_state, &spec) + .unwrap() + .into(); + let withdrawals_advanced_empty: Withdrawals = + get_expected_withdrawals(&advanced_empty_state, &spec) + .unwrap() 
+ .into(); + let withdrawals_unadvanced_full: Withdrawals = + get_expected_withdrawals(&unadvanced_full_state, &spec) + .unwrap() + .into(); + let withdrawals_advanced_full: Withdrawals = + get_expected_withdrawals(&advanced_full_state, &spec) + .unwrap() + .into(); + + assert_ne!( + withdrawals_advanced_empty, withdrawals_advanced_full, + "Applying execution requests should change the expected withdrawals" + ); + + let expect_state_advance_to_change_withdrawals = + prepare_slot.epoch(E::slots_per_epoch()) > parent_block_slot.epoch(E::slots_per_epoch()); + if expect_state_advance_to_change_withdrawals { + if parent_payload_status == fork_choice::PayloadStatus::Full { + assert_ne!( + withdrawals_unadvanced_full, withdrawals_advanced_full, + "Advancing the state should change the withdrawals" + ); + } else { + assert_ne!( + withdrawals_unadvanced_empty, withdrawals_advanced_empty, + "Advancing the state should change the withdrawals" + ); + } + } + + // Call `prepare_beacon_proposer` for the next slot and ensure that it primes the execution + // layer payload attributes cache with the correct withdrawals (the ones taking into account + // the applied execution_requests). + let current_slot = prepare_slot - 1; + let proposer_index = advanced_empty_state + .get_beacon_proposer_index(prepare_slot, &spec) + .expect("should get proposer index"); + + // Register the proposer so prepare_beacon_proposer doesn't skip it. + let el = harness.chain.execution_layer.as_ref().unwrap(); + el.update_proposer_preparation( + prepare_slot.epoch(E::slots_per_epoch()), + [( + &ProposerPreparationData { + validator_index: proposer_index as u64, + fee_recipient: Address::repeat_byte(42), + }, + &None, + )], + ) + .await; + + // Advance the slot clock to just before the prepare slot so the lookahead check passes. 
+ harness.advance_to_slot_lookahead(prepare_slot, harness.chain.config.prepare_payload_lookahead); + + harness + .chain + .prepare_beacon_proposer(current_slot) + .await + .expect("prepare_beacon_proposer should succeed"); + + // Read the payload attributes from the EL cache and verify the withdrawals. + let el = harness.chain.execution_layer.as_ref().unwrap(); + let head_root = harness.head_block_root(); + let attributes = el + .payload_attributes(prepare_slot, head_root, parent_payload_status) + .await + .expect("should have cached payload attributes for prepare_slot"); + + let actual_withdrawals = attributes.withdrawals().unwrap(); + let expected_withdrawals: Vec = if parent_payload_status == PayloadStatus::Full { + withdrawals_advanced_full.to_vec() + } else { + withdrawals_advanced_empty.to_vec() + }; + + assert_eq!( + actual_withdrawals, &expected_withdrawals, + "prepare_beacon_proposer should use withdrawals computed from the \ + {parent_payload_status:?} state" + ); +} + +#[tokio::test] +async fn prepare_payload_on_genesis_next_slot() { + prepare_payload_on_genesis_generic(Slot::new(1)).await; +} + +#[tokio::test] +async fn prepare_payload_on_genesis_skip_two_epochs() { + prepare_payload_on_genesis_generic(Slot::new(2 * E::slots_per_epoch())).await; +} + +async fn prepare_payload_on_genesis_generic(prepare_slot: Slot) { + // Post-Gloas test. + let spec = Arc::new(test_spec::()); + if !spec.fork_name_at_slot::(Slot::new(0)).gloas_enabled() { + return; + } + + // Genesis is always considered Empty. + let parent_payload_status = PayloadStatus::Empty; + + let db_path = tempdir().unwrap(); + let store = get_store(&db_path, spec.clone()); + let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); + + // At genesis withdrawals are empty (because nothing has happened yet), so we don't assert + // anything about the advanced vs unadvanced state. This test just exists to test that + // calculating payload attributes at genesis works and doesn't error. 
+ let cached_head = harness.chain.canonical_head.cached_head(); + let unadvanced_state = &cached_head.snapshot.beacon_state; + + let mut advanced_state = unadvanced_state.clone(); + complete_state_advance(&mut advanced_state, None, prepare_slot, &spec).unwrap(); + + let withdrawals_advanced: Withdrawals = get_expected_withdrawals(&advanced_state, &spec) + .unwrap() + .into(); + + // Call `prepare_beacon_proposer` for the next slot and ensure that it primes the execution + // layer payload attributes cache with the correct withdrawals (the ones taking into account + // the state advance). + let current_slot = prepare_slot - 1; + let proposer_index = advanced_state + .get_beacon_proposer_index(prepare_slot, &spec) + .unwrap(); + + // Register the proposer so prepare_beacon_proposer doesn't skip it. + let el = harness.chain.execution_layer.as_ref().unwrap(); + el.update_proposer_preparation( + prepare_slot.epoch(E::slots_per_epoch()), + [( + &ProposerPreparationData { + validator_index: proposer_index as u64, + fee_recipient: Address::repeat_byte(42), + }, + &None, + )], + ) + .await; + + // Advance the slot clock to just before the prepare slot so the lookahead check passes. + harness.advance_to_slot_lookahead(prepare_slot, harness.chain.config.prepare_payload_lookahead); + + harness + .chain + .prepare_beacon_proposer(current_slot) + .await + .unwrap(); + + // Read the payload attributes from the EL cache and verify the withdrawals. 
+ let el = harness.chain.execution_layer.as_ref().unwrap(); + let head_root = harness.head_block_root(); + let attributes = el + .payload_attributes(prepare_slot, head_root, parent_payload_status) + .await + .unwrap(); + + let actual_withdrawals = attributes.withdrawals().unwrap(); + let expected_withdrawals: Vec = withdrawals_advanced.to_vec(); + + assert_eq!( + actual_withdrawals, &expected_withdrawals, + "prepare_beacon_proposer should use withdrawals computed from the \ + {parent_payload_status:?} advanced genesis state" + ); + assert!(actual_withdrawals.is_empty()); +} + +#[tokio::test] +async fn prepare_payload_on_fork_boundary_no_skip() { + prepare_payload_on_fork_boundary( + Slot::new(2 * E::slots_per_epoch()) - 1, + Slot::new(2 * E::slots_per_epoch()), + Epoch::new(2), + ) + .await; +} + +#[tokio::test] +async fn prepare_payload_on_fork_boundary_skip_one_prior() { + prepare_payload_on_fork_boundary( + Slot::new(2 * E::slots_per_epoch()) - 2, + Slot::new(2 * E::slots_per_epoch()), + Epoch::new(2), + ) + .await; +} + +#[tokio::test] +async fn prepare_payload_on_fork_boundary_skip_one_after() { + prepare_payload_on_fork_boundary( + Slot::new(2 * E::slots_per_epoch()) - 1, + Slot::new(2 * E::slots_per_epoch()) + 1, + Epoch::new(2), + ) + .await; +} + +#[tokio::test] +async fn prepare_payload_on_fork_boundary_skip_whole_epoch() { + prepare_payload_on_fork_boundary( + Slot::new(E::slots_per_epoch()), + Slot::new(2 * E::slots_per_epoch()), + Epoch::new(2), + ) + .await; +} + +async fn prepare_payload_on_fork_boundary( + parent_block_slot: Slot, + prepare_slot: Slot, + gloas_fork_epoch: Epoch, +) { + // Post-Gloas test. + let mut spec = test_spec::(); + if !spec.fork_name_at_slot::(Slot::new(0)).gloas_enabled() { + return; + } + spec.gloas_fork_epoch = Some(gloas_fork_epoch); + let spec = Arc::new(spec); + + // Pre-Gloas blocks are always considered Empty. 
+ let parent_payload_status = PayloadStatus::Empty; + + let num_blocks_produced = parent_block_slot.as_u64(); + let db_path = tempdir().unwrap(); + let store = get_store(&db_path, spec.clone()); + let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); + + harness + .extend_chain( + num_blocks_produced as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + // Verify that the withdrawals computed from the block's state differ from the withdrawals + // computed from the block's state with its payload applied by + // `apply_parent_execution_payload`. + let cached_head = harness.chain.canonical_head.cached_head(); + let unadvanced_state = &cached_head.snapshot.beacon_state; + + let mut advanced_state = unadvanced_state.clone(); + complete_state_advance(&mut advanced_state, None, prepare_slot, &spec).unwrap(); + + let withdrawals_unadvanced: Withdrawals = get_expected_withdrawals(unadvanced_state, &spec) + .unwrap() + .into(); + let withdrawals_advanced: Withdrawals = get_expected_withdrawals(&advanced_state, &spec) + .unwrap() + .into(); + + let expect_state_advance_to_change_withdrawals = prepare_slot.epoch(E::slots_per_epoch()) > 0; + if expect_state_advance_to_change_withdrawals { + assert_ne!( + withdrawals_unadvanced, withdrawals_advanced, + "Advancing the state should change the withdrawals" + ); + } + + // Call `prepare_beacon_proposer` for the next slot and ensure that it primes the execution + // layer payload attributes cache with the correct withdrawals (the ones taking into account + // the applied execution_requests). + let current_slot = prepare_slot - 1; + let proposer_index = advanced_state + .get_beacon_proposer_index(prepare_slot, &spec) + .unwrap(); + + // Register the proposer so prepare_beacon_proposer doesn't skip it. 
+ let el = harness.chain.execution_layer.as_ref().unwrap(); + el.update_proposer_preparation( + prepare_slot.epoch(E::slots_per_epoch()), + [( + &ProposerPreparationData { + validator_index: proposer_index as u64, + fee_recipient: Address::repeat_byte(42), + }, + &None, + )], + ) + .await; + + // Advance the slot clock to just before the prepare slot so the lookahead check passes. + harness.advance_to_slot_lookahead(prepare_slot, harness.chain.config.prepare_payload_lookahead); + + harness + .chain + .prepare_beacon_proposer(current_slot) + .await + .unwrap(); + + // Read the payload attributes from the EL cache and verify the withdrawals. + let el = harness.chain.execution_layer.as_ref().unwrap(); + let head_root = harness.head_block_root(); + let attributes = el + .payload_attributes(prepare_slot, head_root, parent_payload_status) + .await + .unwrap(); + + let actual_withdrawals = attributes.withdrawals().unwrap(); + let expected_withdrawals: Vec = withdrawals_advanced.to_vec(); + + assert_eq!( + actual_withdrawals, &expected_withdrawals, + "prepare_beacon_proposer should use withdrawals computed from the \ + advanced state" + ); +} diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 865599b9bd..9dfb8304bc 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -721,10 +721,9 @@ where if let Some(execution_layer) = beacon_chain.execution_layer.as_ref() { // Only send a head update *after* genesis. 
if let Ok(current_slot) = beacon_chain.slot() { - let params = beacon_chain - .canonical_head - .cached_head() - .forkchoice_update_parameters(); + let cached_head = beacon_chain.canonical_head.cached_head(); + let head_payload_status = cached_head.head_payload_status(); + let params = cached_head.forkchoice_update_parameters(); if params .head_hash .is_some_and(|hash| hash != ExecutionBlockHash::zero()) @@ -737,6 +736,7 @@ where .update_execution_engine_forkchoice( current_slot, params, + head_payload_status, Default::default(), ) .await; diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index cfff0b4d9f..9d9391a1e1 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -1,7 +1,7 @@ use super::*; use alloy_rlp::RlpEncodable; use serde::{Deserialize, Serialize}; -use ssz::{Decode, TryFromIter}; +use ssz::{Decode, Encode, TryFromIter}; use ssz_types::{FixedVector, VariableList, typenum::Unsigned}; use strum::EnumString; use superstruct::superstruct; @@ -481,6 +481,34 @@ pub enum RequestsError { #[serde(transparent)] pub struct JsonExecutionRequests(pub Vec); +impl From> for JsonExecutionRequests { + fn from(requests: ExecutionRequests) -> Self { + let mut result = Vec::new(); + if !requests.deposits.is_empty() { + result.push(format!( + "0x{:02x}{}", + RequestType::Deposit.to_u8(), + hex::encode(requests.deposits.as_ssz_bytes()) + )); + } + if !requests.withdrawals.is_empty() { + result.push(format!( + "0x{:02x}{}", + RequestType::Withdrawal.to_u8(), + hex::encode(requests.withdrawals.as_ssz_bytes()) + )); + } + if !requests.consolidations.is_empty() { + result.push(format!( + "0x{:02x}{}", + RequestType::Consolidation.to_u8(), + hex::encode(requests.consolidations.as_ssz_bytes()) + )); + } + JsonExecutionRequests(result) + } +} + impl TryFrom for ExecutionRequests { type Error = 
RequestsError; diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 4e4fe20e14..4146543fd5 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -403,6 +403,7 @@ impl ProposerPreparationDataEntry { pub struct ProposerKey { slot: Slot, head_block_root: Hash256, + head_payload_status: fork_choice::PayloadStatus, } #[derive(PartialEq, Clone)] @@ -1461,12 +1462,14 @@ impl ExecutionLayer { &self, slot: Slot, head_block_root: Hash256, + head_payload_status: fork_choice::PayloadStatus, validator_index: u64, payload_attributes: PayloadAttributes, ) -> bool { let proposers_key = ProposerKey { slot, head_block_root, + head_payload_status, }; let existing = self.proposers().write().await.insert( @@ -1485,16 +1488,18 @@ impl ExecutionLayer { } /// If there has been a proposer registered via `Self::insert_proposer` with a matching `slot` - `head_block_root`, then return the appropriate `PayloadAttributes` for inclusion in - `forkchoiceUpdated` calls. + /// `head_block_root`, and `head_payload_status`, then return the appropriate `PayloadAttributes` + /// for inclusion in `forkchoiceUpdated` calls. 
pub async fn payload_attributes( &self, current_slot: Slot, head_block_root: Hash256, + head_payload_status: fork_choice::PayloadStatus, ) -> Option { let proposers_key = ProposerKey { slot: current_slot, head_block_root, + head_payload_status, }; let proposer = self.proposers().read().await.get(&proposers_key).cloned()?; @@ -1518,6 +1523,7 @@ impl ExecutionLayer { finalized_block_hash: ExecutionBlockHash, current_slot: Slot, head_block_root: Hash256, + head_payload_status: fork_choice::PayloadStatus, ) -> Result { let _timer = metrics::start_timer_vec( &metrics::EXECUTION_LAYER_REQUEST_TIMES, @@ -1534,7 +1540,9 @@ impl ExecutionLayer { ); let next_slot = current_slot + 1; - let payload_attributes = self.payload_attributes(next_slot, head_block_root).await; + let payload_attributes = self + .payload_attributes(next_slot, head_block_root, head_payload_status) + .await; // Compute the "lookahead", the time between when the payload will be produced and now. if let Some(ref payload_attributes) = payload_attributes diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index ace6276b75..16d8c03062 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -26,8 +26,8 @@ use tree_hash_derive::TreeHash; use types::{ Blob, ChainSpec, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadBellatrix, ExecutionPayloadCapella, ExecutionPayloadDeneb, ExecutionPayloadElectra, ExecutionPayloadFulu, - ExecutionPayloadGloas, ExecutionPayloadHeader, ForkName, Hash256, KzgProofs, Transaction, - Transactions, Uint256, + ExecutionPayloadGloas, ExecutionPayloadHeader, ExecutionRequests, ForkName, Hash256, KzgProofs, + Transaction, Transactions, Uint256, }; const TEST_BLOB_BUNDLE: &[u8] = include_bytes!("fixtures/mainnet/test_blobs_bundle.ssz"); @@ -161,6 +161,14 @@ pub 
struct ExecutionBlockGenerator { pub blobs_bundles: HashMap>, pub kzg: Option>, rng: Arc>, + /* + * Execution requests (electra+) + */ + /// Per-payload execution requests returned by `getPayload`. + execution_requests: HashMap>, + /// If set, the next call to `build_new_execution_payload` will associate these + /// execution requests with the generated payload ID. + next_execution_requests: Option>, } fn make_rng() -> Arc> { @@ -199,6 +207,8 @@ impl ExecutionBlockGenerator { blobs_bundles: <_>::default(), kzg, rng: make_rng(), + execution_requests: <_>::default(), + next_execution_requests: None, }; generator.insert_pow_block(0).unwrap(); @@ -458,6 +468,15 @@ impl ExecutionBlockGenerator { self.blobs_bundles.get(id).cloned() } + pub fn get_execution_requests(&self, id: &PayloadId) -> Option> { + self.execution_requests.get(id).cloned() + } + + /// Set execution requests to be returned alongside the next generated payload. + pub fn set_next_execution_requests(&mut self, requests: ExecutionRequests) { + self.next_execution_requests = Some(requests); + } + /// Look up a blob and proof by versioned hash across all stored bundles. pub fn get_blob_and_proof(&self, versioned_hash: &Hash256) -> Option> { self.blobs_bundles @@ -763,6 +782,11 @@ impl ExecutionBlockGenerator { }, }; + // Store execution requests for this payload if configured. 
+ if let Some(requests) = self.next_execution_requests.take() { + self.execution_requests.insert(id, requests); + } + let fork_name = execution_payload.fork_name(); if fork_name.deneb_enabled() { // get random number between 0 and 1 blobs by default diff --git a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs index 3054289996..64eecccc58 100644 --- a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs +++ b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs @@ -295,6 +295,10 @@ pub async fn handle_rpc( })?; let maybe_blobs = ctx.execution_block_generator.write().get_blobs_bundle(&id); + let maybe_execution_requests = ctx + .execution_block_generator + .read() + .get_execution_requests(&id); // validate method called correctly according to shanghai fork time if ctx @@ -432,8 +436,10 @@ pub async fn handle_rpc( ))? .into(), should_override_builder: false, - // TODO(electra): add EL requests in mock el - execution_requests: Default::default(), + execution_requests: maybe_execution_requests + .clone() + .unwrap_or_default() + .into(), }) .unwrap() } @@ -453,7 +459,10 @@ pub async fn handle_rpc( ))? .into(), should_override_builder: false, - execution_requests: Default::default(), + execution_requests: maybe_execution_requests + .clone() + .unwrap_or_default() + .into(), }) .unwrap() } @@ -473,7 +482,9 @@ pub async fn handle_rpc( ))? 
.into(), should_override_builder: false, - execution_requests: Default::default(), + execution_requests: maybe_execution_requests + .unwrap_or_default() + .into(), }) .unwrap() } diff --git a/beacon_node/execution_layer/src/test_utils/mock_builder.rs b/beacon_node/execution_layer/src/test_utils/mock_builder.rs index 6ab6cca3f6..d6243a7c4d 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_builder.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_builder.rs @@ -800,6 +800,10 @@ impl MockBuilder { let head_block_root = head_block_root.unwrap_or(head.canonical_root()); + // TODO(gloas): Currently the tests are pre-Gloas and we are not considering + // other payload statuses. This codepath may not be relevant for Gloas. + let head_payload_status = fork_choice::PayloadStatus::Pending; + let head_execution_payload = head .message() .body() @@ -934,7 +938,13 @@ impl MockBuilder { ); self.el - .insert_proposer(slot, head_block_root, val_index, payload_attributes.clone()) + .insert_proposer( + slot, + head_block_root, + head_payload_status, + val_index, + payload_attributes.clone(), + ) .await; let forkchoice_update_params = ForkchoiceUpdateParameters { @@ -952,6 +962,7 @@ impl MockBuilder { finalized_execution_hash, slot - 1, head_block_root, + head_payload_status, ) .await .map_err(|e| format!("fcu call failed : {:?}", e))?; diff --git a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs index 288416d51e..5b721bcab2 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs @@ -90,6 +90,8 @@ impl MockExecutionLayer { let timestamp = block_number; let prev_randao = Hash256::from_low_u64_be(block_number); let head_block_root = Hash256::repeat_byte(42); + // TODO(gloas): allow statuses other than Pending? 
+ let head_payload_status = fork_choice::PayloadStatus::Pending; let forkchoice_update_params = ForkchoiceUpdateParameters { head_root: head_block_root, head_hash: Some(parent_hash), @@ -109,7 +111,13 @@ impl MockExecutionLayer { let slot = Slot::new(0); let validator_index = 0; self.el - .insert_proposer(slot, head_block_root, validator_index, payload_attributes) + .insert_proposer( + slot, + head_block_root, + head_payload_status, + validator_index, + payload_attributes, + ) .await; self.el @@ -119,6 +127,7 @@ impl MockExecutionLayer { ExecutionBlockHash::zero(), slot, head_block_root, + head_payload_status, ) .await .unwrap(); @@ -280,6 +289,7 @@ impl MockExecutionLayer { // Use junk values for slot/head-root to ensure there is no payload supplied. let slot = Slot::new(0); let head_block_root = Hash256::repeat_byte(13); + // TODO(gloas): reconsider the state_payload_status self.el .notify_forkchoice_updated( block_hash, @@ -287,6 +297,7 @@ impl MockExecutionLayer { ExecutionBlockHash::zero(), slot, head_block_root, + fork_choice::PayloadStatus::Pending, ) .await .unwrap(); diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index 1c6d3f3201..7abba8a1f6 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -101,7 +101,7 @@ pub enum ExecutionStatus { } /// Represents the status of an execution payload post-Gloas. 
-#[derive(Clone, Copy, Debug, PartialEq, Eq, Encode, Decode, Serialize, Deserialize)] +#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq, Encode, Decode, Serialize, Deserialize)] #[ssz(enum_behaviour = "tag")] #[repr(u8)] pub enum PayloadStatus { diff --git a/testing/execution_engine_integration/src/test_rig.rs b/testing/execution_engine_integration/src/test_rig.rs index 05170d907c..ed6b5787b5 100644 --- a/testing/execution_engine_integration/src/test_rig.rs +++ b/testing/execution_engine_integration/src/test_rig.rs @@ -200,6 +200,9 @@ impl TestRig { pub async fn perform_tests(&self) { self.wait_until_synced().await; + // TODO(gloas): this needs to be for post-Gloas cases + let head_payload_status = fork_choice::PayloadStatus::Pending; + // Create a local signer in case we need to sign transactions locally let private_key_signer: PrivateKeySigner = PRIVATE_KEYS[0].parse().expect("Invalid private key"); @@ -308,6 +311,7 @@ impl TestRig { .insert_proposer( Slot::new(1), // Insert proposer for the next slot head_root, + fork_choice::PayloadStatus::Pending, proposer_index, PayloadAttributes::new( timestamp, @@ -332,6 +336,7 @@ impl TestRig { finalized_block_hash, Slot::new(0), Hash256::zero(), + head_payload_status, ) .await .unwrap(); @@ -411,6 +416,7 @@ impl TestRig { finalized_block_hash, slot, head_block_root, + head_payload_status, ) .await .unwrap(); @@ -452,6 +458,7 @@ impl TestRig { finalized_block_hash, slot, head_block_root, + head_payload_status, ) .await .unwrap(); @@ -587,7 +594,13 @@ impl TestRig { let validator_index = 0; self.ee_a .execution_layer - .insert_proposer(slot, head_block_root, validator_index, payload_attributes) + .insert_proposer( + slot, + head_block_root, + head_payload_status, + validator_index, + payload_attributes, + ) .await; let status = self .ee_a @@ -598,6 +611,7 @@ impl TestRig { finalized_block_hash, slot, head_block_root, + head_payload_status, ) .await .unwrap(); @@ -635,6 +649,7 @@ impl TestRig { finalized_block_hash, slot, 
head_block_root, + head_payload_status, ) .await .unwrap(); @@ -688,6 +703,7 @@ impl TestRig { finalized_block_hash, slot, head_block_root, + head_payload_status, ) .await .unwrap(); From 6323cd3827b596080fa43add5b09a7adc91fd58e Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Sun, 26 Apr 2026 01:51:02 +0200 Subject: [PATCH 5/7] Fix builder exit signature batch verification logic and small refactor (#9173) We had a bug when performing batch builder exit signature verification. The EF spec tests cover this case, but the EF tests only calls individual signature verification (which is a separate code path). This PR unifies the two code paths. We should probably spend some time reviewing EF test code coverage and make sure we don't have separate code paths that do similar things. Co-Authored-By: Eitan Seri-Levi --- .../process_operations.rs | 27 +++++++++---------- .../per_block_processing/signature_sets.rs | 18 ++++++++++--- 2 files changed, 27 insertions(+), 18 deletions(-) diff --git a/consensus/state_processing/src/per_block_processing/process_operations.rs b/consensus/state_processing/src/per_block_processing/process_operations.rs index f1de284fc8..422e0afe06 100644 --- a/consensus/state_processing/src/per_block_processing/process_operations.rs +++ b/consensus/state_processing/src/per_block_processing/process_operations.rs @@ -8,6 +8,7 @@ use crate::per_block_processing::builder::{ convert_validator_index_to_builder_index, is_builder_index, }; use crate::per_block_processing::errors::{BlockProcessingError, ExitInvalid, IntoWithIndex}; +use crate::per_block_processing::signature_sets::{exit_signature_set, get_pubkey_from_state}; use crate::per_block_processing::verify_payload_attestation::verify_payload_attestation; use bls::{PublicKeyBytes, SignatureBytes}; use ssz_types::FixedVector; @@ -547,7 +548,8 @@ fn process_builder_voluntary_exit( let builder_index = convert_validator_index_to_builder_index(signed_exit.message.validator_index); - let builder = state + 
// Verify builder is known + state .builders()? .get(builder_index as usize) .cloned() @@ -570,22 +572,17 @@ fn process_builder_voluntary_exit( )); } - // Verify signature (using EIP-7044 domain: capella_fork_version for Deneb+) if verify_signatures.is_true() { - let pubkey = builder.pubkey; - let domain = spec.compute_domain( - Domain::VoluntaryExit, - spec.capella_fork_version, - state.genesis_validators_root(), + verify!( + exit_signature_set( + state, + |i| get_pubkey_from_state(state, i), + signed_exit, + spec + )? + .verify(), + ExitInvalid::BadSignature ); - let message = signed_exit.message.signing_root(domain); - // TODO(gloas): use builder pubkey cache once available - let bls_pubkey = pubkey - .decompress() - .map_err(|_| BlockOperationError::invalid(ExitInvalid::BadSignature))?; - if !signed_exit.signature.verify(&bls_pubkey, message) { - return Err(BlockOperationError::invalid(ExitInvalid::BadSignature)); - } } // Initiate builder exit diff --git a/consensus/state_processing/src/per_block_processing/signature_sets.rs b/consensus/state_processing/src/per_block_processing/signature_sets.rs index 5c1767f227..0686c4d605 100644 --- a/consensus/state_processing/src/per_block_processing/signature_sets.rs +++ b/consensus/state_processing/src/per_block_processing/signature_sets.rs @@ -2,6 +2,7 @@ //! validated individually, or alongside in others in a potentially cheaper bulk operation. //! //! This module exposes one function to extract each type of `SignatureSet` from a `BeaconBlock`. +use super::builder::{convert_validator_index_to_builder_index, is_builder_index}; use bls::{AggregateSignature, PublicKey, PublicKeyBytes, Signature, SignatureSet}; use ssz::DecodeError; use std::borrow::Cow; @@ -503,7 +504,7 @@ pub fn deposit_pubkey_signature_message( } /// Returns a signature set that is valid if the `SignedVoluntaryExit` was signed by the indicated -/// validator. +/// validator (or builder, in the case of a builder exit). 
pub fn exit_signature_set<'a, E, F>( state: &'a BeaconState, get_pubkey: F, @@ -515,7 +516,18 @@ where F: Fn(usize) -> Option>, { let exit = &signed_exit.message; - let proposer_index = exit.validator_index as usize; + let validator_index = exit.validator_index; + + let is_builder_exit = + state.fork_name_unchecked().gloas_enabled() && is_builder_index(validator_index); + + let pubkey = if is_builder_exit { + let builder_index = convert_validator_index_to_builder_index(validator_index); + get_builder_pubkey_from_state(state, builder_index) + .ok_or(Error::ValidatorUnknown(validator_index))? + } else { + get_pubkey(validator_index as usize).ok_or(Error::ValidatorUnknown(validator_index))? + }; let domain = if state.fork_name_unchecked().deneb_enabled() { // EIP-7044 @@ -537,7 +549,7 @@ where Ok(SignatureSet::single_pubkey( &signed_exit.signature, - get_pubkey(proposer_index).ok_or(Error::ValidatorUnknown(proposer_index as u64))?, + pubkey, message, )) } From 276c4d5ff353fe93db306668fca7f8639a1e2ab1 Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Sun, 26 Apr 2026 15:40:22 +0200 Subject: [PATCH 6/7] Gloas set `AttestationData.index` (#9100) For gloas `attestation.data.index` should be set to 1 if we are attesting to a block whose slot is not the attestation duty slot and slot payload_status is `FULL` Co-Authored-By: Eitan Seri- Levi Co-Authored-By: Eitan Seri-Levi Co-Authored-By: dapplion <35266934+dapplion@users.noreply.github.com> --- beacon_node/beacon_chain/src/beacon_chain.rs | 26 +++ .../beacon_chain/src/early_attester_cache.rs | 13 ++ beacon_node/beacon_chain/src/test_utils.rs | 2 + .../tests/attestation_production.rs | 179 +++++++++++++++--- .../types/src/attestation/attestation.rs | 11 +- .../src/attestation_service.rs | 1 + 6 files changed, 209 insertions(+), 23 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 98dc9cd7fd..b556e6d849 100644 --- 
a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -1956,6 +1956,7 @@ impl BeaconChain { let beacon_block_root; let beacon_state_root; let target; + let is_same_slot_attestation; let current_epoch_attesting_info: Option<(Checkpoint, usize)>; let head_timer = metrics::start_timer(&metrics::ATTESTATION_PRODUCTION_HEAD_SCRAPE_SECONDS); let head_span = debug_span!("attestation_production_head_scrape").entered(); @@ -1996,11 +1997,20 @@ impl BeaconChain { // When attesting to the head slot or later, always use the head of the chain. beacon_block_root = head.beacon_block_root; beacon_state_root = head.beacon_state_root(); + is_same_slot_attestation = request_slot == head.beacon_block.slot(); } else { // Permit attesting to slots *prior* to the current head. This is desirable when // the VC and BN are out-of-sync due to time issues or overloading. beacon_block_root = *head_state.get_block_root(request_slot)?; beacon_state_root = *head_state.get_state_root(request_slot)?; + + // Fetch the previous block root. If the previous block root equals + // the block root being attested to, the `request_slot` is a skipped slot + // and this is not a same slot attestation. + let prior_slot_root = head_state + .get_block_root(request_slot.saturating_sub(1u64)) + .ok(); + is_same_slot_attestation = prior_slot_root != Some(&beacon_block_root); }; let target_slot = request_epoch.start_slot(T::EthSpec::slots_per_epoch()); @@ -2090,6 +2100,21 @@ impl BeaconChain { ) }; + // For gloas the attestation data index indicates payload presence: + // `payload_present=false` for same-slot attestations or when payload not received. + // `payload_present=true` when attesting to a prior slot whose payload has been received. + let payload_present = if self + .spec + .fork_name_at_slot::(request_slot) + .gloas_enabled() + && !is_same_slot_attestation + { + self.canonical_head + .block_has_canonical_payload(&beacon_block_root, &self.spec)? 
+ } else { + false + }; + Ok(Attestation::::empty_for_signing( request_index, committee_len, @@ -2097,6 +2122,7 @@ impl BeaconChain { beacon_block_root, justified_checkpoint, target, + payload_present, &self.spec, )?) } diff --git a/beacon_node/beacon_chain/src/early_attester_cache.rs b/beacon_node/beacon_chain/src/early_attester_cache.rs index 752e4d1a96..e3a83f9374 100644 --- a/beacon_node/beacon_chain/src/early_attester_cache.rs +++ b/beacon_node/beacon_chain/src/early_attester_cache.rs @@ -165,6 +165,12 @@ impl EarlyAttesterCache { /// - There is a cache `item` present. /// - If `request_slot` is in the same epoch as `item.epoch`. /// - If `request_index` does not exceed `item.committee_count`. + /// + /// Post gloas an additional condition must be met: + /// - `request_slot` is the same slot as `item.block.slot` (i.e. a same slot attestation). + /// + /// Non-same-slot Gloas attestations need `data.index` set from the canonical payload + /// status, which the cache doesn't track. Returning `None` falls through to fork choice. 
#[instrument(skip_all, fields(%request_slot, %request_index), level = "debug")] pub fn try_attest( &self, @@ -197,6 +203,12 @@ impl EarlyAttesterCache { item.committee_lengths .get_committee_length::(request_slot, request_index, spec)?; + let is_same_slot_attestation = request_slot == item.block.slot(); + if spec.fork_name_at_slot::(request_slot).gloas_enabled() && !is_same_slot_attestation { + return Ok(None); + } + let payload_present = false; + let attestation = Attestation::empty_for_signing( request_index, committee_len, @@ -204,6 +216,7 @@ impl EarlyAttesterCache { item.beacon_block_root, item.source, item.target, + payload_present, spec, ) .map_err(Error::AttestationError)?; diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index b657f81b1f..274f41d1cb 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -1451,6 +1451,7 @@ where epoch, root: target_root, }, + false, &self.spec, )?; @@ -1560,6 +1561,7 @@ where epoch, root: target_root, }, + false, &self.spec, )?) 
} diff --git a/beacon_node/beacon_chain/tests/attestation_production.rs b/beacon_node/beacon_chain/tests/attestation_production.rs index a3ab959d12..1b87fc041a 100644 --- a/beacon_node/beacon_chain/tests/attestation_production.rs +++ b/beacon_node/beacon_chain/tests/attestation_production.rs @@ -2,7 +2,9 @@ use beacon_chain::attestation_simulator::produce_unaggregated_attestation; use beacon_chain::custody_context::NodeCustodyType; -use beacon_chain::test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy}; +use beacon_chain::test_utils::{ + AttestationStrategy, BeaconChainHarness, BlockStrategy, fork_name_from_env, +}; use beacon_chain::validator_monitor::UNAGGREGATED_ATTESTATION_LAG_SLOTS; use beacon_chain::{StateSkipConfig, WhenSlotSkipped, metrics}; use bls::{AggregateSignature, Keypair}; @@ -206,7 +208,15 @@ async fn produces_attestations() { &AggregateSignature::infinity(), "bad signature" ); - assert_eq!(data.index, index, "bad index"); + if harness + .spec + .fork_name_at_slot::(data.slot) + .gloas_enabled() + { + assert!(data.index <= 1, "invalid index"); + } else { + assert_eq!(data.index, index, "bad index"); + } assert_eq!(data.slot, slot, "bad slot"); assert_eq!(data.beacon_block_root, block_root, "bad block root"); assert_eq!( @@ -226,27 +236,35 @@ async fn produces_attestations() { .build_range_sync_block_from_store_blobs(Some(block_root), Arc::new(block.clone())); let available_block = range_sync_block.into_available_block(); - let early_attestation = { - let proto_block = chain - .canonical_head - .fork_choice_read_lock() - .get_block(&block_root) - .unwrap(); - chain - .early_attester_cache - .add_head_block(block_root, &available_block, proto_block, &state) - .unwrap(); - chain - .early_attester_cache - .try_attest(slot, index, &chain.spec) - .unwrap() - .unwrap() - }; + // For Gloas non-same-slot attestations, the early attester cache returns None. 
+ let is_same_slot_attestation = slot == block_slot; + let is_gloas = harness + .spec + .fork_name_at_slot::(slot) + .gloas_enabled(); + if !is_gloas || is_same_slot_attestation { + let early_attestation = { + let proto_block = chain + .canonical_head + .fork_choice_read_lock() + .get_block(&block_root) + .unwrap(); + chain + .early_attester_cache + .add_head_block(block_root, &available_block, proto_block, &state) + .unwrap(); + chain + .early_attester_cache + .try_attest(slot, index, &chain.spec) + .unwrap() + .unwrap() + }; - assert_eq!( - attestation, early_attestation, - "early attester cache inconsistent" - ); + assert_eq!( + attestation, early_attestation, + "early attester cache inconsistent" + ); + } } } } @@ -313,3 +331,120 @@ async fn early_attester_cache_old_request() { .unwrap(); assert_eq!(attested_block.slot(), attest_slot); } + +/// Verify that `produce_unaggregated_attestation` sets `data.index = 1` (payload_present) +/// when a gloas validator attests to a prior slot whose block+envelope have been received. +/// +/// Setup: build a chain at gloas genesis, produce a block with envelope at slot N, +/// then advance the clock to slot N+1 without producing a block (skipped slot). +/// Attesting at slot N+1 should target the block at slot N with payload_present = true. +#[tokio::test] +async fn gloas_attestation_index_payload_present() { + if fork_name_from_env().is_some_and(|f| !f.gloas_enabled()) { + return; + } + + let harness = BeaconChainHarness::builder(MainnetEthSpec) + .default_spec() + .keypairs(KEYPAIRS[..].to_vec()) + .fresh_ephemeral_store() + .mock_execution_layer() + .build(); + + let chain = &harness.chain; + + // Build a few blocks so the chain is established (slots 1..=3). 
+ harness.advance_slot(); + harness + .extend_chain( + 3, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + let head = chain.head_snapshot(); + assert_eq!(head.beacon_block.slot(), Slot::new(3)); + + // Advance clock to slot 4 without producing a block (skipped slot). + harness.advance_slot(); + let attest_slot = chain.slot().unwrap(); + assert_eq!(attest_slot, Slot::new(4)); + + // Attest at slot 4 — this should target the block at slot 3 whose payload was received. + let attestation = chain + .produce_unaggregated_attestation(attest_slot, 0) + .expect("should produce attestation"); + + assert_eq!(attestation.data().slot, attest_slot); + assert_eq!( + attestation.data().index, + 1, + "gloas attestation to prior slot with payload should have index=1 (payload_present)" + ); +} + +/// Verify that `produce_unaggregated_attestation` sets `data.index = 0` (payload NOT present) +/// when a gloas validator attests to a prior slot whose block was imported but whose +/// payload envelope was never received. +/// +/// Setup: build a chain at gloas genesis through slot 2, then at slot 3 import only the +/// beacon block (no envelope), advance to slot 4 (skipped), and attest. +#[tokio::test] +async fn gloas_attestation_index_payload_absent() { + if fork_name_from_env().is_some_and(|f| !f.gloas_enabled()) { + return; + } + + let harness = BeaconChainHarness::builder(MainnetEthSpec) + .default_spec() + .keypairs(KEYPAIRS[..].to_vec()) + .fresh_ephemeral_store() + .mock_execution_layer() + .build(); + + let chain = &harness.chain; + + // Build slots 1..=2 normally (with envelopes). + harness.advance_slot(); + harness + .extend_chain( + 2, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + assert_eq!(chain.head_snapshot().beacon_block.slot(), Slot::new(2)); + + // Slot 3: produce and import the beacon block but do NOT process the envelope. 
+ harness.advance_slot(); + let state = harness.get_current_state(); + let (block_contents, _envelope, _new_state) = + harness.make_block_with_envelope(state, Slot::new(3)).await; + + let block_root = block_contents.0.canonical_root(); + harness + .process_block(Slot::new(3), block_root, block_contents) + .await + .expect("block should import without envelope"); + + assert_eq!(chain.head_snapshot().beacon_block.slot(), Slot::new(3)); + + // Advance clock to slot 4 without producing a block (skipped slot). + harness.advance_slot(); + let attest_slot = chain.slot().unwrap(); + assert_eq!(attest_slot, Slot::new(4)); + + // Attest at slot 4 — targets slot 3 whose payload was NOT received. + let attestation = chain + .produce_unaggregated_attestation(attest_slot, 0) + .expect("should produce attestation"); + + assert_eq!(attestation.data().slot, attest_slot); + assert_eq!( + attestation.data().index, + 0, + "gloas attestation to prior slot without payload should have index=0 (payload_absent)" + ); +} diff --git a/consensus/types/src/attestation/attestation.rs b/consensus/types/src/attestation/attestation.rs index 693b5889f5..28059efee6 100644 --- a/consensus/types/src/attestation/attestation.rs +++ b/consensus/types/src/attestation/attestation.rs @@ -102,6 +102,7 @@ impl Hash for Attestation { impl Attestation { /// Produces an attestation with empty signature. + #[allow(clippy::too_many_arguments)] pub fn empty_for_signing( committee_index: u64, committee_length: usize, @@ -109,6 +110,7 @@ impl Attestation { beacon_block_root: Hash256, source: Checkpoint, target: Checkpoint, + payload_present: bool, spec: &ChainSpec, ) -> Result { if spec.fork_name_at_slot::(slot).electra_enabled() { @@ -116,12 +118,19 @@ impl Attestation { committee_bits .set(committee_index as usize, true) .map_err(|_| Error::InvalidCommitteeIndex)?; + // Gloas attestation data index now indicates payload presence. + // Pre-gloas index is always 0. 
+ let index = if spec.fork_name_at_slot::(slot).gloas_enabled() && payload_present { + 1u64 + } else { + 0u64 + }; Ok(Attestation::Electra(AttestationElectra { aggregation_bits: BitList::with_capacity(committee_length) .map_err(|_| Error::InvalidCommitteeLength)?, data: AttestationData { slot, - index: 0u64, + index, beacon_block_root, source, target, diff --git a/validator_client/validator_services/src/attestation_service.rs b/validator_client/validator_services/src/attestation_service.rs index dc5fc27a4f..3ffe602892 100644 --- a/validator_client/validator_services/src/attestation_service.rs +++ b/validator_client/validator_services/src/attestation_service.rs @@ -546,6 +546,7 @@ impl AttestationService attestation, From fae7941b2d13dc9cd1ba8282aefe2798a70c7c74 Mon Sep 17 00:00:00 2001 From: Shane K Moore <41407272+shane-moore@users.noreply.github.com> Date: Sun, 26 Apr 2026 08:25:00 -0700 Subject: [PATCH 7/7] Gloas ptc duties beacon node response (#8415) Co-Authored-By: shane-moore Co-Authored-By: Eitan Seri-Levi Co-Authored-By: Eitan Seri-Levi Co-Authored-By: dapplion <35266934+dapplion@users.noreply.github.com> --- beacon_node/beacon_chain/src/beacon_chain.rs | 44 ++++- beacon_node/http_api/src/lib.rs | 10 + beacon_node/http_api/src/ptc_duties.rs | 182 +++++++++++++++++++ beacon_node/http_api/src/validator/mod.rs | 38 +++- beacon_node/http_api/tests/tests.rs | 120 +++++++++++- consensus/types/src/state/beacon_state.rs | 21 +++ 6 files changed, 411 insertions(+), 4 deletions(-) create mode 100644 beacon_node/http_api/src/ptc_duties.rs diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index b556e6d849..bfe1b404e0 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -84,8 +84,8 @@ use crate::{ use bls::{PublicKey, PublicKeyBytes, Signature}; use eth2::beacon_response::ForkVersionedResponse; use eth2::types::{ - EventKind, SseBlobSidecar, SseBlock, 
SseDataColumnSidecar, SseExtendedPayloadAttributes, - SseHead, + EventKind, PtcDuty, SseBlobSidecar, SseBlock, SseDataColumnSidecar, + SseExtendedPayloadAttributes, SseHead, }; use execution_layer::{ BlockProposalContents, BlockProposalContentsType, BuilderParams, ChainHealth, ExecutionLayer, @@ -1719,6 +1719,46 @@ impl BeaconChain { Ok((duties, dependent_root, execution_status)) } + /// Get PTC duties for validators at a given epoch. + /// + /// TODO(gloas): per-validator `get_ptc_assignment` makes this O(N * slots_per_epoch * PTCSize). + /// A future ptc cache (or a single-pass `ptc_window` walk) can drop this to + /// O(slots_per_epoch * PTCSize + N). + pub fn compute_ptc_duties( + &self, + state: &BeaconState, + epoch: Epoch, + validator_indices: &[u64], + dependent_block_root: Hash256, + ) -> Result<(Vec>, Hash256), Error> { + // The ptc_window only covers previous, current, and next epochs. + let relative_epoch = RelativeEpoch::from_epoch(state.current_epoch(), epoch) + .map_err(Error::IncorrectStateForAttestation)?; + + let dependent_root = + state.attester_shuffling_decision_root(dependent_block_root, relative_epoch)?; + + let pubkey_cache = self.validator_pubkey_cache.read(); + + let duties = validator_indices + .iter() + .map(|&validator_index| -> Result, Error> { + let Some(&pubkey) = pubkey_cache.get_pubkey_bytes(validator_index as usize) else { + return Ok(None); + }; + let slot_opt = + state.get_ptc_assignment(validator_index as usize, epoch, &self.spec)?; + Ok(slot_opt.map(|slot| PtcDuty { + validator_index, + slot, + pubkey, + })) + }) + .collect::, _>>()?; + + Ok((duties, dependent_root)) + } + pub fn get_aggregated_attestation( &self, attestation: AttestationRef, diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 0be631c057..bd80dd1e82 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -19,6 +19,7 @@ mod metrics; mod peer; mod produce_block; mod proposer_duties; +mod ptc_duties; 
mod publish_attestations; mod publish_blocks; mod standard_block_rewards; @@ -2560,6 +2561,14 @@ pub fn serve( task_spawner_filter.clone(), ); + // POST validator/duties/ptc/{epoch} + let post_validator_duties_ptc = post_validator_duties_ptc( + eth_v1.clone(), + chain_filter.clone(), + not_while_syncing_filter.clone(), + task_spawner_filter.clone(), + ); + // POST validator/duties/sync/{epoch} let post_validator_duties_sync = post_validator_duties_sync( eth_v1.clone(), @@ -3410,6 +3419,7 @@ pub fn serve( .uor(post_beacon_rewards_attestations) .uor(post_beacon_rewards_sync_committee) .uor(post_validator_duties_attester) + .uor(post_validator_duties_ptc) .uor(post_validator_duties_sync) .uor(post_validator_aggregate_and_proofs) .uor(post_validator_contribution_and_proofs) diff --git a/beacon_node/http_api/src/ptc_duties.rs b/beacon_node/http_api/src/ptc_duties.rs new file mode 100644 index 0000000000..f727b84004 --- /dev/null +++ b/beacon_node/http_api/src/ptc_duties.rs @@ -0,0 +1,182 @@ +//! Contains the handler for the `POST validator/duties/ptc/{epoch}` endpoint. 
+ +use crate::state_id::StateId; +use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; +use eth2::types::{self as api_types, PtcDuty}; +use slot_clock::SlotClock; +use state_processing::state_advance::partial_state_advance; +use types::{BeaconState, ChainSpec, Epoch, EthSpec, Hash256}; + +type ApiDuties = api_types::DutiesResponse>; + +pub fn ptc_duties( + request_epoch: Epoch, + request_indices: &[u64], + chain: &BeaconChain, +) -> Result { + let current_epoch = chain + .slot_clock + .now_or_genesis() + .map(|slot| slot.epoch(T::EthSpec::slots_per_epoch())) + .ok_or(BeaconChainError::UnableToReadSlot) + .map_err(warp_utils::reject::unhandled_error)?; + + let tolerant_current_epoch = if chain.slot_clock.is_prior_to_genesis().unwrap_or(true) { + current_epoch + } else { + chain + .slot_clock + .now_with_future_tolerance(chain.spec.maximum_gossip_clock_disparity()) + .ok_or_else(|| { + warp_utils::reject::custom_server_error("unable to read slot clock".into()) + })? + .epoch(T::EthSpec::slots_per_epoch()) + }; + + let is_within_clock_tolerance = request_epoch == current_epoch + || request_epoch == current_epoch + 1 + || request_epoch == tolerant_current_epoch + 1; + + if is_within_clock_tolerance { + let head_epoch = chain + .canonical_head + .cached_head() + .snapshot + .beacon_state + .current_epoch(); + + let head_can_serve_request = request_epoch == head_epoch || request_epoch == head_epoch + 1; + + if head_can_serve_request { + compute_ptc_duties_from_cached_head(request_epoch, request_indices, chain) + } else { + compute_ptc_duties_from_state(request_epoch, request_indices, chain) + } + } else if request_epoch > current_epoch + 1 { + Err(warp_utils::reject::custom_bad_request(format!( + "request epoch {} is more than one epoch past the current epoch {}", + request_epoch, current_epoch + ))) + } else { + compute_ptc_duties_from_state(request_epoch, request_indices, chain) + } +} + +fn compute_ptc_duties_from_cached_head( + request_epoch: Epoch, + 
request_indices: &[u64],
+    chain: &BeaconChain<T>,
+) -> Result<ApiDuties, warp::reject::Rejection> {
+    let (cached_head, execution_status) = chain
+        .canonical_head
+        .head_and_execution_status()
+        .map_err(warp_utils::reject::unhandled_error)?;
+    let state = &cached_head.snapshot.beacon_state;
+    let head_block_root = cached_head.head_block_root();
+
+    let (duties, dependent_root) = chain
+        .compute_ptc_duties(state, request_epoch, request_indices, head_block_root)
+        .map_err(warp_utils::reject::unhandled_error)?;
+
+    convert_to_api_response(
+        duties,
+        dependent_root,
+        execution_status.is_optimistic_or_invalid(),
+    )
+}
+
+fn compute_ptc_duties_from_state<T: BeaconChainTypes>(
+    request_epoch: Epoch,
+    request_indices: &[u64],
+    chain: &BeaconChain<T>,
+) -> Result<ApiDuties, warp::reject::Rejection> {
+    let state_opt = {
+        let (cached_head, execution_status) = chain
+            .canonical_head
+            .head_and_execution_status()
+            .map_err(warp_utils::reject::unhandled_error)?;
+        let head = &cached_head.snapshot;
+
+        if head.beacon_state.current_epoch() <= request_epoch {
+            Some((
+                head.beacon_state_root(),
+                head.beacon_state.clone(),
+                execution_status.is_optimistic_or_invalid(),
+            ))
+        } else {
+            None
+        }
+    };
+
+    let (state, execution_optimistic) =
+        if let Some((state_root, mut state, execution_optimistic)) = state_opt {
+            ensure_state_knows_ptc_duties_for_epoch(
+                &mut state,
+                state_root,
+                request_epoch,
+                &chain.spec,
+            )?;
+            (state, execution_optimistic)
+        } else {
+            let (state, execution_optimistic, _finalized) =
+                StateId::from_slot(request_epoch.start_slot(T::EthSpec::slots_per_epoch()))
+                    .state(chain)?;
+            (state, execution_optimistic)
+        };
+
+    if !(state.current_epoch() == request_epoch || state.current_epoch() + 1 == request_epoch) {
+        return Err(warp_utils::reject::custom_server_error(format!(
+            "state epoch {} not suitable for request epoch {}",
+            state.current_epoch(),
+            request_epoch
+        )));
+    }
+
+    let (duties, dependent_root) = chain
+        .compute_ptc_duties(
+            &state,
+            request_epoch,
+            request_indices,
+            chain.genesis_block_root,
+        )
+        
.map_err(warp_utils::reject::unhandled_error)?;
+
+    convert_to_api_response(duties, dependent_root, execution_optimistic)
+}
+
+fn ensure_state_knows_ptc_duties_for_epoch<E: EthSpec>(
+    state: &mut BeaconState<E>,
+    state_root: Hash256,
+    target_epoch: Epoch,
+    spec: &ChainSpec,
+) -> Result<(), warp::reject::Rejection> {
+    if state.current_epoch() > target_epoch {
+        return Err(warp_utils::reject::custom_server_error(format!(
+            "state epoch {} is later than target epoch {}",
+            state.current_epoch(),
+            target_epoch
+        )));
+    } else if state.current_epoch() + 1 < target_epoch {
+        let target_slot = target_epoch
+            .saturating_sub(1_u64)
+            .start_slot(E::slots_per_epoch());
+
+        partial_state_advance(state, Some(state_root), target_slot, spec)
+            .map_err(BeaconChainError::from)
+            .map_err(warp_utils::reject::unhandled_error)?;
+    }
+
+    Ok(())
+}
+
+fn convert_to_api_response(
+    duties: Vec<Option<PtcDuty>>,
+    dependent_root: Hash256,
+    execution_optimistic: bool,
+) -> Result<ApiDuties, warp::reject::Rejection> {
+    Ok(api_types::DutiesResponse {
+        dependent_root,
+        execution_optimistic: Some(execution_optimistic),
+        data: duties.into_iter().flatten().collect(),
+    })
+}
diff --git a/beacon_node/http_api/src/validator/mod.rs b/beacon_node/http_api/src/validator/mod.rs
index 7349aa4db0..27fe5de6e7 100644
--- a/beacon_node/http_api/src/validator/mod.rs
+++ b/beacon_node/http_api/src/validator/mod.rs
@@ -7,7 +7,7 @@ use crate::utils::{
     ResponseFilter, TaskSpawnerFilter, ValidatorSubscriptionTxFilter, publish_network_message,
 };
 use crate::version::{V1, V2, V3, unsupported_version_rejection};
-use crate::{StateId, attester_duties, proposer_duties, sync_committees};
+use crate::{StateId, attester_duties, proposer_duties, ptc_duties, sync_committees};
 use beacon_chain::attestation_verification::VerifiedAttestation;
 use beacon_chain::{AttestationError, BeaconChain, BeaconChainError, BeaconChainTypes};
 use bls::PublicKeyBytes;
@@ -168,6 +168,42 @@ pub fn post_validator_duties_attester<T: BeaconChainTypes>(
         .boxed()
 }
 
+// POST validator/duties/ptc/{epoch}
+pub fn 
post_validator_duties_ptc<T: BeaconChainTypes>(
+    eth_v1: EthV1Filter,
+    chain_filter: ChainFilter<T>,
+    not_while_syncing_filter: NotWhileSyncingFilter,
+    task_spawner_filter: TaskSpawnerFilter<T>,
+) -> ResponseFilter {
+    eth_v1
+        .and(warp::path("validator"))
+        .and(warp::path("duties"))
+        .and(warp::path("ptc"))
+        .and(warp::path::param::<Epoch>().or_else(|_| async {
+            Err(warp_utils::reject::custom_bad_request(
+                "Invalid epoch".to_string(),
+            ))
+        }))
+        .and(warp::path::end())
+        .and(not_while_syncing_filter.clone())
+        .and(warp_utils::json::json())
+        .and(task_spawner_filter.clone())
+        .and(chain_filter.clone())
+        .then(
+            |epoch: Epoch,
+             not_synced_filter: Result<(), Rejection>,
+             indices: ValidatorIndexData,
+             task_spawner: TaskSpawner<T::EthSpec>,
+             chain: Arc<BeaconChain<T>>| {
+                task_spawner.blocking_json_task(Priority::P0, move || {
+                    not_synced_filter?;
+                    ptc_duties::ptc_duties(epoch, &indices.0, &chain)
+                })
+            },
+        )
+        .boxed()
+}
+
 // GET validator/aggregate_attestation?attestation_data_root,slot
 pub fn get_validator_aggregate_attestation<T: BeaconChainTypes>(
     any_version: AnyVersionFilter,
diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs
index 2dd4c28040..aac3384fbd 100644
--- a/beacon_node/http_api/tests/tests.rs
+++ b/beacon_node/http_api/tests/tests.rs
@@ -3474,7 +3474,6 @@ impl ApiTester {
         self
     }
 
-    // TODO(EIP-7732): Add test_get_validator_duties_ptc function to test PTC duties endpoint
     pub async fn test_get_validator_duties_proposer_v2(self) -> Self {
         let current_epoch = self.chain.epoch().unwrap();
 
@@ -3598,6 +3597,17 @@ impl ApiTester {
             "should not get attester duties outside of tolerance"
         );
 
+        assert_eq!(
+            self.client
+                .post_validator_duties_ptc(next_epoch, &[0])
+                .await
+                .unwrap_err()
+                .status()
+                .map(Into::into),
+            Some(400),
+            "should not get ptc duties outside of tolerance"
+        );
+
         self.chain.slot_clock.set_current_time(
             current_epoch_start - self.chain.spec.maximum_gossip_clock_disparity(),
         );
@@ -3621,6 +3631,88 @@ impl ApiTester {
             .await
             .expect("should get attester 
duties within tolerance");
 
+        self.client
+            .post_validator_duties_ptc(next_epoch, &[0])
+            .await
+            .expect("should get ptc duties within tolerance");
+
+        self
+    }
+
+    pub async fn test_get_validator_duties_ptc(self) -> Self {
+        let current_epoch = self.chain.epoch().unwrap().as_u64();
+
+        let half = current_epoch / 2;
+        let first = current_epoch - half;
+        let last = current_epoch + half;
+
+        for epoch in first..=last {
+            for indices in self.interesting_validator_indices() {
+                let epoch = Epoch::from(epoch);
+
+                // The endpoint does not allow getting duties past the next epoch.
+                if epoch > current_epoch + 1 {
+                    assert_eq!(
+                        self.client
+                            .post_validator_duties_ptc(epoch, indices.as_slice())
+                            .await
+                            .unwrap_err()
+                            .status()
+                            .map(Into::into),
+                        Some(400)
+                    );
+                    continue;
+                }
+
+                let results = self
+                    .client
+                    .post_validator_duties_ptc(epoch, indices.as_slice())
+                    .await
+                    .unwrap();
+
+                let dependent_root = self
+                    .chain
+                    .block_root_at_slot(
+                        (epoch - 1).start_slot(E::slots_per_epoch()) - 1,
+                        WhenSlotSkipped::Prev,
+                    )
+                    .unwrap()
+                    .unwrap_or(self.chain.head_beacon_block_root());
+
+                assert_eq!(results.dependent_root, dependent_root);
+
+                let result_duties = results.data;
+
+                let state = self
+                    .chain
+                    .state_at_slot(
+                        epoch.start_slot(E::slots_per_epoch()),
+                        StateSkipConfig::WithStateRoots,
+                    )
+                    .unwrap();
+
+                let expected_duties: Vec<PtcDuty> = indices
+                    .iter()
+                    .filter_map(|&validator_index| {
+                        let validator = state.validators().get(validator_index as usize)?;
+                        let slot = state
+                            .get_ptc_assignment(validator_index as usize, epoch, &self.chain.spec)
+                            .unwrap()?;
+                        Some(PtcDuty {
+                            pubkey: validator.pubkey,
+                            validator_index,
+                            slot,
+                        })
+                    })
+                    .collect();
+
+                assert_eq!(
+                    result_duties, expected_duties,
+                    "ptc duties should exactly match state assignments"
+                );
+            }
+        }
+
         self
     }
 
@@ -7871,6 +7963,9 @@ async fn get_light_client_finality_update() {
 
 #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
 async fn get_validator_duties_early() {
+    if 
!fork_name_from_env().is_some_and(|f| f.gloas_enabled()) {
+        return;
+    }
     ApiTester::new()
         .await
         .test_get_validator_duties_early()
         .await
@@ -7936,6 +8031,29 @@ async fn get_validator_duties_proposer_v2_with_skip_slots() {
         .await;
 }
 
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
+async fn get_validator_duties_ptc() {
+    if !fork_name_from_env().is_some_and(|f| f.gloas_enabled()) {
+        return;
+    }
+    ApiTester::new_with_hard_forks()
+        .await
+        .test_get_validator_duties_ptc()
+        .await;
+}
+
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
+async fn get_validator_duties_ptc_with_skip_slots() {
+    if !fork_name_from_env().is_some_and(|f| f.gloas_enabled()) {
+        return;
+    }
+    ApiTester::new_with_hard_forks()
+        .await
+        .skip_slots(E::slots_per_epoch() * 2)
+        .test_get_validator_duties_ptc()
+        .await;
+}
+
 #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
 async fn block_production() {
     ApiTester::new().await.test_block_production().await;
diff --git a/consensus/types/src/state/beacon_state.rs b/consensus/types/src/state/beacon_state.rs
index 7e2b3096a8..7ed3121d6e 100644
--- a/consensus/types/src/state/beacon_state.rs
+++ b/consensus/types/src/state/beacon_state.rs
@@ -3198,6 +3198,27 @@ impl<E: EthSpec> BeaconState<E> {
         Ok(hash(&preimage))
     }
 
+    /// Find the first slot in the given epoch where the validator is assigned to the PTC.
+    ///
+    /// Returns `Ok(Some(slot))` if the validator is in the PTC for any slot in the epoch,
+    /// `Ok(None)` if the validator is not in the PTC for this epoch.
+    ///
+    /// This iterates through all slots in the epoch, so it's O(slots_per_epoch) per validator.
+    pub fn get_ptc_assignment(
+        &self,
+        validator_index: usize,
+        epoch: Epoch,
+        spec: &ChainSpec,
+    ) -> Result<Option<Slot>, BeaconStateError> {
+        for slot in epoch.slot_iter(E::slots_per_epoch()) {
+            let ptc = self.get_ptc(slot, spec)?;
+            if ptc.0.contains(&validator_index) {
+                return Ok(Some(slot));
+            }
+        }
+        Ok(None)
+    }
+
     /// Return size indices sampled by effective balance, using indices as candidates.
     ///
     /// If shuffle_indices is True, candidate indices are themselves sampled from indices