Gloas: publish data columns during local block building (#9182)

Make sure we are publishing columns during local block production


  


Co-Authored-By: Eitan Seri-Levi <eserilev@ucsc.edu>

Co-Authored-By: dapplion <35266934+dapplion@users.noreply.github.com>
This commit is contained in:
Eitan Seri-Levi
2026-04-28 15:19:47 +02:00
committed by GitHub
parent 4415cf0506
commit 6258eadc91
9 changed files with 545 additions and 57 deletions

View File

@@ -35,6 +35,7 @@ use types::{
SignedVoluntaryExit, Slot, SyncAggregate, Withdrawal, Withdrawals, SignedVoluntaryExit, Slot, SyncAggregate, Withdrawal, Withdrawals,
}; };
use crate::pending_payload_envelopes::PendingEnvelopeData;
use crate::{ use crate::{
BeaconChain, BeaconChainError, BeaconChainTypes, BlockProductionError, BeaconChain, BeaconChainError, BeaconChainTypes, BlockProductionError,
ProduceBlockVerification, block_production::BlockProductionState, ProduceBlockVerification, block_production::BlockProductionState,
@@ -74,6 +75,7 @@ pub struct ExecutionPayloadData<E: types::EthSpec> {
pub execution_requests: ExecutionRequests<E>, pub execution_requests: ExecutionRequests<E>,
pub builder_index: BuilderIndex, pub builder_index: BuilderIndex,
pub slot: Slot, pub slot: Slot,
pub blobs_and_proofs: (types::BlobsList<E>, types::KzgProofs<E>),
} }
impl<T: BeaconChainTypes> BeaconChain<T> { impl<T: BeaconChainTypes> BeaconChain<T> {
@@ -647,9 +649,14 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
let envelope_slot = payload_data.slot; let envelope_slot = payload_data.slot;
// TODO(gloas) might be safer to cache by root instead of by slot. // TODO(gloas) might be safer to cache by root instead of by slot.
// We should revisit this once this code path + beacon api spec matures // We should revisit this once this code path + beacon api spec matures
self.pending_payload_envelopes let (blobs, _) = payload_data.blobs_and_proofs;
.write() self.pending_payload_envelopes.write().insert(
.insert(envelope_slot, signed_envelope.message); envelope_slot,
PendingEnvelopeData {
envelope: signed_envelope.message,
blobs: Some(blobs),
},
);
debug!( debug!(
%beacon_block_root, %beacon_block_root,
@@ -769,7 +776,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
payload_value: _, payload_value: _,
execution_requests, execution_requests,
blob_kzg_commitments, blob_kzg_commitments,
blobs_and_proofs: _, blobs_and_proofs,
} = block_proposal_contents; } = block_proposal_contents;
// TODO(gloas) since we are defaulting to local building, execution payment is 0 // TODO(gloas) since we are defaulting to local building, execution payment is 0
@@ -795,6 +802,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
execution_requests, execution_requests,
builder_index, builder_index,
slot: produce_at_slot, slot: produce_at_slot,
blobs_and_proofs,
}; };
// TODO(gloas) this is only local building // TODO(gloas) this is only local building

View File

@@ -296,6 +296,35 @@ pub fn blobs_to_data_column_sidecars<E: EthSpec>(
} }
} }
/// Build Gloas data column sidecars from blobs, computing cells and proofs locally.
pub fn blobs_to_data_column_sidecars_gloas<E: EthSpec>(
blobs: &[&Blob<E>],
beacon_block_root: Hash256,
slot: Slot,
kzg: &Kzg,
spec: &ChainSpec,
) -> Result<DataColumnSidecarList<E>, DataColumnSidecarError> {
if blobs.is_empty() {
return Ok(vec![]);
}
let blob_cells_and_proofs_vec = blobs
.into_par_iter()
.map(|blob| {
let blob = blob.as_ref().try_into().map_err(|e| {
KzgError::InconsistentArrayLength(format!(
"blob should have a guaranteed size due to FixedVector: {e:?}"
))
})?;
kzg.compute_cells_and_proofs(blob)
})
.collect::<Result<Vec<_>, KzgError>>()?;
build_data_column_sidecars_gloas(beacon_block_root, slot, blob_cells_and_proofs_vec, spec)
.map_err(DataColumnSidecarError::BuildSidecarFailed)
}
/// Build data column sidecars from a signed beacon block and its blobs. /// Build data column sidecars from a signed beacon block and its blobs.
#[instrument(skip_all, level = "debug", fields(blob_count = blobs_and_proofs.len()))] #[instrument(skip_all, level = "debug", fields(blob_count = blobs_and_proofs.len()))]
pub fn blobs_to_partial_data_columns<E: EthSpec>( pub fn blobs_to_partial_data_columns<E: EthSpec>(
@@ -728,8 +757,8 @@ pub fn reconstruct_data_columns<E: EthSpec>(
#[cfg(test)] #[cfg(test)]
mod test { mod test {
use crate::kzg_utils::{ use crate::kzg_utils::{
blobs_to_data_column_sidecars, reconstruct_blobs, reconstruct_data_columns, blobs_to_data_column_sidecars, blobs_to_data_column_sidecars_gloas, reconstruct_blobs,
validate_full_data_columns, reconstruct_data_columns, validate_full_data_columns,
}; };
use bls::Signature; use bls::Signature;
use eth2::types::BlobsBundle; use eth2::types::BlobsBundle;
@@ -737,25 +766,30 @@ mod test {
use kzg::{Kzg, KzgCommitment, trusted_setup::get_trusted_setup}; use kzg::{Kzg, KzgCommitment, trusted_setup::get_trusted_setup};
use types::{ use types::{
BeaconBlock, BeaconBlockFulu, BlobsList, ChainSpec, EmptyBlock, EthSpec, ForkName, BeaconBlock, BeaconBlockFulu, BlobsList, ChainSpec, EmptyBlock, EthSpec, ForkName,
FullPayload, KzgProofs, MainnetEthSpec, SignedBeaconBlock, kzg_ext::KzgCommitments, FullPayload, Hash256, KzgProofs, MainnetEthSpec, SignedBeaconBlock, Slot,
kzg_ext::KzgCommitments,
}; };
type E = MainnetEthSpec; type E = MainnetEthSpec;
// Loading and initializing PeerDAS KZG is expensive and slow, so we group the tests together // Loading and initializing PeerDAS KZG is expensive and slow, so we group the tests together
// only load it once. // only load it once.
// TODO(Gloas) make this generic over fulu/gloas, or write a separate function for Gloas
#[test] #[test]
fn test_build_data_columns_sidecars() { fn test_build_data_columns_sidecars() {
let spec = ForkName::Fulu.make_genesis_spec(E::default_spec());
let kzg = get_kzg(); let kzg = get_kzg();
test_build_data_columns_empty(&kzg, &spec);
test_build_data_columns_fulu(&kzg, &spec); let fulu_spec = ForkName::Fulu.make_genesis_spec(E::default_spec());
test_reconstruct_data_columns(&kzg, &spec); test_build_data_columns_empty(&kzg, &fulu_spec);
test_reconstruct_data_columns_unordered(&kzg, &spec); test_build_data_columns_fulu(&kzg, &fulu_spec);
test_reconstruct_blobs_from_data_columns(&kzg, &spec); test_reconstruct_data_columns(&kzg, &fulu_spec);
test_reconstruct_blobs_from_data_columns_unordered(&kzg, &spec); test_reconstruct_data_columns_unordered(&kzg, &fulu_spec);
test_validate_data_columns(&kzg, &spec); test_reconstruct_blobs_from_data_columns(&kzg, &fulu_spec);
test_reconstruct_blobs_from_data_columns_unordered(&kzg, &fulu_spec);
test_validate_data_columns(&kzg, &fulu_spec);
let gloas_spec = ForkName::Gloas.make_genesis_spec(E::default_spec());
test_build_data_columns_gloas(&kzg, &gloas_spec);
test_build_data_columns_gloas_empty(&kzg, &gloas_spec);
} }
#[track_caller] #[track_caller]
@@ -784,8 +818,49 @@ mod test {
assert!(column_sidecars.is_empty()); assert!(column_sidecars.is_empty());
} }
// TODO(gloas) create `test_build_data_columns_gloas` and make sure its called #[track_caller]
// in the relevant places fn test_build_data_columns_gloas(kzg: &Kzg, spec: &ChainSpec) {
let num_of_blobs = 2;
let (blobs, _proofs) = create_test_gloas_blobs::<E>(num_of_blobs);
let beacon_block_root = Hash256::random();
let slot = Slot::new(0);
let blob_refs: Vec<_> = blobs.iter().collect();
let column_sidecars = blobs_to_data_column_sidecars_gloas::<E>(
&blob_refs,
beacon_block_root,
slot,
kzg,
spec,
)
.unwrap();
assert_eq!(column_sidecars.len(), E::number_of_columns());
for (idx, col_sidecar) in column_sidecars.iter().enumerate() {
assert_eq!(*col_sidecar.index(), idx as u64);
assert_eq!(col_sidecar.column().len(), num_of_blobs);
assert_eq!(col_sidecar.kzg_proofs().len(), num_of_blobs);
let gloas_col = col_sidecar.as_gloas().expect("should be Gloas sidecar");
assert_eq!(gloas_col.beacon_block_root, beacon_block_root);
assert_eq!(gloas_col.slot, slot);
}
}
#[track_caller]
fn test_build_data_columns_gloas_empty(kzg: &Kzg, spec: &ChainSpec) {
let blob_refs: Vec<&types::Blob<E>> = vec![];
let column_sidecars = blobs_to_data_column_sidecars_gloas::<E>(
&blob_refs,
Hash256::random(),
Slot::new(0),
kzg,
spec,
)
.unwrap();
assert!(column_sidecars.is_empty());
}
#[track_caller] #[track_caller]
fn test_build_data_columns_fulu(kzg: &Kzg, spec: &ChainSpec) { fn test_build_data_columns_fulu(kzg: &Kzg, spec: &ChainSpec) {
// Using at least 2 blobs to make sure we're arranging the data columns correctly. // Using at least 2 blobs to make sure we're arranging the data columns correctly.
@@ -974,4 +1049,9 @@ mod test {
(signed_block, blobs, proofs) (signed_block, blobs, proofs)
} }
fn create_test_gloas_blobs<E: EthSpec>(num_of_blobs: usize) -> (BlobsList<E>, KzgProofs<E>) {
let (blobs_bundle, _) = generate_blobs::<E>(num_of_blobs, ForkName::Gloas).unwrap();
(blobs_bundle.blobs, blobs_bundle.proofs)
}
} }

View File

@@ -6,7 +6,12 @@
//! and publishes the payload. //! and publishes the payload.
use std::collections::HashMap; use std::collections::HashMap;
use types::{EthSpec, ExecutionPayloadEnvelope, Slot}; use types::{BlobsList, EthSpec, ExecutionPayloadEnvelope, Slot};
pub struct PendingEnvelopeData<E: EthSpec> {
pub envelope: ExecutionPayloadEnvelope<E>,
pub blobs: Option<BlobsList<E>>,
}
/// Cache for pending execution payload envelopes awaiting publishing. /// Cache for pending execution payload envelopes awaiting publishing.
/// ///
@@ -16,7 +21,7 @@ pub struct PendingPayloadEnvelopes<E: EthSpec> {
/// Maximum number of slots to keep envelopes before pruning. /// Maximum number of slots to keep envelopes before pruning.
max_slot_age: u64, max_slot_age: u64,
/// The envelopes, keyed by slot. /// The envelopes, keyed by slot.
envelopes: HashMap<Slot, ExecutionPayloadEnvelope<E>>, envelopes: HashMap<Slot, PendingEnvelopeData<E>>,
} }
impl<E: EthSpec> Default for PendingPayloadEnvelopes<E> { impl<E: EthSpec> Default for PendingPayloadEnvelopes<E> {
@@ -38,19 +43,24 @@ impl<E: EthSpec> PendingPayloadEnvelopes<E> {
} }
/// Insert a pending envelope into the cache. /// Insert a pending envelope into the cache.
pub fn insert(&mut self, slot: Slot, envelope: ExecutionPayloadEnvelope<E>) { pub fn insert(&mut self, slot: Slot, data: PendingEnvelopeData<E>) {
// TODO(gloas): we may want to check for duplicates here, which shouldn't be allowed // TODO(gloas): we may want to check for duplicates here, which shouldn't be allowed
self.envelopes.insert(slot, envelope); self.envelopes.insert(slot, data);
} }
/// Get a pending envelope by slot. /// Get a pending envelope by slot.
pub fn get(&self, slot: Slot) -> Option<&ExecutionPayloadEnvelope<E>> { pub fn get(&self, slot: Slot) -> Option<&ExecutionPayloadEnvelope<E>> {
self.envelopes.get(&slot) self.envelopes.get(&slot).map(|d| &d.envelope)
}
/// Remove and return the blobs and proofs for a slot, leaving the envelope in place.
pub fn take_blobs(&mut self, slot: Slot) -> Option<BlobsList<E>> {
self.envelopes.get_mut(&slot).and_then(|d| d.blobs.take())
} }
/// Remove and return a pending envelope by slot. /// Remove and return a pending envelope by slot.
pub fn remove(&mut self, slot: Slot) -> Option<ExecutionPayloadEnvelope<E>> { pub fn remove(&mut self, slot: Slot) -> Option<ExecutionPayloadEnvelope<E>> {
self.envelopes.remove(&slot) self.envelopes.remove(&slot).map(|d| d.envelope)
} }
/// Check if an envelope exists for the given slot. /// Check if an envelope exists for the given slot.
@@ -85,8 +95,9 @@ mod tests {
type E = MainnetEthSpec; type E = MainnetEthSpec;
fn make_envelope(slot: Slot) -> ExecutionPayloadEnvelope<E> { fn make_envelope(slot: Slot) -> PendingEnvelopeData<E> {
ExecutionPayloadEnvelope { PendingEnvelopeData {
envelope: ExecutionPayloadEnvelope {
payload: ExecutionPayloadGloas { payload: ExecutionPayloadGloas {
slot_number: slot, slot_number: slot,
..ExecutionPayloadGloas::default() ..ExecutionPayloadGloas::default()
@@ -94,6 +105,8 @@ mod tests {
execution_requests: ExecutionRequests::default(), execution_requests: ExecutionRequests::default(),
builder_index: 0, builder_index: 0,
beacon_block_root: Hash256::ZERO, beacon_block_root: Hash256::ZERO,
},
blobs: None,
} }
} }
@@ -101,33 +114,73 @@ mod tests {
fn insert_and_get() { fn insert_and_get() {
let mut cache = PendingPayloadEnvelopes::<E>::default(); let mut cache = PendingPayloadEnvelopes::<E>::default();
let slot = Slot::new(1); let slot = Slot::new(1);
let envelope = make_envelope(slot); let data = make_envelope(slot);
let expected_envelope = data.envelope.clone();
assert!(!cache.contains(slot)); assert!(!cache.contains(slot));
assert_eq!(cache.len(), 0); assert_eq!(cache.len(), 0);
cache.insert(slot, envelope.clone()); cache.insert(slot, data);
assert!(cache.contains(slot)); assert!(cache.contains(slot));
assert_eq!(cache.len(), 1); assert_eq!(cache.len(), 1);
assert_eq!(cache.get(slot), Some(&envelope)); assert_eq!(cache.get(slot), Some(&expected_envelope));
} }
#[test] #[test]
fn remove() { fn remove() {
let mut cache = PendingPayloadEnvelopes::<E>::default(); let mut cache = PendingPayloadEnvelopes::<E>::default();
let slot = Slot::new(1); let slot = Slot::new(1);
let envelope = make_envelope(slot); let data = make_envelope(slot);
let expected_envelope = data.envelope.clone();
cache.insert(slot, envelope.clone()); cache.insert(slot, data);
assert!(cache.contains(slot)); assert!(cache.contains(slot));
let removed = cache.remove(slot); let removed = cache.remove(slot);
assert_eq!(removed, Some(envelope)); assert_eq!(removed, Some(expected_envelope));
assert!(!cache.contains(slot)); assert!(!cache.contains(slot));
assert_eq!(cache.len(), 0); assert_eq!(cache.len(), 0);
} }
#[test]
fn take_blobs_returns_once() {
let mut cache = PendingPayloadEnvelopes::<E>::default();
let slot = Slot::new(1);
let blobs = BlobsList::<E>::default();
let data = PendingEnvelopeData {
envelope: make_envelope(slot).envelope,
blobs: Some(blobs),
};
cache.insert(slot, data);
// First take returns the blobs
let taken = cache.take_blobs(slot);
assert!(taken.is_some());
// Second take returns None — blobs are consumed
let taken_again = cache.take_blobs(slot);
assert!(taken_again.is_none());
// Envelope is still in the cache
assert!(cache.contains(slot));
assert!(cache.get(slot).is_some());
}
#[test]
fn take_blobs_returns_none_when_absent() {
let mut cache = PendingPayloadEnvelopes::<E>::default();
let slot = Slot::new(1);
// Insert with no blobs
cache.insert(slot, make_envelope(slot));
assert!(cache.take_blobs(slot).is_none());
// Non-existent slot
assert!(cache.take_blobs(Slot::new(99)).is_none());
}
#[test] #[test]
fn prune_old_envelopes() { fn prune_old_envelopes() {
let mut cache = PendingPayloadEnvelopes::<E>::new(2); let mut cache = PendingPayloadEnvelopes::<E>::new(2);

View File

@@ -86,6 +86,8 @@ pub const FORK_NAME_ENV_VAR: &str = "FORK_NAME";
// `beacon_node/execution_layer/src/test_utils/fixtures/mainnet/test_blobs_bundle.ssz` // `beacon_node/execution_layer/src/test_utils/fixtures/mainnet/test_blobs_bundle.ssz`
pub const TEST_DATA_COLUMN_SIDECARS_SSZ: &[u8] = pub const TEST_DATA_COLUMN_SIDECARS_SSZ: &[u8] =
include_bytes!("test_utils/fixtures/test_data_column_sidecars.ssz"); include_bytes!("test_utils/fixtures/test_data_column_sidecars.ssz");
pub const TEST_DATA_COLUMN_SIDECARS_GLOAS_SSZ: &[u8] =
include_bytes!("test_utils/fixtures/test_data_column_sidecars_gloas.ssz");
// Default target aggregators to set during testing, this ensures an aggregator at each slot. // Default target aggregators to set during testing, this ensures an aggregator at each slot.
// //
@@ -3789,24 +3791,24 @@ pub fn generate_data_column_sidecars_from_block<E: EthSpec>(
block: &SignedBeaconBlock<E>, block: &SignedBeaconBlock<E>,
spec: &ChainSpec, spec: &ChainSpec,
) -> DataColumnSidecarList<E> { ) -> DataColumnSidecarList<E> {
let kzg_commitments = block.message().body().blob_kzg_commitments().unwrap();
if kzg_commitments.is_empty() {
return vec![];
}
let kzg_commitments_inclusion_proof = block
.message()
.body()
.kzg_commitments_merkle_proof()
.unwrap();
let signed_block_header = block.signed_block_header();
// Load the precomputed column sidecar to avoid computing them for every block in the tests. // Load the precomputed column sidecar to avoid computing them for every block in the tests.
// Then repeat the cells and proofs for every blob // Then repeat the cells and proofs for every blob
if block.fork_name_unchecked().gloas_enabled() { if block.fork_name_unchecked().gloas_enabled() {
let kzg_commitments = &block
.message()
.body()
.signed_execution_payload_bid()
.expect("Gloas block should have a payload bid")
.message
.blob_kzg_commitments;
if kzg_commitments.is_empty() {
return vec![];
}
let num_blobs = kzg_commitments.len();
let signed_block_header = block.signed_block_header();
let template_data_columns = let template_data_columns =
RuntimeVariableList::<DataColumnSidecarGloas<E>>::from_ssz_bytes( RuntimeVariableList::<DataColumnSidecarGloas<E>>::from_ssz_bytes(
TEST_DATA_COLUMN_SIDECARS_SSZ, TEST_DATA_COLUMN_SIDECARS_GLOAS_SSZ,
E::number_of_columns(), E::number_of_columns(),
) )
.unwrap(); .unwrap();
@@ -3826,7 +3828,7 @@ pub fn generate_data_column_sidecars_from_block<E: EthSpec>(
.collect::<(Vec<_>, Vec<_>)>(); .collect::<(Vec<_>, Vec<_>)>();
let blob_cells_and_proofs_vec = let blob_cells_and_proofs_vec =
vec![(cells.try_into().unwrap(), proofs.try_into().unwrap()); kzg_commitments.len()]; vec![(cells.try_into().unwrap(), proofs.try_into().unwrap()); num_blobs];
build_data_column_sidecars_gloas( build_data_column_sidecars_gloas(
signed_block_header.message.tree_hash_root(), signed_block_header.message.tree_hash_root(),
@@ -3836,6 +3838,18 @@ pub fn generate_data_column_sidecars_from_block<E: EthSpec>(
) )
.unwrap() .unwrap()
} else { } else {
let kzg_commitments = block.message().body().blob_kzg_commitments().unwrap();
if kzg_commitments.is_empty() {
return vec![];
}
let kzg_commitments_inclusion_proof = block
.message()
.body()
.kzg_commitments_merkle_proof()
.unwrap();
let signed_block_header = block.signed_block_header();
// load the precomputed column sidecar to avoid computing them for every block in the tests. // load the precomputed column sidecar to avoid computing them for every block in the tests.
let template_data_columns = let template_data_columns =
RuntimeVariableList::<DataColumnSidecarFulu<E>>::from_ssz_bytes( RuntimeVariableList::<DataColumnSidecarFulu<E>>::from_ssz_bytes(

View File

@@ -115,6 +115,78 @@ async fn rpc_columns_with_invalid_header_signature() {
)); ));
} }
/// Test that Gloas block production caches blobs alongside the envelope, and that
/// data columns can be built from those cached blobs.
#[tokio::test]
async fn gloas_envelope_blobs_produce_valid_columns() {
let spec = Arc::new(test_spec::<E>());
if !spec.is_gloas_scheduled() {
return;
}
let harness = get_harness(VALIDATOR_COUNT, spec.clone(), NodeCustodyType::Supernode);
harness.execution_block_generator().set_min_blob_count(1);
// Build some chain depth.
let num_blocks = E::slots_per_epoch() as usize;
harness
.extend_chain(
num_blocks,
BlockStrategy::OnCanonicalHead,
AttestationStrategy::AllValidators,
)
.await;
harness.advance_slot();
let slot = harness.get_current_slot();
// Produce a Gloas block via the harness. This caches envelope + blobs.
let state = harness.get_current_state();
let (block_contents, opt_envelope, _post_state) =
harness.make_block_with_envelope(state, slot).await;
let signed_block = &block_contents.0;
assert!(
opt_envelope.is_some(),
"Gloas block production should produce an envelope"
);
// Verify the block has blob commitments in the bid.
let bid = signed_block
.message()
.body()
.signed_execution_payload_bid()
.expect("Gloas block should have a payload bid");
assert!(
!bid.message.blob_kzg_commitments.is_empty(),
"Block should have blob KZG commitments"
);
// Generate data columns from the block (using test fixtures, same as the harness does).
let data_column_sidecars =
generate_data_column_sidecars_from_block(signed_block, &harness.chain.spec);
assert_eq!(
data_column_sidecars.len(),
E::number_of_columns(),
"Should produce the correct number of data columns"
);
// Verify all columns are Gloas-format.
for col in &data_column_sidecars {
assert!(
col.as_gloas().is_ok(),
"Data column sidecar should be Gloas variant"
);
let gloas_col = col.as_gloas().expect("should be Gloas sidecar");
assert_eq!(gloas_col.beacon_block_root, signed_block.canonical_root());
assert_eq!(gloas_col.slot, slot);
}
// End-to-end DA flow (process_block → process_envelope → process_rpc_custody_columns)
// is not exercised here: Gloas blocks are not gated on columns at block-import time
// and the envelope/column gating belongs in a dedicated test once the DA path matures.
}
// Regression test for verify_header_signature bug: it uses head_fork() which is wrong for fork blocks // Regression test for verify_header_signature bug: it uses head_fork() which is wrong for fork blocks
#[tokio::test] #[tokio::test]
async fn verify_header_signature_fork_block_bug() { async fn verify_header_signature_fork_block_bug() {

View File

@@ -573,3 +573,121 @@ async fn prepare_payload_on_fork_boundary(
advanced state" advanced state"
); );
} }
#[tokio::test]
async fn gloas_block_production_caches_blobs_for_column_publishing() {
use beacon_chain::ProduceBlockVerification;
use beacon_chain::graffiti_calculator::GraffitiSettings;
use eth2::types::GraffitiPolicy;
let spec = Arc::new(test_spec::<E>());
if !spec.fork_name_at_slot::<E>(Slot::new(0)).gloas_enabled() {
return;
}
let db_path = tempdir().unwrap();
let store = get_store(&db_path, spec.clone());
let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT);
// Configure the mock EL to produce at least 1 blob per block.
harness.execution_block_generator().set_min_blob_count(1);
// Extend the chain a few slots to get past genesis.
harness
.extend_chain(
(E::slots_per_epoch() as usize) + 1,
BlockStrategy::OnCanonicalHead,
AttestationStrategy::AllValidators,
)
.await;
harness.advance_slot();
let slot = harness.get_current_slot();
// Produce a Gloas block directly via produce_block_on_state_gloas so we can
// inspect the pending cache before it's consumed.
let mut state = harness.get_current_state();
complete_state_advance(&mut state, None, slot, &spec).unwrap();
state.build_caches(&spec).unwrap();
let proposer_index = state.get_beacon_proposer_index(slot, &spec).unwrap();
let randao_reveal = harness.sign_randao_reveal(&state, proposer_index, slot);
let (parent_payload_status, parent_envelope) = {
let head = harness.chain.canonical_head.cached_head();
(
head.head_payload_status(),
head.snapshot.execution_envelope.clone(),
)
};
let graffiti_settings = GraffitiSettings::new(
Some(Graffiti::default()),
Some(GraffitiPolicy::PreserveUserGraffiti),
);
let (_block, _post_state, _value) = harness
.chain
.produce_block_on_state_gloas(
state,
None,
parent_payload_status,
parent_envelope,
slot,
randao_reveal,
graffiti_settings,
ProduceBlockVerification::VerifyRandao,
)
.await
.unwrap();
// The envelope + blobs should now be in the pending cache.
assert!(
harness
.chain
.pending_payload_envelopes
.read()
.contains(slot),
"Pending cache should contain an envelope for the produced slot"
);
// Take the blobs from the cache — this is what publish_execution_payload_envelope does.
let blobs = harness
.chain
.pending_payload_envelopes
.write()
.take_blobs(slot);
assert!(
blobs.is_some(),
"Blobs should be cached alongside the envelope"
);
let blobs = blobs.unwrap();
assert!(
!blobs.is_empty(),
"Blobs should be non-empty when min_blob_count >= 1"
);
// Verify take_blobs is consume-once.
let second_take = harness
.chain
.pending_payload_envelopes
.write()
.take_blobs(slot);
assert!(
second_take.is_none(),
"Blobs should only be consumable once"
);
// The envelope should still be in the cache after taking blobs.
assert!(
harness
.chain
.pending_payload_envelopes
.read()
.get(slot)
.is_some(),
"Envelope should remain in cache after taking blobs"
);
}

View File

@@ -1,10 +1,12 @@
use crate::block_id::BlockId; use crate::block_id::BlockId;
use crate::publish_blocks::publish_column_sidecars;
use crate::task_spawner::{Priority, TaskSpawner}; use crate::task_spawner::{Priority, TaskSpawner};
use crate::utils::{ChainFilter, EthV1Filter, NetworkTxFilter, ResponseFilter, TaskSpawnerFilter}; use crate::utils::{ChainFilter, EthV1Filter, NetworkTxFilter, ResponseFilter, TaskSpawnerFilter};
use crate::version::{ use crate::version::{
ResponseIncludesVersion, add_consensus_version_header, add_ssz_content_type_header, ResponseIncludesVersion, add_consensus_version_header, add_ssz_content_type_header,
execution_optimistic_finalized_beacon_response, execution_optimistic_finalized_beacon_response,
}; };
use beacon_chain::data_column_verification::{GossipDataColumnError, GossipVerifiedDataColumn};
use beacon_chain::{BeaconChain, BeaconChainTypes}; use beacon_chain::{BeaconChain, BeaconChainTypes};
use bytes::Bytes; use bytes::Bytes;
use eth2::types as api_types; use eth2::types as api_types;
@@ -12,10 +14,11 @@ use eth2::{CONTENT_TYPE_HEADER, SSZ_CONTENT_TYPE_HEADER};
use lighthouse_network::PubsubMessage; use lighthouse_network::PubsubMessage;
use network::NetworkMessage; use network::NetworkMessage;
use ssz::{Decode, Encode}; use ssz::{Decode, Encode};
use std::future::Future;
use std::sync::Arc; use std::sync::Arc;
use tokio::sync::mpsc::UnboundedSender; use tokio::sync::mpsc::UnboundedSender;
use tracing::{info, warn}; use tracing::{debug, error, info, warn};
use types::SignedExecutionPayloadEnvelope; use types::{EthSpec, SignedExecutionPayloadEnvelope};
use warp::{ use warp::{
Filter, Rejection, Reply, Filter, Rejection, Reply,
hyper::{Body, Response}, hyper::{Body, Response},
@@ -85,7 +88,9 @@ pub(crate) fn post_beacon_execution_payload_envelope<T: BeaconChainTypes>(
) )
.boxed() .boxed()
} }
/// Publishes a signed execution payload envelope to the network. /// Publishes a signed execution payload envelope to the network. Implements
/// `POST /eth/v1/beacon/execution_payload_envelope` per the in-flight beacon-APIs PR
/// <https://github.com/ethereum/beacon-APIs/pull/580>.
pub async fn publish_execution_payload_envelope<T: BeaconChainTypes>( pub async fn publish_execution_payload_envelope<T: BeaconChainTypes>(
envelope: SignedExecutionPayloadEnvelope<T::EthSpec>, envelope: SignedExecutionPayloadEnvelope<T::EthSpec>,
chain: Arc<BeaconChain<T>>, chain: Arc<BeaconChain<T>>,
@@ -109,7 +114,24 @@ pub async fn publish_execution_payload_envelope<T: BeaconChainTypes>(
"Publishing signed execution payload envelope to network" "Publishing signed execution payload envelope to network"
); );
// Publish to the network let blobs_and_proofs = chain.pending_payload_envelopes.write().take_blobs(slot);
// Spawn the column-build task (CPU-bound KZG cell-and-proof computation) before
// publishing the envelope so it runs in parallel with envelope gossip, narrowing
// the window in which peers see envelope-without-columns. If envelope publication
// fails below, dropping this future drops the spawned `JoinHandle` (the running
// closure on the blocking pool finishes and is then discarded — no work cancellation).
let column_build_future = match blobs_and_proofs {
Some(blobs) if !blobs.is_empty() => Some(spawn_build_gloas_data_columns_task(
&chain,
beacon_block_root,
slot,
blobs,
)?),
_ => None,
};
// Publish the envelope to the network.
crate::utils::publish_pubsub_message( crate::utils::publish_pubsub_message(
network_tx, network_tx,
PubsubMessage::ExecutionPayload(Box::new(envelope)), PubsubMessage::ExecutionPayload(Box::new(envelope)),
@@ -121,9 +143,130 @@ pub async fn publish_execution_payload_envelope<T: BeaconChainTypes>(
) )
})?; })?;
// From here on the envelope is on the wire. `take_blobs` already consumed the cache
// entry, so a retry would not republish columns; returning Err would mislead the
// caller. Log column-build/publish failures and fall through to `Ok`.
if let Some(column_build_future) = column_build_future {
let gossip_verified_columns = match column_build_future.await {
Ok(columns) => columns,
Err(e) => {
error!(
%slot,
error = ?e,
"Failed to build data columns after envelope publication"
);
return Ok(warp::reply().into_response());
}
};
if !gossip_verified_columns.is_empty() {
if let Err(e) = publish_column_sidecars(network_tx, &gossip_verified_columns, &chain) {
error!(
%slot,
error = ?e,
"Failed to publish data column sidecars after envelope publication"
);
return Ok(warp::reply().into_response());
}
let epoch = slot.epoch(T::EthSpec::slots_per_epoch());
let sampling_column_indices = chain.sampling_columns_for_epoch(epoch);
let sampling_columns = gossip_verified_columns
.into_iter()
.filter(|col| sampling_column_indices.contains(&col.index()))
.collect::<Vec<_>>();
// Local processing only — envelope already broadcast, so log and fall through.
if !sampling_columns.is_empty()
&& let Err(e) =
Box::pin(chain.process_gossip_data_columns(sampling_columns, || Ok(()))).await
{
error!(
%slot,
error = ?e,
"Failed to process sampling data columns during envelope publication"
);
}
}
}
Ok(warp::reply().into_response()) Ok(warp::reply().into_response())
} }
fn spawn_build_gloas_data_columns_task<T: BeaconChainTypes>(
chain: &Arc<BeaconChain<T>>,
beacon_block_root: types::Hash256,
slot: types::Slot,
blobs: types::BlobsList<T::EthSpec>,
) -> Result<impl Future<Output = Result<Vec<GossipVerifiedDataColumn<T>>, Rejection>>, Rejection> {
let chain_for_build = chain.clone();
let handle = chain
.task_executor
.spawn_blocking_handle(
move || build_gloas_data_columns(&chain_for_build, beacon_block_root, slot, &blobs),
"build_gloas_data_columns",
)
.ok_or_else(|| warp_utils::reject::custom_server_error("runtime shutdown".to_string()))?;
Ok(async move {
handle
.await
.map_err(|_| warp_utils::reject::custom_server_error("join error".to_string()))?
})
}
fn build_gloas_data_columns<T: BeaconChainTypes>(
chain: &BeaconChain<T>,
beacon_block_root: types::Hash256,
slot: types::Slot,
blobs: &types::BlobsList<T::EthSpec>,
) -> Result<Vec<GossipVerifiedDataColumn<T>>, Rejection> {
let blob_refs: Vec<_> = blobs.iter().collect();
let data_column_sidecars = beacon_chain::kzg_utils::blobs_to_data_column_sidecars_gloas(
&blob_refs,
beacon_block_root,
slot,
&chain.kzg,
&chain.spec,
)
.map_err(|e| {
error!(
error = ?e,
%slot,
"Failed to build data column sidecars for envelope"
);
warp_utils::reject::custom_server_error(format!("{e:?}"))
})?;
let gossip_verified_columns = data_column_sidecars
.into_iter()
.filter_map(|col| {
let index = *col.index();
match GossipVerifiedDataColumn::new_for_block_publishing(col, chain) {
Ok(verified) => Some(verified),
Err(GossipDataColumnError::PriorKnownUnpublished) => None,
Err(e) => {
warn!(
%slot,
column_index = index,
error = ?e,
"Locally-built data column failed gossip verification"
);
None
}
}
})
.collect::<Vec<_>>();
debug!(
%slot,
column_count = gossip_verified_columns.len(),
"Built data columns for envelope publication"
);
Ok(gossip_verified_columns)
}
// TODO(gloas): add tests for this endpoint once we support importing payloads into the db // TODO(gloas): add tests for this endpoint once we support importing payloads into the db
// GET beacon/execution_payload_envelope/{block_id} // GET beacon/execution_payload_envelope/{block_id}
pub(crate) fn get_beacon_execution_payload_envelope<T: BeaconChainTypes>( pub(crate) fn get_beacon_execution_payload_envelope<T: BeaconChainTypes>(

View File

@@ -494,7 +494,7 @@ fn publish_blob_sidecars<T: BeaconChainTypes>(
.map_err(|_| BlockError::BeaconChainError(Box::new(BeaconChainError::UnableToPublish))) .map_err(|_| BlockError::BeaconChainError(Box::new(BeaconChainError::UnableToPublish)))
} }
fn publish_column_sidecars<T: BeaconChainTypes>( pub(crate) fn publish_column_sidecars<T: BeaconChainTypes>(
sender_clone: &UnboundedSender<NetworkMessage<T::EthSpec>>, sender_clone: &UnboundedSender<NetworkMessage<T::EthSpec>>,
data_column_sidecars: &[GossipVerifiedDataColumn<T>], data_column_sidecars: &[GossipVerifiedDataColumn<T>],
chain: &BeaconChain<T>, chain: &BeaconChain<T>,