mirror of
https://github.com/sigp/lighthouse.git
synced 2026-04-30 03:03:45 +00:00
Gloas publish data columns during local block building (#9182)
Make sure we are publishing data columns during local block production. Co-Authored-By: Eitan Seri-Levi <eserilev@ucsc.edu> Co-Authored-By: dapplion <35266934+dapplion@users.noreply.github.com>
This commit is contained in:
@@ -115,6 +115,78 @@ async fn rpc_columns_with_invalid_header_signature() {
|
||||
));
|
||||
}
|
||||
|
||||
/// Test that Gloas block production caches blobs alongside the envelope, and that
|
||||
/// data columns can be built from those cached blobs.
|
||||
#[tokio::test]
|
||||
async fn gloas_envelope_blobs_produce_valid_columns() {
|
||||
let spec = Arc::new(test_spec::<E>());
|
||||
if !spec.is_gloas_scheduled() {
|
||||
return;
|
||||
}
|
||||
|
||||
let harness = get_harness(VALIDATOR_COUNT, spec.clone(), NodeCustodyType::Supernode);
|
||||
harness.execution_block_generator().set_min_blob_count(1);
|
||||
|
||||
// Build some chain depth.
|
||||
let num_blocks = E::slots_per_epoch() as usize;
|
||||
harness
|
||||
.extend_chain(
|
||||
num_blocks,
|
||||
BlockStrategy::OnCanonicalHead,
|
||||
AttestationStrategy::AllValidators,
|
||||
)
|
||||
.await;
|
||||
|
||||
harness.advance_slot();
|
||||
let slot = harness.get_current_slot();
|
||||
|
||||
// Produce a Gloas block via the harness. This caches envelope + blobs.
|
||||
let state = harness.get_current_state();
|
||||
let (block_contents, opt_envelope, _post_state) =
|
||||
harness.make_block_with_envelope(state, slot).await;
|
||||
let signed_block = &block_contents.0;
|
||||
|
||||
assert!(
|
||||
opt_envelope.is_some(),
|
||||
"Gloas block production should produce an envelope"
|
||||
);
|
||||
|
||||
// Verify the block has blob commitments in the bid.
|
||||
let bid = signed_block
|
||||
.message()
|
||||
.body()
|
||||
.signed_execution_payload_bid()
|
||||
.expect("Gloas block should have a payload bid");
|
||||
assert!(
|
||||
!bid.message.blob_kzg_commitments.is_empty(),
|
||||
"Block should have blob KZG commitments"
|
||||
);
|
||||
|
||||
// Generate data columns from the block (using test fixtures, same as the harness does).
|
||||
let data_column_sidecars =
|
||||
generate_data_column_sidecars_from_block(signed_block, &harness.chain.spec);
|
||||
assert_eq!(
|
||||
data_column_sidecars.len(),
|
||||
E::number_of_columns(),
|
||||
"Should produce the correct number of data columns"
|
||||
);
|
||||
|
||||
// Verify all columns are Gloas-format.
|
||||
for col in &data_column_sidecars {
|
||||
assert!(
|
||||
col.as_gloas().is_ok(),
|
||||
"Data column sidecar should be Gloas variant"
|
||||
);
|
||||
let gloas_col = col.as_gloas().expect("should be Gloas sidecar");
|
||||
assert_eq!(gloas_col.beacon_block_root, signed_block.canonical_root());
|
||||
assert_eq!(gloas_col.slot, slot);
|
||||
}
|
||||
|
||||
// End-to-end DA flow (process_block → process_envelope → process_rpc_custody_columns)
|
||||
// is not exercised here: Gloas blocks are not gated on columns at block-import time
|
||||
// and the envelope/column gating belongs in a dedicated test once the DA path matures.
|
||||
}
|
||||
|
||||
// Regression test for verify_header_signature bug: it uses head_fork() which is wrong for fork blocks
|
||||
#[tokio::test]
|
||||
async fn verify_header_signature_fork_block_bug() {
|
||||
|
||||
@@ -573,3 +573,121 @@ async fn prepare_payload_on_fork_boundary(
|
||||
advanced state"
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn gloas_block_production_caches_blobs_for_column_publishing() {
|
||||
use beacon_chain::ProduceBlockVerification;
|
||||
use beacon_chain::graffiti_calculator::GraffitiSettings;
|
||||
use eth2::types::GraffitiPolicy;
|
||||
|
||||
let spec = Arc::new(test_spec::<E>());
|
||||
if !spec.fork_name_at_slot::<E>(Slot::new(0)).gloas_enabled() {
|
||||
return;
|
||||
}
|
||||
|
||||
let db_path = tempdir().unwrap();
|
||||
let store = get_store(&db_path, spec.clone());
|
||||
let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT);
|
||||
|
||||
// Configure the mock EL to produce at least 1 blob per block.
|
||||
harness.execution_block_generator().set_min_blob_count(1);
|
||||
|
||||
// Extend the chain a few slots to get past genesis.
|
||||
harness
|
||||
.extend_chain(
|
||||
(E::slots_per_epoch() as usize) + 1,
|
||||
BlockStrategy::OnCanonicalHead,
|
||||
AttestationStrategy::AllValidators,
|
||||
)
|
||||
.await;
|
||||
|
||||
harness.advance_slot();
|
||||
let slot = harness.get_current_slot();
|
||||
|
||||
// Produce a Gloas block directly via produce_block_on_state_gloas so we can
|
||||
// inspect the pending cache before it's consumed.
|
||||
let mut state = harness.get_current_state();
|
||||
complete_state_advance(&mut state, None, slot, &spec).unwrap();
|
||||
state.build_caches(&spec).unwrap();
|
||||
|
||||
let proposer_index = state.get_beacon_proposer_index(slot, &spec).unwrap();
|
||||
let randao_reveal = harness.sign_randao_reveal(&state, proposer_index, slot);
|
||||
|
||||
let (parent_payload_status, parent_envelope) = {
|
||||
let head = harness.chain.canonical_head.cached_head();
|
||||
(
|
||||
head.head_payload_status(),
|
||||
head.snapshot.execution_envelope.clone(),
|
||||
)
|
||||
};
|
||||
|
||||
let graffiti_settings = GraffitiSettings::new(
|
||||
Some(Graffiti::default()),
|
||||
Some(GraffitiPolicy::PreserveUserGraffiti),
|
||||
);
|
||||
|
||||
let (_block, _post_state, _value) = harness
|
||||
.chain
|
||||
.produce_block_on_state_gloas(
|
||||
state,
|
||||
None,
|
||||
parent_payload_status,
|
||||
parent_envelope,
|
||||
slot,
|
||||
randao_reveal,
|
||||
graffiti_settings,
|
||||
ProduceBlockVerification::VerifyRandao,
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
// The envelope + blobs should now be in the pending cache.
|
||||
assert!(
|
||||
harness
|
||||
.chain
|
||||
.pending_payload_envelopes
|
||||
.read()
|
||||
.contains(slot),
|
||||
"Pending cache should contain an envelope for the produced slot"
|
||||
);
|
||||
|
||||
// Take the blobs from the cache — this is what publish_execution_payload_envelope does.
|
||||
let blobs = harness
|
||||
.chain
|
||||
.pending_payload_envelopes
|
||||
.write()
|
||||
.take_blobs(slot);
|
||||
|
||||
assert!(
|
||||
blobs.is_some(),
|
||||
"Blobs should be cached alongside the envelope"
|
||||
);
|
||||
|
||||
let blobs = blobs.unwrap();
|
||||
assert!(
|
||||
!blobs.is_empty(),
|
||||
"Blobs should be non-empty when min_blob_count >= 1"
|
||||
);
|
||||
|
||||
// Verify take_blobs is consume-once.
|
||||
let second_take = harness
|
||||
.chain
|
||||
.pending_payload_envelopes
|
||||
.write()
|
||||
.take_blobs(slot);
|
||||
assert!(
|
||||
second_take.is_none(),
|
||||
"Blobs should only be consumable once"
|
||||
);
|
||||
|
||||
// The envelope should still be in the cache after taking blobs.
|
||||
assert!(
|
||||
harness
|
||||
.chain
|
||||
.pending_payload_envelopes
|
||||
.read()
|
||||
.get(slot)
|
||||
.is_some(),
|
||||
"Envelope should remain in cache after taking blobs"
|
||||
);
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user