Prevent AvailabilityCheckError when there's no new custody columns to import (#7533)

Addresses a regression recently introduced when we started gossip-verifying data columns computed from EL blobs.

```
failures:
network_beacon_processor::tests::accept_processed_gossip_data_columns_without_import

test result: FAILED. 0 passed; 1 failed; 0 ignored; 0 measured; 90 filtered out; finished in 16.60s

stderr ───

thread 'network_beacon_processor::tests::accept_processed_gossip_data_columns_without_import' panicked at beacon_node/network/src/network_beacon_processor/tests.rs:829:10:
should put data columns into availability cache: Unexpected("empty columns")
note: run with `RUST_BACKTRACE=1` environment variable to display a backtrace
```

https://github.com/sigp/lighthouse/actions/runs/15309278812/job/43082341868?pr=7521

If an empty `Vec` is passed to the DA checker, it causes an unexpected error.

This PR addresses it by returning early when there are no new custody columns to import, so we neither pass an empty `Vec` to the DA checker for processing nor spawn a task to publish.
This commit is contained in:
Jimmy Chen
2025-05-29 12:54:34 +10:00
committed by GitHub
parent 5cda6a6f9e
commit 4d21846aba
2 changed files with 63 additions and 12 deletions

View File

@@ -264,6 +264,7 @@ async fn fetch_and_process_blobs_v2<T: BeaconChainTypes>(
return Ok(None);
}
debug!(num_fetched_blobs, "All expected blobs received from the EL");
inc_counter(&metrics::BLOBS_FROM_EL_HIT_TOTAL);
if chain_adapter.fork_choice_contains_block(&block_root) {
@@ -276,23 +277,32 @@ async fn fetch_and_process_blobs_v2<T: BeaconChainTypes>(
}
let chain_adapter = Arc::new(chain_adapter);
let custody_columns = compute_and_publish_data_columns(
let custody_columns_to_import = compute_custody_columns_to_import(
&chain_adapter,
block.clone(),
blobs,
proofs,
custody_columns_indices,
publish_fn,
)
.await?;
debug!(num_fetched_blobs, "Processing engine blobs");
if custody_columns_to_import.is_empty() {
debug!(
info = "No new data columns to import",
"Ignoring EL blobs response"
);
return Ok(None);
}
publish_fn(EngineGetBlobsOutput::CustodyColumns(
custody_columns_to_import.clone(),
));
let availability_processing_status = chain_adapter
.process_engine_blobs(
block.slot(),
block_root,
EngineGetBlobsOutput::CustodyColumns(custody_columns),
EngineGetBlobsOutput::CustodyColumns(custody_columns_to_import),
)
.await?;
@@ -300,13 +310,12 @@ async fn fetch_and_process_blobs_v2<T: BeaconChainTypes>(
}
/// Offload the data column computation to a blocking task to avoid holding up the async runtime.
async fn compute_and_publish_data_columns<T: BeaconChainTypes>(
async fn compute_custody_columns_to_import<T: BeaconChainTypes>(
chain_adapter: &Arc<FetchBlobsBeaconAdapter<T>>,
block: Arc<SignedBeaconBlock<T::EthSpec, FullPayload<T::EthSpec>>>,
blobs: Vec<Blob<T::EthSpec>>,
proofs: Vec<KzgProofs<T::EthSpec>>,
custody_columns_indices: HashSet<ColumnIndex>,
publish_fn: impl Fn(EngineGetBlobsOutput<T>) + Send + 'static,
) -> Result<Vec<GossipVerifiedDataColumn<T, DoNotObserve>>, FetchEngineBlobError> {
let kzg = chain_adapter.kzg().clone();
let spec = chain_adapter.spec().clone();
@@ -380,13 +389,9 @@ async fn compute_and_publish_data_columns<T: BeaconChainTypes>(
.collect::<Result<Vec<_>, _>>()
.map_err(FetchEngineBlobError::GossipDataColumn)?;
publish_fn(EngineGetBlobsOutput::CustodyColumns(
columns_to_import_and_publish.clone(),
));
Ok(columns_to_import_and_publish)
},
"compute_and_publish_data_columns",
"compute_custody_columns_to_import",
)
.ok_or(FetchEngineBlobError::RuntimeShutdown)?
.await