mirror of
https://github.com/sigp/lighthouse.git
synced 2026-04-28 02:03:32 +00:00
Fix unexpected blob error and duplicate import in fetch blobs (#7541)
Getting this error on a non-PeerDAS network:
```
May 29 13:30:13.484 ERROR Error fetching or processing blobs from EL error: BlobProcessingError(AvailabilityCheck(Unexpected("empty blobs"))), block_root: 0x98aa3927056d453614fefbc79eb1f9865666d1f119d0e8aa9e6f4d02aa9395d9
```
It appears we're passing an empty `Vec` to the DA checker because all blobs were already seen on gossip and filtered out; this causes an `AvailabilityCheckError::Unexpected("empty blobs")`.
I've added equivalent unit tests for `getBlobsV1` to cover all the scenarios we test in `getBlobsV2`. This would have caught the bug if I had added it earlier. It also caught another bug that could trigger a duplicate block import.
Thanks Santito for reporting this! 🙏
This commit is contained in:
@@ -42,6 +42,7 @@ use types::{
|
||||
/// Result from engine get blobs to be passed onto `DataAvailabilityChecker` and published to the
|
||||
/// gossip network. The blobs / data columns have not been marked as observed yet, as they may not
|
||||
/// be published immediately.
|
||||
#[derive(Debug)]
|
||||
pub enum EngineGetBlobsOutput<T: BeaconChainTypes> {
|
||||
Blobs(Vec<GossipVerifiedBlob<T, DoNotObserve>>),
|
||||
/// A filtered list of custody data columns to be imported into the `DataAvailabilityChecker`.
|
||||
@@ -163,9 +164,22 @@ async fn fetch_and_process_blobs_v1<T: BeaconChainTypes>(
|
||||
inc_counter(&metrics::BLOBS_FROM_EL_MISS_TOTAL);
|
||||
return Ok(None);
|
||||
} else {
|
||||
debug!(
|
||||
num_expected_blobs,
|
||||
num_fetched_blobs, "Received blobs from the EL"
|
||||
);
|
||||
inc_counter(&metrics::BLOBS_FROM_EL_HIT_TOTAL);
|
||||
}
|
||||
|
||||
if chain_adapter.fork_choice_contains_block(&block_root) {
|
||||
// Avoid computing sidecars if the block has already been imported.
|
||||
debug!(
|
||||
info = "block has already been imported",
|
||||
"Ignoring EL blobs response"
|
||||
);
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
let (signed_block_header, kzg_commitments_proof) = block
|
||||
.signed_block_header_and_kzg_commitments_proof()
|
||||
.map_err(FetchEngineBlobError::BeaconStateError)?;
|
||||
@@ -197,13 +211,13 @@ async fn fetch_and_process_blobs_v1<T: BeaconChainTypes>(
|
||||
.collect::<Result<Vec<_>, _>>()
|
||||
.map_err(FetchEngineBlobError::GossipBlob)?;
|
||||
|
||||
if !blobs_to_import_and_publish.is_empty() {
|
||||
publish_fn(EngineGetBlobsOutput::Blobs(
|
||||
blobs_to_import_and_publish.clone(),
|
||||
));
|
||||
if blobs_to_import_and_publish.is_empty() {
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
debug!(num_fetched_blobs, "Processing engine blobs");
|
||||
publish_fn(EngineGetBlobsOutput::Blobs(
|
||||
blobs_to_import_and_publish.clone(),
|
||||
));
|
||||
|
||||
let availability_processing_status = chain_adapter
|
||||
.process_engine_blobs(
|
||||
|
||||
Reference in New Issue
Block a user