mirror of
https://github.com/sigp/lighthouse.git
synced 2026-03-21 22:04:44 +00:00
Remove KZG verification from local block production and blobs fetched from the EL (#7713)
#7700 As described in the title, the EL already performs KZG verification on all blobs when they enter the mempool, so it is redundant to perform extra validation on blobs returned from the EL. This PR removes: (1) KZG verification for both blobs and data columns during block production; (2) KZG verification for data columns after the fetch-engine-blobs call. I have not done the same for blobs, because it requires extra changes to check the observed cache, and it does not seem a worthwhile optimisation given the small number of blobs per block. This PR does not yet remove KZG verification on the block publishing path.
This commit is contained in:
@@ -1,17 +1,16 @@
|
||||
use crate::blob_verification::{GossipBlobError, GossipVerifiedBlob};
|
||||
use crate::data_column_verification::KzgVerifiedDataColumn;
|
||||
use crate::fetch_blobs::{EngineGetBlobsOutput, FetchEngineBlobError};
|
||||
use crate::observed_block_producers::ProposalKey;
|
||||
use crate::observed_data_sidecars::DoNotObserve;
|
||||
use crate::{AvailabilityProcessingStatus, BeaconChain, BeaconChainTypes};
|
||||
use execution_layer::json_structures::{BlobAndProofV1, BlobAndProofV2};
|
||||
use kzg::{Error as KzgError, Kzg};
|
||||
use kzg::Kzg;
|
||||
#[cfg(test)]
|
||||
use mockall::automock;
|
||||
use std::collections::HashSet;
|
||||
use std::sync::Arc;
|
||||
use task_executor::TaskExecutor;
|
||||
use types::{BlobSidecar, ChainSpec, ColumnIndex, DataColumnSidecar, Hash256, Slot};
|
||||
use types::{BlobSidecar, ChainSpec, ColumnIndex, Hash256, Slot};
|
||||
|
||||
/// An adapter to the `BeaconChain` functionalities to remove `BeaconChain` from direct dependency to enable testing fetch blobs logic.
|
||||
pub(crate) struct FetchBlobsBeaconAdapter<T: BeaconChainTypes> {
|
||||
@@ -77,14 +76,7 @@ impl<T: BeaconChainTypes> FetchBlobsBeaconAdapter<T> {
|
||||
GossipVerifiedBlob::<T, DoNotObserve>::new(blob.clone(), blob.index, &self.chain)
|
||||
}
|
||||
|
||||
pub(crate) fn verify_data_columns_kzg(
|
||||
&self,
|
||||
data_columns: Vec<Arc<DataColumnSidecar<T::EthSpec>>>,
|
||||
) -> Result<Vec<KzgVerifiedDataColumn<T::EthSpec>>, KzgError> {
|
||||
KzgVerifiedDataColumn::from_batch(data_columns, &self.chain.kzg)
|
||||
}
|
||||
|
||||
pub(crate) fn known_for_proposal(
|
||||
pub(crate) fn data_column_known_for_proposal(
|
||||
&self,
|
||||
proposal_key: ProposalKey,
|
||||
) -> Option<HashSet<ColumnIndex>> {
|
||||
|
||||
@@ -14,7 +14,7 @@ mod tests;
|
||||
|
||||
use crate::blob_verification::{GossipBlobError, GossipVerifiedBlob};
|
||||
use crate::block_verification_types::AsBlock;
|
||||
use crate::data_column_verification::KzgVerifiedCustodyDataColumn;
|
||||
use crate::data_column_verification::{KzgVerifiedCustodyDataColumn, KzgVerifiedDataColumn};
|
||||
#[cfg_attr(test, double)]
|
||||
use crate::fetch_blobs::fetch_blobs_beacon_adapter::FetchBlobsBeaconAdapter;
|
||||
use crate::kzg_utils::blobs_to_data_column_sidecars;
|
||||
@@ -311,6 +311,9 @@ async fn fetch_and_process_blobs_v2<T: BeaconChainTypes>(
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
// Up until this point we have not observed the data columns in the gossip cache, which allows
|
||||
// them to arrive independently while this function is running. In publish_fn we will observe
|
||||
// them and then publish any columns that had not already been observed.
|
||||
publish_fn(EngineGetBlobsOutput::CustodyColumns(
|
||||
custody_columns_to_import.clone(),
|
||||
));
|
||||
@@ -358,17 +361,24 @@ async fn compute_custody_columns_to_import<T: BeaconChainTypes>(
|
||||
// `DataAvailabilityChecker` requires a strict match on custody columns count to
|
||||
// consider a block available.
|
||||
let mut custody_columns = data_columns_result
|
||||
.map(|mut data_columns| {
|
||||
data_columns.retain(|col| custody_columns_indices.contains(&col.index));
|
||||
.map(|data_columns| {
|
||||
data_columns
|
||||
.into_iter()
|
||||
.filter(|col| custody_columns_indices.contains(&col.index))
|
||||
.map(|col| {
|
||||
KzgVerifiedCustodyDataColumn::from_asserted_custody(
|
||||
KzgVerifiedDataColumn::from_execution_verified(col),
|
||||
)
|
||||
})
|
||||
.collect::<Vec<_>>()
|
||||
})
|
||||
.map_err(FetchEngineBlobError::DataColumnSidecarError)?;
|
||||
|
||||
// Only consider columns that are not already observed on gossip.
|
||||
if let Some(observed_columns) = chain_adapter_cloned.known_for_proposal(
|
||||
if let Some(observed_columns) = chain_adapter_cloned.data_column_known_for_proposal(
|
||||
ProposalKey::new(block.message().proposer_index(), block.slot()),
|
||||
) {
|
||||
custody_columns.retain(|col| !observed_columns.contains(&col.index));
|
||||
custody_columns.retain(|col| !observed_columns.contains(&col.index()));
|
||||
if custody_columns.is_empty() {
|
||||
return Ok(vec![]);
|
||||
}
|
||||
@@ -378,26 +388,13 @@ async fn compute_custody_columns_to_import<T: BeaconChainTypes>(
|
||||
if let Some(known_columns) =
|
||||
chain_adapter_cloned.cached_data_column_indexes(&block_root)
|
||||
{
|
||||
custody_columns.retain(|col| !known_columns.contains(&col.index));
|
||||
custody_columns.retain(|col| !known_columns.contains(&col.index()));
|
||||
if custody_columns.is_empty() {
|
||||
return Ok(vec![]);
|
||||
}
|
||||
}
|
||||
|
||||
// KZG verify data columns before publishing. This prevents blobs with invalid
|
||||
// KZG proofs from the EL making it into the data availability checker. We do not
|
||||
// immediately add these blobs to the observed blobs/columns cache because we want
|
||||
// to allow blobs/columns to arrive on gossip and be accepted (and propagated) while
|
||||
// we are waiting to publish. Just before publishing we will observe the blobs/columns
|
||||
// and only proceed with publishing if they are not yet seen.
|
||||
let verified = chain_adapter_cloned
|
||||
.verify_data_columns_kzg(custody_columns)
|
||||
.map_err(FetchEngineBlobError::KzgError)?;
|
||||
|
||||
Ok(verified
|
||||
.into_iter()
|
||||
.map(KzgVerifiedCustodyDataColumn::from_asserted_custody)
|
||||
.collect())
|
||||
Ok(custody_columns)
|
||||
},
|
||||
"compute_custody_columns_to_import",
|
||||
)
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
use crate::data_column_verification::KzgVerifiedDataColumn;
|
||||
use crate::fetch_blobs::fetch_blobs_beacon_adapter::MockFetchBlobsBeaconAdapter;
|
||||
use crate::fetch_blobs::{
|
||||
fetch_and_process_engine_blobs_inner, EngineGetBlobsOutput, FetchEngineBlobError,
|
||||
@@ -156,7 +155,7 @@ mod get_blobs_v2 {
|
||||
mock_fork_choice_contains_block(&mut mock_adapter, vec![]);
|
||||
// All data columns already seen on gossip
|
||||
mock_adapter
|
||||
.expect_known_for_proposal()
|
||||
.expect_data_column_known_for_proposal()
|
||||
.returning(|_| Some(hashset![0, 1, 2]));
|
||||
// No blobs should be processed
|
||||
mock_adapter.expect_process_engine_blobs().times(0);
|
||||
@@ -192,17 +191,12 @@ mod get_blobs_v2 {
|
||||
// All blobs returned, fork choice doesn't contain block
|
||||
mock_get_blobs_v2_response(&mut mock_adapter, Some(blobs_and_proofs));
|
||||
mock_fork_choice_contains_block(&mut mock_adapter, vec![]);
|
||||
mock_adapter.expect_known_for_proposal().returning(|_| None);
|
||||
mock_adapter
|
||||
.expect_data_column_known_for_proposal()
|
||||
.returning(|_| None);
|
||||
mock_adapter
|
||||
.expect_cached_data_column_indexes()
|
||||
.returning(|_| None);
|
||||
mock_adapter
|
||||
.expect_verify_data_columns_kzg()
|
||||
.returning(|c| {
|
||||
Ok(c.into_iter()
|
||||
.map(KzgVerifiedDataColumn::__new_for_testing)
|
||||
.collect())
|
||||
});
|
||||
mock_process_engine_blobs_result(
|
||||
&mut mock_adapter,
|
||||
Ok(AvailabilityProcessingStatus::Imported(block_root)),
|
||||
|
||||
Reference in New Issue
Block a user