Use rayon to speed up batch KZG verification (#7921)

Addresses #7866.


  Use Rayon to speed up batch KZG verification during range / backfill sync.

While analysing the traces, I also discovered a bug that caused only the first 128 columns in a chain segment batch to be KZG verified. This PR fixes it, so we might actually observe slower range sync due to more cells now being KZG verified.

I've also updated the handling of batch KZG failure to find only the first invalid KZG column (rather than all invalid columns) when verification fails, as re-verifying every column individually gets very expensive during range/backfill sync.
This commit is contained in:
Jimmy Chen
2025-08-29 10:59:40 +10:00
committed by GitHub
parent b6792d85d2
commit a134d43446
10 changed files with 140 additions and 129 deletions

2
Cargo.lock generated
View File

@@ -5013,9 +5013,11 @@ dependencies = [
"ethereum_ssz", "ethereum_ssz",
"ethereum_ssz_derive", "ethereum_ssz_derive",
"hex", "hex",
"rayon",
"rust_eth_kzg", "rust_eth_kzg",
"serde", "serde",
"serde_json", "serde_json",
"tracing",
"tree_hash", "tree_hash",
] ]

View File

@@ -377,7 +377,7 @@ where
.store .store
.get_hot_state(&self.justified_state_root, update_cache) .get_hot_state(&self.justified_state_root, update_cache)
.map_err(Error::FailedToReadState)? .map_err(Error::FailedToReadState)?
.ok_or_else(|| Error::MissingState(self.justified_state_root))?; .ok_or(Error::MissingState(self.justified_state_root))?;
self.justified_balances = JustifiedBalances::from_justified_state(&state)?; self.justified_balances = JustifiedBalances::from_justified_state(&state)?;
} }

View File

@@ -29,7 +29,7 @@ mod state_lru_cache;
use crate::data_column_verification::{ use crate::data_column_verification::{
CustodyDataColumn, GossipVerifiedDataColumn, KzgVerifiedCustodyDataColumn, CustodyDataColumn, GossipVerifiedDataColumn, KzgVerifiedCustodyDataColumn,
KzgVerifiedDataColumn, verify_kzg_for_data_column_list_with_scoring, KzgVerifiedDataColumn, verify_kzg_for_data_column_list,
}; };
use crate::metrics::{ use crate::metrics::{
KZG_DATA_COLUMN_RECONSTRUCTION_ATTEMPTS, KZG_DATA_COLUMN_RECONSTRUCTION_FAILURES, KZG_DATA_COLUMN_RECONSTRUCTION_ATTEMPTS, KZG_DATA_COLUMN_RECONSTRUCTION_FAILURES,
@@ -378,7 +378,7 @@ impl<T: BeaconChainTypes> DataAvailabilityChecker<T> {
} }
if self.data_columns_required_for_block(&block) { if self.data_columns_required_for_block(&block) {
return if let Some(data_column_list) = data_columns.as_ref() { return if let Some(data_column_list) = data_columns.as_ref() {
verify_kzg_for_data_column_list_with_scoring( verify_kzg_for_data_column_list(
data_column_list data_column_list
.iter() .iter()
.map(|custody_column| custody_column.as_data_column()), .map(|custody_column| custody_column.as_data_column()),
@@ -449,7 +449,7 @@ impl<T: BeaconChainTypes> DataAvailabilityChecker<T> {
// verify kzg for all data columns at once // verify kzg for all data columns at once
if !all_data_columns.is_empty() { if !all_data_columns.is_empty() {
// Attributes fault to the specific peer that sent an invalid column // Attributes fault to the specific peer that sent an invalid column
verify_kzg_for_data_column_list_with_scoring(all_data_columns.iter(), &self.kzg) verify_kzg_for_data_column_list(all_data_columns.iter(), &self.kzg)
.map_err(AvailabilityCheckError::InvalidColumn)?; .map_err(AvailabilityCheckError::InvalidColumn)?;
} }

View File

@@ -4,7 +4,7 @@ use types::{BeaconStateError, ColumnIndex, Hash256};
#[derive(Debug)] #[derive(Debug)]
pub enum Error { pub enum Error {
InvalidBlobs(KzgError), InvalidBlobs(KzgError),
InvalidColumn(Vec<(ColumnIndex, KzgError)>), InvalidColumn((Option<ColumnIndex>, KzgError)),
ReconstructColumnsError(KzgError), ReconstructColumnsError(KzgError),
KzgCommitmentMismatch { KzgCommitmentMismatch {
blob_commitment: KzgCommitment, blob_commitment: KzgCommitment,

View File

@@ -263,7 +263,10 @@ pub struct KzgVerifiedDataColumn<E: EthSpec> {
} }
impl<E: EthSpec> KzgVerifiedDataColumn<E> { impl<E: EthSpec> KzgVerifiedDataColumn<E> {
pub fn new(data_column: Arc<DataColumnSidecar<E>>, kzg: &Kzg) -> Result<Self, KzgError> { pub fn new(
data_column: Arc<DataColumnSidecar<E>>,
kzg: &Kzg,
) -> Result<Self, (Option<ColumnIndex>, KzgError)> {
verify_kzg_for_data_column(data_column, kzg) verify_kzg_for_data_column(data_column, kzg)
} }
@@ -278,22 +281,11 @@ impl<E: EthSpec> KzgVerifiedDataColumn<E> {
Self { data: data_column } Self { data: data_column }
} }
pub fn from_batch(
data_columns: Vec<Arc<DataColumnSidecar<E>>>,
kzg: &Kzg,
) -> Result<Vec<Self>, KzgError> {
verify_kzg_for_data_column_list(data_columns.iter(), kzg)?;
Ok(data_columns
.into_iter()
.map(|column| Self { data: column })
.collect())
}
pub fn from_batch_with_scoring( pub fn from_batch_with_scoring(
data_columns: Vec<Arc<DataColumnSidecar<E>>>, data_columns: Vec<Arc<DataColumnSidecar<E>>>,
kzg: &Kzg, kzg: &Kzg,
) -> Result<Vec<Self>, Vec<(ColumnIndex, KzgError)>> { ) -> Result<Vec<Self>, (Option<ColumnIndex>, KzgError)> {
verify_kzg_for_data_column_list_with_scoring(data_columns.iter(), kzg)?; verify_kzg_for_data_column_list(data_columns.iter(), kzg)?;
Ok(data_columns Ok(data_columns
.into_iter() .into_iter()
.map(|column| Self { data: column }) .map(|column| Self { data: column })
@@ -367,7 +359,10 @@ impl<E: EthSpec> KzgVerifiedCustodyDataColumn<E> {
} }
/// Verify a column already marked as custody column /// Verify a column already marked as custody column
pub fn new(data_column: CustodyDataColumn<E>, kzg: &Kzg) -> Result<Self, KzgError> { pub fn new(
data_column: CustodyDataColumn<E>,
kzg: &Kzg,
) -> Result<Self, (Option<ColumnIndex>, KzgError)> {
verify_kzg_for_data_column(data_column.clone_arc(), kzg)?; verify_kzg_for_data_column(data_column.clone_arc(), kzg)?;
Ok(Self { Ok(Self {
data: data_column.data, data: data_column.data,
@@ -418,22 +413,21 @@ impl<E: EthSpec> KzgVerifiedCustodyDataColumn<E> {
pub fn verify_kzg_for_data_column<E: EthSpec>( pub fn verify_kzg_for_data_column<E: EthSpec>(
data_column: Arc<DataColumnSidecar<E>>, data_column: Arc<DataColumnSidecar<E>>,
kzg: &Kzg, kzg: &Kzg,
) -> Result<KzgVerifiedDataColumn<E>, KzgError> { ) -> Result<KzgVerifiedDataColumn<E>, (Option<ColumnIndex>, KzgError)> {
let _timer = metrics::start_timer(&metrics::KZG_VERIFICATION_DATA_COLUMN_SINGLE_TIMES); let _timer = metrics::start_timer(&metrics::KZG_VERIFICATION_DATA_COLUMN_SINGLE_TIMES);
validate_data_columns(kzg, iter::once(&data_column))?; validate_data_columns(kzg, iter::once(&data_column))?;
Ok(KzgVerifiedDataColumn { data: data_column }) Ok(KzgVerifiedDataColumn { data: data_column })
} }
/// Complete kzg verification for a list of `DataColumnSidecar`s. /// Complete kzg verification for a list of `DataColumnSidecar`s.
/// Returns an error if any of the `DataColumnSidecar`s fails kzg verification. /// Returns an error for the first `DataColumnSidecar`s that fails kzg verification.
/// ///
/// Note: This function should be preferred over calling `verify_kzg_for_data_column` /// Note: This function should be preferred over calling `verify_kzg_for_data_column`
/// in a loop since this function kzg verifies a list of data columns more efficiently. /// in a loop since this function kzg verifies a list of data columns more efficiently.
#[instrument(skip_all, level = "debug")]
pub fn verify_kzg_for_data_column_list<'a, E: EthSpec, I>( pub fn verify_kzg_for_data_column_list<'a, E: EthSpec, I>(
data_column_iter: I, data_column_iter: I,
kzg: &'a Kzg, kzg: &'a Kzg,
) -> Result<(), KzgError> ) -> Result<(), (Option<ColumnIndex>, KzgError)>
where where
I: Iterator<Item = &'a Arc<DataColumnSidecar<E>>> + Clone, I: Iterator<Item = &'a Arc<DataColumnSidecar<E>>> + Clone,
{ {
@@ -442,38 +436,6 @@ where
Ok(()) Ok(())
} }
/// Complete kzg verification for a list of `DataColumnSidecar`s.
///
/// If there's at least one invalid column, it re-verifies all columns individually to identify the
/// first column that is invalid. This is necessary to attribute fault to the specific peer that
/// sent bad data. The re-verification cost should not be significant. If a peer sends invalid data it
/// will be quickly banned.
pub fn verify_kzg_for_data_column_list_with_scoring<'a, E: EthSpec, I>(
data_column_iter: I,
kzg: &'a Kzg,
) -> Result<(), Vec<(ColumnIndex, KzgError)>>
where
I: Iterator<Item = &'a Arc<DataColumnSidecar<E>>> + Clone,
{
if verify_kzg_for_data_column_list(data_column_iter.clone(), kzg).is_ok() {
return Ok(());
};
// Find all columns that are invalid and identify by index. If we hit this condition there
// should be at least one invalid column
let errors = data_column_iter
.filter_map(|data_column| {
if let Err(e) = verify_kzg_for_data_column(data_column.clone(), kzg) {
Some((data_column.index, e))
} else {
None
}
})
.collect::<Vec<_>>();
Err(errors)
}
#[instrument(skip_all, level = "debug")] #[instrument(skip_all, level = "debug")]
pub fn validate_data_column_sidecar_for_gossip<T: BeaconChainTypes, O: ObservationStrategy>( pub fn validate_data_column_sidecar_for_gossip<T: BeaconChainTypes, O: ObservationStrategy>(
data_column: Arc<DataColumnSidecar<T::EthSpec>>, data_column: Arc<DataColumnSidecar<T::EthSpec>>,
@@ -509,7 +471,7 @@ pub fn validate_data_column_sidecar_for_gossip<T: BeaconChainTypes, O: Observati
verify_proposer_and_signature(&data_column, &parent_block, chain)?; verify_proposer_and_signature(&data_column, &parent_block, chain)?;
let kzg = &chain.kzg; let kzg = &chain.kzg;
let kzg_verified_data_column = verify_kzg_for_data_column(data_column.clone(), kzg) let kzg_verified_data_column = verify_kzg_for_data_column(data_column.clone(), kzg)
.map_err(GossipDataColumnError::InvalidKzgProof)?; .map_err(|(_, e)| GossipDataColumnError::InvalidKzgProof(e))?;
chain chain
.observed_slashable .observed_slashable

View File

@@ -1,6 +1,6 @@
use kzg::{ use kzg::{
Blob as KzgBlob, Bytes48, CELLS_PER_EXT_BLOB, Cell as KzgCell, CellRef as KzgCellRef, Blob as KzgBlob, Bytes48, Cell as KzgCell, CellRef as KzgCellRef, CellsAndKzgProofs,
CellsAndKzgProofs, Error as KzgError, Kzg, Error as KzgError, Kzg,
}; };
use rayon::prelude::*; use rayon::prelude::*;
use ssz_types::{FixedVector, VariableList}; use ssz_types::{FixedVector, VariableList};
@@ -45,38 +45,11 @@ pub fn validate_blob<E: EthSpec>(
kzg.verify_blob_kzg_proof(&kzg_blob, kzg_commitment, kzg_proof) kzg.verify_blob_kzg_proof(&kzg_blob, kzg_commitment, kzg_proof)
} }
/// Validates a list of blobs along with their corresponding KZG commitments and
/// cell proofs for the extended blobs.
pub fn validate_blobs_and_cell_proofs<E: EthSpec>(
kzg: &Kzg,
blobs: Vec<&Blob<E>>,
cell_proofs: &[KzgProof],
kzg_commitments: &KzgCommitments<E>,
) -> Result<(), KzgError> {
let cells = compute_cells::<E>(&blobs, kzg)?;
let cell_refs = cells.iter().map(|cell| cell.as_ref()).collect::<Vec<_>>();
let cell_indices = (0..blobs.len())
.flat_map(|_| 0..CELLS_PER_EXT_BLOB as u64)
.collect::<Vec<_>>();
let proofs = cell_proofs
.iter()
.map(|&proof| Bytes48::from(proof))
.collect::<Vec<_>>();
let commitments = kzg_commitments
.iter()
.flat_map(|&commitment| std::iter::repeat_n(Bytes48::from(commitment), CELLS_PER_EXT_BLOB))
.collect::<Vec<_>>();
kzg.verify_cell_proof_batch(&cell_refs, &proofs, cell_indices, &commitments)
}
/// Validate a batch of `DataColumnSidecar`. /// Validate a batch of `DataColumnSidecar`.
pub fn validate_data_columns<'a, E: EthSpec, I>( pub fn validate_data_columns<'a, E: EthSpec, I>(
kzg: &Kzg, kzg: &Kzg,
data_column_iter: I, data_column_iter: I,
) -> Result<(), KzgError> ) -> Result<(), (Option<u64>, KzgError)>
where where
I: Iterator<Item = &'a Arc<DataColumnSidecar<E>>> + Clone, I: Iterator<Item = &'a Arc<DataColumnSidecar<E>>> + Clone,
{ {
@@ -88,8 +61,12 @@ where
for data_column in data_column_iter { for data_column in data_column_iter {
let col_index = data_column.index; let col_index = data_column.index;
if data_column.column.is_empty() {
return Err((Some(col_index), KzgError::KzgVerificationFailed));
}
for cell in &data_column.column { for cell in &data_column.column {
cells.push(ssz_cell_to_crypto_cell::<E>(cell)?); cells.push(ssz_cell_to_crypto_cell::<E>(cell).map_err(|e| (Some(col_index), e))?);
column_indices.push(col_index); column_indices.push(col_index);
} }
@@ -100,6 +77,19 @@ where
for &commitment in &data_column.kzg_commitments { for &commitment in &data_column.kzg_commitments {
commitments.push(Bytes48::from(commitment)); commitments.push(Bytes48::from(commitment));
} }
let expected_len = column_indices.len();
// We make this check at each iteration so that the error is attributable to a specific column
if cells.len() != expected_len
|| proofs.len() != expected_len
|| commitments.len() != expected_len
{
return Err((
Some(col_index),
KzgError::InconsistentArrayLength("Invalid data column".to_string()),
));
}
} }
kzg.verify_cell_proof_batch(&cells, &proofs, column_indices, &commitments) kzg.verify_cell_proof_batch(&cells, &proofs, column_indices, &commitments)
@@ -418,7 +408,7 @@ pub fn reconstruct_data_columns<E: EthSpec>(
mod test { mod test {
use crate::kzg_utils::{ use crate::kzg_utils::{
blobs_to_data_column_sidecars, reconstruct_blobs, reconstruct_data_columns, blobs_to_data_column_sidecars, reconstruct_blobs, reconstruct_data_columns,
validate_blobs_and_cell_proofs, validate_data_columns,
}; };
use bls::Signature; use bls::Signature;
use eth2::types::BlobsBundle; use eth2::types::BlobsBundle;
@@ -442,21 +432,20 @@ mod test {
test_build_data_columns(&kzg, &spec); test_build_data_columns(&kzg, &spec);
test_reconstruct_data_columns(&kzg, &spec); test_reconstruct_data_columns(&kzg, &spec);
test_reconstruct_blobs_from_data_columns(&kzg, &spec); test_reconstruct_blobs_from_data_columns(&kzg, &spec);
test_verify_blob_and_cell_proofs(&kzg); test_validate_data_columns(&kzg, &spec);
} }
#[track_caller] #[track_caller]
fn test_verify_blob_and_cell_proofs(kzg: &Kzg) { fn test_validate_data_columns(kzg: &Kzg, spec: &ChainSpec) {
let (blobs_bundle, _) = generate_blobs::<E>(3, ForkName::Fulu).unwrap(); let num_of_blobs = 6;
let BlobsBundle { let (signed_block, blobs, proofs) =
blobs, create_test_fulu_block_and_blobs::<E>(num_of_blobs, spec);
commitments, let blob_refs = blobs.iter().collect::<Vec<_>>();
proofs, let column_sidecars =
} = blobs_bundle; blobs_to_data_column_sidecars(&blob_refs, proofs.to_vec(), &signed_block, kzg, spec)
.unwrap();
let result =
validate_blobs_and_cell_proofs::<E>(kzg, blobs.iter().collect(), &proofs, &commitments);
let result = validate_data_columns::<E, _>(kzg, column_sidecars.iter());
assert!(result.is_ok()); assert!(result.is_ok());
} }

View File

@@ -36,7 +36,6 @@ use beacon_chain::data_availability_checker::{
use beacon_chain::{AvailabilityProcessingStatus, BeaconChainTypes, BlockError}; use beacon_chain::{AvailabilityProcessingStatus, BeaconChainTypes, BlockError};
pub use common::RequestState; pub use common::RequestState;
use fnv::FnvHashMap; use fnv::FnvHashMap;
use itertools::Itertools;
use lighthouse_network::service::api_types::SingleLookupReqId; use lighthouse_network::service::api_types::SingleLookupReqId;
use lighthouse_network::{PeerAction, PeerId}; use lighthouse_network::{PeerAction, PeerId};
use lru_cache::LRUTimeCache; use lru_cache::LRUTimeCache;
@@ -653,15 +652,15 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
// but future errors may follow the same pattern. Generalize this // but future errors may follow the same pattern. Generalize this
// pattern with https://github.com/sigp/lighthouse/pull/6321 // pattern with https://github.com/sigp/lighthouse/pull/6321
BlockError::AvailabilityCheck( BlockError::AvailabilityCheck(
AvailabilityCheckError::InvalidColumn(errors), AvailabilityCheckError::InvalidColumn((index_opt, _)),
) => errors ) => {
.iter() match index_opt {
// Collect all peers that sent a column that was invalid. Must Some(index) => peer_group.of_index(index as usize).collect(),
// run .unique as a single peer can send multiple invalid // If no index supplied this is an un-attributable fault. In practice
// columns. Penalize once to avoid insta-bans // this should never happen.
.flat_map(|(index, _)| peer_group.of_index((*index) as usize)) None => vec![],
.unique() }
.collect(), }
_ => peer_group.all().collect(), _ => peer_group.all().collect(),
}; };
for peer in peers_to_penalize { for peer in peers_to_penalize {

View File

@@ -14,9 +14,11 @@ ethereum_serde_utils = { workspace = true }
ethereum_ssz = { workspace = true } ethereum_ssz = { workspace = true }
ethereum_ssz_derive = { workspace = true } ethereum_ssz_derive = { workspace = true }
hex = { workspace = true } hex = { workspace = true }
rayon = { workspace = true }
rust_eth_kzg = { workspace = true } rust_eth_kzg = { workspace = true }
serde = { workspace = true } serde = { workspace = true }
serde_json = { workspace = true } serde_json = { workspace = true }
tracing = { workspace = true }
tree_hash = { workspace = true } tree_hash = { workspace = true }
[dev-dependencies] [dev-dependencies]

View File

@@ -3,6 +3,7 @@ mod kzg_proof;
pub mod trusted_setup; pub mod trusted_setup;
use rust_eth_kzg::{CellIndex, DASContext}; use rust_eth_kzg::{CellIndex, DASContext};
use std::collections::HashMap;
use std::fmt::Debug; use std::fmt::Debug;
pub use crate::{ pub use crate::{
@@ -17,10 +18,12 @@ pub use c_kzg::{
}; };
use crate::trusted_setup::load_trusted_setup; use crate::trusted_setup::load_trusted_setup;
use rayon::prelude::*;
pub use rust_eth_kzg::{ pub use rust_eth_kzg::{
constants::{BYTES_PER_CELL, CELLS_PER_EXT_BLOB}, constants::{BYTES_PER_CELL, CELLS_PER_EXT_BLOB},
Cell, CellIndex as CellID, CellRef, TrustedSetup as PeerDASTrustedSetup, Cell, CellIndex as CellID, CellRef, TrustedSetup as PeerDASTrustedSetup,
}; };
use tracing::instrument;
/// Disables the fixed-base multi-scalar multiplication optimization for computing /// Disables the fixed-base multi-scalar multiplication optimization for computing
/// cell KZG proofs, because `rust-eth-kzg` already handles the precomputation. /// cell KZG proofs, because `rust-eth-kzg` already handles the precomputation.
@@ -229,31 +232,85 @@ impl Kzg {
} }
/// Verifies a batch of cell-proof-commitment triplets. /// Verifies a batch of cell-proof-commitment triplets.
#[instrument(skip_all, level = "debug", fields(cells = cells.len()))]
pub fn verify_cell_proof_batch( pub fn verify_cell_proof_batch(
&self, &self,
cells: &[CellRef<'_>], cells: &[CellRef<'_>],
kzg_proofs: &[Bytes48], kzg_proofs: &[Bytes48],
columns: Vec<CellIndex>, indices: Vec<CellIndex>,
kzg_commitments: &[Bytes48], kzg_commitments: &[Bytes48],
) -> Result<(), Error> { ) -> Result<(), (Option<u64>, Error)> {
let proofs: Vec<_> = kzg_proofs.iter().map(|proof| proof.as_ref()).collect(); let mut column_groups: HashMap<u64, Vec<(CellRef, Bytes48, Bytes48)>> = HashMap::new();
let commitments: Vec<_> = kzg_commitments
.iter()
.map(|commitment| commitment.as_ref())
.collect();
let verification_result = self.context().verify_cell_kzg_proof_batch(
commitments.to_vec(),
&columns,
cells.to_vec(),
proofs.to_vec(),
);
// Modify the result so it matches roughly what the previous method was doing. let expected_len = cells.len();
match verification_result {
Ok(_) => Ok(()), // This check is already made in `validate_data_columns`. However we add it here so that ef consensus spec tests pass
Err(e) if e.is_proof_invalid() => Err(Error::KzgVerificationFailed), // and to avoid any potential footguns in the future. Note that by catching the error here and not in `validate_data_columns`
Err(e) => Err(Error::PeerDASKZG(e)), // the error becomes non-attributable.
if kzg_proofs.len() != expected_len
|| indices.len() != expected_len
|| kzg_commitments.len() != expected_len
{
return Err((
None,
Error::InconsistentArrayLength("Invalid data column".to_string()),
));
} }
for (((cell, proof), &index), commitment) in cells
.iter()
.zip(kzg_proofs.iter())
.zip(indices.iter())
.zip(kzg_commitments.iter())
{
column_groups
.entry(index)
.or_default()
.push((cell, *proof, *commitment));
}
column_groups
.into_par_iter()
.map(|(column_index, column_data)| {
let mut cells = Vec::new();
let mut proofs = Vec::new();
let mut commitments = Vec::new();
for (cell, proof, commitment) in &column_data {
cells.push(*cell);
proofs.push(proof.as_ref());
commitments.push(commitment.as_ref());
}
// Create per-chunk tracing span for visualizing parallel processing.
// This is safe from span explosion as we have at most 128 chunks,
// i.e. the number of column indices.
let _span = tracing::debug_span!(
"verify_cell_proof_chunk",
cells = cells.len(),
column_index,
verification_result = tracing::field::Empty,
)
.entered();
let verification_result = self.context().verify_cell_kzg_proof_batch(
commitments,
&vec![column_index; cells.len()], // All column_data here is from the same index
cells,
proofs,
);
match verification_result {
Ok(_) => Ok(()),
Err(e) if e.is_proof_invalid() => {
Err((Some(column_index), Error::KzgVerificationFailed))
}
Err(e) => Err((Some(column_index), Error::PeerDASKZG(e))),
}
})
.collect::<Result<Vec<()>, (Option<u64>, Error)>>()?;
Ok(())
} }
pub fn recover_cells_and_compute_kzg_proofs( pub fn recover_cells_and_compute_kzg_proofs(

View File

@@ -53,7 +53,7 @@ impl<E: EthSpec> Case for KZGVerifyCellKZGProofBatch<E> {
let kzg = get_kzg(); let kzg = get_kzg();
match kzg.verify_cell_proof_batch(&cells, &proofs, cell_indices, &commitments) { match kzg.verify_cell_proof_batch(&cells, &proofs, cell_indices, &commitments) {
Ok(_) => Ok(true), Ok(_) => Ok(true),
Err(KzgError::KzgVerificationFailed) => Ok(false), Err((_, KzgError::KzgVerificationFailed)) => Ok(false),
Err(e) => Err(Error::InternalError(format!( Err(e) => Err(Error::InternalError(format!(
"Failed to validate cells: {:?}", "Failed to validate cells: {:?}",
e e