use kzg::{
    Blob as KzgBlob, Bytes48, Cell as KzgCell, CellRef as KzgCellRef, CellsAndKzgProofs,
    Error as KzgError, Kzg, KzgBlobRef,
};
use rayon::prelude::*;
use ssz_types::{FixedVector, VariableList};
use std::sync::Arc;
use tracing::instrument;
use tree_hash::TreeHash;
use types::data::{Cell, DataColumn, DataColumnSidecarError};
use types::kzg_ext::KzgCommitments;
use types::{
    Blob, BlobSidecar, BlobSidecarList, ChainSpec, DataColumnSidecar, DataColumnSidecarFulu,
    DataColumnSidecarGloas, DataColumnSidecarList, EthSpec, Hash256, KzgCommitment, KzgProof,
    SignedBeaconBlock, SignedBeaconBlockHeader, SignedBlindedBeaconBlock, Slot,
};

/// Converts a blob ssz List object to an array to be used with the kzg
/// crypto library.
fn ssz_blob_to_crypto_blob<E: EthSpec>(blob: &Blob<E>) -> Result<KzgBlob, KzgError> {
    KzgBlob::from_bytes(blob.as_ref()).map_err(Into::into)
}

/// Same as [`ssz_blob_to_crypto_blob`] but boxes the result, keeping the
/// large `KzgBlob` off the stack.
fn ssz_blob_to_crypto_blob_boxed<E: EthSpec>(blob: &Blob<E>) -> Result<Box<KzgBlob>, KzgError> {
    ssz_blob_to_crypto_blob::<E>(blob).map(Box::new)
}

/// Converts a cell ssz List object to an array to be used with the kzg
/// crypto library.
fn ssz_cell_to_crypto_cell<E: EthSpec>(cell: &Cell<E>) -> Result<KzgCellRef<'_>, KzgError> {
    let cell_bytes: &[u8] = cell.as_ref();
    cell_bytes
        .try_into()
        .map_err(|e| KzgError::InconsistentArrayLength(format!("expected cell to have size BYTES_PER_CELL. This should be guaranteed by the `FixedVector` type: {e:?}")))
}

/// Validate a single blob-commitment-proof triplet from a `BlobSidecar`.
pub fn validate_blob<E: EthSpec>(
    kzg: &Kzg,
    blob: &Blob<E>,
    kzg_commitment: KzgCommitment,
    kzg_proof: KzgProof,
) -> Result<(), KzgError> {
    // Timer records single-blob verification latency in the crate metrics.
    let _timer = crate::metrics::start_timer(&crate::metrics::KZG_VERIFICATION_SINGLE_TIMES);
    let kzg_blob = ssz_blob_to_crypto_blob_boxed::<E>(blob)?;
    kzg.verify_blob_kzg_proof(&kzg_blob, kzg_commitment, kzg_proof)
}

/// Validate a batch of `DataColumnSidecar`.
pub fn validate_data_columns<'a, E: EthSpec, I>( kzg: &Kzg, data_column_iter: I, ) -> Result<(), (Option, KzgError)> where I: Iterator>> + Clone, { let mut cells = Vec::new(); let mut proofs = Vec::new(); let mut column_indices = Vec::new(); let mut commitments = Vec::new(); for data_column in data_column_iter { let col_index = *data_column.index(); if data_column.column().is_empty() { return Err((Some(col_index), KzgError::KzgVerificationFailed)); } for cell in data_column.column() { cells.push(ssz_cell_to_crypto_cell::(cell).map_err(|e| (Some(col_index), e))?); column_indices.push(col_index); } for &proof in data_column.kzg_proofs() { proofs.push(Bytes48::from(proof)); } for &commitment in data_column.kzg_commitments() { commitments.push(Bytes48::from(commitment)); } let expected_len = column_indices.len(); // We make this check at each iteration so that the error is attributable to a specific column if cells.len() != expected_len || proofs.len() != expected_len || commitments.len() != expected_len { return Err(( Some(col_index), KzgError::InconsistentArrayLength("Invalid data column".to_string()), )); } } kzg.verify_cell_proof_batch(&cells, &proofs, column_indices, &commitments) } /// Validate a batch of blob-commitment-proof triplets from multiple `BlobSidecars`. pub fn validate_blobs( kzg: &Kzg, expected_kzg_commitments: &[KzgCommitment], blobs: Vec<&Blob>, kzg_proofs: &[KzgProof], ) -> Result<(), KzgError> { let _timer = crate::metrics::start_timer(&crate::metrics::KZG_VERIFICATION_BATCH_TIMES); let blobs = blobs .into_iter() .map(|blob| ssz_blob_to_crypto_blob::(blob)) .collect::, KzgError>>()?; kzg.verify_blob_kzg_proof_batch(&blobs, expected_kzg_commitments, kzg_proofs) } /// Compute the kzg proof given an ssz blob and its kzg commitment. 
pub fn compute_blob_kzg_proof<E: EthSpec>(
    kzg: &Kzg,
    blob: &Blob<E>,
    kzg_commitment: KzgCommitment,
) -> Result<KzgProof, KzgError> {
    let kzg_blob = ssz_blob_to_crypto_blob_boxed::<E>(blob)?;
    kzg.compute_blob_kzg_proof(&kzg_blob, kzg_commitment)
}

/// Compute the kzg commitment for a given blob.
pub fn blob_to_kzg_commitment<E: EthSpec>(
    kzg: &Kzg,
    blob: &Blob<E>,
) -> Result<KzgCommitment, KzgError> {
    let kzg_blob = ssz_blob_to_crypto_blob_boxed::<E>(blob)?;
    kzg.blob_to_kzg_commitment(&kzg_blob)
}

/// Compute the kzg proof for a given blob and an evaluation point z.
///
/// Returns the proof together with the claimed evaluation `y` at `z`, both
/// re-encoded as `Hash256` for callers.
pub fn compute_kzg_proof<E: EthSpec>(
    kzg: &Kzg,
    blob: &Blob<E>,
    z: Hash256,
) -> Result<(KzgProof, Hash256), KzgError> {
    // Reinterpret the 32-byte hash as a field element for the KZG library.
    let z = z.0.into();
    let kzg_blob = ssz_blob_to_crypto_blob_boxed::<E>(blob)?;
    kzg.compute_kzg_proof(&kzg_blob, &z)
        .map(|(proof, z)| (proof, Hash256::from_slice(&z.to_vec())))
}

/// Verify a `kzg_proof` for a `kzg_commitment` that evaluating a polynomial at `z` results in `y`
pub fn verify_kzg_proof(
    kzg: &Kzg,
    kzg_commitment: KzgCommitment,
    kzg_proof: KzgProof,
    z: Hash256,
    y: Hash256,
) -> Result<bool, KzgError> {
    kzg.verify_kzg_proof(kzg_commitment, &z.0.into(), &y.0.into(), kzg_proof)
}

/// Build data column sidecars from a signed beacon block and its blobs.
#[instrument(skip_all, level = "debug", fields(blob_count = blobs.len()))] pub fn blobs_to_data_column_sidecars( blobs: &[&Blob], cell_proofs: Vec, block: &SignedBeaconBlock, kzg: &Kzg, spec: &ChainSpec, ) -> Result, DataColumnSidecarError> { if blobs.is_empty() { return Ok(vec![]); } let kzg_commitments = block .message() .body() .blob_kzg_commitments() .map_err(|_err| DataColumnSidecarError::PreDeneb)?; let signed_block_header = block.signed_block_header(); if cell_proofs.len() != blobs.len() * E::number_of_columns() { return Err(DataColumnSidecarError::InvalidCellProofLength { expected: blobs.len() * E::number_of_columns(), actual: cell_proofs.len(), }); } let proof_chunks = cell_proofs .chunks_exact(E::number_of_columns()) .collect::>(); // NOTE: assumes blob sidecars are ordered by index let zipped: Vec<_> = blobs.iter().zip(proof_chunks).collect(); let blob_cells_and_proofs_vec = zipped .into_par_iter() .map(|(blob, proofs)| { let blob = blob.as_ref().try_into().map_err(|e| { KzgError::InconsistentArrayLength(format!( "blob should have a guaranteed size due to FixedVector: {e:?}" )) })?; kzg.compute_cells(blob).and_then(|cells| { let proofs = proofs.try_into().map_err(|e| { KzgError::InconsistentArrayLength(format!( "proof chunks should have exactly `number_of_columns` proofs: {e:?}" )) })?; Ok((cells, proofs)) }) }) .collect::, KzgError>>()?; if block.fork_name_unchecked().gloas_enabled() { build_data_column_sidecars_gloas( kzg_commitments.clone(), signed_block_header.message.tree_hash_root(), block.slot(), blob_cells_and_proofs_vec, spec, ) .map_err(DataColumnSidecarError::BuildSidecarFailed) } else { let kzg_commitments_inclusion_proof = block.message().body().kzg_commitments_merkle_proof()?; build_data_column_sidecars_fulu( kzg_commitments.clone(), kzg_commitments_inclusion_proof, signed_block_header, blob_cells_and_proofs_vec, spec, ) .map_err(DataColumnSidecarError::BuildSidecarFailed) } } pub fn compute_cells(blobs: &[&Blob], kzg: &Kzg) -> Result, 
KzgError> { let cells_vec = blobs .into_par_iter() .map(|blob| { let blob: KzgBlobRef<'_> = blob.as_ref().try_into().map_err(|e| { KzgError::InconsistentArrayLength(format!( "blob should have a guaranteed size due to FixedVector: {e:?}", )) })?; kzg.compute_cells(blob) }) .collect::, KzgError>>()?; let cells_flattened: Vec = cells_vec.into_iter().flatten().collect(); Ok(cells_flattened) } pub(crate) fn build_data_column_sidecars_fulu( kzg_commitments: KzgCommitments, kzg_commitments_inclusion_proof: FixedVector, signed_block_header: SignedBeaconBlockHeader, blob_cells_and_proofs_vec: Vec, spec: &ChainSpec, ) -> Result, String> { if spec .fork_name_at_slot::(signed_block_header.message.slot) .gloas_enabled() { return Err("Attempting to construct Fulu data columns post-Gloas".to_owned()); } let number_of_columns = E::number_of_columns(); let max_blobs_per_block = spec .max_blobs_per_block(signed_block_header.message.slot.epoch(E::slots_per_epoch())) as usize; let mut columns = vec![Vec::with_capacity(max_blobs_per_block); number_of_columns]; let mut column_kzg_proofs = vec![Vec::with_capacity(max_blobs_per_block); number_of_columns]; for (blob_cells, blob_cell_proofs) in blob_cells_and_proofs_vec { // we iterate over each column, and we construct the column from "top to bottom", // pushing on the cell and the corresponding proof at each column index. we do this for // each blob (i.e. the outer loop). 
for col in 0..number_of_columns { let cell = blob_cells .get(col) .ok_or(format!("Missing blob cell at index {col}"))?; let cell: Vec = cell.to_vec(); let cell = Cell::::try_from(cell).map_err(|e| format!("BytesPerCell exceeded: {e:?}"))?; let proof = blob_cell_proofs .get(col) .ok_or(format!("Missing blob cell KZG proof at index {col}"))?; let column = columns .get_mut(col) .ok_or(format!("Missing data column at index {col}"))?; let column_proofs = column_kzg_proofs .get_mut(col) .ok_or(format!("Missing data column proofs at index {col}"))?; column.push(cell); column_proofs.push(*proof); } } let sidecars: Result>>, String> = columns .into_iter() .zip(column_kzg_proofs) .enumerate() .map( |(index, (col, proofs))| -> Result>, String> { Ok(Arc::new(DataColumnSidecar::Fulu(DataColumnSidecarFulu { index: index as u64, column: DataColumn::::try_from(col) .map_err(|e| format!("MaxBlobCommitmentsPerBlock exceeded: {e:?}"))?, kzg_commitments: kzg_commitments.clone(), kzg_proofs: VariableList::try_from(proofs) .map_err(|e| format!("MaxBlobCommitmentsPerBlock exceeded: {e:?}"))?, signed_block_header: signed_block_header.clone(), kzg_commitments_inclusion_proof: kzg_commitments_inclusion_proof.clone(), }))) }, ) .collect(); sidecars } pub(crate) fn build_data_column_sidecars_gloas( kzg_commitments: KzgCommitments, beacon_block_root: Hash256, slot: Slot, blob_cells_and_proofs_vec: Vec, spec: &ChainSpec, ) -> Result, String> { if !spec.fork_name_at_slot::(slot).gloas_enabled() { return Err("Attempting to construct Gloas data columns pre-Gloas".to_owned()); } let number_of_columns = E::number_of_columns(); let max_blobs_per_block = spec.max_blobs_per_block(slot.epoch(E::slots_per_epoch())) as usize; let mut columns = vec![Vec::with_capacity(max_blobs_per_block); number_of_columns]; let mut column_kzg_proofs = vec![Vec::with_capacity(max_blobs_per_block); number_of_columns]; for (blob_cells, blob_cell_proofs) in blob_cells_and_proofs_vec { // we iterate over each column, and we 
construct the column from "top to bottom", // pushing on the cell and the corresponding proof at each column index. we do this for // each blob (i.e. the outer loop). for col in 0..number_of_columns { let cell = blob_cells .get(col) .ok_or(format!("Missing blob cell at index {col}"))?; let cell: Vec = cell.to_vec(); let cell = Cell::::try_from(cell).map_err(|e| format!("BytesPerCell exceeded: {e:?}"))?; let proof = blob_cell_proofs .get(col) .ok_or(format!("Missing blob cell KZG proof at index {col}"))?; let column = columns .get_mut(col) .ok_or(format!("Missing data column at index {col}"))?; let column_proofs = column_kzg_proofs .get_mut(col) .ok_or(format!("Missing data column proofs at index {col}"))?; column.push(cell); column_proofs.push(*proof); } } let sidecars: Result>>, String> = columns .into_iter() .zip(column_kzg_proofs) .enumerate() .map( |(index, (col, proofs))| -> Result>, String> { Ok(Arc::new(DataColumnSidecar::Gloas(DataColumnSidecarGloas { index: index as u64, column: DataColumn::::try_from(col) .map_err(|e| format!("MaxBlobCommitmentsPerBlock exceeded: {e:?}"))?, kzg_commitments: kzg_commitments.clone(), kzg_proofs: VariableList::try_from(proofs) .map_err(|e| format!("MaxBlobCommitmentsPerBlock exceeded: {e:?}"))?, beacon_block_root, slot, }))) }, ) .collect(); sidecars } // TODO(gloas) blob reconstruction will fail post gloas. We should just return `Blob`s // instead of a `BlobSidecar`. This might require a beacon api spec change as well. /// Reconstruct blobs from a subset of data column sidecars (requires at least 50%). /// /// If `blob_indices_opt` is `None`, this function attempts to reconstruct all blobs associated /// with the block. 
/// This function does NOT use rayon as this is primarily used by a non critical path in HTTP API /// and it will be slow if the node needs to reconstruct the blobs pub fn reconstruct_blobs( kzg: &Kzg, mut data_columns: Vec>>, blob_indices_opt: Option>, signed_block: &SignedBlindedBeaconBlock, spec: &ChainSpec, ) -> Result, String> { // Sort data columns by index to ensure ascending order for KZG operations data_columns.sort_unstable_by_key(|dc| *dc.index()); let first_data_column = data_columns .first() .ok_or("data_columns should have at least one element".to_string())?; let blob_indices: Vec = match blob_indices_opt { Some(indices) => indices.into_iter().map(|i| i as usize).collect(), None => { let num_of_blobs = first_data_column.kzg_commitments().len(); (0..num_of_blobs).collect() } }; let blob_sidecars = blob_indices .into_iter() .map(|row_index| { let mut cells: Vec = vec![]; let mut cell_ids: Vec = vec![]; for data_column in &data_columns { let cell = data_column .column() .get(row_index) .ok_or(format!("Missing data column at row index {row_index}")) .and_then(|cell| { ssz_cell_to_crypto_cell::(cell).map_err(|e| format!("{e:?}")) })?; cells.push(cell); cell_ids.push(*data_column.index()); } let num_cells_original_blob = E::number_of_columns() / 2; let blob_bytes = if data_columns.len() < E::number_of_columns() { let (recovered_cells, _kzg_proofs) = kzg .recover_cells_and_compute_kzg_proofs(&cell_ids, &cells) .map_err(|e| { format!("Failed to recover cells and compute KZG proofs: {e:?}") })?; recovered_cells .into_iter() .take(num_cells_original_blob) .flat_map(|cell| cell.into_iter()) .collect() } else { cells .into_iter() .take(num_cells_original_blob) .flat_map(|cell| (*cell).into_iter()) .collect() }; let blob = Blob::::new(blob_bytes).map_err(|e| format!("{e:?}"))?; let kzg_proof = KzgProof::empty(); BlobSidecar::::new_with_existing_proof( row_index, blob, signed_block, first_data_column .signed_block_header() .map_err(|e| format!("{e:?}"))? 
.clone(), first_data_column .kzg_commitments_inclusion_proof() .map_err(|e| format!("{e:?}"))?, kzg_proof, ) .map(Arc::new) .map_err(|e| format!("{e:?}")) }) .collect::, _>>()?; let max_blobs = spec.max_blobs_per_block(signed_block.epoch()) as usize; BlobSidecarList::new(blob_sidecars, max_blobs).map_err(|e| format!("{e:?}")) } /// Reconstruct all data columns from a subset of data column sidecars (requires at least 50%). pub fn reconstruct_data_columns( kzg: &Kzg, mut data_columns: Vec>>, spec: &ChainSpec, ) -> Result, KzgError> { // Sort data columns by index to ensure ascending order for KZG operations data_columns.sort_unstable_by_key(|dc| *dc.index()); let first_data_column = data_columns .first() .ok_or(KzgError::InconsistentArrayLength( "data_columns should have at least one element".to_string(), ))?; let num_of_blobs = first_data_column.kzg_commitments().len(); let blob_cells_and_proofs_vec = (0..num_of_blobs) .into_par_iter() .map(|row_index| { let mut cells: Vec = vec![]; let mut cell_ids: Vec = vec![]; for data_column in &data_columns { let cell = data_column.column().get(row_index).ok_or( KzgError::InconsistentArrayLength(format!( "Missing data column at row index {row_index}" )), )?; cells.push(ssz_cell_to_crypto_cell::(cell)?); cell_ids.push(*data_column.index()); } kzg.recover_cells_and_compute_kzg_proofs(&cell_ids, &cells) }) .collect::, KzgError>>()?; match first_data_column.as_ref() { DataColumnSidecar::Fulu(first_column) => { // Clone sidecar elements from existing data column, no need to re-compute build_data_column_sidecars_fulu( first_column.kzg_commitments.clone(), first_column.kzg_commitments_inclusion_proof.clone(), first_column.signed_block_header.clone(), blob_cells_and_proofs_vec, spec, ) .map_err(KzgError::ReconstructFailed) } DataColumnSidecar::Gloas(first_column) => build_data_column_sidecars_gloas( first_column.kzg_commitments.clone(), first_column.beacon_block_root, first_column.slot, blob_cells_and_proofs_vec, spec, ) 
.map_err(KzgError::ReconstructFailed), } } #[cfg(test)] mod test { use crate::kzg_utils::{ blobs_to_data_column_sidecars, reconstruct_blobs, reconstruct_data_columns, validate_data_columns, }; use bls::Signature; use eth2::types::BlobsBundle; use execution_layer::test_utils::generate_blobs; use kzg::{Kzg, KzgCommitment, trusted_setup::get_trusted_setup}; use types::{ BeaconBlock, BeaconBlockFulu, BlobsList, ChainSpec, EmptyBlock, EthSpec, ForkName, FullPayload, KzgProofs, MainnetEthSpec, SignedBeaconBlock, kzg_ext::KzgCommitments, }; type E = MainnetEthSpec; // Loading and initializing PeerDAS KZG is expensive and slow, so we group the tests together // only load it once. // TODO(Gloas) make this generic over fulu/gloas, or write a separate function for Gloas #[test] fn test_build_data_columns_sidecars() { let spec = ForkName::Fulu.make_genesis_spec(E::default_spec()); let kzg = get_kzg(); test_build_data_columns_empty(&kzg, &spec); test_build_data_columns_fulu(&kzg, &spec); test_reconstruct_data_columns(&kzg, &spec); test_reconstruct_data_columns_unordered(&kzg, &spec); test_reconstruct_blobs_from_data_columns(&kzg, &spec); test_reconstruct_blobs_from_data_columns_unordered(&kzg, &spec); test_validate_data_columns(&kzg, &spec); } #[track_caller] fn test_validate_data_columns(kzg: &Kzg, spec: &ChainSpec) { let num_of_blobs = 2; let (signed_block, blobs, proofs) = create_test_fulu_block_and_blobs::(num_of_blobs, spec); let blob_refs = blobs.iter().collect::>(); let column_sidecars = blobs_to_data_column_sidecars(&blob_refs, proofs.to_vec(), &signed_block, kzg, spec) .unwrap(); let result = validate_data_columns::(kzg, column_sidecars.iter()); assert!(result.is_ok()); } #[track_caller] fn test_build_data_columns_empty(kzg: &Kzg, spec: &ChainSpec) { let num_of_blobs = 0; let (signed_block, blobs, proofs) = create_test_fulu_block_and_blobs::(num_of_blobs, spec); let blob_refs = blobs.iter().collect::>(); let column_sidecars = blobs_to_data_column_sidecars(&blob_refs, 
proofs.to_vec(), &signed_block, kzg, spec) .unwrap(); assert!(column_sidecars.is_empty()); } // TODO(gloas) create `test_build_data_columns_gloas` and make sure its called // in the relevant places #[track_caller] fn test_build_data_columns_fulu(kzg: &Kzg, spec: &ChainSpec) { // Using at least 2 blobs to make sure we're arranging the data columns correctly. let num_of_blobs = 2; let (signed_block, blobs, proofs) = create_test_fulu_block_and_blobs::(num_of_blobs, spec); let blob_refs = blobs.iter().collect::>(); let column_sidecars = blobs_to_data_column_sidecars(&blob_refs, proofs.to_vec(), &signed_block, kzg, spec) .unwrap(); let block_kzg_commitments = signed_block .message() .body() .blob_kzg_commitments() .unwrap() .clone(); let block_kzg_commitments_inclusion_proof = signed_block .message() .body() .kzg_commitments_merkle_proof() .unwrap(); assert_eq!(column_sidecars.len(), E::number_of_columns()); for (idx, col_sidecar) in column_sidecars.iter().enumerate() { assert_eq!(*col_sidecar.index(), idx as u64); assert_eq!(col_sidecar.kzg_commitments().len(), num_of_blobs); assert_eq!(col_sidecar.column().len(), num_of_blobs); assert_eq!(col_sidecar.kzg_proofs().len(), num_of_blobs); assert_eq!(col_sidecar.kzg_commitments().clone(), block_kzg_commitments); assert_eq!( col_sidecar .kzg_commitments_inclusion_proof() .unwrap() .clone(), block_kzg_commitments_inclusion_proof ); assert!(col_sidecar.as_fulu().unwrap().verify_inclusion_proof()); } } #[track_caller] fn test_reconstruct_data_columns(kzg: &Kzg, spec: &ChainSpec) { // Using at least 2 blobs to make sure we're arranging the data columns correctly. 
let num_of_blobs = 2; let (signed_block, blobs, proofs) = create_test_fulu_block_and_blobs::(num_of_blobs, spec); let blob_refs = blobs.iter().collect::>(); let column_sidecars = blobs_to_data_column_sidecars(&blob_refs, proofs.to_vec(), &signed_block, kzg, spec) .unwrap(); // Now reconstruct let reconstructed_columns = reconstruct_data_columns( kzg, column_sidecars.iter().as_slice()[0..column_sidecars.len() / 2].to_vec(), spec, ) .unwrap(); for i in 0..E::number_of_columns() { assert_eq!(reconstructed_columns.get(i), column_sidecars.get(i), "{i}"); } } #[track_caller] fn test_reconstruct_data_columns_unordered(kzg: &Kzg, spec: &ChainSpec) { // Using at least 2 blobs to make sure we're arranging the data columns correctly. let num_of_blobs = 2; let (signed_block, blobs, proofs) = create_test_fulu_block_and_blobs::(num_of_blobs, spec); let blob_refs = blobs.iter().collect::>(); let column_sidecars = blobs_to_data_column_sidecars(&blob_refs, proofs.to_vec(), &signed_block, kzg, spec) .unwrap(); // Test reconstruction with columns in reverse order (non-ascending) let mut subset_columns: Vec<_> = column_sidecars.iter().as_slice()[0..column_sidecars.len() / 2].to_vec(); subset_columns.reverse(); // This would fail without proper sorting in reconstruct_data_columns let reconstructed_columns = reconstruct_data_columns(kzg, subset_columns, spec).unwrap(); for i in 0..E::number_of_columns() { assert_eq!(reconstructed_columns.get(i), column_sidecars.get(i), "{i}"); } } #[track_caller] fn test_reconstruct_blobs_from_data_columns(kzg: &Kzg, spec: &ChainSpec) { let num_of_blobs = 3; let (signed_block, blobs, proofs) = create_test_fulu_block_and_blobs::(num_of_blobs, spec); let blob_refs = blobs.iter().collect::>(); let column_sidecars = blobs_to_data_column_sidecars(&blob_refs, proofs.to_vec(), &signed_block, kzg, spec) .unwrap(); // Now reconstruct let signed_blinded_block = signed_block.into(); // Using at least 2 blobs to make sure we're arranging the data columns correctly. 
let blob_indices = vec![1, 2]; let reconstructed_blobs = reconstruct_blobs( kzg, column_sidecars[0..column_sidecars.len() / 2].to_vec(), Some(blob_indices.clone()), &signed_blinded_block, spec, ) .unwrap(); for i in blob_indices { let reconstructed_blob = &reconstructed_blobs .iter() .find(|sidecar| sidecar.index == i) .map(|sidecar| sidecar.blob.clone()) .expect("reconstructed blob should exist"); let original_blob = blobs.get(i as usize).unwrap(); assert_eq!(reconstructed_blob, original_blob, "{i}"); } } #[track_caller] fn test_reconstruct_blobs_from_data_columns_unordered(kzg: &Kzg, spec: &ChainSpec) { let num_of_blobs = 2; let (signed_block, blobs, proofs) = create_test_fulu_block_and_blobs::(num_of_blobs, spec); let blob_refs = blobs.iter().collect::>(); let column_sidecars = blobs_to_data_column_sidecars(&blob_refs, proofs.to_vec(), &signed_block, kzg, spec) .unwrap(); // Test reconstruction with columns in reverse order (non-ascending) let mut subset_columns: Vec<_> = column_sidecars.iter().as_slice()[0..column_sidecars.len() / 2].to_vec(); subset_columns.reverse(); // This would fail without proper sorting in reconstruct_blobs let signed_blinded_block = signed_block.into(); let reconstructed_blobs = reconstruct_blobs(kzg, subset_columns, None, &signed_blinded_block, spec).unwrap(); for (i, original_blob) in blobs.iter().enumerate() { let reconstructed_blob = &reconstructed_blobs.get(i).unwrap().blob; assert_eq!(reconstructed_blob, original_blob, "{i}"); } } fn get_kzg() -> Kzg { Kzg::new_from_trusted_setup(&get_trusted_setup()).expect("should create kzg") } fn create_test_fulu_block_and_blobs( num_of_blobs: usize, spec: &ChainSpec, ) -> ( SignedBeaconBlock>, BlobsList, KzgProofs, ) { let mut block = BeaconBlock::Fulu(BeaconBlockFulu::empty(spec)); let mut body = block.body_mut(); let blob_kzg_commitments = body.blob_kzg_commitments_mut().unwrap(); *blob_kzg_commitments = KzgCommitments::::new(vec![KzgCommitment::empty_for_testing(); num_of_blobs]) 
.unwrap(); let mut signed_block = SignedBeaconBlock::from_block(block, Signature::empty()); let fork = signed_block.fork_name_unchecked(); let (blobs_bundle, _) = generate_blobs::(num_of_blobs, fork).unwrap(); let BlobsBundle { blobs, commitments, proofs, } = blobs_bundle; *signed_block .message_mut() .body_mut() .blob_kzg_commitments_mut() .unwrap() = commitments; (signed_block, blobs, proofs) } }