Merge branch 'deneb-free-blobs' of https://github.com/sigp/lighthouse into sean-blob-blob-blob

This commit is contained in:
realbigsean
2023-03-15 17:34:56 -04:00
22 changed files with 233 additions and 286 deletions

View File

@@ -7,12 +7,15 @@ use crate::attester_cache::{AttesterCache, AttesterCacheKey};
use crate::beacon_proposer_cache::compute_proposer_duties_from_head;
use crate::beacon_proposer_cache::BeaconProposerCache;
use crate::blob_cache::BlobCache;
use crate::blob_verification::{AsBlock, AvailabilityPendingBlock, AvailableBlock, BlobError, BlockWrapper, IntoAvailableBlock, Blobs};
use crate::blob_verification::{
AsBlock, AvailabilityPendingBlock, AvailableBlock, BlobError, Blobs, BlockWrapper,
IntoAvailableBlock,
};
use crate::block_times_cache::BlockTimesCache;
use crate::block_verification::{
check_block_is_finalized_checkpoint_or_descendant, check_block_relevancy, get_block_root,
signature_verify_chain_segment, BlockError, ExecutionPendingBlock, GossipVerifiedBlock,
IntoExecutionPendingBlock, PayloadVerificationOutcome, POS_PANDA_BANNER, ExecutedBlock,
signature_verify_chain_segment, BlockError, ExecutedBlock, ExecutionPendingBlock,
GossipVerifiedBlock, IntoExecutionPendingBlock, PayloadVerificationOutcome, POS_PANDA_BANNER,
};
pub use crate::canonical_head::{CanonicalHead, CanonicalHeadRwLock};
use crate::chain_config::ChainConfig;
@@ -82,6 +85,7 @@ use slasher::Slasher;
use slog::{crit, debug, error, info, trace, warn, Logger};
use slot_clock::SlotClock;
use ssz::Encode;
use state_processing::per_block_processing::eip4844::eip4844::verify_kzg_commitments_against_transactions;
use state_processing::{
common::get_attesting_indices_from_state,
per_block_processing,
@@ -102,16 +106,14 @@ use std::io::prelude::*;
use std::marker::PhantomData;
use std::sync::Arc;
use std::time::{Duration, Instant};
use tokio::task::JoinHandle;
use state_processing::per_block_processing::eip4844::eip4844::verify_kzg_commitments_against_transactions;
use store::iter::{BlockRootsIterator, ParentRootBlockIterator, StateRootsIterator};
use store::{
DatabaseBlock, Error as DBError, HotColdDB, KeyValueStore, KeyValueStoreOp, StoreItem, StoreOp,
};
use task_executor::{ShutdownReason, TaskExecutor};
use tokio::task::JoinHandle;
use tree_hash::TreeHash;
use types::beacon_state::CloneConfig;
use types::blobs_sidecar::KzgCommitments;
use types::consts::eip4844::MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS;
use types::consts::merge::INTERVALS_PER_SLOT;
use types::*;
@@ -2709,7 +2711,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
let slot = unverified_block.block().slot();
let execution_pending = unverified_block.into_execution_pending_block(
block_root,
&chain,
@@ -2723,9 +2724,13 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
let chain = self.clone();
// Check if the executed block has all its blobs available to qualify as a fully
// Check if the executed block has all its blobs available to qualify as a fully
// available block
let import_block = if let Ok(blobs) = self.gossip_blob_cache.lock().blobs(executed_block.block_root) {
let import_block = if let Ok(blobs) = self
.gossip_blob_cache
.lock()
.blobs(executed_block.block_root)
{
self.import_available_block(executed_block, blobs, count_unrealized)
} else {
return Ok(BlockProcessingResult::AvailabilityPending(executed_block));
@@ -2839,7 +2844,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
confirmed_state_roots,
parent_eth1_finalization_data,
consensus_context,
payload_verification_outcome
payload_verification_outcome,
})
}
@@ -2851,7 +2856,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
async fn import_available_block(
self: Arc<Self>,
executed_block: ExecutedBlock<T>,
blobs: Blobs<T::EthSpec>
blobs: Blobs<T::EthSpec>,
count_unrealized: CountUnrealized,
) -> Result<Hash256, BlockError<T::EthSpec>> {
let ExecutedBlock {
@@ -2865,14 +2870,13 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
consensus_context,
} = execution_pending_block;
let chain = self.clone();
let available_block = AvailableBlock {
block: block,
blobs: blobs
block: block,
blobs: blobs,
};
let block_hash = self
.spawn_blocking_handle(
move || {
@@ -4911,7 +4915,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
)),
)?;
kzg_utils::compute_blob_kzg_proof::<T::EthSpec>(kzg, blob, kzg_commitment.clone())
kzg_utils::compute_blob_kzg_proof::<T::EthSpec>(kzg, blob, *kzg_commitment)
.map_err(BlockProductionError::KzgError)
})
.collect::<Result<Vec<KzgProof>, BlockProductionError>>()
@@ -6196,19 +6200,22 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
.unwrap_or(false))
}
pub async fn check_data_availability(&self, block: Arc<SignedBeaconBlock<T::EthSpec>>) -> Result<AvailableBlock<T>, Error> {
let kzg_commitments = block
.message()
.body()
.blob_kzg_commitments()
.map_err(|_| BlobError::KzgCommitmentMissing)?;
let transactions = block
.message()
.body()
.execution_payload_eip4844()
.map(|payload| payload.transactions())
.map_err(|_| BlobError::TransactionsMissing)?
.ok_or(BlobError::TransactionsMissing)?;
pub async fn check_data_availability(
&self,
block: Arc<SignedBeaconBlock<T::EthSpec>>,
) -> Result<AvailableBlock<T>, Error> {
let kzg_commitments = block
.message()
.body()
.blob_kzg_commitments()
.map_err(|_| BlobError::KzgCommitmentMissing)?;
let transactions = block
.message()
.body()
.execution_payload_eip4844()
.map(|payload| payload.transactions())
.map_err(|_| BlobError::TransactionsMissing)?
.ok_or(BlobError::TransactionsMissing)?;
if verify_kzg_commitments_against_transactions::<T::EthSpec>(transactions, kzg_commitments)
.is_err()
@@ -6229,7 +6236,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
kzg_commitments,
blob_sidecar,
)
.map_err(BlobError::KzgError)?
.map_err(BlobError::KzgError)?
{
return Err(BlobError::InvalidKzgProof);
}

View File

@@ -1,8 +1,8 @@
use derivative::Derivative;
use slot_clock::SlotClock;
use ssz_types::VariableList;
use std::sync::Arc;
use tokio::task::JoinHandle;
use ssz_types::VariableList;
use crate::beacon_chain::{
BeaconChain, BeaconChainTypes, MAXIMUM_GOSSIP_CLOCK_DISPARITY,
@@ -10,13 +10,13 @@ use crate::beacon_chain::{
};
use crate::BeaconChainError;
use state_processing::per_block_processing::eip4844::eip4844::verify_kzg_commitments_against_transactions;
use types::blob_sidecar::BlobSidecar;
use types::{
BeaconBlockRef, BeaconStateError, BlobsSidecar, EthSpec, Hash256, KzgCommitment,
SignedBeaconBlock, SignedBeaconBlockAndBlobsSidecar, SignedBeaconBlockHeader,
SignedBlobSidecar, Slot, Transactions,
};
use types::{Epoch, ExecPayload};
use types::blob_sidecar::BlobSidecar;
#[derive(Debug)]
pub enum BlobError {
@@ -252,8 +252,8 @@ pub fn verify_data_availability<T: BeaconChainTypes>(
blob_sidecar: &BlobsSidecar<T::EthSpec>,
kzg_commitments: &[KzgCommitment],
transactions: &Transactions<T::EthSpec>,
block_slot: Slot,
block_root: Hash256,
_block_slot: Slot,
_block_root: Hash256,
chain: &BeaconChain<T>,
) -> Result<(), BlobError> {
if verify_kzg_commitments_against_transactions::<T::EthSpec>(transactions, kzg_commitments)
@@ -263,7 +263,7 @@ pub fn verify_data_availability<T: BeaconChainTypes>(
}
// Validate that the kzg proof is valid against the commitments and blobs
let kzg = chain
let _kzg = chain
.kzg
.as_ref()
.ok_or(BlobError::TrustedSetupNotInitialized)?;
@@ -300,8 +300,6 @@ impl<T: BeaconChainTypes> BlockWrapper<T::EthSpec> {
AvailabilityPendingBlock::new(block, block_root, da_check_required)
}
BlockWrapper::BlockAndBlobs(block, blobs_sidecar) => {
AvailabilityPendingBlock::new_with_blobs(block, blobs_sidecar, da_check_required)
}
}
@@ -336,22 +334,23 @@ pub struct AvailableBlock<T: BeaconChainTypes> {
blobs: Blobs<T::EthSpec>,
}
impl <T: BeaconChainTypes> AvailableBlock<T> {
impl<T: BeaconChainTypes> AvailableBlock<T> {
pub fn blobs(&self) -> Option<Arc<BlobsSidecar<T>>> {
match &self.blobs {
Blobs::NotRequired | Blobs::None => None,
Blobs::Available(block_sidecar) => {
Some(block_sidecar.clone())
}
Blobs::Available(block_sidecar) => Some(block_sidecar.clone()),
}
}
pub fn deconstruct(self) -> (Arc<SignedBeaconBlock<T::EthSpec>>, Option<Arc<BlobsSidecar<T::EthSpec>>>) {
pub fn deconstruct(
self,
) -> (
Arc<SignedBeaconBlock<T::EthSpec>>,
Option<Arc<BlobsSidecar<T::EthSpec>>>,
) {
match self.blobs {
Blobs::NotRequired | Blobs::None => (self.block, None),
Blobs::Available(blob_sidecars) => {
(self.block, Some(blob_sidecars))
}
Blobs::Available(blob_sidecars) => (self.block, Some(blob_sidecars)),
}
}
}
@@ -388,12 +387,10 @@ impl<T: BeaconChainTypes> AvailabilityPendingBlock<T> {
SignedBeaconBlock::Base(_)
| SignedBeaconBlock::Altair(_)
| SignedBeaconBlock::Capella(_)
| SignedBeaconBlock::Merge(_) => {
Ok(AvailabilityPendingBlock {
block: beacon_block ,
data_availability_handle: async{ Ok(Some(Blobs::NotRequired))}
})
}
| SignedBeaconBlock::Merge(_) => Ok(AvailabilityPendingBlock {
block: beacon_block,
data_availability_handle: async { Ok(Some(Blobs::NotRequired)) },
}),
SignedBeaconBlock::Eip4844(_) => {
match da_check_required {
DataAvailabilityCheckRequired::Yes => {
@@ -408,12 +405,10 @@ impl<T: BeaconChainTypes> AvailabilityPendingBlock<T> {
},
)))
}
DataAvailabilityCheckRequired::No => {
AvailabilityPendingBlock {
block: beacon_block,
data_availability_handle: async{ Ok(Some(Blobs::NotRequired))}
}
}
DataAvailabilityCheckRequired::No => AvailabilityPendingBlock {
block: beacon_block,
data_availability_handle: async { Ok(Some(Blobs::NotRequired)) },
},
}
}
}
@@ -444,24 +439,22 @@ impl<T: BeaconChainTypes> AvailabilityPendingBlock<T> {
| SignedBeaconBlock::Merge(_) => Err(BlobError::InconsistentFork),
SignedBeaconBlock::Eip4844(_) => {
match da_check_required {
DataAvailabilityCheckRequired::Yes => Ok(AvailableBlock{
block: beacon_block,
blobs: Blobs::Available(blobs_sidecar),
}
),
DataAvailabilityCheckRequired::Yes => Ok(AvailableBlock {
block: beacon_block,
blobs: Blobs::Available(blobs_sidecar),
}),
DataAvailabilityCheckRequired::No => {
// Blobs were not verified so we drop them, we'll instead just pass around
// an available `Eip4844` block without blobs.
Ok(AvailableBlock{
block: beacon_block,
blobs: Blobs::NotRequired
Ok(AvailableBlock {
block: beacon_block,
blobs: Blobs::NotRequired,
})
}
}
}
}
}
}
pub trait IntoBlockWrapper<E: EthSpec>: AsBlock<E> {

View File

@@ -1,19 +1,19 @@
use crate::blob_verification::{verify_data_availability, AvailabilityPendingBlock};
use crate::block_verification::{ExecutedBlock, IntoExecutionPendingBlock};
use crate::kzg_utils::validate_blob;
use crate::{BeaconChainError, BlockError};
use eth2::reqwest::header::Entry;
use kzg::{Kzg, KzgCommitment};
use parking_lot::{Mutex, RwLock};
use ssz_types::VariableList;
use std::collections::{BTreeMap, HashMap, HashSet};
use std::future::Future;
use std::sync::Arc;
use parking_lot::{Mutex, RwLock};
use eth2::reqwest::header::Entry;
use kzg::{Kzg, KzgCommitment};
use ssz_types::VariableList;
use types::blob_sidecar::{BlobIdentifier, BlobSidecar};
use types::{EthSpec, Hash256};
use crate::blob_verification::{AvailabilityPendingBlock, verify_data_availability};
use crate::block_verification::{ExecutedBlock, IntoExecutionPendingBlock};
use crate::{BeaconChainError, BlockError};
use crate::kzg_utils::validate_blob;
pub enum BlobCacheError {
DuplicateBlob(Hash256)
DuplicateBlob(Hash256),
}
/// This cache contains
/// - blobs that have been gossip verified
@@ -31,14 +31,13 @@ struct GossipBlobCacheInner<T: EthSpec> {
executed_block: Option<ExecutedBlock<T>>,
}
impl <T: EthSpec> GossipBlobCache<T> {
impl<T: EthSpec> GossipBlobCache<T> {
pub fn new(kzg: Kzg) -> Self {
Self {
rpc_blob_cache: RwLock::new(HashMap::new()),
gossip_blob_cache: Mutex::new(HashMap::new()),
kzg,
}
}
/// When we receive a blob, check if we've cached it. If it completes a set and we have the
@@ -46,29 +45,36 @@ impl <T: EthSpec> GossipBlobCache<T> {
/// cached, verify the block and import it.
///
/// This should only accept gossip verified blobs, so we should not have to worry about dupes.
pub fn put_blob(&self, blob: Arc<BlobSidecar<T>>) -> Result<(),BlobCacheError> {
pub fn put_blob(&self, blob: Arc<BlobSidecar<T>>) -> Result<(), BlobCacheError> {
// TODO(remove clones)
let verified = validate_blob(&self.kzg, blob.blob.clone(), blob.kzg_commitment.clone(), blob.kzg_proof)?;
let verified = validate_blob(
&self.kzg,
blob.blob.clone(),
blob.kzg_commitment.clone(),
blob.kzg_proof,
)?;
if verified {
let mut blob_cache = self.gossip_blob_cache.lock();
// Gossip cache.
blob_cache.entry(blob.block_root)
blob_cache
.entry(blob.block_root)
.and_modify(|mut inner| {
// All blobs reaching this cache should be gossip verified and gossip verification
// should filter duplicates, as well as validate indices.
inner.verified_blobs.insert(blob.index as usize, blob.clone());
inner
.verified_blobs
.insert(blob.index as usize, blob.clone());
if let Some (executed_block) = inner.executed_block.as_ref() {
if let Some(executed_block) = inner.executed_block.as_ref() {
// trigger reprocessing ?
}
})
.or_insert(GossipBlobCacheInner {
verified_blobs: vec![blob.clone()],
executed_block: None
});
verified_blobs: vec![blob.clone()],
executed_block: None,
});
drop(blob_cache);
@@ -80,18 +86,21 @@ impl <T: EthSpec> GossipBlobCache<T> {
}
pub fn put_block(&self, block: ExecutedBlock<T>) -> () {
let mut guard = self.gossip_blob_cache.lock();
guard.entry(block.block_root).and_modify(|cache|{
if cache.verified_blobs == block.block.message_eip4844().blob_kzg_commitments() {
// send to reprocessing queue ?
} else if let Some(dup) = cache.executed_block.insert(block) {
let mut guard = self.gossip_blob_cache.lock();
guard
.entry(block.block_root)
.and_modify(|cache| {
if cache.verified_blobs == block.block.message_eip4844().blob_kzg_commitments() {
// send to reprocessing queue ?
} else if let Some(dup) = cache.executed_block.insert(block) {
// return error
} else {
// log that we cached it
}
}).or_insert(GossipBlobCacheInner {
verified_blobs: vec![],
executed_block: Some(block)
});
})
.or_insert(GossipBlobCacheInner {
verified_blobs: vec![],
executed_block: Some(block),
});
}
}

View File

@@ -23,6 +23,7 @@ pub mod events;
pub mod execution_payload;
pub mod fork_choice_signal;
pub mod fork_revert;
pub mod gossip_blob_cache;
mod head_tracker;
pub mod historical_blocks;
pub mod kzg_utils;
@@ -51,7 +52,6 @@ pub mod test_utils;
mod timeout_rw_lock;
pub mod validator_monitor;
pub mod validator_pubkey_cache;
pub mod gossip_blob_cache;
pub use self::beacon_chain::{
AttestationProcessingOutcome, BeaconChain, BeaconChainTypes, BeaconStore, ChainSegmentResult,