merge cleanup

This commit is contained in:
realbigsean
2023-03-21 15:49:35 -04:00
parent 2bfb0bfc5e
commit 5e98326878
4 changed files with 76 additions and 60 deletions

View File

@@ -987,19 +987,9 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
&self, &self,
block_root: &Hash256, block_root: &Hash256,
) -> Result<Option<BlobSidecarList<T::EthSpec>>, Error> { ) -> Result<Option<BlobSidecarList<T::EthSpec>>, Error> {
// If there is no data availability boundary, the Eip4844 fork is disabled.
if let Some(finalized_data_availability_boundary) =
self.finalized_data_availability_boundary()
{
self.early_attester_cache self.early_attester_cache
.get_blobs(*block_root) .get_blobs(*block_root)
.map_or_else( .map_or_else(|| self.get_blobs(block_root), |blobs| Ok(Some(blobs)))
|| self.get_blobs(block_root, finalized_data_availability_boundary),
|blobs| Ok(Some(blobs)),
)
} else {
Ok(None)
}
} }
/// Returns the block at the given root, if any. /// Returns the block at the given root, if any.
@@ -2667,11 +2657,11 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
pub async fn process_blob( pub async fn process_blob(
self: &Arc<Self>, self: &Arc<Self>,
blob: BlobSidecar<T::EthSpec>, blob: Arc<BlobSidecar<T::EthSpec>>,
count_unrealized: CountUnrealized, count_unrealized: CountUnrealized,
) -> Result<AvailabilityProcessingStatus, BlockError<T::EthSpec>> { ) -> Result<AvailabilityProcessingStatus, BlockError<T::EthSpec>> {
self.check_availability_and_maybe_import( self.check_availability_and_maybe_import(
|chain| chain.data_availability_checker.put_blob(Arc::new(blob)), |chain| chain.data_availability_checker.put_blob(blob),
count_unrealized, count_unrealized,
) )
.await .await

View File

@@ -9,7 +9,6 @@ use crate::gossip_blob_cache::AvailabilityCheckError;
use crate::BeaconChainError; use crate::BeaconChainError;
use derivative::Derivative; use derivative::Derivative;
use state_processing::per_block_processing::eip4844::eip4844::verify_kzg_commitments_against_transactions; use state_processing::per_block_processing::eip4844::eip4844::verify_kzg_commitments_against_transactions;
use types::blob_sidecar::BlobSidecarList;
use types::{ use types::{
BeaconBlockRef, BeaconStateError, BlobSidecar, BlobSidecarList, Epoch, EthSpec, Hash256, BeaconBlockRef, BeaconStateError, BlobSidecar, BlobSidecarList, Epoch, EthSpec, Hash256,
KzgCommitment, SignedBeaconBlock, SignedBeaconBlockHeader, SignedBlobSidecar, Slot, KzgCommitment, SignedBeaconBlock, SignedBeaconBlockHeader, SignedBlobSidecar, Slot,
@@ -131,11 +130,11 @@ impl From<BeaconStateError> for BlobError {
/// the p2p network. /// the p2p network.
#[derive(Debug)] #[derive(Debug)]
pub struct GossipVerifiedBlob<T: EthSpec> { pub struct GossipVerifiedBlob<T: EthSpec> {
blob: BlobSidecar<T>, blob: Arc<BlobSidecar<T>>,
} }
impl<T: EthSpec> GossipVerifiedBlob<T> { impl<T: EthSpec> GossipVerifiedBlob<T> {
pub fn to_blob(self) -> BlobSidecar<T> { pub fn to_blob(self) -> Arc<BlobSidecar<T>> {
self.blob self.blob
} }
} }

View File

@@ -12,7 +12,6 @@ use lighthouse_network::rpc::*;
use lighthouse_network::{PeerId, PeerRequestId, ReportSource, Response, SyncInfo}; use lighthouse_network::{PeerId, PeerRequestId, ReportSource, Response, SyncInfo};
use slog::{debug, error, warn}; use slog::{debug, error, warn};
use slot_clock::SlotClock; use slot_clock::SlotClock;
use std::collections::{hash_map::Entry, HashMap};
use std::sync::Arc; use std::sync::Arc;
use task_executor::TaskExecutor; use task_executor::TaskExecutor;
use types::blob_sidecar::BlobIdentifier; use types::blob_sidecar::BlobIdentifier;
@@ -226,36 +225,45 @@ impl<T: BeaconChainTypes> Worker<T> {
executor.spawn( executor.spawn(
async move { async move {
let requested_blobs = request.blob_ids.len(); let requested_blobs = request.blob_ids.len();
let mut send_blob_count = 0; let send_blob_count = 0;
let mut send_response = true; let mut send_response = true;
for BlobIdentifier{ block_root: root, index: _index } in request.blob_ids.into_iter() {
let mut blob_list_results = HashMap::new(); match self
for BlobIdentifier{ block_root: root, index } in request.blob_ids.into_iter() {
let blob_list_result = match blob_list_results.entry(root) {
Entry::Vacant(entry) => {
entry.insert(self
.chain .chain
.get_blobs_checking_early_attester_cache(&root) .get_blobs_checking_early_attester_cache(&root)
.await) .await
} {
Entry::Occupied(entry) => { Ok(Some(_blob_sidecar_list)) => {
entry.into_mut() todo!();
} // //
}; // // TODO: HORRIBLE NSFW CODE AHEAD
// //
match blob_list_result.as_ref() { // let types::SignedBeaconBlockAndBlobsSidecar {beacon_block, blobs_sidecar} = block_and_blobs;
Ok(Some(blobs_sidecar_list)) => { // let types::BlobsSidecar{ beacon_block_root, beacon_block_slot, blobs: blob_bundle, kzg_aggregated_proof }: types::BlobsSidecar<_> = blobs_sidecar.as_ref().clone();
for blob_sidecar in blobs_sidecar_list.iter() { // // TODO: this should be unreachable after this is addressed seriously,
if blob_sidecar.index == index { // // so for now let's be ok with a panic in the expect.
self.send_response( // let block = beacon_block.message_eip4844().expect("We fucked up the block blob stuff");
peer_id, // // Intentionally not accessing the list directly
Response::BlobsByRoot(Some(blob_sidecar.clone())), // for (known_index, blob) in blob_bundle.into_iter().enumerate() {
request_id, // if (known_index as u64) == index {
); // let blob_sidecar = types::BlobSidecar{
send_blob_count += 1; // block_root: beacon_block_root,
break; // index,
} // slot: beacon_block_slot,
} // block_parent_root: block.parent_root,
// proposer_index: block.proposer_index,
// blob,
// kzg_commitment: block.body.blob_kzg_commitments[known_index], // TODO: needs to be stored in a more logical way so that this won't panic.
// kzg_proof: kzg_aggregated_proof // TODO: yeah
// };
// self.send_response(
// peer_id,
// Response::BlobsByRoot(Some(Arc::new(blob_sidecar))),
// request_id,
// );
// send_block_count += 1;
// }
// }
} }
Ok(None) => { Ok(None) => {
debug!( debug!(
@@ -829,16 +837,36 @@ impl<T: BeaconChainTypes> Worker<T> {
let mut send_response = true; let mut send_response = true;
for root in block_roots { for root in block_roots {
match self.chain.get_blobs(&root, data_availability_boundary) { match self.chain.get_blobs(&root) {
Ok(Some(blob_sidecar_list)) => { Ok(Some(_blobs)) => {
for blob_sidecar in blob_sidecar_list.iter() { todo!();
blobs_sent += 1; // // TODO: more GROSS code ahead. Reader beware
self.send_network_message(NetworkMessage::SendResponse { // let types::BlobsSidecar {
peer_id, // beacon_block_root,
response: Response::BlobsByRange(Some(blob_sidecar.clone())), // beacon_block_slot,
id: request_id, // blobs: blob_bundle,
}); // kzg_aggregated_proof: _,
} // }: types::BlobsSidecar<_> = blobs;
//
// for (blob_index, blob) in blob_bundle.into_iter().enumerate() {
// let blob_sidecar = types::BlobSidecar {
// block_root: beacon_block_root,
// index: blob_index as u64,
// slot: beacon_block_slot,
// block_parent_root: Hash256::zero(),
// proposer_index: 0,
// blob,
// kzg_commitment: types::KzgCommitment::default(),
// kzg_proof: types::KzgProof::default(),
// };
//
// blobs_sent += 1;
// self.send_network_message(NetworkMessage::SendResponse {
// peer_id,
// response: Response::BlobsByRange(Some(Arc::new(blob_sidecar))),
// id: request_id,
// });
// }
} }
Ok(None) => { Ok(None) => {
error!( error!(

View File

@@ -47,8 +47,7 @@ pub struct BlobSidecar<T: EthSpec> {
pub kzg_proof: KzgProof, pub kzg_proof: KzgProof,
} }
pub type BlobSidecarList<T> = pub type BlobSidecarList<T> = VariableList<Arc<BlobSidecar<T>>, <T as EthSpec>::MaxBlobsPerBlock>;
VariableList<Arc<BlobSidecar<T>>, <T as EthSpec>::MaxBlobsPerBlock>;
pub type Blobs<T> = VariableList<Blob<T>, <T as EthSpec>::MaxExtraDataBytes>; pub type Blobs<T> = VariableList<Blob<T>, <T as EthSpec>::MaxExtraDataBytes>;
impl<T: EthSpec> SignedRoot for BlobSidecar<T> {} impl<T: EthSpec> SignedRoot for BlobSidecar<T> {}