diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 228c9ca237..c50f192678 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -103,6 +103,7 @@ use store::{ use task_executor::{ShutdownReason, TaskExecutor}; use tree_hash::TreeHash; use types::beacon_state::CloneConfig; +use types::consts::eip4844::MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS; use types::signed_block_and_blobs::BlockWrapper; use types::*; @@ -5421,9 +5422,24 @@ impl BeaconChain { /// The epoch at which we require a data availability check in block processing. /// `None` if the `Eip4844` fork is disabled. pub fn data_availability_boundary(&self) -> Option { - self.spec + self.spec.eip4844_fork_epoch.map(|fork_epoch| { + self.epoch().ok().map(|current_epoch|{ + std::cmp::max( + fork_epoch, + current_epoch - *MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS, + ) + }) + }).flatten() + } + + /// Returns `true` if we are at or past the `Eip4844` fork. This will always return `false` if + /// the `Eip4844` fork is disabled. 
+    pub fn is_data_availability_check_required(&self) -> Result<bool, Error> {
+        let current_epoch = self.epoch()?;
+        Ok(self.spec
             .eip4844_fork_epoch
-            .map(|e| std::cmp::max(e, self.head().finalized_checkpoint().epoch))
+            .map(|fork_epoch| fork_epoch <= current_epoch)
+            .unwrap_or(false))
     }
 }
diff --git a/beacon_node/beacon_chain/src/blob_verification.rs b/beacon_node/beacon_chain/src/blob_verification.rs
index 353ecc86cc..ea4ed6e14d 100644
--- a/beacon_node/beacon_chain/src/blob_verification.rs
+++ b/beacon_node/beacon_chain/src/blob_verification.rs
@@ -1,10 +1,10 @@
 use slot_clock::SlotClock;
 
 use crate::beacon_chain::{BeaconChain, BeaconChainTypes, MAXIMUM_GOSSIP_CLOCK_DISPARITY};
-use bls::PublicKey;
-use types::consts::eip4844::BLS_MODULUS;
 use crate::{kzg_utils, BeaconChainError};
+use bls::PublicKey;
 use state_processing::per_block_processing::eip4844::eip4844::verify_kzg_commitments_against_transactions;
+use types::consts::eip4844::BLS_MODULUS;
 use types::{BeaconStateError, BlobsSidecar, Hash256, KzgCommitment, Slot, Transactions};
 
 #[derive(Debug)]
diff --git a/beacon_node/lighthouse_network/src/rpc/methods.rs b/beacon_node/lighthouse_network/src/rpc/methods.rs
index 7b7786aa0f..53e6b67599 100644
--- a/beacon_node/lighthouse_network/src/rpc/methods.rs
+++ b/beacon_node/lighthouse_network/src/rpc/methods.rs
@@ -252,6 +252,13 @@ pub struct BlobsByRootRequest {
     pub block_roots: VariableList<Hash256, MaxRequestBlocks>,
 }
 
+impl From<BlocksByRootRequest> for BlobsByRootRequest {
+    fn from(r: BlocksByRootRequest) -> Self {
+        let BlocksByRootRequest { block_roots } = r;
+        Self { block_roots }
+    }
+}
+
 /* RPC Handling and Grouping */
 // Collection of enums and structs used by the Codecs to encode/decode RPC messages
 
diff --git a/beacon_node/network/src/sync/block_lookups/tests.rs b/beacon_node/network/src/sync/block_lookups/tests.rs
index 06dfdcdf41..21b6d6658d 100644
--- a/beacon_node/network/src/sync/block_lookups/tests.rs
+++ b/beacon_node/network/src/sync/block_lookups/tests.rs
@@ -10,11 +10,11 @@
 use
beacon_chain::builder::Witness; use beacon_chain::eth1_chain::CachingEth1Backend; use lighthouse_network::{NetworkGlobals, Request}; use slog::{Drain, Level}; -use slot_clock::SystemTimeSlotClock; +use slot_clock::{SlotClock, SystemTimeSlotClock}; use store::MemoryStore; use tokio::sync::mpsc; use types::test_utils::{SeedableRng, TestRandom, XorShiftRng}; -use types::MinimalEthSpec as E; +use types::{EthSpec, MainnetEthSpec, MinimalEthSpec as E, Slot}; type T = Witness, E, MemoryStore, MemoryStore>; @@ -55,6 +55,7 @@ impl TestRig { network_tx, globals, beacon_processor_tx, + chain, log.new(slog::o!("component" => "network_context")), ) }; diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 67efa3163a..c55e90cf46 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -231,6 +231,7 @@ pub fn spawn( network_send, network_globals.clone(), beacon_processor_send, + beacon_chain.clone(), log.clone(), ), range_sync: RangeSync::new(beacon_chain.clone(), log.clone()), diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index d7d48be74d..7680597e46 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -6,18 +6,21 @@ use super::range_sync::{BatchId, ChainId, ExpectedBatchTy}; use crate::beacon_processor::WorkEvent; use crate::service::{NetworkMessage, RequestId}; use crate::status::ToStatusMessage; -use beacon_chain::{BeaconChainTypes, EngineState}; +use beacon_chain::{BeaconChain, BeaconChainTypes, EngineState}; use fnv::FnvHashMap; use lighthouse_network::rpc::methods::BlobsByRangeRequest; use lighthouse_network::rpc::{BlocksByRangeRequest, BlocksByRootRequest, GoodbyeReason}; use lighthouse_network::{Client, NetworkGlobals, PeerAction, PeerId, ReportSource, Request}; use slog::{debug, trace, warn}; +use slot_clock::SlotClock; use 
std::collections::hash_map::Entry; use std::collections::VecDeque; use std::sync::Arc; use tokio::sync::mpsc; use types::signed_block_and_blobs::BlockWrapper; -use types::{BlobsSidecar, EthSpec, SignedBeaconBlock, SignedBeaconBlockAndBlobsSidecar}; +use types::{ + BlobsSidecar, ChainSpec, EthSpec, SignedBeaconBlock, SignedBeaconBlockAndBlobsSidecar, +}; #[derive(Debug, Default)] struct BlockBlobRequestInfo { @@ -94,6 +97,8 @@ pub struct SyncNetworkContext { /// Channel to send work to the beacon processor. beacon_processor_send: mpsc::Sender>, + chain: Arc>, + /// Logger for the `SyncNetworkContext`. log: slog::Logger, } @@ -103,6 +108,7 @@ impl SyncNetworkContext { network_send: mpsc::UnboundedSender>, network_globals: Arc>, beacon_processor_send: mpsc::Sender>, + chain: Arc>, log: slog::Logger, ) -> Self { SyncNetworkContext { @@ -115,6 +121,7 @@ impl SyncNetworkContext { backfill_sidecar_pair_requests: Default::default(), execution_engine_state: EngineState::Online, // always assume `Online` at the start beacon_processor_send, + chain, log, } } @@ -459,19 +466,25 @@ impl SyncNetworkContext { peer_id: PeerId, request: BlocksByRootRequest, ) -> Result { - //FIXME(sean) add prune depth logic here? - // D: YES - // MOREINFO: here depending of the boundaries we decide what kind of request we send, if we - // request just a block or if we request a block, glob pair. - - trace!( - self.log, - "Sending BlocksByRoot Request"; - "method" => "BlocksByRoot", - "count" => request.block_roots.len(), - "peer" => %peer_id - ); - let request = Request::BlocksByRoot(request); + let request = if self.chain.is_data_availability_check_required().map_err(|_|"Unable to read slot clock")? 
{ + trace!( + self.log, + "Sending BlobsByRoot Request"; + "method" => "BlobsByRoot", + "count" => request.block_roots.len(), + "peer" => %peer_id + ); + Request::BlobsByRoot(request.into()) + } else { + trace!( + self.log, + "Sending BlocksByRoot Request"; + "method" => "BlocksByRoot", + "count" => request.block_roots.len(), + "peer" => %peer_id + ); + Request::BlocksByRoot(request) + }; let id = self.next_id(); let request_id = RequestId::Sync(SyncRequestId::SingleBlock { id }); self.send_network_msg(NetworkMessage::SendRequest { @@ -488,14 +501,25 @@ impl SyncNetworkContext { peer_id: PeerId, request: BlocksByRootRequest, ) -> Result { - trace!( - self.log, - "Sending BlocksByRoot Request"; - "method" => "BlocksByRoot", - "count" => request.block_roots.len(), - "peer" => %peer_id - ); - let request = Request::BlocksByRoot(request); + let request = if self.chain.is_data_availability_check_required().map_err(|_|"Unable to read slot clock")? { + trace!( + self.log, + "Sending BlobsByRoot Request"; + "method" => "BlobsByRoot", + "count" => request.block_roots.len(), + "peer" => %peer_id + ); + Request::BlobsByRoot(request.into()) + } else { + trace!( + self.log, + "Sending BlocksByRoot Request"; + "method" => "BlocksByRoot", + "count" => request.block_roots.len(), + "peer" => %peer_id + ); + Request::BlocksByRoot(request) + }; let id = self.next_id(); let request_id = RequestId::Sync(SyncRequestId::ParentLookup { id }); self.send_network_msg(NetworkMessage::SendRequest { diff --git a/beacon_node/network/src/sync/range_sync/range.rs b/beacon_node/network/src/sync/range_sync/range.rs index ca5e133970..73e6f49eb0 100644 --- a/beacon_node/network/src/sync/range_sync/range.rs +++ b/beacon_node/network/src/sync/range_sync/range.rs @@ -388,11 +388,12 @@ mod tests { use slog::{o, Drain}; use tokio::sync::mpsc; - use slot_clock::SystemTimeSlotClock; + use slot_clock::{SlotClock, SystemTimeSlotClock}; use std::collections::HashSet; use std::sync::Arc; + use 
std::time::Duration; use store::MemoryStore; - use types::{Hash256, MinimalEthSpec as E}; + use types::{Hash256, MainnetEthSpec, MinimalEthSpec as E}; #[derive(Debug)] struct FakeStorage { @@ -606,6 +607,7 @@ mod tests { network_tx, globals.clone(), beacon_processor_tx, + chain, log.new(o!("component" => "network_context")), ); let test_rig = TestRig { diff --git a/consensus/types/src/consts.rs b/consensus/types/src/consts.rs index b13e3aa9c3..a335cbd7b2 100644 --- a/consensus/types/src/consts.rs +++ b/consensus/types/src/consts.rs @@ -23,7 +23,7 @@ pub mod merge { pub const INTERVALS_PER_SLOT: u64 = 3; } pub mod eip4844 { - use crate::Uint256; + use crate::{Epoch, Uint256}; use lazy_static::lazy_static; @@ -32,6 +32,7 @@ pub mod eip4844 { "52435875175126190479447740508185965837690552500527637822603658699938581184513" ) .expect("should initialize BLS_MODULUS"); + pub static ref MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS: Epoch = Epoch::from(4096_u64); } pub const BLOB_TX_TYPE: u8 = 5; pub const VERSIONED_HASH_VERSION_KZG: u8 = 1; diff --git a/testing/ef_tests/src/cases/operations.rs b/testing/ef_tests/src/cases/operations.rs index 190078507c..38af4d23b7 100644 --- a/testing/ef_tests/src/cases/operations.rs +++ b/testing/ef_tests/src/cases/operations.rs @@ -23,7 +23,7 @@ use std::fmt::Debug; #[cfg(not(all(feature = "withdrawals", feature = "withdrawals-processing")))] use std::marker::PhantomData; use std::path::Path; -#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))] +#[cfg(all(feature = "withdrawals"))] use types::SignedBlsToExecutionChange; use types::{ Attestation, AttesterSlashing, BeaconBlock, BeaconState, BlindedPayload, ChainSpec, Deposit, @@ -45,10 +45,7 @@ struct ExecutionMetadata { /// Newtype for testing withdrawals. 
 #[derive(Debug, Clone, Deserialize)]
 pub struct WithdrawalsPayload<E: EthSpec> {
-    #[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))]
     payload: FullPayload<E>,
-    #[cfg(not(all(feature = "withdrawals", feature = "withdrawals-processing")))]
-    _phantom_data: PhantomData<E>,
 }
 
 #[derive(Debug, Clone)]