Fork aware max values in rpc (#6847)

N/A


  In https://github.com/sigp/lighthouse/pull/6329 we changed `max_blobs_per_block` from a preset to a config value.
We weren't using the right value based on fork in that PR. This is a follow up PR to use the fork dependent values.

In the process, I also updated other places where we weren't using fork-dependent values from the ChainSpec.

Note to reviewer: easier to go through by commit
This commit is contained in:
Pawan Dhananjay
2025-01-29 11:42:13 -08:00
committed by GitHub
parent e7ea69647a
commit 4a07c08c4f
16 changed files with 203 additions and 114 deletions

View File

@@ -576,7 +576,7 @@ fn handle_rpc_request<E: EthSpec>(
BlocksByRootRequest::V2(BlocksByRootRequestV2 {
block_roots: RuntimeVariableList::from_ssz_bytes(
decoded_buffer,
spec.max_request_blocks as usize,
spec.max_request_blocks(current_fork),
)?,
}),
))),
@@ -584,32 +584,18 @@ fn handle_rpc_request<E: EthSpec>(
BlocksByRootRequest::V1(BlocksByRootRequestV1 {
block_roots: RuntimeVariableList::from_ssz_bytes(
decoded_buffer,
spec.max_request_blocks as usize,
spec.max_request_blocks(current_fork),
)?,
}),
))),
SupportedProtocol::BlobsByRangeV1 => {
let req = BlobsByRangeRequest::from_ssz_bytes(decoded_buffer)?;
let max_requested_blobs = req
.count
.saturating_mul(spec.max_blobs_per_block_by_fork(current_fork));
// TODO(pawan): change this to max_blobs_per_rpc_request in the alpha10 PR
if max_requested_blobs > spec.max_request_blob_sidecars {
return Err(RPCError::ErrorResponse(
RpcErrorResponse::InvalidRequest,
format!(
"requested exceeded limit. allowed: {}, requested: {}",
spec.max_request_blob_sidecars, max_requested_blobs
),
));
}
Ok(Some(RequestType::BlobsByRange(req)))
}
SupportedProtocol::BlobsByRangeV1 => Ok(Some(RequestType::BlobsByRange(
BlobsByRangeRequest::from_ssz_bytes(decoded_buffer)?,
))),
SupportedProtocol::BlobsByRootV1 => {
Ok(Some(RequestType::BlobsByRoot(BlobsByRootRequest {
blob_ids: RuntimeVariableList::from_ssz_bytes(
decoded_buffer,
spec.max_request_blob_sidecars as usize,
spec.max_request_blob_sidecars(current_fork),
)?,
})))
}
@@ -1097,21 +1083,21 @@ mod tests {
}
}
fn bbroot_request_v1(spec: &ChainSpec) -> BlocksByRootRequest {
BlocksByRootRequest::new_v1(vec![Hash256::zero()], spec)
fn bbroot_request_v1(fork_name: ForkName) -> BlocksByRootRequest {
BlocksByRootRequest::new_v1(vec![Hash256::zero()], &fork_context(fork_name))
}
fn bbroot_request_v2(spec: &ChainSpec) -> BlocksByRootRequest {
BlocksByRootRequest::new(vec![Hash256::zero()], spec)
fn bbroot_request_v2(fork_name: ForkName) -> BlocksByRootRequest {
BlocksByRootRequest::new(vec![Hash256::zero()], &fork_context(fork_name))
}
fn blbroot_request(spec: &ChainSpec) -> BlobsByRootRequest {
fn blbroot_request(fork_name: ForkName) -> BlobsByRootRequest {
BlobsByRootRequest::new(
vec![BlobIdentifier {
block_root: Hash256::zero(),
index: 0,
}],
spec,
&fork_context(fork_name),
)
}
@@ -1909,7 +1895,8 @@ mod tests {
#[test]
fn test_encode_then_decode_request() {
let chain_spec = Spec::default_spec();
let fork_context = fork_context(ForkName::Electra);
let chain_spec = fork_context.spec.clone();
let requests: &[RequestType<Spec>] = &[
RequestType::Ping(ping_message()),
@@ -1917,21 +1904,33 @@ mod tests {
RequestType::Goodbye(GoodbyeReason::Fault),
RequestType::BlocksByRange(bbrange_request_v1()),
RequestType::BlocksByRange(bbrange_request_v2()),
RequestType::BlocksByRoot(bbroot_request_v1(&chain_spec)),
RequestType::BlocksByRoot(bbroot_request_v2(&chain_spec)),
RequestType::MetaData(MetadataRequest::new_v1()),
RequestType::BlobsByRange(blbrange_request()),
RequestType::BlobsByRoot(blbroot_request(&chain_spec)),
RequestType::DataColumnsByRange(dcbrange_request()),
RequestType::DataColumnsByRoot(dcbroot_request(&chain_spec)),
RequestType::MetaData(MetadataRequest::new_v2()),
];
for req in requests.iter() {
for fork_name in ForkName::list_all() {
encode_then_decode_request(req.clone(), fork_name, &chain_spec);
}
}
// Request types that have different length limits depending on the fork
// Handled separately to have consistent `ForkName` across request and responses
let fork_dependent_requests = |fork_name| {
[
RequestType::BlobsByRoot(blbroot_request(fork_name)),
RequestType::BlocksByRoot(bbroot_request_v1(fork_name)),
RequestType::BlocksByRoot(bbroot_request_v2(fork_name)),
]
};
for fork_name in ForkName::list_all() {
let requests = fork_dependent_requests(fork_name);
for req in requests {
encode_then_decode_request(req.clone(), fork_name, &chain_spec);
}
}
}
/// Test a malicious snappy encoding for a V1 `Status` message where the attacker

View File

@@ -855,6 +855,45 @@ where
}
let (req, substream) = substream;
let current_fork = self.fork_context.current_fork();
let spec = &self.fork_context.spec;
match &req {
RequestType::BlocksByRange(request) => {
let max_allowed = spec.max_request_blocks(current_fork) as u64;
if *request.count() > max_allowed {
self.events_out.push(HandlerEvent::Err(HandlerErr::Inbound {
id: self.current_inbound_substream_id,
proto: Protocol::BlocksByRange,
error: RPCError::InvalidData(format!(
"requested exceeded limit. allowed: {}, requested: {}",
max_allowed,
request.count()
)),
}));
return self.shutdown(None);
}
}
RequestType::BlobsByRange(request) => {
let max_requested_blobs = request
.count
.saturating_mul(spec.max_blobs_per_block_by_fork(current_fork));
let max_allowed = spec.max_request_blob_sidecars(current_fork) as u64;
if max_requested_blobs > max_allowed {
self.events_out.push(HandlerEvent::Err(HandlerErr::Inbound {
id: self.current_inbound_substream_id,
proto: Protocol::BlobsByRange,
error: RPCError::InvalidData(format!(
"requested exceeded limit. allowed: {}, requested: {}",
max_allowed, max_requested_blobs
)),
}));
return self.shutdown(None);
}
}
_ => {}
};
let max_responses =
req.max_responses(self.fork_context.current_fork(), &self.fork_context.spec);

View File

@@ -15,12 +15,12 @@ use strum::IntoStaticStr;
use superstruct::superstruct;
use types::blob_sidecar::BlobIdentifier;
use types::light_client_update::MAX_REQUEST_LIGHT_CLIENT_UPDATES;
use types::ForkName;
use types::{
blob_sidecar::BlobSidecar, ChainSpec, ColumnIndex, DataColumnIdentifier, DataColumnSidecar,
Epoch, EthSpec, Hash256, LightClientBootstrap, LightClientFinalityUpdate,
LightClientOptimisticUpdate, LightClientUpdate, RuntimeVariableList, SignedBeaconBlock, Slot,
};
use types::{ForkContext, ForkName};
/// Maximum length of error message.
pub type MaxErrorLen = U256;
@@ -420,15 +420,19 @@ pub struct BlocksByRootRequest {
}
impl BlocksByRootRequest {
pub fn new(block_roots: Vec<Hash256>, spec: &ChainSpec) -> Self {
let block_roots =
RuntimeVariableList::from_vec(block_roots, spec.max_request_blocks as usize);
pub fn new(block_roots: Vec<Hash256>, fork_context: &ForkContext) -> Self {
let max_request_blocks = fork_context
.spec
.max_request_blocks(fork_context.current_fork());
let block_roots = RuntimeVariableList::from_vec(block_roots, max_request_blocks);
Self::V2(BlocksByRootRequestV2 { block_roots })
}
pub fn new_v1(block_roots: Vec<Hash256>, spec: &ChainSpec) -> Self {
let block_roots =
RuntimeVariableList::from_vec(block_roots, spec.max_request_blocks as usize);
pub fn new_v1(block_roots: Vec<Hash256>, fork_context: &ForkContext) -> Self {
let max_request_blocks = fork_context
.spec
.max_request_blocks(fork_context.current_fork());
let block_roots = RuntimeVariableList::from_vec(block_roots, max_request_blocks);
Self::V1(BlocksByRootRequestV1 { block_roots })
}
}
@@ -441,9 +445,11 @@ pub struct BlobsByRootRequest {
}
impl BlobsByRootRequest {
pub fn new(blob_ids: Vec<BlobIdentifier>, spec: &ChainSpec) -> Self {
let blob_ids =
RuntimeVariableList::from_vec(blob_ids, spec.max_request_blob_sidecars as usize);
pub fn new(blob_ids: Vec<BlobIdentifier>, fork_context: &ForkContext) -> Self {
let max_request_blob_sidecars = fork_context
.spec
.max_request_blob_sidecars(fork_context.current_fork());
let blob_ids = RuntimeVariableList::from_vec(blob_ids, max_request_blob_sidecars);
Self { blob_ids }
}
}

View File

@@ -282,7 +282,7 @@ impl<E: EthSpec> Network<E> {
let max_topics = ctx.chain_spec.attestation_subnet_count as usize
+ SYNC_COMMITTEE_SUBNET_COUNT as usize
+ ctx.chain_spec.blob_sidecar_subnet_count_electra as usize
+ ctx.chain_spec.blob_sidecar_subnet_count_max() as usize
+ ctx.chain_spec.data_column_sidecar_subnet_count as usize
+ BASE_CORE_TOPICS.len()
+ ALTAIR_CORE_TOPICS.len()

View File

@@ -263,11 +263,7 @@ pub(crate) fn create_whitelist_filter(
for id in 0..sync_committee_subnet_count {
add(SyncCommitteeMessage(SyncSubnetId::new(id)));
}
let blob_subnet_count = if spec.electra_fork_epoch.is_some() {
spec.blob_sidecar_subnet_count_electra
} else {
spec.blob_sidecar_subnet_count
};
let blob_subnet_count = spec.blob_sidecar_subnet_count_max();
for id in 0..blob_subnet_count {
add(BlobSidecar(id));
}

View File

@@ -51,7 +51,7 @@ pub fn fork_core_topics<E: EthSpec>(fork_name: &ForkName, spec: &ChainSpec) -> V
ForkName::Deneb => {
// All of deneb blob topics are core topics
let mut deneb_blob_topics = Vec::new();
for i in 0..spec.blob_sidecar_subnet_count {
for i in 0..spec.blob_sidecar_subnet_count(ForkName::Deneb) {
deneb_blob_topics.push(GossipKind::BlobSidecar(i));
}
deneb_blob_topics
@@ -59,7 +59,7 @@ pub fn fork_core_topics<E: EthSpec>(fork_name: &ForkName, spec: &ChainSpec) -> V
ForkName::Electra => {
// All of electra blob topics are core topics
let mut electra_blob_topics = Vec::new();
for i in 0..spec.blob_sidecar_subnet_count_electra {
for i in 0..spec.blob_sidecar_subnet_count(ForkName::Electra) {
electra_blob_topics.push(GossipKind::BlobSidecar(i));
}
electra_blob_topics

View File

@@ -16,7 +16,7 @@ use tokio::time::sleep;
use types::{
BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockBellatrix, BlobSidecar, ChainSpec,
EmptyBlock, Epoch, EthSpec, FixedBytesExtended, ForkContext, ForkName, Hash256, MinimalEthSpec,
Signature, SignedBeaconBlock, Slot,
RuntimeVariableList, Signature, SignedBeaconBlock, Slot,
};
type E = MinimalEthSpec;
@@ -810,7 +810,9 @@ fn test_tcp_blocks_by_root_chunked_rpc() {
.await;
// BlocksByRoot Request
let rpc_request = RequestType::BlocksByRoot(BlocksByRootRequest::new(
let rpc_request =
RequestType::BlocksByRoot(BlocksByRootRequest::V2(BlocksByRootRequestV2 {
block_roots: RuntimeVariableList::from_vec(
vec![
Hash256::zero(),
Hash256::zero(),
@@ -819,8 +821,9 @@ fn test_tcp_blocks_by_root_chunked_rpc() {
Hash256::zero(),
Hash256::zero(),
],
&spec,
));
spec.max_request_blocks_upper_bound(),
),
}));
// BlocksByRoot Response
let full_block = BeaconBlock::Base(BeaconBlockBase::<E>::full(&spec));
@@ -953,7 +956,9 @@ fn test_tcp_blocks_by_root_chunked_rpc_terminates_correctly() {
.await;
// BlocksByRoot Request
let rpc_request = RequestType::BlocksByRoot(BlocksByRootRequest::new(
let rpc_request =
RequestType::BlocksByRoot(BlocksByRootRequest::V2(BlocksByRootRequestV2 {
block_roots: RuntimeVariableList::from_vec(
vec![
Hash256::zero(),
Hash256::zero(),
@@ -966,8 +971,9 @@ fn test_tcp_blocks_by_root_chunked_rpc_terminates_correctly() {
Hash256::zero(),
Hash256::zero(),
],
&spec,
));
spec.max_request_blocks_upper_bound(),
),
}));
// BlocksByRoot Response
let full_block = BeaconBlock::Base(BeaconBlockBase::<E>::full(&spec));

View File

@@ -659,24 +659,6 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
"start_slot" => req.start_slot(),
);
// Should not send more than max request blocks
let max_request_size =
self.chain
.epoch()
.map_or(self.chain.spec.max_request_blocks, |epoch| {
if self.chain.spec.fork_name_at_epoch(epoch).deneb_enabled() {
self.chain.spec.max_request_blocks_deneb
} else {
self.chain.spec.max_request_blocks
}
});
if *req.count() > max_request_size {
return Err((
RpcErrorResponse::InvalidRequest,
"Request exceeded max size",
));
}
let forwards_block_root_iter = match self
.chain
.forwards_iter_block_roots(Slot::from(*req.start_slot()))

View File

@@ -28,7 +28,7 @@ use std::sync::Arc;
use std::time::{Duration, SystemTime, UNIX_EPOCH};
use tokio::sync::mpsc;
use tokio_stream::wrappers::UnboundedReceiverStream;
use types::{BlobSidecar, DataColumnSidecar, EthSpec, SignedBeaconBlock};
use types::{BlobSidecar, DataColumnSidecar, EthSpec, ForkContext, SignedBeaconBlock};
/// Handles messages from the network and routes them to the appropriate service to be handled.
pub struct Router<T: BeaconChainTypes> {
@@ -90,6 +90,7 @@ impl<T: BeaconChainTypes> Router<T> {
invalid_block_storage: InvalidBlockStorage,
beacon_processor_send: BeaconProcessorSend<T::EthSpec>,
beacon_processor_reprocess_tx: mpsc::Sender<ReprocessQueueMessage>,
fork_context: Arc<ForkContext>,
log: slog::Logger,
) -> Result<mpsc::UnboundedSender<RouterMessage<T::EthSpec>>, String> {
let message_handler_log = log.new(o!("service"=> "router"));
@@ -122,6 +123,7 @@ impl<T: BeaconChainTypes> Router<T> {
network_send.clone(),
network_beacon_processor.clone(),
sync_recv,
fork_context,
sync_logger,
);

View File

@@ -312,6 +312,7 @@ impl<T: BeaconChainTypes> NetworkService<T> {
invalid_block_storage,
beacon_processor_send,
beacon_processor_reprocess_tx,
fork_context.clone(),
network_log.clone(),
)?;

View File

@@ -69,7 +69,9 @@ use std::ops::Sub;
use std::sync::Arc;
use std::time::Duration;
use tokio::sync::mpsc;
use types::{BlobSidecar, DataColumnSidecar, EthSpec, Hash256, SignedBeaconBlock, Slot};
use types::{
BlobSidecar, DataColumnSidecar, EthSpec, ForkContext, Hash256, SignedBeaconBlock, Slot,
};
#[cfg(test)]
use types::ColumnIndex;
@@ -258,10 +260,11 @@ pub fn spawn<T: BeaconChainTypes>(
network_send: mpsc::UnboundedSender<NetworkMessage<T::EthSpec>>,
beacon_processor: Arc<NetworkBeaconProcessor<T>>,
sync_recv: mpsc::UnboundedReceiver<SyncMessage<T::EthSpec>>,
fork_context: Arc<ForkContext>,
log: slog::Logger,
) {
assert!(
beacon_chain.spec.max_request_blocks >= T::EthSpec::slots_per_epoch() * EPOCHS_PER_BATCH,
beacon_chain.spec.max_request_blocks(fork_context.current_fork()) as u64 >= T::EthSpec::slots_per_epoch() * EPOCHS_PER_BATCH,
"Max blocks that can be requested in a single batch greater than max allowed blocks in a single request"
);
@@ -272,6 +275,7 @@ pub fn spawn<T: BeaconChainTypes>(
beacon_processor,
sync_recv,
SamplingConfig::Default,
fork_context,
log.clone(),
);
@@ -287,6 +291,7 @@ impl<T: BeaconChainTypes> SyncManager<T> {
beacon_processor: Arc<NetworkBeaconProcessor<T>>,
sync_recv: mpsc::UnboundedReceiver<SyncMessage<T::EthSpec>>,
sampling_config: SamplingConfig,
fork_context: Arc<ForkContext>,
log: slog::Logger,
) -> Self {
let network_globals = beacon_processor.network_globals.clone();
@@ -297,6 +302,7 @@ impl<T: BeaconChainTypes> SyncManager<T> {
network_send,
beacon_processor.clone(),
beacon_chain.clone(),
fork_context.clone(),
log.clone(),
),
range_sync: RangeSync::new(

View File

@@ -43,8 +43,8 @@ use std::time::Duration;
use tokio::sync::mpsc;
use types::blob_sidecar::FixedBlobSidecarList;
use types::{
BlobSidecar, ColumnIndex, DataColumnSidecar, DataColumnSidecarList, EthSpec, Hash256,
SignedBeaconBlock, Slot,
BlobSidecar, ColumnIndex, DataColumnSidecar, DataColumnSidecarList, EthSpec, ForkContext,
Hash256, SignedBeaconBlock, Slot,
};
pub mod custody;
@@ -216,6 +216,8 @@ pub struct SyncNetworkContext<T: BeaconChainTypes> {
pub chain: Arc<BeaconChain<T>>,
fork_context: Arc<ForkContext>,
/// Logger for the `SyncNetworkContext`.
pub log: slog::Logger,
}
@@ -244,6 +246,7 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
network_send: mpsc::UnboundedSender<NetworkMessage<T::EthSpec>>,
network_beacon_processor: Arc<NetworkBeaconProcessor<T>>,
chain: Arc<BeaconChain<T>>,
fork_context: Arc<ForkContext>,
log: slog::Logger,
) -> Self {
SyncNetworkContext {
@@ -257,6 +260,7 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
range_block_components_requests: FnvHashMap::default(),
network_beacon_processor,
chain,
fork_context,
log,
}
}
@@ -455,7 +459,6 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
(None, None)
};
// TODO(pawan): this would break if a batch contains multiple epochs
let max_blobs_len = self.chain.spec.max_blobs_per_block(epoch);
let info = RangeBlockComponentsRequest::new(
expected_blobs,
@@ -624,7 +627,7 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
self.network_send
.send(NetworkMessage::SendRequest {
peer_id,
request: RequestType::BlocksByRoot(request.into_request(&self.chain.spec)),
request: RequestType::BlocksByRoot(request.into_request(&self.fork_context)),
request_id: AppRequestId::Sync(SyncRequestId::SingleBlock { id }),
})
.map_err(|_| RpcRequestSendError::NetworkSendError)?;
@@ -706,7 +709,7 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
self.network_send
.send(NetworkMessage::SendRequest {
peer_id,
request: RequestType::BlobsByRoot(request.clone().into_request(&self.chain.spec)),
request: RequestType::BlobsByRoot(request.clone().into_request(&self.fork_context)),
request_id: AppRequestId::Sync(SyncRequestId::SingleBlob { id }),
})
.map_err(|_| RpcRequestSendError::NetworkSendError)?;

View File

@@ -1,6 +1,6 @@
use lighthouse_network::rpc::methods::BlobsByRootRequest;
use std::sync::Arc;
use types::{blob_sidecar::BlobIdentifier, BlobSidecar, ChainSpec, EthSpec, Hash256};
use types::{blob_sidecar::BlobIdentifier, BlobSidecar, EthSpec, ForkContext, Hash256};
use super::{ActiveRequestItems, LookupVerifyError};
@@ -11,7 +11,7 @@ pub struct BlobsByRootSingleBlockRequest {
}
impl BlobsByRootSingleBlockRequest {
pub fn into_request(self, spec: &ChainSpec) -> BlobsByRootRequest {
pub fn into_request(self, spec: &ForkContext) -> BlobsByRootRequest {
BlobsByRootRequest::new(
self.indices
.into_iter()

View File

@@ -1,7 +1,7 @@
use beacon_chain::get_block_root;
use lighthouse_network::rpc::BlocksByRootRequest;
use std::sync::Arc;
use types::{ChainSpec, EthSpec, Hash256, SignedBeaconBlock};
use types::{EthSpec, ForkContext, Hash256, SignedBeaconBlock};
use super::{ActiveRequestItems, LookupVerifyError};
@@ -9,8 +9,8 @@ use super::{ActiveRequestItems, LookupVerifyError};
pub struct BlocksByRootSingleRequest(pub Hash256);
impl BlocksByRootSingleRequest {
pub fn into_request(self, spec: &ChainSpec) -> BlocksByRootRequest {
BlocksByRootRequest::new(vec![self.0], spec)
pub fn into_request(self, fork_context: &ForkContext) -> BlocksByRootRequest {
BlocksByRootRequest::new(vec![self.0], fork_context)
}
}

View File

@@ -39,6 +39,7 @@ use lighthouse_network::{
use slog::info;
use slot_clock::{SlotClock, TestingSlotClock};
use tokio::sync::mpsc;
use types::ForkContext;
use types::{
data_column_sidecar::ColumnIndex,
test_utils::{SeedableRng, TestRandom, XorShiftRng},
@@ -92,6 +93,11 @@ impl TestRig {
.build();
let chain = harness.chain.clone();
let fork_context = Arc::new(ForkContext::new::<E>(
Slot::new(0),
chain.genesis_validators_root,
&chain.spec,
));
let (network_tx, network_rx) = mpsc::unbounded_channel();
let (sync_tx, sync_rx) = mpsc::unbounded_channel::<SyncMessage<E>>();
@@ -139,6 +145,7 @@ impl TestRig {
SamplingConfig::Custom {
required_successes: vec![SAMPLING_REQUIRED_SUCCESSES],
},
fork_context,
log.clone(),
),
harness,

View File

@@ -217,7 +217,7 @@ pub struct ChainSpec {
pub network_id: u8,
pub target_aggregators_per_committee: u64,
pub gossip_max_size: u64,
pub max_request_blocks: u64,
max_request_blocks: u64,
pub min_epochs_for_block_requests: u64,
pub max_chunk_size: u64,
pub ttfb_timeout: u64,
@@ -233,19 +233,19 @@ pub struct ChainSpec {
/*
* Networking Deneb
*/
pub max_request_blocks_deneb: u64,
pub max_request_blob_sidecars: u64,
max_request_blocks_deneb: u64,
max_request_blob_sidecars: u64,
pub max_request_data_column_sidecars: u64,
pub min_epochs_for_blob_sidecars_requests: u64,
pub blob_sidecar_subnet_count: u64,
pub max_blobs_per_block: u64,
blob_sidecar_subnet_count: u64,
max_blobs_per_block: u64,
/*
* Networking Electra
*/
max_blobs_per_block_electra: u64,
pub blob_sidecar_subnet_count_electra: u64,
pub max_request_blob_sidecars_electra: u64,
blob_sidecar_subnet_count_electra: u64,
max_request_blob_sidecars_electra: u64,
/*
* Networking Derived
@@ -625,6 +625,17 @@ impl ChainSpec {
}
}
/// Returns the highest possible value for `max_request_blocks` across all enabled forks.
///
/// Useful as an upper bound in testing.
pub fn max_request_blocks_upper_bound(&self) -> usize {
    // Deneb raises the blocks-by-range/root request limit; pick the
    // larger limit whenever the Deneb fork is scheduled for this spec.
    let bound = match self.deneb_fork_epoch {
        Some(_) => self.max_request_blocks_deneb,
        None => self.max_request_blocks,
    };
    bound as usize
}
pub fn max_request_blob_sidecars(&self, fork_name: ForkName) -> usize {
if fork_name.electra_enabled() {
self.max_request_blob_sidecars_electra as usize
@@ -633,6 +644,17 @@ impl ChainSpec {
}
}
/// Returns the highest possible value for `max_request_blobs` across all enabled forks.
///
/// Useful as an upper bound in testing.
pub fn max_request_blobs_upper_bound(&self) -> usize {
    // Electra raises the blob-sidecar request limit; use the larger
    // limit whenever the Electra fork is scheduled for this spec.
    let bound = match self.electra_fork_epoch {
        Some(_) => self.max_request_blob_sidecars_electra,
        None => self.max_request_blob_sidecars,
    };
    bound as usize
}
/// Return the value of `MAX_BLOBS_PER_BLOCK` appropriate for the fork at `epoch`.
pub fn max_blobs_per_block(&self, epoch: Epoch) -> u64 {
self.max_blobs_per_block_by_fork(self.fork_name_at_epoch(epoch))
@@ -647,6 +669,26 @@ impl ChainSpec {
}
}
/// Returns the `BLOB_SIDECAR_SUBNET_COUNT` appropriate for `fork_name`.
pub fn blob_sidecar_subnet_count(&self, fork_name: ForkName) -> u64 {
    // Electra introduces its own (larger) blob subnet count.
    match fork_name.electra_enabled() {
        true => self.blob_sidecar_subnet_count_electra,
        false => self.blob_sidecar_subnet_count,
    }
}
/// Returns the highest possible blob sidecar subnet count across all enabled forks.
///
/// Useful as an upper bound on the subnet count for a given run of lighthouse.
pub fn blob_sidecar_subnet_count_max(&self) -> u64 {
    // If Electra is never scheduled, the pre-Electra count is the maximum;
    // otherwise the Electra count applies.
    if self.electra_fork_epoch.is_none() {
        self.blob_sidecar_subnet_count
    } else {
        self.blob_sidecar_subnet_count_electra
    }
}
/// Returns the number of data columns per custody group.
pub fn data_columns_per_group(&self) -> u64 {
self.number_of_columns