Make max_blobs_per_block a config parameter (#6329)

* First pass

* Add restrictions to RuntimeVariableList api

* Use empty_uninitialized and fix warnings

* Fix some todos

* Merge branch 'unstable' into max-blobs-preset

* Fix take impl on RuntimeFixedList

* cleanup

* Fix test compilations

* Fix some more tests

* Fix test from unstable

* Merge branch 'unstable' into max-blobs-preset

* Merge remote-tracking branch 'origin/unstable' into max-blobs-preset

* Remove footgun function

* Minor simplifications

* Move from preset to config

* Fix typo

* Revert "Remove footgun function"

This reverts commit de01f923c7.

* Try fixing tests

* Thread through ChainSpec

* Fix release tests

* Move RuntimeFixedVector into module and rename

* Add test

* Remove empty RuntimeVarList awfulness

* Fix tests

* Simplify BlobSidecarListFromRoot

* Merge remote-tracking branch 'origin/unstable' into max-blobs-preset

* Bump quota to account for new target (6)

* Remove clone

* Fix issue from review

* Try to remove ugliness

* Merge branch 'unstable' into max-blobs-preset

* Fix max value

* Fix doctest

* Fix formatting

* Fix max check

* Delete hardcoded max_blobs_per_block in RPC limits

* Merge remote-tracking branch 'origin/unstable' into max-blobs-preset
This commit is contained in:
Pawan Dhananjay
2025-01-10 12:04:58 +05:30
committed by GitHub
parent ecdf2d891f
commit 05727290fb
61 changed files with 655 additions and 335 deletions

View File

@@ -186,6 +186,7 @@ impl<E: EthSpec> Decoder for SSZSnappyInboundCodec<E> {
handle_rpc_request(
self.protocol.versioned_protocol,
&decoded_buffer,
self.fork_context.current_fork(),
&self.fork_context.spec,
)
}
@@ -555,6 +556,7 @@ fn handle_length(
fn handle_rpc_request<E: EthSpec>(
versioned_protocol: SupportedProtocol,
decoded_buffer: &[u8],
current_fork: ForkName,
spec: &ChainSpec,
) -> Result<Option<RequestType<E>>, RPCError> {
match versioned_protocol {
@@ -586,9 +588,23 @@ fn handle_rpc_request<E: EthSpec>(
)?,
}),
))),
SupportedProtocol::BlobsByRangeV1 => Ok(Some(RequestType::BlobsByRange(
BlobsByRangeRequest::from_ssz_bytes(decoded_buffer)?,
))),
SupportedProtocol::BlobsByRangeV1 => {
let req = BlobsByRangeRequest::from_ssz_bytes(decoded_buffer)?;
let max_requested_blobs = req
.count
.saturating_mul(spec.max_blobs_per_block_by_fork(current_fork));
// TODO(pawan): change this to max_blobs_per_rpc_request in the alpha10 PR
if max_requested_blobs > spec.max_request_blob_sidecars {
return Err(RPCError::ErrorResponse(
RpcErrorResponse::InvalidRequest,
format!(
"requested exceeded limit. allowed: {}, requested: {}",
spec.max_request_blob_sidecars, max_requested_blobs
),
));
}
Ok(Some(RequestType::BlobsByRange(req)))
}
SupportedProtocol::BlobsByRootV1 => {
Ok(Some(RequestType::BlobsByRoot(BlobsByRootRequest {
blob_ids: RuntimeVariableList::from_ssz_bytes(

View File

@@ -110,8 +110,8 @@ impl RateLimiterConfig {
pub const DEFAULT_BLOCKS_BY_RANGE_QUOTA: Quota = Quota::n_every(128, 10);
pub const DEFAULT_BLOCKS_BY_ROOT_QUOTA: Quota = Quota::n_every(128, 10);
// `DEFAULT_BLOCKS_BY_RANGE_QUOTA` * (target + 1) to account for high usage
pub const DEFAULT_BLOBS_BY_RANGE_QUOTA: Quota = Quota::n_every(512, 10);
pub const DEFAULT_BLOBS_BY_ROOT_QUOTA: Quota = Quota::n_every(512, 10);
pub const DEFAULT_BLOBS_BY_RANGE_QUOTA: Quota = Quota::n_every(896, 10);
pub const DEFAULT_BLOBS_BY_ROOT_QUOTA: Quota = Quota::n_every(896, 10);
// 320 blocks worth of columns for regular node, or 40 blocks for supernode.
// Range sync load balances when requesting blocks, and each batch is 32 blocks.
pub const DEFAULT_DATA_COLUMNS_BY_RANGE_QUOTA: Quota = Quota::n_every(5120, 10);

View File

@@ -855,7 +855,8 @@ where
}
let (req, substream) = substream;
let max_responses = req.max_responses();
let max_responses =
req.max_responses(self.fork_context.current_fork(), &self.fork_context.spec);
// store requests that expect responses
if max_responses > 0 {
@@ -924,7 +925,8 @@ where
}
// add the stream to substreams if we expect a response, otherwise drop the stream.
let max_responses = request.max_responses();
let max_responses =
request.max_responses(self.fork_context.current_fork(), &self.fork_context.spec);
if max_responses > 0 {
let max_remaining_chunks = if request.expect_exactly_one_response() {
// Currently enforced only for multiple responses

View File

@@ -15,6 +15,7 @@ use strum::IntoStaticStr;
use superstruct::superstruct;
use types::blob_sidecar::BlobIdentifier;
use types::light_client_update::MAX_REQUEST_LIGHT_CLIENT_UPDATES;
use types::ForkName;
use types::{
blob_sidecar::BlobSidecar, ChainSpec, ColumnIndex, DataColumnIdentifier, DataColumnSidecar,
Epoch, EthSpec, Hash256, LightClientBootstrap, LightClientFinalityUpdate,
@@ -327,8 +328,9 @@ pub struct BlobsByRangeRequest {
}
impl BlobsByRangeRequest {
pub fn max_blobs_requested<E: EthSpec>(&self) -> u64 {
self.count.saturating_mul(E::max_blobs_per_block() as u64)
pub fn max_blobs_requested(&self, current_fork: ForkName, spec: &ChainSpec) -> u64 {
let max_blobs_per_block = spec.max_blobs_per_block_by_fork(current_fork);
self.count.saturating_mul(max_blobs_per_block)
}
}

View File

@@ -181,12 +181,13 @@ impl<Id: ReqId, E: EthSpec> RPC<Id, E> {
let inbound_limiter = inbound_rate_limiter_config.map(|config| {
debug!(log, "Using inbound rate limiting params"; "config" => ?config);
RateLimiter::new_with_config(config.0)
RateLimiter::new_with_config(config.0, fork_context.clone())
.expect("Inbound limiter configuration parameters are valid")
});
let self_limiter = outbound_rate_limiter_config.map(|config| {
SelfRateLimiter::new(config, log.clone()).expect("Configuration parameters are valid")
SelfRateLimiter::new(config, fork_context.clone(), log.clone())
.expect("Configuration parameters are valid")
});
RPC {

View File

@@ -86,6 +86,10 @@ pub static SIGNED_BEACON_BLOCK_FULU_MAX_WITHOUT_PAYLOAD: LazyLock<usize> = LazyL
/// We calculate the value from its fields instead of constructing the block and checking the length.
/// Note: This is only the theoretical upper bound. We further bound the max size we receive over the network
/// with `max_chunk_size`.
///
/// FIXME: Given that these limits are useless we should probably delete them. See:
///
/// https://github.com/sigp/lighthouse/issues/6790
pub static SIGNED_BEACON_BLOCK_BELLATRIX_MAX: LazyLock<usize> =
LazyLock::new(|| // Size of a full altair block
*SIGNED_BEACON_BLOCK_ALTAIR_MAX
@@ -102,7 +106,6 @@ pub static SIGNED_BEACON_BLOCK_DENEB_MAX: LazyLock<usize> = LazyLock::new(|| {
*SIGNED_BEACON_BLOCK_CAPELLA_MAX_WITHOUT_PAYLOAD
+ types::ExecutionPayload::<MainnetEthSpec>::max_execution_payload_deneb_size() // adding max size of execution payload (~16gb)
+ ssz::BYTES_PER_LENGTH_OFFSET // Adding the additional offsets for the `ExecutionPayload`
+ (<types::KzgCommitment as Encode>::ssz_fixed_len() * <MainnetEthSpec>::max_blobs_per_block())
+ ssz::BYTES_PER_LENGTH_OFFSET
}); // Length offset for the blob commitments field.
//
@@ -110,7 +113,6 @@ pub static SIGNED_BEACON_BLOCK_ELECTRA_MAX: LazyLock<usize> = LazyLock::new(|| {
*SIGNED_BEACON_BLOCK_ELECTRA_MAX_WITHOUT_PAYLOAD
+ types::ExecutionPayload::<MainnetEthSpec>::max_execution_payload_electra_size() // adding max size of execution payload (~16gb)
+ ssz::BYTES_PER_LENGTH_OFFSET // Adding the additional ssz offset for the `ExecutionPayload` field
+ (<types::KzgCommitment as Encode>::ssz_fixed_len() * <MainnetEthSpec>::max_blobs_per_block())
+ ssz::BYTES_PER_LENGTH_OFFSET
}); // Length offset for the blob commitments field.
@@ -118,8 +120,6 @@ pub static SIGNED_BEACON_BLOCK_FULU_MAX: LazyLock<usize> = LazyLock::new(|| {
*SIGNED_BEACON_BLOCK_FULU_MAX_WITHOUT_PAYLOAD
+ types::ExecutionPayload::<MainnetEthSpec>::max_execution_payload_fulu_size()
+ ssz::BYTES_PER_LENGTH_OFFSET
+ (<types::KzgCommitment as Encode>::ssz_fixed_len()
* <MainnetEthSpec>::max_blobs_per_block())
+ ssz::BYTES_PER_LENGTH_OFFSET
});
@@ -129,14 +129,6 @@ pub static BLOB_SIDECAR_SIZE: LazyLock<usize> =
pub static BLOB_SIDECAR_SIZE_MINIMAL: LazyLock<usize> =
LazyLock::new(BlobSidecar::<MinimalEthSpec>::max_size);
pub static DATA_COLUMNS_SIDECAR_MIN: LazyLock<usize> = LazyLock::new(|| {
DataColumnSidecar::<MainnetEthSpec>::empty()
.as_ssz_bytes()
.len()
});
pub static DATA_COLUMNS_SIDECAR_MAX: LazyLock<usize> =
LazyLock::new(DataColumnSidecar::<MainnetEthSpec>::max_size);
pub static ERROR_TYPE_MIN: LazyLock<usize> = LazyLock::new(|| {
VariableList::<u8, MaxErrorLen>::from(Vec::<u8>::new())
.as_ssz_bytes()
@@ -635,8 +627,10 @@ impl ProtocolId {
Protocol::BlocksByRoot => rpc_block_limits_by_fork(fork_context.current_fork()),
Protocol::BlobsByRange => rpc_blob_limits::<E>(),
Protocol::BlobsByRoot => rpc_blob_limits::<E>(),
Protocol::DataColumnsByRoot => rpc_data_column_limits(),
Protocol::DataColumnsByRange => rpc_data_column_limits(),
Protocol::DataColumnsByRoot => rpc_data_column_limits::<E>(fork_context.current_fork()),
Protocol::DataColumnsByRange => {
rpc_data_column_limits::<E>(fork_context.current_fork())
}
Protocol::Ping => RpcLimits::new(
<Ping as Encode>::ssz_fixed_len(),
<Ping as Encode>::ssz_fixed_len(),
@@ -716,8 +710,14 @@ pub fn rpc_blob_limits<E: EthSpec>() -> RpcLimits {
}
}
pub fn rpc_data_column_limits() -> RpcLimits {
RpcLimits::new(*DATA_COLUMNS_SIDECAR_MIN, *DATA_COLUMNS_SIDECAR_MAX)
// TODO(peerdas): fix hardcoded max here
pub fn rpc_data_column_limits<E: EthSpec>(fork_name: ForkName) -> RpcLimits {
RpcLimits::new(
DataColumnSidecar::<E>::empty().as_ssz_bytes().len(),
DataColumnSidecar::<E>::max_size(
E::default_spec().max_blobs_per_block_by_fork(fork_name) as usize
),
)
}
/* Inbound upgrade */
@@ -815,13 +815,13 @@ impl<E: EthSpec> RequestType<E> {
/* These functions are used in the handler for stream management */
/// Maximum number of responses expected for this request.
pub fn max_responses(&self) -> u64 {
pub fn max_responses(&self, current_fork: ForkName, spec: &ChainSpec) -> u64 {
match self {
RequestType::Status(_) => 1,
RequestType::Goodbye(_) => 0,
RequestType::BlocksByRange(req) => *req.count(),
RequestType::BlocksByRoot(req) => req.block_roots().len() as u64,
RequestType::BlobsByRange(req) => req.max_blobs_requested::<E>(),
RequestType::BlobsByRange(req) => req.max_blobs_requested(current_fork, spec),
RequestType::BlobsByRoot(req) => req.blob_ids.len() as u64,
RequestType::DataColumnsByRoot(req) => req.data_column_ids.len() as u64,
RequestType::DataColumnsByRange(req) => req.max_requested::<E>(),

View File

@@ -6,10 +6,11 @@ use serde::{Deserialize, Serialize};
use std::future::Future;
use std::hash::Hash;
use std::pin::Pin;
use std::sync::Arc;
use std::task::{Context, Poll};
use std::time::{Duration, Instant};
use tokio::time::Interval;
use types::EthSpec;
use types::{ChainSpec, EthSpec, ForkContext, ForkName};
/// Nanoseconds since a given time.
// Maintained as u64 to reduce footprint
@@ -109,6 +110,7 @@ pub struct RPCRateLimiter {
lc_finality_update_rl: Limiter<PeerId>,
/// LightClientUpdatesByRange rate limiter.
lc_updates_by_range_rl: Limiter<PeerId>,
fork_context: Arc<ForkContext>,
}
/// Error type for non conformant requests
@@ -176,7 +178,7 @@ impl RPCRateLimiterBuilder {
self
}
pub fn build(self) -> Result<RPCRateLimiter, &'static str> {
pub fn build(self, fork_context: Arc<ForkContext>) -> Result<RPCRateLimiter, &'static str> {
// get our quotas
let ping_quota = self.ping_quota.ok_or("Ping quota not specified")?;
let metadata_quota = self.metadata_quota.ok_or("MetaData quota not specified")?;
@@ -253,13 +255,14 @@ impl RPCRateLimiterBuilder {
lc_finality_update_rl,
lc_updates_by_range_rl,
init_time: Instant::now(),
fork_context,
})
}
}
pub trait RateLimiterItem {
fn protocol(&self) -> Protocol;
fn max_responses(&self) -> u64;
fn max_responses(&self, current_fork: ForkName, spec: &ChainSpec) -> u64;
}
impl<E: EthSpec> RateLimiterItem for super::RequestType<E> {
@@ -267,13 +270,16 @@ impl<E: EthSpec> RateLimiterItem for super::RequestType<E> {
self.versioned_protocol().protocol()
}
fn max_responses(&self) -> u64 {
self.max_responses()
fn max_responses(&self, current_fork: ForkName, spec: &ChainSpec) -> u64 {
self.max_responses(current_fork, spec)
}
}
impl RPCRateLimiter {
pub fn new_with_config(config: RateLimiterConfig) -> Result<Self, &'static str> {
pub fn new_with_config(
config: RateLimiterConfig,
fork_context: Arc<ForkContext>,
) -> Result<Self, &'static str> {
// Destructure to make sure every configuration value is used.
let RateLimiterConfig {
ping_quota,
@@ -316,7 +322,7 @@ impl RPCRateLimiter {
Protocol::LightClientUpdatesByRange,
light_client_updates_by_range_quota,
)
.build()
.build(fork_context)
}
/// Get a builder instance.
@@ -330,7 +336,9 @@ impl RPCRateLimiter {
request: &Item,
) -> Result<(), RateLimitedErr> {
let time_since_start = self.init_time.elapsed();
let tokens = request.max_responses().max(1);
let tokens = request
.max_responses(self.fork_context.current_fork(), &self.fork_context.spec)
.max(1);
let check =
|limiter: &mut Limiter<PeerId>| limiter.allows(time_since_start, peer_id, tokens);

View File

@@ -1,5 +1,6 @@
use std::{
collections::{hash_map::Entry, HashMap, VecDeque},
sync::Arc,
task::{Context, Poll},
time::Duration,
};
@@ -9,7 +10,7 @@ use libp2p::{swarm::NotifyHandler, PeerId};
use slog::{crit, debug, Logger};
use smallvec::SmallVec;
use tokio_util::time::DelayQueue;
use types::EthSpec;
use types::{EthSpec, ForkContext};
use super::{
config::OutboundRateLimiterConfig,
@@ -50,9 +51,13 @@ pub enum Error {
impl<Id: ReqId, E: EthSpec> SelfRateLimiter<Id, E> {
/// Creates a new [`SelfRateLimiter`] based on configration values.
pub fn new(config: OutboundRateLimiterConfig, log: Logger) -> Result<Self, &'static str> {
pub fn new(
config: OutboundRateLimiterConfig,
fork_context: Arc<ForkContext>,
log: Logger,
) -> Result<Self, &'static str> {
debug!(log, "Using self rate limiting params"; "config" => ?config);
let limiter = RateLimiter::new_with_config(config.0)?;
let limiter = RateLimiter::new_with_config(config.0, fork_context)?;
Ok(SelfRateLimiter {
delayed_requests: Default::default(),
@@ -215,7 +220,7 @@ mod tests {
use crate::service::api_types::{AppRequestId, RequestId, SyncRequestId};
use libp2p::PeerId;
use std::time::Duration;
use types::MainnetEthSpec;
use types::{EthSpec, ForkContext, Hash256, MainnetEthSpec, Slot};
/// Test that `next_peer_request_ready` correctly maintains the queue.
#[tokio::test]
@@ -225,8 +230,13 @@ mod tests {
ping_quota: Quota::n_every(1, 2),
..Default::default()
});
let fork_context = std::sync::Arc::new(ForkContext::new::<MainnetEthSpec>(
Slot::new(0),
Hash256::ZERO,
&MainnetEthSpec::default_spec(),
));
let mut limiter: SelfRateLimiter<RequestId, MainnetEthSpec> =
SelfRateLimiter::new(config, log).unwrap();
SelfRateLimiter::new(config, fork_context, log).unwrap();
let peer_id = PeerId::random();
for i in 1..=5u32 {