Add new config options

This commit is contained in:
Pawan Dhananjay
2025-01-07 21:37:18 -08:00
parent fb195373d5
commit d7e24e59e2
5 changed files with 101 additions and 28 deletions

View File

@@ -16,7 +16,7 @@ use crate::rpc::{
use crate::types::{
attestation_sync_committee_topics, fork_core_topics, subnet_from_topic_hash, GossipEncoding,
GossipKind, GossipTopic, SnappyTransform, Subnet, SubnetDiscovery, ALTAIR_CORE_TOPICS,
BASE_CORE_TOPICS, CAPELLA_CORE_TOPICS, DENEB_CORE_TOPICS, LIGHT_CLIENT_GOSSIP_TOPICS,
BASE_CORE_TOPICS, CAPELLA_CORE_TOPICS, LIGHT_CLIENT_GOSSIP_TOPICS,
};
use crate::EnrExt;
use crate::Eth2Enr;
@@ -285,26 +285,23 @@ impl<E: EthSpec> Network<E> {
let max_topics = ctx.chain_spec.attestation_subnet_count as usize
+ SYNC_COMMITTEE_SUBNET_COUNT as usize
+ ctx.chain_spec.blob_sidecar_subnet_count as usize
+ ctx.chain_spec.blob_sidecar_subnet_count_electra as usize
+ ctx.chain_spec.data_column_sidecar_subnet_count as usize
+ BASE_CORE_TOPICS.len()
+ ALTAIR_CORE_TOPICS.len()
+ CAPELLA_CORE_TOPICS.len()
+ DENEB_CORE_TOPICS.len()
+ CAPELLA_CORE_TOPICS.len() // no new core topics are introduced in Deneb or Electra
+ LIGHT_CLIENT_GOSSIP_TOPICS.len();
let possible_fork_digests = ctx.fork_context.all_fork_digests();
let filter = gossipsub::MaxCountSubscriptionFilter {
filter: utils::create_whitelist_filter(
possible_fork_digests,
ctx.chain_spec.attestation_subnet_count,
&ctx.chain_spec,
SYNC_COMMITTEE_SUBNET_COUNT,
ctx.chain_spec.blob_sidecar_subnet_count,
ctx.chain_spec.data_column_sidecar_subnet_count,
),
// during a fork we subscribe to both the old and new topics
max_subscribed_topics: max_topics * 4,
// 418 in theory = (64 attestation + 4 sync committee + 7 core topics + 6 blob topics + 128 column topics) * 2
// 424 in theory = (64 attestation + 4 sync committee + 7 core topics + 9 blob topics + 128 column topics) * 2
max_subscriptions_per_request: max_topics * 2,
};

View File

@@ -236,10 +236,8 @@ pub fn load_or_build_metadata<E: EthSpec>(
/// possible fork digests.
pub(crate) fn create_whitelist_filter(
possible_fork_digests: Vec<[u8; 4]>,
attestation_subnet_count: u64,
spec: &ChainSpec,
sync_committee_subnet_count: u64,
blob_sidecar_subnet_count: u64,
data_column_sidecar_subnet_count: u64,
) -> gossipsub::WhitelistSubscriptionFilter {
let mut possible_hashes = HashSet::new();
for fork_digest in possible_fork_digests {
@@ -259,16 +257,21 @@ pub(crate) fn create_whitelist_filter(
add(BlsToExecutionChange);
add(LightClientFinalityUpdate);
add(LightClientOptimisticUpdate);
for id in 0..attestation_subnet_count {
for id in 0..spec.attestation_subnet_count {
add(Attestation(SubnetId::new(id)));
}
for id in 0..sync_committee_subnet_count {
add(SyncCommitteeMessage(SyncSubnetId::new(id)));
}
for id in 0..blob_sidecar_subnet_count {
let blob_subnet_count = if spec.electra_fork_epoch.is_some() {
spec.blob_sidecar_subnet_count_electra
} else {
spec.blob_sidecar_subnet_count
};
for id in 0..blob_subnet_count {
add(BlobSidecar(id));
}
for id in 0..data_column_sidecar_subnet_count {
for id in 0..spec.data_column_sidecar_subnet_count {
add(DataColumnSidecar(DataColumnSubnetId::new(id)));
}
}

View File

@@ -18,5 +18,5 @@ pub use sync_state::{BackFillState, SyncState};
pub use topics::{
attestation_sync_committee_topics, core_topics_to_subscribe, fork_core_topics,
subnet_from_topic_hash, GossipEncoding, GossipKind, GossipTopic, ALTAIR_CORE_TOPICS,
BASE_CORE_TOPICS, CAPELLA_CORE_TOPICS, DENEB_CORE_TOPICS, LIGHT_CLIENT_GOSSIP_TOPICS,
BASE_CORE_TOPICS, CAPELLA_CORE_TOPICS, LIGHT_CLIENT_GOSSIP_TOPICS,
};

View File

@@ -41,8 +41,6 @@ pub const LIGHT_CLIENT_GOSSIP_TOPICS: [GossipKind; 2] = [
GossipKind::LightClientOptimisticUpdate,
];
pub const DENEB_CORE_TOPICS: [GossipKind; 0] = [];
/// Returns the core topics associated with each fork that are new relative to the previous fork
pub fn fork_core_topics<E: EthSpec>(fork_name: &ForkName, spec: &ChainSpec) -> Vec<GossipKind> {
match fork_name {
@@ -56,11 +54,16 @@ pub fn fork_core_topics<E: EthSpec>(fork_name: &ForkName, spec: &ChainSpec) -> V
for i in 0..spec.blob_sidecar_subnet_count {
deneb_blob_topics.push(GossipKind::BlobSidecar(i));
}
let mut deneb_topics = DENEB_CORE_TOPICS.to_vec();
deneb_topics.append(&mut deneb_blob_topics);
deneb_topics
deneb_blob_topics
}
ForkName::Electra => {
// All of electra blob topics are core topics
let mut electra_blob_topics = Vec::new();
for i in 0..spec.blob_sidecar_subnet_count_electra {
electra_blob_topics.push(GossipKind::BlobSidecar(i));
}
electra_blob_topics
}
ForkName::Electra => vec![],
}
}
@@ -87,7 +90,12 @@ pub fn core_topics_to_subscribe<E: EthSpec>(
topics.extend(previous_fork_topics);
current_fork = previous_fork;
}
// Remove duplicates
topics
.into_iter()
.collect::<std::collections::HashSet<_>>()
.into_iter()
.collect()
}
/// A gossipsub topic which encapsulates the type of messages that should be sent and received over
@@ -466,16 +474,19 @@ mod tests {
type E = MainnetEthSpec;
let spec = E::default_spec();
let mut all_topics = Vec::new();
let mut electra_core_topics = fork_core_topics::<E>(&ForkName::Electra, &spec);
let mut deneb_core_topics = fork_core_topics::<E>(&ForkName::Deneb, &spec);
all_topics.append(&mut electra_core_topics);
all_topics.append(&mut deneb_core_topics);
all_topics.extend(CAPELLA_CORE_TOPICS);
all_topics.extend(ALTAIR_CORE_TOPICS);
all_topics.extend(BASE_CORE_TOPICS);
let latest_fork = *ForkName::list_all().last().unwrap();
assert_eq!(
core_topics_to_subscribe::<E>(latest_fork, &spec),
all_topics
);
let core_topics = core_topics_to_subscribe::<E>(latest_fork, &spec);
// Need to check all the topics exist in an order independent manner
for topic in all_topics {
assert!(core_topics.contains(&topic));
}
}
}

View File

@@ -231,6 +231,13 @@ pub struct ChainSpec {
pub blob_sidecar_subnet_count: u64,
max_blobs_per_block: u64,
/*
* Networking Electra
*/
max_blobs_per_block_electra: u64,
pub blob_sidecar_subnet_count_electra: u64,
pub max_request_blob_sidecars_electra: u64,
/*
* Networking Derived
*
@@ -606,15 +613,26 @@ impl ChainSpec {
}
}
pub fn max_request_blob_sidecars(&self, fork_name: ForkName) -> usize {
if fork_name >= ForkName::Electra {
self.max_request_blob_sidecars_electra as usize
} else {
self.max_request_blob_sidecars as usize
}
}
/// Return the value of `MAX_BLOBS_PER_BLOCK` appropriate for the fork at `epoch`.
pub fn max_blobs_per_block(&self, epoch: Epoch) -> u64 {
self.max_blobs_per_block_by_fork(self.fork_name_at_epoch(epoch))
}
/// Return the value of `MAX_BLOBS_PER_BLOCK` appropriate for `fork`.
pub fn max_blobs_per_block_by_fork(&self, _fork_name: ForkName) -> u64 {
// TODO(electra): add Electra blobs per block change here
self.max_blobs_per_block
pub fn max_blobs_per_block_by_fork(&self, fork_name: ForkName) -> u64 {
if fork_name >= ForkName::Electra {
self.max_blobs_per_block_electra
} else {
self.max_blobs_per_block
}
}
pub fn data_columns_per_subnet(&self) -> usize {
@@ -863,6 +881,13 @@ impl ChainSpec {
max_blobs_by_root_request: default_max_blobs_by_root_request(),
max_data_columns_by_root_request: default_data_columns_by_root_request(),
/*
* Networking Electra specific
*/
max_blobs_per_block_electra: default_max_blobs_per_block_electra(),
blob_sidecar_subnet_count_electra: default_blob_sidecar_subnet_count_electra(),
max_request_blob_sidecars_electra: default_max_request_blob_sidecars_electra(),
/*
* Application specific
*/
@@ -1182,6 +1207,13 @@ impl ChainSpec {
max_blobs_by_root_request: default_max_blobs_by_root_request(),
max_data_columns_by_root_request: default_data_columns_by_root_request(),
/*
* Networking Electra specific
*/
max_blobs_per_block_electra: default_max_blobs_per_block_electra(),
blob_sidecar_subnet_count_electra: default_blob_sidecar_subnet_count_electra(),
max_request_blob_sidecars_electra: default_max_request_blob_sidecars_electra(),
/*
* Application specific
*/
@@ -1376,6 +1408,15 @@ pub struct Config {
#[serde(default = "default_max_per_epoch_activation_exit_churn_limit")]
#[serde(with = "serde_utils::quoted_u64")]
max_per_epoch_activation_exit_churn_limit: u64,
#[serde(default = "default_max_blobs_per_block_electra")]
#[serde(with = "serde_utils::quoted_u64")]
max_blobs_per_block_electra: u64,
#[serde(default = "default_blob_sidecar_subnet_count_electra")]
#[serde(with = "serde_utils::quoted_u64")]
pub blob_sidecar_subnet_count_electra: u64,
#[serde(default = "default_max_request_blob_sidecars_electra")]
#[serde(with = "serde_utils::quoted_u64")]
max_request_blob_sidecars_electra: u64,
#[serde(default = "default_custody_requirement")]
#[serde(with = "serde_utils::quoted_u64")]
@@ -1505,6 +1546,18 @@ const fn default_max_blobs_per_block() -> u64 {
6
}
const fn default_max_blobs_per_block_electra() -> u64 {
9
}
const fn default_blob_sidecar_subnet_count_electra() -> u64 {
9
}
const fn default_max_request_blob_sidecars_electra() -> u64 {
1152
}
const fn default_min_per_epoch_churn_limit_electra() -> u64 {
128_000_000_000
}
@@ -1727,6 +1780,9 @@ impl Config {
min_per_epoch_churn_limit_electra: spec.min_per_epoch_churn_limit_electra,
max_per_epoch_activation_exit_churn_limit: spec
.max_per_epoch_activation_exit_churn_limit,
max_blobs_per_block_electra: spec.max_blobs_per_block_electra,
blob_sidecar_subnet_count_electra: spec.blob_sidecar_subnet_count_electra,
max_request_blob_sidecars_electra: spec.max_request_blob_sidecars_electra,
custody_requirement: spec.custody_requirement,
data_column_sidecar_subnet_count: spec.data_column_sidecar_subnet_count,
@@ -1802,6 +1858,9 @@ impl Config {
min_per_epoch_churn_limit_electra,
max_per_epoch_activation_exit_churn_limit,
max_blobs_per_block_electra,
blob_sidecar_subnet_count_electra,
max_request_blob_sidecars_electra,
custody_requirement,
data_column_sidecar_subnet_count,
number_of_columns,
@@ -1869,6 +1928,9 @@ impl Config {
min_per_epoch_churn_limit_electra,
max_per_epoch_activation_exit_churn_limit,
max_blobs_per_block_electra,
max_request_blob_sidecars_electra,
blob_sidecar_subnet_count_electra,
// We need to re-derive any values that might have changed in the config.
max_blocks_by_root_request: max_blocks_by_root_request_common(max_request_blocks),