Final changes for fusaka-devnet-2 (#7655)

Closes #7467.

This PR primarily addresses [the P2P changes](https://github.com/ethereum/EIPs/pull/9840) in [fusaka-devnet-2](https://fusaka-devnet-2.ethpandaops.io/). Specifically:

* [the new `nfd` parameter added to the `ENR`](https://github.com/ethereum/EIPs/pull/9840)
* [the modified `compute_fork_digest()` changes for every BPO fork](https://github.com/ethereum/EIPs/pull/9840)

90% of this PR was hacked together as fast as possible during the Berlinterop while I was running between Glamsterdam debates. Luckily, it seems to work. But I was unable to be as careful in avoiding bugs as I usually am. I've cleaned up the things *I remember* wanting to come back and have a closer look at, but I'm still working on this.

Progress:
* [x] get it working on `fusaka-devnet-2`
* [ ] [*optional* disconnect from peers with incorrect `nfd` at the fork boundary](https://github.com/ethereum/consensus-specs/pull/4407) - Can be addressed in a future PR if necessary
* [x] first pass clean-up
* [x] fix up all the broken tests
* [x] final self-review
* [x] more thorough review from people more familiar with affected code
This commit is contained in:
ethDreamer
2025-07-10 16:32:58 -05:00
committed by GitHub
parent 3826fe91f4
commit b43e0b446c
26 changed files with 1047 additions and 581 deletions

View File

@@ -457,7 +457,7 @@ pub fn gossipsub_config(
) -> Vec<u8> {
let topic_bytes = message.topic.as_str().as_bytes();
if fork_context.current_fork().altair_enabled() {
if fork_context.current_fork_name().altair_enabled() {
let topic_len_bytes = topic_bytes.len().to_le_bytes();
let mut vec = Vec::with_capacity(
prefix.len() + topic_len_bytes.len() + topic_bytes.len() + message.data.len(),

View File

@@ -3,6 +3,7 @@
pub use discv5::enr::CombinedKey;
use super::enr_ext::CombinedKeyExt;
use super::enr_ext::{EnrExt, QUIC6_ENR_KEY, QUIC_ENR_KEY};
use super::ENR_FILENAME;
use crate::types::{Enr, EnrAttestationBitfield, EnrSyncCommitteeBitfield};
use crate::NetworkConfig;
@@ -18,10 +19,10 @@ use std::str::FromStr;
use tracing::{debug, warn};
use types::{ChainSpec, EnrForkId, EthSpec};
use super::enr_ext::{EnrExt, QUIC6_ENR_KEY, QUIC_ENR_KEY};
/// The ENR field specifying the fork id.
pub const ETH2_ENR_KEY: &str = "eth2";
/// The ENR field specifying the next fork digest.
pub const NEXT_FORK_DIGEST_ENR_KEY: &str = "nfd";
/// The ENR field specifying the attestation subnet bitfield.
pub const ATTESTATION_BITFIELD_ENR_KEY: &str = "attnets";
/// The ENR field specifying the sync committee subnet bitfield.
@@ -42,6 +43,9 @@ pub trait Eth2Enr {
/// The peerdas custody group count associated with the ENR.
fn custody_group_count<E: EthSpec>(&self, spec: &ChainSpec) -> Result<u64, &'static str>;
/// The next fork digest associated with the ENR.
fn next_fork_digest(&self) -> Result<[u8; 4], &'static str>;
fn eth2(&self) -> Result<EnrForkId, &'static str>;
}
@@ -81,6 +85,12 @@ impl Eth2Enr for Enr {
}
}
// Reads the next fork digest (`nfd`) field from this ENR.
//
// Returns the raw 4-byte digest, erroring if the `nfd` key is absent
// (peer predates fusaka-devnet-2 / has no BPO fork scheduled — TODO confirm)
// or if the stored value cannot be decoded as `[u8; 4]`.
fn next_fork_digest(&self) -> Result<[u8; 4], &'static str> {
// `get_decodable` yields Option<Result<_, _>>: the outer Option is key
// presence, the inner Result is decode success.
self.get_decodable::<[u8; 4]>(NEXT_FORK_DIGEST_ENR_KEY)
.ok_or("ENR next fork digest non-existent")?
.map_err(|_| "Could not decode the ENR next fork digest")
}
fn eth2(&self) -> Result<EnrForkId, &'static str> {
let eth2_bytes: Bytes = self
.get_decodable(ETH2_ENR_KEY)
@@ -149,13 +159,14 @@ pub fn build_or_load_enr<E: EthSpec>(
local_key: Keypair,
config: &NetworkConfig,
enr_fork_id: &EnrForkId,
next_fork_digest: [u8; 4],
spec: &ChainSpec,
) -> Result<Enr, String> {
// Build the local ENR.
// Note: Discovery should update the ENR record's IP to the external IP as seen by the
// majority of our peers, if the CLI doesn't expressly forbid it.
let enr_key = CombinedKey::from_libp2p(local_key)?;
let mut local_enr = build_enr::<E>(&enr_key, config, enr_fork_id, spec)?;
let mut local_enr = build_enr::<E>(&enr_key, config, enr_fork_id, next_fork_digest, spec)?;
use_or_load_enr(&enr_key, &mut local_enr, config)?;
Ok(local_enr)
@@ -166,6 +177,7 @@ pub fn build_enr<E: EthSpec>(
enr_key: &CombinedKey,
config: &NetworkConfig,
enr_fork_id: &EnrForkId,
next_fork_digest: [u8; 4],
spec: &ChainSpec,
) -> Result<Enr, String> {
let mut builder = discv5::enr::Enr::builder();
@@ -257,7 +269,7 @@ pub fn build_enr<E: EthSpec>(
&bitfield.as_ssz_bytes().into(),
);
// only set `cgc` if PeerDAS fork epoch has been scheduled
// only set `cgc` and `nfd` if PeerDAS fork (Fulu) epoch has been scheduled
if spec.is_peer_das_scheduled() {
let custody_group_count =
if let Some(false_cgc) = config.advertise_false_custody_group_count {
@@ -268,6 +280,7 @@ pub fn build_enr<E: EthSpec>(
spec.custody_requirement
};
builder.add_value(PEERDAS_CUSTODY_GROUP_COUNT_ENR_KEY, &custody_group_count);
builder.add_value(NEXT_FORK_DIGEST_ENR_KEY, &next_fork_digest);
}
builder
@@ -340,6 +353,7 @@ mod test {
use types::{Epoch, MainnetEthSpec};
type E = MainnetEthSpec;
const TEST_NFD: [u8; 4] = [0x01, 0x02, 0x03, 0x04];
fn make_fulu_spec() -> ChainSpec {
let mut spec = E::default_spec();
@@ -351,10 +365,17 @@ mod test {
let keypair = libp2p::identity::secp256k1::Keypair::generate();
let enr_key = CombinedKey::from_secp256k1(&keypair);
let enr_fork_id = EnrForkId::default();
let enr = build_enr::<E>(&enr_key, &config, &enr_fork_id, spec).unwrap();
let enr = build_enr::<E>(&enr_key, &config, &enr_fork_id, TEST_NFD, spec).unwrap();
(enr, enr_key)
}
#[test]
// Round-trip check: the `nfd` value passed into `build_enr` (via
// `build_enr_with_config`, which uses TEST_NFD) must be readable back
// through the `Eth2Enr::next_fork_digest` accessor. Uses a Fulu-enabled
// spec because `nfd` is only written when the PeerDAS fork is scheduled.
fn test_nfd_enr_encoding() {
let spec = make_fulu_spec();
// `.0` takes the ENR, discarding the generated key.
let enr = build_enr_with_config(NetworkConfig::default(), &spec).0;
assert_eq!(enr.next_fork_digest().unwrap(), TEST_NFD);
}
#[test]
fn custody_group_count_default() {
let config = NetworkConfig {

View File

@@ -49,7 +49,7 @@ use tracing::{debug, error, info, trace, warn};
use types::{ChainSpec, EnrForkId, EthSpec};
mod subnet_predicate;
use crate::discovery::enr::PEERDAS_CUSTODY_GROUP_COUNT_ENR_KEY;
use crate::discovery::enr::{NEXT_FORK_DIGEST_ENR_KEY, PEERDAS_CUSTODY_GROUP_COUNT_ENR_KEY};
pub use subnet_predicate::subnet_predicate;
use types::non_zero_usize::new_non_zero_usize;
@@ -570,6 +570,19 @@ impl<E: EthSpec> Discovery<E> {
Ok(())
}
/// Updates the `nfd` (next fork digest) field of our local ENR.
///
/// Writes the SSZ-encoded digest into the discv5 ENR record, logs the
/// update, persists the new ENR to disk, and refreshes the shared copy in
/// `network_globals` so the rest of the node sees the change.
///
/// Errors if discv5 rejects the key insertion (formatted via `Debug`).
pub fn update_enr_nfd(&mut self, nfd: [u8; 4]) -> Result<(), String> {
self.discv5
.enr_insert::<Bytes>(NEXT_FORK_DIGEST_ENR_KEY, &nfd.as_ssz_bytes().into())
.map_err(|e| format!("{:?}", e))?;
info!(
next_fork_digest = ?nfd,
"Updating the ENR nfd"
);
// Persist so the updated ENR survives a restart; the disk-write result
// is intentionally ignored here (best-effort — TODO confirm this matches
// the other ENR-update methods in this file).
enr::save_enr_to_disk(Path::new(&self.enr_dir), &self.local_enr());
// Mirror the discv5-owned ENR into the globally shared handle.
*self.network_globals.local_enr.write() = self.discv5.local_enr();
Ok(())
}
/// Updates the `eth2` field of our local ENR.
pub fn update_eth2_enr(&mut self, enr_fork_id: EnrForkId) {
// to avoid having a reference to the spec constant, for the logging we assume
@@ -1217,7 +1230,15 @@ mod tests {
config.set_listening_addr(crate::ListenAddress::unused_v4_ports());
let config = Arc::new(config);
let enr_key: CombinedKey = CombinedKey::from_secp256k1(&keypair);
let enr: Enr = build_enr::<E>(&enr_key, &config, &EnrForkId::default(), &spec).unwrap();
let next_fork_digest = [0; 4];
let enr: Enr = build_enr::<E>(
&enr_key,
&config,
&EnrForkId::default(),
next_fork_digest,
&spec,
)
.unwrap();
let globals = NetworkGlobals::new(
enr,
MetaData::V2(MetaDataV2 {

View File

@@ -193,7 +193,7 @@ impl<E: EthSpec> Decoder for SSZSnappyInboundCodec<E> {
handle_rpc_request(
self.protocol.versioned_protocol,
&decoded_buffer,
self.fork_context.current_fork(),
self.fork_context.current_fork_name(),
&self.fork_context.spec,
)
}
@@ -469,65 +469,9 @@ fn context_bytes<E: EthSpec>(
// Add the context bytes if required
if protocol.has_context_bytes() {
if let RpcResponse::Success(rpc_variant) = resp {
match rpc_variant {
RpcSuccessResponse::BlocksByRange(ref_box_block)
| RpcSuccessResponse::BlocksByRoot(ref_box_block) => {
return match **ref_box_block {
// NOTE: If you are adding another fork type here, be sure to modify the
// `fork_context.to_context_bytes()` function to support it as well!
SignedBeaconBlock::Fulu { .. } => {
fork_context.to_context_bytes(ForkName::Fulu)
}
SignedBeaconBlock::Electra { .. } => {
fork_context.to_context_bytes(ForkName::Electra)
}
SignedBeaconBlock::Deneb { .. } => {
fork_context.to_context_bytes(ForkName::Deneb)
}
SignedBeaconBlock::Capella { .. } => {
fork_context.to_context_bytes(ForkName::Capella)
}
SignedBeaconBlock::Bellatrix { .. } => {
fork_context.to_context_bytes(ForkName::Bellatrix)
}
SignedBeaconBlock::Altair { .. } => {
fork_context.to_context_bytes(ForkName::Altair)
}
SignedBeaconBlock::Base { .. } => {
Some(fork_context.genesis_context_bytes())
}
};
}
RpcSuccessResponse::BlobsByRange(_) | RpcSuccessResponse::BlobsByRoot(_) => {
return fork_context.to_context_bytes(ForkName::Deneb);
}
RpcSuccessResponse::DataColumnsByRoot(_)
| RpcSuccessResponse::DataColumnsByRange(_) => {
return fork_context.to_context_bytes(ForkName::Fulu);
}
RpcSuccessResponse::LightClientBootstrap(lc_bootstrap) => {
return lc_bootstrap
.map_with_fork_name(|fork_name| fork_context.to_context_bytes(fork_name));
}
RpcSuccessResponse::LightClientOptimisticUpdate(lc_optimistic_update) => {
return lc_optimistic_update
.map_with_fork_name(|fork_name| fork_context.to_context_bytes(fork_name));
}
RpcSuccessResponse::LightClientFinalityUpdate(lc_finality_update) => {
return lc_finality_update
.map_with_fork_name(|fork_name| fork_context.to_context_bytes(fork_name));
}
RpcSuccessResponse::LightClientUpdatesByRange(lc_update) => {
return lc_update
.map_with_fork_name(|fork_name| fork_context.to_context_bytes(fork_name));
}
// These will not pass the has_context_bytes() check
RpcSuccessResponse::Status(_)
| RpcSuccessResponse::Pong(_)
| RpcSuccessResponse::MetaData(_) => {
return None;
}
}
return rpc_variant
.slot()
.map(|slot| fork_context.context_bytes(slot.epoch(E::slots_per_epoch())));
}
}
None
@@ -938,7 +882,7 @@ fn context_bytes_to_fork_name(
fork_context: Arc<ForkContext>,
) -> Result<ForkName, RPCError> {
fork_context
.from_context_bytes(context_bytes)
.get_fork_from_context_bytes(context_bytes)
.cloned()
.ok_or_else(|| {
let encoded = hex::encode(context_bytes);
@@ -966,69 +910,88 @@ mod tests {
type Spec = types::MainnetEthSpec;
fn fork_context(fork_name: ForkName) -> ForkContext {
fn spec_with_all_forks_enabled() -> ChainSpec {
let mut chain_spec = Spec::default_spec();
let altair_fork_epoch = Epoch::new(1);
let bellatrix_fork_epoch = Epoch::new(2);
let capella_fork_epoch = Epoch::new(3);
let deneb_fork_epoch = Epoch::new(4);
let electra_fork_epoch = Epoch::new(5);
let fulu_fork_epoch = Epoch::new(6);
chain_spec.altair_fork_epoch = Some(Epoch::new(1));
chain_spec.bellatrix_fork_epoch = Some(Epoch::new(2));
chain_spec.capella_fork_epoch = Some(Epoch::new(3));
chain_spec.deneb_fork_epoch = Some(Epoch::new(4));
chain_spec.electra_fork_epoch = Some(Epoch::new(5));
chain_spec.fulu_fork_epoch = Some(Epoch::new(6));
chain_spec.altair_fork_epoch = Some(altair_fork_epoch);
chain_spec.bellatrix_fork_epoch = Some(bellatrix_fork_epoch);
chain_spec.capella_fork_epoch = Some(capella_fork_epoch);
chain_spec.deneb_fork_epoch = Some(deneb_fork_epoch);
chain_spec.electra_fork_epoch = Some(electra_fork_epoch);
chain_spec.fulu_fork_epoch = Some(fulu_fork_epoch);
// check that we have all forks covered
assert!(chain_spec.fork_epoch(ForkName::latest()).is_some());
chain_spec
}
let current_slot = match fork_name {
ForkName::Base => Slot::new(0),
ForkName::Altair => altair_fork_epoch.start_slot(Spec::slots_per_epoch()),
ForkName::Bellatrix => bellatrix_fork_epoch.start_slot(Spec::slots_per_epoch()),
ForkName::Capella => capella_fork_epoch.start_slot(Spec::slots_per_epoch()),
ForkName::Deneb => deneb_fork_epoch.start_slot(Spec::slots_per_epoch()),
ForkName::Electra => electra_fork_epoch.start_slot(Spec::slots_per_epoch()),
ForkName::Fulu => fulu_fork_epoch.start_slot(Spec::slots_per_epoch()),
fn fork_context(fork_name: ForkName, spec: &ChainSpec) -> ForkContext {
let current_epoch = match fork_name {
ForkName::Base => Some(Epoch::new(0)),
ForkName::Altair => spec.altair_fork_epoch,
ForkName::Bellatrix => spec.bellatrix_fork_epoch,
ForkName::Capella => spec.capella_fork_epoch,
ForkName::Deneb => spec.deneb_fork_epoch,
ForkName::Electra => spec.electra_fork_epoch,
ForkName::Fulu => spec.fulu_fork_epoch,
};
ForkContext::new::<Spec>(current_slot, Hash256::zero(), &chain_spec)
let current_slot = current_epoch.unwrap().start_slot(Spec::slots_per_epoch());
ForkContext::new::<Spec>(current_slot, Hash256::zero(), spec)
}
/// Smallest sized block across all current forks. Useful for testing
/// min length check conditions.
fn empty_base_block() -> SignedBeaconBlock<Spec> {
let empty_block = BeaconBlock::Base(BeaconBlockBase::<Spec>::empty(&Spec::default_spec()));
fn empty_base_block(spec: &ChainSpec) -> SignedBeaconBlock<Spec> {
let empty_block = BeaconBlock::Base(BeaconBlockBase::<Spec>::empty(spec));
SignedBeaconBlock::from_block(empty_block, Signature::empty())
}
fn altair_block() -> SignedBeaconBlock<Spec> {
let full_block =
BeaconBlock::Altair(BeaconBlockAltair::<Spec>::full(&Spec::default_spec()));
fn altair_block(spec: &ChainSpec) -> SignedBeaconBlock<Spec> {
// The context bytes are now derived from the block epoch, so we need to have the slot set
// here.
let full_block = BeaconBlock::Altair(BeaconBlockAltair::<Spec>::full(spec));
SignedBeaconBlock::from_block(full_block, Signature::empty())
}
fn empty_blob_sidecar() -> Arc<BlobSidecar<Spec>> {
Arc::new(BlobSidecar::empty())
fn empty_blob_sidecar(spec: &ChainSpec) -> Arc<BlobSidecar<Spec>> {
// The context bytes are now derived from the block epoch, so we need to have the slot set
// here.
let mut blob_sidecar = BlobSidecar::<Spec>::empty();
blob_sidecar.signed_block_header.message.slot = spec
.deneb_fork_epoch
.expect("deneb fork epoch must be set")
.start_slot(Spec::slots_per_epoch());
Arc::new(blob_sidecar)
}
fn empty_data_column_sidecar() -> Arc<DataColumnSidecar<Spec>> {
Arc::new(DataColumnSidecar {
fn empty_data_column_sidecar(spec: &ChainSpec) -> Arc<DataColumnSidecar<Spec>> {
// The context bytes are now derived from the block epoch, so we need to have the slot set
// here.
let data_column_sidecar = DataColumnSidecar {
index: 0,
column: VariableList::new(vec![Cell::<Spec>::default()]).unwrap(),
kzg_commitments: VariableList::new(vec![KzgCommitment::empty_for_testing()]).unwrap(),
kzg_proofs: VariableList::new(vec![KzgProof::empty()]).unwrap(),
signed_block_header: SignedBeaconBlockHeader {
message: BeaconBlockHeader::empty(),
message: BeaconBlockHeader {
slot: spec
.fulu_fork_epoch
.expect("fulu fork epoch must be set")
.start_slot(Spec::slots_per_epoch()),
..BeaconBlockHeader::empty()
},
signature: Signature::empty(),
},
kzg_commitments_inclusion_proof: Default::default(),
})
};
Arc::new(data_column_sidecar)
}
/// Bellatrix block with length < max_rpc_size.
fn bellatrix_block_small(spec: &ChainSpec) -> SignedBeaconBlock<Spec> {
// The context bytes are now derived from the block epoch, so we need to have the slot set
// here.
let mut block: BeaconBlockBellatrix<_, FullPayload<Spec>> =
BeaconBlockBellatrix::empty(&Spec::default_spec());
BeaconBlockBellatrix::empty(spec);
let tx = VariableList::from(vec![0; 1024]);
let txs = VariableList::from(std::iter::repeat_n(tx, 5000).collect::<Vec<_>>());
@@ -1044,8 +1007,10 @@ mod tests {
/// The max limit for a Bellatrix block is in the order of ~16GiB which wouldn't fit in memory.
/// Hence, we generate a Bellatrix block just greater than `MAX_RPC_SIZE` to test rejection on the rpc layer.
fn bellatrix_block_large(spec: &ChainSpec) -> SignedBeaconBlock<Spec> {
// The context bytes are now derived from the block epoch, so we need to have the slot set
// here.
let mut block: BeaconBlockBellatrix<_, FullPayload<Spec>> =
BeaconBlockBellatrix::empty(&Spec::default_spec());
BeaconBlockBellatrix::empty(spec);
let tx = VariableList::from(vec![0; 1024]);
let txs = VariableList::from(std::iter::repeat_n(tx, 100000).collect::<Vec<_>>());
@@ -1101,7 +1066,7 @@ mod tests {
}
}
fn dcbroot_request(spec: &ChainSpec, fork_name: ForkName) -> DataColumnsByRootRequest {
fn dcbroot_request(fork_name: ForkName, spec: &ChainSpec) -> DataColumnsByRootRequest {
let number_of_columns = spec.number_of_columns as usize;
DataColumnsByRootRequest {
data_column_ids: RuntimeVariableList::new(
@@ -1115,21 +1080,21 @@ mod tests {
}
}
fn bbroot_request_v1(fork_name: ForkName) -> BlocksByRootRequest {
BlocksByRootRequest::new_v1(vec![Hash256::zero()], &fork_context(fork_name))
fn bbroot_request_v1(fork_name: ForkName, spec: &ChainSpec) -> BlocksByRootRequest {
BlocksByRootRequest::new_v1(vec![Hash256::zero()], &fork_context(fork_name, spec))
}
fn bbroot_request_v2(fork_name: ForkName) -> BlocksByRootRequest {
BlocksByRootRequest::new(vec![Hash256::zero()], &fork_context(fork_name))
fn bbroot_request_v2(fork_name: ForkName, spec: &ChainSpec) -> BlocksByRootRequest {
BlocksByRootRequest::new(vec![Hash256::zero()], &fork_context(fork_name, spec))
}
fn blbroot_request(fork_name: ForkName) -> BlobsByRootRequest {
fn blbroot_request(fork_name: ForkName, spec: &ChainSpec) -> BlobsByRootRequest {
BlobsByRootRequest::new(
vec![BlobIdentifier {
block_root: Hash256::zero(),
index: 0,
}],
&fork_context(fork_name),
&fork_context(fork_name, spec),
)
}
@@ -1172,7 +1137,7 @@ mod tests {
spec: &ChainSpec,
) -> Result<BytesMut, RPCError> {
let snappy_protocol_id = ProtocolId::new(protocol, Encoding::SSZSnappy);
let fork_context = Arc::new(fork_context(fork_name));
let fork_context = Arc::new(fork_context(fork_name, spec));
let max_packet_size = spec.max_payload_size as usize;
let mut buf = BytesMut::new();
@@ -1186,12 +1151,13 @@ mod tests {
fn encode_without_length_checks(
bytes: Vec<u8>,
fork_name: ForkName,
spec: &ChainSpec,
) -> Result<BytesMut, RPCError> {
let fork_context = fork_context(fork_name);
let fork_context = fork_context(fork_name, spec);
let mut dst = BytesMut::new();
// Add context bytes if required
dst.extend_from_slice(&fork_context.to_context_bytes(fork_name).unwrap());
dst.extend_from_slice(&fork_context.context_bytes(fork_context.current_fork_epoch()));
let mut uvi_codec: Uvi<usize> = Uvi::default();
@@ -1219,7 +1185,7 @@ mod tests {
spec: &ChainSpec,
) -> Result<Option<RpcSuccessResponse<Spec>>, RPCError> {
let snappy_protocol_id = ProtocolId::new(protocol, Encoding::SSZSnappy);
let fork_context = Arc::new(fork_context(fork_name));
let fork_context = Arc::new(fork_context(fork_name, spec));
let max_packet_size = spec.max_payload_size as usize;
let mut snappy_outbound_codec =
SSZSnappyOutboundCodec::<Spec>::new(snappy_protocol_id, max_packet_size, fork_context);
@@ -1240,7 +1206,7 @@ mod tests {
/// Verifies that requests we send are encoded in a way that we would correctly decode too.
fn encode_then_decode_request(req: RequestType<Spec>, fork_name: ForkName, spec: &ChainSpec) {
let fork_context = Arc::new(fork_context(fork_name));
let fork_context = Arc::new(fork_context(fork_name, spec));
let max_packet_size = spec.max_payload_size as usize;
let protocol = ProtocolId::new(req.versioned_protocol(), Encoding::SSZSnappy);
// Encode a request we send
@@ -1311,7 +1277,7 @@ mod tests {
// Test RPCResponse encoding/decoding for V1 messages
#[test]
fn test_encode_then_decode_v1() {
let chain_spec = Spec::default_spec();
let chain_spec = spec_with_all_forks_enabled();
assert_eq!(
encode_then_decode_response(
@@ -1348,13 +1314,13 @@ mod tests {
encode_then_decode_response(
SupportedProtocol::BlocksByRangeV1,
RpcResponse::Success(RpcSuccessResponse::BlocksByRange(Arc::new(
empty_base_block()
empty_base_block(&chain_spec)
))),
ForkName::Base,
&chain_spec,
),
Ok(Some(RpcSuccessResponse::BlocksByRange(Arc::new(
empty_base_block()
empty_base_block(&chain_spec)
))))
);
@@ -1363,7 +1329,7 @@ mod tests {
encode_then_decode_response(
SupportedProtocol::BlocksByRangeV1,
RpcResponse::Success(RpcSuccessResponse::BlocksByRange(Arc::new(
altair_block()
altair_block(&chain_spec)
))),
ForkName::Altair,
&chain_spec,
@@ -1378,13 +1344,13 @@ mod tests {
encode_then_decode_response(
SupportedProtocol::BlocksByRootV1,
RpcResponse::Success(RpcSuccessResponse::BlocksByRoot(Arc::new(
empty_base_block()
empty_base_block(&chain_spec)
))),
ForkName::Base,
&chain_spec,
),
Ok(Some(RpcSuccessResponse::BlocksByRoot(Arc::new(
empty_base_block()
empty_base_block(&chain_spec)
))))
);
@@ -1392,9 +1358,9 @@ mod tests {
matches!(
encode_then_decode_response(
SupportedProtocol::BlocksByRootV1,
RpcResponse::Success(RpcSuccessResponse::BlocksByRoot(
Arc::new(altair_block())
)),
RpcResponse::Success(RpcSuccessResponse::BlocksByRoot(Arc::new(altair_block(
&chain_spec
)))),
ForkName::Altair,
&chain_spec,
)
@@ -1439,74 +1405,98 @@ mod tests {
assert_eq!(
encode_then_decode_response(
SupportedProtocol::BlobsByRangeV1,
RpcResponse::Success(RpcSuccessResponse::BlobsByRange(empty_blob_sidecar())),
RpcResponse::Success(RpcSuccessResponse::BlobsByRange(empty_blob_sidecar(
&chain_spec
))),
ForkName::Deneb,
&chain_spec
),
Ok(Some(RpcSuccessResponse::BlobsByRange(empty_blob_sidecar()))),
Ok(Some(RpcSuccessResponse::BlobsByRange(empty_blob_sidecar(
&chain_spec
)))),
);
assert_eq!(
encode_then_decode_response(
SupportedProtocol::BlobsByRangeV1,
RpcResponse::Success(RpcSuccessResponse::BlobsByRange(empty_blob_sidecar())),
RpcResponse::Success(RpcSuccessResponse::BlobsByRange(empty_blob_sidecar(
&chain_spec
))),
ForkName::Electra,
&chain_spec
),
Ok(Some(RpcSuccessResponse::BlobsByRange(empty_blob_sidecar()))),
Ok(Some(RpcSuccessResponse::BlobsByRange(empty_blob_sidecar(
&chain_spec
)))),
);
assert_eq!(
encode_then_decode_response(
SupportedProtocol::BlobsByRangeV1,
RpcResponse::Success(RpcSuccessResponse::BlobsByRange(empty_blob_sidecar())),
RpcResponse::Success(RpcSuccessResponse::BlobsByRange(empty_blob_sidecar(
&chain_spec
))),
ForkName::Fulu,
&chain_spec
),
Ok(Some(RpcSuccessResponse::BlobsByRange(empty_blob_sidecar()))),
Ok(Some(RpcSuccessResponse::BlobsByRange(empty_blob_sidecar(
&chain_spec
)))),
);
assert_eq!(
encode_then_decode_response(
SupportedProtocol::BlobsByRootV1,
RpcResponse::Success(RpcSuccessResponse::BlobsByRoot(empty_blob_sidecar())),
RpcResponse::Success(RpcSuccessResponse::BlobsByRoot(empty_blob_sidecar(
&chain_spec
))),
ForkName::Deneb,
&chain_spec
),
Ok(Some(RpcSuccessResponse::BlobsByRoot(empty_blob_sidecar()))),
Ok(Some(RpcSuccessResponse::BlobsByRoot(empty_blob_sidecar(
&chain_spec
)))),
);
assert_eq!(
encode_then_decode_response(
SupportedProtocol::BlobsByRootV1,
RpcResponse::Success(RpcSuccessResponse::BlobsByRoot(empty_blob_sidecar())),
RpcResponse::Success(RpcSuccessResponse::BlobsByRoot(empty_blob_sidecar(
&chain_spec
))),
ForkName::Electra,
&chain_spec
),
Ok(Some(RpcSuccessResponse::BlobsByRoot(empty_blob_sidecar()))),
Ok(Some(RpcSuccessResponse::BlobsByRoot(empty_blob_sidecar(
&chain_spec
)))),
);
assert_eq!(
encode_then_decode_response(
SupportedProtocol::BlobsByRootV1,
RpcResponse::Success(RpcSuccessResponse::BlobsByRoot(empty_blob_sidecar())),
RpcResponse::Success(RpcSuccessResponse::BlobsByRoot(empty_blob_sidecar(
&chain_spec
))),
ForkName::Fulu,
&chain_spec
),
Ok(Some(RpcSuccessResponse::BlobsByRoot(empty_blob_sidecar()))),
Ok(Some(RpcSuccessResponse::BlobsByRoot(empty_blob_sidecar(
&chain_spec
)))),
);
assert_eq!(
encode_then_decode_response(
SupportedProtocol::DataColumnsByRangeV1,
RpcResponse::Success(RpcSuccessResponse::DataColumnsByRange(
empty_data_column_sidecar()
empty_data_column_sidecar(&chain_spec)
)),
ForkName::Deneb,
&chain_spec
),
Ok(Some(RpcSuccessResponse::DataColumnsByRange(
empty_data_column_sidecar()
empty_data_column_sidecar(&chain_spec)
))),
);
@@ -1514,13 +1504,13 @@ mod tests {
encode_then_decode_response(
SupportedProtocol::DataColumnsByRangeV1,
RpcResponse::Success(RpcSuccessResponse::DataColumnsByRange(
empty_data_column_sidecar()
empty_data_column_sidecar(&chain_spec)
)),
ForkName::Electra,
&chain_spec
),
Ok(Some(RpcSuccessResponse::DataColumnsByRange(
empty_data_column_sidecar()
empty_data_column_sidecar(&chain_spec)
))),
);
@@ -1528,13 +1518,13 @@ mod tests {
encode_then_decode_response(
SupportedProtocol::DataColumnsByRangeV1,
RpcResponse::Success(RpcSuccessResponse::DataColumnsByRange(
empty_data_column_sidecar()
empty_data_column_sidecar(&chain_spec)
)),
ForkName::Fulu,
&chain_spec
),
Ok(Some(RpcSuccessResponse::DataColumnsByRange(
empty_data_column_sidecar()
empty_data_column_sidecar(&chain_spec)
))),
);
@@ -1542,13 +1532,13 @@ mod tests {
encode_then_decode_response(
SupportedProtocol::DataColumnsByRootV1,
RpcResponse::Success(RpcSuccessResponse::DataColumnsByRoot(
empty_data_column_sidecar()
empty_data_column_sidecar(&chain_spec)
)),
ForkName::Deneb,
&chain_spec
),
Ok(Some(RpcSuccessResponse::DataColumnsByRoot(
empty_data_column_sidecar()
empty_data_column_sidecar(&chain_spec)
))),
);
@@ -1556,13 +1546,13 @@ mod tests {
encode_then_decode_response(
SupportedProtocol::DataColumnsByRootV1,
RpcResponse::Success(RpcSuccessResponse::DataColumnsByRoot(
empty_data_column_sidecar()
empty_data_column_sidecar(&chain_spec)
)),
ForkName::Electra,
&chain_spec
),
Ok(Some(RpcSuccessResponse::DataColumnsByRoot(
empty_data_column_sidecar()
empty_data_column_sidecar(&chain_spec)
))),
);
@@ -1570,13 +1560,13 @@ mod tests {
encode_then_decode_response(
SupportedProtocol::DataColumnsByRootV1,
RpcResponse::Success(RpcSuccessResponse::DataColumnsByRoot(
empty_data_column_sidecar()
empty_data_column_sidecar(&chain_spec)
)),
ForkName::Fulu,
&chain_spec
),
Ok(Some(RpcSuccessResponse::DataColumnsByRoot(
empty_data_column_sidecar()
empty_data_column_sidecar(&chain_spec)
))),
);
}
@@ -1584,19 +1574,19 @@ mod tests {
// Test RPCResponse encoding/decoding for V1 messages
#[test]
fn test_encode_then_decode_v2() {
let chain_spec = Spec::default_spec();
let chain_spec = spec_with_all_forks_enabled();
assert_eq!(
encode_then_decode_response(
SupportedProtocol::BlocksByRangeV2,
RpcResponse::Success(RpcSuccessResponse::BlocksByRange(Arc::new(
empty_base_block()
empty_base_block(&chain_spec)
))),
ForkName::Base,
&chain_spec,
),
Ok(Some(RpcSuccessResponse::BlocksByRange(Arc::new(
empty_base_block()
empty_base_block(&chain_spec)
))))
);
@@ -1607,25 +1597,27 @@ mod tests {
encode_then_decode_response(
SupportedProtocol::BlocksByRangeV2,
RpcResponse::Success(RpcSuccessResponse::BlocksByRange(Arc::new(
empty_base_block()
empty_base_block(&chain_spec)
))),
ForkName::Altair,
&chain_spec,
),
Ok(Some(RpcSuccessResponse::BlocksByRange(Arc::new(
empty_base_block()
empty_base_block(&chain_spec)
))))
);
assert_eq!(
encode_then_decode_response(
SupportedProtocol::BlocksByRangeV2,
RpcResponse::Success(RpcSuccessResponse::BlocksByRange(Arc::new(altair_block()))),
RpcResponse::Success(RpcSuccessResponse::BlocksByRange(Arc::new(altair_block(
&chain_spec
)))),
ForkName::Altair,
&chain_spec,
),
Ok(Some(RpcSuccessResponse::BlocksByRange(Arc::new(
altair_block()
altair_block(&chain_spec)
))))
);
@@ -1646,9 +1638,12 @@ mod tests {
))))
);
let mut encoded =
encode_without_length_checks(bellatrix_block_large.as_ssz_bytes(), ForkName::Bellatrix)
.unwrap();
let mut encoded = encode_without_length_checks(
bellatrix_block_large.as_ssz_bytes(),
ForkName::Bellatrix,
&chain_spec,
)
.unwrap();
assert!(
matches!(
@@ -1668,13 +1663,13 @@ mod tests {
encode_then_decode_response(
SupportedProtocol::BlocksByRootV2,
RpcResponse::Success(RpcSuccessResponse::BlocksByRoot(Arc::new(
empty_base_block()
empty_base_block(&chain_spec)
))),
ForkName::Base,
&chain_spec,
),
Ok(Some(RpcSuccessResponse::BlocksByRoot(Arc::new(
empty_base_block()
empty_base_block(&chain_spec)
)))),
);
@@ -1685,25 +1680,27 @@ mod tests {
encode_then_decode_response(
SupportedProtocol::BlocksByRootV2,
RpcResponse::Success(RpcSuccessResponse::BlocksByRoot(Arc::new(
empty_base_block()
empty_base_block(&chain_spec)
))),
ForkName::Altair,
&chain_spec,
),
Ok(Some(RpcSuccessResponse::BlocksByRoot(Arc::new(
empty_base_block()
empty_base_block(&chain_spec)
))))
);
assert_eq!(
encode_then_decode_response(
SupportedProtocol::BlocksByRootV2,
RpcResponse::Success(RpcSuccessResponse::BlocksByRoot(Arc::new(altair_block()))),
RpcResponse::Success(RpcSuccessResponse::BlocksByRoot(Arc::new(altair_block(
&chain_spec
)))),
ForkName::Altair,
&chain_spec,
),
Ok(Some(RpcSuccessResponse::BlocksByRoot(Arc::new(
altair_block()
altair_block(&chain_spec)
))))
);
@@ -1721,9 +1718,12 @@ mod tests {
))))
);
let mut encoded =
encode_without_length_checks(bellatrix_block_large.as_ssz_bytes(), ForkName::Bellatrix)
.unwrap();
let mut encoded = encode_without_length_checks(
bellatrix_block_large.as_ssz_bytes(),
ForkName::Bellatrix,
&chain_spec,
)
.unwrap();
assert!(
matches!(
@@ -1785,15 +1785,14 @@ mod tests {
// Test RPCResponse encoding/decoding for V2 messages
#[test]
fn test_context_bytes_v2() {
let fork_context = fork_context(ForkName::Altair);
let chain_spec = Spec::default_spec();
let chain_spec = spec_with_all_forks_enabled();
let fork_context = fork_context(ForkName::Altair, &chain_spec);
// Removing context bytes for v2 messages should error
let mut encoded_bytes = encode_response(
SupportedProtocol::BlocksByRangeV2,
RpcResponse::Success(RpcSuccessResponse::BlocksByRange(Arc::new(
empty_base_block(),
empty_base_block(&chain_spec),
))),
ForkName::Base,
&chain_spec,
@@ -1816,7 +1815,7 @@ mod tests {
let mut encoded_bytes = encode_response(
SupportedProtocol::BlocksByRootV2,
RpcResponse::Success(RpcSuccessResponse::BlocksByRoot(Arc::new(
empty_base_block(),
empty_base_block(&chain_spec),
))),
ForkName::Base,
&chain_spec,
@@ -1840,7 +1839,7 @@ mod tests {
let mut encoded_bytes = encode_response(
SupportedProtocol::BlocksByRangeV2,
RpcResponse::Success(RpcSuccessResponse::BlocksByRange(Arc::new(
empty_base_block(),
empty_base_block(&chain_spec),
))),
ForkName::Altair,
&chain_spec,
@@ -1848,8 +1847,8 @@ mod tests {
.unwrap();
let mut wrong_fork_bytes = BytesMut::new();
wrong_fork_bytes
.extend_from_slice(&fork_context.to_context_bytes(ForkName::Altair).unwrap());
let altair_epoch = chain_spec.altair_fork_epoch.unwrap();
wrong_fork_bytes.extend_from_slice(&fork_context.context_bytes(altair_epoch));
wrong_fork_bytes.extend_from_slice(&encoded_bytes.split_off(4));
assert!(matches!(
@@ -1866,14 +1865,18 @@ mod tests {
// Trying to decode an altair block with base context bytes should give ssz decoding error
let mut encoded_bytes = encode_response(
SupportedProtocol::BlocksByRootV2,
RpcResponse::Success(RpcSuccessResponse::BlocksByRoot(Arc::new(altair_block()))),
RpcResponse::Success(RpcSuccessResponse::BlocksByRoot(Arc::new(altair_block(
&chain_spec,
)))),
ForkName::Altair,
&chain_spec,
)
.unwrap();
let mut wrong_fork_bytes = BytesMut::new();
wrong_fork_bytes.extend_from_slice(&fork_context.to_context_bytes(ForkName::Base).unwrap());
wrong_fork_bytes.extend_from_slice(
&fork_context.context_bytes(chain_spec.genesis_slot.epoch(Spec::slots_per_epoch())),
);
wrong_fork_bytes.extend_from_slice(&encoded_bytes.split_off(4));
assert!(matches!(
@@ -1889,7 +1892,7 @@ mod tests {
// Adding context bytes to Protocols that don't require it should return an error
let mut encoded_bytes = BytesMut::new();
encoded_bytes.extend_from_slice(&fork_context.to_context_bytes(ForkName::Altair).unwrap());
encoded_bytes.extend_from_slice(&fork_context.context_bytes(altair_epoch));
encoded_bytes.extend_from_slice(
&encode_response(
SupportedProtocol::MetaDataV2,
@@ -1912,7 +1915,7 @@ mod tests {
let mut encoded_bytes = encode_response(
SupportedProtocol::BlocksByRootV2,
RpcResponse::Success(RpcSuccessResponse::BlocksByRoot(Arc::new(
empty_base_block(),
empty_base_block(&chain_spec),
))),
ForkName::Altair,
&chain_spec,
@@ -1938,7 +1941,7 @@ mod tests {
let mut encoded_bytes = encode_response(
SupportedProtocol::BlocksByRootV2,
RpcResponse::Success(RpcSuccessResponse::BlocksByRoot(Arc::new(
empty_base_block(),
empty_base_block(&chain_spec),
))),
ForkName::Altair,
&chain_spec,
@@ -1960,8 +1963,7 @@ mod tests {
#[test]
fn test_encode_then_decode_request() {
let fork_context = fork_context(ForkName::Electra);
let chain_spec = fork_context.spec.clone();
let chain_spec = spec_with_all_forks_enabled();
let requests: &[RequestType<Spec>] = &[
RequestType::Ping(ping_message()),
@@ -1985,10 +1987,10 @@ mod tests {
// Handled separately to have consistent `ForkName` across request and responses
let fork_dependent_requests = |fork_name| {
[
RequestType::BlobsByRoot(blbroot_request(fork_name)),
RequestType::BlocksByRoot(bbroot_request_v1(fork_name)),
RequestType::BlocksByRoot(bbroot_request_v2(fork_name)),
RequestType::DataColumnsByRoot(dcbroot_request(&chain_spec, fork_name)),
RequestType::BlobsByRoot(blbroot_request(fork_name, &chain_spec)),
RequestType::BlocksByRoot(bbroot_request_v1(fork_name, &chain_spec)),
RequestType::BlocksByRoot(bbroot_request_v2(fork_name, &chain_spec)),
RequestType::DataColumnsByRoot(dcbroot_request(fork_name, &chain_spec)),
]
};
for fork_name in ForkName::list_all() {
@@ -2048,7 +2050,7 @@ mod tests {
assert_eq!(writer.get_ref().len(), 42);
dst.extend_from_slice(writer.get_ref());
let chain_spec = Spec::default_spec();
let chain_spec = spec_with_all_forks_enabled();
// 10 (for stream identifier) + 80 + 42 = 132 > `max_compressed_len`. Hence, decoding should fail with `InvalidData`.
assert!(matches!(
decode_response(
@@ -2066,7 +2068,8 @@ mod tests {
/// sends a valid message filled with a stream of useless padding before the actual message.
#[test]
fn test_decode_malicious_v2_message() {
let fork_context = Arc::new(fork_context(ForkName::Altair));
let chain_spec = spec_with_all_forks_enabled();
let fork_context = Arc::new(fork_context(ForkName::Altair, &chain_spec));
// 10 byte snappy stream identifier
let stream_identifier: &'static [u8] = b"\xFF\x06\x00\x00sNaPpY";
@@ -2078,7 +2081,7 @@ mod tests {
let malicious_padding: &'static [u8] = b"\xFE\x00\x00\x00";
// Full altair block is 157916 bytes uncompressed. `max_compressed_len` is 32 + 157916 + 157916/6 = 184267.
let block_message_bytes = altair_block().as_ssz_bytes();
let block_message_bytes = altair_block(&fork_context.spec).as_ssz_bytes();
assert_eq!(block_message_bytes.len(), 157916);
assert_eq!(
@@ -2090,7 +2093,8 @@ mod tests {
let mut dst = BytesMut::with_capacity(1024);
// Insert context bytes
dst.extend_from_slice(&fork_context.to_context_bytes(ForkName::Altair).unwrap());
let altair_epoch = fork_context.spec.altair_fork_epoch.unwrap();
dst.extend_from_slice(&fork_context.context_bytes(altair_epoch));
// Insert length-prefix
uvi_codec
@@ -2105,14 +2109,14 @@ mod tests {
dst.extend_from_slice(malicious_padding);
}
// Insert payload (8103 bytes compressed)
// Insert payload (8102 bytes compressed)
let mut writer = FrameEncoder::new(Vec::new());
writer.write_all(&block_message_bytes).unwrap();
writer.flush().unwrap();
assert_eq!(writer.get_ref().len(), 8103);
assert_eq!(writer.get_ref().len(), 8102);
dst.extend_from_slice(writer.get_ref());
let chain_spec = Spec::default_spec();
let chain_spec = spec_with_all_forks_enabled();
// 10 (for stream identifier) + 176156 + 8103 = 184269 > `max_compressed_len`. Hence, decoding should fail with `InvalidData`.
assert!(matches!(
@@ -2148,7 +2152,7 @@ mod tests {
let mut uvi_codec: Uvi<usize> = Uvi::default();
let mut dst = BytesMut::with_capacity(1024);
let chain_spec = Spec::default_spec();
let chain_spec = spec_with_all_forks_enabled();
// Insert length-prefix
uvi_codec
@@ -2184,9 +2188,8 @@ mod tests {
let snappy_protocol_id = ProtocolId::new(SupportedProtocol::StatusV1, Encoding::SSZSnappy);
let fork_context = Arc::new(fork_context(ForkName::Base));
let chain_spec = Spec::default_spec();
let chain_spec = spec_with_all_forks_enabled();
let fork_context = Arc::new(fork_context(ForkName::Base, &chain_spec));
let mut snappy_outbound_codec = SSZSnappyOutboundCodec::<Spec>::new(
snappy_protocol_id,
@@ -2220,9 +2223,8 @@ mod tests {
let snappy_protocol_id = ProtocolId::new(SupportedProtocol::StatusV1, Encoding::SSZSnappy);
let fork_context = Arc::new(fork_context(ForkName::Base));
let chain_spec = Spec::default_spec();
let chain_spec = spec_with_all_forks_enabled();
let fork_context = Arc::new(fork_context(ForkName::Base, &chain_spec));
let mut snappy_outbound_codec = SSZSnappyOutboundCodec::<Spec>::new(
snappy_protocol_id,
@@ -2251,9 +2253,8 @@ mod tests {
let protocol_id = ProtocolId::new(SupportedProtocol::BlocksByRangeV1, Encoding::SSZSnappy);
// Response limits
let fork_context = Arc::new(fork_context(ForkName::Base));
let chain_spec = Spec::default_spec();
let chain_spec = spec_with_all_forks_enabled();
let fork_context = Arc::new(fork_context(ForkName::Base, &chain_spec));
let max_rpc_size = chain_spec.max_payload_size as usize;
let limit = protocol_id.rpc_response_limits::<Spec>(&fork_context);

View File

@@ -912,7 +912,7 @@ where
}
let (req, substream) = substream;
let current_fork = self.fork_context.current_fork();
let current_fork = self.fork_context.current_fork_name();
let spec = &self.fork_context.spec;
match &req {
@@ -950,8 +950,10 @@ where
_ => {}
};
let max_responses =
req.max_responses(self.fork_context.current_fork(), &self.fork_context.spec);
let max_responses = req.max_responses(
self.fork_context.current_fork_epoch(),
&self.fork_context.spec,
);
// store requests that expect responses
if max_responses > 0 {
@@ -1021,8 +1023,10 @@ where
}
// add the stream to substreams if we expect a response, otherwise drop the stream.
let max_responses =
request.max_responses(self.fork_context.current_fork(), &self.fork_context.spec);
let max_responses = request.max_responses(
self.fork_context.current_fork_epoch(),
&self.fork_context.spec,
);
if max_responses > 0 {
let max_remaining_chunks = if request.expect_exactly_one_response() {
// Currently enforced only for multiple responses

View File

@@ -484,7 +484,7 @@ impl BlocksByRootRequest {
pub fn new(block_roots: Vec<Hash256>, fork_context: &ForkContext) -> Self {
let max_request_blocks = fork_context
.spec
.max_request_blocks(fork_context.current_fork());
.max_request_blocks(fork_context.current_fork_name());
let block_roots = RuntimeVariableList::from_vec(block_roots, max_request_blocks);
Self::V2(BlocksByRootRequestV2 { block_roots })
}
@@ -492,7 +492,7 @@ impl BlocksByRootRequest {
pub fn new_v1(block_roots: Vec<Hash256>, fork_context: &ForkContext) -> Self {
let max_request_blocks = fork_context
.spec
.max_request_blocks(fork_context.current_fork());
.max_request_blocks(fork_context.current_fork_name());
let block_roots = RuntimeVariableList::from_vec(block_roots, max_request_blocks);
Self::V1(BlocksByRootRequestV1 { block_roots })
}
@@ -509,7 +509,7 @@ impl BlobsByRootRequest {
pub fn new(blob_ids: Vec<BlobIdentifier>, fork_context: &ForkContext) -> Self {
let max_request_blob_sidecars = fork_context
.spec
.max_request_blob_sidecars(fork_context.current_fork());
.max_request_blob_sidecars(fork_context.current_fork_name());
let blob_ids = RuntimeVariableList::from_vec(blob_ids, max_request_blob_sidecars);
Self { blob_ids }
}
@@ -749,6 +749,23 @@ impl<E: EthSpec> RpcSuccessResponse<E> {
RpcSuccessResponse::LightClientUpdatesByRange(_) => Protocol::LightClientUpdatesByRange,
}
}
pub fn slot(&self) -> Option<Slot> {
match self {
Self::BlocksByRange(r) | Self::BlocksByRoot(r) => Some(r.slot()),
Self::BlobsByRange(r) | Self::BlobsByRoot(r) => {
Some(r.signed_block_header.message.slot)
}
Self::DataColumnsByRange(r) | Self::DataColumnsByRoot(r) => {
Some(r.signed_block_header.message.slot)
}
Self::LightClientBootstrap(r) => Some(r.get_slot()),
Self::LightClientFinalityUpdate(r) => Some(r.get_attested_header_slot()),
Self::LightClientOptimisticUpdate(r) => Some(r.get_slot()),
Self::LightClientUpdatesByRange(r) => Some(r.attested_header_slot()),
Self::MetaData(_) | Self::Status(_) | Self::Pong(_) => None,
}
}
}
impl std::fmt::Display for RpcErrorResponse {

View File

@@ -18,10 +18,10 @@ use tokio_util::{
};
use types::{
BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BlobSidecar, ChainSpec, DataColumnSidecar,
EmptyBlock, EthSpec, EthSpecId, ForkContext, ForkName, LightClientBootstrap,
EmptyBlock, Epoch, EthSpec, EthSpecId, ForkContext, ForkName, LightClientBootstrap,
LightClientBootstrapAltair, LightClientFinalityUpdate, LightClientFinalityUpdateAltair,
LightClientOptimisticUpdate, LightClientOptimisticUpdateAltair, LightClientUpdate,
MainnetEthSpec, MinimalEthSpec, Signature, SignedBeaconBlock, Slot,
MainnetEthSpec, MinimalEthSpec, Signature, SignedBeaconBlock,
};
// Note: Hardcoding the `EthSpec` type for `SignedBeaconBlock` as min/max values is
@@ -545,15 +545,15 @@ impl ProtocolId {
<StatusMessageV2 as Encode>::ssz_fixed_len(),
),
Protocol::Goodbye => RpcLimits::new(0, 0), // Goodbye request has no response
Protocol::BlocksByRange => rpc_block_limits_by_fork(fork_context.current_fork()),
Protocol::BlocksByRoot => rpc_block_limits_by_fork(fork_context.current_fork()),
Protocol::BlocksByRange => rpc_block_limits_by_fork(fork_context.current_fork_name()),
Protocol::BlocksByRoot => rpc_block_limits_by_fork(fork_context.current_fork_name()),
Protocol::BlobsByRange => rpc_blob_limits::<E>(),
Protocol::BlobsByRoot => rpc_blob_limits::<E>(),
Protocol::DataColumnsByRoot => {
rpc_data_column_limits::<E>(fork_context.current_fork(), &fork_context.spec)
rpc_data_column_limits::<E>(fork_context.current_fork_epoch(), &fork_context.spec)
}
Protocol::DataColumnsByRange => {
rpc_data_column_limits::<E>(fork_context.current_fork(), &fork_context.spec)
rpc_data_column_limits::<E>(fork_context.current_fork_epoch(), &fork_context.spec)
}
Protocol::Ping => RpcLimits::new(
<Ping as Encode>::ssz_fixed_len(),
@@ -564,16 +564,16 @@ impl ProtocolId {
<MetaDataV3<E> as Encode>::ssz_fixed_len(),
),
Protocol::LightClientBootstrap => {
rpc_light_client_bootstrap_limits_by_fork(fork_context.current_fork())
rpc_light_client_bootstrap_limits_by_fork(fork_context.current_fork_name())
}
Protocol::LightClientOptimisticUpdate => {
rpc_light_client_optimistic_update_limits_by_fork(fork_context.current_fork())
rpc_light_client_optimistic_update_limits_by_fork(fork_context.current_fork_name())
}
Protocol::LightClientFinalityUpdate => {
rpc_light_client_finality_update_limits_by_fork(fork_context.current_fork())
rpc_light_client_finality_update_limits_by_fork(fork_context.current_fork_name())
}
Protocol::LightClientUpdatesByRange => {
rpc_light_client_updates_by_range_limits_by_fork(fork_context.current_fork())
rpc_light_client_updates_by_range_limits_by_fork(fork_context.current_fork_name())
}
}
}
@@ -635,11 +635,13 @@ pub fn rpc_blob_limits<E: EthSpec>() -> RpcLimits {
}
}
pub fn rpc_data_column_limits<E: EthSpec>(fork_name: ForkName, spec: &ChainSpec) -> RpcLimits {
pub fn rpc_data_column_limits<E: EthSpec>(
current_digest_epoch: Epoch,
spec: &ChainSpec,
) -> RpcLimits {
RpcLimits::new(
DataColumnSidecar::<E>::min_size(),
// TODO(EIP-7892): fix this once we change fork-version on BPO forks
DataColumnSidecar::<E>::max_size(spec.max_blobs_per_block_within_fork(fork_name) as usize),
DataColumnSidecar::<E>::max_size(spec.max_blobs_per_block(current_digest_epoch) as usize),
)
}
@@ -738,16 +740,13 @@ impl<E: EthSpec> RequestType<E> {
/* These functions are used in the handler for stream management */
/// Maximum number of responses expected for this request.
/// TODO(EIP-7892): refactor this to remove `_current_fork`
pub fn max_responses(&self, _current_fork: ForkName, spec: &ChainSpec) -> u64 {
pub fn max_responses(&self, digest_epoch: Epoch, spec: &ChainSpec) -> u64 {
match self {
RequestType::Status(_) => 1,
RequestType::Goodbye(_) => 0,
RequestType::BlocksByRange(req) => *req.count(),
RequestType::BlocksByRoot(req) => req.block_roots().len() as u64,
RequestType::BlobsByRange(req) => {
req.max_blobs_requested(Slot::new(req.start_slot).epoch(E::slots_per_epoch()), spec)
}
RequestType::BlobsByRange(req) => req.max_blobs_requested(digest_epoch, spec),
RequestType::BlobsByRoot(req) => req.blob_ids.len() as u64,
RequestType::DataColumnsByRoot(req) => req.max_requested() as u64,
RequestType::DataColumnsByRange(req) => req.max_requested::<E>(),

View File

@@ -13,7 +13,7 @@ use std::sync::Arc;
use std::task::{Context, Poll};
use std::time::{Duration, Instant};
use tokio::time::Interval;
use types::{ChainSpec, EthSpec, ForkContext, ForkName};
use types::{ChainSpec, Epoch, EthSpec, ForkContext};
/// Nanoseconds since a given time.
// Maintained as u64 to reduce footprint
@@ -267,7 +267,7 @@ impl RPCRateLimiterBuilder {
pub trait RateLimiterItem {
fn protocol(&self) -> Protocol;
fn max_responses(&self, current_fork: ForkName, spec: &ChainSpec) -> u64;
fn max_responses(&self, digest_epoch: Epoch, spec: &ChainSpec) -> u64;
}
impl<E: EthSpec> RateLimiterItem for super::RequestType<E> {
@@ -275,8 +275,8 @@ impl<E: EthSpec> RateLimiterItem for super::RequestType<E> {
self.versioned_protocol().protocol()
}
fn max_responses(&self, current_fork: ForkName, spec: &ChainSpec) -> u64 {
self.max_responses(current_fork, spec)
fn max_responses(&self, digest_epoch: Epoch, spec: &ChainSpec) -> u64 {
self.max_responses(digest_epoch, spec)
}
}
@@ -285,7 +285,7 @@ impl<E: EthSpec> RateLimiterItem for (super::RpcResponse<E>, Protocol) {
self.1
}
fn max_responses(&self, _current_fork: ForkName, _spec: &ChainSpec) -> u64 {
fn max_responses(&self, _digest_epoch: Epoch, _spec: &ChainSpec) -> u64 {
// A response chunk consumes one token of the rate limiter.
1
}
@@ -353,7 +353,10 @@ impl RPCRateLimiter {
) -> Result<(), RateLimitedErr> {
let time_since_start = self.init_time.elapsed();
let tokens = request
.max_responses(self.fork_context.current_fork(), &self.fork_context.spec)
.max_responses(
self.fork_context.current_fork_epoch(),
&self.fork_context.spec,
)
.max(1);
let check =

View File

@@ -193,10 +193,15 @@ impl<E: EthSpec> Network<E> {
// set up a collection of variables accessible outside of the network crate
// Create an ENR or load from disk if appropriate
let next_fork_digest = ctx
.fork_context
.next_fork_digest()
.unwrap_or_else(|| ctx.fork_context.current_fork_digest());
let enr = crate::discovery::enr::build_or_load_enr::<E>(
local_keypair.clone(),
&config,
&ctx.enr_fork_id,
next_fork_digest,
&ctx.chain_spec,
)?;
@@ -280,27 +285,26 @@ impl<E: EthSpec> Network<E> {
// Set up a scoring update interval
let update_gossipsub_scores = tokio::time::interval(params.decay_interval);
let current_and_future_forks = ForkName::list_all().into_iter().filter_map(|fork| {
if fork >= ctx.fork_context.current_fork() {
ctx.fork_context
.to_context_bytes(fork)
.map(|fork_digest| (fork, fork_digest))
} else {
None
}
});
let current_digest_epoch = ctx.fork_context.current_fork_epoch();
let current_and_future_digests =
ctx.chain_spec
.all_digest_epochs()
.filter_map(|digest_epoch| {
if digest_epoch >= current_digest_epoch {
Some((digest_epoch, ctx.fork_context.context_bytes(digest_epoch)))
} else {
None
}
});
let all_topics_for_forks = current_and_future_forks
.map(|(fork, fork_digest)| {
let all_topics_for_digests = current_and_future_digests
.map(|(epoch, digest)| {
let fork = ctx.chain_spec.fork_name_at_epoch(epoch);
all_topics_at_fork::<E>(fork, &ctx.chain_spec)
.into_iter()
.map(|topic| {
Topic::new(GossipTopic::new(
topic,
GossipEncoding::default(),
fork_digest,
))
.into()
Topic::new(GossipTopic::new(topic, GossipEncoding::default(), digest))
.into()
})
.collect::<Vec<TopicHash>>()
})
@@ -308,7 +312,7 @@ impl<E: EthSpec> Network<E> {
// For simplicity find the fork with the most individual topics and assume all forks
// have the same topic count
let max_topics_at_any_fork = all_topics_for_forks
let max_topics_at_any_fork = all_topics_for_digests
.iter()
.map(|topics| topics.len())
.max()
@@ -359,7 +363,7 @@ impl<E: EthSpec> Network<E> {
// If we are using metrics, then register which topics we want to make sure to keep
// track of
if ctx.libp2p_registry.is_some() {
for topics in all_topics_for_forks {
for topics in all_topics_for_digests {
gossipsub.register_topics_for_metrics(topics);
}
}
@@ -1347,6 +1351,12 @@ impl<E: EthSpec> Network<E> {
self.enr_fork_id = enr_fork_id;
}
pub fn update_nfd(&mut self, nfd: [u8; 4]) {
if let Err(e) = self.discovery_mut().update_enr_nfd(nfd) {
crit!(error = e, "Could not update nfd in ENR");
}
}
/* Private internal functions */
/// Updates the current meta data of the node to match the local ENR.

View File

@@ -171,28 +171,29 @@ impl<E: EthSpec> PubsubMessage<E> {
// the ssz decoders
match gossip_topic.kind() {
GossipKind::BeaconAggregateAndProof => {
let signed_aggregate_and_proof =
match fork_context.from_context_bytes(gossip_topic.fork_digest) {
Some(&fork_name) => {
if fork_name.electra_enabled() {
SignedAggregateAndProof::Electra(
SignedAggregateAndProofElectra::from_ssz_bytes(data)
.map_err(|e| format!("{:?}", e))?,
)
} else {
SignedAggregateAndProof::Base(
SignedAggregateAndProofBase::from_ssz_bytes(data)
.map_err(|e| format!("{:?}", e))?,
)
}
let signed_aggregate_and_proof = match fork_context
.get_fork_from_context_bytes(gossip_topic.fork_digest)
{
Some(&fork_name) => {
if fork_name.electra_enabled() {
SignedAggregateAndProof::Electra(
SignedAggregateAndProofElectra::from_ssz_bytes(data)
.map_err(|e| format!("{:?}", e))?,
)
} else {
SignedAggregateAndProof::Base(
SignedAggregateAndProofBase::from_ssz_bytes(data)
.map_err(|e| format!("{:?}", e))?,
)
}
None => {
return Err(format!(
"Unknown gossipsub fork digest: {:?}",
gossip_topic.fork_digest
))
}
};
}
None => {
return Err(format!(
"Unknown gossipsub fork digest: {:?}",
gossip_topic.fork_digest
))
}
};
Ok(PubsubMessage::AggregateAndProofAttestation(Box::new(
signed_aggregate_and_proof,
)))
@@ -206,48 +207,49 @@ impl<E: EthSpec> PubsubMessage<E> {
))))
}
GossipKind::BeaconBlock => {
let beacon_block =
match fork_context.from_context_bytes(gossip_topic.fork_digest) {
Some(ForkName::Base) => SignedBeaconBlock::<E>::Base(
SignedBeaconBlockBase::from_ssz_bytes(data)
.map_err(|e| format!("{:?}", e))?,
),
Some(ForkName::Altair) => SignedBeaconBlock::<E>::Altair(
SignedBeaconBlockAltair::from_ssz_bytes(data)
.map_err(|e| format!("{:?}", e))?,
),
Some(ForkName::Bellatrix) => SignedBeaconBlock::<E>::Bellatrix(
SignedBeaconBlockBellatrix::from_ssz_bytes(data)
.map_err(|e| format!("{:?}", e))?,
),
Some(ForkName::Capella) => SignedBeaconBlock::<E>::Capella(
SignedBeaconBlockCapella::from_ssz_bytes(data)
.map_err(|e| format!("{:?}", e))?,
),
Some(ForkName::Deneb) => SignedBeaconBlock::<E>::Deneb(
SignedBeaconBlockDeneb::from_ssz_bytes(data)
.map_err(|e| format!("{:?}", e))?,
),
Some(ForkName::Electra) => SignedBeaconBlock::<E>::Electra(
SignedBeaconBlockElectra::from_ssz_bytes(data)
.map_err(|e| format!("{:?}", e))?,
),
Some(ForkName::Fulu) => SignedBeaconBlock::<E>::Fulu(
SignedBeaconBlockFulu::from_ssz_bytes(data)
.map_err(|e| format!("{:?}", e))?,
),
None => {
return Err(format!(
"Unknown gossipsub fork digest: {:?}",
gossip_topic.fork_digest
))
}
};
let beacon_block = match fork_context
.get_fork_from_context_bytes(gossip_topic.fork_digest)
{
Some(ForkName::Base) => SignedBeaconBlock::<E>::Base(
SignedBeaconBlockBase::from_ssz_bytes(data)
.map_err(|e| format!("{:?}", e))?,
),
Some(ForkName::Altair) => SignedBeaconBlock::<E>::Altair(
SignedBeaconBlockAltair::from_ssz_bytes(data)
.map_err(|e| format!("{:?}", e))?,
),
Some(ForkName::Bellatrix) => SignedBeaconBlock::<E>::Bellatrix(
SignedBeaconBlockBellatrix::from_ssz_bytes(data)
.map_err(|e| format!("{:?}", e))?,
),
Some(ForkName::Capella) => SignedBeaconBlock::<E>::Capella(
SignedBeaconBlockCapella::from_ssz_bytes(data)
.map_err(|e| format!("{:?}", e))?,
),
Some(ForkName::Deneb) => SignedBeaconBlock::<E>::Deneb(
SignedBeaconBlockDeneb::from_ssz_bytes(data)
.map_err(|e| format!("{:?}", e))?,
),
Some(ForkName::Electra) => SignedBeaconBlock::<E>::Electra(
SignedBeaconBlockElectra::from_ssz_bytes(data)
.map_err(|e| format!("{:?}", e))?,
),
Some(ForkName::Fulu) => SignedBeaconBlock::<E>::Fulu(
SignedBeaconBlockFulu::from_ssz_bytes(data)
.map_err(|e| format!("{:?}", e))?,
),
None => {
return Err(format!(
"Unknown gossipsub fork digest: {:?}",
gossip_topic.fork_digest
))
}
};
Ok(PubsubMessage::BeaconBlock(Arc::new(beacon_block)))
}
GossipKind::BlobSidecar(blob_index) => {
if let Some(fork_name) =
fork_context.from_context_bytes(gossip_topic.fork_digest)
fork_context.get_fork_from_context_bytes(gossip_topic.fork_digest)
{
if fork_name.deneb_enabled() {
let blob_sidecar = Arc::new(
@@ -267,7 +269,7 @@ impl<E: EthSpec> PubsubMessage<E> {
))
}
GossipKind::DataColumnSidecar(subnet_id) => {
match fork_context.from_context_bytes(gossip_topic.fork_digest) {
match fork_context.get_fork_from_context_bytes(gossip_topic.fork_digest) {
Some(fork) if fork.fulu_enabled() => {
let col_sidecar = Arc::new(
DataColumnSidecar::from_ssz_bytes(data)
@@ -295,28 +297,29 @@ impl<E: EthSpec> PubsubMessage<E> {
Ok(PubsubMessage::ProposerSlashing(Box::new(proposer_slashing)))
}
GossipKind::AttesterSlashing => {
let attester_slashing =
match fork_context.from_context_bytes(gossip_topic.fork_digest) {
Some(&fork_name) => {
if fork_name.electra_enabled() {
AttesterSlashing::Electra(
AttesterSlashingElectra::from_ssz_bytes(data)
.map_err(|e| format!("{:?}", e))?,
)
} else {
AttesterSlashing::Base(
AttesterSlashingBase::from_ssz_bytes(data)
.map_err(|e| format!("{:?}", e))?,
)
}
let attester_slashing = match fork_context
.get_fork_from_context_bytes(gossip_topic.fork_digest)
{
Some(&fork_name) => {
if fork_name.electra_enabled() {
AttesterSlashing::Electra(
AttesterSlashingElectra::from_ssz_bytes(data)
.map_err(|e| format!("{:?}", e))?,
)
} else {
AttesterSlashing::Base(
AttesterSlashingBase::from_ssz_bytes(data)
.map_err(|e| format!("{:?}", e))?,
)
}
None => {
return Err(format!(
"Unknown gossipsub fork digest: {:?}",
gossip_topic.fork_digest
))
}
};
}
None => {
return Err(format!(
"Unknown gossipsub fork digest: {:?}",
gossip_topic.fork_digest
))
}
};
Ok(PubsubMessage::AttesterSlashing(Box::new(attester_slashing)))
}
GossipKind::SignedContributionAndProof => {
@@ -343,7 +346,7 @@ impl<E: EthSpec> PubsubMessage<E> {
)))
}
GossipKind::LightClientFinalityUpdate => {
let light_client_finality_update = match fork_context.from_context_bytes(gossip_topic.fork_digest) {
let light_client_finality_update = match fork_context.get_fork_from_context_bytes(gossip_topic.fork_digest) {
Some(&fork_name) => {
LightClientFinalityUpdate::from_ssz_bytes(data, fork_name)
.map_err(|e| format!("{:?}", e))?
@@ -358,7 +361,7 @@ impl<E: EthSpec> PubsubMessage<E> {
)))
}
GossipKind::LightClientOptimisticUpdate => {
let light_client_optimistic_update = match fork_context.from_context_bytes(gossip_topic.fork_digest) {
let light_client_optimistic_update = match fork_context.get_fork_from_context_bytes(gossip_topic.fork_digest) {
Some(&fork_name) => {
LightClientOptimisticUpdate::from_ssz_bytes(data, fork_name)
.map_err(|e| format!("{:?}", e))?