From 672dcbd868198ec6430c90f9d41373cde740dc3d Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 5 Sep 2024 12:41:19 +1000 Subject: [PATCH 01/66] Ignore Rust 1.82 warnings about void patterns (#6357) * Ignore Rust 1.82 warnings about void patterns --- beacon_node/lighthouse_network/gossipsub/src/handler.rs | 4 ++++ beacon_node/lighthouse_network/src/service/mod.rs | 1 + common/warp_utils/src/reject.rs | 2 ++ 3 files changed, 7 insertions(+) diff --git a/beacon_node/lighthouse_network/gossipsub/src/handler.rs b/beacon_node/lighthouse_network/gossipsub/src/handler.rs index 359bf8da42..d89013eb2f 100644 --- a/beacon_node/lighthouse_network/gossipsub/src/handler.rs +++ b/beacon_node/lighthouse_network/gossipsub/src/handler.rs @@ -520,6 +520,7 @@ impl ConnectionHandler for Handler { .. }) => match protocol { Either::Left(protocol) => handler.on_fully_negotiated_inbound(protocol), + #[allow(unreachable_patterns)] Either::Right(v) => void::unreachable(v), }, ConnectionEvent::FullyNegotiatedOutbound(fully_negotiated_outbound) => { @@ -531,6 +532,9 @@ impl ConnectionHandler for Handler { }) => { tracing::debug!("Dial upgrade error: Protocol negotiation timeout"); } + // This pattern is unreachable as of Rust 1.82, we can remove it once the + // MSRV is increased past that version. + #[allow(unreachable_patterns)] ConnectionEvent::DialUpgradeError(DialUpgradeError { error: StreamUpgradeError::Apply(e), .. diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index a95912ff06..d97b52f79f 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -1809,6 +1809,7 @@ impl Network { self.inject_upnp_event(e); None } + #[allow(unreachable_patterns)] BehaviourEvent::ConnectionLimits(le) => void::unreachable(le), }, SwarmEvent::ConnectionEstablished { .. 
} => None, diff --git a/common/warp_utils/src/reject.rs b/common/warp_utils/src/reject.rs index 9b28c65212..bbd5274a7e 100644 --- a/common/warp_utils/src/reject.rs +++ b/common/warp_utils/src/reject.rs @@ -265,6 +265,8 @@ pub async fn convert_rejection(res: Result) -> Res Ok(response) => response.into_response(), Err(e) => match handle_rejection(e).await { Ok(reply) => reply.into_response(), + // We can simplify this once Rust 1.82 is MSRV + #[allow(unreachable_patterns)] Err(_) => warp::reply::with_status( warp::reply::json(&"unhandled error"), eth2::StatusCode::INTERNAL_SERVER_ERROR, From 26c19d65a392b984702d5ef8bff7411a87818b76 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 5 Sep 2024 15:01:16 +1000 Subject: [PATCH 02/66] Enable `large_stack_frames` lint (#6343) * Enable `large_stack_frames` lint --- Makefile | 1 + beacon_node/beacon_chain/src/kzg_utils.rs | 12 ++++++++---- .../src/test_utils/execution_block_generator.rs | 1 + beacon_node/src/cli.rs | 1 + testing/simulator/src/basic_sim.rs | 1 + testing/state_transition_vectors/src/macros.rs | 1 + watch/tests/tests.rs | 2 ++ 7 files changed, 15 insertions(+), 4 deletions(-) diff --git a/Makefile b/Makefile index d94c2df261..e6420a4c98 100644 --- a/Makefile +++ b/Makefile @@ -207,6 +207,7 @@ lint: cargo clippy --workspace --tests $(EXTRA_CLIPPY_OPTS) --features "$(TEST_FEATURES)" -- \ -D clippy::fn_to_numeric_cast_any \ -D clippy::manual_let_else \ + -D clippy::large_stack_frames \ -D warnings \ -A clippy::derive_partial_eq_without_eq \ -A clippy::upper-case-acronyms \ diff --git a/beacon_node/beacon_chain/src/kzg_utils.rs b/beacon_node/beacon_chain/src/kzg_utils.rs index 55c1ee9e98..c2355e6f4f 100644 --- a/beacon_node/beacon_chain/src/kzg_utils.rs +++ b/beacon_node/beacon_chain/src/kzg_utils.rs @@ -17,6 +17,10 @@ fn ssz_blob_to_crypto_blob(blob: &Blob) -> Result(blob: &Blob) -> Result, KzgError> { + ssz_blob_to_crypto_blob::(blob).map(Box::new) +} + /// Converts a cell ssz List object to an array to be 
used with the kzg /// crypto library. fn ssz_cell_to_crypto_cell(cell: &Cell) -> Result { @@ -34,7 +38,7 @@ pub fn validate_blob( kzg_proof: KzgProof, ) -> Result<(), KzgError> { let _timer = crate::metrics::start_timer(&crate::metrics::KZG_VERIFICATION_SINGLE_TIMES); - let kzg_blob = ssz_blob_to_crypto_blob::(blob)?; + let kzg_blob = ssz_blob_to_crypto_blob_boxed::(blob)?; kzg.verify_blob_kzg_proof(&kzg_blob, kzg_commitment, kzg_proof) } @@ -104,7 +108,7 @@ pub fn compute_blob_kzg_proof( blob: &Blob, kzg_commitment: KzgCommitment, ) -> Result { - let kzg_blob = ssz_blob_to_crypto_blob::(blob)?; + let kzg_blob = ssz_blob_to_crypto_blob_boxed::(blob)?; kzg.compute_blob_kzg_proof(&kzg_blob, kzg_commitment) } @@ -113,7 +117,7 @@ pub fn blob_to_kzg_commitment( kzg: &Kzg, blob: &Blob, ) -> Result { - let kzg_blob = ssz_blob_to_crypto_blob::(blob)?; + let kzg_blob = ssz_blob_to_crypto_blob_boxed::(blob)?; kzg.blob_to_kzg_commitment(&kzg_blob) } @@ -124,7 +128,7 @@ pub fn compute_kzg_proof( z: Hash256, ) -> Result<(KzgProof, Hash256), KzgError> { let z = z.0.into(); - let kzg_blob = ssz_blob_to_crypto_blob::(blob)?; + let kzg_blob = ssz_blob_to_crypto_blob_boxed::(blob)?; kzg.compute_kzg_proof(&kzg_blob, &z) .map(|(proof, z)| (proof, Hash256::from_slice(&z.to_vec()))) } diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index 041b31e2b0..8a30800fa7 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -954,6 +954,7 @@ mod test { let kzg = load_kzg()?; let (kzg_commitment, kzg_proof, blob) = load_test_blobs_bundle::()?; let kzg_blob = kzg::Blob::from_bytes(blob.as_ref()) + .map(Box::new) .map_err(|e| format!("Error converting blob to kzg blob: {e:?}"))?; kzg.verify_blob_kzg_proof(&kzg_blob, kzg_commitment, kzg_proof) .map_err(|e| format!("Invalid blobs 
bundle: {e:?}")) diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 67bc9d7d40..1e9611fd1e 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -4,6 +4,7 @@ use clap::{builder::ArgPredicate, crate_version, Arg, ArgAction, ArgGroup, Comma use clap_utils::{get_color_style, FLAG_HEADER}; use strum::VariantNames; +#[allow(clippy::large_stack_frames)] pub fn cli_app() -> Command { Command::new("beacon_node") .display_order(0) diff --git a/testing/simulator/src/basic_sim.rs b/testing/simulator/src/basic_sim.rs index 46196ba2b1..16badaffc2 100644 --- a/testing/simulator/src/basic_sim.rs +++ b/testing/simulator/src/basic_sim.rs @@ -26,6 +26,7 @@ const DENEB_FORK_EPOCH: u64 = 2; const SUGGESTED_FEE_RECIPIENT: [u8; 20] = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]; +#[allow(clippy::large_stack_frames)] pub fn run_basic_sim(matches: &ArgMatches) -> Result<(), String> { let node_count = matches .get_one::("nodes") diff --git a/testing/state_transition_vectors/src/macros.rs b/testing/state_transition_vectors/src/macros.rs index 5dafbf549a..a7f87b1c26 100644 --- a/testing/state_transition_vectors/src/macros.rs +++ b/testing/state_transition_vectors/src/macros.rs @@ -4,6 +4,7 @@ /// - `mod tests`: runs all the test vectors locally. macro_rules! 
vectors_and_tests { ($($name: ident, $test: expr),*) => { + #[allow(clippy::large_stack_frames)] pub async fn vectors() -> Vec { let mut vec = vec![]; diff --git a/watch/tests/tests.rs b/watch/tests/tests.rs index 5461508edd..e21cf151b1 100644 --- a/watch/tests/tests.rs +++ b/watch/tests/tests.rs @@ -852,6 +852,7 @@ async fn chain_grows() { #[cfg(unix)] #[tokio::test] +#[allow(clippy::large_stack_frames)] async fn chain_grows_with_metadata() { let builder = TesterBuilder::new().await; @@ -959,6 +960,7 @@ async fn chain_grows_with_metadata() { #[cfg(unix)] #[tokio::test] +#[allow(clippy::large_stack_frames)] async fn chain_grows_with_metadata_and_multiple_skip_slots() { let builder = TesterBuilder::new().await; From b50ce606623b7b6bad9ad0bf649807f6ea2226bb Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Thu, 5 Sep 2024 15:39:00 +1000 Subject: [PATCH 03/66] Add blob count label to `DATA_COLUMN_SIDECAR_COMPUTATION` metric (#6340) * Add blob count label to `DATA_COLUMN_SIDECAR_COMPUTATION` metric, and move metrics into the compute function, recording only successful computation. * Move `discard_timer_on_break` usage to caller site. 
* Merge branch 'unstable' into compute-data-column-metric * Merge branch 'unstable' into compute-data-column-metric --- .../beacon_chain/src/block_verification.rs | 9 ++++-- beacon_node/beacon_chain/src/metrics.rs | 7 +++-- common/lighthouse_metrics/src/lib.rs | 28 +++++++++++++++++++ 3 files changed, 39 insertions(+), 5 deletions(-) diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 8bd93a3753..976388a4f5 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -74,6 +74,7 @@ use derivative::Derivative; use eth2::types::{BlockGossip, EventKind, PublishBlockRequest}; use execution_layer::PayloadStatus; pub use fork_choice::{AttestationFromBlock, PayloadVerificationStatus}; +use lighthouse_metrics::TryExt; use parking_lot::RwLockReadGuard; use proto_array::Block as ProtoBlock; use safe_arith::ArithError; @@ -796,8 +797,12 @@ fn build_gossip_verified_data_columns( GossipDataColumnError::KzgNotInitialized, ))?; - let timer = metrics::start_timer(&metrics::DATA_COLUMN_SIDECAR_COMPUTATION); - let sidecars = blobs_to_data_column_sidecars(&blobs, block, kzg, &chain.spec)?; + let mut timer = metrics::start_timer_vec( + &metrics::DATA_COLUMN_SIDECAR_COMPUTATION, + &[&blobs.len().to_string()], + ); + let sidecars = blobs_to_data_column_sidecars(&blobs, block, kzg, &chain.spec) + .discard_timer_on_break(&mut timer)?; drop(timer); let mut gossip_verified_data_columns = vec![]; for sidecar in sidecars { diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index 3da2bea36c..79b2fc592b 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -1647,11 +1647,12 @@ pub static BLOB_SIDECAR_INCLUSION_PROOF_COMPUTATION: LazyLock> "Time taken to compute blob sidecar inclusion proof", ) }); -pub static DATA_COLUMN_SIDECAR_COMPUTATION: LazyLock> = LazyLock::new(|| { 
- try_create_histogram_with_buckets( +pub static DATA_COLUMN_SIDECAR_COMPUTATION: LazyLock> = LazyLock::new(|| { + try_create_histogram_vec_with_buckets( "data_column_sidecar_computation_seconds", "Time taken to compute data column sidecar, including cells, proofs and inclusion proof", - Ok(vec![0.04, 0.05, 0.1, 0.2, 0.3, 0.5, 0.7, 1.0]), + Ok(vec![0.1, 0.25, 0.5, 1.0, 2.5, 5.0, 10.0]), + &["blob_count"], ) }); pub static DATA_COLUMN_SIDECAR_INCLUSION_PROOF_VERIFICATION: LazyLock> = diff --git a/common/lighthouse_metrics/src/lib.rs b/common/lighthouse_metrics/src/lib.rs index fa8f47e364..f52913dd00 100644 --- a/common/lighthouse_metrics/src/lib.rs +++ b/common/lighthouse_metrics/src/lib.rs @@ -400,3 +400,31 @@ pub fn decimal_buckets(min_power: i32, max_power: i32) -> Result> { } Ok(buckets) } + +/// Would be nice to use the `Try` trait bound and have a single implementation, but try_trait_v2 +/// is not a stable feature yet. +pub trait TryExt { + fn discard_timer_on_break(self, timer: &mut Option) -> Self; +} + +impl TryExt for std::result::Result { + fn discard_timer_on_break(self, timer_opt: &mut Option) -> Self { + if self.is_err() { + if let Some(timer) = timer_opt.take() { + timer.stop_and_discard(); + } + } + self + } +} + +impl TryExt for Option { + fn discard_timer_on_break(self, timer_opt: &mut Option) -> Self { + if self.is_none() { + if let Some(timer) = timer_opt.take() { + timer.stop_and_discard(); + } + } + self + } +} From 0fb4a2046cdb2dd040f4b3e74da2fb09f211d646 Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Wed, 4 Sep 2024 22:39:03 -0700 Subject: [PATCH 04/66] Metadata request ordering (#6336) * Send metadata request ordering * Merge branch 'unstable' into metadata-order --- .../src/peer_manager/network_behaviour.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs index 
d9df8e7c4b..b7fd5b5e5d 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs @@ -240,10 +240,6 @@ impl PeerManager { "connection" => ?endpoint.to_endpoint() ); - if other_established == 0 { - self.events.push(PeerManagerEvent::MetaData(peer_id)); - } - // Update the prometheus metrics if self.metrics_enabled { metrics::inc_counter(&metrics::PEER_CONNECT_EVENT_COUNT); @@ -267,6 +263,10 @@ impl PeerManager { return; } + if other_established == 0 { + self.events.push(PeerManagerEvent::MetaData(peer_id)); + } + // NOTE: We don't register peers that we are disconnecting immediately. The network service // does not need to know about these peers. match endpoint { From 0e94fe1aa85c1700cb52b9c9a1a64ce5a90aaf10 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 5 Sep 2024 15:39:05 +1000 Subject: [PATCH 05/66] Clarify validator monitor block log (#6342) * Clarify validator monitor block log * Merge branch 'unstable' into clarify-block-log --- beacon_node/beacon_chain/src/validator_monitor.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/beacon_node/beacon_chain/src/validator_monitor.rs b/beacon_node/beacon_chain/src/validator_monitor.rs index d452490081..f8a483c621 100644 --- a/beacon_node/beacon_chain/src/validator_monitor.rs +++ b/beacon_node/beacon_chain/src/validator_monitor.rs @@ -1185,7 +1185,7 @@ impl ValidatorMonitor { info!( self.log, - "Block from API"; + "Block from monitored validator"; "root" => ?block_root, "delay" => %delay.as_millis(), "slot" => %block.slot(), From 369807becc5ba3189bdb732beb1ac6b6f6159e90 Mon Sep 17 00:00:00 2001 From: Lion - dapplion <35266934+dapplion@users.noreply.github.com> Date: Thu, 5 Sep 2024 17:24:21 +0200 Subject: [PATCH 06/66] Check known parent on rpc blob process (#5893) * Check known parent on rpc blob process * fix test * Merge branch 'unstable' of https://github.com/sigp/lighthouse into 
blob-unknown-parent --- beacon_node/beacon_chain/src/beacon_chain.rs | 30 +++++++++++++++++++ beacon_node/beacon_chain/tests/events.rs | 14 +++++---- .../sync/block_lookups/single_block_lookup.rs | 9 +----- 3 files changed, 40 insertions(+), 13 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index fa9a0c2e69..2ba0ba7cb0 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -3073,6 +3073,23 @@ impl BeaconChain { return Err(BlockError::BlockIsAlreadyKnown(block_root)); } + // Reject RPC blobs referencing unknown parents. Otherwise we allow potentially invalid data + // into the da_checker, where invalid = descendant of invalid blocks. + // Note: blobs should have at least one item and all items have the same parent root. + if let Some(parent_root) = blobs + .iter() + .filter_map(|b| b.as_ref().map(|b| b.block_parent_root())) + .next() + { + if !self + .canonical_head + .fork_choice_read_lock() + .contains_block(&parent_root) + { + return Err(BlockError::ParentUnknown { parent_root }); + } + } + if let Some(event_handler) = self.event_handler.as_ref() { if event_handler.has_blob_sidecar_subscribers() { for blob in blobs.iter().filter_map(|maybe_blob| maybe_blob.as_ref()) { @@ -3122,6 +3139,19 @@ impl BeaconChain { return Err(BlockError::BlockIsAlreadyKnown(block_root)); } + // Reject RPC columns referencing unknown parents. Otherwise we allow potentially invalid data + // into the da_checker, where invalid = descendant of invalid blocks. + // Note: custody_columns should have at least one item and all items have the same parent root. 
+ if let Some(parent_root) = custody_columns.iter().map(|c| c.block_parent_root()).next() { + if !self + .canonical_head + .fork_choice_read_lock() + .contains_block(&parent_root) + { + return Err(BlockError::ParentUnknown { parent_root }); + } + } + let r = self .check_rpc_custody_columns_availability_and_import(slot, block_root, custody_columns) .await; diff --git a/beacon_node/beacon_chain/tests/events.rs b/beacon_node/beacon_chain/tests/events.rs index d54543e4f6..1261e2d53e 100644 --- a/beacon_node/beacon_chain/tests/events.rs +++ b/beacon_node/beacon_chain/tests/events.rs @@ -62,13 +62,17 @@ async fn blob_sidecar_event_on_process_rpc_blobs() { let kzg = harness.chain.kzg.as_ref().unwrap(); let mut rng = StdRng::seed_from_u64(0xDEADBEEF0BAD5EEDu64); - let blob_1 = BlobSidecar::random_valid(&mut rng, kzg) - .map(Arc::new) - .unwrap(); - let blob_2 = Arc::new(BlobSidecar { + let mut blob_1 = BlobSidecar::random_valid(&mut rng, kzg).unwrap(); + let mut blob_2 = BlobSidecar { index: 1, ..BlobSidecar::random_valid(&mut rng, kzg).unwrap() - }); + }; + let parent_root = harness.chain.head().head_block_root(); + blob_1.signed_block_header.message.parent_root = parent_root; + blob_2.signed_block_header.message.parent_root = parent_root; + let blob_1 = Arc::new(blob_1); + let blob_2 = Arc::new(blob_2); + let blobs = FixedBlobSidecarList::from(vec![Some(blob_1.clone()), Some(blob_2.clone())]); let expected_sse_blobs = vec![ SseBlobSidecar::from_blob_sidecar(blob_1.as_ref()), diff --git a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs index 4ae55d5aaf..73ffcd4384 100644 --- a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs +++ b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs @@ -1,4 +1,3 @@ -use super::common::ResponseType; use super::{BlockComponent, PeerId, SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS}; use crate::sync::block_lookups::common::RequestState; use 
crate::sync::network_context::{ @@ -188,7 +187,6 @@ impl SingleBlockLookup { .state .peek_downloaded_data() .cloned(); - let block_is_processed = self.block_request_state.state.is_processed(); let request = R::request_state_mut(self); // Attempt to progress awaiting downloads @@ -241,12 +239,7 @@ impl SingleBlockLookup { // Otherwise, attempt to progress awaiting processing // If this request is awaiting a parent lookup to be processed, do not send for processing. // The request will be rejected with unknown parent error. - // - // TODO: The condition `block_is_processed || Block` can be dropped after checking for - // unknown parent root when import RPC blobs - } else if !awaiting_parent - && (block_is_processed || matches!(R::response_type(), ResponseType::Block)) - { + } else if !awaiting_parent { // maybe_start_processing returns Some if state == AwaitingProcess. This pattern is // useful to conditionally access the result data. if let Some(result) = request.get_state_mut().maybe_start_processing() { From df19b6220ab8c7f8097f28748ff38b734873a478 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Fri, 6 Sep 2024 02:19:07 +1000 Subject: [PATCH 07/66] Remove beta tag from gossipsub 1.2 (#6344) * Remove the beta tag from gossipsub v1.2 * fix clippy * Merge branch 'unstable' into remove-beta-tag --- beacon_node/lighthouse_network/gossipsub/CHANGELOG.md | 2 ++ beacon_node/lighthouse_network/gossipsub/src/behaviour.rs | 2 +- .../lighthouse_network/gossipsub/src/behaviour/tests.rs | 8 ++++---- beacon_node/lighthouse_network/gossipsub/src/protocol.rs | 6 +++--- beacon_node/lighthouse_network/gossipsub/src/types.rs | 6 +++--- 5 files changed, 13 insertions(+), 11 deletions(-) diff --git a/beacon_node/lighthouse_network/gossipsub/CHANGELOG.md b/beacon_node/lighthouse_network/gossipsub/CHANGELOG.md index 7ec10af741..006eb20a70 100644 --- a/beacon_node/lighthouse_network/gossipsub/CHANGELOG.md +++ b/beacon_node/lighthouse_network/gossipsub/CHANGELOG.md @@ -1,4 +1,6 @@ ## 0.5 
Sigma Prime fork +- Remove the beta tag from the v1.2 upgrade. + See [PR 6344](https://github.com/sigp/lighthouse/pull/6344) - Implement IDONTWANT messages as per [spec](https://github.com/libp2p/specs/pull/548). See [PR 5422](https://github.com/sigp/lighthouse/pull/5422) diff --git a/beacon_node/lighthouse_network/gossipsub/src/behaviour.rs b/beacon_node/lighthouse_network/gossipsub/src/behaviour.rs index 94a7ee1fc3..996f701e89 100644 --- a/beacon_node/lighthouse_network/gossipsub/src/behaviour.rs +++ b/beacon_node/lighthouse_network/gossipsub/src/behaviour.rs @@ -2716,7 +2716,7 @@ where }; // Only gossipsub 1.2 peers support IDONTWANT. - if peer.kind != PeerKind::Gossipsubv1_2_beta { + if peer.kind != PeerKind::Gossipsubv1_2 { continue; } diff --git a/beacon_node/lighthouse_network/gossipsub/src/behaviour/tests.rs b/beacon_node/lighthouse_network/gossipsub/src/behaviour/tests.rs index 19d09cd890..00de3ba2db 100644 --- a/beacon_node/lighthouse_network/gossipsub/src/behaviour/tests.rs +++ b/beacon_node/lighthouse_network/gossipsub/src/behaviour/tests.rs @@ -5259,7 +5259,7 @@ fn sends_idontwant() { .to_subscribe(true) .gs_config(Config::default()) .explicit(1) - .peer_kind(PeerKind::Gossipsubv1_2_beta) + .peer_kind(PeerKind::Gossipsubv1_2) .create_network(); let local_id = PeerId::random(); @@ -5344,7 +5344,7 @@ fn doesnt_forward_idontwant() { .to_subscribe(true) .gs_config(Config::default()) .explicit(1) - .peer_kind(PeerKind::Gossipsubv1_2_beta) + .peer_kind(PeerKind::Gossipsubv1_2) .create_network(); let local_id = PeerId::random(); @@ -5393,7 +5393,7 @@ fn parses_idontwant() { .to_subscribe(true) .gs_config(Config::default()) .explicit(1) - .peer_kind(PeerKind::Gossipsubv1_2_beta) + .peer_kind(PeerKind::Gossipsubv1_2) .create_network(); let message_id = MessageId::new(&[0, 1, 2, 3]); @@ -5425,7 +5425,7 @@ fn clear_stale_idontwant() { .to_subscribe(true) .gs_config(Config::default()) .explicit(1) - .peer_kind(PeerKind::Gossipsubv1_2_beta) + 
.peer_kind(PeerKind::Gossipsubv1_2) .create_network(); let peer = gs.connected_peers.get_mut(&peers[2]).unwrap(); diff --git a/beacon_node/lighthouse_network/gossipsub/src/protocol.rs b/beacon_node/lighthouse_network/gossipsub/src/protocol.rs index 5611ae32c9..b72f4ccc9b 100644 --- a/beacon_node/lighthouse_network/gossipsub/src/protocol.rs +++ b/beacon_node/lighthouse_network/gossipsub/src/protocol.rs @@ -40,9 +40,9 @@ use void::Void; pub(crate) const SIGNING_PREFIX: &[u8] = b"libp2p-pubsub:"; -pub(crate) const GOSSIPSUB_1_2_0_BETA_PROTOCOL: ProtocolId = ProtocolId { +pub(crate) const GOSSIPSUB_1_2_0_PROTOCOL: ProtocolId = ProtocolId { protocol: StreamProtocol::new("/meshsub/1.2.0"), - kind: PeerKind::Gossipsubv1_2_beta, + kind: PeerKind::Gossipsubv1_2, }; pub(crate) const GOSSIPSUB_1_1_0_PROTOCOL: ProtocolId = ProtocolId { protocol: StreamProtocol::new("/meshsub/1.1.0"), @@ -74,7 +74,7 @@ impl Default for ProtocolConfig { max_transmit_size: 65536, validation_mode: ValidationMode::Strict, protocol_ids: vec![ - GOSSIPSUB_1_2_0_BETA_PROTOCOL, + GOSSIPSUB_1_2_0_PROTOCOL, GOSSIPSUB_1_1_0_PROTOCOL, GOSSIPSUB_1_0_0_PROTOCOL, ], diff --git a/beacon_node/lighthouse_network/gossipsub/src/types.rs b/beacon_node/lighthouse_network/gossipsub/src/types.rs index 8df307d470..d14a929374 100644 --- a/beacon_node/lighthouse_network/gossipsub/src/types.rs +++ b/beacon_node/lighthouse_network/gossipsub/src/types.rs @@ -132,7 +132,7 @@ pub(crate) struct PeerConnections { #[allow(non_camel_case_types)] pub enum PeerKind { /// A gossipsub 1.2 peer. - Gossipsubv1_2_beta, + Gossipsubv1_2, /// A gossipsub 1.1 peer. Gossipsubv1_1, /// A gossipsub 1.0 peer. 
@@ -148,7 +148,7 @@ impl PeerKind { pub(crate) fn is_gossipsub(&self) -> bool { matches!( self, - Self::Gossipsubv1_2_beta | Self::Gossipsubv1_1 | Self::Gossipsub + Self::Gossipsubv1_2 | Self::Gossipsubv1_1 | Self::Gossipsub ) } } @@ -623,7 +623,7 @@ impl PeerKind { Self::Floodsub => "Floodsub", Self::Gossipsub => "Gossipsub v1.0", Self::Gossipsubv1_1 => "Gossipsub v1.1", - Self::Gossipsubv1_2_beta => "Gossipsub v1.2-beta", + Self::Gossipsubv1_2 => "Gossipsub v1.2", } } } From c824142a6dad50c4c05cb17a5718ec79a66d4aaf Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Fri, 6 Sep 2024 11:45:34 +1000 Subject: [PATCH 08/66] Fix lints for Rust 1.81 (#6363) * Fix lints for Rust 1.81 --- .../src/attestation_verification.rs | 6 ++--- beacon_node/beacon_chain/src/beacon_chain.rs | 27 +++++++------------ .../beacon_chain/src/block_verification.rs | 3 +-- beacon_node/eth1/src/inner.rs | 5 +--- beacon_node/store/src/leveldb_store.rs | 3 +-- common/account_utils/src/lib.rs | 2 +- common/logging/src/lib.rs | 5 +--- 7 files changed, 16 insertions(+), 35 deletions(-) diff --git a/beacon_node/beacon_chain/src/attestation_verification.rs b/beacon_node/beacon_chain/src/attestation_verification.rs index 5a730719bf..491271d6a9 100644 --- a/beacon_node/beacon_chain/src/attestation_verification.rs +++ b/beacon_node/beacon_chain/src/attestation_verification.rs @@ -456,11 +456,10 @@ impl<'a, T: BeaconChainTypes> IndexedAggregatedAttestation<'a, T> { chain: &BeaconChain, ) -> Result { Self::verify_slashable(signed_aggregate, chain) - .map(|verified_aggregate| { + .inspect(|verified_aggregate| { if let Some(slasher) = chain.slasher.as_ref() { slasher.accept_attestation(verified_aggregate.indexed_attestation.clone()); } - verified_aggregate }) .map_err(|slash_info| process_slash_info(slash_info, chain)) } @@ -892,11 +891,10 @@ impl<'a, T: BeaconChainTypes> IndexedUnaggregatedAttestation<'a, T> { chain: &BeaconChain, ) -> Result { Self::verify_slashable(attestation.to_ref(), subnet_id, chain) 
- .map(|verified_unaggregated| { + .inspect(|verified_unaggregated| { if let Some(slasher) = chain.slasher.as_ref() { slasher.accept_attestation(verified_unaggregated.indexed_attestation.clone()); } - verified_unaggregated }) .map_err(|slash_info| process_slash_info(slash_info, chain)) } diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 2ba0ba7cb0..322a2caa67 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -2035,7 +2035,7 @@ impl BeaconChain { let _timer = metrics::start_timer(&metrics::UNAGGREGATED_ATTESTATION_GOSSIP_VERIFICATION_TIMES); - VerifiedUnaggregatedAttestation::verify(unaggregated_attestation, subnet_id, self).map( + VerifiedUnaggregatedAttestation::verify(unaggregated_attestation, subnet_id, self).inspect( |v| { // This method is called for API and gossip attestations, so this covers all unaggregated attestation events if let Some(event_handler) = self.event_handler.as_ref() { @@ -2046,7 +2046,6 @@ impl BeaconChain { } } metrics::inc_counter(&metrics::UNAGGREGATED_ATTESTATION_PROCESSING_SUCCESSES); - v }, ) } @@ -2074,7 +2073,7 @@ impl BeaconChain { let _timer = metrics::start_timer(&metrics::AGGREGATED_ATTESTATION_GOSSIP_VERIFICATION_TIMES); - VerifiedAggregatedAttestation::verify(signed_aggregate, self).map(|v| { + VerifiedAggregatedAttestation::verify(signed_aggregate, self).inspect(|v| { // This method is called for API and gossip attestations, so this covers all aggregated attestation events if let Some(event_handler) = self.event_handler.as_ref() { if event_handler.has_attestation_subscribers() { @@ -2084,7 +2083,6 @@ impl BeaconChain { } } metrics::inc_counter(&metrics::AGGREGATED_ATTESTATION_PROCESSING_SUCCESSES); - v }) } @@ -2098,9 +2096,8 @@ impl BeaconChain { metrics::inc_counter(&metrics::SYNC_MESSAGE_PROCESSING_REQUESTS); let _timer = metrics::start_timer(&metrics::SYNC_MESSAGE_GOSSIP_VERIFICATION_TIMES); - 
VerifiedSyncCommitteeMessage::verify(sync_message, subnet_id, self).map(|v| { + VerifiedSyncCommitteeMessage::verify(sync_message, subnet_id, self).inspect(|_| { metrics::inc_counter(&metrics::SYNC_MESSAGE_PROCESSING_SUCCESSES); - v }) } @@ -2112,7 +2109,7 @@ impl BeaconChain { ) -> Result, SyncCommitteeError> { metrics::inc_counter(&metrics::SYNC_CONTRIBUTION_PROCESSING_REQUESTS); let _timer = metrics::start_timer(&metrics::SYNC_CONTRIBUTION_GOSSIP_VERIFICATION_TIMES); - VerifiedSyncContribution::verify(sync_contribution, self).map(|v| { + VerifiedSyncContribution::verify(sync_contribution, self).inspect(|v| { if let Some(event_handler) = self.event_handler.as_ref() { if event_handler.has_contribution_subscribers() { event_handler.register(EventKind::ContributionAndProof(Box::new( @@ -2121,7 +2118,6 @@ impl BeaconChain { } } metrics::inc_counter(&metrics::SYNC_CONTRIBUTION_PROCESSING_SUCCESSES); - v }) } @@ -2136,9 +2132,8 @@ impl BeaconChain { self, seen_timestamp, ) - .map(|v| { + .inspect(|_| { metrics::inc_counter(&metrics::FINALITY_UPDATE_PROCESSING_SUCCESSES); - v }) } @@ -2149,9 +2144,8 @@ impl BeaconChain { ) -> Result, GossipDataColumnError> { metrics::inc_counter(&metrics::DATA_COLUMN_SIDECAR_PROCESSING_REQUESTS); let _timer = metrics::start_timer(&metrics::DATA_COLUMN_SIDECAR_GOSSIP_VERIFICATION_TIMES); - GossipVerifiedDataColumn::new(data_column_sidecar, subnet_id, self).map(|v| { + GossipVerifiedDataColumn::new(data_column_sidecar, subnet_id, self).inspect(|_| { metrics::inc_counter(&metrics::DATA_COLUMN_SIDECAR_PROCESSING_SUCCESSES); - v }) } @@ -2162,9 +2156,8 @@ impl BeaconChain { ) -> Result, GossipBlobError> { metrics::inc_counter(&metrics::BLOBS_SIDECAR_PROCESSING_REQUESTS); let _timer = metrics::start_timer(&metrics::BLOBS_SIDECAR_GOSSIP_VERIFICATION_TIMES); - GossipVerifiedBlob::new(blob_sidecar, subnet_id, self).map(|v| { + GossipVerifiedBlob::new(blob_sidecar, subnet_id, self).inspect(|_| { 
metrics::inc_counter(&metrics::BLOBS_SIDECAR_PROCESSING_SUCCESSES); - v }) } @@ -2179,9 +2172,8 @@ impl BeaconChain { self, seen_timestamp, ) - .map(|v| { + .inspect(|_| { metrics::inc_counter(&metrics::OPTIMISTIC_UPDATE_PROCESSING_SUCCESSES); - v }) } @@ -2485,7 +2477,7 @@ impl BeaconChain { .observed_voluntary_exits .lock() .verify_and_observe_at(exit, wall_clock_epoch, head_state, &self.spec) - .map(|exit| { + .inspect(|exit| { // this method is called for both API and gossip exits, so this covers all exit events if let Some(event_handler) = self.event_handler.as_ref() { if event_handler.has_exit_subscribers() { @@ -2494,7 +2486,6 @@ impl BeaconChain { } } } - exit })?) } diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 976388a4f5..55547aaa18 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -834,12 +834,11 @@ pub trait IntoExecutionPendingBlock: Sized { notify_execution_layer: NotifyExecutionLayer, ) -> Result, BlockError> { self.into_execution_pending_block_slashable(block_root, chain, notify_execution_layer) - .map(|execution_pending| { + .inspect(|execution_pending| { // Supply valid block to slasher. if let Some(slasher) = chain.slasher.as_ref() { slasher.accept_block_header(execution_pending.block.signed_block_header()); } - execution_pending }) .map_err(|slash_info| process_block_slash_info::<_, BlockError>(chain, slash_info)) } diff --git a/beacon_node/eth1/src/inner.rs b/beacon_node/eth1/src/inner.rs index 452922b173..7387642bf4 100644 --- a/beacon_node/eth1/src/inner.rs +++ b/beacon_node/eth1/src/inner.rs @@ -75,10 +75,7 @@ impl Inner { SszEth1Cache::from_ssz_bytes(bytes) .map_err(|e| format!("Ssz decoding error: {:?}", e))? 
.to_inner(config, spec) - .map(|inner| { - inner.block_cache.write().rebuild_by_hash_map(); - inner - }) + .inspect(|inner| inner.block_cache.write().rebuild_by_hash_map()) } /// Returns a reference to the specification. diff --git a/beacon_node/store/src/leveldb_store.rs b/beacon_node/store/src/leveldb_store.rs index 28e04f5620..720afd0f3f 100644 --- a/beacon_node/store/src/leveldb_store.rs +++ b/beacon_node/store/src/leveldb_store.rs @@ -98,14 +98,13 @@ impl KeyValueStore for LevelDB { .get(self.read_options(), BytesKey::from_vec(column_key)) .map_err(Into::into) .map(|opt| { - opt.map(|bytes| { + opt.inspect(|bytes| { metrics::inc_counter_vec_by( &metrics::DISK_DB_READ_BYTES, &[col], bytes.len() as u64, ); metrics::stop_timer(timer); - bytes }) }) } diff --git a/common/account_utils/src/lib.rs b/common/account_utils/src/lib.rs index 665953fa52..2c8bbbf4b4 100644 --- a/common/account_utils/src/lib.rs +++ b/common/account_utils/src/lib.rs @@ -228,7 +228,7 @@ impl ZeroizeString { /// Remove any number of newline or carriage returns from the end of a vector of bytes. 
pub fn without_newlines(&self) -> ZeroizeString { - let stripped_string = self.0.trim_end_matches(|c| c == '\r' || c == '\n').into(); + let stripped_string = self.0.trim_end_matches(['\r', '\n']).into(); Self(stripped_string) } } diff --git a/common/logging/src/lib.rs b/common/logging/src/lib.rs index d3d91497cc..a4a1acabd4 100644 --- a/common/logging/src/lib.rs +++ b/common/logging/src/lib.rs @@ -100,10 +100,7 @@ impl<'a> AlignedRecordDecorator<'a> { self.ignore_comma = false; Ok(buf.len()) } else if self.message_active { - self.wrapped.write(buf).map(|n| { - self.message_count += n; - n - }) + self.wrapped.write(buf).inspect(|n| self.message_count += n) } else { self.wrapped.write(buf) } From 0c5e25b62af6043e07d8c781fb8c57641c398a36 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?H=C3=A5vard=20Anda=20Estensen?= Date: Fri, 6 Sep 2024 05:21:21 +0200 Subject: [PATCH 09/66] Use tikv-jemallocator instead of jemallocator (#6354) * Use tikv-jemallocator instead of jemallocator * Merge branch 'unstable' into tikv-jemallocator * Bump tikv-jemallocator and tikv-jemalloc-ctl --- .cargo/config.toml | 1 - Cargo.lock | 66 ++++++++++++++--------------- common/malloc_utils/Cargo.toml | 13 +++--- common/malloc_utils/src/jemalloc.rs | 4 +- 4 files changed, 43 insertions(+), 41 deletions(-) diff --git a/.cargo/config.toml b/.cargo/config.toml index dac0163003..a408305c4d 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -1,4 +1,3 @@ [env] # Set the number of arenas to 16 when using jemalloc. 
JEMALLOC_SYS_WITH_MALLOC_CONF = "abort_conf:true,narenas:16" - diff --git a/Cargo.lock b/Cargo.lock index d76b198780..8054c812f1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4355,37 +4355,6 @@ version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" -[[package]] -name = "jemalloc-ctl" -version = "0.5.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cffc705424a344c054e135d12ee591402f4539245e8bbd64e6c9eaa9458b63c" -dependencies = [ - "jemalloc-sys", - "libc", - "paste", -] - -[[package]] -name = "jemalloc-sys" -version = "0.5.4+5.3.0-patched" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac6c1946e1cea1788cbfde01c993b52a10e2da07f4bac608228d1bed20bfebf2" -dependencies = [ - "cc", - "libc", -] - -[[package]] -name = "jemallocator" -version = "0.5.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0de374a9f8e63150e6f5e8a60cc14c668226d7a347d8aee1a45766e3c4dd3bc" -dependencies = [ - "jemalloc-sys", - "libc", -] - [[package]] name = "jobserver" version = "0.1.32" @@ -5413,11 +5382,11 @@ dependencies = [ name = "malloc_utils" version = "0.1.0" dependencies = [ - "jemalloc-ctl", - "jemallocator", "libc", "lighthouse_metrics", "parking_lot 0.12.3", + "tikv-jemalloc-ctl", + "tikv-jemallocator", ] [[package]] @@ -8622,6 +8591,37 @@ dependencies = [ "num_cpus", ] +[[package]] +name = "tikv-jemalloc-ctl" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f21f216790c8df74ce3ab25b534e0718da5a1916719771d3fec23315c99e468b" +dependencies = [ + "libc", + "paste", + "tikv-jemalloc-sys", +] + +[[package]] +name = "tikv-jemalloc-sys" +version = "0.6.0+5.3.0-1-ge13ca993e8ccb9ba9847cc330696e02839f328f7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd3c60906412afa9c2b5b5a48ca6a5abe5736aec9eb48ad05037a677e52e4e2d" 
+dependencies = [ + "cc", + "libc", +] + +[[package]] +name = "tikv-jemallocator" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cec5ff18518d81584f477e9bfdf957f5bb0979b0bac3af4ca30b5b3ae2d2865" +dependencies = [ + "libc", + "tikv-jemalloc-sys", +] + [[package]] name = "time" version = "0.3.36" diff --git a/common/malloc_utils/Cargo.toml b/common/malloc_utils/Cargo.toml index ac309cec9d..b91e68c518 100644 --- a/common/malloc_utils/Cargo.toml +++ b/common/malloc_utils/Cargo.toml @@ -8,16 +8,19 @@ edition = { workspace = true } lighthouse_metrics = { workspace = true } libc = "0.2.79" parking_lot = { workspace = true } -jemalloc-ctl = { version = "0.5.0", optional = true } +tikv-jemalloc-ctl = { version = "0.6.0", optional = true, features = ["stats"] } # Jemalloc's background_threads feature requires Linux (pthreads). [target.'cfg(target_os = "linux")'.dependencies] -jemallocator = { version = "0.5.0", optional = true, features = ["stats", "background_threads"] } +tikv-jemallocator = { version = "0.6.0", optional = true, features = [ + "stats", + "background_threads", +] } [target.'cfg(not(target_os = "linux"))'.dependencies] -jemallocator = { version = "0.5.0", optional = true, features = ["stats"] } +tikv-jemallocator = { version = "0.6.0", optional = true, features = ["stats"] } [features] mallinfo2 = [] -jemalloc = ["jemallocator", "jemalloc-ctl"] -jemalloc-profiling = ["jemallocator/profiling"] +jemalloc = ["tikv-jemallocator", "tikv-jemalloc-ctl"] +jemalloc-profiling = ["tikv-jemallocator/profiling"] diff --git a/common/malloc_utils/src/jemalloc.rs b/common/malloc_utils/src/jemalloc.rs index 70685d5960..a392a74e8f 100644 --- a/common/malloc_utils/src/jemalloc.rs +++ b/common/malloc_utils/src/jemalloc.rs @@ -7,12 +7,12 @@ //! //! A) `JEMALLOC_SYS_WITH_MALLOC_CONF` at compile-time. //! B) `_RJEM_MALLOC_CONF` at runtime. 
-use jemalloc_ctl::{arenas, epoch, stats, Error}; use lighthouse_metrics::{set_gauge, try_create_int_gauge, IntGauge}; use std::sync::LazyLock; +use tikv_jemalloc_ctl::{arenas, epoch, stats, Error}; #[global_allocator] -static ALLOC: jemallocator::Jemalloc = jemallocator::Jemalloc; +static ALLOC: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc; // Metrics for jemalloc. pub static NUM_ARENAS: LazyLock> = From c0b4f01cf37257bab8dab2b4312dcf07eac93572 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Fri, 6 Sep 2024 17:39:16 +1000 Subject: [PATCH 10/66] Improve `get_custody_columns` validation, caching and error handling (#6308) * Improve `get_custody_columns` validation, caching and error handling. * Merge branch 'unstable' into get-custody-columns-error-handing * Fix failing test and add more test. * Fix failing test and add more test. * Merge branch 'unstable' into get-custody-columns-error-handing # Conflicts: # beacon_node/lighthouse_network/src/discovery/subnet_predicate.rs # beacon_node/lighthouse_network/src/peer_manager/peerdb.rs # beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs # beacon_node/lighthouse_network/src/types/globals.rs # beacon_node/network/src/service.rs # consensus/types/src/data_column_subnet_id.rs * Add unit test to make sure the default specs won't panic on the `compute_custody_requirement_subnets` function. * Add condition when calling `compute_custody_subnets_from_metadata` and update logs. * Validate `csc` when returning from enr. Remove `csc` computation on connection since we get them on metadata anyway. * Add `peers_per_custody_subnet_count` to track peer csc and supernodes. * Disconnect peers with invalid metadata and find other peers instead. * Fix sampling tests. 
* Merge branch 'unstable' into get-custody-columns-error-handing * Merge branch 'unstable' into get-custody-columns-error-handing --- beacon_node/http_api/src/publish_blocks.rs | 2 +- .../lighthouse_network/src/discovery/enr.rs | 45 ++---- .../src/discovery/subnet_predicate.rs | 19 +-- beacon_node/lighthouse_network/src/metrics.rs | 9 ++ .../src/peer_manager/mod.rs | 140 ++++++++++++------ .../src/peer_manager/peerdb.rs | 38 ++--- .../src/peer_manager/peerdb/peer_info.rs | 34 +---- .../lighthouse_network/src/types/globals.rs | 136 ++++++++++------- beacon_node/network/src/service.rs | 13 +- .../network/src/sync/network_context.rs | 9 +- .../network/src/sync/range_sync/chain.rs | 36 ++--- consensus/types/src/data_column_subnet_id.rs | 45 ++++-- .../ef_tests/src/cases/get_custody_columns.rs | 1 + 13 files changed, 292 insertions(+), 235 deletions(-) diff --git a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs index e0fc518d46..ad7cb3081e 100644 --- a/beacon_node/http_api/src/publish_blocks.rs +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -273,7 +273,7 @@ pub async fn publish_block Result, &'static str>; /// The peerdas custody subnet count associated with the ENR. - fn custody_subnet_count(&self, spec: &ChainSpec) -> u64; + fn custody_subnet_count(&self, spec: &ChainSpec) -> Result; fn eth2(&self) -> Result; } @@ -64,14 +64,17 @@ impl Eth2Enr for Enr { .map_err(|_| "Could not decode the ENR syncnets bitfield") } - /// if the custody value is non-existent in the ENR, then we assume the minimum custody value - /// defined in the spec. 
- fn custody_subnet_count(&self, spec: &ChainSpec) -> u64 { - self.get_decodable::(PEERDAS_CUSTODY_SUBNET_COUNT_ENR_KEY) - .and_then(|r| r.ok()) - // If value supplied in ENR is invalid, fallback to `custody_requirement` - .filter(|csc| csc <= &spec.data_column_sidecar_subnet_count) - .unwrap_or(spec.custody_requirement) + fn custody_subnet_count(&self, spec: &ChainSpec) -> Result { + let csc = self + .get_decodable::(PEERDAS_CUSTODY_SUBNET_COUNT_ENR_KEY) + .ok_or("ENR custody subnet count non-existent")? + .map_err(|_| "Could not decode the ENR custody subnet count")?; + + if csc >= spec.custody_requirement && csc <= spec.data_column_sidecar_subnet_count { + Ok(csc) + } else { + Err("Invalid custody subnet count in ENR") + } } fn eth2(&self) -> Result { @@ -335,7 +338,7 @@ mod test { let enr = build_enr_with_config(config, &spec).0; assert_eq!( - enr.custody_subnet_count::(&spec), + enr.custody_subnet_count::(&spec).unwrap(), spec.custody_requirement, ); } @@ -350,31 +353,11 @@ mod test { let enr = build_enr_with_config(config, &spec).0; assert_eq!( - enr.custody_subnet_count::(&spec), + enr.custody_subnet_count::(&spec).unwrap(), spec.data_column_sidecar_subnet_count, ); } - #[test] - fn custody_subnet_count_fallback_default() { - let config = NetworkConfig::default(); - let spec = make_eip7594_spec(); - let (mut enr, enr_key) = build_enr_with_config(config, &spec); - let invalid_subnet_count = 999u64; - - enr.insert( - PEERDAS_CUSTODY_SUBNET_COUNT_ENR_KEY, - &invalid_subnet_count, - &enr_key, - ) - .unwrap(); - - assert_eq!( - enr.custody_subnet_count::(&spec), - spec.custody_requirement, - ); - } - fn build_enr_with_config(config: NetworkConfig, spec: &ChainSpec) -> (Enr, CombinedKey) { let keypair = libp2p::identity::secp256k1::Keypair::generate(); let enr_key = CombinedKey::from_secp256k1(&keypair); diff --git a/beacon_node/lighthouse_network/src/discovery/subnet_predicate.rs b/beacon_node/lighthouse_network/src/discovery/subnet_predicate.rs index 
e198b3ee17..02ff0cc3ca 100644 --- a/beacon_node/lighthouse_network/src/discovery/subnet_predicate.rs +++ b/beacon_node/lighthouse_network/src/discovery/subnet_predicate.rs @@ -16,7 +16,6 @@ where E: EthSpec, { let log_clone = log.clone(); - let spec_clone = spec.clone(); move |enr: &Enr| { let attestation_bitfield: EnrAttestationBitfield = match enr.attestation_bitfield::() @@ -30,8 +29,6 @@ where let sync_committee_bitfield: Result, _> = enr.sync_committee_bitfield::(); - let custody_subnet_count = enr.custody_subnet_count::(&spec_clone); - let predicate = subnets.iter().any(|subnet| match subnet { Subnet::Attestation(s) => attestation_bitfield .get(*s.deref() as usize) @@ -40,12 +37,16 @@ where .as_ref() .map_or(false, |b| b.get(*s.deref() as usize).unwrap_or(false)), Subnet::DataColumn(s) => { - let mut subnets = DataColumnSubnetId::compute_custody_subnets::( - enr.node_id().raw(), - custody_subnet_count, - &spec, - ); - subnets.contains(s) + if let Ok(custody_subnet_count) = enr.custody_subnet_count::(&spec) { + DataColumnSubnetId::compute_custody_subnets::( + enr.node_id().raw(), + custody_subnet_count, + &spec, + ) + .map_or(false, |mut subnets| subnets.contains(s)) + } else { + false + } } }); diff --git a/beacon_node/lighthouse_network/src/metrics.rs b/beacon_node/lighthouse_network/src/metrics.rs index 85da8dc211..c3f64a5a1f 100644 --- a/beacon_node/lighthouse_network/src/metrics.rs +++ b/beacon_node/lighthouse_network/src/metrics.rs @@ -91,6 +91,15 @@ pub static PEERS_PER_CLIENT: LazyLock> = LazyLock::new(|| { &["Client"], ) }); + +pub static PEERS_PER_CUSTODY_SUBNET_COUNT: LazyLock> = LazyLock::new(|| { + try_create_int_gauge_vec( + "peers_per_custody_subnet_count", + "The current count of peers by custody subnet count", + &["custody_subnet_count"], + ) +}); + pub static FAILED_ATTESTATION_PUBLISHES_PER_SUBNET: LazyLock> = LazyLock::new(|| { try_create_int_gauge_vec( diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs 
b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index 31ff8bdfc2..4d91331235 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -19,7 +19,7 @@ use std::{ sync::Arc, time::{Duration, Instant}, }; -use types::{EthSpec, SyncSubnetId}; +use types::{DataColumnSubnetId, EthSpec, SyncSubnetId}; pub use libp2p::core::Multiaddr; pub use libp2p::identity::Keypair; @@ -33,7 +33,7 @@ pub use peerdb::peer_info::{ }; use peerdb::score::{PeerAction, ReportSource}; pub use peerdb::sync_status::{SyncInfo, SyncStatus}; -use std::collections::{hash_map::Entry, HashMap}; +use std::collections::{hash_map::Entry, HashMap, HashSet}; use std::net::IpAddr; use strum::IntoEnumIterator; @@ -701,6 +701,8 @@ impl PeerManager { /// Received a metadata response from a peer. pub fn meta_data_response(&mut self, peer_id: &PeerId, meta_data: MetaData) { + let mut invalid_meta_data = false; + if let Some(peer_info) = self.network_globals.peers.write().peer_info_mut(peer_id) { if let Some(known_meta_data) = &peer_info.meta_data() { if *known_meta_data.seq_number() < *meta_data.seq_number() { @@ -717,12 +719,39 @@ impl PeerManager { debug!(self.log, "Obtained peer's metadata"; "peer_id" => %peer_id, "new_seq_no" => meta_data.seq_number()); } - let node_id_opt = peer_id_to_node_id(peer_id).ok(); - peer_info.set_meta_data(meta_data, node_id_opt, &self.network_globals.spec); + + let custody_subnet_count_opt = meta_data.custody_subnet_count().copied().ok(); + peer_info.set_meta_data(meta_data); + + if self.network_globals.spec.is_peer_das_scheduled() { + // Gracefully ignore metadata/v2 peers. Potentially downscore after PeerDAS to + // prioritize PeerDAS peers. 
+ if let Some(custody_subnet_count) = custody_subnet_count_opt { + match self.compute_peer_custody_subnets(peer_id, custody_subnet_count) { + Ok(custody_subnets) => { + peer_info.set_custody_subnets(custody_subnets); + } + Err(err) => { + debug!(self.log, "Unable to compute peer custody subnets from metadata"; + "info" => "Sending goodbye to peer", + "peer_id" => %peer_id, + "custody_subnet_count" => custody_subnet_count, + "error" => ?err, + ); + invalid_meta_data = true; + } + }; + } + } } else { error!(self.log, "Received METADATA from an unknown peer"; "peer_id" => %peer_id); } + + // Disconnect peers with invalid metadata and find other peers instead. + if invalid_meta_data { + self.goodbye_peer(peer_id, GoodbyeReason::Fault, ReportSource::PeerManager) + } } /// Updates the gossipsub scores for all known peers in gossipsub. @@ -1290,6 +1319,7 @@ impl PeerManager { let mut peers_connected = 0; let mut clients_per_peer = HashMap::new(); let mut peers_connected_mutli: HashMap<(&str, &str), i32> = HashMap::new(); + let mut peers_per_custody_subnet_count: HashMap = HashMap::new(); for (_, peer_info) in self.network_globals.peers.read().connected_peers() { peers_connected += 1; @@ -1320,11 +1350,26 @@ impl PeerManager { *peers_connected_mutli .entry((direction, transport)) .or_default() += 1; + + if let Some(MetaData::V3(meta_data)) = peer_info.meta_data() { + *peers_per_custody_subnet_count + .entry(meta_data.custody_subnet_count) + .or_default() += 1; + } } // PEERS_CONNECTED metrics::set_gauge(&metrics::PEERS_CONNECTED, peers_connected); + // CUSTODY_SUBNET_COUNT + for (custody_subnet_count, peer_count) in peers_per_custody_subnet_count.into_iter() { + metrics::set_gauge_vec( + &metrics::PEERS_PER_CUSTODY_SUBNET_COUNT, + &[&custody_subnet_count.to_string()], + peer_count, + ) + } + // PEERS_PER_CLIENT for client_kind in ClientKind::iter() { let value = clients_per_peer.get(&client_kind.to_string()).unwrap_or(&0); @@ -1348,6 +1393,45 @@ impl PeerManager { } } } + 
+ fn compute_peer_custody_subnets( + &self, + peer_id: &PeerId, + custody_subnet_count: u64, + ) -> Result, String> { + // If we don't have a node id, we cannot compute the custody duties anyway + let node_id = peer_id_to_node_id(peer_id)?; + let spec = &self.network_globals.spec; + + if !(spec.custody_requirement..=spec.data_column_sidecar_subnet_count) + .contains(&custody_subnet_count) + { + return Err("Invalid custody subnet count in metadata: out of range".to_string()); + } + + let custody_subnets = DataColumnSubnetId::compute_custody_subnets::( + node_id.raw(), + custody_subnet_count, + spec, + ) + .map(|subnets| subnets.collect()) + .unwrap_or_else(|e| { + // This is an unreachable scenario unless there's a bug, as we've validated the csc + // just above. + error!( + self.log, + "Computing peer custody subnets failed unexpectedly"; + "info" => "Falling back to default custody requirement subnets", + "peer_id" => %peer_id, + "custody_subnet_count" => custody_subnet_count, + "error" => ?e + ); + DataColumnSubnetId::compute_custody_requirement_subnets::(node_id.raw(), spec) + .collect() + }); + + Ok(custody_subnets) + } } enum ConnectingType { @@ -1680,11 +1764,7 @@ mod tests { .write() .peer_info_mut(&peer0) .unwrap() - .set_meta_data( - MetaData::V2(metadata), - None, - &peer_manager.network_globals.spec, - ); + .set_meta_data(MetaData::V2(metadata)); peer_manager .network_globals .peers @@ -1704,11 +1784,7 @@ mod tests { .write() .peer_info_mut(&peer2) .unwrap() - .set_meta_data( - MetaData::V2(metadata), - None, - &peer_manager.network_globals.spec, - ); + .set_meta_data(MetaData::V2(metadata)); peer_manager .network_globals .peers @@ -1728,11 +1804,7 @@ mod tests { .write() .peer_info_mut(&peer4) .unwrap() - .set_meta_data( - MetaData::V2(metadata), - None, - &peer_manager.network_globals.spec, - ); + .set_meta_data(MetaData::V2(metadata)); peer_manager .network_globals .peers @@ -1806,11 +1878,7 @@ mod tests { .write() .peer_info_mut(&peer) .unwrap() - 
.set_meta_data( - MetaData::V2(metadata), - None, - &peer_manager.network_globals.spec, - ); + .set_meta_data(MetaData::V2(metadata)); peer_manager .network_globals .peers @@ -1934,11 +2002,7 @@ mod tests { .write() .peer_info_mut(&peer) .unwrap() - .set_meta_data( - MetaData::V2(metadata), - None, - &peer_manager.network_globals.spec, - ); + .set_meta_data(MetaData::V2(metadata)); let long_lived_subnets = peer_manager .network_globals .peers @@ -2047,11 +2111,7 @@ mod tests { .write() .peer_info_mut(&peer) .unwrap() - .set_meta_data( - MetaData::V2(metadata), - None, - &peer_manager.network_globals.spec, - ); + .set_meta_data(MetaData::V2(metadata)); let long_lived_subnets = peer_manager .network_globals .peers @@ -2217,11 +2277,7 @@ mod tests { .write() .peer_info_mut(&peer) .unwrap() - .set_meta_data( - MetaData::V2(metadata), - None, - &peer_manager.network_globals.spec, - ); + .set_meta_data(MetaData::V2(metadata)); let long_lived_subnets = peer_manager .network_globals .peers @@ -2378,11 +2434,7 @@ mod tests { let mut peer_db = peer_manager.network_globals.peers.write(); let peer_info = peer_db.peer_info_mut(&condition.peer_id).unwrap(); - peer_info.set_meta_data( - MetaData::V2(metadata), - None, - &peer_manager.network_globals.spec, - ); + peer_info.set_meta_data(MetaData::V2(metadata)); peer_info.set_gossipsub_score(condition.gossipsub_score); peer_info.add_to_score(condition.score); diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs index 6e76fd4bb0..f6b63e6de2 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs @@ -1,8 +1,6 @@ use crate::discovery::enr::PEERDAS_CUSTODY_SUBNET_COUNT_ENR_KEY; use crate::discovery::CombinedKey; -use crate::{ - metrics, multiaddr::Multiaddr, types::Subnet, Enr, EnrExt, Eth2Enr, Gossipsub, PeerId, -}; +use crate::{metrics, multiaddr::Multiaddr, types::Subnet, Enr, 
EnrExt, Gossipsub, PeerId}; use peer_info::{ConnectionDirection, PeerConnectionStatus, PeerInfo}; use rand::seq::SliceRandom; use score::{PeerAction, ReportSource, Score, ScoreState}; @@ -47,16 +45,10 @@ pub struct PeerDB { disable_peer_scoring: bool, /// PeerDB's logger log: slog::Logger, - spec: ChainSpec, } impl PeerDB { - pub fn new( - trusted_peers: Vec, - disable_peer_scoring: bool, - log: &slog::Logger, - spec: ChainSpec, - ) -> Self { + pub fn new(trusted_peers: Vec, disable_peer_scoring: bool, log: &slog::Logger) -> Self { // Initialize the peers hashmap with trusted peers let peers = trusted_peers .into_iter() @@ -68,7 +60,6 @@ impl PeerDB { banned_peers_count: BannedPeersCount::default(), disable_peer_scoring, peers, - spec, } } @@ -726,6 +717,14 @@ impl PeerDB { }, ); + if supernode { + let peer_info = self.peers.get_mut(&peer_id).expect("peer exists"); + let all_subnets = (0..spec.data_column_sidecar_subnet_count) + .map(|csc| csc.into()) + .collect(); + peer_info.set_custody_subnets(all_subnets); + } + peer_id } @@ -791,14 +790,6 @@ impl PeerDB { ) => { // Update the ENR if one exists, and compute the custody subnets if let Some(enr) = enr { - let custody_subnet_count = enr.custody_subnet_count::(&self.spec); - let custody_subnets = DataColumnSubnetId::compute_custody_subnets::( - enr.node_id().raw(), - custody_subnet_count, - &self.spec, - ) - .collect::>(); - info.set_custody_subnets(custody_subnets); info.set_enr(enr); } @@ -1349,8 +1340,7 @@ mod tests { fn get_db() -> PeerDB { let log = build_log(slog::Level::Debug, false); - let spec = M::default_spec(); - PeerDB::new(vec![], false, &log, spec) + PeerDB::new(vec![], false, &log) } #[test] @@ -2049,8 +2039,7 @@ mod tests { fn test_trusted_peers_score() { let trusted_peer = PeerId::random(); let log = build_log(slog::Level::Debug, false); - let spec = M::default_spec(); - let mut pdb: PeerDB = PeerDB::new(vec![trusted_peer], false, &log, spec); + let mut pdb: PeerDB = 
PeerDB::new(vec![trusted_peer], false, &log); pdb.connect_ingoing(&trusted_peer, "/ip4/0.0.0.0".parse().unwrap(), None); @@ -2074,8 +2063,7 @@ mod tests { fn test_disable_peer_scoring() { let peer = PeerId::random(); let log = build_log(slog::Level::Debug, false); - let spec = M::default_spec(); - let mut pdb: PeerDB = PeerDB::new(vec![], true, &log, spec); + let mut pdb: PeerDB = PeerDB::new(vec![], true, &log); pdb.connect_ingoing(&peer, "/ip4/0.0.0.0".parse().unwrap(), None); diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs index 1ea3f8ed5f..ee8c27f474 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs @@ -3,7 +3,6 @@ use super::score::{PeerAction, Score, ScoreState}; use super::sync_status::SyncStatus; use crate::discovery::Eth2Enr; use crate::{rpc::MetaData, types::Subnet}; -use discv5::enr::NodeId; use discv5::Enr; use libp2p::core::multiaddr::{Multiaddr, Protocol}; use serde::{ @@ -14,7 +13,7 @@ use std::collections::HashSet; use std::net::IpAddr; use std::time::Instant; use strum::AsRefStr; -use types::{ChainSpec, DataColumnSubnetId, EthSpec}; +use types::{DataColumnSubnetId, EthSpec}; use PeerConnectionStatus::*; /// Information about a given connected peer. @@ -358,31 +357,7 @@ impl PeerInfo { /// Sets an explicit value for the meta data. 
// VISIBILITY: The peer manager is able to adjust the meta_data - pub(in crate::peer_manager) fn set_meta_data( - &mut self, - meta_data: MetaData, - node_id_opt: Option, - spec: &ChainSpec, - ) { - // If we don't have a node id, we cannot compute the custody duties anyway - let Some(node_id) = node_id_opt else { - self.meta_data = Some(meta_data); - return; - }; - - // Already set by enr if custody_subnets is non empty - if self.custody_subnets.is_empty() { - if let Ok(custody_subnet_count) = meta_data.custody_subnet_count() { - let custody_subnets = DataColumnSubnetId::compute_custody_subnets::( - node_id.raw(), - std::cmp::min(*custody_subnet_count, spec.data_column_sidecar_subnet_count), - spec, - ) - .collect::>(); - self.set_custody_subnets(custody_subnets); - } - } - + pub(in crate::peer_manager) fn set_meta_data(&mut self, meta_data: MetaData) { self.meta_data = Some(meta_data); } @@ -391,7 +366,10 @@ impl PeerInfo { self.connection_status = connection_status } - pub(super) fn set_custody_subnets(&mut self, custody_subnets: HashSet) { + pub(in crate::peer_manager) fn set_custody_subnets( + &mut self, + custody_subnets: HashSet, + ) { self.custody_subnets = custody_subnets } diff --git a/beacon_node/lighthouse_network/src/types/globals.rs b/beacon_node/lighthouse_network/src/types/globals.rs index c76e0a1857..ac78e2cb01 100644 --- a/beacon_node/lighthouse_network/src/types/globals.rs +++ b/beacon_node/lighthouse_network/src/types/globals.rs @@ -2,9 +2,10 @@ use crate::peer_manager::peerdb::PeerDB; use crate::rpc::{MetaData, MetaDataV3}; use crate::types::{BackFillState, SyncState}; +use crate::Client; use crate::EnrExt; -use crate::{Client, Eth2Enr}; use crate::{Enr, GossipTopic, Multiaddr, PeerId}; +use itertools::Itertools; use parking_lot::RwLock; use std::collections::HashSet; use types::{ChainSpec, ColumnIndex, DataColumnSubnetId, EthSpec}; @@ -26,6 +27,9 @@ pub struct NetworkGlobals { pub sync_state: RwLock, /// The current state of the backfill sync. 
pub backfill_state: RwLock, + /// The computed custody subnets and columns is stored to avoid re-computing. + pub custody_subnets: Vec, + pub custody_columns: Vec, pub spec: ChainSpec, } @@ -38,20 +42,39 @@ impl NetworkGlobals { log: &slog::Logger, spec: ChainSpec, ) -> Self { + let (custody_subnets, custody_columns) = if spec.is_peer_das_scheduled() { + let custody_subnet_count = local_metadata + .custody_subnet_count() + .copied() + .expect("custody subnet count must be set if PeerDAS is scheduled"); + let custody_subnets = DataColumnSubnetId::compute_custody_subnets::( + enr.node_id().raw(), + custody_subnet_count, + &spec, + ) + .expect("custody subnet count must be valid") + .collect::>(); + let custody_columns = custody_subnets + .iter() + .flat_map(|subnet| subnet.columns::(&spec)) + .sorted() + .collect(); + (custody_subnets, custody_columns) + } else { + (vec![], vec![]) + }; + NetworkGlobals { local_enr: RwLock::new(enr.clone()), peer_id: RwLock::new(enr.peer_id()), listen_multiaddrs: RwLock::new(Vec::new()), local_metadata: RwLock::new(local_metadata), - peers: RwLock::new(PeerDB::new( - trusted_peers, - disable_peer_scoring, - log, - spec.clone(), - )), + peers: RwLock::new(PeerDB::new(trusted_peers, disable_peer_scoring, log)), gossipsub_subscriptions: RwLock::new(HashSet::new()), sync_state: RwLock::new(SyncState::Stalled), backfill_state: RwLock::new(BackFillState::NotRequired), + custody_subnets, + custody_columns, spec, } } @@ -118,29 +141,6 @@ impl NetworkGlobals { std::mem::replace(&mut *self.sync_state.write(), new_state) } - /// Compute custody data columns the node is assigned to custody. - pub fn custody_columns(&self) -> Vec { - let enr = self.local_enr(); - let custody_subnet_count = enr.custody_subnet_count::(&self.spec); - DataColumnSubnetId::compute_custody_columns::( - enr.node_id().raw(), - custody_subnet_count, - &self.spec, - ) - .collect() - } - - /// Compute custody data column subnets the node is assigned to custody. 
- pub fn custody_subnets(&self) -> impl Iterator { - let enr = self.local_enr(); - let custody_subnet_count = enr.custody_subnet_count::(&self.spec); - DataColumnSubnetId::compute_custody_subnets::( - enr.node_id().raw(), - custody_subnet_count, - &self.spec, - ) - } - /// Returns a connected peer that: /// 1. is connected /// 2. assigned to custody the column based on it's `custody_subnet_count` from ENR or metadata @@ -161,44 +161,70 @@ impl NetworkGlobals { trusted_peers: Vec, log: &slog::Logger, spec: ChainSpec, + ) -> NetworkGlobals { + let metadata = MetaData::V3(MetaDataV3 { + seq_number: 0, + attnets: Default::default(), + syncnets: Default::default(), + custody_subnet_count: spec.custody_requirement, + }); + Self::new_test_globals_with_metadata(trusted_peers, metadata, log, spec) + } + + pub(crate) fn new_test_globals_with_metadata( + trusted_peers: Vec, + metadata: MetaData, + log: &slog::Logger, + spec: ChainSpec, ) -> NetworkGlobals { use crate::CombinedKeyExt; let keypair = libp2p::identity::secp256k1::Keypair::generate(); let enr_key: discv5::enr::CombinedKey = discv5::enr::CombinedKey::from_secp256k1(&keypair); let enr = discv5::enr::Enr::builder().build(&enr_key).unwrap(); - NetworkGlobals::new( - enr, - MetaData::V3(MetaDataV3 { - seq_number: 0, - attnets: Default::default(), - syncnets: Default::default(), - custody_subnet_count: spec.data_column_sidecar_subnet_count, - }), - trusted_peers, - false, - log, - spec, - ) + NetworkGlobals::new(enr, metadata, trusted_peers, false, log, spec) } } #[cfg(test)] mod test { use super::*; - use types::{EthSpec, MainnetEthSpec as E}; + use types::{Epoch, EthSpec, MainnetEthSpec as E}; #[test] - fn test_custody_count_default() { - let spec = E::default_spec(); + fn test_custody_subnets() { let log = logging::test_logger(); - let default_custody_requirement_column_count = spec.number_of_columns as u64 - / spec.data_column_sidecar_subnet_count - * spec.custody_requirement; - let globals = 
NetworkGlobals::::new_test_globals(vec![], &log, spec.clone()); - let columns = globals.custody_columns(); - assert_eq!( - columns.len(), - default_custody_requirement_column_count as usize - ); + let mut spec = E::default_spec(); + spec.eip7594_fork_epoch = Some(Epoch::new(0)); + + let custody_subnet_count = spec.data_column_sidecar_subnet_count / 2; + let metadata = get_metadata(custody_subnet_count); + + let globals = + NetworkGlobals::::new_test_globals_with_metadata(vec![], metadata, &log, spec); + assert_eq!(globals.custody_subnets.len(), custody_subnet_count as usize); + } + + #[test] + fn test_custody_columns() { + let log = logging::test_logger(); + let mut spec = E::default_spec(); + spec.eip7594_fork_epoch = Some(Epoch::new(0)); + + let custody_subnet_count = spec.data_column_sidecar_subnet_count / 2; + let custody_columns_count = spec.number_of_columns / 2; + let metadata = get_metadata(custody_subnet_count); + + let globals = + NetworkGlobals::::new_test_globals_with_metadata(vec![], metadata, &log, spec); + assert_eq!(globals.custody_columns.len(), custody_columns_count); + } + + fn get_metadata(custody_subnet_count: u64) -> MetaData { + MetaData::V3(MetaDataV3 { + seq_number: 0, + attnets: Default::default(), + syncnets: Default::default(), + custody_subnet_count, + }) } } diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 5b9a3125ea..5782fb00b6 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -16,7 +16,6 @@ use futures::prelude::*; use futures::StreamExt; use lighthouse_network::service::Network; use lighthouse_network::types::GossipKind; -use lighthouse_network::Eth2Enr; use lighthouse_network::{prometheus_client::registry::Registry, MessageAcceptance}; use lighthouse_network::{ rpc::{GoodbyeReason, RPCResponseErrorCode}, @@ -808,17 +807,9 @@ impl NetworkService { } } } else { - for column_subnet in DataColumnSubnetId::compute_custody_subnets::( - 
self.network_globals.local_enr().node_id().raw(), - self.network_globals - .local_enr() - .custody_subnet_count::<::EthSpec>( - &self.fork_context.spec, - ), - &self.fork_context.spec, - ) { + for column_subnet in &self.network_globals.custody_subnets { for fork_digest in self.required_gossip_fork_digests() { - let gossip_kind = Subnet::DataColumn(column_subnet).into(); + let gossip_kind = Subnet::DataColumn(*column_subnet).into(); let topic = GossipTopic::new(gossip_kind, GossipEncoding::default(), fork_digest); if self.libp2p.subscribe(topic.clone()) { diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index 1cf028dbcd..b9f6d180c1 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -389,7 +389,7 @@ impl SyncNetworkContext { let (expects_custody_columns, num_of_custody_column_req) = if matches!(batch_type, ByRangeRequestType::BlocksAndColumns) { - let custody_indexes = self.network_globals().custody_columns(); + let custody_indexes = self.network_globals().custody_columns.clone(); let mut num_of_custody_column_req = 0; for (peer_id, columns_by_range_request) in @@ -758,10 +758,11 @@ impl SyncNetworkContext { .imported_custody_column_indexes(&block_root) .unwrap_or_default(); - let custody_indexes_duty = self.network_globals().custody_columns(); - // Include only the blob indexes not yet imported (received through gossip) - let custody_indexes_to_fetch = custody_indexes_duty + let custody_indexes_to_fetch = self + .network_globals() + .custody_columns + .clone() .into_iter() .filter(|index| !custody_indexes_imported.contains(index)) .collect::>(); diff --git a/beacon_node/network/src/sync/range_sync/chain.rs b/beacon_node/network/src/sync/range_sync/chain.rs index 1756fb513d..ed5946ada7 100644 --- a/beacon_node/network/src/sync/range_sync/chain.rs +++ b/beacon_node/network/src/sync/range_sync/chain.rs @@ -1112,25 +1112,25 @@ impl 
SyncingChain { fn good_peers_on_custody_subnets(&self, epoch: Epoch, network: &SyncNetworkContext) -> bool { if network.chain.spec.is_peer_das_enabled_for_epoch(epoch) { // Require peers on all custody column subnets before sending batches - let peers_on_all_custody_subnets = - network - .network_globals() - .custody_subnets() - .all(|subnet_id| { - let peer_count = network - .network_globals() - .peers - .read() - .good_custody_subnet_peer(subnet_id) - .count(); + let peers_on_all_custody_subnets = network + .network_globals() + .custody_subnets + .iter() + .all(|subnet_id| { + let peer_count = network + .network_globals() + .peers + .read() + .good_custody_subnet_peer(*subnet_id) + .count(); - set_int_gauge( - &PEERS_PER_COLUMN_SUBNET, - &[&subnet_id.to_string()], - peer_count as i64, - ); - peer_count > 0 - }); + set_int_gauge( + &PEERS_PER_COLUMN_SUBNET, + &[&subnet_id.to_string()], + peer_count as i64, + ); + peer_count > 0 + }); peers_on_all_custody_subnets } else { true diff --git a/consensus/types/src/data_column_subnet_id.rs b/consensus/types/src/data_column_subnet_id.rs index df964cf8de..df61d711c1 100644 --- a/consensus/types/src/data_column_subnet_id.rs +++ b/consensus/types/src/data_column_subnet_id.rs @@ -41,9 +41,10 @@ impl DataColumnSubnetId { raw_node_id: [u8; 32], custody_subnet_count: u64, spec: &ChainSpec, - ) -> impl Iterator { - // TODO(das): we could perform check on `custody_subnet_count` here to ensure that it is a valid - // value, but here we assume it is valid. 
+ ) -> Result, Error> { + if custody_subnet_count > spec.data_column_sidecar_subnet_count { + return Err(Error::InvalidCustodySubnetCount(custody_subnet_count)); + } let mut subnets: HashSet = HashSet::new(); let mut current_id = U256::from_be_slice(&raw_node_id); @@ -66,17 +67,26 @@ impl DataColumnSubnetId { } current_id += U256::from(1u64) } - subnets.into_iter().map(DataColumnSubnetId::new) + Ok(subnets.into_iter().map(DataColumnSubnetId::new)) + } + + /// Compute the custody subnets for a given node id with the default `custody_requirement`. + /// This operation should be infallable, and empty iterator is returned if it fails unexpectedly. + pub fn compute_custody_requirement_subnets( + node_id: [u8; 32], + spec: &ChainSpec, + ) -> impl Iterator { + Self::compute_custody_subnets::(node_id, spec.custody_requirement, spec) + .expect("should compute default custody subnets") } pub fn compute_custody_columns( raw_node_id: [u8; 32], custody_subnet_count: u64, spec: &ChainSpec, - ) -> impl Iterator { + ) -> Result, Error> { Self::compute_custody_subnets::(raw_node_id, custody_subnet_count, spec) - .flat_map(|subnet| subnet.columns::(spec)) - .sorted() + .map(|subnet| subnet.flat_map(|subnet| subnet.columns::(spec)).sorted()) } } @@ -121,6 +131,7 @@ impl From<&DataColumnSubnetId> for u64 { #[derive(Debug)] pub enum Error { ArithError(ArithError), + InvalidCustodySubnetCount(u64), } impl From for Error { @@ -132,9 +143,9 @@ impl From for Error { #[cfg(test)] mod test { use crate::data_column_subnet_id::DataColumnSubnetId; - use crate::EthSpec; use crate::MainnetEthSpec; use crate::Uint256; + use crate::{EthSpec, GnosisEthSpec, MinimalEthSpec}; type E = MainnetEthSpec; @@ -163,7 +174,8 @@ mod test { node_id, custody_requirement, &spec, - ); + ) + .unwrap(); let computed_subnets: Vec<_> = computed_subnets.collect(); // the number of subnets is equal to the custody requirement @@ -183,6 +195,21 @@ mod test { } } + #[test] + fn 
test_compute_custody_requirement_subnets_never_panics() { + let node_id = [1u8; 32]; + test_compute_custody_requirement_subnets_with_spec::(node_id); + test_compute_custody_requirement_subnets_with_spec::(node_id); + test_compute_custody_requirement_subnets_with_spec::(node_id); + } + + fn test_compute_custody_requirement_subnets_with_spec(node_id: [u8; 32]) { + let _ = DataColumnSubnetId::compute_custody_requirement_subnets::( + node_id, + &E::default_spec(), + ); + } + #[test] fn test_columns_subnet_conversion() { let spec = E::default_spec(); diff --git a/testing/ef_tests/src/cases/get_custody_columns.rs b/testing/ef_tests/src/cases/get_custody_columns.rs index d31e72a473..9665f87730 100644 --- a/testing/ef_tests/src/cases/get_custody_columns.rs +++ b/testing/ef_tests/src/cases/get_custody_columns.rs @@ -31,6 +31,7 @@ impl Case for GetCustodyColumns { self.custody_subnet_count, &spec, ) + .expect("should compute custody columns") .collect::>(); let expected = &self.result; if computed == *expected { From d6861380a2a4476d1063c2d765bf89998e860243 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Fri, 6 Sep 2024 18:10:55 +1000 Subject: [PATCH 11/66] Delete legacy payload reconstruction (#6213) * Delete legacy payload reconstruction * Delete unneeded failing test * Merge remote-tracking branch 'origin/unstable' into remove-more-ethers * Merge remote-tracking branch 'origin/unstable' into remove-more-ethers * Cleanups --- .../beacon_chain/src/beacon_block_streamer.rs | 147 +------------ .../beacon_chain/src/bellatrix_readiness.rs | 45 ---- beacon_node/execution_layer/src/engine_api.rs | 192 ----------------- .../execution_layer/src/engine_api/http.rs | 48 ----- .../src/engine_api/json_structures.rs | 51 +++++ beacon_node/execution_layer/src/lib.rs | 199 +----------------- beacon_node/execution_layer/src/metrics.rs | 7 - .../test_utils/execution_block_generator.rs | 44 ++-- .../src/test_utils/handle_rpc.rs | 122 +++-------- .../src/test_rig.rs | 10 +- 10 files 
changed, 110 insertions(+), 755 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_block_streamer.rs b/beacon_node/beacon_chain/src/beacon_block_streamer.rs index 0ce33f1689..ace5f0be74 100644 --- a/beacon_node/beacon_chain/src/beacon_block_streamer.rs +++ b/beacon_node/beacon_chain/src/beacon_block_streamer.rs @@ -710,10 +710,8 @@ impl From for BeaconChainError { mod tests { use crate::beacon_block_streamer::{BeaconBlockStreamer, CheckCaches}; use crate::test_utils::{test_spec, BeaconChainHarness, EphemeralHarnessType}; - use execution_layer::test_utils::{Block, DEFAULT_ENGINE_CAPABILITIES}; - use execution_layer::EngineCapabilities; + use execution_layer::test_utils::Block; use std::sync::LazyLock; - use std::time::Duration; use tokio::sync::mpsc; use types::{ ChainSpec, Epoch, EthSpec, FixedBytesExtended, Hash256, Keypair, MinimalEthSpec, Slot, @@ -864,147 +862,4 @@ mod tests { } } } - - #[tokio::test] - async fn check_fallback_altair_to_electra() { - let slots_per_epoch = MinimalEthSpec::slots_per_epoch() as usize; - let num_epochs = 10; - let bellatrix_fork_epoch = 2usize; - let capella_fork_epoch = 4usize; - let deneb_fork_epoch = 6usize; - let electra_fork_epoch = 8usize; - let num_blocks_produced = num_epochs * slots_per_epoch; - - let mut spec = test_spec::(); - spec.altair_fork_epoch = Some(Epoch::new(0)); - spec.bellatrix_fork_epoch = Some(Epoch::new(bellatrix_fork_epoch as u64)); - spec.capella_fork_epoch = Some(Epoch::new(capella_fork_epoch as u64)); - spec.deneb_fork_epoch = Some(Epoch::new(deneb_fork_epoch as u64)); - spec.electra_fork_epoch = Some(Epoch::new(electra_fork_epoch as u64)); - - let harness = get_harness(VALIDATOR_COUNT, spec); - - // modify execution engine so it doesn't support engine_payloadBodiesBy* methods - let mock_execution_layer = harness.mock_execution_layer.as_ref().unwrap(); - mock_execution_layer - .server - .set_engine_capabilities(EngineCapabilities { - get_payload_bodies_by_hash_v1: false, - 
get_payload_bodies_by_range_v1: false, - ..DEFAULT_ENGINE_CAPABILITIES - }); - // refresh capabilities cache - harness - .chain - .execution_layer - .as_ref() - .unwrap() - .get_engine_capabilities(Some(Duration::ZERO)) - .await - .unwrap(); - - // go to bellatrix fork - harness - .extend_slots(bellatrix_fork_epoch * slots_per_epoch) - .await; - // extend half an epoch - harness.extend_slots(slots_per_epoch / 2).await; - // trigger merge - harness - .execution_block_generator() - .move_to_terminal_block() - .expect("should move to terminal block"); - let timestamp = harness.get_timestamp_at_slot() + harness.spec.seconds_per_slot; - harness - .execution_block_generator() - .modify_last_block(|block| { - if let Block::PoW(terminal_block) = block { - terminal_block.timestamp = timestamp; - } - }); - // finish out merge epoch - harness.extend_slots(slots_per_epoch / 2).await; - // finish rest of epochs - harness - .extend_slots((num_epochs - 1 - bellatrix_fork_epoch) * slots_per_epoch) - .await; - - let head = harness.chain.head_snapshot(); - let state = &head.beacon_state; - - assert_eq!( - state.slot(), - Slot::new(num_blocks_produced as u64), - "head should be at the current slot" - ); - assert_eq!( - state.current_epoch(), - num_blocks_produced as u64 / MinimalEthSpec::slots_per_epoch(), - "head should be at the expected epoch" - ); - assert_eq!( - state.current_justified_checkpoint().epoch, - state.current_epoch() - 1, - "the head should be justified one behind the current epoch" - ); - assert_eq!( - state.finalized_checkpoint().epoch, - state.current_epoch() - 2, - "the head should be finalized two behind the current epoch" - ); - - let block_roots: Vec = harness - .chain - .forwards_iter_block_roots(Slot::new(0)) - .expect("should get iter") - .map(Result::unwrap) - .map(|(root, _)| root) - .collect(); - - let mut expected_blocks = vec![]; - // get all blocks the old fashioned way - for root in &block_roots { - let block = harness - .chain - .get_block(root) - 
.await - .expect("should get block") - .expect("block should exist"); - expected_blocks.push(block); - } - - for epoch in 0..num_epochs { - let start = epoch * slots_per_epoch; - let mut epoch_roots = vec![Hash256::zero(); slots_per_epoch]; - epoch_roots[..].clone_from_slice(&block_roots[start..(start + slots_per_epoch)]); - let streamer = BeaconBlockStreamer::new(&harness.chain, CheckCaches::No) - .expect("should create streamer"); - let (block_tx, mut block_rx) = mpsc::unbounded_channel(); - streamer.stream(epoch_roots.clone(), block_tx).await; - - for (i, expected_root) in epoch_roots.into_iter().enumerate() { - let (found_root, found_block_result) = - block_rx.recv().await.expect("should get block"); - - assert_eq!( - found_root, expected_root, - "expected block root should match" - ); - match found_block_result.as_ref() { - Ok(maybe_block) => { - let found_block = maybe_block.clone().expect("should have a block"); - let expected_block = expected_blocks - .get(start + i) - .expect("should get expected block"); - assert_eq!( - found_block.as_ref(), - expected_block, - "expected block should match found block" - ); - } - Err(e) => panic!("Error retrieving block {}: {:?}", expected_root, e), - } - } - } - } } diff --git a/beacon_node/beacon_chain/src/bellatrix_readiness.rs b/beacon_node/beacon_chain/src/bellatrix_readiness.rs index c2e387c422..500588953f 100644 --- a/beacon_node/beacon_chain/src/bellatrix_readiness.rs +++ b/beacon_node/beacon_chain/src/bellatrix_readiness.rs @@ -4,7 +4,6 @@ use crate::{BeaconChain, BeaconChainError as Error, BeaconChainTypes}; use execution_layer::BlockByNumberQuery; use serde::{Deserialize, Serialize, Serializer}; -use slog::debug; use std::fmt; use std::fmt::Write; use types::*; @@ -199,7 +198,6 @@ impl BeaconChain { else { return Ok(GenesisExecutionPayloadStatus::Irrelevant); }; - let fork = self.spec.fork_name_at_epoch(Epoch::new(0)); let execution_layer = self .execution_layer @@ -222,49 +220,6 @@ impl BeaconChain { }); } - 
// Double-check the block by reconstructing it. - let execution_payload = execution_layer - .get_payload_by_hash_legacy(exec_block_hash, fork) - .await - .map_err(|e| Error::ExecutionLayerGetBlockByHashFailed(Box::new(e)))? - .ok_or(Error::BlockHashMissingFromExecutionLayer(exec_block_hash))?; - - // Verify payload integrity. - let header_from_payload = ExecutionPayloadHeader::from(execution_payload.to_ref()); - - let got_transactions_root = header_from_payload.transactions_root(); - let expected_transactions_root = latest_execution_payload_header.transactions_root(); - let got_withdrawals_root = header_from_payload.withdrawals_root().ok(); - let expected_withdrawals_root = latest_execution_payload_header.withdrawals_root().ok(); - - if got_transactions_root != expected_transactions_root { - return Ok(GenesisExecutionPayloadStatus::TransactionsRootMismatch { - got: got_transactions_root, - expected: expected_transactions_root, - }); - } - - if let Some(expected) = expected_withdrawals_root { - if let Some(got) = got_withdrawals_root { - if got != expected { - return Ok(GenesisExecutionPayloadStatus::WithdrawalsRootMismatch { - got, - expected, - }); - } - } - } - - if header_from_payload.to_ref() != latest_execution_payload_header { - debug!( - self.log, - "Genesis execution payload reconstruction failure"; - "consensus_node_header" => ?latest_execution_payload_header, - "execution_node_header" => ?header_from_payload - ); - return Ok(GenesisExecutionPayloadStatus::OtherMismatch); - } - Ok(GenesisExecutionPayloadStatus::Correct(exec_block_hash)) } } diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index 8cfe6e9efd..8ba8ecfffb 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -11,9 +11,6 @@ use eth2::types::{ BlobsBundle, SsePayloadAttributes, SsePayloadAttributesV1, SsePayloadAttributesV2, SsePayloadAttributesV3, }; -use 
ethers_core::types::Transaction; -use ethers_core::utils::rlp; -use ethers_core::utils::rlp::{Decodable, Rlp}; use http::deposit_methods::RpcError; pub use json_structures::{JsonWithdrawal, TransitionConfigurationV1}; use pretty_reqwest_error::PrettyReqwestError; @@ -43,8 +40,6 @@ pub use new_payload_request::{ NewPayloadRequestDeneb, NewPayloadRequestElectra, }; -use self::json_structures::{JsonConsolidationRequest, JsonDepositRequest, JsonWithdrawalRequest}; - pub const LATEST_TAG: &str = "latest"; pub type PayloadId = [u8; 8]; @@ -74,7 +69,6 @@ pub enum Error { RequiredMethodUnsupported(&'static str), UnsupportedForkVariant(String), InvalidClientVersion(String), - RlpDecoderError(rlp::DecoderError), TooManyConsolidationRequests(usize), } @@ -109,12 +103,6 @@ impl From for Error { } } -impl From for Error { - fn from(e: rlp::DecoderError) -> Self { - Error::RlpDecoderError(e) - } -} - impl From for Error { fn from(e: ssz_types::Error) -> Self { Error::SszError(e) @@ -161,186 +149,6 @@ pub struct ExecutionBlock { pub timestamp: u64, } -/// Representation of an execution block with enough detail to reconstruct a payload. 
-#[superstruct( - variants(Bellatrix, Capella, Deneb, Electra), - variant_attributes( - derive(Clone, Debug, PartialEq, Serialize, Deserialize,), - serde(bound = "E: EthSpec", rename_all = "camelCase"), - ), - cast_error(ty = "Error", expr = "Error::IncorrectStateVariant"), - partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant") -)] -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] -#[serde(bound = "E: EthSpec", rename_all = "camelCase", untagged)] -pub struct ExecutionBlockWithTransactions { - pub parent_hash: ExecutionBlockHash, - #[serde(alias = "miner")] - #[serde(with = "serde_utils::address_hex")] - pub fee_recipient: Address, - pub state_root: Hash256, - pub receipts_root: Hash256, - #[serde(with = "ssz_types::serde_utils::hex_fixed_vec")] - pub logs_bloom: FixedVector, - #[serde(alias = "mixHash")] - pub prev_randao: Hash256, - #[serde(rename = "number", with = "serde_utils::u64_hex_be")] - pub block_number: u64, - #[serde(with = "serde_utils::u64_hex_be")] - pub gas_limit: u64, - #[serde(with = "serde_utils::u64_hex_be")] - pub gas_used: u64, - #[serde(with = "serde_utils::u64_hex_be")] - pub timestamp: u64, - #[serde(with = "ssz_types::serde_utils::hex_var_list")] - pub extra_data: VariableList, - pub base_fee_per_gas: Uint256, - #[serde(rename = "hash")] - pub block_hash: ExecutionBlockHash, - pub transactions: Vec, - #[superstruct(only(Capella, Deneb, Electra))] - pub withdrawals: Vec, - #[superstruct(only(Deneb, Electra))] - #[serde(with = "serde_utils::u64_hex_be")] - pub blob_gas_used: u64, - #[superstruct(only(Deneb, Electra))] - #[serde(with = "serde_utils::u64_hex_be")] - pub excess_blob_gas: u64, - #[superstruct(only(Electra))] - pub deposit_requests: Vec, - #[superstruct(only(Electra))] - pub withdrawal_requests: Vec, - #[superstruct(only(Electra))] - pub consolidation_requests: Vec, -} - -impl TryFrom> for ExecutionBlockWithTransactions { - type Error = Error; - - fn try_from(payload: ExecutionPayload) -> Result { 
- let json_payload = match payload { - ExecutionPayload::Bellatrix(block) => { - Self::Bellatrix(ExecutionBlockWithTransactionsBellatrix { - parent_hash: block.parent_hash, - fee_recipient: block.fee_recipient, - state_root: block.state_root, - receipts_root: block.receipts_root, - logs_bloom: block.logs_bloom, - prev_randao: block.prev_randao, - block_number: block.block_number, - gas_limit: block.gas_limit, - gas_used: block.gas_used, - timestamp: block.timestamp, - extra_data: block.extra_data, - base_fee_per_gas: block.base_fee_per_gas, - block_hash: block.block_hash, - transactions: block - .transactions - .iter() - .map(|tx| Transaction::decode(&Rlp::new(tx))) - .collect::, _>>()?, - }) - } - ExecutionPayload::Capella(block) => { - Self::Capella(ExecutionBlockWithTransactionsCapella { - parent_hash: block.parent_hash, - fee_recipient: block.fee_recipient, - state_root: block.state_root, - receipts_root: block.receipts_root, - logs_bloom: block.logs_bloom, - prev_randao: block.prev_randao, - block_number: block.block_number, - gas_limit: block.gas_limit, - gas_used: block.gas_used, - timestamp: block.timestamp, - extra_data: block.extra_data, - base_fee_per_gas: block.base_fee_per_gas, - block_hash: block.block_hash, - transactions: block - .transactions - .iter() - .map(|tx| Transaction::decode(&Rlp::new(tx))) - .collect::, _>>()?, - withdrawals: Vec::from(block.withdrawals) - .into_iter() - .map(|withdrawal| withdrawal.into()) - .collect(), - }) - } - ExecutionPayload::Deneb(block) => Self::Deneb(ExecutionBlockWithTransactionsDeneb { - parent_hash: block.parent_hash, - fee_recipient: block.fee_recipient, - state_root: block.state_root, - receipts_root: block.receipts_root, - logs_bloom: block.logs_bloom, - prev_randao: block.prev_randao, - block_number: block.block_number, - gas_limit: block.gas_limit, - gas_used: block.gas_used, - timestamp: block.timestamp, - extra_data: block.extra_data, - base_fee_per_gas: block.base_fee_per_gas, - block_hash: 
block.block_hash, - transactions: block - .transactions - .iter() - .map(|tx| Transaction::decode(&Rlp::new(tx))) - .collect::, _>>()?, - withdrawals: Vec::from(block.withdrawals) - .into_iter() - .map(|withdrawal| withdrawal.into()) - .collect(), - blob_gas_used: block.blob_gas_used, - excess_blob_gas: block.excess_blob_gas, - }), - ExecutionPayload::Electra(block) => { - Self::Electra(ExecutionBlockWithTransactionsElectra { - parent_hash: block.parent_hash, - fee_recipient: block.fee_recipient, - state_root: block.state_root, - receipts_root: block.receipts_root, - logs_bloom: block.logs_bloom, - prev_randao: block.prev_randao, - block_number: block.block_number, - gas_limit: block.gas_limit, - gas_used: block.gas_used, - timestamp: block.timestamp, - extra_data: block.extra_data, - base_fee_per_gas: block.base_fee_per_gas, - block_hash: block.block_hash, - transactions: block - .transactions - .iter() - .map(|tx| Transaction::decode(&Rlp::new(tx))) - .collect::, _>>()?, - withdrawals: Vec::from(block.withdrawals) - .into_iter() - .map(|withdrawal| withdrawal.into()) - .collect(), - blob_gas_used: block.blob_gas_used, - excess_blob_gas: block.excess_blob_gas, - deposit_requests: block - .deposit_requests - .into_iter() - .map(|deposit| deposit.into()) - .collect(), - withdrawal_requests: block - .withdrawal_requests - .into_iter() - .map(|withdrawal| withdrawal.into()) - .collect(), - consolidation_requests: block - .consolidation_requests - .into_iter() - .map(Into::into) - .collect(), - }) - } - }; - Ok(json_payload) - } -} - #[superstruct( variants(V1, V2, V3), variant_attributes(derive(Clone, Debug, Eq, Hash, PartialEq),), diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index 5bc1343a0e..c497a4a725 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -734,54 +734,6 @@ impl HttpJsonRpc { .await } - pub async fn 
get_block_by_hash_with_txns( - &self, - block_hash: ExecutionBlockHash, - fork: ForkName, - ) -> Result>, Error> { - let params = json!([block_hash, true]); - Ok(Some(match fork { - ForkName::Bellatrix => ExecutionBlockWithTransactions::Bellatrix( - self.rpc_request( - ETH_GET_BLOCK_BY_HASH, - params, - ETH_GET_BLOCK_BY_HASH_TIMEOUT * self.execution_timeout_multiplier, - ) - .await?, - ), - ForkName::Capella => ExecutionBlockWithTransactions::Capella( - self.rpc_request( - ETH_GET_BLOCK_BY_HASH, - params, - ETH_GET_BLOCK_BY_HASH_TIMEOUT * self.execution_timeout_multiplier, - ) - .await?, - ), - ForkName::Deneb => ExecutionBlockWithTransactions::Deneb( - self.rpc_request( - ETH_GET_BLOCK_BY_HASH, - params, - ETH_GET_BLOCK_BY_HASH_TIMEOUT * self.execution_timeout_multiplier, - ) - .await?, - ), - ForkName::Electra => ExecutionBlockWithTransactions::Electra( - self.rpc_request( - ETH_GET_BLOCK_BY_HASH, - params, - ETH_GET_BLOCK_BY_HASH_TIMEOUT * self.execution_timeout_multiplier, - ) - .await?, - ), - ForkName::Base | ForkName::Altair => { - return Err(Error::UnsupportedForkVariant(format!( - "called get_block_by_hash_with_txns with fork {:?}", - fork - ))) - } - })) - } - pub async fn new_payload_v1( &self, execution_payload: ExecutionPayload, diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index dbf889bbc8..a05d584cfc 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -778,6 +778,57 @@ pub struct JsonExecutionPayloadBody { Option>, } +impl From> for JsonExecutionPayloadBodyV1 { + fn from(value: ExecutionPayloadBodyV1) -> Self { + Self { + transactions: value.transactions, + withdrawals: value.withdrawals.map(|json_withdrawals| { + VariableList::from( + json_withdrawals + .into_iter() + .map(Into::into) + .collect::>(), + ) + }), + } + } +} + +impl From> for JsonExecutionPayloadBodyV2 
{ + fn from(value: ExecutionPayloadBodyV2) -> Self { + Self { + transactions: value.transactions, + withdrawals: value.withdrawals.map(|json_withdrawals| { + VariableList::from( + json_withdrawals + .into_iter() + .map(Into::into) + .collect::>(), + ) + }), + deposit_requests: value.deposit_requests.map(|receipts| { + VariableList::from(receipts.into_iter().map(Into::into).collect::>()) + }), + withdrawal_requests: value.withdrawal_requests.map(|withdrawal_requests| { + VariableList::from( + withdrawal_requests + .into_iter() + .map(Into::into) + .collect::>(), + ) + }), + consolidation_requests: value.consolidation_requests.map(|consolidation_requests| { + VariableList::from( + consolidation_requests + .into_iter() + .map(Into::into) + .collect::>(), + ) + }), + } + } +} + impl From> for ExecutionPayloadBody { fn from(value: JsonExecutionPayloadBody) -> Self { match value { diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 6e3aca3959..648963a320 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -144,6 +144,7 @@ pub enum Error { payload: ExecutionBlockHash, transactions_root: Hash256, }, + PayloadBodiesByRangeNotSupported, InvalidJWTSecret(String), InvalidForkForPayload, InvalidPayloadBody(String), @@ -1804,7 +1805,6 @@ impl ExecutionLayer { header: &ExecutionPayloadHeader, fork: ForkName, ) -> Result>, Error> { - let hash = header.block_hash(); let block_number = header.block_number(); // Handle default payload body. @@ -1823,7 +1823,9 @@ impl ExecutionLayer { // Use efficient payload bodies by range method if supported. 
let capabilities = self.get_engine_capabilities(None).await?; - if capabilities.get_payload_bodies_by_range_v1 { + if capabilities.get_payload_bodies_by_range_v1 + || capabilities.get_payload_bodies_by_range_v2 + { let mut payload_bodies = self.get_payload_bodies_by_range(block_number, 1).await?; if payload_bodies.len() != 1 { @@ -1838,8 +1840,7 @@ impl ExecutionLayer { }) .transpose() } else { - // Fall back to eth_blockByHash. - self.get_payload_by_hash_legacy(hash, fork).await + Err(Error::PayloadBodiesByRangeNotSupported) } } @@ -1854,196 +1855,6 @@ impl ExecutionLayer { .map_err(Error::EngineError) } - pub async fn get_payload_by_hash_legacy( - &self, - hash: ExecutionBlockHash, - fork: ForkName, - ) -> Result>, Error> { - self.engine() - .request(|engine| async move { - self.get_payload_by_hash_from_engine(engine, hash, fork) - .await - }) - .await - .map_err(Box::new) - .map_err(Error::EngineError) - } - - async fn get_payload_by_hash_from_engine( - &self, - engine: &Engine, - hash: ExecutionBlockHash, - fork: ForkName, - ) -> Result>, ApiError> { - let _timer = metrics::start_timer(&metrics::EXECUTION_LAYER_GET_PAYLOAD_BY_BLOCK_HASH); - - if hash == ExecutionBlockHash::zero() { - return match fork { - ForkName::Bellatrix => Ok(Some(ExecutionPayloadBellatrix::default().into())), - ForkName::Capella => Ok(Some(ExecutionPayloadCapella::default().into())), - ForkName::Deneb => Ok(Some(ExecutionPayloadDeneb::default().into())), - ForkName::Electra => Ok(Some(ExecutionPayloadElectra::default().into())), - ForkName::Base | ForkName::Altair => Err(ApiError::UnsupportedForkVariant( - format!("called get_payload_by_hash_from_engine with {}", fork), - )), - }; - } - - let Some(block) = engine - .api - .get_block_by_hash_with_txns::(hash, fork) - .await? 
- else { - return Ok(None); - }; - - let convert_transactions = |transactions: Vec| { - VariableList::new( - transactions - .into_iter() - .map(|tx| VariableList::new(tx.rlp().to_vec())) - .collect::, ssz_types::Error>>()?, - ) - .map_err(ApiError::SszError) - }; - - let payload = match block { - ExecutionBlockWithTransactions::Bellatrix(bellatrix_block) => { - ExecutionPayload::Bellatrix(ExecutionPayloadBellatrix { - parent_hash: bellatrix_block.parent_hash, - fee_recipient: bellatrix_block.fee_recipient, - state_root: bellatrix_block.state_root, - receipts_root: bellatrix_block.receipts_root, - logs_bloom: bellatrix_block.logs_bloom, - prev_randao: bellatrix_block.prev_randao, - block_number: bellatrix_block.block_number, - gas_limit: bellatrix_block.gas_limit, - gas_used: bellatrix_block.gas_used, - timestamp: bellatrix_block.timestamp, - extra_data: bellatrix_block.extra_data, - base_fee_per_gas: bellatrix_block.base_fee_per_gas, - block_hash: bellatrix_block.block_hash, - transactions: convert_transactions(bellatrix_block.transactions)?, - }) - } - ExecutionBlockWithTransactions::Capella(capella_block) => { - let withdrawals = VariableList::new( - capella_block - .withdrawals - .into_iter() - .map(Into::into) - .collect(), - ) - .map_err(ApiError::DeserializeWithdrawals)?; - ExecutionPayload::Capella(ExecutionPayloadCapella { - parent_hash: capella_block.parent_hash, - fee_recipient: capella_block.fee_recipient, - state_root: capella_block.state_root, - receipts_root: capella_block.receipts_root, - logs_bloom: capella_block.logs_bloom, - prev_randao: capella_block.prev_randao, - block_number: capella_block.block_number, - gas_limit: capella_block.gas_limit, - gas_used: capella_block.gas_used, - timestamp: capella_block.timestamp, - extra_data: capella_block.extra_data, - base_fee_per_gas: capella_block.base_fee_per_gas, - block_hash: capella_block.block_hash, - transactions: convert_transactions(capella_block.transactions)?, - withdrawals, - }) - } - 
ExecutionBlockWithTransactions::Deneb(deneb_block) => { - let withdrawals = VariableList::new( - deneb_block - .withdrawals - .into_iter() - .map(Into::into) - .collect(), - ) - .map_err(ApiError::DeserializeWithdrawals)?; - ExecutionPayload::Deneb(ExecutionPayloadDeneb { - parent_hash: deneb_block.parent_hash, - fee_recipient: deneb_block.fee_recipient, - state_root: deneb_block.state_root, - receipts_root: deneb_block.receipts_root, - logs_bloom: deneb_block.logs_bloom, - prev_randao: deneb_block.prev_randao, - block_number: deneb_block.block_number, - gas_limit: deneb_block.gas_limit, - gas_used: deneb_block.gas_used, - timestamp: deneb_block.timestamp, - extra_data: deneb_block.extra_data, - base_fee_per_gas: deneb_block.base_fee_per_gas, - block_hash: deneb_block.block_hash, - transactions: convert_transactions(deneb_block.transactions)?, - withdrawals, - blob_gas_used: deneb_block.blob_gas_used, - excess_blob_gas: deneb_block.excess_blob_gas, - }) - } - ExecutionBlockWithTransactions::Electra(electra_block) => { - let withdrawals = VariableList::new( - electra_block - .withdrawals - .into_iter() - .map(Into::into) - .collect(), - ) - .map_err(ApiError::DeserializeWithdrawals)?; - let deposit_requests = VariableList::new( - electra_block - .deposit_requests - .into_iter() - .map(Into::into) - .collect(), - ) - .map_err(ApiError::DeserializeDepositRequests)?; - let withdrawal_requests = VariableList::new( - electra_block - .withdrawal_requests - .into_iter() - .map(Into::into) - .collect(), - ) - .map_err(ApiError::DeserializeWithdrawalRequests)?; - let n_consolidations = electra_block.consolidation_requests.len(); - let consolidation_requests = VariableList::new( - electra_block - .consolidation_requests - .into_iter() - .map(Into::into) - .collect::>(), - ) - .map_err(|_| ApiError::TooManyConsolidationRequests(n_consolidations))?; - ExecutionPayload::Electra(ExecutionPayloadElectra { - parent_hash: electra_block.parent_hash, - fee_recipient: 
electra_block.fee_recipient, - state_root: electra_block.state_root, - receipts_root: electra_block.receipts_root, - logs_bloom: electra_block.logs_bloom, - prev_randao: electra_block.prev_randao, - block_number: electra_block.block_number, - gas_limit: electra_block.gas_limit, - gas_used: electra_block.gas_used, - timestamp: electra_block.timestamp, - extra_data: electra_block.extra_data, - base_fee_per_gas: electra_block.base_fee_per_gas, - block_hash: electra_block.block_hash, - transactions: convert_transactions(electra_block.transactions)?, - withdrawals, - blob_gas_used: electra_block.blob_gas_used, - excess_blob_gas: electra_block.excess_blob_gas, - deposit_requests, - withdrawal_requests, - consolidation_requests, - }) - } - }; - - Ok(Some(payload)) - } - pub async fn propose_blinded_beacon_block( &self, block_root: Hash256, diff --git a/beacon_node/execution_layer/src/metrics.rs b/beacon_node/execution_layer/src/metrics.rs index c3da449535..184031af4d 100644 --- a/beacon_node/execution_layer/src/metrics.rs +++ b/beacon_node/execution_layer/src/metrics.rs @@ -54,13 +54,6 @@ pub static EXECUTION_LAYER_PRE_PREPARED_PAYLOAD_ID: LazyLock> = - LazyLock::new(|| { - try_create_histogram( - "execution_layer_get_payload_by_block_hash_time", - "Time to reconstruct a payload from the EE using eth_getBlockByHash", - ) - }); pub static EXECUTION_LAYER_GET_PAYLOAD_BODIES_BY_RANGE: LazyLock> = LazyLock::new(|| { try_create_histogram( diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index 8a30800fa7..6094e0d696 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -1,14 +1,11 @@ +use crate::engine_api::{ + json_structures::{ + JsonForkchoiceUpdatedV1Response, JsonPayloadStatusV1, JsonPayloadStatusV1Status, + }, + ExecutionBlock, PayloadAttributes, 
PayloadId, PayloadStatusV1, PayloadStatusV1Status, +}; use crate::engines::ForkchoiceState; use crate::EthersTransaction; -use crate::{ - engine_api::{ - json_structures::{ - JsonForkchoiceUpdatedV1Response, JsonPayloadStatusV1, JsonPayloadStatusV1Status, - }, - ExecutionBlock, PayloadAttributes, PayloadId, PayloadStatusV1, PayloadStatusV1Status, - }, - ExecutionBlockWithTransactions, -}; use eth2::types::BlobsBundle; use kzg::{Kzg, KzgCommitment, KzgProof}; use parking_lot::Mutex; @@ -89,17 +86,13 @@ impl Block { } } - pub fn as_execution_block_with_tx(&self) -> Option> { + pub fn as_execution_payload(&self) -> Option> { match self { - Block::PoS(payload) => Some(payload.clone().try_into().unwrap()), - Block::PoW(block) => Some( - ExecutionPayload::Bellatrix(ExecutionPayloadBellatrix { - block_hash: block.block_hash, - ..Default::default() - }) - .try_into() - .unwrap(), - ), + Block::PoS(payload) => Some(payload.clone()), + Block::PoW(block) => Some(ExecutionPayload::Bellatrix(ExecutionPayloadBellatrix { + block_hash: block.block_hash, + ..Default::default() + })), } } } @@ -255,20 +248,17 @@ impl ExecutionBlockGenerator { .map(|block| block.as_execution_block(self.terminal_total_difficulty)) } - pub fn execution_block_with_txs_by_hash( + pub fn execution_payload_by_hash( &self, hash: ExecutionBlockHash, - ) -> Option> { + ) -> Option> { self.block_by_hash(hash) - .and_then(|block| block.as_execution_block_with_tx()) + .and_then(|block| block.as_execution_payload()) } - pub fn execution_block_with_txs_by_number( - &self, - number: u64, - ) -> Option> { + pub fn execution_payload_by_number(&self, number: u64) -> Option> { self.block_by_number(number) - .and_then(|block| block.as_execution_block_with_tx()) + .and_then(|block| block.as_execution_payload()) } pub fn move_to_block_prior_to_terminal_block(&mut self) -> Result<(), String> { diff --git a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs 
index 81c69caf82..f36cb9797d 100644 --- a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs +++ b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs @@ -83,12 +83,10 @@ pub async fn handle_rpc( .ok_or_else(|| "missing/invalid params[1] value".to_string()) .map_err(|s| (s, BAD_PARAMS_ERROR_CODE))?; if full_tx { - Ok(serde_json::to_value( - ctx.execution_block_generator - .read() - .execution_block_with_txs_by_hash(hash), - ) - .unwrap()) + Err(( + "full_tx support has been removed".to_string(), + BAD_PARAMS_ERROR_CODE, + )) } else { Ok(serde_json::to_value( ctx.execution_block_generator @@ -556,40 +554,25 @@ pub async fn handle_rpc( let mut response = vec![]; for block_num in start..(start + count) { - let maybe_block = ctx + let maybe_payload = ctx .execution_block_generator .read() - .execution_block_with_txs_by_number(block_num); + .execution_payload_by_number(block_num); - match maybe_block { - Some(block) => { - let transactions = Transactions::::new( - block - .transactions() - .iter() - .map(|transaction| VariableList::new(transaction.rlp().to_vec())) - .collect::>() - .map_err(|e| { - ( - format!("failed to deserialize transaction: {:?}", e), - GENERIC_ERROR_CODE, - ) - })?, - ) - .map_err(|e| { - ( - format!("failed to deserialize transactions: {:?}", e), - GENERIC_ERROR_CODE, - ) - })?; - - response.push(Some(JsonExecutionPayloadBodyV1:: { - transactions, - withdrawals: block - .withdrawals() - .ok() - .map(|withdrawals| VariableList::from(withdrawals.clone())), - })); + match maybe_payload { + Some(payload) => { + assert!( + !payload.fork_name().electra_enabled(), + "payload bodies V1 is not supported for Electra blocks" + ); + let payload_body = ExecutionPayloadBodyV1 { + transactions: payload.transactions().clone(), + withdrawals: payload.withdrawals().ok().cloned(), + }; + let json_payload_body = JsonExecutionPayloadBody::V1( + JsonExecutionPayloadBodyV1::::from(payload_body), + ); + response.push(Some(json_payload_body)); } None => 
response.push(None), } @@ -611,63 +594,28 @@ pub async fn handle_rpc( let mut response = vec![]; for block_num in start..(start + count) { - let maybe_block = ctx + let maybe_payload = ctx .execution_block_generator .read() - .execution_block_with_txs_by_number(block_num); - - match maybe_block { - Some(block) => { - let transactions = Transactions::::new( - block - .transactions() - .iter() - .map(|transaction| VariableList::new(transaction.rlp().to_vec())) - .collect::>() - .map_err(|e| { - ( - format!("failed to deserialize transaction: {:?}", e), - GENERIC_ERROR_CODE, - ) - })?, - ) - .map_err(|e| { - ( - format!("failed to deserialize transactions: {:?}", e), - GENERIC_ERROR_CODE, - ) - })?; + .execution_payload_by_number(block_num); + match maybe_payload { + Some(payload) => { // TODO(electra): add testing for: // deposit_requests // withdrawal_requests // consolidation_requests - response.push(Some(JsonExecutionPayloadBodyV2:: { - transactions, - withdrawals: block - .withdrawals() - .ok() - .map(|withdrawals| VariableList::from(withdrawals.clone())), - deposit_requests: block.deposit_requests().ok().map( - |deposit_requests| VariableList::from(deposit_requests.clone()), - ), - withdrawal_requests: block.withdrawal_requests().ok().map( - |withdrawal_requests| { - VariableList::from(withdrawal_requests.clone()) - }, - ), - consolidation_requests: block.consolidation_requests().ok().map( - |consolidation_requests| { - VariableList::from( - consolidation_requests - .clone() - .into_iter() - .map(Into::into) - .collect::>(), - ) - }, - ), - })); + let payload_body = ExecutionPayloadBodyV2 { + transactions: payload.transactions().clone(), + withdrawals: payload.withdrawals().ok().cloned(), + deposit_requests: payload.deposit_requests().ok().cloned(), + withdrawal_requests: payload.withdrawal_requests().ok().cloned(), + consolidation_requests: payload.consolidation_requests().ok().cloned(), + }; + let json_payload_body = JsonExecutionPayloadBody::V2( + 
JsonExecutionPayloadBodyV2::::from(payload_body), + ); + response.push(Some(json_payload_body)); } None => response.push(None), } diff --git a/testing/execution_engine_integration/src/test_rig.rs b/testing/execution_engine_integration/src/test_rig.rs index f3f5a72cb6..0289fd4206 100644 --- a/testing/execution_engine_integration/src/test_rig.rs +++ b/testing/execution_engine_integration/src/test_rig.rs @@ -649,15 +649,7 @@ async fn check_payload_reconstruction( ee: &ExecutionPair, payload: &ExecutionPayload, ) { - // check via legacy eth_getBlockByHash - let reconstructed = ee - .execution_layer - .get_payload_by_hash_legacy(payload.block_hash(), payload.fork_name()) - .await - .unwrap() - .unwrap(); - assert_eq!(reconstructed, *payload); - // also check via payload bodies method + // check via payload bodies method let capabilities = ee .execution_layer .get_engine_capabilities(None) From 873748d2c3bb314a00ebfab5f314aba6cba2a373 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Fri, 6 Sep 2024 10:42:34 +0100 Subject: [PATCH 12/66] simplify rpc codec logic (#6304) * simplify rpc codec logic * Merge branch 'unstable' of github.com:sigp/lighthouse into simplify-rpc-codec * Merge branch 'unstable' of github.com:sigp/lighthouse into simplify-rpc-codec * Merge branch 'unstable' of github.com:sigp/lighthouse into simply-rpc-codec * Merge branch 'unstable' into simplify-rpc-codec * Merge branch 'unstable' into simplify-rpc-codec --- .../src/rpc/{codec/ssz_snappy.rs => codec.rs} | 364 +++++++++++++----- .../lighthouse_network/src/rpc/codec/base.rs | 334 ---------------- .../lighthouse_network/src/rpc/codec/mod.rs | 61 --- .../lighthouse_network/src/rpc/outbound.rs | 13 +- .../lighthouse_network/src/rpc/protocol.rs | 18 +- 5 files changed, 278 insertions(+), 512 deletions(-) rename beacon_node/lighthouse_network/src/rpc/{codec/ssz_snappy.rs => codec.rs} (91%) delete mode 100644 beacon_node/lighthouse_network/src/rpc/codec/base.rs delete mode 100644 
beacon_node/lighthouse_network/src/rpc/codec/mod.rs diff --git a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs b/beacon_node/lighthouse_network/src/rpc/codec.rs similarity index 91% rename from beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs rename to beacon_node/lighthouse_network/src/rpc/codec.rs index 8f5143d7ed..224fb8a5f7 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec.rs @@ -1,9 +1,9 @@ use crate::rpc::methods::*; -use crate::rpc::{ - codec::base::OutboundCodec, - protocol::{Encoding, ProtocolId, RPCError, SupportedProtocol, ERROR_TYPE_MAX, ERROR_TYPE_MIN}, +use crate::rpc::protocol::{ + Encoding, ProtocolId, RPCError, SupportedProtocol, ERROR_TYPE_MAX, ERROR_TYPE_MIN, }; use crate::rpc::{InboundRequest, OutboundRequest}; +use libp2p::bytes::BufMut; use libp2p::bytes::BytesMut; use snap::read::FrameDecoder; use snap::write::FrameEncoder; @@ -57,13 +57,13 @@ impl SSZSnappyInboundCodec { max_packet_size, } } -} -// Encoder for inbound streams: Encodes RPC Responses sent to peers. -impl Encoder> for SSZSnappyInboundCodec { - type Error = RPCError; - - fn encode(&mut self, item: RPCCodedResponse, dst: &mut BytesMut) -> Result<(), Self::Error> { + /// Encodes RPC Responses sent to peers. + fn encode_response( + &mut self, + item: RPCCodedResponse, + dst: &mut BytesMut, + ) -> Result<(), RPCError> { let bytes = match &item { RPCCodedResponse::Success(resp) => match &resp { RPCResponse::Status(res) => res.as_ssz_bytes(), @@ -125,6 +125,21 @@ impl Encoder> for SSZSnappyInboundCodec { } } +// Encoder for inbound streams: Encodes RPC Responses sent to peers. 
+impl Encoder> for SSZSnappyInboundCodec { + type Error = RPCError; + + fn encode(&mut self, item: RPCCodedResponse, dst: &mut BytesMut) -> Result<(), Self::Error> { + dst.clear(); + dst.reserve(1); + dst.put_u8( + item.as_u8() + .expect("Should never encode a stream termination"), + ); + self.encode_response(item, dst) + } +} + // Decoder for inbound streams: Decodes RPC requests from peers impl Decoder for SSZSnappyInboundCodec { type Item = InboundRequest; @@ -188,6 +203,8 @@ pub struct SSZSnappyOutboundCodec { /// The fork name corresponding to the received context bytes. fork_name: Option, fork_context: Arc, + /// Keeps track of the current response code for a chunk. + current_response_code: Option, phantom: PhantomData, } @@ -209,6 +226,93 @@ impl SSZSnappyOutboundCodec { fork_name: None, fork_context, phantom: PhantomData, + current_response_code: None, + } + } + + // Decode an Rpc response. + fn decode_response(&mut self, src: &mut BytesMut) -> Result>, RPCError> { + // Read the context bytes if required + if self.protocol.has_context_bytes() && self.fork_name.is_none() { + if src.len() >= CONTEXT_BYTES_LEN { + let context_bytes = src.split_to(CONTEXT_BYTES_LEN); + let mut result = [0; CONTEXT_BYTES_LEN]; + result.copy_from_slice(context_bytes.as_ref()); + self.fork_name = Some(context_bytes_to_fork_name( + result, + self.fork_context.clone(), + )?); + } else { + return Ok(None); + } + } + let Some(length) = handle_length(&mut self.inner, &mut self.len, src)? else { + return Ok(None); + }; + + // Should not attempt to decode rpc chunks with `length > max_packet_size` or not within bounds of + // packet size for ssz container corresponding to `self.protocol`. 
+ let ssz_limits = self.protocol.rpc_response_limits::(&self.fork_context); + if ssz_limits.is_out_of_bounds(length, self.max_packet_size) { + return Err(RPCError::InvalidData(format!( + "RPC response length is out of bounds, length {}, max {}, min {}", + length, ssz_limits.max, ssz_limits.min + ))); + } + // Calculate worst case compression length for given uncompressed length + let max_compressed_len = snap::raw::max_compress_len(length) as u64; + // Create a limit reader as a wrapper that reads only upto `max_compressed_len` from `src`. + let limit_reader = Cursor::new(src.as_ref()).take(max_compressed_len); + let mut reader = FrameDecoder::new(limit_reader); + + let mut decoded_buffer = vec![0; length]; + + match reader.read_exact(&mut decoded_buffer) { + Ok(()) => { + // `n` is how many bytes the reader read in the compressed stream + let n = reader.get_ref().get_ref().position(); + self.len = None; + let _read_bytes = src.split_to(n as usize); + // Safe to `take` from `self.fork_name` as we have all the bytes we need to + // decode an ssz object at this point. + let fork_name = self.fork_name.take(); + handle_rpc_response(self.protocol.versioned_protocol, &decoded_buffer, fork_name) + } + Err(e) => handle_error(e, reader.get_ref().get_ref().position(), max_compressed_len), + } + } + + fn decode_error(&mut self, src: &mut BytesMut) -> Result, RPCError> { + let Some(length) = handle_length(&mut self.inner, &mut self.len, src)? else { + return Ok(None); + }; + + // Should not attempt to decode rpc chunks with `length > max_packet_size` or not within bounds of + // packet size for ssz container corresponding to `ErrorType`. 
+ if length > self.max_packet_size || length > *ERROR_TYPE_MAX || length < *ERROR_TYPE_MIN { + return Err(RPCError::InvalidData(format!( + "RPC Error length is out of bounds, length {}", + length + ))); + } + + // Calculate worst case compression length for given uncompressed length + let max_compressed_len = snap::raw::max_compress_len(length) as u64; + // Create a limit reader as a wrapper that reads only upto `max_compressed_len` from `src`. + let limit_reader = Cursor::new(src.as_ref()).take(max_compressed_len); + let mut reader = FrameDecoder::new(limit_reader); + let mut decoded_buffer = vec![0; length]; + match reader.read_exact(&mut decoded_buffer) { + Ok(()) => { + // `n` is how many bytes the reader read in the compressed stream + let n = reader.get_ref().get_ref().position(); + self.len = None; + let _read_bytes = src.split_to(n as usize); + Ok(Some(ErrorType(VariableList::from_ssz_bytes( + &decoded_buffer, + )?))) + } + Err(e) => handle_error(e, reader.get_ref().get_ref().position(), max_compressed_len), } } } @@ -265,99 +369,40 @@ impl Encoder> for SSZSnappyOutboundCodec { // We prefer to decode blocks and attestations with extra knowledge about the chain to perform // faster verification checks before decoding entire blocks/attestations. 
impl Decoder for SSZSnappyOutboundCodec { - type Item = RPCResponse; + type Item = RPCCodedResponse; type Error = RPCError; fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { - // Read the context bytes if required - if self.protocol.has_context_bytes() && self.fork_name.is_none() { - if src.len() >= CONTEXT_BYTES_LEN { - let context_bytes = src.split_to(CONTEXT_BYTES_LEN); - let mut result = [0; CONTEXT_BYTES_LEN]; - result.copy_from_slice(context_bytes.as_ref()); - self.fork_name = Some(context_bytes_to_fork_name( - result, - self.fork_context.clone(), - )?); + // if we have only received the response code, wait for more bytes + if src.len() <= 1 { + return Ok(None); + } + // using the response code determine which kind of payload needs to be decoded. + let response_code = self.current_response_code.unwrap_or_else(|| { + let resp_code = src.split_to(1)[0]; + self.current_response_code = Some(resp_code); + resp_code + }); + + let inner_result = { + if RPCCodedResponse::::is_response(response_code) { + // decode an actual response and mutates the buffer if enough bytes have been read + // returning the result. + self.decode_response(src) + .map(|r| r.map(RPCCodedResponse::Success)) } else { - return Ok(None); + // decode an error + self.decode_error(src) + .map(|r| r.map(|resp| RPCCodedResponse::from_error(response_code, resp))) } - } - let Some(length) = handle_length(&mut self.inner, &mut self.len, src)? else { - return Ok(None); }; - - // Should not attempt to decode rpc chunks with `length > max_packet_size` or not within bounds of - // packet size for ssz container corresponding to `self.protocol`. 
- let ssz_limits = self.protocol.rpc_response_limits::(&self.fork_context); - if ssz_limits.is_out_of_bounds(length, self.max_packet_size) { - return Err(RPCError::InvalidData(format!( - "RPC response length is out of bounds, length {}, max {}, min {}", - length, ssz_limits.max, ssz_limits.min - ))); - } - // Calculate worst case compression length for given uncompressed length - let max_compressed_len = snap::raw::max_compress_len(length) as u64; - // Create a limit reader as a wrapper that reads only upto `max_compressed_len` from `src`. - let limit_reader = Cursor::new(src.as_ref()).take(max_compressed_len); - let mut reader = FrameDecoder::new(limit_reader); - - let mut decoded_buffer = vec![0; length]; - - match reader.read_exact(&mut decoded_buffer) { - Ok(()) => { - // `n` is how many bytes the reader read in the compressed stream - let n = reader.get_ref().get_ref().position(); - self.len = None; - let _read_bytes = src.split_to(n as usize); - // Safe to `take` from `self.fork_name` as we have all the bytes we need to - // decode an ssz object at this point. - let fork_name = self.fork_name.take(); - handle_rpc_response(self.protocol.versioned_protocol, &decoded_buffer, fork_name) - } - Err(e) => handle_error(e, reader.get_ref().get_ref().position(), max_compressed_len), - } - } -} - -impl OutboundCodec> for SSZSnappyOutboundCodec { - type CodecErrorType = ErrorType; - - fn decode_error( - &mut self, - src: &mut BytesMut, - ) -> Result, RPCError> { - let Some(length) = handle_length(&mut self.inner, &mut self.len, src)? else { - return Ok(None); - }; - - // Should not attempt to decode rpc chunks with `length > max_packet_size` or not within bounds of - // packet size for ssz container corresponding to `ErrorType`. 
- if length > self.max_packet_size || length > *ERROR_TYPE_MAX || length < *ERROR_TYPE_MIN { - return Err(RPCError::InvalidData(format!( - "RPC Error length is out of bounds, length {}", - length - ))); - } - - // Calculate worst case compression length for given uncompressed length - let max_compressed_len = snap::raw::max_compress_len(length) as u64; - // Create a limit reader as a wrapper that reads only upto `max_compressed_len` from `src`. - let limit_reader = Cursor::new(src.as_ref()).take(max_compressed_len); - let mut reader = FrameDecoder::new(limit_reader); - let mut decoded_buffer = vec![0; length]; - match reader.read_exact(&mut decoded_buffer) { - Ok(()) => { - // `n` is how many bytes the reader read in the compressed stream - let n = reader.get_ref().get_ref().position(); - self.len = None; - let _read_bytes = src.split_to(n as usize); - Ok(Some(ErrorType(VariableList::from_ssz_bytes( - &decoded_buffer, - )?))) - } - Err(e) => handle_error(e, reader.get_ref().get_ref().position(), max_compressed_len), + // if the inner decoder was capable of decoding a chunk, we need to reset the current + // response code for the next chunk + if let Ok(Some(_)) = inner_result { + self.current_response_code = None; } + // return the result + inner_result } } @@ -1030,7 +1075,7 @@ mod tests { let mut snappy_inbound_codec = SSZSnappyInboundCodec::::new(snappy_protocol_id, max_packet_size, fork_context); - snappy_inbound_codec.encode(message, &mut buf)?; + snappy_inbound_codec.encode_response(message, &mut buf)?; Ok(buf) } @@ -1075,7 +1120,7 @@ mod tests { let mut snappy_outbound_codec = SSZSnappyOutboundCodec::::new(snappy_protocol_id, max_packet_size, fork_context); // decode message just as snappy message - snappy_outbound_codec.decode(message) + snappy_outbound_codec.decode_response(message) } /// Encodes the provided protocol message as bytes and tries to decode the encoding bytes. 
@@ -1847,4 +1892,129 @@ mod tests { RPCError::InvalidData(_) )); } + + #[test] + fn test_decode_status_message() { + let message = hex::decode("0054ff060000734e615070590032000006e71e7b54989925efd6c9cbcb8ceb9b5f71216f5137282bf6a1e3b50f64e42d6c7fb347abe07eb0db8200000005029e2800").unwrap(); + let mut buf = BytesMut::new(); + buf.extend_from_slice(&message); + + let snappy_protocol_id = ProtocolId::new(SupportedProtocol::StatusV1, Encoding::SSZSnappy); + + let fork_context = Arc::new(fork_context(ForkName::Base)); + + let chain_spec = Spec::default_spec(); + + let mut snappy_outbound_codec = SSZSnappyOutboundCodec::::new( + snappy_protocol_id, + max_rpc_size(&fork_context, chain_spec.max_chunk_size as usize), + fork_context, + ); + + // remove response code + let mut snappy_buf = buf.clone(); + let _ = snappy_buf.split_to(1); + + // decode message just as snappy message + let _snappy_decoded_message = snappy_outbound_codec + .decode_response(&mut snappy_buf) + .unwrap(); + + // decode message as ssz snappy chunk + let _snappy_decoded_chunk = snappy_outbound_codec.decode(&mut buf).unwrap(); + } + + #[test] + fn test_invalid_length_prefix() { + let mut uvi_codec: Uvi = Uvi::default(); + let mut dst = BytesMut::with_capacity(1024); + + // Smallest > 10 byte varint + let len: u128 = 2u128.pow(70); + + // Insert length-prefix + uvi_codec.encode(len, &mut dst).unwrap(); + + let snappy_protocol_id = ProtocolId::new(SupportedProtocol::StatusV1, Encoding::SSZSnappy); + + let fork_context = Arc::new(fork_context(ForkName::Base)); + + let chain_spec = Spec::default_spec(); + + let mut snappy_outbound_codec = SSZSnappyOutboundCodec::::new( + snappy_protocol_id, + max_rpc_size(&fork_context, chain_spec.max_chunk_size as usize), + fork_context, + ); + + let snappy_decoded_message = snappy_outbound_codec.decode_response(&mut dst).unwrap_err(); + + assert_eq!( + snappy_decoded_message, + RPCError::IoError("input bytes exceed maximum".to_string()), + "length-prefix of > 10 bytes is 
invalid" + ); + } + + #[test] + fn test_length_limits() { + fn encode_len(len: usize) -> BytesMut { + let mut uvi_codec: Uvi = Uvi::default(); + let mut dst = BytesMut::with_capacity(1024); + uvi_codec.encode(len, &mut dst).unwrap(); + dst + } + + let protocol_id = ProtocolId::new(SupportedProtocol::BlocksByRangeV1, Encoding::SSZSnappy); + + // Response limits + let fork_context = Arc::new(fork_context(ForkName::Base)); + + let chain_spec = Spec::default_spec(); + + let max_rpc_size = max_rpc_size(&fork_context, chain_spec.max_chunk_size as usize); + let limit = protocol_id.rpc_response_limits::(&fork_context); + let mut max = encode_len(limit.max + 1); + let mut codec = SSZSnappyOutboundCodec::::new( + protocol_id.clone(), + max_rpc_size, + fork_context.clone(), + ); + assert!(matches!( + codec.decode_response(&mut max).unwrap_err(), + RPCError::InvalidData(_) + )); + + let mut min = encode_len(limit.min - 1); + let mut codec = SSZSnappyOutboundCodec::::new( + protocol_id.clone(), + max_rpc_size, + fork_context.clone(), + ); + assert!(matches!( + codec.decode_response(&mut min).unwrap_err(), + RPCError::InvalidData(_) + )); + + // Request limits + let limit = protocol_id.rpc_request_limits(&fork_context.spec); + let mut max = encode_len(limit.max + 1); + let mut codec = SSZSnappyOutboundCodec::::new( + protocol_id.clone(), + max_rpc_size, + fork_context.clone(), + ); + assert!(matches!( + codec.decode_response(&mut max).unwrap_err(), + RPCError::InvalidData(_) + )); + + let mut min = encode_len(limit.min - 1); + let mut codec = + SSZSnappyOutboundCodec::::new(protocol_id, max_rpc_size, fork_context); + assert!(matches!( + codec.decode_response(&mut min).unwrap_err(), + RPCError::InvalidData(_) + )); + } } diff --git a/beacon_node/lighthouse_network/src/rpc/codec/base.rs b/beacon_node/lighthouse_network/src/rpc/codec/base.rs deleted file mode 100644 index 4b9e8d5097..0000000000 --- a/beacon_node/lighthouse_network/src/rpc/codec/base.rs +++ /dev/null @@ -1,334 +0,0 
@@ -//! This handles the various supported encoding mechanism for the Eth 2.0 RPC. - -use crate::rpc::methods::ErrorType; -use crate::rpc::{InboundRequest, OutboundRequest, RPCCodedResponse, RPCResponse}; -use libp2p::bytes::BufMut; -use libp2p::bytes::BytesMut; -use std::marker::PhantomData; -use tokio_util::codec::{Decoder, Encoder}; -use types::EthSpec; - -pub trait OutboundCodec: Encoder + Decoder { - type CodecErrorType; - - fn decode_error( - &mut self, - src: &mut BytesMut, - ) -> Result, ::Error>; -} - -/* Global Inbound Codec */ -// This deals with Decoding RPC Requests from other peers and encoding our responses - -pub struct BaseInboundCodec -where - TCodec: Encoder> + Decoder, - E: EthSpec, -{ - /// Inner codec for handling various encodings - inner: TCodec, - phantom: PhantomData, -} - -impl BaseInboundCodec -where - TCodec: Encoder> + Decoder, - E: EthSpec, -{ - pub fn new(codec: TCodec) -> Self { - BaseInboundCodec { - inner: codec, - phantom: PhantomData, - } - } -} - -/* Global Outbound Codec */ -// This deals with Decoding RPC Responses from other peers and encoding our requests -pub struct BaseOutboundCodec -where - TOutboundCodec: OutboundCodec>, - E: EthSpec, -{ - /// Inner codec for handling various encodings. - inner: TOutboundCodec, - /// Keeps track of the current response code for a chunk. 
- current_response_code: Option, - phantom: PhantomData, -} - -impl BaseOutboundCodec -where - E: EthSpec, - TOutboundCodec: OutboundCodec>, -{ - pub fn new(codec: TOutboundCodec) -> Self { - BaseOutboundCodec { - inner: codec, - current_response_code: None, - phantom: PhantomData, - } - } -} - -/* Implementation of the Encoding/Decoding for the global codecs */ - -/* Base Inbound Codec */ - -// This Encodes RPC Responses sent to external peers -impl Encoder> for BaseInboundCodec -where - E: EthSpec, - TCodec: Decoder + Encoder>, -{ - type Error = >>::Error; - - fn encode(&mut self, item: RPCCodedResponse, dst: &mut BytesMut) -> Result<(), Self::Error> { - dst.clear(); - dst.reserve(1); - dst.put_u8( - item.as_u8() - .expect("Should never encode a stream termination"), - ); - self.inner.encode(item, dst) - } -} - -// This Decodes RPC Requests from external peers -impl Decoder for BaseInboundCodec -where - E: EthSpec, - TCodec: Encoder> + Decoder>, -{ - type Item = InboundRequest; - type Error = ::Error; - - fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { - self.inner.decode(src) - } -} - -/* Base Outbound Codec */ - -// This Encodes RPC Requests sent to external peers -impl Encoder> for BaseOutboundCodec -where - E: EthSpec, - TCodec: OutboundCodec> + Encoder>, -{ - type Error = >>::Error; - - fn encode(&mut self, item: OutboundRequest, dst: &mut BytesMut) -> Result<(), Self::Error> { - self.inner.encode(item, dst) - } -} - -// This decodes RPC Responses received from external peers -impl Decoder for BaseOutboundCodec -where - E: EthSpec, - TCodec: OutboundCodec, CodecErrorType = ErrorType> - + Decoder>, -{ - type Item = RPCCodedResponse; - type Error = ::Error; - - fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { - // if we have only received the response code, wait for more bytes - if src.len() <= 1 { - return Ok(None); - } - // using the response code determine which kind of payload needs to be decoded. 
- let response_code = self.current_response_code.unwrap_or_else(|| { - let resp_code = src.split_to(1)[0]; - self.current_response_code = Some(resp_code); - resp_code - }); - - let inner_result = { - if RPCCodedResponse::::is_response(response_code) { - // decode an actual response and mutates the buffer if enough bytes have been read - // returning the result. - self.inner - .decode(src) - .map(|r| r.map(RPCCodedResponse::Success)) - } else { - // decode an error - self.inner - .decode_error(src) - .map(|r| r.map(|resp| RPCCodedResponse::from_error(response_code, resp))) - } - }; - // if the inner decoder was capable of decoding a chunk, we need to reset the current - // response code for the next chunk - if let Ok(Some(_)) = inner_result { - self.current_response_code = None; - } - // return the result - inner_result - } -} - -#[cfg(test)] -mod tests { - use super::super::ssz_snappy::*; - use super::*; - use crate::rpc::protocol::*; - - use std::sync::Arc; - use types::{Epoch, FixedBytesExtended, ForkContext, ForkName, Hash256, Slot}; - use unsigned_varint::codec::Uvi; - - type Spec = types::MainnetEthSpec; - - fn fork_context(fork_name: ForkName) -> ForkContext { - let mut chain_spec = Spec::default_spec(); - let altair_fork_epoch = Epoch::new(1); - let bellatrix_fork_epoch = Epoch::new(2); - let capella_fork_epoch = Epoch::new(3); - let deneb_fork_epoch = Epoch::new(4); - let electra_fork_epoch = Epoch::new(5); - - chain_spec.altair_fork_epoch = Some(altair_fork_epoch); - chain_spec.bellatrix_fork_epoch = Some(bellatrix_fork_epoch); - chain_spec.capella_fork_epoch = Some(capella_fork_epoch); - chain_spec.deneb_fork_epoch = Some(deneb_fork_epoch); - chain_spec.electra_fork_epoch = Some(electra_fork_epoch); - - let current_slot = match fork_name { - ForkName::Base => Slot::new(0), - ForkName::Altair => altair_fork_epoch.start_slot(Spec::slots_per_epoch()), - ForkName::Bellatrix => bellatrix_fork_epoch.start_slot(Spec::slots_per_epoch()), - ForkName::Capella => 
capella_fork_epoch.start_slot(Spec::slots_per_epoch()), - ForkName::Deneb => deneb_fork_epoch.start_slot(Spec::slots_per_epoch()), - ForkName::Electra => electra_fork_epoch.start_slot(Spec::slots_per_epoch()), - }; - ForkContext::new::(current_slot, Hash256::zero(), &chain_spec) - } - - #[test] - fn test_decode_status_message() { - let message = hex::decode("0054ff060000734e615070590032000006e71e7b54989925efd6c9cbcb8ceb9b5f71216f5137282bf6a1e3b50f64e42d6c7fb347abe07eb0db8200000005029e2800").unwrap(); - let mut buf = BytesMut::new(); - buf.extend_from_slice(&message); - - let snappy_protocol_id = ProtocolId::new(SupportedProtocol::StatusV1, Encoding::SSZSnappy); - - let fork_context = Arc::new(fork_context(ForkName::Base)); - - let chain_spec = Spec::default_spec(); - - let mut snappy_outbound_codec = SSZSnappyOutboundCodec::::new( - snappy_protocol_id, - max_rpc_size(&fork_context, chain_spec.max_chunk_size as usize), - fork_context, - ); - - // remove response code - let mut snappy_buf = buf.clone(); - let _ = snappy_buf.split_to(1); - - // decode message just as snappy message - let _snappy_decoded_message = snappy_outbound_codec.decode(&mut snappy_buf).unwrap(); - - // build codecs for entire chunk - let mut snappy_base_outbound_codec = BaseOutboundCodec::new(snappy_outbound_codec); - - // decode message as ssz snappy chunk - let _snappy_decoded_chunk = snappy_base_outbound_codec.decode(&mut buf).unwrap(); - } - - #[test] - fn test_invalid_length_prefix() { - let mut uvi_codec: Uvi = Uvi::default(); - let mut dst = BytesMut::with_capacity(1024); - - // Smallest > 10 byte varint - let len: u128 = 2u128.pow(70); - - // Insert length-prefix - uvi_codec.encode(len, &mut dst).unwrap(); - - let snappy_protocol_id = ProtocolId::new(SupportedProtocol::StatusV1, Encoding::SSZSnappy); - - let fork_context = Arc::new(fork_context(ForkName::Base)); - - let chain_spec = Spec::default_spec(); - - let mut snappy_outbound_codec = SSZSnappyOutboundCodec::::new( - 
snappy_protocol_id, - max_rpc_size(&fork_context, chain_spec.max_chunk_size as usize), - fork_context, - ); - - let snappy_decoded_message = snappy_outbound_codec.decode(&mut dst).unwrap_err(); - - assert_eq!( - snappy_decoded_message, - RPCError::IoError("input bytes exceed maximum".to_string()), - "length-prefix of > 10 bytes is invalid" - ); - } - - #[test] - fn test_length_limits() { - fn encode_len(len: usize) -> BytesMut { - let mut uvi_codec: Uvi = Uvi::default(); - let mut dst = BytesMut::with_capacity(1024); - uvi_codec.encode(len, &mut dst).unwrap(); - dst - } - - let protocol_id = ProtocolId::new(SupportedProtocol::BlocksByRangeV1, Encoding::SSZSnappy); - - // Response limits - let fork_context = Arc::new(fork_context(ForkName::Base)); - - let chain_spec = Spec::default_spec(); - - let max_rpc_size = max_rpc_size(&fork_context, chain_spec.max_chunk_size as usize); - let limit = protocol_id.rpc_response_limits::(&fork_context); - let mut max = encode_len(limit.max + 1); - let mut codec = SSZSnappyOutboundCodec::::new( - protocol_id.clone(), - max_rpc_size, - fork_context.clone(), - ); - assert!(matches!( - codec.decode(&mut max).unwrap_err(), - RPCError::InvalidData(_) - )); - - let mut min = encode_len(limit.min - 1); - let mut codec = SSZSnappyOutboundCodec::::new( - protocol_id.clone(), - max_rpc_size, - fork_context.clone(), - ); - assert!(matches!( - codec.decode(&mut min).unwrap_err(), - RPCError::InvalidData(_) - )); - - // Request limits - let limit = protocol_id.rpc_request_limits(&fork_context.spec); - let mut max = encode_len(limit.max + 1); - let mut codec = SSZSnappyOutboundCodec::::new( - protocol_id.clone(), - max_rpc_size, - fork_context.clone(), - ); - assert!(matches!( - codec.decode(&mut max).unwrap_err(), - RPCError::InvalidData(_) - )); - - let mut min = encode_len(limit.min - 1); - let mut codec = - SSZSnappyOutboundCodec::::new(protocol_id, max_rpc_size, fork_context); - assert!(matches!( - codec.decode(&mut min).unwrap_err(), - 
RPCError::InvalidData(_) - )); - } -} diff --git a/beacon_node/lighthouse_network/src/rpc/codec/mod.rs b/beacon_node/lighthouse_network/src/rpc/codec/mod.rs deleted file mode 100644 index dbe99af5bf..0000000000 --- a/beacon_node/lighthouse_network/src/rpc/codec/mod.rs +++ /dev/null @@ -1,61 +0,0 @@ -pub(crate) mod base; -pub(crate) mod ssz_snappy; - -use self::base::{BaseInboundCodec, BaseOutboundCodec}; -use self::ssz_snappy::{SSZSnappyInboundCodec, SSZSnappyOutboundCodec}; -use crate::rpc::protocol::RPCError; -use crate::rpc::{InboundRequest, OutboundRequest, RPCCodedResponse}; -use libp2p::bytes::BytesMut; -use tokio_util::codec::{Decoder, Encoder}; -use types::EthSpec; - -// Known types of codecs -pub enum InboundCodec { - SSZSnappy(BaseInboundCodec, E>), -} - -pub enum OutboundCodec { - SSZSnappy(BaseOutboundCodec, E>), -} - -impl Encoder> for InboundCodec { - type Error = RPCError; - - fn encode(&mut self, item: RPCCodedResponse, dst: &mut BytesMut) -> Result<(), Self::Error> { - match self { - InboundCodec::SSZSnappy(codec) => codec.encode(item, dst), - } - } -} - -impl Decoder for InboundCodec { - type Item = InboundRequest; - type Error = RPCError; - - fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { - match self { - InboundCodec::SSZSnappy(codec) => codec.decode(src), - } - } -} - -impl Encoder> for OutboundCodec { - type Error = RPCError; - - fn encode(&mut self, item: OutboundRequest, dst: &mut BytesMut) -> Result<(), Self::Error> { - match self { - OutboundCodec::SSZSnappy(codec) => codec.encode(item, dst), - } - } -} - -impl Decoder for OutboundCodec { - type Item = RPCCodedResponse; - type Error = RPCError; - - fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { - match self { - OutboundCodec::SSZSnappy(codec) => codec.decode(src), - } - } -} diff --git a/beacon_node/lighthouse_network/src/rpc/outbound.rs b/beacon_node/lighthouse_network/src/rpc/outbound.rs index c67c7865ea..2bfa42ccac 100644 --- 
a/beacon_node/lighthouse_network/src/rpc/outbound.rs +++ b/beacon_node/lighthouse_network/src/rpc/outbound.rs @@ -2,9 +2,7 @@ use super::methods::*; use super::protocol::ProtocolId; use super::protocol::SupportedProtocol; use super::RPCError; -use crate::rpc::codec::{ - base::BaseOutboundCodec, ssz_snappy::SSZSnappyOutboundCodec, OutboundCodec, -}; +use crate::rpc::codec::SSZSnappyOutboundCodec; use crate::rpc::protocol::Encoding; use futures::future::BoxFuture; use futures::prelude::{AsyncRead, AsyncWrite}; @@ -183,7 +181,7 @@ impl OutboundRequest { /* Outbound upgrades */ -pub type OutboundFramed = Framed, OutboundCodec>; +pub type OutboundFramed = Framed, SSZSnappyOutboundCodec>; impl OutboundUpgrade for OutboundRequestContainer where @@ -199,12 +197,7 @@ where let socket = socket.compat(); let codec = match protocol.encoding { Encoding::SSZSnappy => { - let ssz_snappy_codec = BaseOutboundCodec::new(SSZSnappyOutboundCodec::new( - protocol, - self.max_rpc_size, - self.fork_context.clone(), - )); - OutboundCodec::SSZSnappy(ssz_snappy_codec) + SSZSnappyOutboundCodec::new(protocol, self.max_rpc_size, self.fork_context.clone()) } }; diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index f4bdf6450b..09a18e5de6 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -1,5 +1,5 @@ use super::methods::*; -use crate::rpc::codec::{base::BaseInboundCodec, ssz_snappy::SSZSnappyInboundCodec, InboundCodec}; +use crate::rpc::codec::SSZSnappyInboundCodec; use futures::future::BoxFuture; use futures::prelude::{AsyncRead, AsyncWrite}; use futures::{FutureExt, StreamExt}; @@ -647,7 +647,7 @@ pub fn rpc_data_column_limits() -> RpcLimits { pub type InboundOutput = (InboundRequest, InboundFramed); pub type InboundFramed = - Framed>>>, InboundCodec>; + Framed>>>, SSZSnappyInboundCodec>; impl InboundUpgrade for RPCProtocol where @@ -664,15 +664,13 @@ 
where // convert the socket to tokio compatible socket let socket = socket.compat(); let codec = match protocol.encoding { - Encoding::SSZSnappy => { - let ssz_snappy_codec = BaseInboundCodec::new(SSZSnappyInboundCodec::new( - protocol, - self.max_rpc_size, - self.fork_context.clone(), - )); - InboundCodec::SSZSnappy(ssz_snappy_codec) - } + Encoding::SSZSnappy => SSZSnappyInboundCodec::new( + protocol, + self.max_rpc_size, + self.fork_context.clone(), + ), }; + let mut timed_socket = TimeoutStream::new(socket); timed_socket.set_read_timeout(Some(self.ttfb_timeout)); From 815567a91a23c9d29fa72bb2c7e8a79b97129441 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Mon, 9 Sep 2024 21:02:16 +1000 Subject: [PATCH 13/66] Add more granularity to data column proof computation metric (#6371) * Add more granularity to data column proof computation metric to capture more variations between 0.25 and 1 second. --- beacon_node/beacon_chain/src/metrics.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index 79b2fc592b..f15b46fc4b 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -1651,7 +1651,7 @@ pub static DATA_COLUMN_SIDECAR_COMPUTATION: LazyLock> = Laz try_create_histogram_vec_with_buckets( "data_column_sidecar_computation_seconds", "Time taken to compute data column sidecar, including cells, proofs and inclusion proof", - Ok(vec![0.1, 0.25, 0.5, 1.0, 2.5, 5.0, 10.0]), + Ok(vec![0.1, 0.15, 0.25, 0.35, 0.5, 0.7, 1.0, 2.5, 5.0, 10.0]), &["blob_count"], ) }); From 51091a40fa97353815961eea130abbb89eb75d30 Mon Sep 17 00:00:00 2001 From: Lion - dapplion <35266934+dapplion@users.noreply.github.com> Date: Mon, 9 Sep 2024 14:18:30 +0200 Subject: [PATCH 14/66] Register processor queue length as histogram (#6012) * Register processor queue length as histogram * Merge branch 'unstable' of https://github.com/sigp/lighthouse into 
processor-queue-histogram --- beacon_node/beacon_processor/src/lib.rs | 453 +++++++++--------- beacon_node/beacon_processor/src/metrics.rs | 163 +------ .../src/network_beacon_processor/tests.rs | 97 ++-- beacon_node/network/src/router.rs | 2 +- .../network/src/sync/block_lookups/tests.rs | 14 +- .../network/src/sync/range_sync/range.rs | 7 +- common/lighthouse_metrics/src/lib.rs | 6 + 7 files changed, 319 insertions(+), 423 deletions(-) diff --git a/beacon_node/beacon_processor/src/lib.rs b/beacon_node/beacon_processor/src/lib.rs index f506f0bb94..cd5a1d6cff 100644 --- a/beacon_node/beacon_processor/src/lib.rs +++ b/beacon_node/beacon_processor/src/lib.rs @@ -57,6 +57,7 @@ use std::pin::Pin; use std::sync::Arc; use std::task::Context; use std::time::Duration; +use strum::IntoStaticStr; use task_executor::TaskExecutor; use tokio::sync::mpsc; use tokio::sync::mpsc::error::TrySendError; @@ -219,46 +220,6 @@ const DEFAULT_MAX_GOSSIP_AGGREGATE_BATCH_SIZE: usize = 64; /// Unique IDs used for metrics and testing. 
pub const WORKER_FREED: &str = "worker_freed"; pub const NOTHING_TO_DO: &str = "nothing_to_do"; -pub const GOSSIP_ATTESTATION: &str = "gossip_attestation"; -pub const GOSSIP_ATTESTATION_BATCH: &str = "gossip_attestation_batch"; -pub const GOSSIP_AGGREGATE: &str = "gossip_aggregate"; -pub const GOSSIP_AGGREGATE_BATCH: &str = "gossip_aggregate_batch"; -pub const GOSSIP_BLOCK: &str = "gossip_block"; -pub const GOSSIP_BLOBS_SIDECAR: &str = "gossip_blobs_sidecar"; -pub const GOSSIP_BLOBS_COLUMN_SIDECAR: &str = "gossip_blobs_column_sidecar"; -pub const DELAYED_IMPORT_BLOCK: &str = "delayed_import_block"; -pub const GOSSIP_VOLUNTARY_EXIT: &str = "gossip_voluntary_exit"; -pub const GOSSIP_PROPOSER_SLASHING: &str = "gossip_proposer_slashing"; -pub const GOSSIP_ATTESTER_SLASHING: &str = "gossip_attester_slashing"; -pub const GOSSIP_SYNC_SIGNATURE: &str = "gossip_sync_signature"; -pub const GOSSIP_SYNC_CONTRIBUTION: &str = "gossip_sync_contribution"; -pub const GOSSIP_LIGHT_CLIENT_FINALITY_UPDATE: &str = "light_client_finality_update"; -pub const GOSSIP_LIGHT_CLIENT_OPTIMISTIC_UPDATE: &str = "light_client_optimistic_update"; -pub const RPC_BLOCK: &str = "rpc_block"; -pub const IGNORED_RPC_BLOCK: &str = "ignored_rpc_block"; -pub const RPC_BLOBS: &str = "rpc_blob"; -pub const RPC_CUSTODY_COLUMN: &str = "rpc_custody_column"; -pub const RPC_VERIFY_DATA_COLUMNS: &str = "rpc_verify_data_columns"; -pub const SAMPLING_RESULT: &str = "sampling_result"; -pub const CHAIN_SEGMENT: &str = "chain_segment"; -pub const CHAIN_SEGMENT_BACKFILL: &str = "chain_segment_backfill"; -pub const STATUS_PROCESSING: &str = "status_processing"; -pub const BLOCKS_BY_RANGE_REQUEST: &str = "blocks_by_range_request"; -pub const BLOCKS_BY_ROOTS_REQUEST: &str = "blocks_by_roots_request"; -pub const BLOBS_BY_RANGE_REQUEST: &str = "blobs_by_range_request"; -pub const BLOBS_BY_ROOTS_REQUEST: &str = "blobs_by_roots_request"; -pub const DATA_COLUMNS_BY_ROOTS_REQUEST: &str = "data_columns_by_roots_request"; -pub 
const DATA_COLUMNS_BY_RANGE_REQUEST: &str = "data_columns_by_range_request"; -pub const LIGHT_CLIENT_BOOTSTRAP_REQUEST: &str = "light_client_bootstrap"; -pub const LIGHT_CLIENT_FINALITY_UPDATE_REQUEST: &str = "light_client_finality_update_request"; -pub const LIGHT_CLIENT_OPTIMISTIC_UPDATE_REQUEST: &str = "light_client_optimistic_update_request"; -pub const UNKNOWN_BLOCK_ATTESTATION: &str = "unknown_block_attestation"; -pub const UNKNOWN_BLOCK_AGGREGATE: &str = "unknown_block_aggregate"; -pub const UNKNOWN_LIGHT_CLIENT_UPDATE: &str = "unknown_light_client_update"; -pub const UNKNOWN_BLOCK_SAMPLING_REQUEST: &str = "unknown_block_sampling_request"; -pub const GOSSIP_BLS_TO_EXECUTION_CHANGE: &str = "gossip_bls_to_execution_change"; -pub const API_REQUEST_P0: &str = "api_request_p0"; -pub const API_REQUEST_P1: &str = "api_request_p1"; #[derive(Clone, PartialEq, Debug, Serialize, Deserialize)] pub struct BeaconProcessorConfig { @@ -454,9 +415,14 @@ pub struct WorkEvent { } impl WorkEvent { + /// Get a representation of the type of work this `WorkEvent` contains. + pub fn work_type(&self) -> WorkType { + self.work.to_type() + } + /// Get a `str` representation of the type of work this `WorkEvent` contains. 
- pub fn work_type(&self) -> &'static str { - self.work.str_id() + pub fn work_type_str(&self) -> &'static str { + self.work_type().into() } } @@ -555,7 +521,7 @@ impl BeaconProcessorSend { Err(e) => { metrics::inc_counter_vec( &metrics::BEACON_PROCESSOR_SEND_ERROR_PER_WORK_TYPE, - &[work_type], + &[work_type.into()], ); Err(e) } @@ -651,54 +617,109 @@ pub enum Work { impl fmt::Debug for Work { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}", self.str_id()) + write!(f, "{}", Into::<&'static str>::into(self.to_type())) } } +#[derive(IntoStaticStr, PartialEq, Eq, Debug)] +#[strum(serialize_all = "snake_case")] +pub enum WorkType { + GossipAttestation, + UnknownBlockAttestation, + GossipAttestationBatch, + GossipAggregate, + UnknownBlockAggregate, + UnknownLightClientOptimisticUpdate, + UnknownBlockSamplingRequest, + GossipAggregateBatch, + GossipBlock, + GossipBlobSidecar, + GossipDataColumnSidecar, + DelayedImportBlock, + GossipVoluntaryExit, + GossipProposerSlashing, + GossipAttesterSlashing, + GossipSyncSignature, + GossipSyncContribution, + GossipLightClientFinalityUpdate, + GossipLightClientOptimisticUpdate, + RpcBlock, + RpcBlobs, + RpcCustodyColumn, + RpcVerifyDataColumn, + SamplingResult, + IgnoredRpcBlock, + ChainSegment, + ChainSegmentBackfill, + Status, + BlocksByRangeRequest, + BlocksByRootsRequest, + BlobsByRangeRequest, + BlobsByRootsRequest, + DataColumnsByRootsRequest, + DataColumnsByRangeRequest, + GossipBlsToExecutionChange, + LightClientBootstrapRequest, + LightClientOptimisticUpdateRequest, + LightClientFinalityUpdateRequest, + ApiRequestP0, + ApiRequestP1, +} + impl Work { - /// Provides a `&str` that uniquely identifies each enum variant. fn str_id(&self) -> &'static str { + self.to_type().into() + } + + /// Provides a `&str` that uniquely identifies each enum variant. + fn to_type(&self) -> WorkType { match self { - Work::GossipAttestation { .. } => GOSSIP_ATTESTATION, - Work::GossipAttestationBatch { .. 
} => GOSSIP_ATTESTATION_BATCH, - Work::GossipAggregate { .. } => GOSSIP_AGGREGATE, - Work::GossipAggregateBatch { .. } => GOSSIP_AGGREGATE_BATCH, - Work::GossipBlock(_) => GOSSIP_BLOCK, - Work::GossipBlobSidecar(_) => GOSSIP_BLOBS_SIDECAR, - Work::GossipDataColumnSidecar(_) => GOSSIP_BLOBS_COLUMN_SIDECAR, - Work::DelayedImportBlock { .. } => DELAYED_IMPORT_BLOCK, - Work::GossipVoluntaryExit(_) => GOSSIP_VOLUNTARY_EXIT, - Work::GossipProposerSlashing(_) => GOSSIP_PROPOSER_SLASHING, - Work::GossipAttesterSlashing(_) => GOSSIP_ATTESTER_SLASHING, - Work::GossipSyncSignature(_) => GOSSIP_SYNC_SIGNATURE, - Work::GossipSyncContribution(_) => GOSSIP_SYNC_CONTRIBUTION, - Work::GossipLightClientFinalityUpdate(_) => GOSSIP_LIGHT_CLIENT_FINALITY_UPDATE, - Work::GossipLightClientOptimisticUpdate(_) => GOSSIP_LIGHT_CLIENT_OPTIMISTIC_UPDATE, - Work::RpcBlock { .. } => RPC_BLOCK, - Work::RpcBlobs { .. } => RPC_BLOBS, - Work::RpcCustodyColumn { .. } => RPC_CUSTODY_COLUMN, - Work::RpcVerifyDataColumn(_) => RPC_VERIFY_DATA_COLUMNS, - Work::SamplingResult(_) => SAMPLING_RESULT, - Work::IgnoredRpcBlock { .. } => IGNORED_RPC_BLOCK, - Work::ChainSegment { .. } => CHAIN_SEGMENT, - Work::ChainSegmentBackfill(_) => CHAIN_SEGMENT_BACKFILL, - Work::Status(_) => STATUS_PROCESSING, - Work::BlocksByRangeRequest(_) => BLOCKS_BY_RANGE_REQUEST, - Work::BlocksByRootsRequest(_) => BLOCKS_BY_ROOTS_REQUEST, - Work::BlobsByRangeRequest(_) => BLOBS_BY_RANGE_REQUEST, - Work::BlobsByRootsRequest(_) => BLOBS_BY_ROOTS_REQUEST, - Work::DataColumnsByRootsRequest(_) => DATA_COLUMNS_BY_ROOTS_REQUEST, - Work::DataColumnsByRangeRequest(_) => DATA_COLUMNS_BY_RANGE_REQUEST, - Work::LightClientBootstrapRequest(_) => LIGHT_CLIENT_BOOTSTRAP_REQUEST, - Work::LightClientOptimisticUpdateRequest(_) => LIGHT_CLIENT_OPTIMISTIC_UPDATE_REQUEST, - Work::LightClientFinalityUpdateRequest(_) => LIGHT_CLIENT_FINALITY_UPDATE_REQUEST, - Work::UnknownBlockAttestation { .. 
} => UNKNOWN_BLOCK_ATTESTATION, - Work::UnknownBlockAggregate { .. } => UNKNOWN_BLOCK_AGGREGATE, - Work::UnknownLightClientOptimisticUpdate { .. } => UNKNOWN_LIGHT_CLIENT_UPDATE, - Work::UnknownBlockSamplingRequest { .. } => UNKNOWN_BLOCK_SAMPLING_REQUEST, - Work::GossipBlsToExecutionChange(_) => GOSSIP_BLS_TO_EXECUTION_CHANGE, - Work::ApiRequestP0 { .. } => API_REQUEST_P0, - Work::ApiRequestP1 { .. } => API_REQUEST_P1, + Work::GossipAttestation { .. } => WorkType::GossipAttestation, + Work::GossipAttestationBatch { .. } => WorkType::GossipAttestationBatch, + Work::GossipAggregate { .. } => WorkType::GossipAggregate, + Work::GossipAggregateBatch { .. } => WorkType::GossipAggregateBatch, + Work::GossipBlock(_) => WorkType::GossipBlock, + Work::GossipBlobSidecar(_) => WorkType::GossipBlobSidecar, + Work::GossipDataColumnSidecar(_) => WorkType::GossipDataColumnSidecar, + Work::DelayedImportBlock { .. } => WorkType::DelayedImportBlock, + Work::GossipVoluntaryExit(_) => WorkType::GossipVoluntaryExit, + Work::GossipProposerSlashing(_) => WorkType::GossipProposerSlashing, + Work::GossipAttesterSlashing(_) => WorkType::GossipAttesterSlashing, + Work::GossipSyncSignature(_) => WorkType::GossipSyncSignature, + Work::GossipSyncContribution(_) => WorkType::GossipSyncContribution, + Work::GossipLightClientFinalityUpdate(_) => WorkType::GossipLightClientFinalityUpdate, + Work::GossipLightClientOptimisticUpdate(_) => { + WorkType::GossipLightClientOptimisticUpdate + } + Work::GossipBlsToExecutionChange(_) => WorkType::GossipBlsToExecutionChange, + Work::RpcBlock { .. } => WorkType::RpcBlock, + Work::RpcBlobs { .. } => WorkType::RpcBlobs, + Work::RpcCustodyColumn { .. } => WorkType::RpcCustodyColumn, + Work::RpcVerifyDataColumn { .. } => WorkType::RpcVerifyDataColumn, + Work::SamplingResult { .. } => WorkType::SamplingResult, + Work::IgnoredRpcBlock { .. } => WorkType::IgnoredRpcBlock, + Work::ChainSegment { .. 
} => WorkType::ChainSegment, + Work::ChainSegmentBackfill(_) => WorkType::ChainSegmentBackfill, + Work::Status(_) => WorkType::Status, + Work::BlocksByRangeRequest(_) => WorkType::BlocksByRangeRequest, + Work::BlocksByRootsRequest(_) => WorkType::BlocksByRootsRequest, + Work::BlobsByRangeRequest(_) => WorkType::BlobsByRangeRequest, + Work::BlobsByRootsRequest(_) => WorkType::BlobsByRootsRequest, + Work::DataColumnsByRootsRequest(_) => WorkType::DataColumnsByRootsRequest, + Work::DataColumnsByRangeRequest(_) => WorkType::DataColumnsByRangeRequest, + Work::LightClientBootstrapRequest(_) => WorkType::LightClientBootstrapRequest, + Work::LightClientOptimisticUpdateRequest(_) => { + WorkType::LightClientOptimisticUpdateRequest + } + Work::LightClientFinalityUpdateRequest(_) => WorkType::LightClientFinalityUpdateRequest, + Work::UnknownBlockAttestation { .. } => WorkType::UnknownBlockAttestation, + Work::UnknownBlockAggregate { .. } => WorkType::UnknownBlockAggregate, + Work::UnknownBlockSamplingRequest { .. } => WorkType::UnknownBlockSamplingRequest, + Work::UnknownLightClientOptimisticUpdate { .. } => { + WorkType::UnknownLightClientOptimisticUpdate + } + Work::ApiRequestP0 { .. } => WorkType::ApiRequestP0, + Work::ApiRequestP1 { .. } => WorkType::ApiRequestP1, } } } @@ -987,7 +1008,7 @@ impl BeaconProcessor { .map_or(false, |event| event.drop_during_sync); let idle_tx = idle_tx.clone(); - match work_event { + let modified_queue_id = match work_event { // There is no new work event, but we are able to spawn a new worker. // // We don't check the `work.drop_during_sync` here. We assume that if it made @@ -995,38 +1016,40 @@ impl BeaconProcessor { None if can_spawn => { // Check for chain segments first, they're the most efficient way to get // blocks into the system. 
- if let Some(item) = chain_segment_queue.pop() { - self.spawn_worker(item, idle_tx); + let work_event: Option> = if let Some(item) = + chain_segment_queue.pop() + { + Some(item) // Check sync blocks before gossip blocks, since we've already explicitly // requested these blocks. } else if let Some(item) = rpc_block_queue.pop() { - self.spawn_worker(item, idle_tx); + Some(item) } else if let Some(item) = rpc_blob_queue.pop() { - self.spawn_worker(item, idle_tx); + Some(item) } else if let Some(item) = rpc_custody_column_queue.pop() { - self.spawn_worker(item, idle_tx); + Some(item) // TODO(das): decide proper prioritization for sampling columns } else if let Some(item) = rpc_custody_column_queue.pop() { - self.spawn_worker(item, idle_tx); + Some(item) } else if let Some(item) = rpc_verify_data_column_queue.pop() { - self.spawn_worker(item, idle_tx); + Some(item) } else if let Some(item) = sampling_result_queue.pop() { - self.spawn_worker(item, idle_tx); + Some(item) // Check delayed blocks before gossip blocks, the gossip blocks might rely // on the delayed ones. } else if let Some(item) = delayed_block_queue.pop() { - self.spawn_worker(item, idle_tx); + Some(item) // Check gossip blocks before gossip attestations, since a block might be // required to verify some attestations. } else if let Some(item) = gossip_block_queue.pop() { - self.spawn_worker(item, idle_tx); + Some(item) } else if let Some(item) = gossip_blob_queue.pop() { - self.spawn_worker(item, idle_tx); + Some(item) } else if let Some(item) = gossip_data_column_queue.pop() { - self.spawn_worker(item, idle_tx); + Some(item) // Check the priority 0 API requests after blocks and blobs, but before attestations. 
} else if let Some(item) = api_request_p0_queue.pop() { - self.spawn_worker(item, idle_tx); + Some(item) // Check the aggregates, *then* the unaggregates since we assume that // aggregates are more valuable to local validators and effectively give us // more information with less signature verification time. @@ -1038,9 +1061,7 @@ impl BeaconProcessor { if batch_size < 2 { // One single aggregate is in the queue, process it individually. - if let Some(item) = aggregate_queue.pop() { - self.spawn_worker(item, idle_tx); - } + aggregate_queue.pop() } else { // Collect two or more aggregates into a batch, so they can take // advantage of batch signature verification. @@ -1071,13 +1092,10 @@ impl BeaconProcessor { if let Some(process_batch) = process_batch_opt { // Process all aggregates with a single worker. - self.spawn_worker( - Work::GossipAggregateBatch { - aggregates, - process_batch, - }, - idle_tx, - ) + Some(Work::GossipAggregateBatch { + aggregates, + process_batch, + }) } else { // There is no good reason for this to // happen, it is a serious logic error. @@ -1085,6 +1103,7 @@ impl BeaconProcessor { // work items exist, we should always have a // work closure at this point. crit!(self.log, "Missing aggregate work"); + None } } // Check the unaggregated attestation queue. @@ -1098,9 +1117,7 @@ impl BeaconProcessor { if batch_size < 2 { // One single attestation is in the queue, process it individually. - if let Some(item) = attestation_queue.pop() { - self.spawn_worker(item, idle_tx); - } + attestation_queue.pop() } else { // Collect two or more attestations into a batch, so they can take // advantage of batch signature verification. @@ -1132,13 +1149,10 @@ impl BeaconProcessor { if let Some(process_batch) = process_batch_opt { // Process all attestations with a single worker. 
- self.spawn_worker( - Work::GossipAttestationBatch { - attestations, - process_batch, - }, - idle_tx, - ) + Some(Work::GossipAttestationBatch { + attestations, + process_batch, + }) } else { // There is no good reason for this to // happen, it is a serious logic error. @@ -1146,71 +1160,72 @@ impl BeaconProcessor { // work items exist, we should always have a // work closure at this point. crit!(self.log, "Missing attestations work"); + None } } // Check sync committee messages after attestations as their rewards are lesser // and they don't influence fork choice. } else if let Some(item) = sync_contribution_queue.pop() { - self.spawn_worker(item, idle_tx); + Some(item) } else if let Some(item) = sync_message_queue.pop() { - self.spawn_worker(item, idle_tx); + Some(item) // Aggregates and unaggregates queued for re-processing are older and we // care about fresher ones, so check those first. } else if let Some(item) = unknown_block_aggregate_queue.pop() { - self.spawn_worker(item, idle_tx); + Some(item) } else if let Some(item) = unknown_block_attestation_queue.pop() { - self.spawn_worker(item, idle_tx); + Some(item) // Check RPC methods next. 
Status messages are needed for sync so // prioritize them over syncing requests from other peers (BlocksByRange // and BlocksByRoot) } else if let Some(item) = status_queue.pop() { - self.spawn_worker(item, idle_tx); + Some(item) } else if let Some(item) = bbrange_queue.pop() { - self.spawn_worker(item, idle_tx); + Some(item) } else if let Some(item) = bbroots_queue.pop() { - self.spawn_worker(item, idle_tx); + Some(item) } else if let Some(item) = blbrange_queue.pop() { - self.spawn_worker(item, idle_tx); + Some(item) } else if let Some(item) = blbroots_queue.pop() { - self.spawn_worker(item, idle_tx); + Some(item) } else if let Some(item) = dcbroots_queue.pop() { - self.spawn_worker(item, idle_tx); + Some(item) } else if let Some(item) = dcbrange_queue.pop() { - self.spawn_worker(item, idle_tx); + Some(item) // Prioritize sampling requests after block syncing requests } else if let Some(item) = unknown_block_sampling_request_queue.pop() { - self.spawn_worker(item, idle_tx); + Some(item) // Check slashings after all other consensus messages so we prioritize // following head. // // Check attester slashings before proposer slashings since they have the // potential to slash multiple validators at once. } else if let Some(item) = gossip_attester_slashing_queue.pop() { - self.spawn_worker(item, idle_tx); + Some(item) } else if let Some(item) = gossip_proposer_slashing_queue.pop() { - self.spawn_worker(item, idle_tx); + Some(item) // Check exits and address changes late since our validators don't get // rewards from them. } else if let Some(item) = gossip_voluntary_exit_queue.pop() { - self.spawn_worker(item, idle_tx); + Some(item) } else if let Some(item) = gossip_bls_to_execution_change_queue.pop() { - self.spawn_worker(item, idle_tx); + Some(item) // Check the priority 1 API requests after we've // processed all the interesting things from the network // and things required for us to stay in good repute // with our P2P peers. 
} else if let Some(item) = api_request_p1_queue.pop() { - self.spawn_worker(item, idle_tx); + Some(item) // Handle backfill sync chain segments. } else if let Some(item) = backfill_chain_segment.pop() { - self.spawn_worker(item, idle_tx); + Some(item) // Handle light client requests. } else if let Some(item) = lc_bootstrap_queue.pop() { - self.spawn_worker(item, idle_tx); + Some(item) } else if let Some(item) = lc_optimistic_update_queue.pop() { - self.spawn_worker(item, idle_tx); + Some(item) } else if let Some(item) = lc_finality_update_queue.pop() { - self.spawn_worker(item, idle_tx); + Some(item) // This statement should always be the final else statement. } else { // Let the journal know that a worker is freed and there's nothing else @@ -1220,6 +1235,15 @@ impl BeaconProcessor { // during testing. let _ = work_journal_tx.try_send(NOTHING_TO_DO); } + None + }; + + if let Some(work_event) = work_event { + let work_type = work_event.to_type(); + self.spawn_worker(work_event, idle_tx); + Some(work_type) + } else { + None } } // There is no new work event and we are unable to spawn a new worker. @@ -1231,6 +1255,7 @@ impl BeaconProcessor { "Unexpected gossip processor condition"; "msg" => "no new work and cannot spawn worker" ); + None } // The chain is syncing and this event should be dropped during sync. Some(work_event) @@ -1248,11 +1273,13 @@ impl BeaconProcessor { "msg" => "chain is syncing", "work_id" => work_id ); + None } // There is a new work event and the chain is not syncing. Process it or queue // it. Some(WorkEvent { work, .. }) => { let work_id = work.str_id(); + let work_type = work.to_type(); match work { _ if can_spawn => self.spawn_worker(work, idle_tx), @@ -1371,94 +1398,76 @@ impl BeaconProcessor { Work::ApiRequestP1 { .. 
} => { api_request_p1_queue.push(work, work_id, &self.log) } - } + }; + Some(work_type) } - } + }; metrics::set_gauge( &metrics::BEACON_PROCESSOR_WORKERS_ACTIVE_TOTAL, self.current_workers as i64, ); - metrics::set_gauge( - &metrics::BEACON_PROCESSOR_UNAGGREGATED_ATTESTATION_QUEUE_TOTAL, - attestation_queue.len() as i64, - ); - metrics::set_gauge( - &metrics::BEACON_PROCESSOR_AGGREGATED_ATTESTATION_QUEUE_TOTAL, - aggregate_queue.len() as i64, - ); - metrics::set_gauge( - &metrics::BEACON_PROCESSOR_SYNC_MESSAGE_QUEUE_TOTAL, - sync_message_queue.len() as i64, - ); - metrics::set_gauge( - &metrics::BEACON_PROCESSOR_SYNC_CONTRIBUTION_QUEUE_TOTAL, - sync_contribution_queue.len() as i64, - ); - metrics::set_gauge( - &metrics::BEACON_PROCESSOR_GOSSIP_BLOCK_QUEUE_TOTAL, - gossip_block_queue.len() as i64, - ); - metrics::set_gauge( - &metrics::BEACON_PROCESSOR_GOSSIP_BLOB_QUEUE_TOTAL, - gossip_blob_queue.len() as i64, - ); - metrics::set_gauge( - &metrics::BEACON_PROCESSOR_GOSSIP_DATA_COLUMN_QUEUE_TOTAL, - gossip_data_column_queue.len() as i64, - ); - metrics::set_gauge( - &metrics::BEACON_PROCESSOR_RPC_BLOCK_QUEUE_TOTAL, - rpc_block_queue.len() as i64, - ); - metrics::set_gauge( - &metrics::BEACON_PROCESSOR_RPC_BLOB_QUEUE_TOTAL, - rpc_blob_queue.len() as i64, - ); - metrics::set_gauge( - &metrics::BEACON_PROCESSOR_RPC_CUSTODY_COLUMN_QUEUE_TOTAL, - rpc_custody_column_queue.len() as i64, - ); - metrics::set_gauge( - &metrics::BEACON_PROCESSOR_RPC_VERIFY_DATA_COLUMN_QUEUE_TOTAL, - rpc_verify_data_column_queue.len() as i64, - ); - metrics::set_gauge( - &metrics::BEACON_PROCESSOR_SAMPLING_RESULT_QUEUE_TOTAL, - sampling_result_queue.len() as i64, - ); - metrics::set_gauge( - &metrics::BEACON_PROCESSOR_CHAIN_SEGMENT_QUEUE_TOTAL, - chain_segment_queue.len() as i64, - ); - metrics::set_gauge( - &metrics::BEACON_PROCESSOR_BACKFILL_CHAIN_SEGMENT_QUEUE_TOTAL, - backfill_chain_segment.len() as i64, - ); - metrics::set_gauge( - &metrics::BEACON_PROCESSOR_EXIT_QUEUE_TOTAL, - 
gossip_voluntary_exit_queue.len() as i64, - ); - metrics::set_gauge( - &metrics::BEACON_PROCESSOR_PROPOSER_SLASHING_QUEUE_TOTAL, - gossip_proposer_slashing_queue.len() as i64, - ); - metrics::set_gauge( - &metrics::BEACON_PROCESSOR_ATTESTER_SLASHING_QUEUE_TOTAL, - gossip_attester_slashing_queue.len() as i64, - ); - metrics::set_gauge( - &metrics::BEACON_PROCESSOR_BLS_TO_EXECUTION_CHANGE_QUEUE_TOTAL, - gossip_bls_to_execution_change_queue.len() as i64, - ); - metrics::set_gauge( - &metrics::BEACON_PROCESSOR_API_REQUEST_P0_QUEUE_TOTAL, - api_request_p0_queue.len() as i64, - ); - metrics::set_gauge( - &metrics::BEACON_PROCESSOR_API_REQUEST_P1_QUEUE_TOTAL, - api_request_p1_queue.len() as i64, - ); + + if let Some(modified_queue_id) = modified_queue_id { + let queue_len = match modified_queue_id { + WorkType::GossipAttestation => aggregate_queue.len(), + WorkType::UnknownBlockAttestation => unknown_block_attestation_queue.len(), + WorkType::GossipAttestationBatch => 0, // No queue + WorkType::GossipAggregate => aggregate_queue.len(), + WorkType::UnknownBlockAggregate => unknown_block_aggregate_queue.len(), + WorkType::UnknownLightClientOptimisticUpdate => { + unknown_light_client_update_queue.len() + } + WorkType::UnknownBlockSamplingRequest => { + unknown_block_sampling_request_queue.len() + } + WorkType::GossipAggregateBatch => 0, // No queue + WorkType::GossipBlock => gossip_block_queue.len(), + WorkType::GossipBlobSidecar => gossip_blob_queue.len(), + WorkType::GossipDataColumnSidecar => gossip_data_column_queue.len(), + WorkType::DelayedImportBlock => delayed_block_queue.len(), + WorkType::GossipVoluntaryExit => gossip_voluntary_exit_queue.len(), + WorkType::GossipProposerSlashing => gossip_proposer_slashing_queue.len(), + WorkType::GossipAttesterSlashing => gossip_attester_slashing_queue.len(), + WorkType::GossipSyncSignature => sync_message_queue.len(), + WorkType::GossipSyncContribution => sync_contribution_queue.len(), + 
WorkType::GossipLightClientFinalityUpdate => finality_update_queue.len(), + WorkType::GossipLightClientOptimisticUpdate => { + optimistic_update_queue.len() + } + WorkType::RpcBlock => rpc_block_queue.len(), + WorkType::RpcBlobs | WorkType::IgnoredRpcBlock => rpc_blob_queue.len(), + WorkType::RpcCustodyColumn => rpc_custody_column_queue.len(), + WorkType::RpcVerifyDataColumn => rpc_verify_data_column_queue.len(), + WorkType::SamplingResult => sampling_result_queue.len(), + WorkType::ChainSegment => chain_segment_queue.len(), + WorkType::ChainSegmentBackfill => backfill_chain_segment.len(), + WorkType::Status => status_queue.len(), + WorkType::BlocksByRangeRequest => blbrange_queue.len(), + WorkType::BlocksByRootsRequest => blbroots_queue.len(), + WorkType::BlobsByRangeRequest => bbrange_queue.len(), + WorkType::BlobsByRootsRequest => bbroots_queue.len(), + WorkType::DataColumnsByRootsRequest => dcbroots_queue.len(), + WorkType::DataColumnsByRangeRequest => dcbrange_queue.len(), + WorkType::GossipBlsToExecutionChange => { + gossip_bls_to_execution_change_queue.len() + } + WorkType::LightClientBootstrapRequest => lc_bootstrap_queue.len(), + WorkType::LightClientOptimisticUpdateRequest => { + lc_optimistic_update_queue.len() + } + WorkType::LightClientFinalityUpdateRequest => { + lc_finality_update_queue.len() + } + WorkType::ApiRequestP0 => api_request_p0_queue.len(), + WorkType::ApiRequestP1 => api_request_p1_queue.len(), + }; + metrics::observe_vec( + &metrics::BEACON_PROCESSOR_QUEUE_LENGTH, + &[modified_queue_id.into()], + queue_len as f64, + ); + } if aggregate_queue.is_full() && aggregate_debounce.elapsed() { error!( diff --git a/beacon_node/beacon_processor/src/metrics.rs b/beacon_node/beacon_processor/src/metrics.rs index 8bc03cee6c..0a7bdba18d 100644 --- a/beacon_node/beacon_processor/src/metrics.rs +++ b/beacon_node/beacon_processor/src/metrics.rs @@ -62,163 +62,16 @@ pub static BEACON_PROCESSOR_EVENT_HANDLING_SECONDS: LazyLock> "Time spent handling a new 
message and allocating it to a queue or worker.", ) }); -// Gossip blocks. -pub static BEACON_PROCESSOR_GOSSIP_BLOCK_QUEUE_TOTAL: LazyLock> = - LazyLock::new(|| { - try_create_int_gauge( - "beacon_processor_gossip_block_queue_total", - "Count of blocks from gossip waiting to be verified.", - ) - }); -// Gossip blobs. -pub static BEACON_PROCESSOR_GOSSIP_BLOB_QUEUE_TOTAL: LazyLock> = - LazyLock::new(|| { - try_create_int_gauge( - "beacon_processor_gossip_blob_queue_total", - "Count of blobs from gossip waiting to be verified.", - ) - }); -// Gossip data column sidecars. -pub static BEACON_PROCESSOR_GOSSIP_DATA_COLUMN_QUEUE_TOTAL: LazyLock> = - LazyLock::new(|| { - try_create_int_gauge( - "beacon_processor_gossip_data_column_queue_total", - "Count of data column sidecars from gossip waiting to be verified.", - ) - }); -// Gossip Exits. -pub static BEACON_PROCESSOR_EXIT_QUEUE_TOTAL: LazyLock> = LazyLock::new(|| { - try_create_int_gauge( - "beacon_processor_exit_queue_total", - "Count of exits from gossip waiting to be verified.", +pub static BEACON_PROCESSOR_QUEUE_LENGTH: LazyLock> = LazyLock::new(|| { + try_create_histogram_vec_with_buckets( + "beacon_processor_work_event_queue_length", + "Count of work events in queue waiting to be processed.", + Ok(vec![ + 0.0, 1.0, 4.0, 16.0, 64.0, 256.0, 1024.0, 4096.0, 16384.0, 65536.0, + ]), + &["type"], ) }); -// Gossip proposer slashings. -pub static BEACON_PROCESSOR_PROPOSER_SLASHING_QUEUE_TOTAL: LazyLock> = - LazyLock::new(|| { - try_create_int_gauge( - "beacon_processor_proposer_slashing_queue_total", - "Count of proposer slashings from gossip waiting to be verified.", - ) - }); -// Gossip attester slashings. -pub static BEACON_PROCESSOR_ATTESTER_SLASHING_QUEUE_TOTAL: LazyLock> = - LazyLock::new(|| { - try_create_int_gauge( - "beacon_processor_attester_slashing_queue_total", - "Count of attester slashings from gossip waiting to be verified.", - ) - }); -// Gossip BLS to execution changes. 
-pub static BEACON_PROCESSOR_BLS_TO_EXECUTION_CHANGE_QUEUE_TOTAL: LazyLock> = - LazyLock::new(|| { - try_create_int_gauge( - "beacon_processor_bls_to_execution_change_queue_total", - "Count of address changes from gossip waiting to be verified.", - ) - }); -// Rpc blocks. -pub static BEACON_PROCESSOR_RPC_BLOCK_QUEUE_TOTAL: LazyLock> = - LazyLock::new(|| { - try_create_int_gauge( - "beacon_processor_rpc_block_queue_total", - "Count of blocks from the rpc waiting to be verified.", - ) - }); -// Rpc blobs. -pub static BEACON_PROCESSOR_RPC_BLOB_QUEUE_TOTAL: LazyLock> = - LazyLock::new(|| { - try_create_int_gauge( - "beacon_processor_rpc_blob_queue_total", - "Count of blobs from the rpc waiting to be verified.", - ) - }); -// Rpc custody data columns. -pub static BEACON_PROCESSOR_RPC_CUSTODY_COLUMN_QUEUE_TOTAL: LazyLock> = - LazyLock::new(|| { - try_create_int_gauge( - "beacon_processor_rpc_custody_column_queue_total", - "Count of custody columns from the rpc waiting to be imported.", - ) - }); -// Rpc verify data columns -pub static BEACON_PROCESSOR_RPC_VERIFY_DATA_COLUMN_QUEUE_TOTAL: LazyLock> = - LazyLock::new(|| { - try_create_int_gauge( - "beacon_processor_rpc_verify_data_column_queue_total", - "Count of data columns from the rpc waiting to be verified.", - ) - }); -// Sampling result -pub static BEACON_PROCESSOR_SAMPLING_RESULT_QUEUE_TOTAL: LazyLock> = - LazyLock::new(|| { - try_create_int_gauge( - "beacon_processor_sampling_result_queue_total", - "Count of sampling results waiting to be processed.", - ) - }); -// Chain segments. 
-pub static BEACON_PROCESSOR_CHAIN_SEGMENT_QUEUE_TOTAL: LazyLock> = - LazyLock::new(|| { - try_create_int_gauge( - "beacon_processor_chain_segment_queue_total", - "Count of chain segments from the rpc waiting to be verified.", - ) - }); -pub static BEACON_PROCESSOR_BACKFILL_CHAIN_SEGMENT_QUEUE_TOTAL: LazyLock> = - LazyLock::new(|| { - try_create_int_gauge( - "beacon_processor_backfill_chain_segment_queue_total", - "Count of backfill chain segments from the rpc waiting to be verified.", - ) - }); -// Unaggregated attestations. -pub static BEACON_PROCESSOR_UNAGGREGATED_ATTESTATION_QUEUE_TOTAL: LazyLock> = - LazyLock::new(|| { - try_create_int_gauge( - "beacon_processor_unaggregated_attestation_queue_total", - "Count of unagg. attestations waiting to be processed.", - ) - }); -// Aggregated attestations. -pub static BEACON_PROCESSOR_AGGREGATED_ATTESTATION_QUEUE_TOTAL: LazyLock> = - LazyLock::new(|| { - try_create_int_gauge( - "beacon_processor_aggregated_attestation_queue_total", - "Count of agg. attestations waiting to be processed.", - ) - }); -// Sync committee messages. -pub static BEACON_PROCESSOR_SYNC_MESSAGE_QUEUE_TOTAL: LazyLock> = - LazyLock::new(|| { - try_create_int_gauge( - "beacon_processor_sync_message_queue_total", - "Count of sync committee messages waiting to be processed.", - ) - }); -// Sync contribution. -pub static BEACON_PROCESSOR_SYNC_CONTRIBUTION_QUEUE_TOTAL: LazyLock> = - LazyLock::new(|| { - try_create_int_gauge( - "beacon_processor_sync_contribution_queue_total", - "Count of sync committee contributions waiting to be processed.", - ) - }); -// HTTP API requests. 
-pub static BEACON_PROCESSOR_API_REQUEST_P0_QUEUE_TOTAL: LazyLock> = - LazyLock::new(|| { - try_create_int_gauge( - "beacon_processor_api_request_p0_queue_total", - "Count of P0 HTTP requesets waiting to be processed.", - ) - }); -pub static BEACON_PROCESSOR_API_REQUEST_P1_QUEUE_TOTAL: LazyLock> = - LazyLock::new(|| { - try_create_int_gauge( - "beacon_processor_api_request_p1_queue_total", - "Count of P1 HTTP requesets waiting to be processed.", - ) - }); /* * Attestation reprocessing queue metrics. diff --git a/beacon_node/network/src/network_beacon_processor/tests.rs b/beacon_node/network/src/network_beacon_processor/tests.rs index 40c69a0baa..391175ccd4 100644 --- a/beacon_node/network/src/network_beacon_processor/tests.rs +++ b/beacon_node/network/src/network_beacon_processor/tests.rs @@ -467,10 +467,11 @@ impl TestRig { /// /// Given the described logic, `expected` must not contain `WORKER_FREED` or `NOTHING_TO_DO` /// events. - pub async fn assert_event_journal_contains_ordered(&mut self, expected: &[&str]) { - assert!(expected + pub async fn assert_event_journal_contains_ordered(&mut self, expected: &[WorkType]) { + let expected = expected .iter() - .all(|ev| ev != &WORKER_FREED && ev != &NOTHING_TO_DO)); + .map(|ev| ev.into()) + .collect::>(); let mut events = Vec::with_capacity(expected.len()); let mut worker_freed_remaining = expected.len(); @@ -517,6 +518,18 @@ impl TestRig { .await } + pub async fn assert_event_journal_completes(&mut self, expected: &[WorkType]) { + self.assert_event_journal( + &expected + .iter() + .map(|ev| Into::<&'static str>::into(ev)) + .chain(std::iter::once(WORKER_FREED)) + .chain(std::iter::once(NOTHING_TO_DO)) + .collect::>(), + ) + .await + } + /// Assert that the `BeaconProcessor` event journal is as `expected`. 
/// /// ## Note @@ -587,13 +600,13 @@ async fn import_gossip_block_acceptably_early() { rig.enqueue_gossip_block(); - rig.assert_event_journal(&[GOSSIP_BLOCK, WORKER_FREED, NOTHING_TO_DO]) + rig.assert_event_journal_completes(&[WorkType::GossipBlock]) .await; let num_blobs = rig.next_blobs.as_ref().map(|b| b.len()).unwrap_or(0); for i in 0..num_blobs { rig.enqueue_gossip_blob(i); - rig.assert_event_journal(&[GOSSIP_BLOBS_SIDECAR, WORKER_FREED, NOTHING_TO_DO]) + rig.assert_event_journal_completes(&[WorkType::GossipBlobSidecar]) .await; } @@ -611,7 +624,7 @@ async fn import_gossip_block_acceptably_early() { "block not yet imported" ); - rig.assert_event_journal(&[DELAYED_IMPORT_BLOCK, WORKER_FREED, NOTHING_TO_DO]) + rig.assert_event_journal_completes(&[WorkType::DelayedImportBlock]) .await; assert_eq!( @@ -644,7 +657,7 @@ async fn import_gossip_block_unacceptably_early() { rig.enqueue_gossip_block(); - rig.assert_event_journal(&[GOSSIP_BLOCK, WORKER_FREED, NOTHING_TO_DO]) + rig.assert_event_journal_completes(&[WorkType::GossipBlock]) .await; // Waiting for 5 seconds is a bit arbitrary, however it *should* be long enough to ensure the @@ -670,7 +683,7 @@ async fn import_gossip_block_at_current_slot() { rig.enqueue_gossip_block(); - rig.assert_event_journal(&[GOSSIP_BLOCK, WORKER_FREED, NOTHING_TO_DO]) + rig.assert_event_journal_completes(&[WorkType::GossipBlock]) .await; let num_blobs = rig @@ -682,7 +695,7 @@ async fn import_gossip_block_at_current_slot() { for i in 0..num_blobs { rig.enqueue_gossip_blob(i); - rig.assert_event_journal(&[GOSSIP_BLOBS_SIDECAR, WORKER_FREED, NOTHING_TO_DO]) + rig.assert_event_journal_completes(&[WorkType::GossipBlobSidecar]) .await; } @@ -702,7 +715,7 @@ async fn import_gossip_attestation() { rig.enqueue_unaggregated_attestation(); - rig.assert_event_journal(&[GOSSIP_ATTESTATION, WORKER_FREED, NOTHING_TO_DO]) + rig.assert_event_journal_completes(&[WorkType::GossipAttestation]) .await; assert_eq!( @@ -728,7 +741,7 @@ async fn 
attestation_to_unknown_block_processed(import_method: BlockImportMethod rig.enqueue_next_block_unaggregated_attestation(); - rig.assert_event_journal(&[GOSSIP_ATTESTATION, WORKER_FREED, NOTHING_TO_DO]) + rig.assert_event_journal_completes(&[WorkType::GossipAttestation]) .await; assert_eq!( @@ -747,23 +760,23 @@ async fn attestation_to_unknown_block_processed(import_method: BlockImportMethod match import_method { BlockImportMethod::Gossip => { rig.enqueue_gossip_block(); - events.push(GOSSIP_BLOCK); + events.push(WorkType::GossipBlock); for i in 0..num_blobs { rig.enqueue_gossip_blob(i); - events.push(GOSSIP_BLOBS_SIDECAR); + events.push(WorkType::GossipBlobSidecar); } } BlockImportMethod::Rpc => { rig.enqueue_rpc_block(); - events.push(RPC_BLOCK); + events.push(WorkType::RpcBlock); if num_blobs > 0 { rig.enqueue_single_lookup_rpc_blobs(); - events.push(RPC_BLOBS); + events.push(WorkType::RpcBlobs); } } }; - events.push(UNKNOWN_BLOCK_ATTESTATION); + events.push(WorkType::UnknownBlockAttestation); rig.assert_event_journal_contains_ordered(&events).await; @@ -809,7 +822,7 @@ async fn aggregate_attestation_to_unknown_block(import_method: BlockImportMethod rig.enqueue_next_block_aggregated_attestation(); - rig.assert_event_journal(&[GOSSIP_AGGREGATE, WORKER_FREED, NOTHING_TO_DO]) + rig.assert_event_journal_completes(&[WorkType::GossipAggregate]) .await; assert_eq!( @@ -828,23 +841,23 @@ async fn aggregate_attestation_to_unknown_block(import_method: BlockImportMethod match import_method { BlockImportMethod::Gossip => { rig.enqueue_gossip_block(); - events.push(GOSSIP_BLOCK); + events.push(WorkType::GossipBlock); for i in 0..num_blobs { rig.enqueue_gossip_blob(i); - events.push(GOSSIP_BLOBS_SIDECAR); + events.push(WorkType::GossipBlobSidecar); } } BlockImportMethod::Rpc => { rig.enqueue_rpc_block(); - events.push(RPC_BLOCK); + events.push(WorkType::RpcBlock); if num_blobs > 0 { rig.enqueue_single_lookup_rpc_blobs(); - events.push(RPC_BLOBS); + 
events.push(WorkType::RpcBlobs); } } }; - events.push(UNKNOWN_BLOCK_AGGREGATE); + events.push(WorkType::UnknownBlockAggregate); rig.assert_event_journal_contains_ordered(&events).await; @@ -887,7 +900,7 @@ async fn requeue_unknown_block_gossip_attestation_without_import() { rig.enqueue_next_block_unaggregated_attestation(); - rig.assert_event_journal(&[GOSSIP_ATTESTATION, WORKER_FREED, NOTHING_TO_DO]) + rig.assert_event_journal_completes(&[WorkType::GossipAttestation]) .await; assert_eq!( @@ -899,7 +912,11 @@ async fn requeue_unknown_block_gossip_attestation_without_import() { // Ensure that the attestation is received back but not imported. rig.assert_event_journal_with_timeout( - &[UNKNOWN_BLOCK_ATTESTATION, WORKER_FREED, NOTHING_TO_DO], + &[ + WorkType::UnknownBlockAttestation.into(), + WORKER_FREED, + NOTHING_TO_DO, + ], Duration::from_secs(1) + QUEUED_ATTESTATION_DELAY, ) .await; @@ -923,7 +940,7 @@ async fn requeue_unknown_block_gossip_aggregated_attestation_without_import() { rig.enqueue_next_block_aggregated_attestation(); - rig.assert_event_journal(&[GOSSIP_AGGREGATE, WORKER_FREED, NOTHING_TO_DO]) + rig.assert_event_journal_completes(&[WorkType::GossipAggregate]) .await; assert_eq!( @@ -935,7 +952,11 @@ async fn requeue_unknown_block_gossip_aggregated_attestation_without_import() { // Ensure that the attestation is received back but not imported. 
rig.assert_event_journal_with_timeout( - &[UNKNOWN_BLOCK_AGGREGATE, WORKER_FREED, NOTHING_TO_DO], + &[ + WorkType::UnknownBlockAggregate.into(), + WORKER_FREED, + NOTHING_TO_DO, + ], Duration::from_secs(1) + QUEUED_ATTESTATION_DELAY, ) .await; @@ -961,7 +982,7 @@ async fn import_misc_gossip_ops() { rig.enqueue_gossip_attester_slashing(); - rig.assert_event_journal(&[GOSSIP_ATTESTER_SLASHING, WORKER_FREED, NOTHING_TO_DO]) + rig.assert_event_journal_completes(&[WorkType::GossipAttesterSlashing]) .await; assert_eq!( @@ -978,7 +999,7 @@ async fn import_misc_gossip_ops() { rig.enqueue_gossip_proposer_slashing(); - rig.assert_event_journal(&[GOSSIP_PROPOSER_SLASHING, WORKER_FREED, NOTHING_TO_DO]) + rig.assert_event_journal_completes(&[WorkType::GossipProposerSlashing]) .await; assert_eq!( @@ -995,7 +1016,7 @@ async fn import_misc_gossip_ops() { rig.enqueue_gossip_voluntary_exit(); - rig.assert_event_journal(&[GOSSIP_VOLUNTARY_EXIT, WORKER_FREED, NOTHING_TO_DO]) + rig.assert_event_journal_completes(&[WorkType::GossipVoluntaryExit]) .await; assert_eq!( @@ -1014,12 +1035,12 @@ async fn test_rpc_block_reprocessing() { // Insert the next block into the duplicate cache manually let handle = rig.duplicate_cache.check_and_insert(next_block_root); rig.enqueue_single_lookup_rpc_block(); - rig.assert_event_journal(&[RPC_BLOCK, WORKER_FREED, NOTHING_TO_DO]) + rig.assert_event_journal_completes(&[WorkType::RpcBlock]) .await; rig.enqueue_single_lookup_rpc_blobs(); if rig.next_blobs.as_ref().map(|b| b.len()).unwrap_or(0) > 0 { - rig.assert_event_journal(&[RPC_BLOBS, WORKER_FREED, NOTHING_TO_DO]) + rig.assert_event_journal_completes(&[WorkType::RpcBlobs]) .await; } @@ -1033,7 +1054,7 @@ async fn test_rpc_block_reprocessing() { // the specified delay. 
tokio::time::sleep(QUEUED_RPC_BLOCK_DELAY).await; - rig.assert_event_journal(&[RPC_BLOCK]).await; + rig.assert_event_journal(&[WorkType::RpcBlock.into()]).await; // Add an extra delay for block processing tokio::time::sleep(Duration::from_millis(10)).await; // head should update to next block now since the duplicate @@ -1055,7 +1076,11 @@ async fn test_backfill_sync_processing() { rig.assert_no_events_for(Duration::from_millis(100)).await; // A new batch should be processed within a slot. rig.assert_event_journal_with_timeout( - &[CHAIN_SEGMENT_BACKFILL, WORKER_FREED, NOTHING_TO_DO], + &[ + WorkType::ChainSegmentBackfill.into(), + WORKER_FREED, + NOTHING_TO_DO, + ], rig.chain.slot_clock.slot_duration(), ) .await; @@ -1075,9 +1100,9 @@ async fn test_backfill_sync_processing_rate_limiting_disabled() { // ensure all batches are processed rig.assert_event_journal_with_timeout( &[ - CHAIN_SEGMENT_BACKFILL, - CHAIN_SEGMENT_BACKFILL, - CHAIN_SEGMENT_BACKFILL, + WorkType::ChainSegmentBackfill.into(), + WorkType::ChainSegmentBackfill.into(), + WorkType::ChainSegmentBackfill.into(), ], Duration::from_millis(100), ) diff --git a/beacon_node/network/src/router.rs b/beacon_node/network/src/router.rs index a5e27f582a..26c1d14f02 100644 --- a/beacon_node/network/src/router.rs +++ b/beacon_node/network/src/router.rs @@ -710,7 +710,7 @@ impl Router { if let Err(e) = result { let work_type = match &e { mpsc::error::TrySendError::Closed(work) | mpsc::error::TrySendError::Full(work) => { - work.work_type() + work.work_type_str() } }; diff --git a/beacon_node/network/src/sync/block_lookups/tests.rs b/beacon_node/network/src/sync/block_lookups/tests.rs index 6d852b2572..a8a7ad5849 100644 --- a/beacon_node/network/src/sync/block_lookups/tests.rs +++ b/beacon_node/network/src/sync/block_lookups/tests.rs @@ -1032,17 +1032,17 @@ impl TestRig { match response_type { ResponseType::Block => self .pop_received_processor_event(|ev| { - (ev.work_type() == 
beacon_processor::RPC_BLOCK).then_some(()) + (ev.work_type() == beacon_processor::WorkType::RpcBlock).then_some(()) }) .unwrap_or_else(|e| panic!("Expected block work event: {e}")), ResponseType::Blob => self .pop_received_processor_event(|ev| { - (ev.work_type() == beacon_processor::RPC_BLOBS).then_some(()) + (ev.work_type() == beacon_processor::WorkType::RpcBlobs).then_some(()) }) .unwrap_or_else(|e| panic!("Expected blobs work event: {e}")), ResponseType::CustodyColumn => self .pop_received_processor_event(|ev| { - (ev.work_type() == beacon_processor::RPC_CUSTODY_COLUMN).then_some(()) + (ev.work_type() == beacon_processor::WorkType::RpcCustodyColumn).then_some(()) }) .unwrap_or_else(|e| panic!("Expected column work event: {e}")), } @@ -1050,7 +1050,7 @@ impl TestRig { fn expect_rpc_custody_column_work_event(&mut self) { self.pop_received_processor_event(|ev| { - if ev.work_type() == beacon_processor::RPC_CUSTODY_COLUMN { + if ev.work_type() == beacon_processor::WorkType::RpcCustodyColumn { Some(()) } else { None @@ -1061,7 +1061,7 @@ impl TestRig { fn expect_rpc_sample_verify_work_event(&mut self) { self.pop_received_processor_event(|ev| { - if ev.work_type() == beacon_processor::RPC_VERIFY_DATA_COLUMNS { + if ev.work_type() == beacon_processor::WorkType::RpcVerifyDataColumn { Some(()) } else { None @@ -1072,7 +1072,7 @@ impl TestRig { fn expect_sampling_result_work(&mut self) { self.pop_received_processor_event(|ev| { - if ev.work_type() == beacon_processor::SAMPLING_RESULT { + if ev.work_type() == beacon_processor::WorkType::SamplingResult { Some(()) } else { None @@ -1103,7 +1103,7 @@ impl TestRig { match self.beacon_processor_rx.try_recv() { Ok(work) => { // Parent chain sends blocks one by one - assert_eq!(work.work_type(), beacon_processor::RPC_BLOCK); + assert_eq!(work.work_type(), beacon_processor::WorkType::RpcBlock); } other => panic!( "Expected rpc_block from chain segment process, found {:?}", diff --git 
a/beacon_node/network/src/sync/range_sync/range.rs b/beacon_node/network/src/sync/range_sync/range.rs index beb04fac28..28dea8e4b5 100644 --- a/beacon_node/network/src/sync/range_sync/range.rs +++ b/beacon_node/network/src/sync/range_sync/range.rs @@ -652,7 +652,10 @@ mod tests { fn expect_empty_processor(&mut self) { match self.beacon_processor_rx.try_recv() { Ok(work) => { - panic!("Expected empty processor. Instead got {}", work.work_type()); + panic!( + "Expected empty processor. Instead got {}", + work.work_type_str() + ); } Err(e) => match e { mpsc::error::TryRecvError::Empty => {} @@ -665,7 +668,7 @@ mod tests { fn expect_chain_segment(&mut self) { match self.beacon_processor_rx.try_recv() { Ok(work) => { - assert_eq!(work.work_type(), beacon_processor::CHAIN_SEGMENT); + assert_eq!(work.work_type(), beacon_processor::WorkType::ChainSegment); } other => panic!("Expected chain segment process, found {:?}", other), } diff --git a/common/lighthouse_metrics/src/lib.rs b/common/lighthouse_metrics/src/lib.rs index f52913dd00..2a1e99defa 100644 --- a/common/lighthouse_metrics/src/lib.rs +++ b/common/lighthouse_metrics/src/lib.rs @@ -283,6 +283,12 @@ pub fn stop_timer(timer: Option) { } } +pub fn observe_vec(vec: &Result, name: &[&str], value: f64) { + if let Some(h) = get_histogram(vec, name) { + h.observe(value) + } +} + pub fn inc_counter(counter: &Result) { if let Ok(counter) = counter { counter.inc(); From a94b12b4d5ec4ee4bb4866d4e74586b01877bbf5 Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Mon, 9 Sep 2024 17:27:49 -0700 Subject: [PATCH 15/66] Persist light client bootstrap (#5915) * persist light client updates * update beacon chain to serve light client updates * resolve todos * cache best update * extend cache parts * is better light client update * resolve merge conflict * initial api changes * add lc update db column * fmt * added tests * add sim * Merge branch 'unstable' of https://github.com/sigp/lighthouse into persist-light-client-updates * fix 
some weird issues with the simulator * tests * Merge branch 'unstable' of https://github.com/sigp/lighthouse into persist-light-client-updates * test changes * merge conflict * testing * started work on ef tests and some code clean up * update tests * linting * noop pre altair, we're still failing on electra though * allow for zeroed light client header * Merge branch 'unstable' of https://github.com/sigp/lighthouse into persist-light-client-updates * merge unstable * remove unwraps * remove unwraps * fetch bootstrap without always querying for state * storing bootstrap parts in db * more code cleanup * test * prune sync committee branches from dropped chains * Update light_client_update.rs * merge unstable * move functionality to helper methods * refactor is best update fn * refactor is best update fn * improve organization of light client server cache logic * fork digest calc, and only spawn as many blocks as we need for the lc update test * resolve merge conflict * add electra bootstrap logic, add logic to cache current sync committee * add latest sync committee branch cache * fetch lc update from the cache if it exists * fmt * Fix beacon_chain tests * Add debug code to update ranking_order ef test * Fix compare code * merge conflicts * merge conflict * add better error messaging * resolve merge conflicts * remove lc update from basicsim * rename sync committee variable and fix persist condition * refactor get_light_client_update logic * add better comments, return helpful error messages over http and rpc * pruning canonical non checkpoint slots * fix test * rerun test * update pruning logic, add tests * fix tests * fix imports * fmt * refactor db code * Refactor db method * Refactor db method * add additional comments * Merge branch 'unstable' of https://github.com/sigp/lighthouse into persist-light-client-bootstrap * fix merge * linting * merge conflict * prevent overflow * enable lc server for http api tests * fix tests * remove prints * remove warning * revert
change --- Cargo.lock | 1 + beacon_node/beacon_chain/src/beacon_chain.rs | 36 +-- beacon_node/beacon_chain/src/errors.rs | 5 +- .../src/light_client_server_cache.rs | 174 +++++++++---- beacon_node/beacon_chain/src/migrate.rs | 1 + beacon_node/beacon_chain/src/test_utils.rs | 149 ++++++++++- beacon_node/beacon_chain/tests/store_tests.rs | 237 ++++++++++-------- beacon_node/http_api/src/lib.rs | 37 +-- beacon_node/http_api/src/light_client.rs | 54 +++- .../http_api/tests/interactive_tests.rs | 4 +- beacon_node/http_api/tests/status_tests.rs | 3 +- beacon_node/http_api/tests/tests.rs | 18 +- .../network_beacon_processor/rpc_methods.rs | 12 +- beacon_node/store/Cargo.toml | 1 + beacon_node/store/src/errors.rs | 7 + beacon_node/store/src/hot_cold_store.rs | 185 +++++++++++++- beacon_node/store/src/lib.rs | 9 + consensus/types/src/lib.rs | 2 +- consensus/types/src/light_client_bootstrap.rs | 36 +++ 19 files changed, 733 insertions(+), 238 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8054c812f1..1d243d9554 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8262,6 +8262,7 @@ dependencies = [ "lighthouse_metrics", "lru", "parking_lot 0.12.3", + "safe_arith", "serde", "slog", "sloggers", diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 322a2caa67..bf660c9eaf 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -6987,32 +6987,18 @@ impl BeaconChain { &self, block_root: &Hash256, ) -> Result, ForkName)>, Error> { - let Some(block) = self.get_blinded_block(block_root)? else { - return Ok(None); - }; + let head_state = &self.head().snapshot.beacon_state; + let finalized_period = head_state + .finalized_checkpoint() + .epoch + .sync_committee_period(&self.spec)?; - let (state_root, slot) = (block.state_root(), block.slot()); - - let Some(mut state) = self.get_state(&state_root, Some(slot))? 
else { - return Ok(None); - }; - - let fork_name = state - .fork_name(&self.spec) - .map_err(Error::InconsistentFork)?; - - match fork_name { - ForkName::Altair - | ForkName::Bellatrix - | ForkName::Capella - | ForkName::Deneb - | ForkName::Electra => { - LightClientBootstrap::from_beacon_state(&mut state, &block, &self.spec) - .map(|bootstrap| Some((bootstrap, fork_name))) - .map_err(Error::LightClientError) - } - ForkName::Base => Err(Error::UnsupportedFork), - } + self.light_client_server_cache.get_light_client_bootstrap( + &self.store, + block_root, + finalized_period, + &self.spec, + ) } pub fn metrics(&self) -> BeaconChainMetrics { diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 4db3f0ebb4..994ac79af7 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -216,7 +216,8 @@ pub enum BeaconChainError { UnableToPublish, UnableToBuildColumnSidecar(String), AvailabilityCheckError(AvailabilityCheckError), - LightClientError(LightClientError), + LightClientUpdateError(LightClientUpdateError), + LightClientBootstrapError(String), UnsupportedFork, MilhouseError(MilhouseError), EmptyRpcCustodyColumns, @@ -250,7 +251,7 @@ easy_from_to!(BlockReplayError, BeaconChainError); easy_from_to!(InconsistentFork, BeaconChainError); easy_from_to!(AvailabilityCheckError, BeaconChainError); easy_from_to!(EpochCacheError, BeaconChainError); -easy_from_to!(LightClientError, BeaconChainError); +easy_from_to!(LightClientUpdateError, BeaconChainError); easy_from_to!(MilhouseError, BeaconChainError); easy_from_to!(AttestationError, BeaconChainError); diff --git a/beacon_node/beacon_chain/src/light_client_server_cache.rs b/beacon_node/beacon_chain/src/light_client_server_cache.rs index efc746675d..ca015d0365 100644 --- a/beacon_node/beacon_chain/src/light_client_server_cache.rs +++ b/beacon_node/beacon_chain/src/light_client_server_cache.rs @@ -1,23 +1,25 @@ use crate::errors::BeaconChainError; 
use crate::{metrics, BeaconChainTypes, BeaconStore}; +use eth2::types::light_client_update::CurrentSyncCommitteeProofLen; use parking_lot::{Mutex, RwLock}; use safe_arith::SafeArith; use slog::{debug, Logger}; use ssz::Decode; -use ssz::Encode; use ssz_types::FixedVector; use std::num::NonZeroUsize; use std::sync::Arc; use store::DBColumn; use store::KeyValueStore; +use tree_hash::TreeHash; use types::light_client_update::{ - FinalizedRootProofLen, NextSyncCommitteeProofLen, FINALIZED_ROOT_INDEX, - NEXT_SYNC_COMMITTEE_INDEX, + FinalizedRootProofLen, NextSyncCommitteeProofLen, CURRENT_SYNC_COMMITTEE_INDEX, + FINALIZED_ROOT_INDEX, NEXT_SYNC_COMMITTEE_INDEX, }; use types::non_zero_usize::new_non_zero_usize; use types::{ - BeaconBlockRef, BeaconState, ChainSpec, EthSpec, ForkName, Hash256, LightClientFinalityUpdate, - LightClientOptimisticUpdate, LightClientUpdate, Slot, SyncAggregate, SyncCommittee, + BeaconBlockRef, BeaconState, ChainSpec, Checkpoint, EthSpec, ForkName, Hash256, + LightClientBootstrap, LightClientFinalityUpdate, LightClientOptimisticUpdate, + LightClientUpdate, Slot, SyncAggregate, SyncCommittee, }; /// A prev block cache miss requires to re-generate the state of the post-parent block. Items in the @@ -28,7 +30,6 @@ const PREV_BLOCK_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(32); /// This cache computes light client messages ahead of time, required to satisfy p2p and API /// requests. These messages include proofs on historical states, so on-demand computation is /// expensive. -/// pub struct LightClientServerCache { /// Tracks a single global latest finality update out of all imported blocks. 
/// @@ -41,6 +42,8 @@ pub struct LightClientServerCache { latest_optimistic_update: RwLock>>, /// Caches the most recent light client update latest_light_client_update: RwLock>>, + /// Caches the current sync committee, + latest_written_current_sync_committee: RwLock>>>, /// Caches state proofs by block root prev_block_cache: Mutex>>, } @@ -51,6 +54,7 @@ impl LightClientServerCache { latest_finality_update: None.into(), latest_optimistic_update: None.into(), latest_light_client_update: None.into(), + latest_written_current_sync_committee: None.into(), prev_block_cache: lru::LruCache::new(PREV_BLOCK_CACHE_SIZE).into(), } } @@ -96,6 +100,10 @@ impl LightClientServerCache { let signature_slot = block_slot; let attested_block_root = block_parent_root; + let sync_period = block_slot + .epoch(T::EthSpec::slots_per_epoch()) + .sync_committee_period(chain_spec)?; + let attested_block = store.get_blinded_block(attested_block_root)?.ok_or( BeaconChainError::DBInconsistent(format!( "Block not available {:?}", @@ -110,6 +118,18 @@ impl LightClientServerCache { attested_block.slot(), )?; + let finalized_period = cached_parts + .finalized_checkpoint + .epoch + .sync_committee_period(chain_spec)?; + + store.store_sync_committee_branch( + attested_block.message().tree_hash_root(), + &cached_parts.current_sync_committee_branch, + )?; + + self.store_current_sync_committee(&store, &cached_parts, sync_period, finalized_period)?; + let attested_slot = attested_block.slot(); let maybe_finalized_block = store.get_blinded_block(&cached_parts.finalized_block_root)?; @@ -178,57 +198,57 @@ impl LightClientServerCache { // Spec: Full nodes SHOULD provide the best derivable LightClientUpdate (according to is_better_update) // for each sync committee period - let prev_light_client_update = match &self.latest_light_client_update.read().clone() { - Some(prev_light_client_update) => Some(prev_light_client_update.clone()), - None => self.get_light_client_update(&store, sync_period, chain_spec)?, - 
}; + let prev_light_client_update = + self.get_light_client_update(&store, sync_period, chain_spec)?; let should_persist_light_client_update = if let Some(prev_light_client_update) = prev_light_client_update { - let prev_sync_period = prev_light_client_update - .signature_slot() - .epoch(T::EthSpec::slots_per_epoch()) - .sync_committee_period(chain_spec)?; - - if sync_period != prev_sync_period { - true - } else { - prev_light_client_update - .is_better_light_client_update(&new_light_client_update, chain_spec)? - } + prev_light_client_update + .is_better_light_client_update(&new_light_client_update, chain_spec)? } else { true }; if should_persist_light_client_update { - self.store_light_client_update(&store, sync_period, &new_light_client_update)?; + store.store_light_client_update(sync_period, &new_light_client_update)?; + *self.latest_light_client_update.write() = Some(new_light_client_update); } Ok(()) } - fn store_light_client_update( + fn store_current_sync_committee( &self, store: &BeaconStore, + cached_parts: &LightClientCachedData, sync_committee_period: u64, - light_client_update: &LightClientUpdate, + finalized_period: u64, ) -> Result<(), BeaconChainError> { - let column = DBColumn::LightClientUpdate; + if let Some(latest_sync_committee) = + self.latest_written_current_sync_committee.read().clone() + { + if latest_sync_committee == cached_parts.current_sync_committee { + return Ok(()); + } + }; - store.hot_db.put_bytes( - column.into(), - &sync_committee_period.to_le_bytes(), - &light_client_update.as_ssz_bytes(), - )?; - - *self.latest_light_client_update.write() = Some(light_client_update.clone()); + if finalized_period + 1 >= sync_committee_period { + store.store_sync_committee( + sync_committee_period, + &cached_parts.current_sync_committee, + )?; + *self.latest_written_current_sync_committee.write() = + Some(cached_parts.current_sync_committee.clone()); + } Ok(()) } - // Used to fetch the most recently persisted "best" light client update. 
- // Should not be used outside the light client server, as it also caches the fetched - // light client update. + /// Used to fetch the most recently persisted light client update for the given `sync_committee_period`. + /// It first checks the `latest_light_client_update` cache before querying the db. + /// + /// Note: Should not be used outside the light client server, as it also caches the fetched + /// light client update. fn get_light_client_update( &self, store: &BeaconStore, @@ -245,21 +265,7 @@ impl LightClientServerCache { } } - let column = DBColumn::LightClientUpdate; - let res = store - .hot_db - .get_bytes(column.into(), &sync_committee_period.to_le_bytes())?; - - if let Some(light_client_update_bytes) = res { - let epoch = sync_committee_period - .safe_mul(chain_spec.epochs_per_sync_committee_period.into())?; - - let fork_name = chain_spec.fork_name_at_epoch(epoch.into()); - - let light_client_update = - LightClientUpdate::from_ssz_bytes(&light_client_update_bytes, &fork_name) - .map_err(store::errors::Error::SszDecodeError)?; - + if let Some(light_client_update) = store.get_light_client_update(sync_committee_period)? { *self.latest_light_client_update.write() = Some(light_client_update.clone()); return Ok(Some(light_client_update)); } @@ -340,6 +346,65 @@ impl LightClientServerCache { pub fn get_latest_optimistic_update(&self) -> Option> { self.latest_optimistic_update.read().clone() } + + /// Fetches a light client bootstrap for a given finalized checkpoint `block_root`. We eagerly persist + /// `sync_committee_branch and `sync_committee` to allow for a more efficient bootstrap construction. + /// + /// Note: It should be the case that a `sync_committee_branch` and `sync_committee` exist in the db + /// for a finalized checkpoint block root. However, we currently have no backfill mechanism for these values. + /// Therefore, `sync_committee_branch` and `sync_committee` are only persisted while a node is synced. 
+ #[allow(clippy::type_complexity)] + pub fn get_light_client_bootstrap( + &self, + store: &BeaconStore, + block_root: &Hash256, + finalized_period: u64, + chain_spec: &ChainSpec, + ) -> Result, ForkName)>, BeaconChainError> { + let Some(block) = store.get_blinded_block(block_root)? else { + return Err(BeaconChainError::LightClientBootstrapError(format!( + "Block root {block_root} not found" + ))); + }; + + let (_, slot) = (block.state_root(), block.slot()); + + let fork_name = chain_spec.fork_name_at_slot::(slot); + + let sync_committee_period = block + .slot() + .epoch(T::EthSpec::slots_per_epoch()) + .sync_committee_period(chain_spec)?; + + let Some(current_sync_committee_branch) = store.get_sync_committee_branch(block_root)? + else { + return Err(BeaconChainError::LightClientBootstrapError(format!( + "Sync committee branch for block root {:?} not found", + block_root + ))); + }; + + if sync_committee_period > finalized_period { + return Err(BeaconChainError::LightClientBootstrapError( + format!("The blocks sync committee period {sync_committee_period} is greater than the current finalized period {finalized_period}"), + )); + } + + let Some(current_sync_committee) = store.get_sync_committee(sync_committee_period)? 
else { + return Err(BeaconChainError::LightClientBootstrapError(format!( + "Sync committee for period {sync_committee_period} not found" + ))); + }; + + let light_client_bootstrap = LightClientBootstrap::new( + &block, + Arc::new(current_sync_committee), + current_sync_committee_branch, + chain_spec, + )?; + + Ok(Some((light_client_bootstrap, fork_name))) + } } impl Default for LightClientServerCache { @@ -350,23 +415,32 @@ impl Default for LightClientServerCache { type FinalityBranch = FixedVector; type NextSyncCommitteeBranch = FixedVector; +type CurrentSyncCommitteeBranch = FixedVector; #[derive(Clone)] struct LightClientCachedData { + finalized_checkpoint: Checkpoint, finality_branch: FinalityBranch, next_sync_committee_branch: NextSyncCommitteeBranch, + current_sync_committee_branch: CurrentSyncCommitteeBranch, next_sync_committee: Arc>, + current_sync_committee: Arc>, finalized_block_root: Hash256, } impl LightClientCachedData { fn from_state(state: &mut BeaconState) -> Result { Ok(Self { + finalized_checkpoint: state.finalized_checkpoint(), finality_branch: state.compute_merkle_proof(FINALIZED_ROOT_INDEX)?.into(), next_sync_committee: state.next_sync_committee()?.clone(), + current_sync_committee: state.current_sync_committee()?.clone(), next_sync_committee_branch: state .compute_merkle_proof(NEXT_SYNC_COMMITTEE_INDEX)? .into(), + current_sync_committee_branch: state + .compute_merkle_proof(CURRENT_SYNC_COMMITTEE_INDEX)? 
+ .into(), finalized_block_root: state.finalized_checkpoint().root, }) } diff --git a/beacon_node/beacon_chain/src/migrate.rs b/beacon_node/beacon_chain/src/migrate.rs index fcb8fb1c89..f83df7b446 100644 --- a/beacon_node/beacon_chain/src/migrate.rs +++ b/beacon_node/beacon_chain/src/migrate.rs @@ -676,6 +676,7 @@ impl, Cold: ItemStore> BackgroundMigrator, + slot: Slot, + block_root: Hash256, + ) { + let fork_name = state.fork_name(&self.spec).unwrap(); + if !fork_name.altair_enabled() { + return; + } + + let log = self.logger(); + let contributions = + self.make_sync_contributions(state, block_root, slot, RelativeSyncCommittee::Current); + + for (_, contribution_and_proof) in contributions { + let Some(contribution_and_proof) = contribution_and_proof else { + continue; + }; + let contribution = contribution_and_proof.message.contribution; + self.chain + .op_pool + .insert_sync_contribution(contribution.clone()) + .unwrap(); + self.chain + .op_pool + .insert_sync_contribution(contribution) + .unwrap(); + } + + let Some(sync_aggregate) = self.chain.op_pool.get_sync_aggregate(state).unwrap() else { + return; + }; + + let _ = self + .chain + .light_client_server_cache + .recompute_and_cache_updates( + self.chain.store.clone(), + slot, + &block_root, + &sync_aggregate, + log, + &self.spec, + ); + } + + pub async fn add_attested_blocks_at_slots_with_lc_data( + &self, + mut state: BeaconState, + state_root: Hash256, + slots: &[Slot], + validators: &[usize], + mut latest_block_hash: Option, + sync_committee_strategy: SyncCommitteeStrategy, + ) -> AddBlocksResult { + assert!( + slots.windows(2).all(|w| w[0] <= w[1]), + "Slots have to be sorted" + ); // slice.is_sorted() isn't stabilized at the moment of writing this + let mut block_hash_from_slot: HashMap = HashMap::new(); + let mut state_hash_from_slot: HashMap = HashMap::new(); + for slot in slots { + let (block_hash, new_state) = self + .add_attested_block_at_slot_with_sync( + *slot, + state, + state_root, + validators, 
+ sync_committee_strategy, + ) + .await + .unwrap(); + + state = new_state; + + self.update_light_client_server_cache(&state, *slot, block_hash.into()); + + block_hash_from_slot.insert(*slot, block_hash); + state_hash_from_slot.insert(*slot, state.canonical_root().unwrap().into()); + latest_block_hash = Some(block_hash); + } + ( + block_hash_from_slot, + state_hash_from_slot, + latest_block_hash.unwrap(), + state, + ) + } + async fn add_attested_blocks_at_slots_given_lbh( &self, mut state: BeaconState, @@ -2250,7 +2346,9 @@ where ) .await .unwrap(); + state = new_state; + block_hash_from_slot.insert(*slot, block_hash); state_hash_from_slot.insert(*slot, state.canonical_root().unwrap().into()); latest_block_hash = Some(block_hash); @@ -2459,6 +2557,23 @@ where block_strategy, attestation_strategy, SyncCommitteeStrategy::NoValidators, + LightClientStrategy::Disabled, + ) + .await + } + + pub async fn extend_chain_with_light_client_data( + &self, + num_blocks: usize, + block_strategy: BlockStrategy, + attestation_strategy: AttestationStrategy, + ) -> Hash256 { + self.extend_chain_with_sync( + num_blocks, + block_strategy, + attestation_strategy, + SyncCommitteeStrategy::NoValidators, + LightClientStrategy::Enabled, ) .await } @@ -2469,6 +2584,7 @@ where block_strategy: BlockStrategy, attestation_strategy: AttestationStrategy, sync_committee_strategy: SyncCommitteeStrategy, + light_client_strategy: LightClientStrategy, ) -> Hash256 { let (mut state, slots) = match block_strategy { BlockStrategy::OnCanonicalHead => { @@ -2500,15 +2616,30 @@ where }; let state_root = state.update_tree_hash_cache().unwrap(); - let (_, _, last_produced_block_hash, _) = self - .add_attested_blocks_at_slots_with_sync( - state, - state_root, - &slots, - &validators, - sync_committee_strategy, - ) - .await; + let (_, _, last_produced_block_hash, _) = match light_client_strategy { + LightClientStrategy::Enabled => { + self.add_attested_blocks_at_slots_with_lc_data( + state, + state_root, + 
&slots, + &validators, + None, + sync_committee_strategy, + ) + .await + } + LightClientStrategy::Disabled => { + self.add_attested_blocks_at_slots_with_sync( + state, + state_root, + &slots, + &validators, + sync_committee_strategy, + ) + .await + } + }; + last_produced_block_hash.into() } diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 95bf7f1ce8..1b1e5ea514 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -5,7 +5,7 @@ use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::builder::BeaconChainBuilder; use beacon_chain::data_availability_checker::AvailableBlock; use beacon_chain::schema_change::migrate_schema; -use beacon_chain::test_utils::RelativeSyncCommittee; +use beacon_chain::test_utils::SyncCommitteeStrategy; use beacon_chain::test_utils::{ mock_execution_layer_from_parts, test_spec, AttestationStrategy, BeaconChainHarness, BlockStrategy, DiskHarnessType, KZG, @@ -104,6 +104,142 @@ fn get_harness_generic( harness } +#[tokio::test] +async fn light_client_bootstrap_test() { + let spec = test_spec::(); + let Some(_) = spec.altair_fork_epoch else { + // No-op prior to Altair. 
+ return; + }; + + let checkpoint_slot = Slot::new(E::slots_per_epoch() * 6); + let db_path = tempdir().unwrap(); + let log = test_logger(); + + let seconds_per_slot = spec.seconds_per_slot; + let store = get_store_generic( + &db_path, + StoreConfig { + slots_per_restore_point: 2 * E::slots_per_epoch(), + ..Default::default() + }, + test_spec::(), + ); + let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); + let all_validators = (0..LOW_VALIDATOR_COUNT).collect::>(); + let num_initial_slots = E::slots_per_epoch() * 7; + let slots: Vec = (1..num_initial_slots).map(Slot::new).collect(); + + let (genesis_state, genesis_state_root) = harness.get_current_state_and_root(); + harness + .add_attested_blocks_at_slots_with_lc_data( + genesis_state.clone(), + genesis_state_root, + &slots, + &all_validators, + None, + SyncCommitteeStrategy::NoValidators, + ) + .await; + + let wss_block_root = harness + .chain + .block_root_at_slot(checkpoint_slot, WhenSlotSkipped::Prev) + .unwrap() + .unwrap(); + let wss_state_root = harness + .chain + .state_root_at_slot(checkpoint_slot) + .unwrap() + .unwrap(); + let wss_block = harness + .chain + .store + .get_full_block(&wss_block_root) + .unwrap() + .unwrap(); + let wss_blobs_opt = harness.chain.store.get_blobs(&wss_block_root).unwrap(); + let wss_state = store + .get_state(&wss_state_root, Some(checkpoint_slot)) + .unwrap() + .unwrap(); + + let kzg = spec.deneb_fork_epoch.map(|_| KZG.clone()); + + let mock = + mock_execution_layer_from_parts(&harness.spec, harness.runtime.task_executor.clone()); + + // Initialise a new beacon chain from the finalized checkpoint. + // The slot clock must be set to a time ahead of the checkpoint state. 
+ let slot_clock = TestingSlotClock::new( + Slot::new(0), + Duration::from_secs(harness.chain.genesis_time), + Duration::from_secs(seconds_per_slot), + ); + slot_clock.set_slot(harness.get_current_slot().as_u64()); + + let (shutdown_tx, _shutdown_rx) = futures::channel::mpsc::channel(1); + + let beacon_chain = BeaconChainBuilder::>::new(MinimalEthSpec) + .store(store.clone()) + .custom_spec(test_spec::()) + .task_executor(harness.chain.task_executor.clone()) + .logger(log.clone()) + .weak_subjectivity_state( + wss_state, + wss_block.clone(), + wss_blobs_opt.clone(), + genesis_state, + ) + .unwrap() + .store_migrator_config(MigratorConfig::default().blocking()) + .dummy_eth1_backend() + .expect("should build dummy backend") + .slot_clock(slot_clock) + .shutdown_sender(shutdown_tx) + .chain_config(ChainConfig::default()) + .event_handler(Some(ServerSentEventHandler::new_with_capacity( + log.clone(), + 1, + ))) + .execution_layer(Some(mock.el)) + .kzg(kzg) + .build() + .expect("should build"); + + let current_state = harness.get_current_state(); + + if ForkName::Electra == current_state.fork_name_unchecked() { + // TODO(electra) fix beacon state `compute_merkle_proof` + return; + } + + let finalized_checkpoint = beacon_chain + .canonical_head + .cached_head() + .finalized_checkpoint(); + + let block_root = finalized_checkpoint.root; + + let (lc_bootstrap, _) = harness + .chain + .get_light_client_bootstrap(&block_root) + .unwrap() + .unwrap(); + + let bootstrap_slot = match lc_bootstrap { + LightClientBootstrap::Altair(lc_bootstrap) => lc_bootstrap.header.beacon.slot, + LightClientBootstrap::Capella(lc_bootstrap) => lc_bootstrap.header.beacon.slot, + LightClientBootstrap::Deneb(lc_bootstrap) => lc_bootstrap.header.beacon.slot, + LightClientBootstrap::Electra(lc_bootstrap) => lc_bootstrap.header.beacon.slot, + }; + + assert_eq!( + bootstrap_slot.epoch(E::slots_per_epoch()), + finalized_checkpoint.epoch + ); +} + #[tokio::test] async fn light_client_updates_test() { let 
spec = test_spec::(); @@ -170,7 +306,7 @@ async fn light_client_updates_test() { harness.advance_slot(); harness - .extend_chain( + .extend_chain_with_light_client_data( num_final_blocks as usize, BlockStrategy::OnCanonicalHead, AttestationStrategy::AllValidators, @@ -224,53 +360,6 @@ async fn light_client_updates_test() { return; } - let block_root = *current_state - .get_block_root(current_state.slot() - Slot::new(1)) - .unwrap(); - - let contributions = harness.make_sync_contributions( - ¤t_state, - block_root, - current_state.slot() - Slot::new(1), - RelativeSyncCommittee::Current, - ); - - // generate sync aggregates - for (_, contribution_and_proof) in contributions { - let contribution = contribution_and_proof - .expect("contribution exists for committee") - .message - .contribution; - beacon_chain - .op_pool - .insert_sync_contribution(contribution.clone()) - .unwrap(); - beacon_chain - .op_pool - .insert_sync_contribution(contribution) - .unwrap(); - } - - // check that we can fetch the newly generated sync aggregate - let sync_aggregate = beacon_chain - .op_pool - .get_sync_aggregate(¤t_state) - .unwrap() - .unwrap(); - - // cache light client data - beacon_chain - .light_client_server_cache - .recompute_and_cache_updates( - store.clone(), - current_state.slot() - Slot::new(1), - &block_root, - &sync_aggregate, - &log, - &spec, - ) - .unwrap(); - // calculate the sync period from the previous slot let sync_period = (current_state.slot() - Slot::new(1)) .epoch(E::slots_per_epoch()) @@ -291,61 +380,13 @@ async fn light_client_updates_test() { } harness - .extend_chain( + .extend_chain_with_light_client_data( num_final_blocks as usize, BlockStrategy::OnCanonicalHead, AttestationStrategy::AllValidators, ) .await; - let current_state = harness.get_current_state(); - - let block_root = *current_state - .get_block_root(current_state.slot() - Slot::new(1)) - .unwrap(); - - let contributions = harness.make_sync_contributions( - ¤t_state, - block_root, - 
current_state.slot() - Slot::new(1), - RelativeSyncCommittee::Current, - ); - - // generate new sync aggregates from this new state - for (_, contribution_and_proof) in contributions { - let contribution = contribution_and_proof - .expect("contribution exists for committee") - .message - .contribution; - beacon_chain - .op_pool - .insert_sync_contribution(contribution.clone()) - .unwrap(); - beacon_chain - .op_pool - .insert_sync_contribution(contribution) - .unwrap(); - } - - let sync_aggregate = beacon_chain - .op_pool - .get_sync_aggregate(¤t_state) - .unwrap() - .unwrap(); - - // cache new light client data - beacon_chain - .light_client_server_cache - .recompute_and_cache_updates( - store.clone(), - current_state.slot() - Slot::new(1), - &block_root, - &sync_aggregate, - &log, - &spec, - ) - .unwrap(); - // we should now have two light client updates in the db let lc_updates = beacon_chain .get_light_client_updates(sync_period, 100) diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 22e9931043..998114f565 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -31,7 +31,7 @@ mod validator_inclusion; mod validators; mod version; -use crate::light_client::get_light_client_updates; +use crate::light_client::{get_light_client_bootstrap, get_light_client_updates}; use crate::produce_block::{produce_blinded_block_v2, produce_block_v2, produce_block_v3}; use crate::version::fork_versioned_response; use beacon_chain::{ @@ -2411,40 +2411,7 @@ pub fn serve( block_root: Hash256, accept_header: Option| { task_spawner.blocking_response_task(Priority::P1, move || { - let (bootstrap, fork_name) = match chain.get_light_client_bootstrap(&block_root) - { - Ok(Some(res)) => res, - Ok(None) => { - return Err(warp_utils::reject::custom_not_found( - "Light client bootstrap unavailable".to_string(), - )); - } - Err(e) => { - return Err(warp_utils::reject::custom_server_error(format!( - "Unable to obtain 
LightClientBootstrap instance: {e:?}" - ))); - } - }; - - match accept_header { - Some(api_types::Accept::Ssz) => Response::builder() - .status(200) - .body(bootstrap.as_ssz_bytes().into()) - .map(|res: Response| add_ssz_content_type_header(res)) - .map_err(|e| { - warp_utils::reject::custom_server_error(format!( - "failed to create response: {}", - e - )) - }), - _ => Ok(warp::reply::json(&ForkVersionedResponse { - version: Some(fork_name), - metadata: EmptyMetadata {}, - data: bootstrap, - }) - .into_response()), - } - .map(|resp| add_consensus_version_header(resp, fork_name)) + get_light_client_bootstrap::(chain, &block_root, accept_header) }) }, ); diff --git a/beacon_node/http_api/src/light_client.rs b/beacon_node/http_api/src/light_client.rs index a6543114b8..ac8c08581c 100644 --- a/beacon_node/http_api/src/light_client.rs +++ b/beacon_node/http_api/src/light_client.rs @@ -1,18 +1,20 @@ -use beacon_chain::{BeaconChain, BeaconChainTypes}; +use crate::version::{ + add_consensus_version_header, add_ssz_content_type_header, fork_versioned_response, V1, +}; +use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; use eth2::types::{ self as api_types, ChainSpec, ForkVersionedResponse, LightClientUpdate, LightClientUpdateResponseChunk, LightClientUpdateSszResponse, LightClientUpdatesQuery, }; use ssz::Encode; use std::sync::Arc; +use types::{ForkName, Hash256, LightClientBootstrap}; use warp::{ hyper::{Body, Response}, reply::Reply, Rejection, }; -use crate::version::{add_ssz_content_type_header, fork_versioned_response, V1}; - const MAX_REQUEST_LIGHT_CLIENT_UPDATES: u64 = 128; pub fn get_light_client_updates( @@ -62,6 +64,45 @@ pub fn get_light_client_updates( } } +pub fn get_light_client_bootstrap( + chain: Arc>, + block_root: &Hash256, + accept_header: Option, +) -> Result, Rejection> { + let (light_client_bootstrap, fork_name) = chain + .get_light_client_bootstrap(block_root) + .map_err(|err| { + let error_message = if let 
BeaconChainError::LightClientBootstrapError(err) = err { + println!("{:?}", err); + err + } else { + "No LightClientBootstrap found".to_string() + }; + warp_utils::reject::custom_not_found(error_message) + })? + .ok_or(warp_utils::reject::custom_not_found( + "No LightClientBootstrap found".to_string(), + ))?; + + match accept_header { + Some(api_types::Accept::Ssz) => Response::builder() + .status(200) + .body(light_client_bootstrap.as_ssz_bytes().into()) + .map(|res: Response| add_consensus_version_header(res, fork_name)) + .map(|res: Response| add_ssz_content_type_header(res)) + .map_err(|e| { + warp_utils::reject::custom_server_error(format!("failed to create response: {}", e)) + }), + _ => { + let fork_versioned_response = map_light_client_bootstrap_to_json_response::( + fork_name, + light_client_bootstrap, + )?; + Ok(warp::reply::json(&fork_versioned_response).into_response()) + } + } +} + pub fn validate_light_client_updates_request( chain: &BeaconChain, query: &LightClientUpdatesQuery, @@ -131,6 +172,13 @@ fn map_light_client_update_to_ssz_chunk( } } +fn map_light_client_bootstrap_to_json_response( + fork_name: ForkName, + light_client_bootstrap: LightClientBootstrap, +) -> Result>, Rejection> { + fork_versioned_response(V1, fork_name, light_client_bootstrap) +} + fn map_light_client_update_to_json_response( chain: &BeaconChain, light_client_update: LightClientUpdate, diff --git a/beacon_node/http_api/tests/interactive_tests.rs b/beacon_node/http_api/tests/interactive_tests.rs index 9ff411cf1c..5034492e25 100644 --- a/beacon_node/http_api/tests/interactive_tests.rs +++ b/beacon_node/http_api/tests/interactive_tests.rs @@ -1,7 +1,7 @@ //! 
Generic tests that make use of the (newer) `InteractiveApiTester` use beacon_chain::{ chain_config::{DisallowedReOrgOffsets, ReOrgThreshold}, - test_utils::{AttestationStrategy, BlockStrategy, SyncCommitteeStrategy}, + test_utils::{AttestationStrategy, BlockStrategy, LightClientStrategy, SyncCommitteeStrategy}, ChainConfig, }; use beacon_processor::work_reprocessing_queue::ReprocessQueueMessage; @@ -88,6 +88,7 @@ async fn state_by_root_pruned_from_fork_choice() { BlockStrategy::OnCanonicalHead, AttestationStrategy::AllValidators, SyncCommitteeStrategy::NoValidators, + LightClientStrategy::Disabled, ) .await; @@ -469,6 +470,7 @@ pub async fn proposer_boost_re_org_test( BlockStrategy::OnCanonicalHead, AttestationStrategy::AllValidators, SyncCommitteeStrategy::AllValidators, + LightClientStrategy::Disabled, ) .await; diff --git a/beacon_node/http_api/tests/status_tests.rs b/beacon_node/http_api/tests/status_tests.rs index 8f96299530..01731530d3 100644 --- a/beacon_node/http_api/tests/status_tests.rs +++ b/beacon_node/http_api/tests/status_tests.rs @@ -1,6 +1,6 @@ //! 
Tests related to the beacon node's sync status use beacon_chain::{ - test_utils::{AttestationStrategy, BlockStrategy, SyncCommitteeStrategy}, + test_utils::{AttestationStrategy, BlockStrategy, LightClientStrategy, SyncCommitteeStrategy}, BlockError, }; use eth2::StatusCode; @@ -37,6 +37,7 @@ async fn post_merge_tester(chain_depth: u64, validator_count: u64) -> Interactiv BlockStrategy::OnCanonicalHead, AttestationStrategy::AllValidators, SyncCommitteeStrategy::AllValidators, + LightClientStrategy::Disabled, ) .await; tester diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 6e6f72b6c0..19a01a91c5 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -153,7 +153,7 @@ impl ApiTester { if !SKIPPED_SLOTS.contains(&slot) { harness - .extend_chain( + .extend_chain_with_light_client_data( 1, BlockStrategy::OnCanonicalHead, AttestationStrategy::AllValidators, @@ -1926,6 +1926,7 @@ impl ApiTester { ) .unwrap(); + assert_eq!(1, expected.len()); assert_eq!(result.clone().unwrap().len(), expected.len()); self } @@ -1933,19 +1934,26 @@ impl ApiTester { pub async fn test_get_beacon_light_client_bootstrap(self) -> Self { let block_id = BlockId(CoreBlockId::Finalized); let (block_root, _, _) = block_id.root(&self.chain).unwrap(); - let (block, _, _) = block_id.full_block(&self.chain).await.unwrap(); let result = match self .client .get_light_client_bootstrap::(block_root) .await { - Ok(result) => result.unwrap().data, + Ok(result) => result, Err(e) => panic!("query failed incorrectly: {e:?}"), }; - let expected = block.slot(); - assert_eq!(result.get_slot(), expected); + assert!(result.is_some()); + + let expected = self + .chain + .light_client_server_cache + .get_light_client_bootstrap(&self.chain.store, &block_root, 1u64, &self.chain.spec); + + assert!(expected.is_ok()); + + assert_eq!(result.unwrap().data, expected.unwrap().unwrap().0); self } diff --git 
a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs index dde6f2e313..0c98f5c17e 100644 --- a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs @@ -394,7 +394,7 @@ impl NetworkBeaconProcessor { Ok(Some((bootstrap, _))) => Ok(Arc::new(bootstrap)), Ok(None) => Err(( RPCResponseErrorCode::ResourceUnavailable, - "Bootstrap not available", + "Bootstrap not available".to_string(), )), Err(e) => { error!(self.log, "Error getting LightClientBootstrap instance"; @@ -404,7 +404,7 @@ impl NetworkBeaconProcessor { ); Err(( RPCResponseErrorCode::ResourceUnavailable, - "Bootstrap not available", + format!("{:?}", e), )) } }, @@ -429,7 +429,7 @@ impl NetworkBeaconProcessor { Some(update) => Ok(Arc::new(update)), None => Err(( RPCResponseErrorCode::ResourceUnavailable, - "Latest optimistic update not available", + "Latest optimistic update not available".to_string(), )), }, Response::LightClientOptimisticUpdate, @@ -453,7 +453,7 @@ impl NetworkBeaconProcessor { Some(update) => Ok(Arc::new(update)), None => Err(( RPCResponseErrorCode::ResourceUnavailable, - "Latest finality update not available", + "Latest finality update not available".to_string(), )), }, Response::LightClientFinalityUpdate, @@ -1081,7 +1081,7 @@ impl NetworkBeaconProcessor { &self, peer_id: PeerId, request_id: PeerRequestId, - result: Result, + result: Result, into_response: F, ) { match result { @@ -1096,7 +1096,7 @@ impl NetworkBeaconProcessor { }); } Err((error_code, reason)) => { - self.send_error_response(peer_id, error_code, reason.into(), request_id); + self.send_error_response(peer_id, error_code, reason, request_id); } } } diff --git a/beacon_node/store/Cargo.toml b/beacon_node/store/Cargo.toml index b26eb2bb91..cdb18b3b9c 100644 --- a/beacon_node/store/Cargo.toml +++ b/beacon_node/store/Cargo.toml @@ -16,6 +16,7 @@ itertools = { workspace = 
true } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } types = { workspace = true } +safe_arith = { workspace = true } state_processing = { workspace = true } slog = { workspace = true } serde = { workspace = true } diff --git a/beacon_node/store/src/errors.rs b/beacon_node/store/src/errors.rs index e3b2d327b0..c543a9c4e4 100644 --- a/beacon_node/store/src/errors.rs +++ b/beacon_node/store/src/errors.rs @@ -59,6 +59,7 @@ pub enum Error { state_root: Hash256, slot: Slot, }, + ArithError(safe_arith::ArithError), } pub trait HandleUnavailable { @@ -129,6 +130,12 @@ impl From for Error { } } +impl From for Error { + fn from(e: safe_arith::ArithError) -> Error { + Error::ArithError(e) + } +} + #[derive(Debug)] pub struct DBError { pub message: String, diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index a53b697ea7..bd87cdcfee 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -27,6 +27,7 @@ use itertools::process_results; use leveldb::iterator::LevelDBIterator; use lru::LruCache; use parking_lot::{Mutex, RwLock}; +use safe_arith::SafeArith; use serde::{Deserialize, Serialize}; use slog::{debug, error, info, trace, warn, Logger}; use ssz::{Decode, Encode}; @@ -36,13 +37,14 @@ use state_processing::{ SlotProcessingError, }; use std::cmp::min; -use std::collections::HashMap; +use std::collections::{HashMap, HashSet}; use std::marker::PhantomData; use std::num::NonZeroUsize; use std::path::Path; use std::sync::Arc; use std::time::Duration; use types::data_column_sidecar::{ColumnIndex, DataColumnSidecar, DataColumnSidecarList}; +use types::light_client_update::CurrentSyncCommitteeProofLen; use types::*; /// On-disk database that stores finalized states efficiently. 
@@ -634,6 +636,143 @@ impl, Cold: ItemStore> HotColdDB .map(|payload| payload.is_some()) } + /// Get the sync committee branch for the given block root + /// Note: we only persist sync committee branches for checkpoint slots + pub fn get_sync_committee_branch( + &self, + block_root: &Hash256, + ) -> Result>, Error> { + let column = DBColumn::SyncCommitteeBranch; + + if let Some(bytes) = self + .hot_db + .get_bytes(column.into(), &block_root.as_ssz_bytes())? + { + let sync_committee_branch: FixedVector = + FixedVector::from_ssz_bytes(&bytes)?; + return Ok(Some(sync_committee_branch)); + } + + Ok(None) + } + + /// Fetch sync committee by sync committee period + pub fn get_sync_committee( + &self, + sync_committee_period: u64, + ) -> Result>, Error> { + let column = DBColumn::SyncCommittee; + + if let Some(bytes) = self + .hot_db + .get_bytes(column.into(), &sync_committee_period.as_ssz_bytes())? + { + let sync_committee: SyncCommittee = SyncCommittee::from_ssz_bytes(&bytes)?; + return Ok(Some(sync_committee)); + } + + Ok(None) + } + + pub fn store_sync_committee_branch( + &self, + block_root: Hash256, + sync_committee_branch: &FixedVector, + ) -> Result<(), Error> { + let column = DBColumn::SyncCommitteeBranch; + self.hot_db.put_bytes( + column.into(), + &block_root.as_ssz_bytes(), + &sync_committee_branch.as_ssz_bytes(), + )?; + Ok(()) + } + + pub fn store_sync_committee( + &self, + sync_committee_period: u64, + sync_committee: &SyncCommittee, + ) -> Result<(), Error> { + let column = DBColumn::SyncCommittee; + self.hot_db.put_bytes( + column.into(), + &sync_committee_period.to_le_bytes(), + &sync_committee.as_ssz_bytes(), + )?; + + Ok(()) + } + + pub fn get_light_client_update( + &self, + sync_committee_period: u64, + ) -> Result>, Error> { + let column = DBColumn::LightClientUpdate; + let res = self + .hot_db + .get_bytes(column.into(), &sync_committee_period.to_le_bytes())?; + + if let Some(light_client_update_bytes) = res { + let epoch = sync_committee_period + 
.safe_mul(self.spec.epochs_per_sync_committee_period.into())?; + + let fork_name = self.spec.fork_name_at_epoch(epoch.into()); + + let light_client_update = + LightClientUpdate::from_ssz_bytes(&light_client_update_bytes, &fork_name)?; + + return Ok(Some(light_client_update)); + } + + Ok(None) + } + + pub fn get_light_client_updates( + &self, + start_period: u64, + count: u64, + ) -> Result>, Error> { + let column = DBColumn::LightClientUpdate; + let mut light_client_updates = vec![]; + for res in self + .hot_db + .iter_column_from::>(column, &start_period.to_le_bytes()) + { + let (sync_committee_bytes, light_client_update_bytes) = res?; + let sync_committee_period = u64::from_ssz_bytes(&sync_committee_bytes)?; + let epoch = sync_committee_period + .safe_mul(self.spec.epochs_per_sync_committee_period.into())?; + + let fork_name = self.spec.fork_name_at_epoch(epoch.into()); + + let light_client_update = + LightClientUpdate::from_ssz_bytes(&light_client_update_bytes, &fork_name)?; + + light_client_updates.push(light_client_update); + + if sync_committee_period >= start_period + count { + break; + } + } + Ok(light_client_updates) + } + + pub fn store_light_client_update( + &self, + sync_committee_period: u64, + light_client_update: &LightClientUpdate, + ) -> Result<(), Error> { + let column = DBColumn::LightClientUpdate; + + self.hot_db.put_bytes( + column.into(), + &sync_committee_period.to_le_bytes(), + &light_client_update.as_ssz_bytes(), + )?; + + Ok(()) + } + /// Check if the blobs for a block exists on disk. 
pub fn blobs_exist(&self, block_root: &Hash256) -> Result { self.blobs_db @@ -1037,6 +1176,14 @@ impl, Cold: ItemStore> HotColdDB key_value_batch.push(KeyValueStoreOp::DeleteKey(key)); } + StoreOp::DeleteSyncCommitteeBranch(block_root) => { + let key = get_key_for_col( + DBColumn::SyncCommitteeBranch.into(), + block_root.as_slice(), + ); + key_value_batch.push(KeyValueStoreOp::DeleteKey(key)); + } + StoreOp::KeyValueOp(kv_op) => { key_value_batch.push(kv_op); } @@ -1182,6 +1329,8 @@ impl, Cold: ItemStore> HotColdDB StoreOp::DeleteExecutionPayload(_) => (), + StoreOp::DeleteSyncCommitteeBranch(_) => (), + StoreOp::KeyValueOp(_) => (), } } @@ -2816,12 +2965,16 @@ pub fn migrate_database, Cold: ItemStore>( .into()); } + // finalized_state.slot() must be at an epoch boundary + // else we may introduce bugs to the migration/pruning logic if finalized_state.slot() % E::slots_per_epoch() != 0 { return Err(HotColdDBError::FreezeSlotUnaligned(finalized_state.slot()).into()); } let mut hot_db_ops = vec![]; let mut cold_db_ops = vec![]; + let mut epoch_boundary_blocks = HashSet::new(); + let mut non_checkpoint_block_roots = HashSet::new(); // Chunk writer for the linear block roots in the freezer DB. // Start at the new upper limit because we iterate backwards. @@ -2849,6 +3002,22 @@ pub fn migrate_database, Cold: ItemStore>( hot_db_ops.push(StoreOp::DeleteExecutionPayload(block_root)); } + // At a missed slot, `state_root_iter` will return the block root + // from the previous non-missed slot. This ensures that the block root at an + // epoch boundary is always a checkpoint block root. We keep track of block roots + // at epoch boundaries by storing them in the `epoch_boundary_blocks` hash set. + // We then ensure that block roots at the epoch boundary aren't included in the + // `non_checkpoint_block_roots` hash set. 
+ if slot % E::slots_per_epoch() == 0 { + epoch_boundary_blocks.insert(block_root); + } else { + non_checkpoint_block_roots.insert(block_root); + } + + if epoch_boundary_blocks.contains(&block_root) { + non_checkpoint_block_roots.remove(&block_root); + } + // Delete the old summary, and the full state if we lie on an epoch boundary. hot_db_ops.push(StoreOp::DeleteState(state_root, Some(slot))); @@ -2888,6 +3057,19 @@ pub fn migrate_database, Cold: ItemStore>( } } + // Prune sync committee branch data for all non checkpoint block roots. + // Note that `non_checkpoint_block_roots` should only contain non checkpoint block roots + // as long as `finalized_state.slot()` is at an epoch boundary. If this were not the case + // we risk the chance of pruning a `sync_committee_branch` for a checkpoint block root. + // E.g. if `current_split_slot` = (Epoch A slot 0) and `finalized_state.slot()` = (Epoch C slot 31) + // and (Epoch D slot 0) is a skipped slot, we will have pruned a `sync_committee_branch` + // for a checkpoint block root. + non_checkpoint_block_roots + .into_iter() + .for_each(|block_root| { + hot_db_ops.push(StoreOp::DeleteSyncCommitteeBranch(block_root)); + }); + // Finish writing the block roots and commit the remaining cold DB ops. block_root_writer.write(&mut cold_db_ops)?; store.cold_db.do_atomically(cold_db_ops)?; @@ -2904,7 +3086,6 @@ pub fn migrate_database, Cold: ItemStore>( // Flush to disk all the states that have just been migrated to the cold store. 
store.cold_db.sync()?; - { let mut split_guard = store.split.write(); let latest_split_slot = split_guard.slot; diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs index 60dddeb176..1d02bfbb3c 100644 --- a/beacon_node/store/src/lib.rs +++ b/beacon_node/store/src/lib.rs @@ -241,6 +241,7 @@ pub enum StoreOp<'a, E: EthSpec> { DeleteDataColumns(Hash256, Vec), DeleteState(Hash256, Option), DeleteExecutionPayload(Hash256), + DeleteSyncCommitteeBranch(Hash256), KeyValueOp(KeyValueStoreOp), } @@ -303,6 +304,12 @@ pub enum DBColumn { /// For persisting eagerly computed light client data #[strum(serialize = "lcu")] LightClientUpdate, + /// For helping persist eagerly computed light client bootstrap data + #[strum(serialize = "scb")] + SyncCommitteeBranch, + /// For helping persist eagerly computed light client bootstrap data + #[strum(serialize = "scm")] + SyncCommittee, } /// A block from the database, which might have an execution payload or not. @@ -346,6 +353,8 @@ impl DBColumn { | Self::BeaconHistoricalRoots | Self::BeaconHistoricalSummaries | Self::BeaconRandaoMixes + | Self::SyncCommittee + | Self::SyncCommitteeBranch | Self::LightClientUpdate => 8, Self::BeaconDataColumn => DATA_COLUMN_DB_KEY_SIZE, } diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index afc64e86a8..281a84d859 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -197,7 +197,7 @@ pub use crate::light_client_optimistic_update::{ LightClientOptimisticUpdateElectra, }; pub use crate::light_client_update::{ - Error as LightClientError, LightClientUpdate, LightClientUpdateAltair, + Error as LightClientUpdateError, LightClientUpdate, LightClientUpdateAltair, LightClientUpdateCapella, LightClientUpdateDeneb, LightClientUpdateElectra, }; pub use crate::participation_flags::ParticipationFlags; diff --git a/consensus/types/src/light_client_bootstrap.rs b/consensus/types/src/light_client_bootstrap.rs index f06a94adce..7c716e6bb2 100644 --- 
a/consensus/types/src/light_client_bootstrap.rs +++ b/consensus/types/src/light_client_bootstrap.rs @@ -112,6 +112,42 @@ impl LightClientBootstrap { fixed_len + LightClientHeader::::ssz_max_var_len_for_fork(fork_name) } + pub fn new( + block: &SignedBlindedBeaconBlock, + current_sync_committee: Arc>, + current_sync_committee_branch: FixedVector, + chain_spec: &ChainSpec, + ) -> Result { + let light_client_bootstrap = match block + .fork_name(chain_spec) + .map_err(|_| Error::InconsistentFork)? + { + ForkName::Base => return Err(Error::AltairForkNotActive), + ForkName::Altair | ForkName::Bellatrix => Self::Altair(LightClientBootstrapAltair { + header: LightClientHeaderAltair::block_to_light_client_header(block)?, + current_sync_committee, + current_sync_committee_branch, + }), + ForkName::Capella => Self::Capella(LightClientBootstrapCapella { + header: LightClientHeaderCapella::block_to_light_client_header(block)?, + current_sync_committee, + current_sync_committee_branch, + }), + ForkName::Deneb => Self::Deneb(LightClientBootstrapDeneb { + header: LightClientHeaderDeneb::block_to_light_client_header(block)?, + current_sync_committee, + current_sync_committee_branch, + }), + ForkName::Electra => Self::Electra(LightClientBootstrapElectra { + header: LightClientHeaderElectra::block_to_light_client_header(block)?, + current_sync_committee, + current_sync_committee_branch, + }), + }; + + Ok(light_client_bootstrap) + } + pub fn from_beacon_state( beacon_state: &mut BeaconState, block: &SignedBlindedBeaconBlock, From e5a40fb73bd725ffbd1af5a212c837941e30723d Mon Sep 17 00:00:00 2001 From: Age Manning Date: Thu, 12 Sep 2024 10:26:20 +1000 Subject: [PATCH 16/66] Upgrade to discv5 0.7.0 (#6385) * Upgrade to discv5 v0.7.0 --- Cargo.lock | 251 ++++-------------- Cargo.toml | 2 +- .../lighthouse_network/src/discovery/enr.rs | 33 ++- .../lighthouse_network/src/discovery/mod.rs | 5 +- beacon_node/network/Cargo.toml | 2 +- beacon_node/network/src/persisted_dht.rs | 19 +- 
boot_node/src/server.rs | 7 +- 7 files changed, 84 insertions(+), 235 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1d243d9554..94eb903844 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -84,15 +84,6 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aae1277d39aeec15cb388266ecc24b11c80469deae6067e17a1a7aa9e5c1f234" -[[package]] -name = "aead" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b613b8e1e3cf911a086f53f03bf286f52fd7a7258e4fa606f0ef220d39d8877" -dependencies = [ - "generic-array", -] - [[package]] name = "aead" version = "0.5.2" @@ -127,31 +118,17 @@ dependencies = [ "cpufeatures", ] -[[package]] -name = "aes-gcm" -version = "0.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc3be92e19a7ef47457b8e6f90707e12b6ac5d20c6f3866584fa3be0787d839f" -dependencies = [ - "aead 0.4.3", - "aes 0.7.5", - "cipher 0.3.0", - "ctr 0.7.0", - "ghash 0.4.4", - "subtle", -] - [[package]] name = "aes-gcm" version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "831010a0f742e1209b3bcea8fab6a8e149051ba6099432c8cb2cc117dec3ead1" dependencies = [ - "aead 0.5.2", + "aead", "aes 0.8.4", "cipher 0.4.4", "ctr 0.9.2", - "ghash 0.5.1", + "ghash", "subtle", ] @@ -1277,7 +1254,7 @@ version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "10cd79432192d1c0f4e1a0fef9527696cc039165d729fb41b3f4f4f354c2dc35" dependencies = [ - "aead 0.5.2", + "aead", "chacha20", "cipher 0.4.4", "poly1305", @@ -1748,15 +1725,6 @@ dependencies = [ "subtle", ] -[[package]] -name = "ctr" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a232f92a03f37dd7d7dd2adc67166c77e9cd88de5b019b9a9eecfaeaf7bfd481" -dependencies = [ - "cipher 0.3.0", -] - [[package]] name = "ctr" version = "0.8.0" @@ -2200,13 +2168,15 @@ dependencies = [ [[package]] name = "discv5" 
-version = "0.4.1" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bac33cb3f99889a57e56a8c6ccb77aaf0cfc7787602b7af09783f736d77314e1" +checksum = "f569b8c367554666c8652305621e8bae3634a2ff5c6378081d5bd8c399c99f23" dependencies = [ - "aes 0.7.5", - "aes-gcm 0.9.2", + "aes 0.8.4", + "aes-gcm", + "alloy-rlp", "arrayvec", + "ctr 0.9.2", "delay_map", "enr", "fnv", @@ -2215,12 +2185,12 @@ dependencies = [ "hex", "hkdf", "lazy_static", - "libp2p 0.53.2", + "libp2p-identity", "lru", "more-asserts", + "multiaddr", "parking_lot 0.11.2", "rand", - "rlp", "smallvec", "socket2 0.4.10", "tokio", @@ -2405,10 +2375,11 @@ dependencies = [ [[package]] name = "enr" -version = "0.10.0" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a3d8dc56e02f954cac8eb489772c552c473346fc34f67412bb6244fd647f7e4" +checksum = "972070166c68827e64bd1ebc8159dd8e32d9bc2da7ebe8f20b61308f7974ad30" dependencies = [ + "alloy-rlp", "base64 0.21.7", "bytes", "ed25519-dalek", @@ -2416,7 +2387,6 @@ dependencies = [ "k256 0.13.3", "log", "rand", - "rlp", "serde", "sha3 0.10.8", "zeroize", @@ -3454,16 +3424,6 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "ghash" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1583cc1656d7839fd3732b80cf4f38850336cdb9b8ded1cd399ca62958de3c99" -dependencies = [ - "opaque-debug", - "polyval 0.5.3", -] - [[package]] name = "ghash" version = "0.5.1" @@ -3471,7 +3431,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0d8a4362ccb29cb0b265253fb0a2728f592895ee6854fd9bc13f2ffda266ff1" dependencies = [ "opaque-debug", - "polyval 0.6.2", + "polyval", ] [[package]] @@ -3523,7 +3483,7 @@ dependencies = [ "getrandom", "hashlink 0.9.1", "hex_fmt", - "libp2p 0.54.1", + "libp2p", "prometheus-client", "quick-protobuf", "quick-protobuf-codec", @@ -4599,29 +4559,6 @@ dependencies = [ "thiserror", ] -[[package]] 
-name = "libp2p" -version = "0.53.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "681fb3f183edfbedd7a57d32ebe5dcdc0b9f94061185acf3c30249349cc6fc99" -dependencies = [ - "bytes", - "either", - "futures", - "futures-timer", - "getrandom", - "instant", - "libp2p-allow-block-list 0.3.0", - "libp2p-connection-limits 0.3.1", - "libp2p-core 0.41.3", - "libp2p-identity", - "libp2p-swarm 0.44.2", - "multiaddr", - "pin-project", - "rw-stream-sink", - "thiserror", -] - [[package]] name = "libp2p" version = "0.54.1" @@ -4633,9 +4570,9 @@ dependencies = [ "futures", "futures-timer", "getrandom", - "libp2p-allow-block-list 0.4.0", - "libp2p-connection-limits 0.4.0", - "libp2p-core 0.42.0", + "libp2p-allow-block-list", + "libp2p-connection-limits", + "libp2p-core", "libp2p-dns", "libp2p-identify", "libp2p-identity", @@ -4644,7 +4581,7 @@ dependencies = [ "libp2p-noise", "libp2p-plaintext", "libp2p-quic", - "libp2p-swarm 0.45.1", + "libp2p-swarm", "libp2p-tcp", "libp2p-upnp", "libp2p-yamux", @@ -4654,39 +4591,15 @@ dependencies = [ "thiserror", ] -[[package]] -name = "libp2p-allow-block-list" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "107b238b794cb83ab53b74ad5dcf7cca3200899b72fe662840cfb52f5b0a32e6" -dependencies = [ - "libp2p-core 0.41.3", - "libp2p-identity", - "libp2p-swarm 0.44.2", - "void", -] - [[package]] name = "libp2p-allow-block-list" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d1027ccf8d70320ed77e984f273bc8ce952f623762cb9bf2d126df73caef8041" dependencies = [ - "libp2p-core 0.42.0", + "libp2p-core", "libp2p-identity", - "libp2p-swarm 0.45.1", - "void", -] - -[[package]] -name = "libp2p-connection-limits" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7cd50a78ccfada14de94cbacd3ce4b0138157f376870f13d3a8422cd075b4fd" -dependencies = [ - "libp2p-core 0.41.3", - "libp2p-identity", - 
"libp2p-swarm 0.44.2", + "libp2p-swarm", "void", ] @@ -4696,40 +4609,12 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8d003540ee8baef0d254f7b6bfd79bac3ddf774662ca0abf69186d517ef82ad8" dependencies = [ - "libp2p-core 0.42.0", + "libp2p-core", "libp2p-identity", - "libp2p-swarm 0.45.1", + "libp2p-swarm", "void", ] -[[package]] -name = "libp2p-core" -version = "0.41.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5a8920cbd8540059a01950c1e5c96ea8d89eb50c51cd366fc18bdf540a6e48f" -dependencies = [ - "either", - "fnv", - "futures", - "futures-timer", - "libp2p-identity", - "multiaddr", - "multihash", - "multistream-select", - "once_cell", - "parking_lot 0.12.3", - "pin-project", - "quick-protobuf", - "rand", - "rw-stream-sink", - "smallvec", - "thiserror", - "tracing", - "unsigned-varint 0.8.0", - "void", - "web-time", -] - [[package]] name = "libp2p-core" version = "0.42.0" @@ -4767,7 +4652,7 @@ dependencies = [ "async-trait", "futures", "hickory-resolver", - "libp2p-core 0.42.0", + "libp2p-core", "libp2p-identity", "parking_lot 0.12.3", "smallvec", @@ -4785,9 +4670,9 @@ dependencies = [ "futures", "futures-bounded", "futures-timer", - "libp2p-core 0.42.0", + "libp2p-core", "libp2p-identity", - "libp2p-swarm 0.45.1", + "libp2p-swarm", "lru", "quick-protobuf", "quick-protobuf-codec", @@ -4830,9 +4715,9 @@ dependencies = [ "futures", "hickory-proto", "if-watch", - "libp2p-core 0.42.0", + "libp2p-core", "libp2p-identity", - "libp2p-swarm 0.45.1", + "libp2p-swarm", "rand", "smallvec", "socket2 0.5.7", @@ -4848,10 +4733,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77ebafa94a717c8442d8db8d3ae5d1c6a15e30f2d347e0cd31d057ca72e42566" dependencies = [ "futures", - "libp2p-core 0.42.0", + "libp2p-core", "libp2p-identify", "libp2p-identity", - "libp2p-swarm 0.45.1", + "libp2p-swarm", "pin-project", "prometheus-client", "web-time", @@ -4866,7 +4751,7 @@ 
dependencies = [ "asynchronous-codec", "bytes", "futures", - "libp2p-core 0.42.0", + "libp2p-core", "libp2p-identity", "nohash-hasher", "parking_lot 0.12.3", @@ -4886,7 +4771,7 @@ dependencies = [ "bytes", "curve25519-dalek", "futures", - "libp2p-core 0.42.0", + "libp2p-core", "libp2p-identity", "multiaddr", "multihash", @@ -4911,7 +4796,7 @@ dependencies = [ "asynchronous-codec", "bytes", "futures", - "libp2p-core 0.42.0", + "libp2p-core", "libp2p-identity", "quick-protobuf", "quick-protobuf-codec", @@ -4928,7 +4813,7 @@ dependencies = [ "futures", "futures-timer", "if-watch", - "libp2p-core 0.42.0", + "libp2p-core", "libp2p-identity", "libp2p-tls", "parking_lot 0.12.3", @@ -4942,28 +4827,6 @@ dependencies = [ "tracing", ] -[[package]] -name = "libp2p-swarm" -version = "0.44.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80cae6cb75f89dbca53862f9ebe0b9f463aa7b302762fcfaafb9e51dcc9b0f7e" -dependencies = [ - "either", - "fnv", - "futures", - "futures-timer", - "instant", - "libp2p-core 0.41.3", - "libp2p-identity", - "lru", - "multistream-select", - "once_cell", - "rand", - "smallvec", - "tracing", - "void", -] - [[package]] name = "libp2p-swarm" version = "0.45.1" @@ -4974,7 +4837,7 @@ dependencies = [ "fnv", "futures", "futures-timer", - "libp2p-core 0.42.0", + "libp2p-core", "libp2p-identity", "libp2p-swarm-derive", "lru", @@ -5010,7 +4873,7 @@ dependencies = [ "futures-timer", "if-watch", "libc", - "libp2p-core 0.42.0", + "libp2p-core", "libp2p-identity", "socket2 0.5.7", "tokio", @@ -5025,7 +4888,7 @@ checksum = "47b23dddc2b9c355f73c1e36eb0c3ae86f7dc964a3715f0731cfad352db4d847" dependencies = [ "futures", "futures-rustls", - "libp2p-core 0.42.0", + "libp2p-core", "libp2p-identity", "rcgen", "ring 0.17.8", @@ -5045,8 +4908,8 @@ dependencies = [ "futures", "futures-timer", "igd-next", - "libp2p-core 0.42.0", - "libp2p-swarm 0.45.1", + "libp2p-core", + "libp2p-swarm", "tokio", "tracing", "void", @@ -5060,7 +4923,7 @@ checksum = 
"788b61c80789dba9760d8c669a5bedb642c8267555c803fabd8396e4ca5c5882" dependencies = [ "either", "futures", - "libp2p-core 0.42.0", + "libp2p-core", "thiserror", "tracing", "yamux 0.12.1", @@ -5215,7 +5078,7 @@ dependencies = [ "gossipsub", "hex", "itertools 0.10.5", - "libp2p 0.54.1", + "libp2p", "libp2p-mplex", "lighthouse_metrics", "lighthouse_version", @@ -5767,6 +5630,7 @@ name = "network" version = "0.2.0" dependencies = [ "alloy-primitives", + "alloy-rlp", "anyhow", "async-channel", "beacon_chain", @@ -5792,7 +5656,6 @@ dependencies = [ "operation_pool", "parking_lot 0.12.3", "rand", - "rlp", "slog", "slog-async", "slog-term", @@ -6450,19 +6313,7 @@ checksum = "8159bd90725d2df49889a078b54f4f79e87f1f8a8444194cdca81d38f5393abf" dependencies = [ "cpufeatures", "opaque-debug", - "universal-hash 0.5.1", -] - -[[package]] -name = "polyval" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8419d2b623c7c0896ff2d5d96e2cb4ede590fed28fcc34934f4c33c036e620a1" -dependencies = [ - "cfg-if", - "cpufeatures", - "opaque-debug", - "universal-hash 0.4.0", + "universal-hash", ] [[package]] @@ -6474,7 +6325,7 @@ dependencies = [ "cfg-if", "cpufeatures", "opaque-debug", - "universal-hash 0.5.1", + "universal-hash", ] [[package]] @@ -8116,7 +7967,7 @@ version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "850948bee068e713b8ab860fe1adc4d109676ab4c3b621fd8147f06b261f2f85" dependencies = [ - "aes-gcm 0.10.3", + "aes-gcm", "blake2", "chacha20poly1305", "curve25519-dalek", @@ -9205,16 +9056,6 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "229730647fbc343e3a80e463c1db7f78f3855d3f3739bee0dda773c9a037c90a" -[[package]] -name = "universal-hash" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8326b2c654932e3e4f9196e69d08fdf7cfd718e1dc6f66b347e6024a0c961402" -dependencies = [ - "generic-array", - "subtle", -] 
- [[package]] name = "universal-hash" version = "0.5.1" diff --git a/Cargo.toml b/Cargo.toml index 8c6aa308c1..125231ad20 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -116,7 +116,7 @@ derivative = "2" dirs = "3" either = "1.9" rust_eth_kzg = "0.5.1" -discv5 = { version = "0.4.1", features = ["libp2p"] } +discv5 = { version = "0.7", features = ["libp2p"] } env_logger = "0.9" error-chain = "0.12" ethereum_hashing = "0.7.0" diff --git a/beacon_node/lighthouse_network/src/discovery/enr.rs b/beacon_node/lighthouse_network/src/discovery/enr.rs index 73552e0197..6aa4e232d2 100644 --- a/beacon_node/lighthouse_network/src/discovery/enr.rs +++ b/beacon_node/lighthouse_network/src/discovery/enr.rs @@ -45,22 +45,24 @@ pub trait Eth2Enr { impl Eth2Enr for Enr { fn attestation_bitfield(&self) -> Result, &'static str> { - let bitfield_bytes = self - .get(ATTESTATION_BITFIELD_ENR_KEY) - .ok_or("ENR attestation bitfield non-existent")?; + let bitfield_bytes: Vec = self + .get_decodable(ATTESTATION_BITFIELD_ENR_KEY) + .ok_or("ENR attestation bitfield non-existent")? + .map_err(|_| "Invalid RLP Encoding")?; - BitVector::::from_ssz_bytes(bitfield_bytes) + BitVector::::from_ssz_bytes(&bitfield_bytes) .map_err(|_| "Could not decode the ENR attnets bitfield") } fn sync_committee_bitfield( &self, ) -> Result, &'static str> { - let bitfield_bytes = self - .get(SYNC_COMMITTEE_BITFIELD_ENR_KEY) - .ok_or("ENR sync committee bitfield non-existent")?; + let bitfield_bytes: Vec = self + .get_decodable(SYNC_COMMITTEE_BITFIELD_ENR_KEY) + .ok_or("ENR sync committee bitfield non-existent")? 
+ .map_err(|_| "Invalid RLP Encoding")?; - BitVector::::from_ssz_bytes(bitfield_bytes) + BitVector::::from_ssz_bytes(&bitfield_bytes) .map_err(|_| "Could not decode the ENR syncnets bitfield") } @@ -78,9 +80,12 @@ impl Eth2Enr for Enr { } fn eth2(&self) -> Result { - let eth2_bytes = self.get(ETH2_ENR_KEY).ok_or("ENR has no eth2 field")?; + let eth2_bytes: Vec = self + .get_decodable(ETH2_ENR_KEY) + .ok_or("ENR has no eth2 field")? + .map_err(|_| "Invalid RLP Encoding")?; - EnrForkId::from_ssz_bytes(eth2_bytes).map_err(|_| "Could not decode EnrForkId") + EnrForkId::from_ssz_bytes(ð2_bytes).map_err(|_| "Could not decode EnrForkId") } } @@ -270,16 +275,16 @@ fn compare_enr(local_enr: &Enr, disk_enr: &Enr) -> bool { && local_enr.quic4() == disk_enr.quic4() && local_enr.quic6() == disk_enr.quic6() // must match on the same fork - && local_enr.get(ETH2_ENR_KEY) == disk_enr.get(ETH2_ENR_KEY) + && local_enr.get_decodable::>(ETH2_ENR_KEY) == disk_enr.get_decodable(ETH2_ENR_KEY) // take preference over disk udp port if one is not specified && (local_enr.udp4().is_none() || local_enr.udp4() == disk_enr.udp4()) && (local_enr.udp6().is_none() || local_enr.udp6() == disk_enr.udp6()) // we need the ATTESTATION_BITFIELD_ENR_KEY and SYNC_COMMITTEE_BITFIELD_ENR_KEY and // PEERDAS_CUSTODY_SUBNET_COUNT_ENR_KEY key to match, otherwise we use a new ENR. This will // likely only be true for non-validating nodes. 
- && local_enr.get(ATTESTATION_BITFIELD_ENR_KEY) == disk_enr.get(ATTESTATION_BITFIELD_ENR_KEY) - && local_enr.get(SYNC_COMMITTEE_BITFIELD_ENR_KEY) == disk_enr.get(SYNC_COMMITTEE_BITFIELD_ENR_KEY) - && local_enr.get(PEERDAS_CUSTODY_SUBNET_COUNT_ENR_KEY) == disk_enr.get(PEERDAS_CUSTODY_SUBNET_COUNT_ENR_KEY) + && local_enr.get_decodable::>(ATTESTATION_BITFIELD_ENR_KEY) == disk_enr.get_decodable(ATTESTATION_BITFIELD_ENR_KEY) + && local_enr.get_decodable::>(SYNC_COMMITTEE_BITFIELD_ENR_KEY) == disk_enr.get_decodable(SYNC_COMMITTEE_BITFIELD_ENR_KEY) + && local_enr.get_decodable::>(PEERDAS_CUSTODY_SUBNET_COUNT_ENR_KEY) == disk_enr.get_decodable(PEERDAS_CUSTODY_SUBNET_COUNT_ENR_KEY) } /// Loads enr from the given directory diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index c92a8bd2b4..3356dd3cf7 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -1072,10 +1072,7 @@ impl NetworkBehaviour for Discovery { // NOTE: We assume libp2p itself can keep track of IP changes and we do // not inform it about IP changes found via discovery. } - discv5::Event::EnrAdded { .. } - | discv5::Event::TalkRequest(_) - | discv5::Event::NodeInserted { .. } - | discv5::Event::SessionEstablished { .. 
} => {} // Ignore all other discv5 server events + _ => {} // Ignore all other discv5 server events } } } diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index 192fdd644c..6a81eb33f0 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -33,7 +33,7 @@ tokio-stream = { workspace = true } smallvec = { workspace = true } rand = { workspace = true } fnv = { workspace = true } -rlp = "0.5.0" +alloy-rlp = { workspace = true } lighthouse_metrics = { workspace = true } logging = { workspace = true } task_executor = { workspace = true } diff --git a/beacon_node/network/src/persisted_dht.rs b/beacon_node/network/src/persisted_dht.rs index e1085c4f0c..522ff0536e 100644 --- a/beacon_node/network/src/persisted_dht.rs +++ b/beacon_node/network/src/persisted_dht.rs @@ -45,14 +45,23 @@ impl StoreItem for PersistedDht { } fn as_store_bytes(&self) -> Vec { - rlp::encode_list(&self.enrs).to_vec() + let mut buffer = Vec::::new(); + alloy_rlp::encode_list(&self.enrs, &mut buffer); + buffer } fn from_store_bytes(bytes: &[u8]) -> Result { - let rlp = rlp::Rlp::new(bytes); - let enrs: Vec = rlp - .as_list() - .map_err(|e| StoreError::RlpError(format!("{}", e)))?; + let mut enrs: Vec = Vec::new(); + let mut rlp = alloy_rlp::Rlp::new(bytes) + .map_err(|e| StoreError::RlpError(format!("Failed to decode RLP: {}", e)))?; + loop { + match rlp.get_next() { + Ok(Some(enr)) => enrs.push(enr), + Ok(None) => break, // No more list elements + Err(e) => return Err(StoreError::RlpError(format!("{}", e))), + } + } + Ok(PersistedDht { enrs }) } } diff --git a/boot_node/src/server.rs b/boot_node/src/server.rs index 286fa9e0f0..00738462e0 100644 --- a/boot_node/src/server.rs +++ b/boot_node/src/server.rs @@ -145,16 +145,13 @@ pub async fn run( Some(event) = event_stream.recv() => { match event { discv5::Event::Discovered(_enr) => { - // An ENR has bee obtained by the server + // An ENR has been obtained by the server // Ignore these events here } - 
discv5::Event::EnrAdded { .. } => {} // Ignore - discv5::Event::TalkRequest(_) => {} // Ignore - discv5::Event::NodeInserted { .. } => {} // Ignore discv5::Event::SocketUpdated(socket_addr) => { info!(log, "Advertised socket address updated"; "socket_addr" => %socket_addr); } - discv5::Event::SessionEstablished{ .. } => {} // Ignore + _ => {} // Ignore } } } From 351dd6cb801633418d8796c0b508af437124027a Mon Sep 17 00:00:00 2001 From: Akihito Nakano Date: Thu, 12 Sep 2024 15:29:26 +0900 Subject: [PATCH 17/66] Set custody subnets on tests (#6382) * Set custody subnets on tests * Enable sampling_with_retries test * Add another supernode to ensure the node can retry --- .../lighthouse_network/src/peer_manager/peerdb.rs | 13 ++++++++++++- beacon_node/network/src/sync/block_lookups/tests.rs | 3 ++- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs index f6b63e6de2..08d9e5209c 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs @@ -1,5 +1,5 @@ use crate::discovery::enr::PEERDAS_CUSTODY_SUBNET_COUNT_ENR_KEY; -use crate::discovery::CombinedKey; +use crate::discovery::{peer_id_to_node_id, CombinedKey}; use crate::{metrics, multiaddr::Multiaddr, types::Subnet, Enr, EnrExt, Gossipsub, PeerId}; use peer_info::{ConnectionDirection, PeerConnectionStatus, PeerInfo}; use rand::seq::SliceRandom; @@ -723,6 +723,17 @@ impl PeerDB { .map(|csc| csc.into()) .collect(); peer_info.set_custody_subnets(all_subnets); + } else { + let peer_info = self.peers.get_mut(&peer_id).expect("peer exists"); + let node_id = peer_id_to_node_id(&peer_id).expect("convert peer_id to node_id"); + let subnets = DataColumnSubnetId::compute_custody_subnets::( + node_id.raw(), + spec.custody_requirement, + spec, + ) + .expect("should compute custody subnets") + .collect(); + 
peer_info.set_custody_subnets(subnets); } peer_id diff --git a/beacon_node/network/src/sync/block_lookups/tests.rs b/beacon_node/network/src/sync/block_lookups/tests.rs index a8a7ad5849..5b4f17ac0d 100644 --- a/beacon_node/network/src/sync/block_lookups/tests.rs +++ b/beacon_node/network/src/sync/block_lookups/tests.rs @@ -1972,12 +1972,13 @@ fn sampling_happy_path() { } #[test] -#[ignore] // Ignoring due to flakiness https://github.com/sigp/lighthouse/issues/6319 fn sampling_with_retries() { let Some(mut r) = TestRig::test_setup_after_peerdas() else { return; }; r.new_connected_peers_for_peerdas(); + // Add another supernode to ensure that the node can retry. + r.new_connected_supernode_peer(); let (block, data_columns) = r.rand_block_and_data_columns(); let block_root = block.canonical_root(); r.trigger_sample_block(block_root, block.slot()); From e0ccadbae2493c474aeedaffb3ff335f893832f0 Mon Sep 17 00:00:00 2001 From: Lion - dapplion <35266934+dapplion@users.noreply.github.com> Date: Thu, 12 Sep 2024 08:29:34 +0200 Subject: [PATCH 18/66] Move sync active requests to own modules (#6272) * Move sync active requests to own modules * Merge branch 'unstable' into sync-requests-modules --- .../src/sync/network_context/requests.rs | 159 +----------------- .../network_context/requests/blobs_by_root.rs | 96 +++++++++++ .../requests/blocks_by_root.rs | 60 +++++++ 3 files changed, 161 insertions(+), 154 deletions(-) create mode 100644 beacon_node/network/src/sync/network_context/requests/blobs_by_root.rs create mode 100644 beacon_node/network/src/sync/network_context/requests/blocks_by_root.rs diff --git a/beacon_node/network/src/sync/network_context/requests.rs b/beacon_node/network/src/sync/network_context/requests.rs index 94eecff42d..0c2f59d143 100644 --- a/beacon_node/network/src/sync/network_context/requests.rs +++ b/beacon_node/network/src/sync/network_context/requests.rs @@ -1,18 +1,14 @@ -use beacon_chain::get_block_root; -use lighthouse_network::{ - 
rpc::{methods::BlobsByRootRequest, BlocksByRootRequest}, - PeerId, -}; -use std::sync::Arc; use strum::IntoStaticStr; -use types::{ - blob_sidecar::BlobIdentifier, BlobSidecar, ChainSpec, EthSpec, Hash256, SignedBeaconBlock, -}; +use types::Hash256; +pub use blobs_by_root::{ActiveBlobsByRootRequest, BlobsByRootSingleBlockRequest}; +pub use blocks_by_root::{ActiveBlocksByRootRequest, BlocksByRootSingleRequest}; pub use data_columns_by_root::{ ActiveDataColumnsByRootRequest, DataColumnsByRootSingleBlockRequest, }; +mod blobs_by_root; +mod blocks_by_root; mod data_columns_by_root; #[derive(Debug, PartialEq, Eq, IntoStaticStr)] @@ -25,148 +21,3 @@ pub enum LookupVerifyError { InvalidInclusionProof, DuplicateData, } - -pub struct ActiveBlocksByRootRequest { - request: BlocksByRootSingleRequest, - resolved: bool, - pub(crate) peer_id: PeerId, -} - -impl ActiveBlocksByRootRequest { - pub fn new(request: BlocksByRootSingleRequest, peer_id: PeerId) -> Self { - Self { - request, - resolved: false, - peer_id, - } - } - - /// Append a response to the single chunk request. If the chunk is valid, the request is - /// resolved immediately. 
- /// The active request SHOULD be dropped after `add_response` returns an error - pub fn add_response( - &mut self, - block: Arc>, - ) -> Result>, LookupVerifyError> { - if self.resolved { - return Err(LookupVerifyError::TooManyResponses); - } - - let block_root = get_block_root(&block); - if self.request.0 != block_root { - return Err(LookupVerifyError::UnrequestedBlockRoot(block_root)); - } - - // Valid data, blocks by root expects a single response - self.resolved = true; - Ok(block) - } - - pub fn terminate(self) -> Result<(), LookupVerifyError> { - if self.resolved { - Ok(()) - } else { - Err(LookupVerifyError::NoResponseReturned) - } - } -} - -#[derive(Debug, Copy, Clone)] -pub struct BlocksByRootSingleRequest(pub Hash256); - -impl BlocksByRootSingleRequest { - pub fn into_request(self, spec: &ChainSpec) -> BlocksByRootRequest { - BlocksByRootRequest::new(vec![self.0], spec) - } -} - -#[derive(Debug, Clone)] -pub struct BlobsByRootSingleBlockRequest { - pub block_root: Hash256, - pub indices: Vec, -} - -impl BlobsByRootSingleBlockRequest { - pub fn into_request(self, spec: &ChainSpec) -> BlobsByRootRequest { - BlobsByRootRequest::new( - self.indices - .into_iter() - .map(|index| BlobIdentifier { - block_root: self.block_root, - index, - }) - .collect(), - spec, - ) - } -} - -pub struct ActiveBlobsByRootRequest { - request: BlobsByRootSingleBlockRequest, - blobs: Vec>>, - resolved: bool, - pub(crate) peer_id: PeerId, -} - -impl ActiveBlobsByRootRequest { - pub fn new(request: BlobsByRootSingleBlockRequest, peer_id: PeerId) -> Self { - Self { - request, - blobs: vec![], - resolved: false, - peer_id, - } - } - - /// Appends a chunk to this multi-item request. If all expected chunks are received, this - /// method returns `Some`, resolving the request before the stream terminator. 
- /// The active request SHOULD be dropped after `add_response` returns an error - pub fn add_response( - &mut self, - blob: Arc>, - ) -> Result>>>, LookupVerifyError> { - if self.resolved { - return Err(LookupVerifyError::TooManyResponses); - } - - let block_root = blob.block_root(); - if self.request.block_root != block_root { - return Err(LookupVerifyError::UnrequestedBlockRoot(block_root)); - } - if !blob.verify_blob_sidecar_inclusion_proof() { - return Err(LookupVerifyError::InvalidInclusionProof); - } - if !self.request.indices.contains(&blob.index) { - return Err(LookupVerifyError::UnrequestedIndex(blob.index)); - } - if self.blobs.iter().any(|b| b.index == blob.index) { - return Err(LookupVerifyError::DuplicateData); - } - - self.blobs.push(blob); - if self.blobs.len() >= self.request.indices.len() { - // All expected chunks received, return result early - self.resolved = true; - Ok(Some(std::mem::take(&mut self.blobs))) - } else { - Ok(None) - } - } - - pub fn terminate(self) -> Result<(), LookupVerifyError> { - if self.resolved { - Ok(()) - } else { - Err(LookupVerifyError::NotEnoughResponsesReturned { - expected: self.request.indices.len(), - actual: self.blobs.len(), - }) - } - } - - /// Mark request as resolved (= has returned something downstream) while marking this status as - /// true for future calls. 
- pub fn resolve(&mut self) -> bool { - std::mem::replace(&mut self.resolved, true) - } -} diff --git a/beacon_node/network/src/sync/network_context/requests/blobs_by_root.rs b/beacon_node/network/src/sync/network_context/requests/blobs_by_root.rs new file mode 100644 index 0000000000..cb2b1a42ec --- /dev/null +++ b/beacon_node/network/src/sync/network_context/requests/blobs_by_root.rs @@ -0,0 +1,96 @@ +use lighthouse_network::{rpc::methods::BlobsByRootRequest, PeerId}; +use std::sync::Arc; +use types::{blob_sidecar::BlobIdentifier, BlobSidecar, ChainSpec, EthSpec, Hash256}; + +use super::LookupVerifyError; + +#[derive(Debug, Clone)] +pub struct BlobsByRootSingleBlockRequest { + pub block_root: Hash256, + pub indices: Vec, +} + +impl BlobsByRootSingleBlockRequest { + pub fn into_request(self, spec: &ChainSpec) -> BlobsByRootRequest { + BlobsByRootRequest::new( + self.indices + .into_iter() + .map(|index| BlobIdentifier { + block_root: self.block_root, + index, + }) + .collect(), + spec, + ) + } +} + +pub struct ActiveBlobsByRootRequest { + request: BlobsByRootSingleBlockRequest, + blobs: Vec>>, + resolved: bool, + pub(crate) peer_id: PeerId, +} + +impl ActiveBlobsByRootRequest { + pub fn new(request: BlobsByRootSingleBlockRequest, peer_id: PeerId) -> Self { + Self { + request, + blobs: vec![], + resolved: false, + peer_id, + } + } + + /// Appends a chunk to this multi-item request. If all expected chunks are received, this + /// method returns `Some`, resolving the request before the stream terminator. 
+ /// The active request SHOULD be dropped after `add_response` returns an error + pub fn add_response( + &mut self, + blob: Arc>, + ) -> Result>>>, LookupVerifyError> { + if self.resolved { + return Err(LookupVerifyError::TooManyResponses); + } + + let block_root = blob.block_root(); + if self.request.block_root != block_root { + return Err(LookupVerifyError::UnrequestedBlockRoot(block_root)); + } + if !blob.verify_blob_sidecar_inclusion_proof() { + return Err(LookupVerifyError::InvalidInclusionProof); + } + if !self.request.indices.contains(&blob.index) { + return Err(LookupVerifyError::UnrequestedIndex(blob.index)); + } + if self.blobs.iter().any(|b| b.index == blob.index) { + return Err(LookupVerifyError::DuplicateData); + } + + self.blobs.push(blob); + if self.blobs.len() >= self.request.indices.len() { + // All expected chunks received, return result early + self.resolved = true; + Ok(Some(std::mem::take(&mut self.blobs))) + } else { + Ok(None) + } + } + + pub fn terminate(self) -> Result<(), LookupVerifyError> { + if self.resolved { + Ok(()) + } else { + Err(LookupVerifyError::NotEnoughResponsesReturned { + expected: self.request.indices.len(), + actual: self.blobs.len(), + }) + } + } + + /// Mark request as resolved (= has returned something downstream) while marking this status as + /// true for future calls. 
+ pub fn resolve(&mut self) -> bool { + std::mem::replace(&mut self.resolved, true) + } +} diff --git a/beacon_node/network/src/sync/network_context/requests/blocks_by_root.rs b/beacon_node/network/src/sync/network_context/requests/blocks_by_root.rs new file mode 100644 index 0000000000..a15d4e3935 --- /dev/null +++ b/beacon_node/network/src/sync/network_context/requests/blocks_by_root.rs @@ -0,0 +1,60 @@ +use beacon_chain::get_block_root; +use lighthouse_network::{rpc::BlocksByRootRequest, PeerId}; +use std::sync::Arc; +use types::{ChainSpec, EthSpec, Hash256, SignedBeaconBlock}; + +use super::LookupVerifyError; + +#[derive(Debug, Copy, Clone)] +pub struct BlocksByRootSingleRequest(pub Hash256); + +impl BlocksByRootSingleRequest { + pub fn into_request(self, spec: &ChainSpec) -> BlocksByRootRequest { + BlocksByRootRequest::new(vec![self.0], spec) + } +} + +pub struct ActiveBlocksByRootRequest { + request: BlocksByRootSingleRequest, + resolved: bool, + pub(crate) peer_id: PeerId, +} + +impl ActiveBlocksByRootRequest { + pub fn new(request: BlocksByRootSingleRequest, peer_id: PeerId) -> Self { + Self { + request, + resolved: false, + peer_id, + } + } + + /// Append a response to the single chunk request. If the chunk is valid, the request is + /// resolved immediately. 
+ /// The active request SHOULD be dropped after `add_response` returns an error + pub fn add_response( + &mut self, + block: Arc>, + ) -> Result>, LookupVerifyError> { + if self.resolved { + return Err(LookupVerifyError::TooManyResponses); + } + + let block_root = get_block_root(&block); + if self.request.0 != block_root { + return Err(LookupVerifyError::UnrequestedBlockRoot(block_root)); + } + + // Valid data, blocks by root expects a single response + self.resolved = true; + Ok(block) + } + + pub fn terminate(self) -> Result<(), LookupVerifyError> { + if self.resolved { + Ok(()) + } else { + Err(LookupVerifyError::NoResponseReturned) + } + } +} From 2f6ad347959dbf4172b85ca2aabb7fdbb3b8b491 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Tue, 17 Sep 2024 07:12:21 +0100 Subject: [PATCH 19/66] Improve rpc logic (#6400) * update rpc imports to be explicit * avoid exposing HandlerEvent outside RPC it's unnecessary. * handle Pongs at RPC handler level --- .../lighthouse_network/src/rpc/handler.rs | 24 ++++++++- .../lighthouse_network/src/rpc/methods.rs | 2 +- beacon_node/lighthouse_network/src/rpc/mod.rs | 52 +++++++++++-------- .../lighthouse_network/src/service/mod.rs | 37 ++++--------- 4 files changed, 64 insertions(+), 51 deletions(-) diff --git a/beacon_node/lighthouse_network/src/rpc/handler.rs b/beacon_node/lighthouse_network/src/rpc/handler.rs index 6f338ebc8b..08e55e50c9 100644 --- a/beacon_node/lighthouse_network/src/rpc/handler.rs +++ b/beacon_node/lighthouse_network/src/rpc/handler.rs @@ -4,7 +4,7 @@ use super::methods::{GoodbyeReason, RPCCodedResponse, RPCResponseErrorCode}; use super::outbound::OutboundRequestContainer; use super::protocol::{InboundOutput, InboundRequest, Protocol, RPCError, RPCProtocol}; -use super::{RPCReceived, RPCSend, ReqId}; +use super::{RPCReceived, RPCResponse, RPCSend, ReqId}; use crate::rpc::outbound::{OutboundFramed, OutboundRequest}; use crate::rpc::protocol::InboundFramed; use fnv::FnvHashMap; @@ -14,7 
+14,8 @@ use libp2p::swarm::handler::{ ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound, StreamUpgradeError, SubstreamProtocol, }; -use libp2p::swarm::Stream; +use libp2p::swarm::{ConnectionId, Stream}; +use libp2p::PeerId; use slog::{crit, debug, trace}; use smallvec::SmallVec; use std::{ @@ -88,6 +89,12 @@ pub struct RPCHandler where E: EthSpec, { + /// This `ConnectionId`. + id: ConnectionId, + + /// The matching `PeerId` of this connection. + peer_id: PeerId, + /// The upgrade for inbound substreams. listen_protocol: SubstreamProtocol, ()>, @@ -218,12 +225,16 @@ where E: EthSpec, { pub fn new( + id: ConnectionId, + peer_id: PeerId, listen_protocol: SubstreamProtocol, ()>, fork_context: Arc, log: &slog::Logger, resp_timeout: Duration, ) -> Self { RPCHandler { + id, + peer_id, listen_protocol, events_out: SmallVec::new(), dial_queue: SmallVec::new(), @@ -892,6 +903,15 @@ where self.shutdown(None); } + // If we received a Ping, we queue a Pong response. + if let InboundRequest::Ping(ping) = req { + trace!(self.log, "Received Ping, queueing Pong";"connection_id" => %self.id, "peer_id" => %self.peer_id); + self.send_response( + self.current_inbound_substream_id, + RPCCodedResponse::Success(RPCResponse::Pong(ping)), + ); + } + self.events_out.push(HandlerEvent::Ok(RPCReceived::Request( self.current_inbound_substream_id, req, diff --git a/beacon_node/lighthouse_network/src/rpc/methods.rs b/beacon_node/lighthouse_network/src/rpc/methods.rs index a96b9d1b16..6e1ba9cd30 100644 --- a/beacon_node/lighthouse_network/src/rpc/methods.rs +++ b/beacon_node/lighthouse_network/src/rpc/methods.rs @@ -81,7 +81,7 @@ pub struct StatusMessage { } /// The PING request/response message. -#[derive(Encode, Decode, Clone, Debug, PartialEq)] +#[derive(Encode, Decode, Copy, Clone, Debug, PartialEq)] pub struct Ping { /// The metadata sequence number. 
pub data: u64, diff --git a/beacon_node/lighthouse_network/src/rpc/mod.rs b/beacon_node/lighthouse_network/src/rpc/mod.rs index cd591554a3..eae206e022 100644 --- a/beacon_node/lighthouse_network/src/rpc/mod.rs +++ b/beacon_node/lighthouse_network/src/rpc/mod.rs @@ -106,7 +106,7 @@ pub struct RPCMessage { /// Handler managing this message. pub conn_id: ConnectionId, /// The message that was sent. - pub event: HandlerEvent, + pub message: Result, HandlerErr>, } type BehaviourAction = ToSwarm, RPCSend>; @@ -245,6 +245,8 @@ where .log .new(slog::o!("peer_id" => peer_id.to_string(), "connection_id" => connection_id.to_string())); let handler = RPCHandler::new( + connection_id, + peer_id, protocol, self.fork_context.clone(), &log, @@ -278,6 +280,8 @@ where .new(slog::o!("peer_id" => peer_id.to_string(), "connection_id" => connection_id.to_string())); let handler = RPCHandler::new( + connection_id, + peer_id, protocol, self.fork_context.clone(), &log, @@ -311,7 +315,7 @@ where let error_msg = ToSwarm::GenerateEvent(RPCMessage { peer_id, conn_id: connection_id, - event: HandlerEvent::Err(HandlerErr::Outbound { + message: Err(HandlerErr::Outbound { id, proto, error: RPCError::Disconnected, @@ -332,7 +336,7 @@ where *event = ToSwarm::GenerateEvent(RPCMessage { peer_id, conn_id: connection_id, - event: HandlerEvent::Err(HandlerErr::Outbound { + message: Err(HandlerErr::Outbound { id: *request_id, proto: req.versioned_protocol().protocol(), error: RPCError::Disconnected, @@ -351,16 +355,16 @@ where event: ::ToBehaviour, ) { match event { - HandlerEvent::Ok(RPCReceived::Request(ref id, ref req)) => { + HandlerEvent::Ok(RPCReceived::Request(id, req)) => { if let Some(limiter) = self.limiter.as_mut() { // check if the request is conformant to the quota - match limiter.allows(&peer_id, req) { + match limiter.allows(&peer_id, &req) { Ok(()) => { // send the event to the user self.events.push(ToSwarm::GenerateEvent(RPCMessage { peer_id, conn_id, - event, + message: 
Ok(RPCReceived::Request(id, req)), })) } Err(RateLimitedErr::TooLarge) => { @@ -384,7 +388,7 @@ where // the handler upon receiving the error code will send it back to the behaviour self.send_response( peer_id, - (conn_id, *id), + (conn_id, id), RPCCodedResponse::Error( RPCResponseErrorCode::RateLimited, "Rate limited. Request too large".into(), @@ -398,7 +402,7 @@ where // the handler upon receiving the error code will send it back to the behaviour self.send_response( peer_id, - (conn_id, *id), + (conn_id, id), RPCCodedResponse::Error( RPCResponseErrorCode::RateLimited, format!("Wait {:?}", wait_time).into(), @@ -411,10 +415,24 @@ where self.events.push(ToSwarm::GenerateEvent(RPCMessage { peer_id, conn_id, - event, + message: Ok(RPCReceived::Request(id, req)), })) } } + HandlerEvent::Ok(rpc) => { + self.events.push(ToSwarm::GenerateEvent(RPCMessage { + peer_id, + conn_id, + message: Ok(rpc), + })); + } + HandlerEvent::Err(err) => { + self.events.push(ToSwarm::GenerateEvent(RPCMessage { + peer_id, + conn_id, + message: Err(err), + })); + } HandlerEvent::Close(_) => { // Handle the close event here. self.events.push(ToSwarm::CloseConnection { @@ -422,13 +440,6 @@ where connection: CloseConnection::All, }); } - _ => { - self.events.push(ToSwarm::GenerateEvent(RPCMessage { - peer_id, - conn_id, - event, - })); - } } } @@ -463,8 +474,8 @@ where serializer: &mut dyn slog::Serializer, ) -> slog::Result { serializer.emit_arguments("peer_id", &format_args!("{}", self.peer_id))?; - match &self.event { - HandlerEvent::Ok(received) => { + match &self.message { + Ok(received) => { let (msg_kind, protocol) = match received { RPCReceived::Request(_, req) => { ("request", req.versioned_protocol().protocol()) @@ -485,7 +496,7 @@ where serializer.emit_str("msg_kind", msg_kind)?; serializer.emit_arguments("protocol", &format_args!("{}", protocol))?; } - HandlerEvent::Err(error) => { + Err(error) => { let (msg_kind, protocol) = match &error { HandlerErr::Inbound { proto, .. 
} => ("inbound_err", *proto), HandlerErr::Outbound { proto, .. } => ("outbound_err", *proto), @@ -493,9 +504,6 @@ where serializer.emit_str("msg_kind", msg_kind)?; serializer.emit_arguments("protocol", &format_args!("{}", protocol))?; } - HandlerEvent::Close(err) => { - serializer.emit_arguments("handler_close", &format_args!("{}", err))?; - } }; slog::Result::Ok(()) diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index d97b52f79f..a97157ff0a 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -10,7 +10,11 @@ use crate::peer_manager::{ }; use crate::peer_manager::{MIN_OUTBOUND_ONLY_FACTOR, PEER_EXCESS_FACTOR, PRIORITY_PEER_EXCESS}; use crate::rpc::methods::MetadataRequest; -use crate::rpc::*; +use crate::rpc::{ + methods, BlocksByRangeRequest, GoodbyeReason, HandlerErr, InboundRequest, NetworkParams, + OutboundRequest, Protocol, RPCCodedResponse, RPCError, RPCMessage, RPCReceived, RPCResponse, + RPCResponseErrorCode, ResponseTermination, RPC, +}; use crate::service::behaviour::BehaviourEvent; pub use crate::service::behaviour::Gossipsub; use crate::types::{ @@ -1128,16 +1132,6 @@ impl Network { .send_request(peer_id, id, OutboundRequest::Ping(ping)); } - /// Sends a Pong response to the peer. - fn pong(&mut self, id: PeerRequestId, peer_id: PeerId) { - let ping = crate::rpc::Ping { - data: *self.network_globals.local_metadata.read().seq_number(), - }; - trace!(self.log, "Sending Pong"; "request_id" => id.1, "peer_id" => %peer_id); - let event = RPCCodedResponse::Success(RPCResponse::Pong(ping)); - self.eth2_rpc_mut().send_response(peer_id, id, event); - } - /// Sends a METADATA request to a peer. 
fn send_meta_data_request(&mut self, peer_id: PeerId) { let event = if self.fork_context.spec.is_peer_das_scheduled() { @@ -1406,10 +1400,7 @@ impl Network { let peer_id = event.peer_id; // Do not permit Inbound events from peers that are being disconnected, or RPC requests. - if !self.peer_manager().is_connected(&peer_id) - && (matches!(event.event, HandlerEvent::Err(HandlerErr::Inbound { .. })) - || matches!(event.event, HandlerEvent::Ok(RPCReceived::Request(..)))) - { + if !self.peer_manager().is_connected(&peer_id) { debug!( self.log, "Ignoring rpc message of disconnecting peer"; @@ -1420,8 +1411,8 @@ impl Network { let handler_id = event.conn_id; // The METADATA and PING RPC responses are handled within the behaviour and not propagated - match event.event { - HandlerEvent::Err(handler_err) => { + match event.message { + Err(handler_err) => { match handler_err { HandlerErr::Inbound { id: _, @@ -1456,15 +1447,13 @@ impl Network { } } } - HandlerEvent::Ok(RPCReceived::Request(id, request)) => { + Ok(RPCReceived::Request(id, request)) => { let peer_request_id = (handler_id, id); match request { /* Behaviour managed protocols: Ping and Metadata */ InboundRequest::Ping(ping) => { // inform the peer manager and send the response self.peer_manager_mut().ping_request(&peer_id, ping.data); - // send a ping response - self.pong(peer_request_id, peer_id); None } InboundRequest::MetaData(req) => { @@ -1587,7 +1576,7 @@ impl Network { } } } - HandlerEvent::Ok(RPCReceived::Response(id, resp)) => { + Ok(RPCReceived::Response(id, resp)) => { match resp { /* Behaviour managed protocols */ RPCResponse::Pong(ping) => { @@ -1640,7 +1629,7 @@ impl Network { ), } } - HandlerEvent::Ok(RPCReceived::EndOfStream(id, termination)) => { + Ok(RPCReceived::EndOfStream(id, termination)) => { let response = match termination { ResponseTermination::BlocksByRange => Response::BlocksByRange(None), ResponseTermination::BlocksByRoot => Response::BlocksByRoot(None), @@ -1651,10 +1640,6 @@ impl 
Network { }; self.build_response(id, peer_id, response) } - HandlerEvent::Close(_) => { - // NOTE: This is handled in the RPC behaviour. - None - } } } From 8b085dd167c39731845f652de2e39d50bf4e0e1b Mon Sep 17 00:00:00 2001 From: Daniel Knopik <107140945+dknopik@users.noreply.github.com> Date: Tue, 17 Sep 2024 08:45:02 +0200 Subject: [PATCH 20/66] Fix phase0 block reward in rewards API (#5101) * Added Block Rewards * added new type * added enum * Fix phase0 block reward in rewards API (#4929) * Merge 'guav00a/proposer-rewards-api' * Merge unstable * Revamp phase0 reward API tests - Add test_rewards_base_slashings (testing #5101) - Improve fix to not include proposer reward in attestation reward API calculation (#4856) - Adjust test approach for phase0 tests: Pad with empty epochs to include all rewards in calculation - Simplify and unify code across all reward tests * Merge branch 'unstable' into fix-4929 * Merge branch 'unstable' into fix-4929 * Merge remote-tracking branch 'origin/unstable' into fix-4929 * Fix merge fallout * Remove junk revived in merge * Address review - check for attestations with lower inclusion delay - check for double attestations in block - add test * Merge branch 'unstable' into fix-4929 * Merge branch 'unstable' into fix-4929 --- .../beacon_chain/src/attestation_rewards.rs | 23 +- .../beacon_chain/src/beacon_block_reward.rs | 127 +++- beacon_node/beacon_chain/src/beacon_chain.rs | 2 +- beacon_node/beacon_chain/tests/rewards.rs | 550 +++++++++--------- .../http_api/src/standard_block_rewards.rs | 4 +- .../base/rewards_and_penalties.rs | 43 +- testing/ef_tests/src/cases/rewards.rs | 2 + 7 files changed, 443 insertions(+), 308 deletions(-) diff --git a/beacon_node/beacon_chain/src/attestation_rewards.rs b/beacon_node/beacon_chain/src/attestation_rewards.rs index d48a83130e..87b7384ea6 100644 --- a/beacon_node/beacon_chain/src/attestation_rewards.rs +++ b/beacon_node/beacon_chain/src/attestation_rewards.rs @@ -11,7 +11,7 @@ use 
state_processing::per_epoch_processing::altair::{ }; use state_processing::per_epoch_processing::base::rewards_and_penalties::{ get_attestation_component_delta, get_attestation_deltas_all, get_attestation_deltas_subset, - get_inactivity_penalty_delta, get_inclusion_delay_delta, + get_inactivity_penalty_delta, get_inclusion_delay_delta, ProposerRewardCalculation, }; use state_processing::per_epoch_processing::base::validator_statuses::InclusionInfo; use state_processing::per_epoch_processing::base::{ @@ -81,13 +81,24 @@ impl BeaconChain { self.compute_ideal_rewards_base(&state, &validator_statuses.total_balances)?; let indices_to_attestation_delta = if validators.is_empty() { - get_attestation_deltas_all(&state, &validator_statuses, spec)? - .into_iter() - .enumerate() - .collect() + get_attestation_deltas_all( + &state, + &validator_statuses, + ProposerRewardCalculation::Exclude, + spec, + )? + .into_iter() + .enumerate() + .collect() } else { let validator_indices = Self::validators_ids_to_indices(&mut state, validators)?; - get_attestation_deltas_subset(&state, &validator_statuses, &validator_indices, spec)? + get_attestation_deltas_subset( + &state, + &validator_statuses, + ProposerRewardCalculation::Exclude, + &validator_indices, + spec, + )? 
}; let mut total_rewards = vec![]; diff --git a/beacon_node/beacon_chain/src/beacon_block_reward.rs b/beacon_node/beacon_chain/src/beacon_block_reward.rs index 33567001e3..e0bb79bf38 100644 --- a/beacon_node/beacon_chain/src/beacon_block_reward.rs +++ b/beacon_node/beacon_chain/src/beacon_block_reward.rs @@ -1,20 +1,25 @@ -use crate::{BeaconChain, BeaconChainError, BeaconChainTypes}; +use crate::{BeaconChain, BeaconChainError, BeaconChainTypes, StateSkipConfig}; +use attesting_indices_base::get_attesting_indices; use eth2::lighthouse::StandardBlockReward; -use operation_pool::RewardCache; use safe_arith::SafeArith; use slog::error; +use state_processing::common::attesting_indices_base; use state_processing::{ - common::{get_attestation_participation_flag_indices, get_attesting_indices_from_state}, + common::{ + base::{self, SqrtTotalActiveBalance}, + get_attestation_participation_flag_indices, get_attesting_indices_from_state, + }, epoch_cache::initialize_epoch_cache, per_block_processing::{ altair::sync_committee::compute_sync_aggregate_rewards, get_slashable_indices, }, }; +use std::collections::HashSet; use store::{ consts::altair::{PARTICIPATION_FLAG_WEIGHTS, PROPOSER_WEIGHT, WEIGHT_DENOMINATOR}, RelativeEpoch, }; -use types::{AbstractExecPayload, BeaconBlockRef, BeaconState, BeaconStateError, Hash256}; +use types::{AbstractExecPayload, BeaconBlockRef, BeaconState, BeaconStateError, EthSpec}; type BeaconBlockSubRewardValue = u64; @@ -22,7 +27,6 @@ impl BeaconChain { pub fn compute_beacon_block_reward>( &self, block: BeaconBlockRef<'_, T::EthSpec, Payload>, - block_root: Hash256, state: &mut BeaconState, ) -> Result { if block.slot() != state.slot() { @@ -33,7 +37,7 @@ impl BeaconChain { state.build_committee_cache(RelativeEpoch::Current, &self.spec)?; initialize_epoch_cache(state, &self.spec)?; - self.compute_beacon_block_reward_with_cache(block, block_root, state) + self.compute_beacon_block_reward_with_cache(block, state) } // This should only be called after 
a committee cache has been built @@ -41,7 +45,6 @@ impl BeaconChain { fn compute_beacon_block_reward_with_cache>( &self, block: BeaconBlockRef<'_, T::EthSpec, Payload>, - block_root: Hash256, state: &BeaconState, ) -> Result { let proposer_index = block.proposer_index(); @@ -72,7 +75,7 @@ impl BeaconChain { })?; let block_attestation_reward = if let BeaconState::Base(_) = state { - self.compute_beacon_block_attestation_reward_base(block, block_root, state) + self.compute_beacon_block_attestation_reward_base(block, state) .map_err(|e| { error!( self.log, @@ -169,19 +172,85 @@ impl BeaconChain { fn compute_beacon_block_attestation_reward_base>( &self, block: BeaconBlockRef<'_, T::EthSpec, Payload>, - block_root: Hash256, state: &BeaconState, ) -> Result { - // Call compute_block_reward in the base case - // Since base does not have sync aggregate, we only grab attesation portion of the returned - // value - let mut reward_cache = RewardCache::default(); - let block_attestation_reward = self - .compute_block_reward(block, block_root, state, &mut reward_cache, true)? - .attestation_rewards - .total; + // In phase0, rewards for including attestations are awarded at epoch boundaries when the corresponding + // attestations are contained in state.previous_epoch_attestations. So, if an attestation within this block has + // target = previous_epoch, it is directly inserted into previous_epoch_attestations and we need the state at + // the end of this epoch, or the attestation has target = current_epoch and thus we need the state at the end + // of the next epoch. + // We fetch these lazily, as only one might be needed depending on the block's content. 
+ let mut current_epoch_end = None; + let mut next_epoch_end = None; - Ok(block_attestation_reward) + let epoch = block.epoch(); + let mut block_reward = 0; + + let mut rewarded_attesters = HashSet::new(); + + for attestation in block.body().attestations() { + let processing_epoch_end = if attestation.data().target.epoch == epoch { + let next_epoch_end = match &mut next_epoch_end { + Some(next_epoch_end) => next_epoch_end, + None => { + let state = self.state_at_slot( + epoch.safe_add(1)?.end_slot(T::EthSpec::slots_per_epoch()), + StateSkipConfig::WithoutStateRoots, + )?; + next_epoch_end.get_or_insert(state) + } + }; + + // If the next epoch end is no longer phase0, no proposer rewards are awarded, as Altair epoch boundry + // processing kicks in. We check this here, as we know that current_epoch_end will always be phase0. + if !matches!(next_epoch_end, BeaconState::Base(_)) { + continue; + } + + next_epoch_end + } else if attestation.data().target.epoch == epoch.safe_sub(1)? { + match &mut current_epoch_end { + Some(current_epoch_end) => current_epoch_end, + None => { + let state = self.state_at_slot( + epoch.end_slot(T::EthSpec::slots_per_epoch()), + StateSkipConfig::WithoutStateRoots, + )?; + current_epoch_end.get_or_insert(state) + } + } + } else { + return Err(BeaconChainError::BlockRewardAttestationError); + }; + + let inclusion_delay = state.slot().safe_sub(attestation.data().slot)?.as_u64(); + let sqrt_total_active_balance = + SqrtTotalActiveBalance::new(processing_epoch_end.get_total_active_balance()?); + for attester in get_attesting_indices_from_state(state, attestation)? { + let validator = processing_epoch_end.get_validator(attester as usize)?; + if !validator.slashed + && !rewarded_attesters.contains(&attester) + && !has_earlier_attestation( + state, + processing_epoch_end, + inclusion_delay, + attester, + )? 
+ { + let base_reward = base::get_base_reward( + validator.effective_balance, + sqrt_total_active_balance, + &self.spec, + )?; + let proposer_reward = + base_reward.safe_div(self.spec.proposer_reward_quotient)?; + block_reward.safe_add_assign(proposer_reward)?; + rewarded_attesters.insert(attester); + } + } + } + + Ok(block_reward) } fn compute_beacon_block_attestation_reward_altair_deneb< @@ -244,3 +313,25 @@ impl BeaconChain { Ok(total_proposer_reward) } } + +fn has_earlier_attestation( + state: &BeaconState, + processing_epoch_end: &BeaconState, + inclusion_delay: u64, + attester: u64, +) -> Result { + if inclusion_delay > 1 { + for epoch_att in processing_epoch_end.previous_epoch_attestations()? { + if epoch_att.inclusion_delay < inclusion_delay { + let committee = + state.get_beacon_committee(epoch_att.data.slot, epoch_att.data.index)?; + let earlier_attesters = + get_attesting_indices::(committee.committee, &epoch_att.aggregation_bits)?; + if earlier_attesters.contains(&attester) { + return Ok(true); + } + } + } + } + Ok(false) +} diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index bf660c9eaf..d83955854d 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -5639,7 +5639,7 @@ impl BeaconChain { let mut ctxt = ConsensusContext::new(block.slot()); let consensus_block_value = self - .compute_beacon_block_reward(block.message(), Hash256::zero(), &mut state) + .compute_beacon_block_reward(block.message(), &mut state) .map(|reward| reward.total) .unwrap_or(0); diff --git a/beacon_node/beacon_chain/tests/rewards.rs b/beacon_node/beacon_chain/tests/rewards.rs index f04f4062f1..323f4f38eb 100644 --- a/beacon_node/beacon_chain/tests/rewards.rs +++ b/beacon_node/beacon_chain/tests/rewards.rs @@ -1,20 +1,22 @@ #![cfg(test)] -use std::collections::HashMap; -use std::sync::LazyLock; - +use beacon_chain::block_verification_types::AsBlock; use 
beacon_chain::test_utils::{ generate_deterministic_keypairs, BeaconChainHarness, EphemeralHarnessType, }; use beacon_chain::{ test_utils::{AttestationStrategy, BlockStrategy, RelativeSyncCommittee}, types::{Epoch, EthSpec, Keypair, MinimalEthSpec}, + BlockError, ChainConfig, StateSkipConfig, WhenSlotSkipped, }; use eth2::lighthouse::attestation_rewards::TotalAttestationRewards; use eth2::lighthouse::StandardAttestationRewards; use eth2::types::ValidatorId; -use types::beacon_state::Error as BeaconStateError; -use types::{BeaconState, ChainSpec, ForkName, Slot}; +use state_processing::{BlockReplayError, BlockReplayer}; +use std::array::IntoIter; +use std::collections::HashMap; +use std::sync::{Arc, LazyLock}; +use types::{ChainSpec, ForkName, Slot}; pub const VALIDATOR_COUNT: usize = 64; @@ -24,10 +26,16 @@ static KEYPAIRS: LazyLock> = LazyLock::new(|| generate_deterministic_keypairs(VALIDATOR_COUNT)); fn get_harness(spec: ChainSpec) -> BeaconChainHarness> { + let chain_config = ChainConfig { + reconstruct_historic_states: true, + ..Default::default() + }; + let harness = BeaconChainHarness::builder(E::default()) .spec(spec) .keypairs(KEYPAIRS.to_vec()) .fresh_ephemeral_store() + .chain_config(chain_config) .build(); harness.advance_slot(); @@ -37,9 +45,7 @@ fn get_harness(spec: ChainSpec) -> BeaconChainHarness> { #[tokio::test] async fn test_sync_committee_rewards() { - let mut spec = E::default_spec(); - spec.altair_fork_epoch = Some(Epoch::new(0)); - + let spec = ForkName::Altair.make_genesis_spec(E::default_spec()); let harness = get_harness(spec); let num_block_produced = E::slots_per_epoch(); @@ -126,123 +132,65 @@ async fn test_sync_committee_rewards() { } #[tokio::test] -async fn test_verify_attestation_rewards_base() { - let harness = get_harness(E::default_spec()); +async fn test_rewards_base() { + let spec = ForkName::Base.make_genesis_spec(E::default_spec()); + let harness = get_harness(spec); + let initial_balances = 
harness.get_current_state().balances().to_vec(); - // epoch 0 (N), only two thirds of validators vote. - let two_thirds = (VALIDATOR_COUNT / 3) * 2; - let two_thirds_validators: Vec = (0..two_thirds).collect(); harness - .extend_chain( - E::slots_per_epoch() as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::SomeValidators(two_thirds_validators), - ) + .extend_slots(E::slots_per_epoch() as usize * 2 - 1) .await; - let initial_balances: Vec = harness.get_current_state().balances().to_vec(); - - // extend slots to beginning of epoch N + 2 - harness.extend_slots(E::slots_per_epoch() as usize).await; - - // compute reward deltas for all validators in epoch N - let StandardAttestationRewards { - ideal_rewards, - total_rewards, - } = harness - .chain - .compute_attestation_rewards(Epoch::new(0), vec![]) - .unwrap(); - - // assert no inactivity penalty for both ideal rewards and individual validators - assert!(ideal_rewards.iter().all(|reward| reward.inactivity == 0)); - assert!(total_rewards.iter().all(|reward| reward.inactivity == 0)); - - // apply attestation rewards to initial balances - let expected_balances = apply_attestation_rewards(&initial_balances, total_rewards); - - // verify expected balances against actual balances - let balances: Vec = harness.get_current_state().balances().to_vec(); - assert_eq!(expected_balances, balances); + check_all_base_rewards(&harness, initial_balances).await; } #[tokio::test] -async fn test_verify_attestation_rewards_base_inactivity_leak() { - let spec = E::default_spec(); +async fn test_rewards_base_inactivity_leak() { + let spec = ForkName::Base.make_genesis_spec(E::default_spec()); let harness = get_harness(spec.clone()); + let initial_balances = harness.get_current_state().balances().to_vec(); let half = VALIDATOR_COUNT / 2; let half_validators: Vec = (0..half).collect(); // target epoch is the epoch where the chain enters inactivity leak let target_epoch = &spec.min_epochs_to_inactivity_penalty + 1; - // 
advance until beginning of epoch N + 1 and get balances + // advance until end of target epoch harness - .extend_chain( - (E::slots_per_epoch() * (target_epoch + 1)) as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::SomeValidators(half_validators.clone()), + .extend_slots_some_validators( + ((E::slots_per_epoch() * target_epoch) - 1) as usize, + half_validators.clone(), ) .await; - let initial_balances: Vec = harness.get_current_state().balances().to_vec(); - // extend slots to beginning of epoch N + 2 - harness.advance_slot(); - harness - .extend_chain( - E::slots_per_epoch() as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::SomeValidators(half_validators), - ) - .await; - let _slot = harness.get_current_slot(); - - // compute reward deltas for all validators in epoch N - let StandardAttestationRewards { - ideal_rewards, - total_rewards, - } = harness - .chain - .compute_attestation_rewards(Epoch::new(target_epoch), vec![]) - .unwrap(); - - // assert inactivity penalty for both ideal rewards and individual validators - assert!(ideal_rewards.iter().all(|reward| reward.inactivity < 0)); - assert!(total_rewards.iter().all(|reward| reward.inactivity < 0)); - - // apply attestation rewards to initial balances - let expected_balances = apply_attestation_rewards(&initial_balances, total_rewards); - - // verify expected balances against actual balances - let balances: Vec = harness.get_current_state().balances().to_vec(); - assert_eq!(expected_balances, balances); + check_all_base_rewards(&harness, initial_balances).await; } #[tokio::test] -async fn test_verify_attestation_rewards_base_inactivity_leak_justification_epoch() { - let spec = E::default_spec(); +async fn test_rewards_base_inactivity_leak_justification_epoch() { + let spec = ForkName::Base.make_genesis_spec(E::default_spec()); let harness = get_harness(spec.clone()); + let initial_balances = harness.get_current_state().balances().to_vec(); let half = VALIDATOR_COUNT / 2; let 
half_validators: Vec = (0..half).collect(); // target epoch is the epoch where the chain enters inactivity leak - let mut target_epoch = &spec.min_epochs_to_inactivity_penalty + 2; + let mut target_epoch = &spec.min_epochs_to_inactivity_penalty + 1; - // advance until beginning of epoch N + 2 + // advance until end of target epoch harness .extend_chain( - (E::slots_per_epoch() * (target_epoch + 1)) as usize, + ((E::slots_per_epoch() * target_epoch) - 1) as usize, BlockStrategy::OnCanonicalHead, AttestationStrategy::SomeValidators(half_validators.clone()), ) .await; - // advance to create first justification epoch and get initial balances + // advance to create first justification epoch harness.extend_slots(E::slots_per_epoch() as usize).await; target_epoch += 1; - let initial_balances: Vec = harness.get_current_state().balances().to_vec(); - //assert previous_justified_checkpoint matches 0 as we were in inactivity leak from beginning + // assert previous_justified_checkpoint matches 0 as we were in inactivity leak from beginning assert_eq!( 0, harness @@ -252,10 +200,12 @@ async fn test_verify_attestation_rewards_base_inactivity_leak_justification_epoc .as_u64() ); - // extend slots to beginning of epoch N + 1 + // extend slots to end of epoch target_epoch + 2 harness.extend_slots(E::slots_per_epoch() as usize).await; - //assert target epoch and previous_justified_checkpoint match + check_all_base_rewards(&harness, initial_balances).await; + + // assert target epoch and previous_justified_checkpoint match assert_eq!( target_epoch, harness @@ -264,31 +214,94 @@ async fn test_verify_attestation_rewards_base_inactivity_leak_justification_epoc .epoch .as_u64() ); - - // compute reward deltas for all validators in epoch N - let StandardAttestationRewards { - ideal_rewards, - total_rewards, - } = harness - .chain - .compute_attestation_rewards(Epoch::new(target_epoch), vec![]) - .unwrap(); - - // assert we successfully get ideal rewards for justified epoch out of 
inactivity leak - assert!(ideal_rewards - .iter() - .all(|reward| reward.head > 0 && reward.target > 0 && reward.source > 0)); - - // apply attestation rewards to initial balances - let expected_balances = apply_attestation_rewards(&initial_balances, total_rewards); - - // verify expected balances against actual balances - let balances: Vec = harness.get_current_state().balances().to_vec(); - assert_eq!(expected_balances, balances); } #[tokio::test] -async fn test_verify_attestation_rewards_altair() { +async fn test_rewards_base_slashings() { + let spec = ForkName::Base.make_genesis_spec(E::default_spec()); + let harness = get_harness(spec); + let mut initial_balances = harness.get_current_state().balances().to_vec(); + + harness + .extend_slots(E::slots_per_epoch() as usize - 1) + .await; + + harness.add_attester_slashing(vec![0]).unwrap(); + let slashed_balance = initial_balances.get_mut(0).unwrap(); + *slashed_balance -= *slashed_balance / harness.spec.min_slashing_penalty_quotient; + + harness.extend_slots(E::slots_per_epoch() as usize).await; + + check_all_base_rewards(&harness, initial_balances).await; +} + +#[tokio::test] +async fn test_rewards_base_multi_inclusion() { + let spec = ForkName::Base.make_genesis_spec(E::default_spec()); + let harness = get_harness(spec); + let initial_balances = harness.get_current_state().balances().to_vec(); + + harness.extend_slots(2).await; + + let prev_block = harness.chain.head_beacon_block(); + + harness.extend_slots(1).await; + + harness.advance_slot(); + let slot = harness.get_current_slot(); + let mut block = + // pin to reduce stack size for clippy + Box::pin( + harness.make_block_with_modifier(harness.get_current_state(), slot, |block| { + // add one attestation from the same block + let attestations = &mut block.body_base_mut().unwrap().attestations; + attestations + .push(attestations.first().unwrap().clone()) + .unwrap(); + + // add one attestation from the previous block + let attestation = prev_block + 
.as_block() + .message_base() + .unwrap() + .body + .attestations + .first() + .unwrap() + .clone(); + attestations.push(attestation).unwrap(); + }), + ) + .await + .0; + + // funky hack: on first try, the state root will mismatch due to our modification + // thankfully, the correct state root is reported back, so we just take that one :^) + // there probably is a better way... + let Err(BlockError::StateRootMismatch { local, .. }) = harness + .process_block(slot, block.0.canonical_root(), block.clone()) + .await + else { + panic!("unexpected match of state root"); + }; + let mut new_block = block.0.message_base().unwrap().clone(); + new_block.state_root = local; + block.0 = Arc::new(harness.sign_beacon_block(new_block.into(), &harness.get_current_state())); + harness + .process_block(slot, block.0.canonical_root(), block.clone()) + .await + .unwrap(); + + harness + .extend_slots(E::slots_per_epoch() as usize * 2 - 4) + .await; + + // pin to reduce stack size for clippy + Box::pin(check_all_base_rewards(&harness, initial_balances)).await; +} + +#[tokio::test] +async fn test_rewards_altair() { let spec = ForkName::Altair.make_genesis_spec(E::default_spec()); let harness = get_harness(spec.clone()); let target_epoch = 0; @@ -297,11 +310,11 @@ async fn test_verify_attestation_rewards_altair() { harness .extend_slots((E::slots_per_epoch() * (target_epoch + 1)) as usize) .await; - let initial_balances: Vec = harness.get_current_state().balances().to_vec(); + let mut expected_balances = harness.get_current_state().balances().to_vec(); // advance until epoch N + 2 and build proposal rewards map - let mut proposal_rewards_map: HashMap = HashMap::new(); - let mut sync_committee_rewards_map: HashMap = HashMap::new(); + let mut proposal_rewards_map = HashMap::new(); + let mut sync_committee_rewards_map = HashMap::new(); for _ in 0..E::slots_per_epoch() { let state = harness.get_current_state(); let slot = state.slot() + Slot::new(1); @@ -311,19 +324,13 @@ async fn 
test_verify_attestation_rewards_altair() { harness.make_block_return_pre_state(state, slot).await; let beacon_block_reward = harness .chain - .compute_beacon_block_reward( - signed_block.message(), - signed_block.canonical_root(), - &mut state, - ) + .compute_beacon_block_reward(signed_block.message(), &mut state) .unwrap(); let total_proposer_reward = proposal_rewards_map - .get(&beacon_block_reward.proposer_index) - .unwrap_or(&0u64) - + beacon_block_reward.total; - - proposal_rewards_map.insert(beacon_block_reward.proposer_index, total_proposer_reward); + .entry(beacon_block_reward.proposer_index) + .or_insert(0); + *total_proposer_reward += beacon_block_reward.total as i64; // calculate sync committee rewards / penalties let reward_payload = harness @@ -331,13 +338,12 @@ async fn test_verify_attestation_rewards_altair() { .compute_sync_committee_rewards(signed_block.message(), &mut state) .unwrap(); - reward_payload.iter().for_each(|reward| { - let mut amount = *sync_committee_rewards_map - .get(&reward.validator_index) - .unwrap_or(&0); - amount += reward.reward; - sync_committee_rewards_map.insert(reward.validator_index, amount); - }); + for reward in reward_payload { + let total_sync_reward = sync_committee_rewards_map + .entry(reward.validator_index) + .or_insert(0); + *total_sync_reward += reward.reward; + } harness.extend_slots(1).await; } @@ -357,10 +363,9 @@ async fn test_verify_attestation_rewards_altair() { .all(|reward| reward.head > 0 && reward.target > 0 && reward.source > 0)); // apply attestation, proposal, and sync committee rewards and penalties to initial balances - let expected_balances = apply_attestation_rewards(&initial_balances, total_rewards); - let expected_balances = apply_beacon_block_rewards(&proposal_rewards_map, expected_balances); - let expected_balances = - apply_sync_committee_rewards(&sync_committee_rewards_map, expected_balances); + apply_attestation_rewards(&mut expected_balances, total_rewards); + apply_other_rewards(&mut 
expected_balances, &proposal_rewards_map); + apply_other_rewards(&mut expected_balances, &sync_committee_rewards_map); // verify expected balances against actual balances let balances: Vec = harness.get_current_state().balances().to_vec(); @@ -369,7 +374,7 @@ async fn test_verify_attestation_rewards_altair() { } #[tokio::test] -async fn test_verify_attestation_rewards_altair_inactivity_leak() { +async fn test_rewards_altair_inactivity_leak() { let spec = ForkName::Altair.make_genesis_spec(E::default_spec()); let harness = get_harness(spec.clone()); @@ -385,11 +390,11 @@ async fn test_verify_attestation_rewards_altair_inactivity_leak() { half_validators.clone(), ) .await; - let initial_balances: Vec = harness.get_current_state().balances().to_vec(); + let mut expected_balances = harness.get_current_state().balances().to_vec(); // advance until epoch N + 2 and build proposal rewards map - let mut proposal_rewards_map: HashMap = HashMap::new(); - let mut sync_committee_rewards_map: HashMap = HashMap::new(); + let mut proposal_rewards_map = HashMap::new(); + let mut sync_committee_rewards_map = HashMap::new(); for _ in 0..E::slots_per_epoch() { let state = harness.get_current_state(); let slot = state.slot() + Slot::new(1); @@ -399,19 +404,13 @@ async fn test_verify_attestation_rewards_altair_inactivity_leak() { harness.make_block_return_pre_state(state, slot).await; let beacon_block_reward = harness .chain - .compute_beacon_block_reward( - signed_block.message(), - signed_block.canonical_root(), - &mut state, - ) + .compute_beacon_block_reward(signed_block.message(), &mut state) .unwrap(); let total_proposer_reward = proposal_rewards_map - .get(&beacon_block_reward.proposer_index) - .unwrap_or(&0u64) - + beacon_block_reward.total; - - proposal_rewards_map.insert(beacon_block_reward.proposer_index, total_proposer_reward); + .entry(beacon_block_reward.proposer_index) + .or_insert(0i64); + *total_proposer_reward += beacon_block_reward.total as i64; // calculate sync 
committee rewards / penalties let reward_payload = harness @@ -419,13 +418,12 @@ async fn test_verify_attestation_rewards_altair_inactivity_leak() { .compute_sync_committee_rewards(signed_block.message(), &mut state) .unwrap(); - reward_payload.iter().for_each(|reward| { - let mut amount = *sync_committee_rewards_map - .get(&reward.validator_index) - .unwrap_or(&0); - amount += reward.reward; - sync_committee_rewards_map.insert(reward.validator_index, amount); - }); + for reward in reward_payload { + let total_sync_reward = sync_committee_rewards_map + .entry(reward.validator_index) + .or_insert(0); + *total_sync_reward += reward.reward; + } harness .extend_slots_some_validators(1, half_validators.clone()) @@ -451,10 +449,9 @@ async fn test_verify_attestation_rewards_altair_inactivity_leak() { .all(|reward| reward.inactivity < 0)); // apply attestation, proposal, and sync committee rewards and penalties to initial balances - let expected_balances = apply_attestation_rewards(&initial_balances, total_rewards); - let expected_balances = apply_beacon_block_rewards(&proposal_rewards_map, expected_balances); - let expected_balances = - apply_sync_committee_rewards(&sync_committee_rewards_map, expected_balances); + apply_attestation_rewards(&mut expected_balances, total_rewards); + apply_other_rewards(&mut expected_balances, &proposal_rewards_map); + apply_other_rewards(&mut expected_balances, &sync_committee_rewards_map); // verify expected balances against actual balances let balances: Vec = harness.get_current_state().balances().to_vec(); @@ -463,7 +460,7 @@ async fn test_verify_attestation_rewards_altair_inactivity_leak() { } #[tokio::test] -async fn test_verify_attestation_rewards_altair_inactivity_leak_justification_epoch() { +async fn test_rewards_altair_inactivity_leak_justification_epoch() { let spec = ForkName::Altair.make_genesis_spec(E::default_spec()); let harness = get_harness(spec.clone()); @@ -491,11 +488,11 @@ async fn 
test_verify_attestation_rewards_altair_inactivity_leak_justification_ep // advance for first justification epoch and get balances harness.extend_slots(E::slots_per_epoch() as usize).await; target_epoch += 1; - let initial_balances: Vec = harness.get_current_state().balances().to_vec(); + let mut expected_balances = harness.get_current_state().balances().to_vec(); // advance until epoch N + 2 and build proposal rewards map - let mut proposal_rewards_map: HashMap = HashMap::new(); - let mut sync_committee_rewards_map: HashMap = HashMap::new(); + let mut proposal_rewards_map = HashMap::new(); + let mut sync_committee_rewards_map = HashMap::new(); for _ in 0..E::slots_per_epoch() { let state = harness.get_current_state(); let slot = state.slot() + Slot::new(1); @@ -505,19 +502,13 @@ async fn test_verify_attestation_rewards_altair_inactivity_leak_justification_ep harness.make_block_return_pre_state(state, slot).await; let beacon_block_reward = harness .chain - .compute_beacon_block_reward( - signed_block.message(), - signed_block.canonical_root(), - &mut state, - ) + .compute_beacon_block_reward(signed_block.message(), &mut state) .unwrap(); let total_proposer_reward = proposal_rewards_map - .get(&beacon_block_reward.proposer_index) - .unwrap_or(&0u64) - + beacon_block_reward.total; - - proposal_rewards_map.insert(beacon_block_reward.proposer_index, total_proposer_reward); + .entry(beacon_block_reward.proposer_index) + .or_insert(0); + *total_proposer_reward += beacon_block_reward.total as i64; // calculate sync committee rewards / penalties let reward_payload = harness @@ -525,13 +516,12 @@ async fn test_verify_attestation_rewards_altair_inactivity_leak_justification_ep .compute_sync_committee_rewards(signed_block.message(), &mut state) .unwrap(); - reward_payload.iter().for_each(|reward| { - let mut amount = *sync_committee_rewards_map - .get(&reward.validator_index) - .unwrap_or(&0); - amount += reward.reward; - 
sync_committee_rewards_map.insert(reward.validator_index, amount); - }); + for reward in reward_payload { + let total_sync_reward = sync_committee_rewards_map + .entry(reward.validator_index) + .or_insert(0); + *total_sync_reward += reward.reward; + } harness.extend_slots(1).await; } @@ -561,10 +551,9 @@ async fn test_verify_attestation_rewards_altair_inactivity_leak_justification_ep .all(|reward| reward.head > 0 && reward.target > 0 && reward.source > 0)); // apply attestation, proposal, and sync committee rewards and penalties to initial balances - let expected_balances = apply_attestation_rewards(&initial_balances, total_rewards); - let expected_balances = apply_beacon_block_rewards(&proposal_rewards_map, expected_balances); - let expected_balances = - apply_sync_committee_rewards(&sync_committee_rewards_map, expected_balances); + apply_attestation_rewards(&mut expected_balances, total_rewards); + apply_other_rewards(&mut expected_balances, &proposal_rewards_map); + apply_other_rewards(&mut expected_balances, &sync_committee_rewards_map); // verify expected balances against actual balances let balances: Vec = harness.get_current_state().balances().to_vec(); @@ -572,109 +561,130 @@ async fn test_verify_attestation_rewards_altair_inactivity_leak_justification_ep } #[tokio::test] -async fn test_verify_attestation_rewards_base_subset_only() { - let harness = get_harness(E::default_spec()); +async fn test_rewards_base_subset_only() { + let spec = ForkName::Base.make_genesis_spec(E::default_spec()); + let harness = get_harness(spec); + let initial_balances = harness.get_current_state().balances().to_vec(); + + // a subset of validators to compute attestation rewards for + let validators_subset = (0..16).chain(56..64).collect::>(); // epoch 0 (N), only two thirds of validators vote. 
let two_thirds = (VALIDATOR_COUNT / 3) * 2; let two_thirds_validators: Vec = (0..two_thirds).collect(); harness - .extend_chain( - E::slots_per_epoch() as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::SomeValidators(two_thirds_validators), - ) + .extend_slots_some_validators(E::slots_per_epoch() as usize, two_thirds_validators.clone()) .await; - // a small subset of validators to compute attestation rewards for - let validators_subset = [0, VALIDATOR_COUNT / 2, VALIDATOR_COUNT - 1]; + check_all_base_rewards_for_subset(&harness, initial_balances, validators_subset).await; +} - // capture balances before transitioning to N + 2 - let initial_balances = get_validator_balances(harness.get_current_state(), &validators_subset); +async fn check_all_base_rewards( + harness: &BeaconChainHarness>, + balances: Vec, +) { + check_all_base_rewards_for_subset(harness, balances, vec![]).await; +} - // extend slots to beginning of epoch N + 2 - harness.extend_slots(E::slots_per_epoch() as usize).await; - - let validators_subset_ids: Vec = validators_subset - .into_iter() - .map(|idx| ValidatorId::Index(idx as u64)) +async fn check_all_base_rewards_for_subset( + harness: &BeaconChainHarness>, + mut balances: Vec, + validator_subset: Vec, +) { + let validator_subset_ids: Vec = validator_subset + .iter() + .map(|&idx| ValidatorId::Index(idx)) .collect(); - // compute reward deltas for the subset of validators in epoch N - let StandardAttestationRewards { - ideal_rewards: _, - total_rewards, - } = harness - .chain - .compute_attestation_rewards(Epoch::new(0), validators_subset_ids) - .unwrap(); + // capture the amount of epochs generated by the caller + let epochs = harness.get_current_slot().epoch(E::slots_per_epoch()) + 1; - // apply attestation rewards to initial balances - let expected_balances = apply_attestation_rewards(&initial_balances, total_rewards); + // advance two empty epochs to ensure balances are updated by the epoch boundaries + for _ in 
0..E::slots_per_epoch() * 2 { + harness.advance_slot(); + } + // fill one slot to ensure state is updated + harness.extend_slots(1).await; + + // calculate proposal awards + let mut proposal_rewards_map = HashMap::new(); + for slot in 1..(E::slots_per_epoch() * epochs.as_u64()) { + if let Some(block) = harness + .chain + .block_at_slot(Slot::new(slot), WhenSlotSkipped::None) + .unwrap() + { + let parent_state = harness + .chain + .state_at_slot(Slot::new(slot - 1), StateSkipConfig::WithoutStateRoots) + .unwrap(); + + let mut pre_state = BlockReplayer::>::new( + parent_state, + &harness.spec, + ) + .no_signature_verification() + .minimal_block_root_verification() + .apply_blocks(vec![], Some(block.slot())) + .unwrap() + .into_state(); + + let beacon_block_reward = harness + .chain + .compute_beacon_block_reward(block.message(), &mut pre_state) + .unwrap(); + let total_proposer_reward = proposal_rewards_map + .entry(beacon_block_reward.proposer_index) + .or_insert(0); + *total_proposer_reward += beacon_block_reward.total as i64; + } + } + apply_other_rewards(&mut balances, &proposal_rewards_map); + + for epoch in 0..epochs.as_u64() { + // compute reward deltas in epoch + let total_rewards = harness + .chain + .compute_attestation_rewards(Epoch::new(epoch), validator_subset_ids.clone()) + .unwrap() + .total_rewards; + + // apply attestation rewards to balances + apply_attestation_rewards(&mut balances, total_rewards); + } // verify expected balances against actual balances - let balances = get_validator_balances(harness.get_current_state(), &validators_subset); - assert_eq!(expected_balances, balances); + let actual_balances: Vec = harness.get_current_state().balances().to_vec(); + if validator_subset.is_empty() { + assert_eq!(balances, actual_balances); + } else { + for validator in validator_subset { + assert_eq!( + balances[validator as usize], + actual_balances[validator as usize] + ); + } + } } /// Apply a vec of `TotalAttestationRewards` to initial balances, and 
return fn apply_attestation_rewards( - initial_balances: &[u64], + balances: &mut [u64], attestation_rewards: Vec, -) -> Vec { - initial_balances - .iter() - .zip(attestation_rewards) - .map(|(&initial_balance, rewards)| { - let expected_balance = initial_balance as i64 - + rewards.head - + rewards.source - + rewards.target - + rewards.inclusion_delay.map(|q| q.value).unwrap_or(0) as i64 - + rewards.inactivity; - expected_balance as u64 - }) - .collect::>() +) { + for rewards in attestation_rewards { + let balance = balances.get_mut(rewards.validator_index as usize).unwrap(); + *balance = (*balance as i64 + + rewards.head + + rewards.source + + rewards.target + + rewards.inclusion_delay.map(|q| q.value).unwrap_or(0) as i64 + + rewards.inactivity) as u64; + } } -fn get_validator_balances(state: BeaconState, validators: &[usize]) -> Vec { - validators - .iter() - .flat_map(|&id| { - state - .balances() - .get(id) - .cloned() - .ok_or(BeaconStateError::BalancesOutOfBounds(id)) - }) - .collect() -} - -fn apply_beacon_block_rewards( - proposal_rewards_map: &HashMap, - expected_balances: Vec, -) -> Vec { - let calculated_balances = expected_balances - .iter() - .enumerate() - .map(|(i, balance)| balance + proposal_rewards_map.get(&(i as u64)).unwrap_or(&0u64)) - .collect(); - - calculated_balances -} - -fn apply_sync_committee_rewards( - sync_committee_rewards_map: &HashMap, - expected_balances: Vec, -) -> Vec { - let calculated_balances = expected_balances - .iter() - .enumerate() - .map(|(i, balance)| { - (*balance as i64 + sync_committee_rewards_map.get(&(i as u64)).unwrap_or(&0i64)) - .unsigned_abs() - }) - .collect(); - - calculated_balances +fn apply_other_rewards(balances: &mut [u64], rewards_map: &HashMap) { + for (i, balance) in balances.iter_mut().enumerate() { + *balance = balance.saturating_add_signed(*rewards_map.get(&(i as u64)).unwrap_or(&0)); + } } diff --git a/beacon_node/http_api/src/standard_block_rewards.rs 
b/beacon_node/http_api/src/standard_block_rewards.rs index 97e5a87fd3..1ab75374ea 100644 --- a/beacon_node/http_api/src/standard_block_rewards.rs +++ b/beacon_node/http_api/src/standard_block_rewards.rs @@ -15,12 +15,10 @@ pub fn compute_beacon_block_rewards( let block_ref = block.message(); - let block_root = block.canonical_root(); - let mut state = get_state_before_applying_block(chain.clone(), &block)?; let rewards = chain - .compute_beacon_block_reward(block_ref, block_root, &mut state) + .compute_beacon_block_reward(block_ref, &mut state) .map_err(beacon_chain_error)?; Ok((rewards, execution_optimistic, finalized)) diff --git a/consensus/state_processing/src/per_epoch_processing/base/rewards_and_penalties.rs b/consensus/state_processing/src/per_epoch_processing/base/rewards_and_penalties.rs index ecea0b554e..a316c55bef 100644 --- a/consensus/state_processing/src/per_epoch_processing/base/rewards_and_penalties.rs +++ b/consensus/state_processing/src/per_epoch_processing/base/rewards_and_penalties.rs @@ -45,6 +45,12 @@ impl AttestationDelta { } } +#[derive(Debug)] +pub enum ProposerRewardCalculation { + Include, + Exclude, +} + /// Apply attester and proposer rewards. pub fn process_rewards_and_penalties( state: &mut BeaconState, @@ -62,7 +68,12 @@ pub fn process_rewards_and_penalties( return Err(Error::ValidatorStatusesInconsistent); } - let deltas = get_attestation_deltas_all(state, validator_statuses, spec)?; + let deltas = get_attestation_deltas_all( + state, + validator_statuses, + ProposerRewardCalculation::Include, + spec, + )?; // Apply the deltas, erroring on overflow above but not on overflow below (saturating at 0 // instead). 
@@ -79,9 +90,10 @@ pub fn process_rewards_and_penalties( pub fn get_attestation_deltas_all( state: &BeaconState, validator_statuses: &ValidatorStatuses, + proposer_reward: ProposerRewardCalculation, spec: &ChainSpec, ) -> Result, Error> { - get_attestation_deltas(state, validator_statuses, None, spec) + get_attestation_deltas(state, validator_statuses, proposer_reward, None, spec) } /// Apply rewards for participation in attestations during the previous epoch, and only compute @@ -89,10 +101,18 @@ pub fn get_attestation_deltas_all( pub fn get_attestation_deltas_subset( state: &BeaconState, validator_statuses: &ValidatorStatuses, + proposer_reward: ProposerRewardCalculation, validators_subset: &Vec, spec: &ChainSpec, ) -> Result, Error> { - get_attestation_deltas(state, validator_statuses, Some(validators_subset), spec).map(|deltas| { + get_attestation_deltas( + state, + validator_statuses, + proposer_reward, + Some(validators_subset), + spec, + ) + .map(|deltas| { deltas .into_iter() .enumerate() @@ -109,6 +129,7 @@ pub fn get_attestation_deltas_subset( fn get_attestation_deltas( state: &BeaconState, validator_statuses: &ValidatorStatuses, + proposer_reward: ProposerRewardCalculation, maybe_validators_subset: Option<&Vec>, spec: &ChainSpec, ) -> Result, Error> { @@ -169,13 +190,15 @@ fn get_attestation_deltas( .combine(inactivity_penalty_delta)?; } - if let Some((proposer_index, proposer_delta)) = proposer_delta { - if include_validator_delta(proposer_index) { - deltas - .get_mut(proposer_index) - .ok_or(Error::ValidatorStatusesInconsistent)? - .inclusion_delay_delta - .combine(proposer_delta)?; + if let ProposerRewardCalculation::Include = proposer_reward { + if let Some((proposer_index, proposer_delta)) = proposer_delta { + if include_validator_delta(proposer_index) { + deltas + .get_mut(proposer_index) + .ok_or(Error::ValidatorStatusesInconsistent)? 
+ .inclusion_delay_delta + .combine(proposer_delta)?; + } } } } diff --git a/testing/ef_tests/src/cases/rewards.rs b/testing/ef_tests/src/cases/rewards.rs index ea75c69c35..c5879f5c9c 100644 --- a/testing/ef_tests/src/cases/rewards.rs +++ b/testing/ef_tests/src/cases/rewards.rs @@ -5,6 +5,7 @@ use compare_fields_derive::CompareFields; use serde::Deserialize; use ssz::four_byte_option_impl; use ssz_derive::{Decode, Encode}; +use state_processing::per_epoch_processing::base::rewards_and_penalties::ProposerRewardCalculation; use state_processing::{ per_epoch_processing::{ altair, @@ -130,6 +131,7 @@ impl Case for RewardsTest { let deltas = base::rewards_and_penalties::get_attestation_deltas_all( &state, &validator_statuses, + ProposerRewardCalculation::Include, spec, )?; From 8a085fc828cef14674ef342906b715dd816e8047 Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Wed, 18 Sep 2024 20:12:25 -0700 Subject: [PATCH 21/66] Fix test failure on Rust v1.81 (#6407) * generate rand f64 instead of arbitrary to prevent NaN vals * reintroduce quickcheck arbitrary but prevet NaN --- .../lighthouse_network/src/peer_manager/mod.rs | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index 4d91331235..320bbc4d63 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -2345,6 +2345,16 @@ mod tests { gossipsub_score: f64, } + // generate an arbitrary f64 while preventing NaN values + fn arbitrary_f64(g: &mut Gen) -> f64 { + loop { + let val = f64::arbitrary(g); + if !val.is_nan() { + return val; + } + } + } + impl Arbitrary for PeerCondition { fn arbitrary(g: &mut Gen) -> Self { let attestation_net_bitfield = { @@ -2370,9 +2380,9 @@ mod tests { outgoing: bool::arbitrary(g), attestation_net_bitfield, sync_committee_net_bitfield, - score: f64::arbitrary(g), + score: 
arbitrary_f64(g), trusted: bool::arbitrary(g), - gossipsub_score: f64::arbitrary(g), + gossipsub_score: arbitrary_f64(g), } } } From 46e0d66e2deea33c9ada4b4767f59c110255da65 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Thu, 19 Sep 2024 14:58:43 +1000 Subject: [PATCH 22/66] Fix deadlock on block cache. (#6412) * Fix deadlock on block cache. --- beacon_node/beacon_chain/src/eth1_chain.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/beacon_node/beacon_chain/src/eth1_chain.rs b/beacon_node/beacon_chain/src/eth1_chain.rs index 9e1bcbe6fa..2252d5b9c9 100644 --- a/beacon_node/beacon_chain/src/eth1_chain.rs +++ b/beacon_node/beacon_chain/src/eth1_chain.rs @@ -475,10 +475,10 @@ impl Eth1ChainBackend for CachingEth1Backend { voting_period_start_slot, ); - let blocks = self.core.blocks().read(); - - let votes_to_consider = - get_votes_to_consider(blocks.iter(), voting_period_start_seconds, spec); + let votes_to_consider = { + let blocks = self.core.blocks().read(); + get_votes_to_consider(blocks.iter(), voting_period_start_seconds, spec) + }; trace!( self.log, From a97d77c147da1a08389d5aebc6067975270990eb Mon Sep 17 00:00:00 2001 From: Age Manning Date: Fri, 20 Sep 2024 22:14:57 +1000 Subject: [PATCH 23/66] Correct ENR decoding on extension trait (#6402) * Correct enr extension encodings * Clippy my ol friend * Correct all encoding and comparisons * Found some more encodings * Fix remaining tests --- Cargo.lock | 2 + beacon_node/lighthouse_network/Cargo.toml | 1 + .../lighthouse_network/src/discovery/enr.rs | 58 ++++++++++++++----- .../lighthouse_network/src/discovery/mod.rs | 16 +++-- boot_node/Cargo.toml | 1 + boot_node/src/config.rs | 3 +- 6 files changed, 58 insertions(+), 23 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 94eb903844..5cfa602ef5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1061,6 +1061,7 @@ name = "boot_node" version = "5.3.0" dependencies = [ "beacon_node", + "bytes", "clap", "clap_utils", "eth2_network_config", @@ 
-5063,6 +5064,7 @@ name = "lighthouse_network" version = "0.2.0" dependencies = [ "alloy-primitives", + "alloy-rlp", "async-channel", "bytes", "delay_map", diff --git a/beacon_node/lighthouse_network/Cargo.toml b/beacon_node/lighthouse_network/Cargo.toml index c666b8b455..b0f5b9a5e1 100644 --- a/beacon_node/lighthouse_network/Cargo.toml +++ b/beacon_node/lighthouse_network/Cargo.toml @@ -44,6 +44,7 @@ delay_map = { workspace = true } bytes = { workspace = true } either = { workspace = true } itertools = { workspace = true } +alloy-rlp = { workspace = true } # Local dependencies void = "1.0.2" diff --git a/beacon_node/lighthouse_network/src/discovery/enr.rs b/beacon_node/lighthouse_network/src/discovery/enr.rs index 6aa4e232d2..ce29480ffd 100644 --- a/beacon_node/lighthouse_network/src/discovery/enr.rs +++ b/beacon_node/lighthouse_network/src/discovery/enr.rs @@ -6,6 +6,7 @@ use super::enr_ext::CombinedKeyExt; use super::ENR_FILENAME; use crate::types::{Enr, EnrAttestationBitfield, EnrSyncCommitteeBitfield}; use crate::NetworkConfig; +use alloy_rlp::bytes::Bytes; use libp2p::identity::Keypair; use slog::{debug, warn}; use ssz::{Decode, Encode}; @@ -45,7 +46,7 @@ pub trait Eth2Enr { impl Eth2Enr for Enr { fn attestation_bitfield(&self) -> Result, &'static str> { - let bitfield_bytes: Vec = self + let bitfield_bytes: Bytes = self .get_decodable(ATTESTATION_BITFIELD_ENR_KEY) .ok_or("ENR attestation bitfield non-existent")? .map_err(|_| "Invalid RLP Encoding")?; @@ -57,7 +58,7 @@ impl Eth2Enr for Enr { fn sync_committee_bitfield( &self, ) -> Result, &'static str> { - let bitfield_bytes: Vec = self + let bitfield_bytes: Bytes = self .get_decodable(SYNC_COMMITTEE_BITFIELD_ENR_KEY) .ok_or("ENR sync committee bitfield non-existent")? .map_err(|_| "Invalid RLP Encoding")?; @@ -80,7 +81,7 @@ impl Eth2Enr for Enr { } fn eth2(&self) -> Result { - let eth2_bytes: Vec = self + let eth2_bytes: Bytes = self .get_decodable(ETH2_ENR_KEY) .ok_or("ENR has no eth2 field")? 
.map_err(|_| "Invalid RLP Encoding")?; @@ -234,17 +235,23 @@ pub fn build_enr( } // set the `eth2` field on our ENR - builder.add_value(ETH2_ENR_KEY, &enr_fork_id.as_ssz_bytes()); + builder.add_value::(ETH2_ENR_KEY, &enr_fork_id.as_ssz_bytes().into()); // set the "attnets" field on our ENR let bitfield = BitVector::::new(); - builder.add_value(ATTESTATION_BITFIELD_ENR_KEY, &bitfield.as_ssz_bytes()); + builder.add_value::( + ATTESTATION_BITFIELD_ENR_KEY, + &bitfield.as_ssz_bytes().into(), + ); // set the "syncnets" field on our ENR let bitfield = BitVector::::new(); - builder.add_value(SYNC_COMMITTEE_BITFIELD_ENR_KEY, &bitfield.as_ssz_bytes()); + builder.add_value::( + SYNC_COMMITTEE_BITFIELD_ENR_KEY, + &bitfield.as_ssz_bytes().into(), + ); // only set `csc` if PeerDAS fork epoch has been scheduled if spec.is_peer_das_scheduled() { @@ -275,16 +282,16 @@ fn compare_enr(local_enr: &Enr, disk_enr: &Enr) -> bool { && local_enr.quic4() == disk_enr.quic4() && local_enr.quic6() == disk_enr.quic6() // must match on the same fork - && local_enr.get_decodable::>(ETH2_ENR_KEY) == disk_enr.get_decodable(ETH2_ENR_KEY) + && local_enr.get_decodable::(ETH2_ENR_KEY) == disk_enr.get_decodable(ETH2_ENR_KEY) // take preference over disk udp port if one is not specified && (local_enr.udp4().is_none() || local_enr.udp4() == disk_enr.udp4()) && (local_enr.udp6().is_none() || local_enr.udp6() == disk_enr.udp6()) // we need the ATTESTATION_BITFIELD_ENR_KEY and SYNC_COMMITTEE_BITFIELD_ENR_KEY and // PEERDAS_CUSTODY_SUBNET_COUNT_ENR_KEY key to match, otherwise we use a new ENR. This will // likely only be true for non-validating nodes. 
- && local_enr.get_decodable::>(ATTESTATION_BITFIELD_ENR_KEY) == disk_enr.get_decodable(ATTESTATION_BITFIELD_ENR_KEY) - && local_enr.get_decodable::>(SYNC_COMMITTEE_BITFIELD_ENR_KEY) == disk_enr.get_decodable(SYNC_COMMITTEE_BITFIELD_ENR_KEY) - && local_enr.get_decodable::>(PEERDAS_CUSTODY_SUBNET_COUNT_ENR_KEY) == disk_enr.get_decodable(PEERDAS_CUSTODY_SUBNET_COUNT_ENR_KEY) + && local_enr.get_decodable::(ATTESTATION_BITFIELD_ENR_KEY) == disk_enr.get_decodable(ATTESTATION_BITFIELD_ENR_KEY) + && local_enr.get_decodable::(SYNC_COMMITTEE_BITFIELD_ENR_KEY) == disk_enr.get_decodable(SYNC_COMMITTEE_BITFIELD_ENR_KEY) + && local_enr.get_decodable::(PEERDAS_CUSTODY_SUBNET_COUNT_ENR_KEY) == disk_enr.get_decodable(PEERDAS_CUSTODY_SUBNET_COUNT_ENR_KEY) } /// Loads enr from the given directory @@ -332,6 +339,14 @@ mod test { spec } + fn build_enr_with_config(config: NetworkConfig, spec: &ChainSpec) -> (Enr, CombinedKey) { + let keypair = libp2p::identity::secp256k1::Keypair::generate(); + let enr_key = CombinedKey::from_secp256k1(&keypair); + let enr_fork_id = EnrForkId::default(); + let enr = build_enr::(&enr_key, &config, &enr_fork_id, spec).unwrap(); + (enr, enr_key) + } + #[test] fn custody_subnet_count_default() { let config = NetworkConfig { @@ -363,11 +378,22 @@ mod test { ); } - fn build_enr_with_config(config: NetworkConfig, spec: &ChainSpec) -> (Enr, CombinedKey) { - let keypair = libp2p::identity::secp256k1::Keypair::generate(); - let enr_key = CombinedKey::from_secp256k1(&keypair); - let enr_fork_id = EnrForkId::default(); - let enr = build_enr::(&enr_key, &config, &enr_fork_id, spec).unwrap(); - (enr, enr_key) + #[test] + fn test_encode_decode_eth2_enr() { + let (enr, _key) = build_enr_with_config(NetworkConfig::default(), &E::default_spec()); + // Check all Eth2 Mappings are decodeable + enr.eth2().unwrap(); + enr.attestation_bitfield::().unwrap(); + enr.sync_committee_bitfield::().unwrap(); + } + + #[test] + fn test_eth2_enr_encodings() { + let enr_str = 
"enr:-Mm4QEX9fFRi1n4H3M9sGIgFQ6op1IysTU4Gz6tpIiOGRM1DbJtIih1KgGgv3Xl-oUlwco3HwdXsbYuXStBuNhUVIPoBh2F0dG5ldHOIAAAAAAAAAACDY3NjBIRldGgykI-3hTFgAAA4AOH1BQAAAACCaWSCdjSCaXCErBAADoRxdWljgiMpiXNlY3AyNTZrMaECph91xMyTVyE5MVj6lBpPgz6KP2--Kr9lPbo6_GjrfRKIc3luY25ldHMAg3RjcIIjKIN1ZHCCIyg"; + //let my_enr_str = "enr:-Ma4QM2I1AxBU116QcMV2wKVrSr5Nsko90gMVkstZO4APysQCEwJJJeuTvODKmv7fDsLhVFjrlidVNhBOxSZ8sZPbCWCCcqHYXR0bmV0c4gAAAAAAAAMAIRldGgykGqVoakEAAAA__________-CaWSCdjSCaXCEJq-HPYRxdWljgiMziXNlY3AyNTZrMaECMPAnmmHQpD1k6DuOxWVoFXBoTYY6Wuv9BP4lxauAlmiIc3luY25ldHMAg3RjcIIjMoN1ZHCCIzI"; + let enr = Enr::from_str(enr_str).unwrap(); + enr.eth2().unwrap(); + enr.attestation_bitfield::().unwrap(); + enr.sync_committee_bitfield::().unwrap(); } } diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index 3356dd3cf7..e1cea3153a 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -15,6 +15,7 @@ pub use enr::{build_enr, load_enr_from_disk, use_or_load_enr, CombinedKey, Eth2E pub use enr_ext::{peer_id_to_node_id, CombinedKeyExt, EnrExt}; pub use libp2p::identity::{Keypair, PublicKey}; +use alloy_rlp::bytes::Bytes; use enr::{ATTESTATION_BITFIELD_ENR_KEY, ETH2_ENR_KEY, SYNC_COMMITTEE_BITFIELD_ENR_KEY}; use futures::prelude::*; use futures::stream::FuturesUnordered; @@ -512,9 +513,9 @@ impl Discovery { // insert the bitfield into the ENR record self.discv5 - .enr_insert( + .enr_insert::( ATTESTATION_BITFIELD_ENR_KEY, - ¤t_bitfield.as_ssz_bytes(), + ¤t_bitfield.as_ssz_bytes().into(), ) .map_err(|e| format!("{:?}", e))?; } @@ -546,9 +547,9 @@ impl Discovery { // insert the bitfield into the ENR record self.discv5 - .enr_insert( + .enr_insert::( SYNC_COMMITTEE_BITFIELD_ENR_KEY, - ¤t_bitfield.as_ssz_bytes(), + ¤t_bitfield.as_ssz_bytes().into(), ) .map_err(|e| format!("{:?}", e))?; } @@ -582,7 +583,7 @@ impl Discovery { let _ = self .discv5 - 
.enr_insert(ETH2_ENR_KEY, &enr_fork_id.as_ssz_bytes()) + .enr_insert::(ETH2_ENR_KEY, &enr_fork_id.as_ssz_bytes().into()) .map_err(|e| { warn!( self.log, @@ -1289,7 +1290,10 @@ mod tests { bitfield.set(id, true).unwrap(); } - builder.add_value(ATTESTATION_BITFIELD_ENR_KEY, &bitfield.as_ssz_bytes()); + builder.add_value::( + ATTESTATION_BITFIELD_ENR_KEY, + &bitfield.as_ssz_bytes().into(), + ); builder.build(&enr_key).unwrap() } diff --git a/boot_node/Cargo.toml b/boot_node/Cargo.toml index 46ccd4566b..76d41ae11a 100644 --- a/boot_node/Cargo.toml +++ b/boot_node/Cargo.toml @@ -21,3 +21,4 @@ slog-scope = "4.3.0" hex = { workspace = true } serde = { workspace = true } eth2_network_config = { workspace = true } +bytes = { workspace = true } diff --git a/boot_node/src/config.rs b/boot_node/src/config.rs index aaa9f08482..bb7678631f 100644 --- a/boot_node/src/config.rs +++ b/boot_node/src/config.rs @@ -1,4 +1,5 @@ use beacon_node::{get_data_dir, set_network_config}; +use bytes::Bytes; use clap::ArgMatches; use eth2_network_config::Eth2NetworkConfig; use lighthouse_network::discv5::{self, enr::CombinedKey, Enr}; @@ -152,7 +153,7 @@ impl BootNodeConfig { // If we know of the ENR field, add it to the initial construction if let Some(enr_fork_bytes) = enr_fork { - builder.add_value("eth2", &enr_fork_bytes); + builder.add_value::("eth2", &enr_fork_bytes.into()); } builder .build(&local_key) From 18c97a7d64af4dfbab1e21c2f30a1d4492a1e074 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Mon, 23 Sep 2024 10:17:09 +1000 Subject: [PATCH 24/66] Update blog reference to new combined blog (#6414) * Update blog reference to new combined blog * Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 11a87b81fe..4b22087bcd 100644 --- a/README.md +++ b/README.md @@ -41,7 +41,7 @@ as the canonical staking deposit contract address. 
The [Lighthouse Book](https://lighthouse-book.sigmaprime.io) contains information for users and developers. -The Lighthouse team maintains a blog at [lighthouse-blog.sigmaprime.io][blog] which contains periodic +The Lighthouse team maintains a blog at [https://blog.sigmaprime.io/tag/lighthouse][blog] which contains periodic progress updates, roadmap insights and interesting findings. ## Branches From b619f1ab5c3ba9b610f12b7353383b2bcefb1ca4 Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Sun, 22 Sep 2024 21:54:32 -0700 Subject: [PATCH 25/66] Make `BeaconChain::kzg` field mandatory (#6267) * make kzg field required * update todo * always load trusted setup WIP * fmt * use new rust_eth_kzg version * merge conlficts * add kzg fn with trusted setup disabled * as_slice * add kzg with no precomp * ignore udep for kzg * refactor kzg init * fix peerdas kzg schedule * fix * udeps * uuuudeps * merge conflict resolved * merge conflict * merge conflicts * resolve TODO * update * move kzg to a test util fn * remove trusted setup default impl * lint fmt * fix failing test * lint * fix test * Merge branch 'unstable' into beacon-chain-kzg-field-required --- Cargo.lock | 8 ++- beacon_node/beacon_chain/benches/benches.rs | 10 +--- beacon_node/beacon_chain/src/beacon_chain.rs | 8 +-- .../beacon_chain/src/blob_verification.rs | 15 +---- .../beacon_chain/src/block_verification.rs | 10 +--- beacon_node/beacon_chain/src/builder.rs | 17 +++--- .../src/data_availability_checker.rs | 55 ++++++------------- .../src/data_availability_checker/error.rs | 4 +- .../src/data_column_verification.rs | 14 +---- beacon_node/beacon_chain/src/errors.rs | 1 - beacon_node/beacon_chain/src/kzg_utils.rs | 5 +- beacon_node/beacon_chain/src/test_utils.rs | 43 +++++++++++---- beacon_node/beacon_chain/tests/events.rs | 4 +- beacon_node/beacon_chain/tests/store_tests.rs | 20 +++---- beacon_node/client/Cargo.toml | 2 + beacon_node/client/src/builder.rs | 26 ++++----- beacon_node/client/src/config.rs | 8 ++- 
.../test_utils/execution_block_generator.rs | 8 +-- beacon_node/network/Cargo.toml | 3 + .../gossip_methods.rs | 7 +-- .../network_beacon_processor/sync_methods.rs | 7 +-- .../network/src/subnet_service/tests/mod.rs | 5 +- beacon_node/src/config.rs | 12 ++-- common/eth2_network_config/Cargo.toml | 1 + common/eth2_network_config/src/lib.rs | 33 +++-------- crypto/kzg/Cargo.toml | 2 +- crypto/kzg/benches/benchmark.rs | 12 ++-- crypto/kzg/src/lib.rs | 45 +++++++++++---- crypto/kzg/src/trusted_setup.rs | 6 ++ .../kzg}/trusted_setup.json | 0 .../src/cases/kzg_verify_blob_kzg_proof.rs | 4 +- testing/simulator/Cargo.toml | 1 + testing/simulator/src/local_network.rs | 6 +- 33 files changed, 190 insertions(+), 212 deletions(-) rename {common/eth2_network_config/built_in_network_configs => crypto/kzg}/trusted_setup.json (100%) diff --git a/Cargo.lock b/Cargo.lock index 5cfa602ef5..5fcba6b264 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1408,6 +1408,7 @@ dependencies = [ "genesis", "http_api", "http_metrics", + "kzg", "lighthouse_metrics", "lighthouse_network", "monitoring_api", @@ -1415,6 +1416,7 @@ dependencies = [ "operation_pool", "sensitive_url", "serde", + "serde_json", "serde_yaml", "slasher", "slasher_service", @@ -2621,6 +2623,7 @@ dependencies = [ "discv5", "eth2_config", "ethereum_ssz", + "kzg", "logging", "pretty_reqwest_error", "reqwest", @@ -4413,7 +4416,6 @@ dependencies = [ "c-kzg", "criterion", "derivative", - "eth2_network_config", "ethereum_hashing", "ethereum_serde_utils", "ethereum_ssz", @@ -5641,6 +5643,7 @@ dependencies = [ "derivative", "error-chain", "eth2", + "eth2_network_config", "ethereum_ssz", "execution_layer", "fnv", @@ -5650,6 +5653,7 @@ dependencies = [ "hex", "igd-next", "itertools 0.10.5", + "kzg", "lighthouse_metrics", "lighthouse_network", "logging", @@ -5658,6 +5662,7 @@ dependencies = [ "operation_pool", "parking_lot 0.12.3", "rand", + "serde_json", "slog", "slog-async", "slog-term", @@ -7746,6 +7751,7 @@ dependencies = [ 
"eth2_network_config", "execution_layer", "futures", + "kzg", "node_test_rig", "parking_lot 0.12.3", "rayon", diff --git a/beacon_node/beacon_chain/benches/benches.rs b/beacon_node/beacon_chain/benches/benches.rs index 4a29be9025..b2f17062dc 100644 --- a/beacon_node/beacon_chain/benches/benches.rs +++ b/beacon_node/beacon_chain/benches/benches.rs @@ -1,11 +1,11 @@ use std::sync::Arc; use beacon_chain::kzg_utils::{blobs_to_data_column_sidecars, reconstruct_data_columns}; +use beacon_chain::test_utils::get_kzg; use criterion::{black_box, criterion_group, criterion_main, Criterion}; use bls::Signature; -use eth2_network_config::TRUSTED_SETUP_BYTES; -use kzg::{Kzg, KzgCommitment, TrustedSetup}; +use kzg::KzgCommitment; use types::{ beacon_block_body::KzgCommitments, BeaconBlock, BeaconBlockDeneb, Blob, BlobsList, ChainSpec, EmptyBlock, EthSpec, MainnetEthSpec, SignedBeaconBlock, @@ -35,11 +35,7 @@ fn all_benches(c: &mut Criterion) { type E = MainnetEthSpec; let spec = Arc::new(E::default_spec()); - let trusted_setup: TrustedSetup = serde_json::from_reader(TRUSTED_SETUP_BYTES) - .map_err(|e| format!("Unable to read trusted setup file: {}", e)) - .expect("should have trusted setup"); - let kzg = Arc::new(Kzg::new_from_trusted_setup(trusted_setup).expect("should create kzg")); - + let kzg = get_kzg(&spec); for blob_count in [1, 2, 3, 6] { let kzg = kzg.clone(); let (signed_block, blob_sidecars) = create_test_block_and_blobs::(blob_count, &spec); diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index d83955854d..7094060b71 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -497,7 +497,7 @@ pub struct BeaconChain { /// they are collected and combined. pub data_availability_checker: Arc>, /// The KZG trusted setup used by this chain. 
- pub kzg: Option>, + pub kzg: Arc, } pub enum BeaconBlockResponseWrapper { @@ -5682,10 +5682,8 @@ impl BeaconChain { let kzg_proofs = Vec::from(proofs); - let kzg = self - .kzg - .as_ref() - .ok_or(BlockProductionError::TrustedSetupNotInitialized)?; + let kzg = self.kzg.as_ref(); + kzg_utils::validate_blobs::( kzg, expected_kzg_commitments, diff --git a/beacon_node/beacon_chain/src/blob_verification.rs b/beacon_node/beacon_chain/src/blob_verification.rs index e4646d6288..743748a76d 100644 --- a/beacon_node/beacon_chain/src/blob_verification.rs +++ b/beacon_node/beacon_chain/src/blob_verification.rs @@ -115,13 +115,6 @@ pub enum GossipBlobError { index: u64, }, - /// `Kzg` struct hasn't been initialized. This is an internal error. - /// - /// ## Peer scoring - /// - /// The peer isn't faulty, This is an internal error. - KzgNotInitialized, - /// The kzg verification failed. /// /// ## Peer scoring @@ -559,11 +552,9 @@ pub fn validate_blob_sidecar_for_gossip( } // Kzg verification for gossip blob sidecar - let kzg = chain - .kzg - .as_ref() - .ok_or(GossipBlobError::KzgNotInitialized)?; - let kzg_verified_blob = KzgVerifiedBlob::new(blob_sidecar, kzg, seen_timestamp) + let kzg = chain.kzg.as_ref(); + + let kzg_verified_blob = KzgVerifiedBlob::new(blob_sidecar.clone(), kzg, seen_timestamp) .map_err(GossipBlobError::KzgError)?; let blob_sidecar = &kzg_verified_blob.blob; diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 55547aaa18..027c013a49 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -789,19 +789,11 @@ fn build_gossip_verified_data_columns( // Only attempt to build data columns if blobs is non empty to avoid skewing the metrics. 
.filter(|b| !b.is_empty()) .map(|blobs| { - // NOTE: we expect KZG to be initialized if the blobs are present - let kzg = chain - .kzg - .as_ref() - .ok_or(BlockContentsError::DataColumnError( - GossipDataColumnError::KzgNotInitialized, - ))?; - let mut timer = metrics::start_timer_vec( &metrics::DATA_COLUMN_SIDECAR_COMPUTATION, &[&blobs.len().to_string()], ); - let sidecars = blobs_to_data_column_sidecars(&blobs, block, kzg, &chain.spec) + let sidecars = blobs_to_data_column_sidecars(&blobs, block, &chain.kzg, &chain.spec) .discard_timer_on_break(&mut timer)?; drop(timer); let mut gossip_verified_data_columns = vec![]; diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index d38530b904..c38101e274 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -101,7 +101,7 @@ pub struct BeaconChainBuilder { // Pending I/O batch that is constructed during building and should be executed atomically // alongside `PersistedBeaconChain` storage when `BeaconChainBuilder::build` is called. pending_io_batch: Vec, - kzg: Option>, + kzg: Arc, task_executor: Option, validator_monitor_config: Option, import_all_data_columns: bool, @@ -120,7 +120,7 @@ where /// /// The `_eth_spec_instance` parameter is only supplied to make concrete the `E` trait. /// This should generally be either the `MinimalEthSpec` or `MainnetEthSpec` types. - pub fn new(_eth_spec_instance: E) -> Self { + pub fn new(_eth_spec_instance: E, kzg: Arc) -> Self { Self { store: None, store_migrator_config: None, @@ -143,7 +143,7 @@ where beacon_graffiti: GraffitiOrigin::default(), slasher: None, pending_io_batch: vec![], - kzg: None, + kzg, task_executor: None, validator_monitor_config: None, import_all_data_columns: false, @@ -694,11 +694,6 @@ where self } - pub fn kzg(mut self, kzg: Option>) -> Self { - self.kzg = kzg; - self - } - /// Consumes `self`, returning a `BeaconChain` if all required parameters have been supplied. 
/// /// An error will be returned at runtime if all required parameters have not been configured. @@ -1157,7 +1152,7 @@ fn descriptive_db_error(item: &str, error: &StoreError) -> String { #[cfg(test)] mod test { use super::*; - use crate::test_utils::EphemeralHarnessType; + use crate::test_utils::{get_kzg, EphemeralHarnessType}; use ethereum_hashing::hash; use genesis::{ generate_deterministic_keypairs, interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH, @@ -1204,7 +1199,9 @@ mod test { let (shutdown_tx, _) = futures::channel::mpsc::channel(1); let runtime = TestRuntime::default(); - let chain = Builder::new(MinimalEthSpec) + let kzg = get_kzg(&spec); + + let chain = Builder::new(MinimalEthSpec, kzg) .logger(log.clone()) .store(Arc::new(store)) .task_executor(runtime.task_executor.clone()) diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs index 470cee713f..26fb46ef7f 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -69,7 +69,7 @@ pub const STATE_LRU_CAPACITY: usize = STATE_LRU_CAPACITY_NON_ZERO.get(); pub struct DataAvailabilityChecker { availability_cache: Arc>, slot_clock: T::SlotClock, - kzg: Option>, + kzg: Arc, spec: Arc, } @@ -97,7 +97,7 @@ impl Debug for Availability { impl DataAvailabilityChecker { pub fn new( slot_clock: T::SlotClock, - kzg: Option>, + kzg: Arc, store: BeaconStore, import_all_data_columns: bool, spec: ChainSpec, @@ -190,18 +190,17 @@ impl DataAvailabilityChecker { epoch: Epoch, blobs: FixedBlobSidecarList, ) -> Result, AvailabilityCheckError> { - let Some(kzg) = self.kzg.as_ref() else { - return Err(AvailabilityCheckError::KzgNotInitialized); - }; - let seen_timestamp = self .slot_clock .now_duration() .ok_or(AvailabilityCheckError::SlotClockError)?; - let verified_blobs = - KzgVerifiedBlobList::new(Vec::from(blobs).into_iter().flatten(), kzg, seen_timestamp) - 
.map_err(AvailabilityCheckError::Kzg)?; + let verified_blobs = KzgVerifiedBlobList::new( + Vec::from(blobs).into_iter().flatten(), + &self.kzg, + seen_timestamp, + ) + .map_err(AvailabilityCheckError::Kzg)?; self.availability_cache .put_kzg_verified_blobs(block_root, epoch, verified_blobs) @@ -217,23 +216,20 @@ impl DataAvailabilityChecker { custody_columns: DataColumnSidecarList, ) -> Result<(Availability, DataColumnsToPublish), AvailabilityCheckError> { - let Some(kzg) = self.kzg.as_ref() else { - return Err(AvailabilityCheckError::KzgNotInitialized); - }; - // TODO(das): report which column is invalid for proper peer scoring // TODO(das): batch KZG verification here let verified_custody_columns = custody_columns .into_iter() .map(|column| { Ok(KzgVerifiedCustodyDataColumn::from_asserted_custody( - KzgVerifiedDataColumn::new(column, kzg).map_err(AvailabilityCheckError::Kzg)?, + KzgVerifiedDataColumn::new(column, &self.kzg) + .map_err(AvailabilityCheckError::Kzg)?, )) }) .collect::, AvailabilityCheckError>>()?; self.availability_cache.put_kzg_verified_data_columns( - kzg, + &self.kzg, block_root, epoch, verified_custody_columns, @@ -269,9 +265,6 @@ impl DataAvailabilityChecker { gossip_data_columns: Vec>, ) -> Result<(Availability, DataColumnsToPublish), AvailabilityCheckError> { - let Some(kzg) = self.kzg.as_ref() else { - return Err(AvailabilityCheckError::KzgNotInitialized); - }; let epoch = slot.epoch(T::EthSpec::slots_per_epoch()); let custody_columns = gossip_data_columns @@ -280,7 +273,7 @@ impl DataAvailabilityChecker { .collect::>(); self.availability_cache.put_kzg_verified_data_columns( - kzg, + &self.kzg, block_root, epoch, custody_columns, @@ -314,11 +307,7 @@ impl DataAvailabilityChecker { let (block_root, block, blobs, data_columns) = block.deconstruct(); if self.blobs_required_for_block(&block) { return if let Some(blob_list) = blobs.as_ref() { - let kzg = self - .kzg - .as_ref() - .ok_or(AvailabilityCheckError::KzgNotInitialized)?; - 
verify_kzg_for_blob_list(blob_list.iter(), kzg) + verify_kzg_for_blob_list(blob_list.iter(), &self.kzg) .map_err(AvailabilityCheckError::Kzg)?; Ok(MaybeAvailableBlock::Available(AvailableBlock { block_root, @@ -334,15 +323,11 @@ impl DataAvailabilityChecker { } if self.data_columns_required_for_block(&block) { return if let Some(data_column_list) = data_columns.as_ref() { - let kzg = self - .kzg - .as_ref() - .ok_or(AvailabilityCheckError::KzgNotInitialized)?; verify_kzg_for_data_column_list( data_column_list .iter() .map(|custody_column| custody_column.as_data_column()), - kzg, + &self.kzg, ) .map_err(AvailabilityCheckError::Kzg)?; Ok(MaybeAvailableBlock::Available(AvailableBlock { @@ -395,11 +380,7 @@ impl DataAvailabilityChecker { // verify kzg for all blobs at once if !all_blobs.is_empty() { - let kzg = self - .kzg - .as_ref() - .ok_or(AvailabilityCheckError::KzgNotInitialized)?; - verify_kzg_for_blob_list(all_blobs.iter(), kzg)?; + verify_kzg_for_blob_list(all_blobs.iter(), &self.kzg)?; } let all_data_columns = blocks @@ -415,11 +396,7 @@ impl DataAvailabilityChecker { // verify kzg for all data columns at once if !all_data_columns.is_empty() { - let kzg = self - .kzg - .as_ref() - .ok_or(AvailabilityCheckError::KzgNotInitialized)?; - verify_kzg_for_data_column_list(all_data_columns.iter(), kzg)?; + verify_kzg_for_data_column_list(all_data_columns.iter(), &self.kzg)?; } for block in blocks { diff --git a/beacon_node/beacon_chain/src/data_availability_checker/error.rs b/beacon_node/beacon_chain/src/data_availability_checker/error.rs index 79793d6dc2..7f34cacefe 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/error.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/error.rs @@ -4,7 +4,6 @@ use types::{BeaconStateError, Hash256}; #[derive(Debug)] pub enum Error { Kzg(KzgError), - KzgNotInitialized, KzgVerificationFailed, KzgCommitmentMismatch { blob_commitment: KzgCommitment, @@ -36,8 +35,7 @@ pub enum ErrorCategory { impl Error { 
pub fn category(&self) -> ErrorCategory { match self { - Error::KzgNotInitialized - | Error::SszTypes(_) + Error::SszTypes(_) | Error::MissingBlobs | Error::MissingCustodyColumns | Error::StoreError(_) diff --git a/beacon_node/beacon_chain/src/data_column_verification.rs b/beacon_node/beacon_chain/src/data_column_verification.rs index f4a5feaee2..1647f190cf 100644 --- a/beacon_node/beacon_chain/src/data_column_verification.rs +++ b/beacon_node/beacon_chain/src/data_column_verification.rs @@ -52,12 +52,6 @@ pub enum GossipDataColumnError { data_column_slot: Slot, parent_slot: Slot, }, - /// `Kzg` struct hasn't been initialized. This is an internal error. - /// - /// ## Peer scoring - /// - /// The peer isn't faulty, This is an internal error. - KzgNotInitialized, /// The kzg verification failed. /// /// ## Peer scoring @@ -382,12 +376,8 @@ pub fn validate_data_column_sidecar_for_gossip( let parent_block = verify_parent_block_and_finalized_descendant(data_column.clone(), chain)?; verify_slot_higher_than_parent(&parent_block, column_slot)?; verify_proposer_and_signature(&data_column, &parent_block, chain)?; - - let kzg = chain - .kzg - .clone() - .ok_or(GossipDataColumnError::KzgNotInitialized)?; - let kzg_verified_data_column = verify_kzg_for_data_column(data_column.clone(), &kzg) + let kzg = &chain.kzg; + let kzg_verified_data_column = verify_kzg_for_data_column(data_column.clone(), kzg) .map_err(GossipDataColumnError::InvalidKzgProof)?; chain diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 994ac79af7..8a317ce754 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -291,7 +291,6 @@ pub enum BlockProductionError { TokioJoin(JoinError), BeaconChain(BeaconChainError), InvalidPayloadFork, - TrustedSetupNotInitialized, InvalidBlockVariant(String), KzgError(kzg::Error), FailedToBuildBlobSidecars(String), diff --git a/beacon_node/beacon_chain/src/kzg_utils.rs 
b/beacon_node/beacon_chain/src/kzg_utils.rs index c2355e6f4f..91c1098f81 100644 --- a/beacon_node/beacon_chain/src/kzg_utils.rs +++ b/beacon_node/beacon_chain/src/kzg_utils.rs @@ -290,8 +290,7 @@ pub fn reconstruct_data_columns( mod test { use crate::kzg_utils::{blobs_to_data_column_sidecars, reconstruct_data_columns}; use bls::Signature; - use eth2_network_config::TRUSTED_SETUP_BYTES; - use kzg::{Kzg, KzgCommitment, TrustedSetup}; + use kzg::{trusted_setup::get_trusted_setup, Kzg, KzgCommitment, TrustedSetup}; use types::{ beacon_block_body::KzgCommitments, BeaconBlock, BeaconBlockDeneb, Blob, BlobsList, ChainSpec, EmptyBlock, EthSpec, MainnetEthSpec, SignedBeaconBlock, @@ -377,7 +376,7 @@ mod test { } fn get_kzg() -> Kzg { - let trusted_setup: TrustedSetup = serde_json::from_reader(TRUSTED_SETUP_BYTES) + let trusted_setup: TrustedSetup = serde_json::from_reader(get_trusted_setup().as_slice()) .map_err(|e| format!("Unable to read trusted setup file: {}", e)) .expect("should have trusted setup"); Kzg::new_from_trusted_setup_das_enabled(trusted_setup).expect("should create kzg") diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 1402810491..8261500fba 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -18,7 +18,6 @@ use crate::{ }; use bls::get_withdrawal_credentials; use eth2::types::SignedBlockContentsTuple; -use eth2_network_config::TRUSTED_SETUP_BYTES; use execution_layer::test_utils::generate_genesis_header; use execution_layer::{ auth::JwtKey, @@ -31,6 +30,7 @@ use execution_layer::{ use futures::channel::mpsc::Receiver; pub use genesis::{interop_genesis_state_with_eth1, DEFAULT_ETH1_BLOCK_HASH}; use int_to_bytes::int_to_bytes32; +use kzg::trusted_setup::get_trusted_setup; use kzg::{Kzg, TrustedSetup}; use merkle_proof::MerkleTree; use operation_pool::ReceivedPreCapella; @@ -75,22 +75,40 @@ pub const FORK_NAME_ENV_VAR: &str = "FORK_NAME"; // a 
different value. pub const DEFAULT_TARGET_AGGREGATORS: u64 = u64::MAX; -pub static KZG: LazyLock> = LazyLock::new(|| { - let trusted_setup: TrustedSetup = serde_json::from_reader(TRUSTED_SETUP_BYTES) +static KZG: LazyLock> = LazyLock::new(|| { + let trusted_setup: TrustedSetup = serde_json::from_reader(get_trusted_setup().as_slice()) .map_err(|e| format!("Unable to read trusted setup file: {}", e)) .expect("should have trusted setup"); let kzg = Kzg::new_from_trusted_setup(trusted_setup).expect("should create kzg"); Arc::new(kzg) }); -pub static KZG_PEERDAS: LazyLock> = LazyLock::new(|| { - let trusted_setup: TrustedSetup = serde_json::from_reader(TRUSTED_SETUP_BYTES) +static KZG_PEERDAS: LazyLock> = LazyLock::new(|| { + let trusted_setup: TrustedSetup = serde_json::from_reader(get_trusted_setup().as_slice()) .map_err(|e| format!("Unable to read trusted setup file: {}", e)) .expect("should have trusted setup"); let kzg = Kzg::new_from_trusted_setup_das_enabled(trusted_setup).expect("should create kzg"); Arc::new(kzg) }); +static KZG_NO_PRECOMP: LazyLock> = LazyLock::new(|| { + let trusted_setup: TrustedSetup = serde_json::from_reader(get_trusted_setup().as_slice()) + .map_err(|e| format!("Unable to read trusted setup file: {}", e)) + .expect("should have trusted setup"); + let kzg = Kzg::new_from_trusted_setup_no_precomp(trusted_setup).expect("should create kzg"); + Arc::new(kzg) +}); + +pub fn get_kzg(spec: &ChainSpec) -> Arc { + if spec.eip7594_fork_epoch.is_some() { + KZG_PEERDAS.clone() + } else if spec.deneb_fork_epoch.is_some() { + KZG.clone() + } else { + KZG_NO_PRECOMP.clone() + } +} + pub type BaseHarnessType = Witness, E, THotStore, TColdStore>; @@ -522,12 +540,13 @@ where let validator_keypairs = self .validator_keypairs .expect("cannot build without validator keypairs"); - let kzg = spec.deneb_fork_epoch.map(|_| KZG.clone()); + + let kzg = get_kzg(&spec); let validator_monitor_config = self.validator_monitor_config.unwrap_or_default(); let chain_config 
= self.chain_config.unwrap_or_default(); - let mut builder = BeaconChainBuilder::new(self.eth_spec_instance) + let mut builder = BeaconChainBuilder::new(self.eth_spec_instance, kzg.clone()) .logger(log.clone()) .custom_spec(spec.clone()) .store(self.store.expect("cannot build without store")) @@ -546,8 +565,7 @@ where log.clone(), 5, ))) - .validator_monitor_config(validator_monitor_config) - .kzg(kzg); + .validator_monitor_config(validator_monitor_config); builder = if let Some(mutator) = self.initial_mutator { mutator(builder) @@ -602,7 +620,7 @@ pub fn mock_execution_layer_from_parts( HARNESS_GENESIS_TIME + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64() }); - let kzg_opt = spec.deneb_fork_epoch.map(|_| KZG.clone()); + let kzg = get_kzg(spec); MockExecutionLayer::new( task_executor, @@ -612,7 +630,7 @@ pub fn mock_execution_layer_from_parts( prague_time, Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()), spec.clone(), - kzg_opt, + Some(kzg), ) } @@ -2842,9 +2860,10 @@ pub fn generate_rand_block_and_data_columns( SignedBeaconBlock>, Vec>>, ) { + let kzg = get_kzg(spec); let (block, blobs) = generate_rand_block_and_blobs(fork_name, num_blobs, rng); let blob: BlobsList = blobs.into_iter().map(|b| b.blob).collect::>().into(); - let data_columns = blobs_to_data_column_sidecars(&blob, &block, &KZG_PEERDAS, spec).unwrap(); + let data_columns = blobs_to_data_column_sidecars(&blob, &block, &kzg, spec).unwrap(); (block, data_columns) } diff --git a/beacon_node/beacon_chain/tests/events.rs b/beacon_node/beacon_chain/tests/events.rs index 1261e2d53e..b8d4a7722a 100644 --- a/beacon_node/beacon_chain/tests/events.rs +++ b/beacon_node/beacon_chain/tests/events.rs @@ -25,7 +25,7 @@ async fn blob_sidecar_event_on_process_gossip_blob() { let mut blob_event_receiver = event_handler.subscribe_blob_sidecar(); // build and process a gossip verified blob - let kzg = harness.chain.kzg.as_ref().unwrap(); + let kzg = harness.chain.kzg.as_ref(); let mut rng = 
StdRng::seed_from_u64(0xDEADBEEF0BAD5EEDu64); let sidecar = BlobSidecar::random_valid(&mut rng, kzg) .map(Arc::new) @@ -59,7 +59,7 @@ async fn blob_sidecar_event_on_process_rpc_blobs() { let mut blob_event_receiver = event_handler.subscribe_blob_sidecar(); // build and process multiple rpc blobs - let kzg = harness.chain.kzg.as_ref().unwrap(); + let kzg = harness.chain.kzg.as_ref(); let mut rng = StdRng::seed_from_u64(0xDEADBEEF0BAD5EEDu64); let mut blob_1 = BlobSidecar::random_valid(&mut rng, kzg).unwrap(); diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 1b1e5ea514..541abaa424 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -7,8 +7,8 @@ use beacon_chain::data_availability_checker::AvailableBlock; use beacon_chain::schema_change::migrate_schema; use beacon_chain::test_utils::SyncCommitteeStrategy; use beacon_chain::test_utils::{ - mock_execution_layer_from_parts, test_spec, AttestationStrategy, BeaconChainHarness, - BlockStrategy, DiskHarnessType, KZG, + get_kzg, mock_execution_layer_from_parts, test_spec, AttestationStrategy, BeaconChainHarness, + BlockStrategy, DiskHarnessType, }; use beacon_chain::{ data_availability_checker::MaybeAvailableBlock, historical_blocks::HistoricalBlockError, @@ -164,7 +164,7 @@ async fn light_client_bootstrap_test() { .unwrap() .unwrap(); - let kzg = spec.deneb_fork_epoch.map(|_| KZG.clone()); + let kzg = get_kzg(&spec); let mock = mock_execution_layer_from_parts(&harness.spec, harness.runtime.task_executor.clone()); @@ -180,7 +180,7 @@ async fn light_client_bootstrap_test() { let (shutdown_tx, _shutdown_rx) = futures::channel::mpsc::channel(1); - let beacon_chain = BeaconChainBuilder::>::new(MinimalEthSpec) + let beacon_chain = BeaconChainBuilder::>::new(MinimalEthSpec, kzg) .store(store.clone()) .custom_spec(test_spec::()) .task_executor(harness.chain.task_executor.clone()) @@ -203,7 +203,6 @@ async fn 
light_client_bootstrap_test() { 1, ))) .execution_layer(Some(mock.el)) - .kzg(kzg) .build() .expect("should build"); @@ -299,7 +298,7 @@ async fn light_client_updates_test() { .unwrap() .unwrap(); - let kzg = spec.deneb_fork_epoch.map(|_| KZG.clone()); + let kzg = get_kzg(&spec); let mock = mock_execution_layer_from_parts(&harness.spec, harness.runtime.task_executor.clone()); @@ -324,7 +323,7 @@ async fn light_client_updates_test() { let (shutdown_tx, _shutdown_rx) = futures::channel::mpsc::channel(1); - let beacon_chain = BeaconChainBuilder::>::new(MinimalEthSpec) + let beacon_chain = BeaconChainBuilder::>::new(MinimalEthSpec, kzg) .store(store.clone()) .custom_spec(test_spec::()) .task_executor(harness.chain.task_executor.clone()) @@ -347,7 +346,6 @@ async fn light_client_updates_test() { 1, ))) .execution_layer(Some(mock.el)) - .kzg(kzg) .build() .expect("should build"); @@ -2680,7 +2678,8 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { let store = get_store(&temp2); let spec = test_spec::(); let seconds_per_slot = spec.seconds_per_slot; - let kzg = spec.deneb_fork_epoch.map(|_| KZG.clone()); + + let kzg = get_kzg(&spec); let mock = mock_execution_layer_from_parts(&harness.spec, harness.runtime.task_executor.clone()); @@ -2694,7 +2693,7 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { ); slot_clock.set_slot(harness.get_current_slot().as_u64()); - let beacon_chain = BeaconChainBuilder::>::new(MinimalEthSpec) + let beacon_chain = BeaconChainBuilder::>::new(MinimalEthSpec, kzg) .store(store.clone()) .custom_spec(test_spec::()) .task_executor(harness.chain.task_executor.clone()) @@ -2717,7 +2716,6 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { 1, ))) .execution_layer(Some(mock.el)) - .kzg(kzg) .build() .expect("should build"); diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index 88ae650e72..06f7763c8a 100644 --- a/beacon_node/client/Cargo.toml +++ 
b/beacon_node/client/Cargo.toml @@ -20,6 +20,7 @@ types = { workspace = true } eth2_config = { workspace = true } slot_clock = { workspace = true } serde = { workspace = true } +serde_json = { workspace = true } error-chain = { workspace = true } slog = { workspace = true } tokio = { workspace = true } @@ -27,6 +28,7 @@ futures = { workspace = true } dirs = { workspace = true } eth1 = { workspace = true } eth2 = { workspace = true } +kzg = { workspace = true } sensitive_url = { workspace = true } genesis = { workspace = true } task_executor = { workspace = true } diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index d299eebec8..f64ab7200f 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -195,7 +195,17 @@ where None }; - let builder = BeaconChainBuilder::new(eth_spec_instance) + let kzg_err_msg = |e| format!("Failed to load trusted setup: {:?}", e); + let trusted_setup = config.trusted_setup.clone(); + let kzg = if spec.is_peer_das_scheduled() { + Kzg::new_from_trusted_setup_das_enabled(trusted_setup).map_err(kzg_err_msg)? + } else if spec.deneb_fork_epoch.is_some() { + Kzg::new_from_trusted_setup(trusted_setup).map_err(kzg_err_msg)? + } else { + Kzg::new_from_trusted_setup_no_precomp(trusted_setup).map_err(kzg_err_msg)? + }; + + let builder = BeaconChainBuilder::new(eth_spec_instance, Arc::new(kzg)) .logger(context.log().clone()) .store(store) .task_executor(context.executor.clone()) @@ -623,20 +633,6 @@ where ClientGenesis::FromStore => builder.resume_from_db().map(|v| (v, None))?, }; - let beacon_chain_builder = if let Some(trusted_setup) = config.trusted_setup { - let kzg_err_msg = |e| format!("Failed to load trusted setup: {:?}", e); - - let kzg = if spec.is_peer_das_scheduled() { - Kzg::new_from_trusted_setup_das_enabled(trusted_setup).map_err(kzg_err_msg)? - } else { - Kzg::new_from_trusted_setup(trusted_setup).map_err(kzg_err_msg)? 
- }; - - beacon_chain_builder.kzg(Some(Arc::new(kzg))) - } else { - beacon_chain_builder - }; - if config.sync_eth1_chain { self.eth1_service = eth1_service_option; } diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index 16000374b2..a25216ff3e 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -4,6 +4,7 @@ use beacon_chain::TrustedSetup; use beacon_processor::BeaconProcessorConfig; use directory::DEFAULT_ROOT_DIR; use environment::LoggerConfig; +use kzg::trusted_setup::get_trusted_setup; use network::NetworkConfig; use sensitive_url::SensitiveUrl; use serde::{Deserialize, Serialize}; @@ -75,7 +76,7 @@ pub struct Config { pub chain: beacon_chain::ChainConfig, pub eth1: eth1::Config, pub execution_layer: Option, - pub trusted_setup: Option, + pub trusted_setup: TrustedSetup, pub http_api: http_api::Config, pub http_metrics: http_metrics::Config, pub monitoring_api: Option, @@ -89,6 +90,9 @@ pub struct Config { impl Default for Config { fn default() -> Self { + let trusted_setup: TrustedSetup = serde_json::from_reader(get_trusted_setup().as_slice()) + .expect("Unable to read trusted setup file"); + Self { data_dir: PathBuf::from(DEFAULT_ROOT_DIR), db_name: "chain_db".to_string(), @@ -103,7 +107,7 @@ impl Default for Config { sync_eth1_chain: false, eth1: <_>::default(), execution_layer: None, - trusted_setup: None, + trusted_setup, beacon_graffiti: GraffitiOrigin::default(), http_api: <_>::default(), http_metrics: <_>::default(), diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index 6094e0d696..a5960744f5 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -862,8 +862,7 @@ pub fn generate_pow_block( #[cfg(test)] mod test { use super::*; - use 
eth2_network_config::TRUSTED_SETUP_BYTES; - use kzg::TrustedSetup; + use kzg::{trusted_setup::get_trusted_setup, TrustedSetup}; use types::{MainnetEthSpec, MinimalEthSpec}; #[test] @@ -951,8 +950,9 @@ mod test { } fn load_kzg() -> Result { - let trusted_setup: TrustedSetup = serde_json::from_reader(TRUSTED_SETUP_BYTES) - .map_err(|e| format!("Unable to read trusted setup file: {e:?}"))?; + let trusted_setup: TrustedSetup = + serde_json::from_reader(get_trusted_setup().as_slice()) + .map_err(|e| format!("Unable to read trusted setup file: {e:?}"))?; Kzg::new_from_trusted_setup(trusted_setup) .map_err(|e| format!("Failed to load trusted setup: {e:?}")) } diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index 6a81eb33f0..fed346127f 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -8,10 +8,13 @@ edition = { workspace = true } sloggers = { workspace = true } genesis = { workspace = true } matches = "0.1.8" +serde_json = { workspace = true } slog-term = { workspace = true } slog-async = { workspace = true } eth2 = { workspace = true } gossipsub = { workspace = true } +eth2_network_config = { workspace = true } +kzg = { workspace = true } [dependencies] alloy-primitives = { workspace = true } diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index 62f1371c81..ddcd74d20b 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -696,8 +696,7 @@ impl NetworkBeaconProcessor { column_sidecar, )); } - GossipDataColumnError::KzgNotInitialized - | GossipDataColumnError::PubkeyCacheTimeout + GossipDataColumnError::PubkeyCacheTimeout | GossipDataColumnError::BeaconChainError(_) => { crit!( self.log, @@ -839,9 +838,7 @@ impl NetworkBeaconProcessor { blob_sidecar, )); } - GossipBlobError::KzgNotInitialized - | 
GossipBlobError::PubkeyCacheTimeout - | GossipBlobError::BeaconChainError(_) => { + GossipBlobError::PubkeyCacheTimeout | GossipBlobError::BeaconChainError(_) => { crit!( self.log, "Internal error when verifying blob sidecar"; diff --git a/beacon_node/network/src/network_beacon_processor/sync_methods.rs b/beacon_node/network/src/network_beacon_processor/sync_methods.rs index c21054dab5..50c7ee05a1 100644 --- a/beacon_node/network/src/network_beacon_processor/sync_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/sync_methods.rs @@ -385,8 +385,8 @@ impl NetworkBeaconProcessor { data_columns: Vec>>, _seen_timestamp: Duration, ) -> Result<(), String> { - let kzg = self.chain.kzg.as_ref().ok_or("Kzg not initialized")?; - verify_kzg_for_data_column_list(data_columns.iter(), kzg).map_err(|err| format!("{err:?}")) + verify_kzg_for_data_column_list(data_columns.iter(), &self.chain.kzg) + .map_err(|err| format!("{err:?}")) } /// Process a sampling completed event, inserting it into fork-choice @@ -561,8 +561,7 @@ impl NetworkBeaconProcessor { }) .collect::>(), Err(e) => match e { - AvailabilityCheckError::StoreError(_) - | AvailabilityCheckError::KzgNotInitialized => { + AvailabilityCheckError::StoreError(_) => { return ( 0, Err(ChainSegmentFailed { diff --git a/beacon_node/network/src/subnet_service/tests/mod.rs b/beacon_node/network/src/subnet_service/tests/mod.rs index e8d9218ec4..3ee7c7f768 100644 --- a/beacon_node/network/src/subnet_service/tests/mod.rs +++ b/beacon_node/network/src/subnet_service/tests/mod.rs @@ -2,6 +2,7 @@ use super::*; use beacon_chain::{ builder::{BeaconChainBuilder, Witness}, eth1_chain::CachingEth1Backend, + test_utils::get_kzg, BeaconChain, }; use futures::prelude::*; @@ -45,12 +46,14 @@ impl TestBeaconChain { let store = HotColdDB::open_ephemeral(StoreConfig::default(), spec.clone(), log.clone()).unwrap(); + let kzg = get_kzg(&spec); + let (shutdown_tx, _) = futures::channel::mpsc::channel(1); let test_runtime = 
TestRuntime::default(); let chain = Arc::new( - BeaconChainBuilder::new(MainnetEthSpec) + BeaconChainBuilder::new(MainnetEthSpec, kzg.clone()) .logger(log.clone()) .custom_spec(spec.clone()) .store(Arc::new(store)) diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 6f61748a2d..0eff8577c4 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -396,13 +396,15 @@ pub fn get_config( } // 4844 params - client_config.trusted_setup = context + if let Some(trusted_setup) = context .eth2_network_config .as_ref() - .and_then(|config| config.kzg_trusted_setup.as_ref()) - .map(|trusted_setup_bytes| serde_json::from_slice(trusted_setup_bytes)) + .map(|config| serde_json::from_slice(&config.kzg_trusted_setup)) .transpose() - .map_err(|e| format!("Unable to read trusted setup file: {}", e))?; + .map_err(|e| format!("Unable to read trusted setup file: {}", e))? + { + client_config.trusted_setup = trusted_setup; + }; // Override default trusted setup file if required if let Some(trusted_setup_file_path) = cli_args.get_one::("trusted-setup-file-override") @@ -411,7 +413,7 @@ pub fn get_config( .map_err(|e| format!("Failed to open trusted setup file: {}", e))?; let trusted_setup: TrustedSetup = serde_json::from_reader(file) .map_err(|e| format!("Unable to read trusted setup file: {}", e))?; - client_config.trusted_setup = Some(trusted_setup); + client_config.trusted_setup = trusted_setup; } if let Some(freezer_dir) = cli_args.get_one::("freezer-dir") { diff --git a/common/eth2_network_config/Cargo.toml b/common/eth2_network_config/Cargo.toml index 4b34405e5b..09cf2072d2 100644 --- a/common/eth2_network_config/Cargo.toml +++ b/common/eth2_network_config/Cargo.toml @@ -28,3 +28,4 @@ sensitive_url = { workspace = true } slog = { workspace = true } logging = { workspace = true } bytes = { workspace = true } +kzg = { workspace = true } diff --git a/common/eth2_network_config/src/lib.rs b/common/eth2_network_config/src/lib.rs index 
472ac55ca0..3d0ffc5b9e 100644 --- a/common/eth2_network_config/src/lib.rs +++ b/common/eth2_network_config/src/lib.rs @@ -14,6 +14,7 @@ use bytes::Bytes; use discv5::enr::{CombinedKey, Enr}; use eth2_config::{instantiate_hardcoded_nets, HardcodedNet}; +use kzg::trusted_setup::get_trusted_setup; use pretty_reqwest_error::PrettyReqwestError; use reqwest::{Client, Error}; use sensitive_url::SensitiveUrl; @@ -24,7 +25,7 @@ use std::io::{Read, Write}; use std::path::PathBuf; use std::str::FromStr; use std::time::Duration; -use types::{BeaconState, ChainSpec, Config, Epoch, EthSpec, EthSpecId, Hash256}; +use types::{BeaconState, ChainSpec, Config, EthSpec, EthSpecId, Hash256}; use url::Url; pub use eth2_config::GenesisStateSource; @@ -43,26 +44,6 @@ instantiate_hardcoded_nets!(eth2_config); pub const DEFAULT_HARDCODED_NETWORK: &str = "mainnet"; -/// Contains the bytes from the trusted setup json. -/// The mainnet trusted setup is also reused in testnets. -/// -/// This is done to ensure that testnets also inherit the high security and -/// randomness of the mainnet kzg trusted setup ceremony. -/// -/// Note: The trusted setup for both mainnet and minimal presets are the same. -pub const TRUSTED_SETUP_BYTES: &[u8] = - include_bytes!("../built_in_network_configs/trusted_setup.json"); - -/// Returns `Some(TrustedSetup)` if the deneb fork epoch is set and `None` otherwise. -/// -/// Returns an error if the trusted setup parsing failed. -fn get_trusted_setup_from_config(config: &Config) -> Option> { - config - .deneb_fork_epoch - .filter(|epoch| epoch.value != Epoch::max_value()) - .map(|_| TRUSTED_SETUP_BYTES.to_vec()) -} - /// A simple slice-or-vec enum to avoid cloning the beacon state bytes in the /// binary whilst also supporting loading them from a file at runtime. 
#[derive(Clone, PartialEq, Debug)] @@ -104,7 +85,7 @@ pub struct Eth2NetworkConfig { pub genesis_state_source: GenesisStateSource, pub genesis_state_bytes: Option, pub config: Config, - pub kzg_trusted_setup: Option>, + pub kzg_trusted_setup: Vec, } impl Eth2NetworkConfig { @@ -122,7 +103,7 @@ impl Eth2NetworkConfig { fn from_hardcoded_net(net: &HardcodedNet) -> Result { let config: Config = serde_yaml::from_reader(net.config) .map_err(|e| format!("Unable to parse yaml config: {:?}", e))?; - let kzg_trusted_setup = get_trusted_setup_from_config(&config); + let kzg_trusted_setup = get_trusted_setup(); Ok(Self { deposit_contract_deploy_block: serde_yaml::from_reader(net.deploy_block) .map_err(|e| format!("Unable to parse deploy block: {:?}", e))?, @@ -359,7 +340,7 @@ impl Eth2NetworkConfig { (None, GenesisStateSource::Unknown) }; - let kzg_trusted_setup = get_trusted_setup_from_config(&config); + let kzg_trusted_setup = get_trusted_setup(); Ok(Self { deposit_contract_deploy_block, @@ -577,7 +558,7 @@ mod tests { GenesisStateSource::Unknown }; // With Deneb enabled by default we must set a trusted setup here. 
- let kzg_trusted_setup = get_trusted_setup_from_config(&config).unwrap(); + let kzg_trusted_setup = get_trusted_setup(); let testnet = Eth2NetworkConfig { deposit_contract_deploy_block, @@ -588,7 +569,7 @@ mod tests { .map(Encode::as_ssz_bytes) .map(Into::into), config, - kzg_trusted_setup: Some(kzg_trusted_setup), + kzg_trusted_setup, }; testnet diff --git a/crypto/kzg/Cargo.toml b/crypto/kzg/Cargo.toml index e940fe2e20..ce55f83639 100644 --- a/crypto/kzg/Cargo.toml +++ b/crypto/kzg/Cargo.toml @@ -18,11 +18,11 @@ hex = { workspace = true } ethereum_hashing = { workspace = true } c-kzg = { workspace = true } rust_eth_kzg = { workspace = true } +serde_json = { workspace = true } [dev-dependencies] criterion = { workspace = true } serde_json = { workspace = true } -eth2_network_config = { workspace = true } [[bench]] name = "benchmark" diff --git a/crypto/kzg/benches/benchmark.rs b/crypto/kzg/benches/benchmark.rs index 35e370cd0f..50f5f4e779 100644 --- a/crypto/kzg/benches/benchmark.rs +++ b/crypto/kzg/benches/benchmark.rs @@ -1,11 +1,10 @@ use c_kzg::KzgSettings; use criterion::{criterion_group, criterion_main, Criterion}; -use eth2_network_config::TRUSTED_SETUP_BYTES; -use kzg::TrustedSetup; +use kzg::{trusted_setup::get_trusted_setup, TrustedSetup}; use rust_eth_kzg::{DASContext, TrustedSetup as PeerDASTrustedSetup}; pub fn bench_init_context(c: &mut Criterion) { - let trusted_setup: TrustedSetup = serde_json::from_reader(TRUSTED_SETUP_BYTES) + let trusted_setup: TrustedSetup = serde_json::from_reader(get_trusted_setup().as_slice()) .map_err(|e| format!("Unable to read trusted setup file: {}", e)) .expect("should have trusted setup"); @@ -22,9 +21,10 @@ pub fn bench_init_context(c: &mut Criterion) { }); c.bench_function(&format!("Initialize context c-kzg (4844)"), |b| { b.iter(|| { - let trusted_setup: TrustedSetup = serde_json::from_reader(TRUSTED_SETUP_BYTES) - .map_err(|e| format!("Unable to read trusted setup file: {}", e)) - .expect("should have trusted 
setup"); + let trusted_setup: TrustedSetup = + serde_json::from_reader(get_trusted_setup().as_slice()) + .map_err(|e| format!("Unable to read trusted setup file: {}", e)) + .expect("should have trusted setup"); KzgSettings::load_trusted_setup(&trusted_setup.g1_points(), &trusted_setup.g2_points()) .unwrap() }) diff --git a/crypto/kzg/src/lib.rs b/crypto/kzg/src/lib.rs index ebe93934fd..348ed785af 100644 --- a/crypto/kzg/src/lib.rs +++ b/crypto/kzg/src/lib.rs @@ -1,6 +1,6 @@ mod kzg_commitment; mod kzg_proof; -mod trusted_setup; +pub mod trusted_setup; use rust_eth_kzg::{CellIndex, DASContext}; use std::fmt::Debug; @@ -51,18 +51,41 @@ impl From for Error { #[derive(Debug)] pub struct Kzg { trusted_setup: KzgSettings, - context: Option, + context: DASContext, } impl Kzg { - /// Load the kzg trusted setup parameters from a vec of G1 and G2 points. - pub fn new_from_trusted_setup(trusted_setup: TrustedSetup) -> Result { + pub fn new_from_trusted_setup_no_precomp(trusted_setup: TrustedSetup) -> Result { + let peerdas_trusted_setup = PeerDASTrustedSetup::from(&trusted_setup); + + let context = DASContext::new(&peerdas_trusted_setup, rust_eth_kzg::UsePrecomp::No); + Ok(Self { trusted_setup: KzgSettings::load_trusted_setup( &trusted_setup.g1_points(), &trusted_setup.g2_points(), )?, - context: None, + context, + }) + } + + /// Load the kzg trusted setup parameters from a vec of G1 and G2 points. 
+ pub fn new_from_trusted_setup(trusted_setup: TrustedSetup) -> Result { + let peerdas_trusted_setup = PeerDASTrustedSetup::from(&trusted_setup); + + let context = DASContext::new( + &peerdas_trusted_setup, + rust_eth_kzg::UsePrecomp::Yes { + width: rust_eth_kzg::constants::RECOMMENDED_PRECOMP_WIDTH, + }, + ); + + Ok(Self { + trusted_setup: KzgSettings::load_trusted_setup( + &trusted_setup.g1_points(), + &trusted_setup.g2_points(), + )?, + context, }) } @@ -88,12 +111,12 @@ impl Kzg { &trusted_setup.g1_points(), &trusted_setup.g2_points(), )?, - context: Some(context), + context, }) } - fn context(&self) -> Result<&DASContext, Error> { - self.context.as_ref().ok_or(Error::DASContextUninitialized) + fn context(&self) -> &DASContext { + &self.context } /// Compute the kzg proof given a blob and its kzg commitment. @@ -200,7 +223,7 @@ impl Kzg { blob: KzgBlobRef<'_>, ) -> Result { let (cells, proofs) = self - .context()? + .context() .compute_cells_and_kzg_proofs(blob) .map_err(Error::PeerDASKZG)?; @@ -226,7 +249,7 @@ impl Kzg { .iter() .map(|commitment| commitment.as_ref()) .collect(); - let verification_result = self.context()?.verify_cell_kzg_proof_batch( + let verification_result = self.context().verify_cell_kzg_proof_batch( commitments.to_vec(), columns, cells.to_vec(), @@ -247,7 +270,7 @@ impl Kzg { cells: &[CellRef<'_>], ) -> Result { let (cells, proofs) = self - .context()? + .context() .recover_cells_and_kzg_proofs(cell_ids.to_vec(), cells.to_vec()) .map_err(Error::PeerDASKZG)?; diff --git a/crypto/kzg/src/trusted_setup.rs b/crypto/kzg/src/trusted_setup.rs index 6ddc33df5a..f788be265a 100644 --- a/crypto/kzg/src/trusted_setup.rs +++ b/crypto/kzg/src/trusted_setup.rs @@ -5,6 +5,12 @@ use serde::{ Deserialize, Serialize, }; +pub const TRUSTED_SETUP_BYTES: &[u8] = include_bytes!("../trusted_setup.json"); + +pub fn get_trusted_setup() -> Vec { + TRUSTED_SETUP_BYTES.into() +} + /// Wrapper over a BLS G1 point's byte representation. 
#[derive(Debug, Clone, PartialEq)] struct G1Point([u8; BYTES_PER_G1_POINT]); diff --git a/common/eth2_network_config/built_in_network_configs/trusted_setup.json b/crypto/kzg/trusted_setup.json similarity index 100% rename from common/eth2_network_config/built_in_network_configs/trusted_setup.json rename to crypto/kzg/trusted_setup.json diff --git a/testing/ef_tests/src/cases/kzg_verify_blob_kzg_proof.rs b/testing/ef_tests/src/cases/kzg_verify_blob_kzg_proof.rs index f9b3009fde..3dc955bdcc 100644 --- a/testing/ef_tests/src/cases/kzg_verify_blob_kzg_proof.rs +++ b/testing/ef_tests/src/cases/kzg_verify_blob_kzg_proof.rs @@ -1,7 +1,7 @@ use super::*; use crate::case_result::compare_result; use beacon_chain::kzg_utils::validate_blob; -use eth2_network_config::TRUSTED_SETUP_BYTES; +use kzg::trusted_setup::get_trusted_setup; use kzg::{Cell, Error as KzgError, Kzg, KzgCommitment, KzgProof, TrustedSetup}; use serde::Deserialize; use std::marker::PhantomData; @@ -10,7 +10,7 @@ use std::sync::LazyLock; use types::Blob; static KZG: LazyLock> = LazyLock::new(|| { - let trusted_setup: TrustedSetup = serde_json::from_reader(TRUSTED_SETUP_BYTES) + let trusted_setup: TrustedSetup = serde_json::from_reader(get_trusted_setup().as_slice()) .map_err(|e| Error::InternalError(format!("Failed to initialize trusted setup: {:?}", e))) .expect("failed to initialize trusted setup"); let kzg = Kzg::new_from_trusted_setup_das_enabled(trusted_setup) diff --git a/testing/simulator/Cargo.toml b/testing/simulator/Cargo.toml index f8769b10e2..7772523284 100644 --- a/testing/simulator/Cargo.toml +++ b/testing/simulator/Cargo.toml @@ -19,3 +19,4 @@ rayon = { workspace = true } sensitive_url = { path = "../../common/sensitive_url" } eth2_network_config = { workspace = true } serde_json = { workspace = true } +kzg = { workspace = true } diff --git a/testing/simulator/src/local_network.rs b/testing/simulator/src/local_network.rs index faf3246e0d..7b9327a7aa 100644 --- 
a/testing/simulator/src/local_network.rs +++ b/testing/simulator/src/local_network.rs @@ -1,5 +1,5 @@ use crate::checks::epoch_delay; -use eth2_network_config::TRUSTED_SETUP_BYTES; +use kzg::trusted_setup::get_trusted_setup; use node_test_rig::{ environment::RuntimeContext, eth2::{types::StateId, BeaconNodeHttpClient}, @@ -46,8 +46,8 @@ fn default_client_config(network_params: LocalNetworkParams, genesis_time: u64) beacon_config.chain.enable_light_client_server = true; beacon_config.http_api.enable_light_client_server = true; beacon_config.chain.optimistic_finalized_sync = false; - beacon_config.trusted_setup = - serde_json::from_reader(TRUSTED_SETUP_BYTES).expect("Trusted setup bytes should be valid"); + beacon_config.trusted_setup = serde_json::from_reader(get_trusted_setup().as_slice()) + .expect("Trusted setup bytes should be valid"); let el_config = execution_layer::Config { execution_endpoint: Some( From 012e7e7bfa29dd6da8fbb73b09c2d7d020340705 Mon Sep 17 00:00:00 2001 From: Lion - dapplion <35266934+dapplion@users.noreply.github.com> Date: Mon, 23 Sep 2024 14:49:23 -0400 Subject: [PATCH 26/66] Allow custody by root requests to have no peers (#6417) * Allow custody by root requests to have no peers --- beacon_node/network/src/sync/manager.rs | 56 +++++++++++---- .../network/src/sync/network_context.rs | 68 ++++++++++++++----- .../src/sync/network_context/custody.rs | 50 +++++++------- 3 files changed, 121 insertions(+), 53 deletions(-) diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index ed91c73d8b..f141780484 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -35,7 +35,9 @@ use super::backfill_sync::{BackFillSync, ProcessResult, SyncStart}; use super::block_lookups::BlockLookups; -use super::network_context::{BlockOrBlob, RangeRequestId, RpcEvent, SyncNetworkContext}; +use super::network_context::{ + BlockOrBlob, CustodyByRootResult, RangeRequestId, RpcEvent, 
SyncNetworkContext, +}; use super::peer_sync_info::{remote_sync_type, PeerSyncType}; use super::range_sync::{RangeSync, RangeSyncType, EPOCHS_PER_BATCH}; use super::sampling::{Sampling, SamplingConfig, SamplingResult}; @@ -55,8 +57,8 @@ use beacon_chain::{ use futures::StreamExt; use lighthouse_network::rpc::RPCError; use lighthouse_network::service::api_types::{ - DataColumnsByRootRequestId, DataColumnsByRootRequester, Id, SamplingId, SamplingRequester, - SingleLookupReqId, SyncRequestId, + CustodyRequester, DataColumnsByRootRequestId, DataColumnsByRootRequester, Id, SamplingId, + SamplingRequester, SingleLookupReqId, SyncRequestId, }; use lighthouse_network::types::{NetworkGlobals, SyncState}; use lighthouse_network::SyncInfo; @@ -368,6 +370,11 @@ impl SyncManager { } self.update_sync_state(); + + // Try to make progress on custody requests that are waiting for peers + for (id, result) in self.network.continue_custody_by_root_requests() { + self.on_custody_by_root_result(id, result); + } } /// Handles RPC errors related to requests that were emitted from the sync manager. @@ -444,6 +451,16 @@ impl SyncManager { self.update_sync_state(); } + /// Prune stale requests that are waiting for peers + fn prune_requests(&mut self) { + // continue_custody_by_root_requests attempts to make progress on all requests. If some + // exceed the stale duration limit they will fail and return a result. Re-using + // `continue_custody_by_root_requests` is just a convenience to have less code. + for (id, result) in self.network.continue_custody_by_root_requests() { + self.on_custody_by_root_result(id, result); + } + } + /// Updates the syncing state of a peer. /// Return whether the peer should be used for range syncing or not, according to its /// connection status. @@ -624,6 +641,8 @@ impl SyncManager { // unless there is a bug. 
let mut prune_lookups_interval = tokio::time::interval(Duration::from_secs(15)); + let mut prune_requests = tokio::time::interval(Duration::from_secs(15)); + let mut register_metrics_interval = tokio::time::interval(Duration::from_secs(5)); // process any inbound messages @@ -638,6 +657,9 @@ impl SyncManager { _ = prune_lookups_interval.tick() => { self.block_lookups.prune_lookups(); } + _ = prune_requests.tick() => { + self.prune_requests(); + } _ = register_metrics_interval.tick() => { self.network.register_metrics(); } @@ -1054,26 +1076,32 @@ impl SyncManager { } } DataColumnsByRootRequester::Custody(custody_id) => { - if let Some(custody_columns) = self + if let Some(result) = self .network .on_custody_by_root_response(custody_id, req_id, peer_id, resp) { - // TODO(das): get proper timestamp - let seen_timestamp = timestamp_now(); - self.block_lookups - .on_download_response::>( - custody_id.requester.0, - custody_columns.map(|(columns, peer_group)| { - (columns, peer_group, seen_timestamp) - }), - &mut self.network, - ); + self.on_custody_by_root_result(custody_id.requester, result); } } } } } + fn on_custody_by_root_result( + &mut self, + requester: CustodyRequester, + response: CustodyByRootResult, + ) { + // TODO(das): get proper timestamp + let seen_timestamp = timestamp_now(); + self.block_lookups + .on_download_response::>( + requester.0, + response.map(|(columns, peer_group)| (columns, peer_group, seen_timestamp)), + &mut self.network, + ); + } + fn on_sampling_result(&mut self, requester: SamplingRequester, result: SamplingResult) { // TODO(das): How is a consumer of sampling results? 
// - Fork-choice for trailing DA diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index b9f6d180c1..d6f5417849 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -15,6 +15,7 @@ use crate::sync::block_lookups::SingleLookupId; use crate::sync::network_context::requests::BlobsByRootSingleBlockRequest; use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::{BeaconChain, BeaconChainTypes, BlockProcessStatus, EngineState}; +use custody::CustodyRequestResult; use fnv::FnvHashMap; use lighthouse_network::rpc::methods::{BlobsByRangeRequest, DataColumnsByRangeRequest}; use lighthouse_network::rpc::{BlocksByRangeRequest, GoodbyeReason, RPCError}; @@ -69,6 +70,8 @@ pub enum RpcEvent { pub type RpcResponseResult = Result<(T, Duration), RpcResponseError>; +pub type CustodyByRootResult = Result<(DataColumnSidecarList, PeerGroup), RpcResponseError>; + #[derive(Debug)] pub enum RpcResponseError { RpcError(RPCError), @@ -915,6 +918,32 @@ impl SyncNetworkContext { .insert(id, (sender_id, info)); } + /// Attempt to make progress on all custody_by_root requests. Some request may be stale waiting + /// for custody peers. Returns a Vec of results as zero or more requests may fail in this + /// attempt. + pub fn continue_custody_by_root_requests( + &mut self, + ) -> Vec<(CustodyRequester, CustodyByRootResult)> { + let ids = self + .custody_by_root_requests + .keys() + .copied() + .collect::>(); + + // Need to collect ids and results in separate steps to re-borrow self. 
+ ids.into_iter() + .filter_map(|id| { + let mut request = self + .custody_by_root_requests + .remove(&id) + .expect("key of hashmap"); + let result = request.continue_requests(self); + self.handle_custody_by_root_result(id, request, result) + .map(|result| (id, result)) + }) + .collect() + } + // Request handlers pub fn on_single_block_response( @@ -1069,7 +1098,7 @@ impl SyncNetworkContext { req_id: DataColumnsByRootRequestId, peer_id: PeerId, resp: RpcResponseResult>>>, - ) -> Option, PeerGroup), RpcResponseError>> { + ) -> Option> { // Note: need to remove the request to borrow self again below. Otherwise we can't // do nested requests let Some(mut request) = self.custody_by_root_requests.remove(&id.requester) else { @@ -1078,28 +1107,35 @@ impl SyncNetworkContext { return None; }; - let result = request - .on_data_column_downloaded(peer_id, req_id, resp, self) + let result = request.on_data_column_downloaded(peer_id, req_id, resp, self); + + self.handle_custody_by_root_result(id.requester, request, result) + } + + fn handle_custody_by_root_result( + &mut self, + id: CustodyRequester, + request: ActiveCustodyRequest, + result: CustodyRequestResult, + ) -> Option> { + let result = result .map_err(RpcResponseError::CustodyRequestError) .transpose(); // Convert a result from internal format of `ActiveCustodyRequest` (error first to use ?) to // an Option first to use in an `if let Some() { act on result }` block. 
- if let Some(result) = result { - match result.as_ref() { - Ok((columns, peer_group)) => { - debug!(self.log, "Custody request success, removing"; "id" => ?id, "count" => columns.len(), "peers" => ?peer_group) - } - Err(e) => { - debug!(self.log, "Custody request failure, removing"; "id" => ?id, "error" => ?e) - } + match result.as_ref() { + Some(Ok((columns, peer_group))) => { + debug!(self.log, "Custody request success, removing"; "id" => ?id, "count" => columns.len(), "peers" => ?peer_group) + } + Some(Err(e)) => { + debug!(self.log, "Custody request failure, removing"; "id" => ?id, "error" => ?e) + } + None => { + self.custody_by_root_requests.insert(id, request); } - - Some(result) - } else { - self.custody_by_root_requests.insert(id.requester, request); - None } + result } pub fn send_block_for_processing( diff --git a/beacon_node/network/src/sync/network_context/custody.rs b/beacon_node/network/src/sync/network_context/custody.rs index dfe409f043..6736bfb82f 100644 --- a/beacon_node/network/src/sync/network_context/custody.rs +++ b/beacon_node/network/src/sync/network_context/custody.rs @@ -9,7 +9,7 @@ use lighthouse_network::PeerId; use lru_cache::LRUTimeCache; use rand::Rng; use slog::{debug, warn}; -use std::time::Duration; +use std::time::{Duration, Instant}; use std::{collections::HashMap, marker::PhantomData, sync::Arc}; use types::EthSpec; use types::{data_column_sidecar::ColumnIndex, DataColumnSidecar, Hash256}; @@ -17,6 +17,7 @@ use types::{data_column_sidecar::ColumnIndex, DataColumnSidecar, Hash256}; use super::{LookupRequestResult, PeerGroup, RpcResponseResult, SyncNetworkContext}; const FAILED_PEERS_CACHE_EXPIRY_SECONDS: u64 = 5; +const MAX_STALE_NO_PEERS_DURATION: Duration = Duration::from_secs(30); type DataColumnSidecarList = Vec>>; @@ -56,7 +57,7 @@ struct ActiveBatchColumnsRequest { indices: Vec, } -type CustodyRequestResult = Result, PeerGroup)>, Error>; +pub type CustodyRequestResult = Result, PeerGroup)>, Error>; impl 
ActiveCustodyRequest { pub(crate) fn new( @@ -221,13 +222,13 @@ impl ActiveCustodyRequest { // - which peer returned what to have PeerGroup attributability for (column_index, request) in self.column_requests.iter_mut() { - if request.is_awaiting_download() { + if let Some(wait_duration) = request.is_awaiting_download() { if request.download_failures > MAX_CUSTODY_COLUMN_DOWNLOAD_ATTEMPTS { return Err(Error::TooManyFailures); } - // TODO: When is a fork and only a subset of your peers know about a block, we should only - // query the peers on that fork. Should this case be handled? How to handle it? + // TODO(das): When is a fork and only a subset of your peers know about a block, we should + // only query the peers on that fork. Should this case be handled? How to handle it? let custodial_peers = cx.get_custodial_peers(*column_index); // TODO(das): cache this computation in a OneCell or similar to prevent having to @@ -256,17 +257,20 @@ impl ActiveCustodyRequest { .collect::>(); priorized_peers.sort_unstable(); - let Some((_, _, _, peer_id)) = priorized_peers.first() else { - // Do not tolerate not having custody peers, hard error. - // TODO(das): we might implement some grace period. The request will pause for X - // seconds expecting the peer manager to find peers before failing the request. + if let Some((_, _, _, peer_id)) = priorized_peers.first() { + columns_to_request_by_peer + .entry(*peer_id) + .or_default() + .push(*column_index); + } else if wait_duration > MAX_STALE_NO_PEERS_DURATION { + // Allow to request to sit stale in `NotStarted` state for at most + // `MAX_STALE_NO_PEERS_DURATION`, else error and drop the request. Note that + // lookup will naturally retry when other peers send us attestations for + // descendants of this un-available lookup. 
return Err(Error::NoPeers(*column_index)); - }; - - columns_to_request_by_peer - .entry(*peer_id) - .or_default() - .push(*column_index); + } else { + // Do not issue requests if there is no custody peer on this column + } } } @@ -315,7 +319,7 @@ struct ColumnRequest { #[derive(Debug, Clone)] enum Status { - NotStarted, + NotStarted(Instant), Downloading(DataColumnsByRootRequestId), Downloaded(PeerId, Arc>), } @@ -323,28 +327,28 @@ enum Status { impl ColumnRequest { fn new() -> Self { Self { - status: Status::NotStarted, + status: Status::NotStarted(Instant::now()), download_failures: 0, } } - fn is_awaiting_download(&self) -> bool { + fn is_awaiting_download(&self) -> Option { match self.status { - Status::NotStarted => true, - Status::Downloading { .. } | Status::Downloaded { .. } => false, + Status::NotStarted(start_time) => Some(start_time.elapsed()), + Status::Downloading { .. } | Status::Downloaded { .. } => None, } } fn is_downloaded(&self) -> bool { match self.status { - Status::NotStarted | Status::Downloading { .. } => false, + Status::NotStarted { .. } | Status::Downloading { .. } => false, Status::Downloaded { .. } => true, } } fn on_download_start(&mut self, req_id: DataColumnsByRootRequestId) -> Result<(), Error> { match &self.status { - Status::NotStarted => { + Status::NotStarted { .. 
} => { self.status = Status::Downloading(req_id); Ok(()) } @@ -363,7 +367,7 @@ impl ColumnRequest { req_id, }); } - self.status = Status::NotStarted; + self.status = Status::NotStarted(Instant::now()); Ok(()) } other => Err(Error::BadState(format!( From d84df5799cb33d34da97f9d46971f1086aaea513 Mon Sep 17 00:00:00 2001 From: Lion - dapplion <35266934+dapplion@users.noreply.github.com> Date: Mon, 23 Sep 2024 14:49:26 -0400 Subject: [PATCH 27/66] Attribute invalid column proof error to correct peer (#6377) * Attribute invalid column proof error to correct peer * Update beacon_node/beacon_chain/src/data_availability_checker.rs Co-authored-by: Pawan Dhananjay * fix conflicts --- .../src/data_availability_checker.rs | 54 +++++++++++++++---- .../src/data_availability_checker/error.rs | 20 +++---- .../overflow_lru_cache.rs | 3 +- .../network/src/sync/block_lookups/mod.rs | 16 ++++-- .../network/src/sync/block_lookups/tests.rs | 2 +- .../network/src/sync/network_context.rs | 9 ++++ 6 files changed, 76 insertions(+), 28 deletions(-) diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs index 26fb46ef7f..c13593d7af 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -24,8 +24,8 @@ mod overflow_lru_cache; mod state_lru_cache; use crate::data_column_verification::{ - verify_kzg_for_data_column_list, CustodyDataColumn, GossipVerifiedDataColumn, - KzgVerifiedCustodyDataColumn, KzgVerifiedDataColumn, + verify_kzg_for_data_column, verify_kzg_for_data_column_list, CustodyDataColumn, + GossipVerifiedDataColumn, KzgVerifiedCustodyDataColumn, KzgVerifiedDataColumn, }; pub use error::{Error as AvailabilityCheckError, ErrorCategory as AvailabilityCheckErrorCategory}; use types::non_zero_usize::new_non_zero_usize; @@ -195,12 +195,15 @@ impl DataAvailabilityChecker { .now_duration() 
.ok_or(AvailabilityCheckError::SlotClockError)?; + // Note: currently not reporting which specific blob is invalid because we fetch all blobs + // from the same peer for both lookup and range sync. + let verified_blobs = KzgVerifiedBlobList::new( Vec::from(blobs).into_iter().flatten(), &self.kzg, seen_timestamp, ) - .map_err(AvailabilityCheckError::Kzg)?; + .map_err(AvailabilityCheckError::InvalidBlobs)?; self.availability_cache .put_kzg_verified_blobs(block_root, epoch, verified_blobs) @@ -217,13 +220,15 @@ impl DataAvailabilityChecker { ) -> Result<(Availability, DataColumnsToPublish), AvailabilityCheckError> { // TODO(das): report which column is invalid for proper peer scoring - // TODO(das): batch KZG verification here + // TODO(das): batch KZG verification here, but fallback into checking each column + // individually to report which column(s) are invalid. let verified_custody_columns = custody_columns .into_iter() .map(|column| { + let index = column.index; Ok(KzgVerifiedCustodyDataColumn::from_asserted_custody( KzgVerifiedDataColumn::new(column, &self.kzg) - .map_err(AvailabilityCheckError::Kzg)?, + .map_err(|e| AvailabilityCheckError::InvalidColumn(index, e))?, )) }) .collect::, AvailabilityCheckError>>()?; @@ -308,7 +313,7 @@ impl DataAvailabilityChecker { if self.blobs_required_for_block(&block) { return if let Some(blob_list) = blobs.as_ref() { verify_kzg_for_blob_list(blob_list.iter(), &self.kzg) - .map_err(AvailabilityCheckError::Kzg)?; + .map_err(AvailabilityCheckError::InvalidBlobs)?; Ok(MaybeAvailableBlock::Available(AvailableBlock { block_root, block, @@ -323,13 +328,12 @@ impl DataAvailabilityChecker { } if self.data_columns_required_for_block(&block) { return if let Some(data_column_list) = data_columns.as_ref() { - verify_kzg_for_data_column_list( + verify_kzg_for_data_column_list_with_scoring( data_column_list .iter() .map(|custody_column| custody_column.as_data_column()), &self.kzg, - ) - .map_err(AvailabilityCheckError::Kzg)?; + )?; 
Ok(MaybeAvailableBlock::Available(AvailableBlock { block_root, block, @@ -380,7 +384,8 @@ impl DataAvailabilityChecker { // verify kzg for all blobs at once if !all_blobs.is_empty() { - verify_kzg_for_blob_list(all_blobs.iter(), &self.kzg)?; + verify_kzg_for_blob_list(all_blobs.iter(), &self.kzg) + .map_err(AvailabilityCheckError::InvalidBlobs)?; } let all_data_columns = blocks @@ -396,7 +401,8 @@ impl DataAvailabilityChecker { // verify kzg for all data columns at once if !all_data_columns.is_empty() { - verify_kzg_for_data_column_list(all_data_columns.iter(), &self.kzg)?; + // TODO: Need to also attribute which specific block is faulty + verify_kzg_for_data_column_list_with_scoring(all_data_columns.iter(), &self.kzg)?; } for block in blocks { @@ -598,6 +604,32 @@ async fn availability_cache_maintenance_service( } } +fn verify_kzg_for_data_column_list_with_scoring<'a, E: EthSpec, I>( + data_column_iter: I, + kzg: &'a Kzg, +) -> Result<(), AvailabilityCheckError> +where + I: Iterator>> + Clone, +{ + let Err(batch_err) = verify_kzg_for_data_column_list(data_column_iter.clone(), kzg) else { + return Ok(()); + }; + + let data_columns = data_column_iter.collect::>(); + // Find which column is invalid. If len is 1 or 0 continue to default case below. + // If len > 1 at least one column MUST fail. + if data_columns.len() > 1 { + for data_column in data_columns { + if let Err(e) = verify_kzg_for_data_column(data_column.clone(), kzg) { + return Err(AvailabilityCheckError::InvalidColumn(data_column.index, e)); + } + } + } + + // len 0 should never happen + Err(AvailabilityCheckError::InvalidColumn(0, batch_err)) +} + /// A fully available block that is ready to be imported into fork choice. 
#[derive(Clone, Debug, PartialEq)] pub struct AvailableBlock { diff --git a/beacon_node/beacon_chain/src/data_availability_checker/error.rs b/beacon_node/beacon_chain/src/data_availability_checker/error.rs index 7f34cacefe..dbfa00e6e2 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/error.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/error.rs @@ -1,10 +1,11 @@ use kzg::{Error as KzgError, KzgCommitment}; -use types::{BeaconStateError, Hash256}; +use types::{BeaconStateError, ColumnIndex, Hash256}; #[derive(Debug)] pub enum Error { - Kzg(KzgError), - KzgVerificationFailed, + InvalidBlobs(KzgError), + InvalidColumn(ColumnIndex, KzgError), + ReconstructColumnsError(KzgError), KzgCommitmentMismatch { blob_commitment: KzgCommitment, block_commitment: KzgCommitment, @@ -46,11 +47,12 @@ impl Error { | Error::UnableToDetermineImportRequirement | Error::RebuildingStateCaches(_) | Error::SlotClockError => ErrorCategory::Internal, - Error::Kzg(_) + Error::InvalidBlobs { .. } + | Error::InvalidColumn { .. } + | Error::ReconstructColumnsError { .. } | Error::BlobIndexInvalid(_) | Error::DataColumnIndexInvalid(_) - | Error::KzgCommitmentMismatch { .. } - | Error::KzgVerificationFailed => ErrorCategory::Malicious, + | Error::KzgCommitmentMismatch { .. 
} => ErrorCategory::Malicious, } } } @@ -78,9 +80,3 @@ impl From for Error { Self::BlockReplayError(value) } } - -impl From for Error { - fn from(value: KzgError) -> Self { - Self::Kzg(value) - } -} diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index 36c5a9359d..05f8da4eed 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -555,7 +555,8 @@ impl DataAvailabilityCheckerInner { kzg, pending_components.verified_data_columns.as_slice(), &self.spec, - )?; + ) + .map_err(AvailabilityCheckError::ReconstructColumnsError)?; let data_columns_to_publish = all_data_columns .iter() diff --git a/beacon_node/network/src/sync/block_lookups/mod.rs b/beacon_node/network/src/sync/block_lookups/mod.rs index e31adb783c..9abcd263de 100644 --- a/beacon_node/network/src/sync/block_lookups/mod.rs +++ b/beacon_node/network/src/sync/block_lookups/mod.rs @@ -29,7 +29,9 @@ use crate::metrics; use crate::sync::block_lookups::common::ResponseType; use crate::sync::block_lookups::parent_chain::find_oldest_fork_ancestor; use beacon_chain::block_verification_types::AsBlock; -use beacon_chain::data_availability_checker::AvailabilityCheckErrorCategory; +use beacon_chain::data_availability_checker::{ + AvailabilityCheckError, AvailabilityCheckErrorCategory, +}; use beacon_chain::{AvailabilityProcessingStatus, BeaconChainTypes, BlockError}; pub use common::RequestState; use fnv::FnvHashMap; @@ -591,8 +593,16 @@ impl BlockLookups { other => { debug!(self.log, "Invalid lookup component"; "block_root" => ?block_root, "component" => ?R::response_type(), "error" => ?other); let peer_group = request_state.on_processing_failure()?; - // TOOD(das): only downscore peer subgroup that provided the invalid proof - for peer in peer_group.all() { + let peers_to_penalize: 
Vec<_> = match other { + // Note: currenlty only InvalidColumn errors have index granularity, + // but future errors may follow the same pattern. Generalize this + // pattern with https://github.com/sigp/lighthouse/pull/6321 + BlockError::AvailabilityCheck( + AvailabilityCheckError::InvalidColumn(index, _), + ) => peer_group.of_index(index as usize).collect(), + _ => peer_group.all().collect(), + }; + for peer in peers_to_penalize { cx.report_peer( *peer, PeerAction::MidToleranceError, diff --git a/beacon_node/network/src/sync/block_lookups/tests.rs b/beacon_node/network/src/sync/block_lookups/tests.rs index 5b4f17ac0d..5aa1d5c290 100644 --- a/beacon_node/network/src/sync/block_lookups/tests.rs +++ b/beacon_node/network/src/sync/block_lookups/tests.rs @@ -2449,7 +2449,7 @@ mod deneb_only { self.rig.single_blob_component_processed( self.blob_req_id.expect("blob request id").lookup_id, BlockProcessingResult::Err(BlockError::AvailabilityCheck( - AvailabilityCheckError::KzgVerificationFailed, + AvailabilityCheckError::InvalidBlobs(kzg::Error::KzgVerificationFailed), )), ); self.rig.assert_single_lookups_count(1); diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index d6f5417849..07d04b3fb2 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -139,6 +139,15 @@ impl PeerGroup { pub fn all(&self) -> impl Iterator + '_ { self.peers.keys() } + pub fn of_index(&self, index: usize) -> impl Iterator + '_ { + self.peers.iter().filter_map(move |(peer, indices)| { + if indices.contains(&index) { + Some(peer) + } else { + None + } + }) + } } /// Sequential ID that uniquely identifies ReqResp outgoing requests From 1447eeb40b2f3987c290e60c8e24d8c70c525fe2 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 24 Sep 2024 10:16:18 +1000 Subject: [PATCH 28/66] Improve single-node testnet support and Arc NetworkConfig/ChainSpec (#6396) * Arc ChainSpec and 
NetworkConfig * Fix release tests * Fix lint * Merge remote-tracking branch 'origin/unstable' into single-node-testnet --- .../beacon_chain/src/beacon_block_streamer.rs | 4 +- beacon_node/beacon_chain/src/beacon_chain.rs | 2 +- beacon_node/beacon_chain/src/builder.rs | 14 +++-- .../src/data_availability_checker.rs | 3 +- .../overflow_lru_cache.rs | 5 +- beacon_node/beacon_chain/src/eth1_chain.rs | 14 +++-- .../beacon_chain/src/graffiti_calculator.rs | 9 +-- .../src/observed_data_sidecars.rs | 9 +-- beacon_node/beacon_chain/src/test_utils.rs | 10 ++-- .../src/validator_pubkey_cache.rs | 3 +- .../tests/attestation_verification.rs | 6 +- beacon_node/beacon_chain/tests/bellatrix.rs | 4 +- .../beacon_chain/tests/block_verification.rs | 6 +- beacon_node/beacon_chain/tests/capella.rs | 2 +- beacon_node/beacon_chain/tests/events.rs | 4 +- .../beacon_chain/tests/op_verification.rs | 2 +- .../tests/payload_invalidation.rs | 2 +- beacon_node/beacon_chain/tests/rewards.rs | 2 +- beacon_node/beacon_chain/tests/store_tests.rs | 14 ++--- .../tests/sync_committee_verification.rs | 2 +- beacon_node/client/src/builder.rs | 13 ++--- beacon_node/eth1/src/inner.rs | 7 ++- beacon_node/eth1/src/service.rs | 10 +++- beacon_node/eth1/tests/test.rs | 39 ++++++++----- .../src/test_utils/mock_builder.rs | 6 +- .../genesis/src/eth1_genesis_service.rs | 10 ++-- beacon_node/genesis/tests/tests.rs | 12 ++-- beacon_node/http_api/src/lib.rs | 12 +++- beacon_node/http_api/src/test_utils.rs | 6 +- beacon_node/http_api/tests/tests.rs | 2 +- .../lighthouse_network/src/discovery/mod.rs | 4 +- .../src/peer_manager/mod.rs | 23 +++----- .../lighthouse_network/src/service/mod.rs | 7 ++- .../lighthouse_network/src/service/utils.rs | 4 +- .../lighthouse_network/src/types/globals.rs | 44 ++++++++++----- .../src/types/sync_state.rs | 8 +++ .../lighthouse_network/tests/common.rs | 24 ++++---- .../lighthouse_network/tests/rpc_tests.rs | 40 ++++++-------- .../src/network_beacon_processor/tests.rs | 5 +- 
beacon_node/network/src/persisted_dht.rs | 3 +- beacon_node/network/src/service.rs | 12 ++-- beacon_node/network/src/service/tests.rs | 8 ++- .../network/src/subnet_service/tests/mod.rs | 2 +- .../network/src/sync/block_lookups/tests.rs | 6 +- .../network/src/sync/range_sync/range.rs | 7 ++- beacon_node/operation_pool/src/lib.rs | 4 +- beacon_node/src/lib.rs | 4 +- beacon_node/store/src/hot_cold_store.rs | 8 +-- beacon_node/store/src/iter.rs | 7 ++- common/eth2_config/src/lib.rs | 14 ++--- consensus/fork_choice/tests/tests.rs | 2 +- .../src/per_block_processing/tests.rs | 3 +- .../src/per_epoch_processing/tests.rs | 5 +- database_manager/src/lib.rs | 4 +- lcli/src/transition_blocks.rs | 14 +++-- lighthouse/environment/src/lib.rs | 2 +- testing/ef_tests/src/cases/fork_choice.rs | 3 +- testing/simulator/src/basic_sim.rs | 5 +- testing/simulator/src/fallback_sim.rs | 5 +- testing/web3signer_tests/src/lib.rs | 55 ++++++++++--------- validator_client/src/beacon_node_fallback.rs | 4 +- validator_client/src/duties_service.rs | 2 +- validator_client/src/http_api/mod.rs | 4 +- validator_client/src/http_api/test_utils.rs | 6 +- validator_client/src/http_api/tests.rs | 4 +- validator_client/src/validator_store.rs | 4 +- 66 files changed, 340 insertions(+), 250 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_block_streamer.rs b/beacon_node/beacon_chain/src/beacon_block_streamer.rs index ace5f0be74..198d7d61f0 100644 --- a/beacon_node/beacon_chain/src/beacon_block_streamer.rs +++ b/beacon_node/beacon_chain/src/beacon_block_streamer.rs @@ -711,6 +711,7 @@ mod tests { use crate::beacon_block_streamer::{BeaconBlockStreamer, CheckCaches}; use crate::test_utils::{test_spec, BeaconChainHarness, EphemeralHarnessType}; use execution_layer::test_utils::Block; + use std::sync::Arc; use std::sync::LazyLock; use tokio::sync::mpsc; use types::{ @@ -725,7 +726,7 @@ mod tests { fn get_harness( validator_count: usize, - spec: ChainSpec, + spec: Arc, ) -> BeaconChainHarness> { let 
harness = BeaconChainHarness::builder(MinimalEthSpec) .spec(spec) @@ -756,6 +757,7 @@ mod tests { spec.capella_fork_epoch = Some(Epoch::new(capella_fork_epoch as u64)); spec.deneb_fork_epoch = Some(Epoch::new(deneb_fork_epoch as u64)); spec.electra_fork_epoch = Some(Epoch::new(electra_fork_epoch as u64)); + let spec = Arc::new(spec); let harness = get_harness(VALIDATOR_COUNT, spec.clone()); // go to bellatrix fork diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 7094060b71..515b65b1af 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -371,7 +371,7 @@ type ReqRespPreImportCache = HashMap>>; /// Represents the "Beacon Chain" component of Ethereum 2.0. Allows import of blocks and block /// operations and chooses a canonical head. pub struct BeaconChain { - pub spec: ChainSpec, + pub spec: Arc, /// Configuration for `BeaconChain` runtime behaviour. pub config: ChainConfig, /// Persistent storage for blocks, states, etc. Typically an on-disk store, such as LevelDB. diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index c38101e274..001dbf0080 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -93,7 +93,7 @@ pub struct BeaconChainBuilder { light_client_server_tx: Option>>, head_tracker: Option, validator_pubkey_cache: Option>, - spec: ChainSpec, + spec: Arc, chain_config: ChainConfig, log: Option, beacon_graffiti: GraffitiOrigin, @@ -137,7 +137,7 @@ where light_client_server_tx: None, head_tracker: None, validator_pubkey_cache: None, - spec: E::default_spec(), + spec: Arc::new(E::default_spec()), chain_config: ChainConfig::default(), log: None, beacon_graffiti: GraffitiOrigin::default(), @@ -154,7 +154,7 @@ where /// /// This method should generally be called immediately after `Self::new` to ensure components /// are started with a consistent spec. 
- pub fn custom_spec(mut self, spec: ChainSpec) -> Self { + pub fn custom_spec(mut self, spec: Arc) -> Self { self.spec = spec; self } @@ -1183,8 +1183,12 @@ mod test { MinimalEthSpec, MemoryStore, MemoryStore, - > = HotColdDB::open_ephemeral(StoreConfig::default(), ChainSpec::minimal(), log.clone()) - .unwrap(); + > = HotColdDB::open_ephemeral( + StoreConfig::default(), + ChainSpec::minimal().into(), + log.clone(), + ) + .unwrap(); let spec = MinimalEthSpec::default_spec(); let genesis_state = interop_genesis_state( diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs index c13593d7af..4d5afdc890 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -100,9 +100,8 @@ impl DataAvailabilityChecker { kzg: Arc, store: BeaconStore, import_all_data_columns: bool, - spec: ChainSpec, + spec: Arc, ) -> Result { - let spec = Arc::new(spec); let custody_subnet_count = if import_all_data_columns { spec.data_column_sidecar_subnet_count as usize } else { diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index 05f8da4eed..46ab08a821 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -704,7 +704,7 @@ mod test { fn get_store_with_spec( db_path: &TempDir, - spec: ChainSpec, + spec: Arc, log: Logger, ) -> Arc, LevelDB>> { let hot_path = db_path.path().join("hot_db"); @@ -741,6 +741,7 @@ mod test { spec.bellatrix_fork_epoch = Some(bellatrix_fork_epoch); spec.capella_fork_epoch = Some(capella_fork_epoch); spec.deneb_fork_epoch = Some(deneb_fork_epoch); + let spec = Arc::new(spec); let chain_store = get_store_with_spec::(db_path, spec.clone(), log.clone()); let validators_keypairs = @@ 
-884,7 +885,7 @@ mod test { let log = test_logger(); let chain_db_path = tempdir().expect("should get temp dir"); let harness = get_deneb_chain(log.clone(), &chain_db_path).await; - let spec = Arc::new(harness.spec.clone()); + let spec = harness.spec.clone(); let test_store = harness.chain.store.clone(); let capacity_non_zero = new_non_zero_usize(capacity); let cache = Arc::new( diff --git a/beacon_node/beacon_chain/src/eth1_chain.rs b/beacon_node/beacon_chain/src/eth1_chain.rs index 2252d5b9c9..276262085e 100644 --- a/beacon_node/beacon_chain/src/eth1_chain.rs +++ b/beacon_node/beacon_chain/src/eth1_chain.rs @@ -10,6 +10,7 @@ use state_processing::per_block_processing::get_new_eth1_data; use std::cmp::Ordering; use std::collections::HashMap; use std::marker::PhantomData; +use std::sync::Arc; use std::time::{SystemTime, UNIX_EPOCH}; use store::{DBColumn, Error as StoreError, StoreItem}; use task_executor::TaskExecutor; @@ -284,7 +285,7 @@ where ssz_container: &SszEth1, config: Eth1Config, log: &Logger, - spec: ChainSpec, + spec: Arc, ) -> Result { let backend = Eth1ChainBackend::from_bytes(&ssz_container.backend_bytes, config, log.clone(), spec)?; @@ -355,7 +356,7 @@ pub trait Eth1ChainBackend: Sized + Send + Sync { bytes: &[u8], config: Eth1Config, log: Logger, - spec: ChainSpec, + spec: Arc, ) -> Result; } @@ -413,7 +414,7 @@ impl Eth1ChainBackend for DummyEth1ChainBackend { _bytes: &[u8], _config: Eth1Config, _log: Logger, - _spec: ChainSpec, + _spec: Arc, ) -> Result { Ok(Self(PhantomData)) } @@ -441,7 +442,7 @@ impl CachingEth1Backend { /// Instantiates `self` with empty caches. /// /// Does not connect to the eth1 node or start any tasks to keep the cache updated. 
- pub fn new(config: Eth1Config, log: Logger, spec: ChainSpec) -> Result { + pub fn new(config: Eth1Config, log: Logger, spec: Arc) -> Result { Ok(Self { core: HttpService::new(config, log.clone(), spec) .map_err(|e| format!("Failed to create eth1 http service: {:?}", e))?, @@ -596,7 +597,7 @@ impl Eth1ChainBackend for CachingEth1Backend { bytes: &[u8], config: Eth1Config, log: Logger, - spec: ChainSpec, + spec: Arc, ) -> Result { let inner = HttpService::from_bytes(bytes, config, log.clone(), spec)?; Ok(Self { @@ -752,7 +753,8 @@ mod test { let log = test_logger(); Eth1Chain::new( - CachingEth1Backend::new(eth1_config, log, MainnetEthSpec::default_spec()).unwrap(), + CachingEth1Backend::new(eth1_config, log, Arc::new(MainnetEthSpec::default_spec())) + .unwrap(), ) } diff --git a/beacon_node/beacon_chain/src/graffiti_calculator.rs b/beacon_node/beacon_chain/src/graffiti_calculator.rs index 42a1aa1a0b..4373164d62 100644 --- a/beacon_node/beacon_chain/src/graffiti_calculator.rs +++ b/beacon_node/beacon_chain/src/graffiti_calculator.rs @@ -242,6 +242,7 @@ mod tests { use execution_layer::test_utils::{DEFAULT_CLIENT_VERSION, DEFAULT_ENGINE_CAPABILITIES}; use execution_layer::EngineCapabilities; use slog::info; + use std::sync::Arc; use std::sync::LazyLock; use std::time::Duration; use types::{ChainSpec, Graffiti, Keypair, MinimalEthSpec, GRAFFITI_BYTES_LEN}; @@ -253,7 +254,7 @@ mod tests { fn get_harness( validator_count: usize, - spec: ChainSpec, + spec: Arc, chain_config: Option, ) -> BeaconChainHarness> { let harness = BeaconChainHarness::builder(MinimalEthSpec) @@ -272,7 +273,7 @@ mod tests { #[tokio::test] async fn check_graffiti_without_el_version_support() { - let spec = test_spec::(); + let spec = Arc::new(test_spec::()); let harness = get_harness(VALIDATOR_COUNT, spec, None); // modify execution engine so it doesn't support engine_getClientVersionV1 method let mock_execution_layer = harness.mock_execution_layer.as_ref().unwrap(); @@ -313,7 +314,7 @@ mod tests 
{ #[tokio::test] async fn check_graffiti_with_el_version_support() { - let spec = test_spec::(); + let spec = Arc::new(test_spec::()); let harness = get_harness(VALIDATOR_COUNT, spec, None); let found_graffiti_bytes = harness.chain.graffiti_calculator.get_graffiti(None).await.0; @@ -355,7 +356,7 @@ mod tests { #[tokio::test] async fn check_graffiti_with_validator_specified_value() { - let spec = test_spec::(); + let spec = Arc::new(test_spec::()); let harness = get_harness(VALIDATOR_COUNT, spec, None); let graffiti_str = "nice graffiti bro"; diff --git a/beacon_node/beacon_chain/src/observed_data_sidecars.rs b/beacon_node/beacon_chain/src/observed_data_sidecars.rs index 601241dd8a..9b59a8f85b 100644 --- a/beacon_node/beacon_chain/src/observed_data_sidecars.rs +++ b/beacon_node/beacon_chain/src/observed_data_sidecars.rs @@ -6,6 +6,7 @@ use crate::observed_block_producers::ProposalKey; use std::collections::{HashMap, HashSet}; use std::marker::PhantomData; +use std::sync::Arc; use types::{BlobSidecar, ChainSpec, DataColumnSidecar, EthSpec, Slot}; #[derive(Debug, PartialEq)] @@ -74,13 +75,13 @@ pub struct ObservedDataSidecars { finalized_slot: Slot, /// Stores all received data indices for a given `(ValidatorIndex, Slot)` tuple. items: HashMap>, - spec: ChainSpec, + spec: Arc, _phantom: PhantomData, } impl ObservedDataSidecars { /// Instantiates `Self` with `finalized_slot == 0`. 
- pub fn new(spec: ChainSpec) -> Self { + pub fn new(spec: Arc) -> Self { Self { finalized_slot: Slot::new(0), items: HashMap::new(), @@ -167,7 +168,7 @@ mod tests { #[test] fn pruning() { - let spec = test_spec::(); + let spec = Arc::new(test_spec::()); let mut cache = ObservedDataSidecars::>::new(spec); assert_eq!(cache.finalized_slot, 0, "finalized slot is zero"); @@ -306,7 +307,7 @@ mod tests { #[test] fn simple_observations() { - let spec = test_spec::(); + let spec = Arc::new(test_spec::()); let mut cache = ObservedDataSidecars::>::new(spec); // Slot 0, index 0 diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 8261500fba..582d20637b 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -206,7 +206,7 @@ pub fn test_spec() -> ChainSpec { pub struct Builder { eth_spec_instance: T::EthSpec, - spec: Option, + spec: Option>, validator_keypairs: Option>, withdrawal_keypairs: Vec>, chain_config: Option, @@ -395,12 +395,12 @@ where self.spec_or_default(None) } - pub fn spec(self, spec: ChainSpec) -> Self { + pub fn spec(self, spec: Arc) -> Self { self.spec_or_default(Some(spec)) } - pub fn spec_or_default(mut self, spec: Option) -> Self { - self.spec = Some(spec.unwrap_or_else(test_spec::)); + pub fn spec_or_default(mut self, spec: Option>) -> Self { + self.spec = Some(spec.unwrap_or_else(|| Arc::new(test_spec::()))); self } @@ -648,7 +648,7 @@ pub struct BeaconChainHarness { pub withdrawal_keypairs: Vec>, pub chain: Arc>, - pub spec: ChainSpec, + pub spec: Arc, pub shutdown_receiver: Arc>>, pub runtime: TestRuntime, diff --git a/beacon_node/beacon_chain/src/validator_pubkey_cache.rs b/beacon_node/beacon_chain/src/validator_pubkey_cache.rs index 917c20bfa5..877c297a3b 100644 --- a/beacon_node/beacon_chain/src/validator_pubkey_cache.rs +++ b/beacon_node/beacon_chain/src/validator_pubkey_cache.rs @@ -232,7 +232,8 @@ mod test { fn get_store() -> BeaconStore { 
Arc::new( - HotColdDB::open_ephemeral(<_>::default(), E::default_spec(), test_logger()).unwrap(), + HotColdDB::open_ephemeral(<_>::default(), Arc::new(E::default_spec()), test_logger()) + .unwrap(), ) } diff --git a/beacon_node/beacon_chain/tests/attestation_verification.rs b/beacon_node/beacon_chain/tests/attestation_verification.rs index 335884d57a..f3b25ed5ce 100644 --- a/beacon_node/beacon_chain/tests/attestation_verification.rs +++ b/beacon_node/beacon_chain/tests/attestation_verification.rs @@ -18,7 +18,7 @@ use ssz_types::BitVector; use state_processing::{ per_block_processing::errors::AttestationValidationError, per_slot_processing, }; -use std::sync::LazyLock; +use std::sync::{Arc, LazyLock}; use tree_hash::TreeHash; use types::{ signed_aggregate_and_proof::SignedAggregateAndProofRefMut, @@ -47,6 +47,7 @@ fn get_harness(validator_count: usize) -> BeaconChainHarness BeaconChainHarness (BeaconChainHarness>, ChainSpec) { +) -> (BeaconChainHarness>, Arc) { let mut spec = E::default_spec(); spec.altair_fork_epoch = Some(Epoch::new(0)); spec.bellatrix_fork_epoch = Some(Epoch::new(0)); spec.capella_fork_epoch = Some(Epoch::new(CAPELLA_FORK_EPOCH as u64)); + let spec = Arc::new(spec); let validator_keypairs = KEYPAIRS[0..validator_count].to_vec(); let genesis_state = interop_genesis_state( diff --git a/beacon_node/beacon_chain/tests/bellatrix.rs b/beacon_node/beacon_chain/tests/bellatrix.rs index 027082c11c..5bd3452623 100644 --- a/beacon_node/beacon_chain/tests/bellatrix.rs +++ b/beacon_node/beacon_chain/tests/bellatrix.rs @@ -49,7 +49,7 @@ async fn merge_with_terminal_block_hash_override() { spec.terminal_block_hash = genesis_pow_block_hash; let harness = BeaconChainHarness::builder(E::default()) - .spec(spec) + .spec(spec.into()) .logger(logging::test_logger()) .deterministic_keypairs(VALIDATOR_COUNT) .fresh_ephemeral_store() @@ -106,7 +106,7 @@ async fn base_altair_bellatrix_with_terminal_block_after_fork() { let mut execution_payloads = vec![]; let harness = 
BeaconChainHarness::builder(E::default()) - .spec(spec) + .spec(spec.into()) .logger(logging::test_logger()) .deterministic_keypairs(VALIDATOR_COUNT) .fresh_ephemeral_store() diff --git a/beacon_node/beacon_chain/tests/block_verification.rs b/beacon_node/beacon_chain/tests/block_verification.rs index faa4d74a18..535d63427a 100644 --- a/beacon_node/beacon_chain/tests/block_verification.rs +++ b/beacon_node/beacon_chain/tests/block_verification.rs @@ -1354,7 +1354,7 @@ async fn add_base_block_to_altair_chain() { spec.altair_fork_epoch = Some(Epoch::new(1)); let harness = BeaconChainHarness::builder(MainnetEthSpec) - .spec(spec) + .spec(spec.into()) .keypairs(KEYPAIRS[..].to_vec()) .fresh_ephemeral_store() .mock_execution_layer() @@ -1489,7 +1489,7 @@ async fn add_altair_block_to_base_chain() { spec.altair_fork_epoch = None; let harness = BeaconChainHarness::builder(MainnetEthSpec) - .spec(spec) + .spec(spec.into()) .keypairs(KEYPAIRS[..].to_vec()) .fresh_ephemeral_store() .mock_execution_layer() @@ -1622,7 +1622,7 @@ async fn import_duplicate_block_unrealized_justification() { let spec = MainnetEthSpec::default_spec(); let harness = BeaconChainHarness::builder(MainnetEthSpec) - .spec(spec) + .spec(spec.into()) .keypairs(KEYPAIRS[..].to_vec()) .fresh_ephemeral_store() .mock_execution_layer() diff --git a/beacon_node/beacon_chain/tests/capella.rs b/beacon_node/beacon_chain/tests/capella.rs index c8fd2637f0..ac97a95721 100644 --- a/beacon_node/beacon_chain/tests/capella.rs +++ b/beacon_node/beacon_chain/tests/capella.rs @@ -39,7 +39,7 @@ async fn base_altair_bellatrix_capella() { spec.capella_fork_epoch = Some(capella_fork_epoch); let harness = BeaconChainHarness::builder(E::default()) - .spec(spec) + .spec(spec.into()) .logger(logging::test_logger()) .deterministic_keypairs(VALIDATOR_COUNT) .fresh_ephemeral_store() diff --git a/beacon_node/beacon_chain/tests/events.rs b/beacon_node/beacon_chain/tests/events.rs index b8d4a7722a..ab784d3be4 100644 --- 
a/beacon_node/beacon_chain/tests/events.rs +++ b/beacon_node/beacon_chain/tests/events.rs @@ -12,7 +12,7 @@ type E = MinimalEthSpec; /// Verifies that a blob event is emitted when a gossip verified blob is received via gossip or the publish block API. #[tokio::test] async fn blob_sidecar_event_on_process_gossip_blob() { - let spec = ForkName::Deneb.make_genesis_spec(E::default_spec()); + let spec = Arc::new(ForkName::Deneb.make_genesis_spec(E::default_spec())); let harness = BeaconChainHarness::builder(E::default()) .spec(spec) .deterministic_keypairs(8) @@ -46,7 +46,7 @@ async fn blob_sidecar_event_on_process_gossip_blob() { /// Verifies that a blob event is emitted when blobs are received via RPC. #[tokio::test] async fn blob_sidecar_event_on_process_rpc_blobs() { - let spec = ForkName::Deneb.make_genesis_spec(E::default_spec()); + let spec = Arc::new(ForkName::Deneb.make_genesis_spec(E::default_spec())); let harness = BeaconChainHarness::builder(E::default()) .spec(spec) .deterministic_keypairs(8) diff --git a/beacon_node/beacon_chain/tests/op_verification.rs b/beacon_node/beacon_chain/tests/op_verification.rs index 2f8fb6d2bc..df0d561e1c 100644 --- a/beacon_node/beacon_chain/tests/op_verification.rs +++ b/beacon_node/beacon_chain/tests/op_verification.rs @@ -29,7 +29,7 @@ type TestHarness = BeaconChainHarness>; type HotColdDB = store::HotColdDB, LevelDB>; fn get_store(db_path: &TempDir) -> Arc { - let spec = test_spec::(); + let spec = Arc::new(test_spec::()); let hot_path = db_path.path().join("hot_db"); let cold_path = db_path.path().join("cold_db"); let blobs_path = db_path.path().join("blobs_db"); diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index b455c3bace..dd195048e8 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -57,7 +57,7 @@ impl InvalidPayloadRig { spec.bellatrix_fork_epoch = 
Some(Epoch::new(0)); let harness = BeaconChainHarness::builder(MainnetEthSpec) - .spec(spec) + .spec(spec.into()) .chain_config(ChainConfig { reconstruct_historic_states: true, ..ChainConfig::default() diff --git a/beacon_node/beacon_chain/tests/rewards.rs b/beacon_node/beacon_chain/tests/rewards.rs index 323f4f38eb..be7045c54a 100644 --- a/beacon_node/beacon_chain/tests/rewards.rs +++ b/beacon_node/beacon_chain/tests/rewards.rs @@ -32,7 +32,7 @@ fn get_harness(spec: ChainSpec) -> BeaconChainHarness> { }; let harness = BeaconChainHarness::builder(E::default()) - .spec(spec) + .spec(Arc::new(spec)) .keypairs(KEYPAIRS.to_vec()) .fresh_ephemeral_store() .chain_config(chain_config) diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 541abaa424..5d83d65efd 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -69,7 +69,7 @@ fn get_store_generic( &blobs_path, |_, _, _| Ok(()), config, - spec, + spec.into(), log, ) .expect("disk store should initialize") @@ -182,7 +182,7 @@ async fn light_client_bootstrap_test() { let beacon_chain = BeaconChainBuilder::>::new(MinimalEthSpec, kzg) .store(store.clone()) - .custom_spec(test_spec::()) + .custom_spec(test_spec::().into()) .task_executor(harness.chain.task_executor.clone()) .logger(log.clone()) .weak_subjectivity_state( @@ -325,7 +325,7 @@ async fn light_client_updates_test() { let beacon_chain = BeaconChainBuilder::>::new(MinimalEthSpec, kzg) .store(store.clone()) - .custom_spec(test_spec::()) + .custom_spec(test_spec::().into()) .task_executor(harness.chain.task_executor.clone()) .logger(log.clone()) .weak_subjectivity_state( @@ -2695,7 +2695,7 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { let beacon_chain = BeaconChainBuilder::>::new(MinimalEthSpec, kzg) .store(store.clone()) - .custom_spec(test_spec::()) + .custom_spec(test_spec::().into()) 
.task_executor(harness.chain.task_executor.clone()) .logger(log.clone()) .weak_subjectivity_state( @@ -3162,7 +3162,7 @@ async fn revert_minority_fork_on_resume() { let db_path1 = tempdir().unwrap(); let store1 = get_store_generic(&db_path1, StoreConfig::default(), spec1.clone()); let harness1 = BeaconChainHarness::builder(MinimalEthSpec) - .spec(spec1) + .spec(spec1.clone().into()) .keypairs(KEYPAIRS[0..validator_count].to_vec()) .fresh_disk_store(store1) .mock_execution_layer() @@ -3172,7 +3172,7 @@ async fn revert_minority_fork_on_resume() { let db_path2 = tempdir().unwrap(); let store2 = get_store_generic(&db_path2, StoreConfig::default(), spec2.clone()); let harness2 = BeaconChainHarness::builder(MinimalEthSpec) - .spec(spec2.clone()) + .spec(spec2.clone().into()) .keypairs(KEYPAIRS[0..validator_count].to_vec()) .fresh_disk_store(store2) .mock_execution_layer() @@ -3268,7 +3268,7 @@ async fn revert_minority_fork_on_resume() { let resume_store = get_store_generic(&db_path1, StoreConfig::default(), spec2.clone()); let resumed_harness = TestHarness::builder(MinimalEthSpec) - .spec(spec2) + .spec(spec2.clone().into()) .keypairs(KEYPAIRS[0..validator_count].to_vec()) .resumed_disk_store(resume_store) .override_store_mutator(Box::new(move |mut builder| { diff --git a/beacon_node/beacon_chain/tests/sync_committee_verification.rs b/beacon_node/beacon_chain/tests/sync_committee_verification.rs index f8da2e8da1..d1b3139d42 100644 --- a/beacon_node/beacon_chain/tests/sync_committee_verification.rs +++ b/beacon_node/beacon_chain/tests/sync_committee_verification.rs @@ -30,7 +30,7 @@ fn get_harness(validator_count: usize) -> BeaconChainHarness { #[allow(clippy::type_complexity)] store: Option>>, runtime_context: Option>, - chain_spec: Option, + chain_spec: Option>, beacon_chain_builder: Option>, beacon_chain: Option>>, eth1_service: Option, @@ -137,7 +137,7 @@ where } /// Specifies the `ChainSpec`. 
- pub fn chain_spec(mut self, spec: ChainSpec) -> Self { + pub fn chain_spec(mut self, spec: Arc) -> Self { self.chain_spec = Some(spec); self } @@ -604,10 +604,9 @@ where }; let genesis_state = genesis_service - .wait_for_genesis_state( - Duration::from_millis(ETH1_GENESIS_UPDATE_INTERVAL_MILLIS), - context.eth2_config().spec.clone(), - ) + .wait_for_genesis_state(Duration::from_millis( + ETH1_GENESIS_UPDATE_INTERVAL_MILLIS, + )) .await?; let _ = exit_tx.send(()); @@ -641,7 +640,7 @@ where } /// Starts the networking stack. - pub async fn network(mut self, config: &NetworkConfig) -> Result { + pub async fn network(mut self, config: Arc) -> Result { let beacon_chain = self .beacon_chain .clone() diff --git a/beacon_node/eth1/src/inner.rs b/beacon_node/eth1/src/inner.rs index 7387642bf4..1f45346256 100644 --- a/beacon_node/eth1/src/inner.rs +++ b/beacon_node/eth1/src/inner.rs @@ -9,6 +9,7 @@ use parking_lot::RwLock; use ssz::four_byte_option_impl; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; +use std::sync::Arc; use superstruct::superstruct; use types::{ChainSpec, DepositTreeSnapshot, Eth1Data}; @@ -51,7 +52,7 @@ pub struct Inner { pub to_finalize: RwLock>, pub config: RwLock, pub remote_head_block: RwLock>, - pub spec: ChainSpec, + pub spec: Arc, } impl Inner { @@ -71,7 +72,7 @@ impl Inner { } /// Recover `Inner` given byte representation of eth1 deposit and block caches. - pub fn from_bytes(bytes: &[u8], config: Config, spec: ChainSpec) -> Result { + pub fn from_bytes(bytes: &[u8], config: Config, spec: Arc) -> Result { SszEth1Cache::from_ssz_bytes(bytes) .map_err(|e| format!("Ssz decoding error: {:?}", e))? 
.to_inner(config, spec) @@ -109,7 +110,7 @@ impl SszEth1Cache { } } - pub fn to_inner(&self, config: Config, spec: ChainSpec) -> Result { + pub fn to_inner(&self, config: Config, spec: Arc) -> Result { Ok(Inner { block_cache: RwLock::new(self.block_cache.clone()), deposit_cache: RwLock::new(DepositUpdater { diff --git a/beacon_node/eth1/src/service.rs b/beacon_node/eth1/src/service.rs index e5d60fac49..a70a927307 100644 --- a/beacon_node/eth1/src/service.rs +++ b/beacon_node/eth1/src/service.rs @@ -397,7 +397,7 @@ pub struct Service { impl Service { /// Creates a new service. Does not attempt to connect to the eth1 node. - pub fn new(config: Config, log: Logger, spec: ChainSpec) -> Result { + pub fn new(config: Config, log: Logger, spec: Arc) -> Result { Ok(Self { inner: Arc::new(Inner { block_cache: <_>::default(), @@ -414,6 +414,10 @@ impl Service { }) } + pub fn chain_spec(&self) -> &Arc { + &self.inner.spec + } + pub fn client(&self) -> &HttpJsonRpc { &self.inner.endpoint } @@ -422,7 +426,7 @@ impl Service { pub fn from_deposit_snapshot( config: Config, log: Logger, - spec: ChainSpec, + spec: Arc, deposit_snapshot: &DepositTreeSnapshot, ) -> Result { let deposit_cache = @@ -464,7 +468,7 @@ impl Service { bytes: &[u8], config: Config, log: Logger, - spec: ChainSpec, + spec: Arc, ) -> Result { let inner = Inner::from_bytes(bytes, config, spec)?; Ok(Self { diff --git a/beacon_node/eth1/tests/test.rs b/beacon_node/eth1/tests/test.rs index 3ad9b34381..e442ce4863 100644 --- a/beacon_node/eth1/tests/test.rs +++ b/beacon_node/eth1/tests/test.rs @@ -8,6 +8,7 @@ use logging::test_logger; use merkle_proof::verify_merkle_proof; use sensitive_url::SensitiveUrl; use std::ops::Range; +use std::sync::Arc; use std::time::Duration; use tree_hash::TreeHash; use types::{ @@ -122,8 +123,12 @@ mod eth1_cache { }; let cache_follow_distance = config.cache_follow_distance(); - let service = - Service::new(config, log.clone(), MainnetEthSpec::default_spec()).unwrap(); + let service = 
Service::new( + config, + log.clone(), + Arc::new(MainnetEthSpec::default_spec()), + ) + .unwrap(); // Create some blocks and then consume them, performing the test `rounds` times. for round in 0..2 { @@ -204,7 +209,7 @@ mod eth1_cache { ..Config::default() }, log, - MainnetEthSpec::default_spec(), + Arc::new(MainnetEthSpec::default_spec()), ) .unwrap(); @@ -259,7 +264,7 @@ mod eth1_cache { ..Config::default() }, log, - MainnetEthSpec::default_spec(), + Arc::new(MainnetEthSpec::default_spec()), ) .unwrap(); @@ -310,7 +315,7 @@ mod eth1_cache { ..Config::default() }, log, - MainnetEthSpec::default_spec(), + Arc::new(MainnetEthSpec::default_spec()), ) .unwrap(); @@ -365,7 +370,7 @@ mod deposit_tree { ..Config::default() }, log, - MainnetEthSpec::default_spec(), + Arc::new(MainnetEthSpec::default_spec()), ) .unwrap(); @@ -447,7 +452,7 @@ mod deposit_tree { ..Config::default() }, log, - MainnetEthSpec::default_spec(), + Arc::new(MainnetEthSpec::default_spec()), ) .unwrap(); @@ -694,7 +699,7 @@ mod fast { let anvil_client = eth1.json_rpc_client(); let now = get_block_number(&anvil_client).await; - let spec = MainnetEthSpec::default_spec(); + let spec = Arc::new(MainnetEthSpec::default_spec()); let service = Service::new( Config { endpoint: Eth1Endpoint::NoAuth( @@ -788,8 +793,12 @@ mod persist { block_cache_truncation: None, ..Config::default() }; - let service = - Service::new(config.clone(), log.clone(), MainnetEthSpec::default_spec()).unwrap(); + let service = Service::new( + config.clone(), + log.clone(), + Arc::new(MainnetEthSpec::default_spec()), + ) + .unwrap(); let n = 10; let deposits: Vec<_> = (0..n).map(|_| random_deposit_data()).collect(); for deposit in &deposits { @@ -828,9 +837,13 @@ mod persist { // Drop service and recover from bytes drop(service); - let recovered_service = - Service::from_bytes(ð1_bytes, config, log, MainnetEthSpec::default_spec()) - .unwrap(); + let recovered_service = Service::from_bytes( + ð1_bytes, + config, + log, + 
Arc::new(MainnetEthSpec::default_spec()), + ) + .unwrap(); assert_eq!( recovered_service.block_cache_len(), block_count, diff --git a/beacon_node/execution_layer/src/test_utils/mock_builder.rs b/beacon_node/execution_layer/src/test_utils/mock_builder.rs index 46830256b0..1291c8cf97 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_builder.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_builder.rs @@ -209,7 +209,7 @@ impl BidStuff for BuilderBid { pub struct MockBuilder { el: ExecutionLayer, beacon_client: BeaconNodeHttpClient, - spec: ChainSpec, + spec: Arc, val_registration_cache: Arc>>, builder_sk: SecretKey, operations: Arc>>, @@ -220,7 +220,7 @@ impl MockBuilder { pub fn new_for_testing( mock_el_url: SensitiveUrl, beacon_url: SensitiveUrl, - spec: ChainSpec, + spec: Arc, executor: TaskExecutor, ) -> (Self, (SocketAddr, impl Future)) { let file = NamedTempFile::new().unwrap(); @@ -252,7 +252,7 @@ impl MockBuilder { pub fn new( el: ExecutionLayer, beacon_client: BeaconNodeHttpClient, - spec: ChainSpec, + spec: Arc, ) -> Self { let sk = SecretKey::random(); Self { diff --git a/beacon_node/genesis/src/eth1_genesis_service.rs b/beacon_node/genesis/src/eth1_genesis_service.rs index 3347f6c6c2..3981833a5c 100644 --- a/beacon_node/genesis/src/eth1_genesis_service.rs +++ b/beacon_node/genesis/src/eth1_genesis_service.rs @@ -43,7 +43,7 @@ impl Eth1GenesisService { /// Creates a new service. Does not attempt to connect to the Eth1 node. /// /// Modifies the given `config` to make it more suitable to the task of listening to genesis. - pub fn new(config: Eth1Config, log: Logger, spec: ChainSpec) -> Result { + pub fn new(config: Eth1Config, log: Logger, spec: Arc) -> Result { let config = Eth1Config { // Truncating the block cache makes searching for genesis more // complicated. 
@@ -100,9 +100,9 @@ impl Eth1GenesisService { pub async fn wait_for_genesis_state( &self, update_interval: Duration, - spec: ChainSpec, ) -> Result, String> { let eth1_service = &self.eth1_service; + let spec = eth1_service.chain_spec(); let log = ð1_service.log; let mut sync_blocks = false; @@ -180,13 +180,13 @@ impl Eth1GenesisService { // Scan the new eth1 blocks, searching for genesis. if let Some(genesis_state) = - self.scan_new_blocks::(&mut highest_processed_block, &spec)? + self.scan_new_blocks::(&mut highest_processed_block, spec)? { info!( log, "Genesis ceremony complete"; "genesis_validators" => genesis_state - .get_active_validator_indices(E::genesis_epoch(), &spec) + .get_active_validator_indices(E::genesis_epoch(), spec) .map_err(|e| format!("Genesis validators error: {:?}", e))? .len(), "genesis_time" => genesis_state.genesis_time(), @@ -203,7 +203,7 @@ impl Eth1GenesisService { let latest_timestamp = self.stats.latest_timestamp.load(Ordering::Relaxed); // Perform some logging. - if timestamp_can_trigger_genesis(latest_timestamp, &spec)? { + if timestamp_can_trigger_genesis(latest_timestamp, spec)? { // Indicate that we are awaiting adequate active validators. 
if (active_validator_count as u64) < spec.min_genesis_active_validator_count { info!( diff --git a/beacon_node/genesis/tests/tests.rs b/beacon_node/genesis/tests/tests.rs index b5c6d85afe..6cc7517aa4 100644 --- a/beacon_node/genesis/tests/tests.rs +++ b/beacon_node/genesis/tests/tests.rs @@ -5,6 +5,7 @@ use eth1_test_rig::{AnvilEth1Instance, DelayThenDeposit, Middleware}; use genesis::{Eth1Config, Eth1GenesisService}; use sensitive_url::SensitiveUrl; use state_processing::is_valid_genesis_state; +use std::sync::Arc; use std::time::Duration; use types::{ test_utils::generate_deterministic_keypair, FixedBytesExtended, Hash256, MinimalEthSpec, @@ -24,7 +25,10 @@ pub fn new_env() -> Environment { fn basic() { let env = new_env(); let log = env.core_context().log().clone(); - let mut spec = env.eth2_config().spec.clone(); + let mut spec = (*env.eth2_config().spec).clone(); + spec.min_genesis_time = 0; + spec.min_genesis_active_validator_count = 8; + let spec = Arc::new(spec); env.runtime().block_on(async { let eth1 = AnvilEth1Instance::new(DEFAULT_CHAIN_ID.into()) @@ -60,9 +64,6 @@ fn basic() { // you're experiencing failures, try increasing the update_interval. 
let update_interval = Duration::from_millis(500); - spec.min_genesis_time = 0; - spec.min_genesis_active_validator_count = 8; - let deposits = (0..spec.min_genesis_active_validator_count + 2) .map(|i| { deposit_contract.deposit_helper::( @@ -79,8 +80,7 @@ fn basic() { let deposit_future = deposit_contract.deposit_multiple(deposits); - let wait_future = - service.wait_for_genesis_state::(update_interval, spec.clone()); + let wait_future = service.wait_for_genesis_state::(update_interval); let state = futures::try_join!(deposit_future, wait_future) .map(|(_, state)| state) diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 998114f565..15d463b661 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -153,6 +153,7 @@ pub struct Config { #[serde(with = "eth2::types::serde_status_code")] pub duplicate_block_status_code: StatusCode, pub enable_light_client_server: bool, + pub target_peers: usize, } impl Default for Config { @@ -169,6 +170,7 @@ impl Default for Config { enable_beacon_processor: true, duplicate_block_status_code: StatusCode::ACCEPTED, enable_light_client_server: false, + target_peers: 100, } } } @@ -2934,8 +2936,16 @@ pub fn serve( let is_optimistic = head_execution_status.is_optimistic_or_invalid(); + // When determining sync status, make an exception for single-node + // testnets with 0 peers. 
+ let sync_state = network_globals.sync_state.read(); + let is_synced = sync_state.is_synced() + || (sync_state.is_stalled() + && network_globals.config.target_peers == 0); + drop(sync_state); + let syncing_data = api_types::SyncingData { - is_syncing: !network_globals.sync_state.read().is_synced(), + is_syncing: !is_synced, is_optimistic, el_offline, head_slot, diff --git a/beacon_node/http_api/src/test_utils.rs b/beacon_node/http_api/src/test_utils.rs index dcd494a880..4742fa109f 100644 --- a/beacon_node/http_api/src/test_utils.rs +++ b/beacon_node/http_api/src/test_utils.rs @@ -16,7 +16,7 @@ use lighthouse_network::{ }, rpc::methods::{MetaData, MetaDataV2}, types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield, SyncState}, - ConnectedPoint, Enr, NetworkGlobals, PeerId, PeerManager, + ConnectedPoint, Enr, NetworkConfig, NetworkGlobals, PeerId, PeerManager, }; use logging::test_logger; use network::{NetworkReceivers, NetworkSenders}; @@ -71,7 +71,7 @@ impl InteractiveTester { mutator: Option>, ) -> Self { let mut harness_builder = BeaconChainHarness::builder(E::default()) - .spec_or_default(spec) + .spec_or_default(spec.map(Arc::new)) .logger(test_logger()) .mock_execution_layer(); @@ -145,12 +145,14 @@ pub async fn create_api_server( }); let enr_key = CombinedKey::generate_secp256k1(); let enr = Enr::builder().build(&enr_key).unwrap(); + let network_config = Arc::new(NetworkConfig::default()); let network_globals = Arc::new(NetworkGlobals::new( enr.clone(), meta_data, vec![], false, &log, + network_config, chain.spec.clone(), )); diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 19a01a91c5..940f3ae9c0 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -122,7 +122,7 @@ impl ApiTester { } pub async fn new_from_config(config: ApiTesterConfig) -> Self { - let spec = config.spec; + let spec = Arc::new(config.spec); let mut harness = BeaconChainHarness::builder(MainnetEthSpec) 
.spec(spec.clone()) diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index e1cea3153a..d57c67bacb 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -1215,10 +1215,11 @@ mod tests { } async fn build_discovery() -> Discovery { - let spec = ChainSpec::default(); + let spec = Arc::new(ChainSpec::default()); let keypair = secp256k1::Keypair::generate(); let mut config = NetworkConfig::default(); config.set_listening_addr(crate::ListenAddress::unused_v4_ports()); + let config = Arc::new(config); let enr_key: CombinedKey = CombinedKey::from_secp256k1(&keypair); let enr: Enr = build_enr::(&enr_key, &config, &EnrForkId::default(), &spec).unwrap(); let log = build_log(slog::Level::Debug, false); @@ -1232,6 +1233,7 @@ mod tests { vec![], false, &log, + config.clone(), spec.clone(), ); let keypair = keypair.into(); diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index 320bbc4d63..b8dce6667e 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -4,9 +4,7 @@ use crate::discovery::enr_ext::EnrExt; use crate::discovery::peer_id_to_node_id; use crate::rpc::{GoodbyeReason, MetaData, Protocol, RPCError, RPCResponseErrorCode}; use crate::service::TARGET_SUBNET_PEERS; -use crate::{error, metrics, Gossipsub}; -use crate::{NetworkGlobals, PeerId}; -use crate::{Subnet, SubnetDiscovery}; +use crate::{error, metrics, Gossipsub, NetworkGlobals, PeerId, Subnet, SubnetDiscovery}; use delay_map::HashSetDelay; use discv5::Enr; use libp2p::identify::Info as IdentifyInfo; @@ -1452,6 +1450,7 @@ enum ConnectingType { #[cfg(test)] mod tests { use super::*; + use crate::NetworkConfig; use slog::{o, Drain}; use types::MainnetEthSpec as E; @@ -1468,15 +1467,7 @@ mod tests { } async fn 
build_peer_manager(target_peer_count: usize) -> PeerManager { - let config = config::Config { - target_peer_count, - discovery_enabled: false, - ..Default::default() - }; - let log = build_log(slog::Level::Debug, false); - let spec = E::default_spec(); - let globals = NetworkGlobals::new_test_globals(vec![], &log, spec); - PeerManager::new(config, Arc::new(globals), &log).unwrap() + build_peer_manager_with_trusted_peers(vec![], target_peer_count).await } async fn build_peer_manager_with_trusted_peers( @@ -1488,9 +1479,13 @@ mod tests { discovery_enabled: false, ..Default::default() }; + let network_config = Arc::new(NetworkConfig { + target_peers: target_peer_count, + ..Default::default() + }); let log = build_log(slog::Level::Debug, false); - let spec = E::default_spec(); - let globals = NetworkGlobals::new_test_globals(trusted_peers, &log, spec); + let spec = Arc::new(E::default_spec()); + let globals = NetworkGlobals::new_test_globals(trusted_peers, &log, network_config, spec); PeerManager::new(config, Arc::new(globals), &log).unwrap() } diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index a97157ff0a..43217ba5ab 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -166,7 +166,7 @@ impl Network { &config, &ctx.enr_fork_id, &log, - ctx.chain_spec, + &ctx.chain_spec, )?; // Construct the metadata let custody_subnet_count = if ctx.chain_spec.is_peer_das_scheduled() { @@ -186,6 +186,7 @@ impl Network { trusted_peers, config.disable_peer_scoring, &log, + config.clone(), ctx.chain_spec.clone(), ); Arc::new(globals) @@ -209,7 +210,7 @@ impl Network { E::slots_per_epoch(), ); - let score_settings = PeerScoreSettings::new(ctx.chain_spec, gs_config.mesh_n()); + let score_settings = PeerScoreSettings::new(&ctx.chain_spec, gs_config.mesh_n()); let gossip_cache = { let slot_duration = 
std::time::Duration::from_secs(ctx.chain_spec.seconds_per_slot); @@ -346,7 +347,7 @@ impl Network { &config, network_globals.clone(), &log, - ctx.chain_spec, + &ctx.chain_spec, ) .await?; // start searching for peers diff --git a/beacon_node/lighthouse_network/src/service/utils.rs b/beacon_node/lighthouse_network/src/service/utils.rs index 8b6a84ae0c..81ee86b8b9 100644 --- a/beacon_node/lighthouse_network/src/service/utils.rs +++ b/beacon_node/lighthouse_network/src/service/utils.rs @@ -30,10 +30,10 @@ pub const MAX_CONNECTIONS_PER_PEER: u32 = 1; pub const METADATA_FILENAME: &str = "metadata"; pub struct Context<'a> { - pub config: &'a NetworkConfig, + pub config: Arc, pub enr_fork_id: EnrForkId, pub fork_context: Arc, - pub chain_spec: &'a ChainSpec, + pub chain_spec: Arc, pub libp2p_registry: Option<&'a mut Registry>, } diff --git a/beacon_node/lighthouse_network/src/types/globals.rs b/beacon_node/lighthouse_network/src/types/globals.rs index ac78e2cb01..f271c9ff72 100644 --- a/beacon_node/lighthouse_network/src/types/globals.rs +++ b/beacon_node/lighthouse_network/src/types/globals.rs @@ -2,12 +2,11 @@ use crate::peer_manager::peerdb::PeerDB; use crate::rpc::{MetaData, MetaDataV3}; use crate::types::{BackFillState, SyncState}; -use crate::Client; -use crate::EnrExt; -use crate::{Enr, GossipTopic, Multiaddr, PeerId}; +use crate::{Client, Enr, EnrExt, GossipTopic, Multiaddr, NetworkConfig, PeerId}; use itertools::Itertools; use parking_lot::RwLock; use std::collections::HashSet; +use std::sync::Arc; use types::{ChainSpec, ColumnIndex, DataColumnSubnetId, EthSpec}; pub struct NetworkGlobals { @@ -30,7 +29,10 @@ pub struct NetworkGlobals { /// The computed custody subnets and columns is stored to avoid re-computing. pub custody_subnets: Vec, pub custody_columns: Vec, - pub spec: ChainSpec, + /// Network-related configuration. Immutable after initialization. + pub config: Arc, + /// Ethereum chain configuration. Immutable after initialization. 
+ pub spec: Arc, } impl NetworkGlobals { @@ -40,7 +42,8 @@ impl NetworkGlobals { trusted_peers: Vec, disable_peer_scoring: bool, log: &slog::Logger, - spec: ChainSpec, + config: Arc, + spec: Arc, ) -> Self { let (custody_subnets, custody_columns) = if spec.is_peer_das_scheduled() { let custody_subnet_count = local_metadata @@ -75,6 +78,7 @@ impl NetworkGlobals { backfill_state: RwLock::new(BackFillState::NotRequired), custody_subnets, custody_columns, + config, spec, } } @@ -160,7 +164,8 @@ impl NetworkGlobals { pub fn new_test_globals( trusted_peers: Vec, log: &slog::Logger, - spec: ChainSpec, + config: Arc, + spec: Arc, ) -> NetworkGlobals { let metadata = MetaData::V3(MetaDataV3 { seq_number: 0, @@ -168,20 +173,21 @@ impl NetworkGlobals { syncnets: Default::default(), custody_subnet_count: spec.custody_requirement, }); - Self::new_test_globals_with_metadata(trusted_peers, metadata, log, spec) + Self::new_test_globals_with_metadata(trusted_peers, metadata, log, config, spec) } pub(crate) fn new_test_globals_with_metadata( trusted_peers: Vec, metadata: MetaData, log: &slog::Logger, - spec: ChainSpec, + config: Arc, + spec: Arc, ) -> NetworkGlobals { use crate::CombinedKeyExt; let keypair = libp2p::identity::secp256k1::Keypair::generate(); let enr_key: discv5::enr::CombinedKey = discv5::enr::CombinedKey::from_secp256k1(&keypair); let enr = discv5::enr::Enr::builder().build(&enr_key).unwrap(); - NetworkGlobals::new(enr, metadata, trusted_peers, false, log, spec) + NetworkGlobals::new(enr, metadata, trusted_peers, false, log, config, spec) } } @@ -198,9 +204,15 @@ mod test { let custody_subnet_count = spec.data_column_sidecar_subnet_count / 2; let metadata = get_metadata(custody_subnet_count); + let config = Arc::new(NetworkConfig::default()); - let globals = - NetworkGlobals::::new_test_globals_with_metadata(vec![], metadata, &log, spec); + let globals = NetworkGlobals::::new_test_globals_with_metadata( + vec![], + metadata, + &log, + config, + Arc::new(spec), + ); 
assert_eq!(globals.custody_subnets.len(), custody_subnet_count as usize); } @@ -213,9 +225,15 @@ mod test { let custody_subnet_count = spec.data_column_sidecar_subnet_count / 2; let custody_columns_count = spec.number_of_columns / 2; let metadata = get_metadata(custody_subnet_count); + let config = Arc::new(NetworkConfig::default()); - let globals = - NetworkGlobals::::new_test_globals_with_metadata(vec![], metadata, &log, spec); + let globals = NetworkGlobals::::new_test_globals_with_metadata( + vec![], + metadata, + &log, + config, + Arc::new(spec), + ); assert_eq!(globals.custody_columns.len(), custody_columns_count); } diff --git a/beacon_node/lighthouse_network/src/types/sync_state.rs b/beacon_node/lighthouse_network/src/types/sync_state.rs index b82e63bd9c..4322763fc5 100644 --- a/beacon_node/lighthouse_network/src/types/sync_state.rs +++ b/beacon_node/lighthouse_network/src/types/sync_state.rs @@ -91,6 +91,14 @@ impl SyncState { pub fn is_synced(&self) -> bool { matches!(self, SyncState::Synced | SyncState::BackFillSyncing { .. }) } + + /// Returns true if the node is *stalled*, i.e. has no synced peers. + /// + /// Usually this state is treated as unsynced, except in some places where we make an exception + /// for single-node testnets where having 0 peers is desired. + pub fn is_stalled(&self) -> bool { + matches!(self, SyncState::Stalled) + } } impl std::fmt::Display for SyncState { diff --git a/beacon_node/lighthouse_network/tests/common.rs b/beacon_node/lighthouse_network/tests/common.rs index 660d786169..84e19c81d0 100644 --- a/beacon_node/lighthouse_network/tests/common.rs +++ b/beacon_node/lighthouse_network/tests/common.rs @@ -76,7 +76,7 @@ pub fn build_log(level: slog::Level, enabled: bool) -> slog::Logger { } } -pub fn build_config(mut boot_nodes: Vec) -> NetworkConfig { +pub fn build_config(mut boot_nodes: Vec) -> Arc { let mut config = NetworkConfig::default(); // Find unused ports by using the 0 port. 
@@ -92,7 +92,7 @@ pub fn build_config(mut boot_nodes: Vec) -> NetworkConfig { config.enr_address = (Some(std::net::Ipv4Addr::LOCALHOST), None); config.boot_nodes_enr.append(&mut boot_nodes); config.network_dir = path.into_path(); - config + Arc::new(config) } pub async fn build_libp2p_instance( @@ -100,7 +100,7 @@ pub async fn build_libp2p_instance( boot_nodes: Vec, log: slog::Logger, fork_name: ForkName, - spec: &ChainSpec, + chain_spec: Arc, ) -> Libp2pInstance { let config = build_config(boot_nodes); // launch libp2p service @@ -109,10 +109,10 @@ pub async fn build_libp2p_instance( let (shutdown_tx, _) = futures::channel::mpsc::channel(1); let executor = task_executor::TaskExecutor::new(rt, exit, log.clone(), shutdown_tx); let libp2p_context = lighthouse_network::Context { - config: &config, + config, enr_fork_id: EnrForkId::default(), fork_context: Arc::new(fork_context(fork_name)), - chain_spec: spec, + chain_spec, libp2p_registry: None, }; Libp2pInstance( @@ -142,14 +142,16 @@ pub async fn build_node_pair( rt: Weak, log: &slog::Logger, fork_name: ForkName, - spec: &ChainSpec, + spec: Arc, protocol: Protocol, ) -> (Libp2pInstance, Libp2pInstance) { let sender_log = log.new(o!("who" => "sender")); let receiver_log = log.new(o!("who" => "receiver")); - let mut sender = build_libp2p_instance(rt.clone(), vec![], sender_log, fork_name, spec).await; - let mut receiver = build_libp2p_instance(rt, vec![], receiver_log, fork_name, spec).await; + let mut sender = + build_libp2p_instance(rt.clone(), vec![], sender_log, fork_name, spec.clone()).await; + let mut receiver = + build_libp2p_instance(rt, vec![], receiver_log, fork_name, spec.clone()).await; // let the two nodes set up listeners let sender_fut = async { @@ -218,11 +220,13 @@ pub async fn build_linear( log: slog::Logger, n: usize, fork_name: ForkName, - spec: &ChainSpec, + spec: Arc, ) -> Vec { let mut nodes = Vec::with_capacity(n); for _ in 0..n { - nodes.push(build_libp2p_instance(rt.clone(), vec![], 
log.clone(), fork_name, spec).await); + nodes.push( + build_libp2p_instance(rt.clone(), vec![], log.clone(), fork_name, spec.clone()).await, + ); } let multiaddrs: Vec = nodes diff --git a/beacon_node/lighthouse_network/tests/rpc_tests.rs b/beacon_node/lighthouse_network/tests/rpc_tests.rs index 25d249960d..8a0416c1f8 100644 --- a/beacon_node/lighthouse_network/tests/rpc_tests.rs +++ b/beacon_node/lighthouse_network/tests/rpc_tests.rs @@ -61,7 +61,7 @@ fn test_tcp_status_rpc() { let log = common::build_log(log_level, enable_logging); - let spec = E::default_spec(); + let spec = Arc::new(E::default_spec()); rt.block_on(async { // get sender/receiver @@ -69,7 +69,7 @@ fn test_tcp_status_rpc() { Arc::downgrade(&rt), &log, ForkName::Base, - &spec, + spec, Protocol::Tcp, ) .await; @@ -163,7 +163,7 @@ fn test_tcp_blocks_by_range_chunked_rpc() { let rt = Arc::new(Runtime::new().unwrap()); - let spec = E::default_spec(); + let spec = Arc::new(E::default_spec()); rt.block_on(async { // get sender/receiver @@ -171,7 +171,7 @@ fn test_tcp_blocks_by_range_chunked_rpc() { Arc::downgrade(&rt), &log, ForkName::Bellatrix, - &spec, + spec.clone(), Protocol::Tcp, ) .await; @@ -179,8 +179,6 @@ fn test_tcp_blocks_by_range_chunked_rpc() { // BlocksByRange Request let rpc_request = Request::BlocksByRange(BlocksByRangeRequest::new(0, messages_to_send)); - let spec = E::default_spec(); - // BlocksByRange Response let full_block = BeaconBlock::Base(BeaconBlockBase::::full(&spec)); let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty()); @@ -300,12 +298,12 @@ fn test_blobs_by_range_chunked_rpc() { rt.block_on(async { // get sender/receiver - let spec = E::default_spec(); + let spec = Arc::new(E::default_spec()); let (mut sender, mut receiver) = common::build_node_pair( Arc::downgrade(&rt), &log, ForkName::Deneb, - &spec, + spec.clone(), Protocol::Tcp, ) .await; @@ -410,7 +408,7 @@ fn test_tcp_blocks_by_range_over_limit() { let rt = 
Arc::new(Runtime::new().unwrap()); - let spec = E::default_spec(); + let spec = Arc::new(E::default_spec()); rt.block_on(async { // get sender/receiver @@ -418,7 +416,7 @@ fn test_tcp_blocks_by_range_over_limit() { Arc::downgrade(&rt), &log, ForkName::Bellatrix, - &spec, + spec.clone(), Protocol::Tcp, ) .await; @@ -502,7 +500,7 @@ fn test_tcp_blocks_by_range_chunked_rpc_terminates_correctly() { let rt = Arc::new(Runtime::new().unwrap()); - let spec = E::default_spec(); + let spec = Arc::new(E::default_spec()); rt.block_on(async { // get sender/receiver @@ -510,7 +508,7 @@ fn test_tcp_blocks_by_range_chunked_rpc_terminates_correctly() { Arc::downgrade(&rt), &log, ForkName::Base, - &spec, + spec.clone(), Protocol::Tcp, ) .await; @@ -519,7 +517,6 @@ fn test_tcp_blocks_by_range_chunked_rpc_terminates_correctly() { let rpc_request = Request::BlocksByRange(BlocksByRangeRequest::new(0, messages_to_send)); // BlocksByRange Response - let spec = E::default_spec(); let empty_block = BeaconBlock::empty(&spec); let empty_signed = SignedBeaconBlock::from_block(empty_block, Signature::empty()); let rpc_response = Response::BlocksByRange(Some(Arc::new(empty_signed))); @@ -631,7 +628,7 @@ fn test_tcp_blocks_by_range_single_empty_rpc() { let log = common::build_log(log_level, enable_logging); let rt = Arc::new(Runtime::new().unwrap()); - let spec = E::default_spec(); + let spec = Arc::new(E::default_spec()); rt.block_on(async { // get sender/receiver @@ -639,7 +636,7 @@ fn test_tcp_blocks_by_range_single_empty_rpc() { Arc::downgrade(&rt), &log, ForkName::Base, - &spec, + spec.clone(), Protocol::Tcp, ) .await; @@ -648,7 +645,6 @@ fn test_tcp_blocks_by_range_single_empty_rpc() { let rpc_request = Request::BlocksByRange(BlocksByRangeRequest::new(0, 10)); // BlocksByRange Response - let spec = E::default_spec(); let empty_block = BeaconBlock::empty(&spec); let empty_signed = SignedBeaconBlock::from_block(empty_block, Signature::empty()); let rpc_response = 
Response::BlocksByRange(Some(Arc::new(empty_signed))); @@ -739,7 +735,7 @@ fn test_tcp_blocks_by_root_chunked_rpc() { let messages_to_send = 6; let log = common::build_log(log_level, enable_logging); - let spec = E::default_spec(); + let spec = Arc::new(E::default_spec()); let rt = Arc::new(Runtime::new().unwrap()); // get sender/receiver @@ -748,7 +744,7 @@ fn test_tcp_blocks_by_root_chunked_rpc() { Arc::downgrade(&rt), &log, ForkName::Bellatrix, - &spec, + spec.clone(), Protocol::Tcp, ) .await; @@ -877,7 +873,7 @@ fn test_tcp_blocks_by_root_chunked_rpc_terminates_correctly() { let extra_messages_to_send: u64 = 10; let log = common::build_log(log_level, enable_logging); - let spec = E::default_spec(); + let spec = Arc::new(E::default_spec()); let rt = Arc::new(Runtime::new().unwrap()); // get sender/receiver @@ -886,7 +882,7 @@ fn test_tcp_blocks_by_root_chunked_rpc_terminates_correctly() { Arc::downgrade(&rt), &log, ForkName::Base, - &spec, + spec.clone(), Protocol::Tcp, ) .await; @@ -1016,12 +1012,12 @@ fn goodbye_test(log_level: Level, enable_logging: bool, protocol: Protocol) { let rt = Arc::new(Runtime::new().unwrap()); - let spec = E::default_spec(); + let spec = Arc::new(E::default_spec()); // get sender/receiver rt.block_on(async { let (mut sender, mut receiver) = - common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Base, &spec, protocol) + common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Base, spec, protocol) .await; // build the sender future diff --git a/beacon_node/network/src/network_beacon_processor/tests.rs b/beacon_node/network/src/network_beacon_processor/tests.rs index 391175ccd4..6e8f151a05 100644 --- a/beacon_node/network/src/network_beacon_processor/tests.rs +++ b/beacon_node/network/src/network_beacon_processor/tests.rs @@ -21,7 +21,7 @@ use lighthouse_network::{ discv5::enr::{self, CombinedKey}, rpc::methods::{MetaData, MetaDataV2}, types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield}, - Client, MessageId, 
NetworkGlobals, PeerId, Response, + Client, MessageId, NetworkConfig, NetworkGlobals, PeerId, Response, }; use slot_clock::SlotClock; use std::iter::Iterator; @@ -91,6 +91,7 @@ impl TestRig { // This allows for testing voluntary exits without building out a massive chain. let mut spec = test_spec::(); spec.shard_committee_period = 2; + let spec = Arc::new(spec); let harness = BeaconChainHarness::builder(MainnetEthSpec) .spec(spec.clone()) @@ -204,12 +205,14 @@ impl TestRig { }); let enr_key = CombinedKey::generate_secp256k1(); let enr = enr::Enr::builder().build(&enr_key).unwrap(); + let network_config = Arc::new(NetworkConfig::default()); let network_globals = Arc::new(NetworkGlobals::new( enr, meta_data, vec![], false, &log, + network_config, spec, )); diff --git a/beacon_node/network/src/persisted_dht.rs b/beacon_node/network/src/persisted_dht.rs index 522ff0536e..1e1420883e 100644 --- a/beacon_node/network/src/persisted_dht.rs +++ b/beacon_node/network/src/persisted_dht.rs @@ -81,7 +81,8 @@ mod tests { MinimalEthSpec, MemoryStore, MemoryStore, - > = HotColdDB::open_ephemeral(StoreConfig::default(), ChainSpec::minimal(), log).unwrap(); + > = HotColdDB::open_ephemeral(StoreConfig::default(), ChainSpec::minimal().into(), log) + .unwrap(); let enrs = vec![Enr::from_str("enr:-IS4QHCYrYZbAKWCBRlAy5zzaDZXJBGkcnh4MHcBFZntXNFrdvJjX04jRzjzCBOonrkTfj499SZuOh8R33Ls8RRcy5wBgmlkgnY0gmlwhH8AAAGJc2VjcDI1NmsxoQPKY0yuDUmstAHYpMa2_oxVtw0RW_QAdpzBQA8yWM0xOIN1ZHCCdl8").unwrap()]; store .put_item(&DHT_DB_KEY, &PersistedDht { enrs: enrs.clone() }) diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 5782fb00b6..150402a7ab 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -205,7 +205,7 @@ pub struct NetworkService { impl NetworkService { async fn build( beacon_chain: Arc>, - config: &NetworkConfig, + config: Arc, executor: task_executor::TaskExecutor, libp2p_registry: Option<&'_ mut Registry>, 
beacon_processor_send: BeaconProcessorSend, @@ -271,10 +271,10 @@ impl NetworkService { // construct the libp2p service context let service_context = Context { - config, + config: config.clone(), enr_fork_id, fork_context: fork_context.clone(), - chain_spec: &beacon_chain.spec, + chain_spec: beacon_chain.spec.clone(), libp2p_registry, }; @@ -318,12 +318,12 @@ impl NetworkService { let attestation_service = AttestationService::new( beacon_chain.clone(), network_globals.local_enr().node_id(), - config, + &config, &network_log, ); // sync committee subnet service let sync_committee_service = - SyncCommitteeService::new(beacon_chain.clone(), config, &network_log); + SyncCommitteeService::new(beacon_chain.clone(), &config, &network_log); // create a timer for updating network metrics let metrics_update = tokio::time::interval(Duration::from_secs(METRIC_UPDATE_INTERVAL)); @@ -368,7 +368,7 @@ impl NetworkService { #[allow(clippy::type_complexity)] pub async fn start( beacon_chain: Arc>, - config: &NetworkConfig, + config: Arc, executor: task_executor::TaskExecutor, libp2p_registry: Option<&'_ mut Registry>, beacon_processor_send: BeaconProcessorSend, diff --git a/beacon_node/network/src/service/tests.rs b/beacon_node/network/src/service/tests.rs index fec5f3f83f..b55992c624 100644 --- a/beacon_node/network/src/service/tests.rs +++ b/beacon_node/network/src/service/tests.rs @@ -73,6 +73,7 @@ mod tests { config.discv5_config.table_filter = |_| true; // Do not ignore local IPs config.upnp_enabled = false; config.boot_nodes_enr = enrs.clone(); + let config = Arc::new(config); runtime.block_on(async move { // Create a new network service which implicitly gets dropped at the // end of the block. @@ -86,7 +87,7 @@ mod tests { let _network_service = NetworkService::start( beacon_chain.clone(), - &config, + config, executor, None, beacon_processor_tx, @@ -125,7 +126,7 @@ mod tests { // Build beacon chain. 
let beacon_chain = BeaconChainHarness::builder(MinimalEthSpec) - .spec(spec.clone()) + .spec(spec.clone().into()) .deterministic_keypairs(8) .fresh_ephemeral_store() .mock_execution_layer() @@ -149,12 +150,13 @@ mod tests { config.set_ipv4_listening_address(std::net::Ipv4Addr::UNSPECIFIED, 21214, 21214, 21215); config.discv5_config.table_filter = |_| true; // Do not ignore local IPs config.upnp_enabled = false; + let config = Arc::new(config); let beacon_processor_channels = BeaconProcessorChannels::new(&BeaconProcessorConfig::default()); NetworkService::build( beacon_chain.clone(), - &config, + config, executor.clone(), None, beacon_processor_channels.beacon_processor_tx, diff --git a/beacon_node/network/src/subnet_service/tests/mod.rs b/beacon_node/network/src/subnet_service/tests/mod.rs index 3ee7c7f768..a784b05ea7 100644 --- a/beacon_node/network/src/subnet_service/tests/mod.rs +++ b/beacon_node/network/src/subnet_service/tests/mod.rs @@ -38,7 +38,7 @@ pub struct TestBeaconChain { impl TestBeaconChain { pub fn new_with_system_clock() -> Self { - let spec = MainnetEthSpec::default_spec(); + let spec = Arc::new(MainnetEthSpec::default_spec()); let keypairs = generate_deterministic_keypairs(1); diff --git a/beacon_node/network/src/sync/block_lookups/tests.rs b/beacon_node/network/src/sync/block_lookups/tests.rs index 5aa1d5c290..6e6c9a5cdf 100644 --- a/beacon_node/network/src/sync/block_lookups/tests.rs +++ b/beacon_node/network/src/sync/block_lookups/tests.rs @@ -28,7 +28,7 @@ use lighthouse_network::service::api_types::{ SyncRequestId, }; use lighthouse_network::types::SyncState; -use lighthouse_network::{NetworkGlobals, Request}; +use lighthouse_network::{NetworkConfig, NetworkGlobals, Request}; use slog::info; use slot_clock::{ManualSlotClock, SlotClock, TestingSlotClock}; use store::MemoryStore; @@ -116,7 +116,7 @@ impl TestRig { // Initialise a new beacon chain let harness = BeaconChainHarness::>::builder(E) - .spec(spec) + .spec(Arc::new(spec)) 
.logger(log.clone()) .deterministic_keypairs(1) .fresh_ephemeral_store() @@ -132,9 +132,11 @@ impl TestRig { let (network_tx, network_rx) = mpsc::unbounded_channel(); // TODO(das): make the generation of the ENR use the deterministic rng to have consistent // column assignments + let network_config = Arc::new(NetworkConfig::default()); let globals = Arc::new(NetworkGlobals::new_test_globals( Vec::new(), &log, + network_config, chain.spec.clone(), )); let (beacon_processor, beacon_processor_rx) = NetworkBeaconProcessor::null_for_testing( diff --git a/beacon_node/network/src/sync/range_sync/range.rs b/beacon_node/network/src/sync/range_sync/range.rs index 28dea8e4b5..f28b57eb18 100644 --- a/beacon_node/network/src/sync/range_sync/range.rs +++ b/beacon_node/network/src/sync/range_sync/range.rs @@ -51,8 +51,7 @@ use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::{BeaconChain, BeaconChainTypes}; use lighthouse_network::rpc::GoodbyeReason; use lighthouse_network::service::api_types::Id; -use lighthouse_network::PeerId; -use lighthouse_network::SyncInfo; +use lighthouse_network::{PeerId, SyncInfo}; use lru_cache::LRUTimeCache; use slog::{crit, debug, trace, warn}; use std::collections::HashMap; @@ -399,7 +398,7 @@ mod tests { use beacon_processor::WorkEvent as BeaconWorkEvent; use lighthouse_network::service::api_types::SyncRequestId; use lighthouse_network::{ - rpc::StatusMessage, service::api_types::AppRequestId, NetworkGlobals, + rpc::StatusMessage, service::api_types::AppRequestId, NetworkConfig, NetworkGlobals, }; use slog::{o, Drain}; use slot_clock::TestingSlotClock; @@ -692,9 +691,11 @@ mod tests { log.new(o!("component" => "range")), ); let (network_tx, network_rx) = mpsc::unbounded_channel(); + let network_config = Arc::new(NetworkConfig::default()); let globals = Arc::new(NetworkGlobals::new_test_globals( Vec::new(), &log, + network_config, chain.spec.clone(), )); let (network_beacon_processor, beacon_processor_rx) = diff --git 
a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index c60480ef37..e6a61edc09 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -801,7 +801,7 @@ mod release_tests { use state_processing::epoch_cache::initialize_epoch_cache; use state_processing::{common::get_attesting_indices_from_state, VerifyOperation}; use std::collections::BTreeSet; - use std::sync::LazyLock; + use std::sync::{Arc, LazyLock}; use types::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT; use types::*; @@ -816,7 +816,7 @@ mod release_tests { spec: Option, ) -> BeaconChainHarness> { let harness = BeaconChainHarness::builder(E::default()) - .spec_or_default(spec) + .spec_or_default(spec.map(Arc::new)) .keypairs(KEYPAIRS[0..validator_count].to_vec()) .fresh_ephemeral_store() .mock_execution_layer() diff --git a/beacon_node/src/lib.rs b/beacon_node/src/lib.rs index 945bd787dd..5bc0f9dc6a 100644 --- a/beacon_node/src/lib.rs +++ b/beacon_node/src/lib.rs @@ -119,7 +119,7 @@ impl ProductionBeaconNode { let slasher = Arc::new( Slasher::open( slasher_config, - Arc::new(spec), + spec, log.new(slog::o!("service" => "slasher")), ) .map_err(|e| format!("Slasher open error: {:?}", e))?, @@ -174,7 +174,7 @@ impl ProductionBeaconNode { builder .build_beacon_chain()? - .network(&client_config.network) + .network(Arc::new(client_config.network)) .await? .notifier()? .http_metrics_config(client_config.http_metrics.clone()) diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index bd87cdcfee..ba288039d6 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -82,7 +82,7 @@ pub struct HotColdDB, Cold: ItemStore> { /// LRU cache of replayed states. historic_state_cache: Mutex>>, /// Chain spec. - pub(crate) spec: ChainSpec, + pub(crate) spec: Arc, /// Logger. pub log: Logger, /// Mere vessel for E. 
@@ -194,7 +194,7 @@ pub enum HotColdDBError { impl HotColdDB, MemoryStore> { pub fn open_ephemeral( config: StoreConfig, - spec: ChainSpec, + spec: Arc, log: Logger, ) -> Result, MemoryStore>, Error> { Self::verify_config(&config)?; @@ -231,7 +231,7 @@ impl HotColdDB, LevelDB> { blobs_db_path: &Path, migrate_schema: impl FnOnce(Arc, SchemaVersion, SchemaVersion) -> Result<(), Error>, config: StoreConfig, - spec: ChainSpec, + spec: Arc, log: Logger, ) -> Result, Error> { Self::verify_slots_per_restore_point(config.slots_per_restore_point)?; @@ -1868,7 +1868,7 @@ impl, Cold: ItemStore> HotColdDB } /// Get a reference to the `ChainSpec` used by the database. - pub fn get_chain_spec(&self) -> &ChainSpec { + pub fn get_chain_spec(&self) -> &Arc { &self.spec } diff --git a/beacon_node/store/src/iter.rs b/beacon_node/store/src/iter.rs index a7e0c09ed1..71dc96d99e 100644 --- a/beacon_node/store/src/iter.rs +++ b/beacon_node/store/src/iter.rs @@ -385,6 +385,7 @@ mod test { use beacon_chain::test_utils::BeaconChainHarness; use beacon_chain::types::{ChainSpec, MainnetEthSpec}; use sloggers::{null::NullLoggerBuilder, Build}; + use std::sync::Arc; use types::FixedBytesExtended; fn get_state() -> BeaconState { @@ -401,7 +402,8 @@ mod test { fn block_root_iter() { let log = NullLoggerBuilder.build().unwrap(); let store = - HotColdDB::open_ephemeral(Config::default(), ChainSpec::minimal(), log).unwrap(); + HotColdDB::open_ephemeral(Config::default(), Arc::new(ChainSpec::minimal()), log) + .unwrap(); let slots_per_historical_root = MainnetEthSpec::slots_per_historical_root(); let mut state_a: BeaconState = get_state(); @@ -449,7 +451,8 @@ mod test { fn state_root_iter() { let log = NullLoggerBuilder.build().unwrap(); let store = - HotColdDB::open_ephemeral(Config::default(), ChainSpec::minimal(), log).unwrap(); + HotColdDB::open_ephemeral(Config::default(), Arc::new(ChainSpec::minimal()), log) + .unwrap(); let slots_per_historical_root = MainnetEthSpec::slots_per_historical_root(); 
let mut state_a: BeaconState = get_state(); diff --git a/common/eth2_config/src/lib.rs b/common/eth2_config/src/lib.rs index 9104db8f67..cd5d7a8bd4 100644 --- a/common/eth2_config/src/lib.rs +++ b/common/eth2_config/src/lib.rs @@ -7,6 +7,7 @@ use std::env; use std::path::PathBuf; +use std::sync::Arc; use types::{ChainSpec, EthSpecId}; pub use paste::paste; @@ -44,15 +45,12 @@ const CHIADO_GENESIS_STATE_SOURCE: GenesisStateSource = GenesisStateSource::Url #[derive(Debug, Clone)] pub struct Eth2Config { pub eth_spec_id: EthSpecId, - pub spec: ChainSpec, + pub spec: Arc, } impl Default for Eth2Config { fn default() -> Self { - Self { - eth_spec_id: EthSpecId::Minimal, - spec: ChainSpec::minimal(), - } + Self::minimal() } } @@ -60,21 +58,21 @@ impl Eth2Config { pub fn mainnet() -> Self { Self { eth_spec_id: EthSpecId::Mainnet, - spec: ChainSpec::mainnet(), + spec: Arc::new(ChainSpec::mainnet()), } } pub fn minimal() -> Self { Self { eth_spec_id: EthSpecId::Minimal, - spec: ChainSpec::minimal(), + spec: Arc::new(ChainSpec::minimal()), } } pub fn gnosis() -> Self { Self { eth_spec_id: EthSpecId::Gnosis, - spec: ChainSpec::gnosis(), + spec: Arc::new(ChainSpec::gnosis()), } } } diff --git a/consensus/fork_choice/tests/tests.rs b/consensus/fork_choice/tests/tests.rs index b1ef833be0..ce19d68203 100644 --- a/consensus/fork_choice/tests/tests.rs +++ b/consensus/fork_choice/tests/tests.rs @@ -55,7 +55,7 @@ impl ForkChoiceTest { // Run fork choice tests against the latest fork. 
let spec = ForkName::latest().make_genesis_spec(ChainSpec::default()); let harness = BeaconChainHarness::builder(MainnetEthSpec) - .spec(spec) + .spec(spec.into()) .chain_config(chain_config) .deterministic_keypairs(VALIDATOR_COUNT) .fresh_ephemeral_store() diff --git a/consensus/state_processing/src/per_block_processing/tests.rs b/consensus/state_processing/src/per_block_processing/tests.rs index f8b354d92d..c59449634a 100644 --- a/consensus/state_processing/src/per_block_processing/tests.rs +++ b/consensus/state_processing/src/per_block_processing/tests.rs @@ -12,7 +12,7 @@ use crate::{ }; use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; use ssz_types::Bitfield; -use std::sync::LazyLock; +use std::sync::{Arc, LazyLock}; use test_utils::generate_deterministic_keypairs; use types::*; @@ -1017,6 +1017,7 @@ async fn fork_spanning_exit() { spec.altair_fork_epoch = Some(Epoch::new(2)); spec.bellatrix_fork_epoch = Some(Epoch::new(4)); spec.shard_committee_period = 0; + let spec = Arc::new(spec); let harness = BeaconChainHarness::builder(MainnetEthSpec) .spec(spec.clone()) diff --git a/consensus/state_processing/src/per_epoch_processing/tests.rs b/consensus/state_processing/src/per_epoch_processing/tests.rs index 8c240548b0..b93ede248c 100644 --- a/consensus/state_processing/src/per_epoch_processing/tests.rs +++ b/consensus/state_processing/src/per_epoch_processing/tests.rs @@ -45,6 +45,7 @@ mod release_tests { per_slot_processing::per_slot_processing, EpochProcessingError, SlotProcessingError, }; use beacon_chain::test_utils::{AttestationStrategy, BlockStrategy}; + use std::sync::Arc; use types::{Epoch, ForkName, InconsistentFork, MainnetEthSpec}; #[tokio::test] @@ -56,7 +57,7 @@ mod release_tests { let altair_state = { let harness = BeaconChainHarness::builder(MainnetEthSpec) - .spec(spec.clone()) + .spec(Arc::new(spec.clone())) .deterministic_keypairs(8) .fresh_ephemeral_store() .build(); @@ -116,7 +117,7 @@ mod release_tests { let base_state 
= { let harness = BeaconChainHarness::builder(MainnetEthSpec) - .spec(spec.clone()) + .spec(Arc::new(spec.clone())) .deterministic_keypairs(8) .fresh_ephemeral_store() .build(); diff --git a/database_manager/src/lib.rs b/database_manager/src/lib.rs index c5344f1f92..3d55631848 100644 --- a/database_manager/src/lib.rs +++ b/database_manager/src/lib.rs @@ -301,7 +301,7 @@ pub fn migrate_db( runtime_context: &RuntimeContext, log: Logger, ) -> Result<(), Error> { - let spec = &runtime_context.eth2_config.spec; + let spec = runtime_context.eth2_config.spec.clone(); let hot_path = client_config.get_db_path(); let cold_path = client_config.get_freezer_db_path(); let blobs_path = client_config.get_blobs_db_path(); @@ -334,7 +334,7 @@ pub fn migrate_db( from, to, log, - spec, + &spec, ) } diff --git a/lcli/src/transition_blocks.rs b/lcli/src/transition_blocks.rs index ec3bb5b9ed..94d95a0d1c 100644 --- a/lcli/src/transition_blocks.rs +++ b/lcli/src/transition_blocks.rs @@ -103,7 +103,7 @@ pub fn run( network_config: Eth2NetworkConfig, matches: &ArgMatches, ) -> Result<(), String> { - let spec = &network_config.chain_spec::()?; + let spec = Arc::new(network_config.chain_spec::()?); let executor = env.core_context().executor; /* @@ -137,13 +137,15 @@ pub fn run( (Some(pre_state_path), Some(block_path), None) => { info!("Block path: {:?}", block_path); info!("Pre-state path: {:?}", pre_state_path); - let pre_state = load_from_ssz_with(&pre_state_path, spec, BeaconState::from_ssz_bytes)?; - let block = load_from_ssz_with(&block_path, spec, SignedBeaconBlock::from_ssz_bytes)?; + let pre_state = + load_from_ssz_with(&pre_state_path, &spec, BeaconState::from_ssz_bytes)?; + let block = load_from_ssz_with(&block_path, &spec, SignedBeaconBlock::from_ssz_bytes)?; (pre_state, None, block) } (None, None, Some(beacon_url)) => { let block_id: BlockId = parse_required(matches, "block-id")?; let client = BeaconNodeHttpClient::new(beacon_url, Timeouts::set_all(HTTP_TIMEOUT)); + let inner_spec 
= spec.clone(); executor .handle() .ok_or("shutdown in progress")? @@ -155,7 +157,7 @@ pub fn run( .ok_or_else(|| format!("Unable to locate block at {:?}", block_id))? .data; - if block.slot() == spec.genesis_slot { + if block.slot() == inner_spec.genesis_slot { return Err("Cannot run on the genesis block".to_string()); } @@ -215,7 +217,7 @@ pub fn run( if config.exclude_cache_builds { pre_state - .build_all_caches(spec) + .build_all_caches(&spec) .map_err(|e| format!("Unable to build caches: {:?}", e))?; let state_root = pre_state .update_tree_hash_cache() @@ -251,7 +253,7 @@ pub fn run( &config, &validator_pubkey_cache, &mut saved_ctxt, - spec, + &spec, )?; let duration = Instant::now().duration_since(start); diff --git a/lighthouse/environment/src/lib.rs b/lighthouse/environment/src/lib.rs index aa2caa2350..9ad40a6acd 100644 --- a/lighthouse/environment/src/lib.rs +++ b/lighthouse/environment/src/lib.rs @@ -333,7 +333,7 @@ impl EnvironmentBuilder { eth2_network_config: Eth2NetworkConfig, ) -> Result { // Create a new chain spec from the default configuration. - self.eth2_config.spec = eth2_network_config.chain_spec::()?; + self.eth2_config.spec = Arc::new(eth2_network_config.chain_spec::()?); self.eth2_network_config = Some(eth2_network_config); Ok(self) diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index 2a2cc067e5..33ae132e8a 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -350,11 +350,12 @@ impl Case for ForkChoiceTest { /// A testing rig used to execute a test case. 
struct Tester { harness: BeaconChainHarness>, - spec: ChainSpec, + spec: Arc, } impl Tester { pub fn new(case: &ForkChoiceTest, spec: ChainSpec) -> Result { + let spec = Arc::new(spec); let genesis_time = case.anchor_state.genesis_time(); if case.anchor_state.slot() != spec.genesis_slot { diff --git a/testing/simulator/src/basic_sim.rs b/testing/simulator/src/basic_sim.rs index 16badaffc2..e1cef95cd3 100644 --- a/testing/simulator/src/basic_sim.rs +++ b/testing/simulator/src/basic_sim.rs @@ -11,6 +11,7 @@ use node_test_rig::{ }; use rayon::prelude::*; use std::cmp::max; +use std::sync::Arc; use std::time::Duration; use tokio::time::sleep; use types::{Epoch, EthSpec, MinimalEthSpec}; @@ -98,7 +99,7 @@ pub fn run_basic_sim(matches: &ArgMatches) -> Result<(), String> { .multi_threaded_tokio_runtime()? .build()?; - let spec = &mut env.eth2_config.spec; + let mut spec = (*env.eth2_config.spec).clone(); let total_validator_count = validators_per_node * node_count; let genesis_delay = GENESIS_DELAY; @@ -117,6 +118,8 @@ pub fn run_basic_sim(matches: &ArgMatches) -> Result<(), String> { spec.capella_fork_epoch = Some(Epoch::new(CAPELLA_FORK_EPOCH)); spec.deneb_fork_epoch = Some(Epoch::new(DENEB_FORK_EPOCH)); //spec.electra_fork_epoch = Some(Epoch::new(ELECTRA_FORK_EPOCH)); + let spec = Arc::new(spec); + env.eth2_config.spec = spec.clone(); let slot_duration = Duration::from_secs(spec.seconds_per_slot); let slots_per_epoch = MinimalEthSpec::slots_per_epoch(); diff --git a/testing/simulator/src/fallback_sim.rs b/testing/simulator/src/fallback_sim.rs index 73984aadad..b27a6246bf 100644 --- a/testing/simulator/src/fallback_sim.rs +++ b/testing/simulator/src/fallback_sim.rs @@ -10,6 +10,7 @@ use node_test_rig::{ }; use rayon::prelude::*; use std::cmp::max; +use std::sync::Arc; use std::time::Duration; use tokio::time::sleep; use types::{Epoch, EthSpec, MinimalEthSpec}; @@ -105,7 +106,7 @@ pub fn run_fallback_sim(matches: &ArgMatches) -> Result<(), String> { 
.multi_threaded_tokio_runtime()? .build()?; - let spec = &mut env.eth2_config.spec; + let mut spec = (*env.eth2_config.spec).clone(); let total_validator_count = validators_per_vc * vc_count; let node_count = vc_count * bns_per_vc; @@ -122,6 +123,8 @@ pub fn run_fallback_sim(matches: &ArgMatches) -> Result<(), String> { spec.capella_fork_epoch = Some(Epoch::new(CAPELLA_FORK_EPOCH)); spec.deneb_fork_epoch = Some(Epoch::new(DENEB_FORK_EPOCH)); //spec.electra_fork_epoch = Some(Epoch::new(ELECTRA_FORK_EPOCH)); + let spec = Arc::new(spec); + env.eth2_config.spec = spec.clone(); let slot_duration = Duration::from_secs(spec.seconds_per_slot); let slots_per_epoch = MinimalEthSpec::slots_per_epoch(); diff --git a/testing/web3signer_tests/src/lib.rs b/testing/web3signer_tests/src/lib.rs index f6ee01a4ba..3a039d3c80 100644 --- a/testing/web3signer_tests/src/lib.rs +++ b/testing/web3signer_tests/src/lib.rs @@ -317,7 +317,7 @@ mod tests { validator_definitions: Vec, slashing_protection_config: SlashingProtectionConfig, using_web3signer: bool, - spec: ChainSpec, + spec: Arc, ) -> Self { let log = test_logger(); let validator_dir = TempDir::new().unwrap(); @@ -408,7 +408,7 @@ mod tests { pub async fn new( network: &str, slashing_protection_config: SlashingProtectionConfig, - spec: ChainSpec, + spec: Arc, listen_port: u16, ) -> Self { let signer_rig = @@ -575,7 +575,7 @@ mod tests { /// Test all the "base" (phase 0) types. 
async fn test_base_types(network: &str, listen_port: u16) { let network_config = Eth2NetworkConfig::constant(network).unwrap().unwrap(); - let spec = &network_config.chain_spec::().unwrap(); + let spec = Arc::new(network_config.chain_spec::().unwrap()); TestingRig::new( network, @@ -591,13 +591,16 @@ mod tests { .unwrap() }) .await - .assert_signatures_match("beacon_block_base", |pubkey, validator_store| async move { - let block = BeaconBlock::Base(BeaconBlockBase::empty(spec)); - let block_slot = block.slot(); - validator_store - .sign_block(pubkey, block, block_slot) - .await - .unwrap() + .assert_signatures_match("beacon_block_base", |pubkey, validator_store| { + let spec = spec.clone(); + async move { + let block = BeaconBlock::Base(BeaconBlockBase::empty(&spec)); + let block_slot = block.slot(); + validator_store + .sign_block(pubkey, block, block_slot) + .await + .unwrap() + } }) .await .assert_signatures_match("attestation", |pubkey, validator_store| async move { @@ -645,7 +648,7 @@ mod tests { /// Test all the Altair types. 
async fn test_altair_types(network: &str, listen_port: u16) { let network_config = Eth2NetworkConfig::constant(network).unwrap().unwrap(); - let spec = &network_config.chain_spec::().unwrap(); + let spec = Arc::new(network_config.chain_spec::().unwrap()); let altair_fork_slot = spec .altair_fork_epoch .unwrap() @@ -658,17 +661,17 @@ mod tests { listen_port, ) .await - .assert_signatures_match( - "beacon_block_altair", - |pubkey, validator_store| async move { - let mut altair_block = BeaconBlockAltair::empty(spec); + .assert_signatures_match("beacon_block_altair", |pubkey, validator_store| { + let spec = spec.clone(); + async move { + let mut altair_block = BeaconBlockAltair::empty(&spec); altair_block.slot = altair_fork_slot; validator_store .sign_block(pubkey, BeaconBlock::Altair(altair_block), altair_fork_slot) .await .unwrap() - }, - ) + } + }) .await .assert_signatures_match( "sync_selection_proof", @@ -728,7 +731,7 @@ mod tests { /// Test all the Bellatrix types. async fn test_bellatrix_types(network: &str, listen_port: u16) { let network_config = Eth2NetworkConfig::constant(network).unwrap().unwrap(); - let spec = &network_config.chain_spec::().unwrap(); + let spec = Arc::new(network_config.chain_spec::().unwrap()); let bellatrix_fork_slot = spec .bellatrix_fork_epoch .unwrap() @@ -741,10 +744,10 @@ mod tests { listen_port, ) .await - .assert_signatures_match( - "beacon_block_bellatrix", - |pubkey, validator_store| async move { - let mut bellatrix_block = BeaconBlockBellatrix::empty(spec); + .assert_signatures_match("beacon_block_bellatrix", |pubkey, validator_store| { + let spec = spec.clone(); + async move { + let mut bellatrix_block = BeaconBlockBellatrix::empty(&spec); bellatrix_block.slot = bellatrix_fork_slot; validator_store .sign_block( @@ -754,8 +757,8 @@ mod tests { ) .await .unwrap() - }, - ) + } + }) .await; } @@ -767,7 +770,7 @@ mod tests { let network = "mainnet"; let network_config = Eth2NetworkConfig::constant(network).unwrap().unwrap(); - let 
spec = &network_config.chain_spec::().unwrap(); + let spec = Arc::new(network_config.chain_spec::().unwrap()); let bellatrix_fork_slot = spec .bellatrix_fork_epoch .unwrap() @@ -805,7 +808,7 @@ mod tests { }; let first_block = || { - let mut bellatrix_block = BeaconBlockBellatrix::empty(spec); + let mut bellatrix_block = BeaconBlockBellatrix::empty(&spec); bellatrix_block.slot = bellatrix_fork_slot; BeaconBlock::Bellatrix(bellatrix_block) }; diff --git a/validator_client/src/beacon_node_fallback.rs b/validator_client/src/beacon_node_fallback.rs index 58d7f9d8ee..6bba55d676 100644 --- a/validator_client/src/beacon_node_fallback.rs +++ b/validator_client/src/beacon_node_fallback.rs @@ -347,7 +347,7 @@ pub struct BeaconNodeFallback { candidates: Vec>, slot_clock: Option, broadcast_topics: Vec, - spec: ChainSpec, + spec: Arc, log: Logger, } @@ -355,7 +355,7 @@ impl BeaconNodeFallback { pub fn new( candidates: Vec>, broadcast_topics: Vec, - spec: ChainSpec, + spec: Arc, log: Logger, ) -> Self { Self { diff --git a/validator_client/src/duties_service.rs b/validator_client/src/duties_service.rs index faa157a859..1c205b38e5 100644 --- a/validator_client/src/duties_service.rs +++ b/validator_client/src/duties_service.rs @@ -229,7 +229,7 @@ pub struct DutiesService { /// The runtime for spawning tasks. pub context: RuntimeContext, /// The current chain spec. - pub spec: ChainSpec, + pub spec: Arc, //// Whether we permit large validator counts in the metrics. pub enable_high_validator_count_metrics: bool, /// If this validator is running in distributed mode. 
diff --git a/validator_client/src/http_api/mod.rs b/validator_client/src/http_api/mod.rs index 3d7cab8e5e..bfd5c1fa80 100644 --- a/validator_client/src/http_api/mod.rs +++ b/validator_client/src/http_api/mod.rs @@ -77,7 +77,7 @@ pub struct Context { pub secrets_dir: Option, pub graffiti_file: Option, pub graffiti_flag: Option, - pub spec: ChainSpec, + pub spec: Arc, pub config: Config, pub log: Logger, pub sse_logging_components: Option, @@ -217,7 +217,7 @@ pub fn serve( let inner_slot_clock = ctx.slot_clock.clone(); let slot_clock_filter = warp::any().map(move || inner_slot_clock.clone()); - let inner_spec = Arc::new(ctx.spec.clone()); + let inner_spec = ctx.spec.clone(); let spec_filter = warp::any().map(move || inner_spec.clone()); let api_token_path_inner = api_token_path.clone(); diff --git a/validator_client/src/http_api/test_utils.rs b/validator_client/src/http_api/test_utils.rs index 8bb56e87a3..6c0e8b1617 100644 --- a/validator_client/src/http_api/test_utils.rs +++ b/validator_client/src/http_api/test_utils.rs @@ -96,7 +96,7 @@ impl ApiTester { ..Default::default() }; - let spec = E::default_spec(); + let spec = Arc::new(E::default_spec()); let slashing_db_path = config.validator_dir.join(SLASHING_PROTECTION_FILENAME); let slashing_protection = SlashingDatabase::open_or_create(&slashing_db_path).unwrap(); @@ -110,7 +110,7 @@ impl ApiTester { initialized_validators, slashing_protection, Hash256::repeat_byte(42), - spec, + spec.clone(), Some(Arc::new(DoppelgangerService::new(log.clone()))), slot_clock.clone(), &config, @@ -132,7 +132,7 @@ impl ApiTester { validator_store: Some(validator_store.clone()), graffiti_file: None, graffiti_flag: Some(Graffiti::default()), - spec: E::default_spec(), + spec, config: http_config, log, sse_logging_components: None, diff --git a/validator_client/src/http_api/tests.rs b/validator_client/src/http_api/tests.rs index ce1937d437..98fbc854ae 100644 --- a/validator_client/src/http_api/tests.rs +++ 
b/validator_client/src/http_api/tests.rs @@ -80,7 +80,7 @@ impl ApiTester { config.validator_dir = validator_dir.path().into(); config.secrets_dir = secrets_dir.path().into(); - let spec = E::default_spec(); + let spec = Arc::new(E::default_spec()); let slashing_db_path = config.validator_dir.join(SLASHING_PROTECTION_FILENAME); let slashing_protection = SlashingDatabase::open_or_create(&slashing_db_path).unwrap(); @@ -120,7 +120,7 @@ impl ApiTester { validator_store: Some(validator_store.clone()), graffiti_file: None, graffiti_flag: Some(Graffiti::default()), - spec: E::default_spec(), + spec: E::default_spec().into(), config: HttpConfig { enabled: true, listen_addr: IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), diff --git a/validator_client/src/validator_store.rs b/validator_client/src/validator_store.rs index 8a9e125936..6753c50cff 100644 --- a/validator_client/src/validator_store.rs +++ b/validator_client/src/validator_store.rs @@ -85,7 +85,7 @@ impl ValidatorStore { validators: InitializedValidators, slashing_protection: SlashingDatabase, genesis_validators_root: Hash256, - spec: ChainSpec, + spec: Arc, doppelganger_service: Option>, slot_clock: T, config: &Config, @@ -97,7 +97,7 @@ impl ValidatorStore { slashing_protection, slashing_protection_last_prune: Arc::new(Mutex::new(Epoch::new(0))), genesis_validators_root, - spec: Arc::new(spec), + spec, log, doppelganger_service, slot_clock, From 279270533128ef8069765d04c280db993db12905 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 24 Sep 2024 14:52:44 +1000 Subject: [PATCH 29/66] Lenient duplicate checks on HTTP API for block publication (#5574) * start splitting gossip verification * WIP * Gossip verify separate (#7) * save * save * make ProvenancedBlock concrete * delete into gossip verified block contents * get rid of IntoBlobSidecar trait * remove IntoGossipVerified trait * get tests compiling * don't check sidecar slashability in publish * remove second publish closure * drop blob bool. 
also prefer using message index over index of position in list * Merge remote-tracking branch 'origin/unstable' into gossip-verify-separate * Fix low-hanging tests * Fix tests and clean up * Clean up imports * more cleanup * Merge remote-tracking branch 'origin/unstable' into gossip-verify-separate * Further refine behaviour and add tests * Merge remote-tracking branch 'origin/unstable' into gossip-verify-separate * Merge remote-tracking branch 'origin/unstable' into gossip-verify-separate * Remove empty line * Fix test (block is not fully imported just gossip verified) * Merge remote-tracking branch 'origin/unstable' into gossip-verify-separate * Update for unstable & use empty blob list * Update comment * Add test for duplicate block case * Merge remote-tracking branch 'origin/unstable' into gossip-verify-separate * Clarify unreachable case * Fix another publish_block case * Remove unreachable case in filter chain segment * Revert unrelated blob optimisation * Merge remote-tracking branch 'origin/unstable' into gossip-verify-separate * Merge remote-tracking branch 'origin/unstable' into gossip-verify-separate * Fix merge conflicts * Merge remote-tracking branch 'origin/unstable' into gossip-verify-separate * Fix some compilation issues. 
Impl is fucked though * Support peerDAS * Fix tests * Merge remote-tracking branch 'origin/unstable' into gossip-verify-separate * Fix conflict * Merge remote-tracking branch 'origin/unstable' into gossip-verify-separate * Address review comments * Merge remote-tracking branch 'origin/unstable' into gossip-verify-separate --- beacon_node/beacon_chain/src/beacon_chain.rs | 47 +- .../beacon_chain/src/block_verification.rs | 155 ++-- .../src/block_verification_types.rs | 72 +- beacon_node/beacon_chain/src/lib.rs | 6 +- beacon_node/beacon_chain/src/test_utils.rs | 4 +- .../beacon_chain/tests/block_verification.rs | 8 +- beacon_node/beacon_chain/tests/events.rs | 2 +- beacon_node/http_api/src/lib.rs | 8 +- beacon_node/http_api/src/publish_blocks.rs | 671 ++++++++++++------ beacon_node/http_api/src/test_utils.rs | 20 +- .../tests/broadcast_validation_tests.rs | 507 +++++++++++-- beacon_node/http_api/tests/fork_tests.rs | 1 + .../http_api/tests/interactive_tests.rs | 2 + .../gossip_methods.rs | 16 +- .../network_beacon_processor/sync_methods.rs | 7 +- .../network/src/sync/block_lookups/mod.rs | 12 +- .../network/src/sync/block_lookups/tests.rs | 4 +- .../network/src/sync/network_context.rs | 4 +- common/eth2/src/types.rs | 36 - consensus/types/src/data_column_sidecar.rs | 1 + testing/ef_tests/src/cases/fork_choice.rs | 4 +- 21 files changed, 1071 insertions(+), 516 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 515b65b1af..5d287e2b68 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -2740,7 +2740,10 @@ impl BeaconChain { // If the block is relevant, add it to the filtered chain segment. Ok(_) => filtered_chain_segment.push((block_root, block)), // If the block is already known, simply ignore this block. 
- Err(BlockError::BlockIsAlreadyKnown(_)) => continue, + // + // Note that `check_block_relevancy` is incapable of returning + // `DuplicateImportStatusUnknown` so we don't need to handle that case here. + Err(BlockError::DuplicateFullyImported(_)) => continue, // If the block is the genesis block, simply ignore this block. Err(BlockError::GenesisBlock) => continue, // If the block is is for a finalized slot, simply ignore this block. @@ -2886,7 +2889,7 @@ impl BeaconChain { } } } - Err(BlockError::BlockIsAlreadyKnown(block_root)) => { + Err(BlockError::DuplicateFullyImported(block_root)) => { debug!(self.log, "Ignoring already known blocks while processing chain segment"; "block_root" => ?block_root); @@ -2977,6 +2980,7 @@ impl BeaconChain { pub async fn process_gossip_blob( self: &Arc, blob: GossipVerifiedBlob, + publish_fn: impl FnOnce() -> Result<(), BlockError>, ) -> Result { let block_root = blob.block_root(); @@ -2987,7 +2991,7 @@ impl BeaconChain { .fork_choice_read_lock() .contains_block(&block_root) { - return Err(BlockError::BlockIsAlreadyKnown(blob.block_root())); + return Err(BlockError::DuplicateFullyImported(blob.block_root())); } // No need to process and import blobs beyond the PeerDAS epoch. 
@@ -3003,7 +3007,9 @@ impl BeaconChain { } } - let r = self.check_gossip_blob_availability_and_import(blob).await; + let r = self + .check_gossip_blob_availability_and_import(blob, publish_fn) + .await; self.remove_notified(&block_root, r) } @@ -3012,6 +3018,7 @@ impl BeaconChain { pub async fn process_gossip_data_columns( self: &Arc, data_columns: Vec>, + publish_fn: impl FnOnce() -> Result<(), BlockError>, ) -> Result< ( AvailabilityProcessingStatus, @@ -3037,11 +3044,16 @@ impl BeaconChain { .fork_choice_read_lock() .contains_block(&block_root) { - return Err(BlockError::BlockIsAlreadyKnown(block_root)); + return Err(BlockError::DuplicateFullyImported(block_root)); } let r = self - .check_gossip_data_columns_availability_and_import(slot, block_root, data_columns) + .check_gossip_data_columns_availability_and_import( + slot, + block_root, + data_columns, + publish_fn, + ) .await; self.remove_notified_custody_columns(&block_root, r) } @@ -3061,7 +3073,7 @@ impl BeaconChain { .fork_choice_read_lock() .contains_block(&block_root) { - return Err(BlockError::BlockIsAlreadyKnown(block_root)); + return Err(BlockError::DuplicateFullyImported(block_root)); } // Reject RPC blobs referencing unknown parents. Otherwise we allow potentially invalid data @@ -3127,7 +3139,7 @@ impl BeaconChain { .fork_choice_read_lock() .contains_block(&block_root) { - return Err(BlockError::BlockIsAlreadyKnown(block_root)); + return Err(BlockError::DuplicateFullyImported(block_root)); } // Reject RPC columns referencing unknown parents. Otherwise we allow potentially invalid data @@ -3225,7 +3237,7 @@ impl BeaconChain { unverified_block: B, notify_execution_layer: NotifyExecutionLayer, block_source: BlockImportSource, - publish_fn: impl FnOnce() -> Result<(), BlockError> + Send + 'static, + publish_fn: impl FnOnce() -> Result<(), BlockError>, ) -> Result { // Start the Prometheus timer. 
let _full_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_TIMES); @@ -3407,7 +3419,8 @@ impl BeaconChain { let availability = self .data_availability_checker .put_pending_executed_block(block)?; - self.process_availability(slot, availability).await + self.process_availability(slot, availability, || Ok(())) + .await } /// Checks if the provided blob can make any cached blocks available, and imports immediately @@ -3415,6 +3428,7 @@ impl BeaconChain { async fn check_gossip_blob_availability_and_import( self: &Arc, blob: GossipVerifiedBlob, + publish_fn: impl FnOnce() -> Result<(), BlockError>, ) -> Result { let slot = blob.slot(); if let Some(slasher) = self.slasher.as_ref() { @@ -3422,7 +3436,8 @@ impl BeaconChain { } let availability = self.data_availability_checker.put_gossip_blob(blob)?; - self.process_availability(slot, availability).await + self.process_availability(slot, availability, publish_fn) + .await } /// Checks if the provided data column can make any cached blocks available, and imports immediately @@ -3432,6 +3447,7 @@ impl BeaconChain { slot: Slot, block_root: Hash256, data_columns: Vec>, + publish_fn: impl FnOnce() -> Result<(), BlockError>, ) -> Result< ( AvailabilityProcessingStatus, @@ -3449,7 +3465,7 @@ impl BeaconChain { .data_availability_checker .put_gossip_data_columns(slot, block_root, data_columns)?; - self.process_availability(slot, availability) + self.process_availability(slot, availability, publish_fn) .await .map(|result| (result, data_columns_to_publish)) } @@ -3490,7 +3506,8 @@ impl BeaconChain { .data_availability_checker .put_rpc_blobs(block_root, epoch, blobs)?; - self.process_availability(slot, availability).await + self.process_availability(slot, availability, || Ok(())) + .await } /// Checks if the provided columns can make any cached blocks available, and imports immediately @@ -3538,7 +3555,7 @@ impl BeaconChain { custody_columns, )?; - self.process_availability(slot, availability) + self.process_availability(slot, 
availability, || Ok(())) .await .map(|result| (result, data_columns_to_publish)) } @@ -3551,9 +3568,11 @@ impl BeaconChain { self: &Arc, slot: Slot, availability: Availability, + publish_fn: impl FnOnce() -> Result<(), BlockError>, ) -> Result { match availability { Availability::Available(block) => { + publish_fn()?; // Block is fully available, import into fork choice self.import_available_block(block).await } diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 027c013a49..a8233f170f 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -49,14 +49,10 @@ #![allow(clippy::result_large_err)] use crate::beacon_snapshot::PreProcessingSnapshot; -use crate::blob_verification::{GossipBlobError, GossipVerifiedBlob, GossipVerifiedBlobList}; -use crate::block_verification_types::{ - AsBlock, BlockContentsError, BlockImportData, GossipVerifiedBlockContents, RpcBlock, -}; +use crate::blob_verification::GossipBlobError; +use crate::block_verification_types::{AsBlock, BlockImportData, RpcBlock}; use crate::data_availability_checker::{AvailabilityCheckError, MaybeAvailableBlock}; -use crate::data_column_verification::{ - GossipDataColumnError, GossipVerifiedDataColumn, GossipVerifiedDataColumnList, -}; +use crate::data_column_verification::GossipDataColumnError; use crate::eth1_finalization_cache::Eth1FinalizationData; use crate::execution_payload::{ is_optimistic_candidate_block, validate_execution_payload_for_gossip, validate_merge_block, @@ -71,7 +67,7 @@ use crate::{ metrics, BeaconChain, BeaconChainError, BeaconChainTypes, }; use derivative::Derivative; -use eth2::types::{BlockGossip, EventKind, PublishBlockRequest}; +use eth2::types::{BlockGossip, EventKind}; use execution_layer::PayloadStatus; pub use fork_choice::{AttestationFromBlock, PayloadVerificationStatus}; use lighthouse_metrics::TryExt; @@ -82,7 +78,6 @@ use slog::{debug, 
error, warn, Logger}; use slot_clock::SlotClock; use ssz::Encode; use ssz_derive::{Decode, Encode}; -use ssz_types::VariableList; use state_processing::per_block_processing::{errors::IntoWithIndex, is_merge_transition_block}; use state_processing::{ block_signature_verifier::{BlockSignatureVerifier, Error as BlockSignatureVerifierError}, @@ -98,14 +93,12 @@ use std::io::Write; use std::sync::Arc; use store::{Error as DBError, HotStateSummary, KeyValueStore, StoreOp}; use task_executor::JoinHandle; -use types::data_column_sidecar::DataColumnSidecarError; use types::{ - BeaconBlockRef, BeaconState, BeaconStateError, BlobsList, ChainSpec, DataColumnSubnetId, Epoch, - EthSpec, ExecutionBlockHash, FullPayload, Hash256, InconsistentFork, KzgProofs, PublicKey, - PublicKeyBytes, RelativeEpoch, RuntimeVariableList, SignedBeaconBlock, SignedBeaconBlockHeader, - Slot, + data_column_sidecar::DataColumnSidecarError, BeaconBlockRef, BeaconState, BeaconStateError, + BlobsList, ChainSpec, DataColumnSidecarList, Epoch, EthSpec, ExecPayload, ExecutionBlockHash, + FullPayload, Hash256, InconsistentFork, PublicKey, PublicKeyBytes, RelativeEpoch, + SignedBeaconBlock, SignedBeaconBlockHeader, Slot, }; -use types::{BlobSidecar, ExecPayload}; pub const POS_PANDA_BANNER: &str = r#" ,,, ,,, ,,, ,,, @@ -187,12 +180,18 @@ pub enum BlockError { /// It's unclear if this block is valid, but it conflicts with finality and shouldn't be /// imported. NotFinalizedDescendant { block_parent_root: Hash256 }, - /// Block is already known, no need to re-import. + /// Block is already known and valid, no need to re-import. /// /// ## Peer scoring /// /// The block is valid and we have already imported a block with this hash. - BlockIsAlreadyKnown(Hash256), + DuplicateFullyImported(Hash256), + /// Block has already been seen on gossip but has not necessarily finished being imported. + /// + /// ## Peer scoring + /// + /// The block could be valid, or invalid. We don't know. 
+ DuplicateImportStatusUnknown(Hash256), /// The block slot exceeds the MAXIMUM_BLOCK_SLOT_NUMBER. /// /// ## Peer scoring @@ -704,115 +703,57 @@ pub struct ExecutionPendingBlock { pub payload_verification_handle: PayloadVerificationHandle, } -pub trait IntoGossipVerifiedBlockContents: Sized { +pub trait IntoGossipVerifiedBlock: Sized { fn into_gossip_verified_block( self, chain: &BeaconChain, - ) -> Result, BlockContentsError>; - fn inner_block(&self) -> &SignedBeaconBlock; + ) -> Result, BlockError>; + fn inner_block(&self) -> Arc>; } -impl IntoGossipVerifiedBlockContents for GossipVerifiedBlockContents { +impl IntoGossipVerifiedBlock for GossipVerifiedBlock { fn into_gossip_verified_block( self, _chain: &BeaconChain, - ) -> Result, BlockContentsError> { + ) -> Result, BlockError> { Ok(self) } - fn inner_block(&self) -> &SignedBeaconBlock { - self.0.block.as_block() + fn inner_block(&self) -> Arc> { + self.block_cloned() } } -impl IntoGossipVerifiedBlockContents for PublishBlockRequest { +impl IntoGossipVerifiedBlock for Arc> { fn into_gossip_verified_block( self, chain: &BeaconChain, - ) -> Result, BlockContentsError> { - let (block, blobs) = self.deconstruct(); - let peer_das_enabled = chain.spec.is_peer_das_enabled_for_epoch(block.epoch()); - - let (gossip_verified_blobs, gossip_verified_data_columns) = if peer_das_enabled { - let gossip_verified_data_columns = - build_gossip_verified_data_columns(chain, &block, blobs.map(|(_, blobs)| blobs))?; - (None, gossip_verified_data_columns) - } else { - let gossip_verified_blobs = build_gossip_verified_blobs(chain, &block, blobs)?; - (gossip_verified_blobs, None) - }; - - let gossip_verified_block = GossipVerifiedBlock::new(block, chain)?; - - Ok(( - gossip_verified_block, - gossip_verified_blobs, - gossip_verified_data_columns, - )) + ) -> Result, BlockError> { + GossipVerifiedBlock::new(self, chain) } - fn inner_block(&self) -> &SignedBeaconBlock { - self.signed_block() + fn inner_block(&self) -> Arc> { + 
self.clone() } } -#[allow(clippy::type_complexity)] -fn build_gossip_verified_blobs( - chain: &BeaconChain, - block: &Arc>>, - blobs: Option<(KzgProofs, BlobsList)>, -) -> Result>, BlockContentsError> { - blobs - .map(|(kzg_proofs, blobs)| { - let mut gossip_verified_blobs = vec![]; - for (i, (kzg_proof, blob)) in kzg_proofs.iter().zip(blobs).enumerate() { - let _timer = - metrics::start_timer(&metrics::BLOB_SIDECAR_INCLUSION_PROOF_COMPUTATION); - let blob = BlobSidecar::new(i, blob, block, *kzg_proof) - .map_err(BlockContentsError::BlobSidecarError)?; - drop(_timer); - let gossip_verified_blob = - GossipVerifiedBlob::new(Arc::new(blob), i as u64, chain)?; - gossip_verified_blobs.push(gossip_verified_blob); - } - let gossip_verified_blobs = VariableList::from(gossip_verified_blobs); - Ok::<_, BlockContentsError>(gossip_verified_blobs) - }) - .transpose() -} - -fn build_gossip_verified_data_columns( +pub fn build_blob_data_column_sidecars( chain: &BeaconChain, block: &SignedBeaconBlock>, - blobs: Option>, -) -> Result>, BlockContentsError> { - blobs - // Only attempt to build data columns if blobs is non empty to avoid skewing the metrics. 
- .filter(|b| !b.is_empty()) - .map(|blobs| { - let mut timer = metrics::start_timer_vec( - &metrics::DATA_COLUMN_SIDECAR_COMPUTATION, - &[&blobs.len().to_string()], - ); - let sidecars = blobs_to_data_column_sidecars(&blobs, block, &chain.kzg, &chain.spec) - .discard_timer_on_break(&mut timer)?; - drop(timer); - let mut gossip_verified_data_columns = vec![]; - for sidecar in sidecars { - let subnet = DataColumnSubnetId::from_column_index::( - sidecar.index as usize, - &chain.spec, - ); - let column = GossipVerifiedDataColumn::new(sidecar, subnet.into(), chain)?; - gossip_verified_data_columns.push(column); - } - let gossip_verified_data_columns = RuntimeVariableList::new( - gossip_verified_data_columns, - chain.spec.number_of_columns, - ) - .map_err(DataColumnSidecarError::SszError)?; - Ok::<_, BlockContentsError>(gossip_verified_data_columns) - }) - .transpose() + blobs: BlobsList, +) -> Result, DataColumnSidecarError> { + // Only attempt to build data columns if blobs is non empty to avoid skewing the metrics. + if blobs.is_empty() { + return Ok(vec![]); + } + + let mut timer = metrics::start_timer_vec( + &metrics::DATA_COLUMN_SIDECAR_COMPUTATION, + &[&blobs.len().to_string()], + ); + let sidecars = blobs_to_data_column_sidecars(&blobs, block, &chain.kzg, &chain.spec) + .discard_timer_on_break(&mut timer)?; + drop(timer); + Ok(sidecars) } /// Implemented on types that can be converted into a `ExecutionPendingBlock`. @@ -912,7 +853,7 @@ impl GossipVerifiedBlock { // already know this block. let fork_choice_read_lock = chain.canonical_head.fork_choice_read_lock(); if fork_choice_read_lock.contains_block(&block_root) { - return Err(BlockError::BlockIsAlreadyKnown(block_root)); + return Err(BlockError::DuplicateFullyImported(block_root)); } // Do not process a block that doesn't descend from the finalized root. 
@@ -1046,7 +987,9 @@ impl GossipVerifiedBlock { SeenBlock::Slashable => { return Err(BlockError::Slashable); } - SeenBlock::Duplicate => return Err(BlockError::BlockIsAlreadyKnown(block_root)), + SeenBlock::Duplicate => { + return Err(BlockError::DuplicateImportStatusUnknown(block_root)) + } SeenBlock::UniqueNonSlashable => {} }; @@ -1894,7 +1837,7 @@ pub fn check_block_relevancy( .fork_choice_read_lock() .contains_block(&block_root) { - return Err(BlockError::BlockIsAlreadyKnown(block_root)); + return Err(BlockError::DuplicateFullyImported(block_root)); } Ok(block_root) diff --git a/beacon_node/beacon_chain/src/block_verification_types.rs b/beacon_node/beacon_chain/src/block_verification_types.rs index 707dfa56d8..420c83081c 100644 --- a/beacon_node/beacon_chain/src/block_verification_types.rs +++ b/beacon_node/beacon_chain/src/block_verification_types.rs @@ -1,19 +1,14 @@ -use crate::blob_verification::{GossipBlobError, GossipVerifiedBlobList}; -use crate::block_verification::BlockError; use crate::data_availability_checker::AvailabilityCheckError; pub use crate::data_availability_checker::{AvailableBlock, MaybeAvailableBlock}; -use crate::data_column_verification::{ - CustodyDataColumn, CustodyDataColumnList, GossipDataColumnError, GossipVerifiedDataColumnList, -}; +use crate::data_column_verification::{CustodyDataColumn, CustodyDataColumnList}; use crate::eth1_finalization_cache::Eth1FinalizationData; -use crate::{get_block_root, GossipVerifiedBlock, PayloadVerificationOutcome}; +use crate::{get_block_root, PayloadVerificationOutcome}; use derivative::Derivative; use ssz_types::VariableList; use state_processing::ConsensusContext; use std::fmt::{Debug, Formatter}; use std::sync::Arc; -use types::blob_sidecar::{self, BlobIdentifier, FixedBlobSidecarList}; -use types::data_column_sidecar::{self}; +use types::blob_sidecar::{BlobIdentifier, FixedBlobSidecarList}; use types::{ BeaconBlockRef, BeaconState, BlindedPayload, BlobSidecarList, ChainSpec, Epoch, EthSpec, 
Hash256, RuntimeVariableList, SignedBeaconBlock, SignedBeaconBlockHeader, Slot, @@ -390,67 +385,6 @@ impl BlockImportData { } } -pub type GossipVerifiedBlockContents = ( - GossipVerifiedBlock, - Option>, - Option>, -); - -#[derive(Debug)] -pub enum BlockContentsError { - BlockError(BlockError), - BlobError(GossipBlobError), - BlobSidecarError(blob_sidecar::BlobSidecarError), - DataColumnError(GossipDataColumnError), - DataColumnSidecarError(data_column_sidecar::DataColumnSidecarError), -} - -impl From for BlockContentsError { - fn from(value: BlockError) -> Self { - Self::BlockError(value) - } -} - -impl From for BlockContentsError { - fn from(value: GossipBlobError) -> Self { - Self::BlobError(value) - } -} - -impl From for BlockContentsError { - fn from(value: GossipDataColumnError) -> Self { - Self::DataColumnError(value) - } -} - -impl From for BlockContentsError { - fn from(value: data_column_sidecar::DataColumnSidecarError) -> Self { - Self::DataColumnSidecarError(value) - } -} - -impl std::fmt::Display for BlockContentsError { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - BlockContentsError::BlockError(err) => { - write!(f, "BlockError({})", err) - } - BlockContentsError::BlobError(err) => { - write!(f, "BlobError({})", err) - } - BlockContentsError::BlobSidecarError(err) => { - write!(f, "BlobSidecarError({:?})", err) - } - BlockContentsError::DataColumnError(err) => { - write!(f, "DataColumnError({:?})", err) - } - BlockContentsError::DataColumnSidecarError(err) => { - write!(f, "DataColumnSidecarError({:?})", err) - } - } - } -} - /// Trait for common block operations. 
pub trait AsBlock { fn slot(&self) -> Slot; diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 7bfb5b08be..b89c00e0af 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -75,9 +75,9 @@ pub use self::historical_blocks::HistoricalBlockError; pub use attestation_verification::Error as AttestationError; pub use beacon_fork_choice_store::{BeaconForkChoiceStore, Error as ForkChoiceStoreError}; pub use block_verification::{ - get_block_root, BlockError, ExecutionPayloadError, ExecutionPendingBlock, GossipVerifiedBlock, - IntoExecutionPendingBlock, IntoGossipVerifiedBlockContents, PayloadVerificationOutcome, - PayloadVerificationStatus, + build_blob_data_column_sidecars, get_block_root, BlockError, ExecutionPayloadError, + ExecutionPendingBlock, GossipVerifiedBlock, IntoExecutionPendingBlock, IntoGossipVerifiedBlock, + PayloadVerificationOutcome, PayloadVerificationStatus, }; pub use block_verification_types::AvailabilityPendingExecutedBlock; pub use block_verification_types::ExecutedBlock; diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 582d20637b..ce36c8ca21 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -878,7 +878,7 @@ where let proposer_index = state.get_beacon_proposer_index(slot, &self.spec).unwrap(); // If we produce two blocks for the same slot, they hash up to the same value and - // BeaconChain errors out with `BlockIsAlreadyKnown`. Vary the graffiti so that we produce + // BeaconChain errors out with `DuplicateFullyImported`. Vary the graffiti so that we produce // different blocks each time. 
let graffiti = Graffiti::from(self.rng.lock().gen::<[u8; 32]>()); @@ -940,7 +940,7 @@ where let proposer_index = state.get_beacon_proposer_index(slot, &self.spec).unwrap(); // If we produce two blocks for the same slot, they hash up to the same value and - // BeaconChain errors out with `BlockIsAlreadyKnown`. Vary the graffiti so that we produce + // BeaconChain errors out with `DuplicateFullyImported`. Vary the graffiti so that we produce // different blocks each time. let graffiti = Graffiti::from(self.rng.lock().gen::<[u8; 32]>()); diff --git a/beacon_node/beacon_chain/tests/block_verification.rs b/beacon_node/beacon_chain/tests/block_verification.rs index 535d63427a..d239f5089a 100644 --- a/beacon_node/beacon_chain/tests/block_verification.rs +++ b/beacon_node/beacon_chain/tests/block_verification.rs @@ -976,7 +976,7 @@ async fn block_gossip_verification() { harness .chain - .process_gossip_blob(gossip_verified) + .process_gossip_blob(gossip_verified, || Ok(())) .await .expect("should import valid gossip verified blob"); } @@ -1173,7 +1173,7 @@ async fn block_gossip_verification() { assert!( matches!( unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(block.clone())).await), - BlockError::BlockIsAlreadyKnown(_), + BlockError::DuplicateImportStatusUnknown(_), ), "should register any valid signature against the proposer, even if the block failed later verification" ); @@ -1201,7 +1201,7 @@ async fn block_gossip_verification() { .verify_block_for_gossip(block.clone()) .await .expect_err("should error when processing known block"), - BlockError::BlockIsAlreadyKnown(_) + BlockError::DuplicateImportStatusUnknown(_) ), "the second proposal by this validator should be rejected" ); @@ -1247,7 +1247,7 @@ async fn verify_block_for_gossip_slashing_detection() { .unwrap(); harness .chain - .process_gossip_blob(verified_blob) + .process_gossip_blob(verified_blob, || Ok(())) .await .unwrap(); } diff --git a/beacon_node/beacon_chain/tests/events.rs 
b/beacon_node/beacon_chain/tests/events.rs index ab784d3be4..31e69f0524 100644 --- a/beacon_node/beacon_chain/tests/events.rs +++ b/beacon_node/beacon_chain/tests/events.rs @@ -35,7 +35,7 @@ async fn blob_sidecar_event_on_process_gossip_blob() { let _ = harness .chain - .process_gossip_blob(gossip_verified_blob) + .process_gossip_blob(gossip_verified_blob, || Ok(())) .await .unwrap(); diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 15d463b661..ffcfda4680 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -1298,7 +1298,7 @@ pub fn serve( task_spawner.spawn_async_with_rejection(Priority::P0, async move { publish_blocks::publish_block( None, - ProvenancedBlock::local(block_contents), + ProvenancedBlock::local_from_publish_request(block_contents), chain, &network_tx, log, @@ -1340,7 +1340,7 @@ pub fn serve( })?; publish_blocks::publish_block( None, - ProvenancedBlock::local(block_contents), + ProvenancedBlock::local_from_publish_request(block_contents), chain, &network_tx, log, @@ -1375,7 +1375,7 @@ pub fn serve( task_spawner.spawn_async_with_rejection(Priority::P0, async move { publish_blocks::publish_block( None, - ProvenancedBlock::local(block_contents), + ProvenancedBlock::local_from_publish_request(block_contents), chain, &network_tx, log, @@ -1419,7 +1419,7 @@ pub fn serve( })?; publish_blocks::publish_block( None, - ProvenancedBlock::local(block_contents), + ProvenancedBlock::local_from_publish_request(block_contents), chain, &network_tx, log, diff --git a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs index ad7cb3081e..16364b435a 100644 --- a/beacon_node/http_api/src/publish_blocks.rs +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -1,13 +1,17 @@ use crate::metrics; -use beacon_chain::block_verification_types::{AsBlock, BlockContentsError}; +use beacon_chain::blob_verification::{GossipBlobError, GossipVerifiedBlob}; +use 
beacon_chain::block_verification_types::AsBlock; +use beacon_chain::data_column_verification::{GossipDataColumnError, GossipVerifiedDataColumn}; use beacon_chain::validator_monitor::{get_block_delay_ms, timestamp_now}; use beacon_chain::{ - AvailabilityProcessingStatus, BeaconChain, BeaconChainError, BeaconChainTypes, BlockError, - IntoGossipVerifiedBlockContents, NotifyExecutionLayer, + build_blob_data_column_sidecars, AvailabilityProcessingStatus, BeaconChain, BeaconChainError, + BeaconChainTypes, BlockError, IntoGossipVerifiedBlock, NotifyExecutionLayer, +}; +use eth2::types::{ + BlobsBundle, BroadcastValidation, ErrorMessage, ExecutionPayloadAndBlobs, FullPayloadContents, + PublishBlockRequest, SignedBlockContents, }; -use eth2::types::{into_full_block_and_blobs, BroadcastValidation, ErrorMessage}; -use eth2::types::{FullPayloadContents, PublishBlockRequest}; use execution_layer::ProvenancedPayload; use lighthouse_network::{NetworkGlobals, PubsubMessage}; use network::NetworkMessage; @@ -15,39 +19,62 @@ use rand::seq::SliceRandom; use slog::{debug, error, info, warn, Logger}; use slot_clock::SlotClock; use std::marker::PhantomData; +use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; use std::time::Duration; use tokio::sync::mpsc::UnboundedSender; use tree_hash::TreeHash; use types::{ - AbstractExecPayload, BeaconBlockRef, BlobSidecarList, BlockImportSource, DataColumnSidecarList, - DataColumnSubnetId, EthSpec, ExecPayload, ExecutionBlockHash, ForkName, FullPayload, - FullPayloadBellatrix, Hash256, SignedBeaconBlock, SignedBlindedBeaconBlock, VariableList, + AbstractExecPayload, BeaconBlockRef, BlobSidecar, BlobsList, BlockImportSource, + DataColumnSidecarList, DataColumnSubnetId, EthSpec, ExecPayload, ExecutionBlockHash, ForkName, + FullPayload, FullPayloadBellatrix, Hash256, KzgProofs, SignedBeaconBlock, + SignedBlindedBeaconBlock, }; use warp::http::StatusCode; use warp::{reply::Response, Rejection, Reply}; -pub enum ProvenancedBlock> { +pub 
type UnverifiedBlobs = Option<( + KzgProofs<::EthSpec>, + BlobsList<::EthSpec>, +)>; + +pub enum ProvenancedBlock> { /// The payload was built using a local EE. - Local(B, PhantomData), + Local(B, UnverifiedBlobs, PhantomData), /// The payload was build using a remote builder (e.g., via a mev-boost /// compatible relay). - Builder(B, PhantomData), + Builder(B, UnverifiedBlobs, PhantomData), } -impl> ProvenancedBlock { - pub fn local(block: B) -> Self { - Self::Local(block, PhantomData) +impl> ProvenancedBlock { + pub fn local(block: B, blobs: UnverifiedBlobs) -> Self { + Self::Local(block, blobs, PhantomData) } - pub fn builder(block: B) -> Self { - Self::Builder(block, PhantomData) + pub fn builder(block: B, blobs: UnverifiedBlobs) -> Self { + Self::Builder(block, blobs, PhantomData) + } +} + +impl ProvenancedBlock>> { + pub fn local_from_publish_request(request: PublishBlockRequest) -> Self { + match request { + PublishBlockRequest::Block(block) => Self::local(block, None), + PublishBlockRequest::BlockContents(block_contents) => { + let SignedBlockContents { + signed_block, + kzg_proofs, + blobs, + } = block_contents; + Self::local(signed_block, Some((kzg_proofs, blobs))) + } + } } } /// Handles a request from the HTTP API for full blocks. 
#[allow(clippy::too_many_arguments)] -pub async fn publish_block>( +pub async fn publish_block>( block_root: Option, provenanced_block: ProvenancedBlock, chain: Arc>, @@ -59,28 +86,29 @@ pub async fn publish_block Result { let seen_timestamp = timestamp_now(); - let (block_contents, is_locally_built_block) = match provenanced_block { - ProvenancedBlock::Local(block_contents, _) => (block_contents, true), - ProvenancedBlock::Builder(block_contents, _) => (block_contents, false), + let (unverified_block, unverified_blobs, is_locally_built_block) = match provenanced_block { + ProvenancedBlock::Local(block, blobs, _) => (block, blobs, true), + ProvenancedBlock::Builder(block, blobs, _) => (block, blobs, false), }; let provenance = if is_locally_built_block { "local" } else { "builder" }; - let block = block_contents.inner_block().clone(); - let delay = get_block_delay_ms(seen_timestamp, block.message(), &chain.slot_clock); + let block = unverified_block.inner_block(); debug!(log, "Signed block received in HTTP API"; "slot" => block.slot()); let malicious_withhold_count = chain.config.malicious_withhold_count; let chain_cloned = chain.clone(); /* actually publish a block */ - let publish_block = move |block: Arc>, - blobs_opt: Option>, - data_cols_opt: Option>, - sender, - log, - seen_timestamp| { + let publish_block_p2p = move |block: Arc>, + should_publish_block: bool, + blob_sidecars: Vec>>, + mut data_column_sidecars: DataColumnSidecarList, + sender, + log, + seen_timestamp| + -> Result<(), BlockError> { let publish_timestamp = timestamp_now(); let publish_delay = publish_timestamp .checked_sub(seen_timestamp) @@ -92,55 +120,48 @@ pub async fn publish_block block.slot(), - "publish_delay_ms" => publish_delay.as_millis() - ); + let mut pubsub_messages = if should_publish_block { + info!( + log, + "Signed block published to network via HTTP API"; + "slot" => block.slot(), + "blobs_published" => blob_sidecars.len(), + "publish_delay_ms" => publish_delay.as_millis(), + 
); + vec![PubsubMessage::BeaconBlock(block.clone())] + } else { + vec![] + }; match block.as_ref() { SignedBeaconBlock::Base(_) | SignedBeaconBlock::Altair(_) | SignedBeaconBlock::Bellatrix(_) | SignedBeaconBlock::Capella(_) => { - crate::publish_pubsub_message(&sender, PubsubMessage::BeaconBlock(block)) + crate::publish_pubsub_messages(&sender, pubsub_messages) .map_err(|_| BlockError::BeaconChainError(BeaconChainError::UnableToPublish))?; } SignedBeaconBlock::Deneb(_) | SignedBeaconBlock::Electra(_) => { - let mut pubsub_messages = vec![PubsubMessage::BeaconBlock(block)]; - if let Some(blob_sidecars) = blobs_opt { - // Publish blob sidecars - for (blob_index, blob) in blob_sidecars.into_iter().enumerate() { - pubsub_messages.push(PubsubMessage::BlobSidecar(Box::new(( - blob_index as u64, - blob, - )))); - } + for blob in blob_sidecars.into_iter() { + pubsub_messages.push(PubsubMessage::BlobSidecar(Box::new((blob.index, blob)))); + } + if malicious_withhold_count > 0 { + let columns_to_keep = data_column_sidecars + .len() + .saturating_sub(malicious_withhold_count); + // Randomize columns before dropping the last malicious_withhold_count items + data_column_sidecars.shuffle(&mut rand::thread_rng()); + drop(data_column_sidecars.drain(columns_to_keep..)); } - if let Some(data_col_sidecars) = data_cols_opt { - let mut data_col_sidecars = data_col_sidecars.to_vec(); - if malicious_withhold_count > 0 { - let columns_to_keep = data_col_sidecars - .len() - .saturating_sub(malicious_withhold_count); - // Randomize columns before dropping the last malicious_withhold_count items - data_col_sidecars.shuffle(&mut rand::thread_rng()); - data_col_sidecars = data_col_sidecars - .into_iter() - .take(columns_to_keep) - .collect::>(); - } - for data_col in data_col_sidecars { - let subnet = DataColumnSubnetId::from_column_index::( - data_col.index as usize, - &chain_cloned.spec, - ); - pubsub_messages.push(PubsubMessage::DataColumnSidecar(Box::new(( - subnet, data_col, - )))); - } 
+ for data_col in data_column_sidecars { + let subnet = DataColumnSubnetId::from_column_index::( + data_col.index as usize, + &chain_cloned.spec, + ); + pubsub_messages.push(PubsubMessage::DataColumnSidecar(Box::new(( + subnet, data_col, + )))); } crate::publish_pubsub_messages(&sender, pubsub_messages) .map_err(|_| BlockError::BeaconChainError(BeaconChainError::UnableToPublish))?; @@ -150,72 +171,162 @@ pub async fn publish_block b, - Err(BlockContentsError::BlockError(BlockError::BlockIsAlreadyKnown(_))) - | Err(BlockContentsError::BlobError( - beacon_chain::blob_verification::GossipBlobError::RepeatBlob { .. }, - )) => { - // Allow the status code for duplicate blocks to be overridden based on config. - return Ok(warp::reply::with_status( - warp::reply::json(&ErrorMessage { - code: duplicate_status_code.as_u16(), - message: "duplicate block".to_string(), - stacktraces: vec![], - }), - duplicate_status_code, - ) - .into_response()); - } - Err(e) => { - warn!( - log, - "Not publishing block - not gossip verified"; - "slot" => slot, - "error" => %e - ); - return Err(warp_utils::reject::custom_bad_request(e.to_string())); - } - }; + // Convert blobs to either: + // + // 1. Blob sidecars if prior to peer DAS, or + // 2. Data column sidecars if post peer DAS. + let peer_das_enabled = chain.spec.is_peer_das_enabled_for_epoch(block.epoch()); - // TODO(das): We could potentially get rid of these conversions and pass `GossipVerified` types - // to `publish_block`, i.e. have `GossipVerified` types in `PubsubMessage`? - // This saves us from extra code and provides guarantee that published - // components are verified. - // Clone here, so we can take advantage of the `Arc`. The block in `BlockContents` is not, - // `Arc`'d but blobs are. 
- let block = gossip_verified_block.block.block_cloned(); - let blobs_opt = gossip_verified_blobs.as_ref().map(|gossip_verified_blobs| { - let blobs = gossip_verified_blobs - .into_iter() - .map(|b| b.clone_blob()) - .collect::>(); - VariableList::from(blobs) - }); - let data_cols_opt = gossip_verified_data_columns - .as_ref() - .map(|gossip_verified_data_columns| { - gossip_verified_data_columns + let (blob_sidecars, data_column_sidecars) = match unverified_blobs { + // Pre-PeerDAS: construct blob sidecars for the network. + Some((kzg_proofs, blobs)) if !peer_das_enabled => { + let blob_sidecars = kzg_proofs .into_iter() - .map(|col| col.clone_data_column()) - .collect::>() - }); + .zip(blobs) + .enumerate() + .map(|(i, (proof, unverified_blob))| { + let _timer = metrics::start_timer( + &beacon_chain::metrics::BLOB_SIDECAR_INCLUSION_PROOF_COMPUTATION, + ); + let blob_sidecar = + BlobSidecar::new(i, unverified_blob, &block, proof).map(Arc::new); + blob_sidecar.map_err(|e| { + error!( + log, + "Invalid blob - not publishing block"; + "error" => ?e, + "blob_index" => i, + "slot" => slot, + ); + warp_utils::reject::custom_bad_request(format!("{e:?}")) + }) + }) + .collect::, Rejection>>()?; + (blob_sidecars, vec![]) + } + // Post PeerDAS: construct data columns. + Some((_, blobs)) => { + // TODO(das): this is sub-optimal and should likely not be happening prior to gossip + // block publishing. + let data_column_sidecars = build_blob_data_column_sidecars(&chain, &block, blobs) + .map_err(|e| { + error!( + log, + "Invalid data column - not publishing block"; + "error" => ?e, + "slot" => slot + ); + warp_utils::reject::custom_bad_request(format!("{e:?}")) + })?; + (vec![], data_column_sidecars) + } + None => (vec![], vec![]), + }; - let block_root = block_root.unwrap_or(gossip_verified_block.block_root); + // Gossip verify the block and blobs/data columns separately. 
+ let gossip_verified_block_result = unverified_block.into_gossip_verified_block(&chain); + let gossip_verified_blobs = blob_sidecars + .into_iter() + .map(|blob_sidecar| { + let gossip_verified_blob = + GossipVerifiedBlob::new(blob_sidecar.clone(), blob_sidecar.index, &chain); + match gossip_verified_blob { + Ok(blob) => Ok(Some(blob)), + Err(GossipBlobError::RepeatBlob { proposer, .. }) => { + // Log the error but do not abort publication, we may need to publish the block + // or some of the other blobs if the block & blobs are only partially published + // by the other publisher. + debug!( + log, + "Blob for publication already known"; + "blob_index" => blob_sidecar.index, + "slot" => slot, + "proposer" => proposer, + ); + Ok(None) + } + Err(e) => { + error!( + log, + "Blob for publication is gossip-invalid"; + "blob_index" => blob_sidecar.index, + "slot" => slot, + "error" => ?e, + ); + Err(warp_utils::reject::custom_bad_request(e.to_string())) + } + } + }) + .collect::, Rejection>>()?; + + let gossip_verified_data_columns = data_column_sidecars + .into_iter() + .map(|data_column_sidecar| { + let column_index = data_column_sidecar.index as usize; + let subnet = + DataColumnSubnetId::from_column_index::(column_index, &chain.spec); + let gossip_verified_column = + GossipVerifiedDataColumn::new(data_column_sidecar, subnet.into(), &chain); + + match gossip_verified_column { + Ok(blob) => Ok(Some(blob)), + Err(GossipDataColumnError::PriorKnown { proposer, .. }) => { + // Log the error but do not abort publication, we may need to publish the block + // or some of the other data columns if the block & data columns are only + // partially published by the other publisher. 
+ debug!( + log, + "Data column for publication already known"; + "column_index" => column_index, + "slot" => slot, + "proposer" => proposer, + ); + Ok(None) + } + Err(e) => { + error!( + log, + "Data column for publication is gossip-invalid"; + "column_index" => column_index, + "slot" => slot, + "error" => ?e, + ); + Err(warp_utils::reject::custom_bad_request(format!("{e:?}"))) + } + } + }) + .collect::, Rejection>>()?; + + let publishable_blobs = gossip_verified_blobs + .iter() + .flatten() + .map(|b| b.clone_blob()) + .collect::>(); + + let publishable_data_columns = gossip_verified_data_columns + .iter() + .flatten() + .map(|b| b.clone_data_column()) + .collect::>(); + + let block_root = block_root.unwrap_or_else(|| { + gossip_verified_block_result.as_ref().map_or_else( + |_| block.canonical_root(), + |verified_block| verified_block.block_root, + ) + }); + + let should_publish_block = gossip_verified_block_result.is_ok(); if let BroadcastValidation::Gossip = validation_level { - publish_block( + publish_block_p2p( block.clone(), - blobs_opt.clone(), - data_cols_opt.clone(), + should_publish_block, + publishable_blobs.clone(), + publishable_data_columns.clone(), sender_clone.clone(), log.clone(), seen_timestamp, @@ -223,65 +334,42 @@ pub async fn publish_block Ok(()), - BroadcastValidation::Consensus => publish_block( - block_clone, - blobs_opt, - data_cols_opt, - sender_clone, - log_clone, - seen_timestamp, - ), - BroadcastValidation::ConsensusAndEquivocation => { - check_slashable( - &chain_clone, - &blobs_opt, - block_root, - &block_clone, - &log_clone, - )?; - publish_block( - block_clone, - blobs_opt, - data_cols_opt, - sender_clone, - log_clone, + let publish_fn_completed = Arc::new(AtomicBool::new(false)); + let block_to_publish = block.clone(); + let publish_fn = || { + match validation_level { + BroadcastValidation::Gossip => (), + BroadcastValidation::Consensus => publish_block_p2p( + block_to_publish.clone(), + should_publish_block, + 
publishable_blobs.clone(), + publishable_data_columns.clone(), + sender_clone.clone(), + log.clone(), seen_timestamp, - ) - } + )?, + BroadcastValidation::ConsensusAndEquivocation => { + check_slashable(&chain, block_root, &block_to_publish, &log)?; + publish_block_p2p( + block_to_publish.clone(), + should_publish_block, + publishable_blobs.clone(), + publishable_data_columns.clone(), + sender_clone.clone(), + log.clone(), + seen_timestamp, + )?; + } + }; + publish_fn_completed.store(true, Ordering::SeqCst); + Ok(()) }; - if let Some(gossip_verified_blobs) = gossip_verified_blobs { - for blob in gossip_verified_blobs { - if let Err(e) = Box::pin(chain.process_gossip_blob(blob)).await { - let msg = format!("Invalid blob: {e}"); - return if let BroadcastValidation::Gossip = validation_level { - Err(warp_utils::reject::broadcast_without_import(msg)) - } else { - error!( - log, - "Invalid blob provided to HTTP API"; - "reason" => &msg - ); - Err(warp_utils::reject::custom_bad_request(msg)) - }; - } - } - } - - if let Some(gossip_verified_data_columns) = gossip_verified_data_columns { - let custody_columns_indices = &network_globals.custody_columns; - - let custody_columns = gossip_verified_data_columns - .into_iter() - .filter(|data_column| custody_columns_indices.contains(&data_column.index())) - .collect(); - - if let Err(e) = Box::pin(chain.process_gossip_data_columns(custody_columns)).await { - let msg = format!("Invalid data column: {e}"); + for blob in gossip_verified_blobs.into_iter().flatten() { + // Importing the blobs could trigger block import and network publication in the case + // where the block was already seen on gossip. 
+ if let Err(e) = Box::pin(chain.process_gossip_blob(blob, &publish_fn)).await { + let msg = format!("Invalid blob: {e}"); return if let BroadcastValidation::Gossip = validation_level { Err(warp_utils::reject::broadcast_without_import(msg)) } else { @@ -295,23 +383,150 @@ pub async fn publish_block 0 { - Ok(AvailabilityProcessingStatus::Imported(root)) => { + let custody_columns_indices = &network_globals.custody_columns; + + let custody_columns = gossip_verified_data_columns + .into_iter() + .flatten() + .filter(|data_column| custody_columns_indices.contains(&data_column.index())) + .collect(); + + // Importing the columns could trigger block import and network publication in the case + // where the block was already seen on gossip. + if let Err(e) = + Box::pin(chain.process_gossip_data_columns(custody_columns, publish_fn)).await + { + let msg = format!("Invalid data column: {e}"); + return if let BroadcastValidation::Gossip = validation_level { + Err(warp_utils::reject::broadcast_without_import(msg)) + } else { + error!( + log, + "Invalid data column during block publication"; + "reason" => &msg + ); + Err(warp_utils::reject::custom_bad_request(msg)) + }; + } + } + + match gossip_verified_block_result { + Ok(gossip_verified_block) => { + let import_result = Box::pin(chain.process_block( + block_root, + gossip_verified_block, + NotifyExecutionLayer::Yes, + BlockImportSource::HttpApi, + publish_fn, + )) + .await; + post_block_import_logging_and_response( + import_result, + validation_level, + block, + is_locally_built_block, + seen_timestamp, + &chain, + &log, + ) + .await + } + Err(BlockError::DuplicateFullyImported(root)) => { + if publish_fn_completed.load(Ordering::SeqCst) { + post_block_import_logging_and_response( + Ok(AvailabilityProcessingStatus::Imported(root)), + validation_level, + block, + is_locally_built_block, + seen_timestamp, + &chain, + &log, + ) + .await + } else { + // None of the components provided in this HTTP request were new, so this was an 
+ // entirely redundant duplicate request. Return a status code indicating this, + // which can be overridden based on config. + Ok(warp::reply::with_status( + warp::reply::json(&ErrorMessage { + code: duplicate_status_code.as_u16(), + message: "duplicate block".to_string(), + stacktraces: vec![], + }), + duplicate_status_code, + ) + .into_response()) + } + } + Err(BlockError::DuplicateImportStatusUnknown(root)) => { + debug!( + log, + "Block previously seen"; + "block_root" => ?root, + "slot" => block.slot(), + ); + let import_result = Box::pin(chain.process_block( + block_root, + block.clone(), + NotifyExecutionLayer::Yes, + BlockImportSource::HttpApi, + publish_fn, + )) + .await; + post_block_import_logging_and_response( + import_result, + validation_level, + block, + is_locally_built_block, + seen_timestamp, + &chain, + &log, + ) + .await + } + Err(e) => { + warn!( + log, + "Not publishing block - not gossip verified"; + "slot" => slot, + "error" => %e + ); + Err(warp_utils::reject::custom_bad_request(e.to_string())) + } + } +} + +async fn post_block_import_logging_and_response( + result: Result, + validation_level: BroadcastValidation, + block: Arc>, + is_locally_built_block: bool, + seen_timestamp: Duration, + chain: &Arc>, + log: &Logger, +) -> Result { + match result { + // The `DuplicateFullyImported` case here captures the case where the block finishes + // being imported after gossip verification. It could be that it finished imported as a + // result of the block being imported from gossip, OR it could be that it finished importing + // after processing of a gossip blob. In the latter case we MUST run fork choice to + // re-compute the head. 
+ Ok(AvailabilityProcessingStatus::Imported(root)) + | Err(BlockError::DuplicateFullyImported(root)) => { + let delay = get_block_delay_ms(seen_timestamp, block.message(), &chain.slot_clock); info!( log, "Valid block from HTTP API"; "block_delay" => ?delay, - "root" => format!("{}", root), - "proposer_index" => proposer_index, - "slot" =>slot, + "root" => %root, + "proposer_index" => block.message().proposer_index(), + "slot" => block.slot(), ); // Notify the validator monitor. @@ -330,7 +545,7 @@ pub async fn publish_block &msg + "reason" => ?e, ); Err(warp_utils::reject::custom_bad_request(format!( "Invalid block: {e}" @@ -385,7 +599,7 @@ pub async fn publish_blinded_block( network_globals: Arc>, ) -> Result { let block_root = blinded_block.canonical_root(); - let full_block: ProvenancedBlock> = + let full_block = reconstruct_block(chain.clone(), block_root, blinded_block, log.clone()).await?; publish_block::( Some(block_root), @@ -408,7 +622,7 @@ pub async fn reconstruct_block( block_root: Hash256, block: Arc>, log: Logger, -) -> Result>, Rejection> { +) -> Result>>, Rejection> { let full_payload_opt = if let Ok(payload_header) = block.message().body().execution_payload() { let el = chain.execution_layer.as_ref().ok_or_else(|| { warp_utils::reject::custom_server_error("Missing execution layer".to_string()) @@ -474,14 +688,17 @@ pub async fn reconstruct_block( match full_payload_opt { // A block without a payload is pre-merge and we consider it locally // built. 
- None => into_full_block_and_blobs(block, None).map(ProvenancedBlock::local), + None => block + .try_into_full_block(None) + .ok_or("Failed to build full block with payload".to_string()) + .map(|full_block| ProvenancedBlock::local(Arc::new(full_block), None)), Some(ProvenancedPayload::Local(full_payload_contents)) => { - into_full_block_and_blobs(block, Some(full_payload_contents)) - .map(ProvenancedBlock::local) + into_full_block_and_blobs::(block, full_payload_contents) + .map(|(block, blobs)| ProvenancedBlock::local(block, blobs)) } Some(ProvenancedPayload::Builder(full_payload_contents)) => { - into_full_block_and_blobs(block, Some(full_payload_contents)) - .map(ProvenancedBlock::builder) + into_full_block_and_blobs::(block, full_payload_contents) + .map(|(block, blobs)| ProvenancedBlock::builder(block, blobs)) } } .map_err(|e| { @@ -540,28 +757,11 @@ fn late_block_logging>( /// Check if any of the blobs or the block are slashable. Returns `BlockError::Slashable` if so. fn check_slashable( chain_clone: &BeaconChain, - blobs_opt: &Option>, block_root: Hash256, block_clone: &SignedBeaconBlock>, log_clone: &Logger, ) -> Result<(), BlockError> { let slashable_cache = chain_clone.observed_slashable.read(); - if let Some(blobs) = blobs_opt.as_ref() { - blobs.iter().try_for_each(|blob| { - if slashable_cache - .is_slashable(blob.slot(), blob.block_proposer_index(), blob.block_root()) - .map_err(|e| BlockError::BeaconChainError(e.into()))? - { - warn!( - log_clone, - "Not publishing equivocating blob"; - "slot" => block_clone.slot() - ); - return Err(BlockError::Slashable); - } - Ok(()) - })?; - }; if slashable_cache .is_slashable( block_clone.slot(), @@ -579,3 +779,38 @@ fn check_slashable( } Ok(()) } + +/// Converting from a `SignedBlindedBeaconBlock` into a full `SignedBlockContents`. 
+#[allow(clippy::type_complexity)] +pub fn into_full_block_and_blobs( + blinded_block: SignedBlindedBeaconBlock, + maybe_full_payload_contents: FullPayloadContents, +) -> Result<(Arc>, UnverifiedBlobs), String> { + match maybe_full_payload_contents { + // This variant implies a pre-deneb block + FullPayloadContents::Payload(execution_payload) => { + let signed_block = blinded_block + .try_into_full_block(Some(execution_payload)) + .ok_or("Failed to build full block with payload".to_string())?; + Ok((Arc::new(signed_block), None)) + } + // This variant implies a post-deneb block + FullPayloadContents::PayloadAndBlobs(payload_and_blobs) => { + let ExecutionPayloadAndBlobs { + execution_payload, + blobs_bundle, + } = payload_and_blobs; + let signed_block = blinded_block + .try_into_full_block(Some(execution_payload)) + .ok_or("Failed to build full block with payload".to_string())?; + + let BlobsBundle { + commitments: _, + proofs, + blobs, + } = blobs_bundle; + + Ok((Arc::new(signed_block), Some((proofs, blobs)))) + } + } +} diff --git a/beacon_node/http_api/src/test_utils.rs b/beacon_node/http_api/src/test_utils.rs index 4742fa109f..7b48d64e36 100644 --- a/beacon_node/http_api/src/test_utils.rs +++ b/beacon_node/http_api/src/test_utils.rs @@ -61,7 +61,8 @@ type Mutator = BoxedMutator, MemoryStore>; impl InteractiveTester { pub async fn new(spec: Option, validator_count: usize) -> Self { - Self::new_with_initializer_and_mutator(spec, validator_count, None, None).await + Self::new_with_initializer_and_mutator(spec, validator_count, None, None, Config::default()) + .await } pub async fn new_with_initializer_and_mutator( @@ -69,6 +70,7 @@ impl InteractiveTester { validator_count: usize, initializer: Option>, mutator: Option>, + config: Config, ) -> Self { let mut harness_builder = BeaconChainHarness::builder(E::default()) .spec_or_default(spec.map(Arc::new)) @@ -99,8 +101,9 @@ impl InteractiveTester { listening_socket, network_rx, .. 
- } = create_api_server( + } = create_api_server_with_config( harness.chain.clone(), + config, &harness.runtime, harness.logger().clone(), ) @@ -131,6 +134,15 @@ pub async fn create_api_server( chain: Arc>, test_runtime: &TestRuntime, log: Logger, +) -> ApiServer> { + create_api_server_with_config(chain, Config::default(), test_runtime, log).await +} + +pub async fn create_api_server_with_config( + chain: Arc>, + http_config: Config, + test_runtime: &TestRuntime, + log: Logger, ) -> ApiServer> { // Use port 0 to allocate a new unused port. let port = 0; @@ -220,12 +232,14 @@ pub async fn create_api_server( .unwrap(); let ctx = Arc::new(Context { + // Override several config fields with defaults. If these need to be tweaked in future + // we could remove these overrides. config: Config { enabled: true, listen_port: port, data_dir: std::path::PathBuf::from(DEFAULT_ROOT_DIR), enable_light_client_server: true, - ..Config::default() + ..http_config }, chain: Some(chain), network_senders: Some(network_senders), diff --git a/beacon_node/http_api/tests/broadcast_validation_tests.rs b/beacon_node/http_api/tests/broadcast_validation_tests.rs index 59cdbb1c99..f55983ec66 100644 --- a/beacon_node/http_api/tests/broadcast_validation_tests.rs +++ b/beacon_node/http_api/tests/broadcast_validation_tests.rs @@ -1,13 +1,16 @@ +use beacon_chain::blob_verification::GossipVerifiedBlob; use beacon_chain::{ test_utils::{AttestationStrategy, BlockStrategy}, - GossipVerifiedBlock, IntoGossipVerifiedBlockContents, + GossipVerifiedBlock, IntoGossipVerifiedBlock, }; use eth2::reqwest::StatusCode; use eth2::types::{BroadcastValidation, PublishBlockRequest}; use http_api::test_utils::InteractiveTester; -use http_api::{publish_blinded_block, publish_block, reconstruct_block, ProvenancedBlock}; +use http_api::{publish_blinded_block, publish_block, reconstruct_block, Config, ProvenancedBlock}; use std::sync::Arc; -use types::{Epoch, EthSpec, FixedBytesExtended, ForkName, Hash256, MainnetEthSpec, 
Slot}; +use types::{ + BlobSidecar, Epoch, EthSpec, FixedBytesExtended, ForkName, Hash256, MainnetEthSpec, Slot, +}; use warp::Rejection; use warp_utils::reject::CustomBadRequest; @@ -81,7 +84,7 @@ pub async fn gossip_invalid() { /* mandated by Beacon API spec */ assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); - assert_server_message_error(error_response, "BAD_REQUEST: BlockError(NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 })".to_string()); + assert_server_message_error(error_response, "BAD_REQUEST: NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 }".to_string()); } /// This test checks that a block that is valid from a gossip perspective is accepted when using `broadcast_validation=gossip`. @@ -266,7 +269,7 @@ pub async fn consensus_invalid() { /* mandated by Beacon API spec */ assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); - assert_server_message_error(error_response, "BAD_REQUEST: BlockError(NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 })".to_string()); + assert_server_message_error(error_response, "BAD_REQUEST: NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 }".to_string()); } /// This test checks that a block that is only valid from a gossip perspective is rejected when using `broadcast_validation=consensus`. 
@@ -360,10 +363,9 @@ pub async fn consensus_partial_pass_only_consensus() { ); assert_ne!(block_a.state_root(), block_b.state_root()); - let gossip_block_contents_b = PublishBlockRequest::new(block_b, blobs_b) - .into_gossip_verified_block(&tester.harness.chain); - assert!(gossip_block_contents_b.is_ok()); - let gossip_block_a = GossipVerifiedBlock::new(block_a.clone().into(), &tester.harness.chain); + let gossip_block_b = block_b.into_gossip_verified_block(&tester.harness.chain); + assert!(gossip_block_b.is_ok()); + let gossip_block_a = block_a.into_gossip_verified_block(&tester.harness.chain); assert!(gossip_block_a.is_err()); /* submit `block_b` which should induce equivocation */ @@ -372,7 +374,7 @@ pub async fn consensus_partial_pass_only_consensus() { let publication_result = publish_block( None, - ProvenancedBlock::local(gossip_block_contents_b.unwrap()), + ProvenancedBlock::local(gossip_block_b.unwrap(), blobs_b), tester.harness.chain.clone(), &channel.0, test_logger, @@ -382,7 +384,7 @@ pub async fn consensus_partial_pass_only_consensus() { ) .await; - assert!(publication_result.is_ok()); + assert!(publication_result.is_ok(), "{publication_result:?}"); assert!(tester .harness .chain @@ -481,7 +483,7 @@ pub async fn equivocation_invalid() { /* mandated by Beacon API spec */ assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); - assert_server_message_error(error_response, "BAD_REQUEST: BlockError(NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 })".to_string()); + assert_server_message_error(error_response, "BAD_REQUEST: NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 }".to_string()); } /// This test checks that a block that is valid from both a gossip and consensus perspective is rejected when using `broadcast_validation=consensus_and_equivocation`. 
@@ -555,10 +557,7 @@ pub async fn equivocation_consensus_early_equivocation() { let error_response: eth2::Error = response.err().unwrap(); assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); - assert_server_message_error( - error_response, - "BAD_REQUEST: BlockError(Slashable)".to_string(), - ); + assert_server_message_error(error_response, "BAD_REQUEST: Slashable".to_string()); } /// This test checks that a block that is only valid from a gossip perspective is rejected when using `broadcast_validation=consensus_and_equivocation`. @@ -642,7 +641,7 @@ pub async fn equivocation_consensus_late_equivocation() { let slot_b = slot_a + 1; let state_a = tester.harness.get_current_state(); - let ((block_a, blobs_a), mut state_after_a) = + let ((block_a, _blobs_a), mut state_after_a) = tester.harness.make_block(state_a.clone(), slot_b).await; let ((block_b, blobs_b), mut state_after_b) = tester.harness.make_block(state_a, slot_b).await; @@ -657,19 +656,18 @@ pub async fn equivocation_consensus_late_equivocation() { ); assert_ne!(block_a.state_root(), block_b.state_root()); - let gossip_block_contents_b = PublishBlockRequest::new(block_b, blobs_b) - .into_gossip_verified_block(&tester.harness.chain); - assert!(gossip_block_contents_b.is_ok()); - let gossip_block_contents_a = PublishBlockRequest::new(block_a, blobs_a) - .into_gossip_verified_block(&tester.harness.chain); - assert!(gossip_block_contents_a.is_err()); + let gossip_block_b = block_b.into_gossip_verified_block(&tester.harness.chain); + assert!(gossip_block_b.is_ok()); + + let gossip_block_a = block_a.into_gossip_verified_block(&tester.harness.chain); + assert!(gossip_block_a.is_err()); let channel = tokio::sync::mpsc::unbounded_channel(); let network_globals = tester.ctx.network_globals.clone().unwrap(); let publication_result = publish_block( None, - ProvenancedBlock::local(gossip_block_contents_b.unwrap()), + ProvenancedBlock::local(gossip_block_b.unwrap(), blobs_b), tester.harness.chain, 
&channel.0, test_logger, @@ -686,8 +684,8 @@ pub async fn equivocation_consensus_late_equivocation() { assert!(publication_error.find::().is_some()); assert_eq!( - *publication_error.find::().unwrap().0, - "proposal for this slot and proposer has already been seen".to_string() + publication_error.find::().unwrap().0, + "proposal for this slot and proposer has already been seen" ); } @@ -783,7 +781,7 @@ pub async fn blinded_gossip_invalid() { /* mandated by Beacon API spec */ assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); - assert_server_message_error(error_response, "BAD_REQUEST: BlockError(NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 })".to_string()); + assert_server_message_error(error_response, "BAD_REQUEST: NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 }".to_string()); } /// This test checks that a block that is valid from a gossip perspective is accepted when using `broadcast_validation=gossip`. @@ -961,7 +959,7 @@ pub async fn blinded_consensus_invalid() { /* mandated by Beacon API spec */ assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); - assert_server_message_error(error_response, "BAD_REQUEST: BlockError(NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 })".to_string()); + assert_server_message_error(error_response, "BAD_REQUEST: NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 }".to_string()); } /// This test checks that a block that is only valid from a gossip perspective is rejected when using `broadcast_validation=consensus`. 
@@ -1099,7 +1097,7 @@ pub async fn blinded_equivocation_invalid() { /* mandated by Beacon API spec */ assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); - assert_server_message_error(error_response, "BAD_REQUEST: BlockError(NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 })".to_string()); + assert_server_message_error(error_response, "BAD_REQUEST: NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 }".to_string()); } /// This test checks that a block that is valid from both a gossip and consensus perspective is rejected when using `broadcast_validation=consensus_and_equivocation`. @@ -1169,10 +1167,7 @@ pub async fn blinded_equivocation_consensus_early_equivocation() { let error_response: eth2::Error = response.err().unwrap(); assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); - assert_server_message_error( - error_response, - "BAD_REQUEST: BlockError(Slashable)".to_string(), - ); + assert_server_message_error(error_response, "BAD_REQUEST: Slashable".to_string()); } /// This test checks that a block that is only valid from a gossip perspective is rejected when using `broadcast_validation=consensus_and_equivocation`. 
@@ -1295,19 +1290,17 @@ pub async fn blinded_equivocation_consensus_late_equivocation() { .unwrap(); let inner_block_a = match unblinded_block_a { - ProvenancedBlock::Local(a, _) => a, - ProvenancedBlock::Builder(a, _) => a, + ProvenancedBlock::Local(a, _, _) => a, + ProvenancedBlock::Builder(a, _, _) => a, }; let inner_block_b = match unblinded_block_b { - ProvenancedBlock::Local(b, _) => b, - ProvenancedBlock::Builder(b, _) => b, + ProvenancedBlock::Local(b, _, _) => b, + ProvenancedBlock::Builder(b, _, _) => b, }; - let gossip_block_b = - GossipVerifiedBlock::new(inner_block_b.clone().deconstruct().0, &tester.harness.chain); + let gossip_block_b = GossipVerifiedBlock::new(inner_block_b, &tester.harness.chain); assert!(gossip_block_b.is_ok()); - let gossip_block_a = - GossipVerifiedBlock::new(inner_block_a.clone().deconstruct().0, &tester.harness.chain); + let gossip_block_a = GossipVerifiedBlock::new(inner_block_a, &tester.harness.chain); assert!(gossip_block_a.is_err()); let channel = tokio::sync::mpsc::unbounded_channel(); @@ -1374,6 +1367,438 @@ pub async fn blinded_equivocation_full_pass() { .block_is_known_to_fork_choice(&block.canonical_root())); } +/// This test checks that an HTTP POST request with the block & blobs succeeds with a 200 response +/// even if the block has already been seen on gossip without any blobs. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn block_seen_on_gossip_without_blobs() { + let validation_level: Option = Some(BroadcastValidation::Gossip); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let spec = ForkName::latest().make_genesis_spec(E::default_spec()); + let tester = InteractiveTester::::new(Some(spec), validator_count).await; + + // Create some chain depth. 
+ tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + tester.harness.advance_slot(); + + let slot_a = Slot::new(num_initial); + let slot_b = slot_a + 1; + + let state_a = tester.harness.get_current_state(); + let ((block, blobs), _) = tester.harness.make_block(state_a, slot_b).await; + let blobs = blobs.expect("should have some blobs"); + assert_ne!(blobs.0.len(), 0); + + // Simulate the block being seen on gossip. + block + .clone() + .into_gossip_verified_block(&tester.harness.chain) + .unwrap(); + + // It should not yet be added to fork choice because blobs have not been seen. + assert!(!tester + .harness + .chain + .block_is_known_to_fork_choice(&block.canonical_root())); + + // Post the block *and* blobs to the HTTP API. + let response: Result<(), eth2::Error> = tester + .client + .post_beacon_blocks_v2( + &PublishBlockRequest::new(block.clone(), Some(blobs)), + validation_level, + ) + .await; + + // This should result in the block being fully imported. + response.unwrap(); + assert!(tester + .harness + .chain + .block_is_known_to_fork_choice(&block.canonical_root())); +} + +/// This test checks that an HTTP POST request with the block & blobs succeeds with a 200 response +/// even if the block has already been seen on gossip without all blobs. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn block_seen_on_gossip_with_some_blobs() { + let validation_level: Option = Some(BroadcastValidation::Gossip); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let spec = ForkName::latest().make_genesis_spec(E::default_spec()); + let tester = InteractiveTester::::new(Some(spec), validator_count).await; + + // Create some chain depth. 
+ tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + tester.harness.advance_slot(); + + let slot_a = Slot::new(num_initial); + let slot_b = slot_a + 1; + + let state_a = tester.harness.get_current_state(); + let ((block, blobs), _) = tester.harness.make_block(state_a, slot_b).await; + let blobs = blobs.expect("should have some blobs"); + assert!( + blobs.0.len() >= 2, + "need at least 2 blobs for partial reveal" + ); + + let partial_kzg_proofs = vec![blobs.0.get(0).unwrap().clone()]; + let partial_blobs = vec![blobs.1.get(0).unwrap().clone()]; + + // Simulate the block being seen on gossip. + block + .clone() + .into_gossip_verified_block(&tester.harness.chain) + .unwrap(); + + // Simulate some of the blobs being seen on gossip. + for (i, (kzg_proof, blob)) in partial_kzg_proofs + .into_iter() + .zip(partial_blobs) + .enumerate() + { + let sidecar = Arc::new(BlobSidecar::new(i, blob, &block, kzg_proof).unwrap()); + let gossip_blob = + GossipVerifiedBlob::new(sidecar, i as u64, &tester.harness.chain).unwrap(); + tester + .harness + .chain + .process_gossip_blob(gossip_blob, || panic!("should not publish block yet")) + .await + .unwrap(); + } + + // It should not yet be added to fork choice because all blobs have not been seen. + assert!(!tester + .harness + .chain + .block_is_known_to_fork_choice(&block.canonical_root())); + + // Post the block *and* all blobs to the HTTP API. + let response: Result<(), eth2::Error> = tester + .client + .post_beacon_blocks_v2( + &PublishBlockRequest::new(block.clone(), Some(blobs)), + validation_level, + ) + .await; + + // This should result in the block being fully imported. 
+ response.unwrap(); + assert!(tester + .harness + .chain + .block_is_known_to_fork_choice(&block.canonical_root())); +} + +/// This test checks that an HTTP POST request with the block & blobs succeeds with a 200 response +/// even if the blobs have already been seen on gossip. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn blobs_seen_on_gossip_without_block() { + let validation_level: Option = Some(BroadcastValidation::Gossip); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let spec = ForkName::latest().make_genesis_spec(E::default_spec()); + let tester = InteractiveTester::::new(Some(spec), validator_count).await; + + // Create some chain depth. + tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + tester.harness.advance_slot(); + + let slot_a = Slot::new(num_initial); + let slot_b = slot_a + 1; + + let state_a = tester.harness.get_current_state(); + let ((block, blobs), _) = tester.harness.make_block(state_a, slot_b).await; + let (kzg_proofs, blobs) = blobs.expect("should have some blobs"); + + // Simulate the blobs being seen on gossip. + for (i, (kzg_proof, blob)) in kzg_proofs + .clone() + .into_iter() + .zip(blobs.clone()) + .enumerate() + { + let sidecar = Arc::new(BlobSidecar::new(i, blob, &block, kzg_proof).unwrap()); + let gossip_blob = + GossipVerifiedBlob::new(sidecar, i as u64, &tester.harness.chain).unwrap(); + tester + .harness + .chain + .process_gossip_blob(gossip_blob, || panic!("should not publish block yet")) + .await + .unwrap(); + } + + // It should not yet be added to fork choice because the block has not been seen. 
+ assert!(!tester + .harness + .chain + .block_is_known_to_fork_choice(&block.canonical_root())); + + // Post the block *and* all blobs to the HTTP API. + let response: Result<(), eth2::Error> = tester + .client + .post_beacon_blocks_v2( + &PublishBlockRequest::new(block.clone(), Some((kzg_proofs, blobs))), + validation_level, + ) + .await; + + // This should result in the block being fully imported. + response.unwrap(); + assert!(tester + .harness + .chain + .block_is_known_to_fork_choice(&block.canonical_root())); +} + +/// This test checks that an HTTP POST request with the block succeeds with a 200 response +/// if just the blobs have already been seen on gossip. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn blobs_seen_on_gossip_without_block_and_no_http_blobs() { + let validation_level: Option = Some(BroadcastValidation::Gossip); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let spec = ForkName::latest().make_genesis_spec(E::default_spec()); + let tester = InteractiveTester::::new(Some(spec), validator_count).await; + + // Create some chain depth. + tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + tester.harness.advance_slot(); + + let slot_a = Slot::new(num_initial); + let slot_b = slot_a + 1; + + let state_a = tester.harness.get_current_state(); + let ((block, blobs), _) = tester.harness.make_block(state_a, slot_b).await; + let (kzg_proofs, blobs) = blobs.expect("should have some blobs"); + assert!(!blobs.is_empty()); + + // Simulate the blobs being seen on gossip. 
+ for (i, (kzg_proof, blob)) in kzg_proofs + .clone() + .into_iter() + .zip(blobs.clone()) + .enumerate() + { + let sidecar = Arc::new(BlobSidecar::new(i, blob, &block, kzg_proof).unwrap()); + let gossip_blob = + GossipVerifiedBlob::new(sidecar, i as u64, &tester.harness.chain).unwrap(); + tester + .harness + .chain + .process_gossip_blob(gossip_blob, || panic!("should not publish block yet")) + .await + .unwrap(); + } + + // It should not yet be added to fork choice because the block has not been seen. + assert!(!tester + .harness + .chain + .block_is_known_to_fork_choice(&block.canonical_root())); + + // Post just the block to the HTTP API (blob lists are empty). + let response: Result<(), eth2::Error> = tester + .client + .post_beacon_blocks_v2( + &PublishBlockRequest::new( + block.clone(), + Some((Default::default(), Default::default())), + ), + validation_level, + ) + .await; + + // This should result in the block being fully imported. + response.unwrap(); + assert!(tester + .harness + .chain + .block_is_known_to_fork_choice(&block.canonical_root())); +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn slashable_blobs_seen_on_gossip_cause_failure() { + let validation_level: Option = + Some(BroadcastValidation::ConsensusAndEquivocation); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let spec = ForkName::latest().make_genesis_spec(E::default_spec()); + let tester = InteractiveTester::::new(Some(spec), validator_count).await; + + // Create some chain depth. 
+ tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + tester.harness.advance_slot(); + + let slot_a = Slot::new(num_initial); + let slot_b = slot_a + 1; + + let state_a = tester.harness.get_current_state(); + let ((block_a, blobs_a), _) = tester.harness.make_block(state_a.clone(), slot_b).await; + let ((block_b, blobs_b), _) = tester.harness.make_block(state_a, slot_b).await; + let (kzg_proofs_a, blobs_a) = blobs_a.expect("should have some blobs"); + let (kzg_proofs_b, blobs_b) = blobs_b.expect("should have some blobs"); + + // Simulate the blobs of block B being seen on gossip. + for (i, (kzg_proof, blob)) in kzg_proofs_b.into_iter().zip(blobs_b).enumerate() { + let sidecar = Arc::new(BlobSidecar::new(i, blob, &block_b, kzg_proof).unwrap()); + let gossip_blob = + GossipVerifiedBlob::new(sidecar, i as u64, &tester.harness.chain).unwrap(); + tester + .harness + .chain + .process_gossip_blob(gossip_blob, || panic!("should not publish block yet")) + .await + .unwrap(); + } + + // It should not yet be added to fork choice because block B has not been seen. + assert!(!tester + .harness + .chain + .block_is_known_to_fork_choice(&block_b.canonical_root())); + + // Post block A *and* all its blobs to the HTTP API. + let response: Result<(), eth2::Error> = tester + .client + .post_beacon_blocks_v2( + &PublishBlockRequest::new(block_a.clone(), Some((kzg_proofs_a, blobs_a))), + validation_level, + ) + .await; + + // This should not result in block A being fully imported. + response.unwrap_err(); + assert!(!tester + .harness + .chain + .block_is_known_to_fork_choice(&block_a.canonical_root())); +} + +/// This test checks that an HTTP POST request with a duplicate block & blobs results in the +/// `duplicate_status_code` being returned. 
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn duplicate_block_status_code() { + let validation_level: Option = Some(BroadcastValidation::Gossip); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let spec = ForkName::latest().make_genesis_spec(E::default_spec()); + let duplicate_block_status_code = StatusCode::IM_A_TEAPOT; + let tester = InteractiveTester::::new_with_initializer_and_mutator( + Some(spec), + validator_count, + None, + None, + Config { + duplicate_block_status_code, + ..Config::default() + }, + ) + .await; + + // Create some chain depth. + tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + tester.harness.advance_slot(); + + let slot_a = Slot::new(num_initial); + let slot_b = slot_a + 1; + + let state_a = tester.harness.get_current_state(); + let ((block, blobs), _) = tester.harness.make_block(state_a, slot_b).await; + let (kzg_proofs, blobs) = blobs.expect("should have some blobs"); + + // Post the block blobs to the HTTP API once. + let block_request = PublishBlockRequest::new(block.clone(), Some((kzg_proofs, blobs))); + let response: Result<(), eth2::Error> = tester + .client + .post_beacon_blocks_v2(&block_request, validation_level) + .await; + + // This should result in the block being fully imported. + response.unwrap(); + assert!(tester + .harness + .chain + .block_is_known_to_fork_choice(&block.canonical_root())); + + // Post again. 
+ let duplicate_response: Result<(), eth2::Error> = tester + .client + .post_beacon_blocks_v2(&block_request, validation_level) + .await; + let err = duplicate_response.unwrap_err(); + assert_eq!(err.status().unwrap(), duplicate_block_status_code); +} + fn assert_server_message_error(error_response: eth2::Error, expected_message: String) { let eth2::Error::ServerMessage(err) = error_response else { panic!("Not a eth2::Error::ServerMessage"); diff --git a/beacon_node/http_api/tests/fork_tests.rs b/beacon_node/http_api/tests/fork_tests.rs index b5b3edf892..8cb6053e9f 100644 --- a/beacon_node/http_api/tests/fork_tests.rs +++ b/beacon_node/http_api/tests/fork_tests.rs @@ -386,6 +386,7 @@ async fn bls_to_execution_changes_update_all_around_capella_fork() { .genesis_state_ephemeral_store(genesis_state) })), None, + Default::default(), ) .await; let harness = &tester.harness; diff --git a/beacon_node/http_api/tests/interactive_tests.rs b/beacon_node/http_api/tests/interactive_tests.rs index 5034492e25..c3ed334782 100644 --- a/beacon_node/http_api/tests/interactive_tests.rs +++ b/beacon_node/http_api/tests/interactive_tests.rs @@ -72,6 +72,7 @@ async fn state_by_root_pruned_from_fork_choice() { }) })), None, + Default::default(), ) .await; @@ -427,6 +428,7 @@ pub async fn proposer_boost_re_org_test( DisallowedReOrgOffsets::new::(disallowed_offsets).unwrap(), ) })), + Default::default(), ) .await; let harness = &tester.harness; diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index ddcd74d20b..005536bcf2 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -936,7 +936,10 @@ impl NetworkBeaconProcessor { let blob_slot = verified_blob.slot(); let blob_index = verified_blob.id().index; - let result = self.chain.process_gossip_blob(verified_blob).await; + let result = self + 
.chain + .process_gossip_blob(verified_blob, || Ok(())) + .await; match &result { Ok(AvailabilityProcessingStatus::Imported(block_root)) => { @@ -963,7 +966,7 @@ impl NetworkBeaconProcessor { "block_root" => %block_root, ); } - Err(BlockError::BlockIsAlreadyKnown(_)) => { + Err(BlockError::DuplicateFullyImported(_)) => { debug!( self.log, "Ignoring gossip blob already imported"; @@ -1013,7 +1016,7 @@ impl NetworkBeaconProcessor { match self .chain - .process_gossip_data_columns(vec![verified_data_column]) + .process_gossip_data_columns(vec![verified_data_column], || Ok(())) .await { Ok((availability, data_columns_to_publish)) => { @@ -1050,7 +1053,7 @@ impl NetworkBeaconProcessor { } } } - Err(BlockError::BlockIsAlreadyKnown(_)) => { + Err(BlockError::DuplicateFullyImported(_)) => { debug!( self.log, "Ignoring gossip column already imported"; @@ -1242,7 +1245,10 @@ impl NetworkBeaconProcessor { self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); return None; } - Err(BlockError::BlockIsAlreadyKnown(_)) => { + Err( + BlockError::DuplicateFullyImported(_) + | BlockError::DuplicateImportStatusUnknown(..), + ) => { debug!( self.log, "Gossip block is already known"; diff --git a/beacon_node/network/src/network_beacon_processor/sync_methods.rs b/beacon_node/network/src/network_beacon_processor/sync_methods.rs index 50c7ee05a1..dcad6160b3 100644 --- a/beacon_node/network/src/network_beacon_processor/sync_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/sync_methods.rs @@ -294,7 +294,7 @@ impl NetworkBeaconProcessor { "slot" => %slot, ); } - Err(BlockError::BlockIsAlreadyKnown(_)) => { + Err(BlockError::DuplicateFullyImported(_)) => { debug!( self.log, "Blobs have already been imported"; @@ -355,7 +355,7 @@ impl NetworkBeaconProcessor { } } } - Err(BlockError::BlockIsAlreadyKnown(_)) => { + Err(BlockError::DuplicateFullyImported(_)) => { debug!( self.log, "Custody columns have already been imported"; @@ -715,7 +715,8 @@ impl 
NetworkBeaconProcessor { peer_action: Some(PeerAction::LowToleranceError), }) } - BlockError::BlockIsAlreadyKnown(_) => { + BlockError::DuplicateFullyImported(_) + | BlockError::DuplicateImportStatusUnknown(..) => { // This can happen for many reasons. Head sync's can download multiples and parent // lookups can download blocks before range sync Ok(()) diff --git a/beacon_node/network/src/sync/block_lookups/mod.rs b/beacon_node/network/src/sync/block_lookups/mod.rs index 9abcd263de..a9dbf11fd0 100644 --- a/beacon_node/network/src/sync/block_lookups/mod.rs +++ b/beacon_node/network/src/sync/block_lookups/mod.rs @@ -517,7 +517,7 @@ impl BlockLookups { let action = match result { BlockProcessingResult::Ok(AvailabilityProcessingStatus::Imported(_)) - | BlockProcessingResult::Err(BlockError::BlockIsAlreadyKnown(_)) => { + | BlockProcessingResult::Err(BlockError::DuplicateFullyImported(..)) => { // Successfully imported request_state.on_processing_success()?; Action::Continue @@ -541,6 +541,16 @@ impl BlockLookups { Action::Retry } } + BlockProcessingResult::Err(BlockError::DuplicateImportStatusUnknown(..)) => { + // This is unreachable because RPC blocks do not undergo gossip verification, and + // this error can *only* come from gossip verification. + error!( + self.log, + "Single block lookup hit unreachable condition"; + "block_root" => ?block_root + ); + Action::Drop + } BlockProcessingResult::Ignored => { // Beacon processor signalled to ignore the block processing result. // This implies that the cpu is overloaded. Drop the request. diff --git a/beacon_node/network/src/sync/block_lookups/tests.rs b/beacon_node/network/src/sync/block_lookups/tests.rs index 6e6c9a5cdf..c0a766137b 100644 --- a/beacon_node/network/src/sync/block_lookups/tests.rs +++ b/beacon_node/network/src/sync/block_lookups/tests.rs @@ -1471,7 +1471,7 @@ fn test_parent_lookup_happy_path() { // Processing succeeds, now the rest of the chain should be sent for processing. 
rig.parent_block_processed( block_root, - BlockError::BlockIsAlreadyKnown(block_root).into(), + BlockError::DuplicateFullyImported(block_root).into(), ); rig.expect_parent_chain_process(); rig.parent_chain_processed_success(block_root, &[]); @@ -1839,7 +1839,7 @@ fn test_same_chain_race_condition() { rig.log(&format!("Block {i} was removed and is already known")); rig.parent_block_processed( chain_hash, - BlockError::BlockIsAlreadyKnown(block.canonical_root()).into(), + BlockError::DuplicateFullyImported(block.canonical_root()).into(), ) } else { rig.log(&format!("Block {i} ParentUnknown")); diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index 07d04b3fb2..5b7003e5e8 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -629,8 +629,8 @@ impl SyncNetworkContext { // cache nor in the request state of this lookup. Therefore, the block must either: (1) not // be downloaded yet or (2) the block is already imported into the fork-choice. // In case (1) the lookup must either successfully download the block or get dropped. - // In case (2) the block will be downloaded, processed, reach `BlockIsAlreadyKnown` and - // get dropped as completed. + // In case (2) the block will be downloaded, processed, reach `DuplicateFullyImported` + // and get dropped as completed. return Ok(LookupRequestResult::Pending("waiting for block download")); }; let expected_blobs = block.num_expected_blobs(); diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index 3925d2deda..e1550fdee2 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -1883,42 +1883,6 @@ impl PublishBlockRequest { } } -/// Converting from a `SignedBlindedBeaconBlock` into a full `SignedBlockContents`. 
-pub fn into_full_block_and_blobs( - blinded_block: SignedBlindedBeaconBlock, - maybe_full_payload_contents: Option>, -) -> Result, String> { - match maybe_full_payload_contents { - None => { - let signed_block = blinded_block - .try_into_full_block(None) - .ok_or("Failed to build full block with payload".to_string())?; - Ok(PublishBlockRequest::new(Arc::new(signed_block), None)) - } - // This variant implies a pre-deneb block - Some(FullPayloadContents::Payload(execution_payload)) => { - let signed_block = blinded_block - .try_into_full_block(Some(execution_payload)) - .ok_or("Failed to build full block with payload".to_string())?; - Ok(PublishBlockRequest::new(Arc::new(signed_block), None)) - } - // This variant implies a post-deneb block - Some(FullPayloadContents::PayloadAndBlobs(payload_and_blobs)) => { - let signed_block = blinded_block - .try_into_full_block(Some(payload_and_blobs.execution_payload)) - .ok_or("Failed to build full block with payload".to_string())?; - - Ok(PublishBlockRequest::new( - Arc::new(signed_block), - Some(( - payload_and_blobs.blobs_bundle.proofs, - payload_and_blobs.blobs_bundle.blobs, - )), - )) - } - } -} - impl TryFrom>> for PublishBlockRequest { type Error = &'static str; fn try_from(block: Arc>) -> Result { diff --git a/consensus/types/src/data_column_sidecar.rs b/consensus/types/src/data_column_sidecar.rs index 90c05aea1f..57251e319a 100644 --- a/consensus/types/src/data_column_sidecar.rs +++ b/consensus/types/src/data_column_sidecar.rs @@ -161,6 +161,7 @@ pub enum DataColumnSidecarError { DataColumnIndexOutOfBounds, KzgCommitmentInclusionProofOutOfBounds, KzgError(KzgError), + KzgNotInitialized, MissingBlobSidecars, PreDeneb, SszError(SszError), diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index 33ae132e8a..8d933a6fcd 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -505,8 +505,8 @@ impl Tester { } Err(_) => 
GossipVerifiedBlob::__assumed_valid(blob_sidecar), }; - let result = - self.block_on_dangerous(self.harness.chain.process_gossip_blob(blob))?; + let result = self + .block_on_dangerous(self.harness.chain.process_gossip_blob(blob, || Ok(())))?; if valid { assert!(result.is_ok()); } From 50d8375d4666e6deb9bee9cfab396ce767ef15ff Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Wed, 25 Sep 2024 14:45:35 +0100 Subject: [PATCH 30/66] Remove Score Ord, PartialOrd, Eq and PartialEq impls (#6420) * drop score Ord, PartialOrd, Eq and PartialEq impls and impl total_cmp instead * Revert "Fix test failure on Rust v1.81 (#6407)" This reverts commit 8a085fc828cef14674ef342906b715dd816e8047. * reverse in the compare function * lint mdfiles --- .../src/peer_manager/mod.rs | 14 ++------ .../src/peer_manager/peerdb.rs | 22 +++++------- .../src/peer_manager/peerdb/score.rs | 35 +++++++++++-------- book/src/developers.md | 1 + 4 files changed, 31 insertions(+), 41 deletions(-) diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index b8dce6667e..9f46f5daa0 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -2340,16 +2340,6 @@ mod tests { gossipsub_score: f64, } - // generate an arbitrary f64 while preventing NaN values - fn arbitrary_f64(g: &mut Gen) -> f64 { - loop { - let val = f64::arbitrary(g); - if !val.is_nan() { - return val; - } - } - } - impl Arbitrary for PeerCondition { fn arbitrary(g: &mut Gen) -> Self { let attestation_net_bitfield = { @@ -2375,9 +2365,9 @@ mod tests { outgoing: bool::arbitrary(g), attestation_net_bitfield, sync_committee_net_bitfield, - score: arbitrary_f64(g), + score: f64::arbitrary(g), trusted: bool::arbitrary(g), - gossipsub_score: arbitrary_f64(g), + gossipsub_score: f64::arbitrary(g), } } } diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs 
b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs index 08d9e5209c..d2effd4d03 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs @@ -1,8 +1,8 @@ use crate::discovery::enr::PEERDAS_CUSTODY_SUBNET_COUNT_ENR_KEY; use crate::discovery::{peer_id_to_node_id, CombinedKey}; use crate::{metrics, multiaddr::Multiaddr, types::Subnet, Enr, EnrExt, Gossipsub, PeerId}; +use itertools::Itertools; use peer_info::{ConnectionDirection, PeerConnectionStatus, PeerInfo}; -use rand::seq::SliceRandom; use score::{PeerAction, ReportSource, Score, ScoreState}; use slog::{crit, debug, error, trace, warn}; use std::net::IpAddr; @@ -290,15 +290,11 @@ impl PeerDB { /// Returns a vector of all connected peers sorted by score beginning with the worst scores. /// Ties get broken randomly. pub fn worst_connected_peers(&self) -> Vec<(&PeerId, &PeerInfo)> { - let mut connected = self - .peers + self.peers .iter() .filter(|(_, info)| info.is_connected()) - .collect::>(); - - connected.shuffle(&mut rand::thread_rng()); - connected.sort_by_key(|(_, info)| info.score()); - connected + .sorted_by(|(_, info_a), (_, info_b)| info_a.score().total_cmp(info_b.score(), false)) + .collect::>() } /// Returns a vector containing peers (their ids and info), sorted by @@ -307,13 +303,11 @@ impl PeerDB { where F: Fn(&PeerInfo) -> bool, { - let mut by_status = self - .peers + self.peers .iter() .filter(|(_, info)| is_status(info)) - .collect::>(); - by_status.sort_by_key(|(_, info)| info.score()); - by_status.into_iter().rev().collect() + .sorted_by(|(_, info_a), (_, info_b)| info_a.score().total_cmp(info_b.score(), true)) + .collect::>() } /// Returns the peer with highest reputation that satisfies `is_status` @@ -324,7 +318,7 @@ impl PeerDB { self.peers .iter() .filter(|(_, info)| is_status(info)) - .max_by_key(|(_, info)| info.score()) + .max_by(|(_, info_a), (_, info_b)| info_a.score().total_cmp(info_b.score(), false)) 
.map(|(id, _)| id) } diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb/score.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb/score.rs index c8425fc104..995ebf9064 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb/score.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb/score.rs @@ -7,6 +7,7 @@ //! The scoring algorithms are currently experimental. use crate::service::gossipsub_scoring_parameters::GREYLIST_THRESHOLD as GOSSIPSUB_GREYLIST_THRESHOLD; use serde::Serialize; +use std::cmp::Ordering; use std::sync::LazyLock; use std::time::Instant; use strum::AsRefStr; @@ -260,7 +261,7 @@ impl RealScore { } } -#[derive(PartialEq, Clone, Debug, Serialize)] +#[derive(Clone, Debug, Serialize)] pub enum Score { Max, Real(RealScore), @@ -323,21 +324,25 @@ impl Score { Self::Real(score) => score.is_good_gossipsub_peer(), } } -} -impl Eq for Score {} - -impl PartialOrd for Score { - fn partial_cmp(&self, other: &Score) -> Option { - Some(self.cmp(other)) - } -} - -impl Ord for Score { - fn cmp(&self, other: &Score) -> std::cmp::Ordering { - self.score() - .partial_cmp(&other.score()) - .unwrap_or(std::cmp::Ordering::Equal) + /// Instead of implementing `Ord` for `Score`, as we are underneath dealing with f64, + /// follow std convention and impl `Score::total_cmp` similar to `f64::total_cmp`. + pub fn total_cmp(&self, other: &Score, reverse: bool) -> Ordering { + match self.score().partial_cmp(&other.score()) { + Some(v) => { + // Only reverse when none of the items is NAN, + // so that NAN's are never considered. + if reverse { + v.reverse() + } else { + v + } + } + None if self.score().is_nan() && !other.score().is_nan() => Ordering::Less, + None if !self.score().is_nan() && other.score().is_nan() => Ordering::Greater, + // Both are NAN. 
+ None => Ordering::Equal, + } } } diff --git a/book/src/developers.md b/book/src/developers.md index 244c935ac2..d90708c5a9 100644 --- a/book/src/developers.md +++ b/book/src/developers.md @@ -20,6 +20,7 @@ Lighthouse currently uses the following ENR fields: ### Lighthouse Custom Fields Lighthouse is currently using the following custom ENR fields. + | Field | Description | | ---- | ---- | | `quic` | The UDP port on which the QUIC transport is listening on IPv4 | From 5d1ff7c6f896602a02e4ef4f3ab60209bcbb996c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Thu, 26 Sep 2024 15:52:03 +0100 Subject: [PATCH 31/66] fix Rpc Ping sequence number (#6408) * fix Rpc Ping sequence number * bubble up Outbound Err's and Responses even if the peer disconnected * send pings via Rpc from main network * add comment to connected check * Merge branch 'unstable' into fix-ping-seq-number --- .../lighthouse_network/src/rpc/handler.rs | 24 +---- beacon_node/lighthouse_network/src/rpc/mod.rs | 59 ++++++---- .../lighthouse_network/src/service/mod.rs | 102 +++++++++--------- 3 files changed, 90 insertions(+), 95 deletions(-) diff --git a/beacon_node/lighthouse_network/src/rpc/handler.rs b/beacon_node/lighthouse_network/src/rpc/handler.rs index 08e55e50c9..6f338ebc8b 100644 --- a/beacon_node/lighthouse_network/src/rpc/handler.rs +++ b/beacon_node/lighthouse_network/src/rpc/handler.rs @@ -4,7 +4,7 @@ use super::methods::{GoodbyeReason, RPCCodedResponse, RPCResponseErrorCode}; use super::outbound::OutboundRequestContainer; use super::protocol::{InboundOutput, InboundRequest, Protocol, RPCError, RPCProtocol}; -use super::{RPCReceived, RPCResponse, RPCSend, ReqId}; +use super::{RPCReceived, RPCSend, ReqId}; use crate::rpc::outbound::{OutboundFramed, OutboundRequest}; use crate::rpc::protocol::InboundFramed; use fnv::FnvHashMap; @@ -14,8 +14,7 @@ use libp2p::swarm::handler::{ ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, DialUpgradeError, 
FullyNegotiatedInbound, FullyNegotiatedOutbound, StreamUpgradeError, SubstreamProtocol, }; -use libp2p::swarm::{ConnectionId, Stream}; -use libp2p::PeerId; +use libp2p::swarm::Stream; use slog::{crit, debug, trace}; use smallvec::SmallVec; use std::{ @@ -89,12 +88,6 @@ pub struct RPCHandler where E: EthSpec, { - /// This `ConnectionId`. - id: ConnectionId, - - /// The matching `PeerId` of this connection. - peer_id: PeerId, - /// The upgrade for inbound substreams. listen_protocol: SubstreamProtocol, ()>, @@ -225,16 +218,12 @@ where E: EthSpec, { pub fn new( - id: ConnectionId, - peer_id: PeerId, listen_protocol: SubstreamProtocol, ()>, fork_context: Arc, log: &slog::Logger, resp_timeout: Duration, ) -> Self { RPCHandler { - id, - peer_id, listen_protocol, events_out: SmallVec::new(), dial_queue: SmallVec::new(), @@ -903,15 +892,6 @@ where self.shutdown(None); } - // If we received a Ping, we queue a Pong response. - if let InboundRequest::Ping(ping) = req { - trace!(self.log, "Received Ping, queueing Pong";"connection_id" => %self.id, "peer_id" => %self.peer_id); - self.send_response( - self.current_inbound_substream_id, - RPCCodedResponse::Success(RPCResponse::Pong(ping)), - ); - } - self.events_out.push(HandlerEvent::Ok(RPCReceived::Request( self.current_inbound_substream_id, req, diff --git a/beacon_node/lighthouse_network/src/rpc/mod.rs b/beacon_node/lighthouse_network/src/rpc/mod.rs index eae206e022..4961c31d28 100644 --- a/beacon_node/lighthouse_network/src/rpc/mod.rs +++ b/beacon_node/lighthouse_network/src/rpc/mod.rs @@ -14,7 +14,7 @@ use libp2p::swarm::{ use libp2p::swarm::{ConnectionClosed, FromSwarm, SubstreamProtocol, THandlerInEvent}; use libp2p::PeerId; use rate_limiter::{RPCRateLimiter as RateLimiter, RateLimitedErr}; -use slog::{crit, debug, o}; +use slog::{crit, debug, o, trace}; use std::marker::PhantomData; use std::sync::Arc; use std::task::{Context, Poll}; @@ -132,6 +132,8 @@ pub struct RPC { log: slog::Logger, /// Networking constant values 
network_params: NetworkParams, + /// A sequential counter indicating when data gets modified. + seq_number: u64, } impl RPC { @@ -142,6 +144,7 @@ impl RPC { outbound_rate_limiter_config: Option, log: slog::Logger, network_params: NetworkParams, + seq_number: u64, ) -> Self { let log = log.new(o!("service" => "libp2p_rpc")); @@ -163,6 +166,7 @@ impl RPC { enable_light_client_server, log, network_params, + seq_number, } } @@ -214,6 +218,19 @@ impl RPC { event: RPCSend::Shutdown(id, reason), }); } + + pub fn update_seq_number(&mut self, seq_number: u64) { + self.seq_number = seq_number + } + + /// Send a Ping request to the destination `PeerId` via `ConnectionId`. + pub fn ping(&mut self, peer_id: PeerId, id: Id) { + let ping = Ping { + data: self.seq_number, + }; + trace!(self.log, "Sending Ping"; "peer_id" => %peer_id); + self.send_request(peer_id, id, OutboundRequest::Ping(ping)); + } } impl NetworkBehaviour for RPC @@ -245,8 +262,6 @@ where .log .new(slog::o!("peer_id" => peer_id.to_string(), "connection_id" => connection_id.to_string())); let handler = RPCHandler::new( - connection_id, - peer_id, protocol, self.fork_context.clone(), &log, @@ -280,8 +295,6 @@ where .new(slog::o!("peer_id" => peer_id.to_string(), "connection_id" => connection_id.to_string())); let handler = RPCHandler::new( - connection_id, - peer_id, protocol, self.fork_context.clone(), &log, @@ -359,14 +372,6 @@ where if let Some(limiter) = self.limiter.as_mut() { // check if the request is conformant to the quota match limiter.allows(&peer_id, &req) { - Ok(()) => { - // send the event to the user - self.events.push(ToSwarm::GenerateEvent(RPCMessage { - peer_id, - conn_id, - message: Ok(RPCReceived::Request(id, req)), - })) - } Err(RateLimitedErr::TooLarge) => { // we set the batch sizes, so this is a coding/config err for most protocols let protocol = req.versioned_protocol().protocol(); @@ -394,6 +399,7 @@ where "Rate limited. 
Request too large".into(), ), ); + return; } Err(RateLimitedErr::TooSoon(wait_time)) => { debug!(self.log, "Request exceeds the rate limit"; @@ -408,16 +414,29 @@ where format!("Wait {:?}", wait_time).into(), ), ); + return; } + // No rate limiting, continue. + Ok(_) => {} } - } else { - // No rate limiting, send the event to the user - self.events.push(ToSwarm::GenerateEvent(RPCMessage { - peer_id, - conn_id, - message: Ok(RPCReceived::Request(id, req)), - })) } + // If we received a Ping, we queue a Pong response. + if let InboundRequest::Ping(_) = req { + trace!(self.log, "Received Ping, queueing Pong";"connection_id" => %conn_id, "peer_id" => %peer_id); + self.send_response( + peer_id, + (conn_id, id), + RPCCodedResponse::Success(RPCResponse::Pong(Ping { + data: self.seq_number, + })), + ); + } + + self.events.push(ToSwarm::GenerateEvent(RPCMessage { + peer_id, + conn_id, + message: Ok(RPCReceived::Request(id, req)), + })); } HandlerEvent::Ok(rpc) => { self.events.push(ToSwarm::GenerateEvent(RPCMessage { diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index 43217ba5ab..ede8fdd13a 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -159,38 +159,36 @@ impl Network { .collect(); // set up a collection of variables accessible outside of the network crate - let network_globals = { - // Create an ENR or load from disk if appropriate - let enr = crate::discovery::enr::build_or_load_enr::( - local_keypair.clone(), - &config, - &ctx.enr_fork_id, - &log, - &ctx.chain_spec, - )?; - // Construct the metadata - let custody_subnet_count = if ctx.chain_spec.is_peer_das_scheduled() { - if config.subscribe_all_data_column_subnets { - Some(ctx.chain_spec.data_column_sidecar_subnet_count) - } else { - Some(ctx.chain_spec.custody_requirement) - } + // Create an ENR or load from disk if appropriate + let enr = 
crate::discovery::enr::build_or_load_enr::( + local_keypair.clone(), + &config, + &ctx.enr_fork_id, + &log, + &ctx.chain_spec, + )?; + + // Construct the metadata + let custody_subnet_count = ctx.chain_spec.is_peer_das_scheduled().then(|| { + if config.subscribe_all_data_column_subnets { + ctx.chain_spec.data_column_sidecar_subnet_count } else { - None - }; - let meta_data = - utils::load_or_build_metadata(&config.network_dir, custody_subnet_count, &log); - let globals = NetworkGlobals::new( - enr, - meta_data, - trusted_peers, - config.disable_peer_scoring, - &log, - config.clone(), - ctx.chain_spec.clone(), - ); - Arc::new(globals) - }; + ctx.chain_spec.custody_requirement + } + }); + let meta_data = + utils::load_or_build_metadata(&config.network_dir, custody_subnet_count, &log); + let seq_number = *meta_data.seq_number(); + let globals = NetworkGlobals::new( + enr, + meta_data, + trusted_peers, + config.disable_peer_scoring, + &log, + config.clone(), + ctx.chain_spec.clone(), + ); + let network_globals = Arc::new(globals); // Grab our local ENR FORK ID let enr_fork_id = network_globals @@ -338,6 +336,7 @@ impl Network { config.outbound_rate_limiter_config.clone(), log.clone(), network_params, + seq_number, ); let discovery = { @@ -1104,33 +1103,26 @@ impl Network { .sync_committee_bitfield::() .expect("Local discovery must have sync committee bitfield"); - { - // write lock scope - let mut meta_data = self.network_globals.local_metadata.write(); + // write lock scope + let mut meta_data_w = self.network_globals.local_metadata.write(); - *meta_data.seq_number_mut() += 1; - *meta_data.attnets_mut() = local_attnets; - if let Ok(syncnets) = meta_data.syncnets_mut() { - *syncnets = local_syncnets; - } + *meta_data_w.seq_number_mut() += 1; + *meta_data_w.attnets_mut() = local_attnets; + if let Ok(syncnets) = meta_data_w.syncnets_mut() { + *syncnets = local_syncnets; } + let seq_number = *meta_data_w.seq_number(); + let meta_data = meta_data_w.clone(); + + 
drop(meta_data_w); + self.eth2_rpc_mut().update_seq_number(seq_number); // Save the updated metadata to disk - utils::save_metadata_to_disk( - &self.network_dir, - self.network_globals.local_metadata.read().clone(), - &self.log, - ); + utils::save_metadata_to_disk(&self.network_dir, meta_data, &self.log); } /// Sends a Ping request to the peer. fn ping(&mut self, peer_id: PeerId) { - let ping = crate::rpc::Ping { - data: *self.network_globals.local_metadata.read().seq_number(), - }; - trace!(self.log, "Sending Ping"; "peer_id" => %peer_id); - let id = RequestId::Internal; - self.eth2_rpc_mut() - .send_request(peer_id, id, OutboundRequest::Ping(ping)); + self.eth2_rpc_mut().ping(peer_id, RequestId::Internal); } /// Sends a METADATA request to a peer. @@ -1400,8 +1392,12 @@ impl Network { fn inject_rpc_event(&mut self, event: RPCMessage) -> Option> { let peer_id = event.peer_id; - // Do not permit Inbound events from peers that are being disconnected, or RPC requests. - if !self.peer_manager().is_connected(&peer_id) { + // Do not permit Inbound events from peers that are being disconnected or RPC requests, + // but allow `RpcFailed` and `HandlerErr::Outbound` to be bubble up to sync for state management. + if !self.peer_manager().is_connected(&peer_id) + && (matches!(event.message, Err(HandlerErr::Inbound { .. 
})) + || matches!(event.message, Ok(RPCReceived::Request(..)))) + { debug!( self.log, "Ignoring rpc message of disconnecting peer"; From 82098e1ef72148fca69b4f56b887a1258da24b5b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Tue, 1 Oct 2024 02:36:17 +0100 Subject: [PATCH 32/66] add a unique integer id to Rpc requests (#6444) * add id to rpc requests * rename rpc request and response types for more accurate meaning * remove unrequired build_request function * remove unirequired Request wrapper types and unify Outbound and Inbound Request * add RequestId to NetworkMessage::SendResponse ,NetworkMessage::SendErrorResponse to be passed to Rpc::send_response --- beacon_node/lighthouse_network/src/lib.rs | 2 +- .../src/peer_manager/mod.rs | 14 +- .../lighthouse_network/src/rpc/codec.rs | 494 ++++++++++-------- .../lighthouse_network/src/rpc/handler.rs | 59 ++- .../lighthouse_network/src/rpc/methods.rs | 126 ++--- beacon_node/lighthouse_network/src/rpc/mod.rs | 102 +++- .../lighthouse_network/src/rpc/outbound.rs | 165 +----- .../lighthouse_network/src/rpc/protocol.rs | 201 ++++--- .../src/rpc/rate_limiter.rs | 11 +- .../src/rpc/self_limiter.rs | 12 +- .../src/service/api_types.rs | 112 +--- .../lighthouse_network/src/service/mod.rs | 291 +++++------ .../lighthouse_network/tests/rpc_tests.rs | 135 +++-- .../src/network_beacon_processor/mod.rs | 130 ++++- .../network_beacon_processor/rpc_methods.rs | 284 ++++++---- .../src/network_beacon_processor/tests.rs | 7 +- beacon_node/network/src/router.rs | 154 ++++-- beacon_node/network/src/service.rs | 18 +- .../network/src/sync/block_lookups/tests.rs | 17 +- .../network/src/sync/network_context.rs | 39 +- 20 files changed, 1327 insertions(+), 1046 deletions(-) diff --git a/beacon_node/lighthouse_network/src/lib.rs b/beacon_node/lighthouse_network/src/lib.rs index 5c12290b97..ced803add8 100644 --- a/beacon_node/lighthouse_network/src/lib.rs +++ b/beacon_node/lighthouse_network/src/lib.rs @@ -122,6 +122,6 @@ 
pub use peer_manager::{ ConnectionDirection, PeerConnectionStatus, PeerInfo, PeerManager, SyncInfo, SyncStatus, }; // pub use service::{load_private_key, Context, Libp2pEvent, Service, NETWORK_KEY_FILENAME}; -pub use service::api_types::{PeerRequestId, Request, Response}; +pub use service::api_types::{PeerRequestId, Response}; pub use service::utils::*; pub use service::{Gossipsub, NetworkEvent}; diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index 9f46f5daa0..1f066e9bbc 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -2,7 +2,7 @@ use crate::discovery::enr_ext::EnrExt; use crate::discovery::peer_id_to_node_id; -use crate::rpc::{GoodbyeReason, MetaData, Protocol, RPCError, RPCResponseErrorCode}; +use crate::rpc::{GoodbyeReason, MetaData, Protocol, RPCError, RpcErrorResponse}; use crate::service::TARGET_SUBNET_PEERS; use crate::{error, metrics, Gossipsub, NetworkGlobals, PeerId, Subnet, SubnetDiscovery}; use delay_map::HashSetDelay; @@ -526,8 +526,8 @@ impl PeerManager { PeerAction::HighToleranceError } RPCError::ErrorResponse(code, _) => match code { - RPCResponseErrorCode::Unknown => PeerAction::HighToleranceError, - RPCResponseErrorCode::ResourceUnavailable => { + RpcErrorResponse::Unknown => PeerAction::HighToleranceError, + RpcErrorResponse::ResourceUnavailable => { // Don't ban on this because we want to retry with a block by root request. 
if matches!( protocol, @@ -558,9 +558,9 @@ impl PeerManager { ConnectionDirection::Incoming => return, } } - RPCResponseErrorCode::ServerError => PeerAction::MidToleranceError, - RPCResponseErrorCode::InvalidRequest => PeerAction::LowToleranceError, - RPCResponseErrorCode::RateLimited => match protocol { + RpcErrorResponse::ServerError => PeerAction::MidToleranceError, + RpcErrorResponse::InvalidRequest => PeerAction::LowToleranceError, + RpcErrorResponse::RateLimited => match protocol { Protocol::Ping => PeerAction::MidToleranceError, Protocol::BlocksByRange => PeerAction::MidToleranceError, Protocol::BlocksByRoot => PeerAction::MidToleranceError, @@ -577,7 +577,7 @@ impl PeerManager { Protocol::MetaData => PeerAction::LowToleranceError, Protocol::Status => PeerAction::LowToleranceError, }, - RPCResponseErrorCode::BlobsNotFoundForBlock => PeerAction::LowToleranceError, + RpcErrorResponse::BlobsNotFoundForBlock => PeerAction::LowToleranceError, }, RPCError::SSZDecodeError(_) => PeerAction::Fatal, RPCError::UnsupportedProtocol => { diff --git a/beacon_node/lighthouse_network/src/rpc/codec.rs b/beacon_node/lighthouse_network/src/rpc/codec.rs index 224fb8a5f7..13af04f9b8 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec.rs @@ -2,7 +2,7 @@ use crate::rpc::methods::*; use crate::rpc::protocol::{ Encoding, ProtocolId, RPCError, SupportedProtocol, ERROR_TYPE_MAX, ERROR_TYPE_MIN, }; -use crate::rpc::{InboundRequest, OutboundRequest}; +use crate::rpc::RequestType; use libp2p::bytes::BufMut; use libp2p::bytes::BytesMut; use snap::read::FrameDecoder; @@ -61,23 +61,23 @@ impl SSZSnappyInboundCodec { /// Encodes RPC Responses sent to peers. 
fn encode_response( &mut self, - item: RPCCodedResponse, + item: RpcResponse, dst: &mut BytesMut, ) -> Result<(), RPCError> { let bytes = match &item { - RPCCodedResponse::Success(resp) => match &resp { - RPCResponse::Status(res) => res.as_ssz_bytes(), - RPCResponse::BlocksByRange(res) => res.as_ssz_bytes(), - RPCResponse::BlocksByRoot(res) => res.as_ssz_bytes(), - RPCResponse::BlobsByRange(res) => res.as_ssz_bytes(), - RPCResponse::BlobsByRoot(res) => res.as_ssz_bytes(), - RPCResponse::DataColumnsByRoot(res) => res.as_ssz_bytes(), - RPCResponse::DataColumnsByRange(res) => res.as_ssz_bytes(), - RPCResponse::LightClientBootstrap(res) => res.as_ssz_bytes(), - RPCResponse::LightClientOptimisticUpdate(res) => res.as_ssz_bytes(), - RPCResponse::LightClientFinalityUpdate(res) => res.as_ssz_bytes(), - RPCResponse::Pong(res) => res.data.as_ssz_bytes(), - RPCResponse::MetaData(res) => + RpcResponse::Success(resp) => match &resp { + RpcSuccessResponse::Status(res) => res.as_ssz_bytes(), + RpcSuccessResponse::BlocksByRange(res) => res.as_ssz_bytes(), + RpcSuccessResponse::BlocksByRoot(res) => res.as_ssz_bytes(), + RpcSuccessResponse::BlobsByRange(res) => res.as_ssz_bytes(), + RpcSuccessResponse::BlobsByRoot(res) => res.as_ssz_bytes(), + RpcSuccessResponse::DataColumnsByRoot(res) => res.as_ssz_bytes(), + RpcSuccessResponse::DataColumnsByRange(res) => res.as_ssz_bytes(), + RpcSuccessResponse::LightClientBootstrap(res) => res.as_ssz_bytes(), + RpcSuccessResponse::LightClientOptimisticUpdate(res) => res.as_ssz_bytes(), + RpcSuccessResponse::LightClientFinalityUpdate(res) => res.as_ssz_bytes(), + RpcSuccessResponse::Pong(res) => res.data.as_ssz_bytes(), + RpcSuccessResponse::MetaData(res) => // Encode the correct version of the MetaData response based on the negotiated version. 
{ match self.protocol.versioned_protocol { @@ -92,8 +92,8 @@ impl SSZSnappyInboundCodec { } } }, - RPCCodedResponse::Error(_, err) => err.as_ssz_bytes(), - RPCCodedResponse::StreamTermination(_) => { + RpcResponse::Error(_, err) => err.as_ssz_bytes(), + RpcResponse::StreamTermination(_) => { unreachable!("Code error - attempting to encode a stream termination") } }; @@ -126,10 +126,10 @@ impl SSZSnappyInboundCodec { } // Encoder for inbound streams: Encodes RPC Responses sent to peers. -impl Encoder> for SSZSnappyInboundCodec { +impl Encoder> for SSZSnappyInboundCodec { type Error = RPCError; - fn encode(&mut self, item: RPCCodedResponse, dst: &mut BytesMut) -> Result<(), Self::Error> { + fn encode(&mut self, item: RpcResponse, dst: &mut BytesMut) -> Result<(), Self::Error> { dst.clear(); dst.reserve(1); dst.put_u8( @@ -142,18 +142,18 @@ impl Encoder> for SSZSnappyInboundCodec { // Decoder for inbound streams: Decodes RPC requests from peers impl Decoder for SSZSnappyInboundCodec { - type Item = InboundRequest; + type Item = RequestType; type Error = RPCError; fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { if self.protocol.versioned_protocol == SupportedProtocol::MetaDataV1 { - return Ok(Some(InboundRequest::MetaData(MetadataRequest::new_v1()))); + return Ok(Some(RequestType::MetaData(MetadataRequest::new_v1()))); } if self.protocol.versioned_protocol == SupportedProtocol::MetaDataV2 { - return Ok(Some(InboundRequest::MetaData(MetadataRequest::new_v2()))); + return Ok(Some(RequestType::MetaData(MetadataRequest::new_v2()))); } if self.protocol.versioned_protocol == SupportedProtocol::MetaDataV3 { - return Ok(Some(InboundRequest::MetaData(MetadataRequest::new_v3()))); + return Ok(Some(RequestType::MetaData(MetadataRequest::new_v3()))); } let Some(length) = handle_length(&mut self.inner, &mut self.len, src)? else { return Ok(None); @@ -231,7 +231,10 @@ impl SSZSnappyOutboundCodec { } // Decode an Rpc response. 
- fn decode_response(&mut self, src: &mut BytesMut) -> Result>, RPCError> { + fn decode_response( + &mut self, + src: &mut BytesMut, + ) -> Result>, RPCError> { // Read the context bytes if required if self.protocol.has_context_bytes() && self.fork_name.is_none() { if src.len() >= CONTEXT_BYTES_LEN { @@ -318,28 +321,33 @@ impl SSZSnappyOutboundCodec { } // Encoder for outbound streams: Encodes RPC Requests to peers -impl Encoder> for SSZSnappyOutboundCodec { +impl Encoder> for SSZSnappyOutboundCodec { type Error = RPCError; - fn encode(&mut self, item: OutboundRequest, dst: &mut BytesMut) -> Result<(), Self::Error> { + fn encode(&mut self, item: RequestType, dst: &mut BytesMut) -> Result<(), Self::Error> { let bytes = match item { - OutboundRequest::Status(req) => req.as_ssz_bytes(), - OutboundRequest::Goodbye(req) => req.as_ssz_bytes(), - OutboundRequest::BlocksByRange(r) => match r { + RequestType::Status(req) => req.as_ssz_bytes(), + RequestType::Goodbye(req) => req.as_ssz_bytes(), + RequestType::BlocksByRange(r) => match r { OldBlocksByRangeRequest::V1(req) => req.as_ssz_bytes(), OldBlocksByRangeRequest::V2(req) => req.as_ssz_bytes(), }, - OutboundRequest::BlocksByRoot(r) => match r { + RequestType::BlocksByRoot(r) => match r { BlocksByRootRequest::V1(req) => req.block_roots.as_ssz_bytes(), BlocksByRootRequest::V2(req) => req.block_roots.as_ssz_bytes(), }, - OutboundRequest::BlobsByRange(req) => req.as_ssz_bytes(), - OutboundRequest::BlobsByRoot(req) => req.blob_ids.as_ssz_bytes(), - OutboundRequest::DataColumnsByRange(req) => req.as_ssz_bytes(), - OutboundRequest::DataColumnsByRoot(req) => req.data_column_ids.as_ssz_bytes(), - OutboundRequest::Ping(req) => req.as_ssz_bytes(), - OutboundRequest::MetaData(_) => return Ok(()), // no metadata to encode + RequestType::BlobsByRange(req) => req.as_ssz_bytes(), + RequestType::BlobsByRoot(req) => req.blob_ids.as_ssz_bytes(), + RequestType::DataColumnsByRange(req) => req.as_ssz_bytes(), + 
RequestType::DataColumnsByRoot(req) => req.data_column_ids.as_ssz_bytes(), + RequestType::Ping(req) => req.as_ssz_bytes(), + RequestType::LightClientBootstrap(req) => req.as_ssz_bytes(), + // no metadata to encode + RequestType::MetaData(_) + | RequestType::LightClientOptimisticUpdate + | RequestType::LightClientFinalityUpdate => return Ok(()), }; + // SSZ encoded bytes should be within `max_packet_size` if bytes.len() > self.max_packet_size { return Err(RPCError::InternalError( @@ -369,7 +377,7 @@ impl Encoder> for SSZSnappyOutboundCodec { // We prefer to decode blocks and attestations with extra knowledge about the chain to perform // faster verification checks before decoding entire blocks/attestations. impl Decoder for SSZSnappyOutboundCodec { - type Item = RPCCodedResponse; + type Item = RpcResponse; type Error = RPCError; fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { @@ -385,15 +393,15 @@ impl Decoder for SSZSnappyOutboundCodec { }); let inner_result = { - if RPCCodedResponse::::is_response(response_code) { + if RpcResponse::::is_response(response_code) { // decode an actual response and mutates the buffer if enough bytes have been read // returning the result. 
self.decode_response(src) - .map(|r| r.map(RPCCodedResponse::Success)) + .map(|r| r.map(RpcResponse::Success)) } else { // decode an error self.decode_error(src) - .map(|r| r.map(|resp| RPCCodedResponse::from_error(response_code, resp))) + .map(|r| r.map(|resp| RpcResponse::from_error(response_code, resp))) } }; // if the inner decoder was capable of decoding a chunk, we need to reset the current @@ -437,14 +445,14 @@ fn handle_error( fn context_bytes( protocol: &ProtocolId, fork_context: &ForkContext, - resp: &RPCCodedResponse, + resp: &RpcResponse, ) -> Option<[u8; CONTEXT_BYTES_LEN]> { // Add the context bytes if required if protocol.has_context_bytes() { - if let RPCCodedResponse::Success(rpc_variant) = resp { + if let RpcResponse::Success(rpc_variant) = resp { match rpc_variant { - RPCResponse::BlocksByRange(ref_box_block) - | RPCResponse::BlocksByRoot(ref_box_block) => { + RpcSuccessResponse::BlocksByRange(ref_box_block) + | RpcSuccessResponse::BlocksByRoot(ref_box_block) => { return match **ref_box_block { // NOTE: If you are adding another fork type here, be sure to modify the // `fork_context.to_context_bytes()` function to support it as well! @@ -468,10 +476,11 @@ fn context_bytes( } }; } - RPCResponse::BlobsByRange(_) | RPCResponse::BlobsByRoot(_) => { + RpcSuccessResponse::BlobsByRange(_) | RpcSuccessResponse::BlobsByRoot(_) => { return fork_context.to_context_bytes(ForkName::Deneb); } - RPCResponse::DataColumnsByRoot(d) | RPCResponse::DataColumnsByRange(d) => { + RpcSuccessResponse::DataColumnsByRoot(d) + | RpcSuccessResponse::DataColumnsByRange(d) => { // TODO(das): Remove deneb fork after `peerdas-devnet-2`. 
return if matches!( fork_context.spec.fork_name_at_slot::(d.slot()), @@ -482,20 +491,22 @@ fn context_bytes( fork_context.to_context_bytes(ForkName::Electra) }; } - RPCResponse::LightClientBootstrap(lc_bootstrap) => { + RpcSuccessResponse::LightClientBootstrap(lc_bootstrap) => { return lc_bootstrap .map_with_fork_name(|fork_name| fork_context.to_context_bytes(fork_name)); } - RPCResponse::LightClientOptimisticUpdate(lc_optimistic_update) => { + RpcSuccessResponse::LightClientOptimisticUpdate(lc_optimistic_update) => { return lc_optimistic_update .map_with_fork_name(|fork_name| fork_context.to_context_bytes(fork_name)); } - RPCResponse::LightClientFinalityUpdate(lc_finality_update) => { + RpcSuccessResponse::LightClientFinalityUpdate(lc_finality_update) => { return lc_finality_update .map_with_fork_name(|fork_name| fork_context.to_context_bytes(fork_name)); } // These will not pass the has_context_bytes() check - RPCResponse::Status(_) | RPCResponse::Pong(_) | RPCResponse::MetaData(_) => { + RpcSuccessResponse::Status(_) + | RpcSuccessResponse::Pong(_) + | RpcSuccessResponse::MetaData(_) => { return None; } } @@ -536,21 +547,21 @@ fn handle_rpc_request( versioned_protocol: SupportedProtocol, decoded_buffer: &[u8], spec: &ChainSpec, -) -> Result>, RPCError> { +) -> Result>, RPCError> { match versioned_protocol { - SupportedProtocol::StatusV1 => Ok(Some(InboundRequest::Status( + SupportedProtocol::StatusV1 => Ok(Some(RequestType::Status( StatusMessage::from_ssz_bytes(decoded_buffer)?, ))), - SupportedProtocol::GoodbyeV1 => Ok(Some(InboundRequest::Goodbye( + SupportedProtocol::GoodbyeV1 => Ok(Some(RequestType::Goodbye( GoodbyeReason::from_ssz_bytes(decoded_buffer)?, ))), - SupportedProtocol::BlocksByRangeV2 => Ok(Some(InboundRequest::BlocksByRange( + SupportedProtocol::BlocksByRangeV2 => Ok(Some(RequestType::BlocksByRange( OldBlocksByRangeRequest::V2(OldBlocksByRangeRequestV2::from_ssz_bytes(decoded_buffer)?), ))), - SupportedProtocol::BlocksByRangeV1 => 
Ok(Some(InboundRequest::BlocksByRange( + SupportedProtocol::BlocksByRangeV1 => Ok(Some(RequestType::BlocksByRange( OldBlocksByRangeRequest::V1(OldBlocksByRangeRequestV1::from_ssz_bytes(decoded_buffer)?), ))), - SupportedProtocol::BlocksByRootV2 => Ok(Some(InboundRequest::BlocksByRoot( + SupportedProtocol::BlocksByRootV2 => Ok(Some(RequestType::BlocksByRoot( BlocksByRootRequest::V2(BlocksByRootRequestV2 { block_roots: RuntimeVariableList::from_ssz_bytes( decoded_buffer, @@ -558,7 +569,7 @@ fn handle_rpc_request( )?, }), ))), - SupportedProtocol::BlocksByRootV1 => Ok(Some(InboundRequest::BlocksByRoot( + SupportedProtocol::BlocksByRootV1 => Ok(Some(RequestType::BlocksByRoot( BlocksByRootRequest::V1(BlocksByRootRequestV1 { block_roots: RuntimeVariableList::from_ssz_bytes( decoded_buffer, @@ -566,21 +577,21 @@ fn handle_rpc_request( )?, }), ))), - SupportedProtocol::BlobsByRangeV1 => Ok(Some(InboundRequest::BlobsByRange( + SupportedProtocol::BlobsByRangeV1 => Ok(Some(RequestType::BlobsByRange( BlobsByRangeRequest::from_ssz_bytes(decoded_buffer)?, ))), SupportedProtocol::BlobsByRootV1 => { - Ok(Some(InboundRequest::BlobsByRoot(BlobsByRootRequest { + Ok(Some(RequestType::BlobsByRoot(BlobsByRootRequest { blob_ids: RuntimeVariableList::from_ssz_bytes( decoded_buffer, spec.max_request_blob_sidecars as usize, )?, }))) } - SupportedProtocol::DataColumnsByRangeV1 => Ok(Some(InboundRequest::DataColumnsByRange( + SupportedProtocol::DataColumnsByRangeV1 => Ok(Some(RequestType::DataColumnsByRange( DataColumnsByRangeRequest::from_ssz_bytes(decoded_buffer)?, ))), - SupportedProtocol::DataColumnsByRootV1 => Ok(Some(InboundRequest::DataColumnsByRoot( + SupportedProtocol::DataColumnsByRootV1 => Ok(Some(RequestType::DataColumnsByRoot( DataColumnsByRootRequest { data_column_ids: RuntimeVariableList::from_ssz_bytes( decoded_buffer, @@ -588,19 +599,19 @@ fn handle_rpc_request( )?, }, ))), - SupportedProtocol::PingV1 => Ok(Some(InboundRequest::Ping(Ping { + SupportedProtocol::PingV1 => 
Ok(Some(RequestType::Ping(Ping { data: u64::from_ssz_bytes(decoded_buffer)?, }))), - SupportedProtocol::LightClientBootstrapV1 => Ok(Some( - InboundRequest::LightClientBootstrap(LightClientBootstrapRequest { + SupportedProtocol::LightClientBootstrapV1 => Ok(Some(RequestType::LightClientBootstrap( + LightClientBootstrapRequest { root: Hash256::from_ssz_bytes(decoded_buffer)?, - }), - )), + }, + ))), SupportedProtocol::LightClientOptimisticUpdateV1 => { - Ok(Some(InboundRequest::LightClientOptimisticUpdate)) + Ok(Some(RequestType::LightClientOptimisticUpdate)) } SupportedProtocol::LightClientFinalityUpdateV1 => { - Ok(Some(InboundRequest::LightClientFinalityUpdate)) + Ok(Some(RequestType::LightClientFinalityUpdate)) } // MetaData requests return early from InboundUpgrade and do not reach the decoder. // Handle this case just for completeness. @@ -610,7 +621,7 @@ fn handle_rpc_request( "Metadata requests shouldn't reach decoder", )) } else { - Ok(Some(InboundRequest::MetaData(MetadataRequest::new_v3()))) + Ok(Some(RequestType::MetaData(MetadataRequest::new_v3()))) } } SupportedProtocol::MetaDataV2 => { @@ -619,14 +630,14 @@ fn handle_rpc_request( "Metadata requests shouldn't reach decoder", )) } else { - Ok(Some(InboundRequest::MetaData(MetadataRequest::new_v2()))) + Ok(Some(RequestType::MetaData(MetadataRequest::new_v2()))) } } SupportedProtocol::MetaDataV1 => { if !decoded_buffer.is_empty() { Err(RPCError::InvalidData("Metadata request".to_string())) } else { - Ok(Some(InboundRequest::MetaData(MetadataRequest::new_v1()))) + Ok(Some(RequestType::MetaData(MetadataRequest::new_v1()))) } } } @@ -642,31 +653,33 @@ fn handle_rpc_response( versioned_protocol: SupportedProtocol, decoded_buffer: &[u8], fork_name: Option, -) -> Result>, RPCError> { +) -> Result>, RPCError> { match versioned_protocol { - SupportedProtocol::StatusV1 => Ok(Some(RPCResponse::Status( + SupportedProtocol::StatusV1 => Ok(Some(RpcSuccessResponse::Status( 
StatusMessage::from_ssz_bytes(decoded_buffer)?, ))), // This case should be unreachable as `Goodbye` has no response. SupportedProtocol::GoodbyeV1 => Err(RPCError::InvalidData( "Goodbye RPC message has no valid response".to_string(), )), - SupportedProtocol::BlocksByRangeV1 => Ok(Some(RPCResponse::BlocksByRange(Arc::new( - SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes(decoded_buffer)?), - )))), - SupportedProtocol::BlocksByRootV1 => Ok(Some(RPCResponse::BlocksByRoot(Arc::new( + SupportedProtocol::BlocksByRangeV1 => { + Ok(Some(RpcSuccessResponse::BlocksByRange(Arc::new( + SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes(decoded_buffer)?), + )))) + } + SupportedProtocol::BlocksByRootV1 => Ok(Some(RpcSuccessResponse::BlocksByRoot(Arc::new( SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes(decoded_buffer)?), )))), SupportedProtocol::BlobsByRangeV1 => match fork_name { - Some(ForkName::Deneb) => Ok(Some(RPCResponse::BlobsByRange(Arc::new( + Some(ForkName::Deneb) => Ok(Some(RpcSuccessResponse::BlobsByRange(Arc::new( BlobSidecar::from_ssz_bytes(decoded_buffer)?, )))), Some(_) => Err(RPCError::ErrorResponse( - RPCResponseErrorCode::InvalidRequest, + RpcErrorResponse::InvalidRequest, "Invalid fork name for blobs by range".to_string(), )), None => Err(RPCError::ErrorResponse( - RPCResponseErrorCode::InvalidRequest, + RpcErrorResponse::InvalidRequest, format!( "No context bytes provided for {:?} response", versioned_protocol @@ -674,15 +687,15 @@ fn handle_rpc_response( )), }, SupportedProtocol::BlobsByRootV1 => match fork_name { - Some(ForkName::Deneb) => Ok(Some(RPCResponse::BlobsByRoot(Arc::new( + Some(ForkName::Deneb) => Ok(Some(RpcSuccessResponse::BlobsByRoot(Arc::new( BlobSidecar::from_ssz_bytes(decoded_buffer)?, )))), Some(_) => Err(RPCError::ErrorResponse( - RPCResponseErrorCode::InvalidRequest, + RpcErrorResponse::InvalidRequest, "Invalid fork name for blobs by root".to_string(), )), None => Err(RPCError::ErrorResponse( - 
RPCResponseErrorCode::InvalidRequest, + RpcErrorResponse::InvalidRequest, format!( "No context bytes provided for {:?} response", versioned_protocol @@ -695,18 +708,18 @@ fn handle_rpc_response( // does not advertise the topic on deneb, simply allows it to decode it. Advertise // logic is in `SupportedTopic::currently_supported`. if fork_name.deneb_enabled() { - Ok(Some(RPCResponse::DataColumnsByRoot(Arc::new( + Ok(Some(RpcSuccessResponse::DataColumnsByRoot(Arc::new( DataColumnSidecar::from_ssz_bytes(decoded_buffer)?, )))) } else { Err(RPCError::ErrorResponse( - RPCResponseErrorCode::InvalidRequest, + RpcErrorResponse::InvalidRequest, "Invalid fork name for data columns by root".to_string(), )) } } None => Err(RPCError::ErrorResponse( - RPCResponseErrorCode::InvalidRequest, + RpcErrorResponse::InvalidRequest, format!( "No context bytes provided for {:?} response", versioned_protocol @@ -716,36 +729,36 @@ fn handle_rpc_response( SupportedProtocol::DataColumnsByRangeV1 => match fork_name { Some(fork_name) => { if fork_name.deneb_enabled() { - Ok(Some(RPCResponse::DataColumnsByRange(Arc::new( + Ok(Some(RpcSuccessResponse::DataColumnsByRange(Arc::new( DataColumnSidecar::from_ssz_bytes(decoded_buffer)?, )))) } else { Err(RPCError::ErrorResponse( - RPCResponseErrorCode::InvalidRequest, + RpcErrorResponse::InvalidRequest, "Invalid fork name for data columns by range".to_string(), )) } } None => Err(RPCError::ErrorResponse( - RPCResponseErrorCode::InvalidRequest, + RpcErrorResponse::InvalidRequest, format!( "No context bytes provided for {:?} response", versioned_protocol ), )), }, - SupportedProtocol::PingV1 => Ok(Some(RPCResponse::Pong(Ping { + SupportedProtocol::PingV1 => Ok(Some(RpcSuccessResponse::Pong(Ping { data: u64::from_ssz_bytes(decoded_buffer)?, }))), - SupportedProtocol::MetaDataV1 => Ok(Some(RPCResponse::MetaData(MetaData::V1( + SupportedProtocol::MetaDataV1 => Ok(Some(RpcSuccessResponse::MetaData(MetaData::V1( MetaDataV1::from_ssz_bytes(decoded_buffer)?, 
)))), SupportedProtocol::LightClientBootstrapV1 => match fork_name { - Some(fork_name) => Ok(Some(RPCResponse::LightClientBootstrap(Arc::new( + Some(fork_name) => Ok(Some(RpcSuccessResponse::LightClientBootstrap(Arc::new( LightClientBootstrap::from_ssz_bytes(decoded_buffer, fork_name)?, )))), None => Err(RPCError::ErrorResponse( - RPCResponseErrorCode::InvalidRequest, + RpcErrorResponse::InvalidRequest, format!( "No context bytes provided for {:?} response", versioned_protocol @@ -753,11 +766,14 @@ fn handle_rpc_response( )), }, SupportedProtocol::LightClientOptimisticUpdateV1 => match fork_name { - Some(fork_name) => Ok(Some(RPCResponse::LightClientOptimisticUpdate(Arc::new( - LightClientOptimisticUpdate::from_ssz_bytes(decoded_buffer, fork_name)?, - )))), + Some(fork_name) => Ok(Some(RpcSuccessResponse::LightClientOptimisticUpdate( + Arc::new(LightClientOptimisticUpdate::from_ssz_bytes( + decoded_buffer, + fork_name, + )?), + ))), None => Err(RPCError::ErrorResponse( - RPCResponseErrorCode::InvalidRequest, + RpcErrorResponse::InvalidRequest, format!( "No context bytes provided for {:?} response", versioned_protocol @@ -765,11 +781,14 @@ fn handle_rpc_response( )), }, SupportedProtocol::LightClientFinalityUpdateV1 => match fork_name { - Some(fork_name) => Ok(Some(RPCResponse::LightClientFinalityUpdate(Arc::new( - LightClientFinalityUpdate::from_ssz_bytes(decoded_buffer, fork_name)?, - )))), + Some(fork_name) => Ok(Some(RpcSuccessResponse::LightClientFinalityUpdate( + Arc::new(LightClientFinalityUpdate::from_ssz_bytes( + decoded_buffer, + fork_name, + )?), + ))), None => Err(RPCError::ErrorResponse( - RPCResponseErrorCode::InvalidRequest, + RpcErrorResponse::InvalidRequest, format!( "No context bytes provided for {:?} response", versioned_protocol @@ -777,40 +796,40 @@ fn handle_rpc_response( )), }, // MetaData V2/V3 responses have no context bytes, so behave similarly to V1 responses - SupportedProtocol::MetaDataV3 => Ok(Some(RPCResponse::MetaData(MetaData::V3( + 
SupportedProtocol::MetaDataV3 => Ok(Some(RpcSuccessResponse::MetaData(MetaData::V3( MetaDataV3::from_ssz_bytes(decoded_buffer)?, )))), - SupportedProtocol::MetaDataV2 => Ok(Some(RPCResponse::MetaData(MetaData::V2( + SupportedProtocol::MetaDataV2 => Ok(Some(RpcSuccessResponse::MetaData(MetaData::V2( MetaDataV2::from_ssz_bytes(decoded_buffer)?, )))), SupportedProtocol::BlocksByRangeV2 => match fork_name { - Some(ForkName::Altair) => Ok(Some(RPCResponse::BlocksByRange(Arc::new( + Some(ForkName::Altair) => Ok(Some(RpcSuccessResponse::BlocksByRange(Arc::new( SignedBeaconBlock::Altair(SignedBeaconBlockAltair::from_ssz_bytes(decoded_buffer)?), )))), - Some(ForkName::Base) => Ok(Some(RPCResponse::BlocksByRange(Arc::new( + Some(ForkName::Base) => Ok(Some(RpcSuccessResponse::BlocksByRange(Arc::new( SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes(decoded_buffer)?), )))), - Some(ForkName::Bellatrix) => Ok(Some(RPCResponse::BlocksByRange(Arc::new( + Some(ForkName::Bellatrix) => Ok(Some(RpcSuccessResponse::BlocksByRange(Arc::new( SignedBeaconBlock::Bellatrix(SignedBeaconBlockBellatrix::from_ssz_bytes( decoded_buffer, )?), )))), - Some(ForkName::Capella) => Ok(Some(RPCResponse::BlocksByRange(Arc::new( + Some(ForkName::Capella) => Ok(Some(RpcSuccessResponse::BlocksByRange(Arc::new( SignedBeaconBlock::Capella(SignedBeaconBlockCapella::from_ssz_bytes( decoded_buffer, )?), )))), - Some(ForkName::Deneb) => Ok(Some(RPCResponse::BlocksByRange(Arc::new( + Some(ForkName::Deneb) => Ok(Some(RpcSuccessResponse::BlocksByRange(Arc::new( SignedBeaconBlock::Deneb(SignedBeaconBlockDeneb::from_ssz_bytes(decoded_buffer)?), )))), - Some(ForkName::Electra) => Ok(Some(RPCResponse::BlocksByRange(Arc::new( + Some(ForkName::Electra) => Ok(Some(RpcSuccessResponse::BlocksByRange(Arc::new( SignedBeaconBlock::Electra(SignedBeaconBlockElectra::from_ssz_bytes( decoded_buffer, )?), )))), None => Err(RPCError::ErrorResponse( - RPCResponseErrorCode::InvalidRequest, + 
RpcErrorResponse::InvalidRequest, format!( "No context bytes provided for {:?} response", versioned_protocol @@ -818,32 +837,32 @@ fn handle_rpc_response( )), }, SupportedProtocol::BlocksByRootV2 => match fork_name { - Some(ForkName::Altair) => Ok(Some(RPCResponse::BlocksByRoot(Arc::new( + Some(ForkName::Altair) => Ok(Some(RpcSuccessResponse::BlocksByRoot(Arc::new( SignedBeaconBlock::Altair(SignedBeaconBlockAltair::from_ssz_bytes(decoded_buffer)?), )))), - Some(ForkName::Base) => Ok(Some(RPCResponse::BlocksByRoot(Arc::new( + Some(ForkName::Base) => Ok(Some(RpcSuccessResponse::BlocksByRoot(Arc::new( SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes(decoded_buffer)?), )))), - Some(ForkName::Bellatrix) => Ok(Some(RPCResponse::BlocksByRoot(Arc::new( + Some(ForkName::Bellatrix) => Ok(Some(RpcSuccessResponse::BlocksByRoot(Arc::new( SignedBeaconBlock::Bellatrix(SignedBeaconBlockBellatrix::from_ssz_bytes( decoded_buffer, )?), )))), - Some(ForkName::Capella) => Ok(Some(RPCResponse::BlocksByRoot(Arc::new( + Some(ForkName::Capella) => Ok(Some(RpcSuccessResponse::BlocksByRoot(Arc::new( SignedBeaconBlock::Capella(SignedBeaconBlockCapella::from_ssz_bytes( decoded_buffer, )?), )))), - Some(ForkName::Deneb) => Ok(Some(RPCResponse::BlocksByRoot(Arc::new( + Some(ForkName::Deneb) => Ok(Some(RpcSuccessResponse::BlocksByRoot(Arc::new( SignedBeaconBlock::Deneb(SignedBeaconBlockDeneb::from_ssz_bytes(decoded_buffer)?), )))), - Some(ForkName::Electra) => Ok(Some(RPCResponse::BlocksByRoot(Arc::new( + Some(ForkName::Electra) => Ok(Some(RpcSuccessResponse::BlocksByRoot(Arc::new( SignedBeaconBlock::Electra(SignedBeaconBlockElectra::from_ssz_bytes( decoded_buffer, )?), )))), None => Err(RPCError::ErrorResponse( - RPCResponseErrorCode::InvalidRequest, + RpcErrorResponse::InvalidRequest, format!( "No context bytes provided for {:?} response", versioned_protocol @@ -864,7 +883,7 @@ fn context_bytes_to_fork_name( .ok_or_else(|| { let encoded = hex::encode(context_bytes); 
RPCError::ErrorResponse( - RPCResponseErrorCode::InvalidRequest, + RpcErrorResponse::InvalidRequest, format!( "Context bytes {} do not correspond to a valid fork", encoded @@ -1063,7 +1082,7 @@ mod tests { /// Encodes the given protocol response as bytes. fn encode_response( protocol: SupportedProtocol, - message: RPCCodedResponse, + message: RpcResponse, fork_name: ForkName, spec: &ChainSpec, ) -> Result { @@ -1113,7 +1132,7 @@ mod tests { message: &mut BytesMut, fork_name: ForkName, spec: &ChainSpec, - ) -> Result>, RPCError> { + ) -> Result>, RPCError> { let snappy_protocol_id = ProtocolId::new(protocol, Encoding::SSZSnappy); let fork_context = Arc::new(fork_context(fork_name)); let max_packet_size = max_rpc_size(&fork_context, spec.max_chunk_size as usize); @@ -1126,20 +1145,16 @@ mod tests { /// Encodes the provided protocol message as bytes and tries to decode the encoding bytes. fn encode_then_decode_response( protocol: SupportedProtocol, - message: RPCCodedResponse, + message: RpcResponse, fork_name: ForkName, spec: &ChainSpec, - ) -> Result>, RPCError> { + ) -> Result>, RPCError> { let mut encoded = encode_response(protocol, message, fork_name, spec)?; decode_response(protocol, &mut encoded, fork_name, spec) } /// Verifies that requests we send are encoded in a way that we would correctly decode too. 
- fn encode_then_decode_request( - req: OutboundRequest, - fork_name: ForkName, - spec: &ChainSpec, - ) { + fn encode_then_decode_request(req: RequestType, fork_name: ForkName, spec: &ChainSpec) { let fork_context = Arc::new(fork_context(fork_name)); let max_packet_size = max_rpc_size(&fork_context, spec.max_chunk_size as usize); let protocol = ProtocolId::new(req.versioned_protocol(), Encoding::SSZSnappy); @@ -1162,36 +1177,43 @@ mod tests { ) }); match req { - OutboundRequest::Status(status) => { - assert_eq!(decoded, InboundRequest::Status(status)) + RequestType::Status(status) => { + assert_eq!(decoded, RequestType::Status(status)) } - OutboundRequest::Goodbye(goodbye) => { - assert_eq!(decoded, InboundRequest::Goodbye(goodbye)) + RequestType::Goodbye(goodbye) => { + assert_eq!(decoded, RequestType::Goodbye(goodbye)) } - OutboundRequest::BlocksByRange(bbrange) => { - assert_eq!(decoded, InboundRequest::BlocksByRange(bbrange)) + RequestType::BlocksByRange(bbrange) => { + assert_eq!(decoded, RequestType::BlocksByRange(bbrange)) } - OutboundRequest::BlocksByRoot(bbroot) => { - assert_eq!(decoded, InboundRequest::BlocksByRoot(bbroot)) + RequestType::BlocksByRoot(bbroot) => { + assert_eq!(decoded, RequestType::BlocksByRoot(bbroot)) } - OutboundRequest::BlobsByRange(blbrange) => { - assert_eq!(decoded, InboundRequest::BlobsByRange(blbrange)) + RequestType::BlobsByRange(blbrange) => { + assert_eq!(decoded, RequestType::BlobsByRange(blbrange)) } - OutboundRequest::BlobsByRoot(bbroot) => { - assert_eq!(decoded, InboundRequest::BlobsByRoot(bbroot)) + RequestType::BlobsByRoot(bbroot) => { + assert_eq!(decoded, RequestType::BlobsByRoot(bbroot)) } - OutboundRequest::DataColumnsByRoot(dcbroot) => { - assert_eq!(decoded, InboundRequest::DataColumnsByRoot(dcbroot)) + RequestType::DataColumnsByRoot(dcbroot) => { + assert_eq!(decoded, RequestType::DataColumnsByRoot(dcbroot)) } - OutboundRequest::DataColumnsByRange(dcbrange) => { - assert_eq!(decoded, 
InboundRequest::DataColumnsByRange(dcbrange)) + RequestType::DataColumnsByRange(dcbrange) => { + assert_eq!(decoded, RequestType::DataColumnsByRange(dcbrange)) } - OutboundRequest::Ping(ping) => { - assert_eq!(decoded, InboundRequest::Ping(ping)) + RequestType::Ping(ping) => { + assert_eq!(decoded, RequestType::Ping(ping)) } - OutboundRequest::MetaData(metadata) => { - assert_eq!(decoded, InboundRequest::MetaData(metadata)) + RequestType::MetaData(metadata) => { + assert_eq!(decoded, RequestType::MetaData(metadata)) } + RequestType::LightClientBootstrap(light_client_bootstrap_request) => { + assert_eq!( + decoded, + RequestType::LightClientBootstrap(light_client_bootstrap_request) + ) + } + RequestType::LightClientOptimisticUpdate | RequestType::LightClientFinalityUpdate => {} } } @@ -1203,31 +1225,33 @@ mod tests { assert_eq!( encode_then_decode_response( SupportedProtocol::StatusV1, - RPCCodedResponse::Success(RPCResponse::Status(status_message())), + RpcResponse::Success(RpcSuccessResponse::Status(status_message())), ForkName::Base, &chain_spec, ), - Ok(Some(RPCResponse::Status(status_message()))) + Ok(Some(RpcSuccessResponse::Status(status_message()))) ); assert_eq!( encode_then_decode_response( SupportedProtocol::PingV1, - RPCCodedResponse::Success(RPCResponse::Pong(ping_message())), + RpcResponse::Success(RpcSuccessResponse::Pong(ping_message())), ForkName::Base, &chain_spec, ), - Ok(Some(RPCResponse::Pong(ping_message()))) + Ok(Some(RpcSuccessResponse::Pong(ping_message()))) ); assert_eq!( encode_then_decode_response( SupportedProtocol::BlocksByRangeV1, - RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(empty_base_block()))), + RpcResponse::Success(RpcSuccessResponse::BlocksByRange(Arc::new( + empty_base_block() + ))), ForkName::Base, &chain_spec, ), - Ok(Some(RPCResponse::BlocksByRange(Arc::new( + Ok(Some(RpcSuccessResponse::BlocksByRange(Arc::new( empty_base_block() )))) ); @@ -1236,7 +1260,9 @@ mod tests { matches!( 
encode_then_decode_response( SupportedProtocol::BlocksByRangeV1, - RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(altair_block()))), + RpcResponse::Success(RpcSuccessResponse::BlocksByRange(Arc::new( + altair_block() + ))), ForkName::Altair, &chain_spec, ) @@ -1249,20 +1275,24 @@ mod tests { assert_eq!( encode_then_decode_response( SupportedProtocol::BlocksByRootV1, - RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(empty_base_block()))), + RpcResponse::Success(RpcSuccessResponse::BlocksByRoot(Arc::new( + empty_base_block() + ))), ForkName::Base, &chain_spec, ), - Ok(Some(RPCResponse::BlocksByRoot( - Arc::new(empty_base_block()) - ))) + Ok(Some(RpcSuccessResponse::BlocksByRoot(Arc::new( + empty_base_block() + )))) ); assert!( matches!( encode_then_decode_response( SupportedProtocol::BlocksByRootV1, - RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(altair_block()))), + RpcResponse::Success(RpcSuccessResponse::BlocksByRoot( + Arc::new(altair_block()) + )), ForkName::Altair, &chain_spec, ) @@ -1275,65 +1305,65 @@ mod tests { assert_eq!( encode_then_decode_response( SupportedProtocol::MetaDataV1, - RPCCodedResponse::Success(RPCResponse::MetaData(metadata())), + RpcResponse::Success(RpcSuccessResponse::MetaData(metadata())), ForkName::Base, &chain_spec, ), - Ok(Some(RPCResponse::MetaData(metadata()))), + Ok(Some(RpcSuccessResponse::MetaData(metadata()))), ); // A MetaDataV2 still encodes as a MetaDataV1 since version is Version::V1 assert_eq!( encode_then_decode_response( SupportedProtocol::MetaDataV1, - RPCCodedResponse::Success(RPCResponse::MetaData(metadata_v2())), + RpcResponse::Success(RpcSuccessResponse::MetaData(metadata_v2())), ForkName::Base, &chain_spec, ), - Ok(Some(RPCResponse::MetaData(metadata()))), + Ok(Some(RpcSuccessResponse::MetaData(metadata()))), ); // A MetaDataV3 still encodes as a MetaDataV2 since version is Version::V2 assert_eq!( encode_then_decode_response( SupportedProtocol::MetaDataV2, - 
RPCCodedResponse::Success(RPCResponse::MetaData(metadata_v3())), + RpcResponse::Success(RpcSuccessResponse::MetaData(metadata_v3())), ForkName::Base, &chain_spec, ), - Ok(Some(RPCResponse::MetaData(metadata_v2()))), + Ok(Some(RpcSuccessResponse::MetaData(metadata_v2()))), ); assert_eq!( encode_then_decode_response( SupportedProtocol::BlobsByRangeV1, - RPCCodedResponse::Success(RPCResponse::BlobsByRange(empty_blob_sidecar())), + RpcResponse::Success(RpcSuccessResponse::BlobsByRange(empty_blob_sidecar())), ForkName::Deneb, &chain_spec ), - Ok(Some(RPCResponse::BlobsByRange(empty_blob_sidecar()))), + Ok(Some(RpcSuccessResponse::BlobsByRange(empty_blob_sidecar()))), ); assert_eq!( encode_then_decode_response( SupportedProtocol::BlobsByRootV1, - RPCCodedResponse::Success(RPCResponse::BlobsByRoot(empty_blob_sidecar())), + RpcResponse::Success(RpcSuccessResponse::BlobsByRoot(empty_blob_sidecar())), ForkName::Deneb, &chain_spec ), - Ok(Some(RPCResponse::BlobsByRoot(empty_blob_sidecar()))), + Ok(Some(RpcSuccessResponse::BlobsByRoot(empty_blob_sidecar()))), ); assert_eq!( encode_then_decode_response( SupportedProtocol::DataColumnsByRangeV1, - RPCCodedResponse::Success(RPCResponse::DataColumnsByRange( + RpcResponse::Success(RpcSuccessResponse::DataColumnsByRange( empty_data_column_sidecar() )), ForkName::Deneb, &chain_spec ), - Ok(Some(RPCResponse::DataColumnsByRange( + Ok(Some(RpcSuccessResponse::DataColumnsByRange( empty_data_column_sidecar() ))), ); @@ -1341,13 +1371,13 @@ mod tests { assert_eq!( encode_then_decode_response( SupportedProtocol::DataColumnsByRootV1, - RPCCodedResponse::Success(RPCResponse::DataColumnsByRoot( + RpcResponse::Success(RpcSuccessResponse::DataColumnsByRoot( empty_data_column_sidecar() )), ForkName::Deneb, &chain_spec ), - Ok(Some(RPCResponse::DataColumnsByRoot( + Ok(Some(RpcSuccessResponse::DataColumnsByRoot( empty_data_column_sidecar() ))), ); @@ -1361,11 +1391,13 @@ mod tests { assert_eq!( encode_then_decode_response( 
SupportedProtocol::BlocksByRangeV2, - RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(empty_base_block()))), + RpcResponse::Success(RpcSuccessResponse::BlocksByRange(Arc::new( + empty_base_block() + ))), ForkName::Base, &chain_spec, ), - Ok(Some(RPCResponse::BlocksByRange(Arc::new( + Ok(Some(RpcSuccessResponse::BlocksByRange(Arc::new( empty_base_block() )))) ); @@ -1376,11 +1408,13 @@ mod tests { assert_eq!( encode_then_decode_response( SupportedProtocol::BlocksByRangeV2, - RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(empty_base_block()))), + RpcResponse::Success(RpcSuccessResponse::BlocksByRange(Arc::new( + empty_base_block() + ))), ForkName::Altair, &chain_spec, ), - Ok(Some(RPCResponse::BlocksByRange(Arc::new( + Ok(Some(RpcSuccessResponse::BlocksByRange(Arc::new( empty_base_block() )))) ); @@ -1388,11 +1422,13 @@ mod tests { assert_eq!( encode_then_decode_response( SupportedProtocol::BlocksByRangeV2, - RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(altair_block()))), + RpcResponse::Success(RpcSuccessResponse::BlocksByRange(Arc::new(altair_block()))), ForkName::Altair, &chain_spec, ), - Ok(Some(RPCResponse::BlocksByRange(Arc::new(altair_block())))) + Ok(Some(RpcSuccessResponse::BlocksByRange(Arc::new( + altair_block() + )))) ); let bellatrix_block_small = @@ -1403,13 +1439,13 @@ mod tests { assert_eq!( encode_then_decode_response( SupportedProtocol::BlocksByRangeV2, - RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new( + RpcResponse::Success(RpcSuccessResponse::BlocksByRange(Arc::new( bellatrix_block_small.clone() ))), ForkName::Bellatrix, &chain_spec, ), - Ok(Some(RPCResponse::BlocksByRange(Arc::new( + Ok(Some(RpcSuccessResponse::BlocksByRange(Arc::new( bellatrix_block_small.clone() )))) ); @@ -1435,13 +1471,15 @@ mod tests { assert_eq!( encode_then_decode_response( SupportedProtocol::BlocksByRootV2, - RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(empty_base_block()))), + 
RpcResponse::Success(RpcSuccessResponse::BlocksByRoot(Arc::new( + empty_base_block() + ))), ForkName::Base, &chain_spec, ), - Ok(Some(RPCResponse::BlocksByRoot( - Arc::new(empty_base_block()) - ))), + Ok(Some(RpcSuccessResponse::BlocksByRoot(Arc::new( + empty_base_block() + )))), ); // Decode the smallest possible base block when current fork is altair @@ -1450,35 +1488,39 @@ mod tests { assert_eq!( encode_then_decode_response( SupportedProtocol::BlocksByRootV2, - RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(empty_base_block()))), + RpcResponse::Success(RpcSuccessResponse::BlocksByRoot(Arc::new( + empty_base_block() + ))), ForkName::Altair, &chain_spec, ), - Ok(Some(RPCResponse::BlocksByRoot( - Arc::new(empty_base_block()) - ))) + Ok(Some(RpcSuccessResponse::BlocksByRoot(Arc::new( + empty_base_block() + )))) ); assert_eq!( encode_then_decode_response( SupportedProtocol::BlocksByRootV2, - RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(altair_block()))), + RpcResponse::Success(RpcSuccessResponse::BlocksByRoot(Arc::new(altair_block()))), ForkName::Altair, &chain_spec, ), - Ok(Some(RPCResponse::BlocksByRoot(Arc::new(altair_block())))) + Ok(Some(RpcSuccessResponse::BlocksByRoot(Arc::new( + altair_block() + )))) ); assert_eq!( encode_then_decode_response( SupportedProtocol::BlocksByRootV2, - RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new( + RpcResponse::Success(RpcSuccessResponse::BlocksByRoot(Arc::new( bellatrix_block_small.clone() ))), ForkName::Bellatrix, &chain_spec, ), - Ok(Some(RPCResponse::BlocksByRoot(Arc::new( + Ok(Some(RpcSuccessResponse::BlocksByRoot(Arc::new( bellatrix_block_small )))) ); @@ -1505,21 +1547,21 @@ mod tests { assert_eq!( encode_then_decode_response( SupportedProtocol::MetaDataV2, - RPCCodedResponse::Success(RPCResponse::MetaData(metadata())), + RpcResponse::Success(RpcSuccessResponse::MetaData(metadata())), ForkName::Base, &chain_spec, ), - Ok(Some(RPCResponse::MetaData(metadata_v2()))) + 
Ok(Some(RpcSuccessResponse::MetaData(metadata_v2()))) ); assert_eq!( encode_then_decode_response( SupportedProtocol::MetaDataV2, - RPCCodedResponse::Success(RPCResponse::MetaData(metadata_v2())), + RpcResponse::Success(RpcSuccessResponse::MetaData(metadata_v2())), ForkName::Altair, &chain_spec, ), - Ok(Some(RPCResponse::MetaData(metadata_v2()))) + Ok(Some(RpcSuccessResponse::MetaData(metadata_v2()))) ); } @@ -1533,7 +1575,9 @@ mod tests { // Removing context bytes for v2 messages should error let mut encoded_bytes = encode_response( SupportedProtocol::BlocksByRangeV2, - RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(empty_base_block()))), + RpcResponse::Success(RpcSuccessResponse::BlocksByRange(Arc::new( + empty_base_block(), + ))), ForkName::Base, &chain_spec, ) @@ -1549,12 +1593,14 @@ mod tests { &chain_spec, ) .unwrap_err(), - RPCError::ErrorResponse(RPCResponseErrorCode::InvalidRequest, _), + RPCError::ErrorResponse(RpcErrorResponse::InvalidRequest, _), )); let mut encoded_bytes = encode_response( SupportedProtocol::BlocksByRootV2, - RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(empty_base_block()))), + RpcResponse::Success(RpcSuccessResponse::BlocksByRoot(Arc::new( + empty_base_block(), + ))), ForkName::Base, &chain_spec, ) @@ -1570,13 +1616,15 @@ mod tests { &chain_spec, ) .unwrap_err(), - RPCError::ErrorResponse(RPCResponseErrorCode::InvalidRequest, _), + RPCError::ErrorResponse(RpcErrorResponse::InvalidRequest, _), )); // Trying to decode a base block with altair context bytes should give ssz decoding error let mut encoded_bytes = encode_response( SupportedProtocol::BlocksByRangeV2, - RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(empty_base_block()))), + RpcResponse::Success(RpcSuccessResponse::BlocksByRange(Arc::new( + empty_base_block(), + ))), ForkName::Altair, &chain_spec, ) @@ -1601,7 +1649,7 @@ mod tests { // Trying to decode an altair block with base context bytes should give ssz decoding error let mut 
encoded_bytes = encode_response( SupportedProtocol::BlocksByRootV2, - RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(altair_block()))), + RpcResponse::Success(RpcSuccessResponse::BlocksByRoot(Arc::new(altair_block()))), ForkName::Altair, &chain_spec, ) @@ -1628,7 +1676,7 @@ mod tests { encoded_bytes.extend_from_slice( &encode_response( SupportedProtocol::MetaDataV2, - RPCCodedResponse::Success(RPCResponse::MetaData(metadata())), + RpcResponse::Success(RpcSuccessResponse::MetaData(metadata())), ForkName::Altair, &chain_spec, ) @@ -1646,7 +1694,9 @@ mod tests { // Sending context bytes which do not correspond to any fork should return an error let mut encoded_bytes = encode_response( SupportedProtocol::BlocksByRootV2, - RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(empty_base_block()))), + RpcResponse::Success(RpcSuccessResponse::BlocksByRoot(Arc::new( + empty_base_block(), + ))), ForkName::Altair, &chain_spec, ) @@ -1664,13 +1714,15 @@ mod tests { &chain_spec, ) .unwrap_err(), - RPCError::ErrorResponse(RPCResponseErrorCode::InvalidRequest, _), + RPCError::ErrorResponse(RpcErrorResponse::InvalidRequest, _), )); // Sending bytes less than context bytes length should wait for more bytes by returning `Ok(None)` let mut encoded_bytes = encode_response( SupportedProtocol::BlocksByRootV2, - RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(empty_base_block()))), + RpcResponse::Success(RpcSuccessResponse::BlocksByRoot(Arc::new( + empty_base_block(), + ))), ForkName::Altair, &chain_spec, ) @@ -1693,20 +1745,20 @@ mod tests { fn test_encode_then_decode_request() { let chain_spec = Spec::default_spec(); - let requests: &[OutboundRequest] = &[ - OutboundRequest::Ping(ping_message()), - OutboundRequest::Status(status_message()), - OutboundRequest::Goodbye(GoodbyeReason::Fault), - OutboundRequest::BlocksByRange(bbrange_request_v1()), - OutboundRequest::BlocksByRange(bbrange_request_v2()), - 
OutboundRequest::BlocksByRoot(bbroot_request_v1(&chain_spec)), - OutboundRequest::BlocksByRoot(bbroot_request_v2(&chain_spec)), - OutboundRequest::MetaData(MetadataRequest::new_v1()), - OutboundRequest::BlobsByRange(blbrange_request()), - OutboundRequest::BlobsByRoot(blbroot_request(&chain_spec)), - OutboundRequest::DataColumnsByRange(dcbrange_request()), - OutboundRequest::DataColumnsByRoot(dcbroot_request(&chain_spec)), - OutboundRequest::MetaData(MetadataRequest::new_v2()), + let requests: &[RequestType] = &[ + RequestType::Ping(ping_message()), + RequestType::Status(status_message()), + RequestType::Goodbye(GoodbyeReason::Fault), + RequestType::BlocksByRange(bbrange_request_v1()), + RequestType::BlocksByRange(bbrange_request_v2()), + RequestType::BlocksByRoot(bbroot_request_v1(&chain_spec)), + RequestType::BlocksByRoot(bbroot_request_v2(&chain_spec)), + RequestType::MetaData(MetadataRequest::new_v1()), + RequestType::BlobsByRange(blbrange_request()), + RequestType::BlobsByRoot(blbroot_request(&chain_spec)), + RequestType::DataColumnsByRange(dcbrange_request()), + RequestType::DataColumnsByRoot(dcbroot_request(&chain_spec)), + RequestType::MetaData(MetadataRequest::new_v2()), ]; for req in requests.iter() { diff --git a/beacon_node/lighthouse_network/src/rpc/handler.rs b/beacon_node/lighthouse_network/src/rpc/handler.rs index 6f338ebc8b..e76d6d2786 100644 --- a/beacon_node/lighthouse_network/src/rpc/handler.rs +++ b/beacon_node/lighthouse_network/src/rpc/handler.rs @@ -1,11 +1,12 @@ #![allow(clippy::type_complexity)] #![allow(clippy::cognitive_complexity)] -use super::methods::{GoodbyeReason, RPCCodedResponse, RPCResponseErrorCode}; +use super::methods::{GoodbyeReason, RpcErrorResponse, RpcResponse}; use super::outbound::OutboundRequestContainer; -use super::protocol::{InboundOutput, InboundRequest, Protocol, RPCError, RPCProtocol}; -use super::{RPCReceived, RPCSend, ReqId}; -use crate::rpc::outbound::{OutboundFramed, OutboundRequest}; +use 
super::protocol::{InboundOutput, Protocol, RPCError, RPCProtocol, RequestType}; +use super::RequestId; +use super::{RPCReceived, RPCSend, ReqId, Request}; +use crate::rpc::outbound::OutboundFramed; use crate::rpc::protocol::InboundFramed; use fnv::FnvHashMap; use futures::prelude::*; @@ -95,7 +96,7 @@ where events_out: SmallVec<[HandlerEvent; 4]>, /// Queue of outbound substreams to open. - dial_queue: SmallVec<[(Id, OutboundRequest); 4]>, + dial_queue: SmallVec<[(Id, RequestType); 4]>, /// Current number of concurrent outbound substreams being opened. dial_negotiated: u32, @@ -159,7 +160,7 @@ struct InboundInfo { /// State of the substream. state: InboundState, /// Responses queued for sending. - pending_items: VecDeque>, + pending_items: VecDeque>, /// Protocol of the original request we received from the peer. protocol: Protocol, /// Responses that the peer is still expecting from us. @@ -205,7 +206,7 @@ pub enum OutboundSubstreamState { /// The framed negotiated substream. substream: Box>, /// Keeps track of the actual request sent. - request: OutboundRequest, + request: RequestType, }, /// Closing an outbound substream> Closing(Box>), @@ -263,7 +264,7 @@ where // Queue our goodbye message. if let Some((id, reason)) = goodbye_reason { - self.dial_queue.push((id, OutboundRequest::Goodbye(reason))); + self.dial_queue.push((id, RequestType::Goodbye(reason))); } self.state = HandlerState::ShuttingDown(Box::pin(sleep(Duration::from_secs( @@ -273,7 +274,7 @@ where } /// Opens an outbound substream with a request. - fn send_request(&mut self, id: Id, req: OutboundRequest) { + fn send_request(&mut self, id: Id, req: RequestType) { match self.state { HandlerState::Active => { self.dial_queue.push((id, req)); @@ -291,10 +292,10 @@ where /// Sends a response to a peer's request. // NOTE: If the substream has closed due to inactivity, or the substream is in the // wrong state a response will fail silently. 
- fn send_response(&mut self, inbound_id: SubstreamId, response: RPCCodedResponse) { + fn send_response(&mut self, inbound_id: SubstreamId, response: RpcResponse) { // check if the stream matching the response still exists let Some(inbound_info) = self.inbound_substreams.get_mut(&inbound_id) else { - if !matches!(response, RPCCodedResponse::StreamTermination(..)) { + if !matches!(response, RpcResponse::StreamTermination(..)) { // the stream is closed after sending the expected number of responses trace!(self.log, "Inbound stream has expired. Response not sent"; "response" => %response, "id" => inbound_id); @@ -302,7 +303,7 @@ where return; }; // If the response we are sending is an error, report back for handling - if let RPCCodedResponse::Error(ref code, ref reason) = response { + if let RpcResponse::Error(ref code, ref reason) = response { self.events_out.push(HandlerEvent::Err(HandlerErr::Inbound { error: RPCError::ErrorResponse(*code, reason.to_string()), proto: inbound_info.protocol, @@ -329,7 +330,7 @@ where type ToBehaviour = HandlerEvent; type InboundProtocol = RPCProtocol; type OutboundProtocol = OutboundRequestContainer; - type OutboundOpenInfo = (Id, OutboundRequest); // Keep track of the id and the request + type OutboundOpenInfo = (Id, RequestType); // Keep track of the id and the request type InboundOpenInfo = (); fn listen_protocol(&self) -> SubstreamProtocol { @@ -403,8 +404,8 @@ where if info.pending_items.back().map(|l| l.close_after()) == Some(false) { // if the last chunk does not close the stream, append an error - info.pending_items.push_back(RPCCodedResponse::Error( - RPCResponseErrorCode::ServerError, + info.pending_items.push_back(RpcResponse::Error( + RpcErrorResponse::ServerError, "Request timed out".into(), )); } @@ -672,13 +673,13 @@ where let proto = entry.get().proto; let received = match response { - RPCCodedResponse::StreamTermination(t) => { + RpcResponse::StreamTermination(t) => { HandlerEvent::Ok(RPCReceived::EndOfStream(id, t)) 
} - RPCCodedResponse::Success(resp) => { + RpcResponse::Success(resp) => { HandlerEvent::Ok(RPCReceived::Response(id, resp)) } - RPCCodedResponse::Error(ref code, ref r) => { + RpcResponse::Error(ref code, ref r) => { HandlerEvent::Err(HandlerErr::Outbound { id, proto, @@ -888,21 +889,23 @@ where } // If we received a goodbye, shutdown the connection. - if let InboundRequest::Goodbye(_) = req { + if let RequestType::Goodbye(_) = req { self.shutdown(None); } - self.events_out.push(HandlerEvent::Ok(RPCReceived::Request( - self.current_inbound_substream_id, - req, - ))); + self.events_out + .push(HandlerEvent::Ok(RPCReceived::Request(Request { + id: RequestId::next(), + substream_id: self.current_inbound_substream_id, + r#type: req, + }))); self.current_inbound_substream_id.0 += 1; } fn on_fully_negotiated_outbound( &mut self, substream: OutboundFramed, - (id, request): (Id, OutboundRequest), + (id, request): (Id, RequestType), ) { self.dial_negotiated -= 1; // Reset any io-retries counter. @@ -958,7 +961,7 @@ where } fn on_dial_upgrade_error( &mut self, - request_info: (Id, OutboundRequest), + request_info: (Id, RequestType), error: StreamUpgradeError, ) { let (id, req) = request_info; @@ -1016,15 +1019,15 @@ impl slog::Value for SubstreamId { /// error that occurred with sending a message is reported also. 
async fn send_message_to_inbound_substream( mut substream: InboundSubstream, - message: RPCCodedResponse, + message: RpcResponse, last_chunk: bool, ) -> Result<(InboundSubstream, bool), RPCError> { - if matches!(message, RPCCodedResponse::StreamTermination(_)) { + if matches!(message, RpcResponse::StreamTermination(_)) { substream.close().await.map(|_| (substream, true)) } else { // chunks that are not stream terminations get sent, and the stream is closed if // the response is an error - let is_error = matches!(message, RPCCodedResponse::Error(..)); + let is_error = matches!(message, RpcResponse::Error(..)); let send_result = substream.send(message).await; diff --git a/beacon_node/lighthouse_network/src/rpc/methods.rs b/beacon_node/lighthouse_network/src/rpc/methods.rs index 6e1ba9cd30..dc7d316fb0 100644 --- a/beacon_node/lighthouse_network/src/rpc/methods.rs +++ b/beacon_node/lighthouse_network/src/rpc/methods.rs @@ -481,7 +481,7 @@ impl DataColumnsByRootRequest { // Collection of enums and structs used by the Codecs to encode/decode RPC messages #[derive(Debug, Clone, PartialEq)] -pub enum RPCResponse { +pub enum RpcSuccessResponse { /// A HELLO message. Status(StatusMessage), @@ -545,11 +545,11 @@ pub enum ResponseTermination { /// The structured response containing a result/code indicating success or failure /// and the contents of the response #[derive(Debug, Clone)] -pub enum RPCCodedResponse { +pub enum RpcResponse { /// The response is a successful. - Success(RPCResponse), + Success(RpcSuccessResponse), - Error(RPCResponseErrorCode, ErrorType), + Error(RpcErrorResponse, ErrorType), /// Received a stream termination indicating which response is being terminated. StreamTermination(ResponseTermination), @@ -564,7 +564,7 @@ pub struct LightClientBootstrapRequest { /// The code assigned to an erroneous `RPCResponse`. 
#[derive(Debug, Clone, Copy, PartialEq, IntoStaticStr)] #[strum(serialize_all = "snake_case")] -pub enum RPCResponseErrorCode { +pub enum RpcErrorResponse { RateLimited, BlobsNotFoundForBlock, InvalidRequest, @@ -574,13 +574,13 @@ pub enum RPCResponseErrorCode { Unknown, } -impl RPCCodedResponse { +impl RpcResponse { /// Used to encode the response in the codec. pub fn as_u8(&self) -> Option { match self { - RPCCodedResponse::Success(_) => Some(0), - RPCCodedResponse::Error(code, _) => Some(code.as_u8()), - RPCCodedResponse::StreamTermination(_) => None, + RpcResponse::Success(_) => Some(0), + RpcResponse::Error(code, _) => Some(code.as_u8()), + RpcResponse::StreamTermination(_) => None, } } @@ -592,64 +592,66 @@ impl RPCCodedResponse { /// Builds an RPCCodedResponse from a response code and an ErrorMessage pub fn from_error(response_code: u8, err: ErrorType) -> Self { let code = match response_code { - 1 => RPCResponseErrorCode::InvalidRequest, - 2 => RPCResponseErrorCode::ServerError, - 3 => RPCResponseErrorCode::ResourceUnavailable, - 139 => RPCResponseErrorCode::RateLimited, - 140 => RPCResponseErrorCode::BlobsNotFoundForBlock, - _ => RPCResponseErrorCode::Unknown, + 1 => RpcErrorResponse::InvalidRequest, + 2 => RpcErrorResponse::ServerError, + 3 => RpcErrorResponse::ResourceUnavailable, + 139 => RpcErrorResponse::RateLimited, + 140 => RpcErrorResponse::BlobsNotFoundForBlock, + _ => RpcErrorResponse::Unknown, }; - RPCCodedResponse::Error(code, err) + RpcResponse::Error(code, err) } /// Returns true if this response always terminates the stream. 
pub fn close_after(&self) -> bool { - !matches!(self, RPCCodedResponse::Success(_)) + !matches!(self, RpcResponse::Success(_)) } } -impl RPCResponseErrorCode { +impl RpcErrorResponse { fn as_u8(&self) -> u8 { match self { - RPCResponseErrorCode::InvalidRequest => 1, - RPCResponseErrorCode::ServerError => 2, - RPCResponseErrorCode::ResourceUnavailable => 3, - RPCResponseErrorCode::Unknown => 255, - RPCResponseErrorCode::RateLimited => 139, - RPCResponseErrorCode::BlobsNotFoundForBlock => 140, + RpcErrorResponse::InvalidRequest => 1, + RpcErrorResponse::ServerError => 2, + RpcErrorResponse::ResourceUnavailable => 3, + RpcErrorResponse::Unknown => 255, + RpcErrorResponse::RateLimited => 139, + RpcErrorResponse::BlobsNotFoundForBlock => 140, } } } use super::Protocol; -impl RPCResponse { +impl RpcSuccessResponse { pub fn protocol(&self) -> Protocol { match self { - RPCResponse::Status(_) => Protocol::Status, - RPCResponse::BlocksByRange(_) => Protocol::BlocksByRange, - RPCResponse::BlocksByRoot(_) => Protocol::BlocksByRoot, - RPCResponse::BlobsByRange(_) => Protocol::BlobsByRange, - RPCResponse::BlobsByRoot(_) => Protocol::BlobsByRoot, - RPCResponse::DataColumnsByRoot(_) => Protocol::DataColumnsByRoot, - RPCResponse::DataColumnsByRange(_) => Protocol::DataColumnsByRange, - RPCResponse::Pong(_) => Protocol::Ping, - RPCResponse::MetaData(_) => Protocol::MetaData, - RPCResponse::LightClientBootstrap(_) => Protocol::LightClientBootstrap, - RPCResponse::LightClientOptimisticUpdate(_) => Protocol::LightClientOptimisticUpdate, - RPCResponse::LightClientFinalityUpdate(_) => Protocol::LightClientFinalityUpdate, + RpcSuccessResponse::Status(_) => Protocol::Status, + RpcSuccessResponse::BlocksByRange(_) => Protocol::BlocksByRange, + RpcSuccessResponse::BlocksByRoot(_) => Protocol::BlocksByRoot, + RpcSuccessResponse::BlobsByRange(_) => Protocol::BlobsByRange, + RpcSuccessResponse::BlobsByRoot(_) => Protocol::BlobsByRoot, + RpcSuccessResponse::DataColumnsByRoot(_) => 
Protocol::DataColumnsByRoot, + RpcSuccessResponse::DataColumnsByRange(_) => Protocol::DataColumnsByRange, + RpcSuccessResponse::Pong(_) => Protocol::Ping, + RpcSuccessResponse::MetaData(_) => Protocol::MetaData, + RpcSuccessResponse::LightClientBootstrap(_) => Protocol::LightClientBootstrap, + RpcSuccessResponse::LightClientOptimisticUpdate(_) => { + Protocol::LightClientOptimisticUpdate + } + RpcSuccessResponse::LightClientFinalityUpdate(_) => Protocol::LightClientFinalityUpdate, } } } -impl std::fmt::Display for RPCResponseErrorCode { +impl std::fmt::Display for RpcErrorResponse { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let repr = match self { - RPCResponseErrorCode::InvalidRequest => "The request was invalid", - RPCResponseErrorCode::ResourceUnavailable => "Resource unavailable", - RPCResponseErrorCode::ServerError => "Server error occurred", - RPCResponseErrorCode::Unknown => "Unknown error occurred", - RPCResponseErrorCode::RateLimited => "Rate limited", - RPCResponseErrorCode::BlobsNotFoundForBlock => "No blobs for the given root", + RpcErrorResponse::InvalidRequest => "The request was invalid", + RpcErrorResponse::ResourceUnavailable => "Resource unavailable", + RpcErrorResponse::ServerError => "Server error occurred", + RpcErrorResponse::Unknown => "Unknown error occurred", + RpcErrorResponse::RateLimited => "Rate limited", + RpcErrorResponse::BlobsNotFoundForBlock => "No blobs for the given root", }; f.write_str(repr) } @@ -661,45 +663,47 @@ impl std::fmt::Display for StatusMessage { } } -impl std::fmt::Display for RPCResponse { +impl std::fmt::Display for RpcSuccessResponse { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { - RPCResponse::Status(status) => write!(f, "{}", status), - RPCResponse::BlocksByRange(block) => { + RpcSuccessResponse::Status(status) => write!(f, "{}", status), + RpcSuccessResponse::BlocksByRange(block) => { write!(f, "BlocksByRange: Block slot: {}", block.slot()) } - 
RPCResponse::BlocksByRoot(block) => { + RpcSuccessResponse::BlocksByRoot(block) => { write!(f, "BlocksByRoot: Block slot: {}", block.slot()) } - RPCResponse::BlobsByRange(blob) => { + RpcSuccessResponse::BlobsByRange(blob) => { write!(f, "BlobsByRange: Blob slot: {}", blob.slot()) } - RPCResponse::BlobsByRoot(sidecar) => { + RpcSuccessResponse::BlobsByRoot(sidecar) => { write!(f, "BlobsByRoot: Blob slot: {}", sidecar.slot()) } - RPCResponse::DataColumnsByRoot(sidecar) => { + RpcSuccessResponse::DataColumnsByRoot(sidecar) => { write!(f, "DataColumnsByRoot: Data column slot: {}", sidecar.slot()) } - RPCResponse::DataColumnsByRange(sidecar) => { + RpcSuccessResponse::DataColumnsByRange(sidecar) => { write!( f, "DataColumnsByRange: Data column slot: {}", sidecar.slot() ) } - RPCResponse::Pong(ping) => write!(f, "Pong: {}", ping.data), - RPCResponse::MetaData(metadata) => write!(f, "Metadata: {}", metadata.seq_number()), - RPCResponse::LightClientBootstrap(bootstrap) => { + RpcSuccessResponse::Pong(ping) => write!(f, "Pong: {}", ping.data), + RpcSuccessResponse::MetaData(metadata) => { + write!(f, "Metadata: {}", metadata.seq_number()) + } + RpcSuccessResponse::LightClientBootstrap(bootstrap) => { write!(f, "LightClientBootstrap Slot: {}", bootstrap.get_slot()) } - RPCResponse::LightClientOptimisticUpdate(update) => { + RpcSuccessResponse::LightClientOptimisticUpdate(update) => { write!( f, "LightClientOptimisticUpdate Slot: {}", update.signature_slot() ) } - RPCResponse::LightClientFinalityUpdate(update) => { + RpcSuccessResponse::LightClientFinalityUpdate(update) => { write!( f, "LightClientFinalityUpdate Slot: {}", @@ -710,12 +714,12 @@ impl std::fmt::Display for RPCResponse { } } -impl std::fmt::Display for RPCCodedResponse { +impl std::fmt::Display for RpcResponse { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { - RPCCodedResponse::Success(res) => write!(f, "{}", res), - RPCCodedResponse::Error(code, err) => write!(f, "{}: {}", 
code, err), - RPCCodedResponse::StreamTermination(_) => write!(f, "Stream Termination"), + RpcResponse::Success(res) => write!(f, "{}", res), + RpcResponse::Error(code, err) => write!(f, "{}: {}", code, err), + RpcResponse::StreamTermination(_) => write!(f, "Stream Termination"), } } } diff --git a/beacon_node/lighthouse_network/src/rpc/mod.rs b/beacon_node/lighthouse_network/src/rpc/mod.rs index 4961c31d28..e3b41ea1df 100644 --- a/beacon_node/lighthouse_network/src/rpc/mod.rs +++ b/beacon_node/lighthouse_network/src/rpc/mod.rs @@ -16,6 +16,7 @@ use libp2p::PeerId; use rate_limiter::{RPCRateLimiter as RateLimiter, RateLimitedErr}; use slog::{crit, debug, o, trace}; use std::marker::PhantomData; +use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; use std::task::{Context, Poll}; use std::time::Duration; @@ -23,16 +24,15 @@ use types::{EthSpec, ForkContext}; pub(crate) use handler::{HandlerErr, HandlerEvent}; pub(crate) use methods::{ - MetaData, MetaDataV1, MetaDataV2, MetaDataV3, Ping, RPCCodedResponse, RPCResponse, + MetaData, MetaDataV1, MetaDataV2, MetaDataV3, Ping, RpcResponse, RpcSuccessResponse, }; -pub(crate) use protocol::InboundRequest; +pub use protocol::RequestType; pub use handler::SubstreamId; pub use methods::{ BlocksByRangeRequest, BlocksByRootRequest, GoodbyeReason, LightClientBootstrapRequest, - RPCResponseErrorCode, ResponseTermination, StatusMessage, + ResponseTermination, RpcErrorResponse, StatusMessage, }; -pub(crate) use outbound::OutboundRequest; pub use protocol::{max_rpc_size, Protocol, RPCError}; use self::config::{InboundRateLimiterConfig, OutboundRateLimiterConfig}; @@ -48,6 +48,8 @@ mod protocol; mod rate_limiter; mod self_limiter; +static NEXT_REQUEST_ID: AtomicUsize = AtomicUsize::new(1); + /// Composite trait for a request id. 
pub trait ReqId: Send + 'static + std::fmt::Debug + Copy + Clone {} impl ReqId for T where T: Send + 'static + std::fmt::Debug + Copy + Clone {} @@ -59,13 +61,13 @@ pub enum RPCSend { /// /// The `Id` is given by the application making the request. These /// go over *outbound* connections. - Request(Id, OutboundRequest), + Request(Id, RequestType), /// A response sent from Lighthouse. /// /// The `SubstreamId` must correspond to the RPC-given ID of the original request received from the /// peer. The second parameter is a single chunk of a response. These go over *inbound* /// connections. - Response(SubstreamId, RPCCodedResponse), + Response(SubstreamId, RpcResponse), /// Lighthouse has requested to terminate the connection with a goodbye message. Shutdown(Id, GoodbyeReason), } @@ -77,17 +79,46 @@ pub enum RPCReceived { /// /// The `SubstreamId` is given by the `RPCHandler` as it identifies this request with the /// *inbound* substream over which it is managed. - Request(SubstreamId, InboundRequest), + Request(Request), /// A response received from the outside. /// /// The `Id` corresponds to the application given ID of the original request sent to the /// peer. The second parameter is a single chunk of a response. These go over *outbound* /// connections. - Response(Id, RPCResponse), + Response(Id, RpcSuccessResponse), /// Marks a request as completed EndOfStream(Id, ResponseTermination), } +/// Rpc `Request` identifier. +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq, PartialOrd, Ord)] +pub struct RequestId(usize); + +impl RequestId { + /// Returns the next available [`RequestId`]. + pub fn next() -> Self { + Self(NEXT_REQUEST_ID.fetch_add(1, Ordering::SeqCst)) + } + + /// Creates an _unchecked_ [`RequestId`]. + /// + /// [`Rpc`] enforces that [`RequestId`]s are unique and not reused. + /// This constructor does not, hence the _unchecked_. + /// + /// It is primarily meant for allowing manual tests. 
+ pub fn new_unchecked(id: usize) -> Self { + Self(id) + } +} + +/// An Rpc Request. +#[derive(Debug, Clone)] +pub struct Request { + pub id: RequestId, + pub substream_id: SubstreamId, + pub r#type: RequestType, +} + impl std::fmt::Display for RPCSend { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { @@ -177,7 +208,8 @@ impl RPC { &mut self, peer_id: PeerId, id: (ConnectionId, SubstreamId), - event: RPCCodedResponse, + _request_id: RequestId, + event: RpcResponse, ) { self.events.push(ToSwarm::NotifyHandler { peer_id, @@ -189,7 +221,7 @@ impl RPC { /// Submits an RPC request. /// /// The peer must be connected for this to succeed. - pub fn send_request(&mut self, peer_id: PeerId, request_id: Id, req: OutboundRequest) { + pub fn send_request(&mut self, peer_id: PeerId, request_id: Id, req: RequestType) { let event = if let Some(self_limiter) = self.self_limiter.as_mut() { match self_limiter.allows(peer_id, request_id, req) { Ok(event) => event, @@ -229,7 +261,7 @@ impl RPC { data: self.seq_number, }; trace!(self.log, "Sending Ping"; "peer_id" => %peer_id); - self.send_request(peer_id, id, OutboundRequest::Ping(ping)); + self.send_request(peer_id, id, RequestType::Ping(ping)); } } @@ -368,13 +400,17 @@ where event: ::ToBehaviour, ) { match event { - HandlerEvent::Ok(RPCReceived::Request(id, req)) => { + HandlerEvent::Ok(RPCReceived::Request(Request { + id, + substream_id, + r#type, + })) => { if let Some(limiter) = self.limiter.as_mut() { // check if the request is conformant to the quota - match limiter.allows(&peer_id, &req) { + match limiter.allows(&peer_id, &r#type) { Err(RateLimitedErr::TooLarge) => { // we set the batch sizes, so this is a coding/config err for most protocols - let protocol = req.versioned_protocol().protocol(); + let protocol = r#type.versioned_protocol().protocol(); if matches!( protocol, Protocol::BlocksByRange @@ -384,7 +420,7 @@ where | Protocol::BlobsByRoot | Protocol::DataColumnsByRoot ) { - 
debug!(self.log, "Request too large to process"; "request" => %req, "protocol" => %protocol); + debug!(self.log, "Request too large to process"; "request" => %r#type, "protocol" => %protocol); } else { // Other protocols shouldn't be sending large messages, we should flag the peer kind crit!(self.log, "Request size too large to ever be processed"; "protocol" => %protocol); @@ -393,9 +429,10 @@ where // the handler upon receiving the error code will send it back to the behaviour self.send_response( peer_id, - (conn_id, id), - RPCCodedResponse::Error( - RPCResponseErrorCode::RateLimited, + (conn_id, substream_id), + id, + RpcResponse::Error( + RpcErrorResponse::RateLimited, "Rate limited. Request too large".into(), ), ); @@ -403,30 +440,33 @@ where } Err(RateLimitedErr::TooSoon(wait_time)) => { debug!(self.log, "Request exceeds the rate limit"; - "request" => %req, "peer_id" => %peer_id, "wait_time_ms" => wait_time.as_millis()); + "request" => %r#type, "peer_id" => %peer_id, "wait_time_ms" => wait_time.as_millis()); // send an error code to the peer. // the handler upon receiving the error code will send it back to the behaviour self.send_response( peer_id, - (conn_id, id), - RPCCodedResponse::Error( - RPCResponseErrorCode::RateLimited, + (conn_id, substream_id), + id, + RpcResponse::Error( + RpcErrorResponse::RateLimited, format!("Wait {:?}", wait_time).into(), ), ); return; } // No rate limiting, continue. - Ok(_) => {} + Ok(()) => {} } } + // If we received a Ping, we queue a Pong response. 
- if let InboundRequest::Ping(_) = req { + if let RequestType::Ping(_) = r#type { trace!(self.log, "Received Ping, queueing Pong";"connection_id" => %conn_id, "peer_id" => %peer_id); self.send_response( peer_id, - (conn_id, id), - RPCCodedResponse::Success(RPCResponse::Pong(Ping { + (conn_id, substream_id), + id, + RpcResponse::Success(RpcSuccessResponse::Pong(Ping { data: self.seq_number, })), ); @@ -435,7 +475,11 @@ where self.events.push(ToSwarm::GenerateEvent(RPCMessage { peer_id, conn_id, - message: Ok(RPCReceived::Request(id, req)), + message: Ok(RPCReceived::Request(Request { + id, + substream_id, + r#type, + })), })); } HandlerEvent::Ok(rpc) => { @@ -496,8 +540,8 @@ where match &self.message { Ok(received) => { let (msg_kind, protocol) = match received { - RPCReceived::Request(_, req) => { - ("request", req.versioned_protocol().protocol()) + RPCReceived::Request(Request { r#type, .. }) => { + ("request", r#type.versioned_protocol().protocol()) } RPCReceived::Response(_, res) => ("response", res.protocol()), RPCReceived::EndOfStream(_, end) => ( diff --git a/beacon_node/lighthouse_network/src/rpc/outbound.rs b/beacon_node/lighthouse_network/src/rpc/outbound.rs index 2bfa42ccac..b614313a84 100644 --- a/beacon_node/lighthouse_network/src/rpc/outbound.rs +++ b/beacon_node/lighthouse_network/src/rpc/outbound.rs @@ -1,7 +1,6 @@ -use super::methods::*; use super::protocol::ProtocolId; -use super::protocol::SupportedProtocol; use super::RPCError; +use super::RequestType; use crate::rpc::codec::SSZSnappyOutboundCodec; use crate::rpc::protocol::Encoding; use futures::future::BoxFuture; @@ -21,25 +20,11 @@ use types::{EthSpec, ForkContext}; #[derive(Debug, Clone)] pub struct OutboundRequestContainer { - pub req: OutboundRequest, + pub req: RequestType, pub fork_context: Arc, pub max_rpc_size: usize, } -#[derive(Debug, Clone, PartialEq)] -pub enum OutboundRequest { - Status(StatusMessage), - Goodbye(GoodbyeReason), - BlocksByRange(OldBlocksByRangeRequest), - 
BlocksByRoot(BlocksByRootRequest), - BlobsByRange(BlobsByRangeRequest), - BlobsByRoot(BlobsByRootRequest), - DataColumnsByRoot(DataColumnsByRootRequest), - DataColumnsByRange(DataColumnsByRangeRequest), - Ping(Ping), - MetaData(MetadataRequest), -} - impl UpgradeInfo for OutboundRequestContainer { type Info = ProtocolId; type InfoIter = Vec; @@ -50,133 +35,6 @@ impl UpgradeInfo for OutboundRequestContainer { } } -/// Implements the encoding per supported protocol for `RPCRequest`. -impl OutboundRequest { - pub fn supported_protocols(&self) -> Vec { - match self { - // add more protocols when versions/encodings are supported - OutboundRequest::Status(_) => vec![ProtocolId::new( - SupportedProtocol::StatusV1, - Encoding::SSZSnappy, - )], - OutboundRequest::Goodbye(_) => vec![ProtocolId::new( - SupportedProtocol::GoodbyeV1, - Encoding::SSZSnappy, - )], - OutboundRequest::BlocksByRange(_) => vec![ - ProtocolId::new(SupportedProtocol::BlocksByRangeV2, Encoding::SSZSnappy), - ProtocolId::new(SupportedProtocol::BlocksByRangeV1, Encoding::SSZSnappy), - ], - OutboundRequest::BlocksByRoot(_) => vec![ - ProtocolId::new(SupportedProtocol::BlocksByRootV2, Encoding::SSZSnappy), - ProtocolId::new(SupportedProtocol::BlocksByRootV1, Encoding::SSZSnappy), - ], - OutboundRequest::BlobsByRange(_) => vec![ProtocolId::new( - SupportedProtocol::BlobsByRangeV1, - Encoding::SSZSnappy, - )], - OutboundRequest::BlobsByRoot(_) => vec![ProtocolId::new( - SupportedProtocol::BlobsByRootV1, - Encoding::SSZSnappy, - )], - OutboundRequest::DataColumnsByRoot(_) => vec![ProtocolId::new( - SupportedProtocol::DataColumnsByRootV1, - Encoding::SSZSnappy, - )], - OutboundRequest::DataColumnsByRange(_) => vec![ProtocolId::new( - SupportedProtocol::DataColumnsByRangeV1, - Encoding::SSZSnappy, - )], - OutboundRequest::Ping(_) => vec![ProtocolId::new( - SupportedProtocol::PingV1, - Encoding::SSZSnappy, - )], - OutboundRequest::MetaData(_) => vec![ - ProtocolId::new(SupportedProtocol::MetaDataV3, 
Encoding::SSZSnappy), - ProtocolId::new(SupportedProtocol::MetaDataV2, Encoding::SSZSnappy), - ProtocolId::new(SupportedProtocol::MetaDataV1, Encoding::SSZSnappy), - ], - } - } - /* These functions are used in the handler for stream management */ - - /// Maximum number of responses expected for this request. - pub fn max_responses(&self) -> u64 { - match self { - OutboundRequest::Status(_) => 1, - OutboundRequest::Goodbye(_) => 0, - OutboundRequest::BlocksByRange(req) => *req.count(), - OutboundRequest::BlocksByRoot(req) => req.block_roots().len() as u64, - OutboundRequest::BlobsByRange(req) => req.max_blobs_requested::(), - OutboundRequest::BlobsByRoot(req) => req.blob_ids.len() as u64, - OutboundRequest::DataColumnsByRoot(req) => req.data_column_ids.len() as u64, - OutboundRequest::DataColumnsByRange(req) => req.max_requested::(), - OutboundRequest::Ping(_) => 1, - OutboundRequest::MetaData(_) => 1, - } - } - - pub fn expect_exactly_one_response(&self) -> bool { - match self { - OutboundRequest::Status(_) => true, - OutboundRequest::Goodbye(_) => false, - OutboundRequest::BlocksByRange(_) => false, - OutboundRequest::BlocksByRoot(_) => false, - OutboundRequest::BlobsByRange(_) => false, - OutboundRequest::BlobsByRoot(_) => false, - OutboundRequest::DataColumnsByRoot(_) => false, - OutboundRequest::DataColumnsByRange(_) => false, - OutboundRequest::Ping(_) => true, - OutboundRequest::MetaData(_) => true, - } - } - - /// Gives the corresponding `SupportedProtocol` to this request. 
- pub fn versioned_protocol(&self) -> SupportedProtocol { - match self { - OutboundRequest::Status(_) => SupportedProtocol::StatusV1, - OutboundRequest::Goodbye(_) => SupportedProtocol::GoodbyeV1, - OutboundRequest::BlocksByRange(req) => match req { - OldBlocksByRangeRequest::V1(_) => SupportedProtocol::BlocksByRangeV1, - OldBlocksByRangeRequest::V2(_) => SupportedProtocol::BlocksByRangeV2, - }, - OutboundRequest::BlocksByRoot(req) => match req { - BlocksByRootRequest::V1(_) => SupportedProtocol::BlocksByRootV1, - BlocksByRootRequest::V2(_) => SupportedProtocol::BlocksByRootV2, - }, - OutboundRequest::BlobsByRange(_) => SupportedProtocol::BlobsByRangeV1, - OutboundRequest::BlobsByRoot(_) => SupportedProtocol::BlobsByRootV1, - OutboundRequest::DataColumnsByRoot(_) => SupportedProtocol::DataColumnsByRootV1, - OutboundRequest::DataColumnsByRange(_) => SupportedProtocol::DataColumnsByRangeV1, - OutboundRequest::Ping(_) => SupportedProtocol::PingV1, - OutboundRequest::MetaData(req) => match req { - MetadataRequest::V1(_) => SupportedProtocol::MetaDataV1, - MetadataRequest::V2(_) => SupportedProtocol::MetaDataV2, - MetadataRequest::V3(_) => SupportedProtocol::MetaDataV3, - }, - } - } - - /// Returns the `ResponseTermination` type associated with the request if a stream gets - /// terminated. - pub fn stream_termination(&self) -> ResponseTermination { - match self { - // this only gets called after `multiple_responses()` returns true. Therefore, only - // variants that have `multiple_responses()` can have values. 
- OutboundRequest::BlocksByRange(_) => ResponseTermination::BlocksByRange, - OutboundRequest::BlocksByRoot(_) => ResponseTermination::BlocksByRoot, - OutboundRequest::BlobsByRange(_) => ResponseTermination::BlobsByRange, - OutboundRequest::BlobsByRoot(_) => ResponseTermination::BlobsByRoot, - OutboundRequest::DataColumnsByRoot(_) => ResponseTermination::DataColumnsByRoot, - OutboundRequest::DataColumnsByRange(_) => ResponseTermination::DataColumnsByRange, - OutboundRequest::Status(_) => unreachable!(), - OutboundRequest::Goodbye(_) => unreachable!(), - OutboundRequest::Ping(_) => unreachable!(), - OutboundRequest::MetaData(_) => unreachable!(), - } - } -} - /* RPC Response type - used for outbound upgrades */ /* Outbound upgrades */ @@ -211,22 +69,3 @@ where .boxed() } } - -impl std::fmt::Display for OutboundRequest { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - OutboundRequest::Status(status) => write!(f, "Status Message: {}", status), - OutboundRequest::Goodbye(reason) => write!(f, "Goodbye: {}", reason), - OutboundRequest::BlocksByRange(req) => write!(f, "Blocks by range: {}", req), - OutboundRequest::BlocksByRoot(req) => write!(f, "Blocks by root: {:?}", req), - OutboundRequest::BlobsByRange(req) => write!(f, "Blobs by range: {:?}", req), - OutboundRequest::BlobsByRoot(req) => write!(f, "Blobs by root: {:?}", req), - OutboundRequest::DataColumnsByRoot(req) => write!(f, "Data columns by root: {:?}", req), - OutboundRequest::DataColumnsByRange(req) => { - write!(f, "Data columns by range: {:?}", req) - } - OutboundRequest::Ping(ping) => write!(f, "Ping: {}", ping.data), - OutboundRequest::MetaData(_) => write!(f, "MetaData request"), - } - } -} diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index 09a18e5de6..3f78d35f5c 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -645,7 
+645,7 @@ pub fn rpc_data_column_limits() -> RpcLimits { // The inbound protocol reads the request, decodes it and returns the stream to the protocol // handler to respond to once ready. -pub type InboundOutput = (InboundRequest, InboundFramed); +pub type InboundOutput = (RequestType, InboundFramed); pub type InboundFramed = Framed>>>, SSZSnappyInboundCodec>; @@ -679,19 +679,19 @@ where // MetaData requests should be empty, return the stream match versioned_protocol { SupportedProtocol::MetaDataV1 => { - Ok((InboundRequest::MetaData(MetadataRequest::new_v1()), socket)) + Ok((RequestType::MetaData(MetadataRequest::new_v1()), socket)) } SupportedProtocol::MetaDataV2 => { - Ok((InboundRequest::MetaData(MetadataRequest::new_v2()), socket)) + Ok((RequestType::MetaData(MetadataRequest::new_v2()), socket)) } SupportedProtocol::MetaDataV3 => { - Ok((InboundRequest::MetaData(MetadataRequest::new_v3()), socket)) + Ok((RequestType::MetaData(MetadataRequest::new_v3()), socket)) } SupportedProtocol::LightClientOptimisticUpdateV1 => { - Ok((InboundRequest::LightClientOptimisticUpdate, socket)) + Ok((RequestType::LightClientOptimisticUpdate, socket)) } SupportedProtocol::LightClientFinalityUpdateV1 => { - Ok((InboundRequest::LightClientFinalityUpdate, socket)) + Ok((RequestType::LightClientFinalityUpdate, socket)) } _ => { match tokio::time::timeout( @@ -713,7 +713,7 @@ where } #[derive(Debug, Clone, PartialEq)] -pub enum InboundRequest { +pub enum RequestType { Status(StatusMessage), Goodbye(GoodbyeReason), BlocksByRange(OldBlocksByRangeRequest), @@ -730,56 +730,56 @@ pub enum InboundRequest { } /// Implements the encoding per supported protocol for `RPCRequest`. -impl InboundRequest { +impl RequestType { /* These functions are used in the handler for stream management */ /// Maximum number of responses expected for this request. 
pub fn max_responses(&self) -> u64 { match self { - InboundRequest::Status(_) => 1, - InboundRequest::Goodbye(_) => 0, - InboundRequest::BlocksByRange(req) => *req.count(), - InboundRequest::BlocksByRoot(req) => req.block_roots().len() as u64, - InboundRequest::BlobsByRange(req) => req.max_blobs_requested::(), - InboundRequest::BlobsByRoot(req) => req.blob_ids.len() as u64, - InboundRequest::DataColumnsByRoot(req) => req.data_column_ids.len() as u64, - InboundRequest::DataColumnsByRange(req) => req.max_requested::(), - InboundRequest::Ping(_) => 1, - InboundRequest::MetaData(_) => 1, - InboundRequest::LightClientBootstrap(_) => 1, - InboundRequest::LightClientOptimisticUpdate => 1, - InboundRequest::LightClientFinalityUpdate => 1, + RequestType::Status(_) => 1, + RequestType::Goodbye(_) => 0, + RequestType::BlocksByRange(req) => *req.count(), + RequestType::BlocksByRoot(req) => req.block_roots().len() as u64, + RequestType::BlobsByRange(req) => req.max_blobs_requested::(), + RequestType::BlobsByRoot(req) => req.blob_ids.len() as u64, + RequestType::DataColumnsByRoot(req) => req.data_column_ids.len() as u64, + RequestType::DataColumnsByRange(req) => req.max_requested::(), + RequestType::Ping(_) => 1, + RequestType::MetaData(_) => 1, + RequestType::LightClientBootstrap(_) => 1, + RequestType::LightClientOptimisticUpdate => 1, + RequestType::LightClientFinalityUpdate => 1, } } /// Gives the corresponding `SupportedProtocol` to this request. 
pub fn versioned_protocol(&self) -> SupportedProtocol { match self { - InboundRequest::Status(_) => SupportedProtocol::StatusV1, - InboundRequest::Goodbye(_) => SupportedProtocol::GoodbyeV1, - InboundRequest::BlocksByRange(req) => match req { + RequestType::Status(_) => SupportedProtocol::StatusV1, + RequestType::Goodbye(_) => SupportedProtocol::GoodbyeV1, + RequestType::BlocksByRange(req) => match req { OldBlocksByRangeRequest::V1(_) => SupportedProtocol::BlocksByRangeV1, OldBlocksByRangeRequest::V2(_) => SupportedProtocol::BlocksByRangeV2, }, - InboundRequest::BlocksByRoot(req) => match req { + RequestType::BlocksByRoot(req) => match req { BlocksByRootRequest::V1(_) => SupportedProtocol::BlocksByRootV1, BlocksByRootRequest::V2(_) => SupportedProtocol::BlocksByRootV2, }, - InboundRequest::BlobsByRange(_) => SupportedProtocol::BlobsByRangeV1, - InboundRequest::BlobsByRoot(_) => SupportedProtocol::BlobsByRootV1, - InboundRequest::DataColumnsByRoot(_) => SupportedProtocol::DataColumnsByRootV1, - InboundRequest::DataColumnsByRange(_) => SupportedProtocol::DataColumnsByRangeV1, - InboundRequest::Ping(_) => SupportedProtocol::PingV1, - InboundRequest::MetaData(req) => match req { + RequestType::BlobsByRange(_) => SupportedProtocol::BlobsByRangeV1, + RequestType::BlobsByRoot(_) => SupportedProtocol::BlobsByRootV1, + RequestType::DataColumnsByRoot(_) => SupportedProtocol::DataColumnsByRootV1, + RequestType::DataColumnsByRange(_) => SupportedProtocol::DataColumnsByRangeV1, + RequestType::Ping(_) => SupportedProtocol::PingV1, + RequestType::MetaData(req) => match req { MetadataRequest::V1(_) => SupportedProtocol::MetaDataV1, MetadataRequest::V2(_) => SupportedProtocol::MetaDataV2, MetadataRequest::V3(_) => SupportedProtocol::MetaDataV3, }, - InboundRequest::LightClientBootstrap(_) => SupportedProtocol::LightClientBootstrapV1, - InboundRequest::LightClientOptimisticUpdate => { + RequestType::LightClientBootstrap(_) => SupportedProtocol::LightClientBootstrapV1, + 
RequestType::LightClientOptimisticUpdate => { SupportedProtocol::LightClientOptimisticUpdateV1 } - InboundRequest::LightClientFinalityUpdate => { + RequestType::LightClientFinalityUpdate => { SupportedProtocol::LightClientFinalityUpdateV1 } } @@ -791,19 +791,96 @@ impl InboundRequest { match self { // this only gets called after `multiple_responses()` returns true. Therefore, only // variants that have `multiple_responses()` can have values. - InboundRequest::BlocksByRange(_) => ResponseTermination::BlocksByRange, - InboundRequest::BlocksByRoot(_) => ResponseTermination::BlocksByRoot, - InboundRequest::BlobsByRange(_) => ResponseTermination::BlobsByRange, - InboundRequest::BlobsByRoot(_) => ResponseTermination::BlobsByRoot, - InboundRequest::DataColumnsByRoot(_) => ResponseTermination::DataColumnsByRoot, - InboundRequest::DataColumnsByRange(_) => ResponseTermination::DataColumnsByRange, - InboundRequest::Status(_) => unreachable!(), - InboundRequest::Goodbye(_) => unreachable!(), - InboundRequest::Ping(_) => unreachable!(), - InboundRequest::MetaData(_) => unreachable!(), - InboundRequest::LightClientBootstrap(_) => unreachable!(), - InboundRequest::LightClientFinalityUpdate => unreachable!(), - InboundRequest::LightClientOptimisticUpdate => unreachable!(), + RequestType::BlocksByRange(_) => ResponseTermination::BlocksByRange, + RequestType::BlocksByRoot(_) => ResponseTermination::BlocksByRoot, + RequestType::BlobsByRange(_) => ResponseTermination::BlobsByRange, + RequestType::BlobsByRoot(_) => ResponseTermination::BlobsByRoot, + RequestType::DataColumnsByRoot(_) => ResponseTermination::DataColumnsByRoot, + RequestType::DataColumnsByRange(_) => ResponseTermination::DataColumnsByRange, + RequestType::Status(_) => unreachable!(), + RequestType::Goodbye(_) => unreachable!(), + RequestType::Ping(_) => unreachable!(), + RequestType::MetaData(_) => unreachable!(), + RequestType::LightClientBootstrap(_) => unreachable!(), + RequestType::LightClientFinalityUpdate => 
unreachable!(), + RequestType::LightClientOptimisticUpdate => unreachable!(), + } + } + + pub fn supported_protocols(&self) -> Vec { + match self { + // add more protocols when versions/encodings are supported + RequestType::Status(_) => vec![ProtocolId::new( + SupportedProtocol::StatusV1, + Encoding::SSZSnappy, + )], + RequestType::Goodbye(_) => vec![ProtocolId::new( + SupportedProtocol::GoodbyeV1, + Encoding::SSZSnappy, + )], + RequestType::BlocksByRange(_) => vec![ + ProtocolId::new(SupportedProtocol::BlocksByRangeV2, Encoding::SSZSnappy), + ProtocolId::new(SupportedProtocol::BlocksByRangeV1, Encoding::SSZSnappy), + ], + RequestType::BlocksByRoot(_) => vec![ + ProtocolId::new(SupportedProtocol::BlocksByRootV2, Encoding::SSZSnappy), + ProtocolId::new(SupportedProtocol::BlocksByRootV1, Encoding::SSZSnappy), + ], + RequestType::BlobsByRange(_) => vec![ProtocolId::new( + SupportedProtocol::BlobsByRangeV1, + Encoding::SSZSnappy, + )], + RequestType::BlobsByRoot(_) => vec![ProtocolId::new( + SupportedProtocol::BlobsByRootV1, + Encoding::SSZSnappy, + )], + RequestType::DataColumnsByRoot(_) => vec![ProtocolId::new( + SupportedProtocol::DataColumnsByRootV1, + Encoding::SSZSnappy, + )], + RequestType::DataColumnsByRange(_) => vec![ProtocolId::new( + SupportedProtocol::DataColumnsByRangeV1, + Encoding::SSZSnappy, + )], + RequestType::Ping(_) => vec![ProtocolId::new( + SupportedProtocol::PingV1, + Encoding::SSZSnappy, + )], + RequestType::MetaData(_) => vec![ + ProtocolId::new(SupportedProtocol::MetaDataV3, Encoding::SSZSnappy), + ProtocolId::new(SupportedProtocol::MetaDataV2, Encoding::SSZSnappy), + ProtocolId::new(SupportedProtocol::MetaDataV1, Encoding::SSZSnappy), + ], + RequestType::LightClientBootstrap(_) => vec![ProtocolId::new( + SupportedProtocol::LightClientBootstrapV1, + Encoding::SSZSnappy, + )], + RequestType::LightClientOptimisticUpdate => vec![ProtocolId::new( + SupportedProtocol::LightClientOptimisticUpdateV1, + Encoding::SSZSnappy, + )], + 
RequestType::LightClientFinalityUpdate => vec![ProtocolId::new( + SupportedProtocol::LightClientFinalityUpdateV1, + Encoding::SSZSnappy, + )], + } + } + + pub fn expect_exactly_one_response(&self) -> bool { + match self { + RequestType::Status(_) => true, + RequestType::Goodbye(_) => false, + RequestType::BlocksByRange(_) => false, + RequestType::BlocksByRoot(_) => false, + RequestType::BlobsByRange(_) => false, + RequestType::BlobsByRoot(_) => false, + RequestType::DataColumnsByRoot(_) => false, + RequestType::DataColumnsByRange(_) => false, + RequestType::Ping(_) => true, + RequestType::MetaData(_) => true, + RequestType::LightClientBootstrap(_) => true, + RequestType::LightClientOptimisticUpdate => true, + RequestType::LightClientFinalityUpdate => true, } } } @@ -819,7 +896,7 @@ pub enum RPCError { /// IO Error. IoError(String), /// The peer returned a valid response but the response indicated an error. - ErrorResponse(RPCResponseErrorCode, String), + ErrorResponse(RpcErrorResponse, String), /// Timed out waiting for a response. StreamTimeout, /// Peer does not support the protocol. 
@@ -898,28 +975,28 @@ impl std::error::Error for RPCError { } } -impl std::fmt::Display for InboundRequest { +impl std::fmt::Display for RequestType { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { - InboundRequest::Status(status) => write!(f, "Status Message: {}", status), - InboundRequest::Goodbye(reason) => write!(f, "Goodbye: {}", reason), - InboundRequest::BlocksByRange(req) => write!(f, "Blocks by range: {}", req), - InboundRequest::BlocksByRoot(req) => write!(f, "Blocks by root: {:?}", req), - InboundRequest::BlobsByRange(req) => write!(f, "Blobs by range: {:?}", req), - InboundRequest::BlobsByRoot(req) => write!(f, "Blobs by root: {:?}", req), - InboundRequest::DataColumnsByRoot(req) => write!(f, "Data columns by root: {:?}", req), - InboundRequest::DataColumnsByRange(req) => { + RequestType::Status(status) => write!(f, "Status Message: {}", status), + RequestType::Goodbye(reason) => write!(f, "Goodbye: {}", reason), + RequestType::BlocksByRange(req) => write!(f, "Blocks by range: {}", req), + RequestType::BlocksByRoot(req) => write!(f, "Blocks by root: {:?}", req), + RequestType::BlobsByRange(req) => write!(f, "Blobs by range: {:?}", req), + RequestType::BlobsByRoot(req) => write!(f, "Blobs by root: {:?}", req), + RequestType::DataColumnsByRoot(req) => write!(f, "Data columns by root: {:?}", req), + RequestType::DataColumnsByRange(req) => { write!(f, "Data columns by range: {:?}", req) } - InboundRequest::Ping(ping) => write!(f, "Ping: {}", ping.data), - InboundRequest::MetaData(_) => write!(f, "MetaData request"), - InboundRequest::LightClientBootstrap(bootstrap) => { + RequestType::Ping(ping) => write!(f, "Ping: {}", ping.data), + RequestType::MetaData(_) => write!(f, "MetaData request"), + RequestType::LightClientBootstrap(bootstrap) => { write!(f, "Light client boostrap: {}", bootstrap.root) } - InboundRequest::LightClientOptimisticUpdate => { + RequestType::LightClientOptimisticUpdate => { write!(f, "Light client 
optimistic update request") } - InboundRequest::LightClientFinalityUpdate => { + RequestType::LightClientFinalityUpdate => { write!(f, "Light client finality update request") } } diff --git a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs index 523b891a00..a8e8f45b6f 100644 --- a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs +++ b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs @@ -252,7 +252,7 @@ pub trait RateLimiterItem { fn max_responses(&self) -> u64; } -impl RateLimiterItem for super::InboundRequest { +impl RateLimiterItem for super::RequestType { fn protocol(&self) -> Protocol { self.versioned_protocol().protocol() } @@ -262,15 +262,6 @@ impl RateLimiterItem for super::InboundRequest { } } -impl RateLimiterItem for super::OutboundRequest { - fn protocol(&self) -> Protocol { - self.versioned_protocol().protocol() - } - - fn max_responses(&self) -> u64 { - self.max_responses() - } -} impl RPCRateLimiter { pub fn new_with_config(config: RateLimiterConfig) -> Result { // Destructure to make sure every configuration value is used. diff --git a/beacon_node/lighthouse_network/src/rpc/self_limiter.rs b/beacon_node/lighthouse_network/src/rpc/self_limiter.rs index 77caecb16d..e968ad11e3 100644 --- a/beacon_node/lighthouse_network/src/rpc/self_limiter.rs +++ b/beacon_node/lighthouse_network/src/rpc/self_limiter.rs @@ -14,13 +14,13 @@ use types::EthSpec; use super::{ config::OutboundRateLimiterConfig, rate_limiter::{RPCRateLimiter as RateLimiter, RateLimitedErr}, - BehaviourAction, OutboundRequest, Protocol, RPCSend, ReqId, + BehaviourAction, Protocol, RPCSend, ReqId, RequestType, }; /// A request that was rate limited or waiting on rate limited requests for the same peer and /// protocol. 
struct QueuedRequest { - req: OutboundRequest, + req: RequestType, request_id: Id, } @@ -70,7 +70,7 @@ impl SelfRateLimiter { &mut self, peer_id: PeerId, request_id: Id, - req: OutboundRequest, + req: RequestType, ) -> Result, Error> { let protocol = req.versioned_protocol().protocol(); // First check that there are not already other requests waiting to be sent. @@ -101,7 +101,7 @@ impl SelfRateLimiter { limiter: &mut RateLimiter, peer_id: PeerId, request_id: Id, - req: OutboundRequest, + req: RequestType, log: &Logger, ) -> Result, (QueuedRequest, Duration)> { match limiter.allows(&peer_id, &req) { @@ -211,7 +211,7 @@ mod tests { use crate::rpc::config::{OutboundRateLimiterConfig, RateLimiterConfig}; use crate::rpc::rate_limiter::Quota; use crate::rpc::self_limiter::SelfRateLimiter; - use crate::rpc::{OutboundRequest, Ping, Protocol}; + use crate::rpc::{Ping, Protocol, RequestType}; use crate::service::api_types::{AppRequestId, RequestId, SyncRequestId}; use libp2p::PeerId; use std::time::Duration; @@ -235,7 +235,7 @@ mod tests { RequestId::Application(AppRequestId::Sync(SyncRequestId::RangeBlockAndBlobs { id: i, })), - OutboundRequest::Ping(Ping { data: i as u64 }), + RequestType::Ping(Ping { data: i as u64 }), ); } diff --git a/beacon_node/lighthouse_network/src/service/api_types.rs b/beacon_node/lighthouse_network/src/service/api_types.rs index 30400db3b6..e57e846c33 100644 --- a/beacon_node/lighthouse_network/src/service/api_types.rs +++ b/beacon_node/lighthouse_network/src/service/api_types.rs @@ -6,16 +6,9 @@ use types::{ LightClientFinalityUpdate, LightClientOptimisticUpdate, SignedBeaconBlock, }; -use crate::rpc::methods::{ - BlobsByRangeRequest, BlobsByRootRequest, DataColumnsByRangeRequest, DataColumnsByRootRequest, -}; use crate::rpc::{ - methods::{ - BlocksByRangeRequest, BlocksByRootRequest, LightClientBootstrapRequest, - OldBlocksByRangeRequest, OldBlocksByRangeRequestV1, OldBlocksByRangeRequestV2, - RPCCodedResponse, RPCResponse, 
ResponseTermination, StatusMessage, - }, - OutboundRequest, SubstreamId, + methods::{ResponseTermination, RpcResponse, RpcSuccessResponse, StatusMessage}, + SubstreamId, }; /// Identifier of requests sent by a peer. @@ -93,69 +86,6 @@ pub enum RequestId { Internal, } -/// The type of RPC requests the Behaviour informs it has received and allows for sending. -/// -// NOTE: This is an application-level wrapper over the lower network level requests that can be -// sent. The main difference is the absence of the Ping, Metadata and Goodbye protocols, which don't -// leave the Behaviour. For all protocols managed by RPC see `RPCRequest`. -#[derive(Debug, Clone, PartialEq)] -pub enum Request { - /// A Status message. - Status(StatusMessage), - /// A blocks by range request. - BlocksByRange(BlocksByRangeRequest), - /// A blobs by range request. - BlobsByRange(BlobsByRangeRequest), - /// A request blocks root request. - BlocksByRoot(BlocksByRootRequest), - // light client bootstrap request - LightClientBootstrap(LightClientBootstrapRequest), - // light client optimistic update request - LightClientOptimisticUpdate, - // light client finality update request - LightClientFinalityUpdate, - /// A request blobs root request. - BlobsByRoot(BlobsByRootRequest), - /// A request data columns root request. - DataColumnsByRoot(DataColumnsByRootRequest), - /// A request data columns by range request. 
- DataColumnsByRange(DataColumnsByRangeRequest), -} - -impl std::convert::From for OutboundRequest { - fn from(req: Request) -> OutboundRequest { - match req { - Request::BlocksByRoot(r) => OutboundRequest::BlocksByRoot(r), - Request::BlocksByRange(r) => match r { - BlocksByRangeRequest::V1(req) => OutboundRequest::BlocksByRange( - OldBlocksByRangeRequest::V1(OldBlocksByRangeRequestV1 { - start_slot: req.start_slot, - count: req.count, - step: 1, - }), - ), - BlocksByRangeRequest::V2(req) => OutboundRequest::BlocksByRange( - OldBlocksByRangeRequest::V2(OldBlocksByRangeRequestV2 { - start_slot: req.start_slot, - count: req.count, - step: 1, - }), - ), - }, - Request::LightClientBootstrap(_) - | Request::LightClientOptimisticUpdate - | Request::LightClientFinalityUpdate => { - unreachable!("Lighthouse never makes an outbound light client request") - } - Request::BlobsByRange(r) => OutboundRequest::BlobsByRange(r), - Request::BlobsByRoot(r) => OutboundRequest::BlobsByRoot(r), - Request::DataColumnsByRoot(r) => OutboundRequest::DataColumnsByRoot(r), - Request::DataColumnsByRange(r) => OutboundRequest::DataColumnsByRange(r), - Request::Status(s) => OutboundRequest::Status(s), - } - } -} - /// The type of RPC responses the Behaviour informs it has received, and allows for sending. 
/// // NOTE: This is an application-level wrapper over the lower network level responses that can be @@ -186,44 +116,42 @@ pub enum Response { LightClientFinalityUpdate(Arc>), } -impl std::convert::From> for RPCCodedResponse { - fn from(resp: Response) -> RPCCodedResponse { +impl std::convert::From> for RpcResponse { + fn from(resp: Response) -> RpcResponse { match resp { Response::BlocksByRoot(r) => match r { - Some(b) => RPCCodedResponse::Success(RPCResponse::BlocksByRoot(b)), - None => RPCCodedResponse::StreamTermination(ResponseTermination::BlocksByRoot), + Some(b) => RpcResponse::Success(RpcSuccessResponse::BlocksByRoot(b)), + None => RpcResponse::StreamTermination(ResponseTermination::BlocksByRoot), }, Response::BlocksByRange(r) => match r { - Some(b) => RPCCodedResponse::Success(RPCResponse::BlocksByRange(b)), - None => RPCCodedResponse::StreamTermination(ResponseTermination::BlocksByRange), + Some(b) => RpcResponse::Success(RpcSuccessResponse::BlocksByRange(b)), + None => RpcResponse::StreamTermination(ResponseTermination::BlocksByRange), }, Response::BlobsByRoot(r) => match r { - Some(b) => RPCCodedResponse::Success(RPCResponse::BlobsByRoot(b)), - None => RPCCodedResponse::StreamTermination(ResponseTermination::BlobsByRoot), + Some(b) => RpcResponse::Success(RpcSuccessResponse::BlobsByRoot(b)), + None => RpcResponse::StreamTermination(ResponseTermination::BlobsByRoot), }, Response::BlobsByRange(r) => match r { - Some(b) => RPCCodedResponse::Success(RPCResponse::BlobsByRange(b)), - None => RPCCodedResponse::StreamTermination(ResponseTermination::BlobsByRange), + Some(b) => RpcResponse::Success(RpcSuccessResponse::BlobsByRange(b)), + None => RpcResponse::StreamTermination(ResponseTermination::BlobsByRange), }, Response::DataColumnsByRoot(r) => match r { - Some(d) => RPCCodedResponse::Success(RPCResponse::DataColumnsByRoot(d)), - None => RPCCodedResponse::StreamTermination(ResponseTermination::DataColumnsByRoot), + Some(d) => 
RpcResponse::Success(RpcSuccessResponse::DataColumnsByRoot(d)), + None => RpcResponse::StreamTermination(ResponseTermination::DataColumnsByRoot), }, Response::DataColumnsByRange(r) => match r { - Some(d) => RPCCodedResponse::Success(RPCResponse::DataColumnsByRange(d)), - None => { - RPCCodedResponse::StreamTermination(ResponseTermination::DataColumnsByRange) - } + Some(d) => RpcResponse::Success(RpcSuccessResponse::DataColumnsByRange(d)), + None => RpcResponse::StreamTermination(ResponseTermination::DataColumnsByRange), }, - Response::Status(s) => RPCCodedResponse::Success(RPCResponse::Status(s)), + Response::Status(s) => RpcResponse::Success(RpcSuccessResponse::Status(s)), Response::LightClientBootstrap(b) => { - RPCCodedResponse::Success(RPCResponse::LightClientBootstrap(b)) + RpcResponse::Success(RpcSuccessResponse::LightClientBootstrap(b)) } Response::LightClientOptimisticUpdate(o) => { - RPCCodedResponse::Success(RPCResponse::LightClientOptimisticUpdate(o)) + RpcResponse::Success(RpcSuccessResponse::LightClientOptimisticUpdate(o)) } Response::LightClientFinalityUpdate(f) => { - RPCCodedResponse::Success(RPCResponse::LightClientFinalityUpdate(f)) + RpcResponse::Success(RpcSuccessResponse::LightClientFinalityUpdate(f)) } } } diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index ede8fdd13a..4cf59e15e1 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -11,9 +11,8 @@ use crate::peer_manager::{ use crate::peer_manager::{MIN_OUTBOUND_ONLY_FACTOR, PEER_EXCESS_FACTOR, PRIORITY_PEER_EXCESS}; use crate::rpc::methods::MetadataRequest; use crate::rpc::{ - methods, BlocksByRangeRequest, GoodbyeReason, HandlerErr, InboundRequest, NetworkParams, - OutboundRequest, Protocol, RPCCodedResponse, RPCError, RPCMessage, RPCReceived, RPCResponse, - RPCResponseErrorCode, ResponseTermination, RPC, + self, GoodbyeReason, HandlerErr, NetworkParams, 
Protocol, RPCError, RPCMessage, RPCReceived, + RequestType, ResponseTermination, RpcErrorResponse, RpcResponse, RpcSuccessResponse, RPC, }; use crate::service::behaviour::BehaviourEvent; pub use crate::service::behaviour::Gossipsub; @@ -25,7 +24,7 @@ use crate::types::{ use crate::EnrExt; use crate::Eth2Enr; use crate::{error, metrics, Enr, NetworkGlobals, PubsubMessage, TopicHash}; -use api_types::{AppRequestId, PeerRequestId, Request, RequestId, Response}; +use api_types::{AppRequestId, PeerRequestId, RequestId, Response}; use futures::stream::StreamExt; use gossipsub::{ IdentTopic as Topic, MessageAcceptance, MessageAuthenticity, MessageId, PublishError, @@ -84,7 +83,7 @@ pub enum NetworkEvent { /// Identifier of the request. All responses to this request must use this id. id: PeerRequestId, /// Request the peer sent. - request: Request, + request: rpc::Request, }, ResponseReceived { /// Peer that sent the response. @@ -934,25 +933,28 @@ impl Network { &mut self, peer_id: PeerId, request_id: AppRequestId, - request: Request, + request: RequestType, ) -> Result<(), (AppRequestId, RPCError)> { // Check if the peer is connected before sending an RPC request if !self.swarm.is_connected(&peer_id) { return Err((request_id, RPCError::Disconnected)); } - self.eth2_rpc_mut().send_request( - peer_id, - RequestId::Application(request_id), - request.into(), - ); + self.eth2_rpc_mut() + .send_request(peer_id, RequestId::Application(request_id), request); Ok(()) } /// Send a successful response to a peer over RPC. - pub fn send_response(&mut self, peer_id: PeerId, id: PeerRequestId, response: Response) { + pub fn send_response( + &mut self, + peer_id: PeerId, + id: PeerRequestId, + request_id: rpc::RequestId, + response: Response, + ) { self.eth2_rpc_mut() - .send_response(peer_id, id, response.into()) + .send_response(peer_id, id, request_id, response.into()) } /// Inform the peer that their request produced an error. 
@@ -960,13 +962,15 @@ impl Network { &mut self, peer_id: PeerId, id: PeerRequestId, - error: RPCResponseErrorCode, + request_id: rpc::RequestId, + error: RpcErrorResponse, reason: String, ) { self.eth2_rpc_mut().send_response( peer_id, id, - RPCCodedResponse::Error(error, reason.into()), + request_id, + RpcResponse::Error(error, reason.into()), ) } @@ -1130,10 +1134,10 @@ impl Network { let event = if self.fork_context.spec.is_peer_das_scheduled() { // Nodes with higher custody will probably start advertising it // before peerdas is activated - OutboundRequest::MetaData(MetadataRequest::new_v3()) + RequestType::MetaData(MetadataRequest::new_v3()) } else { // We always prefer sending V2 requests otherwise - OutboundRequest::MetaData(MetadataRequest::new_v2()) + RequestType::MetaData(MetadataRequest::new_v2()) }; self.eth2_rpc_mut() .send_request(peer_id, RequestId::Internal, event); @@ -1144,12 +1148,14 @@ impl Network { &mut self, _req: MetadataRequest, id: PeerRequestId, + request_id: rpc::RequestId, peer_id: PeerId, ) { let metadata = self.network_globals.local_metadata.read().clone(); // The encoder is responsible for sending the negotiated version of the metadata - let event = RPCCodedResponse::Success(RPCResponse::MetaData(metadata)); - self.eth2_rpc_mut().send_response(peer_id, id, event); + let event = RpcResponse::Success(RpcSuccessResponse::MetaData(metadata)); + self.eth2_rpc_mut() + .send_response(peer_id, id, request_id, event); } // RPC Propagation methods @@ -1171,56 +1177,6 @@ impl Network { } } - /// Convenience function to propagate a request. 
- #[must_use = "actually return the event"] - fn build_request( - &mut self, - id: PeerRequestId, - peer_id: PeerId, - request: Request, - ) -> NetworkEvent { - // Increment metrics - match &request { - Request::Status(_) => { - metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["status"]) - } - Request::LightClientBootstrap(_) => { - metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["light_client_bootstrap"]) - } - Request::LightClientOptimisticUpdate => metrics::inc_counter_vec( - &metrics::TOTAL_RPC_REQUESTS, - &["light_client_optimistic_update"], - ), - Request::LightClientFinalityUpdate => metrics::inc_counter_vec( - &metrics::TOTAL_RPC_REQUESTS, - &["light_client_finality_update"], - ), - Request::BlocksByRange { .. } => { - metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["blocks_by_range"]) - } - Request::BlocksByRoot { .. } => { - metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["blocks_by_root"]) - } - Request::BlobsByRange { .. } => { - metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["blobs_by_range"]) - } - Request::BlobsByRoot { .. } => { - metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["blobs_by_root"]) - } - Request::DataColumnsByRoot { .. } => { - metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["data_columns_by_root"]) - } - Request::DataColumnsByRange { .. } => { - metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["data_columns_by_range"]) - } - } - NetworkEvent::RequestReceived { - peer_id, - id, - request, - } - } - /// Dial cached Enrs in discovery service that are in the given `subnet_id` and aren't /// in Connected, Dialing or Banned state. 
fn dial_cached_enrs_in_subnet(&mut self, subnet: Subnet, spec: Arc) { @@ -1406,7 +1362,7 @@ impl Network { return None; } - let handler_id = event.conn_id; + let connection_id = event.conn_id; // The METADATA and PING RPC responses are handled within the behaviour and not propagated match event.message { Err(handler_err) => { @@ -1444,21 +1400,25 @@ impl Network { } } } - Ok(RPCReceived::Request(id, request)) => { - let peer_request_id = (handler_id, id); - match request { + Ok(RPCReceived::Request(request)) => { + match request.r#type { /* Behaviour managed protocols: Ping and Metadata */ - InboundRequest::Ping(ping) => { + RequestType::Ping(ping) => { // inform the peer manager and send the response self.peer_manager_mut().ping_request(&peer_id, ping.data); None } - InboundRequest::MetaData(req) => { + RequestType::MetaData(req) => { // send the requested meta-data - self.send_meta_data_response(req, (handler_id, id), peer_id); + self.send_meta_data_response( + req, + (connection_id, request.substream_id), + request.id, + peer_id, + ); None } - InboundRequest::Goodbye(reason) => { + RequestType::Goodbye(reason) => { // queue for disconnection without a goodbye message debug!( self.log, "Peer sent Goodbye"; @@ -1473,17 +1433,19 @@ impl Network { None } /* Protocols propagated to the Network */ - InboundRequest::Status(msg) => { + RequestType::Status(_) => { // inform the peer manager that we have received a status from a peer self.peer_manager_mut().peer_statusd(&peer_id); + metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["status"]); // propagate the STATUS message upwards - let event = - self.build_request(peer_request_id, peer_id, Request::Status(msg)); - Some(event) + Some(NetworkEvent::RequestReceived { + peer_id, + id: (connection_id, request.substream_id), + request, + }) } - InboundRequest::BlocksByRange(req) => { + RequestType::BlocksByRange(ref req) => { // Still disconnect the peer if the request is naughty. 
- let mut count = *req.count(); if *req.step() == 0 { self.peer_manager_mut().handle_rpc_error( &peer_id, @@ -1495,131 +1457,144 @@ impl Network { ); return None; } - // return just one block in case the step parameter is used. https://github.com/ethereum/consensus-specs/pull/2856 - if *req.step() > 1 { - count = 1; - } - let request = match req { - methods::OldBlocksByRangeRequest::V1(req) => Request::BlocksByRange( - BlocksByRangeRequest::new_v1(req.start_slot, count), - ), - methods::OldBlocksByRangeRequest::V2(req) => Request::BlocksByRange( - BlocksByRangeRequest::new(req.start_slot, count), - ), - }; - let event = self.build_request(peer_request_id, peer_id, request); - Some(event) - } - InboundRequest::BlocksByRoot(req) => { - let event = self.build_request( - peer_request_id, - peer_id, - Request::BlocksByRoot(req), + metrics::inc_counter_vec( + &metrics::TOTAL_RPC_REQUESTS, + &["blocks_by_range"], ); - Some(event) - } - InboundRequest::BlobsByRange(req) => { - let event = self.build_request( - peer_request_id, + Some(NetworkEvent::RequestReceived { peer_id, - Request::BlobsByRange(req), - ); - Some(event) + id: (connection_id, request.substream_id), + request, + }) } - InboundRequest::BlobsByRoot(req) => { - let event = - self.build_request(peer_request_id, peer_id, Request::BlobsByRoot(req)); - Some(event) - } - InboundRequest::DataColumnsByRoot(req) => { - let event = self.build_request( - peer_request_id, + RequestType::BlocksByRoot(_) => { + metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["blocks_by_root"]); + Some(NetworkEvent::RequestReceived { peer_id, - Request::DataColumnsByRoot(req), - ); - Some(event) + id: (connection_id, request.substream_id), + request, + }) } - InboundRequest::DataColumnsByRange(req) => { - let event = self.build_request( - peer_request_id, + RequestType::BlobsByRange(_) => { + metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["blobs_by_range"]); + Some(NetworkEvent::RequestReceived { peer_id, - 
Request::DataColumnsByRange(req), - ); - Some(event) + id: (connection_id, request.substream_id), + request, + }) } - InboundRequest::LightClientBootstrap(req) => { - let event = self.build_request( - peer_request_id, + RequestType::BlobsByRoot(_) => { + metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["blobs_by_root"]); + Some(NetworkEvent::RequestReceived { peer_id, - Request::LightClientBootstrap(req), - ); - Some(event) + id: (connection_id, request.substream_id), + request, + }) } - InboundRequest::LightClientOptimisticUpdate => { - let event = self.build_request( - peer_request_id, - peer_id, - Request::LightClientOptimisticUpdate, + RequestType::DataColumnsByRoot(_) => { + metrics::inc_counter_vec( + &metrics::TOTAL_RPC_REQUESTS, + &["data_columns_by_root"], ); - Some(event) + Some(NetworkEvent::RequestReceived { + peer_id, + id: (connection_id, request.substream_id), + request, + }) } - InboundRequest::LightClientFinalityUpdate => { - let event = self.build_request( - peer_request_id, - peer_id, - Request::LightClientFinalityUpdate, + RequestType::DataColumnsByRange(_) => { + metrics::inc_counter_vec( + &metrics::TOTAL_RPC_REQUESTS, + &["data_columns_by_range"], ); - Some(event) + Some(NetworkEvent::RequestReceived { + peer_id, + id: (connection_id, request.substream_id), + request, + }) + } + RequestType::LightClientBootstrap(_) => { + metrics::inc_counter_vec( + &metrics::TOTAL_RPC_REQUESTS, + &["light_client_bootstrap"], + ); + Some(NetworkEvent::RequestReceived { + peer_id, + id: (connection_id, request.substream_id), + request, + }) + } + RequestType::LightClientOptimisticUpdate => { + metrics::inc_counter_vec( + &metrics::TOTAL_RPC_REQUESTS, + &["light_client_optimistic_update"], + ); + Some(NetworkEvent::RequestReceived { + peer_id, + id: (connection_id, request.substream_id), + request, + }) + } + RequestType::LightClientFinalityUpdate => { + metrics::inc_counter_vec( + &metrics::TOTAL_RPC_REQUESTS, + &["light_client_finality_update"], + ); 
+ Some(NetworkEvent::RequestReceived { + peer_id, + id: (connection_id, request.substream_id), + request, + }) } } } Ok(RPCReceived::Response(id, resp)) => { match resp { /* Behaviour managed protocols */ - RPCResponse::Pong(ping) => { + RpcSuccessResponse::Pong(ping) => { self.peer_manager_mut().pong_response(&peer_id, ping.data); None } - RPCResponse::MetaData(meta_data) => { + RpcSuccessResponse::MetaData(meta_data) => { self.peer_manager_mut() .meta_data_response(&peer_id, meta_data); None } /* Network propagated protocols */ - RPCResponse::Status(msg) => { + RpcSuccessResponse::Status(msg) => { // inform the peer manager that we have received a status from a peer self.peer_manager_mut().peer_statusd(&peer_id); // propagate the STATUS message upwards self.build_response(id, peer_id, Response::Status(msg)) } - RPCResponse::BlocksByRange(resp) => { + RpcSuccessResponse::BlocksByRange(resp) => { self.build_response(id, peer_id, Response::BlocksByRange(Some(resp))) } - RPCResponse::BlobsByRange(resp) => { + RpcSuccessResponse::BlobsByRange(resp) => { self.build_response(id, peer_id, Response::BlobsByRange(Some(resp))) } - RPCResponse::BlocksByRoot(resp) => { + RpcSuccessResponse::BlocksByRoot(resp) => { self.build_response(id, peer_id, Response::BlocksByRoot(Some(resp))) } - RPCResponse::BlobsByRoot(resp) => { + RpcSuccessResponse::BlobsByRoot(resp) => { self.build_response(id, peer_id, Response::BlobsByRoot(Some(resp))) } - RPCResponse::DataColumnsByRoot(resp) => { + RpcSuccessResponse::DataColumnsByRoot(resp) => { self.build_response(id, peer_id, Response::DataColumnsByRoot(Some(resp))) } - RPCResponse::DataColumnsByRange(resp) => { + RpcSuccessResponse::DataColumnsByRange(resp) => { self.build_response(id, peer_id, Response::DataColumnsByRange(Some(resp))) } // Should never be reached - RPCResponse::LightClientBootstrap(bootstrap) => { + RpcSuccessResponse::LightClientBootstrap(bootstrap) => { self.build_response(id, peer_id, 
Response::LightClientBootstrap(bootstrap)) } - RPCResponse::LightClientOptimisticUpdate(update) => self.build_response( + RpcSuccessResponse::LightClientOptimisticUpdate(update) => self.build_response( id, peer_id, Response::LightClientOptimisticUpdate(update), ), - RPCResponse::LightClientFinalityUpdate(update) => self.build_response( + RpcSuccessResponse::LightClientFinalityUpdate(update) => self.build_response( id, peer_id, Response::LightClientFinalityUpdate(update), diff --git a/beacon_node/lighthouse_network/tests/rpc_tests.rs b/beacon_node/lighthouse_network/tests/rpc_tests.rs index 8a0416c1f8..f721c8477c 100644 --- a/beacon_node/lighthouse_network/tests/rpc_tests.rs +++ b/beacon_node/lighthouse_network/tests/rpc_tests.rs @@ -3,9 +3,9 @@ mod common; use common::Protocol; -use lighthouse_network::rpc::methods::*; +use lighthouse_network::rpc::{methods::*, RequestType}; use lighthouse_network::service::api_types::AppRequestId; -use lighthouse_network::{rpc::max_rpc_size, NetworkEvent, ReportSource, Request, Response}; +use lighthouse_network::{rpc::max_rpc_size, NetworkEvent, ReportSource, Response}; use slog::{debug, warn, Level}; use ssz::Encode; use ssz_types::VariableList; @@ -75,7 +75,7 @@ fn test_tcp_status_rpc() { .await; // Dummy STATUS RPC message - let rpc_request = Request::Status(StatusMessage { + let rpc_request = RequestType::Status(StatusMessage { fork_digest: [0; 4], finalized_root: Hash256::zero(), finalized_epoch: Epoch::new(1), @@ -128,10 +128,10 @@ fn test_tcp_status_rpc() { id, request, } => { - if request == rpc_request { + if request.r#type == rpc_request { // send the response debug!(log, "Receiver Received"); - receiver.send_response(peer_id, id, rpc_response.clone()); + receiver.send_response(peer_id, id, request.id, rpc_response.clone()); } } _ => {} // Ignore other events @@ -177,7 +177,12 @@ fn test_tcp_blocks_by_range_chunked_rpc() { .await; // BlocksByRange Request - let rpc_request = 
Request::BlocksByRange(BlocksByRangeRequest::new(0, messages_to_send)); + let rpc_request = + RequestType::BlocksByRange(OldBlocksByRangeRequest::V2(OldBlocksByRangeRequestV2 { + start_slot: 0, + count: messages_to_send, + step: 1, + })); // BlocksByRange Response let full_block = BeaconBlock::Base(BeaconBlockBase::::full(&spec)); @@ -247,7 +252,7 @@ fn test_tcp_blocks_by_range_chunked_rpc() { id, request, } => { - if request == rpc_request { + if request.r#type == rpc_request { // send the response warn!(log, "Receiver got request"); for i in 0..messages_to_send { @@ -260,10 +265,20 @@ fn test_tcp_blocks_by_range_chunked_rpc() { } else { rpc_response_bellatrix_small.clone() }; - receiver.send_response(peer_id, id, rpc_response.clone()); + receiver.send_response( + peer_id, + id, + request.id, + rpc_response.clone(), + ); } // send the stream termination - receiver.send_response(peer_id, id, Response::BlocksByRange(None)); + receiver.send_response( + peer_id, + id, + request.id, + Response::BlocksByRange(None), + ); } } _ => {} // Ignore other events @@ -309,7 +324,7 @@ fn test_blobs_by_range_chunked_rpc() { .await; // BlobsByRange Request - let rpc_request = Request::BlobsByRange(BlobsByRangeRequest { + let rpc_request = RequestType::BlobsByRange(BlobsByRangeRequest { start_slot: 0, count: slot_count, }); @@ -367,16 +382,26 @@ fn test_blobs_by_range_chunked_rpc() { id, request, } => { - if request == rpc_request { + if request.r#type == rpc_request { // send the response warn!(log, "Receiver got request"); for _ in 0..messages_to_send { // Send first third of responses as base blocks, // second as altair and third as bellatrix. 
- receiver.send_response(peer_id, id, rpc_response.clone()); + receiver.send_response( + peer_id, + id, + request.id, + rpc_response.clone(), + ); } // send the stream termination - receiver.send_response(peer_id, id, Response::BlobsByRange(None)); + receiver.send_response( + peer_id, + id, + request.id, + Response::BlobsByRange(None), + ); } } _ => {} // Ignore other events @@ -422,7 +447,12 @@ fn test_tcp_blocks_by_range_over_limit() { .await; // BlocksByRange Request - let rpc_request = Request::BlocksByRange(BlocksByRangeRequest::new(0, messages_to_send)); + let rpc_request = + RequestType::BlocksByRange(OldBlocksByRangeRequest::V1(OldBlocksByRangeRequestV1 { + start_slot: 0, + count: messages_to_send, + step: 1, + })); // BlocksByRange Response let full_block = bellatrix_block_large(&common::fork_context(ForkName::Bellatrix), &spec); @@ -460,15 +490,25 @@ fn test_tcp_blocks_by_range_over_limit() { id, request, } => { - if request == rpc_request { + if request.r#type == rpc_request { // send the response warn!(log, "Receiver got request"); for _ in 0..messages_to_send { let rpc_response = rpc_response_bellatrix_large.clone(); - receiver.send_response(peer_id, id, rpc_response.clone()); + receiver.send_response( + peer_id, + id, + request.id, + rpc_response.clone(), + ); } // send the stream termination - receiver.send_response(peer_id, id, Response::BlocksByRange(None)); + receiver.send_response( + peer_id, + id, + request.id, + Response::BlocksByRange(None), + ); } } _ => {} // Ignore other events @@ -514,7 +554,12 @@ fn test_tcp_blocks_by_range_chunked_rpc_terminates_correctly() { .await; // BlocksByRange Request - let rpc_request = Request::BlocksByRange(BlocksByRangeRequest::new(0, messages_to_send)); + let rpc_request = + RequestType::BlocksByRange(OldBlocksByRangeRequest::V2(OldBlocksByRangeRequestV2 { + start_slot: 0, + count: messages_to_send, + step: 1, + })); // BlocksByRange Response let empty_block = BeaconBlock::empty(&spec); @@ -583,10 +628,10 @@ 
fn test_tcp_blocks_by_range_chunked_rpc_terminates_correctly() { }, _, )) => { - if request == rpc_request { + if request.r#type == rpc_request { // send the response warn!(log, "Receiver got request"); - message_info = Some((peer_id, id)); + message_info = Some((peer_id, id, request.id)); } } futures::future::Either::Right((_, _)) => {} // The timeout hit, send messages if required @@ -596,8 +641,8 @@ fn test_tcp_blocks_by_range_chunked_rpc_terminates_correctly() { // if we need to send messages send them here. This will happen after a delay if message_info.is_some() { messages_sent += 1; - let (peer_id, stream_id) = message_info.as_ref().unwrap(); - receiver.send_response(*peer_id, *stream_id, rpc_response.clone()); + let (peer_id, stream_id, request_id) = message_info.as_ref().unwrap(); + receiver.send_response(*peer_id, *stream_id, *request_id, rpc_response.clone()); debug!(log, "Sending message {}", messages_sent); if messages_sent == messages_to_send + extra_messages_to_send { // stop sending messages @@ -642,7 +687,12 @@ fn test_tcp_blocks_by_range_single_empty_rpc() { .await; // BlocksByRange Request - let rpc_request = Request::BlocksByRange(BlocksByRangeRequest::new(0, 10)); + let rpc_request = + RequestType::BlocksByRange(OldBlocksByRangeRequest::V2(OldBlocksByRangeRequestV2 { + start_slot: 0, + count: 10, + step: 1, + })); // BlocksByRange Response let empty_block = BeaconBlock::empty(&spec); @@ -696,15 +746,25 @@ fn test_tcp_blocks_by_range_single_empty_rpc() { id, request, } => { - if request == rpc_request { + if request.r#type == rpc_request { // send the response warn!(log, "Receiver got request"); for _ in 1..=messages_to_send { - receiver.send_response(peer_id, id, rpc_response.clone()); + receiver.send_response( + peer_id, + id, + request.id, + rpc_response.clone(), + ); } // send the stream termination - receiver.send_response(peer_id, id, Response::BlocksByRange(None)); + receiver.send_response( + peer_id, + id, + request.id, + 
Response::BlocksByRange(None), + ); } } _ => {} // Ignore other events @@ -750,7 +810,7 @@ fn test_tcp_blocks_by_root_chunked_rpc() { .await; // BlocksByRoot Request - let rpc_request = Request::BlocksByRoot(BlocksByRootRequest::new( + let rpc_request = RequestType::BlocksByRoot(BlocksByRootRequest::new( vec![ Hash256::zero(), Hash256::zero(), @@ -827,7 +887,7 @@ fn test_tcp_blocks_by_root_chunked_rpc() { id, request, } => { - if request == rpc_request { + if request.r#type == rpc_request { // send the response debug!(log, "Receiver got request"); @@ -840,11 +900,16 @@ fn test_tcp_blocks_by_root_chunked_rpc() { } else { rpc_response_bellatrix_small.clone() }; - receiver.send_response(peer_id, id, rpc_response); + receiver.send_response(peer_id, id, request.id, rpc_response); debug!(log, "Sending message"); } // send the stream termination - receiver.send_response(peer_id, id, Response::BlocksByRange(None)); + receiver.send_response( + peer_id, + id, + request.id, + Response::BlocksByRange(None), + ); debug!(log, "Send stream term"); } } @@ -888,7 +953,7 @@ fn test_tcp_blocks_by_root_chunked_rpc_terminates_correctly() { .await; // BlocksByRoot Request - let rpc_request = Request::BlocksByRoot(BlocksByRootRequest::new( + let rpc_request = RequestType::BlocksByRoot(BlocksByRootRequest::new( vec![ Hash256::zero(), Hash256::zero(), @@ -971,10 +1036,10 @@ fn test_tcp_blocks_by_root_chunked_rpc_terminates_correctly() { }, _, )) => { - if request == rpc_request { + if request.r#type == rpc_request { // send the response warn!(log, "Receiver got request"); - message_info = Some((peer_id, id)); + message_info = Some((peer_id, id, request.id)); } } futures::future::Either::Right((_, _)) => {} // The timeout hit, send messages if required @@ -984,8 +1049,8 @@ fn test_tcp_blocks_by_root_chunked_rpc_terminates_correctly() { // if we need to send messages send them here. 
This will happen after a delay if message_info.is_some() { messages_sent += 1; - let (peer_id, stream_id) = message_info.as_ref().unwrap(); - receiver.send_response(*peer_id, *stream_id, rpc_response.clone()); + let (peer_id, stream_id, request_id) = message_info.as_ref().unwrap(); + receiver.send_response(*peer_id, *stream_id, *request_id, rpc_response.clone()); debug!(log, "Sending message {}", messages_sent); if messages_sent == messages_to_send + extra_messages_to_send { // stop sending messages diff --git a/beacon_node/network/src/network_beacon_processor/mod.rs b/beacon_node/network/src/network_beacon_processor/mod.rs index 7f551c544c..5ec6140828 100644 --- a/beacon_node/network/src/network_beacon_processor/mod.rs +++ b/beacon_node/network/src/network_beacon_processor/mod.rs @@ -9,12 +9,14 @@ use beacon_processor::{ DuplicateCache, GossipAggregatePackage, GossipAttestationPackage, Work, WorkEvent as BeaconWorkEvent, }; +use lighthouse_network::discovery::ConnectionId; use lighthouse_network::rpc::methods::{ BlobsByRangeRequest, BlobsByRootRequest, DataColumnsByRangeRequest, DataColumnsByRootRequest, }; +use lighthouse_network::rpc::{RequestId, SubstreamId}; use lighthouse_network::{ rpc::{BlocksByRangeRequest, BlocksByRootRequest, LightClientBootstrapRequest, StatusMessage}, - Client, MessageId, NetworkGlobals, PeerId, PeerRequestId, + Client, MessageId, NetworkGlobals, PeerId, }; use slog::{debug, Logger}; use slot_clock::ManualSlotClock; @@ -596,13 +598,21 @@ impl NetworkBeaconProcessor { pub fn send_blocks_by_range_request( self: &Arc, peer_id: PeerId, - request_id: PeerRequestId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, request: BlocksByRangeRequest, ) -> Result<(), Error> { let processor = self.clone(); let process_fn = async move { processor - .handle_blocks_by_range_request(peer_id, request_id, request) + .handle_blocks_by_range_request( + peer_id, + connection_id, + substream_id, + request_id, + request, + ) 
.await; }; @@ -616,13 +626,21 @@ impl NetworkBeaconProcessor { pub fn send_blocks_by_roots_request( self: &Arc, peer_id: PeerId, - request_id: PeerRequestId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, request: BlocksByRootRequest, ) -> Result<(), Error> { let processor = self.clone(); let process_fn = async move { processor - .handle_blocks_by_root_request(peer_id, request_id, request) + .handle_blocks_by_root_request( + peer_id, + connection_id, + substream_id, + request_id, + request, + ) .await; }; @@ -636,12 +654,21 @@ impl NetworkBeaconProcessor { pub fn send_blobs_by_range_request( self: &Arc, peer_id: PeerId, - request_id: PeerRequestId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, request: BlobsByRangeRequest, ) -> Result<(), Error> { let processor = self.clone(); - let process_fn = - move || processor.handle_blobs_by_range_request(peer_id, request_id, request); + let process_fn = move || { + processor.handle_blobs_by_range_request( + peer_id, + connection_id, + substream_id, + request_id, + request, + ) + }; self.try_send(BeaconWorkEvent { drop_during_sync: false, @@ -653,12 +680,21 @@ impl NetworkBeaconProcessor { pub fn send_blobs_by_roots_request( self: &Arc, peer_id: PeerId, - request_id: PeerRequestId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, request: BlobsByRootRequest, ) -> Result<(), Error> { let processor = self.clone(); - let process_fn = - move || processor.handle_blobs_by_root_request(peer_id, request_id, request); + let process_fn = move || { + processor.handle_blobs_by_root_request( + peer_id, + connection_id, + substream_id, + request_id, + request, + ) + }; self.try_send(BeaconWorkEvent { drop_during_sync: false, @@ -670,12 +706,21 @@ impl NetworkBeaconProcessor { pub fn send_data_columns_by_roots_request( self: &Arc, peer_id: PeerId, - request_id: PeerRequestId, + connection_id: ConnectionId, + substream_id: 
SubstreamId, + request_id: RequestId, request: DataColumnsByRootRequest, ) -> Result<(), Error> { let processor = self.clone(); - let process_fn = - move || processor.handle_data_columns_by_root_request(peer_id, request_id, request); + let process_fn = move || { + processor.handle_data_columns_by_root_request( + peer_id, + connection_id, + substream_id, + request_id, + request, + ) + }; self.try_send(BeaconWorkEvent { drop_during_sync: false, @@ -687,12 +732,21 @@ impl NetworkBeaconProcessor { pub fn send_data_columns_by_range_request( self: &Arc, peer_id: PeerId, - request_id: PeerRequestId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, request: DataColumnsByRangeRequest, ) -> Result<(), Error> { let processor = self.clone(); - let process_fn = - move || processor.handle_data_columns_by_range_request(peer_id, request_id, request); + let process_fn = move || { + processor.handle_data_columns_by_range_request( + peer_id, + connection_id, + substream_id, + request_id, + request, + ) + }; self.try_send(BeaconWorkEvent { drop_during_sync: false, @@ -704,12 +758,21 @@ impl NetworkBeaconProcessor { pub fn send_light_client_bootstrap_request( self: &Arc, peer_id: PeerId, - request_id: PeerRequestId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, request: LightClientBootstrapRequest, ) -> Result<(), Error> { let processor = self.clone(); - let process_fn = - move || processor.handle_light_client_bootstrap(peer_id, request_id, request); + let process_fn = move || { + processor.handle_light_client_bootstrap( + peer_id, + connection_id, + substream_id, + request_id, + request, + ) + }; self.try_send(BeaconWorkEvent { drop_during_sync: true, @@ -721,11 +784,19 @@ impl NetworkBeaconProcessor { pub fn send_light_client_optimistic_update_request( self: &Arc, peer_id: PeerId, - request_id: PeerRequestId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, ) -> Result<(), 
Error> { let processor = self.clone(); - let process_fn = - move || processor.handle_light_client_optimistic_update(peer_id, request_id); + let process_fn = move || { + processor.handle_light_client_optimistic_update( + peer_id, + connection_id, + substream_id, + request_id, + ) + }; self.try_send(BeaconWorkEvent { drop_during_sync: true, @@ -737,10 +808,19 @@ impl NetworkBeaconProcessor { pub fn send_light_client_finality_update_request( self: &Arc, peer_id: PeerId, - request_id: PeerRequestId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, ) -> Result<(), Error> { let processor = self.clone(); - let process_fn = move || processor.handle_light_client_finality_update(peer_id, request_id); + let process_fn = move || { + processor.handle_light_client_finality_update( + peer_id, + connection_id, + substream_id, + request_id, + ) + }; self.try_send(BeaconWorkEvent { drop_during_sync: true, diff --git a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs index 0c98f5c17e..04e06c8e06 100644 --- a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs @@ -4,6 +4,7 @@ use crate::status::ToStatusMessage; use crate::sync::SyncMessage; use beacon_chain::{BeaconChainError, BeaconChainTypes, HistoricalBlockError, WhenSlotSkipped}; use itertools::process_results; +use lighthouse_network::discovery::ConnectionId; use lighthouse_network::rpc::methods::{ BlobsByRangeRequest, BlobsByRootRequest, DataColumnsByRangeRequest, DataColumnsByRootRequest, }; @@ -33,11 +34,14 @@ impl NetworkBeaconProcessor { &self, peer_id: PeerId, response: Response, - id: PeerRequestId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, ) { self.send_network_message(NetworkMessage::SendResponse { peer_id, - id, + request_id, + id: (connection_id, substream_id), response, }) } @@ 
-45,15 +49,17 @@ impl NetworkBeaconProcessor { pub fn send_error_response( &self, peer_id: PeerId, - error: RPCResponseErrorCode, + error: RpcErrorResponse, reason: String, id: PeerRequestId, + request_id: RequestId, ) { self.send_network_message(NetworkMessage::SendErrorResponse { peer_id, error, reason, id, + request_id, }) } @@ -131,14 +137,24 @@ impl NetworkBeaconProcessor { pub async fn handle_blocks_by_root_request( self: Arc, peer_id: PeerId, - request_id: PeerRequestId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, request: BlocksByRootRequest, ) { self.terminate_response_stream( peer_id, + connection_id, + substream_id, request_id, self.clone() - .handle_blocks_by_root_request_inner(peer_id, request_id, request) + .handle_blocks_by_root_request_inner( + peer_id, + connection_id, + substream_id, + request_id, + request, + ) .await, Response::BlocksByRoot, ); @@ -148,9 +164,11 @@ impl NetworkBeaconProcessor { pub async fn handle_blocks_by_root_request_inner( self: Arc, peer_id: PeerId, - request_id: PeerRequestId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, request: BlocksByRootRequest, - ) -> Result<(), (RPCResponseErrorCode, &'static str)> { + ) -> Result<(), (RpcErrorResponse, &'static str)> { let log_results = |peer_id, requested_blocks, send_block_count| { debug!( self.log, @@ -169,10 +187,7 @@ impl NetworkBeaconProcessor { Ok(block_stream) => block_stream, Err(e) => { error!(self.log, "Error getting block stream"; "error" => ?e); - return Err(( - RPCResponseErrorCode::ServerError, - "Error getting block stream", - )); + return Err((RpcErrorResponse::ServerError, "Error getting block stream")); } }; // Fetching blocks is async because it may have to hit the execution layer for payloads. 
@@ -183,6 +198,8 @@ impl NetworkBeaconProcessor { self.send_response( peer_id, Response::BlocksByRoot(Some(block.clone())), + connection_id, + substream_id, request_id, ); send_block_count += 1; @@ -204,7 +221,7 @@ impl NetworkBeaconProcessor { ); log_results(peer_id, requested_blocks, send_block_count); return Err(( - RPCResponseErrorCode::ResourceUnavailable, + RpcErrorResponse::ResourceUnavailable, "Execution layer not synced", )); } @@ -228,13 +245,23 @@ impl NetworkBeaconProcessor { pub fn handle_blobs_by_root_request( self: Arc, peer_id: PeerId, - request_id: PeerRequestId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, request: BlobsByRootRequest, ) { self.terminate_response_stream( peer_id, + connection_id, + substream_id, request_id, - self.handle_blobs_by_root_request_inner(peer_id, request_id, request), + self.handle_blobs_by_root_request_inner( + peer_id, + connection_id, + substream_id, + request_id, + request, + ), Response::BlobsByRoot, ); } @@ -243,9 +270,11 @@ impl NetworkBeaconProcessor { pub fn handle_blobs_by_root_request_inner( &self, peer_id: PeerId, - request_id: PeerRequestId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, request: BlobsByRootRequest, - ) -> Result<(), (RPCResponseErrorCode, &'static str)> { + ) -> Result<(), (RpcErrorResponse, &'static str)> { let Some(requested_root) = request.blob_ids.as_slice().first().map(|id| id.block_root) else { // No blob ids requested. @@ -263,7 +292,13 @@ impl NetworkBeaconProcessor { for id in request.blob_ids.as_slice() { // First attempt to get the blobs from the RPC cache. 
if let Ok(Some(blob)) = self.chain.data_availability_checker.get_blob(id) { - self.send_response(peer_id, Response::BlobsByRoot(Some(blob)), request_id); + self.send_response( + peer_id, + Response::BlobsByRoot(Some(blob)), + connection_id, + substream_id, + request_id, + ); send_blob_count += 1; } else { let BlobIdentifier { @@ -285,6 +320,8 @@ impl NetworkBeaconProcessor { self.send_response( peer_id, Response::BlobsByRoot(Some(blob_sidecar.clone())), + connection_id, + substream_id, request_id, ); send_blob_count += 1; @@ -320,13 +357,23 @@ impl NetworkBeaconProcessor { pub fn handle_data_columns_by_root_request( self: Arc, peer_id: PeerId, - request_id: PeerRequestId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, request: DataColumnsByRootRequest, ) { self.terminate_response_stream( peer_id, + connection_id, + substream_id, request_id, - self.handle_data_columns_by_root_request_inner(peer_id, request_id, request), + self.handle_data_columns_by_root_request_inner( + peer_id, + connection_id, + substream_id, + request_id, + request, + ), Response::DataColumnsByRoot, ); } @@ -335,9 +382,11 @@ impl NetworkBeaconProcessor { pub fn handle_data_columns_by_root_request_inner( &self, peer_id: PeerId, - request_id: PeerRequestId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, request: DataColumnsByRootRequest, - ) -> Result<(), (RPCResponseErrorCode, &'static str)> { + ) -> Result<(), (RpcErrorResponse, &'static str)> { let mut send_data_column_count = 0; for data_column_id in request.data_column_ids.as_slice() { @@ -350,6 +399,8 @@ impl NetworkBeaconProcessor { self.send_response( peer_id, Response::DataColumnsByRoot(Some(data_column)), + connection_id, + substream_id, request_id, ); } @@ -361,10 +412,7 @@ impl NetworkBeaconProcessor { "peer" => %peer_id, "error" => ?e ); - return Err(( - RPCResponseErrorCode::ServerError, - "Error getting data column", - )); + return 
Err((RpcErrorResponse::ServerError, "Error getting data column")); } } } @@ -384,16 +432,20 @@ impl NetworkBeaconProcessor { pub fn handle_light_client_bootstrap( self: &Arc, peer_id: PeerId, - request_id: PeerRequestId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, request: LightClientBootstrapRequest, ) { self.terminate_response_single_item( peer_id, + connection_id, + substream_id, request_id, match self.chain.get_light_client_bootstrap(&request.root) { Ok(Some((bootstrap, _))) => Ok(Arc::new(bootstrap)), Ok(None) => Err(( - RPCResponseErrorCode::ResourceUnavailable, + RpcErrorResponse::ResourceUnavailable, "Bootstrap not available".to_string(), )), Err(e) => { @@ -402,10 +454,7 @@ impl NetworkBeaconProcessor { "peer" => %peer_id, "error" => ?e ); - Err(( - RPCResponseErrorCode::ResourceUnavailable, - format!("{:?}", e), - )) + Err((RpcErrorResponse::ResourceUnavailable, format!("{:?}", e))) } }, Response::LightClientBootstrap, @@ -416,10 +465,14 @@ impl NetworkBeaconProcessor { pub fn handle_light_client_optimistic_update( self: &Arc, peer_id: PeerId, - request_id: PeerRequestId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, ) { self.terminate_response_single_item( peer_id, + connection_id, + substream_id, request_id, match self .chain @@ -428,7 +481,7 @@ impl NetworkBeaconProcessor { { Some(update) => Ok(Arc::new(update)), None => Err(( - RPCResponseErrorCode::ResourceUnavailable, + RpcErrorResponse::ResourceUnavailable, "Latest optimistic update not available".to_string(), )), }, @@ -440,10 +493,14 @@ impl NetworkBeaconProcessor { pub fn handle_light_client_finality_update( self: &Arc, peer_id: PeerId, - request_id: PeerRequestId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, ) { self.terminate_response_single_item( peer_id, + connection_id, + substream_id, request_id, match self .chain @@ -452,7 +509,7 @@ impl NetworkBeaconProcessor { { 
Some(update) => Ok(Arc::new(update)), None => Err(( - RPCResponseErrorCode::ResourceUnavailable, + RpcErrorResponse::ResourceUnavailable, "Latest finality update not available".to_string(), )), }, @@ -464,14 +521,24 @@ impl NetworkBeaconProcessor { pub async fn handle_blocks_by_range_request( self: Arc, peer_id: PeerId, - request_id: PeerRequestId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, req: BlocksByRangeRequest, ) { self.terminate_response_stream( peer_id, + connection_id, + substream_id, request_id, self.clone() - .handle_blocks_by_range_request_inner(peer_id, request_id, req) + .handle_blocks_by_range_request_inner( + peer_id, + connection_id, + substream_id, + request_id, + req, + ) .await, Response::BlocksByRange, ); @@ -481,9 +548,11 @@ impl NetworkBeaconProcessor { pub async fn handle_blocks_by_range_request_inner( self: Arc, peer_id: PeerId, - request_id: PeerRequestId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, req: BlocksByRangeRequest, - ) -> Result<(), (RPCResponseErrorCode, &'static str)> { + ) -> Result<(), (RpcErrorResponse, &'static str)> { debug!(self.log, "Received BlocksByRange Request"; "peer_id" => %peer_id, "count" => req.count(), @@ -507,7 +576,7 @@ impl NetworkBeaconProcessor { }); if *req.count() > max_request_size { return Err(( - RPCResponseErrorCode::InvalidRequest, + RpcErrorResponse::InvalidRequest, "Request exceeded max size", )); } @@ -527,7 +596,7 @@ impl NetworkBeaconProcessor { "requested_slot" => slot, "oldest_known_slot" => oldest_block_slot ); - return Err((RPCResponseErrorCode::ResourceUnavailable, "Backfilling")); + return Err((RpcErrorResponse::ResourceUnavailable, "Backfilling")); } Err(e) => { error!(self.log, "Unable to obtain root iter"; @@ -535,7 +604,7 @@ impl NetworkBeaconProcessor { "peer" => %peer_id, "error" => ?e ); - return Err((RPCResponseErrorCode::ServerError, "Database error")); + return Err((RpcErrorResponse::ServerError, 
"Database error")); } }; @@ -566,7 +635,7 @@ impl NetworkBeaconProcessor { "peer" => %peer_id, "error" => ?e ); - return Err((RPCResponseErrorCode::ServerError, "Iteration error")); + return Err((RpcErrorResponse::ServerError, "Iteration error")); } }; @@ -607,7 +676,7 @@ impl NetworkBeaconProcessor { Ok(block_stream) => block_stream, Err(e) => { error!(self.log, "Error getting block stream"; "error" => ?e); - return Err((RPCResponseErrorCode::ServerError, "Iterator error")); + return Err((RpcErrorResponse::ServerError, "Iterator error")); } }; @@ -624,8 +693,9 @@ impl NetworkBeaconProcessor { blocks_sent += 1; self.send_network_message(NetworkMessage::SendResponse { peer_id, + request_id, response: Response::BlocksByRange(Some(block.clone())), - id: request_id, + id: (connection_id, substream_id), }); } } @@ -638,7 +708,7 @@ impl NetworkBeaconProcessor { "request_root" => ?root ); log_results(req, peer_id, blocks_sent); - return Err((RPCResponseErrorCode::ServerError, "Database inconsistency")); + return Err((RpcErrorResponse::ServerError, "Database inconsistency")); } Err(BeaconChainError::BlockHashMissingFromExecutionLayer(_)) => { debug!( @@ -650,7 +720,7 @@ impl NetworkBeaconProcessor { log_results(req, peer_id, blocks_sent); // send the stream terminator return Err(( - RPCResponseErrorCode::ResourceUnavailable, + RpcErrorResponse::ResourceUnavailable, "Execution layer not synced", )); } @@ -677,7 +747,7 @@ impl NetworkBeaconProcessor { } log_results(req, peer_id, blocks_sent); // send the stream terminator - return Err((RPCResponseErrorCode::ServerError, "Failed fetching blocks")); + return Err((RpcErrorResponse::ServerError, "Failed fetching blocks")); } } } @@ -690,13 +760,23 @@ impl NetworkBeaconProcessor { pub fn handle_blobs_by_range_request( self: Arc, peer_id: PeerId, - request_id: PeerRequestId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, req: BlobsByRangeRequest, ) { self.terminate_response_stream( peer_id, + 
connection_id, + substream_id, request_id, - self.handle_blobs_by_range_request_inner(peer_id, request_id, req), + self.handle_blobs_by_range_request_inner( + peer_id, + connection_id, + substream_id, + request_id, + req, + ), Response::BlobsByRange, ); } @@ -705,9 +785,11 @@ impl NetworkBeaconProcessor { fn handle_blobs_by_range_request_inner( &self, peer_id: PeerId, - request_id: PeerRequestId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, req: BlobsByRangeRequest, - ) -> Result<(), (RPCResponseErrorCode, &'static str)> { + ) -> Result<(), (RpcErrorResponse, &'static str)> { debug!(self.log, "Received BlobsByRange Request"; "peer_id" => %peer_id, "count" => req.count, @@ -717,7 +799,7 @@ impl NetworkBeaconProcessor { // Should not send more than max request blocks if req.max_blobs_requested::() > self.chain.spec.max_request_blob_sidecars { return Err(( - RPCResponseErrorCode::InvalidRequest, + RpcErrorResponse::InvalidRequest, "Request exceeded `MAX_REQUEST_BLOBS_SIDECARS`", )); } @@ -728,10 +810,7 @@ impl NetworkBeaconProcessor { Some(boundary) => boundary.start_slot(T::EthSpec::slots_per_epoch()), None => { debug!(self.log, "Deneb fork is disabled"); - return Err(( - RPCResponseErrorCode::InvalidRequest, - "Deneb fork is disabled", - )); + return Err((RpcErrorResponse::InvalidRequest, "Deneb fork is disabled")); } }; @@ -752,12 +831,12 @@ impl NetworkBeaconProcessor { return if data_availability_boundary_slot < oldest_blob_slot { Err(( - RPCResponseErrorCode::ResourceUnavailable, + RpcErrorResponse::ResourceUnavailable, "blobs pruned within boundary", )) } else { Err(( - RPCResponseErrorCode::InvalidRequest, + RpcErrorResponse::InvalidRequest, "Req outside availability period", )) }; @@ -776,7 +855,7 @@ impl NetworkBeaconProcessor { "requested_slot" => slot, "oldest_known_slot" => oldest_block_slot ); - return Err((RPCResponseErrorCode::ResourceUnavailable, "Backfilling")); + return 
Err((RpcErrorResponse::ResourceUnavailable, "Backfilling")); } Err(e) => { error!(self.log, "Unable to obtain root iter"; @@ -784,7 +863,7 @@ impl NetworkBeaconProcessor { "peer" => %peer_id, "error" => ?e ); - return Err((RPCResponseErrorCode::ServerError, "Database error")); + return Err((RpcErrorResponse::ServerError, "Database error")); } }; @@ -821,7 +900,7 @@ impl NetworkBeaconProcessor { "peer" => %peer_id, "error" => ?e ); - return Err((RPCResponseErrorCode::ServerError, "Database error")); + return Err((RpcErrorResponse::ServerError, "Database error")); } }; @@ -854,7 +933,8 @@ impl NetworkBeaconProcessor { self.send_network_message(NetworkMessage::SendResponse { peer_id, response: Response::BlobsByRange(Some(blob_sidecar.clone())), - id: request_id, + request_id, + id: (connection_id, substream_id), }); } } @@ -870,7 +950,7 @@ impl NetworkBeaconProcessor { log_results(peer_id, req, blobs_sent); return Err(( - RPCResponseErrorCode::ServerError, + RpcErrorResponse::ServerError, "No blobs and failed fetching corresponding block", )); } @@ -885,13 +965,23 @@ impl NetworkBeaconProcessor { pub fn handle_data_columns_by_range_request( &self, peer_id: PeerId, - request_id: PeerRequestId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, req: DataColumnsByRangeRequest, ) { self.terminate_response_stream( peer_id, + connection_id, + substream_id, request_id, - self.handle_data_columns_by_range_request_inner(peer_id, request_id, req), + self.handle_data_columns_by_range_request_inner( + peer_id, + connection_id, + substream_id, + request_id, + req, + ), Response::DataColumnsByRange, ); } @@ -900,9 +990,11 @@ impl NetworkBeaconProcessor { pub fn handle_data_columns_by_range_request_inner( &self, peer_id: PeerId, - request_id: PeerRequestId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, req: DataColumnsByRangeRequest, - ) -> Result<(), (RPCResponseErrorCode, &'static str)> { + ) -> Result<(), 
(RpcErrorResponse, &'static str)> { debug!(self.log, "Received DataColumnsByRange Request"; "peer_id" => %peer_id, "count" => req.count, @@ -912,7 +1004,7 @@ impl NetworkBeaconProcessor { // Should not send more than max request data columns if req.max_requested::() > self.chain.spec.max_request_data_column_sidecars { return Err(( - RPCResponseErrorCode::InvalidRequest, + RpcErrorResponse::InvalidRequest, "Request exceeded `MAX_REQUEST_BLOBS_SIDECARS`", )); } @@ -923,10 +1015,7 @@ impl NetworkBeaconProcessor { Some(boundary) => boundary.start_slot(T::EthSpec::slots_per_epoch()), None => { debug!(self.log, "Deneb fork is disabled"); - return Err(( - RPCResponseErrorCode::InvalidRequest, - "Deneb fork is disabled", - )); + return Err((RpcErrorResponse::InvalidRequest, "Deneb fork is disabled")); } }; @@ -948,12 +1037,12 @@ impl NetworkBeaconProcessor { return if data_availability_boundary_slot < oldest_data_column_slot { Err(( - RPCResponseErrorCode::ResourceUnavailable, + RpcErrorResponse::ResourceUnavailable, "blobs pruned within boundary", )) } else { Err(( - RPCResponseErrorCode::InvalidRequest, + RpcErrorResponse::InvalidRequest, "Req outside availability period", )) }; @@ -972,7 +1061,7 @@ impl NetworkBeaconProcessor { "requested_slot" => slot, "oldest_known_slot" => oldest_block_slot ); - return Err((RPCResponseErrorCode::ResourceUnavailable, "Backfilling")); + return Err((RpcErrorResponse::ResourceUnavailable, "Backfilling")); } Err(e) => { error!(self.log, "Unable to obtain root iter"; @@ -980,7 +1069,7 @@ impl NetworkBeaconProcessor { "peer" => %peer_id, "error" => ?e ); - return Err((RPCResponseErrorCode::ServerError, "Database error")); + return Err((RpcErrorResponse::ServerError, "Database error")); } }; @@ -1017,7 +1106,7 @@ impl NetworkBeaconProcessor { "peer" => %peer_id, "error" => ?e ); - return Err((RPCResponseErrorCode::ServerError, "Database error")); + return Err((RpcErrorResponse::ServerError, "Database error")); } }; @@ -1032,10 +1121,11 @@ 
impl NetworkBeaconProcessor { data_columns_sent += 1; self.send_network_message(NetworkMessage::SendResponse { peer_id, + request_id, response: Response::DataColumnsByRange(Some( data_column_sidecar.clone(), )), - id: request_id, + id: (connection_id, substream_id), }); } Ok(None) => {} // no-op @@ -1049,7 +1139,7 @@ impl NetworkBeaconProcessor { "error" => ?e ); return Err(( - RPCResponseErrorCode::ServerError, + RpcErrorResponse::ServerError, "No data columns and failed fetching corresponding block", )); } @@ -1080,8 +1170,10 @@ impl NetworkBeaconProcessor { fn terminate_response_single_item Response>( &self, peer_id: PeerId, - request_id: PeerRequestId, - result: Result, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, + result: Result, into_response: F, ) { match result { @@ -1091,12 +1183,19 @@ impl NetworkBeaconProcessor { // https://github.com/sigp/lighthouse/blob/3058b96f2560f1da04ada4f9d8ba8e5651794ff6/beacon_node/lighthouse_network/src/rpc/handler.rs#L555-L558 self.send_network_message(NetworkMessage::SendResponse { peer_id, + request_id, response: into_response(resp), - id: request_id, + id: (connection_id, substream_id), }); } Err((error_code, reason)) => { - self.send_error_response(peer_id, error_code, reason, request_id); + self.send_error_response( + peer_id, + error_code, + reason, + (connection_id, substream_id), + request_id, + ); } } } @@ -1106,18 +1205,27 @@ impl NetworkBeaconProcessor { fn terminate_response_stream) -> Response>( &self, peer_id: PeerId, - request_id: PeerRequestId, - result: Result<(), (RPCResponseErrorCode, &'static str)>, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, + result: Result<(), (RpcErrorResponse, &'static str)>, into_response: F, ) { match result { Ok(_) => self.send_network_message(NetworkMessage::SendResponse { peer_id, + request_id, response: into_response(None), - id: request_id, + id: (connection_id, substream_id), }), Err((error_code, 
reason)) => { - self.send_error_response(peer_id, error_code, reason.into(), request_id); + self.send_error_response( + peer_id, + error_code, + reason.into(), + (connection_id, substream_id), + request_id, + ); } } } diff --git a/beacon_node/network/src/network_beacon_processor/tests.rs b/beacon_node/network/src/network_beacon_processor/tests.rs index 6e8f151a05..9d774d97c1 100644 --- a/beacon_node/network/src/network_beacon_processor/tests.rs +++ b/beacon_node/network/src/network_beacon_processor/tests.rs @@ -16,7 +16,7 @@ use beacon_chain::{BeaconChain, WhenSlotSkipped}; use beacon_processor::{work_reprocessing_queue::*, *}; use lighthouse_network::discovery::ConnectionId; use lighthouse_network::rpc::methods::BlobsByRangeRequest; -use lighthouse_network::rpc::SubstreamId; +use lighthouse_network::rpc::{RequestId, SubstreamId}; use lighthouse_network::{ discv5::enr::{self, CombinedKey}, rpc::methods::{MetaData, MetaDataV2}, @@ -360,7 +360,9 @@ impl TestRig { self.network_beacon_processor .send_blobs_by_range_request( PeerId::random(), - (ConnectionId::new_unchecked(42), SubstreamId::new(24)), + ConnectionId::new_unchecked(42), + SubstreamId::new(24), + RequestId::new_unchecked(0), BlobsByRangeRequest { start_slot: 0, count, @@ -1137,6 +1139,7 @@ async fn test_blobs_by_range() { peer_id: _, response: Response::BlobsByRange(blob), id: _, + request_id: _, } = next { if blob.is_some() { diff --git a/beacon_node/network/src/router.rs b/beacon_node/network/src/router.rs index 26c1d14f02..f05cb01fa4 100644 --- a/beacon_node/network/src/router.rs +++ b/beacon_node/network/src/router.rs @@ -15,10 +15,12 @@ use beacon_processor::{ work_reprocessing_queue::ReprocessQueueMessage, BeaconProcessorSend, DuplicateCache, }; use futures::prelude::*; +use lighthouse_network::discovery::ConnectionId; use lighthouse_network::rpc::*; use lighthouse_network::{ + rpc, service::api_types::{AppRequestId, SyncRequestId}, - MessageId, NetworkGlobals, PeerId, PeerRequestId, PubsubMessage, 
Request, Response, + MessageId, NetworkGlobals, PeerId, PeerRequestId, PubsubMessage, Response, }; use logging::TimeLatch; use slog::{crit, debug, o, trace}; @@ -56,7 +58,7 @@ pub enum RouterMessage { RPCRequestReceived { peer_id: PeerId, id: PeerRequestId, - request: Request, + request: rpc::Request, }, /// An RPC response has been received. RPCResponseReceived { @@ -191,51 +193,125 @@ impl Router { /* RPC - Related functionality */ /// A new RPC request has been received from the network. - fn handle_rpc_request(&mut self, peer_id: PeerId, request_id: PeerRequestId, request: Request) { + fn handle_rpc_request( + &mut self, + peer_id: PeerId, + request_id: PeerRequestId, + rpc_request: rpc::Request, + ) { if !self.network_globals.peers.read().is_connected(&peer_id) { - debug!(self.log, "Dropping request of disconnected peer"; "peer_id" => %peer_id, "request" => ?request); + debug!(self.log, "Dropping request of disconnected peer"; "peer_id" => %peer_id, "request" => ?rpc_request); return; } - match request { - Request::Status(status_message) => { - self.on_status_request(peer_id, request_id, status_message) + match rpc_request.r#type { + RequestType::Status(status_message) => self.on_status_request( + peer_id, + request_id.0, + request_id.1, + rpc_request.id, + status_message, + ), + RequestType::BlocksByRange(request) => { + // return just one block in case the step parameter is used. 
https://github.com/ethereum/consensus-specs/pull/2856 + let mut count = *request.count(); + if *request.step() > 1 { + count = 1; + } + let blocks_request = match request { + methods::OldBlocksByRangeRequest::V1(req) => { + BlocksByRangeRequest::new_v1(req.start_slot, count) + } + methods::OldBlocksByRangeRequest::V2(req) => { + BlocksByRangeRequest::new(req.start_slot, count) + } + }; + + self.handle_beacon_processor_send_result( + self.network_beacon_processor.send_blocks_by_range_request( + peer_id, + request_id.0, + request_id.1, + rpc_request.id, + blocks_request, + ), + ) } - Request::BlocksByRange(request) => self.handle_beacon_processor_send_result( - self.network_beacon_processor - .send_blocks_by_range_request(peer_id, request_id, request), + RequestType::BlocksByRoot(request) => self.handle_beacon_processor_send_result( + self.network_beacon_processor.send_blocks_by_roots_request( + peer_id, + request_id.0, + request_id.1, + rpc_request.id, + request, + ), ), - Request::BlocksByRoot(request) => self.handle_beacon_processor_send_result( - self.network_beacon_processor - .send_blocks_by_roots_request(peer_id, request_id, request), + RequestType::BlobsByRange(request) => self.handle_beacon_processor_send_result( + self.network_beacon_processor.send_blobs_by_range_request( + peer_id, + request_id.0, + request_id.1, + rpc_request.id, + request, + ), ), - Request::BlobsByRange(request) => self.handle_beacon_processor_send_result( - self.network_beacon_processor - .send_blobs_by_range_request(peer_id, request_id, request), + RequestType::BlobsByRoot(request) => self.handle_beacon_processor_send_result( + self.network_beacon_processor.send_blobs_by_roots_request( + peer_id, + request_id.0, + request_id.1, + rpc_request.id, + request, + ), ), - Request::BlobsByRoot(request) => self.handle_beacon_processor_send_result( + RequestType::DataColumnsByRoot(request) => self.handle_beacon_processor_send_result( self.network_beacon_processor - 
.send_blobs_by_roots_request(peer_id, request_id, request), + .send_data_columns_by_roots_request( + peer_id, + request_id.0, + request_id.1, + rpc_request.id, + request, + ), ), - Request::DataColumnsByRoot(request) => self.handle_beacon_processor_send_result( + RequestType::DataColumnsByRange(request) => self.handle_beacon_processor_send_result( self.network_beacon_processor - .send_data_columns_by_roots_request(peer_id, request_id, request), + .send_data_columns_by_range_request( + peer_id, + request_id.0, + request_id.1, + rpc_request.id, + request, + ), ), - Request::DataColumnsByRange(request) => self.handle_beacon_processor_send_result( + RequestType::LightClientBootstrap(request) => self.handle_beacon_processor_send_result( self.network_beacon_processor - .send_data_columns_by_range_request(peer_id, request_id, request), + .send_light_client_bootstrap_request( + peer_id, + request_id.0, + request_id.1, + rpc_request.id, + request, + ), ), - Request::LightClientBootstrap(request) => self.handle_beacon_processor_send_result( + RequestType::LightClientOptimisticUpdate => self.handle_beacon_processor_send_result( self.network_beacon_processor - .send_light_client_bootstrap_request(peer_id, request_id, request), + .send_light_client_optimistic_update_request( + peer_id, + request_id.0, + request_id.1, + rpc_request.id, + ), ), - Request::LightClientOptimisticUpdate => self.handle_beacon_processor_send_result( + RequestType::LightClientFinalityUpdate => self.handle_beacon_processor_send_result( self.network_beacon_processor - .send_light_client_optimistic_update_request(peer_id, request_id), - ), - Request::LightClientFinalityUpdate => self.handle_beacon_processor_send_result( - self.network_beacon_processor - .send_light_client_finality_update_request(peer_id, request_id), + .send_light_client_finality_update_request( + peer_id, + request_id.0, + request_id.1, + rpc_request.id, + ), ), + _ => {} } } @@ -461,7 +537,7 @@ impl Router { let status_message = 
status_message(&self.chain); debug!(self.log, "Sending Status Request"; "peer" => %peer_id, &status_message); self.network - .send_processor_request(peer_id, Request::Status(status_message)); + .send_processor_request(peer_id, RequestType::Status(status_message)); } fn send_to_sync(&mut self, message: SyncMessage) { @@ -493,7 +569,9 @@ impl Router { pub fn on_status_request( &mut self, peer_id: PeerId, - request_id: PeerRequestId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, status: StatusMessage, ) { debug!(self.log, "Received Status Request"; "peer_id" => %peer_id, &status); @@ -502,6 +580,7 @@ impl Router { self.network.send_response( peer_id, Response::Status(status_message(&self.chain)), + (connection_id, substream_id), request_id, ); @@ -745,7 +824,7 @@ impl HandlerNetworkContext { } /// Sends a request to the network task. - pub fn send_processor_request(&mut self, peer_id: PeerId, request: Request) { + pub fn send_processor_request(&mut self, peer_id: PeerId, request: RequestType) { self.inform_network(NetworkMessage::SendRequest { peer_id, request_id: AppRequestId::Router, @@ -754,8 +833,15 @@ impl HandlerNetworkContext { } /// Sends a response to the network task. 
- pub fn send_response(&mut self, peer_id: PeerId, response: Response, id: PeerRequestId) { + pub fn send_response( + &mut self, + peer_id: PeerId, + response: Response, + id: PeerRequestId, + request_id: RequestId, + ) { self.inform_network(NetworkMessage::SendResponse { + request_id, peer_id, id, response, diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 150402a7ab..f36d11ecdd 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -14,12 +14,13 @@ use futures::channel::mpsc::Sender; use futures::future::OptionFuture; use futures::prelude::*; use futures::StreamExt; +use lighthouse_network::rpc::{RequestId, RequestType}; use lighthouse_network::service::Network; use lighthouse_network::types::GossipKind; use lighthouse_network::{prometheus_client::registry::Registry, MessageAcceptance}; use lighthouse_network::{ - rpc::{GoodbyeReason, RPCResponseErrorCode}, - Context, PeerAction, PeerRequestId, PubsubMessage, ReportSource, Request, Response, Subnet, + rpc::{GoodbyeReason, RpcErrorResponse}, + Context, PeerAction, PeerRequestId, PubsubMessage, ReportSource, Response, Subnet, }; use lighthouse_network::{ service::api_types::AppRequestId, @@ -61,19 +62,21 @@ pub enum NetworkMessage { /// Send an RPC request to the libp2p service. SendRequest { peer_id: PeerId, - request: Request, + request: RequestType, request_id: AppRequestId, }, /// Send a successful Response to the libp2p service. SendResponse { peer_id: PeerId, + request_id: RequestId, response: Response, id: PeerRequestId, }, /// Sends an error response to an RPC request. 
SendErrorResponse { peer_id: PeerId, - error: RPCResponseErrorCode, + request_id: RequestId, + error: RpcErrorResponse, reason: String, id: PeerRequestId, }, @@ -623,16 +626,19 @@ impl NetworkService { peer_id, response, id, + request_id, } => { - self.libp2p.send_response(peer_id, id, response); + self.libp2p.send_response(peer_id, id, request_id, response); } NetworkMessage::SendErrorResponse { peer_id, error, id, + request_id, reason, } => { - self.libp2p.send_error_response(peer_id, id, error, reason); + self.libp2p + .send_error_response(peer_id, id, request_id, error, reason); } NetworkMessage::ValidationResult { propagation_source, diff --git a/beacon_node/network/src/sync/block_lookups/tests.rs b/beacon_node/network/src/sync/block_lookups/tests.rs index c0a766137b..ffbdd43b5f 100644 --- a/beacon_node/network/src/sync/block_lookups/tests.rs +++ b/beacon_node/network/src/sync/block_lookups/tests.rs @@ -22,13 +22,14 @@ use beacon_chain::{ AvailabilityPendingExecutedBlock, PayloadVerificationOutcome, PayloadVerificationStatus, }; use beacon_processor::WorkEvent; -use lighthouse_network::rpc::{RPCError, RPCResponseErrorCode}; +use lighthouse_network::rpc::{RPCError, RequestType, RpcErrorResponse}; use lighthouse_network::service::api_types::{ AppRequestId, DataColumnsByRootRequester, Id, SamplingRequester, SingleLookupReqId, SyncRequestId, }; use lighthouse_network::types::SyncState; -use lighthouse_network::{NetworkConfig, NetworkGlobals, Request}; +use lighthouse_network::NetworkConfig; +use lighthouse_network::NetworkGlobals; use slog::info; use slot_clock::{ManualSlotClock, SlotClock, TestingSlotClock}; use store::MemoryStore; @@ -618,7 +619,7 @@ impl TestRig { id, peer_id, RPCError::ErrorResponse( - RPCResponseErrorCode::ResourceUnavailable, + RpcErrorResponse::ResourceUnavailable, "older than deneb".into(), ), ); @@ -894,7 +895,7 @@ impl TestRig { self.pop_received_network_event(|ev| match ev { NetworkMessage::SendRequest { peer_id: _, - request: 
Request::BlocksByRoot(request), + request: RequestType::BlocksByRoot(request), request_id: AppRequestId::Sync(SyncRequestId::SingleBlock { id }), } if request.block_roots().to_vec().contains(&for_block) => Some(*id), _ => None, @@ -914,7 +915,7 @@ impl TestRig { self.pop_received_network_event(|ev| match ev { NetworkMessage::SendRequest { peer_id: _, - request: Request::BlobsByRoot(request), + request: RequestType::BlobsByRoot(request), request_id: AppRequestId::Sync(SyncRequestId::SingleBlob { id }), } if request .blob_ids @@ -939,7 +940,7 @@ impl TestRig { self.pop_received_network_event(|ev| match ev { NetworkMessage::SendRequest { peer_id: _, - request: Request::BlocksByRoot(request), + request: RequestType::BlocksByRoot(request), request_id: AppRequestId::Sync(SyncRequestId::SingleBlock { id }), } if request.block_roots().to_vec().contains(&for_block) => Some(*id), _ => None, @@ -961,7 +962,7 @@ impl TestRig { self.pop_received_network_event(|ev| match ev { NetworkMessage::SendRequest { peer_id: _, - request: Request::BlobsByRoot(request), + request: RequestType::BlobsByRoot(request), request_id: AppRequestId::Sync(SyncRequestId::SingleBlob { id }), } if request .blob_ids @@ -989,7 +990,7 @@ impl TestRig { .pop_received_network_event(|ev| match ev { NetworkMessage::SendRequest { peer_id: _, - request: Request::DataColumnsByRoot(request), + request: RequestType::DataColumnsByRoot(request), request_id: AppRequestId::Sync(id @ SyncRequestId::DataColumnsByRoot { .. 
}), } if request .data_column_ids diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index 5b7003e5e8..dc35a141d2 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -17,13 +17,16 @@ use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::{BeaconChain, BeaconChainTypes, BlockProcessStatus, EngineState}; use custody::CustodyRequestResult; use fnv::FnvHashMap; -use lighthouse_network::rpc::methods::{BlobsByRangeRequest, DataColumnsByRangeRequest}; -use lighthouse_network::rpc::{BlocksByRangeRequest, GoodbyeReason, RPCError}; +use lighthouse_network::rpc::methods::{ + BlobsByRangeRequest, DataColumnsByRangeRequest, OldBlocksByRangeRequest, + OldBlocksByRangeRequestV1, OldBlocksByRangeRequestV2, +}; +use lighthouse_network::rpc::{BlocksByRangeRequest, GoodbyeReason, RPCError, RequestType}; use lighthouse_network::service::api_types::{ AppRequestId, CustodyId, CustodyRequester, DataColumnsByRootRequestId, DataColumnsByRootRequester, Id, SingleLookupReqId, SyncRequestId, }; -use lighthouse_network::{Client, NetworkGlobals, PeerAction, PeerId, ReportSource, Request}; +use lighthouse_network::{Client, NetworkGlobals, PeerAction, PeerId, ReportSource}; use rand::seq::SliceRandom; use rand::thread_rng; use requests::ActiveDataColumnsByRootRequest; @@ -336,7 +339,7 @@ impl SyncNetworkContext { "head_slot" => %status_message.head_slot, ); - let request = Request::Status(status_message.clone()); + let request = RequestType::Status(status_message.clone()); let request_id = AppRequestId::Router; let _ = self.send_network_msg(NetworkMessage::SendRequest { peer_id, @@ -365,10 +368,26 @@ impl SyncNetworkContext { "epoch" => epoch, "peer" => %peer_id, ); + let rpc_request = match request { + BlocksByRangeRequest::V1(ref req) => { + RequestType::BlocksByRange(OldBlocksByRangeRequest::V1(OldBlocksByRangeRequestV1 { + start_slot: req.start_slot, + 
count: req.count, + step: 1, + })) + } + BlocksByRangeRequest::V2(ref req) => { + RequestType::BlocksByRange(OldBlocksByRangeRequest::V2(OldBlocksByRangeRequestV2 { + start_slot: req.start_slot, + count: req.count, + step: 1, + })) + } + }; self.network_send .send(NetworkMessage::SendRequest { peer_id, - request: Request::BlocksByRange(request.clone()), + request: rpc_request, request_id: AppRequestId::Sync(SyncRequestId::RangeBlockAndBlobs { id }), }) .map_err(|_| RpcRequestSendError::NetworkSendError)?; @@ -387,7 +406,7 @@ impl SyncNetworkContext { self.network_send .send(NetworkMessage::SendRequest { peer_id, - request: Request::BlobsByRange(BlobsByRangeRequest { + request: RequestType::BlobsByRange(BlobsByRangeRequest { start_slot: *request.start_slot(), count: *request.count(), }), @@ -421,7 +440,7 @@ impl SyncNetworkContext { self.send_network_msg(NetworkMessage::SendRequest { peer_id, - request: Request::DataColumnsByRange(columns_by_range_request), + request: RequestType::DataColumnsByRange(columns_by_range_request), request_id: AppRequestId::Sync(SyncRequestId::RangeBlockAndBlobs { id }), }) .map_err(|_| RpcRequestSendError::NetworkSendError)?; @@ -585,7 +604,7 @@ impl SyncNetworkContext { self.network_send .send(NetworkMessage::SendRequest { peer_id, - request: Request::BlocksByRoot(request.into_request(&self.chain.spec)), + request: RequestType::BlocksByRoot(request.into_request(&self.chain.spec)), request_id: AppRequestId::Sync(SyncRequestId::SingleBlock { id }), }) .map_err(|_| RpcRequestSendError::NetworkSendError)?; @@ -683,7 +702,7 @@ impl SyncNetworkContext { self.network_send .send(NetworkMessage::SendRequest { peer_id, - request: Request::BlobsByRoot(request.clone().into_request(&self.chain.spec)), + request: RequestType::BlobsByRoot(request.clone().into_request(&self.chain.spec)), request_id: AppRequestId::Sync(SyncRequestId::SingleBlob { id }), }) .map_err(|_| RpcRequestSendError::NetworkSendError)?; @@ -715,7 +734,7 @@ impl SyncNetworkContext 
{ self.send_network_msg(NetworkMessage::SendRequest { peer_id, - request: Request::DataColumnsByRoot(request.clone().into_request(&self.chain.spec)), + request: RequestType::DataColumnsByRoot(request.clone().into_request(&self.chain.spec)), request_id: AppRequestId::Sync(SyncRequestId::DataColumnsByRoot(req_id, requester)), })?; From 4a62b2418cecb016ad9216dccc27336a3b88d64e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Tue, 1 Oct 2024 03:13:51 +0100 Subject: [PATCH 33/66] estimate the total inbound bandwidth of IDONTWANT messages in bytes (#6438) * estimate the total inbound bandwidth of IDONTWANT messages --- .../gossipsub/src/behaviour.rs | 2 ++ .../gossipsub/src/metrics.rs | 19 +++++++++++++++++++ 2 files changed, 21 insertions(+) diff --git a/beacon_node/lighthouse_network/gossipsub/src/behaviour.rs b/beacon_node/lighthouse_network/gossipsub/src/behaviour.rs index 996f701e89..bf77f30979 100644 --- a/beacon_node/lighthouse_network/gossipsub/src/behaviour.rs +++ b/beacon_node/lighthouse_network/gossipsub/src/behaviour.rs @@ -3348,6 +3348,8 @@ where }; if let Some(metrics) = self.metrics.as_mut() { metrics.register_idontwant(message_ids.len()); + let idontwant_size = message_ids.iter().map(|id| id.0.len()).sum(); + metrics.register_idontwant_bytes(idontwant_size); } for message_id in message_ids { peer.dont_send.insert(message_id, Instant::now()); diff --git a/beacon_node/lighthouse_network/gossipsub/src/metrics.rs b/beacon_node/lighthouse_network/gossipsub/src/metrics.rs index 7e1cdac18b..a4ac389a74 100644 --- a/beacon_node/lighthouse_network/gossipsub/src/metrics.rs +++ b/beacon_node/lighthouse_network/gossipsub/src/metrics.rs @@ -185,6 +185,9 @@ pub(crate) struct Metrics { /// The number of msg_id's we have received in every IDONTWANT control message. idontwant_msgs_ids: Counter, + /// The number of bytes we have received in every IDONTWANT control message. + idontwant_bytes: Counter, + /// The size of the priority queue. 
priority_queue_size: Histogram, /// The size of the non-priority queue. @@ -338,6 +341,16 @@ impl Metrics { metric }; + let idontwant_bytes = { + let metric = Counter::default(); + registry.register( + "idontwant_bytes", + "The total bytes we have received an IDONTWANT control messages", + metric.clone(), + ); + metric + }; + let memcache_misses = { let metric = Counter::default(); registry.register( @@ -390,6 +403,7 @@ impl Metrics { memcache_misses, topic_iwant_msgs, idontwant_msgs, + idontwant_bytes, idontwant_msgs_ids, priority_queue_size, non_priority_queue_size, @@ -589,6 +603,11 @@ impl Metrics { } } + /// Register receiving the total bytes of an IDONTWANT control message. + pub(crate) fn register_idontwant_bytes(&mut self, bytes: usize) { + self.idontwant_bytes.inc_by(bytes as u64); + } + /// Register receiving an IDONTWANT msg for this topic. pub(crate) fn register_idontwant(&mut self, msgs: usize) { self.idontwant_msgs.inc(); From dd08ebb2b0f8b64459ec84f06f719c7fc34a353f Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 1 Oct 2024 12:59:03 +1000 Subject: [PATCH 34/66] Prevent Kurtosis container pollution (#6441) * Prevent Kurtosis container pollution --- scripts/local_testnet/stop_local_testnet.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/local_testnet/stop_local_testnet.sh b/scripts/local_testnet/stop_local_testnet.sh index 5500f8d5a0..6af1989e9f 100755 --- a/scripts/local_testnet/stop_local_testnet.sh +++ b/scripts/local_testnet/stop_local_testnet.sh @@ -12,4 +12,5 @@ kurtosis enclave dump $ENCLAVE_NAME $LOGS_SUBDIR echo "Local testnet logs stored to $LOGS_SUBDIR." kurtosis enclave rm -f $ENCLAVE_NAME +kurtosis engine stop echo "Local testnet stopped." 
From 82faf975b346e20fd82ee22dba3a845b99bd7c22 Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Wed, 2 Oct 2024 19:00:52 -0700 Subject: [PATCH 35/66] Add {fork_name}_enabled functions (#5951) * add fork_name_enabled fn to Forkname impl * refactor codebase to use new fork_enabled fn * fmt * Merge branch 'unstable' of https://github.com/sigp/lighthouse into fork-ord-impl * small code cleanup * resolve merge conflicts * fix beacon chain test * merge conflicts * fix ef test issue * resolve merge conflicts --- .../src/attestation_verification.rs | 9 +- beacon_node/beacon_chain/src/beacon_chain.rs | 87 +++++++++---------- .../tests/attestation_verification.rs | 30 ++++--- beacon_node/client/src/notifier.rs | 4 +- .../test_utils/execution_block_generator.rs | 25 +++--- .../src/test_utils/mock_builder.rs | 10 ++- .../http_api/src/build_block_contents.rs | 14 +-- beacon_node/http_api/src/builder_states.rs | 5 +- beacon_node/lighthouse_network/src/config.rs | 38 ++++---- .../lighthouse_network/src/rpc/protocol.rs | 10 +-- .../lighthouse_network/src/types/pubsub.rs | 25 +++--- .../network_beacon_processor/rpc_methods.rs | 14 ++- .../network/src/sync/range_sync/range.rs | 27 +++--- common/eth2/src/types.rs | 86 +++++++++--------- consensus/types/src/light_client_bootstrap.rs | 11 +-- .../types/src/light_client_finality_update.rs | 13 ++- consensus/types/src/light_client_header.rs | 9 +- .../src/light_client_optimistic_update.rs | 15 ++-- consensus/types/src/voluntary_exit.rs | 9 +- testing/ef_tests/src/cases/operations.rs | 2 +- testing/ef_tests/src/handler.rs | 6 +- validator_client/src/validator_store.rs | 25 +++--- 22 files changed, 223 insertions(+), 251 deletions(-) diff --git a/beacon_node/beacon_chain/src/attestation_verification.rs b/beacon_node/beacon_chain/src/attestation_verification.rs index 491271d6a9..9ee0b01df3 100644 --- a/beacon_node/beacon_chain/src/attestation_verification.rs +++ b/beacon_node/beacon_chain/src/attestation_verification.rs @@ -1144,13 
+1144,14 @@ pub fn verify_propagation_slot_range( let current_fork = spec.fork_name_at_slot::(slot_clock.now().ok_or(BeaconChainError::UnableToReadSlot)?); - let earliest_permissible_slot = if !current_fork.deneb_enabled() { - one_epoch_prior - // EIP-7045 - } else { + + let earliest_permissible_slot = if current_fork.deneb_enabled() { + // EIP-7045 one_epoch_prior .epoch(E::slots_per_epoch()) .start_slot(E::slots_per_epoch()) + } else { + one_epoch_prior }; if attestation_slot < earliest_permissible_slot { diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 5d287e2b68..2262325642 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -2619,11 +2619,7 @@ impl BeaconChain { /// Check if the current slot is greater than or equal to the Capella fork epoch. pub fn current_slot_is_post_capella(&self) -> Result { let current_fork = self.spec.fork_name_at_slot::(self.slot()?); - if let ForkName::Base | ForkName::Altair | ForkName::Bellatrix = current_fork { - Ok(false) - } else { - Ok(true) - } + Ok(current_fork.capella_enabled()) } /// Import a BLS to execution change to the op pool. @@ -5945,26 +5941,23 @@ impl BeaconChain { payload_attributes } else { let prepare_slot_fork = self.spec.fork_name_at_slot::(prepare_slot); - let withdrawals = match prepare_slot_fork { - ForkName::Base | ForkName::Altair | ForkName::Bellatrix => None, - ForkName::Capella | ForkName::Deneb | ForkName::Electra => { - let chain = self.clone(); - self.spawn_blocking_handle( - move || { - chain.get_expected_withdrawals(&forkchoice_update_params, prepare_slot) - }, - "prepare_beacon_proposer_withdrawals", - ) - .await? - .map(Some)? 
- } + + let withdrawals = if prepare_slot_fork.capella_enabled() { + let chain = self.clone(); + self.spawn_blocking_handle( + move || chain.get_expected_withdrawals(&forkchoice_update_params, prepare_slot), + "prepare_beacon_proposer_withdrawals", + ) + .await? + .map(Some)? + } else { + None }; - let parent_beacon_block_root = match prepare_slot_fork { - ForkName::Base | ForkName::Altair | ForkName::Bellatrix | ForkName::Capella => None, - ForkName::Deneb | ForkName::Electra => { - Some(pre_payload_attributes.parent_beacon_block_root) - } + let parent_beacon_block_root = if prepare_slot_fork.deneb_enabled() { + Some(pre_payload_attributes.parent_beacon_block_root) + } else { + None }; let payload_attributes = PayloadAttributes::new( @@ -6110,27 +6103,27 @@ impl BeaconChain { // `execution_engine_forkchoice_lock` apart from the one here. let forkchoice_lock = execution_layer.execution_engine_forkchoice_lock().await; - let (head_block_root, head_hash, justified_hash, finalized_hash) = if let Some(head_hash) = - params.head_hash - { - ( - params.head_root, - head_hash, - params - .justified_hash - .unwrap_or_else(ExecutionBlockHash::zero), - params - .finalized_hash - .unwrap_or_else(ExecutionBlockHash::zero), - ) - } else { - // The head block does not have an execution block hash. We must check to see if we - // happen to be the proposer of the transition block, in which case we still need to - // send forkchoice_updated. - match self.spec.fork_name_at_slot::(next_slot) { - // We are pre-bellatrix; no need to update the EL. - ForkName::Base | ForkName::Altair => return Ok(()), - _ => { + let (head_block_root, head_hash, justified_hash, finalized_hash) = + if let Some(head_hash) = params.head_hash { + ( + params.head_root, + head_hash, + params + .justified_hash + .unwrap_or_else(ExecutionBlockHash::zero), + params + .finalized_hash + .unwrap_or_else(ExecutionBlockHash::zero), + ) + } else { + // The head block does not have an execution block hash. 
We must check to see if we + // happen to be the proposer of the transition block, in which case we still need to + // send forkchoice_updated. + if self + .spec + .fork_name_at_slot::(next_slot) + .bellatrix_enabled() + { // We are post-bellatrix if let Some(payload_attributes) = execution_layer .payload_attributes(next_slot, params.head_root) @@ -6164,9 +6157,10 @@ impl BeaconChain { // We are not a proposer, no need to update the EL. return Ok(()); } + } else { + return Ok(()); } - } - }; + }; let forkchoice_updated_response = execution_layer .notify_forkchoice_updated( @@ -7009,7 +7003,6 @@ impl BeaconChain { .finalized_checkpoint() .epoch .sync_committee_period(&self.spec)?; - self.light_client_server_cache.get_light_client_bootstrap( &self.store, block_root, diff --git a/beacon_node/beacon_chain/tests/attestation_verification.rs b/beacon_node/beacon_chain/tests/attestation_verification.rs index f3b25ed5ce..e168cbb6f4 100644 --- a/beacon_node/beacon_chain/tests/attestation_verification.rs +++ b/beacon_node/beacon_chain/tests/attestation_verification.rs @@ -359,22 +359,24 @@ impl GossipTester { } pub fn earliest_valid_attestation_slot(&self) -> Slot { - let offset = match self.harness.spec.fork_name_at_epoch(self.epoch()) { - ForkName::Base | ForkName::Altair | ForkName::Bellatrix | ForkName::Capella => { - // Subtract an additional slot since the harness will be exactly on the start of the - // slot and the propagation tolerance will allow an extra slot. 
- E::slots_per_epoch() + 1 - } + let offset = if self + .harness + .spec + .fork_name_at_epoch(self.epoch()) + .deneb_enabled() + { // EIP-7045 - ForkName::Deneb | ForkName::Electra => { - let epoch_slot_offset = (self.slot() % E::slots_per_epoch()).as_u64(); - if epoch_slot_offset != 0 { - E::slots_per_epoch() + epoch_slot_offset - } else { - // Here the propagation tolerance will cause the cutoff to be an entire epoch earlier - 2 * E::slots_per_epoch() - } + let epoch_slot_offset = (self.slot() % E::slots_per_epoch()).as_u64(); + if epoch_slot_offset != 0 { + E::slots_per_epoch() + epoch_slot_offset + } else { + // Here the propagation tolerance will cause the cutoff to be an entire epoch earlier + 2 * E::slots_per_epoch() } + } else { + // Subtract an additional slot since the harness will be exactly on the start of the + // slot and the propagation tolerance will allow an extra slot. + E::slots_per_epoch() + 1 }; self.slot() diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs index 632188014e..839d296c76 100644 --- a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -436,7 +436,7 @@ async fn capella_readiness_logging( .snapshot .beacon_state .fork_name_unchecked() - >= ForkName::Capella; + .capella_enabled(); let has_execution_layer = beacon_chain.execution_layer.is_some(); @@ -496,7 +496,7 @@ async fn deneb_readiness_logging( .snapshot .beacon_state .fork_name_unchecked() - >= ForkName::Deneb; + .deneb_enabled(); let has_execution_layer = beacon_chain.execution_layer.is_some(); diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index a5960744f5..42f594fdf4 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -661,21 +661,18 @@ impl ExecutionBlockGenerator { }, }; - match 
execution_payload.fork_name() { - ForkName::Base | ForkName::Altair | ForkName::Bellatrix | ForkName::Capella => {} - ForkName::Deneb | ForkName::Electra => { - // get random number between 0 and Max Blobs - let mut rng = self.rng.lock(); - let num_blobs = rng.gen::() % (E::max_blobs_per_block() + 1); - let (bundle, transactions) = generate_blobs(num_blobs)?; - for tx in Vec::from(transactions) { - execution_payload - .transactions_mut() - .push(tx) - .map_err(|_| "transactions are full".to_string())?; - } - self.blobs_bundles.insert(id, bundle); + if execution_payload.fork_name().deneb_enabled() { + // get random number between 0 and Max Blobs + let mut rng = self.rng.lock(); + let num_blobs = rng.gen::() % (E::max_blobs_per_block() + 1); + let (bundle, transactions) = generate_blobs(num_blobs)?; + for tx in Vec::from(transactions) { + execution_payload + .transactions_mut() + .push(tx) + .map_err(|_| "transactions are full".to_string())?; } + self.blobs_bundles.insert(id, bundle); } *execution_payload.block_hash_mut() = diff --git a/beacon_node/execution_layer/src/test_utils/mock_builder.rs b/beacon_node/execution_layer/src/test_utils/mock_builder.rs index 1291c8cf97..139ea06918 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_builder.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_builder.rs @@ -479,16 +479,18 @@ pub fn serve( let prev_randao = head_state .get_randao_mix(head_state.current_epoch()) .map_err(|_| reject("couldn't get prev randao"))?; - let expected_withdrawals = match fork { - ForkName::Base | ForkName::Altair | ForkName::Bellatrix => None, - ForkName::Capella | ForkName::Deneb | ForkName::Electra => Some( + + let expected_withdrawals = if fork.capella_enabled() { + Some( builder .beacon_client .get_expected_withdrawals(&StateId::Head) .await .unwrap() .data, - ), + ) + } else { + None }; let payload_attributes = match fork { diff --git a/beacon_node/http_api/src/build_block_contents.rs 
b/beacon_node/http_api/src/build_block_contents.rs index 05a6735b32..c2ccb6695e 100644 --- a/beacon_node/http_api/src/build_block_contents.rs +++ b/beacon_node/http_api/src/build_block_contents.rs @@ -11,11 +11,9 @@ pub fn build_block_contents( BeaconBlockResponseWrapper::Blinded(block) => { Ok(ProduceBlockV3Response::Blinded(block.block)) } - BeaconBlockResponseWrapper::Full(block) => match fork_name { - ForkName::Base | ForkName::Altair | ForkName::Bellatrix | ForkName::Capella => Ok( - ProduceBlockV3Response::Full(FullBlockContents::Block(block.block)), - ), - ForkName::Deneb | ForkName::Electra => { + + BeaconBlockResponseWrapper::Full(block) => { + if fork_name.deneb_enabled() { let BeaconBlockResponse { block, state: _, @@ -37,7 +35,11 @@ pub fn build_block_contents( blobs, }), )) + } else { + Ok(ProduceBlockV3Response::Full(FullBlockContents::Block( + block.block, + ))) } - }, + } } } diff --git a/beacon_node/http_api/src/builder_states.rs b/beacon_node/http_api/src/builder_states.rs index 54f2c0efa8..40b3815736 100644 --- a/beacon_node/http_api/src/builder_states.rs +++ b/beacon_node/http_api/src/builder_states.rs @@ -4,7 +4,7 @@ use safe_arith::SafeArith; use state_processing::per_block_processing::get_expected_withdrawals; use state_processing::state_advance::partial_state_advance; use std::sync::Arc; -use types::{BeaconState, EthSpec, ForkName, Slot, Withdrawals}; +use types::{BeaconState, EthSpec, Slot, Withdrawals}; const MAX_EPOCH_LOOKAHEAD: u64 = 2; @@ -53,7 +53,8 @@ fn get_next_withdrawals_sanity_checks( } let fork = chain.spec.fork_name_at_slot::(proposal_slot); - if let ForkName::Base | ForkName::Altair | ForkName::Bellatrix = fork { + + if !fork.capella_enabled() { return Err(warp_utils::reject::custom_bad_request( "the specified state is a pre-capella state.".to_string(), )); diff --git a/beacon_node/lighthouse_network/src/config.rs b/beacon_node/lighthouse_network/src/config.rs index 7c95977140..ea29501784 100644 --- 
a/beacon_node/lighthouse_network/src/config.rs +++ b/beacon_node/lighthouse_network/src/config.rs @@ -440,28 +440,22 @@ pub fn gossipsub_config( fork_context: Arc, ) -> Vec { let topic_bytes = message.topic.as_str().as_bytes(); - match fork_context.current_fork() { - ForkName::Altair - | ForkName::Bellatrix - | ForkName::Capella - | ForkName::Deneb - | ForkName::Electra => { - let topic_len_bytes = topic_bytes.len().to_le_bytes(); - let mut vec = Vec::with_capacity( - prefix.len() + topic_len_bytes.len() + topic_bytes.len() + message.data.len(), - ); - vec.extend_from_slice(&prefix); - vec.extend_from_slice(&topic_len_bytes); - vec.extend_from_slice(topic_bytes); - vec.extend_from_slice(&message.data); - vec - } - ForkName::Base => { - let mut vec = Vec::with_capacity(prefix.len() + message.data.len()); - vec.extend_from_slice(&prefix); - vec.extend_from_slice(&message.data); - vec - } + + if fork_context.current_fork().altair_enabled() { + let topic_len_bytes = topic_bytes.len().to_le_bytes(); + let mut vec = Vec::with_capacity( + prefix.len() + topic_len_bytes.len() + topic_bytes.len() + message.data.len(), + ); + vec.extend_from_slice(&prefix); + vec.extend_from_slice(&topic_len_bytes); + vec.extend_from_slice(topic_bytes); + vec.extend_from_slice(&message.data); + vec + } else { + let mut vec = Vec::with_capacity(prefix.len() + message.data.len()); + vec.extend_from_slice(&prefix); + vec.extend_from_slice(&message.data); + vec } } let message_domain_valid_snappy = gossipsub_config_params.message_domain_valid_snappy; diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index 3f78d35f5c..67104fbc29 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -151,12 +151,10 @@ const REQUEST_TIMEOUT: u64 = 15; /// Returns the maximum bytes that can be sent across the RPC. 
pub fn max_rpc_size(fork_context: &ForkContext, max_chunk_size: usize) -> usize { - match fork_context.current_fork() { - ForkName::Altair | ForkName::Base => max_chunk_size / 10, - ForkName::Bellatrix => max_chunk_size, - ForkName::Capella => max_chunk_size, - ForkName::Deneb => max_chunk_size, - ForkName::Electra => max_chunk_size, + if fork_context.current_fork().bellatrix_enabled() { + max_chunk_size + } else { + max_chunk_size / 10 } } diff --git a/beacon_node/lighthouse_network/src/types/pubsub.rs b/beacon_node/lighthouse_network/src/types/pubsub.rs index 1bc99f9a6c..9f68278e28 100644 --- a/beacon_node/lighthouse_network/src/types/pubsub.rs +++ b/beacon_node/lighthouse_network/src/types/pubsub.rs @@ -252,28 +252,25 @@ impl PubsubMessage { Ok(PubsubMessage::BeaconBlock(Arc::new(beacon_block))) } GossipKind::BlobSidecar(blob_index) => { - match fork_context.from_context_bytes(gossip_topic.fork_digest) { - Some(ForkName::Deneb | ForkName::Electra) => { + if let Some(fork_name) = + fork_context.from_context_bytes(gossip_topic.fork_digest) + { + if fork_name.deneb_enabled() { let blob_sidecar = Arc::new( BlobSidecar::from_ssz_bytes(data) .map_err(|e| format!("{:?}", e))?, ); - Ok(PubsubMessage::BlobSidecar(Box::new(( + return Ok(PubsubMessage::BlobSidecar(Box::new(( *blob_index, blob_sidecar, - )))) + )))); } - Some( - ForkName::Base - | ForkName::Altair - | ForkName::Bellatrix - | ForkName::Capella, - ) - | None => Err(format!( - "beacon_blobs_and_sidecar topic invalid for given fork digest {:?}", - gossip_topic.fork_digest - )), } + + Err(format!( + "beacon_blobs_and_sidecar topic invalid for given fork digest {:?}", + gossip_topic.fork_digest + )) } GossipKind::DataColumnSidecar(subnet_id) => { match fork_context.from_context_bytes(gossip_topic.fork_digest) { diff --git a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs index 04e06c8e06..88a7616ec7 100644 --- 
a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs @@ -16,7 +16,7 @@ use std::collections::{hash_map::Entry, HashMap}; use std::sync::Arc; use tokio_stream::StreamExt; use types::blob_sidecar::BlobIdentifier; -use types::{Epoch, EthSpec, FixedBytesExtended, ForkName, Hash256, Slot}; +use types::{Epoch, EthSpec, FixedBytesExtended, Hash256, Slot}; impl NetworkBeaconProcessor { /* Auxiliary functions */ @@ -564,14 +564,10 @@ impl NetworkBeaconProcessor { self.chain .epoch() .map_or(self.chain.spec.max_request_blocks, |epoch| { - match self.chain.spec.fork_name_at_epoch(epoch) { - ForkName::Deneb | ForkName::Electra => { - self.chain.spec.max_request_blocks_deneb - } - ForkName::Base - | ForkName::Altair - | ForkName::Bellatrix - | ForkName::Capella => self.chain.spec.max_request_blocks, + if self.chain.spec.fork_name_at_epoch(epoch).deneb_enabled() { + self.chain.spec.max_request_blocks_deneb + } else { + self.chain.spec.max_request_blocks } }); if *req.count() > max_request_size { diff --git a/beacon_node/network/src/sync/range_sync/range.rs b/beacon_node/network/src/sync/range_sync/range.rs index f28b57eb18..b88253c9e8 100644 --- a/beacon_node/network/src/sync/range_sync/range.rs +++ b/beacon_node/network/src/sync/range_sync/range.rs @@ -537,21 +537,20 @@ mod tests { } else { panic!("Should have sent a batch request to the peer") }; - let blob_req_id = match fork_name { - ForkName::Deneb | ForkName::Electra => { - if let Ok(NetworkMessage::SendRequest { - peer_id, - request: _, - request_id, - }) = self.network_rx.try_recv() - { - assert_eq!(&peer_id, expected_peer); - Some(request_id) - } else { - panic!("Should have sent a batch request to the peer") - } + let blob_req_id = if fork_name.deneb_enabled() { + if let Ok(NetworkMessage::SendRequest { + peer_id, + request: _, + request_id, + }) = self.network_rx.try_recv() + { + assert_eq!(&peer_id, expected_peer); + Some(request_id) + 
} else { + panic!("Should have sent a batch request to the peer") } - _ => None, + } else { + None }; (block_req_id, blob_req_id) } diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index e1550fdee2..c187399ebd 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -1678,27 +1678,23 @@ impl FullBlockContents { bytes: &[u8], fork_name: ForkName, ) -> Result { - match fork_name { - ForkName::Base | ForkName::Altair | ForkName::Bellatrix | ForkName::Capella => { - BeaconBlock::from_ssz_bytes_for_fork(bytes, fork_name) - .map(|block| FullBlockContents::Block(block)) - } - ForkName::Deneb | ForkName::Electra => { - let mut builder = ssz::SszDecoderBuilder::new(bytes); + if fork_name.deneb_enabled() { + let mut builder = ssz::SszDecoderBuilder::new(bytes); - builder.register_anonymous_variable_length_item()?; - builder.register_type::>()?; - builder.register_type::>()?; + builder.register_anonymous_variable_length_item()?; + builder.register_type::>()?; + builder.register_type::>()?; - let mut decoder = builder.build()?; - let block = decoder.decode_next_with(|bytes| { - BeaconBlock::from_ssz_bytes_for_fork(bytes, fork_name) - })?; - let kzg_proofs = decoder.decode_next()?; - let blobs = decoder.decode_next()?; + let mut decoder = builder.build()?; + let block = decoder + .decode_next_with(|bytes| BeaconBlock::from_ssz_bytes_for_fork(bytes, fork_name))?; + let kzg_proofs = decoder.decode_next()?; + let blobs = decoder.decode_next()?; - Ok(FullBlockContents::new(block, Some((kzg_proofs, blobs)))) - } + Ok(FullBlockContents::new(block, Some((kzg_proofs, blobs)))) + } else { + BeaconBlock::from_ssz_bytes_for_fork(bytes, fork_name) + .map(|block| FullBlockContents::Block(block)) } } @@ -1738,15 +1734,14 @@ impl ForkVersionDeserialize for FullBlockContents { value: serde_json::value::Value, fork_name: ForkName, ) -> Result { - match fork_name { - ForkName::Base | ForkName::Altair | ForkName::Bellatrix | ForkName::Capella => { - 
Ok(FullBlockContents::Block( - BeaconBlock::deserialize_by_fork::<'de, D>(value, fork_name)?, - )) - } - ForkName::Deneb | ForkName::Electra => Ok(FullBlockContents::BlockContents( + if fork_name.deneb_enabled() { + Ok(FullBlockContents::BlockContents( BlockContents::deserialize_by_fork::<'de, D>(value, fork_name)?, - )), + )) + } else { + Ok(FullBlockContents::Block( + BeaconBlock::deserialize_by_fork::<'de, D>(value, fork_name)?, + )) } } } @@ -1838,28 +1833,25 @@ impl PublishBlockRequest { /// SSZ decode with fork variant determined by `fork_name`. pub fn from_ssz_bytes(bytes: &[u8], fork_name: ForkName) -> Result { - match fork_name { - ForkName::Base | ForkName::Altair | ForkName::Bellatrix | ForkName::Capella => { - SignedBeaconBlock::from_ssz_bytes_for_fork(bytes, fork_name) - .map(|block| PublishBlockRequest::Block(Arc::new(block))) - } - ForkName::Deneb | ForkName::Electra => { - let mut builder = ssz::SszDecoderBuilder::new(bytes); - builder.register_anonymous_variable_length_item()?; - builder.register_type::>()?; - builder.register_type::>()?; + if fork_name.deneb_enabled() { + let mut builder = ssz::SszDecoderBuilder::new(bytes); + builder.register_anonymous_variable_length_item()?; + builder.register_type::>()?; + builder.register_type::>()?; - let mut decoder = builder.build()?; - let block = decoder.decode_next_with(|bytes| { - SignedBeaconBlock::from_ssz_bytes_for_fork(bytes, fork_name) - })?; - let kzg_proofs = decoder.decode_next()?; - let blobs = decoder.decode_next()?; - Ok(PublishBlockRequest::new( - Arc::new(block), - Some((kzg_proofs, blobs)), - )) - } + let mut decoder = builder.build()?; + let block = decoder.decode_next_with(|bytes| { + SignedBeaconBlock::from_ssz_bytes_for_fork(bytes, fork_name) + })?; + let kzg_proofs = decoder.decode_next()?; + let blobs = decoder.decode_next()?; + Ok(PublishBlockRequest::new( + Arc::new(block), + Some((kzg_proofs, blobs)), + )) + } else { + SignedBeaconBlock::from_ssz_bytes_for_fork(bytes, fork_name) 
+ .map(|block| PublishBlockRequest::Block(Arc::new(block))) } } diff --git a/consensus/types/src/light_client_bootstrap.rs b/consensus/types/src/light_client_bootstrap.rs index 7c716e6bb2..25f029bcc0 100644 --- a/consensus/types/src/light_client_bootstrap.rs +++ b/consensus/types/src/light_client_bootstrap.rs @@ -196,13 +196,14 @@ impl ForkVersionDeserialize for LightClientBootstrap { value: Value, fork_name: ForkName, ) -> Result { - match fork_name { - ForkName::Base => Err(serde::de::Error::custom(format!( + if fork_name.altair_enabled() { + Ok(serde_json::from_value::>(value) + .map_err(serde::de::Error::custom))? + } else { + Err(serde::de::Error::custom(format!( "LightClientBootstrap failed to deserialize: unsupported fork '{}'", fork_name - ))), - _ => Ok(serde_json::from_value::>(value) - .map_err(serde::de::Error::custom))?, + ))) } } } diff --git a/consensus/types/src/light_client_finality_update.rs b/consensus/types/src/light_client_finality_update.rs index dc7561f5fc..91ee58b4be 100644 --- a/consensus/types/src/light_client_finality_update.rs +++ b/consensus/types/src/light_client_finality_update.rs @@ -212,15 +212,14 @@ impl ForkVersionDeserialize for LightClientFinalityUpdate { value: Value, fork_name: ForkName, ) -> Result { - match fork_name { - ForkName::Base => Err(serde::de::Error::custom(format!( + if fork_name.altair_enabled() { + serde_json::from_value::>(value) + .map_err(serde::de::Error::custom) + } else { + Err(serde::de::Error::custom(format!( "LightClientFinalityUpdate failed to deserialize: unsupported fork '{}'", fork_name - ))), - _ => Ok( - serde_json::from_value::>(value) - .map_err(serde::de::Error::custom), - )?, + ))) } } } diff --git a/consensus/types/src/light_client_header.rs b/consensus/types/src/light_client_header.rs index c0de114b35..fecdc39533 100644 --- a/consensus/types/src/light_client_header.rs +++ b/consensus/types/src/light_client_header.rs @@ -129,11 +129,10 @@ impl LightClientHeader { } pub fn 
ssz_max_var_len_for_fork(fork_name: ForkName) -> usize { - match fork_name { - ForkName::Base | ForkName::Altair => 0, - ForkName::Bellatrix | ForkName::Capella | ForkName::Deneb | ForkName::Electra => { - ExecutionPayloadHeader::::ssz_max_var_len_for_fork(fork_name) - } + if fork_name.capella_enabled() { + ExecutionPayloadHeader::::ssz_max_var_len_for_fork(fork_name) + } else { + 0 } } } diff --git a/consensus/types/src/light_client_optimistic_update.rs b/consensus/types/src/light_client_optimistic_update.rs index 3cae31edf8..2f8cc034eb 100644 --- a/consensus/types/src/light_client_optimistic_update.rs +++ b/consensus/types/src/light_client_optimistic_update.rs @@ -198,15 +198,16 @@ impl ForkVersionDeserialize for LightClientOptimisticUpdate { value: Value, fork_name: ForkName, ) -> Result { - match fork_name { - ForkName::Base => Err(serde::de::Error::custom(format!( - "LightClientOptimisticUpdate failed to deserialize: unsupported fork '{}'", - fork_name - ))), - _ => Ok( + if fork_name.altair_enabled() { + Ok( serde_json::from_value::>(value) .map_err(serde::de::Error::custom), - )?, + )? 
+ } else { + Err(serde::de::Error::custom(format!( + "LightClientOptimisticUpdate failed to deserialize: unsupported fork '{}'", + fork_name + ))) } } } diff --git a/consensus/types/src/voluntary_exit.rs b/consensus/types/src/voluntary_exit.rs index 4c7c16757e..153506f47a 100644 --- a/consensus/types/src/voluntary_exit.rs +++ b/consensus/types/src/voluntary_exit.rs @@ -41,12 +41,11 @@ impl VoluntaryExit { spec: &ChainSpec, ) -> SignedVoluntaryExit { let fork_name = spec.fork_name_at_epoch(self.epoch); - let fork_version = match fork_name { - ForkName::Base | ForkName::Altair | ForkName::Bellatrix | ForkName::Capella => { - spec.fork_version_for_name(fork_name) - } + let fork_version = if fork_name.deneb_enabled() { // EIP-7044 - ForkName::Deneb | ForkName::Electra => spec.fork_version_for_name(ForkName::Capella), + spec.fork_version_for_name(ForkName::Capella) + } else { + spec.fork_version_for_name(fork_name) }; let domain = spec.compute_domain(Domain::VoluntaryExit, fork_version, genesis_validators_root); diff --git a/testing/ef_tests/src/cases/operations.rs b/testing/ef_tests/src/cases/operations.rs index 2418444104..54ca52447f 100644 --- a/testing/ef_tests/src/cases/operations.rs +++ b/testing/ef_tests/src/cases/operations.rs @@ -270,7 +270,7 @@ impl Operation for SyncAggregate { } fn is_enabled_for_fork(fork_name: ForkName) -> bool { - fork_name != ForkName::Base + fork_name.altair_enabled() } fn decode(path: &Path, _fork_name: ForkName, _spec: &ChainSpec) -> Result { diff --git a/testing/ef_tests/src/handler.rs b/testing/ef_tests/src/handler.rs index dacaba1dca..97b449dab9 100644 --- a/testing/ef_tests/src/handler.rs +++ b/testing/ef_tests/src/handler.rs @@ -967,8 +967,8 @@ impl Handler for KzgInclusionMerkleProofValidityHandler bool { - // Enabled in Deneb - fork_name == ForkName::Deneb + // TODO(electra) re-enable for electra once merkle proof issues for electra are resolved + fork_name.deneb_enabled() && !fork_name.electra_enabled() } } @@ -994,7 +994,7 @@ 
impl Handler for LightClientUpdateHandler { fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool { // Enabled in Altair // TODO(electra) re-enable once https://github.com/sigp/lighthouse/issues/6002 is resolved - fork_name != ForkName::Base && fork_name != ForkName::Electra + fork_name.altair_enabled() && fork_name != ForkName::Electra } } diff --git a/validator_client/src/validator_store.rs b/validator_client/src/validator_store.rs index 6753c50cff..af59ad9892 100644 --- a/validator_client/src/validator_store.rs +++ b/validator_client/src/validator_store.rs @@ -19,8 +19,8 @@ use task_executor::TaskExecutor; use types::{ attestation::Error as AttestationError, graffiti::GraffitiString, AbstractExecPayload, Address, AggregateAndProof, Attestation, BeaconBlock, BlindedPayload, ChainSpec, ContributionAndProof, - Domain, Epoch, EthSpec, Fork, ForkName, Graffiti, Hash256, PublicKeyBytes, SelectionProof, - Signature, SignedAggregateAndProof, SignedBeaconBlock, SignedContributionAndProof, SignedRoot, + Domain, Epoch, EthSpec, Fork, Graffiti, Hash256, PublicKeyBytes, SelectionProof, Signature, + SignedAggregateAndProof, SignedBeaconBlock, SignedContributionAndProof, SignedRoot, SignedValidatorRegistrationData, SignedVoluntaryExit, Slot, SyncAggregatorSelectionData, SyncCommitteeContribution, SyncCommitteeMessage, SyncSelectionProof, SyncSubnetId, ValidatorRegistrationData, VoluntaryExit, @@ -353,17 +353,9 @@ impl ValidatorStore { fn signing_context(&self, domain: Domain, signing_epoch: Epoch) -> SigningContext { if domain == Domain::VoluntaryExit { - match self.spec.fork_name_at_epoch(signing_epoch) { - ForkName::Base | ForkName::Altair | ForkName::Bellatrix | ForkName::Capella => { - SigningContext { - domain, - epoch: signing_epoch, - fork: self.fork(signing_epoch), - genesis_validators_root: self.genesis_validators_root, - } - } + if self.spec.fork_name_at_epoch(signing_epoch).deneb_enabled() { // EIP-7044 - ForkName::Deneb | ForkName::Electra => SigningContext { 
+ SigningContext { domain, epoch: signing_epoch, fork: Fork { @@ -372,7 +364,14 @@ impl ValidatorStore { epoch: signing_epoch, }, genesis_validators_root: self.genesis_validators_root, - }, + } + } else { + SigningContext { + domain, + epoch: signing_epoch, + fork: self.fork(signing_epoch), + genesis_validators_root: self.genesis_validators_root, + } } } else { SigningContext { From f6d46fd6e91aecf35f5ef7048c194f94f8b0f59a Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Wed, 2 Oct 2024 19:00:56 -0700 Subject: [PATCH 36/66] Fix rolling file appender bug (#6266) * ensure file path passed to the rolling file appender only contains directories * handle edge case * handle edge case * fix based on feedback * Merge branch 'unstable' of https://github.com/sigp/lighthouse into rolling-file-apender-bug * fmt * linting * ensure only bn inits tracing logfile * ensure only bn inits tracing logfile * Merge branch 'unstable' into rolling-file-apender-bug * Get the metadata of `tracing_log_path` instead of `p`, which is a part of the path * Merge pull request #11 from ackintosh/rolling-file-apender-bug-ackintosh Get the metadata of `tracing_log_path` instead of `p` * Merge branch 'unstable' of https://github.com/sigp/lighthouse into rolling-file-apender-bug * fmt --- common/logging/src/lib.rs | 17 +++++++++++++++-- lighthouse/src/main.rs | 29 +++++++++++++++-------------- 2 files changed, 30 insertions(+), 16 deletions(-) diff --git a/common/logging/src/lib.rs b/common/logging/src/lib.rs index a4a1acabd4..0df03c17d0 100644 --- a/common/logging/src/lib.rs +++ b/common/logging/src/lib.rs @@ -217,6 +217,19 @@ impl TimeLatch { } pub fn create_tracing_layer(base_tracing_log_path: PathBuf) { + let mut tracing_log_path = PathBuf::new(); + + // Ensure that `tracing_log_path` only contains directories. 
+ for p in base_tracing_log_path.iter() { + tracing_log_path = tracing_log_path.join(p); + if let Ok(metadata) = tracing_log_path.metadata() { + if !metadata.is_dir() { + tracing_log_path.pop(); + break; + } + } + } + let filter_layer = match tracing_subscriber::EnvFilter::try_from_default_env() .or_else(|_| tracing_subscriber::EnvFilter::try_new("warn")) { @@ -232,7 +245,7 @@ pub fn create_tracing_layer(base_tracing_log_path: PathBuf) { .max_log_files(2) .filename_prefix("libp2p") .filename_suffix("log") - .build(base_tracing_log_path.clone()) + .build(tracing_log_path.clone()) else { eprintln!("Failed to initialize libp2p rolling file appender"); return; @@ -243,7 +256,7 @@ pub fn create_tracing_layer(base_tracing_log_path: PathBuf) { .max_log_files(2) .filename_prefix("discv5") .filename_suffix("log") - .build(base_tracing_log_path.clone()) + .build(tracing_log_path) else { eprintln!("Failed to initialize discv5 rolling file appender"); return; diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index e865fbd272..aad8860fcc 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -626,20 +626,6 @@ fn run( })); } - let mut tracing_log_path: Option = clap_utils::parse_optional(matches, "logfile")?; - - if tracing_log_path.is_none() { - tracing_log_path = Some( - parse_path_or_default(matches, "datadir")? - .join(DEFAULT_BEACON_NODE_DIR) - .join("logs"), - ) - } - - let path = tracing_log_path.clone().unwrap(); - - logging::create_tracing_layer(path); - // Allow Prometheus to export the time at which the process was started. metrics::expose_process_start_time(&log); @@ -724,6 +710,21 @@ fn run( return Ok(()); } + let mut tracing_log_path: Option = + clap_utils::parse_optional(matches, "logfile")?; + + if tracing_log_path.is_none() { + tracing_log_path = Some( + parse_path_or_default(matches, "datadir")? 
+ .join(DEFAULT_BEACON_NODE_DIR) + .join("logs"), + ) + } + + let path = tracing_log_path.clone().unwrap(); + + logging::create_tracing_layer(path); + executor.clone().spawn( async move { if let Err(e) = ProductionBeaconNode::new(context.clone(), config).await { From 428310c88132dcf29ff08a2fc28382eed4d80960 Mon Sep 17 00:00:00 2001 From: Lion - dapplion <35266934+dapplion@users.noreply.github.com> Date: Thu, 3 Oct 2024 06:06:02 +0300 Subject: [PATCH 37/66] Fit sampling log statements to fmt width (#6433) * Fit sampling log statements to fmt width --- beacon_node/network/src/sync/sampling.rs | 65 +++++++++++++++++------- 1 file changed, 48 insertions(+), 17 deletions(-) diff --git a/beacon_node/network/src/sync/sampling.rs b/beacon_node/network/src/sync/sampling.rs index 524fe86bee..4d0fa509cd 100644 --- a/beacon_node/network/src/sync/sampling.rs +++ b/beacon_node/network/src/sync/sampling.rs @@ -244,22 +244,31 @@ impl ActiveSamplingRequest { .column_indexes_by_sampling_request .get(&sampling_request_id) else { - error!(self.log, "Column indexes for the sampling request ID not found"; "sampling_request_id" => ?sampling_request_id); + error!(self.log, + "Column indexes for the sampling request ID not found"; + "sampling_request_id" => ?sampling_request_id + ); return Ok(None); }; match resp { Ok((mut resp_data_columns, seen_timestamp)) => { - debug!(self.log, "Sample download success"; "block_root" => %self.block_root, "column_indexes" => ?column_indexes, "count" => resp_data_columns.len()); + debug!(self.log, + "Sample download success"; + "block_root" => %self.block_root, + "column_indexes" => ?column_indexes, + "count" => resp_data_columns.len() + ); metrics::inc_counter_vec(&metrics::SAMPLE_DOWNLOAD_RESULT, &[metrics::SUCCESS]); // Filter the data received in the response using the requested column indexes. 
let mut data_columns = vec![]; for column_index in column_indexes { let Some(request) = self.column_requests.get_mut(column_index) else { - warn!( - self.log, - "Active column sample request not found"; "block_root" => %self.block_root, "column_index" => column_index + warn!(self.log, + "Active column sample request not found"; + "block_root" => %self.block_root, + "column_index" => column_index ); continue; }; @@ -270,7 +279,11 @@ impl ActiveSamplingRequest { else { // Peer does not have the requested data. // TODO(das) what to do? - debug!(self.log, "Sampling peer claims to not have the data"; "block_root" => %self.block_root, "column_index" => column_index); + debug!(self.log, + "Sampling peer claims to not have the data"; + "block_root" => %self.block_root, + "column_index" => column_index + ); request.on_sampling_error()?; continue; }; @@ -283,15 +296,16 @@ impl ActiveSamplingRequest { .iter() .map(|d| d.index) .collect::>(); - debug!( - self.log, - "Received data that was not requested"; "block_root" => %self.block_root, "column_indexes" => ?resp_column_indexes + debug!(self.log, + "Received data that was not requested"; + "block_root" => %self.block_root, + "column_indexes" => ?resp_column_indexes ); } // Handle the downloaded data columns. 
if data_columns.is_empty() { - debug!(self.log,"Received empty response"; "block_root" => %self.block_root); + debug!(self.log, "Received empty response"; "block_root" => %self.block_root); self.column_indexes_by_sampling_request .remove(&sampling_request_id); } else { @@ -302,10 +316,18 @@ impl ActiveSamplingRequest { // Peer has data column, send to verify let Some(beacon_processor) = cx.beacon_processor_if_enabled() else { // If processor is not available, error the entire sampling - debug!(self.log, "Dropping sampling"; "block" => %self.block_root, "reason" => "beacon processor unavailable"); + debug!(self.log, + "Dropping sampling"; + "block" => %self.block_root, + "reason" => "beacon processor unavailable" + ); return Err(SamplingError::ProcessorUnavailable); }; - debug!(self.log, "Sending data_column for verification"; "block" => ?self.block_root, "column_indexes" => ?column_indexes); + debug!(self.log, + "Sending data_column for verification"; + "block" => ?self.block_root, + "column_indexes" => ?column_indexes + ); if let Err(e) = beacon_processor.send_rpc_validate_data_columns( self.block_root, data_columns, @@ -316,22 +338,31 @@ impl ActiveSamplingRequest { }, ) { // TODO(das): Beacon processor is overloaded, what should we do? - error!(self.log, "Dropping sampling"; "block" => %self.block_root, "reason" => e.to_string()); + error!(self.log, + "Dropping sampling"; + "block" => %self.block_root, + "reason" => e.to_string() + ); return Err(SamplingError::SendFailed("beacon processor send failure")); } } } Err(err) => { - debug!(self.log, "Sample download error"; "block_root" => %self.block_root, "column_indexes" => ?column_indexes, "error" => ?err); + debug!(self.log, "Sample download error"; + "block_root" => %self.block_root, + "column_indexes" => ?column_indexes, + "error" => ?err + ); metrics::inc_counter_vec(&metrics::SAMPLE_DOWNLOAD_RESULT, &[metrics::FAILURE]); // Error downloading, maybe penalize peer and retry again. 
// TODO(das) with different peer or different peer? for column_index in column_indexes { let Some(request) = self.column_requests.get_mut(column_index) else { - warn!( - self.log, - "Active column sample request not found"; "block_root" => %self.block_root, "column_index" => column_index + warn!(self.log, + "Active column sample request not found"; + "block_root" => %self.block_root, + "column_index" => column_index ); continue; }; From 17849b58ec89fd3370926ed76bbc5c8a5d223abe Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Thu, 3 Oct 2024 13:06:05 +1000 Subject: [PATCH 38/66] Fix invalid data column sidecars getting accepted (#6454) * Fix invalid data column sidecars getting accepted. * Update code to match spec function. --- .../src/data_column_verification.rs | 93 ++++++++++++++++++- .../gossip_methods.rs | 3 + 2 files changed, 95 insertions(+), 1 deletion(-) diff --git a/beacon_node/beacon_chain/src/data_column_verification.rs b/beacon_node/beacon_chain/src/data_column_verification.rs index 1647f190cf..44873fab4a 100644 --- a/beacon_node/beacon_chain/src/data_column_verification.rs +++ b/beacon_node/beacon_chain/src/data_column_verification.rs @@ -127,6 +127,25 @@ pub enum GossipDataColumnError { slot: Slot, index: ColumnIndex, }, + /// Data column index must be between 0 and `NUMBER_OF_COLUMNS` (exclusive). + /// + /// ## Peer scoring + /// + /// The column sidecar is invalid and the peer is faulty + InvalidColumnIndex(u64), + /// Data column not expected for a block with empty kzg commitments. + /// + /// ## Peer scoring + /// + /// The column sidecar is invalid and the peer is faulty + UnexpectedDataColumn, + /// The data column length must be equal to the number of commitments/proofs, otherwise the + /// sidecar is invalid. 
+ /// + /// ## Peer scoring + /// + /// The column sidecar is invalid and the peer is faulty + InconsistentCommitmentsOrProofLength, } impl From for GossipDataColumnError { @@ -367,7 +386,7 @@ pub fn validate_data_column_sidecar_for_gossip( chain: &BeaconChain, ) -> Result, GossipDataColumnError> { let column_slot = data_column.slot(); - + verify_data_column_sidecar(&data_column, &chain.spec)?; verify_index_matches_subnet(&data_column, subnet, &chain.spec)?; verify_sidecar_not_from_future_slot(chain, column_slot)?; verify_slot_greater_than_latest_finalized_slot(chain, column_slot)?; @@ -396,6 +415,26 @@ pub fn validate_data_column_sidecar_for_gossip( }) } +/// Verify if the data column sidecar is valid. +fn verify_data_column_sidecar( + data_column: &DataColumnSidecar, + spec: &ChainSpec, +) -> Result<(), GossipDataColumnError> { + if data_column.index >= spec.number_of_columns as u64 { + return Err(GossipDataColumnError::InvalidColumnIndex(data_column.index)); + } + if data_column.kzg_commitments.is_empty() { + return Err(GossipDataColumnError::UnexpectedDataColumn); + } + if data_column.column.len() != data_column.kzg_commitments.len() + || data_column.column.len() != data_column.kzg_proofs.len() + { + return Err(GossipDataColumnError::InconsistentCommitmentsOrProofLength); + } + + Ok(()) +} + // Verify that this is the first column sidecar received for the tuple: // (block_header.slot, block_header.proposer_index, column_sidecar.index) fn verify_is_first_sidecar( @@ -613,3 +652,55 @@ fn verify_sidecar_not_from_future_slot( } Ok(()) } + +#[cfg(test)] +mod test { + use crate::data_column_verification::{ + validate_data_column_sidecar_for_gossip, GossipDataColumnError, + }; + use crate::test_utils::BeaconChainHarness; + use types::{DataColumnSidecar, EthSpec, ForkName, MainnetEthSpec}; + + type E = MainnetEthSpec; + + #[tokio::test] + async fn empty_data_column_sidecars_fails_validation() { + let spec = ForkName::latest().make_genesis_spec(E::default_spec()); + let 
harness = BeaconChainHarness::builder(E::default()) + .spec(spec.into()) + .deterministic_keypairs(64) + .fresh_ephemeral_store() + .mock_execution_layer() + .build(); + harness.advance_slot(); + + let slot = harness.get_current_slot(); + let state = harness.get_current_state(); + let ((block, _blobs_opt), _state) = harness + .make_block_with_modifier(state, slot, |block| { + *block.body_mut().blob_kzg_commitments_mut().unwrap() = vec![].into(); + }) + .await; + + let index = 0; + let column_sidecar = DataColumnSidecar:: { + index, + column: vec![].into(), + kzg_commitments: vec![].into(), + kzg_proofs: vec![].into(), + signed_block_header: block.signed_block_header(), + kzg_commitments_inclusion_proof: block + .message() + .body() + .kzg_commitments_merkle_proof() + .unwrap(), + }; + + let result = + validate_data_column_sidecar_for_gossip(column_sidecar.into(), index, &harness.chain); + assert!(matches!( + result.err(), + Some(GossipDataColumnError::UnexpectedDataColumn) + )); + } +} diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index 005536bcf2..3153ce533c 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -711,6 +711,9 @@ impl NetworkBeaconProcessor { | GossipDataColumnError::InvalidSubnetId { .. } | GossipDataColumnError::InvalidInclusionProof { .. } | GossipDataColumnError::InvalidKzgProof { .. } + | GossipDataColumnError::UnexpectedDataColumn + | GossipDataColumnError::InvalidColumnIndex(_) + | GossipDataColumnError::InconsistentCommitmentsOrProofLength | GossipDataColumnError::NotFinalizedDescendant { .. 
} => { debug!( self.log, From f870b66f496bc4eb000e8cb0bbbe62c22828c149 Mon Sep 17 00:00:00 2001 From: Mac L Date: Thu, 3 Oct 2024 09:57:12 +0400 Subject: [PATCH 39/66] Rework Validator Client fallback mechanism (#4393) * Rework Validator Client fallback mechanism * Add CI workflow for fallback simulator * Tie-break with sync distance for non-synced nodes * Fix simulator * Cleanup unused code * More improvements * Add IsOptimistic enum for readability * Use configurable sync distance tiers * Fix tests * Combine status and health and improve logging * Fix nodes not being marked as available * Fix simulator * Fix tests again * Increase fallback simulator tolerance * Add http api endpoint * Fix todos and tests * Update simulator * Merge branch 'unstable' into vc-fallback * Add suggestions * Add id to ui endpoint * Remove unnecessary clones * Formatting * Merge branch 'unstable' into vc-fallback * Merge branch 'unstable' into vc-fallback * Fix flag tests * Merge branch 'unstable' into vc-fallback * Merge branch 'unstable' into vc-fallback * Fix conflicts * Merge branch 'unstable' into vc-fallback * Remove unnecessary pubs * Simplify `compute_distance_tier` and reduce notifier awaits * Use the more descriptive `user_index` instead of `id` * Combine sync distance tolerance flags into one * Merge branch 'unstable' into vc-fallback * Merge branch 'unstable' into vc-fallback * wip * Use new simulator from unstable * Fix cli text * Remove leftover files * Remove old commented code * Merge branch 'unstable' into vc-fallback * Update cli text * Silence candidate errors when pre-genesis * Merge branch 'unstable' into vc-fallback * Merge branch 'unstable' into vc-fallback * Retry on failure * Merge branch 'unstable' into vc-fallback * Merge branch 'unstable' into vc-fallback * Remove disable_run_on_all * Remove unused error variant * Fix out of date comment * Merge branch 'unstable' into vc-fallback * Remove unnecessary as_u64 * Remove more out of date comments * Use tokio RwLock 
and remove parking_lot * Merge branch 'unstable' into vc-fallback * Formatting * Ensure nodes are still added to total when not available * Allow VC to detect when BN comes online * Fix ui endpoint * Don't have block_service as an Option * Merge branch 'unstable' into vc-fallback * Clean up lifetimes and futures * Revert "Don't have block_service as an Option" This reverts commit b5445a09e9f0942ecd561575367f0d1f120415f6. * Merge branch 'unstable' into vc-fallback * Merge branch 'unstable' into vc-fallback * Improve rwlock sanitation using clones * Merge branch 'unstable' into vc-fallback * Drop read lock immediately by cloning the vec. --- Cargo.lock | 1 + book/src/help_vc.md | 16 + common/eth2/Cargo.toml | 1 + common/eth2/src/lib.rs | 9 +- lighthouse/tests/validator_client.rs | 34 +- testing/simulator/src/fallback_sim.rs | 2 +- validator_client/Cargo.toml | 2 +- validator_client/src/attestation_service.rs | 180 ++-- validator_client/src/beacon_node_fallback.rs | 812 ++++++++++-------- validator_client/src/beacon_node_health.rs | 420 +++++++++ validator_client/src/block_service.rs | 109 +-- validator_client/src/check_synced.rs | 83 +- validator_client/src/cli.rs | 27 + validator_client/src/config.rs | 20 +- validator_client/src/doppelganger_service.rs | 29 +- validator_client/src/duties_service.rs | 99 +-- validator_client/src/duties_service/sync.rs | 23 +- validator_client/src/http_api/mod.rs | 50 +- validator_client/src/http_api/test_utils.rs | 1 + validator_client/src/http_api/tests.rs | 1 + validator_client/src/lib.rs | 34 +- validator_client/src/notifier.rs | 40 +- validator_client/src/preparation_service.rs | 28 +- .../src/sync_committee_service.rs | 73 +- 24 files changed, 1316 insertions(+), 778 deletions(-) create mode 100644 validator_client/src/beacon_node_health.rs diff --git a/Cargo.lock b/Cargo.lock index 5fcba6b264..44ca67e9b4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2535,6 +2535,7 @@ version = "0.1.0" dependencies = [ "account_utils", "bytes", + 
"derivative", "eth2_keystore", "ethereum_serde_utils", "ethereum_ssz", diff --git a/book/src/help_vc.md b/book/src/help_vc.md index 7f2cfab8e3..23a8491993 100644 --- a/book/src/help_vc.md +++ b/book/src/help_vc.md @@ -177,6 +177,22 @@ Options: Default is unlimited. Flags: + --beacon-nodes-sync-tolerances + A comma-separated list of 3 values which sets the size of each sync + distance range when determining the health of each connected beacon + node. The first value determines the `Synced` range. If a connected + beacon node is synced to within this number of slots it is considered + 'Synced'. The second value determines the `Small` sync distance range. + This range starts immediately after the `Synced` range. The third + value determines the `Medium` sync distance range. This range starts + immediately after the `Small` range. Any sync distance value beyond + that is considered `Large`. For example, a value of `8,8,48` would + have ranges like the following: `Synced`: 0..=8 `Small`: 9..=16 + `Medium`: 17..=64 `Large`: 65.. These values are used to determine + what ordering beacon node fallbacks are used in. Generally, `Synced` + nodes are preferred over `Small` and so on. Nodes in the `Synced` + range will tie-break based on their ordering in `--beacon-nodes`. This + ensures the primary beacon node is prioritised. [default: 8,8,48] --builder-proposals If this flag is set, Lighthouse will query the Beacon Node for only block headers during proposals and will sign over headers. 
Useful for diff --git a/common/eth2/Cargo.toml b/common/eth2/Cargo.toml index 10b4755ba2..d23a4068f1 100644 --- a/common/eth2/Cargo.toml +++ b/common/eth2/Cargo.toml @@ -29,6 +29,7 @@ store = { workspace = true } slashing_protection = { workspace = true } mediatype = "0.19.13" pretty_reqwest_error = { workspace = true } +derivative = { workspace = true } [dev-dependencies] tokio = { workspace = true } diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 2805d36b90..522c6414ea 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -16,6 +16,7 @@ pub mod types; use self::mixin::{RequestAccept, ResponseOptional}; use self::types::{Error as ResponseError, *}; +use derivative::Derivative; use futures::Stream; use futures_util::StreamExt; use lighthouse_network::PeerId; @@ -117,7 +118,7 @@ impl fmt::Display for Error { /// A struct to define a variety of different timeouts for different validator tasks to ensure /// proper fallback behaviour. -#[derive(Clone)] +#[derive(Clone, Debug, PartialEq, Eq)] pub struct Timeouts { pub attestation: Duration, pub attester_duties: Duration, @@ -154,13 +155,17 @@ impl Timeouts { /// A wrapper around `reqwest::Client` which provides convenience methods for interfacing with a /// Lighthouse Beacon Node HTTP server (`http_api`). 
-#[derive(Clone)] +#[derive(Clone, Debug, Derivative)] +#[derivative(PartialEq)] pub struct BeaconNodeHttpClient { + #[derivative(PartialEq = "ignore")] client: reqwest::Client, server: SensitiveUrl, timeouts: Timeouts, } +impl Eq for BeaconNodeHttpClient {} + impl fmt::Display for BeaconNodeHttpClient { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { self.server.fmt(f) diff --git a/lighthouse/tests/validator_client.rs b/lighthouse/tests/validator_client.rs index cb16ca4792..baf50aa7c0 100644 --- a/lighthouse/tests/validator_client.rs +++ b/lighthouse/tests/validator_client.rs @@ -1,4 +1,6 @@ -use validator_client::{config::DEFAULT_WEB3SIGNER_KEEP_ALIVE, ApiTopic, Config}; +use validator_client::{ + config::DEFAULT_WEB3SIGNER_KEEP_ALIVE, ApiTopic, BeaconNodeSyncDistanceTiers, Config, +}; use crate::exec::CommandLineTestExec; use bls::{Keypair, PublicKeyBytes}; @@ -12,7 +14,7 @@ use std::str::FromStr; use std::string::ToString; use std::time::Duration; use tempfile::TempDir; -use types::Address; +use types::{Address, Slot}; /// Returns the `lighthouse validator_client` command. fn base_cmd() -> Command { @@ -511,7 +513,6 @@ fn monitoring_endpoint() { assert_eq!(api_conf.update_period_secs, Some(30)); }); } - #[test] fn disable_run_on_all_flag() { CommandLineTest::new() @@ -572,6 +573,33 @@ fn broadcast_flag() { }); } +/// Tests for validator fallback flags. 
+#[test] +fn beacon_nodes_sync_tolerances_flag_default() { + CommandLineTest::new().run().with_config(|config| { + assert_eq!( + config.beacon_node_fallback.sync_tolerances, + BeaconNodeSyncDistanceTiers::default() + ) + }); +} +#[test] +fn beacon_nodes_sync_tolerances_flag() { + CommandLineTest::new() + .flag("beacon-nodes-sync-tolerances", Some("4,4,4")) + .run() + .with_config(|config| { + assert_eq!( + config.beacon_node_fallback.sync_tolerances, + BeaconNodeSyncDistanceTiers { + synced: Slot::new(4), + small: Slot::new(8), + medium: Slot::new(12), + } + ); + }); +} + #[test] #[should_panic(expected = "Unknown API topic")] fn wrong_broadcast_flag() { diff --git a/testing/simulator/src/fallback_sim.rs b/testing/simulator/src/fallback_sim.rs index b27a6246bf..3859257fb7 100644 --- a/testing/simulator/src/fallback_sim.rs +++ b/testing/simulator/src/fallback_sim.rs @@ -29,7 +29,7 @@ const DENEB_FORK_EPOCH: u64 = 2; // This has potential to block CI so it should be set conservatively enough that spurious failures // don't become very common, but not so conservatively that regressions to the fallback mechanism // cannot be detected. 
-const ACCEPTABLE_FALLBACK_ATTESTATION_HIT_PERCENTAGE: f64 = 85.0; +const ACCEPTABLE_FALLBACK_ATTESTATION_HIT_PERCENTAGE: f64 = 95.0; const SUGGESTED_FEE_RECIPIENT: [u8; 20] = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]; diff --git a/validator_client/Cargo.toml b/validator_client/Cargo.toml index bff40b41d5..4c338e91b9 100644 --- a/validator_client/Cargo.toml +++ b/validator_client/Cargo.toml @@ -10,7 +10,6 @@ path = "src/lib.rs" [dev-dependencies] tokio = { workspace = true } -itertools = { workspace = true } [dependencies] tree_hash = { workspace = true } @@ -60,4 +59,5 @@ sysinfo = { workspace = true } system_health = { path = "../common/system_health" } logging = { workspace = true } strum = { workspace = true } +itertools = { workspace = true } fdlimit = "0.3.0" diff --git a/validator_client/src/attestation_service.rs b/validator_client/src/attestation_service.rs index 30fe508a2c..5363f36f66 100644 --- a/validator_client/src/attestation_service.rs +++ b/validator_client/src/attestation_service.rs @@ -1,9 +1,8 @@ -use crate::beacon_node_fallback::{ApiTopic, BeaconNodeFallback, RequireSynced}; +use crate::beacon_node_fallback::{ApiTopic, BeaconNodeFallback}; use crate::{ duties_service::{DutiesService, DutyAndProof}, http_metrics::metrics, validator_store::{Error as ValidatorStoreError, ValidatorStore}, - OfflineOnFailure, }; use environment::RuntimeContext; use futures::future::join_all; @@ -339,21 +338,17 @@ impl AttestationService { let attestation_data = self .beacon_nodes - .first_success( - RequireSynced::No, - OfflineOnFailure::Yes, - |beacon_node| async move { - let _timer = metrics::start_timer_vec( - &metrics::ATTESTATION_SERVICE_TIMES, - &[metrics::ATTESTATIONS_HTTP_GET], - ); - beacon_node - .get_validator_attestation_data(slot, committee_index) - .await - .map_err(|e| format!("Failed to produce attestation data: {:?}", e)) - .map(|result| result.data) - }, - ) + .first_success(|beacon_node| async move { + let _timer = 
metrics::start_timer_vec( + &metrics::ATTESTATION_SERVICE_TIMES, + &[metrics::ATTESTATIONS_HTTP_GET], + ); + beacon_node + .get_validator_attestation_data(slot, committee_index) + .await + .map_err(|e| format!("Failed to produce attestation data: {:?}", e)) + .map(|result| result.data) + }) .await .map_err(|e| e.to_string())?; @@ -458,26 +453,21 @@ impl AttestationService { // Post the attestations to the BN. match self .beacon_nodes - .request( - RequireSynced::No, - OfflineOnFailure::Yes, - ApiTopic::Attestations, - |beacon_node| async move { - let _timer = metrics::start_timer_vec( - &metrics::ATTESTATION_SERVICE_TIMES, - &[metrics::ATTESTATIONS_HTTP_POST], - ); - if fork_name.electra_enabled() { - beacon_node - .post_beacon_pool_attestations_v2(attestations, fork_name) - .await - } else { - beacon_node - .post_beacon_pool_attestations_v1(attestations) - .await - } - }, - ) + .request(ApiTopic::Attestations, |beacon_node| async move { + let _timer = metrics::start_timer_vec( + &metrics::ATTESTATION_SERVICE_TIMES, + &[metrics::ATTESTATIONS_HTTP_POST], + ); + if fork_name.electra_enabled() { + beacon_node + .post_beacon_pool_attestations_v2(attestations, fork_name) + .await + } else { + beacon_node + .post_beacon_pool_attestations_v1(attestations) + .await + } + }) .await { Ok(()) => info!( @@ -540,46 +530,38 @@ impl AttestationService { let aggregated_attestation = &self .beacon_nodes - .first_success( - RequireSynced::No, - OfflineOnFailure::Yes, - |beacon_node| async move { - let _timer = metrics::start_timer_vec( - &metrics::ATTESTATION_SERVICE_TIMES, - &[metrics::AGGREGATES_HTTP_GET], - ); - if fork_name.electra_enabled() { - beacon_node - .get_validator_aggregate_attestation_v2( - attestation_data.slot, - attestation_data.tree_hash_root(), - committee_index, - ) - .await - .map_err(|e| { - format!("Failed to produce an aggregate attestation: {:?}", e) - })? 
- .ok_or_else(|| { - format!("No aggregate available for {:?}", attestation_data) - }) - .map(|result| result.data) - } else { - beacon_node - .get_validator_aggregate_attestation_v1( - attestation_data.slot, - attestation_data.tree_hash_root(), - ) - .await - .map_err(|e| { - format!("Failed to produce an aggregate attestation: {:?}", e) - })? - .ok_or_else(|| { - format!("No aggregate available for {:?}", attestation_data) - }) - .map(|result| result.data) - } - }, - ) + .first_success(|beacon_node| async move { + let _timer = metrics::start_timer_vec( + &metrics::ATTESTATION_SERVICE_TIMES, + &[metrics::AGGREGATES_HTTP_GET], + ); + if fork_name.electra_enabled() { + beacon_node + .get_validator_aggregate_attestation_v2( + attestation_data.slot, + attestation_data.tree_hash_root(), + committee_index, + ) + .await + .map_err(|e| { + format!("Failed to produce an aggregate attestation: {:?}", e) + })? + .ok_or_else(|| format!("No aggregate available for {:?}", attestation_data)) + .map(|result| result.data) + } else { + beacon_node + .get_validator_aggregate_attestation_v1( + attestation_data.slot, + attestation_data.tree_hash_root(), + ) + .await + .map_err(|e| { + format!("Failed to produce an aggregate attestation: {:?}", e) + })? 
+ .ok_or_else(|| format!("No aggregate available for {:?}", attestation_data)) + .map(|result| result.data) + } + }) .await .map_err(|e| e.to_string())?; @@ -637,30 +619,26 @@ impl AttestationService { let signed_aggregate_and_proofs_slice = signed_aggregate_and_proofs.as_slice(); match self .beacon_nodes - .first_success( - RequireSynced::No, - OfflineOnFailure::Yes, - |beacon_node| async move { - let _timer = metrics::start_timer_vec( - &metrics::ATTESTATION_SERVICE_TIMES, - &[metrics::AGGREGATES_HTTP_POST], - ); - if fork_name.electra_enabled() { - beacon_node - .post_validator_aggregate_and_proof_v2( - signed_aggregate_and_proofs_slice, - fork_name, - ) - .await - } else { - beacon_node - .post_validator_aggregate_and_proof_v1( - signed_aggregate_and_proofs_slice, - ) - .await - } - }, - ) + .first_success(|beacon_node| async move { + let _timer = metrics::start_timer_vec( + &metrics::ATTESTATION_SERVICE_TIMES, + &[metrics::AGGREGATES_HTTP_POST], + ); + if fork_name.electra_enabled() { + beacon_node + .post_validator_aggregate_and_proof_v2( + signed_aggregate_and_proofs_slice, + fork_name, + ) + .await + } else { + beacon_node + .post_validator_aggregate_and_proof_v1( + signed_aggregate_and_proofs_slice, + ) + .await + } + }) .await { Ok(()) => { diff --git a/validator_client/src/beacon_node_fallback.rs b/validator_client/src/beacon_node_fallback.rs index 6bba55d676..e5fe419983 100644 --- a/validator_client/src/beacon_node_fallback.rs +++ b/validator_client/src/beacon_node_fallback.rs @@ -2,14 +2,19 @@ //! "fallback" behaviour; it will try a request on all of the nodes until one or none of them //! succeed. 
-use crate::check_synced::check_synced; +use crate::beacon_node_health::{ + BeaconNodeHealth, BeaconNodeSyncDistanceTiers, ExecutionEngineHealth, IsOptimistic, + SyncDistanceTier, +}; +use crate::check_synced::check_node_health; use crate::http_metrics::metrics::{inc_counter_vec, ENDPOINT_ERRORS, ENDPOINT_REQUESTS}; use environment::RuntimeContext; use eth2::BeaconNodeHttpClient; use futures::future; -use serde::{Deserialize, Serialize}; -use slog::{debug, error, info, warn, Logger}; +use serde::{ser::SerializeStruct, Deserialize, Serialize, Serializer}; +use slog::{debug, error, warn, Logger}; use slot_clock::SlotClock; +use std::cmp::Ordering; use std::fmt; use std::fmt::Debug; use std::future::Future; @@ -18,7 +23,7 @@ use std::sync::Arc; use std::time::{Duration, Instant}; use strum::{EnumString, EnumVariantNames}; use tokio::{sync::RwLock, time::sleep}; -use types::{ChainSpec, Config, EthSpec}; +use types::{ChainSpec, Config as ConfigSpec, EthSpec, Slot}; /// Message emitted when the VC detects the BN is using a different spec. const UPDATE_REQUIRED_LOG_HINT: &str = "this VC or the remote BN may need updating"; @@ -32,6 +37,16 @@ const UPDATE_REQUIRED_LOG_HINT: &str = "this VC or the remote BN may need updati /// having the correct nodes up and running prior to the start of the slot. const SLOT_LOOKAHEAD: Duration = Duration::from_secs(2); +/// If the beacon node slot_clock is within 1 slot, this is deemed acceptable. Otherwise the node +/// will be marked as CandidateError::TimeDiscrepancy. +const FUTURE_SLOT_TOLERANCE: Slot = Slot::new(1); + +// Configuration for the Beacon Node fallback. +#[derive(Copy, Clone, Debug, Default, Serialize, Deserialize)] +pub struct Config { + pub sync_tolerances: BeaconNodeSyncDistanceTiers, +} + /// Indicates a measurement of latency between the VC and a BN. pub struct LatencyMeasurement { /// An identifier for the beacon node (e.g. the URL). 
@@ -76,34 +91,8 @@ pub fn start_fallback_updater_service( Ok(()) } -/// Indicates if a beacon node must be synced before some action is performed on it. -#[derive(PartialEq, Clone, Copy)] -pub enum RequireSynced { - Yes, - No, -} - -/// Indicates if a beacon node should be set to `Offline` if a request fails. -#[derive(PartialEq, Clone, Copy)] -pub enum OfflineOnFailure { - Yes, - No, -} - -impl PartialEq for RequireSynced { - fn eq(&self, other: &bool) -> bool { - if *other { - *self == RequireSynced::Yes - } else { - *self == RequireSynced::No - } - } -} - #[derive(Debug)] pub enum Error { - /// The node was unavailable and we didn't attempt to contact it. - Unavailable(CandidateError), /// We attempted to contact the node but it failed. RequestFailed(T), } @@ -112,7 +101,6 @@ impl Error { pub fn request_failure(&self) -> Option<&T> { match self { Error::RequestFailed(e) => Some(e), - _ => None, } } } @@ -141,106 +129,159 @@ impl Errors { } /// Reasons why a candidate might not be ready. 
-#[derive(Debug, Clone, Copy)] +#[derive(Debug, Clone, Copy, PartialEq, Deserialize, Serialize)] pub enum CandidateError { + PreGenesis, Uninitialized, Offline, Incompatible, - NotSynced, + TimeDiscrepancy, +} + +impl std::fmt::Display for CandidateError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + CandidateError::PreGenesis => write!(f, "PreGenesis"), + CandidateError::Uninitialized => write!(f, "Uninitialized"), + CandidateError::Offline => write!(f, "Offline"), + CandidateError::Incompatible => write!(f, "Incompatible"), + CandidateError::TimeDiscrepancy => write!(f, "TimeDiscrepancy"), + } + } +} + +#[derive(Debug, Clone, Deserialize)] +pub struct CandidateInfo { + pub index: usize, + pub endpoint: String, + pub health: Result, +} + +impl Serialize for CandidateInfo { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + let mut state = serializer.serialize_struct("CandidateInfo", 2)?; + + state.serialize_field("index", &self.index)?; + state.serialize_field("endpoint", &self.endpoint)?; + + // Serialize either the health or the error field based on the Result + match &self.health { + Ok(health) => { + state.serialize_field("health", health)?; + } + Err(e) => { + state.serialize_field("error", &e.to_string())?; + } + } + + state.end() + } } /// Represents a `BeaconNodeHttpClient` inside a `BeaconNodeFallback` that may or may not be used /// for a query. +#[derive(Clone, Debug)] pub struct CandidateBeaconNode { - beacon_node: BeaconNodeHttpClient, - status: RwLock>, + pub index: usize, + pub beacon_node: BeaconNodeHttpClient, + pub health: Arc>>, _phantom: PhantomData, } +impl PartialEq for CandidateBeaconNode { + fn eq(&self, other: &Self) -> bool { + self.index == other.index && self.beacon_node == other.beacon_node + } +} + +impl Eq for CandidateBeaconNode {} + impl CandidateBeaconNode { /// Instantiate a new node. 
- pub fn new(beacon_node: BeaconNodeHttpClient) -> Self { + pub fn new(beacon_node: BeaconNodeHttpClient, index: usize) -> Self { Self { + index, beacon_node, - status: RwLock::new(Err(CandidateError::Uninitialized)), + health: Arc::new(RwLock::new(Err(CandidateError::Uninitialized))), _phantom: PhantomData, } } - /// Returns the status of `self`. - /// - /// If `RequiredSynced::No`, any `NotSynced` node will be ignored and mapped to `Ok(())`. - pub async fn status(&self, synced: RequireSynced) -> Result<(), CandidateError> { - match *self.status.read().await { - Err(CandidateError::NotSynced) if synced == false => Ok(()), - other => other, - } + /// Returns the health of `self`. + pub async fn health(&self) -> Result { + *self.health.read().await } - /// Indicate that `self` is offline. - pub async fn set_offline(&self) { - *self.status.write().await = Err(CandidateError::Offline) - } - - /// Perform some queries against the node to determine if it is a good candidate, updating - /// `self.status` and returning that result. 
- pub async fn refresh_status( + pub async fn refresh_health( &self, + distance_tiers: &BeaconNodeSyncDistanceTiers, slot_clock: Option<&T>, spec: &ChainSpec, log: &Logger, ) -> Result<(), CandidateError> { - let previous_status = self.status(RequireSynced::Yes).await; - let was_offline = matches!(previous_status, Err(CandidateError::Offline)); + if let Err(e) = self.is_compatible(spec, log).await { + *self.health.write().await = Err(e); + return Err(e); + } - let new_status = if let Err(e) = self.is_online(was_offline, log).await { - Err(e) - } else if let Err(e) = self.is_compatible(spec, log).await { - Err(e) - } else if let Err(e) = self.is_synced(slot_clock, log).await { - Err(e) - } else { - Ok(()) - }; + if let Some(slot_clock) = slot_clock { + match check_node_health(&self.beacon_node, log).await { + Ok((head, is_optimistic, el_offline)) => { + let Some(slot_clock_head) = slot_clock.now() else { + let e = match slot_clock.is_prior_to_genesis() { + Some(true) => CandidateError::PreGenesis, + _ => CandidateError::Uninitialized, + }; + *self.health.write().await = Err(e); + return Err(e); + }; - // In case of concurrent use, the latest value will always be used. It's possible that a - // long time out might over-ride a recent successful response, leading to a falsely-offline - // status. I deem this edge-case acceptable in return for the concurrency benefits of not - // holding a write-lock whilst we check the online status of the node. - *self.status.write().await = new_status; + if head > slot_clock_head + FUTURE_SLOT_TOLERANCE { + let e = CandidateError::TimeDiscrepancy; + *self.health.write().await = Err(e); + return Err(e); + } + let sync_distance = slot_clock_head.saturating_sub(head); - new_status - } + // Currently ExecutionEngineHealth is solely determined by online status. + let execution_status = if el_offline { + ExecutionEngineHealth::Unhealthy + } else { + ExecutionEngineHealth::Healthy + }; - /// Checks if the node is reachable. 
- async fn is_online(&self, was_offline: bool, log: &Logger) -> Result<(), CandidateError> { - let result = self - .beacon_node - .get_node_version() - .await - .map(|body| body.data.version); + let optimistic_status = if is_optimistic { + IsOptimistic::Yes + } else { + IsOptimistic::No + }; - match result { - Ok(version) => { - if was_offline { - info!( - log, - "Connected to beacon node"; - "version" => version, - "endpoint" => %self.beacon_node, + let new_health = BeaconNodeHealth::from_status( + self.index, + sync_distance, + head, + optimistic_status, + execution_status, + distance_tiers, ); + + *self.health.write().await = Ok(new_health); + Ok(()) + } + Err(e) => { + // Set the health as `Err` which is sorted last in the list. + *self.health.write().await = Err(e); + Err(e) } - Ok(()) - } - Err(e) => { - warn!( - log, - "Offline beacon node"; - "error" => %e, - "endpoint" => %self.beacon_node, - ); - Err(CandidateError::Offline) } + } else { + // Slot clock will only be `None` at startup. + let e = CandidateError::Uninitialized; + *self.health.write().await = Err(e); + Err(e) } } @@ -248,7 +289,7 @@ impl CandidateBeaconNode { async fn is_compatible(&self, spec: &ChainSpec, log: &Logger) -> Result<(), CandidateError> { let config = self .beacon_node - .get_config_spec::() + .get_config_spec::() .await .map_err(|e| { error!( @@ -324,27 +365,15 @@ impl CandidateBeaconNode { Ok(()) } - - /// Checks if the beacon node is synced. - async fn is_synced( - &self, - slot_clock: Option<&T>, - log: &Logger, - ) -> Result<(), CandidateError> { - if let Some(slot_clock) = slot_clock { - check_synced(&self.beacon_node, slot_clock, Some(log)).await - } else { - // Skip this check if we don't supply a slot clock. - Ok(()) - } - } } /// A collection of `CandidateBeaconNode` that can be used to perform requests with "fallback" /// behaviour, where the failure of one candidate results in the next candidate receiving an /// identical query. 
+#[derive(Clone, Debug)] pub struct BeaconNodeFallback { - candidates: Vec>, + pub candidates: Arc>>>, + distance_tiers: BeaconNodeSyncDistanceTiers, slot_clock: Option, broadcast_topics: Vec, spec: Arc, @@ -354,12 +383,15 @@ pub struct BeaconNodeFallback { impl BeaconNodeFallback { pub fn new( candidates: Vec>, + config: Config, broadcast_topics: Vec, spec: Arc, log: Logger, ) -> Self { + let distance_tiers = config.sync_tolerances; Self { - candidates, + candidates: Arc::new(RwLock::new(candidates)), + distance_tiers, slot_clock: None, broadcast_topics, spec, @@ -377,69 +409,112 @@ impl BeaconNodeFallback { } /// The count of candidates, regardless of their state. - pub fn num_total(&self) -> usize { - self.candidates.len() - } - - /// The count of synced and ready candidates. - pub async fn num_synced(&self) -> usize { - let mut n = 0; - for candidate in &self.candidates { - if candidate.status(RequireSynced::Yes).await.is_ok() { - n += 1 - } - } - n - } - - /// The count of synced and ready fallbacks excluding the primary beacon node candidate. - pub async fn num_synced_fallback(&self) -> usize { - let mut n = 0; - for candidate in self.candidates.iter().skip(1) { - if candidate.status(RequireSynced::Yes).await.is_ok() { - n += 1 - } - } - n + pub async fn num_total(&self) -> usize { + self.candidates.read().await.len() } /// The count of candidates that are online and compatible, but not necessarily synced. pub async fn num_available(&self) -> usize { let mut n = 0; - for candidate in &self.candidates { - if candidate.status(RequireSynced::No).await.is_ok() { - n += 1 + for candidate in self.candidates.read().await.iter() { + match candidate.health().await { + Ok(_) | Err(CandidateError::Uninitialized) => n += 1, + Err(_) => continue, } } n } + // Returns all data required by the VC notifier. 
+ pub async fn get_notifier_info(&self) -> (Vec, usize, usize) { + let candidates = self.candidates.read().await; + + let mut candidate_info = Vec::with_capacity(candidates.len()); + let mut num_available = 0; + let mut num_synced = 0; + + for candidate in candidates.iter() { + let health = candidate.health().await; + + match health { + Ok(health) => { + if self + .distance_tiers + .compute_distance_tier(health.health_tier.sync_distance) + == SyncDistanceTier::Synced + { + num_synced += 1; + } + num_available += 1; + } + Err(CandidateError::Uninitialized) => num_available += 1, + Err(_) => (), + } + + candidate_info.push(CandidateInfo { + index: candidate.index, + endpoint: candidate.beacon_node.to_string(), + health, + }); + } + + (candidate_info, num_available, num_synced) + } + /// Loop through ALL candidates in `self.candidates` and update their sync status. /// /// It is possible for a node to return an unsynced status while continuing to serve /// low quality responses. To route around this it's best to poll all connected beacon nodes. /// A previous implementation of this function polled only the unavailable BNs. pub async fn update_all_candidates(&self) { - let futures = self - .candidates - .iter() - .map(|candidate| { - candidate.refresh_status(self.slot_clock.as_ref(), &self.spec, &self.log) - }) - .collect::>(); + // Clone the vec, so we release the read lock immediately. + // `candidate.health` is behind an Arc, so this would still allow us to mutate the values. 
+ let candidates = self.candidates.read().await.clone(); + let mut futures = Vec::with_capacity(candidates.len()); + let mut nodes = Vec::with_capacity(candidates.len()); - // run all updates concurrently and ignore errors - let _ = future::join_all(futures).await; + for candidate in candidates.iter() { + futures.push(candidate.refresh_health( + &self.distance_tiers, + self.slot_clock.as_ref(), + &self.spec, + &self.log, + )); + nodes.push(candidate.beacon_node.to_string()); + } + + // Run all updates concurrently. + let future_results = future::join_all(futures).await; + let results = future_results.iter().zip(nodes); + + for (result, node) in results { + if let Err(e) = result { + if *e != CandidateError::PreGenesis { + warn!( + self.log, + "A connected beacon node errored during routine health check"; + "error" => ?e, + "endpoint" => node, + ); + } + } + } + + drop(candidates); + + let mut candidates = self.candidates.write().await; + sort_nodes_by_health(&mut candidates).await; } /// Concurrently send a request to all candidates (regardless of /// offline/online) status and attempt to collect a rough reading on the /// latency between the VC and candidate. pub async fn measure_latency(&self) -> Vec { - let futures: Vec<_> = self - .candidates - .iter() - .map(|candidate| async { + let candidates = self.candidates.read().await; + let futures: Vec<_> = candidates + .clone() + .into_iter() + .map(|candidate| async move { let beacon_node_id = candidate.beacon_node.to_string(); // The `node/version` endpoint is used since I imagine it would // require the least processing in the BN and therefore measure @@ -456,6 +531,7 @@ impl BeaconNodeFallback { (beacon_node_id, response_instant) }) .collect(); + drop(candidates); let request_instant = Instant::now(); @@ -475,225 +551,120 @@ impl BeaconNodeFallback { /// Run `func` against each candidate in `self`, returning immediately if a result is found. /// Otherwise, return all the errors encountered along the way. 
- /// - /// First this function will try all nodes with a suitable status. If no candidates are suitable - /// or all the requests fail, it will try updating the status of all unsuitable nodes and - /// re-running `func` again. - pub async fn first_success<'a, F, O, Err, R>( - &'a self, - require_synced: RequireSynced, - offline_on_failure: OfflineOnFailure, - func: F, - ) -> Result> + pub async fn first_success(&self, func: F) -> Result> where - F: Fn(&'a BeaconNodeHttpClient) -> R, + F: Fn(BeaconNodeHttpClient) -> R, R: Future>, Err: Debug, { let mut errors = vec![]; - let mut to_retry = vec![]; - let mut retry_unsynced = vec![]; - let log = &self.log.clone(); + + // First pass: try `func` on all candidates. Candidate order has already been set in + // `update_all_candidates`. This ensures the most suitable node is always tried first. + let candidates = self.candidates.read().await; + let mut futures = vec![]; // Run `func` using a `candidate`, returning the value or capturing errors. - // - // We use a macro instead of a closure here since it is not trivial to move `func` into a - // closure. - macro_rules! try_func { - ($candidate: ident) => {{ - inc_counter_vec(&ENDPOINT_REQUESTS, &[$candidate.beacon_node.as_ref()]); - - // There exists a race condition where `func` may be called when the candidate is - // actually not ready. We deem this an acceptable inefficiency. - match func(&$candidate.beacon_node).await { - Ok(val) => return Ok(val), - Err(e) => { - debug!( - log, - "Request to beacon node failed"; - "node" => $candidate.beacon_node.to_string(), - "error" => ?e, - ); - // If we have an error on this function, make the client as not-ready. - // - // There exists a race condition where the candidate may have been marked - // as ready between the `func` call and now. We deem this an acceptable - // inefficiency. 
- if matches!(offline_on_failure, OfflineOnFailure::Yes) { - $candidate.set_offline().await; - } - errors.push(($candidate.beacon_node.to_string(), Error::RequestFailed(e))); - inc_counter_vec(&ENDPOINT_ERRORS, &[$candidate.beacon_node.as_ref()]); - } - } - }}; + for candidate in candidates.iter() { + futures.push(Self::run_on_candidate( + candidate.beacon_node.clone(), + &func, + &self.log, + )); } + drop(candidates); - // First pass: try `func` on all synced and ready candidates. - // - // This ensures that we always choose a synced node if it is available. - for candidate in &self.candidates { - match candidate.status(RequireSynced::Yes).await { - Err(e @ CandidateError::NotSynced) if require_synced == false => { - // This client is unsynced we will try it after trying all synced clients - retry_unsynced.push(candidate); - errors.push((candidate.beacon_node.to_string(), Error::Unavailable(e))); - } - Err(e) => { - // This client was not ready on the first pass, we might try it again later. - to_retry.push(candidate); - errors.push((candidate.beacon_node.to_string(), Error::Unavailable(e))); - } - _ => try_func!(candidate), + for future in futures { + match future.await { + Ok(val) => return Ok(val), + Err(e) => errors.push(e), } } - // Second pass: try `func` on ready unsynced candidates. This only runs if we permit - // unsynced candidates. - // - // Due to async race-conditions, it is possible that we will send a request to a candidate - // that has been set to an offline/unready status. This is acceptable. - if require_synced == false { - for candidate in retry_unsynced { - try_func!(candidate); + // Second pass. No candidates returned successfully. Try again with the same order. + // This will duplicate errors. + let candidates = self.candidates.read().await; + let mut futures = vec![]; + + // Run `func` using a `candidate`, returning the value or capturing errors. 
+ for candidate in candidates.iter() { + futures.push(Self::run_on_candidate( + candidate.beacon_node.clone(), + &func, + &self.log, + )); + } + drop(candidates); + + for future in futures { + match future.await { + Ok(val) => return Ok(val), + Err(e) => errors.push(e), } } - // Third pass: try again, attempting to make non-ready clients become ready. - for candidate in to_retry { - // If the candidate hasn't luckily transferred into the correct state in the meantime, - // force an update of the state. - let new_status = match candidate.status(require_synced).await { - Ok(()) => Ok(()), - Err(_) => { - candidate - .refresh_status(self.slot_clock.as_ref(), &self.spec, &self.log) - .await - } - }; - - match new_status { - Ok(()) => try_func!(candidate), - Err(CandidateError::NotSynced) if require_synced == false => try_func!(candidate), - Err(e) => { - errors.push((candidate.beacon_node.to_string(), Error::Unavailable(e))); - } - } - } - - // There were no candidates already ready and we were unable to make any of them ready. + // No candidates returned successfully. Err(Errors(errors)) } + /// Run the future `func` on `candidate` while reporting metrics. + async fn run_on_candidate( + candidate: BeaconNodeHttpClient, + func: F, + log: &Logger, + ) -> Result)> + where + F: Fn(BeaconNodeHttpClient) -> R, + R: Future>, + Err: Debug, + { + inc_counter_vec(&ENDPOINT_REQUESTS, &[candidate.as_ref()]); + + // There exists a race condition where `func` may be called when the candidate is + // actually not ready. We deem this an acceptable inefficiency. + match func(candidate.clone()).await { + Ok(val) => Ok(val), + Err(e) => { + debug!( + log, + "Request to beacon node failed"; + "node" => %candidate, + "error" => ?e, + ); + inc_counter_vec(&ENDPOINT_ERRORS, &[candidate.as_ref()]); + Err((candidate.to_string(), Error::RequestFailed(e))) + } + } + } + /// Run `func` against all candidates in `self`, collecting the result of `func` against each /// candidate. 
/// - /// First this function will try all nodes with a suitable status. If no candidates are suitable - /// it will try updating the status of all unsuitable nodes and re-running `func` again. - /// /// Note: This function returns `Ok(())` if `func` returned successfully on all beacon nodes. /// It returns a list of errors along with the beacon node id that failed for `func`. /// Since this ignores the actual result of `func`, this function should only be used for beacon /// node calls whose results we do not care about, only that they completed successfully. - pub async fn broadcast<'a, F, O, Err, R>( - &'a self, - require_synced: RequireSynced, - offline_on_failure: OfflineOnFailure, - func: F, - ) -> Result<(), Errors> + pub async fn broadcast(&self, func: F) -> Result<(), Errors> where - F: Fn(&'a BeaconNodeHttpClient) -> R, + F: Fn(BeaconNodeHttpClient) -> R, R: Future>, + Err: Debug, { - let mut to_retry = vec![]; - let mut retry_unsynced = vec![]; + // Run `func` on all candidates. + let candidates = self.candidates.read().await; + let mut futures = vec![]; // Run `func` using a `candidate`, returning the value or capturing errors. - let run_on_candidate = |candidate: &'a CandidateBeaconNode| async { - inc_counter_vec(&ENDPOINT_REQUESTS, &[candidate.beacon_node.as_ref()]); - - // There exists a race condition where `func` may be called when the candidate is - // actually not ready. We deem this an acceptable inefficiency. - match func(&candidate.beacon_node).await { - Ok(val) => Ok(val), - Err(e) => { - // If we have an error on this function, mark the client as not-ready. - // - // There exists a race condition where the candidate may have been marked - // as ready between the `func` call and now. We deem this an acceptable - // inefficiency. 
- if matches!(offline_on_failure, OfflineOnFailure::Yes) { - candidate.set_offline().await; - } - inc_counter_vec(&ENDPOINT_ERRORS, &[candidate.beacon_node.as_ref()]); - Err((candidate.beacon_node.to_string(), Error::RequestFailed(e))) - } - } - }; - - // First pass: try `func` on all synced and ready candidates. - // - // This ensures that we always choose a synced node if it is available. - let mut first_batch_futures = vec![]; - for candidate in &self.candidates { - match candidate.status(RequireSynced::Yes).await { - Ok(_) => { - first_batch_futures.push(run_on_candidate(candidate)); - } - Err(CandidateError::NotSynced) if require_synced == false => { - // This client is unsynced we will try it after trying all synced clients - retry_unsynced.push(candidate); - } - Err(_) => { - // This client was not ready on the first pass, we might try it again later. - to_retry.push(candidate); - } - } + for candidate in candidates.iter() { + futures.push(Self::run_on_candidate( + candidate.beacon_node.clone(), + &func, + &self.log, + )); } - let first_batch_results = futures::future::join_all(first_batch_futures).await; + drop(candidates); - // Second pass: try `func` on ready unsynced candidates. This only runs if we permit - // unsynced candidates. - // - // Due to async race-conditions, it is possible that we will send a request to a candidate - // that has been set to an offline/unready status. This is acceptable. - let second_batch_results = if require_synced == false { - futures::future::join_all(retry_unsynced.into_iter().map(run_on_candidate)).await - } else { - vec![] - }; - - // Third pass: try again, attempting to make non-ready clients become ready. - let mut third_batch_futures = vec![]; - let mut third_batch_results = vec![]; - for candidate in to_retry { - // If the candidate hasn't luckily transferred into the correct state in the meantime, - // force an update of the state. 
- let new_status = match candidate.status(require_synced).await { - Ok(()) => Ok(()), - Err(_) => { - candidate - .refresh_status(self.slot_clock.as_ref(), &self.spec, &self.log) - .await - } - }; - - match new_status { - Ok(()) => third_batch_futures.push(run_on_candidate(candidate)), - Err(CandidateError::NotSynced) if require_synced == false => { - third_batch_futures.push(run_on_candidate(candidate)) - } - Err(e) => third_batch_results.push(Err(( - candidate.beacon_node.to_string(), - Error::Unavailable(e), - ))), - } - } - third_batch_results.extend(futures::future::join_all(third_batch_futures).await); - - let mut results = first_batch_results; - results.extend(second_batch_results); - results.extend(third_batch_results); + let results = future::join_all(futures).await; let errors: Vec<_> = results.into_iter().filter_map(|res| res.err()).collect(); @@ -706,29 +677,47 @@ impl BeaconNodeFallback { /// Call `func` on first beacon node that returns success or on all beacon nodes /// depending on the `topic` and configuration. - pub async fn request<'a, F, Err, R>( - &'a self, - require_synced: RequireSynced, - offline_on_failure: OfflineOnFailure, - topic: ApiTopic, - func: F, - ) -> Result<(), Errors> + pub async fn request(&self, topic: ApiTopic, func: F) -> Result<(), Errors> where - F: Fn(&'a BeaconNodeHttpClient) -> R, + F: Fn(BeaconNodeHttpClient) -> R, R: Future>, Err: Debug, { if self.broadcast_topics.contains(&topic) { - self.broadcast(require_synced, offline_on_failure, func) - .await + self.broadcast(func).await } else { - self.first_success(require_synced, offline_on_failure, func) - .await?; + self.first_success(func).await?; Ok(()) } } } +/// Helper functions to allow sorting candidate nodes by health. +async fn sort_nodes_by_health(nodes: &mut Vec>) { + // Fetch all health values. + let health_results: Vec> = + future::join_all(nodes.iter().map(|node| node.health())).await; + + // Pair health results with their indices. 
+    let mut indices_with_health: Vec<(usize, Result)> =
+        health_results.into_iter().enumerate().collect();
+
+    // Sort indices based on their health.
+    indices_with_health.sort_by(|a, b| match (&a.1, &b.1) {
+        (Ok(health_a), Ok(health_b)) => health_a.cmp(health_b),
+        (Err(_), Ok(_)) => Ordering::Greater,
+        (Ok(_), Err(_)) => Ordering::Less,
+        (Err(_), Err(_)) => Ordering::Equal,
+    });
+
+    // Reorder candidates based on the sorted indices.
+    let sorted_nodes: Vec> = indices_with_health
+        .into_iter()
+        .map(|(index, _)| nodes[index].clone())
+        .collect();
+    *nodes = sorted_nodes;
+}
+
 /// Serves as a cue for `BeaconNodeFallback` to tell which requests need to be broadcasted.
 #[derive(Clone, Copy, Debug, PartialEq, Deserialize, Serialize, EnumString, EnumVariantNames)]
 #[strum(serialize_all = "kebab-case")]
@@ -747,10 +736,16 @@ impl ApiTopic {
 }
 
 #[cfg(test)]
-mod test {
+mod tests {
     use super::*;
+    use crate::beacon_node_health::BeaconNodeHealthTier;
+    use crate::SensitiveUrl;
+    use eth2::Timeouts;
     use std::str::FromStr;
     use strum::VariantNames;
+    use types::{MainnetEthSpec, Slot};
+
+    type E = MainnetEthSpec;
 
     #[test]
     fn api_topic_all() {
@@ -761,4 +756,115 @@ mod test {
             .map(|topic| ApiTopic::from_str(topic).unwrap())
             .eq(all.into_iter()));
     }
+
+    #[tokio::test]
+    async fn check_candidate_order() {
+        // These fields are irrelevant for sorting. They are set to arbitrary values.
+ let head = Slot::new(99); + let optimistic_status = IsOptimistic::No; + let execution_status = ExecutionEngineHealth::Healthy; + + fn new_candidate(index: usize) -> CandidateBeaconNode { + let beacon_node = BeaconNodeHttpClient::new( + SensitiveUrl::parse(&format!("http://example_{index}.com")).unwrap(), + Timeouts::set_all(Duration::from_secs(index as u64)), + ); + CandidateBeaconNode::new(beacon_node, index) + } + + let candidate_1 = new_candidate(1); + let expected_candidate_1 = new_candidate(1); + let candidate_2 = new_candidate(2); + let expected_candidate_2 = new_candidate(2); + let candidate_3 = new_candidate(3); + let expected_candidate_3 = new_candidate(3); + let candidate_4 = new_candidate(4); + let expected_candidate_4 = new_candidate(4); + let candidate_5 = new_candidate(5); + let expected_candidate_5 = new_candidate(5); + let candidate_6 = new_candidate(6); + let expected_candidate_6 = new_candidate(6); + + let synced = SyncDistanceTier::Synced; + let small = SyncDistanceTier::Small; + + // Despite `health_1` having a larger sync distance, it is inside the `synced` range which + // does not tie-break on sync distance and so will tie-break on `user_index` instead. + let health_1 = BeaconNodeHealth { + user_index: 1, + head, + optimistic_status, + execution_status, + health_tier: BeaconNodeHealthTier::new(1, Slot::new(2), synced), + }; + let health_2 = BeaconNodeHealth { + user_index: 2, + head, + optimistic_status, + execution_status, + health_tier: BeaconNodeHealthTier::new(2, Slot::new(1), synced), + }; + + // `health_3` and `health_4` have the same health tier and sync distance so should + // tie-break on `user_index`. 
+ let health_3 = BeaconNodeHealth { + user_index: 3, + head, + optimistic_status, + execution_status, + health_tier: BeaconNodeHealthTier::new(3, Slot::new(9), small), + }; + let health_4 = BeaconNodeHealth { + user_index: 4, + head, + optimistic_status, + execution_status, + health_tier: BeaconNodeHealthTier::new(3, Slot::new(9), small), + }; + + // `health_5` has a smaller sync distance and is outside the `synced` range so should be + // sorted first. Note the values of `user_index`. + let health_5 = BeaconNodeHealth { + user_index: 6, + head, + optimistic_status, + execution_status, + health_tier: BeaconNodeHealthTier::new(4, Slot::new(9), small), + }; + let health_6 = BeaconNodeHealth { + user_index: 5, + head, + optimistic_status, + execution_status, + health_tier: BeaconNodeHealthTier::new(4, Slot::new(10), small), + }; + + *candidate_1.health.write().await = Ok(health_1); + *candidate_2.health.write().await = Ok(health_2); + *candidate_3.health.write().await = Ok(health_3); + *candidate_4.health.write().await = Ok(health_4); + *candidate_5.health.write().await = Ok(health_5); + *candidate_6.health.write().await = Ok(health_6); + + let mut candidates = vec![ + candidate_3, + candidate_6, + candidate_5, + candidate_1, + candidate_4, + candidate_2, + ]; + let expected_candidates = vec![ + expected_candidate_1, + expected_candidate_2, + expected_candidate_3, + expected_candidate_4, + expected_candidate_5, + expected_candidate_6, + ]; + + sort_nodes_by_health(&mut candidates).await; + + assert_eq!(candidates, expected_candidates); + } } diff --git a/validator_client/src/beacon_node_health.rs b/validator_client/src/beacon_node_health.rs new file mode 100644 index 0000000000..1783bb312c --- /dev/null +++ b/validator_client/src/beacon_node_health.rs @@ -0,0 +1,420 @@ +use itertools::Itertools; +use serde::{Deserialize, Serialize}; +use std::cmp::Ordering; +use std::fmt::{Debug, Display, Formatter}; +use std::str::FromStr; +use types::Slot; + +/// Sync distances 
between 0 and DEFAULT_SYNC_TOLERANCE are considered `synced`. +/// Sync distance tiers are determined by the different modifiers. +/// +/// The default range is the following: +/// Synced: 0..=8 +/// Small: 9..=16 +/// Medium: 17..=64 +/// Large: 65.. +const DEFAULT_SYNC_TOLERANCE: Slot = Slot::new(8); +const DEFAULT_SMALL_SYNC_DISTANCE_MODIFIER: Slot = Slot::new(8); +const DEFAULT_MEDIUM_SYNC_DISTANCE_MODIFIER: Slot = Slot::new(48); + +type HealthTier = u8; +type SyncDistance = Slot; + +/// Helpful enum which is used when pattern matching to determine health tier. +#[derive(Copy, Clone, Debug, PartialEq, Eq, Deserialize, Serialize)] +pub enum SyncDistanceTier { + Synced, + Small, + Medium, + Large, +} + +/// Contains the different sync distance tiers which are determined at runtime by the +/// `beacon-nodes-sync-tolerances` flag. +#[derive(Copy, Clone, Debug, PartialEq, Eq, Deserialize, Serialize)] +pub struct BeaconNodeSyncDistanceTiers { + pub synced: SyncDistance, + pub small: SyncDistance, + pub medium: SyncDistance, +} + +impl Default for BeaconNodeSyncDistanceTiers { + fn default() -> Self { + Self { + synced: DEFAULT_SYNC_TOLERANCE, + small: DEFAULT_SYNC_TOLERANCE + DEFAULT_SMALL_SYNC_DISTANCE_MODIFIER, + medium: DEFAULT_SYNC_TOLERANCE + + DEFAULT_SMALL_SYNC_DISTANCE_MODIFIER + + DEFAULT_MEDIUM_SYNC_DISTANCE_MODIFIER, + } + } +} + +impl FromStr for BeaconNodeSyncDistanceTiers { + type Err = String; + + fn from_str(s: &str) -> Result { + let values: (u64, u64, u64) = s + .split(',') + .map(|s| { + s.parse() + .map_err(|e| format!("Invalid sync distance modifier: {e:?}")) + }) + .collect::, _>>()? 
+ .into_iter() + .collect_tuple() + .ok_or("Invalid number of sync distance modifiers".to_string())?; + + Ok(BeaconNodeSyncDistanceTiers { + synced: Slot::new(values.0), + small: Slot::new(values.0 + values.1), + medium: Slot::new(values.0 + values.1 + values.2), + }) + } +} + +impl BeaconNodeSyncDistanceTiers { + /// Takes a given sync distance and determines its tier based on the `sync_tolerance` defined by + /// the CLI. + pub fn compute_distance_tier(&self, distance: SyncDistance) -> SyncDistanceTier { + if distance <= self.synced { + SyncDistanceTier::Synced + } else if distance <= self.small { + SyncDistanceTier::Small + } else if distance <= self.medium { + SyncDistanceTier::Medium + } else { + SyncDistanceTier::Large + } + } +} + +/// Execution Node health metrics. +/// +/// Currently only considers `el_offline`. +#[derive(Copy, Clone, Debug, PartialEq, Eq, Deserialize, Serialize)] +pub enum ExecutionEngineHealth { + Healthy, + Unhealthy, +} + +#[derive(Copy, Clone, Debug, PartialEq, Eq, Deserialize, Serialize)] +pub enum IsOptimistic { + Yes, + No, +} + +#[derive(Copy, Clone, Debug, PartialEq, Eq, Deserialize, Serialize)] +pub struct BeaconNodeHealthTier { + pub tier: HealthTier, + pub sync_distance: SyncDistance, + pub distance_tier: SyncDistanceTier, +} + +impl Display for BeaconNodeHealthTier { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!(f, "Tier{}({})", self.tier, self.sync_distance) + } +} + +impl Ord for BeaconNodeHealthTier { + fn cmp(&self, other: &Self) -> Ordering { + let ordering = self.tier.cmp(&other.tier); + if ordering == Ordering::Equal { + if self.distance_tier == SyncDistanceTier::Synced { + // Don't tie-break on sync distance in these cases. + // This ensures validator clients don't artificially prefer one node. 
+ ordering + } else { + self.sync_distance.cmp(&other.sync_distance) + } + } else { + ordering + } + } +} + +impl PartialOrd for BeaconNodeHealthTier { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl BeaconNodeHealthTier { + pub fn new( + tier: HealthTier, + sync_distance: SyncDistance, + distance_tier: SyncDistanceTier, + ) -> Self { + Self { + tier, + sync_distance, + distance_tier, + } + } +} + +/// Beacon Node Health metrics. +#[derive(Copy, Clone, Debug, PartialEq, Eq, Deserialize, Serialize)] +pub struct BeaconNodeHealth { + // The index of the Beacon Node. This should correspond with its position in the + // `--beacon-nodes` list. Note that the `user_index` field is used to tie-break nodes with the + // same health so that nodes with a lower index are preferred. + pub user_index: usize, + // The slot number of the head. + pub head: Slot, + // Whether the node is optimistically synced. + pub optimistic_status: IsOptimistic, + // The status of the nodes connected Execution Engine. + pub execution_status: ExecutionEngineHealth, + // The overall health tier of the Beacon Node. Used to rank the nodes for the purposes of + // fallbacks. + pub health_tier: BeaconNodeHealthTier, +} + +impl Ord for BeaconNodeHealth { + fn cmp(&self, other: &Self) -> Ordering { + let ordering = self.health_tier.cmp(&other.health_tier); + if ordering == Ordering::Equal { + // Tie-break node health by `user_index`. 
+ self.user_index.cmp(&other.user_index) + } else { + ordering + } + } +} + +impl PartialOrd for BeaconNodeHealth { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl BeaconNodeHealth { + pub fn from_status( + user_index: usize, + sync_distance: Slot, + head: Slot, + optimistic_status: IsOptimistic, + execution_status: ExecutionEngineHealth, + distance_tiers: &BeaconNodeSyncDistanceTiers, + ) -> Self { + let health_tier = BeaconNodeHealth::compute_health_tier( + sync_distance, + optimistic_status, + execution_status, + distance_tiers, + ); + + Self { + user_index, + head, + optimistic_status, + execution_status, + health_tier, + } + } + + pub fn get_index(&self) -> usize { + self.user_index + } + + pub fn get_health_tier(&self) -> BeaconNodeHealthTier { + self.health_tier + } + + fn compute_health_tier( + sync_distance: SyncDistance, + optimistic_status: IsOptimistic, + execution_status: ExecutionEngineHealth, + sync_distance_tiers: &BeaconNodeSyncDistanceTiers, + ) -> BeaconNodeHealthTier { + let sync_distance_tier = sync_distance_tiers.compute_distance_tier(sync_distance); + let health = (sync_distance_tier, optimistic_status, execution_status); + + match health { + (SyncDistanceTier::Synced, IsOptimistic::No, ExecutionEngineHealth::Healthy) => { + BeaconNodeHealthTier::new(1, sync_distance, sync_distance_tier) + } + (SyncDistanceTier::Small, IsOptimistic::No, ExecutionEngineHealth::Healthy) => { + BeaconNodeHealthTier::new(2, sync_distance, sync_distance_tier) + } + (SyncDistanceTier::Synced, IsOptimistic::No, ExecutionEngineHealth::Unhealthy) => { + BeaconNodeHealthTier::new(3, sync_distance, sync_distance_tier) + } + (SyncDistanceTier::Medium, IsOptimistic::No, ExecutionEngineHealth::Healthy) => { + BeaconNodeHealthTier::new(4, sync_distance, sync_distance_tier) + } + (SyncDistanceTier::Synced, IsOptimistic::Yes, ExecutionEngineHealth::Healthy) => { + BeaconNodeHealthTier::new(5, sync_distance, sync_distance_tier) + } + 
(SyncDistanceTier::Synced, IsOptimistic::Yes, ExecutionEngineHealth::Unhealthy) => { + BeaconNodeHealthTier::new(6, sync_distance, sync_distance_tier) + } + (SyncDistanceTier::Small, IsOptimistic::No, ExecutionEngineHealth::Unhealthy) => { + BeaconNodeHealthTier::new(7, sync_distance, sync_distance_tier) + } + (SyncDistanceTier::Small, IsOptimistic::Yes, ExecutionEngineHealth::Healthy) => { + BeaconNodeHealthTier::new(8, sync_distance, sync_distance_tier) + } + (SyncDistanceTier::Small, IsOptimistic::Yes, ExecutionEngineHealth::Unhealthy) => { + BeaconNodeHealthTier::new(9, sync_distance, sync_distance_tier) + } + (SyncDistanceTier::Large, IsOptimistic::No, ExecutionEngineHealth::Healthy) => { + BeaconNodeHealthTier::new(10, sync_distance, sync_distance_tier) + } + (SyncDistanceTier::Medium, IsOptimistic::No, ExecutionEngineHealth::Unhealthy) => { + BeaconNodeHealthTier::new(11, sync_distance, sync_distance_tier) + } + (SyncDistanceTier::Medium, IsOptimistic::Yes, ExecutionEngineHealth::Healthy) => { + BeaconNodeHealthTier::new(12, sync_distance, sync_distance_tier) + } + (SyncDistanceTier::Medium, IsOptimistic::Yes, ExecutionEngineHealth::Unhealthy) => { + BeaconNodeHealthTier::new(13, sync_distance, sync_distance_tier) + } + (SyncDistanceTier::Large, IsOptimistic::No, ExecutionEngineHealth::Unhealthy) => { + BeaconNodeHealthTier::new(14, sync_distance, sync_distance_tier) + } + (SyncDistanceTier::Large, IsOptimistic::Yes, ExecutionEngineHealth::Healthy) => { + BeaconNodeHealthTier::new(15, sync_distance, sync_distance_tier) + } + (SyncDistanceTier::Large, IsOptimistic::Yes, ExecutionEngineHealth::Unhealthy) => { + BeaconNodeHealthTier::new(16, sync_distance, sync_distance_tier) + } + } + } +} + +#[cfg(test)] +mod tests { + use super::ExecutionEngineHealth::{Healthy, Unhealthy}; + use super::{ + BeaconNodeHealth, BeaconNodeHealthTier, BeaconNodeSyncDistanceTiers, IsOptimistic, + SyncDistanceTier, + }; + use crate::beacon_node_fallback::Config; + use 
std::str::FromStr; + use types::Slot; + + #[test] + fn all_possible_health_tiers() { + let config = Config::default(); + let beacon_node_sync_distance_tiers = config.sync_tolerances; + + let mut health_vec = vec![]; + + for head_slot in 0..=64 { + for optimistic_status in &[IsOptimistic::No, IsOptimistic::Yes] { + for ee_health in &[Healthy, Unhealthy] { + let health = BeaconNodeHealth::from_status( + 0, + Slot::new(0), + Slot::new(head_slot), + *optimistic_status, + *ee_health, + &beacon_node_sync_distance_tiers, + ); + health_vec.push(health); + } + } + } + + for health in health_vec { + let health_tier = health.get_health_tier(); + let tier = health_tier.tier; + let distance = health_tier.sync_distance; + + let distance_tier = beacon_node_sync_distance_tiers.compute_distance_tier(distance); + + // Check sync distance. + if [1, 3, 5, 6].contains(&tier) { + assert!(distance_tier == SyncDistanceTier::Synced) + } else if [2, 7, 8, 9].contains(&tier) { + assert!(distance_tier == SyncDistanceTier::Small); + } else if [4, 11, 12, 13].contains(&tier) { + assert!(distance_tier == SyncDistanceTier::Medium); + } else { + assert!(distance_tier == SyncDistanceTier::Large); + } + + // Check optimistic status. + if [1, 2, 3, 4, 7, 10, 11, 14].contains(&tier) { + assert_eq!(health.optimistic_status, IsOptimistic::No); + } else { + assert_eq!(health.optimistic_status, IsOptimistic::Yes); + } + + // Check execution health. 
+ if [3, 6, 7, 9, 11, 13, 14, 16].contains(&tier) { + assert_eq!(health.execution_status, Unhealthy); + } else { + assert_eq!(health.execution_status, Healthy); + } + } + } + + fn new_distance_tier( + distance: u64, + distance_tiers: &BeaconNodeSyncDistanceTiers, + ) -> BeaconNodeHealthTier { + BeaconNodeHealth::compute_health_tier( + Slot::new(distance), + IsOptimistic::No, + Healthy, + distance_tiers, + ) + } + + #[test] + fn sync_tolerance_default() { + let distance_tiers = BeaconNodeSyncDistanceTiers::default(); + + let synced_low = new_distance_tier(0, &distance_tiers); + let synced_high = new_distance_tier(8, &distance_tiers); + + let small_low = new_distance_tier(9, &distance_tiers); + let small_high = new_distance_tier(16, &distance_tiers); + + let medium_low = new_distance_tier(17, &distance_tiers); + let medium_high = new_distance_tier(64, &distance_tiers); + let large = new_distance_tier(65, &distance_tiers); + + assert_eq!(synced_low.tier, 1); + assert_eq!(synced_high.tier, 1); + assert_eq!(small_low.tier, 2); + assert_eq!(small_high.tier, 2); + assert_eq!(medium_low.tier, 4); + assert_eq!(medium_high.tier, 4); + assert_eq!(large.tier, 10); + } + + #[test] + fn sync_tolerance_from_str() { + // String should set the tiers as: + // synced: 0..=4 + // small: 5..=8 + // medium 9..=12 + // large: 13.. 
+ + let distance_tiers = BeaconNodeSyncDistanceTiers::from_str("4,4,4").unwrap(); + + let synced_low = new_distance_tier(0, &distance_tiers); + let synced_high = new_distance_tier(4, &distance_tiers); + + let small_low = new_distance_tier(5, &distance_tiers); + let small_high = new_distance_tier(8, &distance_tiers); + + let medium_low = new_distance_tier(9, &distance_tiers); + let medium_high = new_distance_tier(12, &distance_tiers); + + let large = new_distance_tier(13, &distance_tiers); + + assert_eq!(synced_low.tier, 1); + assert_eq!(synced_high.tier, 1); + assert_eq!(small_low.tier, 2); + assert_eq!(small_high.tier, 2); + assert_eq!(medium_low.tier, 4); + assert_eq!(medium_high.tier, 4); + assert_eq!(large.tier, 10); + } +} diff --git a/validator_client/src/block_service.rs b/validator_client/src/block_service.rs index af11d82eb5..665eaf0a0f 100644 --- a/validator_client/src/block_service.rs +++ b/validator_client/src/block_service.rs @@ -1,9 +1,8 @@ use crate::beacon_node_fallback::{Error as FallbackError, Errors}; use crate::{ - beacon_node_fallback::{ApiTopic, BeaconNodeFallback, RequireSynced}, + beacon_node_fallback::{ApiTopic, BeaconNodeFallback}, determine_graffiti, graffiti_file::GraffitiFile, - OfflineOnFailure, }; use crate::{ http_metrics::metrics, @@ -141,26 +140,16 @@ pub struct ProposerFallback { impl ProposerFallback { // Try `func` on `self.proposer_nodes` first. If that doesn't work, try `self.beacon_nodes`. - pub async fn request_proposers_first<'a, F, Err, R>( - &'a self, - require_synced: RequireSynced, - offline_on_failure: OfflineOnFailure, - func: F, - ) -> Result<(), Errors> + pub async fn request_proposers_first(&self, func: F) -> Result<(), Errors> where - F: Fn(&'a BeaconNodeHttpClient) -> R + Clone, + F: Fn(BeaconNodeHttpClient) -> R + Clone, R: Future>, Err: Debug, { // If there are proposer nodes, try calling `func` on them and return early if they are successful. 
if let Some(proposer_nodes) = &self.proposer_nodes { if proposer_nodes - .request( - require_synced, - offline_on_failure, - ApiTopic::Blocks, - func.clone(), - ) + .request(ApiTopic::Blocks, func.clone()) .await .is_ok() { @@ -169,28 +158,18 @@ impl ProposerFallback { } // If the proposer nodes failed, try on the non-proposer nodes. - self.beacon_nodes - .request(require_synced, offline_on_failure, ApiTopic::Blocks, func) - .await + self.beacon_nodes.request(ApiTopic::Blocks, func).await } // Try `func` on `self.beacon_nodes` first. If that doesn't work, try `self.proposer_nodes`. - pub async fn request_proposers_last<'a, F, O, Err, R>( - &'a self, - require_synced: RequireSynced, - offline_on_failure: OfflineOnFailure, - func: F, - ) -> Result> + pub async fn request_proposers_last(&self, func: F) -> Result> where - F: Fn(&'a BeaconNodeHttpClient) -> R + Clone, + F: Fn(BeaconNodeHttpClient) -> R + Clone, R: Future>, Err: Debug, { // Try running `func` on the non-proposer beacon nodes. - let beacon_nodes_result = self - .beacon_nodes - .first_success(require_synced, offline_on_failure, func.clone()) - .await; + let beacon_nodes_result = self.beacon_nodes.first_success(func.clone()).await; match (beacon_nodes_result, &self.proposer_nodes) { // The non-proposer node call succeed, return the result. @@ -198,11 +177,7 @@ impl ProposerFallback { // The non-proposer node call failed, but we don't have any proposer nodes. Return an error. (Err(e), None) => Err(e), // The non-proposer node call failed, try the same call on the proposer nodes. 
- (Err(_), Some(proposer_nodes)) => { - proposer_nodes - .first_success(require_synced, offline_on_failure, func) - .await - } + (Err(_), Some(proposer_nodes)) => proposer_nodes.first_success(func).await, } } } @@ -211,8 +186,8 @@ impl ProposerFallback { pub struct Inner { validator_store: Arc>, slot_clock: Arc, - beacon_nodes: Arc>, - proposer_nodes: Option>>, + pub(crate) beacon_nodes: Arc>, + pub(crate) proposer_nodes: Option>>, context: RuntimeContext, graffiti: Option, graffiti_file: Option, @@ -418,14 +393,10 @@ impl BlockService { // protect them from DoS attacks and they're most likely to successfully // publish a block. proposer_fallback - .request_proposers_first( - RequireSynced::No, - OfflineOnFailure::Yes, - |beacon_node| async { - self.publish_signed_block_contents(&signed_block, beacon_node) - .await - }, - ) + .request_proposers_first(|beacon_node| async { + self.publish_signed_block_contents(&signed_block, beacon_node) + .await + }) .await?; info!( @@ -503,32 +474,28 @@ impl BlockService { // Try the proposer nodes last, since it's likely that they don't have a // great view of attestations on the network. 
let unsigned_block = proposer_fallback - .request_proposers_last( - RequireSynced::No, - OfflineOnFailure::Yes, - |beacon_node| async move { - let _get_timer = metrics::start_timer_vec( - &metrics::BLOCK_SERVICE_TIMES, - &[metrics::BEACON_BLOCK_HTTP_GET], - ); - Self::get_validator_block( - beacon_node, - slot, - randao_reveal_ref, - graffiti, - proposer_index, - builder_boost_factor, - log, - ) - .await - .map_err(|e| { - BlockError::Recoverable(format!( - "Error from beacon node when producing block: {:?}", - e - )) - }) - }, - ) + .request_proposers_last(|beacon_node| async move { + let _get_timer = metrics::start_timer_vec( + &metrics::BLOCK_SERVICE_TIMES, + &[metrics::BEACON_BLOCK_HTTP_GET], + ); + Self::get_validator_block( + &beacon_node, + slot, + randao_reveal_ref, + graffiti, + proposer_index, + builder_boost_factor, + log, + ) + .await + .map_err(|e| { + BlockError::Recoverable(format!( + "Error from beacon node when producing block: {:?}", + e + )) + }) + }) .await?; self_ref @@ -547,7 +514,7 @@ impl BlockService { async fn publish_signed_block_contents( &self, signed_block: &SignedBlock, - beacon_node: &BeaconNodeHttpClient, + beacon_node: BeaconNodeHttpClient, ) -> Result<(), BlockError> { let log = self.context.log(); let slot = signed_block.slot(); diff --git a/validator_client/src/check_synced.rs b/validator_client/src/check_synced.rs index 6437682512..2e9a62ff65 100644 --- a/validator_client/src/check_synced.rs +++ b/validator_client/src/check_synced.rs @@ -1,80 +1,27 @@ use crate::beacon_node_fallback::CandidateError; -use eth2::BeaconNodeHttpClient; -use slog::{debug, error, warn, Logger}; -use slot_clock::SlotClock; +use eth2::{types::Slot, BeaconNodeHttpClient}; +use slog::{warn, Logger}; -/// A distance in slots. 
-const SYNC_TOLERANCE: u64 = 4; - -/// Returns -/// -/// `Ok(())` if the beacon node is synced and ready for action, -/// `Err(CandidateError::Offline)` if the beacon node is unreachable, -/// `Err(CandidateError::NotSynced)` if the beacon node indicates that it is syncing **AND** -/// it is more than `SYNC_TOLERANCE` behind the highest -/// known slot. -/// -/// The second condition means the even if the beacon node thinks that it's syncing, we'll still -/// try to use it if it's close enough to the head. -pub async fn check_synced( +pub async fn check_node_health( beacon_node: &BeaconNodeHttpClient, - slot_clock: &T, - log_opt: Option<&Logger>, -) -> Result<(), CandidateError> { + log: &Logger, +) -> Result<(Slot, bool, bool), CandidateError> { let resp = match beacon_node.get_node_syncing().await { Ok(resp) => resp, Err(e) => { - if let Some(log) = log_opt { - warn!( - log, - "Unable connect to beacon node"; - "error" => %e - ) - } + warn!( + log, + "Unable connect to beacon node"; + "error" => %e + ); return Err(CandidateError::Offline); } }; - let bn_is_synced = !resp.data.is_syncing || (resp.data.sync_distance.as_u64() < SYNC_TOLERANCE); - let is_synced = bn_is_synced && !resp.data.el_offline; - - if let Some(log) = log_opt { - if !is_synced { - debug!( - log, - "Beacon node sync status"; - "status" => format!("{:?}", resp), - ); - - warn!( - log, - "Beacon node is not synced"; - "sync_distance" => resp.data.sync_distance.as_u64(), - "head_slot" => resp.data.head_slot.as_u64(), - "endpoint" => %beacon_node, - "el_offline" => resp.data.el_offline, - ); - } - - if let Some(local_slot) = slot_clock.now() { - let remote_slot = resp.data.head_slot + resp.data.sync_distance; - if remote_slot + 1 < local_slot || local_slot + 1 < remote_slot { - error!( - log, - "Time discrepancy with beacon node"; - "msg" => "check the system time on this host and the beacon node", - "beacon_node_slot" => remote_slot, - "local_slot" => local_slot, - "endpoint" => %beacon_node, - ); 
- } - } - } - - if is_synced { - Ok(()) - } else { - Err(CandidateError::NotSynced) - } + Ok(( + resp.data.head_slot, + resp.data.is_optimistic, + resp.data.el_offline, + )) } diff --git a/validator_client/src/cli.rs b/validator_client/src/cli.rs index f84260a924..b027ad0df6 100644 --- a/validator_client/src/cli.rs +++ b/validator_client/src/cli.rs @@ -444,6 +444,33 @@ pub fn cli_app() -> Command { .help_heading(FLAG_HEADER) .display_order(0) ) + .arg( + Arg::new("beacon-nodes-sync-tolerances") + .long("beacon-nodes-sync-tolerances") + .value_name("SYNC_TOLERANCES") + .help("A comma-separated list of 3 values which sets the size of each sync distance range when \ + determining the health of each connected beacon node. \ + The first value determines the `Synced` range. \ + If a connected beacon node is synced to within this number of slots it is considered 'Synced'. \ + The second value determines the `Small` sync distance range. \ + This range starts immediately after the `Synced` range. \ + The third value determines the `Medium` sync distance range. \ + This range starts immediately after the `Small` range. \ + Any sync distance value beyond that is considered `Large`. \ + For example, a value of `8,8,48` would have ranges like the following: \ + `Synced`: 0..=8 \ + `Small`: 9..=16 \ + `Medium`: 17..=64 \ + `Large`: 65.. \ + These values are used to determine what ordering beacon node fallbacks are used in. \ + Generally, `Synced` nodes are preferred over `Small` and so on. \ + Nodes in the `Synced` range will tie-break based on their ordering in `--beacon-nodes`. \ + This ensures the primary beacon node is prioritised. 
\ + [default: 8,8,48]") + .action(ArgAction::Set) + .help_heading(FLAG_HEADER) + .display_order(0) + ) .arg( Arg::new("disable-slashing-protection-web3signer") .long("disable-slashing-protection-web3signer") diff --git a/validator_client/src/config.rs b/validator_client/src/config.rs index 204c5b8b6c..c2c445c48c 100644 --- a/validator_client/src/config.rs +++ b/validator_client/src/config.rs @@ -1,6 +1,8 @@ use crate::beacon_node_fallback::ApiTopic; use crate::graffiti_file::GraffitiFile; -use crate::{http_api, http_metrics}; +use crate::{ + beacon_node_fallback, beacon_node_health::BeaconNodeSyncDistanceTiers, http_api, http_metrics, +}; use clap::ArgMatches; use clap_utils::{flags::DISABLE_MALLOC_TUNING_FLAG, parse_optional, parse_required}; use directory::{ @@ -14,6 +16,7 @@ use slog::{info, warn, Logger}; use std::fs; use std::net::IpAddr; use std::path::PathBuf; +use std::str::FromStr; use std::time::Duration; use types::{Address, GRAFFITI_BYTES_LEN}; @@ -21,7 +24,7 @@ pub const DEFAULT_BEACON_NODE: &str = "http://localhost:5052/"; pub const DEFAULT_WEB3SIGNER_KEEP_ALIVE: Option = Some(Duration::from_secs(20)); /// Stores the core configuration for this validator instance. -#[derive(Clone, Serialize, Deserialize)] +#[derive(Clone, Debug, Serialize, Deserialize)] pub struct Config { /// The data directory, which stores all validator databases pub validator_dir: PathBuf, @@ -52,6 +55,8 @@ pub struct Config { pub http_api: http_api::Config, /// Configuration for the HTTP REST API. pub http_metrics: http_metrics::Config, + /// Configuration for the Beacon Node fallback. + pub beacon_node_fallback: beacon_node_fallback::Config, /// Configuration for sending metrics to a remote explorer endpoint. 
pub monitoring_api: Option, /// If true, enable functionality that monitors the network for attestations or proposals from @@ -117,6 +122,7 @@ impl Default for Config { fee_recipient: None, http_api: <_>::default(), http_metrics: <_>::default(), + beacon_node_fallback: <_>::default(), monitoring_api: None, enable_doppelganger_protection: false, enable_high_validator_count_metrics: false, @@ -258,6 +264,16 @@ impl Config { .collect::>()?; } + /* + * Beacon node fallback + */ + if let Some(sync_tolerance) = cli_args.get_one::("beacon-nodes-sync-tolerances") { + config.beacon_node_fallback.sync_tolerances = + BeaconNodeSyncDistanceTiers::from_str(sync_tolerance)?; + } else { + config.beacon_node_fallback.sync_tolerances = BeaconNodeSyncDistanceTiers::default(); + } + /* * Web3 signer */ diff --git a/validator_client/src/doppelganger_service.rs b/validator_client/src/doppelganger_service.rs index 2c8eca8560..1d552cc5ad 100644 --- a/validator_client/src/doppelganger_service.rs +++ b/validator_client/src/doppelganger_service.rs @@ -29,9 +29,8 @@ //! //! Doppelganger protection is a best-effort, last-line-of-defence mitigation. Do not rely upon it. -use crate::beacon_node_fallback::{BeaconNodeFallback, RequireSynced}; +use crate::beacon_node_fallback::BeaconNodeFallback; use crate::validator_store::ValidatorStore; -use crate::OfflineOnFailure; use environment::RuntimeContext; use eth2::types::LivenessResponseData; use parking_lot::RwLock; @@ -175,12 +174,11 @@ async fn beacon_node_liveness<'a, T: 'static + SlotClock, E: EthSpec>( } else { // Request the previous epoch liveness state from the beacon node. 
beacon_nodes - .first_success( - RequireSynced::Yes, - OfflineOnFailure::Yes, - |beacon_node| async { + .first_success(|beacon_node| { + let validator_indices_ref = &validator_indices; + async move { beacon_node - .post_validator_liveness_epoch(previous_epoch, &validator_indices) + .post_validator_liveness_epoch(previous_epoch, validator_indices_ref) .await .map_err(|e| format!("Failed query for validator liveness: {:?}", e)) .map(|result| { @@ -194,8 +192,8 @@ async fn beacon_node_liveness<'a, T: 'static + SlotClock, E: EthSpec>( }) .collect() }) - }, - ) + } + }) .await .unwrap_or_else(|e| { crit!( @@ -212,12 +210,11 @@ async fn beacon_node_liveness<'a, T: 'static + SlotClock, E: EthSpec>( // Request the current epoch liveness state from the beacon node. let current_epoch_responses = beacon_nodes - .first_success( - RequireSynced::Yes, - OfflineOnFailure::Yes, - |beacon_node| async { + .first_success(|beacon_node| { + let validator_indices_ref = &validator_indices; + async move { beacon_node - .post_validator_liveness_epoch(current_epoch, &validator_indices) + .post_validator_liveness_epoch(current_epoch, validator_indices_ref) .await .map_err(|e| format!("Failed query for validator liveness: {:?}", e)) .map(|result| { @@ -231,8 +228,8 @@ async fn beacon_node_liveness<'a, T: 'static + SlotClock, E: EthSpec>( }) .collect() }) - }, - ) + } + }) .await .unwrap_or_else(|e| { crit!( diff --git a/validator_client/src/duties_service.rs b/validator_client/src/duties_service.rs index 1c205b38e5..cf8d499792 100644 --- a/validator_client/src/duties_service.rs +++ b/validator_client/src/duties_service.rs @@ -8,7 +8,7 @@ pub mod sync; -use crate::beacon_node_fallback::{ApiTopic, BeaconNodeFallback, OfflineOnFailure, RequireSynced}; +use crate::beacon_node_fallback::{ApiTopic, BeaconNodeFallback}; use crate::http_metrics::metrics::{get_int_gauge, set_int_gauge, ATTESTATION_DUTY}; use crate::{ block_service::BlockServiceNotification, @@ -517,22 +517,18 @@ async fn 
poll_validator_indices( // Query the remote BN to resolve a pubkey to a validator index. let download_result = duties_service .beacon_nodes - .first_success( - RequireSynced::No, - OfflineOnFailure::Yes, - |beacon_node| async move { - let _timer = metrics::start_timer_vec( - &metrics::DUTIES_SERVICE_TIMES, - &[metrics::VALIDATOR_ID_HTTP_GET], - ); - beacon_node - .get_beacon_states_validator_id( - StateId::Head, - &ValidatorId::PublicKey(pubkey), - ) - .await - }, - ) + .first_success(|beacon_node| async move { + let _timer = metrics::start_timer_vec( + &metrics::DUTIES_SERVICE_TIMES, + &[metrics::VALIDATOR_ID_HTTP_GET], + ); + beacon_node + .get_beacon_states_validator_id( + StateId::Head, + &ValidatorId::PublicKey(pubkey), + ) + .await + }) .await; let fee_recipient = duties_service @@ -744,20 +740,15 @@ async fn poll_beacon_attesters( let subscriptions_ref = &subscriptions; let subscription_result = duties_service .beacon_nodes - .request( - RequireSynced::No, - OfflineOnFailure::Yes, - ApiTopic::Subscriptions, - |beacon_node| async move { - let _timer = metrics::start_timer_vec( - &metrics::DUTIES_SERVICE_TIMES, - &[metrics::SUBSCRIPTIONS_HTTP_POST], - ); - beacon_node - .post_validator_beacon_committee_subscriptions(subscriptions_ref) - .await - }, - ) + .request(ApiTopic::Subscriptions, |beacon_node| async move { + let _timer = metrics::start_timer_vec( + &metrics::DUTIES_SERVICE_TIMES, + &[metrics::SUBSCRIPTIONS_HTTP_POST], + ); + beacon_node + .post_validator_beacon_committee_subscriptions(subscriptions_ref) + .await + }) .await; if subscription_result.as_ref().is_ok() { debug!( @@ -769,7 +760,7 @@ async fn poll_beacon_attesters( subscription_slots.record_successful_subscription_at(current_slot); } } else if let Err(e) = subscription_result { - if e.num_errors() < duties_service.beacon_nodes.num_total() { + if e.num_errors() < duties_service.beacon_nodes.num_total().await { warn!( log, "Some subscriptions failed"; @@ -1037,19 +1028,15 @@ async fn 
post_validator_duties_attester( ) -> Result>, Error> { duties_service .beacon_nodes - .first_success( - RequireSynced::No, - OfflineOnFailure::Yes, - |beacon_node| async move { - let _timer = metrics::start_timer_vec( - &metrics::DUTIES_SERVICE_TIMES, - &[metrics::ATTESTER_DUTIES_HTTP_POST], - ); - beacon_node - .post_validator_duties_attester(epoch, validator_indices) - .await - }, - ) + .first_success(|beacon_node| async move { + let _timer = metrics::start_timer_vec( + &metrics::DUTIES_SERVICE_TIMES, + &[metrics::ATTESTER_DUTIES_HTTP_POST], + ); + beacon_node + .post_validator_duties_attester(epoch, validator_indices) + .await + }) .await .map_err(|e| Error::FailedToDownloadAttesters(e.to_string())) } @@ -1273,19 +1260,15 @@ async fn poll_beacon_proposers( if !local_pubkeys.is_empty() { let download_result = duties_service .beacon_nodes - .first_success( - RequireSynced::No, - OfflineOnFailure::Yes, - |beacon_node| async move { - let _timer = metrics::start_timer_vec( - &metrics::DUTIES_SERVICE_TIMES, - &[metrics::PROPOSER_DUTIES_HTTP_GET], - ); - beacon_node - .get_validator_duties_proposer(current_epoch) - .await - }, - ) + .first_success(|beacon_node| async move { + let _timer = metrics::start_timer_vec( + &metrics::DUTIES_SERVICE_TIMES, + &[metrics::PROPOSER_DUTIES_HTTP_GET], + ); + beacon_node + .get_validator_duties_proposer(current_epoch) + .await + }) .await; match download_result { diff --git a/validator_client/src/duties_service/sync.rs b/validator_client/src/duties_service/sync.rs index 3618b47146..0bd99dc638 100644 --- a/validator_client/src/duties_service/sync.rs +++ b/validator_client/src/duties_service/sync.rs @@ -1,4 +1,3 @@ -use crate::beacon_node_fallback::{OfflineOnFailure, RequireSynced}; use crate::{ doppelganger_service::DoppelgangerStatus, duties_service::{DutiesService, Error}, @@ -442,19 +441,15 @@ pub async fn poll_sync_committee_duties_for_period for Error { pub struct Context { pub task_executor: TaskExecutor, pub api_secret: 
ApiSecret, + pub block_service: Option>, pub validator_store: Option>>, pub validator_dir: Option, pub secrets_dir: Option, @@ -169,6 +171,17 @@ pub fn serve( } }; + let inner_block_service = ctx.block_service.clone(); + let block_service_filter = warp::any() + .map(move || inner_block_service.clone()) + .and_then(|block_service: Option<_>| async move { + block_service.ok_or_else(|| { + warp_utils::reject::custom_not_found( + "block service is not initialized.".to_string(), + ) + }) + }); + let inner_validator_store = ctx.validator_store.clone(); let validator_store_filter = warp::any() .map(move || inner_validator_store.clone()) @@ -398,6 +411,40 @@ pub fn serve( }, ); + // GET lighthouse/ui/fallback_health + let get_lighthouse_ui_fallback_health = warp::path("lighthouse") + .and(warp::path("ui")) + .and(warp::path("fallback_health")) + .and(warp::path::end()) + .and(block_service_filter.clone()) + .then(|block_filter: BlockService| async move { + let mut result: HashMap> = HashMap::new(); + + let mut beacon_nodes = Vec::new(); + for node in &*block_filter.beacon_nodes.candidates.read().await { + beacon_nodes.push(CandidateInfo { + index: node.index, + endpoint: node.beacon_node.to_string(), + health: *node.health.read().await, + }); + } + result.insert("beacon_nodes".to_string(), beacon_nodes); + + if let Some(proposer_nodes_list) = &block_filter.proposer_nodes { + let mut proposer_nodes = Vec::new(); + for node in &*proposer_nodes_list.candidates.read().await { + proposer_nodes.push(CandidateInfo { + index: node.index, + endpoint: node.beacon_node.to_string(), + health: *node.health.read().await, + }); + } + result.insert("proposer_nodes".to_string(), proposer_nodes); + } + + blocking_json_task(move || Ok(api_types::GenericResponse::from(result))).await + }); + // POST lighthouse/validators/ let post_validators = warp::path("lighthouse") .and(warp::path("validators")) @@ -1253,6 +1300,7 @@ pub fn serve( .or(get_lighthouse_validators_pubkey) 
.or(get_lighthouse_ui_health) .or(get_lighthouse_ui_graffiti) + .or(get_lighthouse_ui_fallback_health) .or(get_fee_recipient) .or(get_gas_limit) .or(get_graffiti) diff --git a/validator_client/src/http_api/test_utils.rs b/validator_client/src/http_api/test_utils.rs index 6c0e8b1617..119c611553 100644 --- a/validator_client/src/http_api/test_utils.rs +++ b/validator_client/src/http_api/test_utils.rs @@ -127,6 +127,7 @@ impl ApiTester { let context = Arc::new(Context { task_executor: test_runtime.task_executor.clone(), api_secret, + block_service: None, validator_dir: Some(validator_dir.path().into()), secrets_dir: Some(secrets_dir.path().into()), validator_store: Some(validator_store.clone()), diff --git a/validator_client/src/http_api/tests.rs b/validator_client/src/http_api/tests.rs index 98fbc854ae..ba3b7f685b 100644 --- a/validator_client/src/http_api/tests.rs +++ b/validator_client/src/http_api/tests.rs @@ -115,6 +115,7 @@ impl ApiTester { let context = Arc::new(Context { task_executor: test_runtime.task_executor.clone(), api_secret, + block_service: None, validator_dir: Some(validator_dir.path().into()), secrets_dir: Some(secrets_dir.path().into()), validator_store: Some(validator_store.clone()), diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index dff50582df..9a02ffdefb 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -1,5 +1,6 @@ mod attestation_service; mod beacon_node_fallback; +mod beacon_node_health; mod block_service; mod check_synced; mod cli; @@ -20,6 +21,7 @@ pub mod initialized_validators; pub mod validator_store; pub use beacon_node_fallback::ApiTopic; +pub use beacon_node_health::BeaconNodeSyncDistanceTiers; pub use cli::cli_app; pub use config::Config; use initialized_validators::InitializedValidators; @@ -29,8 +31,7 @@ use sensitive_url::SensitiveUrl; pub use slashing_protection::{SlashingDatabase, SLASHING_PROTECTION_FILENAME}; use crate::beacon_node_fallback::{ - 
start_fallback_updater_service, BeaconNodeFallback, CandidateBeaconNode, OfflineOnFailure, - RequireSynced, + start_fallback_updater_service, BeaconNodeFallback, CandidateBeaconNode, }; use crate::doppelganger_service::DoppelgangerService; use crate::graffiti_file::GraffitiFile; @@ -364,15 +365,21 @@ impl ProductionValidatorClient { .collect::, String>>()?; let num_nodes = beacon_nodes.len(); + // User order of `beacon_nodes` is preserved, so `index` corresponds to the position of + // the node in `--beacon_nodes`. let candidates = beacon_nodes .into_iter() - .map(CandidateBeaconNode::new) + .enumerate() + .map(|(index, node)| CandidateBeaconNode::new(node, index)) .collect(); let proposer_nodes_num = proposer_nodes.len(); + // User order of `proposer_nodes` is preserved, so `index` corresponds to the position of + // the node in `--proposer_nodes`. let proposer_candidates = proposer_nodes .into_iter() - .map(CandidateBeaconNode::new) + .enumerate() + .map(|(index, node)| CandidateBeaconNode::new(node, index)) .collect(); // Set the count for beacon node fallbacks excluding the primary beacon node. 
@@ -394,6 +401,7 @@ impl ProductionValidatorClient { let mut beacon_nodes: BeaconNodeFallback<_, E> = BeaconNodeFallback::new( candidates, + config.beacon_node_fallback, config.broadcast_topics.clone(), context.eth2_config.spec.clone(), log.clone(), @@ -401,6 +409,7 @@ impl ProductionValidatorClient { let mut proposer_nodes: BeaconNodeFallback<_, E> = BeaconNodeFallback::new( proposer_candidates, + config.beacon_node_fallback, config.broadcast_topics.clone(), context.eth2_config.spec.clone(), log.clone(), @@ -563,6 +572,7 @@ impl ProductionValidatorClient { let ctx = Arc::new(http_api::Context { task_executor: self.context.executor.clone(), api_secret, + block_service: Some(self.block_service.clone()), validator_store: Some(self.validator_store.clone()), validator_dir: Some(self.config.validator_dir.clone()), secrets_dir: Some(self.config.secrets_dir.clone()), @@ -655,10 +665,10 @@ async fn init_from_beacon_node( proposer_nodes.update_all_candidates().await; let num_available = beacon_nodes.num_available().await; - let num_total = beacon_nodes.num_total(); + let num_total = beacon_nodes.num_total().await; let proposer_available = proposer_nodes.num_available().await; - let proposer_total = proposer_nodes.num_total(); + let proposer_total = proposer_nodes.num_total().await; if proposer_total > 0 && proposer_available == 0 { warn!( @@ -704,11 +714,7 @@ async fn init_from_beacon_node( let genesis = loop { match beacon_nodes - .first_success( - RequireSynced::No, - OfflineOnFailure::Yes, - |node| async move { node.get_beacon_genesis().await }, - ) + .first_success(|node| async move { node.get_beacon_genesis().await }) .await { Ok(genesis) => break genesis.data, @@ -795,11 +801,7 @@ async fn poll_whilst_waiting_for_genesis( ) -> Result<(), String> { loop { match beacon_nodes - .first_success( - RequireSynced::No, - OfflineOnFailure::Yes, - |beacon_node| async move { beacon_node.get_lighthouse_staking().await }, - ) + .first_success(|beacon_node| async move { 
beacon_node.get_lighthouse_staking().await }) .await { Ok(is_staking) => { diff --git a/validator_client/src/notifier.rs b/validator_client/src/notifier.rs index 819201978f..00d7b14de7 100644 --- a/validator_client/src/notifier.rs +++ b/validator_client/src/notifier.rs @@ -1,7 +1,7 @@ use crate::http_metrics; use crate::{DutiesService, ProductionValidatorClient}; use lighthouse_metrics::set_gauge; -use slog::{error, info, Logger}; +use slog::{debug, error, info, Logger}; use slot_clock::SlotClock; use tokio::time::{sleep, Duration}; use types::EthSpec; @@ -39,25 +39,32 @@ async fn notify( duties_service: &DutiesService, log: &Logger, ) { - let num_available = duties_service.beacon_nodes.num_available().await; + let (candidate_info, num_available, num_synced) = + duties_service.beacon_nodes.get_notifier_info().await; + let num_total = candidate_info.len(); + let num_synced_fallback = num_synced.saturating_sub(1); + set_gauge( &http_metrics::metrics::AVAILABLE_BEACON_NODES_COUNT, num_available as i64, ); - let num_synced = duties_service.beacon_nodes.num_synced().await; set_gauge( &http_metrics::metrics::SYNCED_BEACON_NODES_COUNT, num_synced as i64, ); - let num_total = duties_service.beacon_nodes.num_total(); set_gauge( &http_metrics::metrics::TOTAL_BEACON_NODES_COUNT, num_total as i64, ); if num_synced > 0 { + let primary = candidate_info + .first() + .map(|candidate| candidate.endpoint.as_str()) + .unwrap_or("None"); info!( log, "Connected to beacon node(s)"; + "primary" => primary, "total" => num_total, "available" => num_available, "synced" => num_synced, @@ -71,13 +78,36 @@ async fn notify( "synced" => num_synced, ) } - let num_synced_fallback = duties_service.beacon_nodes.num_synced_fallback().await; if num_synced_fallback > 0 { set_gauge(&http_metrics::metrics::ETH2_FALLBACK_CONNECTED, 1); } else { set_gauge(&http_metrics::metrics::ETH2_FALLBACK_CONNECTED, 0); } + for info in candidate_info { + if let Ok(health) = info.health { + debug!( + log, + "Beacon node 
info"; + "status" => "Connected", + "index" => info.index, + "endpoint" => info.endpoint, + "head_slot" => %health.head, + "is_optimistic" => ?health.optimistic_status, + "execution_engine_status" => ?health.execution_status, + "health_tier" => %health.health_tier, + ); + } else { + debug!( + log, + "Beacon node info"; + "status" => "Disconnected", + "index" => info.index, + "endpoint" => info.endpoint, + ); + } + } + if let Some(slot) = duties_service.slot_clock.now() { let epoch = slot.epoch(E::slots_per_epoch()); diff --git a/validator_client/src/preparation_service.rs b/validator_client/src/preparation_service.rs index 474f9f4760..010c651c25 100644 --- a/validator_client/src/preparation_service.rs +++ b/validator_client/src/preparation_service.rs @@ -1,6 +1,5 @@ -use crate::beacon_node_fallback::{ApiTopic, BeaconNodeFallback, RequireSynced}; +use crate::beacon_node_fallback::{ApiTopic, BeaconNodeFallback}; use crate::validator_store::{DoppelgangerStatus, Error as ValidatorStoreError, ValidatorStore}; -use crate::OfflineOnFailure; use bls::PublicKeyBytes; use environment::RuntimeContext; use parking_lot::RwLock; @@ -342,16 +341,11 @@ impl PreparationService { let preparation_entries = preparation_data.as_slice(); match self .beacon_nodes - .request( - RequireSynced::No, - OfflineOnFailure::Yes, - ApiTopic::Subscriptions, - |beacon_node| async move { - beacon_node - .post_validator_prepare_beacon_proposer(preparation_entries) - .await - }, - ) + .request(ApiTopic::Subscriptions, |beacon_node| async move { + beacon_node + .post_validator_prepare_beacon_proposer(preparation_entries) + .await + }) .await { Ok(()) => debug!( @@ -477,13 +471,9 @@ impl PreparationService { for batch in signed.chunks(self.validator_registration_batch_size) { match self .beacon_nodes - .broadcast( - RequireSynced::No, - OfflineOnFailure::No, - |beacon_node| async move { - beacon_node.post_validator_register_validator(batch).await - }, - ) + .broadcast(|beacon_node| async move { + 
beacon_node.post_validator_register_validator(batch).await + }) .await { Ok(()) => info!( diff --git a/validator_client/src/sync_committee_service.rs b/validator_client/src/sync_committee_service.rs index f7abb3855a..5c02998e3f 100644 --- a/validator_client/src/sync_committee_service.rs +++ b/validator_client/src/sync_committee_service.rs @@ -1,8 +1,7 @@ -use crate::beacon_node_fallback::{ApiTopic, BeaconNodeFallback, RequireSynced}; +use crate::beacon_node_fallback::{ApiTopic, BeaconNodeFallback}; use crate::{ duties_service::DutiesService, validator_store::{Error as ValidatorStoreError, ValidatorStore}, - OfflineOnFailure, }; use environment::RuntimeContext; use eth2::types::BlockId; @@ -180,8 +179,6 @@ impl SyncCommitteeService { let response = self .beacon_nodes .first_success( - RequireSynced::No, - OfflineOnFailure::Yes, |beacon_node| async move { match beacon_node.get_beacon_blocks_root(BlockId::Head).await { Ok(Some(block)) if block.execution_optimistic == Some(false) => { @@ -299,16 +296,11 @@ impl SyncCommitteeService { .collect::>(); self.beacon_nodes - .request( - RequireSynced::No, - OfflineOnFailure::Yes, - ApiTopic::SyncCommittee, - |beacon_node| async move { - beacon_node - .post_beacon_pool_sync_committee_signatures(committee_signatures) - .await - }, - ) + .request(ApiTopic::SyncCommittee, |beacon_node| async move { + beacon_node + .post_beacon_pool_sync_committee_signatures(committee_signatures) + .await + }) .await .map_err(|e| { error!( @@ -371,21 +363,17 @@ impl SyncCommitteeService { let contribution = &self .beacon_nodes - .first_success( - RequireSynced::No, - OfflineOnFailure::Yes, - |beacon_node| async move { - let sync_contribution_data = SyncContributionData { - slot, - beacon_block_root, - subcommittee_index: subnet_id.into(), - }; + .first_success(|beacon_node| async move { + let sync_contribution_data = SyncContributionData { + slot, + beacon_block_root, + subcommittee_index: subnet_id.into(), + }; - beacon_node - 
.get_validator_sync_committee_contribution::(&sync_contribution_data) - .await - }, - ) + beacon_node + .get_validator_sync_committee_contribution::(&sync_contribution_data) + .await + }) .await .map_err(|e| { crit!( @@ -453,15 +441,11 @@ impl SyncCommitteeService { // Publish to the beacon node. self.beacon_nodes - .first_success( - RequireSynced::No, - OfflineOnFailure::Yes, - |beacon_node| async move { - beacon_node - .post_validator_contribution_and_proofs(signed_contributions) - .await - }, - ) + .first_success(|beacon_node| async move { + beacon_node + .post_validator_contribution_and_proofs(signed_contributions) + .await + }) .await .map_err(|e| { error!( @@ -595,16 +579,11 @@ impl SyncCommitteeService { if let Err(e) = self .beacon_nodes - .request( - RequireSynced::No, - OfflineOnFailure::Yes, - ApiTopic::Subscriptions, - |beacon_node| async move { - beacon_node - .post_validator_sync_committee_subscriptions(subscriptions_slice) - .await - }, - ) + .request(ApiTopic::Subscriptions, |beacon_node| async move { + beacon_node + .post_validator_sync_committee_subscriptions(subscriptions_slice) + .await + }) .await { error!( From a4a673b780af257018db6d0019af0f454dbe7f5b Mon Sep 17 00:00:00 2001 From: Akihito Nakano Date: Thu, 3 Oct 2024 22:53:36 +0900 Subject: [PATCH 40/66] Output network-test logs into files in CI (#6355) * Add ci_logger * Update artifact name * Add env var * Add fork_name * Fix clippy error * Add comments --- .github/workflows/test-suite.yml | 11 ++++ beacon_node/beacon_chain/src/test_utils.rs | 62 ++++++++++++++++--- beacon_node/network/Cargo.toml | 1 + .../network/src/sync/block_lookups/tests.rs | 12 +++- 4 files changed, 74 insertions(+), 12 deletions(-) diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index 769b889de4..aff9a71b4a 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -173,8 +173,19 @@ jobs: channel: stable cache-target: release bins: cargo-nextest + - name: 
Create CI logger dir + run: mkdir ${{ runner.temp }}/network_test_logs - name: Run network tests for all known forks run: make test-network + env: + TEST_FEATURES: portable,ci_logger + CI_LOGGER_DIR: ${{ runner.temp }}/network_test_logs + - name: Upload logs + uses: actions/upload-artifact@v4 + with: + name: network_test_logs + path: ${{ runner.temp }}/network_test_logs + slasher-tests: name: slasher-tests needs: [check-labels] diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index ce36c8ca21..344820c6a2 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -43,13 +43,15 @@ use rayon::prelude::*; use sensitive_url::SensitiveUrl; use slog::{o, Drain, Logger}; use slog_async::Async; -use slog_term::{FullFormat, TermDecorator}; +use slog_term::{FullFormat, PlainSyncDecorator, TermDecorator}; use slot_clock::{SlotClock, TestingSlotClock}; use state_processing::per_block_processing::compute_timestamp_at_slot; use state_processing::state_advance::complete_state_advance; use std::borrow::Cow; use std::collections::{HashMap, HashSet}; use std::fmt; +use std::fs::{File, OpenOptions}; +use std::io::BufWriter; use std::str::FromStr; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::{Arc, LazyLock}; @@ -68,6 +70,8 @@ use types::{typenum::U4294967296, *}; pub const HARNESS_GENESIS_TIME: u64 = 1_567_552_690; // Environment variable to read if `fork_from_env` feature is enabled. pub const FORK_NAME_ENV_VAR: &str = "FORK_NAME"; +// Environment variable to read if `ci_logger` feature is enabled. +pub const CI_LOGGER_DIR_ENV_VAR: &str = "CI_LOGGER_DIR"; // Default target aggregators to set during testing, this ensures an aggregator at each slot. 
// @@ -2750,15 +2754,55 @@ pub struct MakeAttestationOptions { pub fork: Fork, } -pub fn build_log(level: slog::Level, enabled: bool) -> Logger { - let decorator = TermDecorator::new().build(); - let drain = FullFormat::new(decorator).build().fuse(); - let drain = Async::new(drain).build().fuse(); +pub enum LoggerType { + Test, + // The logs are output to files for each test. + CI, + // No logs will be printed. + Null, +} - if enabled { - Logger::root(drain.filter_level(level).fuse(), o!()) - } else { - Logger::root(drain.filter(|_| false).fuse(), o!()) +fn ci_decorator() -> PlainSyncDecorator> { + let log_dir = std::env::var(CI_LOGGER_DIR_ENV_VAR).unwrap_or_else(|e| { + panic!("{CI_LOGGER_DIR_ENV_VAR} env var must be defined when using ci_logger: {e:?}"); + }); + let fork_name = std::env::var(FORK_NAME_ENV_VAR) + .map(|s| format!("{s}_")) + .unwrap_or_default(); + // The current test name can be got via the thread name. + let test_name = std::thread::current() + .name() + .unwrap() + .to_string() + // Colons are not allowed in files that are uploaded to GitHub Artifacts. 
+ .replace("::", "_"); + let log_path = format!("/{log_dir}/{fork_name}{test_name}.log"); + let file = OpenOptions::new() + .create(true) + .append(true) + .open(log_path) + .unwrap(); + let file = BufWriter::new(file); + PlainSyncDecorator::new(file) +} + +pub fn build_log(level: slog::Level, logger_type: LoggerType) -> Logger { + match logger_type { + LoggerType::Test => { + let drain = FullFormat::new(TermDecorator::new().build()).build().fuse(); + let drain = Async::new(drain).build().fuse(); + Logger::root(drain.filter_level(level).fuse(), o!()) + } + LoggerType::CI => { + let drain = FullFormat::new(ci_decorator()).build().fuse(); + let drain = Async::new(drain).build().fuse(); + Logger::root(drain.filter_level(level).fuse(), o!()) + } + LoggerType::Null => { + let drain = FullFormat::new(TermDecorator::new().build()).build().fuse(); + let drain = Async::new(drain).build().fuse(); + Logger::root(drain.filter(|_| false).fuse(), o!()) + } } } diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index fed346127f..4df1761732 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -57,3 +57,4 @@ disable-backfill = [] fork_from_env = ["beacon_chain/fork_from_env"] portable = ["beacon_chain/portable"] test_logger = [] +ci_logger = [] diff --git a/beacon_node/network/src/sync/block_lookups/tests.rs b/beacon_node/network/src/sync/block_lookups/tests.rs index ffbdd43b5f..4c73e8f8d0 100644 --- a/beacon_node/network/src/sync/block_lookups/tests.rs +++ b/beacon_node/network/src/sync/block_lookups/tests.rs @@ -15,7 +15,7 @@ use beacon_chain::data_availability_checker::Availability; use beacon_chain::eth1_chain::CachingEth1Backend; use beacon_chain::test_utils::{ build_log, generate_rand_block_and_blobs, generate_rand_block_and_data_columns, test_spec, - BeaconChainHarness, EphemeralHarnessType, NumBlobs, + BeaconChainHarness, EphemeralHarnessType, LoggerType, NumBlobs, }; use 
beacon_chain::validator_monitor::timestamp_now; use beacon_chain::{ @@ -103,8 +103,14 @@ struct TestRigConfig { impl TestRig { fn test_setup_with_config(config: Option) -> Self { - let enable_log = cfg!(feature = "test_logger"); - let log = build_log(slog::Level::Trace, enable_log); + let logger_type = if cfg!(feature = "test_logger") { + LoggerType::Test + } else if cfg!(feature = "ci_logger") { + LoggerType::CI + } else { + LoggerType::Null + }; + let log = build_log(slog::Level::Trace, logger_type); // Use `fork_from_env` logic to set correct fork epochs let mut spec = test_spec::(); From f3a5e256da6f18fd019478c66419f6f07d98d64d Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Fri, 4 Oct 2024 10:27:30 +1000 Subject: [PATCH 41/66] Implement Subnet Sampling for PeerDAS (#6410) * Add `SAMPLES_PER_SLOT` config. * Rename `sampling` module to `peer_sampling` * Implement subnet sampling. * Update lookup test. * Merge branch 'unstable' into subnet-sampling * Merge branch 'unstable' into subnet-sampling # Conflicts: # beacon_node/beacon_chain/src/data_availability_checker.rs # beacon_node/http_api/src/publish_blocks.rs # beacon_node/lighthouse_network/src/types/globals.rs # beacon_node/network/src/sync/manager.rs * Merge branch 'unstable' into subnet-sampling --- beacon_node/beacon_chain/src/beacon_chain.rs | 2 + .../src/data_availability_checker.rs | 22 ++++----- .../overflow_lru_cache.rs | 24 +++++----- .../beacon_chain/src/historical_blocks.rs | 4 +- beacon_node/http_api/src/publish_blocks.rs | 9 ++-- .../lighthouse_network/src/types/globals.rs | 48 ++++++++++++------- beacon_node/network/src/service.rs | 2 +- .../network/src/sync/block_lookups/tests.rs | 7 +-- beacon_node/network/src/sync/manager.rs | 2 +- beacon_node/network/src/sync/mod.rs | 2 +- .../network/src/sync/network_context.rs | 18 +++---- .../sync/{sampling.rs => peer_sampling.rs} | 0 .../network/src/sync/range_sync/chain.rs | 26 +++++----- .../chiado/config.yaml | 3 +- .../gnosis/config.yaml | 3 +- 
.../holesky/config.yaml | 3 +- .../mainnet/config.yaml | 3 +- .../sepolia/config.yaml | 3 +- consensus/types/src/chain_spec.rs | 18 ++++++- .../environment/tests/testnet_dir/config.yaml | 3 +- 20 files changed, 122 insertions(+), 80 deletions(-) rename beacon_node/network/src/sync/{sampling.rs => peer_sampling.rs} (100%) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 2262325642..13022b8269 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -3851,6 +3851,8 @@ impl BeaconChain { } if let Some(data_columns) = data_columns { + // TODO(das): `available_block includes all sampled columns, but we only need to store + // custody columns. To be clarified in spec. if !data_columns.is_empty() { debug!( self.log, "Writing data_columns to store"; diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs index 4d5afdc890..395f40c5db 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -108,13 +108,15 @@ impl DataAvailabilityChecker { spec.custody_requirement as usize }; - let custody_column_count = - custody_subnet_count.saturating_mul(spec.data_columns_per_subnet()); + let subnet_sampling_size = + std::cmp::max(custody_subnet_count, spec.samples_per_slot as usize); + let sampling_column_count = + subnet_sampling_size.saturating_mul(spec.data_columns_per_subnet()); let inner = DataAvailabilityCheckerInner::new( OVERFLOW_LRU_CAPACITY, store, - custody_column_count, + sampling_column_count, spec.clone(), )?; Ok(Self { @@ -125,10 +127,8 @@ impl DataAvailabilityChecker { }) } - pub fn get_custody_columns_count(&self) -> usize { - self.availability_cache - .custody_subnet_count() - .saturating_mul(self.spec.data_columns_per_subnet()) + pub fn get_sampling_column_count(&self) -> usize { + 
self.availability_cache.sampling_column_count() } /// Checks if the block root is currenlty in the availability cache awaiting import because @@ -141,9 +141,9 @@ impl DataAvailabilityChecker { .get_execution_valid_block(block_root) } - /// Return the set of imported blob indexes for `block_root`. Returns None if there is no block + /// Return the set of cached blob indexes for `block_root`. Returns None if there is no block /// component for `block_root`. - pub fn imported_blob_indexes(&self, block_root: &Hash256) -> Option> { + pub fn cached_blob_indexes(&self, block_root: &Hash256) -> Option> { self.availability_cache .peek_pending_components(block_root, |components| { components.map(|components| { @@ -156,9 +156,9 @@ impl DataAvailabilityChecker { }) } - /// Return the set of imported custody column indexes for `block_root`. Returns None if there is + /// Return the set of cached custody column indexes for `block_root`. Returns None if there is /// no block component for `block_root`. 
- pub fn imported_custody_column_indexes(&self, block_root: &Hash256) -> Option> { + pub fn cached_data_column_indexes(&self, block_root: &Hash256) -> Option> { self.availability_cache .peek_pending_components(block_root, |components| { components.map(|components| components.get_cached_data_columns_indices()) diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index 46ab08a821..8f91bf34fc 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -40,7 +40,7 @@ pub struct PendingComponents { pub enum BlockImportRequirement { AllBlobs, - CustodyColumns(usize), + ColumnSampling(usize), } impl PendingComponents { @@ -210,7 +210,7 @@ impl PendingComponents { .map_or(false, |num_expected_blobs| { num_expected_blobs == self.num_received_blobs() }), - BlockImportRequirement::CustodyColumns(num_expected_columns) => { + BlockImportRequirement::ColumnSampling(num_expected_columns) => { let num_received_data_columns = self.num_received_data_columns(); // No data columns when there are 0 blobs self.num_expected_blobs() @@ -281,7 +281,7 @@ impl PendingComponents { }; (Some(VariableList::new(verified_blobs)?), None) } - BlockImportRequirement::CustodyColumns(_) => { + BlockImportRequirement::ColumnSampling(_) => { let verified_data_columns = verified_data_columns .into_iter() .map(|d| d.into_inner()) @@ -353,8 +353,8 @@ pub struct DataAvailabilityCheckerInner { /// This cache holds a limited number of states in memory and reconstructs them /// from disk when necessary. This is necessary until we merge tree-states state_cache: StateLRUCache, - /// The number of data columns the node is custodying. - custody_column_count: usize, + /// The number of data columns the node is sampling via subnet sampling. 
+ sampling_column_count: usize, spec: Arc, } @@ -362,19 +362,19 @@ impl DataAvailabilityCheckerInner { pub fn new( capacity: NonZeroUsize, beacon_store: BeaconStore, - custody_column_count: usize, + sampling_column_count: usize, spec: Arc, ) -> Result { Ok(Self { critical: RwLock::new(LruCache::new(capacity)), state_cache: StateLRUCache::new(beacon_store, spec.clone()), - custody_column_count, + sampling_column_count, spec, }) } - pub fn custody_subnet_count(&self) -> usize { - self.custody_column_count + pub fn sampling_column_count(&self) -> usize { + self.sampling_column_count } /// Returns true if the block root is known, without altering the LRU ordering @@ -440,8 +440,8 @@ impl DataAvailabilityCheckerInner { ) -> Result { let peer_das_enabled = self.spec.is_peer_das_enabled_for_epoch(epoch); if peer_das_enabled { - Ok(BlockImportRequirement::CustodyColumns( - self.custody_column_count, + Ok(BlockImportRequirement::ColumnSampling( + self.sampling_column_count, )) } else { Ok(BlockImportRequirement::AllBlobs) @@ -456,7 +456,7 @@ impl DataAvailabilityCheckerInner { block_import_requirement: &BlockImportRequirement, pending_components: &PendingComponents, ) -> bool { - let BlockImportRequirement::CustodyColumns(num_expected_columns) = block_import_requirement + let BlockImportRequirement::ColumnSampling(num_expected_columns) = block_import_requirement else { return false; }; diff --git a/beacon_node/beacon_chain/src/historical_blocks.rs b/beacon_node/beacon_chain/src/historical_blocks.rs index 1372211b17..a23b6ddc1e 100644 --- a/beacon_node/beacon_chain/src/historical_blocks.rs +++ b/beacon_node/beacon_chain/src/historical_blocks.rs @@ -94,7 +94,9 @@ impl BeaconChain { // Blobs are stored per block, and data columns are each stored individually let n_blob_ops_per_block = if self.spec.is_peer_das_scheduled() { - self.data_availability_checker.get_custody_columns_count() + // TODO(das): `available_block includes all sampled columns, but we only need to store + // 
custody columns. To be clarified in spec PR. + self.data_availability_checker.get_sampling_column_count() } else { 1 }; diff --git a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs index 16364b435a..fceeb2dd23 100644 --- a/beacon_node/http_api/src/publish_blocks.rs +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -389,18 +389,17 @@ pub async fn publish_block>( .count() > 0 { - let custody_columns_indices = &network_globals.custody_columns; - - let custody_columns = gossip_verified_data_columns + let sampling_columns_indices = &network_globals.sampling_columns; + let sampling_columns = gossip_verified_data_columns .into_iter() .flatten() - .filter(|data_column| custody_columns_indices.contains(&data_column.index())) + .filter(|data_column| sampling_columns_indices.contains(&data_column.index())) .collect(); // Importing the columns could trigger block import and network publication in the case // where the block was already seen on gossip. if let Err(e) = - Box::pin(chain.process_gossip_data_columns(custody_columns, publish_fn)).await + Box::pin(chain.process_gossip_data_columns(sampling_columns, publish_fn)).await { let msg = format!("Invalid data column: {e}"); return if let BroadcastValidation::Gossip = validation_level { diff --git a/beacon_node/lighthouse_network/src/types/globals.rs b/beacon_node/lighthouse_network/src/types/globals.rs index f271c9ff72..bcebd02a0e 100644 --- a/beacon_node/lighthouse_network/src/types/globals.rs +++ b/beacon_node/lighthouse_network/src/types/globals.rs @@ -26,9 +26,9 @@ pub struct NetworkGlobals { pub sync_state: RwLock, /// The current state of the backfill sync. pub backfill_state: RwLock, - /// The computed custody subnets and columns is stored to avoid re-computing. - pub custody_subnets: Vec, - pub custody_columns: Vec, + /// The computed sampling subnets and columns is stored to avoid re-computing. 
+ pub sampling_subnets: Vec, + pub sampling_columns: Vec, /// Network-related configuration. Immutable after initialization. pub config: Arc, /// Ethereum chain configuration. Immutable after initialization. @@ -45,24 +45,31 @@ impl NetworkGlobals { config: Arc, spec: Arc, ) -> Self { - let (custody_subnets, custody_columns) = if spec.is_peer_das_scheduled() { + let (sampling_subnets, sampling_columns) = if spec.is_peer_das_scheduled() { + let node_id = enr.node_id().raw(); + let custody_subnet_count = local_metadata .custody_subnet_count() .copied() .expect("custody subnet count must be set if PeerDAS is scheduled"); - let custody_subnets = DataColumnSubnetId::compute_custody_subnets::( - enr.node_id().raw(), - custody_subnet_count, + + let subnet_sampling_size = std::cmp::max(custody_subnet_count, spec.samples_per_slot); + + let sampling_subnets = DataColumnSubnetId::compute_custody_subnets::( + node_id, + subnet_sampling_size, &spec, ) - .expect("custody subnet count must be valid") + .expect("sampling subnet count must be valid") .collect::>(); - let custody_columns = custody_subnets + + let sampling_columns = sampling_subnets .iter() .flat_map(|subnet| subnet.columns::(&spec)) .sorted() .collect(); - (custody_subnets, custody_columns) + + (sampling_subnets, sampling_columns) } else { (vec![], vec![]) }; @@ -76,8 +83,8 @@ impl NetworkGlobals { gossipsub_subscriptions: RwLock::new(HashSet::new()), sync_state: RwLock::new(SyncState::Stalled), backfill_state: RwLock::new(BackFillState::NotRequired), - custody_subnets, - custody_columns, + sampling_subnets, + sampling_columns, config, spec, } @@ -197,12 +204,13 @@ mod test { use types::{Epoch, EthSpec, MainnetEthSpec as E}; #[test] - fn test_custody_subnets() { + fn test_sampling_subnets() { let log = logging::test_logger(); let mut spec = E::default_spec(); spec.eip7594_fork_epoch = Some(Epoch::new(0)); let custody_subnet_count = spec.data_column_sidecar_subnet_count / 2; + let subnet_sampling_size = 
std::cmp::max(custody_subnet_count, spec.samples_per_slot); let metadata = get_metadata(custody_subnet_count); let config = Arc::new(NetworkConfig::default()); @@ -213,17 +221,20 @@ mod test { config, Arc::new(spec), ); - assert_eq!(globals.custody_subnets.len(), custody_subnet_count as usize); + assert_eq!( + globals.sampling_subnets.len(), + subnet_sampling_size as usize + ); } #[test] - fn test_custody_columns() { + fn test_sampling_columns() { let log = logging::test_logger(); let mut spec = E::default_spec(); spec.eip7594_fork_epoch = Some(Epoch::new(0)); let custody_subnet_count = spec.data_column_sidecar_subnet_count / 2; - let custody_columns_count = spec.number_of_columns / 2; + let subnet_sampling_size = std::cmp::max(custody_subnet_count, spec.samples_per_slot); let metadata = get_metadata(custody_subnet_count); let config = Arc::new(NetworkConfig::default()); @@ -234,7 +245,10 @@ mod test { config, Arc::new(spec), ); - assert_eq!(globals.custody_columns.len(), custody_columns_count); + assert_eq!( + globals.sampling_columns.len(), + subnet_sampling_size as usize + ); } fn get_metadata(custody_subnet_count: u64) -> MetaData { diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index f36d11ecdd..5a66cb7f30 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -813,7 +813,7 @@ impl NetworkService { } } } else { - for column_subnet in &self.network_globals.custody_subnets { + for column_subnet in &self.network_globals.sampling_subnets { for fork_digest in self.required_gossip_fork_digests() { let gossip_kind = Subnet::DataColumn(*column_subnet).into(); let topic = diff --git a/beacon_node/network/src/sync/block_lookups/tests.rs b/beacon_node/network/src/sync/block_lookups/tests.rs index 4c73e8f8d0..151333a2ef 100644 --- a/beacon_node/network/src/sync/block_lookups/tests.rs +++ b/beacon_node/network/src/sync/block_lookups/tests.rs @@ -1,6 +1,6 @@ use 
crate::network_beacon_processor::NetworkBeaconProcessor; use crate::sync::manager::{BlockProcessType, SyncManager}; -use crate::sync::sampling::SamplingConfig; +use crate::sync::peer_sampling::SamplingConfig; use crate::sync::{SamplingId, SyncMessage}; use crate::NetworkMessage; use std::sync::Arc; @@ -2037,9 +2037,10 @@ fn custody_lookup_happy_path() { // Should not request blobs let id = r.expect_block_lookup_request(block.canonical_root()); r.complete_valid_block_request(id, block.into(), true); - let custody_column_count = spec.custody_requirement * spec.data_columns_per_subnet() as u64; + // for each slot we download `samples_per_slot` columns + let sample_column_count = spec.samples_per_slot * spec.data_columns_per_subnet() as u64; let custody_ids = - r.expect_only_data_columns_by_root_requests(block_root, custody_column_count as usize); + r.expect_only_data_columns_by_root_requests(block_root, sample_column_count as usize); r.complete_valid_custody_request(custody_ids, data_columns, false); r.expect_no_active_lookups(); } diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index f141780484..c1f4fe54fb 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -38,9 +38,9 @@ use super::block_lookups::BlockLookups; use super::network_context::{ BlockOrBlob, CustodyByRootResult, RangeRequestId, RpcEvent, SyncNetworkContext, }; +use super::peer_sampling::{Sampling, SamplingConfig, SamplingResult}; use super::peer_sync_info::{remote_sync_type, PeerSyncType}; use super::range_sync::{RangeSync, RangeSyncType, EPOCHS_PER_BATCH}; -use super::sampling::{Sampling, SamplingConfig, SamplingResult}; use crate::network_beacon_processor::{ChainSegmentProcessId, NetworkBeaconProcessor}; use crate::service::NetworkMessage; use crate::status::ToStatusMessage; diff --git a/beacon_node/network/src/sync/mod.rs b/beacon_node/network/src/sync/mod.rs index 6669add445..1dca6f02ac 100644 --- 
a/beacon_node/network/src/sync/mod.rs +++ b/beacon_node/network/src/sync/mod.rs @@ -6,9 +6,9 @@ mod block_lookups; mod block_sidecar_coupling; pub mod manager; mod network_context; +mod peer_sampling; mod peer_sync_info; mod range_sync; -mod sampling; pub use lighthouse_network::service::api_types::SamplingId; pub use manager::{BatchProcessResult, SyncMessage}; diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index dc35a141d2..492b703f8a 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -418,13 +418,13 @@ impl SyncNetworkContext { false }; - let (expects_custody_columns, num_of_custody_column_req) = + let (expects_columns, num_of_column_req) = if matches!(batch_type, ByRangeRequestType::BlocksAndColumns) { - let custody_indexes = self.network_globals().custody_columns.clone(); + let column_indexes = self.network_globals().sampling_columns.clone(); let mut num_of_custody_column_req = 0; for (peer_id, columns_by_range_request) in - self.make_columns_by_range_requests(request, &custody_indexes)? + self.make_columns_by_range_requests(request, &column_indexes)? 
{ requested_peers.push(peer_id); @@ -448,15 +448,15 @@ impl SyncNetworkContext { num_of_custody_column_req += 1; } - (Some(custody_indexes), Some(num_of_custody_column_req)) + (Some(column_indexes), Some(num_of_custody_column_req)) } else { (None, None) }; let info = RangeBlockComponentsRequest::new( expected_blobs, - expects_custody_columns, - num_of_custody_column_req, + expects_columns, + num_of_column_req, requested_peers, ); self.range_block_components_requests @@ -668,7 +668,7 @@ impl SyncNetworkContext { let imported_blob_indexes = self .chain .data_availability_checker - .imported_blob_indexes(&block_root) + .cached_blob_indexes(&block_root) .unwrap_or_default(); // Include only the blob indexes not yet imported (received through gossip) let indices = (0..expected_blobs as u64) @@ -786,13 +786,13 @@ impl SyncNetworkContext { let custody_indexes_imported = self .chain .data_availability_checker - .imported_custody_column_indexes(&block_root) + .cached_data_column_indexes(&block_root) .unwrap_or_default(); // Include only the blob indexes not yet imported (received through gossip) let custody_indexes_to_fetch = self .network_globals() - .custody_columns + .sampling_columns .clone() .into_iter() .filter(|index| !custody_indexes_imported.contains(index)) diff --git a/beacon_node/network/src/sync/sampling.rs b/beacon_node/network/src/sync/peer_sampling.rs similarity index 100% rename from beacon_node/network/src/sync/sampling.rs rename to beacon_node/network/src/sync/peer_sampling.rs diff --git a/beacon_node/network/src/sync/range_sync/chain.rs b/beacon_node/network/src/sync/range_sync/chain.rs index ed5946ada7..732e4a7bd1 100644 --- a/beacon_node/network/src/sync/range_sync/chain.rs +++ b/beacon_node/network/src/sync/range_sync/chain.rs @@ -444,9 +444,9 @@ impl SyncingChain { self.request_batches(network)?; } } - } else if !self.good_peers_on_custody_subnets(self.processing_target, network) { + } else if 
!self.good_peers_on_sampling_subnets(self.processing_target, network) { // This is to handle the case where no batch was sent for the current processing - // target when there is no custody peers available. This is a valid state and should not + // target when there is no sampling peers available. This is a valid state and should not // return an error. return Ok(KeepChain); } else { @@ -1075,10 +1075,10 @@ impl SyncingChain { // check if we have the batch for our optimistic start. If not, request it first. // We wait for this batch before requesting any other batches. if let Some(epoch) = self.optimistic_start { - if !self.good_peers_on_custody_subnets(epoch, network) { + if !self.good_peers_on_sampling_subnets(epoch, network) { debug!( self.log, - "Waiting for peers to be available on custody column subnets" + "Waiting for peers to be available on sampling column subnets" ); return Ok(KeepChain); } @@ -1107,14 +1107,18 @@ impl SyncingChain { Ok(KeepChain) } - /// Checks all custody column subnets for peers. Returns `true` if there is at least one peer in - /// every custody column subnet. - fn good_peers_on_custody_subnets(&self, epoch: Epoch, network: &SyncNetworkContext) -> bool { + /// Checks all sampling column subnets for peers. Returns `true` if there is at least one peer in + /// every sampling column subnet. 
+ fn good_peers_on_sampling_subnets( + &self, + epoch: Epoch, + network: &SyncNetworkContext, + ) -> bool { if network.chain.spec.is_peer_das_enabled_for_epoch(epoch) { - // Require peers on all custody column subnets before sending batches + // Require peers on all sampling column subnets before sending batches let peers_on_all_custody_subnets = network .network_globals() - .custody_subnets + .sampling_subnets .iter() .all(|subnet_id| { let peer_count = network @@ -1167,11 +1171,11 @@ impl SyncingChain { return None; } - // don't send batch requests until we have peers on custody subnets + // don't send batch requests until we have peers on sampling subnets // TODO(das): this is a workaround to avoid sending out excessive block requests because // block and data column requests are currently coupled. This can be removed once we find a // way to decouple the requests and do retries individually, see issue #6258. - if !self.good_peers_on_custody_subnets(self.to_be_downloaded, network) { + if !self.good_peers_on_sampling_subnets(self.to_be_downloaded, network) { debug!( self.log, "Waiting for peers to be available on custody column subnets" diff --git a/common/eth2_network_config/built_in_network_configs/chiado/config.yaml b/common/eth2_network_config/built_in_network_configs/chiado/config.yaml index 74fca4c501..1eca01bbee 100644 --- a/common/eth2_network_config/built_in_network_configs/chiado/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/chiado/config.yaml @@ -140,4 +140,5 @@ BLOB_SIDECAR_SUBNET_COUNT: 6 # DAS CUSTODY_REQUIREMENT: 4 DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 -NUMBER_OF_COLUMNS: 128 \ No newline at end of file +NUMBER_OF_COLUMNS: 128 +SAMPLES_PER_SLOT: 8 \ No newline at end of file diff --git a/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml b/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml index 07bd21b35c..500555a269 100644 --- 
a/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml @@ -123,4 +123,5 @@ BLOB_SIDECAR_SUBNET_COUNT: 6 # DAS CUSTODY_REQUIREMENT: 4 DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 -NUMBER_OF_COLUMNS: 128 \ No newline at end of file +NUMBER_OF_COLUMNS: 128 +SAMPLES_PER_SLOT: 8 \ No newline at end of file diff --git a/common/eth2_network_config/built_in_network_configs/holesky/config.yaml b/common/eth2_network_config/built_in_network_configs/holesky/config.yaml index 67f1e5b683..d67d77d3be 100644 --- a/common/eth2_network_config/built_in_network_configs/holesky/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/holesky/config.yaml @@ -127,4 +127,5 @@ BLOB_SIDECAR_SUBNET_COUNT: 6 # DAS CUSTODY_REQUIREMENT: 4 DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 -NUMBER_OF_COLUMNS: 128 \ No newline at end of file +NUMBER_OF_COLUMNS: 128 +SAMPLES_PER_SLOT: 8 \ No newline at end of file diff --git a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml index acf4d83f32..18591fecdc 100644 --- a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml @@ -149,4 +149,5 @@ BLOB_SIDECAR_SUBNET_COUNT: 6 # DAS CUSTODY_REQUIREMENT: 4 DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 -NUMBER_OF_COLUMNS: 128 \ No newline at end of file +NUMBER_OF_COLUMNS: 128 +SAMPLES_PER_SLOT: 8 \ No newline at end of file diff --git a/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml b/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml index 8b84d87010..b08a6180bf 100644 --- a/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml @@ -123,4 +123,5 @@ BLOB_SIDECAR_SUBNET_COUNT: 6 # DAS 
CUSTODY_REQUIREMENT: 4 DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 -NUMBER_OF_COLUMNS: 128 \ No newline at end of file +NUMBER_OF_COLUMNS: 128 +SAMPLES_PER_SLOT: 8 \ No newline at end of file diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index e31427121e..7e933eea19 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -198,6 +198,7 @@ pub struct ChainSpec { pub custody_requirement: u64, pub data_column_sidecar_subnet_count: u64, pub number_of_columns: usize, + pub samples_per_slot: u64, /* * Networking @@ -811,6 +812,7 @@ impl ChainSpec { custody_requirement: 4, data_column_sidecar_subnet_count: 128, number_of_columns: 128, + samples_per_slot: 8, /* * Network specific @@ -1132,6 +1134,7 @@ impl ChainSpec { custody_requirement: 4, data_column_sidecar_subnet_count: 128, number_of_columns: 128, + samples_per_slot: 8, /* * Network specific */ @@ -1382,6 +1385,9 @@ pub struct Config { #[serde(default = "default_number_of_columns")] #[serde(with = "serde_utils::quoted_u64")] number_of_columns: u64, + #[serde(default = "default_samples_per_slot")] + #[serde(with = "serde_utils::quoted_u64")] + samples_per_slot: u64, } fn default_bellatrix_fork_version() -> [u8; 4] { @@ -1521,17 +1527,21 @@ const fn default_maximum_gossip_clock_disparity_millis() -> u64 { } const fn default_custody_requirement() -> u64 { - 1 + 4 } const fn default_data_column_sidecar_subnet_count() -> u64 { - 32 + 128 } const fn default_number_of_columns() -> u64 { 128 } +const fn default_samples_per_slot() -> u64 { + 8 +} + fn max_blocks_by_root_request_common(max_request_blocks: u64) -> usize { let max_request_blocks = max_request_blocks as usize; RuntimeVariableList::::from_vec( @@ -1727,6 +1737,7 @@ impl Config { custody_requirement: spec.custody_requirement, data_column_sidecar_subnet_count: spec.data_column_sidecar_subnet_count, number_of_columns: spec.number_of_columns as u64, + samples_per_slot: spec.samples_per_slot, } } @@ 
-1802,6 +1813,7 @@ impl Config { custody_requirement, data_column_sidecar_subnet_count, number_of_columns, + samples_per_slot, } = self; if preset_base != E::spec_name().to_string().as_str() { @@ -1881,6 +1893,7 @@ impl Config { custody_requirement, data_column_sidecar_subnet_count, number_of_columns: number_of_columns as usize, + samples_per_slot, ..chain_spec.clone() }) @@ -2125,6 +2138,7 @@ mod yaml_tests { CUSTODY_REQUIREMENT: 1 DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 NUMBER_OF_COLUMNS: 128 + SAMPLES_PER_SLOT: 8 "#; let chain_spec: Config = serde_yaml::from_str(spec).unwrap(); diff --git a/lighthouse/environment/tests/testnet_dir/config.yaml b/lighthouse/environment/tests/testnet_dir/config.yaml index 84e8274f06..34e42a61f6 100644 --- a/lighthouse/environment/tests/testnet_dir/config.yaml +++ b/lighthouse/environment/tests/testnet_dir/config.yaml @@ -102,4 +102,5 @@ ATTESTATION_SUBNET_SHUFFLING_PREFIX_BITS: 3 # DAS CUSTODY_REQUIREMENT: 4 DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 -NUMBER_OF_COLUMNS: 128 \ No newline at end of file +NUMBER_OF_COLUMNS: 128 +SAMPLES_PER_SLOT: 8 \ No newline at end of file From 8cf686f5c11cbad19727885d07df5abfddeddb0f Mon Sep 17 00:00:00 2001 From: Akihito Nakano Date: Fri, 4 Oct 2024 12:00:32 +0900 Subject: [PATCH 42/66] Add test for ActiveSamplingRequest (#6307) * Add test for ActiveSamplingRequest * Fix the column_indexes field from the requested ones to the responded ones * Fix clippy errors * Move tests to tests.rs * Fix unused import * Fix clippy error * Merge branch 'unstable' into fork/add-test-for-active-sampling-request # Conflicts: # beacon_node/network/Cargo.toml # beacon_node/network/src/sync/sampling.rs * Merge branch 'unstable' into fork/add-test-for-active-sampling-request --- Cargo.lock | 1 + beacon_node/network/Cargo.toml | 1 + .../network/src/sync/block_lookups/tests.rs | 92 +++++++++++++++++++ beacon_node/network/src/sync/manager.rs | 14 +++ beacon_node/network/src/sync/peer_sampling.rs | 38 +++++++- 5 files changed, 
145 insertions(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index 44ca67e9b4..3a063e7e0e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5640,6 +5640,7 @@ dependencies = [ "async-channel", "beacon_chain", "beacon_processor", + "bls", "delay_map", "derivative", "error-chain", diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index 4df1761732..6d61bffe3d 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -15,6 +15,7 @@ eth2 = { workspace = true } gossipsub = { workspace = true } eth2_network_config = { workspace = true } kzg = { workspace = true } +bls = { workspace = true } [dependencies] alloy-primitives = { workspace = true } diff --git a/beacon_node/network/src/sync/block_lookups/tests.rs b/beacon_node/network/src/sync/block_lookups/tests.rs index 151333a2ef..cd4609e147 100644 --- a/beacon_node/network/src/sync/block_lookups/tests.rs +++ b/beacon_node/network/src/sync/block_lookups/tests.rs @@ -310,6 +310,13 @@ impl TestRig { ); } + fn expect_active_sampling(&mut self, block_root: &Hash256) { + assert!(self + .sync_manager + .active_sampling_requests() + .contains(block_root)); + } + fn expect_clean_finished_sampling(&mut self) { self.expect_empty_network(); self.expect_sampling_result_work(); @@ -1090,6 +1097,11 @@ impl TestRig { .unwrap_or_else(|e| panic!("Expected sampling result work: {e}")) } + fn expect_no_work_event(&mut self) { + self.drain_processor_rx(); + assert!(self.network_rx_queue.is_empty()); + } + fn expect_no_penalty_for(&mut self, peer_id: PeerId) { self.drain_network_rx(); let downscore_events = self @@ -1290,6 +1302,16 @@ impl TestRig { imported: false, }); } + + fn assert_sampling_request_status( + &self, + block_root: Hash256, + ongoing: &Vec, + no_peers: &Vec, + ) { + self.sync_manager + .assert_sampling_request_status(block_root, ongoing, no_peers) + } } #[test] @@ -2023,6 +2045,76 @@ fn sampling_avoid_retrying_same_peer() { r.expect_empty_network(); } +#[test] +fn 
sampling_batch_requests() { + let Some(mut r) = TestRig::test_setup_after_peerdas() else { + return; + }; + let _supernode = r.new_connected_supernode_peer(); + let (block, data_columns) = r.rand_block_and_data_columns(); + let block_root = block.canonical_root(); + r.trigger_sample_block(block_root, block.slot()); + + // Retrieve the sample request, which should be batched. + let (sync_request_id, column_indexes) = r + .expect_only_data_columns_by_root_requests(block_root, 1) + .pop() + .unwrap(); + assert_eq!(column_indexes.len(), SAMPLING_REQUIRED_SUCCESSES); + r.assert_sampling_request_status(block_root, &column_indexes, &vec![]); + + // Resolve the request. + r.complete_valid_sampling_column_requests( + vec![(sync_request_id, column_indexes.clone())], + data_columns, + ); + r.expect_clean_finished_sampling(); +} + +#[test] +fn sampling_batch_requests_not_enough_responses_returned() { + let Some(mut r) = TestRig::test_setup_after_peerdas() else { + return; + }; + let _supernode = r.new_connected_supernode_peer(); + let (block, data_columns) = r.rand_block_and_data_columns(); + let block_root = block.canonical_root(); + r.trigger_sample_block(block_root, block.slot()); + + // Retrieve the sample request, which should be batched. + let (sync_request_id, column_indexes) = r + .expect_only_data_columns_by_root_requests(block_root, 1) + .pop() + .unwrap(); + assert_eq!(column_indexes.len(), SAMPLING_REQUIRED_SUCCESSES); + + // The request status should be set to Sampling. + r.assert_sampling_request_status(block_root, &column_indexes, &vec![]); + + // Split the indexes to simulate the case where the supernode doesn't have the requested column. + let (_column_indexes_supernode_does_not_have, column_indexes_to_complete) = + column_indexes.split_at(1); + + // Complete the requests but only partially, so a NotEnoughResponsesReturned error occurs. 
+ let data_columns_to_complete = data_columns + .iter() + .filter(|d| column_indexes_to_complete.contains(&d.index)) + .cloned() + .collect::>(); + r.complete_data_columns_by_root_request( + (sync_request_id, column_indexes.clone()), + &data_columns_to_complete, + ); + + // The request status should be set to NoPeers since the supernode, the only peer, returned not enough responses. + r.assert_sampling_request_status(block_root, &vec![], &column_indexes); + + // The sampling request stalls. + r.expect_empty_network(); + r.expect_no_work_event(); + r.expect_active_sampling(&block_root); +} + #[test] fn custody_lookup_happy_path() { let Some(mut r) = TestRig::test_setup_after_peerdas() else { diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index c1f4fe54fb..708c4308b8 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -71,6 +71,9 @@ use std::time::Duration; use tokio::sync::mpsc; use types::{BlobSidecar, DataColumnSidecar, EthSpec, Hash256, SignedBeaconBlock, Slot}; +#[cfg(test)] +use types::ColumnIndex; + /// The number of slots ahead of us that is allowed before requesting a long-range (batch) Sync /// from a peer. If a peer is within this tolerance (forwards or backwards), it is treated as a /// fully sync'd peer. 
@@ -334,6 +337,17 @@ impl SyncManager { self.sampling.active_sampling_requests() } + #[cfg(test)] + pub(crate) fn assert_sampling_request_status( + &self, + block_root: Hash256, + ongoing: &Vec, + no_peers: &Vec, + ) { + self.sampling + .assert_sampling_request_status(block_root, ongoing, no_peers); + } + fn network_globals(&self) -> &NetworkGlobals { self.network.network_globals() } diff --git a/beacon_node/network/src/sync/peer_sampling.rs b/beacon_node/network/src/sync/peer_sampling.rs index 4d0fa509cd..086fb0ec8d 100644 --- a/beacon_node/network/src/sync/peer_sampling.rs +++ b/beacon_node/network/src/sync/peer_sampling.rs @@ -42,6 +42,18 @@ impl Sampling { self.requests.values().map(|r| r.block_root).collect() } + #[cfg(test)] + pub fn assert_sampling_request_status( + &self, + block_root: Hash256, + ongoing: &Vec, + no_peers: &Vec, + ) { + let requester = SamplingRequester::ImportedBlock(block_root); + let active_sampling_request = self.requests.get(&requester).unwrap(); + active_sampling_request.assert_sampling_request_status(ongoing, no_peers); + } + /// Create a new sampling request for a known block /// /// ### Returns @@ -220,6 +232,21 @@ impl ActiveSamplingRequest { } } + #[cfg(test)] + pub fn assert_sampling_request_status( + &self, + ongoing: &Vec, + no_peers: &Vec, + ) { + for idx in ongoing { + assert!(self.column_requests.get(idx).unwrap().is_ongoing()); + } + + for idx in no_peers { + assert!(self.column_requests.get(idx).unwrap().is_no_peers()); + } + } + /// Insert a downloaded column into an active sampling request. Then make progress on the /// entire request. 
/// @@ -253,10 +280,14 @@ impl ActiveSamplingRequest { match resp { Ok((mut resp_data_columns, seen_timestamp)) => { + let resp_column_indexes = resp_data_columns + .iter() + .map(|r| r.index) + .collect::>(); debug!(self.log, "Sample download success"; "block_root" => %self.block_root, - "column_indexes" => ?column_indexes, + "column_indexes" => ?resp_column_indexes, "count" => resp_data_columns.len() ); metrics::inc_counter_vec(&metrics::SAMPLE_DOWNLOAD_RESULT, &[metrics::SUCCESS]); @@ -598,6 +629,11 @@ mod request { } } + #[cfg(test)] + pub(crate) fn is_no_peers(&self) -> bool { + matches!(self.status, Status::NoPeers) + } + pub(crate) fn choose_peer( &mut self, cx: &SyncNetworkContext, From 1bd8f31545e8a22833781123852811beeffab37e Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 7 Oct 2024 20:41:52 +1100 Subject: [PATCH 43/66] Clean up temporary state flags while running (#6422) * Clean up temporary state flags while running * Add regression test * Simplify --- beacon_node/beacon_chain/tests/store_tests.rs | 57 ++++++++++++++++++- beacon_node/store/src/garbage_collection.rs | 3 +- beacon_node/store/src/hot_cold_store.rs | 9 +++ 3 files changed, 66 insertions(+), 3 deletions(-) diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 5d83d65efd..1a6b444319 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -2514,7 +2514,7 @@ async fn pruning_test( } #[tokio::test] -async fn garbage_collect_temp_states_from_failed_block() { +async fn garbage_collect_temp_states_from_failed_block_on_startup() { let db_path = tempdir().unwrap(); // Wrap these functions to ensure the variables are dropped before we try to open another @@ -2571,6 +2571,61 @@ async fn garbage_collect_temp_states_from_failed_block() { assert_eq!(store.iter_temporary_state_roots().count(), 0); } +#[tokio::test] +async fn 
garbage_collect_temp_states_from_failed_block_on_finalization() { + let db_path = tempdir().unwrap(); + + let store = get_store(&db_path); + let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); + + let slots_per_epoch = E::slots_per_epoch(); + + let genesis_state = harness.get_current_state(); + let block_slot = Slot::new(2 * slots_per_epoch); + let ((signed_block, _), state) = harness.make_block(genesis_state, block_slot).await; + + let (mut block, _) = (*signed_block).clone().deconstruct(); + + // Mutate the block to make it invalid, and re-sign it. + *block.state_root_mut() = Hash256::repeat_byte(0xff); + let proposer_index = block.proposer_index() as usize; + let block = Arc::new(block.sign( + &harness.validator_keypairs[proposer_index].sk, + &state.fork(), + state.genesis_validators_root(), + &harness.spec, + )); + + // The block should be rejected, but should store a bunch of temporary states. + harness.set_current_slot(block_slot); + harness + .process_block_result((block, None)) + .await + .unwrap_err(); + + assert_eq!( + store.iter_temporary_state_roots().count(), + block_slot.as_usize() - 1 + ); + + // Finalize the chain without the block, which should result in pruning of all temporary states. + let blocks_required_to_finalize = 3 * slots_per_epoch; + harness.advance_slot(); + harness + .extend_chain( + blocks_required_to_finalize as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + // Check that the finalization migration ran. + assert_ne!(store.get_split_slot(), 0); + + // Check that temporary states have been pruned. 
+ assert_eq!(store.iter_temporary_state_roots().count(), 0); +} + #[tokio::test] async fn weak_subjectivity_sync_easy() { let num_initial_slots = E::slots_per_epoch() * 11; diff --git a/beacon_node/store/src/garbage_collection.rs b/beacon_node/store/src/garbage_collection.rs index c70ef89869..5f8ed8f5e7 100644 --- a/beacon_node/store/src/garbage_collection.rs +++ b/beacon_node/store/src/garbage_collection.rs @@ -21,7 +21,6 @@ where .try_fold(vec![], |mut ops, state_root| { let state_root = state_root?; ops.push(StoreOp::DeleteState(state_root, None)); - ops.push(StoreOp::DeleteStateTemporaryFlag(state_root)); Result::<_, Error>::Ok(ops) })?; @@ -29,7 +28,7 @@ where debug!( self.log, "Garbage collecting {} temporary states", - delete_ops.len() / 2 + delete_ops.len() ); self.do_atomically_with_block_and_blobs_cache(delete_ops)?; } diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index ba288039d6..991f215210 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -1160,10 +1160,19 @@ impl, Cold: ItemStore> HotColdDB } StoreOp::DeleteState(state_root, slot) => { + // Delete the hot state summary. let state_summary_key = get_key_for_col(DBColumn::BeaconStateSummary.into(), state_root.as_slice()); key_value_batch.push(KeyValueStoreOp::DeleteKey(state_summary_key)); + // Delete the state temporary flag (if any). Temporary flags are commonly + // created by the state advance routine. 
+ let state_temp_key = get_key_for_col( + DBColumn::BeaconStateTemporary.into(), + state_root.as_slice(), + ); + key_value_batch.push(KeyValueStoreOp::DeleteKey(state_temp_key)); + if slot.map_or(true, |slot| slot % E::slots_per_epoch() == 0) { let state_key = get_key_for_col(DBColumn::BeaconState.into(), state_root.as_slice()); From 48dd3f385cd3ecde1baeb9135815f376d79b048a Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Mon, 7 Oct 2024 09:35:38 -0700 Subject: [PATCH 44/66] Filter out BlsToExecutionChange messages for 0x02 validators (#6464) * Filter out 0x02 validators from `get_bls_to_execution_changes` * Prune bls to execution changes that have a 0x02 credential * lint --- beacon_node/operation_pool/src/bls_to_execution_changes.rs | 2 +- beacon_node/operation_pool/src/lib.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/beacon_node/operation_pool/src/bls_to_execution_changes.rs b/beacon_node/operation_pool/src/bls_to_execution_changes.rs index 07fd72f02c..cbab97e719 100644 --- a/beacon_node/operation_pool/src/bls_to_execution_changes.rs +++ b/beacon_node/operation_pool/src/bls_to_execution_changes.rs @@ -113,7 +113,7 @@ impl BlsToExecutionChanges { .validators() .get(validator_index as usize) .map_or(true, |validator| { - let prune = validator.has_eth1_withdrawal_credential(spec) + let prune = validator.has_execution_withdrawal_credential(spec) && head_block .message() .body() diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index e6a61edc09..0b032b0c8a 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -585,7 +585,7 @@ impl OperationPool { && state .get_validator(address_change.as_inner().message.validator_index as usize) .map_or(false, |validator| { - !validator.has_eth1_withdrawal_credential(spec) + !validator.has_execution_withdrawal_credential(spec) }) }, |address_change| address_change.as_inner().clone(), From 
71c5388461df8e48bafb2e1ecd732eb29957ce03 Mon Sep 17 00:00:00 2001 From: Lion - dapplion <35266934+dapplion@users.noreply.github.com> Date: Wed, 9 Oct 2024 00:18:41 +0300 Subject: [PATCH 45/66] Transition block lookup sync to range sync (#6122) * Transition block lookup sync to range sync * Log unexpected state * Merge remote-tracking branch 'sigp/unstable' into lookup-to-range * Add docs * Merge remote-tracking branch 'sigp/unstable' into lookup-to-range --- beacon_node/beacon_chain/src/test_utils.rs | 4 +- .../src/network_beacon_processor/mod.rs | 5 +- .../network/src/sync/block_lookups/mod.rs | 67 ++++++++++--- .../sync/block_lookups/single_block_lookup.rs | 10 +- .../network/src/sync/block_lookups/tests.rs | 48 +++++++++- beacon_node/network/src/sync/manager.rs | 93 +++++++++++++++++-- .../network/src/sync/network_context.rs | 6 ++ .../network/src/sync/range_sync/range.rs | 3 + 8 files changed, 206 insertions(+), 30 deletions(-) diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 344820c6a2..9be3b4cc2f 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -2790,12 +2790,12 @@ pub fn build_log(level: slog::Level, logger_type: LoggerType) -> Logger { match logger_type { LoggerType::Test => { let drain = FullFormat::new(TermDecorator::new().build()).build().fuse(); - let drain = Async::new(drain).build().fuse(); + let drain = Async::new(drain).chan_size(10_000).build().fuse(); Logger::root(drain.filter_level(level).fuse(), o!()) } LoggerType::CI => { let drain = FullFormat::new(ci_decorator()).build().fuse(); - let drain = Async::new(drain).build().fuse(); + let drain = Async::new(drain).chan_size(10_000).build().fuse(); Logger::root(drain.filter_level(level).fuse(), o!()) } LoggerType::Null => { diff --git a/beacon_node/network/src/network_beacon_processor/mod.rs b/beacon_node/network/src/network_beacon_processor/mod.rs index 5ec6140828..04571e181d 100644 
--- a/beacon_node/network/src/network_beacon_processor/mod.rs +++ b/beacon_node/network/src/network_beacon_processor/mod.rs @@ -25,6 +25,7 @@ use std::sync::Arc; use std::time::Duration; use store::MemoryStore; use task_executor::TaskExecutor; +use tokio::sync::mpsc::UnboundedSender; use tokio::sync::mpsc::{self, error::TrySendError}; use types::*; @@ -831,7 +832,7 @@ impl NetworkBeaconProcessor { /// Send a message to `sync_tx`. /// /// Creates a log if there is an internal error. - fn send_sync_message(&self, message: SyncMessage) { + pub(crate) fn send_sync_message(&self, message: SyncMessage) { self.sync_tx.send(message).unwrap_or_else(|e| { debug!(self.log, "Could not send message to the sync service"; "error" => %e) @@ -859,6 +860,7 @@ impl NetworkBeaconProcessor> { // processor (but not much else). pub fn null_for_testing( network_globals: Arc>, + sync_tx: UnboundedSender>, chain: Arc>>, executor: TaskExecutor, log: Logger, @@ -871,7 +873,6 @@ impl NetworkBeaconProcessor> { } = <_>::default(); let (network_tx, _network_rx) = mpsc::unbounded_channel(); - let (sync_tx, _sync_rx) = mpsc::unbounded_channel(); let network_beacon_processor = Self { beacon_processor_send: beacon_processor_tx, diff --git a/beacon_node/network/src/sync/block_lookups/mod.rs b/beacon_node/network/src/sync/block_lookups/mod.rs index a9dbf11fd0..a89f533ecc 100644 --- a/beacon_node/network/src/sync/block_lookups/mod.rs +++ b/beacon_node/network/src/sync/block_lookups/mod.rs @@ -28,6 +28,7 @@ use super::network_context::{PeerGroup, RpcResponseError, SyncNetworkContext}; use crate::metrics; use crate::sync::block_lookups::common::ResponseType; use crate::sync::block_lookups::parent_chain::find_oldest_fork_ancestor; +use crate::sync::SyncMessage; use beacon_chain::block_verification_types::AsBlock; use beacon_chain::data_availability_checker::{ AvailabilityCheckError, AvailabilityCheckErrorCategory, @@ -55,7 +56,10 @@ mod tests; /// The maximum depth we will search for a parent block. 
In principle we should have sync'd any /// canonical chain to its head once the peer connects. A chain should not appear where it's depth /// is further back than the most recent head slot. -pub(crate) const PARENT_DEPTH_TOLERANCE: usize = SLOT_IMPORT_TOLERANCE * 2; +/// +/// Have the same value as range's sync tolerance to consider a peer synced. Once sync lookup +/// reaches the maximum depth it will force trigger range sync. +pub(crate) const PARENT_DEPTH_TOLERANCE: usize = SLOT_IMPORT_TOLERANCE; const FAILED_CHAINS_CACHE_EXPIRY_SECONDS: u64 = 60; pub const SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS: u8 = 4; @@ -254,22 +258,59 @@ impl BlockLookups { // blocks on top of A forming A -> C. The malicious peer forces us to fetch C // from it, which will result in parent A hitting the chain_too_long error. Then // the valid chain A -> B is dropped too. - if let Ok(block_to_drop) = find_oldest_fork_ancestor(parent_chains, chain_idx) { - // Drop all lookups descending from the child of the too long parent chain - if let Some((lookup_id, lookup)) = self + // + // `find_oldest_fork_ancestor` should never return Err, unwrapping to tip for + // complete-ness + let parent_chain_tip = parent_chain.tip; + let block_to_drop = + find_oldest_fork_ancestor(parent_chains, chain_idx).unwrap_or(parent_chain_tip); + // Drop all lookups descending from the child of the too long parent chain + if let Some((lookup_id, lookup)) = self + .single_block_lookups + .iter() + .find(|(_, l)| l.block_root() == block_to_drop) + { + // If a lookup chain is too long, we can't distinguish a valid chain from a + // malicious one. We must attempt to sync this chain to not lose liveness. If + // the chain grows too long, we stop lookup sync and transition this head to + // forward range sync. We need to tell range sync which head to sync to, and + // from which peers. The lookup of the very tip of this chain may contain zero + // peers if it's the parent-child lookup. 
So we do a bit of a trick here: + // - Tell range sync to sync to the tip's root (if available, else its ancestor) + // - But use all peers in the ancestor lookup, which should have at least one + // peer, and its peer set is a strict superset of the tip's lookup. + if let Some((_, tip_lookup)) = self .single_block_lookups .iter() - .find(|(_, l)| l.block_root() == block_to_drop) + .find(|(_, l)| l.block_root() == parent_chain_tip) { - for &peer_id in lookup.all_peers() { - cx.report_peer( - peer_id, - PeerAction::LowToleranceError, - "chain_too_long", - ); - } - self.drop_lookup_and_children(*lookup_id); + cx.send_sync_message(SyncMessage::AddPeersForceRangeSync { + peers: lookup.all_peers().copied().collect(), + head_slot: tip_lookup.peek_downloaded_block_slot(), + head_root: parent_chain_tip, + }); + } else { + // Should never happen, log error and continue the lookup drop + error!(self.log, "Unable to transition lookup to range sync"; + "error" => "Parent chain tip lookup not found", + "block_root" => ?parent_chain_tip + ); } + + // Do not downscore peers here. Because we can't distinguish a valid chain from + // a malicious one we may penalize honest peers for attempting to discover us a + // valid chain. Until blocks_by_range allows to specify a tip, for example with + // https://github.com/ethereum/consensus-specs/pull/3845 we will have poor + // attributability. A peer can send us garbage blocks over blocks_by_root, and + // then correct blocks via blocks_by_range. 
+ + self.drop_lookup_and_children(*lookup_id); + } else { + // Should never happen + error!(self.log, "Unable to transition lookup to range sync"; + "error" => "Block to drop lookup not found", + "block_root" => ?block_to_drop + ); } return false; diff --git a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs index 73ffcd4384..4e7268a72a 100644 --- a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs +++ b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs @@ -15,7 +15,7 @@ use std::time::{Duration, Instant}; use store::Hash256; use strum::IntoStaticStr; use types::blob_sidecar::FixedBlobSidecarList; -use types::{DataColumnSidecarList, EthSpec, SignedBeaconBlock}; +use types::{DataColumnSidecarList, EthSpec, SignedBeaconBlock, Slot}; // Dedicated enum for LookupResult to force its usage #[must_use = "LookupResult must be handled with on_lookup_result"] @@ -91,6 +91,14 @@ impl SingleBlockLookup { } } + /// Return the slot of this lookup's block if it's currently cached as `AwaitingProcessing` + pub fn peek_downloaded_block_slot(&self) -> Option { + self.block_request_state + .state + .peek_downloaded_data() + .map(|block| block.slot()) + } + /// Get the block root that is being requested. 
pub fn block_root(&self) -> Hash256 { self.block_root diff --git a/beacon_node/network/src/sync/block_lookups/tests.rs b/beacon_node/network/src/sync/block_lookups/tests.rs index cd4609e147..0ed624fc0d 100644 --- a/beacon_node/network/src/sync/block_lookups/tests.rs +++ b/beacon_node/network/src/sync/block_lookups/tests.rs @@ -1,6 +1,7 @@ use crate::network_beacon_processor::NetworkBeaconProcessor; use crate::sync::manager::{BlockProcessType, SyncManager}; use crate::sync::peer_sampling::SamplingConfig; +use crate::sync::range_sync::RangeSyncType; use crate::sync::{SamplingId, SyncMessage}; use crate::NetworkMessage; use std::sync::Arc; @@ -78,6 +79,8 @@ struct TestRig { network_rx: mpsc::UnboundedReceiver>, /// Stores all `NetworkMessage`s received from `network_recv`. (e.g. outgoing RPC requests) network_rx_queue: Vec>, + /// Receiver for `SyncMessage` from the network + sync_rx: mpsc::UnboundedReceiver>, /// To send `SyncMessage`. For sending RPC responses or block processing results to sync. 
sync_manager: SyncManager, /// To manipulate sync state and peer connection status @@ -137,6 +140,7 @@ impl TestRig { let chain = harness.chain.clone(); let (network_tx, network_rx) = mpsc::unbounded_channel(); + let (sync_tx, sync_rx) = mpsc::unbounded_channel::>(); // TODO(das): make the generation of the ENR use the deterministic rng to have consistent // column assignments let network_config = Arc::new(NetworkConfig::default()); @@ -148,13 +152,12 @@ impl TestRig { )); let (beacon_processor, beacon_processor_rx) = NetworkBeaconProcessor::null_for_testing( globals, + sync_tx, chain.clone(), harness.runtime.task_executor.clone(), log.clone(), ); - let (_sync_send, sync_recv) = mpsc::unbounded_channel::>(); - let fork_name = chain.spec.fork_name_at_slot::(chain.slot().unwrap()); // All current tests expect synced and EL online state @@ -168,13 +171,15 @@ impl TestRig { beacon_processor_rx_queue: vec![], network_rx, network_rx_queue: vec![], + sync_rx, rng, network_globals: beacon_processor.network_globals.clone(), sync_manager: SyncManager::new( chain, network_tx, beacon_processor.into(), - sync_recv, + // Pass empty recv not tied to any tx + mpsc::unbounded_channel().1, SamplingConfig::Custom { required_successes: vec![SAMPLING_REQUIRED_SUCCESSES], }, @@ -237,6 +242,13 @@ impl TestRig { self.send_sync_message(SyncMessage::SampleBlock(block_root, block_slot)) } + /// Drain all sync messages in the sync_rx attached to the beacon processor + fn drain_sync_rx(&mut self) { + while let Ok(sync_message) = self.sync_rx.try_recv() { + self.send_sync_message(sync_message); + } + } + fn rand_block(&mut self) -> SignedBeaconBlock { self.rand_block_and_blobs(NumBlobs::None).0 } @@ -293,6 +305,10 @@ impl TestRig { self.sync_manager.active_parent_lookups().len() } + fn active_range_sync_chain(&self) -> (RangeSyncType, Slot, Slot) { + self.sync_manager.get_range_sync_chains().unwrap().unwrap() + } + fn assert_single_lookups_count(&self, count: usize) { assert_eq!( 
self.active_single_lookups_count(), @@ -1696,7 +1712,18 @@ fn test_parent_lookup_too_deep_grow_ancestor() { ) } - rig.expect_penalty(peer_id, "chain_too_long"); + // Should create a new syncing chain + rig.drain_sync_rx(); + assert_eq!( + rig.active_range_sync_chain(), + ( + RangeSyncType::Head, + Slot::new(0), + Slot::new(PARENT_DEPTH_TOLERANCE as u64 - 1) + ) + ); + // Should not penalize peer, but network is not clear because of the blocks_by_range requests + rig.expect_no_penalty_for(peer_id); rig.assert_failed_chain(chain_hash); } @@ -1723,7 +1750,18 @@ fn test_parent_lookup_too_deep_grow_tip() { ); } - rig.expect_penalty(peer_id, "chain_too_long"); + // Should create a new syncing chain + rig.drain_sync_rx(); + assert_eq!( + rig.active_range_sync_chain(), + ( + RangeSyncType::Head, + Slot::new(0), + Slot::new(PARENT_DEPTH_TOLERANCE as u64 - 2) + ) + ); + // Should not penalize peer, but network is not clear because of the blocks_by_range requests + rig.expect_no_penalty_for(peer_id); rig.assert_failed_chain(tip.canonical_root()); } diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 708c4308b8..a2544b82b5 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -94,6 +94,15 @@ pub enum SyncMessage { /// A useful peer has been discovered. AddPeer(PeerId, SyncInfo), + /// Force trigger range sync for a set of peers given a head they claim to have imported. Used + /// by block lookup to trigger range sync if a parent chain grows too large. + AddPeersForceRangeSync { + peers: Vec, + head_root: Hash256, + /// Sync lookup may not know the Slot of this head. However this situation is very rare. + head_slot: Option, + }, + /// A block has been received from the RPC. 
RpcBlock { request_id: SyncRequestId, @@ -322,6 +331,13 @@ impl SyncManager { .collect() } + #[cfg(test)] + pub(crate) fn get_range_sync_chains( + &self, + ) -> Result, &'static str> { + self.range_sync.state() + } + #[cfg(test)] pub(crate) fn get_failed_chains(&mut self) -> Vec { self.block_lookups.get_failed_chains() @@ -376,11 +392,30 @@ impl SyncManager { let sync_type = remote_sync_type(&local, &remote, &self.chain); // update the state of the peer. - let should_add = self.update_peer_sync_state(&peer_id, &local, &remote, &sync_type); - - if matches!(sync_type, PeerSyncType::Advanced) && should_add { - self.range_sync - .add_peer(&mut self.network, local, peer_id, remote); + let is_still_connected = self.update_peer_sync_state(&peer_id, &local, &remote, &sync_type); + if is_still_connected { + match sync_type { + PeerSyncType::Behind => {} // Do nothing + PeerSyncType::Advanced => { + self.range_sync + .add_peer(&mut self.network, local, peer_id, remote); + } + PeerSyncType::FullySynced => { + // Sync considers this peer close enough to the head to not trigger range sync. + // Range sync handles well syncing large ranges of blocks, of a least a few blocks. + // However this peer may be in a fork that we should sync but we have not discovered + // yet. If the head of the peer is unknown, attempt block lookup first. If the + // unknown head turns out to be on a longer fork, it will trigger range sync. + // + // A peer should always be considered `Advanced` if its finalized root is + // unknown and ahead of ours, so we don't check for that root here. + // + // TODO: This fork-choice check is potentially duplicated, review code + if !self.chain.block_is_known_to_fork_choice(&remote.head_root) { + self.handle_unknown_block_root(peer_id, remote.head_root); + } + } + } } self.update_sync_state(); @@ -391,6 +426,44 @@ impl SyncManager { } } + /// Trigger range sync for a set of peers that claim to have imported a head unknown to us. 
+ fn add_peers_force_range_sync( + &mut self, + peers: &[PeerId], + head_root: Hash256, + head_slot: Option, + ) { + let status = self.chain.status_message(); + let local = SyncInfo { + head_slot: status.head_slot, + head_root: status.head_root, + finalized_epoch: status.finalized_epoch, + finalized_root: status.finalized_root, + }; + + let head_slot = head_slot.unwrap_or_else(|| { + debug!(self.log, + "On add peers force range sync assuming local head_slot"; + "local_head_slot" => local.head_slot, + "head_root" => ?head_root + ); + local.head_slot + }); + + let remote = SyncInfo { + head_slot, + head_root, + // Set finalized to same as local to trigger Head sync + finalized_epoch: local.finalized_epoch, + finalized_root: local.finalized_root, + }; + + for peer_id in peers { + self.range_sync + .add_peer(&mut self.network, local.clone(), *peer_id, remote.clone()); + } + } + /// Handles RPC errors related to requests that were emitted from the sync manager. fn inject_error(&mut self, peer_id: PeerId, request_id: SyncRequestId, error: RPCError) { trace!(self.log, "Sync manager received a failed RPC"); @@ -476,8 +549,7 @@ impl SyncManager { } /// Updates the syncing state of a peer. - /// Return whether the peer should be used for range syncing or not, according to its - /// connection status. 
+ /// Return true if the peer is still connected and known to the peers DB fn update_peer_sync_state( &mut self, peer_id: &PeerId, @@ -686,6 +758,13 @@ impl SyncManager { SyncMessage::AddPeer(peer_id, info) => { self.add_peer(peer_id, info); } + SyncMessage::AddPeersForceRangeSync { + peers, + head_root, + head_slot, + } => { + self.add_peers_force_range_sync(&peers, head_root, head_slot); + } SyncMessage::RpcBlock { request_id, peer_id, diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index 492b703f8a..b67c0bf2dd 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -7,6 +7,7 @@ pub use self::requests::{BlocksByRootSingleRequest, DataColumnsByRootSingleBlock use super::block_sidecar_coupling::RangeBlockComponentsRequest; use super::manager::BlockProcessType; use super::range_sync::{BatchId, ByRangeRequestType, ChainId}; +use super::SyncMessage; use crate::metrics; use crate::network_beacon_processor::NetworkBeaconProcessor; use crate::service::NetworkMessage; @@ -249,6 +250,11 @@ impl SyncNetworkContext { } } + pub fn send_sync_message(&mut self, sync_message: SyncMessage) { + self.network_beacon_processor + .send_sync_message(sync_message); + } + /// Returns the ids of all the requests made to the given peer_id. 
pub fn peer_disconnected(&mut self, peer_id: &PeerId) -> Vec { let failed_range_ids = diff --git a/beacon_node/network/src/sync/range_sync/range.rs b/beacon_node/network/src/sync/range_sync/range.rs index b88253c9e8..0ef99838de 100644 --- a/beacon_node/network/src/sync/range_sync/range.rs +++ b/beacon_node/network/src/sync/range_sync/range.rs @@ -386,6 +386,7 @@ where #[cfg(test)] mod tests { use crate::network_beacon_processor::NetworkBeaconProcessor; + use crate::sync::SyncMessage; use crate::NetworkMessage; use super::*; @@ -690,6 +691,7 @@ mod tests { log.new(o!("component" => "range")), ); let (network_tx, network_rx) = mpsc::unbounded_channel(); + let (sync_tx, _sync_rx) = mpsc::unbounded_channel::>(); let network_config = Arc::new(NetworkConfig::default()); let globals = Arc::new(NetworkGlobals::new_test_globals( Vec::new(), @@ -700,6 +702,7 @@ mod tests { let (network_beacon_processor, beacon_processor_rx) = NetworkBeaconProcessor::null_for_testing( globals.clone(), + sync_tx, chain.clone(), harness.runtime.task_executor.clone(), log.clone(), From 352a9cf054b442ae9140c0c399094ca264311dac Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Wed, 9 Oct 2024 16:11:24 -0700 Subject: [PATCH 46/66] Add lockbud task to CI (#6470) * Add lockbud task to CI * Allow unknown lint * Merge branch 'unstable' of https://github.com/sigp/lighthouse into lockbud * remove potential deadlock * ignore tokio util crate * Update image --- .github/workflows/test-suite.yml | 15 +++++++++++++++ beacon_node/eth1/src/service.rs | 3 ++- .../state_processing/src/consensus_context.rs | 2 ++ 3 files changed, 19 insertions(+), 1 deletion(-) diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index aff9a71b4a..7cda3e477d 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -54,6 +54,20 @@ jobs: done echo "skip_ci=$SKIP_CI" >> $GITHUB_OUTPUT + lockbud: + name: lockbud + runs-on: ubuntu-latest + container: + image: 
sigmaprime/lockbud:latest + steps: + - name: Checkout repository + uses: actions/checkout@v3 + - name: Install dependencies + run: apt update && apt install -y cmake + - name: Generate code coverage + run: | + cargo lockbud -k deadlock -b -l tokio_util + target-branch-check: name: target-branch-check runs-on: ubuntu-latest @@ -433,6 +447,7 @@ jobs: 'cargo-udeps', 'compile-with-beta-compiler', 'cli-check', + 'lockbud', ] steps: - uses: actions/checkout@v4 diff --git a/beacon_node/eth1/src/service.rs b/beacon_node/eth1/src/service.rs index a70a927307..71ab98a6a2 100644 --- a/beacon_node/eth1/src/service.rs +++ b/beacon_node/eth1/src/service.rs @@ -549,10 +549,11 @@ impl Service { /// Returns the number of deposits with valid signatures that have been observed. pub fn get_valid_signature_count(&self) -> Option { + let highest_safe_block = self.highest_safe_block()?; self.deposits() .read() .cache - .get_valid_signature_count(self.highest_safe_block()?) + .get_valid_signature_count(highest_safe_block) } /// Returns the number of deposits with valid signatures that have been observed, without diff --git a/consensus/state_processing/src/consensus_context.rs b/consensus/state_processing/src/consensus_context.rs index b0eaf3422d..0c176d4ab1 100644 --- a/consensus/state_processing/src/consensus_context.rs +++ b/consensus/state_processing/src/consensus_context.rs @@ -147,6 +147,8 @@ impl ConsensusContext { } } + #[allow(unknown_lints)] + #[allow(elided_named_lifetimes)] pub fn get_indexed_attestation<'a>( &'a mut self, state: &BeaconState, From 244a460e704184c0e0c356ce9dda20afd995a68a Mon Sep 17 00:00:00 2001 From: Lion - dapplion <35266934+dapplion@users.noreply.github.com> Date: Thu, 10 Oct 2024 05:34:41 +0300 Subject: [PATCH 47/66] Bound min size of dynamic processor queues (#6466) * Bound min size of dynamic processor queues * Use max * Add test --- beacon_node/beacon_processor/src/lib.rs | 33 +++++++++++++++++++++++-- 1 file changed, 31 insertions(+), 2 deletions(-) 
diff --git a/beacon_node/beacon_processor/src/lib.rs b/beacon_node/beacon_processor/src/lib.rs index cd5a1d6cff..02c287b68e 100644 --- a/beacon_node/beacon_processor/src/lib.rs +++ b/beacon_node/beacon_processor/src/lib.rs @@ -93,6 +93,11 @@ const DEFAULT_MAX_SCHEDULED_WORK_QUEUE_LEN: usize = 3 * DEFAULT_MAX_WORK_EVENT_Q /// slightly, we don't need to adjust the queues during the lifetime of a process. const ACTIVE_VALIDATOR_COUNT_OVERPROVISION_PERCENT: usize = 110; +/// Minimum size of dynamically sized queues. Due to integer division we don't want 0 length queues +/// as the processor won't process that message type. 128 is an arbitrary value value >= 1 that +/// seems reasonable. +const MIN_QUEUE_LEN: usize = 128; + /// Maximum number of queued items that will be stored before dropping them pub struct BeaconProcessorQueueLengths { aggregate_queue: usize, @@ -155,9 +160,15 @@ impl BeaconProcessorQueueLengths { aggregate_queue: 4096, unknown_block_aggregate_queue: 1024, // Capacity for a full slot's worth of attestations if subscribed to all subnets - attestation_queue: active_validator_count / slots_per_epoch, + attestation_queue: std::cmp::max( + active_validator_count / slots_per_epoch, + MIN_QUEUE_LEN, + ), // Capacity for a full slot's worth of attestations if subscribed to all subnets - unknown_block_attestation_queue: active_validator_count / slots_per_epoch, + unknown_block_attestation_queue: std::cmp::max( + active_validator_count / slots_per_epoch, + MIN_QUEUE_LEN, + ), sync_message_queue: 2048, sync_contribution_queue: 1024, gossip_voluntary_exit_queue: 4096, @@ -1686,3 +1697,21 @@ impl Drop for SendOnDrop { } } } + +#[cfg(test)] +mod tests { + use super::*; + use types::{BeaconState, ChainSpec, Eth1Data, ForkName, MainnetEthSpec}; + + #[test] + fn min_queue_len() { + // State with no validators. 
+ let spec = ForkName::latest().make_genesis_spec(ChainSpec::mainnet()); + let genesis_time = 0; + let state = BeaconState::::new(genesis_time, Eth1Data::default(), &spec); + assert_eq!(state.validators().len(), 0); + let queue_lengths = BeaconProcessorQueueLengths::from_state(&state, &spec).unwrap(); + assert_eq!(queue_lengths.attestation_queue, MIN_QUEUE_LEN); + assert_eq!(queue_lengths.unknown_block_attestation_queue, MIN_QUEUE_LEN); + } +} From da290e8e2e9420a0d3f3a02012f052b90b5f6aab Mon Sep 17 00:00:00 2001 From: hopinheimer <48147533+hopinheimer@users.noreply.github.com> Date: Thu, 10 Oct 2024 07:32:41 -0400 Subject: [PATCH 48/66] Added required `--force-bls-withdrawal-credentials` description to `--disable-deposits` usage (#6436) * cli description * complied docs changes * reverted changes and script amended * fix * reverting unwanted changes * making linter happy * requested changes * Merge branch 'unstable' into cli-fix * Merge branch 'unstable' into cli-fix --- book/src/help_vm_create.md | 4 +++- scripts/cli.sh | 2 +- validator_manager/src/create_validators.rs | 4 +++- 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/book/src/help_vm_create.md b/book/src/help_vm_create.md index 1803bb534c..cde822e894 100644 --- a/book/src/help_vm_create.md +++ b/book/src/help_vm_create.md @@ -133,7 +133,9 @@ Flags: When provided don't generate the deposits JSON file that is commonly used for submitting validator deposits via a web UI. Using this flag will save several seconds per validator if the user has an alternate - strategy for submitting deposits. + strategy for submitting deposits. If used, the + --force-bls-withdrawal-credentials is also required to ensure users + are aware that an --eth1-withdrawal-address is not set. --disable-log-timestamp If present, do not include timestamps in logging output. 
--disable-malloc-tuning diff --git a/scripts/cli.sh b/scripts/cli.sh index 6ca019b39e..e43c05a834 100755 --- a/scripts/cli.sh +++ b/scripts/cli.sh @@ -16,7 +16,7 @@ write_to_file() { printf "# %s\n\n\`\`\`\n%s\n\`\`\`" "$program" "$cmd" > "$file" # Adjust the width of the help text and append to the end of file - sed -i -e '$a\'$'\n''\n''' "$file" + printf "\n\n%s\n" "" >> "$file" } CMD=./target/release/lighthouse diff --git a/validator_manager/src/create_validators.rs b/validator_manager/src/create_validators.rs index d06fce1d09..37a6040a9b 100644 --- a/validator_manager/src/create_validators.rs +++ b/validator_manager/src/create_validators.rs @@ -112,7 +112,9 @@ pub fn cli_app() -> Command { "When provided don't generate the deposits JSON file that is \ commonly used for submitting validator deposits via a web UI. \ Using this flag will save several seconds per validator if the \ - user has an alternate strategy for submitting deposits.", + user has an alternate strategy for submitting deposits. 
\ + If used, the --force-bls-withdrawal-credentials is also required \ + to ensure users are aware that an --eth1-withdrawal-address is not set.", ) .action(ArgAction::SetTrue) .help_heading(FLAG_HEADER) From a0a62ea3e14385d529af2be19bf2487e58db2290 Mon Sep 17 00:00:00 2001 From: Lion - dapplion <35266934+dapplion@users.noreply.github.com> Date: Fri, 11 Oct 2024 02:44:18 +0300 Subject: [PATCH 49/66] Prevent sync lookups from reverting to awaiting block (#6443) * Prevent sync lookups from reverting to awaiting block * Remove stale comment --- .../network/src/sync/block_lookups/common.rs | 38 +++--- .../network/src/sync/block_lookups/mod.rs | 8 +- .../sync/block_lookups/single_block_lookup.rs | 117 +++++++++++++++--- .../network/src/sync/network_context.rs | 67 +--------- 4 files changed, 128 insertions(+), 102 deletions(-) diff --git a/beacon_node/network/src/sync/block_lookups/common.rs b/beacon_node/network/src/sync/block_lookups/common.rs index c7c043f53f..5e336d9c38 100644 --- a/beacon_node/network/src/sync/block_lookups/common.rs +++ b/beacon_node/network/src/sync/block_lookups/common.rs @@ -13,7 +13,7 @@ use std::sync::Arc; use types::blob_sidecar::FixedBlobSidecarList; use types::{DataColumnSidecarList, SignedBeaconBlock}; -use super::single_block_lookup::DownloadResult; +use super::single_block_lookup::{ComponentRequests, DownloadResult}; use super::SingleLookupId; #[derive(Debug, Copy, Clone)] @@ -42,7 +42,7 @@ pub trait RequestState { &self, id: Id, peer_id: PeerId, - downloaded_block: Option>>, + expected_blobs: usize, cx: &mut SyncNetworkContext, ) -> Result; @@ -61,7 +61,7 @@ pub trait RequestState { fn response_type() -> ResponseType; /// A getter for the `BlockRequestState` or `BlobRequestState` associated with this trait. 
- fn request_state_mut(request: &mut SingleBlockLookup) -> &mut Self; + fn request_state_mut(request: &mut SingleBlockLookup) -> Result<&mut Self, &'static str>; /// A getter for a reference to the `SingleLookupRequestState` associated with this trait. fn get_state(&self) -> &SingleLookupRequestState; @@ -77,7 +77,7 @@ impl RequestState for BlockRequestState { &self, id: SingleLookupId, peer_id: PeerId, - _: Option>>, + _: usize, cx: &mut SyncNetworkContext, ) -> Result { cx.block_lookup_request(id, peer_id, self.requested_block_root) @@ -107,8 +107,8 @@ impl RequestState for BlockRequestState { fn response_type() -> ResponseType { ResponseType::Block } - fn request_state_mut(request: &mut SingleBlockLookup) -> &mut Self { - &mut request.block_request_state + fn request_state_mut(request: &mut SingleBlockLookup) -> Result<&mut Self, &'static str> { + Ok(&mut request.block_request_state) } fn get_state(&self) -> &SingleLookupRequestState { &self.state @@ -125,10 +125,10 @@ impl RequestState for BlobRequestState { &self, id: Id, peer_id: PeerId, - downloaded_block: Option>>, + expected_blobs: usize, cx: &mut SyncNetworkContext, ) -> Result { - cx.blob_lookup_request(id, peer_id, self.block_root, downloaded_block) + cx.blob_lookup_request(id, peer_id, self.block_root, expected_blobs) .map_err(LookupRequestError::SendFailedNetwork) } @@ -150,8 +150,13 @@ impl RequestState for BlobRequestState { fn response_type() -> ResponseType { ResponseType::Blob } - fn request_state_mut(request: &mut SingleBlockLookup) -> &mut Self { - &mut request.blob_request_state + fn request_state_mut(request: &mut SingleBlockLookup) -> Result<&mut Self, &'static str> { + match &mut request.component_requests { + ComponentRequests::WaitingForBlock => Err("waiting for block"), + ComponentRequests::ActiveBlobRequest(request, _) => Ok(request), + ComponentRequests::ActiveCustodyRequest { .. } => Err("expecting custody request"), + ComponentRequests::NotNeeded { .. 
} => Err("not needed"), + } } fn get_state(&self) -> &SingleLookupRequestState { &self.state @@ -169,10 +174,10 @@ impl RequestState for CustodyRequestState { id: Id, // TODO(das): consider selecting peers that have custody but are in this set _peer_id: PeerId, - downloaded_block: Option>>, + _: usize, cx: &mut SyncNetworkContext, ) -> Result { - cx.custody_lookup_request(id, self.block_root, downloaded_block) + cx.custody_lookup_request(id, self.block_root) .map_err(LookupRequestError::SendFailedNetwork) } @@ -200,8 +205,13 @@ impl RequestState for CustodyRequestState { fn response_type() -> ResponseType { ResponseType::CustodyColumn } - fn request_state_mut(request: &mut SingleBlockLookup) -> &mut Self { - &mut request.custody_request_state + fn request_state_mut(request: &mut SingleBlockLookup) -> Result<&mut Self, &'static str> { + match &mut request.component_requests { + ComponentRequests::WaitingForBlock => Err("waiting for block"), + ComponentRequests::ActiveBlobRequest { .. } => Err("expecting blob request"), + ComponentRequests::ActiveCustodyRequest(request) => Ok(request), + ComponentRequests::NotNeeded { .. } => Err("not needed"), + } } fn get_state(&self) -> &SingleLookupRequestState { &self.state diff --git a/beacon_node/network/src/sync/block_lookups/mod.rs b/beacon_node/network/src/sync/block_lookups/mod.rs index a89f533ecc..f5e68d1512 100644 --- a/beacon_node/network/src/sync/block_lookups/mod.rs +++ b/beacon_node/network/src/sync/block_lookups/mod.rs @@ -450,7 +450,9 @@ impl BlockLookups { }; let block_root = lookup.block_root(); - let request_state = R::request_state_mut(lookup).get_state_mut(); + let request_state = R::request_state_mut(lookup) + .map_err(|e| LookupRequestError::BadState(e.to_owned()))? 
+ .get_state_mut(); match response { Ok((response, peer_group, seen_timestamp)) => { @@ -545,7 +547,9 @@ impl BlockLookups { }; let block_root = lookup.block_root(); - let request_state = R::request_state_mut(lookup).get_state_mut(); + let request_state = R::request_state_mut(lookup) + .map_err(|e| LookupRequestError::BadState(e.to_owned()))? + .get_state_mut(); debug!( self.log, diff --git a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs index 4e7268a72a..d701cbbb8d 100644 --- a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs +++ b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs @@ -4,7 +4,7 @@ use crate::sync::network_context::{ LookupRequestResult, PeerGroup, ReqId, RpcRequestSendError, SendErrorProcessor, SyncNetworkContext, }; -use beacon_chain::BeaconChainTypes; +use beacon_chain::{BeaconChainTypes, BlockProcessStatus}; use derivative::Derivative; use lighthouse_network::service::api_types::Id; use rand::seq::IteratorRandom; @@ -62,8 +62,7 @@ pub enum LookupRequestError { pub struct SingleBlockLookup { pub id: Id, pub block_request_state: BlockRequestState, - pub blob_request_state: BlobRequestState, - pub custody_request_state: CustodyRequestState, + pub component_requests: ComponentRequests, /// Peers that claim to have imported this set of block components #[derivative(Debug(format_with = "fmt_peer_set_as_len"))] peers: HashSet, @@ -72,6 +71,16 @@ pub struct SingleBlockLookup { created: Instant, } +#[derive(Debug)] +pub(crate) enum ComponentRequests { + WaitingForBlock, + ActiveBlobRequest(BlobRequestState, usize), + ActiveCustodyRequest(CustodyRequestState), + // When printing in debug this state display the reason why it's not needed + #[allow(dead_code)] + NotNeeded(&'static str), +} + impl SingleBlockLookup { pub fn new( requested_block_root: Hash256, @@ -82,8 +91,7 @@ impl SingleBlockLookup { Self { id, block_request_state: 
BlockRequestState::new(requested_block_root), - blob_request_state: BlobRequestState::new(requested_block_root), - custody_request_state: CustodyRequestState::new(requested_block_root), + component_requests: ComponentRequests::WaitingForBlock, peers: HashSet::from_iter(peers.iter().copied()), block_root: requested_block_root, awaiting_parent, @@ -150,16 +158,28 @@ impl SingleBlockLookup { /// Returns true if the block has already been downloaded. pub fn all_components_processed(&self) -> bool { self.block_request_state.state.is_processed() - && self.blob_request_state.state.is_processed() - && self.custody_request_state.state.is_processed() + && match &self.component_requests { + ComponentRequests::WaitingForBlock => false, + ComponentRequests::ActiveBlobRequest(request, _) => request.state.is_processed(), + ComponentRequests::ActiveCustodyRequest(request) => request.state.is_processed(), + ComponentRequests::NotNeeded { .. } => true, + } } /// Returns true if this request is expecting some event to make progress pub fn is_awaiting_event(&self) -> bool { self.awaiting_parent.is_some() || self.block_request_state.state.is_awaiting_event() - || self.blob_request_state.state.is_awaiting_event() - || self.custody_request_state.state.is_awaiting_event() + || match &self.component_requests { + ComponentRequests::WaitingForBlock => true, + ComponentRequests::ActiveBlobRequest(request, _) => { + request.state.is_awaiting_event() + } + ComponentRequests::ActiveCustodyRequest(request) => { + request.state.is_awaiting_event() + } + ComponentRequests::NotNeeded { .. } => false, + } } /// Makes progress on all requests of this lookup. 
Any error is not recoverable and must result @@ -169,9 +189,66 @@ impl SingleBlockLookup { cx: &mut SyncNetworkContext, ) -> Result { // TODO: Check what's necessary to download, specially for blobs - self.continue_request::>(cx)?; - self.continue_request::>(cx)?; - self.continue_request::>(cx)?; + self.continue_request::>(cx, 0)?; + + if let ComponentRequests::WaitingForBlock = self.component_requests { + let downloaded_block = self + .block_request_state + .state + .peek_downloaded_data() + .cloned(); + + if let Some(block) = downloaded_block.or_else(|| { + // If the block is already being processed or fully validated, retrieve how many blobs + // it expects. Consider any stage of the block. If the block root has been validated, we + // can assert that this is the correct value of `blob_kzg_commitments_count`. + match cx.chain.get_block_process_status(&self.block_root) { + BlockProcessStatus::Unknown => None, + BlockProcessStatus::NotValidated(block) + | BlockProcessStatus::ExecutionValidated(block) => Some(block.clone()), + } + }) { + let expected_blobs = block.num_expected_blobs(); + let block_epoch = block.slot().epoch(T::EthSpec::slots_per_epoch()); + if expected_blobs == 0 { + self.component_requests = ComponentRequests::NotNeeded("no data"); + } + if cx.chain.should_fetch_blobs(block_epoch) { + self.component_requests = ComponentRequests::ActiveBlobRequest( + BlobRequestState::new(self.block_root), + expected_blobs, + ); + } else if cx.chain.should_fetch_custody_columns(block_epoch) { + self.component_requests = ComponentRequests::ActiveCustodyRequest( + CustodyRequestState::new(self.block_root), + ); + } else { + self.component_requests = ComponentRequests::NotNeeded("outside da window"); + } + } else { + // Wait to download the block before downloading blobs. Then we can be sure that the + // block has data, so there's no need to do "blind" requests for all possible blobs and + // latter handle the case where if the peer sent no blobs, penalize. 
+ // + // Lookup sync event safety: Reaching this code means that a block is not in any pre-import + // cache nor in the request state of this lookup. Therefore, the block must either: (1) not + // be downloaded yet or (2) the block is already imported into the fork-choice. + // In case (1) the lookup must either successfully download the block or get dropped. + // In case (2) the block will be downloaded, processed, reach `DuplicateFullyImported` + // and get dropped as completed. + } + } + + match &self.component_requests { + ComponentRequests::WaitingForBlock => {} // do nothing + ComponentRequests::ActiveBlobRequest(_, expected_blobs) => { + self.continue_request::>(cx, *expected_blobs)? + } + ComponentRequests::ActiveCustodyRequest(_) => { + self.continue_request::>(cx, 0)? + } + ComponentRequests::NotNeeded { .. } => {} // do nothing + } // If all components of this lookup are already processed, there will be no future events // that can make progress so it must be dropped. Consider the lookup completed. @@ -187,15 +264,12 @@ impl SingleBlockLookup { fn continue_request>( &mut self, cx: &mut SyncNetworkContext, + expected_blobs: usize, ) -> Result<(), LookupRequestError> { let id = self.id; let awaiting_parent = self.awaiting_parent.is_some(); - let downloaded_block = self - .block_request_state - .state - .peek_downloaded_data() - .cloned(); - let request = R::request_state_mut(self); + let request = + R::request_state_mut(self).map_err(|e| LookupRequestError::BadState(e.to_owned()))?; // Attempt to progress awaiting downloads if request.get_state().is_awaiting_download() { @@ -214,13 +288,16 @@ impl SingleBlockLookup { // not receive any new peers for some time it will be dropped. If it receives a new // peer it must attempt to make progress. R::request_state_mut(self) + .map_err(|e| LookupRequestError::BadState(e.to_owned()))? 
.get_state_mut() .update_awaiting_download_status("no peers"); return Ok(()); }; - let request = R::request_state_mut(self); - match request.make_request(id, peer_id, downloaded_block, cx)? { + let request = R::request_state_mut(self) + .map_err(|e| LookupRequestError::BadState(e.to_owned()))?; + + match request.make_request(id, peer_id, expected_blobs, cx)? { LookupRequestResult::RequestSent(req_id) => { // Lookup sync event safety: If make_request returns `RequestSent`, we are // guaranteed that `BlockLookups::on_download_response` will be called exactly diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index b67c0bf2dd..9f9a189817 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -632,45 +632,8 @@ impl SyncNetworkContext { lookup_id: SingleLookupId, peer_id: PeerId, block_root: Hash256, - downloaded_block: Option>>, + expected_blobs: usize, ) -> Result { - let Some(block) = downloaded_block.or_else(|| { - // If the block is already being processed or fully validated, retrieve how many blobs - // it expects. Consider any stage of the block. If the block root has been validated, we - // can assert that this is the correct value of `blob_kzg_commitments_count`. - match self.chain.get_block_process_status(&block_root) { - BlockProcessStatus::Unknown => None, - BlockProcessStatus::NotValidated(block) - | BlockProcessStatus::ExecutionValidated(block) => Some(block.clone()), - } - }) else { - // Wait to download the block before downloading blobs. Then we can be sure that the - // block has data, so there's no need to do "blind" requests for all possible blobs and - // latter handle the case where if the peer sent no blobs, penalize. - // - if `downloaded_block_expected_blobs` is Some = block is downloading or processing. - // - if `num_expected_blobs` returns Some = block is processed. 
- // - // Lookup sync event safety: Reaching this code means that a block is not in any pre-import - // cache nor in the request state of this lookup. Therefore, the block must either: (1) not - // be downloaded yet or (2) the block is already imported into the fork-choice. - // In case (1) the lookup must either successfully download the block or get dropped. - // In case (2) the block will be downloaded, processed, reach `DuplicateFullyImported` - // and get dropped as completed. - return Ok(LookupRequestResult::Pending("waiting for block download")); - }; - let expected_blobs = block.num_expected_blobs(); - let block_epoch = block.slot().epoch(T::EthSpec::slots_per_epoch()); - - // Check if we are in deneb, before peerdas and inside da window - if !self.chain.should_fetch_blobs(block_epoch) { - return Ok(LookupRequestResult::NoRequestNeeded("blobs not required")); - } - - // No data required for this block - if expected_blobs == 0 { - return Ok(LookupRequestResult::NoRequestNeeded("no data")); - } - let imported_blob_indexes = self .chain .data_availability_checker @@ -760,35 +723,7 @@ impl SyncNetworkContext { &mut self, lookup_id: SingleLookupId, block_root: Hash256, - downloaded_block: Option>>, ) -> Result { - let Some(block) = - downloaded_block.or_else(|| match self.chain.get_block_process_status(&block_root) { - BlockProcessStatus::Unknown => None, - BlockProcessStatus::NotValidated(block) - | BlockProcessStatus::ExecutionValidated(block) => Some(block.clone()), - }) - else { - // Wait to download the block before downloading columns. Then we can be sure that the - // block has data, so there's no need to do "blind" requests for all possible columns and - // latter handle the case where if the peer sent no columns, penalize. - // - if `downloaded_block_expected_blobs` is Some = block is downloading or processing. - // - if `num_expected_blobs` returns Some = block is processed. 
- return Ok(LookupRequestResult::Pending("waiting for block download")); - }; - let expected_blobs = block.num_expected_blobs(); - let block_epoch = block.slot().epoch(T::EthSpec::slots_per_epoch()); - - // Check if we are into peerdas and inside da window - if !self.chain.should_fetch_custody_columns(block_epoch) { - return Ok(LookupRequestResult::NoRequestNeeded("columns not required")); - } - - // No data required for this block - if expected_blobs == 0 { - return Ok(LookupRequestResult::NoRequestNeeded("no data")); - } - let custody_indexes_imported = self .chain .data_availability_checker From 17711b720e22adce6f7afb1ba610d84860a9e6f4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Fri, 11 Oct 2024 17:33:49 +0100 Subject: [PATCH 50/66] Remove duplicated connection limits checks (#6156) * move main Behaviour to mod.rs for better readibility and remove connection limits checks after connection has been established, as those checks have already been done by connection limits Behaviour. * improve logging wording wrt dial logic when we call dial_peer we are not yet dialing but just adding the peer to the dial queue * do not use a constant for MAX_CONNECTIONS_PER_PEER we only use it at one place, and the function call is explicit. * address review and re-instate connection limits checks, but do it before the connection has been established. 
* Merge branch 'unstable' of github.com:sigp/lighthouse into remove-dial-error-denied * Merge branch 'unstable' of github.com:sigp/lighthouse into remove-dial-error-denied --- .../src/peer_manager/mod.rs | 18 +--- .../src/peer_manager/network_behaviour.rs | 100 ++++++++---------- .../src/service/behaviour.rs | 39 ------- .../lighthouse_network/src/service/mod.rs | 48 +++++++-- .../lighthouse_network/src/service/utils.rs | 2 - 5 files changed, 90 insertions(+), 117 deletions(-) delete mode 100644 beacon_node/lighthouse_network/src/service/behaviour.rs diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index 1f066e9bbc..ec4d892c9b 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -338,15 +338,15 @@ impl PeerManager { { // This should be updated with the peer dialing. In fact created once the peer is // dialed + let peer_id = enr.peer_id(); if let Some(min_ttl) = min_ttl { self.network_globals .peers .write() - .update_min_ttl(&enr.peer_id(), min_ttl); + .update_min_ttl(&peer_id, min_ttl); } - let peer_id = enr.peer_id(); if self.dial_peer(enr) { - debug!(self.log, "Dialing discovered peer"; "peer_id" => %peer_id); + debug!(self.log, "Added discovered ENR peer to dial queue"; "peer_id" => %peer_id); to_dial_peers += 1; } } @@ -447,18 +447,6 @@ impl PeerManager { self.network_globals.peers.read().is_connected(peer_id) } - /// Reports whether the peer limit is reached in which case we stop allowing new incoming - /// connections. 
- pub fn peer_limit_reached(&self, count_dialing: bool) -> bool { - if count_dialing { - // This is an incoming connection so limit by the standard max peers - self.network_globals.connected_or_dialing_peers() >= self.max_peers() - } else { - // We dialed this peer, allow up to max_outbound_dialing_peers - self.network_globals.connected_peers() >= self.max_outbound_dialing_peers() - } - } - /// Updates `PeerInfo` with `identify` information. pub fn identify(&mut self, peer_id: &PeerId, info: &IdentifyInfo) { if let Some(peer_info) = self.network_globals.peers.write().peer_info_mut(peer_id) { diff --git a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs index b7fd5b5e5d..c40f78b4b0 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs @@ -15,7 +15,6 @@ use slog::{debug, error, trace}; use types::EthSpec; use crate::discovery::enr_ext::EnrExt; -use crate::rpc::GoodbyeReason; use crate::types::SyncState; use crate::{metrics, ClearDialError}; @@ -94,26 +93,20 @@ impl NetworkBehaviour for PeerManager { } if let Some(enr) = self.peers_to_dial.pop() { - let peer_id = enr.peer_id(); - self.inject_peer_connection(&peer_id, ConnectingType::Dialing, Some(enr.clone())); - - let quic_multiaddrs = if self.quic_enabled { - let quic_multiaddrs = enr.multiaddr_quic(); - if !quic_multiaddrs.is_empty() { - debug!(self.log, "Dialing QUIC supported peer"; "peer_id"=> %peer_id, "quic_multiaddrs" => ?quic_multiaddrs); - } - quic_multiaddrs - } else { - Vec::new() - }; + self.inject_peer_connection(&enr.peer_id(), ConnectingType::Dialing, Some(enr.clone())); // Prioritize Quic connections over Tcp ones. 
- let multiaddrs = quic_multiaddrs - .into_iter() - .chain(enr.multiaddr_tcp()) - .collect(); + let multiaddrs = [ + self.quic_enabled + .then_some(enr.multiaddr_quic()) + .unwrap_or_default(), + enr.multiaddr_tcp(), + ] + .concat(); + + debug!(self.log, "Dialing peer"; "peer_id"=> %enr.peer_id(), "multiaddrs" => ?multiaddrs); return Poll::Ready(ToSwarm::Dial { - opts: DialOpts::peer_id(peer_id) + opts: DialOpts::peer_id(enr.peer_id()) .condition(PeerCondition::Disconnected) .addresses(multiaddrs) .build(), @@ -130,14 +123,7 @@ impl NetworkBehaviour for PeerManager { endpoint, other_established, .. - }) => { - // NOTE: We still need to handle the [`ConnectionEstablished`] because the - // [`NetworkBehaviour::handle_established_inbound_connection`] and - // [`NetworkBehaviour::handle_established_outbound_connection`] are fallible. This - // means another behaviour can kill the connection early, and we can't assume a - // peer as connected until this event is received. - self.on_connection_established(peer_id, endpoint, other_established) - } + }) => self.on_connection_established(peer_id, endpoint, other_established), FromSwarm::ConnectionClosed(ConnectionClosed { peer_id, endpoint, @@ -206,6 +192,21 @@ impl NetworkBehaviour for PeerManager { "Connection to peer rejected: peer has a bad score", )); } + + // Check the connection limits + if self.network_globals.connected_or_dialing_peers() >= self.max_peers() + && self + .network_globals + .peers + .read() + .peer_info(&peer_id) + .map_or(true, |peer| !peer.has_future_duty()) + { + return Err(ConnectionDenied::new( + "Connection to peer rejected: too many connections", + )); + } + Ok(ConnectionHandler) } @@ -218,13 +219,26 @@ impl NetworkBehaviour for PeerManager { _port_use: PortUse, ) -> Result, libp2p::swarm::ConnectionDenied> { trace!(self.log, "Outbound connection"; "peer_id" => %peer_id, "multiaddr" => %addr); - match self.ban_status(&peer_id) { - Some(cause) => { - error!(self.log, "Connected a banned peer. 
Rejecting connection"; "peer_id" => %peer_id); - Err(ConnectionDenied::new(cause)) - } - None => Ok(ConnectionHandler), + if let Some(cause) = self.ban_status(&peer_id) { + error!(self.log, "Connected a banned peer. Rejecting connection"; "peer_id" => %peer_id); + return Err(ConnectionDenied::new(cause)); } + + // Check the connection limits + if self.network_globals.connected_peers() >= self.max_outbound_dialing_peers() + && self + .network_globals + .peers + .read() + .peer_info(&peer_id) + .map_or(true, |peer| !peer.has_future_duty()) + { + return Err(ConnectionDenied::new( + "Connection to peer rejected: too many connections", + )); + } + + Ok(ConnectionHandler) } } @@ -233,7 +247,7 @@ impl PeerManager { &mut self, peer_id: PeerId, endpoint: &ConnectedPoint, - other_established: usize, + _other_established: usize, ) { debug!(self.log, "Connection established"; "peer_id" => %peer_id, "multiaddr" => %endpoint.get_remote_address(), @@ -247,26 +261,6 @@ impl PeerManager { self.update_peer_count_metrics(); } - // Count dialing peers in the limit if the peer dialed us. - let count_dialing = endpoint.is_listener(); - // Check the connection limits - if self.peer_limit_reached(count_dialing) - && self - .network_globals - .peers - .read() - .peer_info(&peer_id) - .map_or(true, |peer| !peer.has_future_duty()) - { - // Gracefully disconnect the peer. - self.disconnect_peer(peer_id, GoodbyeReason::TooManyPeers); - return; - } - - if other_established == 0 { - self.events.push(PeerManagerEvent::MetaData(peer_id)); - } - // NOTE: We don't register peers that we are disconnecting immediately. The network service // does not need to know about these peers. 
match endpoint { diff --git a/beacon_node/lighthouse_network/src/service/behaviour.rs b/beacon_node/lighthouse_network/src/service/behaviour.rs deleted file mode 100644 index ab2e43630b..0000000000 --- a/beacon_node/lighthouse_network/src/service/behaviour.rs +++ /dev/null @@ -1,39 +0,0 @@ -use crate::discovery::Discovery; -use crate::peer_manager::PeerManager; -use crate::rpc::RPC; -use crate::types::SnappyTransform; - -use libp2p::identify; -use libp2p::swarm::behaviour::toggle::Toggle; -use libp2p::swarm::NetworkBehaviour; -use libp2p::upnp::tokio::Behaviour as Upnp; -use types::EthSpec; - -use super::api_types::RequestId; - -pub type SubscriptionFilter = - gossipsub::MaxCountSubscriptionFilter; -pub type Gossipsub = gossipsub::Behaviour; - -#[derive(NetworkBehaviour)] -pub(crate) struct Behaviour -where - E: EthSpec, -{ - /// Keep track of active and pending connections to enforce hard limits. - pub connection_limits: libp2p::connection_limits::Behaviour, - /// The peer manager that keeps track of peer's reputation and status. - pub peer_manager: PeerManager, - /// The Eth2 RPC specified in the wire-0 protocol. - pub eth2_rpc: RPC, - /// Discv5 Discovery protocol. - pub discovery: Discovery, - /// Keep regular connection to peers and disconnect if absent. - // NOTE: The id protocol is used for initial interop. This will be removed by mainnet. - /// Provides IP addresses and peer information. - pub identify: identify::Behaviour, - /// Libp2p UPnP port mapping. - pub upnp: Toggle, - /// The routing pub-sub mechanism for eth2. 
- pub gossipsub: Gossipsub, -} diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index 4cf59e15e1..ea4c3acb42 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -1,4 +1,3 @@ -use self::behaviour::Behaviour; use self::gossip_cache::GossipCache; use crate::config::{gossipsub_config, GossipsubConfigParams, NetworkLoad}; use crate::discovery::{ @@ -14,8 +13,6 @@ use crate::rpc::{ self, GoodbyeReason, HandlerErr, NetworkParams, Protocol, RPCError, RPCMessage, RPCReceived, RequestType, ResponseTermination, RpcErrorResponse, RpcResponse, RpcSuccessResponse, RPC, }; -use crate::service::behaviour::BehaviourEvent; -pub use crate::service::behaviour::Gossipsub; use crate::types::{ attestation_sync_committee_topics, fork_core_topics, subnet_from_topic_hash, GossipEncoding, GossipKind, GossipTopic, SnappyTransform, Subnet, SubnetDiscovery, ALTAIR_CORE_TOPICS, @@ -33,7 +30,8 @@ use gossipsub::{ use gossipsub_scoring_parameters::{lighthouse_gossip_thresholds, PeerScoreSettings}; use libp2p::multiaddr::{self, Multiaddr, Protocol as MProtocol}; use libp2p::swarm::behaviour::toggle::Toggle; -use libp2p::swarm::{Swarm, SwarmEvent}; +use libp2p::swarm::{NetworkBehaviour, Swarm, SwarmEvent}; +use libp2p::upnp::tokio::Behaviour as Upnp; use libp2p::{identify, PeerId, SwarmBuilder}; use slog::{crit, debug, info, o, trace, warn}; use std::num::{NonZeroU8, NonZeroUsize}; @@ -47,10 +45,9 @@ use types::{ consts::altair::SYNC_COMMITTEE_SUBNET_COUNT, EnrForkId, EthSpec, ForkContext, Slot, SubnetId, }; use types::{ChainSpec, ForkName}; -use utils::{build_transport, strip_peer_id, Context as ServiceContext, MAX_CONNECTIONS_PER_PEER}; +use utils::{build_transport, strip_peer_id, Context as ServiceContext}; pub mod api_types; -mod behaviour; mod gossip_cache; pub mod gossipsub_scoring_parameters; pub mod utils; @@ -109,6 +106,41 @@ pub enum NetworkEvent { 
ZeroListeners, } +pub type Gossipsub = gossipsub::Behaviour; +pub type SubscriptionFilter = + gossipsub::MaxCountSubscriptionFilter; + +#[derive(NetworkBehaviour)] +pub(crate) struct Behaviour +where + E: EthSpec, +{ + // NOTE: The order of the following list of behaviours has meaning, + // `NetworkBehaviour::handle_{pending, established}_{inbound, outbound}` methods + // are called sequentially for each behaviour and they are fallible, + // therefore we want `connection_limits` and `peer_manager` running first, + // which are the behaviours that may reject a connection, so that + // when the subsequent behaviours are called they are certain the connection won't be rejected. + + // + /// Keep track of active and pending connections to enforce hard limits. + pub connection_limits: libp2p::connection_limits::Behaviour, + /// The peer manager that keeps track of peer's reputation and status. + pub peer_manager: PeerManager, + /// The Eth2 RPC specified in the wire-0 protocol. + pub eth2_rpc: RPC, + /// Discv5 Discovery protocol. + pub discovery: Discovery, + /// Keep regular connection to peers and disconnect if absent. + // NOTE: The id protocol is used for initial interop. This will be removed by mainnet. + /// Provides IP addresses and peer information. + pub identify: identify::Behaviour, + /// Libp2p UPnP port mapping. + pub upnp: Toggle, + /// The routing pub-sub mechanism for eth2. + pub gossipsub: Gossipsub, +} + /// Builds the network behaviour that manages the core protocols of eth2. /// This core behaviour is managed by `Behaviour` which adds peer management to all core /// behaviours. 
@@ -396,7 +428,7 @@ impl Network { (config.target_peers as f32 * (1.0 + PEER_EXCESS_FACTOR + PRIORITY_PEER_EXCESS)) .ceil() as u32, )) - .with_max_established_per_peer(Some(MAX_CONNECTIONS_PER_PEER)); + .with_max_established_per_peer(Some(1)); libp2p::connection_limits::Behaviour::new(limits) }; @@ -1198,7 +1230,7 @@ impl Network { self.discovery_mut().remove_cached_enr(&enr.peer_id()); let peer_id = enr.peer_id(); if self.peer_manager_mut().dial_peer(enr) { - debug!(self.log, "Dialing cached ENR peer"; "peer_id" => %peer_id); + debug!(self.log, "Added cached ENR peer to dial queue"; "peer_id" => %peer_id); } } } diff --git a/beacon_node/lighthouse_network/src/service/utils.rs b/beacon_node/lighthouse_network/src/service/utils.rs index 81ee86b8b9..f4988e68cd 100644 --- a/beacon_node/lighthouse_network/src/service/utils.rs +++ b/beacon_node/lighthouse_network/src/service/utils.rs @@ -24,8 +24,6 @@ use types::{ }; pub const NETWORK_KEY_FILENAME: &str = "key"; -/// The maximum simultaneous libp2p connections per peer. -pub const MAX_CONNECTIONS_PER_PEER: u32 = 1; /// The filename to store our local metadata. 
pub const METADATA_FILENAME: &str = "metadata"; From 2e440df4f1642fcff80bfc2d3a4de826ad541dd0 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 15 Oct 2024 07:31:10 +1100 Subject: [PATCH 51/66] Simplify hashing in shuffling (#6483) * Simplify hashing in shuffling * Fix benchmark deps * Check benchmarks when linting --- Cargo.toml | 2 +- Makefile | 2 +- consensus/swap_or_not_shuffle/Cargo.toml | 1 - consensus/swap_or_not_shuffle/src/shuffle_list.rs | 2 +- consensus/types/Cargo.toml | 2 +- consensus/types/benches/benches.rs | 2 +- crypto/kzg/benches/benchmark.rs | 4 ++-- 7 files changed, 7 insertions(+), 8 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 125231ad20..94ac8e13ff 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -95,7 +95,7 @@ resolver = "2" edition = "2021" [workspace.dependencies] -alloy-primitives = "0.8" +alloy-primitives = { version = "0.8", features = ["rlp", "getrandom"] } alloy-rlp = "0.3.4" alloy-consensus = "0.3.0" anyhow = "1" diff --git a/Makefile b/Makefile index e6420a4c98..32665d43ae 100644 --- a/Makefile +++ b/Makefile @@ -204,7 +204,7 @@ test-full: cargo-fmt test-release test-debug test-ef test-exec-engine # Lints the code for bad style and potentially unsafe arithmetic using Clippy. # Clippy lints are opt-in per-crate for now. By default, everything is allowed except for performance and correctness lints. 
lint: - cargo clippy --workspace --tests $(EXTRA_CLIPPY_OPTS) --features "$(TEST_FEATURES)" -- \ + cargo clippy --workspace --benches --tests $(EXTRA_CLIPPY_OPTS) --features "$(TEST_FEATURES)" -- \ -D clippy::fn_to_numeric_cast_any \ -D clippy::manual_let_else \ -D clippy::large_stack_frames \ diff --git a/consensus/swap_or_not_shuffle/Cargo.toml b/consensus/swap_or_not_shuffle/Cargo.toml index aff0225edd..dac83e7553 100644 --- a/consensus/swap_or_not_shuffle/Cargo.toml +++ b/consensus/swap_or_not_shuffle/Cargo.toml @@ -18,4 +18,3 @@ fixed_bytes = { workspace = true } [features] arbitrary = ["alloy-primitives/arbitrary"] -getrandom = ["alloy-primitives/getrandom"] diff --git a/consensus/swap_or_not_shuffle/src/shuffle_list.rs b/consensus/swap_or_not_shuffle/src/shuffle_list.rs index b49a26cc37..3e93974fe0 100644 --- a/consensus/swap_or_not_shuffle/src/shuffle_list.rs +++ b/consensus/swap_or_not_shuffle/src/shuffle_list.rs @@ -45,7 +45,7 @@ impl Buf { /// Hash the entire buffer. fn hash(&self) -> Hash256 { - Hash256::from_slice(&hash_fixed(&self.0)) + Hash256::from(hash_fixed(&self.0)) } } diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index c1559a407c..21a15fc517 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -9,7 +9,7 @@ name = "benches" harness = false [dependencies] -alloy-primitives = { workspace = true, features = ["rlp", "getrandom"] } +alloy-primitives = { workspace = true } merkle_proof = { workspace = true } bls = { workspace = true, features = ["arbitrary"] } kzg = { workspace = true } diff --git a/consensus/types/benches/benches.rs b/consensus/types/benches/benches.rs index effc6a2106..0c8bf36c81 100644 --- a/consensus/types/benches/benches.rs +++ b/consensus/types/benches/benches.rs @@ -78,7 +78,7 @@ fn all_benches(c: &mut Criterion) { || (bytes.clone(), spec.clone()), |(bytes, spec)| { let state: BeaconState = - BeaconState::from_ssz_bytes(&bytes, &spec).expect("should decode"); + 
BeaconState::from_ssz_bytes(bytes, spec).expect("should decode"); black_box(state) }, BatchSize::SmallInput, diff --git a/crypto/kzg/benches/benchmark.rs b/crypto/kzg/benches/benchmark.rs index 50f5f4e779..234e624698 100644 --- a/crypto/kzg/benches/benchmark.rs +++ b/crypto/kzg/benches/benchmark.rs @@ -8,7 +8,7 @@ pub fn bench_init_context(c: &mut Criterion) { .map_err(|e| format!("Unable to read trusted setup file: {}", e)) .expect("should have trusted setup"); - c.bench_function(&format!("Initialize context rust_eth_kzg"), |b| { + c.bench_function("Initialize context rust_eth_kzg", |b| { b.iter(|| { let trusted_setup = PeerDASTrustedSetup::from(&trusted_setup); DASContext::new( @@ -19,7 +19,7 @@ pub fn bench_init_context(c: &mut Criterion) { ) }) }); - c.bench_function(&format!("Initialize context c-kzg (4844)"), |b| { + c.bench_function("Initialize context c-kzg (4844)", |b| { b.iter(|| { let trusted_setup: TrustedSetup = serde_json::from_reader(get_trusted_setup().as_slice()) From 83d5c521d749f5eb3265c773ec7e45800d9239ea Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Tue, 15 Oct 2024 10:38:43 -0700 Subject: [PATCH 52/66] Electra updates for v1.5.0-alpha.6 (#6445) * Update process_slashing * Update test vectors version * Delete Domain::Consolidation * Rename to get_max_effective_balance * Fix unused; lint * Add the pre-electra slashing processing * lint * Change request json types * Move requests from payload to beacon block body * Refactor engine api * Fix warnings * Update engine api to latest * engine api changed..again * yet again * Merge branch 'engine-requests' into electra-updates * Fix tests * Store reference instead of bytes in NewPayloadRequest * Merge branch 'unstable' into electra-updates * Update beacon_node/execution_layer/src/engine_api/json_structures.rs Co-authored-by: Michael Sproul * Update beacon_node/execution_layer/src/lib.rs Co-authored-by: Michael Sproul * Update beacon_node/execution_layer/src/test_utils/handle_rpc.rs 
Co-authored-by: Michael Sproul --- Cargo.lock | 1 + .../beacon_chain/src/beacon_block_streamer.rs | 4 +- beacon_node/beacon_chain/src/beacon_chain.rs | 28 +- beacon_node/beacon_chain/src/errors.rs | 1 + beacon_node/execution_layer/Cargo.toml | 1 + beacon_node/execution_layer/src/engine_api.rs | 296 +++++---------- .../execution_layer/src/engine_api/http.rs | 112 +----- .../src/engine_api/json_structures.rs | 358 +++++------------- .../src/engine_api/new_payload_request.rs | 5 +- beacon_node/execution_layer/src/lib.rs | 33 +- .../test_utils/execution_block_generator.rs | 4 - .../src/test_utils/handle_rpc.rs | 55 +-- .../src/test_utils/mock_builder.rs | 14 +- .../execution_layer/src/test_utils/mod.rs | 2 - .../src/per_block_processing.rs | 3 +- .../process_operations.rs | 16 +- .../src/per_epoch_processing/single_pass.rs | 33 +- consensus/types/src/beacon_block.rs | 2 + consensus/types/src/beacon_block_body.rs | 6 + consensus/types/src/beacon_state.rs | 2 +- consensus/types/src/chain_spec.rs | 6 - consensus/types/src/config_and_preset.rs | 1 - consensus/types/src/consolidation_request.rs | 13 + consensus/types/src/deposit_request.rs | 15 + consensus/types/src/execution_payload.rs | 13 - .../types/src/execution_payload_header.rs | 12 - consensus/types/src/execution_requests.rs | 59 +++ consensus/types/src/lib.rs | 2 + consensus/types/src/payload.rs | 225 +---------- consensus/types/src/signed_beacon_block.rs | 2 + consensus/types/src/validator.rs | 10 +- consensus/types/src/withdrawal_request.rs | 13 + testing/ef_tests/Makefile | 2 +- testing/ef_tests/src/type_name.rs | 1 + testing/ef_tests/tests/tests.rs | 8 + 35 files changed, 445 insertions(+), 913 deletions(-) create mode 100644 consensus/types/src/execution_requests.rs diff --git a/Cargo.lock b/Cargo.lock index 3a063e7e0e..ecbfd0cb8d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3008,6 +3008,7 @@ dependencies = [ "sensitive_url", "serde", "serde_json", + "sha2 0.9.9", "slog", "slot_clock", "ssz_types", diff --git 
a/beacon_node/beacon_chain/src/beacon_block_streamer.rs b/beacon_node/beacon_chain/src/beacon_block_streamer.rs index 198d7d61f0..b76dba88fd 100644 --- a/beacon_node/beacon_chain/src/beacon_block_streamer.rs +++ b/beacon_node/beacon_chain/src/beacon_block_streamer.rs @@ -1,5 +1,5 @@ use crate::{metrics, BeaconChain, BeaconChainError, BeaconChainTypes, BlockProcessStatus}; -use execution_layer::{ExecutionLayer, ExecutionPayloadBody}; +use execution_layer::{ExecutionLayer, ExecutionPayloadBodyV1}; use slog::{crit, debug, error, Logger}; use std::collections::HashMap; use std::sync::Arc; @@ -57,7 +57,7 @@ struct BodiesByRange { struct BlockParts { blinded_block: Box>, header: Box>, - body: Option>>, + body: Option>>, } impl BlockParts { diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 13022b8269..5d7d7f0e06 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -5553,10 +5553,15 @@ impl BeaconChain { ) } BeaconState::Deneb(_) => { - let (payload, kzg_commitments, maybe_blobs_and_proofs, execution_payload_value) = - block_contents - .ok_or(BlockProductionError::MissingExecutionPayload)? - .deconstruct(); + let ( + payload, + kzg_commitments, + maybe_blobs_and_proofs, + _maybe_requests, + execution_payload_value, + ) = block_contents + .ok_or(BlockProductionError::MissingExecutionPayload)? + .deconstruct(); ( BeaconBlock::Deneb(BeaconBlockDeneb { @@ -5591,10 +5596,15 @@ impl BeaconChain { ) } BeaconState::Electra(_) => { - let (payload, kzg_commitments, maybe_blobs_and_proofs, execution_payload_value) = - block_contents - .ok_or(BlockProductionError::MissingExecutionPayload)? - .deconstruct(); + let ( + payload, + kzg_commitments, + maybe_blobs_and_proofs, + maybe_requests, + execution_payload_value, + ) = block_contents + .ok_or(BlockProductionError::MissingExecutionPayload)? 
+ .deconstruct(); ( BeaconBlock::Electra(BeaconBlockElectra { @@ -5619,6 +5629,8 @@ impl BeaconChain { bls_to_execution_changes: bls_to_execution_changes.into(), blob_kzg_commitments: kzg_commitments .ok_or(BlockProductionError::InvalidPayloadFork)?, + execution_requests: maybe_requests + .ok_or(BlockProductionError::MissingExecutionRequests)?, }, }), maybe_blobs_and_proofs, diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 8a317ce754..a26d755316 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -294,6 +294,7 @@ pub enum BlockProductionError { InvalidBlockVariant(String), KzgError(kzg::Error), FailedToBuildBlobSidecars(String), + MissingExecutionRequests, } easy_from_to!(BlockProcessingError, BlockProductionError); diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml index 93d8086149..843a7b83cb 100644 --- a/beacon_node/execution_layer/Cargo.toml +++ b/beacon_node/execution_layer/Cargo.toml @@ -52,3 +52,4 @@ alloy-rlp = { workspace = true } alloy-consensus = { workspace = true } lighthouse_version = { workspace = true } fixed_bytes = { workspace = true } +sha2 = { workspace = true } diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index 8ba8ecfffb..ab275e8b11 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -2,8 +2,7 @@ use crate::engines::ForkchoiceState; use crate::http::{ ENGINE_FORKCHOICE_UPDATED_V1, ENGINE_FORKCHOICE_UPDATED_V2, ENGINE_FORKCHOICE_UPDATED_V3, ENGINE_GET_CLIENT_VERSION_V1, ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1, - ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V2, ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1, - ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V2, ENGINE_GET_PAYLOAD_V1, ENGINE_GET_PAYLOAD_V2, + ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1, ENGINE_GET_PAYLOAD_V1, ENGINE_GET_PAYLOAD_V2, ENGINE_GET_PAYLOAD_V3, 
ENGINE_GET_PAYLOAD_V4, ENGINE_NEW_PAYLOAD_V1, ENGINE_NEW_PAYLOAD_V2, ENGINE_NEW_PAYLOAD_V3, ENGINE_NEW_PAYLOAD_V4, }; @@ -18,7 +17,6 @@ use reqwest::StatusCode; use serde::{Deserialize, Serialize}; use strum::IntoStaticStr; use superstruct::superstruct; -use types::execution_payload::{ConsolidationRequests, DepositRequests, WithdrawalRequests}; pub use types::{ Address, BeaconBlockRef, ConsolidationRequest, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadHeader, ExecutionPayloadRef, FixedVector, ForkName, Hash256, Transactions, @@ -26,7 +24,7 @@ pub use types::{ }; use types::{ ExecutionPayloadBellatrix, ExecutionPayloadCapella, ExecutionPayloadDeneb, - ExecutionPayloadElectra, KzgProofs, + ExecutionPayloadElectra, ExecutionRequests, KzgProofs, }; use types::{Graffiti, GRAFFITI_BYTES_LEN}; @@ -288,6 +286,8 @@ pub struct GetPayloadResponse { pub blobs_bundle: BlobsBundle, #[superstruct(only(Deneb, Electra), partial_getter(copy))] pub should_override_builder: bool, + #[superstruct(only(Electra))] + pub requests: ExecutionRequests, } impl GetPayloadResponse { @@ -321,7 +321,12 @@ impl From> for ExecutionPayload { } impl From> - for (ExecutionPayload, Uint256, Option>) + for ( + ExecutionPayload, + Uint256, + Option>, + Option>, + ) { fn from(response: GetPayloadResponse) -> Self { match response { @@ -329,21 +334,25 @@ impl From> ExecutionPayload::Bellatrix(inner.execution_payload), inner.block_value, None, + None, ), GetPayloadResponse::Capella(inner) => ( ExecutionPayload::Capella(inner.execution_payload), inner.block_value, None, + None, ), GetPayloadResponse::Deneb(inner) => ( ExecutionPayload::Deneb(inner.execution_payload), inner.block_value, Some(inner.blobs_bundle), + None, ), GetPayloadResponse::Electra(inner) => ( ExecutionPayload::Electra(inner.execution_payload), inner.block_value, Some(inner.blobs_bundle), + Some(inner.requests), ), } } @@ -360,106 +369,25 @@ impl GetPayloadResponse { } } -#[superstruct( - variants(V1, V2), - 
variant_attributes(derive(Clone, Debug),), - partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant") -)] #[derive(Clone, Debug)] -pub struct ExecutionPayloadBody { +pub struct ExecutionPayloadBodyV1 { pub transactions: Transactions, pub withdrawals: Option>, - #[superstruct(only(V2))] - pub deposit_requests: Option>, - #[superstruct(only(V2))] - pub withdrawal_requests: Option>, - #[superstruct(only(V2))] - pub consolidation_requests: Option>, } -impl ExecutionPayloadBody { - #[allow(clippy::type_complexity)] - pub fn deconstruct( - self, - ) -> ( - Transactions, - Option>, - Option>, - Option>, - Option>, - ) { - match self { - ExecutionPayloadBody::V1(body) => { - (body.transactions, body.withdrawals, None, None, None) - } - ExecutionPayloadBody::V2(body) => ( - body.transactions, - body.withdrawals, - body.deposit_requests, - body.withdrawal_requests, - body.consolidation_requests, - ), - } - } +impl ExecutionPayloadBodyV1 { pub fn to_payload( self, header: ExecutionPayloadHeader, ) -> Result, String> { - let header_fork = header.fork_name_unchecked(); - match &self { - Self::V1(_) => { - if header_fork.electra_enabled() { - return Err(format!( - "block {} is {} but response is ExecutionPayloadBodyV1. 
Does the EL support {}?", - header.block_hash(), - header_fork, - ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V2, - )); - } - } - Self::V2(_) => {} - } - - let ( - transactions, - withdrawals, - deposit_requests, - withdrawal_requests, - consolidation_requests, - ) = self.deconstruct(); - if !header_fork.capella_enabled() && withdrawals.is_some() { - return Err(format!( - "block {} is {} but payload body has withdrawals", - header.block_hash(), - header_fork - )); - } - if !header_fork.electra_enabled() { - if deposit_requests.is_some() { - return Err(format!( - "block {} is {} but payload body has deposit_requests", - header.block_hash(), - header_fork - )); - } - if withdrawal_requests.is_some() { - return Err(format!( - "block {} is {} but payload body has withdrawal_requests", - header.block_hash(), - header_fork - )); - } - if consolidation_requests.is_some() { - return Err(format!( - "block {} is {} but payload body has consolidation_requests", - header.block_hash(), - header_fork - )); - } - } - match header { ExecutionPayloadHeader::Bellatrix(header) => { + if self.withdrawals.is_some() { + return Err(format!( + "block {} is bellatrix but payload body has withdrawals", + header.block_hash + )); + } Ok(ExecutionPayload::Bellatrix(ExecutionPayloadBellatrix { parent_hash: header.parent_hash, fee_recipient: header.fee_recipient, @@ -474,108 +402,90 @@ impl ExecutionPayloadBody { extra_data: header.extra_data, base_fee_per_gas: header.base_fee_per_gas, block_hash: header.block_hash, - transactions, + transactions: self.transactions, })) } ExecutionPayloadHeader::Capella(header) => { - let withdrawals = withdrawals.ok_or_else(|| { - format!( - "block {} is {} but payload body has withdrawals set to null", - header.block_hash, header_fork - ) - })?; - Ok(ExecutionPayload::Capella(ExecutionPayloadCapella { - parent_hash: header.parent_hash, - fee_recipient: header.fee_recipient, - state_root: header.state_root, - receipts_root: header.receipts_root, - logs_bloom: 
header.logs_bloom, - prev_randao: header.prev_randao, - block_number: header.block_number, - gas_limit: header.gas_limit, - gas_used: header.gas_used, - timestamp: header.timestamp, - extra_data: header.extra_data, - base_fee_per_gas: header.base_fee_per_gas, - block_hash: header.block_hash, - transactions, - withdrawals, - })) + if let Some(withdrawals) = self.withdrawals { + Ok(ExecutionPayload::Capella(ExecutionPayloadCapella { + parent_hash: header.parent_hash, + fee_recipient: header.fee_recipient, + state_root: header.state_root, + receipts_root: header.receipts_root, + logs_bloom: header.logs_bloom, + prev_randao: header.prev_randao, + block_number: header.block_number, + gas_limit: header.gas_limit, + gas_used: header.gas_used, + timestamp: header.timestamp, + extra_data: header.extra_data, + base_fee_per_gas: header.base_fee_per_gas, + block_hash: header.block_hash, + transactions: self.transactions, + withdrawals, + })) + } else { + Err(format!( + "block {} is capella but payload body doesn't have withdrawals", + header.block_hash + )) + } } ExecutionPayloadHeader::Deneb(header) => { - let withdrawals = withdrawals.ok_or_else(|| { - format!( - "block {} is {} but payload body has withdrawals set to null", - header.block_hash, header_fork - ) - })?; - Ok(ExecutionPayload::Deneb(ExecutionPayloadDeneb { - parent_hash: header.parent_hash, - fee_recipient: header.fee_recipient, - state_root: header.state_root, - receipts_root: header.receipts_root, - logs_bloom: header.logs_bloom, - prev_randao: header.prev_randao, - block_number: header.block_number, - gas_limit: header.gas_limit, - gas_used: header.gas_used, - timestamp: header.timestamp, - extra_data: header.extra_data, - base_fee_per_gas: header.base_fee_per_gas, - block_hash: header.block_hash, - transactions, - withdrawals, - blob_gas_used: header.blob_gas_used, - excess_blob_gas: header.excess_blob_gas, - })) + if let Some(withdrawals) = self.withdrawals { + 
Ok(ExecutionPayload::Deneb(ExecutionPayloadDeneb { + parent_hash: header.parent_hash, + fee_recipient: header.fee_recipient, + state_root: header.state_root, + receipts_root: header.receipts_root, + logs_bloom: header.logs_bloom, + prev_randao: header.prev_randao, + block_number: header.block_number, + gas_limit: header.gas_limit, + gas_used: header.gas_used, + timestamp: header.timestamp, + extra_data: header.extra_data, + base_fee_per_gas: header.base_fee_per_gas, + block_hash: header.block_hash, + transactions: self.transactions, + withdrawals, + blob_gas_used: header.blob_gas_used, + excess_blob_gas: header.excess_blob_gas, + })) + } else { + Err(format!( + "block {} is post capella but payload body doesn't have withdrawals", + header.block_hash + )) + } } ExecutionPayloadHeader::Electra(header) => { - let withdrawals = withdrawals.ok_or_else(|| { - format!( - "block {} is {} but payload body has withdrawals set to null", - header.block_hash, header_fork - ) - })?; - let deposit_requests = deposit_requests.ok_or_else(|| { - format!( - "block {} is {} but payload body has deposit_requests set to null", - header.block_hash, header_fork - ) - })?; - let withdrawal_requests = withdrawal_requests.ok_or_else(|| { - format!( - "block {} is {} but payload body has withdrawal_requests set to null", - header.block_hash, header_fork - ) - })?; - let consolidation_requests = consolidation_requests.ok_or_else(|| { - format!( - "block {} is {} but payload body has consolidation_requests set to null", - header.block_hash, header_fork - ) - })?; - Ok(ExecutionPayload::Electra(ExecutionPayloadElectra { - parent_hash: header.parent_hash, - fee_recipient: header.fee_recipient, - state_root: header.state_root, - receipts_root: header.receipts_root, - logs_bloom: header.logs_bloom, - prev_randao: header.prev_randao, - block_number: header.block_number, - gas_limit: header.gas_limit, - gas_used: header.gas_used, - timestamp: header.timestamp, - extra_data: header.extra_data, - 
base_fee_per_gas: header.base_fee_per_gas, - block_hash: header.block_hash, - transactions, - withdrawals, - blob_gas_used: header.blob_gas_used, - excess_blob_gas: header.excess_blob_gas, - deposit_requests, - withdrawal_requests, - consolidation_requests, - })) + if let Some(withdrawals) = self.withdrawals { + Ok(ExecutionPayload::Electra(ExecutionPayloadElectra { + parent_hash: header.parent_hash, + fee_recipient: header.fee_recipient, + state_root: header.state_root, + receipts_root: header.receipts_root, + logs_bloom: header.logs_bloom, + prev_randao: header.prev_randao, + block_number: header.block_number, + gas_limit: header.gas_limit, + gas_used: header.gas_used, + timestamp: header.timestamp, + extra_data: header.extra_data, + base_fee_per_gas: header.base_fee_per_gas, + block_hash: header.block_hash, + transactions: self.transactions, + withdrawals, + blob_gas_used: header.blob_gas_used, + excess_blob_gas: header.excess_blob_gas, + })) + } else { + Err(format!( + "block {} is post capella but payload body doesn't have withdrawals", + header.block_hash + )) + } } } } @@ -592,8 +502,6 @@ pub struct EngineCapabilities { pub forkchoice_updated_v3: bool, pub get_payload_bodies_by_hash_v1: bool, pub get_payload_bodies_by_range_v1: bool, - pub get_payload_bodies_by_hash_v2: bool, - pub get_payload_bodies_by_range_v2: bool, pub get_payload_v1: bool, pub get_payload_v2: bool, pub get_payload_v3: bool, @@ -631,12 +539,6 @@ impl EngineCapabilities { if self.get_payload_bodies_by_range_v1 { response.push(ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1); } - if self.get_payload_bodies_by_hash_v2 { - response.push(ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V2); - } - if self.get_payload_bodies_by_range_v2 { - response.push(ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V2); - } if self.get_payload_v1 { response.push(ENGINE_GET_PAYLOAD_V1); } diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index c497a4a725..9c2c43bcf7 100644 --- 
a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -50,8 +50,6 @@ pub const ENGINE_FORKCHOICE_UPDATED_TIMEOUT: Duration = Duration::from_secs(8); pub const ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1: &str = "engine_getPayloadBodiesByHashV1"; pub const ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1: &str = "engine_getPayloadBodiesByRangeV1"; -pub const ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V2: &str = "engine_getPayloadBodiesByHashV2"; -pub const ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V2: &str = "engine_getPayloadBodiesByRangeV2"; pub const ENGINE_GET_PAYLOAD_BODIES_TIMEOUT: Duration = Duration::from_secs(10); pub const ENGINE_EXCHANGE_CAPABILITIES: &str = "engine_exchangeCapabilities"; @@ -80,8 +78,6 @@ pub static LIGHTHOUSE_CAPABILITIES: &[&str] = &[ ENGINE_FORKCHOICE_UPDATED_V3, ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1, ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1, - ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V2, - ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V2, ENGINE_GET_CLIENT_VERSION_V1, ]; @@ -797,6 +793,9 @@ impl HttpJsonRpc { JsonExecutionPayload::V4(new_payload_request_electra.execution_payload.clone().into()), new_payload_request_electra.versioned_hashes, new_payload_request_electra.parent_beacon_block_root, + new_payload_request_electra + .execution_requests_list + .get_execution_requests_list(), ]); let response: JsonPayloadStatusV1 = self @@ -849,7 +848,9 @@ impl HttpJsonRpc { ENGINE_GET_PAYLOAD_TIMEOUT * self.execution_timeout_multiplier, ) .await?; - Ok(JsonGetPayloadResponse::V1(response).into()) + JsonGetPayloadResponse::V1(response) + .try_into() + .map_err(Error::BadResponse) } ForkName::Capella => { let response: JsonGetPayloadResponseV2 = self @@ -859,7 +860,9 @@ impl HttpJsonRpc { ENGINE_GET_PAYLOAD_TIMEOUT * self.execution_timeout_multiplier, ) .await?; - Ok(JsonGetPayloadResponse::V2(response).into()) + JsonGetPayloadResponse::V2(response) + .try_into() + .map_err(Error::BadResponse) } ForkName::Base | ForkName::Altair | 
ForkName::Deneb | ForkName::Electra => Err( Error::UnsupportedForkVariant(format!("called get_payload_v2 with {}", fork_name)), @@ -883,7 +886,9 @@ impl HttpJsonRpc { ENGINE_GET_PAYLOAD_TIMEOUT * self.execution_timeout_multiplier, ) .await?; - Ok(JsonGetPayloadResponse::V3(response).into()) + JsonGetPayloadResponse::V3(response) + .try_into() + .map_err(Error::BadResponse) } ForkName::Base | ForkName::Altair @@ -912,7 +917,9 @@ impl HttpJsonRpc { ENGINE_GET_PAYLOAD_TIMEOUT * self.execution_timeout_multiplier, ) .await?; - Ok(JsonGetPayloadResponse::V4(response).into()) + JsonGetPayloadResponse::V4(response) + .try_into() + .map_err(Error::BadResponse) } ForkName::Base | ForkName::Altair @@ -991,7 +998,7 @@ impl HttpJsonRpc { pub async fn get_payload_bodies_by_hash_v1( &self, block_hashes: Vec, - ) -> Result>>, Error> { + ) -> Result>>, Error> { let params = json!([block_hashes]); let response: Vec>> = self @@ -1004,27 +1011,7 @@ impl HttpJsonRpc { Ok(response .into_iter() - .map(|opt_json| opt_json.map(|v1| JsonExecutionPayloadBody::V1(v1).into())) - .collect()) - } - - pub async fn get_payload_bodies_by_hash_v2( - &self, - block_hashes: Vec, - ) -> Result>>, Error> { - let params = json!([block_hashes]); - - let response: Vec>> = self - .rpc_request( - ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V2, - params, - ENGINE_GET_PAYLOAD_BODIES_TIMEOUT * self.execution_timeout_multiplier, - ) - .await?; - - Ok(response - .into_iter() - .map(|opt_json| opt_json.map(|v2| JsonExecutionPayloadBody::V2(v2).into())) + .map(|opt_json| opt_json.map(From::from)) .collect()) } @@ -1032,7 +1019,7 @@ impl HttpJsonRpc { &self, start: u64, count: u64, - ) -> Result>>, Error> { + ) -> Result>>, Error> { #[derive(Serialize)] #[serde(transparent)] struct Quantity(#[serde(with = "serde_utils::u64_hex_be")] u64); @@ -1048,31 +1035,7 @@ impl HttpJsonRpc { Ok(response .into_iter() - .map(|opt_json| opt_json.map(|v1| JsonExecutionPayloadBody::V1(v1).into())) - .collect()) - } - - pub async fn 
get_payload_bodies_by_range_v2( - &self, - start: u64, - count: u64, - ) -> Result>>, Error> { - #[derive(Serialize)] - #[serde(transparent)] - struct Quantity(#[serde(with = "serde_utils::u64_hex_be")] u64); - - let params = json!([Quantity(start), Quantity(count)]); - let response: Vec>> = self - .rpc_request( - ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V2, - params, - ENGINE_GET_PAYLOAD_BODIES_TIMEOUT * self.execution_timeout_multiplier, - ) - .await?; - - Ok(response - .into_iter() - .map(|opt_json| opt_json.map(|v2| JsonExecutionPayloadBody::V2(v2).into())) + .map(|opt_json| opt_json.map(From::from)) .collect()) } @@ -1099,10 +1062,6 @@ impl HttpJsonRpc { .contains(ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1), get_payload_bodies_by_range_v1: capabilities .contains(ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1), - get_payload_bodies_by_hash_v2: capabilities - .contains(ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V2), - get_payload_bodies_by_range_v2: capabilities - .contains(ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V2), get_payload_v1: capabilities.contains(ENGINE_GET_PAYLOAD_V1), get_payload_v2: capabilities.contains(ENGINE_GET_PAYLOAD_V2), get_payload_v3: capabilities.contains(ENGINE_GET_PAYLOAD_V3), @@ -1278,39 +1237,6 @@ impl HttpJsonRpc { } } - pub async fn get_payload_bodies_by_hash( - &self, - block_hashes: Vec, - ) -> Result>>, Error> { - let engine_capabilities = self.get_engine_capabilities(None).await?; - if engine_capabilities.get_payload_bodies_by_hash_v2 { - self.get_payload_bodies_by_hash_v2(block_hashes).await - } else if engine_capabilities.get_payload_bodies_by_hash_v1 { - self.get_payload_bodies_by_hash_v1(block_hashes).await - } else { - Err(Error::RequiredMethodUnsupported( - "engine_getPayloadBodiesByHash", - )) - } - } - - pub async fn get_payload_bodies_by_range( - &self, - start: u64, - count: u64, - ) -> Result>>, Error> { - let engine_capabilities = self.get_engine_capabilities(None).await?; - if engine_capabilities.get_payload_bodies_by_range_v2 { - 
self.get_payload_bodies_by_range_v2(start, count).await - } else if engine_capabilities.get_payload_bodies_by_range_v1 { - self.get_payload_bodies_by_range_v1(start, count).await - } else { - Err(Error::RequiredMethodUnsupported( - "engine_getPayloadBodiesByRange", - )) - } - } - // automatically selects the latest version of // forkchoice_updated that the execution engine supports pub async fn forkchoice_updated( diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index a05d584cfc..753554c149 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -1,11 +1,13 @@ use super::*; use alloy_rlp::RlpEncodable; use serde::{Deserialize, Serialize}; +use ssz::Decode; use strum::EnumString; use superstruct::superstruct; use types::beacon_block_body::KzgCommitments; use types::blob_sidecar::BlobsList; -use types::{DepositRequest, FixedVector, PublicKeyBytes, Signature, Unsigned, WithdrawalRequest}; +use types::execution_requests::{ConsolidationRequests, DepositRequests, WithdrawalRequests}; +use types::{FixedVector, Unsigned}; #[derive(Debug, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] @@ -104,14 +106,6 @@ pub struct JsonExecutionPayload { #[superstruct(only(V3, V4))] #[serde(with = "serde_utils::u64_hex_be")] pub excess_blob_gas: u64, - #[superstruct(only(V4))] - pub deposit_requests: VariableList, - #[superstruct(only(V4))] - pub withdrawal_requests: - VariableList, - #[superstruct(only(V4))] - pub consolidation_requests: - VariableList, } impl From> for JsonExecutionPayloadV1 { @@ -214,24 +208,6 @@ impl From> for JsonExecutionPayloadV4 .into(), blob_gas_used: payload.blob_gas_used, excess_blob_gas: payload.excess_blob_gas, - deposit_requests: payload - .deposit_requests - .into_iter() - .map(Into::into) - .collect::>() - .into(), - withdrawal_requests: payload - 
.withdrawal_requests - .into_iter() - .map(Into::into) - .collect::>() - .into(), - consolidation_requests: payload - .consolidation_requests - .into_iter() - .map(Into::into) - .collect::>() - .into(), } } } @@ -348,24 +324,6 @@ impl From> for ExecutionPayloadElectra .into(), blob_gas_used: payload.blob_gas_used, excess_blob_gas: payload.excess_blob_gas, - deposit_requests: payload - .deposit_requests - .into_iter() - .map(Into::into) - .collect::>() - .into(), - withdrawal_requests: payload - .withdrawal_requests - .into_iter() - .map(Into::into) - .collect::>() - .into(), - consolidation_requests: payload - .consolidation_requests - .into_iter() - .map(Into::into) - .collect::>() - .into(), } } } @@ -381,6 +339,71 @@ impl From> for ExecutionPayload { } } +/// This is used to index into the `execution_requests` array. +#[derive(Debug, Copy, Clone)] +enum RequestPrefix { + Deposit, + Withdrawal, + Consolidation, +} + +impl RequestPrefix { + pub fn from_prefix(prefix: u8) -> Option { + match prefix { + 0 => Some(Self::Deposit), + 1 => Some(Self::Withdrawal), + 2 => Some(Self::Consolidation), + _ => None, + } + } +} + +/// Format of `ExecutionRequests` received over the engine api. +/// +/// Array of ssz-encoded requests list encoded as hex bytes. +/// The prefix of the request type is used to index into the array. +/// +/// For e.g. 
[0xab, 0xcd, 0xef] +/// Here, 0xab are the deposits bytes (prefix and index == 0) +/// 0xcd are the withdrawals bytes (prefix and index == 1) +/// 0xef are the consolidations bytes (prefix and index == 2) +#[derive(Debug, Default, Clone, PartialEq, Serialize, Deserialize)] +#[serde(transparent)] +pub struct JsonExecutionRequests(pub Vec); + +impl TryFrom for ExecutionRequests { + type Error = String; + + fn try_from(value: JsonExecutionRequests) -> Result { + let mut requests = ExecutionRequests::default(); + + for (i, request) in value.0.into_iter().enumerate() { + // hex string + let decoded_bytes = hex::decode(request).map_err(|e| format!("Invalid hex {:?}", e))?; + match RequestPrefix::from_prefix(i as u8) { + Some(RequestPrefix::Deposit) => { + requests.deposits = DepositRequests::::from_ssz_bytes(&decoded_bytes) + .map_err(|e| format!("Failed to decode DepositRequest from EL: {:?}", e))?; + } + Some(RequestPrefix::Withdrawal) => { + requests.withdrawals = WithdrawalRequests::::from_ssz_bytes(&decoded_bytes) + .map_err(|e| { + format!("Failed to decode WithdrawalRequest from EL: {:?}", e) + })?; + } + Some(RequestPrefix::Consolidation) => { + requests.consolidations = + ConsolidationRequests::::from_ssz_bytes(&decoded_bytes).map_err( + |e| format!("Failed to decode ConsolidationRequest from EL: {:?}", e), + )?; + } + None => return Err("Empty requests string".to_string()), + } + } + Ok(requests) + } +} + #[superstruct( variants(V1, V2, V3, V4), variant_attributes( @@ -407,38 +430,42 @@ pub struct JsonGetPayloadResponse { pub blobs_bundle: JsonBlobsBundleV1, #[superstruct(only(V3, V4))] pub should_override_builder: bool, + #[superstruct(only(V4))] + pub requests: JsonExecutionRequests, } -impl From> for GetPayloadResponse { - fn from(json_get_payload_response: JsonGetPayloadResponse) -> Self { +impl TryFrom> for GetPayloadResponse { + type Error = String; + fn try_from(json_get_payload_response: JsonGetPayloadResponse) -> Result { match 
json_get_payload_response { JsonGetPayloadResponse::V1(response) => { - GetPayloadResponse::Bellatrix(GetPayloadResponseBellatrix { + Ok(GetPayloadResponse::Bellatrix(GetPayloadResponseBellatrix { execution_payload: response.execution_payload.into(), block_value: response.block_value, - }) + })) } JsonGetPayloadResponse::V2(response) => { - GetPayloadResponse::Capella(GetPayloadResponseCapella { + Ok(GetPayloadResponse::Capella(GetPayloadResponseCapella { execution_payload: response.execution_payload.into(), block_value: response.block_value, - }) + })) } JsonGetPayloadResponse::V3(response) => { - GetPayloadResponse::Deneb(GetPayloadResponseDeneb { + Ok(GetPayloadResponse::Deneb(GetPayloadResponseDeneb { execution_payload: response.execution_payload.into(), block_value: response.block_value, blobs_bundle: response.blobs_bundle.into(), should_override_builder: response.should_override_builder, - }) + })) } JsonGetPayloadResponse::V4(response) => { - GetPayloadResponse::Electra(GetPayloadResponseElectra { + Ok(GetPayloadResponse::Electra(GetPayloadResponseElectra { execution_payload: response.execution_payload.into(), block_value: response.block_value, blobs_bundle: response.blobs_bundle.into(), should_override_builder: response.should_override_builder, - }) + requests: response.requests.try_into()?, + })) } } } @@ -754,122 +781,36 @@ impl From for JsonForkchoiceUpdatedV1Response { } } -#[superstruct( - variants(V1, V2), - variant_attributes( - derive(Clone, Debug, Serialize, Deserialize), - serde(bound = "E: EthSpec", rename_all = "camelCase"), - ), - partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant") -)] -#[derive(Clone, Debug, Serialize)] -#[serde(bound = "E: EthSpec", rename_all = "camelCase", untagged)] -pub struct JsonExecutionPayloadBody { +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(bound = "E: EthSpec")] +pub struct JsonExecutionPayloadBodyV1 { #[serde(with = "ssz_types::serde_utils::list_of_hex_var_list")] pub 
transactions: Transactions, pub withdrawals: Option>, - #[superstruct(only(V2))] - pub deposit_requests: Option>, - #[superstruct(only(V2))] - pub withdrawal_requests: - Option>, - #[superstruct(only(V2))] - pub consolidation_requests: - Option>, +} + +impl From> for ExecutionPayloadBodyV1 { + fn from(value: JsonExecutionPayloadBodyV1) -> Self { + Self { + transactions: value.transactions, + withdrawals: value.withdrawals.map(|json_withdrawals| { + Withdrawals::::from( + json_withdrawals + .into_iter() + .map(Into::into) + .collect::>(), + ) + }), + } + } } impl From> for JsonExecutionPayloadBodyV1 { fn from(value: ExecutionPayloadBodyV1) -> Self { Self { transactions: value.transactions, - withdrawals: value.withdrawals.map(|json_withdrawals| { - VariableList::from( - json_withdrawals - .into_iter() - .map(Into::into) - .collect::>(), - ) - }), - } - } -} - -impl From> for JsonExecutionPayloadBodyV2 { - fn from(value: ExecutionPayloadBodyV2) -> Self { - Self { - transactions: value.transactions, - withdrawals: value.withdrawals.map(|json_withdrawals| { - VariableList::from( - json_withdrawals - .into_iter() - .map(Into::into) - .collect::>(), - ) - }), - deposit_requests: value.deposit_requests.map(|receipts| { - VariableList::from(receipts.into_iter().map(Into::into).collect::>()) - }), - withdrawal_requests: value.withdrawal_requests.map(|withdrawal_requests| { - VariableList::from( - withdrawal_requests - .into_iter() - .map(Into::into) - .collect::>(), - ) - }), - consolidation_requests: value.consolidation_requests.map(|consolidation_requests| { - VariableList::from( - consolidation_requests - .into_iter() - .map(Into::into) - .collect::>(), - ) - }), - } - } -} - -impl From> for ExecutionPayloadBody { - fn from(value: JsonExecutionPayloadBody) -> Self { - match value { - JsonExecutionPayloadBody::V1(body_v1) => Self::V1(ExecutionPayloadBodyV1 { - transactions: body_v1.transactions, - withdrawals: body_v1.withdrawals.map(|json_withdrawals| { - 
Withdrawals::::from( - json_withdrawals - .into_iter() - .map(Into::into) - .collect::>(), - ) - }), - }), - JsonExecutionPayloadBody::V2(body_v2) => Self::V2(ExecutionPayloadBodyV2 { - transactions: body_v2.transactions, - withdrawals: body_v2.withdrawals.map(|json_withdrawals| { - Withdrawals::::from( - json_withdrawals - .into_iter() - .map(Into::into) - .collect::>(), - ) - }), - deposit_requests: body_v2.deposit_requests.map(|json_receipts| { - DepositRequests::::from( - json_receipts - .into_iter() - .map(Into::into) - .collect::>(), - ) - }), - withdrawal_requests: body_v2.withdrawal_requests.map(|json_withdrawal_requests| { - WithdrawalRequests::::from( - json_withdrawal_requests - .into_iter() - .map(Into::into) - .collect::>(), - ) - }), - consolidation_requests: body_v2.consolidation_requests, + withdrawals: value.withdrawals.map(|withdrawals| { + VariableList::from(withdrawals.into_iter().map(Into::into).collect::>()) }), } } @@ -950,96 +891,3 @@ impl TryFrom for ClientVersionV1 { }) } } - -#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] -#[serde(rename_all = "camelCase")] -pub struct JsonDepositRequest { - pub pubkey: PublicKeyBytes, - pub withdrawal_credentials: Hash256, - #[serde(with = "serde_utils::u64_hex_be")] - pub amount: u64, - pub signature: Signature, - #[serde(with = "serde_utils::u64_hex_be")] - pub index: u64, -} - -impl From for JsonDepositRequest { - fn from(deposit: DepositRequest) -> Self { - Self { - pubkey: deposit.pubkey, - withdrawal_credentials: deposit.withdrawal_credentials, - amount: deposit.amount, - signature: deposit.signature, - index: deposit.index, - } - } -} - -impl From for DepositRequest { - fn from(json_deposit: JsonDepositRequest) -> Self { - Self { - pubkey: json_deposit.pubkey, - withdrawal_credentials: json_deposit.withdrawal_credentials, - amount: json_deposit.amount, - signature: json_deposit.signature, - index: json_deposit.index, - } - } -} - -#[derive(Debug, Clone, Serialize, Deserialize, 
PartialEq)] -#[serde(rename_all = "camelCase")] -pub struct JsonWithdrawalRequest { - pub source_address: Address, - pub validator_pubkey: PublicKeyBytes, - #[serde(with = "serde_utils::u64_hex_be")] - pub amount: u64, -} - -impl From for JsonWithdrawalRequest { - fn from(withdrawal_request: WithdrawalRequest) -> Self { - Self { - source_address: withdrawal_request.source_address, - validator_pubkey: withdrawal_request.validator_pubkey, - amount: withdrawal_request.amount, - } - } -} - -impl From for WithdrawalRequest { - fn from(json_withdrawal_request: JsonWithdrawalRequest) -> Self { - Self { - source_address: json_withdrawal_request.source_address, - validator_pubkey: json_withdrawal_request.validator_pubkey, - amount: json_withdrawal_request.amount, - } - } -} - -#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] -#[serde(rename_all = "camelCase")] -pub struct JsonConsolidationRequest { - pub source_address: Address, - pub source_pubkey: PublicKeyBytes, - pub target_pubkey: PublicKeyBytes, -} - -impl From for JsonConsolidationRequest { - fn from(consolidation_request: ConsolidationRequest) -> Self { - Self { - source_address: consolidation_request.source_address, - source_pubkey: consolidation_request.source_pubkey, - target_pubkey: consolidation_request.target_pubkey, - } - } -} - -impl From for ConsolidationRequest { - fn from(json_consolidation_request: JsonConsolidationRequest) -> Self { - Self { - source_address: json_consolidation_request.source_address, - source_pubkey: json_consolidation_request.source_pubkey, - target_pubkey: json_consolidation_request.target_pubkey, - } - } -} diff --git a/beacon_node/execution_layer/src/engine_api/new_payload_request.rs b/beacon_node/execution_layer/src/engine_api/new_payload_request.rs index 8d2e3d5ad0..318779b7f3 100644 --- a/beacon_node/execution_layer/src/engine_api/new_payload_request.rs +++ b/beacon_node/execution_layer/src/engine_api/new_payload_request.rs @@ -9,7 +9,7 @@ use types::{ }; use types::{ 
ExecutionPayloadBellatrix, ExecutionPayloadCapella, ExecutionPayloadDeneb, - ExecutionPayloadElectra, + ExecutionPayloadElectra, ExecutionRequests, }; #[superstruct( @@ -43,6 +43,8 @@ pub struct NewPayloadRequest<'block, E: EthSpec> { pub versioned_hashes: Vec, #[superstruct(only(Deneb, Electra))] pub parent_beacon_block_root: Hash256, + #[superstruct(only(Electra))] + pub execution_requests_list: &'block ExecutionRequests, } impl<'block, E: EthSpec> NewPayloadRequest<'block, E> { @@ -183,6 +185,7 @@ impl<'a, E: EthSpec> TryFrom> for NewPayloadRequest<'a, E> .map(kzg_commitment_to_versioned_hash) .collect(), parent_beacon_block_root: block_ref.parent_root, + execution_requests_list: &block_ref.body.execution_requests, })), } } diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 648963a320..f7e490233f 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -48,7 +48,8 @@ use types::builder_bid::BuilderBid; use types::non_zero_usize::new_non_zero_usize; use types::payload::BlockProductionVersion; use types::{ - AbstractExecPayload, BlobsList, ExecutionPayloadDeneb, KzgProofs, SignedBlindedBeaconBlock, + AbstractExecPayload, BlobsList, ExecutionPayloadDeneb, ExecutionRequests, KzgProofs, + SignedBlindedBeaconBlock, }; use types::{ BeaconStateError, BlindedPayload, ChainSpec, Epoch, ExecPayload, ExecutionPayloadBellatrix, @@ -112,12 +113,15 @@ impl TryFrom> for ProvenancedPayload BlockProposalContents::PayloadAndBlobs { payload: ExecutionPayloadHeader::Electra(builder_bid.header).into(), block_value: builder_bid.value, kzg_commitments: builder_bid.blob_kzg_commitments, blobs_and_proofs: None, + // TODO(electra): update this with builder api returning the requests + requests: None, }, }; Ok(ProvenancedPayload::Builder( @@ -194,6 +198,8 @@ pub enum BlockProposalContents> { kzg_commitments: KzgCommitments, /// `None` for blinded `PayloadAndBlobs`. 
blobs_and_proofs: Option<(BlobsList, KzgProofs)>, + // TODO(electra): this should probably be a separate variant/superstruct + requests: Option>, }, } @@ -214,11 +220,13 @@ impl From>> block_value, kzg_commitments, blobs_and_proofs: _, + requests, } => BlockProposalContents::PayloadAndBlobs { payload: payload.execution_payload().into(), block_value, kzg_commitments, blobs_and_proofs: None, + requests, }, } } @@ -230,13 +238,14 @@ impl> TryFrom> type Error = Error; fn try_from(response: GetPayloadResponse) -> Result { - let (execution_payload, block_value, maybe_bundle) = response.into(); + let (execution_payload, block_value, maybe_bundle, maybe_requests) = response.into(); match maybe_bundle { Some(bundle) => Ok(Self::PayloadAndBlobs { payload: execution_payload.into(), block_value, kzg_commitments: bundle.commitments, blobs_and_proofs: Some((bundle.blobs, bundle.proofs)), + requests: maybe_requests, }), None => Ok(Self::Payload { payload: execution_payload.into(), @@ -265,22 +274,25 @@ impl> BlockProposalContents>, Option<(BlobsList, KzgProofs)>, + Option>, Uint256, ) { match self { Self::Payload { payload, block_value, - } => (payload, None, None, block_value), + } => (payload, None, None, None, block_value), Self::PayloadAndBlobs { payload, block_value, kzg_commitments, blobs_and_proofs, + requests, } => ( payload, Some(kzg_commitments), blobs_and_proofs, + requests, block_value, ), } @@ -1772,10 +1784,10 @@ impl ExecutionLayer { pub async fn get_payload_bodies_by_hash( &self, hashes: Vec, - ) -> Result>>, Error> { + ) -> Result>>, Error> { self.engine() .request(|engine: &Engine| async move { - engine.api.get_payload_bodies_by_hash(hashes).await + engine.api.get_payload_bodies_by_hash_v1(hashes).await }) .await .map_err(Box::new) @@ -1786,11 +1798,14 @@ impl ExecutionLayer { &self, start: u64, count: u64, - ) -> Result>>, Error> { + ) -> Result>>, Error> { let _timer = metrics::start_timer(&metrics::EXECUTION_LAYER_GET_PAYLOAD_BODIES_BY_RANGE); self.engine() 
.request(|engine: &Engine| async move { - engine.api.get_payload_bodies_by_range(start, count).await + engine + .api + .get_payload_bodies_by_range_v1(start, count) + .await }) .await .map_err(Box::new) @@ -1823,9 +1838,7 @@ impl ExecutionLayer { // Use efficient payload bodies by range method if supported. let capabilities = self.get_engine_capabilities(None).await?; - if capabilities.get_payload_bodies_by_range_v1 - || capabilities.get_payload_bodies_by_range_v2 - { + if capabilities.get_payload_bodies_by_range_v1 { let mut payload_bodies = self.get_payload_bodies_by_range(block_number, 1).await?; if payload_bodies.len() != 1 { diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index 42f594fdf4..4deb91e056 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -652,10 +652,6 @@ impl ExecutionBlockGenerator { withdrawals: pa.withdrawals.clone().into(), blob_gas_used: 0, excess_blob_gas: 0, - // TODO(electra): consider how to test these fields below - deposit_requests: vec![].into(), - withdrawal_requests: vec![].into(), - consolidation_requests: vec![].into(), }), _ => unreachable!(), }, diff --git a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs index f36cb9797d..786ac9ad9c 100644 --- a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs +++ b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs @@ -373,6 +373,8 @@ pub async fn handle_rpc( ))? 
.into(), should_override_builder: false, + // TODO(electra): add EL requests in mock el + requests: Default::default(), }) .unwrap() } @@ -561,60 +563,11 @@ pub async fn handle_rpc( match maybe_payload { Some(payload) => { - assert!( - !payload.fork_name().electra_enabled(), - "payload bodies V1 is not supported for Electra blocks" - ); - let payload_body = ExecutionPayloadBodyV1 { + let payload_body: ExecutionPayloadBodyV1 = ExecutionPayloadBodyV1 { transactions: payload.transactions().clone(), withdrawals: payload.withdrawals().ok().cloned(), }; - let json_payload_body = JsonExecutionPayloadBody::V1( - JsonExecutionPayloadBodyV1::::from(payload_body), - ); - response.push(Some(json_payload_body)); - } - None => response.push(None), - } - } - - Ok(serde_json::to_value(response).unwrap()) - } - ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V2 => { - #[derive(Deserialize)] - #[serde(transparent)] - struct Quantity(#[serde(with = "serde_utils::u64_hex_be")] pub u64); - - let start = get_param::(params, 0) - .map_err(|s| (s, BAD_PARAMS_ERROR_CODE))? - .0; - let count = get_param::(params, 1) - .map_err(|s| (s, BAD_PARAMS_ERROR_CODE))? 
- .0; - - let mut response = vec![]; - for block_num in start..(start + count) { - let maybe_payload = ctx - .execution_block_generator - .read() - .execution_payload_by_number(block_num); - - match maybe_payload { - Some(payload) => { - // TODO(electra): add testing for: - // deposit_requests - // withdrawal_requests - // consolidation_requests - let payload_body = ExecutionPayloadBodyV2 { - transactions: payload.transactions().clone(), - withdrawals: payload.withdrawals().ok().cloned(), - deposit_requests: payload.deposit_requests().ok().cloned(), - withdrawal_requests: payload.withdrawal_requests().ok().cloned(), - consolidation_requests: payload.consolidation_requests().ok().cloned(), - }; - let json_payload_body = JsonExecutionPayloadBody::V2( - JsonExecutionPayloadBodyV2::::from(payload_body), - ); + let json_payload_body = JsonExecutionPayloadBodyV1::from(payload_body); response.push(Some(json_payload_body)); } None => response.push(None), diff --git a/beacon_node/execution_layer/src/test_utils/mock_builder.rs b/beacon_node/execution_layer/src/test_utils/mock_builder.rs index 139ea06918..341daedbc8 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_builder.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_builder.rs @@ -20,9 +20,9 @@ use types::builder_bid::{ }; use types::{ Address, BeaconState, ChainSpec, EthSpec, ExecPayload, ExecutionPayload, - ExecutionPayloadHeaderRefMut, FixedBytesExtended, ForkName, ForkVersionedResponse, Hash256, - PublicKeyBytes, Signature, SignedBlindedBeaconBlock, SignedRoot, - SignedValidatorRegistrationData, Slot, Uint256, + ExecutionPayloadHeaderRefMut, ExecutionRequests, FixedBytesExtended, ForkName, + ForkVersionedResponse, Hash256, PublicKeyBytes, Signature, SignedBlindedBeaconBlock, + SignedRoot, SignedValidatorRegistrationData, Slot, Uint256, }; use types::{ExecutionBlockHash, SecretKey}; use warp::{Filter, Rejection}; @@ -542,10 +542,12 @@ pub fn serve( let mut message = match payload_response_type { 
crate::GetPayloadResponseType::Full(payload_response) => { - let (payload, _block_value, maybe_blobs_bundle): ( + #[allow(clippy::type_complexity)] + let (payload, _block_value, maybe_blobs_bundle, _maybe_requests): ( ExecutionPayload, Uint256, Option>, + Option>, ) = payload_response.into(); match fork { @@ -593,10 +595,12 @@ pub fn serve( } } crate::GetPayloadResponseType::Blinded(payload_response) => { - let (payload, _block_value, maybe_blobs_bundle): ( + #[allow(clippy::type_complexity)] + let (payload, _block_value, maybe_blobs_bundle, _maybe_requests): ( ExecutionPayload, Uint256, Option>, + Option>, ) = payload_response.into(); match fork { ForkName::Electra => BuilderBid::Electra(BuilderBidElectra { diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs index fe847ec3ed..be99b38054 100644 --- a/beacon_node/execution_layer/src/test_utils/mod.rs +++ b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -47,9 +47,7 @@ pub const DEFAULT_ENGINE_CAPABILITIES: EngineCapabilities = EngineCapabilities { forkchoice_updated_v2: true, forkchoice_updated_v3: true, get_payload_bodies_by_hash_v1: true, - get_payload_bodies_by_hash_v2: true, get_payload_bodies_by_range_v1: true, - get_payload_bodies_by_range_v2: true, get_payload_v1: true, get_payload_v2: true, get_payload_v3: true, diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index e7655b453a..f289b6e081 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -579,8 +579,7 @@ pub fn get_expected_withdrawals( .get_execution_withdrawal_address(spec) .ok_or(BlockProcessingError::WithdrawalCredentialsInvalid)?, amount: balance.safe_sub( - validator - .get_validator_max_effective_balance(spec, state.fork_name_unchecked()), + validator.get_max_effective_balance(spec, state.fork_name_unchecked()), )?, }); 
withdrawal_index.safe_add_assign(1)?; diff --git a/consensus/state_processing/src/per_block_processing/process_operations.rs b/consensus/state_processing/src/per_block_processing/process_operations.rs index 74166f6713..fb1c5c7eee 100644 --- a/consensus/state_processing/src/per_block_processing/process_operations.rs +++ b/consensus/state_processing/src/per_block_processing/process_operations.rs @@ -40,15 +40,13 @@ pub fn process_operations>( if state.fork_name_unchecked().electra_enabled() { state.update_pubkey_cache()?; - if let Some(deposit_requests) = block_body.execution_payload()?.deposit_requests()? { - process_deposit_requests(state, &deposit_requests, spec)?; - } - if let Some(withdrawal_requests) = block_body.execution_payload()?.withdrawal_requests()? { - process_withdrawal_requests(state, &withdrawal_requests, spec)?; - } - if let Some(consolidations) = block_body.execution_payload()?.consolidation_requests()? { - process_consolidation_requests(state, &consolidations, spec)?; - } + process_deposit_requests(state, &block_body.execution_requests()?.deposits, spec)?; + process_withdrawal_requests(state, &block_body.execution_requests()?.withdrawals, spec)?; + process_consolidation_requests( + state, + &block_body.execution_requests()?.consolidations, + spec, + )?; } Ok(()) diff --git a/consensus/state_processing/src/per_epoch_processing/single_pass.rs b/consensus/state_processing/src/per_epoch_processing/single_pass.rs index 51f45b87e8..fcb480a37c 100644 --- a/consensus/state_processing/src/per_epoch_processing/single_pass.rs +++ b/consensus/state_processing/src/per_epoch_processing/single_pass.rs @@ -82,6 +82,7 @@ struct RewardsAndPenaltiesContext { struct SlashingsContext { adjusted_total_slashing_balance: u64, target_withdrawable_epoch: Epoch, + penalty_per_effective_balance_increment: u64, } struct PendingBalanceDepositsContext { @@ -775,9 +776,16 @@ impl SlashingsContext { .current_epoch .safe_add(E::EpochsPerSlashingsVector::to_u64().safe_div(2)?)?; + 
let penalty_per_effective_balance_increment = adjusted_total_slashing_balance.safe_div( + state_ctxt + .total_active_balance + .safe_div(spec.effective_balance_increment)?, + )?; + Ok(Self { adjusted_total_slashing_balance, target_withdrawable_epoch, + penalty_per_effective_balance_increment, }) } } @@ -792,14 +800,20 @@ fn process_single_slashing( if validator.slashed && slashings_ctxt.target_withdrawable_epoch == validator.withdrawable_epoch { let increment = spec.effective_balance_increment; - let penalty_numerator = validator - .effective_balance - .safe_div(increment)? - .safe_mul(slashings_ctxt.adjusted_total_slashing_balance)?; - let penalty = penalty_numerator - .safe_div(state_ctxt.total_active_balance)? - .safe_mul(increment)?; - + let penalty = if state_ctxt.fork_name.electra_enabled() { + let effective_balance_increments = validator.effective_balance.safe_div(increment)?; + slashings_ctxt + .penalty_per_effective_balance_increment + .safe_mul(effective_balance_increments)? + } else { + let penalty_numerator = validator + .effective_balance + .safe_div(increment)? + .safe_mul(slashings_ctxt.adjusted_total_slashing_balance)?; + penalty_numerator + .safe_div(state_ctxt.total_active_balance)? + .safe_mul(increment)? + }; *balance.make_mut()? = balance.saturating_sub(penalty); } Ok(()) @@ -1022,8 +1036,7 @@ fn process_single_effective_balance_update( ) -> Result<(), Error> { // Use the higher effective balance limit if post-Electra and compounding withdrawal credentials // are set. - let effective_balance_limit = - validator.get_validator_max_effective_balance(spec, state_ctxt.fork_name); + let effective_balance_limit = validator.get_max_effective_balance(spec, state_ctxt.fork_name); let old_effective_balance = validator.effective_balance; let new_effective_balance = if balance.safe_add(eb_ctxt.downward_threshold)? 
diff --git a/consensus/types/src/beacon_block.rs b/consensus/types/src/beacon_block.rs index 4a6816c024..a298303513 100644 --- a/consensus/types/src/beacon_block.rs +++ b/consensus/types/src/beacon_block.rs @@ -670,6 +670,7 @@ impl> BeaconBlockElectra graffiti: Graffiti::default(), execution_payload: Payload::Electra::default(), blob_kzg_commitments: VariableList::empty(), + execution_requests: ExecutionRequests::default(), }, } } @@ -700,6 +701,7 @@ impl> EmptyBlock for BeaconBlockElec execution_payload: Payload::Electra::default(), bls_to_execution_changes: VariableList::empty(), blob_kzg_commitments: VariableList::empty(), + execution_requests: ExecutionRequests::default(), }, } } diff --git a/consensus/types/src/beacon_block_body.rs b/consensus/types/src/beacon_block_body.rs index 305ef10544..c81e7bcde9 100644 --- a/consensus/types/src/beacon_block_body.rs +++ b/consensus/types/src/beacon_block_body.rs @@ -114,6 +114,8 @@ pub struct BeaconBlockBody = FullPay VariableList, #[superstruct(only(Deneb, Electra))] pub blob_kzg_commitments: KzgCommitments, + #[superstruct(only(Electra))] + pub execution_requests: ExecutionRequests, #[superstruct(only(Base, Altair))] #[metastruct(exclude_from(fields))] #[ssz(skip_serializing, skip_deserializing)] @@ -662,6 +664,7 @@ impl From>> execution_payload: FullPayloadElectra { execution_payload }, bls_to_execution_changes, blob_kzg_commitments, + execution_requests, } = body; ( @@ -680,6 +683,7 @@ impl From>> }, bls_to_execution_changes, blob_kzg_commitments: blob_kzg_commitments.clone(), + execution_requests, }, Some(execution_payload), ) @@ -818,6 +822,7 @@ impl BeaconBlockBodyElectra> { execution_payload: FullPayloadElectra { execution_payload }, bls_to_execution_changes, blob_kzg_commitments, + execution_requests, } = self; BeaconBlockBodyElectra { @@ -835,6 +840,7 @@ impl BeaconBlockBodyElectra> { }, bls_to_execution_changes: bls_to_execution_changes.clone(), blob_kzg_commitments: blob_kzg_commitments.clone(), + 
execution_requests: execution_requests.clone(), } } } diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index a08f6d720c..8eed790a02 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -2131,7 +2131,7 @@ impl BeaconState { let max_effective_balance = self .validators() .get(validator_index) - .map(|validator| validator.get_validator_max_effective_balance(spec, current_fork)) + .map(|validator| validator.get_max_effective_balance(spec, current_fork)) .ok_or(Error::UnknownValidator(validator_index))?; Ok(std::cmp::min( *self diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index 7e933eea19..d8b75260b6 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -26,7 +26,6 @@ pub enum Domain { SyncCommittee, ContributionAndProof, SyncCommitteeSelectionProof, - Consolidation, ApplicationMask(ApplicationDomain), } @@ -111,7 +110,6 @@ pub struct ChainSpec { pub(crate) domain_voluntary_exit: u32, pub(crate) domain_selection_proof: u32, pub(crate) domain_aggregate_and_proof: u32, - pub(crate) domain_consolidation: u32, /* * Fork choice @@ -479,7 +477,6 @@ impl ChainSpec { Domain::SyncCommitteeSelectionProof => self.domain_sync_committee_selection_proof, Domain::ApplicationMask(application_domain) => application_domain.get_domain_constant(), Domain::BlsToExecutionChange => self.domain_bls_to_execution_change, - Domain::Consolidation => self.domain_consolidation, } } @@ -704,7 +701,6 @@ impl ChainSpec { domain_voluntary_exit: 4, domain_selection_proof: 5, domain_aggregate_and_proof: 6, - domain_consolidation: 0x0B, /* * Fork choice @@ -1026,7 +1022,6 @@ impl ChainSpec { domain_voluntary_exit: 4, domain_selection_proof: 5, domain_aggregate_and_proof: 6, - domain_consolidation: 0x0B, /* * Fork choice @@ -1959,7 +1954,6 @@ mod tests { &spec, ); test_domain(Domain::SyncCommittee, spec.domain_sync_committee, &spec); - 
test_domain(Domain::Consolidation, spec.domain_consolidation, &spec); // The builder domain index is zero let builder_domain_pre_mask = [0; 4]; diff --git a/consensus/types/src/config_and_preset.rs b/consensus/types/src/config_and_preset.rs index 110392d4b7..b1e9049b0d 100644 --- a/consensus/types/src/config_and_preset.rs +++ b/consensus/types/src/config_and_preset.rs @@ -126,7 +126,6 @@ pub fn get_extra_fields(spec: &ChainSpec) -> HashMap { "compounding_withdrawal_prefix".to_uppercase() => u8_hex(spec.compounding_withdrawal_prefix_byte), "unset_deposit_requests_start_index".to_uppercase() => spec.unset_deposit_requests_start_index.to_string().into(), "full_exit_request_amount".to_uppercase() => spec.full_exit_request_amount.to_string().into(), - "domain_consolidation".to_uppercase()=> u32_hex(spec.domain_consolidation), } } diff --git a/consensus/types/src/consolidation_request.rs b/consensus/types/src/consolidation_request.rs index b21f34e7bb..e2df0bb972 100644 --- a/consensus/types/src/consolidation_request.rs +++ b/consensus/types/src/consolidation_request.rs @@ -1,5 +1,6 @@ use crate::{test_utils::TestRandom, Address, PublicKeyBytes, SignedRoot}; use serde::{Deserialize, Serialize}; +use ssz::Encode; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; @@ -24,6 +25,18 @@ pub struct ConsolidationRequest { pub target_pubkey: PublicKeyBytes, } +impl ConsolidationRequest { + pub fn max_size() -> usize { + Self { + source_address: Address::repeat_byte(0), + source_pubkey: PublicKeyBytes::empty(), + target_pubkey: PublicKeyBytes::empty(), + } + .as_ssz_bytes() + .len() + } +} + impl SignedRoot for ConsolidationRequest {} #[cfg(test)] diff --git a/consensus/types/src/deposit_request.rs b/consensus/types/src/deposit_request.rs index f6ddf8b63a..7af949fef3 100644 --- a/consensus/types/src/deposit_request.rs +++ b/consensus/types/src/deposit_request.rs @@ -1,6 +1,7 @@ use crate::test_utils::TestRandom; use 
crate::{Hash256, PublicKeyBytes, Signature}; use serde::{Deserialize, Serialize}; +use ssz::Encode; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; @@ -29,6 +30,20 @@ pub struct DepositRequest { pub index: u64, } +impl DepositRequest { + pub fn max_size() -> usize { + Self { + pubkey: PublicKeyBytes::empty(), + withdrawal_credentials: Hash256::ZERO, + amount: 0, + signature: Signature::empty(), + index: 0, + } + .as_ssz_bytes() + .len() + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/consensus/types/src/execution_payload.rs b/consensus/types/src/execution_payload.rs index 4d41d56830..9f16b676a6 100644 --- a/consensus/types/src/execution_payload.rs +++ b/consensus/types/src/execution_payload.rs @@ -13,12 +13,6 @@ pub type Transactions = VariableList< >; pub type Withdrawals = VariableList::MaxWithdrawalsPerPayload>; -pub type DepositRequests = - VariableList::MaxDepositRequestsPerPayload>; -pub type WithdrawalRequests = - VariableList::MaxWithdrawalRequestsPerPayload>; -pub type ConsolidationRequests = - VariableList::MaxConsolidationRequestsPerPayload>; #[superstruct( variants(Bellatrix, Capella, Deneb, Electra), @@ -96,13 +90,6 @@ pub struct ExecutionPayload { #[superstruct(only(Deneb, Electra), partial_getter(copy))] #[serde(with = "serde_utils::quoted_u64")] pub excess_blob_gas: u64, - #[superstruct(only(Electra))] - pub deposit_requests: VariableList, - #[superstruct(only(Electra))] - pub withdrawal_requests: VariableList, - #[superstruct(only(Electra))] - pub consolidation_requests: - VariableList, } impl<'a, E: EthSpec> ExecutionPayloadRef<'a, E> { diff --git a/consensus/types/src/execution_payload_header.rs b/consensus/types/src/execution_payload_header.rs index 90dd8c54e2..e9690435f1 100644 --- a/consensus/types/src/execution_payload_header.rs +++ b/consensus/types/src/execution_payload_header.rs @@ -86,12 +86,6 @@ pub struct ExecutionPayloadHeader { #[superstruct(only(Deneb, Electra), 
partial_getter(copy))] #[serde(with = "serde_utils::quoted_u64")] pub excess_blob_gas: u64, - #[superstruct(only(Electra), partial_getter(copy))] - pub deposit_requests_root: Hash256, - #[superstruct(only(Electra), partial_getter(copy))] - pub withdrawal_requests_root: Hash256, - #[superstruct(only(Electra), partial_getter(copy))] - pub consolidation_requests_root: Hash256, } impl ExecutionPayloadHeader { @@ -214,9 +208,6 @@ impl ExecutionPayloadHeaderDeneb { withdrawals_root: self.withdrawals_root, blob_gas_used: self.blob_gas_used, excess_blob_gas: self.excess_blob_gas, - deposit_requests_root: Hash256::zero(), - withdrawal_requests_root: Hash256::zero(), - consolidation_requests_root: Hash256::zero(), } } } @@ -308,9 +299,6 @@ impl<'a, E: EthSpec> From<&'a ExecutionPayloadElectra> for ExecutionPayloadHe withdrawals_root: payload.withdrawals.tree_hash_root(), blob_gas_used: payload.blob_gas_used, excess_blob_gas: payload.excess_blob_gas, - deposit_requests_root: payload.deposit_requests.tree_hash_root(), - withdrawal_requests_root: payload.withdrawal_requests.tree_hash_root(), - consolidation_requests_root: payload.consolidation_requests.tree_hash_root(), } } } diff --git a/consensus/types/src/execution_requests.rs b/consensus/types/src/execution_requests.rs new file mode 100644 index 0000000000..778260dd84 --- /dev/null +++ b/consensus/types/src/execution_requests.rs @@ -0,0 +1,59 @@ +use crate::test_utils::TestRandom; +use crate::{ConsolidationRequest, DepositRequest, EthSpec, WithdrawalRequest}; +use alloy_primitives::Bytes; +use derivative::Derivative; +use serde::{Deserialize, Serialize}; +use ssz::Encode; +use ssz_derive::{Decode, Encode}; +use ssz_types::VariableList; +use test_random_derive::TestRandom; +use tree_hash_derive::TreeHash; + +pub type DepositRequests = + VariableList::MaxDepositRequestsPerPayload>; +pub type WithdrawalRequests = + VariableList::MaxWithdrawalRequestsPerPayload>; +pub type ConsolidationRequests = + 
VariableList::MaxConsolidationRequestsPerPayload>; + +#[derive( + arbitrary::Arbitrary, + Debug, + Derivative, + Default, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, +)] +#[serde(bound = "E: EthSpec")] +#[arbitrary(bound = "E: EthSpec")] +#[derivative(PartialEq, Eq, Hash(bound = "E: EthSpec"))] +pub struct ExecutionRequests { + pub deposits: DepositRequests, + pub withdrawals: WithdrawalRequests, + pub consolidations: ConsolidationRequests, +} + +impl ExecutionRequests { + /// Returns the encoding according to EIP-7685 to send + /// to the execution layer over the engine api. + pub fn get_execution_requests_list(&self) -> Vec { + let deposit_bytes = Bytes::from(self.deposits.as_ssz_bytes()); + let withdrawal_bytes = Bytes::from(self.withdrawals.as_ssz_bytes()); + let consolidation_bytes = Bytes::from(self.consolidations.as_ssz_bytes()); + vec![deposit_bytes, withdrawal_bytes, consolidation_bytes] + } +} + +#[cfg(test)] +mod tests { + use crate::MainnetEthSpec; + + use super::*; + + ssz_and_tree_hash_tests!(ExecutionRequests); +} diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 281a84d859..e168199b98 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -81,6 +81,7 @@ pub mod slot_epoch_macros; pub mod activation_queue; pub mod config_and_preset; pub mod execution_block_header; +pub mod execution_requests; pub mod fork_context; pub mod participation_flags; pub mod payload; @@ -169,6 +170,7 @@ pub use crate::execution_payload_header::{ ExecutionPayloadHeaderDeneb, ExecutionPayloadHeaderElectra, ExecutionPayloadHeaderRef, ExecutionPayloadHeaderRefMut, }; +pub use crate::execution_requests::ExecutionRequests; pub use crate::fork::Fork; pub use crate::fork_context::ForkContext; pub use crate::fork_data::ForkData; diff --git a/consensus/types/src/payload.rs b/consensus/types/src/payload.rs index cee8b8cc21..80a70c171f 100644 --- a/consensus/types/src/payload.rs +++ 
b/consensus/types/src/payload.rs @@ -39,18 +39,6 @@ pub trait ExecPayload: Debug + Clone + PartialEq + Hash + TreeHash + /// fork-specific fields fn withdrawals_root(&self) -> Result; fn blob_gas_used(&self) -> Result; - fn withdrawal_requests( - &self, - ) -> Result>, Error>; - fn deposit_requests( - &self, - ) -> Result>, Error>; - fn consolidation_requests( - &self, - ) -> Result< - Option>, - Error, - >; /// Is this a default payload with 0x0 roots for transactions and withdrawals? fn is_default_with_zero_roots(&self) -> bool; @@ -290,51 +278,6 @@ impl ExecPayload for FullPayload { } } - fn withdrawal_requests( - &self, - ) -> Result>, Error> - { - match self { - FullPayload::Bellatrix(_) | FullPayload::Capella(_) | FullPayload::Deneb(_) => { - Err(Error::IncorrectStateVariant) - } - FullPayload::Electra(inner) => { - Ok(Some(inner.execution_payload.withdrawal_requests.clone())) - } - } - } - - fn deposit_requests( - &self, - ) -> Result>, Error> { - match self { - FullPayload::Bellatrix(_) | FullPayload::Capella(_) | FullPayload::Deneb(_) => { - Err(Error::IncorrectStateVariant) - } - FullPayload::Electra(inner) => { - Ok(Some(inner.execution_payload.deposit_requests.clone())) - } - } - } - - fn consolidation_requests( - &self, - ) -> Result< - Option< - VariableList::MaxConsolidationRequestsPerPayload>, - >, - Error, - > { - match self { - FullPayload::Bellatrix(_) | FullPayload::Capella(_) | FullPayload::Deneb(_) => { - Err(Error::IncorrectStateVariant) - } - FullPayload::Electra(inner) => { - Ok(Some(inner.execution_payload.consolidation_requests.clone())) - } - } - } - fn is_default_with_zero_roots<'a>(&'a self) -> bool { map_full_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { cons(payload); @@ -467,51 +410,6 @@ impl<'b, E: EthSpec> ExecPayload for FullPayloadRef<'b, E> { } } - fn withdrawal_requests( - &self, - ) -> Result>, Error> - { - match self { - FullPayloadRef::Bellatrix(_) - | FullPayloadRef::Capella(_) - | FullPayloadRef::Deneb(_) => 
Err(Error::IncorrectStateVariant), - FullPayloadRef::Electra(inner) => { - Ok(Some(inner.execution_payload.withdrawal_requests.clone())) - } - } - } - - fn deposit_requests( - &self, - ) -> Result>, Error> { - match self { - FullPayloadRef::Bellatrix(_) - | FullPayloadRef::Capella(_) - | FullPayloadRef::Deneb(_) => Err(Error::IncorrectStateVariant), - FullPayloadRef::Electra(inner) => { - Ok(Some(inner.execution_payload.deposit_requests.clone())) - } - } - } - - fn consolidation_requests( - &self, - ) -> Result< - Option< - VariableList::MaxConsolidationRequestsPerPayload>, - >, - Error, - > { - match self { - FullPayloadRef::Bellatrix(_) - | FullPayloadRef::Capella(_) - | FullPayloadRef::Deneb(_) => Err(Error::IncorrectStateVariant), - FullPayloadRef::Electra(inner) => { - Ok(Some(inner.execution_payload.consolidation_requests.clone())) - } - } - } - fn is_default_with_zero_roots<'a>(&'a self) -> bool { map_full_payload_ref!(&'a _, self, move |payload, cons| { cons(payload); @@ -692,30 +590,6 @@ impl ExecPayload for BlindedPayload { } } - fn withdrawal_requests( - &self, - ) -> Result>, Error> - { - Ok(None) - } - - fn deposit_requests( - &self, - ) -> Result>, Error> { - Ok(None) - } - - fn consolidation_requests( - &self, - ) -> Result< - Option< - VariableList::MaxConsolidationRequestsPerPayload>, - >, - Error, - > { - Ok(None) - } - fn is_default_with_zero_roots(&self) -> bool { self.to_ref().is_default_with_zero_roots() } @@ -817,30 +691,6 @@ impl<'b, E: EthSpec> ExecPayload for BlindedPayloadRef<'b, E> { } } - fn withdrawal_requests( - &self, - ) -> Result>, Error> - { - Ok(None) - } - - fn deposit_requests( - &self, - ) -> Result>, Error> { - Ok(None) - } - - fn consolidation_requests( - &self, - ) -> Result< - Option< - VariableList::MaxConsolidationRequestsPerPayload>, - >, - Error, - > { - Ok(None) - } - fn is_default_with_zero_roots<'a>(&'a self) -> bool { map_blinded_payload_ref!(&'b _, self, move |payload, cons| { cons(payload); @@ -867,10 +717,7 @@ 
macro_rules! impl_exec_payload_common { $is_default_with_empty_roots:block, $f:block, $g:block, - $h:block, - $i:block, - $j:block, - $k:block) => { + $h:block) => { impl ExecPayload for $wrapper_type { fn block_type() -> BlockType { BlockType::$block_type_variant @@ -933,30 +780,6 @@ macro_rules! impl_exec_payload_common { let h = $h; h(self) } - - fn withdrawal_requests( - &self, - ) -> Result< - Option>, - Error, - > { - let i = $i; - i(self) - } - - fn deposit_requests( - &self, - ) -> Result>, Error> { - let j = $j; - j(self) - } - - fn consolidation_requests( - &self, - ) -> Result::MaxConsolidationRequestsPerPayload>>, Error> { - let k = $k; - k(self) - } } impl From<$wrapped_type> for $wrapper_type { @@ -1002,10 +825,7 @@ macro_rules! impl_exec_payload_for_fork { wrapper_ref_type.blob_gas_used() }; c - }, - { |_| { Ok(None) } }, - { |_| { Ok(None) } }, - { |_| { Ok(None) } } + } ); impl TryInto<$wrapper_type_header> for BlindedPayload { @@ -1092,47 +912,6 @@ macro_rules! impl_exec_payload_for_fork { wrapper_ref_type.blob_gas_used() }; c - }, - { - let c: for<'a> fn( - &'a $wrapper_type_full, - ) -> Result< - Option>, - Error, - > = |payload: &$wrapper_type_full| { - let wrapper_ref_type = FullPayloadRef::$fork_variant(&payload); - wrapper_ref_type.withdrawal_requests() - }; - c - }, - { - let c: for<'a> fn( - &'a $wrapper_type_full, - ) -> Result< - Option>, - Error, - > = |payload: &$wrapper_type_full| { - let wrapper_ref_type = FullPayloadRef::$fork_variant(&payload); - wrapper_ref_type.deposit_requests() - }; - c - }, - { - let c: for<'a> fn( - &'a $wrapper_type_full, - ) -> Result< - Option< - VariableList< - ConsolidationRequest, - ::MaxConsolidationRequestsPerPayload, - >, - >, - Error, - > = |payload: &$wrapper_type_full| { - let wrapper_ref_type = FullPayloadRef::$fork_variant(&payload); - wrapper_ref_type.consolidation_requests() - }; - c } ); diff --git a/consensus/types/src/signed_beacon_block.rs b/consensus/types/src/signed_beacon_block.rs index 
4d3279a7f7..b52adcfe41 100644 --- a/consensus/types/src/signed_beacon_block.rs +++ b/consensus/types/src/signed_beacon_block.rs @@ -498,6 +498,7 @@ impl SignedBeaconBlockElectra> { execution_payload: BlindedPayloadElectra { .. }, bls_to_execution_changes, blob_kzg_commitments, + execution_requests, }, }, signature, @@ -521,6 +522,7 @@ impl SignedBeaconBlockElectra> { execution_payload: FullPayloadElectra { execution_payload }, bls_to_execution_changes, blob_kzg_commitments, + execution_requests, }, }, signature, diff --git a/consensus/types/src/validator.rs b/consensus/types/src/validator.rs index 3c6037e23e..298604d4f3 100644 --- a/consensus/types/src/validator.rs +++ b/consensus/types/src/validator.rs @@ -236,7 +236,7 @@ impl Validator { spec: &ChainSpec, current_fork: ForkName, ) -> bool { - let max_effective_balance = self.get_validator_max_effective_balance(spec, current_fork); + let max_effective_balance = self.get_max_effective_balance(spec, current_fork); let has_max_effective_balance = self.effective_balance == max_effective_balance; let has_excess_balance = balance > max_effective_balance; self.has_execution_withdrawal_credential(spec) @@ -251,11 +251,7 @@ impl Validator { } /// Returns the max effective balance for a validator in gwei. 
- pub fn get_validator_max_effective_balance( - &self, - spec: &ChainSpec, - current_fork: ForkName, - ) -> u64 { + pub fn get_max_effective_balance(&self, spec: &ChainSpec, current_fork: ForkName) -> u64 { if current_fork >= ForkName::Electra { if self.has_compounding_withdrawal_credential(spec) { spec.max_effective_balance_electra @@ -273,7 +269,7 @@ impl Validator { spec: &ChainSpec, current_fork: ForkName, ) -> u64 { - let max_effective_balance = self.get_validator_max_effective_balance(spec, current_fork); + let max_effective_balance = self.get_max_effective_balance(spec, current_fork); std::cmp::min(validator_balance, max_effective_balance) } } diff --git a/consensus/types/src/withdrawal_request.rs b/consensus/types/src/withdrawal_request.rs index b6db0efb26..1296426ac0 100644 --- a/consensus/types/src/withdrawal_request.rs +++ b/consensus/types/src/withdrawal_request.rs @@ -1,6 +1,7 @@ use crate::test_utils::TestRandom; use crate::{Address, PublicKeyBytes}; use serde::{Deserialize, Serialize}; +use ssz::Encode; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; @@ -27,6 +28,18 @@ pub struct WithdrawalRequest { pub amount: u64, } +impl WithdrawalRequest { + pub fn max_size() -> usize { + Self { + source_address: Address::repeat_byte(0), + validator_pubkey: PublicKeyBytes::empty(), + amount: 0, + } + .as_ssz_bytes() + .len() + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/testing/ef_tests/Makefile b/testing/ef_tests/Makefile index 0aa5f1d38d..390711079f 100644 --- a/testing/ef_tests/Makefile +++ b/testing/ef_tests/Makefile @@ -1,4 +1,4 @@ -TESTS_TAG := v1.5.0-alpha.5 +TESTS_TAG := v1.5.0-alpha.6 TESTS = general minimal mainnet TARBALLS = $(patsubst %,%-$(TESTS_TAG).tar.gz,$(TESTS)) diff --git a/testing/ef_tests/src/type_name.rs b/testing/ef_tests/src/type_name.rs index 49de073d6a..a9322e5dd5 100644 --- a/testing/ef_tests/src/type_name.rs +++ b/testing/ef_tests/src/type_name.rs @@ -80,6 +80,7 @@ 
type_name_generic!(ExecutionPayloadHeaderBellatrix, "ExecutionPayloadHeader"); type_name_generic!(ExecutionPayloadHeaderCapella, "ExecutionPayloadHeader"); type_name_generic!(ExecutionPayloadHeaderDeneb, "ExecutionPayloadHeader"); type_name_generic!(ExecutionPayloadHeaderElectra, "ExecutionPayloadHeader"); +type_name_generic!(ExecutionRequests); type_name_generic!(BlindedPayload, "ExecutionPayloadHeader"); type_name!(Fork); type_name!(ForkData); diff --git a/testing/ef_tests/tests/tests.rs b/testing/ef_tests/tests/tests.rs index a677736d51..1812a101ca 100644 --- a/testing/ef_tests/tests/tests.rs +++ b/testing/ef_tests/tests/tests.rs @@ -679,6 +679,14 @@ mod ssz_static { SszStaticHandler::::electra_and_later().run(); SszStaticHandler::::electra_and_later().run(); } + + #[test] + fn execution_requests() { + SszStaticHandler::, MainnetEthSpec>::electra_and_later() + .run(); + SszStaticHandler::, MinimalEthSpec>::electra_and_later() + .run(); + } } #[test] From 772929fae27bd9a2978884c7648dc10fecf3d876 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Thu, 17 Oct 2024 00:05:59 +0100 Subject: [PATCH 53/66] Remove generic E from RequestId (#6462) * remove Ethspec from types where it's possible to do so * remove generic E from RequestType --- .../lighthouse_network/src/rpc/codec.rs | 19 +++++++------ .../lighthouse_network/src/rpc/handler.rs | 14 ++++++---- .../lighthouse_network/src/rpc/methods.rs | 28 ++++++++----------- beacon_node/lighthouse_network/src/rpc/mod.rs | 10 +++---- .../lighthouse_network/src/rpc/outbound.rs | 8 ++++-- .../lighthouse_network/src/rpc/protocol.rs | 14 +++++----- .../src/rpc/rate_limiter.rs | 3 +- .../src/rpc/self_limiter.rs | 12 ++++---- .../lighthouse_network/src/service/mod.rs | 6 ++-- .../lighthouse_network/tests/rpc_tests.rs | 1 + .../network_beacon_processor/rpc_methods.rs | 4 +-- .../src/network_beacon_processor/tests.rs | 7 +++-- beacon_node/network/src/router.rs | 8 +++--- beacon_node/network/src/service.rs | 2 +- 
.../network/src/sync/network_context.rs | 1 + 15 files changed, 69 insertions(+), 68 deletions(-) diff --git a/beacon_node/lighthouse_network/src/rpc/codec.rs b/beacon_node/lighthouse_network/src/rpc/codec.rs index 13af04f9b8..17234a27a8 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec.rs @@ -28,7 +28,7 @@ const CONTEXT_BYTES_LEN: usize = 4; /* Inbound Codec */ -pub struct SSZSnappyInboundCodec { +pub struct SSZSnappyInboundCodec { protocol: ProtocolId, inner: Uvi, len: Option, @@ -142,7 +142,7 @@ impl Encoder> for SSZSnappyInboundCodec { // Decoder for inbound streams: Decodes RPC requests from peers impl Decoder for SSZSnappyInboundCodec { - type Item = RequestType; + type Item = RequestType; type Error = RPCError; fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { @@ -194,7 +194,7 @@ impl Decoder for SSZSnappyInboundCodec { } /* Outbound Codec: Codec for initiating RPC requests */ -pub struct SSZSnappyOutboundCodec { +pub struct SSZSnappyOutboundCodec { inner: Uvi, len: Option, protocol: ProtocolId, @@ -321,10 +321,10 @@ impl SSZSnappyOutboundCodec { } // Encoder for outbound streams: Encodes RPC Requests to peers -impl Encoder> for SSZSnappyOutboundCodec { +impl Encoder for SSZSnappyOutboundCodec { type Error = RPCError; - fn encode(&mut self, item: RequestType, dst: &mut BytesMut) -> Result<(), Self::Error> { + fn encode(&mut self, item: RequestType, dst: &mut BytesMut) -> Result<(), Self::Error> { let bytes = match item { RequestType::Status(req) => req.as_ssz_bytes(), RequestType::Goodbye(req) => req.as_ssz_bytes(), @@ -543,11 +543,11 @@ fn handle_length( /// Decodes an `InboundRequest` from the byte stream. /// `decoded_buffer` should be an ssz-encoded bytestream with // length = length-prefix received in the beginning of the stream. 
-fn handle_rpc_request( +fn handle_rpc_request( versioned_protocol: SupportedProtocol, decoded_buffer: &[u8], spec: &ChainSpec, -) -> Result>, RPCError> { +) -> Result, RPCError> { match versioned_protocol { SupportedProtocol::StatusV1 => Ok(Some(RequestType::Status( StatusMessage::from_ssz_bytes(decoded_buffer)?, @@ -1009,6 +1009,7 @@ mod tests { BlobsByRangeRequest { start_slot: 0, count: 10, + max_blobs_per_block: Spec::max_blobs_per_block(), } } @@ -1154,7 +1155,7 @@ mod tests { } /// Verifies that requests we send are encoded in a way that we would correctly decode too. - fn encode_then_decode_request(req: RequestType, fork_name: ForkName, spec: &ChainSpec) { + fn encode_then_decode_request(req: RequestType, fork_name: ForkName, spec: &ChainSpec) { let fork_context = Arc::new(fork_context(fork_name)); let max_packet_size = max_rpc_size(&fork_context, spec.max_chunk_size as usize); let protocol = ProtocolId::new(req.versioned_protocol(), Encoding::SSZSnappy); @@ -1745,7 +1746,7 @@ mod tests { fn test_encode_then_decode_request() { let chain_spec = Spec::default_spec(); - let requests: &[RequestType] = &[ + let requests: &[RequestType] = &[ RequestType::Ping(ping_message()), RequestType::Status(status_message()), RequestType::Goodbye(GoodbyeReason::Fault), diff --git a/beacon_node/lighthouse_network/src/rpc/handler.rs b/beacon_node/lighthouse_network/src/rpc/handler.rs index e76d6d2786..74ccb85dcc 100644 --- a/beacon_node/lighthouse_network/src/rpc/handler.rs +++ b/beacon_node/lighthouse_network/src/rpc/handler.rs @@ -20,6 +20,7 @@ use slog::{crit, debug, trace}; use smallvec::SmallVec; use std::{ collections::{hash_map::Entry, VecDeque}, + marker::PhantomData, pin::Pin, sync::Arc, task::{Context, Poll}, @@ -96,7 +97,7 @@ where events_out: SmallVec<[HandlerEvent; 4]>, /// Queue of outbound substreams to open. 
- dial_queue: SmallVec<[(Id, RequestType); 4]>, + dial_queue: SmallVec<[(Id, RequestType); 4]>, /// Current number of concurrent outbound substreams being opened. dial_negotiated: u32, @@ -206,7 +207,7 @@ pub enum OutboundSubstreamState { /// The framed negotiated substream. substream: Box>, /// Keeps track of the actual request sent. - request: RequestType, + request: RequestType, }, /// Closing an outbound substream> Closing(Box>), @@ -274,7 +275,7 @@ where } /// Opens an outbound substream with a request. - fn send_request(&mut self, id: Id, req: RequestType) { + fn send_request(&mut self, id: Id, req: RequestType) { match self.state { HandlerState::Active => { self.dial_queue.push((id, req)); @@ -330,7 +331,7 @@ where type ToBehaviour = HandlerEvent; type InboundProtocol = RPCProtocol; type OutboundProtocol = OutboundRequestContainer; - type OutboundOpenInfo = (Id, RequestType); // Keep track of the id and the request + type OutboundOpenInfo = (Id, RequestType); // Keep track of the id and the request type InboundOpenInfo = (); fn listen_protocol(&self) -> SubstreamProtocol { @@ -788,6 +789,7 @@ where req: req.clone(), fork_context: self.fork_context.clone(), max_rpc_size: self.listen_protocol().upgrade().max_rpc_size, + phantom: PhantomData, }, (), ) @@ -905,7 +907,7 @@ where fn on_fully_negotiated_outbound( &mut self, substream: OutboundFramed, - (id, request): (Id, RequestType), + (id, request): (Id, RequestType), ) { self.dial_negotiated -= 1; // Reset any io-retries counter. 
@@ -961,7 +963,7 @@ where } fn on_dial_upgrade_error( &mut self, - request_info: (Id, RequestType), + request_info: (Id, RequestType), error: StreamUpgradeError, ) { let (id, req) = request_info; diff --git a/beacon_node/lighthouse_network/src/rpc/methods.rs b/beacon_node/lighthouse_network/src/rpc/methods.rs index dc7d316fb0..e187c9a40f 100644 --- a/beacon_node/lighthouse_network/src/rpc/methods.rs +++ b/beacon_node/lighthouse_network/src/rpc/methods.rs @@ -8,7 +8,6 @@ use ssz_derive::{Decode, Encode}; use ssz_types::{typenum::U256, VariableList}; use std::collections::BTreeMap; use std::fmt::Display; -use std::marker::PhantomData; use std::ops::Deref; use std::sync::Arc; use strum::IntoStaticStr; @@ -93,27 +92,19 @@ pub struct Ping { variant_attributes(derive(Clone, Debug, PartialEq, Serialize),) )] #[derive(Clone, Debug, PartialEq)] -pub struct MetadataRequest { - _phantom_data: PhantomData, -} +pub struct MetadataRequest; -impl MetadataRequest { +impl MetadataRequest { pub fn new_v1() -> Self { - Self::V1(MetadataRequestV1 { - _phantom_data: PhantomData, - }) + Self::V1(MetadataRequestV1 {}) } pub fn new_v2() -> Self { - Self::V2(MetadataRequestV2 { - _phantom_data: PhantomData, - }) + Self::V2(MetadataRequestV2 {}) } pub fn new_v3() -> Self { - Self::V3(MetadataRequestV3 { - _phantom_data: PhantomData, - }) + Self::V3(MetadataRequestV3 {}) } } @@ -323,11 +314,14 @@ pub struct BlobsByRangeRequest { /// The number of slots from the start slot. pub count: u64, + + /// maximum number of blobs in a single block. 
+ pub max_blobs_per_block: usize, } impl BlobsByRangeRequest { - pub fn max_blobs_requested(&self) -> u64 { - self.count.saturating_mul(E::max_blobs_per_block() as u64) + pub fn max_blobs_requested(&self) -> u64 { + self.count.saturating_mul(self.max_blobs_per_block as u64) } } @@ -343,7 +337,7 @@ pub struct DataColumnsByRangeRequest { } impl DataColumnsByRangeRequest { - pub fn max_requested(&self) -> u64 { + pub fn max_requested(&self) -> u64 { self.count.saturating_mul(self.columns.len() as u64) } diff --git a/beacon_node/lighthouse_network/src/rpc/mod.rs b/beacon_node/lighthouse_network/src/rpc/mod.rs index e3b41ea1df..28f2addf86 100644 --- a/beacon_node/lighthouse_network/src/rpc/mod.rs +++ b/beacon_node/lighthouse_network/src/rpc/mod.rs @@ -61,7 +61,7 @@ pub enum RPCSend { /// /// The `Id` is given by the application making the request. These /// go over *outbound* connections. - Request(Id, RequestType), + Request(Id, RequestType), /// A response sent from Lighthouse. /// /// The `SubstreamId` must correspond to the RPC-given ID of the original request received from the @@ -79,7 +79,7 @@ pub enum RPCReceived { /// /// The `SubstreamId` is given by the `RPCHandler` as it identifies this request with the /// *inbound* substream over which it is managed. - Request(Request), + Request(Request), /// A response received from the outside. /// /// The `Id` corresponds to the application given ID of the original request sent to the @@ -113,10 +113,10 @@ impl RequestId { /// An Rpc Request. #[derive(Debug, Clone)] -pub struct Request { +pub struct Request { pub id: RequestId, pub substream_id: SubstreamId, - pub r#type: RequestType, + pub r#type: RequestType, } impl std::fmt::Display for RPCSend { @@ -221,7 +221,7 @@ impl RPC { /// Submits an RPC request. /// /// The peer must be connected for this to succeed. 
- pub fn send_request(&mut self, peer_id: PeerId, request_id: Id, req: RequestType) { + pub fn send_request(&mut self, peer_id: PeerId, request_id: Id, req: RequestType) { let event = if let Some(self_limiter) = self.self_limiter.as_mut() { match self_limiter.allows(peer_id, request_id, req) { Ok(event) => event, diff --git a/beacon_node/lighthouse_network/src/rpc/outbound.rs b/beacon_node/lighthouse_network/src/rpc/outbound.rs index b614313a84..1037139f2f 100644 --- a/beacon_node/lighthouse_network/src/rpc/outbound.rs +++ b/beacon_node/lighthouse_network/src/rpc/outbound.rs @@ -7,6 +7,7 @@ use futures::future::BoxFuture; use futures::prelude::{AsyncRead, AsyncWrite}; use futures::{FutureExt, SinkExt}; use libp2p::core::{OutboundUpgrade, UpgradeInfo}; +use std::marker::PhantomData; use std::sync::Arc; use tokio_util::{ codec::Framed, @@ -19,13 +20,14 @@ use types::{EthSpec, ForkContext}; // `OutboundUpgrade` #[derive(Debug, Clone)] -pub struct OutboundRequestContainer { - pub req: RequestType, +pub struct OutboundRequestContainer { + pub req: RequestType, pub fork_context: Arc, pub max_rpc_size: usize, + pub phantom: PhantomData, } -impl UpgradeInfo for OutboundRequestContainer { +impl UpgradeInfo for OutboundRequestContainer { type Info = ProtocolId; type InfoIter = Vec; diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index 67104fbc29..417c7a3ee5 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -643,7 +643,7 @@ pub fn rpc_data_column_limits() -> RpcLimits { // The inbound protocol reads the request, decodes it and returns the stream to the protocol // handler to respond to once ready. 
-pub type InboundOutput = (RequestType, InboundFramed); +pub type InboundOutput = (RequestType, InboundFramed); pub type InboundFramed = Framed>>>, SSZSnappyInboundCodec>; @@ -711,7 +711,7 @@ where } #[derive(Debug, Clone, PartialEq)] -pub enum RequestType { +pub enum RequestType { Status(StatusMessage), Goodbye(GoodbyeReason), BlocksByRange(OldBlocksByRangeRequest), @@ -724,11 +724,11 @@ pub enum RequestType { LightClientOptimisticUpdate, LightClientFinalityUpdate, Ping(Ping), - MetaData(MetadataRequest), + MetaData(MetadataRequest), } /// Implements the encoding per supported protocol for `RPCRequest`. -impl RequestType { +impl RequestType { /* These functions are used in the handler for stream management */ /// Maximum number of responses expected for this request. @@ -738,10 +738,10 @@ impl RequestType { RequestType::Goodbye(_) => 0, RequestType::BlocksByRange(req) => *req.count(), RequestType::BlocksByRoot(req) => req.block_roots().len() as u64, - RequestType::BlobsByRange(req) => req.max_blobs_requested::(), + RequestType::BlobsByRange(req) => req.max_blobs_requested(), RequestType::BlobsByRoot(req) => req.blob_ids.len() as u64, RequestType::DataColumnsByRoot(req) => req.data_column_ids.len() as u64, - RequestType::DataColumnsByRange(req) => req.max_requested::(), + RequestType::DataColumnsByRange(req) => req.max_requested(), RequestType::Ping(_) => 1, RequestType::MetaData(_) => 1, RequestType::LightClientBootstrap(_) => 1, @@ -973,7 +973,7 @@ impl std::error::Error for RPCError { } } -impl std::fmt::Display for RequestType { +impl std::fmt::Display for RequestType { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { RequestType::Status(status) => write!(f, "Status Message: {}", status), diff --git a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs index a8e8f45b6f..06b246e74a 100644 --- a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs +++ 
b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs @@ -9,7 +9,6 @@ use std::pin::Pin; use std::task::{Context, Poll}; use std::time::{Duration, Instant}; use tokio::time::Interval; -use types::EthSpec; /// Nanoseconds since a given time. // Maintained as u64 to reduce footprint @@ -252,7 +251,7 @@ pub trait RateLimiterItem { fn max_responses(&self) -> u64; } -impl RateLimiterItem for super::RequestType { +impl RateLimiterItem for super::RequestType { fn protocol(&self) -> Protocol { self.versioned_protocol().protocol() } diff --git a/beacon_node/lighthouse_network/src/rpc/self_limiter.rs b/beacon_node/lighthouse_network/src/rpc/self_limiter.rs index e968ad11e3..9c68e0793d 100644 --- a/beacon_node/lighthouse_network/src/rpc/self_limiter.rs +++ b/beacon_node/lighthouse_network/src/rpc/self_limiter.rs @@ -19,8 +19,8 @@ use super::{ /// A request that was rate limited or waiting on rate limited requests for the same peer and /// protocol. -struct QueuedRequest { - req: RequestType, +struct QueuedRequest { + req: RequestType, request_id: Id, } @@ -28,7 +28,7 @@ pub(crate) struct SelfRateLimiter { /// Requests queued for sending per peer. This requests are stored when the self rate /// limiter rejects them. Rate limiting is based on a Peer and Protocol basis, therefore /// are stored in the same way. - delayed_requests: HashMap<(PeerId, Protocol), VecDeque>>, + delayed_requests: HashMap<(PeerId, Protocol), VecDeque>>, /// The delay required to allow a peer's outbound request per protocol. next_peer_request: DelayQueue<(PeerId, Protocol)>, /// Rate limiter for our own requests. @@ -70,7 +70,7 @@ impl SelfRateLimiter { &mut self, peer_id: PeerId, request_id: Id, - req: RequestType, + req: RequestType, ) -> Result, Error> { let protocol = req.versioned_protocol().protocol(); // First check that there are not already other requests waiting to be sent. 
@@ -101,9 +101,9 @@ impl SelfRateLimiter { limiter: &mut RateLimiter, peer_id: PeerId, request_id: Id, - req: RequestType, + req: RequestType, log: &Logger, - ) -> Result, (QueuedRequest, Duration)> { + ) -> Result, (QueuedRequest, Duration)> { match limiter.allows(&peer_id, &req) { Ok(()) => Ok(BehaviourAction::NotifyHandler { peer_id, diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index ea4c3acb42..ff641f666f 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -80,7 +80,7 @@ pub enum NetworkEvent { /// Identifier of the request. All responses to this request must use this id. id: PeerRequestId, /// Request the peer sent. - request: rpc::Request, + request: rpc::Request, }, ResponseReceived { /// Peer that sent the response. @@ -965,7 +965,7 @@ impl Network { &mut self, peer_id: PeerId, request_id: AppRequestId, - request: RequestType, + request: RequestType, ) -> Result<(), (AppRequestId, RPCError)> { // Check if the peer is connected before sending an RPC request if !self.swarm.is_connected(&peer_id) { @@ -1178,7 +1178,7 @@ impl Network { /// Sends a METADATA response to a peer. 
fn send_meta_data_response( &mut self, - _req: MetadataRequest, + _req: MetadataRequest, id: PeerRequestId, request_id: rpc::RequestId, peer_id: PeerId, diff --git a/beacon_node/lighthouse_network/tests/rpc_tests.rs b/beacon_node/lighthouse_network/tests/rpc_tests.rs index f721c8477c..b5125a2d6b 100644 --- a/beacon_node/lighthouse_network/tests/rpc_tests.rs +++ b/beacon_node/lighthouse_network/tests/rpc_tests.rs @@ -327,6 +327,7 @@ fn test_blobs_by_range_chunked_rpc() { let rpc_request = RequestType::BlobsByRange(BlobsByRangeRequest { start_slot: 0, count: slot_count, + max_blobs_per_block: E::max_blobs_per_block(), }); // BlocksByRange Response diff --git a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs index 88a7616ec7..b36c6502a5 100644 --- a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs @@ -793,7 +793,7 @@ impl NetworkBeaconProcessor { ); // Should not send more than max request blocks - if req.max_blobs_requested::() > self.chain.spec.max_request_blob_sidecars { + if req.max_blobs_requested() > self.chain.spec.max_request_blob_sidecars { return Err(( RpcErrorResponse::InvalidRequest, "Request exceeded `MAX_REQUEST_BLOBS_SIDECARS`", @@ -998,7 +998,7 @@ impl NetworkBeaconProcessor { ); // Should not send more than max request data columns - if req.max_requested::() > self.chain.spec.max_request_data_column_sidecars { + if req.max_requested() > self.chain.spec.max_request_data_column_sidecars { return Err(( RpcErrorResponse::InvalidRequest, "Request exceeded `MAX_REQUEST_BLOBS_SIDECARS`", diff --git a/beacon_node/network/src/network_beacon_processor/tests.rs b/beacon_node/network/src/network_beacon_processor/tests.rs index 9d774d97c1..e9805eb5ba 100644 --- a/beacon_node/network/src/network_beacon_processor/tests.rs +++ b/beacon_node/network/src/network_beacon_processor/tests.rs @@ -30,9 +30,9 @@ 
use std::time::Duration; use tokio::sync::mpsc; use types::blob_sidecar::FixedBlobSidecarList; use types::{ - Attestation, AttesterSlashing, BlobSidecar, BlobSidecarList, Epoch, Hash256, MainnetEthSpec, - ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedVoluntaryExit, Slot, - SubnetId, + Attestation, AttesterSlashing, BlobSidecar, BlobSidecarList, Epoch, EthSpec, Hash256, + MainnetEthSpec, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, + SignedVoluntaryExit, Slot, SubnetId, }; type E = MainnetEthSpec; @@ -366,6 +366,7 @@ impl TestRig { BlobsByRangeRequest { start_slot: 0, count, + max_blobs_per_block: E::max_blobs_per_block(), }, ) .unwrap(); diff --git a/beacon_node/network/src/router.rs b/beacon_node/network/src/router.rs index f05cb01fa4..1a0b5b32ae 100644 --- a/beacon_node/network/src/router.rs +++ b/beacon_node/network/src/router.rs @@ -58,7 +58,7 @@ pub enum RouterMessage { RPCRequestReceived { peer_id: PeerId, id: PeerRequestId, - request: rpc::Request, + request: rpc::Request, }, /// An RPC response has been received. RPCResponseReceived { @@ -193,11 +193,11 @@ impl Router { /* RPC - Related functionality */ /// A new RPC request has been received from the network. - fn handle_rpc_request( + fn handle_rpc_request( &mut self, peer_id: PeerId, request_id: PeerRequestId, - rpc_request: rpc::Request, + rpc_request: rpc::Request, ) { if !self.network_globals.peers.read().is_connected(&peer_id) { debug!(self.log, "Dropping request of disconnected peer"; "peer_id" => %peer_id, "request" => ?rpc_request); @@ -824,7 +824,7 @@ impl HandlerNetworkContext { } /// Sends a request to the network task. 
- pub fn send_processor_request(&mut self, peer_id: PeerId, request: RequestType) { + pub fn send_processor_request(&mut self, peer_id: PeerId, request: RequestType) { self.inform_network(NetworkMessage::SendRequest { peer_id, request_id: AppRequestId::Router, diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 5a66cb7f30..269744dc05 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -62,7 +62,7 @@ pub enum NetworkMessage { /// Send an RPC request to the libp2p service. SendRequest { peer_id: PeerId, - request: RequestType, + request: RequestType, request_id: AppRequestId, }, /// Send a successful Response to the libp2p service. diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index 9f9a189817..025003eef7 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -415,6 +415,7 @@ impl SyncNetworkContext { request: RequestType::BlobsByRange(BlobsByRangeRequest { start_slot: *request.start_slot(), count: *request.count(), + max_blobs_per_block: T::EthSpec::max_blobs_per_block(), }), request_id: AppRequestId::Sync(SyncRequestId::RangeBlockAndBlobs { id }), }) From ee7fca3ebd277a58739c7a19046608b7556afc59 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Thu, 17 Oct 2024 15:56:25 +1100 Subject: [PATCH 54/66] Refactor data column reconstruction and avoid blocking processing (#6403) * Move reconstruction logic out of `overflow_lru_cache` to simplify the code and avoids having to pass `DataColumnsToPublish` around and blocking other processing. * Publish reconstructed cells before recomputing head. Remove duplicate functions. 
* Merge branch 'unstable' into non-blocking-reconstruction * Merge branch 'unstable' into non-blocking-reconstruction # Conflicts: # beacon_node/beacon_chain/src/beacon_chain.rs # beacon_node/beacon_chain/src/data_availability_checker.rs # beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs # beacon_node/network/src/network_beacon_processor/sync_methods.rs * Spawn a blocking task for reconstruction. * Merge branch 'unstable' into non-blocking-reconstruction # Conflicts: # beacon_node/network/src/network_beacon_processor/mod.rs * Fix fmt * Merge branch 'unstable' into non-blocking-reconstruction # Conflicts: # beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs * Fix race condition by making check and mutation atomic as suggested by Lion. Also added error handling to reconstruction failure. * Add reconstruction reason metric and more debug logging to da checker. * Add comment and logging. * Rename `NotRequired` to `NotStarted`. * Remove extra character added. 
--- beacon_node/beacon_chain/src/beacon_chain.rs | 136 +++++----- beacon_node/beacon_chain/src/builder.rs | 1 + .../src/data_availability_checker.rs | 126 ++++++++- .../overflow_lru_cache.rs | 242 ++++++++++-------- .../src/data_column_verification.rs | 11 +- beacon_node/beacon_chain/src/metrics.rs | 25 ++ .../gossip_methods.rs | 35 +-- .../src/network_beacon_processor/mod.rs | 77 +++++- .../network_beacon_processor/sync_methods.rs | 47 ++-- 9 files changed, 454 insertions(+), 246 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 5d7d7f0e06..f8dfbc5515 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -22,7 +22,7 @@ pub use crate::canonical_head::CanonicalHead; use crate::chain_config::ChainConfig; use crate::data_availability_checker::{ Availability, AvailabilityCheckError, AvailableBlock, DataAvailabilityChecker, - DataColumnsToPublish, + DataColumnReconstructionResult, }; use crate::data_column_verification::{GossipDataColumnError, GossipVerifiedDataColumn}; use crate::early_attester_cache::EarlyAttesterCache; @@ -3015,13 +3015,7 @@ impl BeaconChain { self: &Arc, data_columns: Vec>, publish_fn: impl FnOnce() -> Result<(), BlockError>, - ) -> Result< - ( - AvailabilityProcessingStatus, - DataColumnsToPublish, - ), - BlockError, - > { + ) -> Result { let Ok((slot, block_root)) = data_columns .iter() .map(|c| (c.slot(), c.block_root())) @@ -3051,7 +3045,7 @@ impl BeaconChain { publish_fn, ) .await; - self.remove_notified_custody_columns(&block_root, r) + self.remove_notified(&block_root, r) } /// Cache the blobs in the processing cache, process it, then evict it from the cache if it was @@ -3110,13 +3104,7 @@ impl BeaconChain { pub async fn process_rpc_custody_columns( self: &Arc, custody_columns: DataColumnSidecarList, - ) -> Result< - ( - AvailabilityProcessingStatus, - DataColumnsToPublish, - ), - BlockError, - > { + ) -> Result 
{ let Ok((slot, block_root)) = custody_columns .iter() .map(|c| (c.slot(), c.block_root())) @@ -3154,7 +3142,67 @@ impl BeaconChain { let r = self .check_rpc_custody_columns_availability_and_import(slot, block_root, custody_columns) .await; - self.remove_notified_custody_columns(&block_root, r) + self.remove_notified(&block_root, r) + } + + pub async fn reconstruct_data_columns( + self: &Arc, + block_root: Hash256, + ) -> Result< + Option<( + AvailabilityProcessingStatus, + DataColumnSidecarList, + )>, + BlockError, + > { + // As of now we only reconstruct data columns on supernodes, so if the block is already + // available on a supernode, there's no need to reconstruct as the node must already have + // all columns. + if self + .canonical_head + .fork_choice_read_lock() + .contains_block(&block_root) + { + return Ok(None); + } + + let data_availability_checker = self.data_availability_checker.clone(); + + let result = self + .task_executor + .spawn_blocking_handle( + move || data_availability_checker.reconstruct_data_columns(&block_root), + "reconstruct_data_columns", + ) + .ok_or(BeaconChainError::RuntimeShutdown)? + .await + .map_err(BeaconChainError::TokioJoin)??; + + match result { + DataColumnReconstructionResult::Success((availability, data_columns_to_publish)) => { + let Some(slot) = data_columns_to_publish.first().map(|d| d.slot()) else { + // This should be unreachable because empty result would return `RecoveredColumnsNotImported` instead of success. + return Ok(None); + }; + + let r = self + .process_availability(slot, availability, || Ok(())) + .await; + self.remove_notified(&block_root, r) + .map(|availability_processing_status| { + Some((availability_processing_status, data_columns_to_publish)) + }) + } + DataColumnReconstructionResult::NotStarted(reason) + | DataColumnReconstructionResult::RecoveredColumnsNotImported(reason) => { + // We use metric here because logging this would be *very* noisy. 
+ metrics::inc_counter_vec( + &metrics::KZG_DATA_COLUMN_RECONSTRUCTION_INCOMPLETE_TOTAL, + &[reason], + ); + Ok(None) + } + } } /// Remove any block components from the *processing cache* if we no longer require them. If the @@ -3172,23 +3220,6 @@ impl BeaconChain { r } - /// Remove any block components from the *processing cache* if we no longer require them. If the - /// block was imported full or erred, we no longer require them. - fn remove_notified_custody_columns

( - &self, - block_root: &Hash256, - r: Result<(AvailabilityProcessingStatus, P), BlockError>, - ) -> Result<(AvailabilityProcessingStatus, P), BlockError> { - let has_missing_components = matches!( - r, - Ok((AvailabilityProcessingStatus::MissingComponents(_, _), _)) - ); - if !has_missing_components { - self.reqresp_pre_import_cache.write().remove(block_root); - } - r - } - /// Wraps `process_block` in logic to cache the block's commitments in the processing cache /// and evict if the block was imported or errored. pub async fn process_block_with_early_caching>( @@ -3444,26 +3475,21 @@ impl BeaconChain { block_root: Hash256, data_columns: Vec>, publish_fn: impl FnOnce() -> Result<(), BlockError>, - ) -> Result< - ( - AvailabilityProcessingStatus, - DataColumnsToPublish, - ), - BlockError, - > { + ) -> Result { if let Some(slasher) = self.slasher.as_ref() { for data_colum in &data_columns { slasher.accept_block_header(data_colum.signed_block_header()); } } - let (availability, data_columns_to_publish) = self - .data_availability_checker - .put_gossip_data_columns(slot, block_root, data_columns)?; + let availability = self.data_availability_checker.put_gossip_data_columns( + slot, + block_root, + data_columns, + )?; self.process_availability(slot, availability, publish_fn) .await - .map(|result| (result, data_columns_to_publish)) } /// Checks if the provided blobs can make any cached blocks available, and imports immediately @@ -3513,13 +3539,7 @@ impl BeaconChain { slot: Slot, block_root: Hash256, custody_columns: DataColumnSidecarList, - ) -> Result< - ( - AvailabilityProcessingStatus, - DataColumnsToPublish, - ), - BlockError, - > { + ) -> Result { // Need to scope this to ensure the lock is dropped before calling `process_availability` // Even an explicit drop is not enough to convince the borrow checker. 
{ @@ -3544,16 +3564,14 @@ impl BeaconChain { // This slot value is purely informative for the consumers of // `AvailabilityProcessingStatus::MissingComponents` to log an error with a slot. - let (availability, data_columns_to_publish) = - self.data_availability_checker.put_rpc_custody_columns( - block_root, - slot.epoch(T::EthSpec::slots_per_epoch()), - custody_columns, - )?; + let availability = self.data_availability_checker.put_rpc_custody_columns( + block_root, + slot.epoch(T::EthSpec::slots_per_epoch()), + custody_columns, + )?; self.process_availability(slot, availability, || Ok(())) .await - .map(|result| (result, data_columns_to_publish)) } /// Imports a fully available block. Otherwise, returns `AvailabilityProcessingStatus::MissingComponents` diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 001dbf0080..5f1e94fc8c 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -984,6 +984,7 @@ where store, self.import_all_data_columns, self.spec, + log.new(o!("service" => "data_availability_checker")), ) .map_err(|e| format!("Error initializing DataAvailabilityChecker: {:?}", e))?, ), diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs index 395f40c5db..047764d705 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -2,10 +2,12 @@ use crate::blob_verification::{verify_kzg_for_blob_list, GossipVerifiedBlob, Kzg use crate::block_verification_types::{ AvailabilityPendingExecutedBlock, AvailableExecutedBlock, RpcBlock, }; -use crate::data_availability_checker::overflow_lru_cache::DataAvailabilityCheckerInner; -use crate::{BeaconChain, BeaconChainTypes, BeaconStore}; +use crate::data_availability_checker::overflow_lru_cache::{ + DataAvailabilityCheckerInner, ReconstructColumnsDecision, +}; +use crate::{metrics, 
BeaconChain, BeaconChainTypes, BeaconStore}; use kzg::Kzg; -use slog::{debug, error}; +use slog::{debug, error, Logger}; use slot_clock::SlotClock; use std::fmt; use std::fmt::Debug; @@ -27,11 +29,12 @@ use crate::data_column_verification::{ verify_kzg_for_data_column, verify_kzg_for_data_column_list, CustodyDataColumn, GossipVerifiedDataColumn, KzgVerifiedCustodyDataColumn, KzgVerifiedDataColumn, }; +use crate::metrics::{ + KZG_DATA_COLUMN_RECONSTRUCTION_ATTEMPTS, KZG_DATA_COLUMN_RECONSTRUCTION_FAILURES, +}; pub use error::{Error as AvailabilityCheckError, ErrorCategory as AvailabilityCheckErrorCategory}; use types::non_zero_usize::new_non_zero_usize; -pub use self::overflow_lru_cache::DataColumnsToPublish; - /// The LRU Cache stores `PendingComponents` which can store up to /// `MAX_BLOBS_PER_BLOCK = 6` blobs each. A `BlobSidecar` is 0.131256 MB. So /// the maximum size of a `PendingComponents` is ~ 0.787536 MB. Setting this @@ -71,6 +74,16 @@ pub struct DataAvailabilityChecker { slot_clock: T::SlotClock, kzg: Arc, spec: Arc, + log: Logger, +} + +pub type AvailabilityAndReconstructedColumns = (Availability, DataColumnSidecarList); + +#[derive(Debug)] +pub enum DataColumnReconstructionResult { + Success(AvailabilityAndReconstructedColumns), + NotStarted(&'static str), + RecoveredColumnsNotImported(&'static str), } /// This type is returned after adding a block / blob to the `DataAvailabilityChecker`. 
@@ -101,6 +114,7 @@ impl DataAvailabilityChecker { store: BeaconStore, import_all_data_columns: bool, spec: Arc, + log: Logger, ) -> Result { let custody_subnet_count = if import_all_data_columns { spec.data_column_sidecar_subnet_count as usize @@ -124,6 +138,7 @@ impl DataAvailabilityChecker { slot_clock, kzg, spec, + log, }) } @@ -205,7 +220,7 @@ impl DataAvailabilityChecker { .map_err(AvailabilityCheckError::InvalidBlobs)?; self.availability_cache - .put_kzg_verified_blobs(block_root, epoch, verified_blobs) + .put_kzg_verified_blobs(block_root, epoch, verified_blobs, &self.log) } /// Put a list of custody columns received via RPC into the availability cache. This performs KZG @@ -216,8 +231,7 @@ impl DataAvailabilityChecker { block_root: Hash256, epoch: Epoch, custody_columns: DataColumnSidecarList, - ) -> Result<(Availability, DataColumnsToPublish), AvailabilityCheckError> - { + ) -> Result, AvailabilityCheckError> { // TODO(das): report which column is invalid for proper peer scoring // TODO(das): batch KZG verification here, but fallback into checking each column // individually to report which column(s) are invalid. 
@@ -233,10 +247,10 @@ impl DataAvailabilityChecker { .collect::, AvailabilityCheckError>>()?; self.availability_cache.put_kzg_verified_data_columns( - &self.kzg, block_root, epoch, verified_custody_columns, + &self.log, ) } @@ -253,6 +267,7 @@ impl DataAvailabilityChecker { gossip_blob.block_root(), gossip_blob.epoch(), vec![gossip_blob.into_inner()], + &self.log, ) } @@ -267,8 +282,7 @@ impl DataAvailabilityChecker { slot: Slot, block_root: Hash256, gossip_data_columns: Vec>, - ) -> Result<(Availability, DataColumnsToPublish), AvailabilityCheckError> - { + ) -> Result, AvailabilityCheckError> { let epoch = slot.epoch(T::EthSpec::slots_per_epoch()); let custody_columns = gossip_data_columns @@ -277,10 +291,10 @@ impl DataAvailabilityChecker { .collect::>(); self.availability_cache.put_kzg_verified_data_columns( - &self.kzg, block_root, epoch, custody_columns, + &self.log, ) } @@ -291,7 +305,7 @@ impl DataAvailabilityChecker { executed_block: AvailabilityPendingExecutedBlock, ) -> Result, AvailabilityCheckError> { self.availability_cache - .put_pending_executed_block(executed_block) + .put_pending_executed_block(executed_block, &self.log) } pub fn remove_pending_components(&self, block_root: Hash256) { @@ -511,6 +525,92 @@ impl DataAvailabilityChecker { block_cache_size: self.availability_cache.block_cache_size(), } } + + pub fn reconstruct_data_columns( + &self, + block_root: &Hash256, + ) -> Result, AvailabilityCheckError> { + let pending_components = match self + .availability_cache + .check_and_set_reconstruction_started(block_root) + { + ReconstructColumnsDecision::Yes(pending_components) => pending_components, + ReconstructColumnsDecision::No(reason) => { + return Ok(DataColumnReconstructionResult::NotStarted(reason)); + } + }; + + metrics::inc_counter(&KZG_DATA_COLUMN_RECONSTRUCTION_ATTEMPTS); + let timer = metrics::start_timer(&metrics::DATA_AVAILABILITY_RECONSTRUCTION_TIME); + + let all_data_columns = KzgVerifiedCustodyDataColumn::reconstruct_columns( + 
&self.kzg, + &pending_components.verified_data_columns, + &self.spec, + ) + .map_err(|e| { + error!( + self.log, + "Error reconstructing data columns"; + "block_root" => ?block_root, + "error" => ?e + ); + self.availability_cache + .handle_reconstruction_failure(block_root); + metrics::inc_counter(&KZG_DATA_COLUMN_RECONSTRUCTION_FAILURES); + AvailabilityCheckError::ReconstructColumnsError(e) + })?; + + // Check indices from cache again to make sure we don't publish components we've already received. + let Some(existing_column_indices) = self.cached_data_column_indexes(block_root) else { + return Ok(DataColumnReconstructionResult::RecoveredColumnsNotImported( + "block already imported", + )); + }; + + let data_columns_to_publish = all_data_columns + .into_iter() + .filter(|d| !existing_column_indices.contains(&d.index())) + .collect::>(); + + let Some(slot) = data_columns_to_publish + .first() + .map(|d| d.as_data_column().slot()) + else { + return Ok(DataColumnReconstructionResult::RecoveredColumnsNotImported( + "No new columns to import and publish", + )); + }; + + metrics::stop_timer(timer); + metrics::inc_counter_by( + &metrics::DATA_AVAILABILITY_RECONSTRUCTED_COLUMNS, + data_columns_to_publish.len() as u64, + ); + + debug!(self.log, "Reconstructed columns"; + "count" => data_columns_to_publish.len(), + "block_root" => ?block_root, + "slot" => slot, + ); + + self.availability_cache + .put_kzg_verified_data_columns( + *block_root, + slot.epoch(T::EthSpec::slots_per_epoch()), + data_columns_to_publish.clone(), + &self.log, + ) + .map(|availability| { + DataColumnReconstructionResult::Success(( + availability, + data_columns_to_publish + .into_iter() + .map(|d| d.clone_arc()) + .collect::>(), + )) + }) + } } /// Helper struct to group data availability checker metrics. 
diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index 8f91bf34fc..6d4636e8ed 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -6,23 +6,19 @@ use crate::block_verification_types::{ }; use crate::data_availability_checker::{Availability, AvailabilityCheckError}; use crate::data_column_verification::KzgVerifiedCustodyDataColumn; -use crate::metrics; use crate::BeaconChainTypes; -use kzg::Kzg; use lru::LruCache; use parking_lot::RwLock; +use slog::{debug, Logger}; use ssz_types::{FixedVector, VariableList}; -use std::collections::HashSet; use std::num::NonZeroUsize; use std::sync::Arc; use types::blob_sidecar::BlobIdentifier; use types::{ - BlobSidecar, ChainSpec, ColumnIndex, DataColumnIdentifier, DataColumnSidecar, - DataColumnSidecarList, Epoch, EthSpec, Hash256, SignedBeaconBlock, + BlobSidecar, ChainSpec, ColumnIndex, DataColumnIdentifier, DataColumnSidecar, Epoch, EthSpec, + Hash256, SignedBeaconBlock, }; -pub type DataColumnsToPublish = Option>; - /// This represents the components of a partially available block /// /// The blobs are all gossip and kzg verified. @@ -95,7 +91,7 @@ impl PendingComponents { /// block. /// /// This corresponds to the number of commitments that are present in a block. - pub fn num_expected_blobs(&self) -> Option { + pub fn block_kzg_commitments_count(&self) -> Option { self.get_cached_block() .as_ref() .map(|b| b.get_commitments().len()) @@ -203,21 +199,61 @@ impl PendingComponents { /// /// Returns `true` if both the block exists and the number of received blobs / custody columns /// matches the number of expected blobs / custody columns. 
- pub fn is_available(&self, block_import_requirement: &BlockImportRequirement) -> bool { + pub fn is_available( + &self, + block_import_requirement: &BlockImportRequirement, + log: &Logger, + ) -> bool { + let block_kzg_commitments_count_opt = self.block_kzg_commitments_count(); + match block_import_requirement { - BlockImportRequirement::AllBlobs => self - .num_expected_blobs() - .map_or(false, |num_expected_blobs| { - num_expected_blobs == self.num_received_blobs() - }), + BlockImportRequirement::AllBlobs => { + let received_blobs = self.num_received_blobs(); + let expected_blobs_msg = block_kzg_commitments_count_opt + .as_ref() + .map(|num| num.to_string()) + .unwrap_or("unknown".to_string()); + + debug!(log, + "Component(s) added to data availability checker"; + "block_root" => ?self.block_root, + "received_block" => block_kzg_commitments_count_opt.is_some(), + "received_blobs" => received_blobs, + "expected_blobs" => expected_blobs_msg, + ); + + block_kzg_commitments_count_opt.map_or(false, |num_expected_blobs| { + num_expected_blobs == received_blobs + }) + } BlockImportRequirement::ColumnSampling(num_expected_columns) => { - let num_received_data_columns = self.num_received_data_columns(); // No data columns when there are 0 blobs - self.num_expected_blobs() - .map_or(false, |num_expected_blobs| { - num_expected_blobs == 0 - || *num_expected_columns == num_received_data_columns - }) + let expected_columns_opt = block_kzg_commitments_count_opt.map(|blob_count| { + if blob_count > 0 { + *num_expected_columns + } else { + 0 + } + }); + + let expected_columns_msg = expected_columns_opt + .as_ref() + .map(|num| num.to_string()) + .unwrap_or("unknown".to_string()); + + let num_received_columns = self.num_received_data_columns(); + + debug!(log, + "Component(s) added to data availability checker"; + "block_root" => ?self.block_root, + "received_block" => block_kzg_commitments_count_opt.is_some(), + "received_columns" => num_received_columns, + "expected_columns" 
=> expected_columns_msg, + ); + + expected_columns_opt.map_or(false, |num_expected_columns| { + num_expected_columns == num_received_columns + }) } } } @@ -311,10 +347,6 @@ impl PendingComponents { ))) } - pub fn reconstruction_started(&mut self) { - self.reconstruction_started = true; - } - /// Returns the epoch of the block if it is cached, otherwise returns the epoch of the first blob. pub fn epoch(&self) -> Option { self.executed_block @@ -358,6 +390,15 @@ pub struct DataAvailabilityCheckerInner { spec: Arc, } +// This enum is only used internally within the crate in the reconstruction function to improve +// readability, so it's OK to not box the variant value, and it shouldn't impact memory much with +// the current usage, as it's deconstructed immediately. +#[allow(clippy::large_enum_variant)] +pub(crate) enum ReconstructColumnsDecision { + Yes(PendingComponents), + No(&'static str), +} + impl DataAvailabilityCheckerInner { pub fn new( capacity: NonZeroUsize, @@ -448,33 +489,12 @@ impl DataAvailabilityCheckerInner { } } - /// Potentially trigger reconstruction if: - /// - Our custody requirement is all columns - /// - We >= 50% of columns, but not all columns - fn should_reconstruct( - &self, - block_import_requirement: &BlockImportRequirement, - pending_components: &PendingComponents, - ) -> bool { - let BlockImportRequirement::ColumnSampling(num_expected_columns) = block_import_requirement - else { - return false; - }; - - let num_of_columns = self.spec.number_of_columns; - let has_missing_columns = pending_components.verified_data_columns.len() < num_of_columns; - - has_missing_columns - && !pending_components.reconstruction_started - && *num_expected_columns == num_of_columns - && pending_components.verified_data_columns.len() >= num_of_columns / 2 - } - pub fn put_kzg_verified_blobs>>( &self, block_root: Hash256, epoch: Epoch, kzg_verified_blobs: I, + log: &Logger, ) -> Result, AvailabilityCheckError> { let mut fixed_blobs = FixedVector::default(); @@ 
-496,7 +516,7 @@ impl DataAvailabilityCheckerInner { pending_components.merge_blobs(fixed_blobs); let block_import_requirement = self.block_import_requirement(epoch)?; - if pending_components.is_available(&block_import_requirement) { + if pending_components.is_available(&block_import_requirement, log) { write_lock.put(block_root, pending_components.clone()); // No need to hold the write lock anymore drop(write_lock); @@ -514,12 +534,11 @@ impl DataAvailabilityCheckerInner { I: IntoIterator>, >( &self, - kzg: &Kzg, block_root: Hash256, epoch: Epoch, kzg_verified_data_columns: I, - ) -> Result<(Availability, DataColumnsToPublish), AvailabilityCheckError> - { + log: &Logger, + ) -> Result, AvailabilityCheckError> { let mut write_lock = self.critical.write(); // Grab existing entry or create a new entry. @@ -533,65 +552,67 @@ impl DataAvailabilityCheckerInner { let block_import_requirement = self.block_import_requirement(epoch)?; - // Potentially trigger reconstruction if: - // - Our custody requirement is all columns - // - We >= 50% of columns - let data_columns_to_publish = - if self.should_reconstruct(&block_import_requirement, &pending_components) { - pending_components.reconstruction_started(); - - let timer = metrics::start_timer(&metrics::DATA_AVAILABILITY_RECONSTRUCTION_TIME); - - let existing_column_indices = pending_components - .verified_data_columns - .iter() - .map(|d| d.index()) - .collect::>(); - - // Will only return an error if: - // - < 50% of columns - // - There are duplicates - let all_data_columns = KzgVerifiedCustodyDataColumn::reconstruct_columns( - kzg, - pending_components.verified_data_columns.as_slice(), - &self.spec, - ) - .map_err(AvailabilityCheckError::ReconstructColumnsError)?; - - let data_columns_to_publish = all_data_columns - .iter() - .filter(|d| !existing_column_indices.contains(&d.index())) - .map(|d| d.clone_arc()) - .collect::>(); - - pending_components.verified_data_columns = all_data_columns; - - metrics::stop_timer(timer); 
- metrics::inc_counter_by( - &metrics::DATA_AVAILABILITY_RECONSTRUCTED_COLUMNS, - data_columns_to_publish.len() as u64, - ); - - Some(data_columns_to_publish) - } else { - None - }; - - if pending_components.is_available(&block_import_requirement) { + if pending_components.is_available(&block_import_requirement, log) { write_lock.put(block_root, pending_components.clone()); // No need to hold the write lock anymore drop(write_lock); - pending_components - .make_available(block_import_requirement, &self.spec, |diet_block| { - self.state_cache.recover_pending_executed_block(diet_block) - }) - .map(|availability| (availability, data_columns_to_publish)) + pending_components.make_available(block_import_requirement, &self.spec, |diet_block| { + self.state_cache.recover_pending_executed_block(diet_block) + }) } else { write_lock.put(block_root, pending_components); - Ok(( - Availability::MissingComponents(block_root), - data_columns_to_publish, - )) + Ok(Availability::MissingComponents(block_root)) + } + } + + /// Check whether data column reconstruction should be attempted. + /// + /// Potentially trigger reconstruction if: + /// - Our custody requirement is all columns (supernode), and we haven't got all columns + /// - We have >= 50% of columns, but not all columns + /// - Reconstruction hasn't been started for the block + /// + /// If reconstruction is required, returns `PendingComponents` which contains the + /// components to be used as inputs to reconstruction, otherwise returns a `reason`. + pub fn check_and_set_reconstruction_started( + &self, + block_root: &Hash256, + ) -> ReconstructColumnsDecision { + let mut write_lock = self.critical.write(); + let Some(pending_components) = write_lock.get_mut(block_root) else { + // Block may have been imported as it does not exist in availability cache. + return ReconstructColumnsDecision::No("block already imported"); + }; + + // If we're sampling all columns, it means we must be custodying all columns. 
+ let custody_column_count = self.sampling_column_count(); + let total_column_count = self.spec.number_of_columns; + let received_column_count = pending_components.verified_data_columns.len(); + + if pending_components.reconstruction_started { + return ReconstructColumnsDecision::No("already started"); + } + if custody_column_count != total_column_count { + return ReconstructColumnsDecision::No("not required for full node"); + } + if received_column_count == self.spec.number_of_columns { + return ReconstructColumnsDecision::No("all columns received"); + } + if received_column_count < total_column_count / 2 { + return ReconstructColumnsDecision::No("not enough columns"); + } + + pending_components.reconstruction_started = true; + ReconstructColumnsDecision::Yes(pending_components.clone()) + } + + /// This could mean some invalid data columns made it through to the `DataAvailabilityChecker`. + /// In this case, we remove all data columns in `PendingComponents`, reset reconstruction + /// status so that we can attempt to retrieve columns from peers again. + pub fn handle_reconstruction_failure(&self, block_root: &Hash256) { + if let Some(pending_components_mut) = self.critical.write().get_mut(block_root) { + pending_components_mut.verified_data_columns = vec![]; + pending_components_mut.reconstruction_started = false; } } @@ -600,6 +621,7 @@ impl DataAvailabilityCheckerInner { pub fn put_pending_executed_block( &self, executed_block: AvailabilityPendingExecutedBlock, + log: &Logger, ) -> Result, AvailabilityCheckError> { let mut write_lock = self.critical.write(); let block_root = executed_block.import_data.block_root; @@ -621,7 +643,7 @@ impl DataAvailabilityCheckerInner { // Check if we have all components and entire set is consistent. 
let block_import_requirement = self.block_import_requirement(epoch)?; - if pending_components.is_available(&block_import_requirement) { + if pending_components.is_available(&block_import_requirement, log) { write_lock.put(block_root, pending_components.clone()); // No need to hold the write lock anymore drop(write_lock); @@ -919,7 +941,7 @@ mod test { ); assert!(cache.critical.read().is_empty(), "cache should be empty"); let availability = cache - .put_pending_executed_block(pending_block) + .put_pending_executed_block(pending_block, harness.logger()) .expect("should put block"); if blobs_expected == 0 { assert!( @@ -958,7 +980,7 @@ mod test { for (blob_index, gossip_blob) in blobs.into_iter().enumerate() { kzg_verified_blobs.push(gossip_blob.into_inner()); let availability = cache - .put_kzg_verified_blobs(root, epoch, kzg_verified_blobs.clone()) + .put_kzg_verified_blobs(root, epoch, kzg_verified_blobs.clone(), harness.logger()) .expect("should put blob"); if blob_index == blobs_expected - 1 { assert!(matches!(availability, Availability::Available(_))); @@ -985,7 +1007,7 @@ mod test { for gossip_blob in blobs { kzg_verified_blobs.push(gossip_blob.into_inner()); let availability = cache - .put_kzg_verified_blobs(root, epoch, kzg_verified_blobs.clone()) + .put_kzg_verified_blobs(root, epoch, kzg_verified_blobs.clone(), harness.logger()) .expect("should put blob"); assert_eq!( availability, @@ -995,7 +1017,7 @@ mod test { assert_eq!(cache.critical.read().len(), 1); } let availability = cache - .put_pending_executed_block(pending_block) + .put_pending_executed_block(pending_block, harness.logger()) .expect("should put block"); assert!( matches!(availability, Availability::Available(_)), @@ -1063,7 +1085,7 @@ mod test { // put the block in the cache let availability = cache - .put_pending_executed_block(pending_block) + .put_pending_executed_block(pending_block, harness.logger()) .expect("should put block"); // grab the diet block from the cache for later testing diff 
--git a/beacon_node/beacon_chain/src/data_column_verification.rs b/beacon_node/beacon_chain/src/data_column_verification.rs index 44873fab4a..a4e83b2751 100644 --- a/beacon_node/beacon_chain/src/data_column_verification.rs +++ b/beacon_node/beacon_chain/src/data_column_verification.rs @@ -313,10 +313,7 @@ impl KzgVerifiedCustodyDataColumn { kzg: &Kzg, partial_set_of_columns: &[Self], spec: &ChainSpec, - ) -> Result, KzgError> { - // Will only return an error if: - // - < 50% of columns - // - There are duplicates + ) -> Result>, KzgError> { let all_data_columns = reconstruct_data_columns( kzg, &partial_set_of_columns @@ -328,10 +325,8 @@ impl KzgVerifiedCustodyDataColumn { Ok(all_data_columns .into_iter() - .map(|d| { - KzgVerifiedCustodyDataColumn::from_asserted_custody(KzgVerifiedDataColumn { - data: d, - }) + .map(|data| { + KzgVerifiedCustodyDataColumn::from_asserted_custody(KzgVerifiedDataColumn { data }) }) .collect::>()) } diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index f15b46fc4b..0b5608f084 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -1887,6 +1887,31 @@ pub static DATA_AVAILABILITY_RECONSTRUCTED_COLUMNS: LazyLock> ) }); +pub static KZG_DATA_COLUMN_RECONSTRUCTION_ATTEMPTS: LazyLock> = + LazyLock::new(|| { + try_create_int_counter( + "kzg_data_column_reconstruction_attempts", + "Count of times data column reconstruction has been attempted", + ) + }); + +pub static KZG_DATA_COLUMN_RECONSTRUCTION_FAILURES: LazyLock> = + LazyLock::new(|| { + try_create_int_counter( + "kzg_data_column_reconstruction_failures", + "Count of times data column reconstruction has failed", + ) + }); + +pub static KZG_DATA_COLUMN_RECONSTRUCTION_INCOMPLETE_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_counter_vec( + "kzg_data_column_reconstruction_incomplete_total", + "Count of times data column reconstruction attempts did not result in an import", + &["reason"], + ) 
+ }); + /* * light_client server metrics */ diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index 3153ce533c..4d875cb4a1 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -4,6 +4,7 @@ use crate::{ service::NetworkMessage, sync::SyncMessage, }; +use beacon_chain::blob_verification::{GossipBlobError, GossipVerifiedBlob}; use beacon_chain::block_verification_types::AsBlock; use beacon_chain::data_column_verification::{GossipDataColumnError, GossipVerifiedDataColumn}; use beacon_chain::store::Error; @@ -18,13 +19,7 @@ use beacon_chain::{ AvailabilityProcessingStatus, BeaconChainError, BeaconChainTypes, BlockError, ForkChoiceError, GossipVerifiedBlock, NotifyExecutionLayer, }; -use beacon_chain::{ - blob_verification::{GossipBlobError, GossipVerifiedBlob}, - data_availability_checker::DataColumnsToPublish, -}; -use lighthouse_network::{ - Client, MessageAcceptance, MessageId, PeerAction, PeerId, PubsubMessage, ReportSource, -}; +use lighthouse_network::{Client, MessageAcceptance, MessageId, PeerAction, PeerId, ReportSource}; use operation_pool::ReceivedPreCapella; use slog::{crit, debug, error, info, trace, warn, Logger}; use slot_clock::SlotClock; @@ -171,26 +166,6 @@ impl NetworkBeaconProcessor { }) } - pub(crate) fn handle_data_columns_to_publish( - &self, - data_columns_to_publish: DataColumnsToPublish, - ) { - if let Some(data_columns_to_publish) = data_columns_to_publish { - self.send_network_message(NetworkMessage::Publish { - messages: data_columns_to_publish - .iter() - .map(|d| { - let subnet = DataColumnSubnetId::from_column_index::( - d.index as usize, - &self.chain.spec, - ); - PubsubMessage::DataColumnSidecar(Box::new((subnet, d.clone()))) - }) - .collect(), - }); - } - } - /// Send a message on `message_tx` that the `message_id` sent by `peer_id` 
should be propagated on /// the gossip network. /// @@ -1022,9 +997,7 @@ impl NetworkBeaconProcessor { .process_gossip_data_columns(vec![verified_data_column], || Ok(())) .await { - Ok((availability, data_columns_to_publish)) => { - self.handle_data_columns_to_publish(data_columns_to_publish); - + Ok(availability) => { match availability { AvailabilityProcessingStatus::Imported(block_root) => { // Note: Reusing block imported metric here @@ -1052,7 +1025,7 @@ impl NetworkBeaconProcessor { "block_root" => %block_root, ); - // Potentially trigger reconstruction + self.attempt_data_column_reconstruction(block_root).await; } } } diff --git a/beacon_node/network/src/network_beacon_processor/mod.rs b/beacon_node/network/src/network_beacon_processor/mod.rs index 04571e181d..c884ecea4e 100644 --- a/beacon_node/network/src/network_beacon_processor/mod.rs +++ b/beacon_node/network/src/network_beacon_processor/mod.rs @@ -2,7 +2,9 @@ use crate::sync::manager::BlockProcessType; use crate::sync::SamplingId; use crate::{service::NetworkMessage, sync::manager::SyncMessage}; use beacon_chain::block_verification_types::RpcBlock; -use beacon_chain::{builder::Witness, eth1_chain::CachingEth1Backend, BeaconChain}; +use beacon_chain::{ + builder::Witness, eth1_chain::CachingEth1Backend, AvailabilityProcessingStatus, BeaconChain, +}; use beacon_chain::{BeaconChainTypes, NotifyExecutionLayer}; use beacon_processor::{ work_reprocessing_queue::ReprocessQueueMessage, BeaconProcessorChannels, BeaconProcessorSend, @@ -16,9 +18,9 @@ use lighthouse_network::rpc::methods::{ use lighthouse_network::rpc::{RequestId, SubstreamId}; use lighthouse_network::{ rpc::{BlocksByRangeRequest, BlocksByRootRequest, LightClientBootstrapRequest, StatusMessage}, - Client, MessageId, NetworkGlobals, PeerId, + Client, MessageId, NetworkGlobals, PeerId, PubsubMessage, }; -use slog::{debug, Logger}; +use slog::{debug, error, trace, Logger}; use slot_clock::ManualSlotClock; use std::path::PathBuf; use std::sync::Arc; 
@@ -848,6 +850,75 @@ impl NetworkBeaconProcessor { "error" => %e) }); } + + /// Attempt to reconstruct all data columns if the following conditions satisfies: + /// - Our custody requirement is all columns + /// - We have >= 50% of columns, but not all columns + /// + /// Returns `Some(AvailabilityProcessingStatus)` if reconstruction is successfully performed, + /// otherwise returns `None`. + async fn attempt_data_column_reconstruction( + &self, + block_root: Hash256, + ) -> Option { + let result = self.chain.reconstruct_data_columns(block_root).await; + match result { + Ok(Some((availability_processing_status, data_columns_to_publish))) => { + self.send_network_message(NetworkMessage::Publish { + messages: data_columns_to_publish + .iter() + .map(|d| { + let subnet = DataColumnSubnetId::from_column_index::( + d.index as usize, + &self.chain.spec, + ); + PubsubMessage::DataColumnSidecar(Box::new((subnet, d.clone()))) + }) + .collect(), + }); + + match &availability_processing_status { + AvailabilityProcessingStatus::Imported(hash) => { + debug!( + self.log, + "Block components available via reconstruction"; + "result" => "imported block and custody columns", + "block_hash" => %hash, + ); + self.chain.recompute_head_at_current_slot().await; + } + AvailabilityProcessingStatus::MissingComponents(_, _) => { + debug!( + self.log, + "Block components still missing block after reconstruction"; + "result" => "imported all custody columns", + "block_hash" => %block_root, + ); + } + } + + Some(availability_processing_status) + } + Ok(None) => { + // reason is tracked via the `KZG_DATA_COLUMN_RECONSTRUCTION_INCOMPLETE_TOTAL` metric + trace!( + self.log, + "Reconstruction not required for block"; + "block_hash" => %block_root, + ); + None + } + Err(e) => { + error!( + self.log, + "Error during data column reconstruction"; + "block_root" => %block_root, + "error" => ?e + ); + None + } + } + } } type TestBeaconChainType = diff --git 
a/beacon_node/network/src/network_beacon_processor/sync_methods.rs b/beacon_node/network/src/network_beacon_processor/sync_methods.rs index dcad6160b3..82d06c20f8 100644 --- a/beacon_node/network/src/network_beacon_processor/sync_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/sync_methods.rs @@ -327,34 +327,37 @@ impl NetworkBeaconProcessor { _seen_timestamp: Duration, process_type: BlockProcessType, ) { - let result = self + let mut result = self .chain .process_rpc_custody_columns(custody_columns) .await; match &result { - Ok((availability, data_columns_to_publish)) => { - self.handle_data_columns_to_publish(data_columns_to_publish.clone()); - - match availability { - AvailabilityProcessingStatus::Imported(hash) => { - debug!( - self.log, - "Block components retrieved"; - "result" => "imported block and custody columns", - "block_hash" => %hash, - ); - self.chain.recompute_head_at_current_slot().await; - } - AvailabilityProcessingStatus::MissingComponents(_, _) => { - debug!( - self.log, - "Missing components over rpc"; - "block_hash" => %block_root, - ); + Ok(availability) => match availability { + AvailabilityProcessingStatus::Imported(hash) => { + debug!( + self.log, + "Block components retrieved"; + "result" => "imported block and custody columns", + "block_hash" => %hash, + ); + self.chain.recompute_head_at_current_slot().await; + } + AvailabilityProcessingStatus::MissingComponents(_, _) => { + debug!( + self.log, + "Missing components over rpc"; + "block_hash" => %block_root, + ); + // Attempt reconstruction here before notifying sync, to avoid sending out more requests + // that we may no longer need. 
+ if let Some(availability) = + self.attempt_data_column_reconstruction(block_root).await + { + result = Ok(availability) } } - } + }, Err(BlockError::DuplicateFullyImported(_)) => { debug!( self.log, @@ -374,7 +377,7 @@ impl NetworkBeaconProcessor { self.send_sync_message(SyncMessage::BlockComponentProcessed { process_type, - result: result.map(|(r, _)| r).into(), + result: result.into(), }); } From a61b587dce07f5c4a9abfae9aded1a9b1b23c472 Mon Sep 17 00:00:00 2001 From: Lion - dapplion <35266934+dapplion@users.noreply.github.com> Date: Thu, 17 Oct 2024 08:53:20 +0300 Subject: [PATCH 55/66] Better assert message in lookup sampling test (#6473) * Better assert message in lookup sampling test * Export status * Merge remote-tracking branch 'sigp/unstable' into lookup-sampling-test-assert * Drop unused * Use slice --- .../network/src/sync/block_lookups/tests.rs | 53 +++++++++++++++---- beacon_node/network/src/sync/manager.rs | 10 ++-- beacon_node/network/src/sync/peer_sampling.rs | 35 +++++------- 3 files changed, 60 insertions(+), 38 deletions(-) diff --git a/beacon_node/network/src/sync/block_lookups/tests.rs b/beacon_node/network/src/sync/block_lookups/tests.rs index 0ed624fc0d..ae9f96a348 100644 --- a/beacon_node/network/src/sync/block_lookups/tests.rs +++ b/beacon_node/network/src/sync/block_lookups/tests.rs @@ -1319,14 +1319,44 @@ impl TestRig { }); } - fn assert_sampling_request_status( - &self, - block_root: Hash256, - ongoing: &Vec, - no_peers: &Vec, - ) { - self.sync_manager - .assert_sampling_request_status(block_root, ongoing, no_peers) + fn assert_sampling_request_ongoing(&self, block_root: Hash256, indices: &[ColumnIndex]) { + for index in indices { + let status = self + .sync_manager + .get_sampling_request_status(block_root, index) + .unwrap_or_else(|| panic!("No request state for {index}")); + if !matches!(status, crate::sync::peer_sampling::Status::Sampling { .. 
}) { + panic!("expected {block_root} {index} request to be on going: {status:?}"); + } + } + } + + fn assert_sampling_request_nopeers(&self, block_root: Hash256, indices: &[ColumnIndex]) { + for index in indices { + let status = self + .sync_manager + .get_sampling_request_status(block_root, index) + .unwrap_or_else(|| panic!("No request state for {index}")); + if !matches!(status, crate::sync::peer_sampling::Status::NoPeers { .. }) { + panic!("expected {block_root} {index} request to be no peers: {status:?}"); + } + } + } + + fn log_sampling_requests(&self, block_root: Hash256, indices: &[ColumnIndex]) { + let statuses = indices + .iter() + .map(|index| { + let status = self + .sync_manager + .get_sampling_request_status(block_root, index) + .unwrap_or_else(|| panic!("No request state for {index}")); + (index, status) + }) + .collect::>(); + self.log(&format!( + "Sampling request status for {block_root}: {statuses:?}" + )); } } @@ -2099,7 +2129,7 @@ fn sampling_batch_requests() { .pop() .unwrap(); assert_eq!(column_indexes.len(), SAMPLING_REQUIRED_SUCCESSES); - r.assert_sampling_request_status(block_root, &column_indexes, &vec![]); + r.assert_sampling_request_ongoing(block_root, &column_indexes); // Resolve the request. r.complete_valid_sampling_column_requests( @@ -2127,7 +2157,7 @@ fn sampling_batch_requests_not_enough_responses_returned() { assert_eq!(column_indexes.len(), SAMPLING_REQUIRED_SUCCESSES); // The request status should be set to Sampling. - r.assert_sampling_request_status(block_root, &column_indexes, &vec![]); + r.assert_sampling_request_ongoing(block_root, &column_indexes); // Split the indexes to simulate the case where the supernode doesn't have the requested column. let (_column_indexes_supernode_does_not_have, column_indexes_to_complete) = @@ -2145,7 +2175,8 @@ fn sampling_batch_requests_not_enough_responses_returned() { ); // The request status should be set to NoPeers since the supernode, the only peer, returned not enough responses. 
- r.assert_sampling_request_status(block_root, &vec![], &column_indexes); + r.log_sampling_requests(block_root, &column_indexes); + r.assert_sampling_request_nopeers(block_root, &column_indexes); // The sampling request stalls. r.expect_empty_network(); diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index a2544b82b5..ef01763d4d 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -354,14 +354,12 @@ impl SyncManager { } #[cfg(test)] - pub(crate) fn assert_sampling_request_status( + pub(crate) fn get_sampling_request_status( &self, block_root: Hash256, - ongoing: &Vec, - no_peers: &Vec, - ) { - self.sampling - .assert_sampling_request_status(block_root, ongoing, no_peers); + index: &ColumnIndex, + ) -> Option { + self.sampling.get_request_status(block_root, index) } fn network_globals(&self) -> &NetworkGlobals { diff --git a/beacon_node/network/src/sync/peer_sampling.rs b/beacon_node/network/src/sync/peer_sampling.rs index 086fb0ec8d..decabfd216 100644 --- a/beacon_node/network/src/sync/peer_sampling.rs +++ b/beacon_node/network/src/sync/peer_sampling.rs @@ -1,4 +1,6 @@ use self::request::ActiveColumnSampleRequest; +#[cfg(test)] +pub(crate) use self::request::Status; use super::network_context::{ DataColumnsByRootSingleBlockRequest, RpcResponseError, SyncNetworkContext, }; @@ -43,15 +45,15 @@ impl Sampling { } #[cfg(test)] - pub fn assert_sampling_request_status( + pub fn get_request_status( &self, block_root: Hash256, - ongoing: &Vec, - no_peers: &Vec, - ) { + index: &ColumnIndex, + ) -> Option { let requester = SamplingRequester::ImportedBlock(block_root); - let active_sampling_request = self.requests.get(&requester).unwrap(); - active_sampling_request.assert_sampling_request_status(ongoing, no_peers); + self.requests + .get(&requester) + .and_then(|req| req.get_request_status(index)) } /// Create a new sampling request for a known block @@ -233,18 +235,8 @@ impl 
ActiveSamplingRequest { } #[cfg(test)] - pub fn assert_sampling_request_status( - &self, - ongoing: &Vec, - no_peers: &Vec, - ) { - for idx in ongoing { - assert!(self.column_requests.get(idx).unwrap().is_ongoing()); - } - - for idx in no_peers { - assert!(self.column_requests.get(idx).unwrap().is_no_peers()); - } + pub fn get_request_status(&self, index: &ColumnIndex) -> Option { + self.column_requests.get(index).map(|req| req.status()) } /// Insert a downloaded column into an active sampling request. Then make progress on the @@ -584,8 +576,9 @@ mod request { peers_dont_have: HashSet, } + // Exposed only for testing assertions in lookup tests #[derive(Debug, Clone)] - enum Status { + pub(crate) enum Status { NoPeers, NotStarted, Sampling(PeerId), @@ -630,8 +623,8 @@ mod request { } #[cfg(test)] - pub(crate) fn is_no_peers(&self) -> bool { - matches!(self.status, Status::NoPeers) + pub(crate) fn status(&self) -> Status { + self.status.clone() } pub(crate) fn choose_peer( From 2edf225c3b9cef4a8386ec5dad4253f32d773e17 Mon Sep 17 00:00:00 2001 From: Pop Chunhapanya Date: Thu, 17 Oct 2024 12:53:23 +0700 Subject: [PATCH 56/66] Use snap install yq in local_testnet (#6468) * Use snap install yq in local_testnet snap is better than apt since it's recommended in yq doc --- scripts/local_testnet/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/local_testnet/README.md b/scripts/local_testnet/README.md index 0275cb217f..ca701eb7e9 100644 --- a/scripts/local_testnet/README.md +++ b/scripts/local_testnet/README.md @@ -9,7 +9,7 @@ This setup can be useful for testing and development. 1. Install [Kurtosis](https://docs.kurtosis.com/install/). Verify that Kurtosis has been successfully installed by running `kurtosis version` which should display the version. -1. Install [yq](https://github.com/mikefarah/yq). If you are on Ubuntu, you can install `yq` by running `sudo apt install yq -y`. +1. Install [yq](https://github.com/mikefarah/yq). 
If you are on Ubuntu, you can install `yq` by running `snap install yq`. ## Starting the testnet @@ -82,4 +82,4 @@ The script comes with some CLI options, which can be viewed with `./start_local_ ```bash ./start_local_testnet.sh -b false -``` \ No newline at end of file +``` From 7091c789c99f2f19378a147a6e0c202b3006b8a8 Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Wed, 16 Oct 2024 22:53:31 -0700 Subject: [PATCH 57/66] Optimization to Redb slasher (#6481) * first() & last() optimizations * Fmt --- Cargo.lock | 4 ++-- slasher/Cargo.toml | 2 +- slasher/src/database/redb_impl.rs | 12 ++---------- 3 files changed, 5 insertions(+), 13 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ecbfd0cb8d..b12df12265 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6798,9 +6798,9 @@ dependencies = [ [[package]] name = "redb" -version = "2.1.2" +version = "2.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58323dc32ea52a8ae105ff94bc0460c5d906307533ba3401aa63db3cbe491fe5" +checksum = "074373f3e7e5d27d8741d19512232adb47be8622d3daef3a45bcae72050c3d2a" dependencies = [ "libc", ] diff --git a/slasher/Cargo.toml b/slasher/Cargo.toml index d74b0ac062..034a1b71a3 100644 --- a/slasher/Cargo.toml +++ b/slasher/Cargo.toml @@ -37,7 +37,7 @@ mdbx = { package = "libmdbx", git = "https://github.com/sigp/libmdbx-rs", rev = lmdb-rkv = { git = "https://github.com/sigp/lmdb-rs", rev = "f33845c6469b94265319aac0ed5085597862c27e", optional = true } lmdb-rkv-sys = { git = "https://github.com/sigp/lmdb-rs", rev = "f33845c6469b94265319aac0ed5085597862c27e", optional = true } -redb = { version = "2.1", optional = true } +redb = { version = "2.1.4", optional = true } [dev-dependencies] maplit = { workspace = true } diff --git a/slasher/src/database/redb_impl.rs b/slasher/src/database/redb_impl.rs index 6c5b62a44f..12bef71148 100644 --- a/slasher/src/database/redb_impl.rs +++ b/slasher/src/database/redb_impl.rs @@ -164,13 +164,9 @@ impl<'env> Cursor<'env> { let 
table_definition: TableDefinition<'_, &[u8], &[u8]> = TableDefinition::new(&self.db.table_name); let table = self.txn.open_table(table_definition)?; - let first = table - .iter()? - .next() - .map(|x| x.map(|(key, _)| key.value().to_vec())); + let first = table.first()?.map(|(key, _)| key.value().to_vec()); if let Some(owned_key) = first { - let owned_key = owned_key?; self.current_key = Some(Cow::from(owned_key)); Ok(self.current_key.clone()) } else { @@ -182,13 +178,9 @@ impl<'env> Cursor<'env> { let table_definition: TableDefinition<'_, &[u8], &[u8]> = TableDefinition::new(&self.db.table_name); let table = self.txn.open_table(table_definition)?; - let last = table - .iter()? - .next_back() - .map(|x| x.map(|(key, _)| key.value().to_vec())); + let last = table.last()?.map(|(key, _)| key.value().to_vec()); if let Some(owned_key) = last { - let owned_key = owned_key?; self.current_key = Some(Cow::from(owned_key)); return Ok(self.current_key.clone()); } From 606a113cff10bb86568cbe709b7601deb1b94215 Mon Sep 17 00:00:00 2001 From: hopinheimer <48147533+hopinheimer@users.noreply.github.com> Date: Thu, 17 Oct 2024 04:27:56 -0400 Subject: [PATCH 58/66] IDONTWANT message optimisation to cutoff for smaller messages (#6456) * idontwant message opitmising * requested changes and linter appeasing * added the config cli flag * Merge branch 'unstable' into fix/idontwant-optimise * cli docs generated * const declaration * Hide extra technical cli flag * passing ci * Merge branch 'unstable' into fix/idontwant-optimise --- .../gossipsub/src/behaviour.rs | 9 ++-- .../gossipsub/src/behaviour/tests.rs | 46 ++++++++++++++++++- .../gossipsub/src/config.rs | 27 +++++++++++ beacon_node/lighthouse_network/src/config.rs | 8 ++++ .../lighthouse_network/src/service/mod.rs | 1 + beacon_node/src/cli.rs | 10 +++- beacon_node/src/config.rs | 14 ++++++ 7 files changed, 110 insertions(+), 5 deletions(-) diff --git a/beacon_node/lighthouse_network/gossipsub/src/behaviour.rs 
b/beacon_node/lighthouse_network/gossipsub/src/behaviour.rs index bf77f30979..c50e76e7f2 100644 --- a/beacon_node/lighthouse_network/gossipsub/src/behaviour.rs +++ b/beacon_node/lighthouse_network/gossipsub/src/behaviour.rs @@ -1812,9 +1812,6 @@ where // Calculate the message id on the transformed data. let msg_id = self.config.message_id(&message); - // Broadcast IDONTWANT messages. - self.send_idontwant(&raw_message, &msg_id, propagation_source); - // Check the validity of the message // Peers get penalized if this message is invalid. We don't add it to the duplicate cache // and instead continually penalize peers that repeatedly send this message. @@ -1830,6 +1827,12 @@ where self.mcache.observe_duplicate(&msg_id, propagation_source); return; } + + // Broadcast IDONTWANT messages + if raw_message.raw_protobuf_len() > self.config.idontwant_message_size_threshold() { + self.send_idontwant(&raw_message, &msg_id, propagation_source); + } + tracing::debug!( message=%msg_id, "Put message in duplicate_cache and resolve promises" diff --git a/beacon_node/lighthouse_network/gossipsub/src/behaviour/tests.rs b/beacon_node/lighthouse_network/gossipsub/src/behaviour/tests.rs index 00de3ba2db..62f026b568 100644 --- a/beacon_node/lighthouse_network/gossipsub/src/behaviour/tests.rs +++ b/beacon_node/lighthouse_network/gossipsub/src/behaviour/tests.rs @@ -5266,13 +5266,14 @@ fn sends_idontwant() { let message = RawMessage { source: Some(peers[1]), - data: vec![12], + data: vec![12u8; 1024], sequence_number: Some(0), topic: topic_hashes[0].clone(), signature: None, key: None, validated: true, }; + gs.handle_received_message(message.clone(), &local_id); assert_eq!( receivers @@ -5292,6 +5293,48 @@ fn sends_idontwant() { ); } +#[test] +fn doesnt_sends_idontwant_for_lower_message_size() { + let (mut gs, peers, receivers, topic_hashes) = inject_nodes1() + .peer_no(5) + .topics(vec![String::from("topic1")]) + .to_subscribe(true) + .gs_config(Config::default()) + .explicit(1) + 
.peer_kind(PeerKind::Gossipsubv1_2) + .create_network(); + + let local_id = PeerId::random(); + + let message = RawMessage { + source: Some(peers[1]), + data: vec![12], + sequence_number: Some(0), + topic: topic_hashes[0].clone(), + signature: None, + key: None, + validated: true, + }; + + gs.handle_received_message(message.clone(), &local_id); + assert_eq!( + receivers + .into_iter() + .fold(0, |mut idontwants, (peer_id, c)| { + let non_priority = c.non_priority.into_inner(); + while !non_priority.is_empty() { + if let Ok(RpcOut::IDontWant(_)) = non_priority.try_recv() { + assert_ne!(peer_id, peers[1]); + idontwants += 1; + } + } + idontwants + }), + 0, + "IDONTWANT was sent" + ); +} + /// Test that a node doesn't send IDONTWANT messages to the mesh peers /// that don't run Gossipsub v1.2. #[test] @@ -5316,6 +5359,7 @@ fn doesnt_send_idontwant() { key: None, validated: true, }; + gs.handle_received_message(message.clone(), &local_id); assert_eq!( receivers diff --git a/beacon_node/lighthouse_network/gossipsub/src/config.rs b/beacon_node/lighthouse_network/gossipsub/src/config.rs index 1296e614c8..eb8dd432a3 100644 --- a/beacon_node/lighthouse_network/gossipsub/src/config.rs +++ b/beacon_node/lighthouse_network/gossipsub/src/config.rs @@ -98,6 +98,7 @@ pub struct Config { connection_handler_queue_len: usize, connection_handler_publish_duration: Duration, connection_handler_forward_duration: Duration, + idontwant_message_size_threshold: usize, } impl Config { @@ -370,6 +371,16 @@ impl Config { pub fn forward_queue_duration(&self) -> Duration { self.connection_handler_forward_duration } + + // The message size threshold for which IDONTWANT messages are sent. + // Sending IDONTWANT messages for small messages can have a negative effect to the overall + // traffic and CPU load. This acts as a lower bound cutoff for the message size to which + // IDONTWANT won't be sent to peers. 
Only works if the peers support Gossipsub1.2 + // (see https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.2.md#idontwant-message) + // default is 1kB + pub fn idontwant_message_size_threshold(&self) -> usize { + self.idontwant_message_size_threshold + } } impl Default for Config { @@ -440,6 +451,7 @@ impl Default for ConfigBuilder { connection_handler_queue_len: 5000, connection_handler_publish_duration: Duration::from_secs(5), connection_handler_forward_duration: Duration::from_millis(1000), + idontwant_message_size_threshold: 1000, }, invalid_protocol: false, } @@ -825,6 +837,17 @@ impl ConfigBuilder { self } + // The message size threshold for which IDONTWANT messages are sent. + // Sending IDONTWANT messages for small messages can have a negative effect to the overall + // traffic and CPU load. This acts as a lower bound cutoff for the message size to which + // IDONTWANT won't be sent to peers. Only works if the peers support Gossipsub1.2 + // (see https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.2.md#idontwant-message) + // default is 1kB + pub fn idontwant_message_size_threshold(&mut self, size: usize) -> &mut Self { + self.config.idontwant_message_size_threshold = size; + self + } + /// Constructs a [`Config`] from the given configuration and validates the settings. 
pub fn build(&self) -> Result { // check all constraints on config @@ -895,6 +918,10 @@ impl std::fmt::Debug for Config { "published_message_ids_cache_time", &self.published_message_ids_cache_time, ); + let _ = builder.field( + "idontwant_message_size_threhold", + &self.idontwant_message_size_threshold, + ); builder.finish() } } diff --git a/beacon_node/lighthouse_network/src/config.rs b/beacon_node/lighthouse_network/src/config.rs index ea29501784..d70e50b1da 100644 --- a/beacon_node/lighthouse_network/src/config.rs +++ b/beacon_node/lighthouse_network/src/config.rs @@ -19,6 +19,7 @@ pub const DEFAULT_IPV4_ADDRESS: Ipv4Addr = Ipv4Addr::UNSPECIFIED; pub const DEFAULT_TCP_PORT: u16 = 9000u16; pub const DEFAULT_DISC_PORT: u16 = 9000u16; pub const DEFAULT_QUIC_PORT: u16 = 9001u16; +pub const DEFAULT_IDONTWANT_MESSAGE_SIZE_THRESHOLD: usize = 1000usize; /// The maximum size of gossip messages. pub fn gossip_max_size(is_merge_enabled: bool, gossip_max_size: usize) -> usize { @@ -141,6 +142,10 @@ pub struct Config { /// Configuration for the inbound rate limiter (requests received by this node). pub inbound_rate_limiter_config: Option, + + /// Configuration for the minimum message size for which IDONTWANT messages are send in the mesh. + /// Lower the value reduces the optimization effect of the IDONTWANT messages. 
+ pub idontwant_message_size_threshold: usize, } impl Config { @@ -352,6 +357,7 @@ impl Default for Config { outbound_rate_limiter_config: None, invalid_block_storage: None, inbound_rate_limiter_config: None, + idontwant_message_size_threshold: DEFAULT_IDONTWANT_MESSAGE_SIZE_THRESHOLD, } } } @@ -433,6 +439,7 @@ pub fn gossipsub_config( gossipsub_config_params: GossipsubConfigParams, seconds_per_slot: u64, slots_per_epoch: u64, + idontwant_message_size_threshold: usize, ) -> gossipsub::Config { fn prefix( prefix: [u8; 4], @@ -498,6 +505,7 @@ pub fn gossipsub_config( .duplicate_cache_time(duplicate_cache_time) .message_id_fn(gossip_message_id) .allow_self_origin(true) + .idontwant_message_size_threshold(idontwant_message_size_threshold) .build() .expect("valid gossipsub configuration") } diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index ff641f666f..79889274de 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -237,6 +237,7 @@ impl Network { gossipsub_config_params, ctx.chain_spec.seconds_per_slot, E::slots_per_epoch(), + config.idontwant_message_size_threshold, ); let score_settings = PeerScoreSettings::new(&ctx.chain_spec, gs_config.mesh_n()); diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 1e9611fd1e..d6ed106803 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -659,7 +659,15 @@ pub fn cli_app() -> Command { .action(ArgAction::Set) .display_order(0) ) - + .arg( + Arg::new("idontwant-message-size-threshold") + .long("idontwant-message-size-threshold") + .help("Specifies the minimum message size for which IDONTWANT messages are sent. 
\ + This an optimization strategy to not send IDONTWANT messages for smaller messages.") + .action(ArgAction::Set) + .hide(true) + .display_order(0) + ) /* * Monitoring metrics */ diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 0eff8577c4..f62ccfe3ed 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -1487,6 +1487,20 @@ pub fn set_network_config( Some(Default::default()) } }; + + if let Some(idontwant_message_size_threshold) = + cli_args.get_one::("idontwant-message-size-threshold") + { + config.idontwant_message_size_threshold = idontwant_message_size_threshold + .parse::() + .map_err(|_| { + format!( + "Invalid idontwant message size threshold value passed: {}", + idontwant_message_size_threshold + ) + })?; + } + Ok(()) } From a074e9eb338ab56cce689b26f747d55d56b6287f Mon Sep 17 00:00:00 2001 From: Lion - dapplion <35266934+dapplion@users.noreply.github.com> Date: Thu, 17 Oct 2024 21:14:13 +0300 Subject: [PATCH 59/66] Generalize sync ActiveRequests (#6398) * Generalize sync ActiveRequests * Remove impossible to hit test * Update beacon_node/lighthouse_network/src/service/api_types.rs Co-authored-by: realbigsean * Update beacon_node/network/src/sync/network_context.rs Co-authored-by: realbigsean * Update beacon_node/network/src/sync/network_context.rs Co-authored-by: realbigsean * Simplify match * Fix display * Merge remote-tracking branch 'sigp/unstable' into sync-active-request-generalize * Sampling requests should not expect all responses * Merge remote-tracking branch 'sigp/unstable' into sync-active-request-generalize * Fix sampling_batch_requests_not_enough_responses_returned test * Merge remote-tracking branch 'sigp/unstable' into sync-active-request-generalize * Merge branch 'unstable' of https://github.com/sigp/lighthouse into sync-active-request-generalize --- .../src/service/api_types.rs | 18 +- .../network/src/sync/block_lookups/tests.rs | 58 ++-- beacon_node/network/src/sync/manager.rs | 16 +- 
.../network/src/sync/network_context.rs | 253 +++++++----------- .../src/sync/network_context/custody.rs | 4 + .../src/sync/network_context/requests.rs | 174 +++++++++++- .../network_context/requests/blobs_by_root.rs | 61 ++--- .../requests/blocks_by_root.rs | 43 ++- .../requests/data_columns_by_root.rs | 60 +---- beacon_node/network/src/sync/peer_sampling.rs | 19 +- 10 files changed, 371 insertions(+), 335 deletions(-) diff --git a/beacon_node/lighthouse_network/src/service/api_types.rs b/beacon_node/lighthouse_network/src/service/api_types.rs index e57e846c33..d8a1039783 100644 --- a/beacon_node/lighthouse_network/src/service/api_types.rs +++ b/beacon_node/lighthouse_network/src/service/api_types.rs @@ -22,11 +22,6 @@ pub struct SingleLookupReqId { pub req_id: Id, } -/// Request ID for data_columns_by_root requests. Block lookup do not issue this requests directly. -/// Wrapping this particular req_id, ensures not mixing this requests with a custody req_id. -#[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)] -pub struct DataColumnsByRootRequestId(pub Id); - /// Id of rpc requests sent by sync to the network. #[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)] pub enum SyncRequestId { @@ -35,11 +30,19 @@ pub enum SyncRequestId { /// Request searching for a set of blobs given a hash. SingleBlob { id: SingleLookupReqId }, /// Request searching for a set of data columns given a hash and list of column indices. - DataColumnsByRoot(DataColumnsByRootRequestId, DataColumnsByRootRequester), + DataColumnsByRoot(DataColumnsByRootRequestId), /// Range request that is composed by both a block range request and a blob range request. RangeBlockAndBlobs { id: Id }, } +/// Request ID for data_columns_by_root requests. Block lookups do not issue this request directly. +/// Wrapping this particular req_id, ensures not mixing this request with a custody req_id. 
+#[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)] +pub struct DataColumnsByRootRequestId { + pub id: Id, + pub requester: DataColumnsByRootRequester, +} + #[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)] pub enum DataColumnsByRootRequester { Sampling(SamplingId), @@ -173,8 +176,9 @@ impl slog::Value for RequestId { } } +// This custom impl reduces log boilerplate not printing `DataColumnsByRootRequestId` on each id log impl std::fmt::Display for DataColumnsByRootRequestId { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}", self.0) + write!(f, "{} {:?}", self.id, self.requester) } } diff --git a/beacon_node/network/src/sync/block_lookups/tests.rs b/beacon_node/network/src/sync/block_lookups/tests.rs index ae9f96a348..7192faa12d 100644 --- a/beacon_node/network/src/sync/block_lookups/tests.rs +++ b/beacon_node/network/src/sync/block_lookups/tests.rs @@ -25,8 +25,8 @@ use beacon_chain::{ use beacon_processor::WorkEvent; use lighthouse_network::rpc::{RPCError, RequestType, RpcErrorResponse}; use lighthouse_network::service::api_types::{ - AppRequestId, DataColumnsByRootRequester, Id, SamplingRequester, SingleLookupReqId, - SyncRequestId, + AppRequestId, DataColumnsByRootRequestId, DataColumnsByRootRequester, Id, SamplingRequester, + SingleLookupReqId, SyncRequestId, }; use lighthouse_network::types::SyncState; use lighthouse_network::NetworkConfig; @@ -745,10 +745,10 @@ impl TestRig { let first_dc = data_columns.first().unwrap(); let block_root = first_dc.block_root(); let sampling_request_id = match id.0 { - SyncRequestId::DataColumnsByRoot( - _, - _requester @ DataColumnsByRootRequester::Sampling(sampling_id), - ) => sampling_id.sampling_request_id, + SyncRequestId::DataColumnsByRoot(DataColumnsByRootRequestId { + requester: DataColumnsByRootRequester::Sampling(sampling_id), + .. 
+ }) => sampling_id.sampling_request_id, _ => unreachable!(), }; self.complete_data_columns_by_root_request(id, data_columns); @@ -773,14 +773,15 @@ impl TestRig { data_columns: Vec>>, missing_components: bool, ) { - let lookup_id = - if let SyncRequestId::DataColumnsByRoot(_, DataColumnsByRootRequester::Custody(id)) = - ids.first().unwrap().0 - { - id.requester.0.lookup_id - } else { - panic!("not a custody requester") - }; + let lookup_id = if let SyncRequestId::DataColumnsByRoot(DataColumnsByRootRequestId { + requester: DataColumnsByRootRequester::Custody(id), + .. + }) = ids.first().unwrap().0 + { + id.requester.0.lookup_id + } else { + panic!("not a custody requester") + }; let first_column = data_columns.first().cloned().unwrap(); @@ -1189,6 +1190,7 @@ impl TestRig { penalty_msg, expect_penalty_msg, "Unexpected penalty msg for {peer_id}" ); + self.log(&format!("Found expected penalty {penalty_msg}")); } pub fn expect_single_penalty(&mut self, peer_id: PeerId, expect_penalty_msg: &'static str) { @@ -1416,7 +1418,7 @@ fn test_single_block_lookup_empty_response() { // The peer does not have the block. It should be penalized. r.single_lookup_block_response(id, peer_id, None); - r.expect_penalty(peer_id, "NoResponseReturned"); + r.expect_penalty(peer_id, "NotEnoughResponsesReturned"); // it should be retried let id = r.expect_block_lookup_request(block_root); // Send the right block this time. @@ -2160,7 +2162,7 @@ fn sampling_batch_requests_not_enough_responses_returned() { r.assert_sampling_request_ongoing(block_root, &column_indexes); // Split the indexes to simulate the case where the supernode doesn't have the requested column. - let (_column_indexes_supernode_does_not_have, column_indexes_to_complete) = + let (column_indexes_supernode_does_not_have, column_indexes_to_complete) = column_indexes.split_at(1); // Complete the requests but only partially, so a NotEnoughResponsesReturned error occurs. 
@@ -2176,7 +2178,7 @@ fn sampling_batch_requests_not_enough_responses_returned() { // The request status should be set to NoPeers since the supernode, the only peer, returned not enough responses. r.log_sampling_requests(block_root, &column_indexes); - r.assert_sampling_request_nopeers(block_root, &column_indexes); + r.assert_sampling_request_nopeers(block_root, column_indexes_supernode_does_not_have); // The sampling request stalls. r.expect_empty_network(); @@ -2721,11 +2723,6 @@ mod deneb_only { self.blobs.pop().expect("blobs"); self } - fn invalidate_blobs_too_many(mut self) -> Self { - let first_blob = self.blobs.first().expect("blob").clone(); - self.blobs.push(first_blob); - self - } fn expect_block_process(mut self) -> Self { self.rig.expect_block_process(ResponseType::Block); self @@ -2814,21 +2811,6 @@ mod deneb_only { .expect_no_block_request(); } - #[test] - fn single_block_response_then_too_many_blobs_response_attestation() { - let Some(tester) = DenebTester::new(RequestTrigger::AttestationUnknownBlock) else { - return; - }; - tester - .block_response_triggering_process() - .invalidate_blobs_too_many() - .blobs_response() - .expect_penalty("TooManyResponses") - // Network context returns "download success" because the request has enough blobs + it - // downscores the peer for returning too many. 
- .expect_no_block_request(); - } - // Test peer returning block that has unknown parent, and a new lookup is created #[test] fn parent_block_unknown_parent() { @@ -2869,7 +2851,7 @@ mod deneb_only { }; tester .empty_block_response() - .expect_penalty("NoResponseReturned") + .expect_penalty("NotEnoughResponsesReturned") .expect_block_request() .expect_no_blobs_request() .block_response_and_expect_blob_request() diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index ef01763d4d..882f199b52 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -472,13 +472,9 @@ impl SyncManager { SyncRequestId::SingleBlob { id } => { self.on_single_blob_response(id, peer_id, RpcEvent::RPCError(error)) } - SyncRequestId::DataColumnsByRoot(req_id, requester) => self - .on_data_columns_by_root_response( - req_id, - requester, - peer_id, - RpcEvent::RPCError(error), - ), + SyncRequestId::DataColumnsByRoot(req_id) => { + self.on_data_columns_by_root_response(req_id, peer_id, RpcEvent::RPCError(error)) + } SyncRequestId::RangeBlockAndBlobs { id } => { if let Some(sender_id) = self.network.range_request_failed(id) { match sender_id { @@ -1104,10 +1100,9 @@ impl SyncManager { seen_timestamp: Duration, ) { match request_id { - SyncRequestId::DataColumnsByRoot(req_id, requester) => { + SyncRequestId::DataColumnsByRoot(req_id) => { self.on_data_columns_by_root_response( req_id, - requester, peer_id, match data_column { Some(data_column) => RpcEvent::Response(data_column, seen_timestamp), @@ -1149,7 +1144,6 @@ impl SyncManager { fn on_data_columns_by_root_response( &mut self, req_id: DataColumnsByRootRequestId, - requester: DataColumnsByRootRequester, peer_id: PeerId, data_column: RpcEvent>>, ) { @@ -1157,7 +1151,7 @@ impl SyncManager { self.network .on_data_columns_by_root_response(req_id, peer_id, data_column) { - match requester { + match req_id.requester { DataColumnsByRootRequester::Sampling(id) => 
{ if let Some((requester, result)) = self.sampling diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index 025003eef7..eb42e697cd 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -2,7 +2,6 @@ //! channel and stores a global RPC ID to perform requests. use self::custody::{ActiveCustodyRequest, Error as CustodyRequestError}; -use self::requests::{ActiveBlobsByRootRequest, ActiveBlocksByRootRequest}; pub use self::requests::{BlocksByRootSingleRequest, DataColumnsByRootSingleBlockRequest}; use super::block_sidecar_coupling::RangeBlockComponentsRequest; use super::manager::BlockProcessType; @@ -30,8 +29,11 @@ use lighthouse_network::service::api_types::{ use lighthouse_network::{Client, NetworkGlobals, PeerAction, PeerId, ReportSource}; use rand::seq::SliceRandom; use rand::thread_rng; -use requests::ActiveDataColumnsByRootRequest; pub use requests::LookupVerifyError; +use requests::{ + ActiveRequests, BlobsByRootRequestItems, BlocksByRootRequestItems, + DataColumnsByRootRequestItems, +}; use slog::{debug, error, warn}; use std::collections::hash_map::Entry; use std::collections::HashMap; @@ -180,18 +182,17 @@ pub struct SyncNetworkContext { request_id: Id, /// A mapping of active BlocksByRoot requests, including both current slot and parent lookups. - blocks_by_root_requests: FnvHashMap, - + blocks_by_root_requests: + ActiveRequests>, /// A mapping of active BlobsByRoot requests, including both current slot and parent lookups. 
- blobs_by_root_requests: FnvHashMap>, + blobs_by_root_requests: ActiveRequests>, + /// A mapping of active DataColumnsByRoot requests + data_columns_by_root_requests: + ActiveRequests>, /// Mapping of active custody column requests for a block root custody_by_root_requests: FnvHashMap>, - /// A mapping of active DataColumnsByRoot requests - data_columns_by_root_requests: - FnvHashMap>, - /// BlocksByRange requests paired with BlobsByRange range_block_components_requests: FnvHashMap)>, @@ -239,9 +240,9 @@ impl SyncNetworkContext { network_send, execution_engine_state: EngineState::Online, // always assume `Online` at the start request_id: 1, - blocks_by_root_requests: <_>::default(), - blobs_by_root_requests: <_>::default(), - data_columns_by_root_requests: <_>::default(), + blocks_by_root_requests: ActiveRequests::new("blocks_by_root"), + blobs_by_root_requests: ActiveRequests::new("blobs_by_root"), + data_columns_by_root_requests: ActiveRequests::new("data_columns_by_root"), custody_by_root_requests: <_>::default(), range_block_components_requests: FnvHashMap::default(), network_beacon_processor, @@ -270,34 +271,19 @@ impl SyncNetworkContext { let failed_block_ids = self .blocks_by_root_requests - .iter() - .filter_map(|(id, request)| { - if request.peer_id == *peer_id { - Some(SyncRequestId::SingleBlock { id: *id }) - } else { - None - } - }); + .active_requests_of_peer(peer_id) + .into_iter() + .map(|id| SyncRequestId::SingleBlock { id: *id }); let failed_blob_ids = self .blobs_by_root_requests - .iter() - .filter_map(|(id, request)| { - if request.peer_id == *peer_id { - Some(SyncRequestId::SingleBlob { id: *id }) - } else { - None - } - }); - let failed_data_column_by_root_ids = - self.data_columns_by_root_requests - .iter() - .filter_map(|(req_id, request)| { - if request.peer_id == *peer_id { - Some(SyncRequestId::DataColumnsByRoot(*req_id, request.requester)) - } else { - None - } - }); + .active_requests_of_peer(peer_id) + .into_iter() + .map(|id| 
SyncRequestId::SingleBlob { id: *id }); + let failed_data_column_by_root_ids = self + .data_columns_by_root_requests + .active_requests_of_peer(peer_id) + .into_iter() + .map(|req_id| SyncRequestId::DataColumnsByRoot(*req_id)); failed_range_ids .chain(failed_block_ids) @@ -616,8 +602,14 @@ impl SyncNetworkContext { }) .map_err(|_| RpcRequestSendError::NetworkSendError)?; - self.blocks_by_root_requests - .insert(id, ActiveBlocksByRootRequest::new(request, peer_id)); + self.blocks_by_root_requests.insert( + id, + peer_id, + // true = enforce max_requests as returned for blocks_by_root. We always request a single + // block and the peer must have it. + true, + BlocksByRootRequestItems::new(request), + ); Ok(LookupRequestResult::RequestSent(req_id)) } @@ -677,8 +669,15 @@ impl SyncNetworkContext { }) .map_err(|_| RpcRequestSendError::NetworkSendError)?; - self.blobs_by_root_requests - .insert(id, ActiveBlobsByRootRequest::new(request, peer_id)); + self.blobs_by_root_requests.insert( + id, + peer_id, + // true = enforce max_requests are returned for blobs_by_root. We only issue requests for + // blocks after we know the block has data, and only request peers after they claim to + // have imported the block+blobs. 
+ true, + BlobsByRootRequestItems::new(request), + ); Ok(LookupRequestResult::RequestSent(req_id)) } @@ -689,8 +688,12 @@ impl SyncNetworkContext { requester: DataColumnsByRootRequester, peer_id: PeerId, request: DataColumnsByRootSingleBlockRequest, + expect_max_responses: bool, ) -> Result, &'static str> { - let req_id = DataColumnsByRootRequestId(self.next_id()); + let req_id = DataColumnsByRootRequestId { + id: self.next_id(), + requester, + }; debug!( self.log, "Sending DataColumnsByRoot Request"; @@ -705,12 +708,14 @@ impl SyncNetworkContext { self.send_network_msg(NetworkMessage::SendRequest { peer_id, request: RequestType::DataColumnsByRoot(request.clone().into_request(&self.chain.spec)), - request_id: AppRequestId::Sync(SyncRequestId::DataColumnsByRoot(req_id, requester)), + request_id: AppRequestId::Sync(SyncRequestId::DataColumnsByRoot(req_id)), })?; self.data_columns_by_root_requests.insert( req_id, - ActiveDataColumnsByRootRequest::new(request, peer_id, requester), + peer_id, + expect_max_responses, + DataColumnsByRootRequestItems::new(request), ); Ok(LookupRequestResult::RequestSent(req_id)) @@ -916,142 +921,74 @@ impl SyncNetworkContext { // Request handlers - pub fn on_single_block_response( + pub(crate) fn on_single_block_response( &mut self, - request_id: SingleLookupReqId, + id: SingleLookupReqId, peer_id: PeerId, rpc_event: RpcEvent>>, ) -> Option>>> { - let Entry::Occupied(mut request) = self.blocks_by_root_requests.entry(request_id) else { - metrics::inc_counter_vec(&metrics::SYNC_UNKNOWN_NETWORK_REQUESTS, &["blocks_by_root"]); - return None; - }; - - let resp = match rpc_event { - RpcEvent::Response(block, seen_timestamp) => { - match request.get_mut().add_response(block) { - Ok(block) => Ok((block, seen_timestamp)), - Err(e) => { - // The request must be dropped after receiving an error. 
- request.remove(); - Err(e.into()) - } + let response = self.blocks_by_root_requests.on_response(id, rpc_event); + let response = response.map(|res| { + res.and_then(|(mut blocks, seen_timestamp)| { + // Enforce that exactly one chunk = one block is returned. ReqResp behavior limits the + // response count to at most 1. + match blocks.pop() { + Some(block) => Ok((block, seen_timestamp)), + // Should never happen, `blocks_by_root_requests` enforces that we receive at least + // 1 chunk. + None => Err(LookupVerifyError::NotEnoughResponsesReturned { actual: 0 }.into()), } - } - RpcEvent::StreamTermination => match request.remove().terminate() { - Ok(_) => return None, - Err(e) => Err(e.into()), - }, - RpcEvent::RPCError(e) => { - request.remove(); - Err(e.into()) - } - }; - - if let Err(RpcResponseError::VerifyError(e)) = &resp { + }) + }); + if let Some(Err(RpcResponseError::VerifyError(e))) = &response { self.report_peer(peer_id, PeerAction::LowToleranceError, e.into()); } - Some(resp) + response } - pub fn on_single_blob_response( + pub(crate) fn on_single_blob_response( &mut self, - request_id: SingleLookupReqId, + id: SingleLookupReqId, peer_id: PeerId, rpc_event: RpcEvent>>, ) -> Option>> { - let Entry::Occupied(mut request) = self.blobs_by_root_requests.entry(request_id) else { - metrics::inc_counter_vec(&metrics::SYNC_UNKNOWN_NETWORK_REQUESTS, &["blobs_by_root"]); - return None; - }; - - let resp = match rpc_event { - RpcEvent::Response(blob, seen_timestamp) => { - let request = request.get_mut(); - match request.add_response(blob) { - Ok(Some(blobs)) => to_fixed_blob_sidecar_list(blobs) - .map(|blobs| (blobs, seen_timestamp)) - .map_err(|e| (e.into(), request.resolve())), - Ok(None) => return None, - Err(e) => Err((e.into(), request.resolve())), - } - } - RpcEvent::StreamTermination => match request.remove().terminate() { - Ok(_) => return None, - // (err, false = not resolved) because terminate returns Ok() if resolved - Err(e) => Err((e.into(), false)), - 
}, - RpcEvent::RPCError(e) => Err((e.into(), request.remove().resolve())), - }; - - match resp { - Ok(resp) => Some(Ok(resp)), - // Track if this request has already returned some value downstream. Ensure that - // downstream code only receives a single Result per request. If the serving peer does - // multiple penalizable actions per request, downscore and return None. This allows to - // catch if a peer is returning more blobs than requested or if the excess blobs are - // invalid. - Err((e, resolved)) => { - if let RpcResponseError::VerifyError(e) = &e { - self.report_peer(peer_id, PeerAction::LowToleranceError, e.into()); - } - if resolved { - None - } else { - Some(Err(e)) - } - } + let response = self.blobs_by_root_requests.on_response(id, rpc_event); + let response = response.map(|res| { + res.and_then( + |(blobs, seen_timestamp)| match to_fixed_blob_sidecar_list(blobs) { + Ok(blobs) => Ok((blobs, seen_timestamp)), + Err(e) => Err(e.into()), + }, + ) + }); + if let Some(Err(RpcResponseError::VerifyError(e))) = &response { + self.report_peer(peer_id, PeerAction::LowToleranceError, e.into()); } + response } #[allow(clippy::type_complexity)] - pub fn on_data_columns_by_root_response( + pub(crate) fn on_data_columns_by_root_response( &mut self, id: DataColumnsByRootRequestId, - _peer_id: PeerId, + peer_id: PeerId, rpc_event: RpcEvent>>, ) -> Option>>>> { - let Entry::Occupied(mut request) = self.data_columns_by_root_requests.entry(id) else { - return None; - }; + let resp = self + .data_columns_by_root_requests + .on_response(id, rpc_event); + self.report_rpc_response_errors(resp, peer_id) + } - let resp = match rpc_event { - RpcEvent::Response(data_column, seen_timestamp) => { - let request = request.get_mut(); - match request.add_response(data_column) { - Ok(Some(data_columns)) => Ok((data_columns, seen_timestamp)), - Ok(None) => return None, - Err(e) => Err((e.into(), request.resolve())), - } - } - RpcEvent::StreamTermination => match 
request.remove().terminate() { - Ok(_) => return None, - // (err, false = not resolved) because terminate returns Ok() if resolved - Err(e) => Err((e.into(), false)), - }, - RpcEvent::RPCError(e) => Err((e.into(), request.remove().resolve())), - }; - - match resp { - Ok(resp) => Some(Ok(resp)), - // Track if this request has already returned some value downstream. Ensure that - // downstream code only receives a single Result per request. If the serving peer does - // multiple penalizable actions per request, downscore and return None. This allows to - // catch if a peer is returning more columns than requested or if the excess blobs are - // invalid. - Err((e, resolved)) => { - if let RpcResponseError::VerifyError(_e) = &e { - // TODO(das): this is a bug, we should not penalise peer in this case. - // confirm this can be removed. - // self.report_peer(peer_id, PeerAction::LowToleranceError, e.into()); - } - if resolved { - None - } else { - Some(Err(e)) - } - } + fn report_rpc_response_errors( + &mut self, + resp: Option>, + peer_id: PeerId, + ) -> Option> { + if let Some(Err(RpcResponseError::VerifyError(e))) = &resp { + self.report_peer(peer_id, PeerAction::LowToleranceError, e.into()); } + resp } /// Insert a downloaded column into an active custody request. Then make progress on the diff --git a/beacon_node/network/src/sync/network_context/custody.rs b/beacon_node/network/src/sync/network_context/custody.rs index 6736bfb82f..e4bce3dafc 100644 --- a/beacon_node/network/src/sync/network_context/custody.rs +++ b/beacon_node/network/src/sync/network_context/custody.rs @@ -283,6 +283,10 @@ impl ActiveCustodyRequest { block_root: self.block_root, indices: indices.clone(), }, + // true = enforce max_requests are returned data_columns_by_root. 
We only issue requests + // for blocks after we know the block has data, and only request peers after they claim to + // have imported the block+columns and claim to be custodians + true, ) .map_err(Error::SendFailed)?; diff --git a/beacon_node/network/src/sync/network_context/requests.rs b/beacon_node/network/src/sync/network_context/requests.rs index 0c2f59d143..b9214bafcd 100644 --- a/beacon_node/network/src/sync/network_context/requests.rs +++ b/beacon_node/network/src/sync/network_context/requests.rs @@ -1,23 +1,187 @@ +use std::{collections::hash_map::Entry, hash::Hash}; + +use beacon_chain::validator_monitor::timestamp_now; +use fnv::FnvHashMap; +use lighthouse_network::PeerId; use strum::IntoStaticStr; use types::Hash256; -pub use blobs_by_root::{ActiveBlobsByRootRequest, BlobsByRootSingleBlockRequest}; -pub use blocks_by_root::{ActiveBlocksByRootRequest, BlocksByRootSingleRequest}; +pub use blobs_by_root::{BlobsByRootRequestItems, BlobsByRootSingleBlockRequest}; +pub use blocks_by_root::{BlocksByRootRequestItems, BlocksByRootSingleRequest}; pub use data_columns_by_root::{ - ActiveDataColumnsByRootRequest, DataColumnsByRootSingleBlockRequest, + DataColumnsByRootRequestItems, DataColumnsByRootSingleBlockRequest, }; +use crate::metrics; + +use super::{RpcEvent, RpcResponseResult}; + mod blobs_by_root; mod blocks_by_root; mod data_columns_by_root; #[derive(Debug, PartialEq, Eq, IntoStaticStr)] pub enum LookupVerifyError { - NoResponseReturned, - NotEnoughResponsesReturned { expected: usize, actual: usize }, + NotEnoughResponsesReturned { actual: usize }, TooManyResponses, UnrequestedBlockRoot(Hash256), UnrequestedIndex(u64), InvalidInclusionProof, DuplicateData, } + +/// Collection of active requests of a single ReqResp method, i.e. 
`blocks_by_root` +pub struct ActiveRequests { + requests: FnvHashMap>, + name: &'static str, +} + +/// Stateful container for a single active ReqResp request +struct ActiveRequest { + state: State, + peer_id: PeerId, + // Error if the request terminates before receiving max expected responses + expect_max_responses: bool, +} + +enum State { + Active(T), + CompletedEarly, + Errored, +} + +impl ActiveRequests { + pub fn new(name: &'static str) -> Self { + Self { + requests: <_>::default(), + name, + } + } + + pub fn insert(&mut self, id: K, peer_id: PeerId, expect_max_responses: bool, items: T) { + self.requests.insert( + id, + ActiveRequest { + state: State::Active(items), + peer_id, + expect_max_responses, + }, + ); + } + + /// Handle an `RpcEvent` for a specific request index by `id`. + /// + /// Lighthouse ReqResp protocol API promises to send 0 or more `RpcEvent::Response` chunks, + /// and EITHER a single `RpcEvent::RPCError` or RpcEvent::StreamTermination. + /// + /// Downstream code expects to receive a single `Result` value per request ID. However, + /// `add_item` may convert ReqResp success chunks into errors. This function handles the + /// multiple errors / stream termination internally ensuring that a single `Some` is + /// returned. + pub fn on_response( + &mut self, + id: K, + rpc_event: RpcEvent, + ) -> Option>> { + let Entry::Occupied(mut entry) = self.requests.entry(id) else { + metrics::inc_counter_vec(&metrics::SYNC_UNKNOWN_NETWORK_REQUESTS, &[self.name]); + return None; + }; + + match rpc_event { + // Handler of a success ReqResp chunk. Adds the item to the request accumulator. + // `ActiveRequestItems` validates the item before appending to its internal state. 
+ RpcEvent::Response(item, seen_timestamp) => { + let request = &mut entry.get_mut(); + match &mut request.state { + State::Active(items) => { + match items.add(item) { + // Received all items we are expecting for, return early, but keep the request + // struct to handle the stream termination gracefully. + Ok(true) => { + let items = items.consume(); + request.state = State::CompletedEarly; + Some(Ok((items, seen_timestamp))) + } + // Received item, but we are still expecting more + Ok(false) => None, + // Received an invalid item + Err(e) => { + request.state = State::Errored; + Some(Err(e.into())) + } + } + } + // Should never happen, ReqResp network behaviour enforces a max count of chunks + // When `max_remaining_chunks <= 1` a the inbound stream in terminated in + // `rpc/handler.rs`. Handling this case adds complexity for no gain. Even if an + // attacker could abuse this, there's no gain in sending garbage chunks that + // will be ignored anyway. + State::CompletedEarly => None, + // Ignore items after errors. We may want to penalize repeated invalid chunks + // for the same response. But that's an optimization to ban peers sending + // invalid data faster that we choose to not adopt for now. 
+ State::Errored => None, + } + } + RpcEvent::StreamTermination => { + // After stream termination we must forget about this request, there will be no more + // messages coming from the network + let request = entry.remove(); + match request.state { + // Received a stream termination in a valid sequence, consume items + State::Active(mut items) => { + if request.expect_max_responses { + Some(Err(LookupVerifyError::NotEnoughResponsesReturned { + actual: items.consume().len(), + } + .into())) + } else { + Some(Ok((items.consume(), timestamp_now()))) + } + } + // Items already returned, ignore stream termination + State::CompletedEarly => None, + // Returned an error earlier, ignore stream termination + State::Errored => None, + } + } + RpcEvent::RPCError(e) => { + // After an Error event from the network we must forget about this request as this + // may be the last message for this request. + match entry.remove().state { + // Received error while request is still active, propagate error. + State::Active(_) => Some(Err(e.into())), + // Received error after completing the request, ignore the error. This is okay + // because the network has already registered a downscore event if necessary for + // this message. + State::CompletedEarly => None, + // Received a network error after a validity error. Okay to ignore, see above + State::Errored => None, + } + } + } + } + + pub fn active_requests_of_peer(&self, peer_id: &PeerId) -> Vec<&K> { + self.requests + .iter() + .filter(|(_, request)| &request.peer_id == peer_id) + .map(|(id, _)| id) + .collect() + } + + pub fn len(&self) -> usize { + self.requests.len() + } +} + +pub trait ActiveRequestItems { + type Item; + + /// Add a new item into the accumulator. Returns true if all expected items have been received. + fn add(&mut self, item: Self::Item) -> Result; + + /// Return all accumulated items consuming them. 
+ fn consume(&mut self) -> Vec; +} diff --git a/beacon_node/network/src/sync/network_context/requests/blobs_by_root.rs b/beacon_node/network/src/sync/network_context/requests/blobs_by_root.rs index cb2b1a42ec..fefb27a5ef 100644 --- a/beacon_node/network/src/sync/network_context/requests/blobs_by_root.rs +++ b/beacon_node/network/src/sync/network_context/requests/blobs_by_root.rs @@ -1,8 +1,8 @@ -use lighthouse_network::{rpc::methods::BlobsByRootRequest, PeerId}; +use lighthouse_network::rpc::methods::BlobsByRootRequest; use std::sync::Arc; use types::{blob_sidecar::BlobIdentifier, BlobSidecar, ChainSpec, EthSpec, Hash256}; -use super::LookupVerifyError; +use super::{ActiveRequestItems, LookupVerifyError}; #[derive(Debug, Clone)] pub struct BlobsByRootSingleBlockRequest { @@ -25,34 +25,27 @@ impl BlobsByRootSingleBlockRequest { } } -pub struct ActiveBlobsByRootRequest { +pub struct BlobsByRootRequestItems { request: BlobsByRootSingleBlockRequest, - blobs: Vec>>, - resolved: bool, - pub(crate) peer_id: PeerId, + items: Vec>>, } -impl ActiveBlobsByRootRequest { - pub fn new(request: BlobsByRootSingleBlockRequest, peer_id: PeerId) -> Self { +impl BlobsByRootRequestItems { + pub fn new(request: BlobsByRootSingleBlockRequest) -> Self { Self { request, - blobs: vec![], - resolved: false, - peer_id, + items: vec![], } } +} + +impl ActiveRequestItems for BlobsByRootRequestItems { + type Item = Arc>; /// Appends a chunk to this multi-item request. If all expected chunks are received, this /// method returns `Some`, resolving the request before the stream terminator. 
/// The active request SHOULD be dropped after `add_response` returns an error - pub fn add_response( - &mut self, - blob: Arc>, - ) -> Result>>>, LookupVerifyError> { - if self.resolved { - return Err(LookupVerifyError::TooManyResponses); - } - + fn add(&mut self, blob: Self::Item) -> Result { let block_root = blob.block_root(); if self.request.block_root != block_root { return Err(LookupVerifyError::UnrequestedBlockRoot(block_root)); @@ -63,34 +56,16 @@ impl ActiveBlobsByRootRequest { if !self.request.indices.contains(&blob.index) { return Err(LookupVerifyError::UnrequestedIndex(blob.index)); } - if self.blobs.iter().any(|b| b.index == blob.index) { + if self.items.iter().any(|b| b.index == blob.index) { return Err(LookupVerifyError::DuplicateData); } - self.blobs.push(blob); - if self.blobs.len() >= self.request.indices.len() { - // All expected chunks received, return result early - self.resolved = true; - Ok(Some(std::mem::take(&mut self.blobs))) - } else { - Ok(None) - } + self.items.push(blob); + + Ok(self.items.len() >= self.request.indices.len()) } - pub fn terminate(self) -> Result<(), LookupVerifyError> { - if self.resolved { - Ok(()) - } else { - Err(LookupVerifyError::NotEnoughResponsesReturned { - expected: self.request.indices.len(), - actual: self.blobs.len(), - }) - } - } - - /// Mark request as resolved (= has returned something downstream) while marking this status as - /// true for future calls. 
- pub fn resolve(&mut self) -> bool { - std::mem::replace(&mut self.resolved, true) + fn consume(&mut self) -> Vec { + std::mem::take(&mut self.items) } } diff --git a/beacon_node/network/src/sync/network_context/requests/blocks_by_root.rs b/beacon_node/network/src/sync/network_context/requests/blocks_by_root.rs index a15d4e3935..f3cdcbe714 100644 --- a/beacon_node/network/src/sync/network_context/requests/blocks_by_root.rs +++ b/beacon_node/network/src/sync/network_context/requests/blocks_by_root.rs @@ -1,9 +1,9 @@ use beacon_chain::get_block_root; -use lighthouse_network::{rpc::BlocksByRootRequest, PeerId}; +use lighthouse_network::rpc::BlocksByRootRequest; use std::sync::Arc; use types::{ChainSpec, EthSpec, Hash256, SignedBeaconBlock}; -use super::LookupVerifyError; +use super::{ActiveRequestItems, LookupVerifyError}; #[derive(Debug, Copy, Clone)] pub struct BlocksByRootSingleRequest(pub Hash256); @@ -14,47 +14,38 @@ impl BlocksByRootSingleRequest { } } -pub struct ActiveBlocksByRootRequest { +pub struct BlocksByRootRequestItems { request: BlocksByRootSingleRequest, - resolved: bool, - pub(crate) peer_id: PeerId, + items: Vec>>, } -impl ActiveBlocksByRootRequest { - pub fn new(request: BlocksByRootSingleRequest, peer_id: PeerId) -> Self { +impl BlocksByRootRequestItems { + pub fn new(request: BlocksByRootSingleRequest) -> Self { Self { request, - resolved: false, - peer_id, + items: vec![], } } +} + +impl ActiveRequestItems for BlocksByRootRequestItems { + type Item = Arc>; /// Append a response to the single chunk request. If the chunk is valid, the request is /// resolved immediately. 
/// The active request SHOULD be dropped after `add_response` returns an error - pub fn add_response( - &mut self, - block: Arc>, - ) -> Result>, LookupVerifyError> { - if self.resolved { - return Err(LookupVerifyError::TooManyResponses); - } - + fn add(&mut self, block: Self::Item) -> Result { let block_root = get_block_root(&block); if self.request.0 != block_root { return Err(LookupVerifyError::UnrequestedBlockRoot(block_root)); } - // Valid data, blocks by root expects a single response - self.resolved = true; - Ok(block) + self.items.push(block); + // Always returns true, blocks by root expects a single response + Ok(true) } - pub fn terminate(self) -> Result<(), LookupVerifyError> { - if self.resolved { - Ok(()) - } else { - Err(LookupVerifyError::NoResponseReturned) - } + fn consume(&mut self) -> Vec { + std::mem::take(&mut self.items) } } diff --git a/beacon_node/network/src/sync/network_context/requests/data_columns_by_root.rs b/beacon_node/network/src/sync/network_context/requests/data_columns_by_root.rs index a42ae7ca41..1b8d46ff07 100644 --- a/beacon_node/network/src/sync/network_context/requests/data_columns_by_root.rs +++ b/beacon_node/network/src/sync/network_context/requests/data_columns_by_root.rs @@ -1,9 +1,8 @@ -use lighthouse_network::service::api_types::DataColumnsByRootRequester; -use lighthouse_network::{rpc::methods::DataColumnsByRootRequest, PeerId}; +use lighthouse_network::rpc::methods::DataColumnsByRootRequest; use std::sync::Arc; use types::{ChainSpec, DataColumnIdentifier, DataColumnSidecar, EthSpec, Hash256}; -use super::LookupVerifyError; +use super::{ActiveRequestItems, LookupVerifyError}; #[derive(Debug, Clone)] pub struct DataColumnsByRootSingleBlockRequest { @@ -26,40 +25,27 @@ impl DataColumnsByRootSingleBlockRequest { } } -pub struct ActiveDataColumnsByRootRequest { +pub struct DataColumnsByRootRequestItems { request: DataColumnsByRootSingleBlockRequest, items: Vec>>, - resolved: bool, - pub(crate) peer_id: PeerId, - pub(crate) 
requester: DataColumnsByRootRequester, } -impl ActiveDataColumnsByRootRequest { - pub fn new( - request: DataColumnsByRootSingleBlockRequest, - peer_id: PeerId, - requester: DataColumnsByRootRequester, - ) -> Self { +impl DataColumnsByRootRequestItems { + pub fn new(request: DataColumnsByRootSingleBlockRequest) -> Self { Self { request, items: vec![], - resolved: false, - peer_id, - requester, } } +} + +impl ActiveRequestItems for DataColumnsByRootRequestItems { + type Item = Arc>; /// Appends a chunk to this multi-item request. If all expected chunks are received, this /// method returns `Some`, resolving the request before the stream terminator. /// The active request SHOULD be dropped after `add_response` returns an error - pub fn add_response( - &mut self, - data_column: Arc>, - ) -> Result>>>, LookupVerifyError> { - if self.resolved { - return Err(LookupVerifyError::TooManyResponses); - } - + fn add(&mut self, data_column: Self::Item) -> Result { let block_root = data_column.block_root(); if self.request.block_root != block_root { return Err(LookupVerifyError::UnrequestedBlockRoot(block_root)); @@ -75,29 +61,11 @@ impl ActiveDataColumnsByRootRequest { } self.items.push(data_column); - if self.items.len() >= self.request.indices.len() { - // All expected chunks received, return result early - self.resolved = true; - Ok(Some(std::mem::take(&mut self.items))) - } else { - Ok(None) - } + + Ok(self.items.len() >= self.request.indices.len()) } - pub fn terminate(self) -> Result<(), LookupVerifyError> { - if self.resolved { - Ok(()) - } else { - Err(LookupVerifyError::NotEnoughResponsesReturned { - expected: self.request.indices.len(), - actual: self.items.len(), - }) - } - } - - /// Mark request as resolved (= has returned something downstream) while marking this status as - /// true for future calls. 
- pub fn resolve(&mut self) -> bool { - std::mem::replace(&mut self.resolved, true) + fn consume(&mut self) -> Vec { + std::mem::take(&mut self.items) } } diff --git a/beacon_node/network/src/sync/peer_sampling.rs b/beacon_node/network/src/sync/peer_sampling.rs index decabfd216..7e725f5df5 100644 --- a/beacon_node/network/src/sync/peer_sampling.rs +++ b/beacon_node/network/src/sync/peer_sampling.rs @@ -88,7 +88,11 @@ impl Sampling { } }; - debug!(self.log, "Created new sample request"; "id" => ?id); + debug!(self.log, + "Created new sample request"; + "id" => ?id, + "column_selection" => ?request.column_selection() + ); // TOOD(das): If a node has very little peers, continue_sampling() will attempt to find enough // to sample here, immediately failing the sampling request. There should be some grace @@ -239,6 +243,15 @@ impl ActiveSamplingRequest { self.column_requests.get(index).map(|req| req.status()) } + /// Return the current ordered list of columns that this request has to sample to succeed + pub(crate) fn column_selection(&self) -> Vec { + self.column_shuffle + .iter() + .take(REQUIRED_SUCCESSES[0]) + .copied() + .collect() + } + /// Insert a downloaded column into an active sampling request. Then make progress on the /// entire request. /// @@ -531,6 +544,10 @@ impl ActiveSamplingRequest { block_root: self.block_root, indices: column_indexes.clone(), }, + // false = We issue requests to custodians who may or may not have received the + // samples yet. We don't have any signal (like an attestation or status messages that the + // custodian has received data). 
+ false, ) .map_err(SamplingError::SendFailed)?; self.column_indexes_by_sampling_request From 9f1bec63720132c7247c71733c49b12c5e1852e2 Mon Sep 17 00:00:00 2001 From: Kolby Moroz Liebl <31669092+KolbyML@users.noreply.github.com> Date: Thu, 17 Oct 2024 19:09:28 -0700 Subject: [PATCH 60/66] Add Trin Execution ClientCode (#6502) * Add Trin Execution ClientCode --- beacon_node/execution_layer/src/engine_api.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index ab275e8b11..1c23c8ba66 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -570,6 +570,7 @@ pub enum ClientCode { Lodestar, Nethermind, Nimbus, + TrinExecution, Teku, Prysm, Reth, @@ -588,6 +589,7 @@ impl std::fmt::Display for ClientCode { ClientCode::Lodestar => "LS", ClientCode::Nethermind => "NM", ClientCode::Nimbus => "NB", + ClientCode::TrinExecution => "TE", ClientCode::Teku => "TK", ClientCode::Prysm => "PM", ClientCode::Reth => "RH", @@ -611,6 +613,7 @@ impl TryFrom for ClientCode { "LS" => Ok(Self::Lodestar), "NM" => Ok(Self::Nethermind), "NB" => Ok(Self::Nimbus), + "TE" => Ok(Self::TrinExecution), "TK" => Ok(Self::Teku), "PM" => Ok(Self::Prysm), "RH" => Ok(Self::Reth), From d1fda938a338739578b09c1f1a1ba6144d8b5149 Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Thu, 17 Oct 2024 19:50:51 -0700 Subject: [PATCH 61/66] Light client updates by range RPC (#6383) * enable lc update over rpc * resolve TODOs * resolve merge conflicts * move max light client updates to eth spec * Merge branch 'unstable' of https://github.com/sigp/lighthouse into light-client-updates-by-range-rpc * remove ethspec dependency * Update beacon_node/network/src/network_beacon_processor/rpc_methods.rs Co-authored-by: Michael Sproul * Update beacon_node/lighthouse_network/src/rpc/methods.rs Co-authored-by: Michael Sproul --- beacon_node/beacon_processor/src/lib.rs | 13 ++- 
.../src/peer_manager/mod.rs | 3 + .../lighthouse_network/src/rpc/codec.rs | 38 ++++++- .../lighthouse_network/src/rpc/config.rs | 10 ++ .../lighthouse_network/src/rpc/methods.rs | 45 +++++++- beacon_node/lighthouse_network/src/rpc/mod.rs | 3 + .../lighthouse_network/src/rpc/protocol.rs | 61 ++++++++++- .../src/rpc/rate_limiter.rs | 16 +++ .../src/service/api_types.rs | 10 +- .../lighthouse_network/src/service/mod.rs | 19 ++++ .../src/network_beacon_processor/mod.rs | 27 +++++ .../network_beacon_processor/rpc_methods.rs | 100 ++++++++++++++++++ beacon_node/network/src/router.rs | 14 ++- consensus/types/src/light_client_update.rs | 33 +++++- lighthouse/environment/src/lib.rs | 2 +- 15 files changed, 383 insertions(+), 11 deletions(-) diff --git a/beacon_node/beacon_processor/src/lib.rs b/beacon_node/beacon_processor/src/lib.rs index 02c287b68e..2a69b04c91 100644 --- a/beacon_node/beacon_processor/src/lib.rs +++ b/beacon_node/beacon_processor/src/lib.rs @@ -135,6 +135,7 @@ pub struct BeaconProcessorQueueLengths { lc_bootstrap_queue: usize, lc_optimistic_update_queue: usize, lc_finality_update_queue: usize, + lc_update_range_queue: usize, api_request_p0_queue: usize, api_request_p1_queue: usize, } @@ -202,6 +203,7 @@ impl BeaconProcessorQueueLengths { lc_bootstrap_queue: 1024, lc_optimistic_update_queue: 512, lc_finality_update_queue: 512, + lc_update_range_queue: 512, api_request_p0_queue: 1024, api_request_p1_queue: 1024, }) @@ -622,6 +624,7 @@ pub enum Work { LightClientBootstrapRequest(BlockingFn), LightClientOptimisticUpdateRequest(BlockingFn), LightClientFinalityUpdateRequest(BlockingFn), + LightClientUpdatesByRangeRequest(BlockingFn), ApiRequestP0(BlockingOrAsync), ApiRequestP1(BlockingOrAsync), } @@ -673,6 +676,7 @@ pub enum WorkType { LightClientBootstrapRequest, LightClientOptimisticUpdateRequest, LightClientFinalityUpdateRequest, + LightClientUpdatesByRangeRequest, ApiRequestP0, ApiRequestP1, } @@ -723,6 +727,7 @@ impl Work { 
WorkType::LightClientOptimisticUpdateRequest } Work::LightClientFinalityUpdateRequest(_) => WorkType::LightClientFinalityUpdateRequest, + Work::LightClientUpdatesByRangeRequest(_) => WorkType::LightClientUpdatesByRangeRequest, Work::UnknownBlockAttestation { .. } => WorkType::UnknownBlockAttestation, Work::UnknownBlockAggregate { .. } => WorkType::UnknownBlockAggregate, Work::UnknownBlockSamplingRequest { .. } => WorkType::UnknownBlockSamplingRequest, @@ -902,6 +907,7 @@ impl BeaconProcessor { let mut lc_optimistic_update_queue = FifoQueue::new(queue_lengths.lc_optimistic_update_queue); let mut lc_finality_update_queue = FifoQueue::new(queue_lengths.lc_finality_update_queue); + let mut lc_update_range_queue = FifoQueue::new(queue_lengths.lc_update_range_queue); let mut api_request_p0_queue = FifoQueue::new(queue_lengths.api_request_p0_queue); let mut api_request_p1_queue = FifoQueue::new(queue_lengths.api_request_p1_queue); @@ -1379,6 +1385,9 @@ impl BeaconProcessor { Work::LightClientFinalityUpdateRequest { .. } => { lc_finality_update_queue.push(work, work_id, &self.log) } + Work::LightClientUpdatesByRangeRequest { .. } => { + lc_update_range_queue.push(work, work_id, &self.log) + } Work::UnknownBlockAttestation { .. 
} => { unknown_block_attestation_queue.push(work) } @@ -1470,6 +1479,7 @@ impl BeaconProcessor { WorkType::LightClientFinalityUpdateRequest => { lc_finality_update_queue.len() } + WorkType::LightClientUpdatesByRangeRequest => lc_update_range_queue.len(), WorkType::ApiRequestP0 => api_request_p0_queue.len(), WorkType::ApiRequestP1 => api_request_p1_queue.len(), }; @@ -1622,7 +1632,8 @@ impl BeaconProcessor { | Work::GossipBlsToExecutionChange(process_fn) | Work::LightClientBootstrapRequest(process_fn) | Work::LightClientOptimisticUpdateRequest(process_fn) - | Work::LightClientFinalityUpdateRequest(process_fn) => { + | Work::LightClientFinalityUpdateRequest(process_fn) + | Work::LightClientUpdatesByRangeRequest(process_fn) => { task_spawner.spawn_blocking(process_fn) } }; diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index ec4d892c9b..c1e72d250f 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -558,6 +558,7 @@ impl PeerManager { Protocol::LightClientBootstrap => return, Protocol::LightClientOptimisticUpdate => return, Protocol::LightClientFinalityUpdate => return, + Protocol::LightClientUpdatesByRange => return, Protocol::BlobsByRoot => PeerAction::MidToleranceError, Protocol::DataColumnsByRoot => PeerAction::MidToleranceError, Protocol::DataColumnsByRange => PeerAction::MidToleranceError, @@ -585,6 +586,7 @@ impl PeerManager { Protocol::LightClientBootstrap => return, Protocol::LightClientOptimisticUpdate => return, Protocol::LightClientFinalityUpdate => return, + Protocol::LightClientUpdatesByRange => return, Protocol::MetaData => PeerAction::Fatal, Protocol::Status => PeerAction::Fatal, } @@ -606,6 +608,7 @@ impl PeerManager { Protocol::LightClientBootstrap => return, Protocol::LightClientOptimisticUpdate => return, Protocol::LightClientFinalityUpdate => return, + Protocol::LightClientUpdatesByRange => 
return, Protocol::Goodbye => return, Protocol::MetaData => return, Protocol::Status => return, diff --git a/beacon_node/lighthouse_network/src/rpc/codec.rs b/beacon_node/lighthouse_network/src/rpc/codec.rs index 17234a27a8..19f1b8def7 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec.rs @@ -18,9 +18,9 @@ use tokio_util::codec::{Decoder, Encoder}; use types::{ BlobSidecar, ChainSpec, DataColumnSidecar, EthSpec, ForkContext, ForkName, Hash256, LightClientBootstrap, LightClientFinalityUpdate, LightClientOptimisticUpdate, - RuntimeVariableList, SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, - SignedBeaconBlockBellatrix, SignedBeaconBlockCapella, SignedBeaconBlockDeneb, - SignedBeaconBlockElectra, + LightClientUpdate, RuntimeVariableList, SignedBeaconBlock, SignedBeaconBlockAltair, + SignedBeaconBlockBase, SignedBeaconBlockBellatrix, SignedBeaconBlockCapella, + SignedBeaconBlockDeneb, SignedBeaconBlockElectra, }; use unsigned_varint::codec::Uvi; @@ -76,6 +76,7 @@ impl SSZSnappyInboundCodec { RpcSuccessResponse::LightClientBootstrap(res) => res.as_ssz_bytes(), RpcSuccessResponse::LightClientOptimisticUpdate(res) => res.as_ssz_bytes(), RpcSuccessResponse::LightClientFinalityUpdate(res) => res.as_ssz_bytes(), + RpcSuccessResponse::LightClientUpdatesByRange(res) => res.as_ssz_bytes(), RpcSuccessResponse::Pong(res) => res.data.as_ssz_bytes(), RpcSuccessResponse::MetaData(res) => // Encode the correct version of the MetaData response based on the negotiated version. 
@@ -342,6 +343,7 @@ impl Encoder for SSZSnappyOutboundCodec { RequestType::DataColumnsByRoot(req) => req.data_column_ids.as_ssz_bytes(), RequestType::Ping(req) => req.as_ssz_bytes(), RequestType::LightClientBootstrap(req) => req.as_ssz_bytes(), + RequestType::LightClientUpdatesByRange(req) => req.as_ssz_bytes(), // no metadata to encode RequestType::MetaData(_) | RequestType::LightClientOptimisticUpdate @@ -503,6 +505,10 @@ fn context_bytes( return lc_finality_update .map_with_fork_name(|fork_name| fork_context.to_context_bytes(fork_name)); } + RpcSuccessResponse::LightClientUpdatesByRange(lc_update) => { + return lc_update + .map_with_fork_name(|fork_name| fork_context.to_context_bytes(fork_name)); + } // These will not pass the has_context_bytes() check RpcSuccessResponse::Status(_) | RpcSuccessResponse::Pong(_) @@ -613,6 +619,11 @@ fn handle_rpc_request( SupportedProtocol::LightClientFinalityUpdateV1 => { Ok(Some(RequestType::LightClientFinalityUpdate)) } + SupportedProtocol::LightClientUpdatesByRangeV1 => { + Ok(Some(RequestType::LightClientUpdatesByRange( + LightClientUpdatesByRangeRequest::from_ssz_bytes(decoded_buffer)?, + ))) + } // MetaData requests return early from InboundUpgrade and do not reach the decoder. // Handle this case just for completeness. 
SupportedProtocol::MetaDataV3 => { @@ -795,6 +806,21 @@ fn handle_rpc_response( ), )), }, + SupportedProtocol::LightClientUpdatesByRangeV1 => match fork_name { + Some(fork_name) => Ok(Some(RpcSuccessResponse::LightClientUpdatesByRange( + Arc::new(LightClientUpdate::from_ssz_bytes( + decoded_buffer, + &fork_name, + )?), + ))), + None => Err(RPCError::ErrorResponse( + RpcErrorResponse::InvalidRequest, + format!( + "No context bytes provided for {:?} response", + versioned_protocol + ), + )), + }, // MetaData V2/V3 responses have no context bytes, so behave similarly to V1 responses SupportedProtocol::MetaDataV3 => Ok(Some(RpcSuccessResponse::MetaData(MetaData::V3( MetaDataV3::from_ssz_bytes(decoded_buffer)?, @@ -1215,6 +1241,12 @@ mod tests { ) } RequestType::LightClientOptimisticUpdate | RequestType::LightClientFinalityUpdate => {} + RequestType::LightClientUpdatesByRange(light_client_updates_by_range) => { + assert_eq!( + decoded, + RequestType::LightClientUpdatesByRange(light_client_updates_by_range) + ) + } } } diff --git a/beacon_node/lighthouse_network/src/rpc/config.rs b/beacon_node/lighthouse_network/src/rpc/config.rs index fcb9c98604..42ece6dc4f 100644 --- a/beacon_node/lighthouse_network/src/rpc/config.rs +++ b/beacon_node/lighthouse_network/src/rpc/config.rs @@ -96,6 +96,7 @@ pub struct RateLimiterConfig { pub(super) light_client_bootstrap_quota: Quota, pub(super) light_client_optimistic_update_quota: Quota, pub(super) light_client_finality_update_quota: Quota, + pub(super) light_client_updates_by_range_quota: Quota, } impl RateLimiterConfig { @@ -121,6 +122,7 @@ impl RateLimiterConfig { pub const DEFAULT_LIGHT_CLIENT_BOOTSTRAP_QUOTA: Quota = Quota::one_every(10); pub const DEFAULT_LIGHT_CLIENT_OPTIMISTIC_UPDATE_QUOTA: Quota = Quota::one_every(10); pub const DEFAULT_LIGHT_CLIENT_FINALITY_UPDATE_QUOTA: Quota = Quota::one_every(10); + pub const DEFAULT_LIGHT_CLIENT_UPDATES_BY_RANGE_QUOTA: Quota = Quota::one_every(10); } impl Default for RateLimiterConfig { 
@@ -140,6 +142,7 @@ impl Default for RateLimiterConfig { light_client_optimistic_update_quota: Self::DEFAULT_LIGHT_CLIENT_OPTIMISTIC_UPDATE_QUOTA, light_client_finality_update_quota: Self::DEFAULT_LIGHT_CLIENT_FINALITY_UPDATE_QUOTA, + light_client_updates_by_range_quota: Self::DEFAULT_LIGHT_CLIENT_UPDATES_BY_RANGE_QUOTA, } } } @@ -198,6 +201,7 @@ impl FromStr for RateLimiterConfig { let mut light_client_bootstrap_quota = None; let mut light_client_optimistic_update_quota = None; let mut light_client_finality_update_quota = None; + let mut light_client_updates_by_range_quota = None; for proto_def in s.split(';') { let ProtocolQuota { protocol, quota } = proto_def.parse()?; @@ -228,6 +232,10 @@ impl FromStr for RateLimiterConfig { light_client_finality_update_quota = light_client_finality_update_quota.or(quota) } + Protocol::LightClientUpdatesByRange => { + light_client_updates_by_range_quota = + light_client_updates_by_range_quota.or(quota) + } } } Ok(RateLimiterConfig { @@ -252,6 +260,8 @@ impl FromStr for RateLimiterConfig { .unwrap_or(Self::DEFAULT_LIGHT_CLIENT_OPTIMISTIC_UPDATE_QUOTA), light_client_finality_update_quota: light_client_finality_update_quota .unwrap_or(Self::DEFAULT_LIGHT_CLIENT_FINALITY_UPDATE_QUOTA), + light_client_updates_by_range_quota: light_client_updates_by_range_quota + .unwrap_or(Self::DEFAULT_LIGHT_CLIENT_UPDATES_BY_RANGE_QUOTA), }) } } diff --git a/beacon_node/lighthouse_network/src/rpc/methods.rs b/beacon_node/lighthouse_network/src/rpc/methods.rs index e187c9a40f..912fda3606 100644 --- a/beacon_node/lighthouse_network/src/rpc/methods.rs +++ b/beacon_node/lighthouse_network/src/rpc/methods.rs @@ -13,10 +13,11 @@ use std::sync::Arc; use strum::IntoStaticStr; use superstruct::superstruct; use types::blob_sidecar::BlobIdentifier; +use types::light_client_update::MAX_REQUEST_LIGHT_CLIENT_UPDATES; use types::{ blob_sidecar::BlobSidecar, ChainSpec, ColumnIndex, DataColumnIdentifier, DataColumnSidecar, Epoch, EthSpec, Hash256, 
LightClientBootstrap, LightClientFinalityUpdate, - LightClientOptimisticUpdate, RuntimeVariableList, SignedBeaconBlock, Slot, + LightClientOptimisticUpdate, LightClientUpdate, RuntimeVariableList, SignedBeaconBlock, Slot, }; /// Maximum length of error message. @@ -471,6 +472,34 @@ impl DataColumnsByRootRequest { } } +/// Request a number of light client updates from a peer. +#[derive(Encode, Decode, Clone, Debug, PartialEq)] +pub struct LightClientUpdatesByRangeRequest { + /// The starting period to request light client updates. + pub start_period: u64, + /// The number of periods from `start_period`. + pub count: u64, +} + +impl LightClientUpdatesByRangeRequest { + pub fn max_requested(&self) -> u64 { + MAX_REQUEST_LIGHT_CLIENT_UPDATES + } + + pub fn ssz_min_len() -> usize { + LightClientUpdatesByRangeRequest { + start_period: 0, + count: 0, + } + .as_ssz_bytes() + .len() + } + + pub fn ssz_max_len() -> usize { + Self::ssz_min_len() + } +} + /* RPC Handling and Grouping */ // Collection of enums and structs used by the Codecs to encode/decode RPC messages @@ -498,6 +527,9 @@ pub enum RpcSuccessResponse { /// A response to a get LIGHT_CLIENT_FINALITY_UPDATE request. LightClientFinalityUpdate(Arc>), + /// A response to a get LIGHT_CLIENT_UPDATES_BY_RANGE request. + LightClientUpdatesByRange(Arc>), + /// A response to a get BLOBS_BY_ROOT request. BlobsByRoot(Arc>), @@ -534,6 +566,9 @@ pub enum ResponseTermination { /// Data column sidecars by range stream termination. DataColumnsByRange, + + /// Light client updates by range stream termination. 
+ LightClientUpdatesByRange, } /// The structured response containing a result/code indicating success or failure @@ -633,6 +668,7 @@ impl RpcSuccessResponse { Protocol::LightClientOptimisticUpdate } RpcSuccessResponse::LightClientFinalityUpdate(_) => Protocol::LightClientFinalityUpdate, + RpcSuccessResponse::LightClientUpdatesByRange(_) => Protocol::LightClientUpdatesByRange, } } } @@ -704,6 +740,13 @@ impl std::fmt::Display for RpcSuccessResponse { update.signature_slot() ) } + RpcSuccessResponse::LightClientUpdatesByRange(update) => { + write!( + f, + "LightClientUpdatesByRange Slot: {}", + update.signature_slot(), + ) + } } } } diff --git a/beacon_node/lighthouse_network/src/rpc/mod.rs b/beacon_node/lighthouse_network/src/rpc/mod.rs index 28f2addf86..ed4da463ff 100644 --- a/beacon_node/lighthouse_network/src/rpc/mod.rs +++ b/beacon_node/lighthouse_network/src/rpc/mod.rs @@ -553,6 +553,9 @@ where ResponseTermination::BlobsByRoot => Protocol::BlobsByRoot, ResponseTermination::DataColumnsByRoot => Protocol::DataColumnsByRoot, ResponseTermination::DataColumnsByRange => Protocol::DataColumnsByRange, + ResponseTermination::LightClientUpdatesByRange => { + Protocol::LightClientUpdatesByRange + } }, ), }; diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index 417c7a3ee5..b4f6dac4fa 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -21,7 +21,8 @@ use types::{ BlobSidecar, ChainSpec, DataColumnSidecar, EmptyBlock, EthSpec, ForkContext, ForkName, LightClientBootstrap, LightClientBootstrapAltair, LightClientFinalityUpdate, LightClientFinalityUpdateAltair, LightClientOptimisticUpdate, - LightClientOptimisticUpdateAltair, MainnetEthSpec, Signature, SignedBeaconBlock, + LightClientOptimisticUpdateAltair, LightClientUpdate, MainnetEthSpec, Signature, + SignedBeaconBlock, }; // Note: Hardcoding the `EthSpec` type for `SignedBeaconBlock` 
as min/max values is @@ -143,6 +144,13 @@ pub static LIGHT_CLIENT_BOOTSTRAP_ELECTRA_MAX: LazyLock = LazyLock::new(| LightClientBootstrap::::ssz_max_len_for_fork(ForkName::Electra) }); +pub static LIGHT_CLIENT_UPDATES_BY_RANGE_CAPELLA_MAX: LazyLock = + LazyLock::new(|| LightClientUpdate::::ssz_max_len_for_fork(ForkName::Capella)); +pub static LIGHT_CLIENT_UPDATES_BY_RANGE_DENEB_MAX: LazyLock = + LazyLock::new(|| LightClientUpdate::::ssz_max_len_for_fork(ForkName::Deneb)); +pub static LIGHT_CLIENT_UPDATES_BY_RANGE_ELECTRA_MAX: LazyLock = + LazyLock::new(|| LightClientUpdate::::ssz_max_len_for_fork(ForkName::Electra)); + /// The protocol prefix the RPC protocol id. const PROTOCOL_PREFIX: &str = "/eth2/beacon_chain/req"; /// The number of seconds to wait for the first bytes of a request once a protocol has been @@ -190,6 +198,26 @@ pub fn rpc_block_limits_by_fork(current_fork: ForkName) -> RpcLimits { } } +fn rpc_light_client_updates_by_range_limits_by_fork(current_fork: ForkName) -> RpcLimits { + let altair_fixed_len = LightClientFinalityUpdateAltair::::ssz_fixed_len(); + + match ¤t_fork { + ForkName::Base => RpcLimits::new(0, 0), + ForkName::Altair | ForkName::Bellatrix => { + RpcLimits::new(altair_fixed_len, altair_fixed_len) + } + ForkName::Capella => { + RpcLimits::new(altair_fixed_len, *LIGHT_CLIENT_UPDATES_BY_RANGE_CAPELLA_MAX) + } + ForkName::Deneb => { + RpcLimits::new(altair_fixed_len, *LIGHT_CLIENT_UPDATES_BY_RANGE_DENEB_MAX) + } + ForkName::Electra => { + RpcLimits::new(altair_fixed_len, *LIGHT_CLIENT_UPDATES_BY_RANGE_ELECTRA_MAX) + } + } +} + fn rpc_light_client_finality_update_limits_by_fork(current_fork: ForkName) -> RpcLimits { let altair_fixed_len = LightClientFinalityUpdateAltair::::ssz_fixed_len(); @@ -286,6 +314,9 @@ pub enum Protocol { /// The `LightClientFinalityUpdate` protocol name. 
#[strum(serialize = "light_client_finality_update")] LightClientFinalityUpdate, + /// The `LightClientUpdatesByRange` protocol name + #[strum(serialize = "light_client_updates_by_range")] + LightClientUpdatesByRange, } impl Protocol { @@ -304,6 +335,7 @@ impl Protocol { Protocol::LightClientBootstrap => None, Protocol::LightClientOptimisticUpdate => None, Protocol::LightClientFinalityUpdate => None, + Protocol::LightClientUpdatesByRange => None, } } } @@ -334,6 +366,7 @@ pub enum SupportedProtocol { LightClientBootstrapV1, LightClientOptimisticUpdateV1, LightClientFinalityUpdateV1, + LightClientUpdatesByRangeV1, } impl SupportedProtocol { @@ -356,6 +389,7 @@ impl SupportedProtocol { SupportedProtocol::LightClientBootstrapV1 => "1", SupportedProtocol::LightClientOptimisticUpdateV1 => "1", SupportedProtocol::LightClientFinalityUpdateV1 => "1", + SupportedProtocol::LightClientUpdatesByRangeV1 => "1", } } @@ -380,6 +414,7 @@ impl SupportedProtocol { Protocol::LightClientOptimisticUpdate } SupportedProtocol::LightClientFinalityUpdateV1 => Protocol::LightClientFinalityUpdate, + SupportedProtocol::LightClientUpdatesByRangeV1 => Protocol::LightClientUpdatesByRange, } } @@ -542,6 +577,10 @@ impl ProtocolId { ), Protocol::LightClientOptimisticUpdate => RpcLimits::new(0, 0), Protocol::LightClientFinalityUpdate => RpcLimits::new(0, 0), + Protocol::LightClientUpdatesByRange => RpcLimits::new( + LightClientUpdatesByRangeRequest::ssz_min_len(), + LightClientUpdatesByRangeRequest::ssz_max_len(), + ), Protocol::MetaData => RpcLimits::new(0, 0), // Metadata requests are empty } } @@ -577,6 +616,9 @@ impl ProtocolId { Protocol::LightClientFinalityUpdate => { rpc_light_client_finality_update_limits_by_fork(fork_context.current_fork()) } + Protocol::LightClientUpdatesByRange => { + rpc_light_client_updates_by_range_limits_by_fork(fork_context.current_fork()) + } } } @@ -592,7 +634,8 @@ impl ProtocolId { | SupportedProtocol::DataColumnsByRangeV1 | 
SupportedProtocol::LightClientBootstrapV1 | SupportedProtocol::LightClientOptimisticUpdateV1 - | SupportedProtocol::LightClientFinalityUpdateV1 => true, + | SupportedProtocol::LightClientFinalityUpdateV1 + | SupportedProtocol::LightClientUpdatesByRangeV1 => true, SupportedProtocol::StatusV1 | SupportedProtocol::BlocksByRootV1 | SupportedProtocol::BlocksByRangeV1 @@ -723,6 +766,7 @@ pub enum RequestType { LightClientBootstrap(LightClientBootstrapRequest), LightClientOptimisticUpdate, LightClientFinalityUpdate, + LightClientUpdatesByRange(LightClientUpdatesByRangeRequest), Ping(Ping), MetaData(MetadataRequest), } @@ -747,6 +791,7 @@ impl RequestType { RequestType::LightClientBootstrap(_) => 1, RequestType::LightClientOptimisticUpdate => 1, RequestType::LightClientFinalityUpdate => 1, + RequestType::LightClientUpdatesByRange(req) => req.max_requested(), } } @@ -780,6 +825,9 @@ impl RequestType { RequestType::LightClientFinalityUpdate => { SupportedProtocol::LightClientFinalityUpdateV1 } + RequestType::LightClientUpdatesByRange(_) => { + SupportedProtocol::LightClientUpdatesByRangeV1 + } } } @@ -802,6 +850,7 @@ impl RequestType { RequestType::LightClientBootstrap(_) => unreachable!(), RequestType::LightClientFinalityUpdate => unreachable!(), RequestType::LightClientOptimisticUpdate => unreachable!(), + RequestType::LightClientUpdatesByRange(_) => unreachable!(), } } @@ -861,6 +910,10 @@ impl RequestType { SupportedProtocol::LightClientFinalityUpdateV1, Encoding::SSZSnappy, )], + RequestType::LightClientUpdatesByRange(_) => vec![ProtocolId::new( + SupportedProtocol::LightClientUpdatesByRangeV1, + Encoding::SSZSnappy, + )], } } @@ -879,6 +932,7 @@ impl RequestType { RequestType::LightClientBootstrap(_) => true, RequestType::LightClientOptimisticUpdate => true, RequestType::LightClientFinalityUpdate => true, + RequestType::LightClientUpdatesByRange(_) => true, } } } @@ -997,6 +1051,9 @@ impl std::fmt::Display for RequestType { RequestType::LightClientFinalityUpdate => { 
write!(f, "Light client finality update request") } + RequestType::LightClientUpdatesByRange(_) => { + write!(f, "Light client updates by range request") + } } } } diff --git a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs index 06b246e74a..e11f7f0e73 100644 --- a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs +++ b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs @@ -106,6 +106,8 @@ pub struct RPCRateLimiter { lc_optimistic_update_rl: Limiter, /// LightClientFinalityUpdate rate limiter. lc_finality_update_rl: Limiter, + /// LightClientUpdatesByRange rate limiter. + lc_updates_by_range_rl: Limiter, } /// Error type for non conformant requests @@ -146,6 +148,8 @@ pub struct RPCRateLimiterBuilder { lc_optimistic_update_quota: Option, /// Quota for the LightClientOptimisticUpdate protocol. lc_finality_update_quota: Option, + /// Quota for the LightClientUpdatesByRange protocol. + lc_updates_by_range_quota: Option, } impl RPCRateLimiterBuilder { @@ -166,6 +170,7 @@ impl RPCRateLimiterBuilder { Protocol::LightClientBootstrap => self.lcbootstrap_quota = q, Protocol::LightClientOptimisticUpdate => self.lc_optimistic_update_quota = q, Protocol::LightClientFinalityUpdate => self.lc_finality_update_quota = q, + Protocol::LightClientUpdatesByRange => self.lc_updates_by_range_quota = q, } self } @@ -191,6 +196,9 @@ impl RPCRateLimiterBuilder { let lc_finality_update_quota = self .lc_finality_update_quota .ok_or("LightClientFinalityUpdate quota not specified")?; + let lc_updates_by_range_quota = self + .lc_updates_by_range_quota + .ok_or("LightClientUpdatesByRange quota not specified")?; let blbrange_quota = self .blbrange_quota @@ -221,6 +229,7 @@ impl RPCRateLimiterBuilder { let lc_bootstrap_rl = Limiter::from_quota(lc_bootstrap_quota)?; let lc_optimistic_update_rl = Limiter::from_quota(lc_optimistic_update_quota)?; let lc_finality_update_rl = Limiter::from_quota(lc_finality_update_quota)?; + let 
lc_updates_by_range_rl = Limiter::from_quota(lc_updates_by_range_quota)?; // check for peers to prune every 30 seconds, starting in 30 seconds let prune_every = tokio::time::Duration::from_secs(30); @@ -241,6 +250,7 @@ impl RPCRateLimiterBuilder { lc_bootstrap_rl, lc_optimistic_update_rl, lc_finality_update_rl, + lc_updates_by_range_rl, init_time: Instant::now(), }) } @@ -278,6 +288,7 @@ impl RPCRateLimiter { light_client_bootstrap_quota, light_client_optimistic_update_quota, light_client_finality_update_quota, + light_client_updates_by_range_quota, } = config; Self::builder() @@ -300,6 +311,10 @@ impl RPCRateLimiter { Protocol::LightClientFinalityUpdate, light_client_finality_update_quota, ) + .set_quota( + Protocol::LightClientUpdatesByRange, + light_client_updates_by_range_quota, + ) .build() } @@ -332,6 +347,7 @@ impl RPCRateLimiter { Protocol::LightClientBootstrap => &mut self.lc_bootstrap_rl, Protocol::LightClientOptimisticUpdate => &mut self.lc_optimistic_update_rl, Protocol::LightClientFinalityUpdate => &mut self.lc_finality_update_rl, + Protocol::LightClientUpdatesByRange => &mut self.lc_updates_by_range_rl, }; check(limiter) } diff --git a/beacon_node/lighthouse_network/src/service/api_types.rs b/beacon_node/lighthouse_network/src/service/api_types.rs index d8a1039783..cb22815390 100644 --- a/beacon_node/lighthouse_network/src/service/api_types.rs +++ b/beacon_node/lighthouse_network/src/service/api_types.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use libp2p::swarm::ConnectionId; use types::{ BlobSidecar, DataColumnSidecar, EthSpec, Hash256, LightClientBootstrap, - LightClientFinalityUpdate, LightClientOptimisticUpdate, SignedBeaconBlock, + LightClientFinalityUpdate, LightClientOptimisticUpdate, LightClientUpdate, SignedBeaconBlock, }; use crate::rpc::{ @@ -117,6 +117,8 @@ pub enum Response { LightClientOptimisticUpdate(Arc>), /// A response to a LightClientFinalityUpdate request. 
LightClientFinalityUpdate(Arc>), + /// A response to a LightClientUpdatesByRange request. + LightClientUpdatesByRange(Option>>), } impl std::convert::From> for RpcResponse { @@ -156,6 +158,12 @@ impl std::convert::From> for RpcResponse { Response::LightClientFinalityUpdate(f) => { RpcResponse::Success(RpcSuccessResponse::LightClientFinalityUpdate(f)) } + Response::LightClientUpdatesByRange(f) => match f { + Some(d) => RpcResponse::Success(RpcSuccessResponse::LightClientUpdatesByRange(d)), + None => { + RpcResponse::StreamTermination(ResponseTermination::LightClientUpdatesByRange) + } + }, } } } diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index 79889274de..462612e40a 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -1579,6 +1579,17 @@ impl Network { request, }) } + RequestType::LightClientUpdatesByRange(_) => { + metrics::inc_counter_vec( + &metrics::TOTAL_RPC_REQUESTS, + &["light_client_updates_by_range"], + ); + Some(NetworkEvent::RequestReceived { + peer_id, + id: (connection_id, request.substream_id), + request, + }) + } } } Ok(RPCReceived::Response(id, resp)) => { @@ -1632,6 +1643,11 @@ impl Network { peer_id, Response::LightClientFinalityUpdate(update), ), + RpcSuccessResponse::LightClientUpdatesByRange(update) => self.build_response( + id, + peer_id, + Response::LightClientUpdatesByRange(Some(update)), + ), } } Ok(RPCReceived::EndOfStream(id, termination)) => { @@ -1642,6 +1658,9 @@ impl Network { ResponseTermination::BlobsByRoot => Response::BlobsByRoot(None), ResponseTermination::DataColumnsByRoot => Response::DataColumnsByRoot(None), ResponseTermination::DataColumnsByRange => Response::DataColumnsByRange(None), + ResponseTermination::LightClientUpdatesByRange => { + Response::LightClientUpdatesByRange(None) + } }; self.build_response(id, peer_id, response) } diff --git 
a/beacon_node/network/src/network_beacon_processor/mod.rs b/beacon_node/network/src/network_beacon_processor/mod.rs index c884ecea4e..76f5e886ff 100644 --- a/beacon_node/network/src/network_beacon_processor/mod.rs +++ b/beacon_node/network/src/network_beacon_processor/mod.rs @@ -14,6 +14,7 @@ use beacon_processor::{ use lighthouse_network::discovery::ConnectionId; use lighthouse_network::rpc::methods::{ BlobsByRangeRequest, BlobsByRootRequest, DataColumnsByRangeRequest, DataColumnsByRootRequest, + LightClientUpdatesByRangeRequest, }; use lighthouse_network::rpc::{RequestId, SubstreamId}; use lighthouse_network::{ @@ -831,6 +832,32 @@ impl NetworkBeaconProcessor { }) } + /// Create a new work event to process a `LightClientUpdatesByRange` request from the RPC network. + pub fn send_light_client_updates_by_range_request( + self: &Arc, + peer_id: PeerId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, + request: LightClientUpdatesByRangeRequest, + ) -> Result<(), Error> { + let processor = self.clone(); + let process_fn = move || { + processor.handle_light_client_updates_by_range( + peer_id, + connection_id, + substream_id, + request_id, + request, + ) + }; + + self.try_send(BeaconWorkEvent { + drop_during_sync: true, + work: Work::LightClientUpdatesByRangeRequest(Box::new(process_fn)), + }) + } + /// Send a message to `sync_tx`. /// /// Creates a log if there is an internal error. 
diff --git a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs index b36c6502a5..709cbe5b12 100644 --- a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs @@ -10,6 +10,7 @@ use lighthouse_network::rpc::methods::{ }; use lighthouse_network::rpc::*; use lighthouse_network::{PeerId, PeerRequestId, ReportSource, Response, SyncInfo}; +use methods::LightClientUpdatesByRangeRequest; use slog::{debug, error, warn}; use slot_clock::SlotClock; use std::collections::{hash_map::Entry, HashMap}; @@ -428,6 +429,105 @@ impl NetworkBeaconProcessor { Ok(()) } + pub fn handle_light_client_updates_by_range( + self: &Arc, + peer_id: PeerId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, + request: LightClientUpdatesByRangeRequest, + ) { + self.terminate_response_stream( + peer_id, + connection_id, + substream_id, + request_id, + self.clone() + .handle_light_client_updates_by_range_request_inner( + peer_id, + connection_id, + substream_id, + request_id, + request, + ), + Response::LightClientUpdatesByRange, + ); + } + + /// Handle a `LightClientUpdatesByRange` request from the peer. 
+ pub fn handle_light_client_updates_by_range_request_inner( + self: Arc, + peer_id: PeerId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, + req: LightClientUpdatesByRangeRequest, + ) -> Result<(), (RpcErrorResponse, &'static str)> { + debug!(self.log, "Received LightClientUpdatesByRange Request"; + "peer_id" => %peer_id, + "count" => req.count, + "start_period" => req.start_period, + ); + + // Should not send more than max light client updates + let max_request_size: u64 = req.max_requested(); + if req.count > max_request_size { + return Err(( + RpcErrorResponse::InvalidRequest, + "Request exceeded max size", + )); + } + + let lc_updates = match self + .chain + .get_light_client_updates(req.start_period, req.count) + { + Ok(lc_updates) => lc_updates, + Err(e) => { + error!(self.log, "Unable to obtain light client updates"; + "request" => ?req, + "peer" => %peer_id, + "error" => ?e + ); + return Err((RpcErrorResponse::ServerError, "Database error")); + } + }; + + for lc_update in lc_updates.iter() { + self.send_network_message(NetworkMessage::SendResponse { + peer_id, + response: Response::LightClientUpdatesByRange(Some(Arc::new(lc_update.clone()))), + request_id, + id: (connection_id, substream_id), + }); + } + + let lc_updates_sent = lc_updates.len(); + + if lc_updates_sent < req.count as usize { + debug!( + self.log, + "LightClientUpdatesByRange outgoing response processed"; + "peer" => %peer_id, + "info" => "Failed to return all requested light client updates. The peer may have requested data ahead of whats currently available", + "start_period" => req.start_period, + "requested" => req.count, + "returned" => lc_updates_sent + ); + } else { + debug!( + self.log, + "LightClientUpdatesByRange outgoing response processed"; + "peer" => %peer_id, + "start_period" => req.start_period, + "requested" => req.count, + "returned" => lc_updates_sent + ); + } + + Ok(()) + } + /// Handle a `LightClientBootstrap` request from the peer. 
pub fn handle_light_client_bootstrap( self: &Arc, diff --git a/beacon_node/network/src/router.rs b/beacon_node/network/src/router.rs index 1a0b5b32ae..a445cd6ea3 100644 --- a/beacon_node/network/src/router.rs +++ b/beacon_node/network/src/router.rs @@ -311,6 +311,17 @@ impl Router { rpc_request.id, ), ), + RequestType::LightClientUpdatesByRange(request) => self + .handle_beacon_processor_send_result( + self.network_beacon_processor + .send_light_client_updates_by_range_request( + peer_id, + request_id.0, + request_id.1, + rpc_request.id, + request, + ), + ), _ => {} } } @@ -351,7 +362,8 @@ impl Router { // Light client responses should not be received Response::LightClientBootstrap(_) | Response::LightClientOptimisticUpdate(_) - | Response::LightClientFinalityUpdate(_) => unreachable!(), + | Response::LightClientFinalityUpdate(_) + | Response::LightClientUpdatesByRange(_) => unreachable!(), } } diff --git a/consensus/types/src/light_client_update.rs b/consensus/types/src/light_client_update.rs index 3b48a68df3..1f5592a929 100644 --- a/consensus/types/src/light_client_update.rs +++ b/consensus/types/src/light_client_update.rs @@ -1,5 +1,6 @@ use super::{EthSpec, FixedVector, Hash256, Slot, SyncAggregate, SyncCommittee}; use crate::light_client_header::LightClientHeaderElectra; +use crate::LightClientHeader; use crate::{ beacon_state, test_utils::TestRandom, ChainSpec, Epoch, ForkName, ForkVersionDeserialize, LightClientHeaderAltair, LightClientHeaderCapella, LightClientHeaderDeneb, @@ -10,7 +11,7 @@ use safe_arith::ArithError; use safe_arith::SafeArith; use serde::{Deserialize, Deserializer, Serialize}; use serde_json::Value; -use ssz::Decode; +use ssz::{Decode, Encode}; use ssz_derive::Decode; use ssz_derive::Encode; use ssz_types::typenum::{U4, U5, U6}; @@ -35,6 +36,10 @@ pub const CURRENT_SYNC_COMMITTEE_PROOF_LEN: usize = 5; pub const NEXT_SYNC_COMMITTEE_PROOF_LEN: usize = 5; pub const EXECUTION_PAYLOAD_PROOF_LEN: usize = 4; +// Max light client updates by range 
request limits +// spec: https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/light-client/p2p-interface.md#configuration +pub const MAX_REQUEST_LIGHT_CLIENT_UPDATES: u64 = 128; + type FinalityBranch = FixedVector; type NextSyncCommitteeBranch = FixedVector; @@ -403,6 +408,32 @@ impl LightClientUpdate { } true } + + // A `LightClientUpdate` has two `LightClientHeader`s + // Spec: https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/light-client/sync-protocol.md#lightclientupdate + #[allow(clippy::arithmetic_side_effects)] + pub fn ssz_max_len_for_fork(fork_name: ForkName) -> usize { + let fixed_len = match fork_name { + ForkName::Base | ForkName::Bellatrix => 0, + ForkName::Altair => as Encode>::ssz_fixed_len(), + ForkName::Capella => as Encode>::ssz_fixed_len(), + ForkName::Deneb => as Encode>::ssz_fixed_len(), + ForkName::Electra => as Encode>::ssz_fixed_len(), + }; + fixed_len + 2 * LightClientHeader::::ssz_max_var_len_for_fork(fork_name) + } + + pub fn map_with_fork_name(&self, func: F) -> R + where + F: Fn(ForkName) -> R, + { + match self { + Self::Altair(_) => func(ForkName::Altair), + Self::Capella(_) => func(ForkName::Capella), + Self::Deneb(_) => func(ForkName::Deneb), + Self::Electra(_) => func(ForkName::Electra), + } + } } fn compute_sync_committee_period_at_slot( diff --git a/lighthouse/environment/src/lib.rs b/lighthouse/environment/src/lib.rs index 9ad40a6acd..89d759d662 100644 --- a/lighthouse/environment/src/lib.rs +++ b/lighthouse/environment/src/lib.rs @@ -333,7 +333,7 @@ impl EnvironmentBuilder { eth2_network_config: Eth2NetworkConfig, ) -> Result { // Create a new chain spec from the default configuration. 
- self.eth2_config.spec = Arc::new(eth2_network_config.chain_spec::()?); + self.eth2_config.spec = eth2_network_config.chain_spec::()?.into(); self.eth2_network_config = Some(eth2_network_config); Ok(self) From 6ad2c187ddae833c47927211db6b447566f6679f Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Fri, 18 Oct 2024 15:21:46 +1100 Subject: [PATCH 62/66] Remove deprecated flags in prep for v6.0.0 (#6490) * Delete previously deprecated flags * Update CLI docs * Remove deprecated BN flags * Use ethereum-package main branch * Delete env_log/-l --- beacon_node/http_api/src/lib.rs | 5 +- .../operation_pool/src/attestation_id.rs | 12 ---- beacon_node/operation_pool/src/lib.rs | 1 - beacon_node/src/cli.rs | 61 ------------------- beacon_node/src/config.rs | 40 ------------ book/src/help_bn.md | 9 --- book/src/help_general.md | 3 - book/src/help_vc.md | 8 --- book/src/redundancy.md | 3 +- consensus/types/src/config_and_preset.rs | 1 + lighthouse/src/main.rs | 10 --- lighthouse/tests/beacon_node.rs | 52 ---------------- lighthouse/tests/validator_client.rs | 34 ----------- scripts/local_testnet/start_local_testnet.sh | 2 +- validator_client/src/cli.rs | 31 ---------- validator_client/src/config.rs | 27 -------- 16 files changed, 4 insertions(+), 295 deletions(-) delete mode 100644 beacon_node/operation_pool/src/attestation_id.rs diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index ffcfda4680..307584b82d 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -146,7 +146,6 @@ pub struct Config { pub listen_port: u16, pub allow_origin: Option, pub tls_config: Option, - pub spec_fork_name: Option, pub data_dir: PathBuf, pub sse_capacity_multiplier: usize, pub enable_beacon_processor: bool, @@ -164,7 +163,6 @@ impl Default for Config { listen_port: 5052, allow_origin: None, tls_config: None, - spec_fork_name: None, data_dir: PathBuf::from(DEFAULT_ROOT_DIR), sse_capacity_multiplier: 1, enable_beacon_processor: true, 
@@ -2643,7 +2641,6 @@ pub fn serve( ); // GET config/spec - let spec_fork_name = ctx.config.spec_fork_name; let get_config_spec = config_path .and(warp::path("spec")) .and(warp::path::end()) @@ -2653,7 +2650,7 @@ pub fn serve( move |task_spawner: TaskSpawner, chain: Arc>| { task_spawner.blocking_json_task(Priority::P0, move || { let config_and_preset = - ConfigAndPreset::from_chain_spec::(&chain.spec, spec_fork_name); + ConfigAndPreset::from_chain_spec::(&chain.spec, None); Ok(api_types::GenericResponse::from(config_and_preset)) }) }, diff --git a/beacon_node/operation_pool/src/attestation_id.rs b/beacon_node/operation_pool/src/attestation_id.rs deleted file mode 100644 index f0dc6536a5..0000000000 --- a/beacon_node/operation_pool/src/attestation_id.rs +++ /dev/null @@ -1,12 +0,0 @@ -use serde::{Deserialize, Serialize}; -use ssz_derive::{Decode, Encode}; - -/// Serialized `AttestationData` augmented with a domain to encode the fork info. -/// -/// [DEPRECATED] To be removed once all nodes have updated to schema v12. 
-#[derive( - PartialEq, Eq, Clone, Hash, Debug, PartialOrd, Ord, Encode, Decode, Serialize, Deserialize, -)] -pub struct AttestationId { - v: Vec, -} diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index 0b032b0c8a..3a002bf870 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -1,5 +1,4 @@ mod attestation; -mod attestation_id; mod attestation_storage; mod attester_slashing; mod bls_to_execution_changes; diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index d6ed106803..dff030fb0f 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -401,15 +401,6 @@ pub fn cli_app() -> Command { .help_heading(FLAG_HEADER) .display_order(0) ) - .arg( - Arg::new("self-limiter") - .long("self-limiter") - .help("This flag is deprecated and has no effect.") - .hide(true) - .action(ArgAction::SetTrue) - .help_heading(FLAG_HEADER) - .display_order(0) - ) .arg( Arg::new("disable-self-limiter") .long("disable-self-limiter") @@ -525,16 +516,6 @@ pub fn cli_app() -> Command { .action(ArgAction::Set) .display_order(0) ) - .arg( - Arg::new("http-spec-fork") - .long("http-spec-fork") - .requires("enable_http") - .value_name("FORK") - .help("This flag is deprecated and has no effect.") - .hide(true) - .action(ArgAction::Set) - .display_order(0) - ) .arg( Arg::new("http-enable-tls") .long("http-enable-tls") @@ -564,16 +545,6 @@ pub fn cli_app() -> Command { .action(ArgAction::Set) .display_order(0) ) - .arg( - Arg::new("http-allow-sync-stalled") - .long("http-allow-sync-stalled") - .action(ArgAction::SetTrue) - .help_heading(FLAG_HEADER) - .requires("enable_http") - .help("This flag is deprecated and has no effect.") - .hide(true) - .display_order(0) - ) .arg( Arg::new("http-sse-capacity-multiplier") .long("http-sse-capacity-multiplier") @@ -1291,14 +1262,6 @@ pub fn cli_app() -> Command { .action(ArgAction::Set) .display_order(0) ) - .arg( - Arg::new("disable-lock-timeouts") - 
.long("disable-lock-timeouts") - .help("This flag is deprecated and has no effect.") - .action(ArgAction::SetTrue) - .help_heading(FLAG_HEADER) - .display_order(0) - ) .arg( Arg::new("disable-proposer-reorgs") .long("disable-proposer-reorgs") @@ -1511,14 +1474,6 @@ pub fn cli_app() -> Command { .help_heading(FLAG_HEADER) .display_order(0) ) - .arg( - Arg::new("always-prefer-builder-payload") - .action(ArgAction::SetTrue) - .help_heading(FLAG_HEADER) - .long("always-prefer-builder-payload") - .help("This flag is deprecated and has no effect.") - .display_order(0) - ) .arg( Arg::new("invalid-gossip-verified-blocks-path") .action(ArgAction::Set) @@ -1530,14 +1485,6 @@ pub fn cli_app() -> Command { filling up their disks.") .display_order(0) ) - .arg( - Arg::new("progressive-balances") - .long("progressive-balances") - .value_name("MODE") - .help("Deprecated. This optimisation is now the default and cannot be disabled.") - .action(ArgAction::Set) - .display_order(0) - ) .arg( Arg::new("beacon-processor-max-workers") .long("beacon-processor-max-workers") @@ -1599,13 +1546,5 @@ pub fn cli_app() -> Command { .action(ArgAction::Set) .display_order(0) ) - .arg( - Arg::new("disable-duplicate-warn-logs") - .long("disable-duplicate-warn-logs") - .help("This flag is deprecated and has no effect.") - .action(ArgAction::SetTrue) - .help_heading(FLAG_HEADER) - .display_order(0) - ) .group(ArgGroup::new("enable_http").args(["http", "gui", "staking"]).multiple(true)) } diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index f62ccfe3ed..2d31815351 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -152,14 +152,6 @@ pub fn get_config( client_config.http_api.allow_origin = Some(allow_origin.to_string()); } - if cli_args.get_one::("http-spec-fork").is_some() { - warn!( - log, - "Ignoring --http-spec-fork"; - "info" => "this flag is deprecated and will be removed" - ); - } - if cli_args.get_flag("http-enable-tls") { 
client_config.http_api.tls_config = Some(TlsConfig { cert: cli_args @@ -175,14 +167,6 @@ pub fn get_config( }); } - if cli_args.get_flag("http-allow-sync-stalled") { - warn!( - log, - "Ignoring --http-allow-sync-stalled"; - "info" => "this flag is deprecated and will be removed" - ); - } - client_config.http_api.sse_capacity_multiplier = parse_required(cli_args, "http-sse-capacity-multiplier")?; @@ -362,14 +346,6 @@ pub fn get_config( .map(Duration::from_millis); } - if cli_args.get_flag("always-prefer-builder-payload") { - warn!( - log, - "Ignoring --always-prefer-builder-payload"; - "info" => "this flag is deprecated and will be removed" - ); - } - // Set config values from parse values. el_config.secret_file = Some(secret_file.clone()); el_config.execution_endpoint = Some(execution_endpoint.clone()); @@ -787,14 +763,6 @@ pub fn get_config( .individual_tracking_threshold = count; } - if cli_args.get_flag("disable-lock-timeouts") { - warn!( - log, - "Ignoring --disable-lock-timeouts"; - "info" => "this flag is deprecated and will be removed" - ); - } - if cli_args.get_flag("disable-proposer-reorgs") { client_config.chain.re_org_head_threshold = None; client_config.chain.re_org_parent_threshold = None; @@ -894,14 +862,6 @@ pub fn get_config( client_config.network.invalid_block_storage = Some(path); } - if cli_args.get_one::("progressive-balances").is_some() { - warn!( - log, - "Progressive balances mode is deprecated"; - "info" => "please remove --progressive-balances" - ); - } - if let Some(max_workers) = clap_utils::parse_optional(cli_args, "beacon-processor-max-workers")? { client_config.beacon_processor.max_workers = max_workers; diff --git a/book/src/help_bn.md b/book/src/help_bn.md index 733446e5d2..338905a4fb 100644 --- a/book/src/help_bn.md +++ b/book/src/help_bn.md @@ -292,9 +292,6 @@ Options: which don't improve their payload after the first call, and high values are useful for ensuring the EL is given ample notice. Default: 1/3 of a slot. 
- --progressive-balances - Deprecated. This optimisation is now the default and cannot be - disabled. --proposer-reorg-cutoff Maximum delay after the start of the slot at which to propose a reorging block. Lower values can prevent failed reorgs by ensuring the @@ -445,8 +442,6 @@ Flags: incompatible with data availability checks. Checkpoint syncing is the preferred method for syncing a node. Only use this flag when testing. DO NOT use on mainnet! - --always-prefer-builder-payload - This flag is deprecated and has no effect. --always-prepare-payload Send payload attributes with every fork choice update. This is intended for use by block builders, relays and developers. You should @@ -470,8 +465,6 @@ Flags: Explicitly disables syncing of deposit logs from the execution node. This overrides any previous option that depends on it. Useful if you intend to run a non-validating beacon node. - --disable-duplicate-warn-logs - This flag is deprecated and has no effect. --disable-enr-auto-update Discovery automatically updates the nodes local ENR with an external IP address and port as seen by other peers on the network. This @@ -479,8 +472,6 @@ Flags: boot. --disable-inbound-rate-limiter Disables the inbound rate limiter (requests received by this node). - --disable-lock-timeouts - This flag is deprecated and has no effect. --disable-log-timestamp If present, do not include timestamps in logging output. --disable-malloc-tuning diff --git a/book/src/help_general.md b/book/src/help_general.md index 1c2d1266d0..48314d5108 100644 --- a/book/src/help_general.md +++ b/book/src/help_general.md @@ -122,9 +122,6 @@ Flags: debugging specific memory allocation issues. -h, --help Prints help information - -l - DEPRECATED Enables environment logging giving access to sub-protocol - logs such as discv5 and libp2p --log-color Force outputting colors when emitting logs to the terminal. 
--logfile-compress diff --git a/book/src/help_vc.md b/book/src/help_vc.md index 23a8491993..aa24ab3d91 100644 --- a/book/src/help_vc.md +++ b/book/src/help_vc.md @@ -210,12 +210,6 @@ Flags: If present, do not configure the system allocator. Providing this flag will generally increase memory usage, it should only be provided when debugging specific memory allocation issues. - --disable-run-on-all - DEPRECATED. Use --broadcast. By default, Lighthouse publishes - attestation, sync committee subscriptions and proposer preparation - messages to all beacon nodes provided in the `--beacon-nodes flag`. - This option changes that behaviour such that these api calls only go - out to the first available and synced beacon node --disable-slashing-protection-web3signer Disable Lighthouse's slashing protection for all web3signer keys. This can reduce the I/O burden on the VC but is only safe if slashing @@ -280,8 +274,6 @@ Flags: --prefer-builder-proposals If this flag is set, Lighthouse will always prefer blocks constructed by builders, regardless of payload value. - --produce-block-v3 - This flag is deprecated and is no longer in use. --stdin-inputs If present, read all user inputs from stdin instead of tty. --unencrypted-http-transport diff --git a/book/src/redundancy.md b/book/src/redundancy.md index ee685a17cf..daf0eb4a5b 100644 --- a/book/src/redundancy.md +++ b/book/src/redundancy.md @@ -74,8 +74,7 @@ lighthouse bn \ Prior to v3.2.0 fallback beacon nodes also required the `--subscribe-all-subnets` and `--import-all-attestations` flags. These flags are no longer required as the validator client will now broadcast subscriptions to all connected beacon nodes by default. This broadcast behaviour -can be disabled using the `--broadcast none` flag for `lighthouse vc` (or `--disable-run-on-all` -[deprecated]). +can be disabled using the `--broadcast none` flag for `lighthouse vc`. 
### Broadcast modes diff --git a/consensus/types/src/config_and_preset.rs b/consensus/types/src/config_and_preset.rs index b1e9049b0d..c80d678b2a 100644 --- a/consensus/types/src/config_and_preset.rs +++ b/consensus/types/src/config_and_preset.rs @@ -41,6 +41,7 @@ pub struct ConfigAndPreset { } impl ConfigAndPreset { + // DEPRECATED: the `fork_name` argument is never used, we should remove it. pub fn from_chain_spec(spec: &ChainSpec, fork_name: Option) -> Self { let config = Config::from_chain_spec::(spec); let base_preset = BasePreset::from_chain_spec::(spec); diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index aad8860fcc..4f4dabff89 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -115,16 +115,6 @@ fn main() { .global(true) .display_order(0), ) - .arg( - Arg::new("env_log") - .short('l') - .help( - "DEPRECATED Enables environment logging giving access to sub-protocol logs such as discv5 and libp2p", - ) - .action(ArgAction::SetTrue) - .help_heading(FLAG_HEADER) - .display_order(0) - ) .arg( Arg::new("logfile") .long("logfile") diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index f3832a1a1e..f22e438700 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -159,13 +159,6 @@ fn max_skip_slots_flag() { .with_config(|config| assert_eq!(config.chain.import_max_skip_slots, Some(10))); } -#[test] -fn disable_lock_timeouts_flag() { - CommandLineTest::new() - .flag("disable-lock-timeouts", None) - .run_with_zero_port(); -} - #[test] fn shuffling_cache_default() { CommandLineTest::new() @@ -1612,19 +1605,6 @@ fn http_port_flag() { .run() .with_config(|config| assert_eq!(config.http_api.listen_port, port1)); } -#[test] -fn empty_self_limiter_flag() { - // Test that empty rate limiter is accepted using the default rate limiting configurations. 
- CommandLineTest::new() - .flag("self-limiter", None) - .run_with_zero_port() - .with_config(|config| { - assert_eq!( - config.network.outbound_rate_limiter_config, - Some(lighthouse_network::rpc::config::OutboundRateLimiterConfig::default()) - ) - }); -} #[test] fn empty_inbound_rate_limiter_flag() { @@ -1667,14 +1647,6 @@ fn http_allow_origin_all_flag() { .with_config(|config| assert_eq!(config.http_api.allow_origin, Some("*".to_string()))); } -#[test] -fn http_allow_sync_stalled_flag() { - CommandLineTest::new() - .flag("http", None) - .flag("http-allow-sync-stalled", None) - .run_with_zero_port(); -} - #[test] fn http_enable_beacon_processor() { CommandLineTest::new() @@ -1713,22 +1685,6 @@ fn http_tls_flags() { }); } -#[test] -fn http_spec_fork_default() { - CommandLineTest::new() - .flag("http", None) - .run_with_zero_port() - .with_config(|config| assert_eq!(config.http_api.spec_fork_name, None)); -} - -#[test] -fn http_spec_fork_override() { - CommandLineTest::new() - .flag("http", None) - .flag("http-spec-fork", Some("altair")) - .run_with_zero_port(); -} - // Tests for Metrics flags. #[test] fn metrics_flag() { @@ -2631,14 +2587,6 @@ fn invalid_gossip_verified_blocks_path() { }); } -#[test] -fn progressive_balances_checked() { - // Flag is deprecated but supplying it should not crash until we remove it completely. - CommandLineTest::new() - .flag("progressive-balances", Some("checked")) - .run_with_zero_port(); -} - #[test] fn beacon_processor() { CommandLineTest::new() diff --git a/lighthouse/tests/validator_client.rs b/lighthouse/tests/validator_client.rs index baf50aa7c0..147a371f0e 100644 --- a/lighthouse/tests/validator_client.rs +++ b/lighthouse/tests/validator_client.rs @@ -426,13 +426,6 @@ fn no_doppelganger_protection_flag() { .with_config(|config| assert!(!config.enable_doppelganger_protection)); } -#[test] -fn produce_block_v3_flag() { - // The flag is DEPRECATED but providing it should not trigger an error. 
- // We can delete this test when deleting the flag entirely. - CommandLineTest::new().flag("produce-block-v3", None).run(); -} - #[test] fn no_gas_limit_flag() { CommandLineTest::new() @@ -513,23 +506,6 @@ fn monitoring_endpoint() { assert_eq!(api_conf.update_period_secs, Some(30)); }); } -#[test] -fn disable_run_on_all_flag() { - CommandLineTest::new() - .flag("disable-run-on-all", None) - .run() - .with_config(|config| { - assert_eq!(config.broadcast_topics, vec![]); - }); - // --broadcast flag takes precedence - CommandLineTest::new() - .flag("disable-run-on-all", None) - .flag("broadcast", Some("attestations")) - .run() - .with_config(|config| { - assert_eq!(config.broadcast_topics, vec![ApiTopic::Attestations]); - }); -} #[test] fn no_broadcast_flag() { @@ -623,16 +599,6 @@ fn disable_latency_measurement_service() { assert!(!config.enable_latency_measurement_service); }); } -#[test] -fn latency_measurement_service() { - // This flag is DEPRECATED so has no effect, but should still be accepted. 
- CommandLineTest::new() - .flag("latency-measurement-service", Some("false")) - .run() - .with_config(|config| { - assert!(config.enable_latency_measurement_service); - }); -} #[test] fn validator_registration_batch_size() { diff --git a/scripts/local_testnet/start_local_testnet.sh b/scripts/local_testnet/start_local_testnet.sh index f90132764e..1f15688693 100755 --- a/scripts/local_testnet/start_local_testnet.sh +++ b/scripts/local_testnet/start_local_testnet.sh @@ -7,7 +7,7 @@ set -Eeuo pipefail SCRIPT_DIR="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" ENCLAVE_NAME=local-testnet NETWORK_PARAMS_FILE=$SCRIPT_DIR/network_params.yaml -ETHEREUM_PKG_VERSION=4.2.0 +ETHEREUM_PKG_VERSION=main BUILD_IMAGE=true BUILDER_PROPOSALS=false diff --git a/validator_client/src/cli.rs b/validator_client/src/cli.rs index b027ad0df6..209876f07b 100644 --- a/validator_client/src/cli.rs +++ b/validator_client/src/cli.rs @@ -39,20 +39,6 @@ pub fn cli_app() -> Command { .action(ArgAction::Set) .display_order(0) ) - // TODO remove this flag in a future release - .arg( - Arg::new("disable-run-on-all") - .long("disable-run-on-all") - .value_name("DISABLE_RUN_ON_ALL") - .help("DEPRECATED. Use --broadcast. \ - By default, Lighthouse publishes attestation, sync committee subscriptions \ - and proposer preparation messages to all beacon nodes provided in the \ - `--beacon-nodes flag`. 
This option changes that behaviour such that these \ - api calls only go out to the first available and synced beacon node") - .action(ArgAction::SetTrue) - .help_heading(FLAG_HEADER) - .display_order(0) - ) .arg( Arg::new("broadcast") .long("broadcast") @@ -167,14 +153,6 @@ pub fn cli_app() -> Command { .action(ArgAction::Set) .display_order(0) ) - .arg( - Arg::new("produce-block-v3") - .long("produce-block-v3") - .help("This flag is deprecated and is no longer in use.") - .action(ArgAction::SetTrue) - .help_heading(FLAG_HEADER) - .display_order(0) - ) .arg( Arg::new("distributed") .long("distributed") @@ -403,15 +381,6 @@ pub fn cli_app() -> Command { .help_heading(FLAG_HEADER) .display_order(0) ) - .arg( - Arg::new("latency-measurement-service") - .long("latency-measurement-service") - .help("DEPRECATED") - .action(ArgAction::Set) - .help_heading(FLAG_HEADER) - .display_order(0) - .hide(true) - ) .arg( Arg::new("validator-registration-batch-size") .long("validator-registration-batch-size") diff --git a/validator_client/src/config.rs b/validator_client/src/config.rs index c2c445c48c..f42ed55146 100644 --- a/validator_client/src/config.rs +++ b/validator_client/src/config.rs @@ -244,14 +244,6 @@ impl Config { config.distributed = true; } - if cli_args.get_flag("disable-run-on-all") { - warn!( - log, - "The --disable-run-on-all flag is deprecated"; - "msg" => "please use --broadcast instead" - ); - config.broadcast_topics = vec![]; - } if let Some(broadcast_topics) = cli_args.get_one::("broadcast") { config.broadcast_topics = broadcast_topics .split(',') @@ -397,14 +389,6 @@ impl Config { config.prefer_builder_proposals = true; } - if cli_args.get_flag("produce-block-v3") { - warn!( - log, - "produce-block-v3 flag"; - "note" => "deprecated flag has no effect and should be removed" - ); - } - config.gas_limit = cli_args .get_one::("gas-limit") .map(|gas_limit| { @@ -429,17 +413,6 @@ impl Config { config.enable_latency_measurement_service = 
!cli_args.get_flag("disable-latency-measurement-service"); - if cli_args - .get_one::("latency-measurement-service") - .is_some() - { - warn!( - log, - "latency-measurement-service flag"; - "note" => "deprecated flag has no effect and should be removed" - ); - } - config.validator_registration_batch_size = parse_required(cli_args, "validator-registration-batch-size")?; if config.validator_registration_batch_size == 0 { From 6eaa370188395d95eba12c993811e0a3f94b52b9 Mon Sep 17 00:00:00 2001 From: ethDreamer <37123614+ethDreamer@users.noreply.github.com> Date: Fri, 18 Oct 2024 15:31:55 -0500 Subject: [PATCH 63/66] Simplify Validator Creation and Align with Spec (#6515) * Simplify Validator Creation and Align with Spec * clippy * Bug Fix --- .../process_operations.rs | 43 ++----------------- consensus/types/src/beacon_state.rs | 29 +++++++++++++ consensus/types/src/validator.rs | 32 +++++++++++++- 3 files changed, 62 insertions(+), 42 deletions(-) diff --git a/consensus/state_processing/src/per_block_processing/process_operations.rs b/consensus/state_processing/src/per_block_processing/process_operations.rs index fb1c5c7eee..a53dc15126 100644 --- a/consensus/state_processing/src/per_block_processing/process_operations.rs +++ b/consensus/state_processing/src/per_block_processing/process_operations.rs @@ -475,50 +475,13 @@ pub fn apply_deposit( return Ok(()); } - let new_validator_index = state.validators().len(); - - // [Modified in Electra:EIP7251] - let (effective_balance, state_balance) = if state.fork_name_unchecked() >= ForkName::Electra - { - (0, 0) - } else { - ( - std::cmp::min( - amount.safe_sub(amount.safe_rem(spec.effective_balance_increment)?)?, - spec.max_effective_balance, - ), - amount, - ) - }; - // Create a new validator. 
- let validator = Validator { - pubkey: deposit_data.pubkey, - withdrawal_credentials: deposit_data.withdrawal_credentials, - activation_eligibility_epoch: spec.far_future_epoch, - activation_epoch: spec.far_future_epoch, - exit_epoch: spec.far_future_epoch, - withdrawable_epoch: spec.far_future_epoch, - effective_balance, - slashed: false, - }; - state.validators_mut().push(validator)?; - state.balances_mut().push(state_balance)?; - - // Altair or later initializations. - if let Ok(previous_epoch_participation) = state.previous_epoch_participation_mut() { - previous_epoch_participation.push(ParticipationFlags::default())?; - } - if let Ok(current_epoch_participation) = state.current_epoch_participation_mut() { - current_epoch_participation.push(ParticipationFlags::default())?; - } - if let Ok(inactivity_scores) = state.inactivity_scores_mut() { - inactivity_scores.push(0)?; - } + state.add_validator_to_registry(&deposit_data, spec)?; + let new_validator_index = state.validators().len().safe_sub(1)? as u64; // [New in Electra:EIP7251] if let Ok(pending_balance_deposits) = state.pending_balance_deposits_mut() { pending_balance_deposits.push(PendingBalanceDeposit { - index: new_validator_index as u64, + index: new_validator_index, amount, })?; } diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index 8eed790a02..d772cb23b3 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -1548,6 +1548,35 @@ impl BeaconState { .ok_or(Error::UnknownValidator(validator_index)) } + pub fn add_validator_to_registry( + &mut self, + deposit_data: &DepositData, + spec: &ChainSpec, + ) -> Result<(), Error> { + let fork = self.fork_name_unchecked(); + let amount = if fork.electra_enabled() { + 0 + } else { + deposit_data.amount + }; + self.validators_mut() + .push(Validator::from_deposit(deposit_data, amount, fork, spec))?; + self.balances_mut().push(amount)?; + + // Altair or later initializations. 
+ if let Ok(previous_epoch_participation) = self.previous_epoch_participation_mut() { + previous_epoch_participation.push(ParticipationFlags::default())?; + } + if let Ok(current_epoch_participation) = self.current_epoch_participation_mut() { + current_epoch_participation.push(ParticipationFlags::default())?; + } + if let Ok(inactivity_scores) = self.inactivity_scores_mut() { + inactivity_scores.push(0)?; + } + + Ok(()) + } + /// Safe copy-on-write accessor for the `validators` list. pub fn get_validator_cow( &mut self, diff --git a/consensus/types/src/validator.rs b/consensus/types/src/validator.rs index 298604d4f3..8cf118eea5 100644 --- a/consensus/types/src/validator.rs +++ b/consensus/types/src/validator.rs @@ -1,6 +1,6 @@ use crate::{ - test_utils::TestRandom, Address, BeaconState, ChainSpec, Checkpoint, Epoch, EthSpec, - FixedBytesExtended, ForkName, Hash256, PublicKeyBytes, + test_utils::TestRandom, Address, BeaconState, ChainSpec, Checkpoint, DepositData, Epoch, + EthSpec, FixedBytesExtended, ForkName, Hash256, PublicKeyBytes, }; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; @@ -35,6 +35,34 @@ pub struct Validator { } impl Validator { + #[allow(clippy::arithmetic_side_effects)] + pub fn from_deposit( + deposit_data: &DepositData, + amount: u64, + fork_name: ForkName, + spec: &ChainSpec, + ) -> Self { + let mut validator = Validator { + pubkey: deposit_data.pubkey, + withdrawal_credentials: deposit_data.withdrawal_credentials, + activation_eligibility_epoch: spec.far_future_epoch, + activation_epoch: spec.far_future_epoch, + exit_epoch: spec.far_future_epoch, + withdrawable_epoch: spec.far_future_epoch, + effective_balance: 0, + slashed: false, + }; + + let max_effective_balance = validator.get_max_effective_balance(spec, fork_name); + // safe math is unnecessary here since spec.effective_balance_increment is never <= 0 + validator.effective_balance = std::cmp::min( + amount - (amount % spec.effective_balance_increment), 
max_effective_balance, + ); + + validator + } + /// Returns `true` if the validator is considered active at some epoch. pub fn is_active_at(&self, epoch: Epoch) -> bool { self.activation_epoch <= epoch && epoch < self.exit_epoch From a732a8784643d053051d386294ce53f542cf8237 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 21 Oct 2024 12:28:55 +1100 Subject: [PATCH 64/66] Remove TTD flags and `safe-slots-to-import-*` (#6489) * Delete SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY * Update fork choice tests * Remove TTD related flags * Add deprecation warning * Remove more dead code * Delete EF on_merge_block tests * Remove even more dead code * Address Mac's review comments --- .../beacon_chain/src/block_verification.rs | 34 +- .../beacon_chain/src/execution_payload.rs | 34 +- .../tests/payload_invalidation.rs | 552 ------------------ book/src/api-vc-endpoints.md | 2 - book/src/help_bn.md | 29 - book/src/help_general.md | 29 - book/src/help_vc.md | 29 - book/src/help_vm.md | 29 - book/src/help_vm_create.md | 29 - book/src/help_vm_import.md | 29 - book/src/help_vm_move.md | 29 - common/clap_utils/src/lib.rs | 32 +- consensus/fork_choice/src/fork_choice.rs | 37 -- consensus/fork_choice/tests/tests.rs | 122 +--- consensus/types/presets/gnosis/phase0.yaml | 6 - consensus/types/presets/mainnet/phase0.yaml | 6 - consensus/types/presets/minimal/phase0.yaml | 6 - consensus/types/src/chain_spec.rs | 19 - consensus/types/src/preset.rs | 3 - lighthouse/src/main.rs | 44 +- lighthouse/tests/beacon_node.rs | 43 +- lighthouse/tests/exec.rs | 5 - testing/ef_tests/check_all_files_accessed.py | 2 + testing/ef_tests/src/handler.rs | 4 +- testing/ef_tests/tests/tests.rs | 6 - 25 files changed, 44 insertions(+), 1116 deletions(-) diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index a8233f170f..661b539fbe 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ 
b/beacon_node/beacon_chain/src/block_verification.rs @@ -55,8 +55,8 @@ use crate::data_availability_checker::{AvailabilityCheckError, MaybeAvailableBlo use crate::data_column_verification::GossipDataColumnError; use crate::eth1_finalization_cache::Eth1FinalizationData; use crate::execution_payload::{ - is_optimistic_candidate_block, validate_execution_payload_for_gossip, validate_merge_block, - AllowOptimisticImport, NotifyExecutionLayer, PayloadNotifier, + validate_execution_payload_for_gossip, validate_merge_block, AllowOptimisticImport, + NotifyExecutionLayer, PayloadNotifier, }; use crate::kzg_utils::blobs_to_data_column_sidecars; use crate::observed_block_producers::SeenBlock; @@ -74,7 +74,7 @@ use lighthouse_metrics::TryExt; use parking_lot::RwLockReadGuard; use proto_array::Block as ProtoBlock; use safe_arith::ArithError; -use slog::{debug, error, warn, Logger}; +use slog::{debug, error, Logger}; use slot_clock::SlotClock; use ssz::Encode; use ssz_derive::{Decode, Encode}; @@ -95,9 +95,9 @@ use store::{Error as DBError, HotStateSummary, KeyValueStore, StoreOp}; use task_executor::JoinHandle; use types::{ data_column_sidecar::DataColumnSidecarError, BeaconBlockRef, BeaconState, BeaconStateError, - BlobsList, ChainSpec, DataColumnSidecarList, Epoch, EthSpec, ExecPayload, ExecutionBlockHash, - FullPayload, Hash256, InconsistentFork, PublicKey, PublicKeyBytes, RelativeEpoch, - SignedBeaconBlock, SignedBeaconBlockHeader, Slot, + BlobsList, ChainSpec, DataColumnSidecarList, Epoch, EthSpec, ExecutionBlockHash, FullPayload, + Hash256, InconsistentFork, PublicKey, PublicKeyBytes, RelativeEpoch, SignedBeaconBlock, + SignedBeaconBlockHeader, Slot, }; pub const POS_PANDA_BANNER: &str = r#" @@ -1388,28 +1388,6 @@ impl ExecutionPendingBlock { } let payload_verification_status = payload_notifier.notify_new_payload().await?; - // If the payload did not validate or invalidate the block, check to see if this block is - // valid for optimistic import. 
- if payload_verification_status.is_optimistic() { - let block_hash_opt = block - .message() - .body() - .execution_payload() - .map(|full_payload| full_payload.block_hash()); - - // Ensure the block is a candidate for optimistic import. - if !is_optimistic_candidate_block(&chain, block.slot(), block.parent_root()).await? - { - warn!( - chain.log, - "Rejecting optimistic block"; - "block_hash" => ?block_hash_opt, - "msg" => "the execution engine is not synced" - ); - return Err(ExecutionPayloadError::UnverifiedNonOptimisticCandidate.into()); - } - } - Ok(PayloadVerificationOutcome { payload_verification_status, is_valid_merge_transition_block, diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index b9b98bfbc0..f2420eea0d 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -277,9 +277,7 @@ pub async fn validate_merge_block<'a, T: BeaconChainTypes>( } .into()), None => { - if allow_optimistic_import == AllowOptimisticImport::Yes - && is_optimistic_candidate_block(chain, block.slot(), block.parent_root()).await? - { + if allow_optimistic_import == AllowOptimisticImport::Yes { debug!( chain.log, "Optimistically importing merge transition block"; @@ -297,36 +295,6 @@ pub async fn validate_merge_block<'a, T: BeaconChainTypes>( } } -/// Check to see if a block with the given parameters is valid to be imported optimistically. -pub async fn is_optimistic_candidate_block( - chain: &Arc>, - block_slot: Slot, - block_parent_root: Hash256, -) -> Result { - let current_slot = chain.slot()?; - let inner_chain = chain.clone(); - - // Use a blocking task to check if the block is an optimistic candidate. Interacting - // with the `fork_choice` lock in an async task can block the core executor. 
- chain - .spawn_blocking_handle( - move || { - inner_chain - .canonical_head - .fork_choice_read_lock() - .is_optimistic_candidate_block( - current_slot, - block_slot, - &block_parent_root, - &inner_chain.spec, - ) - }, - "validate_merge_block_optimistic_candidate", - ) - .await? - .map_err(BeaconChainError::from) -} - /// Validate the gossip block's execution_payload according to the checks described here: /// https://github.com/ethereum/consensus-specs/blob/dev/specs/merge/p2p-interface.md#beacon_block pub fn validate_execution_payload_for_gossip( diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index dd195048e8..1325875a27 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -1,20 +1,14 @@ #![cfg(not(debug_assertions))] -use beacon_chain::otb_verification_service::{ - load_optimistic_transition_blocks, validate_optimistic_transition_blocks, - OptimisticTransitionBlock, -}; use beacon_chain::{ canonical_head::{CachedHead, CanonicalHead}, test_utils::{BeaconChainHarness, EphemeralHarnessType}, BeaconChainError, BlockError, ChainConfig, ExecutionPayloadError, NotifyExecutionLayer, OverrideForkchoiceUpdate, StateSkipConfig, WhenSlotSkipped, - INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, }; use execution_layer::{ json_structures::{JsonForkchoiceStateV1, JsonPayloadAttributes, JsonPayloadAttributesV1}, - test_utils::ExecutionBlockGenerator, ExecutionLayer, ForkchoiceState, PayloadAttributes, }; use fork_choice::{Error as ForkChoiceError, InvalidationOperation, PayloadVerificationStatus}; @@ -1270,552 +1264,6 @@ async fn attesting_to_optimistic_head() { get_aggregated_by_slot_and_root().unwrap(); } -/// A helper struct to build out a chain of some configurable length which undergoes the merge -/// transition. 
-struct OptimisticTransitionSetup { - blocks: Vec>>, - execution_block_generator: ExecutionBlockGenerator, -} - -impl OptimisticTransitionSetup { - async fn new(num_blocks: usize, ttd: u64) -> Self { - let mut spec = E::default_spec(); - spec.terminal_total_difficulty = Uint256::from(ttd); - let mut rig = InvalidPayloadRig::new_with_spec(spec).enable_attestations(); - rig.move_to_terminal_block(); - - let mut blocks = Vec::with_capacity(num_blocks); - for _ in 0..num_blocks { - let root = rig.import_block(Payload::Valid).await; - let block = rig.harness.chain.get_block(&root).await.unwrap().unwrap(); - blocks.push(Arc::new(block)); - } - - let execution_block_generator = rig - .harness - .mock_execution_layer - .as_ref() - .unwrap() - .server - .execution_block_generator() - .clone(); - - Self { - blocks, - execution_block_generator, - } - } -} - -/// Build a chain which has optimistically imported a transition block. -/// -/// The initial chain will be built with respect to `block_ttd`, whilst the `rig` which imports the -/// chain will operate with respect to `rig_ttd`. This allows for testing mismatched TTDs. -async fn build_optimistic_chain( - block_ttd: u64, - rig_ttd: u64, - num_blocks: usize, -) -> InvalidPayloadRig { - let OptimisticTransitionSetup { - blocks, - execution_block_generator, - } = OptimisticTransitionSetup::new(num_blocks, block_ttd).await; - // Build a brand-new testing harness. We will apply the blocks from the previous harness to - // this one. - let mut spec = E::default_spec(); - spec.terminal_total_difficulty = Uint256::from(rig_ttd); - let rig = InvalidPayloadRig::new_with_spec(spec); - - let spec = &rig.harness.chain.spec; - let mock_execution_layer = rig.harness.mock_execution_layer.as_ref().unwrap(); - - // Ensure all the execution blocks from the first rig are available in the second rig. 
- *mock_execution_layer.server.execution_block_generator() = execution_block_generator; - - // Make the execution layer respond `SYNCING` to all `newPayload` requests. - mock_execution_layer - .server - .all_payloads_syncing_on_new_payload(true); - // Make the execution layer respond `SYNCING` to all `forkchoiceUpdated` requests. - mock_execution_layer - .server - .all_payloads_syncing_on_forkchoice_updated(); - // Make the execution layer respond `None` to all `getBlockByHash` requests. - mock_execution_layer - .server - .all_get_block_by_hash_requests_return_none(); - - let current_slot = std::cmp::max( - blocks[0].slot() + spec.safe_slots_to_import_optimistically, - num_blocks.into(), - ); - rig.harness.set_current_slot(current_slot); - - for block in blocks { - rig.harness - .chain - .process_block( - block.canonical_root(), - block, - NotifyExecutionLayer::Yes, - BlockImportSource::Lookup, - || Ok(()), - ) - .await - .unwrap(); - } - - rig.harness.chain.recompute_head_at_current_slot().await; - - // Make the execution layer respond normally to `getBlockByHash` requests. - mock_execution_layer - .server - .all_get_block_by_hash_requests_return_natural_value(); - - // Perform some sanity checks to ensure that the transition happened exactly where we expected. 
- let pre_transition_block_root = rig - .harness - .chain - .block_root_at_slot(Slot::new(0), WhenSlotSkipped::None) - .unwrap() - .unwrap(); - let pre_transition_block = rig - .harness - .chain - .get_block(&pre_transition_block_root) - .await - .unwrap() - .unwrap(); - let post_transition_block_root = rig - .harness - .chain - .block_root_at_slot(Slot::new(1), WhenSlotSkipped::None) - .unwrap() - .unwrap(); - let post_transition_block = rig - .harness - .chain - .get_block(&post_transition_block_root) - .await - .unwrap() - .unwrap(); - assert_eq!( - pre_transition_block_root, - post_transition_block.parent_root(), - "the blocks form a single chain" - ); - assert!( - pre_transition_block - .message() - .body() - .execution_payload() - .unwrap() - .is_default_with_empty_roots(), - "the block *has not* undergone the merge transition" - ); - assert!( - !post_transition_block - .message() - .body() - .execution_payload() - .unwrap() - .is_default_with_empty_roots(), - "the block *has* undergone the merge transition" - ); - - // Assert that the transition block was optimistically imported. - // - // Note: we're using the "fallback" check for optimistic status, so if the block was - // pre-finality then we'll just use the optimistic status of the finalized block. - assert!( - rig.harness - .chain - .canonical_head - .fork_choice_read_lock() - .is_optimistic_or_invalid_block(&post_transition_block_root) - .unwrap(), - "the transition block should be imported optimistically" - ); - - // Get the mock execution layer to respond to `getBlockByHash` requests normally again. 
- mock_execution_layer - .server - .all_get_block_by_hash_requests_return_natural_value(); - - rig -} - -#[tokio::test] -async fn optimistic_transition_block_valid_unfinalized() { - let ttd = 42; - let num_blocks = 16_usize; - let rig = build_optimistic_chain(ttd, ttd, num_blocks).await; - - let post_transition_block_root = rig - .harness - .chain - .block_root_at_slot(Slot::new(1), WhenSlotSkipped::None) - .unwrap() - .unwrap(); - let post_transition_block = rig - .harness - .chain - .get_block(&post_transition_block_root) - .await - .unwrap() - .unwrap(); - - assert!( - rig.cached_head() - .finalized_checkpoint() - .epoch - .start_slot(E::slots_per_epoch()) - < post_transition_block.slot(), - "the transition block should not be finalized" - ); - - let otbs = load_optimistic_transition_blocks(&rig.harness.chain) - .expect("should load optimistic transition block from db"); - assert_eq!( - otbs.len(), - 1, - "There should be one optimistic transition block" - ); - let valid_otb = OptimisticTransitionBlock::from_block(post_transition_block.message()); - assert_eq!( - valid_otb, otbs[0], - "The optimistic transition block stored in the database should be what we expect", - ); - - validate_optimistic_transition_blocks(&rig.harness.chain, otbs) - .await - .expect("should validate fine"); - // now that the transition block has been validated, it should have been removed from the database - let otbs = load_optimistic_transition_blocks(&rig.harness.chain) - .expect("should load optimistic transition block from db"); - assert!( - otbs.is_empty(), - "The valid optimistic transition block should have been removed from the database", - ); -} - -#[tokio::test] -async fn optimistic_transition_block_valid_finalized() { - let ttd = 42; - let num_blocks = 130_usize; - let rig = build_optimistic_chain(ttd, ttd, num_blocks).await; - - let post_transition_block_root = rig - .harness - .chain - .block_root_at_slot(Slot::new(1), WhenSlotSkipped::None) - .unwrap() - .unwrap(); - let 
post_transition_block = rig - .harness - .chain - .get_block(&post_transition_block_root) - .await - .unwrap() - .unwrap(); - - assert!( - rig.cached_head() - .finalized_checkpoint() - .epoch - .start_slot(E::slots_per_epoch()) - > post_transition_block.slot(), - "the transition block should be finalized" - ); - - let otbs = load_optimistic_transition_blocks(&rig.harness.chain) - .expect("should load optimistic transition block from db"); - assert_eq!( - otbs.len(), - 1, - "There should be one optimistic transition block" - ); - let valid_otb = OptimisticTransitionBlock::from_block(post_transition_block.message()); - assert_eq!( - valid_otb, otbs[0], - "The optimistic transition block stored in the database should be what we expect", - ); - - validate_optimistic_transition_blocks(&rig.harness.chain, otbs) - .await - .expect("should validate fine"); - // now that the transition block has been validated, it should have been removed from the database - let otbs = load_optimistic_transition_blocks(&rig.harness.chain) - .expect("should load optimistic transition block from db"); - assert!( - otbs.is_empty(), - "The valid optimistic transition block should have been removed from the database", - ); -} - -#[tokio::test] -async fn optimistic_transition_block_invalid_unfinalized() { - let block_ttd = 42; - let rig_ttd = 1337; - let num_blocks = 22_usize; - let rig = build_optimistic_chain(block_ttd, rig_ttd, num_blocks).await; - - let post_transition_block_root = rig - .harness - .chain - .block_root_at_slot(Slot::new(1), WhenSlotSkipped::None) - .unwrap() - .unwrap(); - let post_transition_block = rig - .harness - .chain - .get_block(&post_transition_block_root) - .await - .unwrap() - .unwrap(); - - assert!( - rig.cached_head() - .finalized_checkpoint() - .epoch - .start_slot(E::slots_per_epoch()) - < post_transition_block.slot(), - "the transition block should not be finalized" - ); - - let otbs = load_optimistic_transition_blocks(&rig.harness.chain) - .expect("should 
load optimistic transition block from db"); - assert_eq!( - otbs.len(), - 1, - "There should be one optimistic transition block" - ); - - let invalid_otb = OptimisticTransitionBlock::from_block(post_transition_block.message()); - assert_eq!( - invalid_otb, otbs[0], - "The optimistic transition block stored in the database should be what we expect", - ); - - // No shutdown should've been triggered. - assert_eq!(rig.harness.shutdown_reasons(), vec![]); - // It shouldn't be known as invalid yet - assert!(!rig - .execution_status(post_transition_block_root) - .is_invalid()); - - validate_optimistic_transition_blocks(&rig.harness.chain, otbs) - .await - .unwrap(); - - // Still no shutdown should've been triggered. - assert_eq!(rig.harness.shutdown_reasons(), vec![]); - // It should be marked invalid now - assert!(rig - .execution_status(post_transition_block_root) - .is_invalid()); - - // the invalid merge transition block should NOT have been removed from the database - let otbs = load_optimistic_transition_blocks(&rig.harness.chain) - .expect("should load optimistic transition block from db"); - assert_eq!( - otbs.len(), - 1, - "The invalid merge transition block should still be in the database", - ); - assert_eq!( - invalid_otb, otbs[0], - "The optimistic transition block stored in the database should be what we expect", - ); -} - -#[tokio::test] -async fn optimistic_transition_block_invalid_unfinalized_syncing_ee() { - let block_ttd = 42; - let rig_ttd = 1337; - let num_blocks = 22_usize; - let rig = build_optimistic_chain(block_ttd, rig_ttd, num_blocks).await; - - let post_transition_block_root = rig - .harness - .chain - .block_root_at_slot(Slot::new(1), WhenSlotSkipped::None) - .unwrap() - .unwrap(); - let post_transition_block = rig - .harness - .chain - .get_block(&post_transition_block_root) - .await - .unwrap() - .unwrap(); - - assert!( - rig.cached_head() - .finalized_checkpoint() - .epoch - .start_slot(E::slots_per_epoch()) - < post_transition_block.slot(), 
- "the transition block should not be finalized" - ); - - let otbs = load_optimistic_transition_blocks(&rig.harness.chain) - .expect("should load optimistic transition block from db"); - assert_eq!( - otbs.len(), - 1, - "There should be one optimistic transition block" - ); - - let invalid_otb = OptimisticTransitionBlock::from_block(post_transition_block.message()); - assert_eq!( - invalid_otb, otbs[0], - "The optimistic transition block stored in the database should be what we expect", - ); - - // No shutdown should've been triggered. - assert_eq!(rig.harness.shutdown_reasons(), vec![]); - // It shouldn't be known as invalid yet - assert!(!rig - .execution_status(post_transition_block_root) - .is_invalid()); - - // Make the execution layer respond `None` to all `getBlockByHash` requests to simulate a - // syncing EE. - let mock_execution_layer = rig.harness.mock_execution_layer.as_ref().unwrap(); - mock_execution_layer - .server - .all_get_block_by_hash_requests_return_none(); - - validate_optimistic_transition_blocks(&rig.harness.chain, otbs) - .await - .unwrap(); - - // Still no shutdown should've been triggered. - assert_eq!(rig.harness.shutdown_reasons(), vec![]); - - // It should still be marked as optimistic. - assert!(rig - .execution_status(post_transition_block_root) - .is_strictly_optimistic()); - - // the optimistic merge transition block should NOT have been removed from the database - let otbs = load_optimistic_transition_blocks(&rig.harness.chain) - .expect("should load optimistic transition block from db"); - assert_eq!( - otbs.len(), - 1, - "The optimistic merge transition block should still be in the database", - ); - assert_eq!( - invalid_otb, otbs[0], - "The optimistic transition block stored in the database should be what we expect", - ); - - // Allow the EL to respond to `getBlockByHash`, as if it has finished syncing. 
- mock_execution_layer - .server - .all_get_block_by_hash_requests_return_natural_value(); - - validate_optimistic_transition_blocks(&rig.harness.chain, otbs) - .await - .unwrap(); - - // Still no shutdown should've been triggered. - assert_eq!(rig.harness.shutdown_reasons(), vec![]); - // It should be marked invalid now - assert!(rig - .execution_status(post_transition_block_root) - .is_invalid()); - - // the invalid merge transition block should NOT have been removed from the database - let otbs = load_optimistic_transition_blocks(&rig.harness.chain) - .expect("should load optimistic transition block from db"); - assert_eq!( - otbs.len(), - 1, - "The invalid merge transition block should still be in the database", - ); - assert_eq!( - invalid_otb, otbs[0], - "The optimistic transition block stored in the database should be what we expect", - ); -} - -#[tokio::test] -async fn optimistic_transition_block_invalid_finalized() { - let block_ttd = 42; - let rig_ttd = 1337; - let num_blocks = 130_usize; - let rig = build_optimistic_chain(block_ttd, rig_ttd, num_blocks).await; - - let post_transition_block_root = rig - .harness - .chain - .block_root_at_slot(Slot::new(1), WhenSlotSkipped::None) - .unwrap() - .unwrap(); - let post_transition_block = rig - .harness - .chain - .get_block(&post_transition_block_root) - .await - .unwrap() - .unwrap(); - - assert!( - rig.cached_head() - .finalized_checkpoint() - .epoch - .start_slot(E::slots_per_epoch()) - > post_transition_block.slot(), - "the transition block should be finalized" - ); - - let otbs = load_optimistic_transition_blocks(&rig.harness.chain) - .expect("should load optimistic transition block from db"); - - assert_eq!( - otbs.len(), - 1, - "There should be one optimistic transition block" - ); - - let invalid_otb = OptimisticTransitionBlock::from_block(post_transition_block.message()); - assert_eq!( - invalid_otb, otbs[0], - "The optimistic transition block stored in the database should be what we expect", - ); - - 
// No shutdown should've been triggered yet. - assert_eq!(rig.harness.shutdown_reasons(), vec![]); - - validate_optimistic_transition_blocks(&rig.harness.chain, otbs) - .await - .expect("should invalidate merge transition block and shutdown the client"); - - // The beacon chain should have triggered a shutdown. - assert_eq!( - rig.harness.shutdown_reasons(), - vec![ShutdownReason::Failure( - INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON - )] - ); - - // the invalid merge transition block should NOT have been removed from the database - let otbs = load_optimistic_transition_blocks(&rig.harness.chain) - .expect("should load optimistic transition block from db"); - assert_eq!( - otbs.len(), - 1, - "The invalid merge transition block should still be in the database", - ); - assert_eq!( - invalid_otb, otbs[0], - "The optimistic transition block stored in the database should be what we expect", - ); -} - /// Helper for running tests where we generate a chain with an invalid head and then a /// `fork_block` to recover it. 
struct InvalidHeadSetup { diff --git a/book/src/api-vc-endpoints.md b/book/src/api-vc-endpoints.md index 6cb6685912..80eba7a059 100644 --- a/book/src/api-vc-endpoints.md +++ b/book/src/api-vc-endpoints.md @@ -230,7 +230,6 @@ Example Response Body "TERMINAL_TOTAL_DIFFICULTY": "10790000", "TERMINAL_BLOCK_HASH": "0x0000000000000000000000000000000000000000000000000000000000000000", "TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH": "18446744073709551615", - "SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY": "128", "MIN_GENESIS_ACTIVE_VALIDATOR_COUNT": "16384", "MIN_GENESIS_TIME": "1614588812", "GENESIS_FORK_VERSION": "0x00001020", @@ -263,7 +262,6 @@ Example Response Body "HYSTERESIS_QUOTIENT": "4", "HYSTERESIS_DOWNWARD_MULTIPLIER": "1", "HYSTERESIS_UPWARD_MULTIPLIER": "5", - "SAFE_SLOTS_TO_UPDATE_JUSTIFIED": "8", "MIN_DEPOSIT_AMOUNT": "1000000000", "MAX_EFFECTIVE_BALANCE": "32000000000", "EFFECTIVE_BALANCE_INCREMENT": "1000000000", diff --git a/book/src/help_bn.md b/book/src/help_bn.md index 338905a4fb..69701a3ad9 100644 --- a/book/src/help_bn.md +++ b/book/src/help_bn.md @@ -326,14 +326,6 @@ Options: --quic-port6 The UDP port that quic will listen on over IPv6 if listening over both IPv4 and IPv6. Defaults to `port6` + 1 - --safe-slots-to-import-optimistically - Used to coordinate manual overrides of the - SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should only - be used if the user has a clear understanding that the broad Ethereum - community has elected to override this parameter in the event of an - attack at the PoS transition block. Incorrect use of this flag can - cause your node to possibly accept an invalid chain or sync more - slowly. Be extremely careful with this flag. --self-limiter-protocols Enables the outbound rate limiter (requests made by this node).Rate limit quotas per protocol can be set in the form of @@ -387,27 +379,6 @@ Options: database. --target-peers The target number of peers. 
- --terminal-block-hash-epoch-override - Used to coordinate manual overrides to the - TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should only - be used if the user has a clear understanding that the broad Ethereum - community has elected to override the terminal PoW block. Incorrect - use of this flag will cause your node to experience a consensus - failure. Be extremely careful with this flag. - --terminal-block-hash-override - Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH - parameter. This flag should only be used if the user has a clear - understanding that the broad Ethereum community has elected to - override the terminal PoW block. Incorrect use of this flag will cause - your node to experience a consensus failure. Be extremely careful with - this flag. - --terminal-total-difficulty-override - Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY - parameter. Accepts a 256-bit decimal integer (not a hex value). This - flag should only be used if the user has a clear understanding that - the broad Ethereum community has elected to override the terminal - difficulty. Incorrect use of this flag will cause your node to - experience a consensus failure. Be extremely careful with this flag. --trusted-peers One or more comma-delimited trusted peer ids which always have the highest score according to the peer scoring system. diff --git a/book/src/help_general.md b/book/src/help_general.md index 48314d5108..aa0ae76855 100644 --- a/book/src/help_general.md +++ b/book/src/help_general.md @@ -77,39 +77,10 @@ Options: --network Name of the Eth2 chain Lighthouse will sync and follow. [possible values: mainnet, gnosis, chiado, sepolia, holesky] - --safe-slots-to-import-optimistically - Used to coordinate manual overrides of the - SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. 
This flag should only - be used if the user has a clear understanding that the broad Ethereum - community has elected to override this parameter in the event of an - attack at the PoS transition block. Incorrect use of this flag can - cause your node to possibly accept an invalid chain or sync more - slowly. Be extremely careful with this flag. -t, --testnet-dir

Path to directory containing eth2_testnet specs. Defaults to a hard-coded Lighthouse testnet. Only effective if there is no existing database. - --terminal-block-hash-epoch-override - Used to coordinate manual overrides to the - TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should only - be used if the user has a clear understanding that the broad Ethereum - community has elected to override the terminal PoW block. Incorrect - use of this flag will cause your node to experience a consensus - failure. Be extremely careful with this flag. - --terminal-block-hash-override - Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH - parameter. This flag should only be used if the user has a clear - understanding that the broad Ethereum community has elected to - override the terminal PoW block. Incorrect use of this flag will cause - your node to experience a consensus failure. Be extremely careful with - this flag. - --terminal-total-difficulty-override - Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY - parameter. Accepts a 256-bit decimal integer (not a hex value). This - flag should only be used if the user has a clear understanding that - the broad Ethereum community has elected to override the terminal - difficulty. Incorrect use of this flag will cause your node to - experience a consensus failure. Be extremely careful with this flag. -V, --version Print version diff --git a/book/src/help_vc.md b/book/src/help_vc.md index aa24ab3d91..2cfbfbc857 100644 --- a/book/src/help_vc.md +++ b/book/src/help_vc.md @@ -118,14 +118,6 @@ Options: specify nodes that are used to send beacon block proposals. A failure will revert back to the standard beacon nodes specified in --beacon-nodes. - --safe-slots-to-import-optimistically - Used to coordinate manual overrides of the - SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. 
This flag should only - be used if the user has a clear understanding that the broad Ethereum - community has elected to override this parameter in the event of an - attack at the PoS transition block. Incorrect use of this flag can - cause your node to possibly accept an invalid chain or sync more - slowly. Be extremely careful with this flag. --secrets-dir The directory which contains the password to unlock the validator voting keypairs. Each password should be contained in a file where the @@ -140,27 +132,6 @@ Options: Path to directory containing eth2_testnet specs. Defaults to a hard-coded Lighthouse testnet. Only effective if there is no existing database. - --terminal-block-hash-epoch-override - Used to coordinate manual overrides to the - TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should only - be used if the user has a clear understanding that the broad Ethereum - community has elected to override the terminal PoW block. Incorrect - use of this flag will cause your node to experience a consensus - failure. Be extremely careful with this flag. - --terminal-block-hash-override - Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH - parameter. This flag should only be used if the user has a clear - understanding that the broad Ethereum community has elected to - override the terminal PoW block. Incorrect use of this flag will cause - your node to experience a consensus failure. Be extremely careful with - this flag. - --terminal-total-difficulty-override - Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY - parameter. Accepts a 256-bit decimal integer (not a hex value). This - flag should only be used if the user has a clear understanding that - the broad Ethereum community has elected to override the terminal - difficulty. Incorrect use of this flag will cause your node to - experience a consensus failure. Be extremely careful with this flag. 
--validator-registration-batch-size Defines the number of validators per validator/register_validator request sent to the BN. This value can be reduced to avoid timeouts diff --git a/book/src/help_vm.md b/book/src/help_vm.md index f787985b21..9b6c5d4f3b 100644 --- a/book/src/help_vm.md +++ b/book/src/help_vm.md @@ -69,39 +69,10 @@ Options: --network Name of the Eth2 chain Lighthouse will sync and follow. [possible values: mainnet, gnosis, chiado, sepolia, holesky] - --safe-slots-to-import-optimistically - Used to coordinate manual overrides of the - SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should only - be used if the user has a clear understanding that the broad Ethereum - community has elected to override this parameter in the event of an - attack at the PoS transition block. Incorrect use of this flag can - cause your node to possibly accept an invalid chain or sync more - slowly. Be extremely careful with this flag. -t, --testnet-dir Path to directory containing eth2_testnet specs. Defaults to a hard-coded Lighthouse testnet. Only effective if there is no existing database. - --terminal-block-hash-epoch-override - Used to coordinate manual overrides to the - TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should only - be used if the user has a clear understanding that the broad Ethereum - community has elected to override the terminal PoW block. Incorrect - use of this flag will cause your node to experience a consensus - failure. Be extremely careful with this flag. - --terminal-block-hash-override - Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH - parameter. This flag should only be used if the user has a clear - understanding that the broad Ethereum community has elected to - override the terminal PoW block. Incorrect use of this flag will cause - your node to experience a consensus failure. Be extremely careful with - this flag. 
- --terminal-total-difficulty-override - Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY - parameter. Accepts a 256-bit decimal integer (not a hex value). This - flag should only be used if the user has a clear understanding that - the broad Ethereum community has elected to override the terminal - difficulty. Incorrect use of this flag will cause your node to - experience a consensus failure. Be extremely careful with this flag. Flags: --disable-log-timestamp diff --git a/book/src/help_vm_create.md b/book/src/help_vm_create.md index cde822e894..2743117eae 100644 --- a/book/src/help_vm_create.md +++ b/book/src/help_vm_create.md @@ -91,14 +91,6 @@ Options: If this flag is set, Lighthouse will always prefer blocks constructed by builders, regardless of payload value. [possible values: true, false] - --safe-slots-to-import-optimistically - Used to coordinate manual overrides of the - SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should only - be used if the user has a clear understanding that the broad Ethereum - community has elected to override this parameter in the event of an - attack at the PoS transition block. Incorrect use of this flag can - cause your node to possibly accept an invalid chain or sync more - slowly. Be extremely careful with this flag. --suggested-fee-recipient All created validators will use this value for the suggested fee recipient. Omit this flag to use the default value from the VC. @@ -106,27 +98,6 @@ Options: Path to directory containing eth2_testnet specs. Defaults to a hard-coded Lighthouse testnet. Only effective if there is no existing database. - --terminal-block-hash-epoch-override - Used to coordinate manual overrides to the - TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should only - be used if the user has a clear understanding that the broad Ethereum - community has elected to override the terminal PoW block. 
Incorrect - use of this flag will cause your node to experience a consensus - failure. Be extremely careful with this flag. - --terminal-block-hash-override - Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH - parameter. This flag should only be used if the user has a clear - understanding that the broad Ethereum community has elected to - override the terminal PoW block. Incorrect use of this flag will cause - your node to experience a consensus failure. Be extremely careful with - this flag. - --terminal-total-difficulty-override - Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY - parameter. Accepts a 256-bit decimal integer (not a hex value). This - flag should only be used if the user has a clear understanding that - the broad Ethereum community has elected to override the terminal - difficulty. Incorrect use of this flag will cause your node to - experience a consensus failure. Be extremely careful with this flag. Flags: --disable-deposits diff --git a/book/src/help_vm_import.md b/book/src/help_vm_import.md index 0883139ad2..b4999d3fe3 100644 --- a/book/src/help_vm_import.md +++ b/book/src/help_vm_import.md @@ -50,39 +50,10 @@ Options: --network Name of the Eth2 chain Lighthouse will sync and follow. [possible values: mainnet, gnosis, chiado, sepolia, holesky] - --safe-slots-to-import-optimistically - Used to coordinate manual overrides of the - SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should only - be used if the user has a clear understanding that the broad Ethereum - community has elected to override this parameter in the event of an - attack at the PoS transition block. Incorrect use of this flag can - cause your node to possibly accept an invalid chain or sync more - slowly. Be extremely careful with this flag. -t, --testnet-dir Path to directory containing eth2_testnet specs. Defaults to a hard-coded Lighthouse testnet. Only effective if there is no existing database. 
- --terminal-block-hash-epoch-override - Used to coordinate manual overrides to the - TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should only - be used if the user has a clear understanding that the broad Ethereum - community has elected to override the terminal PoW block. Incorrect - use of this flag will cause your node to experience a consensus - failure. Be extremely careful with this flag. - --terminal-block-hash-override - Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH - parameter. This flag should only be used if the user has a clear - understanding that the broad Ethereum community has elected to - override the terminal PoW block. Incorrect use of this flag will cause - your node to experience a consensus failure. Be extremely careful with - this flag. - --terminal-total-difficulty-override - Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY - parameter. Accepts a 256-bit decimal integer (not a hex value). This - flag should only be used if the user has a clear understanding that - the broad Ethereum community has elected to override the terminal - difficulty. Incorrect use of this flag will cause your node to - experience a consensus failure. Be extremely careful with this flag. --validators-file The path to a JSON file containing a list of validators to be imported to the validator client. This file is usually named "validators.json". diff --git a/book/src/help_vm_move.md b/book/src/help_vm_move.md index 12dd1e9140..99eee32c78 100644 --- a/book/src/help_vm_move.md +++ b/book/src/help_vm_move.md @@ -74,14 +74,6 @@ Options: If this flag is set, Lighthouse will always prefer blocks constructed by builders, regardless of payload value. [possible values: true, false] - --safe-slots-to-import-optimistically - Used to coordinate manual overrides of the - SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. 
This flag should only - be used if the user has a clear understanding that the broad Ethereum - community has elected to override this parameter in the event of an - attack at the PoS transition block. Incorrect use of this flag can - cause your node to possibly accept an invalid chain or sync more - slowly. Be extremely careful with this flag. --src-vc-token The file containing a token required by the source validator client. --src-vc-url @@ -95,27 +87,6 @@ Options: Path to directory containing eth2_testnet specs. Defaults to a hard-coded Lighthouse testnet. Only effective if there is no existing database. - --terminal-block-hash-epoch-override - Used to coordinate manual overrides to the - TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should only - be used if the user has a clear understanding that the broad Ethereum - community has elected to override the terminal PoW block. Incorrect - use of this flag will cause your node to experience a consensus - failure. Be extremely careful with this flag. - --terminal-block-hash-override - Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH - parameter. This flag should only be used if the user has a clear - understanding that the broad Ethereum community has elected to - override the terminal PoW block. Incorrect use of this flag will cause - your node to experience a consensus failure. Be extremely careful with - this flag. - --terminal-total-difficulty-override - Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY - parameter. Accepts a 256-bit decimal integer (not a hex value). This - flag should only be used if the user has a clear understanding that - the broad Ethereum community has elected to override the terminal - difficulty. Incorrect use of this flag will cause your node to - experience a consensus failure. Be extremely careful with this flag. --validators The validators to be moved. Either a list of 0x-prefixed validator pubkeys or the keyword "all". 
diff --git a/common/clap_utils/src/lib.rs b/common/clap_utils/src/lib.rs index cba7399c9b..a4b5f4dc1c 100644 --- a/common/clap_utils/src/lib.rs +++ b/common/clap_utils/src/lib.rs @@ -1,6 +1,5 @@ //! A helper library for parsing values from `clap::ArgMatches`. -use alloy_primitives::U256 as Uint256; use clap::builder::styling::*; use clap::ArgMatches; use eth2_network_config::{Eth2NetworkConfig, DEFAULT_HARDCODED_NETWORK}; @@ -30,38 +29,9 @@ pub fn get_eth2_network_config(cli_args: &ArgMatches) -> Result(cli_args, "terminal-total-difficulty-override")? - { - let stripped = string.replace(',', ""); - let terminal_total_difficulty = Uint256::from_str(&stripped).map_err(|e| { - format!( - "Could not parse --terminal-total-difficulty-override as decimal value: {:?}", - e - ) - })?; - - eth2_network_config.config.terminal_total_difficulty = terminal_total_difficulty; - } - - if let Some(hash) = parse_optional(cli_args, "terminal-block-hash-override")? { - eth2_network_config.config.terminal_block_hash = hash; - } - - if let Some(epoch) = parse_optional(cli_args, "terminal-block-hash-epoch-override")? { - eth2_network_config - .config - .terminal_block_hash_activation_epoch = epoch; - } - - if let Some(slots) = parse_optional(cli_args, "safe-slots-to-import-optimistically")? { - eth2_network_config - .config - .safe_slots_to_import_optimistically = slots; - } - Ok(eth2_network_config) } diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index ca59a6adfb..85704042df 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -1300,43 +1300,6 @@ where } } - /// Returns `Ok(false)` if a block is not viable to be imported optimistically. 
- /// - /// ## Notes - /// - /// Equivalent to the function with the same name in the optimistic sync specs: - /// - /// https://github.com/ethereum/consensus-specs/blob/dev/sync/optimistic.md#helpers - pub fn is_optimistic_candidate_block( - &self, - current_slot: Slot, - block_slot: Slot, - block_parent_root: &Hash256, - spec: &ChainSpec, - ) -> Result> { - // If the block is sufficiently old, import it. - if block_slot + spec.safe_slots_to_import_optimistically <= current_slot { - return Ok(true); - } - - // If the parent block has execution enabled, always import the block. - // - // See: - // - // https://github.com/ethereum/consensus-specs/pull/2844 - if self - .proto_array - .get_block(block_parent_root) - .map_or(false, |parent| { - parent.execution_status.is_execution_enabled() - }) - { - return Ok(true); - } - - Ok(false) - } - /// Return the current finalized checkpoint. pub fn finalized_checkpoint(&self) -> Checkpoint { *self.fc_store.finalized_checkpoint() diff --git a/consensus/fork_choice/tests/tests.rs b/consensus/fork_choice/tests/tests.rs index ce19d68203..29265e34e4 100644 --- a/consensus/fork_choice/tests/tests.rs +++ b/consensus/fork_choice/tests/tests.rs @@ -256,36 +256,6 @@ impl ForkChoiceTest { self } - /// Moves to the next slot that is *outside* the `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` range. - /// - /// If the chain is presently in an unsafe period, transition through it and the following safe - /// period. - /// - /// Note: the `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` variable has been removed - /// from the fork choice spec in Q1 2023. We're still leaving references to - /// it in our tests because (a) it's easier and (b) it allows us to easily - /// test for the absence of that parameter. - pub fn move_to_next_unsafe_period(self) -> Self { - self.move_inside_safe_to_update() - .move_outside_safe_to_update() - } - - /// Moves to the next slot that is *outside* the `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` range. 
- pub fn move_outside_safe_to_update(self) -> Self { - while is_safe_to_update(self.harness.chain.slot().unwrap(), &self.harness.chain.spec) { - self.harness.advance_slot() - } - self - } - - /// Moves to the next slot that is *inside* the `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` range. - pub fn move_inside_safe_to_update(self) -> Self { - while !is_safe_to_update(self.harness.chain.slot().unwrap(), &self.harness.chain.spec) { - self.harness.advance_slot() - } - self - } - /// Applies a block directly to fork choice, bypassing the beacon chain. /// /// Asserts the block was applied successfully. @@ -516,10 +486,6 @@ impl ForkChoiceTest { } } -fn is_safe_to_update(slot: Slot, spec: &ChainSpec) -> bool { - slot % E::slots_per_epoch() < spec.safe_slots_to_update_justified -} - #[test] fn justified_and_finalized_blocks() { let tester = ForkChoiceTest::new(); @@ -536,15 +502,13 @@ fn justified_and_finalized_blocks() { assert!(fork_choice.get_finalized_block().is_ok()); } -/// - The new justified checkpoint descends from the current. -/// - Current slot is within `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` +/// - The new justified checkpoint descends from the current. Near genesis. #[tokio::test] -async fn justified_checkpoint_updates_with_descendent_inside_safe_slots() { +async fn justified_checkpoint_updates_with_descendent_first_justification() { ForkChoiceTest::new() .apply_blocks_while(|_, state| state.current_justified_checkpoint().epoch == 0) .await .unwrap() - .move_inside_safe_to_update() .assert_justified_epoch(0) .apply_blocks(1) .await @@ -552,49 +516,29 @@ async fn justified_checkpoint_updates_with_descendent_inside_safe_slots() { } /// - The new justified checkpoint descends from the current. 
-/// - Current slot is **not** within `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` /// - This is **not** the first justification since genesis #[tokio::test] -async fn justified_checkpoint_updates_with_descendent_outside_safe_slots() { +async fn justified_checkpoint_updates_with_descendent() { ForkChoiceTest::new() .apply_blocks_while(|_, state| state.current_justified_checkpoint().epoch <= 2) .await .unwrap() - .move_outside_safe_to_update() .assert_justified_epoch(2) .apply_blocks(1) .await .assert_justified_epoch(3); } -/// - The new justified checkpoint descends from the current. -/// - Current slot is **not** within `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` -/// - This is the first justification since genesis -#[tokio::test] -async fn justified_checkpoint_updates_first_justification_outside_safe_to_update() { - ForkChoiceTest::new() - .apply_blocks_while(|_, state| state.current_justified_checkpoint().epoch == 0) - .await - .unwrap() - .move_to_next_unsafe_period() - .assert_justified_epoch(0) - .apply_blocks(1) - .await - .assert_justified_epoch(2); -} - /// - The new justified checkpoint **does not** descend from the current. -/// - Current slot is within `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` /// - Finalized epoch has **not** increased. #[tokio::test] -async fn justified_checkpoint_updates_with_non_descendent_inside_safe_slots_without_finality() { +async fn justified_checkpoint_updates_with_non_descendent() { ForkChoiceTest::new() .apply_blocks_while(|_, state| state.current_justified_checkpoint().epoch == 0) .await .unwrap() .apply_blocks(1) .await - .move_inside_safe_to_update() .assert_justified_epoch(2) .apply_block_directly_to_fork_choice(|_, state| { // The finalized checkpoint should not change. @@ -611,64 +555,6 @@ async fn justified_checkpoint_updates_with_non_descendent_inside_safe_slots_with .assert_justified_epoch(3); } -/// - The new justified checkpoint **does not** descend from the current. -/// - Current slot is **not** within `SAFE_SLOTS_TO_UPDATE_JUSTIFIED`. 
-/// - Finalized epoch has **not** increased. -#[tokio::test] -async fn justified_checkpoint_updates_with_non_descendent_outside_safe_slots_without_finality() { - ForkChoiceTest::new() - .apply_blocks_while(|_, state| state.current_justified_checkpoint().epoch == 0) - .await - .unwrap() - .apply_blocks(1) - .await - .move_to_next_unsafe_period() - .assert_justified_epoch(2) - .apply_block_directly_to_fork_choice(|_, state| { - // The finalized checkpoint should not change. - state.finalized_checkpoint().epoch = Epoch::new(0); - - // The justified checkpoint has changed. - state.current_justified_checkpoint_mut().epoch = Epoch::new(3); - // The new block should **not** include the current justified block as an ancestor. - state.current_justified_checkpoint_mut().root = *state - .get_block_root(Epoch::new(1).start_slot(E::slots_per_epoch())) - .unwrap(); - }) - .await - // Now that `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` has been removed, the new - // block should have updated the justified checkpoint. - .assert_justified_epoch(3); -} - -/// - The new justified checkpoint **does not** descend from the current. -/// - Current slot is **not** within `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` -/// - Finalized epoch has increased. -#[tokio::test] -async fn justified_checkpoint_updates_with_non_descendent_outside_safe_slots_with_finality() { - ForkChoiceTest::new() - .apply_blocks_while(|_, state| state.current_justified_checkpoint().epoch == 0) - .await - .unwrap() - .apply_blocks(1) - .await - .move_to_next_unsafe_period() - .assert_justified_epoch(2) - .apply_block_directly_to_fork_choice(|_, state| { - // The finalized checkpoint should change. - state.finalized_checkpoint_mut().epoch = Epoch::new(1); - - // The justified checkpoint has changed. - state.current_justified_checkpoint_mut().epoch = Epoch::new(3); - // The new block should **not** include the current justified block as an ancestor. 
- state.current_justified_checkpoint_mut().root = *state - .get_block_root(Epoch::new(1).start_slot(E::slots_per_epoch())) - .unwrap(); - }) - .await - .assert_justified_epoch(3); -} - /// Check that the balances are obtained correctly. #[tokio::test] async fn justified_balances() { diff --git a/consensus/types/presets/gnosis/phase0.yaml b/consensus/types/presets/gnosis/phase0.yaml index 87c73e6fb7..48129cb47e 100644 --- a/consensus/types/presets/gnosis/phase0.yaml +++ b/consensus/types/presets/gnosis/phase0.yaml @@ -18,12 +18,6 @@ HYSTERESIS_DOWNWARD_MULTIPLIER: 1 HYSTERESIS_UPWARD_MULTIPLIER: 5 -# Fork Choice -# --------------------------------------------------------------- -# 2**3 (= 8) -SAFE_SLOTS_TO_UPDATE_JUSTIFIED: 8 - - # Gwei values # --------------------------------------------------------------- # 2**0 * 10**9 (= 1,000,000,000) Gwei diff --git a/consensus/types/presets/mainnet/phase0.yaml b/consensus/types/presets/mainnet/phase0.yaml index 89bb97d6a8..02bc96c8cd 100644 --- a/consensus/types/presets/mainnet/phase0.yaml +++ b/consensus/types/presets/mainnet/phase0.yaml @@ -18,12 +18,6 @@ HYSTERESIS_DOWNWARD_MULTIPLIER: 1 HYSTERESIS_UPWARD_MULTIPLIER: 5 -# Fork Choice -# --------------------------------------------------------------- -# 2**3 (= 8) -SAFE_SLOTS_TO_UPDATE_JUSTIFIED: 8 - - # Gwei values # --------------------------------------------------------------- # 2**0 * 10**9 (= 1,000,000,000) Gwei diff --git a/consensus/types/presets/minimal/phase0.yaml b/consensus/types/presets/minimal/phase0.yaml index c9c81325f1..1f75603142 100644 --- a/consensus/types/presets/minimal/phase0.yaml +++ b/consensus/types/presets/minimal/phase0.yaml @@ -18,12 +18,6 @@ HYSTERESIS_DOWNWARD_MULTIPLIER: 1 HYSTERESIS_UPWARD_MULTIPLIER: 5 -# Fork Choice -# --------------------------------------------------------------- -# 2**1 (= 1) -SAFE_SLOTS_TO_UPDATE_JUSTIFIED: 2 - - # Gwei values # --------------------------------------------------------------- # 2**0 * 10**9 (= 
1,000,000,000) Gwei diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index d8b75260b6..1c4effb4ae 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -114,7 +114,6 @@ pub struct ChainSpec { /* * Fork choice */ - pub safe_slots_to_update_justified: u64, pub proposer_score_boost: Option, pub reorg_head_weight_threshold: Option, pub reorg_parent_weight_threshold: Option, @@ -157,7 +156,6 @@ pub struct ChainSpec { pub terminal_total_difficulty: Uint256, pub terminal_block_hash: ExecutionBlockHash, pub terminal_block_hash_activation_epoch: Epoch, - pub safe_slots_to_import_optimistically: u64, /* * Capella hard fork params @@ -705,7 +703,6 @@ impl ChainSpec { /* * Fork choice */ - safe_slots_to_update_justified: 8, proposer_score_boost: Some(40), reorg_head_weight_threshold: Some(20), reorg_parent_weight_threshold: Some(160), @@ -756,7 +753,6 @@ impl ChainSpec { .expect("terminal_total_difficulty is a valid integer"), terminal_block_hash: ExecutionBlockHash::zero(), terminal_block_hash_activation_epoch: Epoch::new(u64::MAX), - safe_slots_to_import_optimistically: 128u64, /* * Capella hard fork params @@ -886,7 +882,6 @@ impl ChainSpec { inactivity_penalty_quotient: u64::checked_pow(2, 25).expect("pow does not overflow"), min_slashing_penalty_quotient: 64, proportional_slashing_multiplier: 2, - safe_slots_to_update_justified: 2, // Altair epochs_per_sync_committee_period: Epoch::new(8), altair_fork_version: [0x01, 0x00, 0x00, 0x01], @@ -1026,7 +1021,6 @@ impl ChainSpec { /* * Fork choice */ - safe_slots_to_update_justified: 8, proposer_score_boost: Some(40), reorg_head_weight_threshold: Some(20), reorg_parent_weight_threshold: Some(160), @@ -1077,7 +1071,6 @@ impl ChainSpec { .expect("terminal_total_difficulty is a valid integer"), terminal_block_hash: ExecutionBlockHash::zero(), terminal_block_hash_activation_epoch: Epoch::new(u64::MAX), - safe_slots_to_import_optimistically: 128u64, /* * Capella 
hard fork params @@ -1212,9 +1205,6 @@ pub struct Config { pub terminal_block_hash: ExecutionBlockHash, #[serde(default = "default_terminal_block_hash_activation_epoch")] pub terminal_block_hash_activation_epoch: Epoch, - #[serde(default = "default_safe_slots_to_import_optimistically")] - #[serde(with = "serde_utils::quoted_u64")] - pub safe_slots_to_import_optimistically: u64, #[serde(with = "serde_utils::quoted_u64")] min_genesis_active_validator_count: u64, @@ -1425,10 +1415,6 @@ fn default_terminal_block_hash_activation_epoch() -> Epoch { Epoch::new(u64::MAX) } -fn default_safe_slots_to_import_optimistically() -> u64 { - 128u64 -} - fn default_subnets_per_node() -> u8 { 2u8 } @@ -1649,7 +1635,6 @@ impl Config { terminal_total_difficulty: spec.terminal_total_difficulty, terminal_block_hash: spec.terminal_block_hash, terminal_block_hash_activation_epoch: spec.terminal_block_hash_activation_epoch, - safe_slots_to_import_optimistically: spec.safe_slots_to_import_optimistically, min_genesis_active_validator_count: spec.min_genesis_active_validator_count, min_genesis_time: spec.min_genesis_time, @@ -1751,7 +1736,6 @@ impl Config { terminal_total_difficulty, terminal_block_hash, terminal_block_hash_activation_epoch, - safe_slots_to_import_optimistically, min_genesis_active_validator_count, min_genesis_time, genesis_fork_version, @@ -1851,7 +1835,6 @@ impl Config { terminal_total_difficulty, terminal_block_hash, terminal_block_hash_activation_epoch, - safe_slots_to_import_optimistically, gossip_max_size, min_epochs_for_block_requests, max_chunk_size, @@ -2103,7 +2086,6 @@ mod yaml_tests { #TERMINAL_TOTAL_DIFFICULTY: 115792089237316195423570985008687907853269984665640564039457584007913129638911 #TERMINAL_BLOCK_HASH: 0x0000000000000000000000000000000000000000000000000000000000000001 #TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551614 - #SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY: 2 MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 16384 MIN_GENESIS_TIME: 1606824000 
GENESIS_FORK_VERSION: 0x00000000 @@ -2152,7 +2134,6 @@ mod yaml_tests { check_default!(terminal_total_difficulty); check_default!(terminal_block_hash); check_default!(terminal_block_hash_activation_epoch); - check_default!(safe_slots_to_import_optimistically); check_default!(bellatrix_fork_version); check_default!(gossip_max_size); check_default!(min_epochs_for_block_requests); diff --git a/consensus/types/src/preset.rs b/consensus/types/src/preset.rs index 2c576ed332..435a74bdc3 100644 --- a/consensus/types/src/preset.rs +++ b/consensus/types/src/preset.rs @@ -27,8 +27,6 @@ pub struct BasePreset { #[serde(with = "serde_utils::quoted_u64")] pub hysteresis_upward_multiplier: u64, #[serde(with = "serde_utils::quoted_u64")] - pub safe_slots_to_update_justified: u64, - #[serde(with = "serde_utils::quoted_u64")] pub min_deposit_amount: u64, #[serde(with = "serde_utils::quoted_u64")] pub max_effective_balance: u64, @@ -90,7 +88,6 @@ impl BasePreset { hysteresis_quotient: spec.hysteresis_quotient, hysteresis_downward_multiplier: spec.hysteresis_downward_multiplier, hysteresis_upward_multiplier: spec.hysteresis_upward_multiplier, - safe_slots_to_update_justified: spec.safe_slots_to_update_justified, min_deposit_amount: spec.min_deposit_amount, max_effective_balance: spec.max_effective_balance, effective_balance_increment: spec.effective_balance_increment, diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index 4f4dabff89..e33e4cb9b8 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -323,57 +323,43 @@ fn main() { Arg::new("terminal-total-difficulty-override") .long("terminal-total-difficulty-override") .value_name("INTEGER") - .help("Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY parameter. \ - Accepts a 256-bit decimal integer (not a hex value). \ - This flag should only be used if the user has a clear understanding that \ - the broad Ethereum community has elected to override the terminal difficulty. 
\ - Incorrect use of this flag will cause your node to experience a consensus \ - failure. Be extremely careful with this flag.") + .help("DEPRECATED") .action(ArgAction::Set) .global(true) .display_order(0) + .hide(true) ) .arg( Arg::new("terminal-block-hash-override") .long("terminal-block-hash-override") .value_name("TERMINAL_BLOCK_HASH") - .help("Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH parameter. \ - This flag should only be used if the user has a clear understanding that \ - the broad Ethereum community has elected to override the terminal PoW block. \ - Incorrect use of this flag will cause your node to experience a consensus \ - failure. Be extremely careful with this flag.") + .help("DEPRECATED") .requires("terminal-block-hash-epoch-override") .action(ArgAction::Set) .global(true) .display_order(0) + .hide(true) ) .arg( Arg::new("terminal-block-hash-epoch-override") .long("terminal-block-hash-epoch-override") .value_name("EPOCH") - .help("Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH \ - parameter. This flag should only be used if the user has a clear understanding \ - that the broad Ethereum community has elected to override the terminal PoW block. \ - Incorrect use of this flag will cause your node to experience a consensus \ - failure. Be extremely careful with this flag.") + .help("DEPRECATED") .requires("terminal-block-hash-override") .action(ArgAction::Set) .global(true) .display_order(0) + .hide(true) ) .arg( Arg::new("safe-slots-to-import-optimistically") .long("safe-slots-to-import-optimistically") .value_name("INTEGER") - .help("Used to coordinate manual overrides of the SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY \ - parameter. This flag should only be used if the user has a clear understanding \ - that the broad Ethereum community has elected to override this parameter in the event \ - of an attack at the PoS transition block. 
Incorrect use of this flag can cause your \ - node to possibly accept an invalid chain or sync more slowly. Be extremely careful with \ - this flag.") + .help("DEPRECATED") .action(ArgAction::Set) .global(true) .display_order(0) + .hide(true) ) .arg( Arg::new("genesis-state-url") @@ -631,6 +617,20 @@ fn run( ); } + // Warn for DEPRECATED global flags. This code should be removed when we finish deleting these + // flags. + let deprecated_flags = [ + "terminal-total-difficulty-override", + "terminal-block-hash-override", + "terminal-block-hash-epoch-override", + "safe-slots-to-import-optimistically", + ]; + for flag in deprecated_flags { + if matches.get_one::(flag).is_some() { + slog::warn!(log, "The {} flag is deprecated and does nothing", flag); + } + } + // Note: the current code technically allows for starting a beacon node _and_ a validator // client at the same time. // diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index f22e438700..ac7ddcdbd9 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -21,7 +21,7 @@ use std::string::ToString; use std::time::Duration; use tempfile::TempDir; use types::non_zero_usize::new_non_zero_usize; -use types::{Address, Checkpoint, Epoch, ExecutionBlockHash, Hash256, MainnetEthSpec}; +use types::{Address, Checkpoint, Epoch, Hash256, MainnetEthSpec}; use unused_port::{unused_tcp4_port, unused_tcp6_port, unused_udp4_port, unused_udp6_port}; const DEFAULT_ETH1_ENDPOINT: &str = "http://localhost:8545/"; @@ -742,61 +742,30 @@ fn jwt_optional_flags() { fn jwt_optional_alias_flags() { run_jwt_optional_flags_test("jwt-secrets", "jwt-id", "jwt-version"); } +// DEPRECATED. This flag is deprecated but should not cause a crash. 
#[test] fn terminal_total_difficulty_override_flag() { - use beacon_node::beacon_chain::types::Uint256; CommandLineTest::new() .flag("terminal-total-difficulty-override", Some("1337424242")) - .run_with_zero_port() - .with_spec::(|spec| { - assert_eq!(spec.terminal_total_difficulty, Uint256::from(1337424242)) - }); + .run_with_zero_port(); } +// DEPRECATED. This flag is deprecated but should not cause a crash. #[test] fn terminal_block_hash_and_activation_epoch_override_flags() { CommandLineTest::new() .flag("terminal-block-hash-epoch-override", Some("1337")) - .flag( - "terminal-block-hash-override", - Some("0x4242424242424242424242424242424242424242424242424242424242424242"), - ) - .run_with_zero_port() - .with_spec::(|spec| { - assert_eq!( - spec.terminal_block_hash, - ExecutionBlockHash::from_str( - "0x4242424242424242424242424242424242424242424242424242424242424242" - ) - .unwrap() - ); - assert_eq!(spec.terminal_block_hash_activation_epoch, 1337); - }); -} -#[test] -#[should_panic] -fn terminal_block_hash_missing_activation_epoch() { - CommandLineTest::new() .flag( "terminal-block-hash-override", Some("0x4242424242424242424242424242424242424242424242424242424242424242"), ) .run_with_zero_port(); } -#[test] -#[should_panic] -fn epoch_override_missing_terminal_block_hash() { - CommandLineTest::new() - .flag("terminal-block-hash-epoch-override", Some("1337")) - .run_with_zero_port(); -} +// DEPRECATED. This flag is deprecated but should not cause a crash. #[test] fn safe_slots_to_import_optimistically_flag() { CommandLineTest::new() .flag("safe-slots-to-import-optimistically", Some("421337")) - .run_with_zero_port() - .with_spec::(|spec| { - assert_eq!(spec.safe_slots_to_import_optimistically, 421337) - }); + .run_with_zero_port(); } // Tests for Network flags. 
diff --git a/lighthouse/tests/exec.rs b/lighthouse/tests/exec.rs index 9d6453908c..5379912c13 100644 --- a/lighthouse/tests/exec.rs +++ b/lighthouse/tests/exec.rs @@ -140,11 +140,6 @@ impl CompletedTest { func(&self.config); } - pub fn with_spec(self, func: F) { - let spec = ChainSpec::from_config::(&self.chain_config).unwrap(); - func(spec); - } - pub fn with_config_and_dir(self, func: F) { func(&self.config, &self.dir); } diff --git a/testing/ef_tests/check_all_files_accessed.py b/testing/ef_tests/check_all_files_accessed.py index 9495047e7f..117c89a22f 100755 --- a/testing/ef_tests/check_all_files_accessed.py +++ b/testing/ef_tests/check_all_files_accessed.py @@ -25,6 +25,8 @@ excluded_paths = [ # Intentionally omitted, as per https://github.com/sigp/lighthouse/issues/1835 "tests/.*/.*/ssz_static/Eth1Block/", "tests/.*/.*/ssz_static/PowBlock/", + # We no longer implement merge logic. + "tests/.*/bellatrix/fork_choice/on_merge_block", # light_client "tests/.*/.*/light_client/single_merkle_proof", "tests/.*/.*/light_client/sync", diff --git a/testing/ef_tests/src/handler.rs b/testing/ef_tests/src/handler.rs index 97b449dab9..5e928d2244 100644 --- a/testing/ef_tests/src/handler.rs +++ b/testing/ef_tests/src/handler.rs @@ -627,8 +627,8 @@ impl Handler for ForkChoiceHandler { } fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool { - // Merge block tests are only enabled for Bellatrix. - if self.handler_name == "on_merge_block" && fork_name != ForkName::Bellatrix { + // We no longer run on_merge_block tests since removing merge support. 
+ if self.handler_name == "on_merge_block" { return false; } diff --git a/testing/ef_tests/tests/tests.rs b/testing/ef_tests/tests/tests.rs index 1812a101ca..c2524c14e2 100644 --- a/testing/ef_tests/tests/tests.rs +++ b/testing/ef_tests/tests/tests.rs @@ -826,12 +826,6 @@ fn fork_choice_on_block() { ForkChoiceHandler::::new("on_block").run(); } -#[test] -fn fork_choice_on_merge_block() { - ForkChoiceHandler::::new("on_merge_block").run(); - ForkChoiceHandler::::new("on_merge_block").run(); -} - #[test] fn fork_choice_ex_ante() { ForkChoiceHandler::::new("ex_ante").run(); From 56a9befaa1e96d971e2f2ddf036ebeddb90e695e Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 21 Oct 2024 17:54:42 +1100 Subject: [PATCH 65/66] Clarify command-line reference docs (#6524) * Clarify command-line reference docs * Update page title * Merge remote-tracking branch 'origin/unstable' into cli-reference * Update CLI script --- book/src/SUMMARY.md | 4 +-- book/src/cli.md | 55 ---------------------------------------- book/src/help_general.md | 2 +- scripts/cli.sh | 2 +- 4 files changed, 4 insertions(+), 59 deletions(-) delete mode 100644 book/src/cli.md diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md index 7fb0b2f4e7..86c97af0da 100644 --- a/book/src/SUMMARY.md +++ b/book/src/SUMMARY.md @@ -54,13 +54,13 @@ * [Merge Migration](./merge-migration.md) * [Late Block Re-orgs](./late-block-re-orgs.md) * [Blobs](./advanced-blobs.md) -* [Built-In Documentation](./help_general.md) +* [Command Line Reference (CLI)](./help_general.md) * [Beacon Node](./help_bn.md) * [Validator Client](./help_vc.md) * [Validator Manager](./help_vm.md) * [Create](./help_vm_create.md) * [Import](./help_vm_import.md) - * [Move](./help_vm_move.md) + * [Move](./help_vm_move.md) * [Contributing](./contributing.md) * [Development Environment](./setup.md) * [FAQs](./faq.md) diff --git a/book/src/cli.md b/book/src/cli.md deleted file mode 100644 index f9e7df0748..0000000000 --- a/book/src/cli.md +++ /dev/null @@ 
-1,55 +0,0 @@ -# Command-Line Interface (CLI) - -The `lighthouse` binary provides all necessary Ethereum consensus client functionality. It -has two primary sub-commands: - -- `$ lighthouse beacon_node`: the largest and most fundamental component which connects to - the p2p network, processes messages and tracks the head of the beacon - chain. -- `$ lighthouse validator_client`: a lightweight but important component which loads a validators private - key and signs messages using a `beacon_node` as a source-of-truth. - -There are also some ancillary binaries like `lcli` and `account_manager`, but -these are primarily for testing. - -> **Note:** documentation sometimes uses `$ lighthouse bn` and `$ lighthouse -> vc` instead of the long-form `beacon_node` and `validator_client`. These -> commands are valid on the CLI too. - -## Installation - -Typical users may install `lighthouse` to `CARGO_HOME` with `cargo install ---path lighthouse` from the root of the repository. See ["Configuring the -`PATH` environment variable"](https://www.rust-lang.org/tools/install) for more -information. - -For developers, we recommend building Lighthouse using the `$ cargo build --release ---bin lighthouse` command and executing binaries from the -`/target/release` directory. This is more ergonomic when -modifying and rebuilding regularly. - -## Documentation - -Each binary supports the `--help` flag, this is the best source of -documentation. - -```bash -lighthouse beacon_node --help -``` - -```bash -lighthouse validator_client --help -``` - -## Creating a new database/testnet - -Lighthouse should run out-of-the box and connect to the current testnet -maintained by Sigma Prime. - -However, for developers, testnets can be created by following the instructions -outlined in [testnets](./testnets.md). The steps listed here will create a -local database specified to a new testnet. 
- -## Resuming from an existing database - -Once a database/testnet has been created, it can be resumed by running `$ lighthouse bn`. diff --git a/book/src/help_general.md b/book/src/help_general.md index aa0ae76855..996b048d10 100644 --- a/book/src/help_general.md +++ b/book/src/help_general.md @@ -1,4 +1,4 @@ -# Lighthouse General Commands +# Lighthouse CLI Reference ``` Ethereum 2.0 client by Sigma Prime. Provides a full-featured beacon node, a diff --git a/scripts/cli.sh b/scripts/cli.sh index e43c05a834..ef4ed158ad 100755 --- a/scripts/cli.sh +++ b/scripts/cli.sh @@ -40,7 +40,7 @@ vm_import=./help_vm_import.md vm_move=./help_vm_move.md # create .md files -write_to_file "$general_cli" "$general" "Lighthouse General Commands" +write_to_file "$general_cli" "$general" "Lighthouse CLI Reference" write_to_file "$bn_cli" "$bn" "Beacon Node" write_to_file "$vc_cli" "$vc" "Validator Client" write_to_file "$vm_cli" "$vm" "Validator Manager" From 9aefb5539baff637d68deb3dd386ff45312f3573 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 21 Oct 2024 23:42:51 +1100 Subject: [PATCH 66/66] Fix BlobsByRange by reverting PR6462 (#6526) * Revert "Remove generic E from RequestId (#6462)" This reverts commit 772929fae27bd9a2978884c7648dc10fecf3d876. 
--- .../lighthouse_network/src/rpc/codec.rs | 19 ++++++------- .../lighthouse_network/src/rpc/handler.rs | 14 ++++------ .../lighthouse_network/src/rpc/methods.rs | 28 +++++++++++-------- beacon_node/lighthouse_network/src/rpc/mod.rs | 10 +++---- .../lighthouse_network/src/rpc/outbound.rs | 8 ++---- .../lighthouse_network/src/rpc/protocol.rs | 14 +++++----- .../src/rpc/rate_limiter.rs | 3 +- .../src/rpc/self_limiter.rs | 12 ++++---- .../lighthouse_network/src/service/mod.rs | 6 ++-- .../lighthouse_network/tests/rpc_tests.rs | 1 - .../network_beacon_processor/rpc_methods.rs | 4 +-- .../src/network_beacon_processor/tests.rs | 7 ++--- beacon_node/network/src/router.rs | 8 +++--- beacon_node/network/src/service.rs | 2 +- .../network/src/sync/network_context.rs | 1 - 15 files changed, 68 insertions(+), 69 deletions(-) diff --git a/beacon_node/lighthouse_network/src/rpc/codec.rs b/beacon_node/lighthouse_network/src/rpc/codec.rs index 19f1b8def7..9bdecab70b 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec.rs @@ -28,7 +28,7 @@ const CONTEXT_BYTES_LEN: usize = 4; /* Inbound Codec */ -pub struct SSZSnappyInboundCodec { +pub struct SSZSnappyInboundCodec { protocol: ProtocolId, inner: Uvi, len: Option, @@ -143,7 +143,7 @@ impl Encoder> for SSZSnappyInboundCodec { // Decoder for inbound streams: Decodes RPC requests from peers impl Decoder for SSZSnappyInboundCodec { - type Item = RequestType; + type Item = RequestType; type Error = RPCError; fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { @@ -195,7 +195,7 @@ impl Decoder for SSZSnappyInboundCodec { } /* Outbound Codec: Codec for initiating RPC requests */ -pub struct SSZSnappyOutboundCodec { +pub struct SSZSnappyOutboundCodec { inner: Uvi, len: Option, protocol: ProtocolId, @@ -322,10 +322,10 @@ impl SSZSnappyOutboundCodec { } // Encoder for outbound streams: Encodes RPC Requests to peers -impl Encoder for SSZSnappyOutboundCodec { +impl Encoder> 
for SSZSnappyOutboundCodec { type Error = RPCError; - fn encode(&mut self, item: RequestType, dst: &mut BytesMut) -> Result<(), Self::Error> { + fn encode(&mut self, item: RequestType, dst: &mut BytesMut) -> Result<(), Self::Error> { let bytes = match item { RequestType::Status(req) => req.as_ssz_bytes(), RequestType::Goodbye(req) => req.as_ssz_bytes(), @@ -549,11 +549,11 @@ fn handle_length( /// Decodes an `InboundRequest` from the byte stream. /// `decoded_buffer` should be an ssz-encoded bytestream with // length = length-prefix received in the beginning of the stream. -fn handle_rpc_request( +fn handle_rpc_request( versioned_protocol: SupportedProtocol, decoded_buffer: &[u8], spec: &ChainSpec, -) -> Result, RPCError> { +) -> Result>, RPCError> { match versioned_protocol { SupportedProtocol::StatusV1 => Ok(Some(RequestType::Status( StatusMessage::from_ssz_bytes(decoded_buffer)?, @@ -1035,7 +1035,6 @@ mod tests { BlobsByRangeRequest { start_slot: 0, count: 10, - max_blobs_per_block: Spec::max_blobs_per_block(), } } @@ -1181,7 +1180,7 @@ mod tests { } /// Verifies that requests we send are encoded in a way that we would correctly decode too. 
- fn encode_then_decode_request(req: RequestType, fork_name: ForkName, spec: &ChainSpec) { + fn encode_then_decode_request(req: RequestType, fork_name: ForkName, spec: &ChainSpec) { let fork_context = Arc::new(fork_context(fork_name)); let max_packet_size = max_rpc_size(&fork_context, spec.max_chunk_size as usize); let protocol = ProtocolId::new(req.versioned_protocol(), Encoding::SSZSnappy); @@ -1778,7 +1777,7 @@ mod tests { fn test_encode_then_decode_request() { let chain_spec = Spec::default_spec(); - let requests: &[RequestType] = &[ + let requests: &[RequestType] = &[ RequestType::Ping(ping_message()), RequestType::Status(status_message()), RequestType::Goodbye(GoodbyeReason::Fault), diff --git a/beacon_node/lighthouse_network/src/rpc/handler.rs b/beacon_node/lighthouse_network/src/rpc/handler.rs index 74ccb85dcc..e76d6d2786 100644 --- a/beacon_node/lighthouse_network/src/rpc/handler.rs +++ b/beacon_node/lighthouse_network/src/rpc/handler.rs @@ -20,7 +20,6 @@ use slog::{crit, debug, trace}; use smallvec::SmallVec; use std::{ collections::{hash_map::Entry, VecDeque}, - marker::PhantomData, pin::Pin, sync::Arc, task::{Context, Poll}, @@ -97,7 +96,7 @@ where events_out: SmallVec<[HandlerEvent; 4]>, /// Queue of outbound substreams to open. - dial_queue: SmallVec<[(Id, RequestType); 4]>, + dial_queue: SmallVec<[(Id, RequestType); 4]>, /// Current number of concurrent outbound substreams being opened. dial_negotiated: u32, @@ -207,7 +206,7 @@ pub enum OutboundSubstreamState { /// The framed negotiated substream. substream: Box>, /// Keeps track of the actual request sent. - request: RequestType, + request: RequestType, }, /// Closing an outbound substream> Closing(Box>), @@ -275,7 +274,7 @@ where } /// Opens an outbound substream with a request. 
- fn send_request(&mut self, id: Id, req: RequestType) { + fn send_request(&mut self, id: Id, req: RequestType) { match self.state { HandlerState::Active => { self.dial_queue.push((id, req)); @@ -331,7 +330,7 @@ where type ToBehaviour = HandlerEvent; type InboundProtocol = RPCProtocol; type OutboundProtocol = OutboundRequestContainer; - type OutboundOpenInfo = (Id, RequestType); // Keep track of the id and the request + type OutboundOpenInfo = (Id, RequestType); // Keep track of the id and the request type InboundOpenInfo = (); fn listen_protocol(&self) -> SubstreamProtocol { @@ -789,7 +788,6 @@ where req: req.clone(), fork_context: self.fork_context.clone(), max_rpc_size: self.listen_protocol().upgrade().max_rpc_size, - phantom: PhantomData, }, (), ) @@ -907,7 +905,7 @@ where fn on_fully_negotiated_outbound( &mut self, substream: OutboundFramed, - (id, request): (Id, RequestType), + (id, request): (Id, RequestType), ) { self.dial_negotiated -= 1; // Reset any io-retries counter. @@ -963,7 +961,7 @@ where } fn on_dial_upgrade_error( &mut self, - request_info: (Id, RequestType), + request_info: (Id, RequestType), error: StreamUpgradeError, ) { let (id, req) = request_info; diff --git a/beacon_node/lighthouse_network/src/rpc/methods.rs b/beacon_node/lighthouse_network/src/rpc/methods.rs index 912fda3606..bb8bfb0e20 100644 --- a/beacon_node/lighthouse_network/src/rpc/methods.rs +++ b/beacon_node/lighthouse_network/src/rpc/methods.rs @@ -8,6 +8,7 @@ use ssz_derive::{Decode, Encode}; use ssz_types::{typenum::U256, VariableList}; use std::collections::BTreeMap; use std::fmt::Display; +use std::marker::PhantomData; use std::ops::Deref; use std::sync::Arc; use strum::IntoStaticStr; @@ -93,19 +94,27 @@ pub struct Ping { variant_attributes(derive(Clone, Debug, PartialEq, Serialize),) )] #[derive(Clone, Debug, PartialEq)] -pub struct MetadataRequest; +pub struct MetadataRequest { + _phantom_data: PhantomData, +} -impl MetadataRequest { +impl MetadataRequest { pub fn new_v1() 
-> Self { - Self::V1(MetadataRequestV1 {}) + Self::V1(MetadataRequestV1 { + _phantom_data: PhantomData, + }) } pub fn new_v2() -> Self { - Self::V2(MetadataRequestV2 {}) + Self::V2(MetadataRequestV2 { + _phantom_data: PhantomData, + }) } pub fn new_v3() -> Self { - Self::V3(MetadataRequestV3 {}) + Self::V3(MetadataRequestV3 { + _phantom_data: PhantomData, + }) } } @@ -315,14 +324,11 @@ pub struct BlobsByRangeRequest { /// The number of slots from the start slot. pub count: u64, - - /// maximum number of blobs in a single block. - pub max_blobs_per_block: usize, } impl BlobsByRangeRequest { - pub fn max_blobs_requested(&self) -> u64 { - self.count.saturating_mul(self.max_blobs_per_block as u64) + pub fn max_blobs_requested(&self) -> u64 { + self.count.saturating_mul(E::max_blobs_per_block() as u64) } } @@ -338,7 +344,7 @@ pub struct DataColumnsByRangeRequest { } impl DataColumnsByRangeRequest { - pub fn max_requested(&self) -> u64 { + pub fn max_requested(&self) -> u64 { self.count.saturating_mul(self.columns.len() as u64) } diff --git a/beacon_node/lighthouse_network/src/rpc/mod.rs b/beacon_node/lighthouse_network/src/rpc/mod.rs index ed4da463ff..7d091da766 100644 --- a/beacon_node/lighthouse_network/src/rpc/mod.rs +++ b/beacon_node/lighthouse_network/src/rpc/mod.rs @@ -61,7 +61,7 @@ pub enum RPCSend { /// /// The `Id` is given by the application making the request. These /// go over *outbound* connections. - Request(Id, RequestType), + Request(Id, RequestType), /// A response sent from Lighthouse. /// /// The `SubstreamId` must correspond to the RPC-given ID of the original request received from the @@ -79,7 +79,7 @@ pub enum RPCReceived { /// /// The `SubstreamId` is given by the `RPCHandler` as it identifies this request with the /// *inbound* substream over which it is managed. - Request(Request), + Request(Request), /// A response received from the outside. 
/// /// The `Id` corresponds to the application given ID of the original request sent to the @@ -113,10 +113,10 @@ impl RequestId { /// An Rpc Request. #[derive(Debug, Clone)] -pub struct Request { +pub struct Request { pub id: RequestId, pub substream_id: SubstreamId, - pub r#type: RequestType, + pub r#type: RequestType, } impl std::fmt::Display for RPCSend { @@ -221,7 +221,7 @@ impl RPC { /// Submits an RPC request. /// /// The peer must be connected for this to succeed. - pub fn send_request(&mut self, peer_id: PeerId, request_id: Id, req: RequestType) { + pub fn send_request(&mut self, peer_id: PeerId, request_id: Id, req: RequestType) { let event = if let Some(self_limiter) = self.self_limiter.as_mut() { match self_limiter.allows(peer_id, request_id, req) { Ok(event) => event, diff --git a/beacon_node/lighthouse_network/src/rpc/outbound.rs b/beacon_node/lighthouse_network/src/rpc/outbound.rs index 1037139f2f..b614313a84 100644 --- a/beacon_node/lighthouse_network/src/rpc/outbound.rs +++ b/beacon_node/lighthouse_network/src/rpc/outbound.rs @@ -7,7 +7,6 @@ use futures::future::BoxFuture; use futures::prelude::{AsyncRead, AsyncWrite}; use futures::{FutureExt, SinkExt}; use libp2p::core::{OutboundUpgrade, UpgradeInfo}; -use std::marker::PhantomData; use std::sync::Arc; use tokio_util::{ codec::Framed, @@ -20,14 +19,13 @@ use types::{EthSpec, ForkContext}; // `OutboundUpgrade` #[derive(Debug, Clone)] -pub struct OutboundRequestContainer { - pub req: RequestType, +pub struct OutboundRequestContainer { + pub req: RequestType, pub fork_context: Arc, pub max_rpc_size: usize, - pub phantom: PhantomData, } -impl UpgradeInfo for OutboundRequestContainer { +impl UpgradeInfo for OutboundRequestContainer { type Info = ProtocolId; type InfoIter = Vec; diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index b4f6dac4fa..16c3a13391 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ 
b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -686,7 +686,7 @@ pub fn rpc_data_column_limits() -> RpcLimits { // The inbound protocol reads the request, decodes it and returns the stream to the protocol // handler to respond to once ready. -pub type InboundOutput = (RequestType, InboundFramed); +pub type InboundOutput = (RequestType, InboundFramed); pub type InboundFramed = Framed>>>, SSZSnappyInboundCodec>; @@ -754,7 +754,7 @@ where } #[derive(Debug, Clone, PartialEq)] -pub enum RequestType { +pub enum RequestType { Status(StatusMessage), Goodbye(GoodbyeReason), BlocksByRange(OldBlocksByRangeRequest), @@ -768,11 +768,11 @@ pub enum RequestType { LightClientFinalityUpdate, LightClientUpdatesByRange(LightClientUpdatesByRangeRequest), Ping(Ping), - MetaData(MetadataRequest), + MetaData(MetadataRequest), } /// Implements the encoding per supported protocol for `RPCRequest`. -impl RequestType { +impl RequestType { /* These functions are used in the handler for stream management */ /// Maximum number of responses expected for this request. 
@@ -782,10 +782,10 @@ impl RequestType { RequestType::Goodbye(_) => 0, RequestType::BlocksByRange(req) => *req.count(), RequestType::BlocksByRoot(req) => req.block_roots().len() as u64, - RequestType::BlobsByRange(req) => req.max_blobs_requested(), + RequestType::BlobsByRange(req) => req.max_blobs_requested::(), RequestType::BlobsByRoot(req) => req.blob_ids.len() as u64, RequestType::DataColumnsByRoot(req) => req.data_column_ids.len() as u64, - RequestType::DataColumnsByRange(req) => req.max_requested(), + RequestType::DataColumnsByRange(req) => req.max_requested::(), RequestType::Ping(_) => 1, RequestType::MetaData(_) => 1, RequestType::LightClientBootstrap(_) => 1, @@ -1027,7 +1027,7 @@ impl std::error::Error for RPCError { } } -impl std::fmt::Display for RequestType { +impl std::fmt::Display for RequestType { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { RequestType::Status(status) => write!(f, "Status Message: {}", status), diff --git a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs index e11f7f0e73..ecbacc8c11 100644 --- a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs +++ b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs @@ -9,6 +9,7 @@ use std::pin::Pin; use std::task::{Context, Poll}; use std::time::{Duration, Instant}; use tokio::time::Interval; +use types::EthSpec; /// Nanoseconds since a given time. 
// Maintained as u64 to reduce footprint @@ -261,7 +262,7 @@ pub trait RateLimiterItem { fn max_responses(&self) -> u64; } -impl RateLimiterItem for super::RequestType { +impl RateLimiterItem for super::RequestType { fn protocol(&self) -> Protocol { self.versioned_protocol().protocol() } diff --git a/beacon_node/lighthouse_network/src/rpc/self_limiter.rs b/beacon_node/lighthouse_network/src/rpc/self_limiter.rs index 9c68e0793d..e968ad11e3 100644 --- a/beacon_node/lighthouse_network/src/rpc/self_limiter.rs +++ b/beacon_node/lighthouse_network/src/rpc/self_limiter.rs @@ -19,8 +19,8 @@ use super::{ /// A request that was rate limited or waiting on rate limited requests for the same peer and /// protocol. -struct QueuedRequest { - req: RequestType, +struct QueuedRequest { + req: RequestType, request_id: Id, } @@ -28,7 +28,7 @@ pub(crate) struct SelfRateLimiter { /// Requests queued for sending per peer. This requests are stored when the self rate /// limiter rejects them. Rate limiting is based on a Peer and Protocol basis, therefore /// are stored in the same way. - delayed_requests: HashMap<(PeerId, Protocol), VecDeque>>, + delayed_requests: HashMap<(PeerId, Protocol), VecDeque>>, /// The delay required to allow a peer's outbound request per protocol. next_peer_request: DelayQueue<(PeerId, Protocol)>, /// Rate limiter for our own requests. @@ -70,7 +70,7 @@ impl SelfRateLimiter { &mut self, peer_id: PeerId, request_id: Id, - req: RequestType, + req: RequestType, ) -> Result, Error> { let protocol = req.versioned_protocol().protocol(); // First check that there are not already other requests waiting to be sent. 
@@ -101,9 +101,9 @@ impl SelfRateLimiter { limiter: &mut RateLimiter, peer_id: PeerId, request_id: Id, - req: RequestType, + req: RequestType, log: &Logger, - ) -> Result, (QueuedRequest, Duration)> { + ) -> Result, (QueuedRequest, Duration)> { match limiter.allows(&peer_id, &req) { Ok(()) => Ok(BehaviourAction::NotifyHandler { peer_id, diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index 462612e40a..056b6be24d 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -80,7 +80,7 @@ pub enum NetworkEvent { /// Identifier of the request. All responses to this request must use this id. id: PeerRequestId, /// Request the peer sent. - request: rpc::Request, + request: rpc::Request, }, ResponseReceived { /// Peer that sent the response. @@ -966,7 +966,7 @@ impl Network { &mut self, peer_id: PeerId, request_id: AppRequestId, - request: RequestType, + request: RequestType, ) -> Result<(), (AppRequestId, RPCError)> { // Check if the peer is connected before sending an RPC request if !self.swarm.is_connected(&peer_id) { @@ -1179,7 +1179,7 @@ impl Network { /// Sends a METADATA response to a peer. 
fn send_meta_data_response( &mut self, - _req: MetadataRequest, + _req: MetadataRequest, id: PeerRequestId, request_id: rpc::RequestId, peer_id: PeerId, diff --git a/beacon_node/lighthouse_network/tests/rpc_tests.rs b/beacon_node/lighthouse_network/tests/rpc_tests.rs index b5125a2d6b..f721c8477c 100644 --- a/beacon_node/lighthouse_network/tests/rpc_tests.rs +++ b/beacon_node/lighthouse_network/tests/rpc_tests.rs @@ -327,7 +327,6 @@ fn test_blobs_by_range_chunked_rpc() { let rpc_request = RequestType::BlobsByRange(BlobsByRangeRequest { start_slot: 0, count: slot_count, - max_blobs_per_block: E::max_blobs_per_block(), }); // BlocksByRange Response diff --git a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs index 709cbe5b12..6d32806713 100644 --- a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs @@ -893,7 +893,7 @@ impl NetworkBeaconProcessor { ); // Should not send more than max request blocks - if req.max_blobs_requested() > self.chain.spec.max_request_blob_sidecars { + if req.max_blobs_requested::() > self.chain.spec.max_request_blob_sidecars { return Err(( RpcErrorResponse::InvalidRequest, "Request exceeded `MAX_REQUEST_BLOBS_SIDECARS`", @@ -1098,7 +1098,7 @@ impl NetworkBeaconProcessor { ); // Should not send more than max request data columns - if req.max_requested() > self.chain.spec.max_request_data_column_sidecars { + if req.max_requested::() > self.chain.spec.max_request_data_column_sidecars { return Err(( RpcErrorResponse::InvalidRequest, "Request exceeded `MAX_REQUEST_BLOBS_SIDECARS`", diff --git a/beacon_node/network/src/network_beacon_processor/tests.rs b/beacon_node/network/src/network_beacon_processor/tests.rs index e9805eb5ba..9d774d97c1 100644 --- a/beacon_node/network/src/network_beacon_processor/tests.rs +++ b/beacon_node/network/src/network_beacon_processor/tests.rs @@ -30,9 +30,9 
@@ use std::time::Duration; use tokio::sync::mpsc; use types::blob_sidecar::FixedBlobSidecarList; use types::{ - Attestation, AttesterSlashing, BlobSidecar, BlobSidecarList, Epoch, EthSpec, Hash256, - MainnetEthSpec, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, - SignedVoluntaryExit, Slot, SubnetId, + Attestation, AttesterSlashing, BlobSidecar, BlobSidecarList, Epoch, Hash256, MainnetEthSpec, + ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedVoluntaryExit, Slot, + SubnetId, }; type E = MainnetEthSpec; @@ -366,7 +366,6 @@ impl TestRig { BlobsByRangeRequest { start_slot: 0, count, - max_blobs_per_block: E::max_blobs_per_block(), }, ) .unwrap(); diff --git a/beacon_node/network/src/router.rs b/beacon_node/network/src/router.rs index a445cd6ea3..e1badfda9d 100644 --- a/beacon_node/network/src/router.rs +++ b/beacon_node/network/src/router.rs @@ -58,7 +58,7 @@ pub enum RouterMessage { RPCRequestReceived { peer_id: PeerId, id: PeerRequestId, - request: rpc::Request, + request: rpc::Request, }, /// An RPC response has been received. RPCResponseReceived { @@ -193,11 +193,11 @@ impl Router { /* RPC - Related functionality */ /// A new RPC request has been received from the network. - fn handle_rpc_request( + fn handle_rpc_request( &mut self, peer_id: PeerId, request_id: PeerRequestId, - rpc_request: rpc::Request, + rpc_request: rpc::Request, ) { if !self.network_globals.peers.read().is_connected(&peer_id) { debug!(self.log, "Dropping request of disconnected peer"; "peer_id" => %peer_id, "request" => ?rpc_request); @@ -836,7 +836,7 @@ impl HandlerNetworkContext { } /// Sends a request to the network task. 
- pub fn send_processor_request(&mut self, peer_id: PeerId, request: RequestType) { + pub fn send_processor_request(&mut self, peer_id: PeerId, request: RequestType) { self.inform_network(NetworkMessage::SendRequest { peer_id, request_id: AppRequestId::Router, diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 269744dc05..5a66cb7f30 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -62,7 +62,7 @@ pub enum NetworkMessage { /// Send an RPC request to the libp2p service. SendRequest { peer_id: PeerId, - request: RequestType, + request: RequestType, request_id: AppRequestId, }, /// Send a successful Response to the libp2p service. diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index eb42e697cd..5f7778ffcc 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -401,7 +401,6 @@ impl SyncNetworkContext { request: RequestType::BlobsByRange(BlobsByRangeRequest { start_slot: *request.start_slot(), count: *request.count(), - max_blobs_per_block: T::EthSpec::max_blobs_per_block(), }), request_id: AppRequestId::Sync(SyncRequestId::RangeBlockAndBlobs { id }), })