diff --git a/Cargo.lock b/Cargo.lock index 69204ccaec..5a63ab1e72 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3120,13 +3120,16 @@ dependencies = [ "context_deserialize", "educe", "eip_3076", + "enr", "eth2_keystore", "ethereum_serde_utils", "ethereum_ssz", "ethereum_ssz_derive", "futures", "futures-util", + "libp2p-identity", "mediatype", + "multiaddr", "pretty_reqwest_error", "proto_array", "rand 0.9.2", @@ -6078,6 +6081,7 @@ dependencies = [ "metrics", "operation_pool", "parking_lot", + "paste", "rand 0.8.5", "rand 0.9.2", "rand_chacha 0.3.1", diff --git a/Cargo.toml b/Cargo.toml index 100a916c50..98e8c057b5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -271,7 +271,7 @@ tracing_samplers = { path = "common/tracing_samplers" } tree_hash = "0.12.0" tree_hash_derive = "0.12.0" typenum = "1" -types = { path = "consensus/types" } +types = { path = "consensus/types", features = ["saturating-arith"] } url = "2" uuid = { version = "0.8", features = ["serde", "v4"] } validator_client = { path = "validator_client" } diff --git a/Makefile b/Makefile index 9e2b1d24c5..0995a869f4 100644 --- a/Makefile +++ b/Makefile @@ -36,8 +36,12 @@ PROFILE ?= release RECENT_FORKS_BEFORE_GLOAS=electra fulu # List of all recent hard forks. This list is used to set env variables for http_api tests +# Include phase0 to test the code paths in sync that are pre blobs RECENT_FORKS=electra fulu gloas +# For network tests include phase0 to cover genesis syncing (blocks without blobs or columns) +TEST_NETWORK_FORKS=phase0 $(RECENT_FORKS_BEFORE_GLOAS) + # Extra flags for Cargo CARGO_INSTALL_EXTRA_FLAGS?= @@ -226,12 +230,15 @@ test-op-pool-%: # Run the tests in the `network` crate for all known forks. 
# TODO(EIP-7732) Extend to support gloas by using RECENT_FORKS instead -test-network: $(patsubst %,test-network-%,$(RECENT_FORKS_BEFORE_GLOAS)) +test-network: $(patsubst %,test-network-%,$(TEST_NETWORK_FORKS)) test-network-%: - env FORK_NAME=$* cargo nextest run --release \ - --features "fork_from_env,$(TEST_FEATURES)" \ + env FORK_NAME=$* cargo nextest run --no-fail-fast --release \ + --features "fork_from_env,fake_crypto,$(TEST_FEATURES)" \ -p network + env FORK_NAME=$* cargo nextest run --no-fail-fast --release \ + --features "fork_from_env,$(TEST_FEATURES)" \ + -p network crypto_on # Run the tests in the `slasher` crate for all supported database backends. test-slasher: diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index 5e1c41b830..eec8836ff4 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -19,7 +19,7 @@ alloy-primitives = { workspace = true } bitvec = { workspace = true } bls = { workspace = true } educe = { workspace = true } -eth2 = { workspace = true, features = ["lighthouse"] } +eth2 = { workspace = true, features = ["lighthouse", "network"] } eth2_network_config = { workspace = true } ethereum_hashing = { workspace = true } ethereum_serde_utils = { workspace = true } diff --git a/beacon_node/beacon_chain/src/attestation_verification.rs b/beacon_node/beacon_chain/src/attestation_verification.rs index faa396966f..667bafe445 100644 --- a/beacon_node/beacon_chain/src/attestation_verification.rs +++ b/beacon_node/beacon_chain/src/attestation_verification.rs @@ -61,8 +61,9 @@ use tracing::{debug, error}; use tree_hash::TreeHash; use types::{ Attestation, AttestationData, AttestationRef, BeaconCommittee, - BeaconStateError::NoCommitteeFound, ChainSpec, CommitteeIndex, Epoch, EthSpec, Hash256, - IndexedAttestation, SelectionProof, SignedAggregateAndProof, SingleAttestation, Slot, SubnetId, + BeaconStateError::NoCommitteeFound, ChainSpec, CommitteeIndex, Epoch, EthSpec, 
ForkName, + Hash256, IndexedAttestation, SelectionProof, SignedAggregateAndProof, SingleAttestation, Slot, + SubnetId, }; pub use batch::{batch_verify_aggregated_attestations, batch_verify_unaggregated_attestations}; @@ -160,6 +161,12 @@ pub enum Error { /// /// The peer has sent an invalid message. CommitteeIndexNonZero(usize), + /// The validator index is set to an invalid value after Gloas. + /// + /// ## Peer scoring + /// + /// The peer has sent an invalid message. + CommitteeIndexInvalid, /// The `attestation.data.beacon_block_root` block is unknown. /// /// ## Peer scoring @@ -550,8 +557,12 @@ impl<'a, T: BeaconChainTypes> IndexedAggregatedAttestation<'a, T> { } .tree_hash_root(); + let fork_name = chain + .spec + .fork_name_at_slot::(attestation.data().slot); + // [New in Electra:EIP7549] - verify_committee_index(attestation)?; + verify_committee_index(attestation, fork_name)?; if chain .observed_attestations @@ -595,6 +606,17 @@ impl<'a, T: BeaconChainTypes> IndexedAggregatedAttestation<'a, T> { // attestation and do not delay consideration for later. let head_block = verify_head_block_is_known(chain, attestation.data(), None)?; + // [New in Gloas]: If the attested block is from the same slot as the attestation, + // index must be 0. + if fork_name.gloas_enabled() + && head_block.slot == attestation.data().slot + && attestation.data().index != 0 + { + return Err(Error::CommitteeIndexNonZero( + attestation.data().index as usize, + )); + } + // Check the attestation target root is consistent with the head root. 
// // This check is not in the specification, however we guard against it since it opens us up @@ -871,7 +893,12 @@ impl<'a, T: BeaconChainTypes> IndexedUnaggregatedAttestation<'a, T> { let fork_name = chain .spec .fork_name_at_slot::(attestation.data.slot); - if fork_name.electra_enabled() { + if fork_name.gloas_enabled() { + // [New in Gloas] + if attestation.data.index >= 2 { + return Err(Error::CommitteeIndexInvalid); + } + } else if fork_name.electra_enabled() { // [New in Electra:EIP7549] if attestation.data.index != 0 { return Err(Error::CommitteeIndexNonZero( @@ -890,6 +917,17 @@ impl<'a, T: BeaconChainTypes> IndexedUnaggregatedAttestation<'a, T> { chain.config.import_max_skip_slots, )?; + // [New in Gloas]: If the attested block is from the same slot as the attestation, + // index must be 0. + if fork_name.gloas_enabled() + && head_block.slot == attestation.data.slot + && attestation.data.index != 0 + { + return Err(Error::CommitteeIndexNonZero( + attestation.data.index as usize, + )); + } + // Check the attestation target root is consistent with the head root. verify_attestation_target_root::(&head_block, &attestation.data)?; @@ -1404,7 +1442,10 @@ pub fn verify_signed_aggregate_signatures( /// Verify that the `attestation` committee index is properly set for the attestation's fork. /// This function will only apply verification post-Electra. -pub fn verify_committee_index(attestation: AttestationRef) -> Result<(), Error> { +pub fn verify_committee_index( + attestation: AttestationRef, + fork_name: ForkName, +) -> Result<(), Error> { if let Ok(committee_bits) = attestation.committee_bits() { // Check to ensure that the attestation is for a single committee. let num_committee_bits = get_committee_indices::(committee_bits); @@ -1414,11 +1455,18 @@ pub fn verify_committee_index(attestation: AttestationRef) -> Res )); } - // Ensure the attestation index is set to zero post Electra. 
- if attestation.data().index != 0 { - return Err(Error::CommitteeIndexNonZero( - attestation.data().index as usize, - )); + // Ensure the attestation index is valid for the fork. + let index = attestation.data().index; + if fork_name.gloas_enabled() { + // [New in Gloas]: index must be < 2. + if index >= 2 { + return Err(Error::CommitteeIndexInvalid); + } + } else { + // [New in Electra:EIP7549]: index must be 0. + if index != 0 { + return Err(Error::CommitteeIndexNonZero(index as usize)); + } } } Ok(()) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 81398ee282..832d1a3a62 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -1669,7 +1669,7 @@ impl BeaconChain { let validator_index = *validator_index as usize; committee_cache.get_attestation_duties(validator_index) }) - .collect(); + .collect::, _>>()?; Ok((duties, dependent_root)) }, diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 9bb6757341..e0943d5d93 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -1798,10 +1798,12 @@ pub fn check_block_relevancy( ) -> Result { let block = signed_block.message(); + let present_slot = chain.slot()?; + // Do not process blocks from the future. - if block.slot() > chain.slot()? 
{ + if block.slot() > present_slot { return Err(BlockError::FutureSlot { - present_slot: chain.slot()?, + present_slot, block_slot: block.slot(), }); } diff --git a/beacon_node/beacon_chain/src/block_verification_types.rs b/beacon_node/beacon_chain/src/block_verification_types.rs index 6a028e6c98..f98cd40d08 100644 --- a/beacon_node/beacon_chain/src/block_verification_types.rs +++ b/beacon_node/beacon_chain/src/block_verification_types.rs @@ -287,21 +287,6 @@ pub struct BlockImportData { pub consensus_context: ConsensusContext, } -impl BlockImportData { - pub fn __new_for_test( - block_root: Hash256, - state: BeaconState, - parent_block: SignedBeaconBlock>, - ) -> Self { - Self { - block_root, - state, - parent_block, - consensus_context: ConsensusContext::new(Slot::new(0)), - } - } -} - /// Trait for common block operations. pub trait AsBlock { fn slot(&self) -> Slot; diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 37f7ca9811..4c82c93ba3 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -372,8 +372,8 @@ where // Initialize anchor info before attempting to write the genesis state. // Since v4.4.0 we will set the anchor with a dummy state upper limit in order to prevent - // historic states from being retained (unless `--reconstruct-historic-states` is set). - let retain_historic_states = self.chain_config.reconstruct_historic_states; + // historic states from being retained (unless `--archive` is set). + let retain_historic_states = self.chain_config.archive; let genesis_beacon_block = genesis_block(&mut beacon_state, &self.spec)?; self.pending_io_batch.push( store @@ -529,7 +529,7 @@ where // case it will be stored in the hot DB. In this case, we need to ensure the store's anchor // is initialised prior to storing the state, as the anchor is required for working out // hdiff storage strategies. 
- let retain_historic_states = self.chain_config.reconstruct_historic_states; + let retain_historic_states = self.chain_config.archive; self.pending_io_batch.push( store .init_anchor_info( @@ -1126,9 +1126,7 @@ where ); // Check for states to reconstruct (in the background). - if beacon_chain.config.reconstruct_historic_states - && beacon_chain.store.get_oldest_block_slot() == 0 - { + if beacon_chain.config.archive && beacon_chain.store.get_oldest_block_slot() == 0 { beacon_chain.store_migrator.process_reconstruction(); } diff --git a/beacon_node/beacon_chain/src/chain_config.rs b/beacon_node/beacon_chain/src/chain_config.rs index ad923000e2..e9cc4f24e9 100644 --- a/beacon_node/beacon_chain/src/chain_config.rs +++ b/beacon_node/beacon_chain/src/chain_config.rs @@ -38,7 +38,7 @@ pub struct ChainConfig { /// If `None`, there is no weak subjectivity verification. pub weak_subjectivity_checkpoint: Option, /// Determine whether to reconstruct historic states, usually after a checkpoint sync. - pub reconstruct_historic_states: bool, + pub archive: bool, /// The max size of a message that can be sent over the network. pub max_network_size: usize, /// Maximum percentage of the head committee weight at which to attempt re-orging the canonical head. 
@@ -130,7 +130,7 @@ impl Default for ChainConfig { Self { import_max_skip_slots: None, weak_subjectivity_checkpoint: None, - reconstruct_historic_states: false, + archive: false, max_network_size: 10 * 1_048_576, // 10M re_org_head_threshold: Some(DEFAULT_RE_ORG_HEAD_THRESHOLD), re_org_parent_threshold: Some(DEFAULT_RE_ORG_PARENT_THRESHOLD), diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index f7bd646f82..7260a4aca0 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -698,6 +698,8 @@ impl DataAvailabilityCheckerInner { pub fn remove_pre_execution_block(&self, block_root: &Hash256) { // The read lock is immediately dropped so we can safely remove the block from the cache. if let Some(BlockProcessStatus::NotValidated(_, _)) = self.get_cached_block(block_root) { + // If the block is execution invalid, this status is permanent and idempotent to this + // block_root. We drop its components (e.g. columns) because they will never be useful. 
self.critical.write().pop(block_root); } } diff --git a/beacon_node/beacon_chain/src/early_attester_cache.rs b/beacon_node/beacon_chain/src/early_attester_cache.rs index 8d9eb950f3..752e4d1a96 100644 --- a/beacon_node/beacon_chain/src/early_attester_cache.rs +++ b/beacon_node/beacon_chain/src/early_attester_cache.rs @@ -2,6 +2,7 @@ use crate::data_availability_checker::{AvailableBlock, AvailableBlockData}; use crate::{BeaconChainError as Error, metrics}; use parking_lot::RwLock; use proto_array::Block as ProtoBlock; +use safe_arith::SafeArith; use std::sync::Arc; use tracing::instrument; use types::*; @@ -59,12 +60,13 @@ impl CommitteeLengths { slots_per_epoch, committees_per_slot, committee_index as usize, - ); + )?; + let epoch_committee_count = committees_per_slot.safe_mul(slots_per_epoch)?; let range = compute_committee_range_in_epoch( - epoch_committee_count(committees_per_slot, slots_per_epoch), + epoch_committee_count, index_in_epoch, self.active_validator_indices_len, - ) + )? .ok_or(Error::EarlyAttesterCacheError)?; range diff --git a/beacon_node/beacon_chain/src/historical_blocks.rs b/beacon_node/beacon_chain/src/historical_blocks.rs index 45ae9d7b84..3a3c3739c7 100644 --- a/beacon_node/beacon_chain/src/historical_blocks.rs +++ b/beacon_node/beacon_chain/src/historical_blocks.rs @@ -305,10 +305,7 @@ impl BeaconChain { // If backfill has completed and the chain is configured to reconstruct historic states, // send a message to the background migrator instructing it to begin reconstruction. // This can only happen if we have backfilled all the way to genesis. 
- if backfill_complete - && self.genesis_backfill_slot == Slot::new(0) - && self.config.reconstruct_historic_states - { + if backfill_complete && self.genesis_backfill_slot == Slot::new(0) && self.config.archive { self.store_migrator.process_reconstruction(); } diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index f816dbac53..096a0516fc 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -818,7 +818,11 @@ where } pub fn get_full_block(&self, block_root: &Hash256) -> RpcBlock { - let block = self.chain.get_blinded_block(block_root).unwrap().unwrap(); + let block = self + .chain + .get_blinded_block(block_root) + .unwrap() + .unwrap_or_else(|| panic!("block root does not exist in harness {block_root:?}")); let full_block = self.chain.store.make_full_block(block_root, block).unwrap(); self.build_rpc_block_from_store_blobs(Some(*block_root), Arc::new(full_block)) } diff --git a/beacon_node/beacon_chain/tests/attestation_verification.rs b/beacon_node/beacon_chain/tests/attestation_verification.rs index 208798dfdf..96071be89f 100644 --- a/beacon_node/beacon_chain/tests/attestation_verification.rs +++ b/beacon_node/beacon_chain/tests/attestation_verification.rs @@ -54,7 +54,7 @@ fn get_harness(validator_count: usize) -> BeaconChainHarness bool { + self.harness + .spec + .fork_name_at_slot::(self.valid_attestation.data.slot) + .gloas_enabled() + } + pub fn earliest_valid_attestation_slot(&self) -> Slot { let offset = if self .harness @@ -522,6 +529,44 @@ impl GossipTester { self } + + /// Like `inspect_aggregate_err`, but only runs the check if gloas is enabled. + /// If gloas is not enabled, this is a no-op that returns self. 
+ pub fn inspect_aggregate_err_if_gloas( + self, + desc: &str, + get_attn: G, + inspect_err: I, + ) -> Self + where + G: Fn(&Self, &mut SignedAggregateAndProof), + I: Fn(&Self, AttnError), + { + if self.is_gloas() { + self.inspect_aggregate_err(desc, get_attn, inspect_err) + } else { + self + } + } + + /// Like `inspect_unaggregate_err`, but only runs the check if gloas is enabled. + /// If gloas is not enabled, this is a no-op that returns self. + pub fn inspect_unaggregate_err_if_gloas( + self, + desc: &str, + get_attn: G, + inspect_err: I, + ) -> Self + where + G: Fn(&Self, &mut SingleAttestation, &mut SubnetId, &ChainSpec), + I: Fn(&Self, AttnError), + { + if self.is_gloas() { + self.inspect_unaggregate_err(desc, get_attn, inspect_err) + } else { + self + } + } } /// Tests verification of `SignedAggregateAndProof` from the gossip network. #[tokio::test] @@ -854,6 +899,27 @@ async fn aggregated_gossip_verification() { )) }, ) + /* + * [New in Gloas]: attestation.data.index must be < 2 + */ + .inspect_aggregate_err_if_gloas( + "gloas: aggregate with index >= 2", + |_, a| match a.to_mut() { + SignedAggregateAndProofRefMut::Base(_) => { + panic!("Expected Electra attestation variant"); + } + SignedAggregateAndProofRefMut::Electra(att) => { + att.message.aggregate.data.index = 2; + } + }, + |_, err| { + assert!( + matches!(err, AttnError::CommitteeIndexInvalid), + "expected CommitteeIndexInvalid, got {:?}", + err + ) + }, + ) // NOTE: from here on, the tests are stateful, and rely on the valid attestation having // been seen. 
.import_valid_aggregate() @@ -1071,6 +1137,22 @@ async fn unaggregated_gossip_verification() { )) }, ) + /* + * [New in Gloas]: attestation.data.index must be < 2 + */ + .inspect_unaggregate_err_if_gloas( + "gloas: attestation with index >= 2", + |_, a, _, _| { + a.data.index = 2; + }, + |_, err| { + assert!( + matches!(err, AttnError::CommitteeIndexInvalid), + "expected CommitteeIndexInvalid, got {:?}", + err + ) + }, + ) // NOTE: from here on, the tests are stateful, and rely on the valid attestation having // been seen. .import_valid_unaggregate() @@ -1700,3 +1782,180 @@ async fn aggregated_attestation_verification_use_head_state_fork() { ); } } + +/// [New in Gloas]: Tests that unaggregated attestations with `data.index == 1` are rejected +/// when `head_block.slot == attestation.data.slot`. +/// +/// This test only runs when `FORK_NAME=gloas` is set with `fork_from_env` feature. +// TODO(EIP-7732): Enable this test once gloas block production works in test harness. +// `state.latest_execution_payload_header()` not available in Gloas. +#[ignore] +#[tokio::test] +async fn gloas_unaggregated_attestation_same_slot_index_must_be_zero() { + let harness = get_harness(VALIDATOR_COUNT); + + // Skip this test if not running with gloas fork + if !harness + .spec + .fork_name_at_epoch(Epoch::new(0)) + .gloas_enabled() + { + return; + } + + // Extend the chain out a few epochs so we have some chain depth to play with. 
+ harness + .extend_chain( + MainnetEthSpec::slots_per_epoch() as usize * 3 - 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + // Produce a block in the current slot (this creates the same-slot scenario) + harness + .extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::SomeValidators(vec![]), + ) + .await; + + let current_slot = harness.chain.slot().expect("should get slot"); + let head = harness.chain.head_snapshot(); + + // Verify head block is in the current slot + assert_eq!( + head.beacon_block.slot(), + current_slot, + "head block should be in current slot for same-slot test" + ); + + // Produce an attestation for the current slot + let (mut attestation, _attester_sk, subnet_id) = + get_valid_unaggregated_attestation(&harness.chain); + + // Verify we have a same-slot scenario + let attested_block_slot = harness + .chain + .canonical_head + .fork_choice_read_lock() + .get_block(&attestation.data.beacon_block_root) + .expect("block should exist") + .slot; + assert_eq!( + attested_block_slot, attestation.data.slot, + "attested block slot should equal attestation slot for same-slot test" + ); + + // index == 1 should be rejected when head_block.slot == attestation.data.slot + attestation.data.index = 1; + let result = harness + .chain + .verify_unaggregated_attestation_for_gossip(&attestation, Some(subnet_id)); + assert!( + matches!(result, Err(AttnError::CommitteeIndexNonZero(_))), + "gloas: attestation with index == 1 when head_block.slot == attestation.data.slot should be rejected, got {:?}", + result.err() + ); +} + +/// [New in Gloas]: Tests that aggregated attestations with `data.index == 1` are rejected +/// when `head_block.slot == attestation.data.slot`. +/// +/// This test only runs when `FORK_NAME=gloas` is set with `fork_from_env` feature. +// TODO(EIP-7732): Enable this test once gloas block production works in test harness. 
+// `state.latest_execution_payload_header()` not available in Gloas. +#[ignore] +#[tokio::test] +async fn gloas_aggregated_attestation_same_slot_index_must_be_zero() { + let harness = get_harness(VALIDATOR_COUNT); + + // Skip this test if not running with gloas fork + if !harness + .spec + .fork_name_at_epoch(Epoch::new(0)) + .gloas_enabled() + { + return; + } + + // Extend the chain out a few epochs so we have some chain depth to play with. + harness + .extend_chain( + MainnetEthSpec::slots_per_epoch() as usize * 3 - 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + // Produce a block in the current slot (this creates the same-slot scenario) + harness + .extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::SomeValidators(vec![]), + ) + .await; + + let current_slot = harness.chain.slot().expect("should get slot"); + let head = harness.chain.head_snapshot(); + + // Verify head block is in the current slot + assert_eq!( + head.beacon_block.slot(), + current_slot, + "head block should be in current slot for same-slot test" + ); + + // Produce an attestation for the current slot + let (valid_attestation, _attester_sk, _subnet_id) = + get_valid_unaggregated_attestation(&harness.chain); + + // Verify we have a same-slot scenario + let attested_block_slot = harness + .chain + .canonical_head + .fork_choice_read_lock() + .get_block(&valid_attestation.data.beacon_block_root) + .expect("block should exist") + .slot; + assert_eq!( + attested_block_slot, valid_attestation.data.slot, + "attested block slot should equal attestation slot for same-slot test" + ); + + // Convert to aggregate + let committee = head + .beacon_state + .get_beacon_committee(current_slot, valid_attestation.committee_index) + .expect("should get committee"); + let fork_name = harness + .spec + .fork_name_at_slot::(valid_attestation.data.slot); + let aggregate_attestation = + single_attestation_to_attestation(&valid_attestation, 
committee.committee, fork_name) + .unwrap(); + + let (mut valid_aggregate, _, _) = + get_valid_aggregated_attestation(&harness.chain, aggregate_attestation); + + // index == 1 should be rejected when head_block.slot == attestation.data.slot + match valid_aggregate.to_mut() { + SignedAggregateAndProofRefMut::Base(att) => { + att.message.aggregate.data.index = 1; + } + SignedAggregateAndProofRefMut::Electra(att) => { + att.message.aggregate.data.index = 1; + } + } + + let result = harness + .chain + .verify_aggregated_attestation_for_gossip(&valid_aggregate); + assert!( + matches!(result, Err(AttnError::CommitteeIndexNonZero(_))), + "gloas: aggregate with index == 1 when head_block.slot == attestation.data.slot should be rejected, got {:?}", + result.err() + ); +} diff --git a/beacon_node/beacon_chain/tests/blob_verification.rs b/beacon_node/beacon_chain/tests/blob_verification.rs index e39c53729f..ee61177b2a 100644 --- a/beacon_node/beacon_chain/tests/blob_verification.rs +++ b/beacon_node/beacon_chain/tests/blob_verification.rs @@ -29,7 +29,7 @@ fn get_harness( let harness = BeaconChainHarness::builder(MainnetEthSpec) .spec(spec) .chain_config(ChainConfig { - reconstruct_historic_states: true, + archive: true, ..ChainConfig::default() }) .keypairs(KEYPAIRS[0..validator_count].to_vec()) diff --git a/beacon_node/beacon_chain/tests/block_verification.rs b/beacon_node/beacon_chain/tests/block_verification.rs index 417d2811dd..d214ea6b15 100644 --- a/beacon_node/beacon_chain/tests/block_verification.rs +++ b/beacon_node/beacon_chain/tests/block_verification.rs @@ -119,7 +119,7 @@ fn get_harness( let harness = BeaconChainHarness::builder(MainnetEthSpec) .default_spec() .chain_config(ChainConfig { - reconstruct_historic_states: true, + archive: true, ..ChainConfig::default() }) .keypairs(KEYPAIRS[0..validator_count].to_vec()) diff --git a/beacon_node/beacon_chain/tests/column_verification.rs b/beacon_node/beacon_chain/tests/column_verification.rs index 
ca9893941a..9941c957e2 100644 --- a/beacon_node/beacon_chain/tests/column_verification.rs +++ b/beacon_node/beacon_chain/tests/column_verification.rs @@ -32,7 +32,7 @@ fn get_harness( let harness = BeaconChainHarness::builder(MainnetEthSpec) .spec(spec) .chain_config(ChainConfig { - reconstruct_historic_states: true, + archive: true, ..ChainConfig::default() }) .keypairs(KEYPAIRS[0..validator_count].to_vec()) diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index f1e52de27b..eb8e57a5d5 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -50,7 +50,7 @@ impl InvalidPayloadRig { let harness = BeaconChainHarness::builder(MainnetEthSpec) .spec(spec.into()) .chain_config(ChainConfig { - reconstruct_historic_states: true, + archive: true, ..ChainConfig::default() }) .deterministic_keypairs(VALIDATOR_COUNT) diff --git a/beacon_node/beacon_chain/tests/rewards.rs b/beacon_node/beacon_chain/tests/rewards.rs index 94ad97c963..bc7c98041f 100644 --- a/beacon_node/beacon_chain/tests/rewards.rs +++ b/beacon_node/beacon_chain/tests/rewards.rs @@ -29,7 +29,7 @@ static KEYPAIRS: LazyLock> = fn get_harness(spec: ChainSpec) -> BeaconChainHarness> { let chain_config = ChainConfig { - reconstruct_historic_states: true, + archive: true, ..Default::default() }; @@ -48,7 +48,7 @@ fn get_harness(spec: ChainSpec) -> BeaconChainHarness> { fn get_electra_harness(spec: ChainSpec) -> BeaconChainHarness> { let chain_config = ChainConfig { - reconstruct_historic_states: true, + archive: true, ..Default::default() }; diff --git a/beacon_node/beacon_chain/tests/schema_stability.rs b/beacon_node/beacon_chain/tests/schema_stability.rs index 3dc009366d..8200748ae6 100644 --- a/beacon_node/beacon_chain/tests/schema_stability.rs +++ b/beacon_node/beacon_chain/tests/schema_stability.rs @@ -70,7 +70,7 @@ async fn schema_stability() { let store = 
get_store(&datadir, store_config, spec.clone()); let chain_config = ChainConfig { - reconstruct_historic_states: true, + archive: true, ..ChainConfig::default() }; diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 5410f26a5d..6bea5f6013 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -100,7 +100,7 @@ fn get_harness( ) -> TestHarness { // Most tests expect to retain historic states, so we use this as the default. let chain_config = ChainConfig { - reconstruct_historic_states: true, + archive: true, ..ChainConfig::default() }; get_harness_generic( @@ -118,7 +118,7 @@ fn get_harness_import_all_data_columns( // Most tests expect to retain historic states, so we use this as the default. let chain_config = ChainConfig { ignore_ws_check: true, - reconstruct_historic_states: true, + archive: true, ..ChainConfig::default() }; get_harness_generic( @@ -2876,7 +2876,7 @@ async fn reproduction_unaligned_checkpoint_sync_pruned_payload() { slot_clock.set_slot(harness.get_current_slot().as_u64()); let chain_config = ChainConfig { - reconstruct_historic_states: true, + archive: true, ..ChainConfig::default() }; @@ -3030,9 +3030,9 @@ async fn weak_subjectivity_sync_test( slot_clock.set_slot(harness.get_current_slot().as_u64()); let chain_config = ChainConfig { - // Set reconstruct_historic_states to true from the start in the genesis case. This makes + // Set archive to true from the start in the genesis case. This makes // some of the later checks more uniform across the genesis/non-genesis cases. 
- reconstruct_historic_states: checkpoint_slot == 0, + archive: checkpoint_slot == 0, ..ChainConfig::default() }; @@ -3685,7 +3685,7 @@ async fn process_blocks_and_attestations_for_unaligned_checkpoint() { let temp = tempdir().unwrap(); let store = get_store(&temp); let chain_config = ChainConfig { - reconstruct_historic_states: false, + archive: false, ..ChainConfig::default() }; let harness = get_harness_generic( @@ -4110,16 +4110,13 @@ async fn revert_minority_fork_on_resume() { // version is correct. This is the easiest schema test to write without historic versions of // Lighthouse on-hand, but has the disadvantage that the min version needs to be adjusted manually // as old downgrades are deprecated. -async fn schema_downgrade_to_min_version( - store_config: StoreConfig, - reconstruct_historic_states: bool, -) { +async fn schema_downgrade_to_min_version(store_config: StoreConfig, archive: bool) { let num_blocks_produced = E::slots_per_epoch() * 4; let db_path = tempdir().unwrap(); let spec = test_spec::(); let chain_config = ChainConfig { - reconstruct_historic_states, + archive, ..ChainConfig::default() }; @@ -4174,7 +4171,7 @@ async fn schema_downgrade_to_min_version( .build(); // Check chain dump for appropriate range depending on whether this is an archive node. 
- let chain_dump_start_slot = if reconstruct_historic_states { + let chain_dump_start_slot = if archive { Slot::new(0) } else { store.get_split_slot() @@ -5154,7 +5151,7 @@ async fn ancestor_state_root_prior_to_split() { ..StoreConfig::default() }; let chain_config = ChainConfig { - reconstruct_historic_states: false, + archive: false, ..ChainConfig::default() }; @@ -5247,7 +5244,7 @@ async fn replay_from_split_state() { ..StoreConfig::default() }; let chain_config = ChainConfig { - reconstruct_historic_states: false, + archive: false, ..ChainConfig::default() }; diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index fb86a1a845..b052ba66f1 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -33,7 +33,7 @@ fn get_harness(validator_count: usize) -> BeaconChainHarness BeaconChainHarness> { let chain_config = ChainConfig { - reconstruct_historic_states: true, + archive: true, ..Default::default() }; let harness = BeaconChainHarness::builder(MainnetEthSpec) @@ -85,7 +85,7 @@ fn get_harness_semi_supernode( let harness = BeaconChainHarness::builder(MinimalEthSpec) .default_spec() .chain_config(ChainConfig { - reconstruct_historic_states: true, + archive: true, ..Default::default() }) .keypairs(KEYPAIRS[0..validator_count].to_vec()) @@ -950,7 +950,7 @@ async fn pseudo_finalize_test_generic( let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 5; let chain_config = ChainConfig { - reconstruct_historic_states: true, + archive: true, epochs_per_migration, ..Default::default() }; diff --git a/beacon_node/beacon_processor/src/lib.rs b/beacon_node/beacon_processor/src/lib.rs index d3e9133542..33a00bfa49 100644 --- a/beacon_node/beacon_processor/src/lib.rs +++ b/beacon_node/beacon_processor/src/lib.rs @@ -243,12 +243,15 @@ impl From for WorkEvent { }, }, ReadyWork::RpcBlock(QueuedRpcBlock { - beacon_block_root: _, + beacon_block_root, process_fn, ignore_fn: _, }) => Self { 
drop_during_sync: false, - work: Work::RpcBlock { process_fn }, + work: Work::RpcBlock { + process_fn, + beacon_block_root, + }, }, ReadyWork::IgnoredRpcBlock(IgnoredRpcBlock { process_fn }) => Self { drop_during_sync: false, @@ -389,6 +392,7 @@ pub enum Work { GossipLightClientFinalityUpdate(BlockingFn), GossipLightClientOptimisticUpdate(BlockingFn), RpcBlock { + beacon_block_root: Hash256, process_fn: AsyncFn, }, RpcBlobs { @@ -479,7 +483,7 @@ pub enum WorkType { } impl Work { - fn str_id(&self) -> &'static str { + pub fn str_id(&self) -> &'static str { self.to_type().into() } @@ -1432,7 +1436,10 @@ impl BeaconProcessor { beacon_block_root: _, process_fn, } => task_spawner.spawn_async(process_fn), - Work::RpcBlock { process_fn } + Work::RpcBlock { + process_fn, + beacon_block_root: _, + } | Work::RpcBlobs { process_fn } | Work::RpcCustodyColumn(process_fn) | Work::ColumnReconstruction(process_fn) => task_spawner.spawn_async(process_fn), diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml index c443e94574..a23ea948e4 100644 --- a/beacon_node/execution_layer/Cargo.toml +++ b/beacon_node/execution_layer/Cargo.toml @@ -13,7 +13,7 @@ arc-swap = "1.6.0" bls = { workspace = true } builder_client = { path = "../builder_client" } bytes = { workspace = true } -eth2 = { workspace = true, features = ["events", "lighthouse"] } +eth2 = { workspace = true, features = ["events", "lighthouse", "network"] } ethereum_serde_utils = { workspace = true } ethereum_ssz = { workspace = true } fixed_bytes = { workspace = true } diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index 6b247a4cd4..8591359f15 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -18,6 +18,7 @@ use ssz_types::VariableList; use std::cmp::max; use 
std::collections::HashMap; use std::sync::Arc; +use tracing::warn; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; use types::{ @@ -537,6 +538,21 @@ impl ExecutionBlockGenerator { .contains_key(&forkchoice_state.finalized_block_hash); if unknown_head_block_hash || unknown_safe_block_hash || unknown_finalized_block_hash { + if unknown_head_block_hash { + warn!(?head_block_hash, "Received unknown head block hash"); + } + if unknown_safe_block_hash { + warn!( + safe_block_hash = ?forkchoice_state.safe_block_hash, + "Received unknown safe block hash" + ); + } + if unknown_finalized_block_hash { + warn!( + finalized_block_hash = ?forkchoice_state.finalized_block_hash, + "Received unknown finalized block hash" + ) + } return Ok(JsonForkchoiceUpdatedV1Response { payload_status: JsonPayloadStatusV1 { status: JsonPayloadStatusV1Status::Syncing, diff --git a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs index 2168ed8961..53eb3b5166 100644 --- a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs +++ b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs @@ -5,6 +5,7 @@ use crate::test_utils::{DEFAULT_CLIENT_VERSION, DEFAULT_MOCK_EL_PAYLOAD_VALUE_WE use serde::{Deserialize, de::DeserializeOwned}; use serde_json::Value as JsonValue; use std::sync::Arc; +use tracing::debug; pub const GENERIC_ERROR_CODE: i64 = -1234; pub const BAD_PARAMS_ERROR_CODE: i64 = -32602; @@ -28,6 +29,8 @@ pub async fn handle_rpc( .ok_or_else(|| "missing/invalid params field".to_string()) .map_err(|s| (s, GENERIC_ERROR_CODE))?; + debug!(method, "Mock execution engine"); + match method { ETH_SYNCING => ctx .syncing_response @@ -517,6 +520,12 @@ pub async fn handle_rpc( _ => unreachable!(), }; + debug!( + ?payload_attributes, + ?forkchoice_state, + "ENGINE_FORKCHOICE_UPDATED" + ); + // validate method called correctly according to fork time if let Some(pa) = payload_attributes.as_ref() { match ctx diff --git 
a/beacon_node/http_api/Cargo.toml b/beacon_node/http_api/Cargo.toml index 6211ac6726..78e7af71f4 100644 --- a/beacon_node/http_api/Cargo.toml +++ b/beacon_node/http_api/Cargo.toml @@ -14,7 +14,7 @@ bytes = { workspace = true } context_deserialize = { workspace = true } directory = { workspace = true } either = { workspace = true } -eth2 = { workspace = true, features = ["lighthouse"] } +eth2 = { workspace = true, features = ["lighthouse", "network"] } ethereum_serde_utils = { workspace = true } ethereum_ssz = { workspace = true } execution_layer = { workspace = true } diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 22e7e7450e..969755fbe5 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -2160,12 +2160,9 @@ pub fn serve( let discovery_addresses = enr.multiaddr_p2p_udp(); Ok(api_types::GenericResponse::from(api_types::IdentityData { peer_id: network_globals.local_peer_id().to_base58(), - enr: enr.to_base64(), - p2p_addresses: p2p_addresses.iter().map(|a| a.to_string()).collect(), - discovery_addresses: discovery_addresses - .iter() - .map(|a| a.to_string()) - .collect(), + enr, + p2p_addresses, + discovery_addresses, metadata: utils::from_meta_data::( &network_globals.local_metadata, &chain.spec, diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 07aa10fed5..d903d792d5 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -136,7 +136,7 @@ impl ApiTester { let mut harness = BeaconChainHarness::builder(MainnetEthSpec) .spec(spec.clone()) .chain_config(ChainConfig { - reconstruct_historic_states: config.retain_historic_states, + archive: config.retain_historic_states, ..ChainConfig::default() }) .deterministic_keypairs(VALIDATOR_COUNT) @@ -2856,19 +2856,9 @@ impl ApiTester { let expected = IdentityData { peer_id: self.local_enr.peer_id().to_string(), - enr: self.local_enr.to_base64(), - p2p_addresses: self - 
.local_enr - .multiaddr_p2p_tcp() - .iter() - .map(|a| a.to_string()) - .collect(), - discovery_addresses: self - .local_enr - .multiaddr_p2p_udp() - .iter() - .map(|a| a.to_string()) - .collect(), + enr: self.local_enr.clone(), + p2p_addresses: self.local_enr.multiaddr_p2p_tcp(), + discovery_addresses: self.local_enr.multiaddr_p2p_udp(), metadata: MetaData::V2(MetaDataV2 { seq_number: 0, attnets: "0x0000000000000000".to_string(), @@ -2897,7 +2887,7 @@ impl ApiTester { pub async fn test_get_node_peers_by_id(self) -> Self { let result = self .client - .get_node_peers_by_id(&self.external_peer_id.to_string()) + .get_node_peers_by_id(self.external_peer_id) .await .unwrap() .data; diff --git a/beacon_node/lighthouse_network/src/discovery/enr.rs b/beacon_node/lighthouse_network/src/discovery/enr.rs index 4c285ea86c..01a01d55ab 100644 --- a/beacon_node/lighthouse_network/src/discovery/enr.rs +++ b/beacon_node/lighthouse_network/src/discovery/enr.rs @@ -200,11 +200,23 @@ pub fn build_enr( builder.ip6(*ip); } - if let Some(udp4_port) = config.enr_udp4_port { + // If the ENR port is not set, and we are listening over that ip version, use the listening + // discovery port instead. 
+ if let Some(udp4_port) = config.enr_udp4_port.or_else(|| { + config + .listen_addrs() + .v4() + .and_then(|v4_addr| v4_addr.disc_port.try_into().ok()) + }) { builder.udp4(udp4_port.get()); } - if let Some(udp6_port) = config.enr_udp6_port { + if let Some(udp6_port) = config.enr_udp6_port.or_else(|| { + config + .listen_addrs() + .v6() + .and_then(|v6_addr| v6_addr.disc_port.try_into().ok()) + }) { builder.udp6(udp6_port.get()); } diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index 34d8efccd1..b75ca72eda 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -731,7 +731,7 @@ where } } -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, IntoStaticStr)] pub enum RequestType { Status(StatusMessage), Goodbye(GoodbyeReason), diff --git a/beacon_node/lighthouse_network/src/service/api_types.rs b/beacon_node/lighthouse_network/src/service/api_types.rs index f1a4d87de7..d0323bab52 100644 --- a/beacon_node/lighthouse_network/src/service/api_types.rs +++ b/beacon_node/lighthouse_network/src/service/api_types.rs @@ -135,7 +135,7 @@ pub struct CustodyId { pub struct CustodyRequester(pub SingleLookupReqId); /// Application level requests sent to the network. 
-#[derive(Debug, Clone, Copy)] +#[derive(Debug, Clone, Copy, PartialEq)] pub enum AppRequestId { Sync(SyncRequestId), Router, diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index 78dc0c48a7..68c77252ab 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -8,6 +8,7 @@ edition = { workspace = true } # NOTE: This can be run via cargo build --bin lighthouse --features network/disable-backfill disable-backfill = [] fork_from_env = ["beacon_chain/fork_from_env"] +fake_crypto = ["bls/fake_crypto", "kzg/fake_crypto"] portable = ["beacon_chain/portable"] test_logger = [] @@ -57,6 +58,7 @@ k256 = "0.13.4" kzg = { workspace = true } libp2p = { workspace = true } matches = "0.1.8" +paste = { workspace = true } rand_08 = { package = "rand", version = "0.8.5" } rand_chacha = "0.9.0" rand_chacha_03 = { package = "rand_chacha", version = "0.3.1" } diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index a4125f3df0..a9198f1943 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -2415,6 +2415,25 @@ impl NetworkBeaconProcessor { "attn_comm_index_non_zero", ); } + AttnError::CommitteeIndexInvalid => { + /* + * The committee index is invalid after Gloas. + * + * The peer has published an invalid consensus message. 
+ */ + debug!( + %peer_id, + block = ?beacon_block_root, + ?attestation_type, + "Committee index invalid" + ); + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "attn_comm_index_invalid", + ); + } AttnError::UnknownHeadBlock { beacon_block_root } => { trace!( %peer_id, diff --git a/beacon_node/network/src/network_beacon_processor/mod.rs b/beacon_node/network/src/network_beacon_processor/mod.rs index fd67fcde82..e1adf860de 100644 --- a/beacon_node/network/src/network_beacon_processor/mod.rs +++ b/beacon_node/network/src/network_beacon_processor/mod.rs @@ -526,7 +526,10 @@ impl NetworkBeaconProcessor { ); self.try_send(BeaconWorkEvent { drop_during_sync: false, - work: Work::RpcBlock { process_fn }, + work: Work::RpcBlock { + process_fn, + beacon_block_root: block_root, + }, }) } diff --git a/beacon_node/network/src/network_beacon_processor/sync_methods.rs b/beacon_node/network/src/network_beacon_processor/sync_methods.rs index a6b3ea9e4b..629a42c688 100644 --- a/beacon_node/network/src/network_beacon_processor/sync_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/sync_methods.rs @@ -219,7 +219,7 @@ impl NetworkBeaconProcessor { // to be sent from the peers if we already have them. 
let publish_blobs = false; self.fetch_engine_blobs_and_publish(signed_beacon_block, block_root, publish_blobs) - .await + .await; } _ => {} } diff --git a/beacon_node/network/src/network_beacon_processor/tests.rs b/beacon_node/network/src/network_beacon_processor/tests.rs index 49b1c0c262..aa03ee931d 100644 --- a/beacon_node/network/src/network_beacon_processor/tests.rs +++ b/beacon_node/network/src/network_beacon_processor/tests.rs @@ -940,20 +940,20 @@ async fn data_column_reconstruction_at_deadline() { .set_current_time(slot_start + Duration::from_millis(reconstruction_deadline_millis)); let min_columns_for_reconstruction = E::number_of_columns() / 2; + + // Enqueue all columns first - at deadline, reconstruction races with gossip drain for i in 0..min_columns_for_reconstruction { rig.enqueue_gossip_data_columns(i); - rig.assert_event_journal_completes(&[WorkType::GossipDataColumnSidecar]) - .await; } - // Since we're at the reconstruction deadline, reconstruction should be triggered immediately - rig.assert_event_journal_with_timeout( - &[WorkType::ColumnReconstruction.into()], - Duration::from_millis(50), - false, - false, - ) - .await; + // Expect all gossip events + reconstruction + let mut expected_events: Vec = (0..min_columns_for_reconstruction) + .map(|_| WorkType::GossipDataColumnSidecar) + .collect(); + expected_events.push(WorkType::ColumnReconstruction); + + rig.assert_event_journal_contains_ordered(&expected_events) + .await; } // Test the column reconstruction is delayed for columns that arrive for a previous slot. diff --git a/beacon_node/network/src/sync/block_lookups/mod.rs b/beacon_node/network/src/sync/block_lookups/mod.rs index 9065f05753..cbf65505ef 100644 --- a/beacon_node/network/src/sync/block_lookups/mod.rs +++ b/beacon_node/network/src/sync/block_lookups/mod.rs @@ -121,15 +121,24 @@ pub struct BlockLookups { // TODO: Why not index lookups by block_root? 
single_block_lookups: FnvHashMap>, + + /// Used for testing assertions + metrics: BlockLookupsMetrics, } #[cfg(test)] use lighthouse_network::service::api_types::Id; #[cfg(test)] -/// Tuple of `SingleLookupId`, requested block root, awaiting parent block root (if any), -/// and list of peers that claim to have imported this set of block components. -pub(crate) type BlockLookupSummary = (Id, Hash256, Option, Vec); +#[derive(Debug)] +pub(crate) struct BlockLookupSummary { + /// Lookup ID + pub id: Id, + /// Requested block root + pub block_root: Hash256, + /// List of peers that claim to have imported this set of block components. + pub peers: Vec, +} impl BlockLookups { pub fn new() -> Self { @@ -138,9 +147,15 @@ impl BlockLookups { IGNORED_CHAINS_CACHE_EXPIRY_SECONDS, )), single_block_lookups: Default::default(), + metrics: <_>::default(), } } + #[cfg(test)] + pub(crate) fn metrics(&self) -> &BlockLookupsMetrics { + &self.metrics + } + #[cfg(test)] pub(crate) fn insert_ignored_chain(&mut self, block_root: Hash256) { self.ignored_chains.insert(block_root); @@ -155,7 +170,11 @@ impl BlockLookups { pub(crate) fn active_single_lookups(&self) -> Vec { self.single_block_lookups .iter() - .map(|(id, l)| (*id, l.block_root(), l.awaiting_parent(), l.all_peers())) + .map(|(id, l)| BlockLookupSummary { + id: *id, + block_root: l.block_root(), + peers: l.all_peers(), + }) .collect() } @@ -306,7 +325,7 @@ impl BlockLookups { // attributability. A peer can send us garbage blocks over blocks_by_root, and // then correct blocks via blocks_by_range. 
- self.drop_lookup_and_children(*lookup_id); + self.drop_lookup_and_children(*lookup_id, "chain_too_long"); } else { // Should never happen error!( @@ -414,6 +433,7 @@ impl BlockLookups { "Created block lookup" ); metrics::inc_counter(&metrics::SYNC_LOOKUP_CREATED); + self.metrics.created_lookups += 1; let result = lookup.continue_requests(cx); if self.on_lookup_result(id, result, "new_current_lookup", cx) { @@ -513,8 +533,11 @@ impl BlockLookups { /* Error responses */ pub fn peer_disconnected(&mut self, peer_id: &PeerId) { - for (_, lookup) in self.single_block_lookups.iter_mut() { + for (id, lookup) in self.single_block_lookups.iter_mut() { lookup.remove_peer(peer_id); + if lookup.has_no_peers() { + debug!(%id, "Lookup has no peers"); + } } } @@ -566,7 +589,8 @@ impl BlockLookups { let action = match result { BlockProcessingResult::Ok(AvailabilityProcessingStatus::Imported(_)) - | BlockProcessingResult::Err(BlockError::DuplicateFullyImported(..)) => { + | BlockProcessingResult::Err(BlockError::DuplicateFullyImported(..)) + | BlockProcessingResult::Err(BlockError::GenesisBlock) => { // Successfully imported request_state.on_processing_success()?; Action::Continue @@ -747,6 +771,15 @@ impl BlockLookups { let lookup_result = if imported { Ok(LookupResult::Completed) } else { + // A lookup may be in the following state: + // - Block awaiting processing from a different source + // - Blobs downloaded processed, and inserted into the da_checker + // + // At this point the block fails processing (e.g. execution engine offline) and it is + // removed from the da_checker. Note that ALL components are removed from the da_checker + // so when we re-download and process the block we get the error + // MissingComponentsAfterAllProcessed and get stuck. + lookup.reset_requests(); lookup.continue_requests(cx) }; let id = *id; @@ -779,14 +812,17 @@ impl BlockLookups { /// Drops `dropped_id` lookup and all its children recursively. 
Lookups awaiting a parent need /// the parent to make progress to resolve, therefore we must drop them if the parent is /// dropped. - pub fn drop_lookup_and_children(&mut self, dropped_id: SingleLookupId) { + pub fn drop_lookup_and_children(&mut self, dropped_id: SingleLookupId, reason: &'static str) { if let Some(dropped_lookup) = self.single_block_lookups.remove(&dropped_id) { debug!( id = ?dropped_id, block_root = ?dropped_lookup.block_root(), awaiting_parent = ?dropped_lookup.awaiting_parent(), + reason, "Dropping lookup" ); + metrics::inc_counter_vec(&metrics::SYNC_LOOKUP_DROPPED, &[reason]); + self.metrics.dropped_lookups += 1; let child_lookups = self .single_block_lookups @@ -796,7 +832,7 @@ impl BlockLookups { .collect::>(); for id in child_lookups { - self.drop_lookup_and_children(id); + self.drop_lookup_and_children(id, reason); } } } @@ -814,8 +850,13 @@ impl BlockLookups { Ok(LookupResult::Pending) => true, // no action Ok(LookupResult::Completed) => { if let Some(lookup) = self.single_block_lookups.remove(&id) { - debug!(block = ?lookup.block_root(), id, "Dropping completed lookup"); + debug!( + block = ?lookup.block_root(), + id, + "Dropping completed lookup" + ); metrics::inc_counter(&metrics::SYNC_LOOKUP_COMPLETED); + self.metrics.completed_lookups += 1; // Block imported, continue the requests of pending child blocks self.continue_child_lookups(lookup.block_root(), cx); self.update_metrics(); @@ -829,8 +870,7 @@ impl BlockLookups { Err(LookupRequestError::UnknownLookup) => false, Err(error) => { debug!(id, source, ?error, "Dropping lookup on request error"); - metrics::inc_counter_vec(&metrics::SYNC_LOOKUP_DROPPED, &[error.into()]); - self.drop_lookup_and_children(id); + self.drop_lookup_and_children(id, error.into()); self.update_metrics(); false } @@ -897,7 +937,7 @@ impl BlockLookups { %block_root, "Dropping lookup with no peers" ); - self.drop_lookup_and_children(lookup_id); + self.drop_lookup_and_children(lookup_id, "no_peers"); } } @@ 
-946,7 +986,7 @@ impl BlockLookups { } metrics::inc_counter(&metrics::SYNC_LOOKUPS_STUCK); - self.drop_lookup_and_children(ancestor_stuck_lookup.id); + self.drop_lookup_and_children(ancestor_stuck_lookup.id, "lookup_stuck"); } } @@ -1022,3 +1062,10 @@ impl BlockLookups { } } } + +#[derive(Default, Clone, Debug)] +pub(crate) struct BlockLookupsMetrics { + pub created_lookups: usize, + pub dropped_lookups: usize, + pub completed_lookups: usize, +} diff --git a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs index 43bfe29a84..919526c238 100644 --- a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs +++ b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs @@ -109,6 +109,12 @@ impl SingleBlockLookup { } } + /// Reset the status of all internal requests + pub fn reset_requests(&mut self) { + self.block_request_state = BlockRequestState::new(self.block_root); + self.component_requests = ComponentRequests::WaitingForBlock; + } + /// Return the slot of this lookup's block if it's currently cached as `AwaitingProcessing` pub fn peek_downloaded_block_slot(&self) -> Option { self.block_request_state diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 096ed9c328..c1ab6221dd 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -70,6 +70,7 @@ use slot_clock::SlotClock; use std::ops::Sub; use std::sync::Arc; use std::time::Duration; +use strum::IntoStaticStr; use tokio::sync::mpsc; use tracing::{debug, error, info, trace}; use types::{ @@ -90,7 +91,7 @@ pub const SLOT_IMPORT_TOLERANCE: usize = 32; /// arbitrary number that covers a full slot, but allows recovery if sync get stuck for a few slots. const NOTIFIED_UNKNOWN_ROOT_EXPIRY_SECONDS: u64 = 30; -#[derive(Debug)] +#[derive(Debug, IntoStaticStr)] /// A message that can be sent to the sync manager thread. 
pub enum SyncMessage { /// A useful peer has been discovered. @@ -323,17 +324,18 @@ impl SyncManager { } #[cfg(test)] - pub(crate) fn active_single_lookups(&self) -> Vec { - self.block_lookups.active_single_lookups() + pub(crate) fn send_sync_message(&mut self, sync_message: SyncMessage<::EthSpec>) { + self.network.send_sync_message(sync_message); } #[cfg(test)] - pub(crate) fn active_parent_lookups(&self) -> Vec> { - self.block_lookups - .active_parent_lookups() - .iter() - .map(|c| c.chain.clone()) - .collect() + pub(crate) fn block_lookups(&self) -> &BlockLookups { + &self.block_lookups + } + + #[cfg(test)] + pub(crate) fn range_sync(&self) -> &RangeSync { + &self.range_sync } #[cfg(test)] @@ -512,17 +514,18 @@ impl SyncManager { /// there is no way to guarantee that libp2p always emits a error along with /// the disconnect. fn peer_disconnect(&mut self, peer_id: &PeerId) { - // Inject a Disconnected error on all requests associated with the disconnected peer - // to retry all batches/lookups - for sync_request_id in self.network.peer_disconnected(peer_id) { - self.inject_error(*peer_id, sync_request_id, RPCError::Disconnected); - } - // Remove peer from all data structures self.range_sync.peer_disconnect(&mut self.network, peer_id); let _ = self.backfill_sync.peer_disconnected(peer_id); self.block_lookups.peer_disconnected(peer_id); + // Inject a Disconnected error on all requests associated with the disconnected peer + // to retry all batches/lookups. Only after removing the peer from the data structures to + // avoid sending retry requests to the disconnecting peer. + for sync_request_id in self.network.peer_disconnected(peer_id) { + self.inject_error(*peer_id, sync_request_id, RPCError::Disconnected); + } + // Regardless of the outcome, we update the sync status. 
self.update_sync_state(); } diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index 542625b8a3..7e2c0d9a94 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -17,7 +17,7 @@ use crate::sync::block_lookups::SingleLookupId; use crate::sync::block_sidecar_coupling::CouplingError; use crate::sync::network_context::requests::BlobsByRootSingleBlockRequest; use crate::sync::range_data_column_batch_request::RangeDataColumnBatchRequest; -use beacon_chain::block_verification_types::RpcBlock; +use beacon_chain::block_verification_types::{AsBlock, RpcBlock}; use beacon_chain::{BeaconChain, BeaconChainTypes, BlockProcessStatus, EngineState}; use custody::CustodyRequestResult; use fnv::FnvHashMap; @@ -1095,13 +1095,14 @@ impl SyncNetworkContext { })?; // Include only the blob indexes not yet imported (received through gossip) - let custody_indexes_to_fetch = self + let mut custody_indexes_to_fetch = self .chain .sampling_columns_for_epoch(current_epoch) .iter() .copied() .filter(|index| !custody_indexes_imported.contains(index)) .collect::>(); + custody_indexes_to_fetch.sort_unstable(); if custody_indexes_to_fetch.is_empty() { // No indexes required, do not issue any request @@ -1595,7 +1596,7 @@ impl SyncNetworkContext { ) .map_err(|_| SendErrorProcessor::SendError)?; - debug!(block = ?block_root, id, "Sending block for processing"); + debug!(block = ?block_root, block_slot = %block.slot(), id, "Sending block for processing"); // Lookup sync event safety: If `beacon_processor.send_rpc_beacon_block` returns Ok() sync // must receive a single `SyncMessage::BlockComponentProcessed` with this process type beacon_processor diff --git a/beacon_node/network/src/sync/network_context/custody.rs b/beacon_node/network/src/sync/network_context/custody.rs index 61ae95ee70..de5d9b6e0b 100644 --- a/beacon_node/network/src/sync/network_context/custody.rs +++ 
b/beacon_node/network/src/sync/network_context/custody.rs @@ -198,7 +198,14 @@ impl ActiveCustodyRequest { cx: &mut SyncNetworkContext, ) -> CustodyRequestResult { let _guard = self.span.clone().entered(); - if self.column_requests.values().all(|r| r.is_downloaded()) { + let total_requests = self.column_requests.len(); + let completed_requests = self + .column_requests + .values() + .filter(|r| r.is_downloaded()) + .count(); + + if completed_requests >= total_requests { // All requests have completed successfully. let mut peers = HashMap::>::new(); let mut seen_timestamps = vec![]; @@ -222,6 +229,7 @@ impl ActiveCustodyRequest { let active_request_count_by_peer = cx.active_request_count_by_peer(); let mut columns_to_request_by_peer = HashMap::>::new(); + let mut columns_without_peers = vec![]; let lookup_peers = self.lookup_peers.read(); // Create deterministic hasher per request to ensure consistent peer ordering within // this request (avoiding fragmentation) while varying selection across different requests @@ -256,6 +264,7 @@ impl ActiveCustodyRequest { return Err(Error::NoPeer(*column_index)); } else { // Do not issue requests if there is no custody peer on this column + columns_without_peers.push(*column_index); } } } @@ -270,10 +279,13 @@ impl ActiveCustodyRequest { lookup_peers = lookup_peers.len(), "Requesting {} columns from {} peers", columns_requested_count, peer_requests, ); - } else { + } else if !columns_without_peers.is_empty() { debug!( lookup_peers = lookup_peers.len(), - "No column peers found for look up", + total_requests, + completed_requests, + ?columns_without_peers, + "No column peers found for lookup", ); } @@ -288,7 +300,7 @@ impl ActiveCustodyRequest { }, // If peer is in the lookup peer set, it claims to have imported the block and // must have its columns in custody. 
In that case, set `true = enforce max_requests` - // and downscore if data_columns_by_root does not returned the expected custody + // and downscore if data_columns_by_root does not return the expected custody // columns. For the rest of peers, don't downscore if columns are missing. lookup_peers.contains(&peer_id), ) diff --git a/beacon_node/network/src/sync/range_sync/chain_collection.rs b/beacon_node/network/src/sync/range_sync/chain_collection.rs index 1d57ee6c3d..b91b88b55c 100644 --- a/beacon_node/network/src/sync/range_sync/chain_collection.rs +++ b/beacon_node/network/src/sync/range_sync/chain_collection.rs @@ -41,6 +41,13 @@ pub enum RangeSyncState { pub type SyncChainStatus = Result, &'static str>; +#[cfg(test)] +#[derive(Default, Debug)] +pub struct ChainCollectionMetrics { + pub chains_added: usize, + pub chains_removed: usize, +} + /// A collection of finalized and head chains currently being processed. pub struct ChainCollection { /// The beacon chain for processing. @@ -51,6 +58,9 @@ pub struct ChainCollection { head_chains: FnvHashMap>, /// The current sync state of the process. state: RangeSyncState, + #[cfg(test)] + /// Used for testing assertions + metrics: ChainCollectionMetrics, } impl ChainCollection { @@ -60,12 +70,23 @@ impl ChainCollection { finalized_chains: FnvHashMap::default(), head_chains: FnvHashMap::default(), state: RangeSyncState::Idle, + #[cfg(test)] + metrics: <_>::default(), } } + #[cfg(test)] + pub(crate) fn metrics(&self) -> &ChainCollectionMetrics { + &self.metrics + } + /// Updates the Syncing state of the collection after a chain is removed. 
fn on_chain_removed(&mut self, id: &ChainId, was_syncing: bool, sync_type: RangeSyncType) { metrics::inc_counter_vec(&metrics::SYNCING_CHAINS_REMOVED, &[sync_type.as_str()]); + #[cfg(test)] + { + self.metrics.chains_removed += 1; + } self.update_metrics(); match self.state { @@ -510,6 +531,10 @@ impl ChainCollection { ); collection.insert(id, new_chain); metrics::inc_counter_vec(&metrics::SYNCING_CHAINS_ADDED, &[sync_type.as_str()]); + #[cfg(test)] + { + self.metrics.chains_added += 1; + } self.update_metrics(); } } diff --git a/beacon_node/network/src/sync/range_sync/range.rs b/beacon_node/network/src/sync/range_sync/range.rs index c9656ad1d0..86625444be 100644 --- a/beacon_node/network/src/sync/range_sync/range.rs +++ b/beacon_node/network/src/sync/range_sync/range.rs @@ -98,6 +98,11 @@ where self.failed_chains.keys().copied().collect() } + #[cfg(test)] + pub(crate) fn metrics(&self) -> &super::chain_collection::ChainCollectionMetrics { + self.chains.metrics() + } + pub fn state(&self) -> SyncChainStatus { self.chains.state() } diff --git a/beacon_node/network/src/sync/tests/lookups.rs b/beacon_node/network/src/sync/tests/lookups.rs index b6e96737d6..769a11d976 100644 --- a/beacon_node/network/src/sync/tests/lookups.rs +++ b/beacon_node/network/src/sync/tests/lookups.rs @@ -1,79 +1,171 @@ +use super::*; use crate::NetworkMessage; -use crate::network_beacon_processor::NetworkBeaconProcessor; -use crate::sync::block_lookups::{ - BlockLookupSummary, PARENT_DEPTH_TOLERANCE, SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS, -}; +use crate::network_beacon_processor::{InvalidBlockStorage, NetworkBeaconProcessor}; +use crate::sync::block_lookups::{BlockLookupSummary, PARENT_DEPTH_TOLERANCE}; use crate::sync::{ SyncMessage, manager::{BlockProcessType, BlockProcessingResult, SyncManager}, }; -use std::sync::Arc; -use std::time::Duration; - -use super::*; - -use crate::sync::block_lookups::common::ResponseType; -use beacon_chain::observed_data_sidecars::Observe; +use 
beacon_chain::blob_verification::KzgVerifiedBlob; +use beacon_chain::custody_context::NodeCustodyType; use beacon_chain::{ - AvailabilityPendingExecutedBlock, AvailabilityProcessingStatus, BlockError, - PayloadVerificationOutcome, PayloadVerificationStatus, - blob_verification::GossipVerifiedBlob, - block_verification_types::{AsBlock, BlockImportData}, - custody_context::NodeCustodyType, + AvailabilityProcessingStatus, BlockError, NotifyExecutionLayer, + block_verification_types::{AsBlock, AvailableBlockData}, data_availability_checker::Availability, test_utils::{ - BeaconChainHarness, EphemeralHarnessType, NumBlobs, generate_rand_block_and_blobs, - generate_rand_block_and_data_columns, test_spec, + AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, NumBlobs, + generate_rand_block_and_blobs, test_spec, }, - validator_monitor::timestamp_now, }; -use beacon_processor::WorkEvent; +use beacon_processor::{BeaconProcessorChannels, DuplicateCache, Work, WorkEvent}; +use educe::Educe; +use itertools::Itertools; use lighthouse_network::discovery::CombinedKey; use lighthouse_network::{ NetworkConfig, NetworkGlobals, PeerId, - rpc::{RPCError, RequestType, RpcErrorResponse}, - service::api_types::{ - AppRequestId, DataColumnsByRootRequestId, DataColumnsByRootRequester, Id, - SingleLookupReqId, SyncRequestId, - }, + rpc::{RPCError, RequestType}, + service::api_types::{AppRequestId, SyncRequestId}, types::SyncState, }; use slot_clock::{SlotClock, TestingSlotClock}; +use std::sync::Arc; +use std::time::Duration; use tokio::sync::mpsc; use tracing::info; use types::{ - BeaconState, BeaconStateBase, BlobSidecar, BlockImportSource, DataColumnSidecar, EthSpec, - ForkContext, ForkName, Hash256, MinimalEthSpec as E, SignedBeaconBlock, Slot, - data::ColumnIndex, - test_utils::{SeedableRng, TestRandom, XorShiftRng}, + BlobSidecar, BlockImportSource, ColumnIndex, DataColumnSidecar, EthSpec, ForkContext, ForkName, + Hash256, MinimalEthSpec as E, 
SignedBeaconBlock, Slot, + test_utils::{SeedableRng, XorShiftRng}, }; const D: Duration = Duration::new(0, 0); -const PARENT_FAIL_TOLERANCE: u8 = SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS; -type DCByRootIds = Vec; -type DCByRootId = (SyncRequestId, Vec); -impl TestRig { - pub fn test_setup() -> Self { - Self::test_setup_with_custody_type(NodeCustodyType::Fullnode) +/// Configuration for how the test rig should respond to sync requests. +/// +/// Controls simulated peer behavior during lookup tests, including RPC errors, +/// invalid responses, and custom block processing results. Use builder methods +/// to configure specific failure scenarios. +#[derive(Default, Educe)] +#[educe(Debug)] +pub struct SimulateConfig { + return_rpc_error: Option, + return_wrong_blocks_n_times: usize, + return_wrong_sidecar_for_block_n_times: usize, + return_no_blocks_n_times: usize, + return_no_data_n_times: usize, + return_too_few_data_n_times: usize, + return_no_columns_on_indices_n_times: usize, + return_no_columns_on_indices: Vec, + skip_by_range_routes: bool, + // Use a callable fn because BlockProcessingResult does not implement Clone + #[educe(Debug(ignore))] + process_result_conditional: + Option Option + Send + Sync>>, + // Import a block directly before processing it (for simulating race conditions) + import_block_before_process: HashSet, +} + +impl SimulateConfig { + fn new() -> Self { + Self::default() } - pub fn test_setup_with_custody_type(node_custody_type: NodeCustodyType) -> Self { + fn happy_path() -> Self { + Self::default() + } + + fn return_no_blocks_always(mut self) -> Self { + self.return_no_blocks_n_times = usize::MAX; + self + } + + fn return_no_blocks_once(mut self) -> Self { + self.return_no_blocks_n_times = 1; + self + } + + fn return_no_data_once(mut self) -> Self { + self.return_no_data_n_times = 1; + self + } + + fn return_wrong_blocks_once(mut self) -> Self { + self.return_wrong_blocks_n_times = 1; + self + } + + fn return_wrong_sidecar_for_block_once(mut self) 
-> Self { + self.return_wrong_sidecar_for_block_n_times = 1; + self + } + + fn return_too_few_data_once(mut self) -> Self { + self.return_too_few_data_n_times = 1; + self + } + + fn return_no_columns_on_indices(mut self, indices: &[ColumnIndex], times: usize) -> Self { + self.return_no_columns_on_indices_n_times = times; + self.return_no_columns_on_indices = indices.to_vec(); + self + } + + fn return_rpc_error(mut self, error: RPCError) -> Self { + self.return_rpc_error = Some(error); + self + } + + fn no_range_sync(mut self) -> Self { + self.skip_by_range_routes = true; + self + } + + fn with_process_result(mut self, f: F) -> Self + where + F: Fn() -> BlockProcessingResult + Send + Sync + 'static, + { + self.process_result_conditional = Some(Box::new(move |_| Some(f()))); + self + } + + fn with_import_block_before_process(mut self, block_root: Hash256) -> Self { + self.import_block_before_process.insert(block_root); + self + } +} + +fn genesis_fork() -> ForkName { + test_spec::().fork_name_at_slot::(Slot::new(0)) +} + +pub(crate) struct TestRigConfig { + fulu_test_type: FuluTestType, + /// Override the node custody type derived from `fulu_test_type` + node_custody_type_override: Option, +} + +impl TestRig { + pub(crate) fn new(test_rig_config: TestRigConfig) -> Self { // Use `fork_from_env` logic to set correct fork epochs - let spec = test_spec::(); + let spec = Arc::new(test_spec::()); + let clock = TestingSlotClock::new( + Slot::new(0), + Duration::from_secs(0), + Duration::from_secs(12), + ); // Initialise a new beacon chain let harness = BeaconChainHarness::>::builder(E) - .spec(Arc::new(spec)) + .spec(spec.clone()) .deterministic_keypairs(1) .fresh_ephemeral_store() .mock_execution_layer() - .testing_slot_clock(TestingSlotClock::new( - Slot::new(0), - Duration::from_secs(0), - Duration::from_secs(12), - )) - .node_custody_type(node_custody_type) + .testing_slot_clock(clock.clone()) + .node_custody_type( + test_rig_config + .node_custody_type_override + 
.unwrap_or_else(|| test_rig_config.fulu_test_type.we_node_custody_type()), + ) .build(); let chain = harness.chain.clone(); @@ -93,12 +185,23 @@ impl TestRig { network_config, chain.spec.clone(), )); - let (beacon_processor, beacon_processor_rx) = NetworkBeaconProcessor::null_for_testing( - globals, + + let BeaconProcessorChannels { + beacon_processor_tx, + beacon_processor_rx, + } = <_>::default(); + + let beacon_processor = NetworkBeaconProcessor { + beacon_processor_send: beacon_processor_tx, + duplicate_cache: DuplicateCache::default(), + chain: chain.clone(), + // TODO: What is this sender used for? + network_tx: mpsc::unbounded_channel().0, sync_tx, - chain.clone(), - harness.runtime.task_executor.clone(), - ); + network_globals: globals.clone(), + invalid_block_storage: InvalidBlockStorage::Disabled, + executor: harness.runtime.task_executor.clone(), + }; let fork_name = chain.spec.fork_name_at_slot::(chain.slot().unwrap()); @@ -119,6 +222,7 @@ impl TestRig { network_rx, network_rx_queue: vec![], sync_rx, + sync_rx_queue: vec![], rng_08, rng, network_globals: beacon_processor.network_globals.clone(), @@ -132,36 +236,985 @@ impl TestRig { ), harness, fork_name, + network_blocks_by_root: <_>::default(), + network_blocks_by_slot: <_>::default(), + penalties: <_>::default(), + seen_lookups: <_>::default(), + requests: <_>::default(), + complete_strategy: <_>::default(), + initial_block_lookups_metrics: <_>::default(), + fulu_test_type: test_rig_config.fulu_test_type, } } - fn test_setup_after_deneb_before_fulu() -> Option { - let r = Self::test_setup(); - if r.after_deneb() && !r.fork_name.fulu_enabled() { - Some(r) + pub fn default() -> Self { + // Before Fulu, FuluTestType is irrelevant + Self::new(TestRigConfig { + fulu_test_type: FuluTestType::WeFullnodeThemSupernode, + node_custody_type_override: None, + }) + } + + pub fn with_custody_type(node_custody_type: NodeCustodyType) -> Self { + Self::new(TestRigConfig { + fulu_test_type: 
FuluTestType::WeFullnodeThemSupernode, + node_custody_type_override: Some(node_custody_type), + }) + } + + /// Runs the sync simulation until all event queues are empty. + /// + /// Processes events from sync_rx (sink), beacon processor, and network queues in fixed + /// priority order each tick. Handles completed work before pulling new requests. + async fn simulate(&mut self, complete_strategy: SimulateConfig) { + self.complete_strategy = complete_strategy; + self.log(&format!( + "Running simulate with config {:?}", + self.complete_strategy + )); + + let mut i = 0; + + loop { + i += 1; + + // Record current status + for BlockLookupSummary { + id, + block_root, + peers, + .. + } in self.active_single_lookups() + { + let lookup = self.seen_lookups.entry(id).or_insert(SeenLookup { + id, + block_root, + seen_peers: <_>::default(), + }); + for peer in peers { + lookup.seen_peers.insert(peer); + } + } + + // Drain all channels into queues + while let Ok(ev) = self.network_rx.try_recv() { + self.network_rx_queue.push(ev); + } + while let Ok(ev) = self.beacon_processor_rx.try_recv() { + self.beacon_processor_rx_queue.push(ev); + } + while let Ok(ev) = self.sync_rx.try_recv() { + self.sync_rx_queue.push(ev); + } + + // Process one event per tick in fixed priority: sink → processor → network + if !self.sync_rx_queue.is_empty() { + let sync_message = self.sync_rx_queue.remove(0); + self.log(&format!( + "Tick {i}: sync_rx event: {}", + Into::<&'static str>::into(&sync_message) + )); + self.sync_manager.handle_message(sync_message); + } else if !self.beacon_processor_rx_queue.is_empty() { + let event = self.beacon_processor_rx_queue.remove(0); + self.log(&format!("Tick {i}: beacon_processor event: {event:?}")); + match event.work { + Work::RpcBlock { + process_fn, + beacon_block_root, + } => { + // Import block before processing if configured (for simulating race conditions) + if self + .complete_strategy + .import_block_before_process + .contains(&beacon_block_root) + { + 
self.log(&format!( + "Importing block {} before processing (race condition simulation)", + beacon_block_root + )); + self.import_block_by_root(beacon_block_root).await; + } + + if let Some(f) = self.complete_strategy.process_result_conditional.as_ref() + && let Some(result) = f(beacon_block_root) + { + let id = self.lookup_by_root(beacon_block_root).id; + self.log(&format!( + "Sending custom process result to lookup id {id}: {result:?}" + )); + self.push_sync_message(SyncMessage::BlockComponentProcessed { + process_type: BlockProcessType::SingleBlock { id }, + result, + }); + } else { + process_fn.await + } + } + Work::RpcBlobs { process_fn } + | Work::RpcCustodyColumn(process_fn) + | Work::ChainSegment(process_fn) => process_fn.await, + Work::Reprocess(_) => {} // ignore + other => panic!("Unsupported Work event {}", other.str_id()), + } + } else if !self.network_rx_queue.is_empty() { + let event = self.network_rx_queue.remove(0); + self.log(&format!("Tick {i}: network_rx event: {event:?}")); + match event { + NetworkMessage::SendRequest { + peer_id, + request, + app_request_id, + } => { + self.simulate_on_request(peer_id, request, app_request_id); + } + NetworkMessage::ReportPeer { peer_id, msg, .. 
} => { + self.penalties.push(ReportedPenalty { peer_id, msg }); + } + _ => {} + } + } else { + break; + } + } + + self.log("No more events in simulation"); + self.log(&format!( + "Lookup metrics: {:?}", + self.sync_manager.block_lookups().metrics() + )); + self.log(&format!( + "Range sync metrics: {:?}", + self.sync_manager.range_sync().metrics() + )); + self.log(&format!( + "Max known slot: {}, Head slot: {}", + self.max_known_slot(), + self.head_slot() + )); + self.log(&format!("Penalties: {:?}", self.penalties)); + self.log(&format!( + "Total requests {}: {:?}", + self.requests.len(), + self.requests_count() + )) + } + + fn simulate_on_request( + &mut self, + peer_id: PeerId, + request: RequestType, + app_req_id: AppRequestId, + ) { + self.requests.push((request.clone(), app_req_id)); + + if let AppRequestId::Sync(req_id) = app_req_id + && let Some(error) = self.complete_strategy.return_rpc_error.take() + { + self.log(&format!( + "Completing request {req_id:?} to {peer_id} with RPCError {error:?}" + )); + self.send_sync_message(SyncMessage::RpcError { + sync_request_id: req_id, + peer_id, + error, + }); + return; + } + + match (request, app_req_id) { + (RequestType::BlocksByRoot(req), AppRequestId::Sync(req_id)) => { + let blocks = + req.block_roots() + .iter() + .filter_map(|block_root| { + if self.complete_strategy.return_no_blocks_n_times > 0 { + self.complete_strategy.return_no_blocks_n_times -= 1; + None + } else if self.complete_strategy.return_wrong_blocks_n_times > 0 { + self.complete_strategy.return_wrong_blocks_n_times -= 1; + Some(Arc::new(self.rand_block())) + } else { + Some(self.network_blocks_by_root + .get(block_root) + .unwrap_or_else(|| { + panic!("Test consumer requested unknown block: {block_root:?}") + }) + .block_cloned()) + } + }) + .collect::>(); + + self.send_rpc_blocks_response(req_id, peer_id, &blocks); + } + + (RequestType::BlobsByRoot(req), AppRequestId::Sync(req_id)) => { + if self.complete_strategy.return_no_data_n_times > 0 { + 
self.complete_strategy.return_no_data_n_times -= 1; + return self.send_rpc_blobs_response(req_id, peer_id, &[]); + } + + let mut blobs = req + .blob_ids + .iter() + .map(|id| { + self.network_blocks_by_root + .get(&id.block_root) + .unwrap_or_else(|| { + panic!("Test consumer requested unknown block: {id:?}") + }) + .block_data() + .and_then(|d| d.blobs()) + .unwrap_or_else(|| panic!("Block {id:?} has no blobs")) + .iter() + .find(|blob| blob.index == id.index) + .unwrap_or_else(|| panic!("Blob id {id:?} not avail")) + .clone() + }) + .collect::>(); + + if self.complete_strategy.return_too_few_data_n_times > 0 { + self.complete_strategy.return_too_few_data_n_times -= 1; + blobs.pop(); + } + + if self + .complete_strategy + .return_wrong_sidecar_for_block_n_times + > 0 + { + self.complete_strategy + .return_wrong_sidecar_for_block_n_times -= 1; + let first = blobs.first_mut().expect("empty blobs"); + let mut blob = Arc::make_mut(first).clone(); + blob.signed_block_header.message.body_root = Hash256::ZERO; + *first = Arc::new(blob); + } + + self.send_rpc_blobs_response(req_id, peer_id, &blobs); + } + + (RequestType::DataColumnsByRoot(req), AppRequestId::Sync(req_id)) => { + if self.complete_strategy.return_no_data_n_times > 0 { + self.complete_strategy.return_no_data_n_times -= 1; + return self.send_rpc_columns_response(req_id, peer_id, &[]); + } + + let will_omit_columns = req.data_column_ids.iter().any(|id| { + id.columns.iter().any(|c| { + self.complete_strategy + .return_no_columns_on_indices + .contains(c) + }) + }); + let columns_to_omit = if will_omit_columns + && self.complete_strategy.return_no_columns_on_indices_n_times > 0 + { + self.log(&format!("OMIT {:?}", req)); + self.complete_strategy.return_no_columns_on_indices_n_times -= 1; + self.complete_strategy.return_no_columns_on_indices.clone() + } else { + vec![] + }; + + let mut columns = req + .data_column_ids + .iter() + .flat_map(|id| { + let block_columns = self + .network_blocks_by_root + 
.get(&id.block_root) + .unwrap_or_else(|| { + panic!("Test consumer requested unknown block: {id:?}") + }) + .block_data() + .and_then(|d| d.data_columns()) + .unwrap_or_else(|| panic!("Block id {id:?} has no columns")); + id.columns + .iter() + .filter(|index| !columns_to_omit.contains(index)) + .map(move |index| { + block_columns + .iter() + .find(|c| *c.index() == *index) + .unwrap_or_else(|| { + panic!("Column {index:?} {:?} not found", id.block_root) + }) + .clone() + }) + }) + .collect::>(); + + if self.complete_strategy.return_too_few_data_n_times > 0 { + self.complete_strategy.return_too_few_data_n_times -= 1; + columns.pop(); + } + + if self + .complete_strategy + .return_wrong_sidecar_for_block_n_times + > 0 + { + self.complete_strategy + .return_wrong_sidecar_for_block_n_times -= 1; + let first = columns.first_mut().expect("empty columns"); + let column = Arc::make_mut(first); + column + .signed_block_header_mut() + .expect("not fulu") + .message + .body_root = Hash256::ZERO; + } + self.send_rpc_columns_response(req_id, peer_id, &columns); + } + + (RequestType::BlocksByRange(req), AppRequestId::Sync(req_id)) => { + if self.complete_strategy.skip_by_range_routes { + return; + } + let blocks = (*req.start_slot()..req.start_slot() + req.count()) + .filter_map(|slot| { + self.network_blocks_by_slot + .get(&Slot::new(slot)) + .map(|block| block.block_cloned()) + }) + .collect::>(); + + self.send_rpc_blocks_response(req_id, peer_id, &blocks); + } + + (RequestType::BlobsByRange(req), AppRequestId::Sync(req_id)) => { + if self.complete_strategy.skip_by_range_routes { + return; + } + + // Note: This function is permissive, blocks may have zero blobs and it won't + // error. 
Some caveats: + // - The genesis block never has blobs + // - Some blocks may not have blobs as the blob count is random + let blobs = (req.start_slot..req.start_slot + req.count) + .filter_map(|slot| self.network_blocks_by_slot.get(&Slot::new(slot))) + .filter_map(|block| block.block_data().and_then(|d| d.blobs())) + .flat_map(|blobs| blobs.into_iter()) + .collect::>(); + self.send_rpc_blobs_response(req_id, peer_id, &blobs); + } + + (RequestType::DataColumnsByRange(req), AppRequestId::Sync(req_id)) => { + if self.complete_strategy.skip_by_range_routes { + return; + } + // Note: This function is permissive, blocks may have zero columns and it won't + // error. Some caveats: + // - The genesis block never has columns + // - Some blocks may not have columns as the blob count is random + let columns = (req.start_slot..req.start_slot + req.count) + .filter_map(|slot| self.network_blocks_by_slot.get(&Slot::new(slot))) + .filter_map(|block| block.block_data().and_then(|d| d.data_columns())) + .flat_map(|columns| { + columns + .into_iter() + .filter(|c| req.columns.contains(c.index())) + }) + .collect::>(); + self.send_rpc_columns_response(req_id, peer_id, &columns); + } + + (RequestType::Status(_req), AppRequestId::Router) => { + // Ignore Status requests for now + } + + other => panic!("Request not supported: {app_req_id:?} {other:?}"), + } + } + + fn send_rpc_blocks_response( + &mut self, + sync_request_id: SyncRequestId, + peer_id: PeerId, + blocks: &[Arc>], + ) { + let slots = blocks.iter().map(|block| block.slot()).collect::>(); + self.log(&format!( + "Completing request {sync_request_id:?} to {peer_id} with blocks {slots:?}" + )); + + for block in blocks { + self.push_sync_message(SyncMessage::RpcBlock { + sync_request_id, + peer_id, + beacon_block: Some(block.clone()), + seen_timestamp: D, + }); + } + self.push_sync_message(SyncMessage::RpcBlock { + sync_request_id, + peer_id, + beacon_block: None, + seen_timestamp: D, + }); + } + + fn send_rpc_blobs_response( + 
&mut self, + sync_request_id: SyncRequestId, + peer_id: PeerId, + blobs: &[Arc>], + ) { + let slots = blobs + .iter() + .map(|block| block.slot()) + .unique() + .collect::>(); + self.log(&format!( + "Completing request {sync_request_id:?} to {peer_id} with blobs {slots:?}" + )); + + for blob in blobs { + self.push_sync_message(SyncMessage::RpcBlob { + sync_request_id, + peer_id, + blob_sidecar: Some(blob.clone()), + seen_timestamp: D, + }); + } + self.push_sync_message(SyncMessage::RpcBlob { + sync_request_id, + peer_id, + blob_sidecar: None, + seen_timestamp: D, + }); + } + + fn send_rpc_columns_response( + &mut self, + sync_request_id: SyncRequestId, + peer_id: PeerId, + columns: &[Arc>], + ) { + let slots = columns + .iter() + .map(|block| block.slot()) + .unique() + .collect::>(); + let indices = columns + .iter() + .map(|column| *column.index()) + .unique() + .collect::>(); + self.log(&format!( + "Completing request {sync_request_id:?} to {peer_id} with columns {slots:?} indices {indices:?}" + )); + + for column in columns { + self.push_sync_message(SyncMessage::RpcDataColumn { + sync_request_id, + peer_id, + data_column: Some(column.clone()), + seen_timestamp: D, + }); + } + self.push_sync_message(SyncMessage::RpcDataColumn { + sync_request_id, + peer_id, + data_column: None, + seen_timestamp: D, + }); + } + + // Preparation steps + + /// Returns the block root of the tip of the built chain + async fn build_chain(&mut self, block_count: usize) -> Hash256 { + let mut blocks = vec![]; + + // Initialise a new beacon chain + let external_harness = BeaconChainHarness::>::builder(E) + .spec(self.harness.spec.clone()) + .deterministic_keypairs(1) + .fresh_ephemeral_store() + .mock_execution_layer() + .testing_slot_clock(self.harness.chain.slot_clock.clone()) + // Make the external harness a supernode so all columns are available + .node_custody_type(NodeCustodyType::Supernode) + .build(); + // Ensure all blocks have data. 
Otherwise, the triggers for unknown blob parent and unknown + // data column parent fail. + external_harness + .execution_block_generator() + .set_min_blob_count(1); + + // Add genesis block for completeness + let genesis_block = external_harness.get_head_block(); + self.network_blocks_by_root + .insert(genesis_block.canonical_root(), genesis_block.clone()); + self.network_blocks_by_slot + .insert(genesis_block.slot(), genesis_block); + + for i in 0..block_count { + external_harness.advance_slot(); + let block_root = external_harness + .extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + let block = external_harness.get_full_block(&block_root); + let block_root = block.canonical_root(); + let block_slot = block.slot(); + self.network_blocks_by_root + .insert(block_root, block.clone()); + self.network_blocks_by_slot.insert(block_slot, block); + self.log(&format!( + "Produced block {} index {i} in external harness", + block_slot, + )); + blocks.push((block_slot, block_root)); + } + + // Re-log to have a nice list of block roots at the end + for block in &blocks { + self.log(&format!("Build chain {block:?}")); + } + + // Auto-update the clock on the main harness to accept the blocks + self.harness + .set_current_slot(external_harness.get_current_slot()); + + blocks.last().expect("empty blocks").1 + } + + fn corrupt_last_block_signature(&mut self) { + let rpc_block = self.get_last_block().clone(); + let mut block = (*rpc_block.block_cloned()).clone(); + let blobs = rpc_block.block_data().and_then(|d| d.blobs()); + let columns = rpc_block.block_data().and_then(|d| d.data_columns()); + *block.signature_mut() = self.valid_signature(); + self.re_insert_block(Arc::new(block), blobs, columns); + } + + fn valid_signature(&mut self) -> bls::Signature { + let keypair = bls::Keypair::random(); + let msg = Hash256::random(); + keypair.sk.sign(msg) + } + + fn corrupt_last_blob_proposer_signature(&mut self) { + let rpc_block = 
self.get_last_block().clone(); + let block = rpc_block.block_cloned(); + let mut blobs = rpc_block + .block_data() + .and_then(|d| d.blobs()) + .expect("no blobs") + .into_iter() + .collect::>(); + let columns = rpc_block.block_data().and_then(|d| d.data_columns()); + let first = blobs.first_mut().expect("empty blobs"); + Arc::make_mut(first).signed_block_header.signature = self.valid_signature(); + let max_blobs = + self.harness + .spec + .max_blobs_per_block(block.slot().epoch(E::slots_per_epoch())) as usize; + let blobs = + types::BlobSidecarList::new(blobs, max_blobs).expect("invalid blob sidecar list"); + self.re_insert_block(block, Some(blobs), columns); + } + + fn corrupt_last_blob_kzg_proof(&mut self) { + let rpc_block = self.get_last_block().clone(); + let block = rpc_block.block_cloned(); + let mut blobs = rpc_block + .block_data() + .and_then(|d| d.blobs()) + .expect("no blobs") + .into_iter() + .collect::>(); + let columns = rpc_block.block_data().and_then(|d| d.data_columns()); + let first = blobs.first_mut().expect("empty blobs"); + Arc::make_mut(first).kzg_proof = kzg::KzgProof::empty(); + let max_blobs = + self.harness + .spec + .max_blobs_per_block(block.slot().epoch(E::slots_per_epoch())) as usize; + let blobs = + types::BlobSidecarList::new(blobs, max_blobs).expect("invalid blob sidecar list"); + self.re_insert_block(block, Some(blobs), columns); + } + + fn corrupt_last_column_proposer_signature(&mut self) { + let rpc_block = self.get_last_block().clone(); + let block = rpc_block.block_cloned(); + let blobs = rpc_block.block_data().and_then(|d| d.blobs()); + let mut columns = rpc_block + .block_data() + .and_then(|d| d.data_columns()) + .expect("no columns"); + let first = columns.first_mut().expect("empty columns"); + Arc::make_mut(first) + .signed_block_header_mut() + .expect("not fulu") + .signature = self.valid_signature(); + self.re_insert_block(block, blobs, Some(columns)); + } + + fn corrupt_last_column_kzg_proof(&mut self) { + let 
rpc_block = self.get_last_block().clone(); + let block = rpc_block.block_cloned(); + let blobs = rpc_block.block_data().and_then(|d| d.blobs()); + let mut columns = rpc_block + .block_data() + .and_then(|d| d.data_columns()) + .expect("no columns"); + let first = columns.first_mut().expect("empty columns"); + let column = Arc::make_mut(first); + let proof = column.kzg_proofs_mut().first_mut().expect("no kzg proofs"); + *proof = kzg::KzgProof::empty(); + self.re_insert_block(block, blobs, Some(columns)); + } + + fn get_last_block(&self) -> &RpcBlock { + let (_, last_block) = self + .network_blocks_by_root + .iter() + .max_by_key(|(_, block)| block.slot()) + .expect("no blocks"); + last_block + } + + fn re_insert_block( + &mut self, + block: Arc>, + blobs: Option>, + columns: Option>, + ) { + self.network_blocks_by_slot.clear(); + self.network_blocks_by_root.clear(); + let block_root = block.canonical_root(); + let block_slot = block.slot(); + let block_data = if let Some(columns) = columns { + Some(AvailableBlockData::new_with_data_columns(columns)) + } else if let Some(blobs) = blobs { + Some(AvailableBlockData::new_with_blobs(blobs)) + } else { + Some(AvailableBlockData::NoData) + }; + let rpc_block = RpcBlock::new( + block, + block_data, + &self.harness.chain.data_availability_checker, + self.harness.chain.spec.clone(), + ) + .unwrap(); + self.network_blocks_by_slot + .insert(block_slot, rpc_block.clone()); + self.network_blocks_by_root.insert(block_root, rpc_block); + } + + /// Trigger a lookup with the last created block + fn trigger_with_last_block(&mut self) { + let peer_id = match self.fulu_test_type.them_node_custody_type() { + NodeCustodyType::Fullnode => self.new_connected_peer(), + NodeCustodyType::Supernode | NodeCustodyType::SemiSupernode => { + self.new_connected_supernode_peer() + } + }; + let last_block = self.get_last_block().canonical_root(); + self.trigger_unknown_block_from_attestation(last_block, peer_id); + } + + fn block_at_slot(&self, slot: 
u64) -> Arc> { + self.network_blocks_by_slot + .get(&Slot::new(slot)) + .unwrap_or_else(|| panic!("No block for slot {slot}")) + .block_cloned() + } + + fn block_root_at_slot(&self, slot: u64) -> Hash256 { + self.block_at_slot(slot).canonical_root() + } + + fn trigger_with_block_at_slot(&mut self, slot: u64) { + let peer_id = self.new_connected_supernode_peer(); + let block = self.block_at_slot(slot); + self.trigger_unknown_block_from_attestation(block.canonical_root(), peer_id); + } + + async fn build_chain_and_trigger_last_block(&mut self, block_count: usize) { + self.build_chain(block_count).await; + self.trigger_with_last_block(); + } + + /// Import a block directly into the chain without going through lookup sync + async fn import_block_by_root(&mut self, block_root: Hash256) { + let rpc_block = self + .network_blocks_by_root + .get(&block_root) + .unwrap_or_else(|| panic!("No block for root {block_root}")) + .clone(); + + self.harness + .chain + .process_block( + block_root, + rpc_block, + NotifyExecutionLayer::Yes, + BlockImportSource::Gossip, + || Ok(()), + ) + .await + .unwrap(); + + self.harness.chain.recompute_head_at_current_slot().await; + } + + fn trigger_with_last_unknown_block_parent(&mut self) { + let peer_id = self.new_connected_supernode_peer(); + let last_block = self.get_last_block().block_cloned(); + self.trigger_unknown_parent_block(peer_id, last_block); + } + + fn trigger_with_last_unknown_blob_parent(&mut self) { + let peer_id = self.new_connected_supernode_peer(); + let blobs = self + .get_last_block() + .block_data() + .and_then(|d| d.blobs()) + .expect("no blobs"); + let blob = blobs.first().expect("empty blobs"); + self.trigger_unknown_parent_blob(peer_id, blob.clone()); + } + + fn trigger_with_last_unknown_data_column_parent(&mut self) { + let peer_id = self.new_connected_supernode_peer(); + let columns = self + .get_last_block() + .block_data() + .and_then(|d| d.data_columns()) + .expect("No data columns"); + let column = 
columns.first().expect("empty columns"); + self.trigger_unknown_parent_column(peer_id, column.clone()); + } + + // Post-test assertions + + fn head_slot(&self) -> Slot { + self.harness.chain.head().head_slot() + } + + fn assert_head_slot(&self, slot: u64) { + assert_eq!(self.head_slot(), Slot::new(slot), "Unexpected head slot"); + } + + fn max_known_slot(&self) -> Slot { + self.network_blocks_by_slot + .keys() + .max() + .copied() + .expect("no blocks") + } + + fn assert_penalties(&self, expected_penalties: &[&'static str]) { + let penalties = self + .penalties + .iter() + .map(|penalty| penalty.msg) + .collect::>(); + if penalties != expected_penalties { + panic!( + "Expected penalties: {:#?} but got {:#?}", + expected_penalties, + self.penalties + .iter() + .map(|p| format!("{} for peer {}", p.msg, p.peer_id)) + .collect::>() + ); + } + } + + fn assert_penalties_of_type(&self, expected_penalty: &'static str) { + if self.penalties.is_empty() { + panic!("No penalties but expected some of type {expected_penalty}"); + } + let non_matching_penalties = self + .penalties + .iter() + .filter(|penalty| penalty.msg != expected_penalty) + .collect::>(); + if !non_matching_penalties.is_empty() { + panic!( + "Found non-matching penalties to {}: {:?}", + expected_penalty, non_matching_penalties + ); + } + } + + fn assert_no_penalties(&mut self) { + if !self.penalties.is_empty() { + panic!("Some downscore events: {:?}", self.penalties); + } + } + fn assert_failed_lookup_sync(&mut self) { + assert!(self.created_lookups() > 0, "no created lookups"); + assert_eq!(self.completed_lookups(), 0, "some completed lookups"); + assert_eq!( + self.dropped_lookups(), + self.created_lookups(), + "not all dropped. 
Current lookups {:?}", + self.active_single_lookups(), + ); + self.assert_empty_network(); + self.assert_no_active_lookups(); + } + + fn assert_successful_lookup_sync(&mut self) { + assert!(self.created_lookups() > 0, "no created lookups"); + assert_eq!(self.dropped_lookups(), 0, "some dropped lookups"); + assert_eq!( + self.completed_lookups(), + self.created_lookups(), + "not all lookups completed. Current lookups {:?}", + self.active_single_lookups(), + ); + self.assert_empty_network(); + self.assert_no_active_lookups(); + } + + /// There is a lookup created with the block that triggers the unknown message that can't be + /// completed because it has zero peers + fn assert_successful_lookup_sync_parent_trigger(&mut self) { + assert!(self.created_lookups() > 0, "no created lookups"); + assert_eq!( + self.completed_lookups() + 1, + self.created_lookups(), + "all completed" + ); + assert_eq!(self.dropped_lookups(), 0, "some dropped lookups"); + self.assert_empty_network(); + } + + fn assert_pending_lookup_sync(&self) { + assert!(self.created_lookups() > 0, "no created lookups"); + assert_eq!(self.dropped_lookups(), 0, "some dropped lookups"); + assert_eq!(self.completed_lookups(), 0, "some completed lookups"); + } + + /// Assert there is at least one range sync chain created and that all sync chains completed + fn assert_successful_range_sync(&self) { + assert!( + self.range_sync_chains_added() > 0, + "No created range sync chains" + ); + assert_eq!( + self.range_sync_chains_added(), + self.range_sync_chains_removed(), + "Not all chains completed" + ); + } + + fn lookup_at_slot(&self, slot: u64) -> &SeenLookup { + let block_root = self.block_root_at_slot(slot); + self.seen_lookups + .values() + .find(|lookup| lookup.block_root == block_root) + .unwrap_or_else(|| panic!("No lookup for block_root {block_root} of slot {slot}")) + } + + fn assert_peers_at_lookup_of_slot(&self, slot: u64, expected_peers: usize) { + let lookup = self.lookup_at_slot(slot); + if 
lookup.seen_peers.len() != expected_peers { + panic!( + "Expected lookup of slot {slot} to have {expected_peers} peers but had {:?}", + lookup.seen_peers + ) + } + } + + /// Total count of unique lookups created + fn created_lookups(&self) -> usize { + // Subtract initial value to allow resetting metrics mid test + self.sync_manager.block_lookups().metrics().created_lookups + - self.initial_block_lookups_metrics.created_lookups + } + + /// Total count of lookups completed or dropped + fn dropped_lookups(&self) -> usize { + // Subtract initial value to allow resetting metrics mid test + self.sync_manager.block_lookups().metrics().dropped_lookups + - self.initial_block_lookups_metrics.dropped_lookups + } + + fn completed_lookups(&self) -> usize { + // Subtract initial value to allow resetting metrics mid test + self.sync_manager + .block_lookups() + .metrics() + .completed_lookups + - self.initial_block_lookups_metrics.completed_lookups + } + + fn capture_metrics_baseline(&mut self) { + self.initial_block_lookups_metrics = self.sync_manager.block_lookups().metrics().clone() + } + + /// Returns the last lookup seen with matching block_root + fn lookup_by_root(&self, block_root: Hash256) -> &SeenLookup { + self.seen_lookups + .values() + .filter(|lookup| lookup.block_root == block_root) + .max_by_key(|lookup| lookup.id) + .unwrap_or_else(|| panic!("No loookup for block_root {block_root}")) + } + + fn range_sync_chains_added(&self) -> usize { + self.sync_manager.range_sync().metrics().chains_added + } + + fn range_sync_chains_removed(&self) -> usize { + self.sync_manager.range_sync().metrics().chains_removed + } + + fn custody_columns(&self) -> &[ColumnIndex] { + self.harness + .chain + .data_availability_checker + .custody_context() + .custody_columns_for_epoch(None, &self.harness.spec) + } + + // Test setup + + fn new_after_deneb() -> Option { + genesis_fork().deneb_enabled().then(Self::default) + } + + fn new_after_deneb_before_fulu() -> Option { + let fork = 
genesis_fork(); + if fork.deneb_enabled() && !fork.fulu_enabled() { + Some(Self::default()) } else { None } } - pub fn test_setup_after_fulu() -> Option { - let r = Self::test_setup(); - if r.fork_name.fulu_enabled() { - Some(r) - } else { - None - } + pub fn new_fulu_peer_test(fulu_test_type: FuluTestType) -> Option { + genesis_fork().fulu_enabled().then(|| { + Self::new(TestRigConfig { + fulu_test_type, + node_custody_type_override: None, + }) + }) } pub fn log(&self, msg: &str) { info!(msg, "TEST_RIG"); } - pub fn after_deneb(&self) -> bool { + pub fn is_after_deneb(&self) -> bool { self.fork_name.deneb_enabled() } - pub fn after_fulu(&self) -> bool { + pub fn is_after_fulu(&self) -> bool { self.fork_name.fulu_enabled() } @@ -170,8 +1223,16 @@ impl TestRig { self.send_sync_message(SyncMessage::UnknownParentBlock(peer_id, block, block_root)) } - fn trigger_unknown_parent_blob(&mut self, peer_id: PeerId, blob: BlobSidecar) { - self.send_sync_message(SyncMessage::UnknownParentBlob(peer_id, blob.into())); + fn trigger_unknown_parent_blob(&mut self, peer_id: PeerId, blob: Arc>) { + self.send_sync_message(SyncMessage::UnknownParentBlob(peer_id, blob)); + } + + fn trigger_unknown_parent_column( + &mut self, + peer_id: PeerId, + column: Arc>, + ) { + self.send_sync_message(SyncMessage::UnknownParentDataColumn(peer_id, column)); } fn trigger_unknown_block_from_attestation(&mut self, block_root: Hash256, peer_id: PeerId) { @@ -180,13 +1241,6 @@ impl TestRig { )); } - /// Drain all sync messages in the sync_rx attached to the beacon processor - fn drain_sync_rx(&mut self) { - while let Ok(sync_message) = self.sync_rx.try_recv() { - self.send_sync_message(sync_message); - } - } - fn rand_block(&mut self) -> SignedBeaconBlock { self.rand_block_and_blobs(NumBlobs::None).0 } @@ -200,105 +1254,36 @@ impl TestRig { generate_rand_block_and_blobs::(fork_name, num_blobs, rng) } - fn rand_block_and_data_columns( - &mut self, - ) -> (SignedBeaconBlock, Vec>>) { - let num_blobs = 
NumBlobs::Number(1); - generate_rand_block_and_data_columns::( - self.fork_name, - num_blobs, - &mut self.rng, - &self.harness.spec, - ) - } - - pub fn rand_block_and_parent( - &mut self, - ) -> (SignedBeaconBlock, SignedBeaconBlock, Hash256, Hash256) { - let parent = self.rand_block(); - let parent_root = parent.canonical_root(); - let mut block = self.rand_block(); - *block.message_mut().parent_root_mut() = parent_root; - let block_root = block.canonical_root(); - (parent, block, parent_root, block_root) - } - pub fn send_sync_message(&mut self, sync_message: SyncMessage) { self.sync_manager.handle_message(sync_message); } + pub fn push_sync_message(&mut self, sync_message: SyncMessage) { + self.sync_manager.send_sync_message(sync_message); + } + fn active_single_lookups(&self) -> Vec { - self.sync_manager.active_single_lookups() + self.sync_manager.block_lookups().active_single_lookups() } fn active_single_lookups_count(&self) -> usize { - self.sync_manager.active_single_lookups().len() - } - - fn active_parent_lookups(&self) -> Vec> { - self.sync_manager.active_parent_lookups() - } - - fn active_parent_lookups_count(&self) -> usize { - self.sync_manager.active_parent_lookups().len() - } - - fn active_range_sync_chain(&self) -> (RangeSyncType, Slot, Slot) { - self.sync_manager.get_range_sync_chains().unwrap().unwrap() + self.active_single_lookups().len() } fn assert_single_lookups_count(&self, count: usize) { assert_eq!( self.active_single_lookups_count(), count, - "Unexpected count of single lookups. Current lookups: {:?}", + "Unexpected count of single lookups. Current lookups: {:#?}", self.active_single_lookups() ); } - fn assert_parent_lookups_count(&self, count: usize) { - assert_eq!( - self.active_parent_lookups_count(), - count, - "Unexpected count of parent lookups. Parent lookups: {:?}. 
Current lookups: {:?}", - self.active_parent_lookups(), - self.active_single_lookups() - ); - } - - fn assert_lookup_is_active(&self, block_root: Hash256) { - let lookups = self.sync_manager.active_single_lookups(); - if !lookups.iter().any(|l| l.1 == block_root) { - panic!("Expected lookup {block_root} to be the only active: {lookups:?}"); - } - } - - fn assert_lookup_peers(&self, block_root: Hash256, mut expected_peers: Vec) { - let mut lookup = self - .sync_manager - .active_single_lookups() - .into_iter() - .find(|l| l.1 == block_root) - .unwrap_or_else(|| panic!("no lookup for {block_root}")); - lookup.3.sort(); - expected_peers.sort(); - assert_eq!( - lookup.3, expected_peers, - "unexpected peers on lookup {block_root}" - ); - } - fn insert_ignored_chain(&mut self, block_root: Hash256) { + self.log(&format!("Inserting block in ignored chains {block_root:?}")); self.sync_manager.insert_ignored_chain(block_root); } - fn assert_not_ignored_chain(&mut self, chain_hash: Hash256) { - let chains = self.sync_manager.get_ignored_chains(); - if chains.contains(&chain_hash) { - panic!("ignored chains contain {chain_hash:?}: {chains:?}"); - } - } - fn assert_ignored_chain(&mut self, chain_hash: Hash256) { let chains = self.sync_manager.get_ignored_chains(); if !chains.contains(&chain_hash) { @@ -306,16 +1291,8 @@ impl TestRig { } } - fn find_single_lookup_for(&self, block_root: Hash256) -> Id { - self.active_single_lookups() - .iter() - .find(|l| l.1 == block_root) - .unwrap_or_else(|| panic!("no single block lookup found for {block_root}")) - .0 - } - #[track_caller] - fn expect_no_active_single_lookups(&self) { + fn assert_no_active_single_lookups(&self) { assert!( self.active_single_lookups().is_empty(), "expect no single block lookups: {:?}", @@ -324,13 +1301,8 @@ impl TestRig { } #[track_caller] - fn expect_no_active_lookups(&self) { - self.expect_no_active_single_lookups(); - } - - fn expect_no_active_lookups_empty_network(&mut self) { - 
self.expect_no_active_lookups(); - self.expect_empty_network(); + fn assert_no_active_lookups(&self) { + self.assert_no_active_single_lookups(); } pub fn new_connected_peer(&mut self) -> PeerId { @@ -340,367 +1312,62 @@ impl TestRig { .peers .write() .__add_connected_peer_testing_only(false, &self.harness.spec, key); - self.log(&format!("Added new peer for testing {peer_id:?}")); + + // Assumes custody subnet count == column count + let custody_subnets = self + .network_globals + .peers + .read() + .peer_info(&peer_id) + .expect("Peer should be known") + .custody_subnets_iter() + .copied() + .collect::>(); + let peer_custody_str = + if custody_subnets.len() == self.harness.spec.number_of_custody_groups as usize { + "all".to_owned() + } else { + format!("{custody_subnets:?}") + }; + + self.log(&format!( + "Added new peer for testing {peer_id:?}, custody: {peer_custody_str}" + )); peer_id } pub fn new_connected_supernode_peer(&mut self) -> PeerId { let key = self.determinstic_key(); - self.network_globals + let peer_id = self + .network_globals .peers .write() - .__add_connected_peer_testing_only(true, &self.harness.spec, key) + .__add_connected_peer_testing_only(true, &self.harness.spec, key); + self.log(&format!( + "Added new peer for testing {peer_id:?}, custody: supernode" + )); + peer_id } fn determinstic_key(&mut self) -> CombinedKey { k256::ecdsa::SigningKey::random(&mut self.rng_08).into() } - pub fn new_connected_peers_for_peerdas(&mut self) { - // Enough sampling peers with few columns - for _ in 0..100 { - self.new_connected_peer(); - } - // One supernode peer to ensure all columns have at least one peer - self.new_connected_supernode_peer(); - } - - fn parent_chain_processed_success( - &mut self, - chain_hash: Hash256, - blocks: &[Arc>], - ) { - // Send import events for all pending parent blocks - for _ in blocks { - self.parent_block_processed_imported(chain_hash); - } - // Send final import event for the block that triggered the lookup - 
self.single_block_component_processed_imported(chain_hash); - } - - /// Locate a parent lookup chain with tip hash `chain_hash` - fn find_oldest_parent_lookup(&self, chain_hash: Hash256) -> Hash256 { - let parent_chain = self - .active_parent_lookups() - .into_iter() - .find(|chain| chain.first() == Some(&chain_hash)) - .unwrap_or_else(|| { - panic!( - "No parent chain with chain_hash {chain_hash:?}: Parent lookups {:?} Single lookups {:?}", - self.active_parent_lookups(), - self.active_single_lookups(), - ) - }); - *parent_chain.last().unwrap() - } - - fn parent_block_processed(&mut self, chain_hash: Hash256, result: BlockProcessingResult) { - let id = self.find_single_lookup_for(self.find_oldest_parent_lookup(chain_hash)); - self.single_block_component_processed(id, result); - } - - fn parent_blob_processed(&mut self, chain_hash: Hash256, result: BlockProcessingResult) { - let id = self.find_single_lookup_for(self.find_oldest_parent_lookup(chain_hash)); - self.single_blob_component_processed(id, result); - } - - fn parent_block_processed_imported(&mut self, chain_hash: Hash256) { - self.parent_block_processed( - chain_hash, - BlockProcessingResult::Ok(AvailabilityProcessingStatus::Imported(chain_hash)), - ); - } - - fn single_block_component_processed(&mut self, id: Id, result: BlockProcessingResult) { - self.send_sync_message(SyncMessage::BlockComponentProcessed { - process_type: BlockProcessType::SingleBlock { id }, - result, - }) - } - - fn single_block_component_processed_imported(&mut self, block_root: Hash256) { - let id = self.find_single_lookup_for(block_root); - self.single_block_component_processed( - id, - BlockProcessingResult::Ok(AvailabilityProcessingStatus::Imported(block_root)), - ) - } - - fn single_blob_component_processed(&mut self, id: Id, result: BlockProcessingResult) { - self.send_sync_message(SyncMessage::BlockComponentProcessed { - process_type: BlockProcessType::SingleBlob { id }, - result, - }) - } - - fn parent_lookup_block_response( - 
&mut self, - id: SingleLookupReqId, - peer_id: PeerId, - beacon_block: Option>>, - ) { - self.log("parent_lookup_block_response"); - self.send_sync_message(SyncMessage::RpcBlock { - sync_request_id: SyncRequestId::SingleBlock { id }, - peer_id, - beacon_block, - seen_timestamp: D, - }); - } - - fn single_lookup_block_response( - &mut self, - id: SingleLookupReqId, - peer_id: PeerId, - beacon_block: Option>>, - ) { - self.log("single_lookup_block_response"); - self.send_sync_message(SyncMessage::RpcBlock { - sync_request_id: SyncRequestId::SingleBlock { id }, - peer_id, - beacon_block, - seen_timestamp: D, - }); - } - - fn parent_lookup_blob_response( - &mut self, - id: SingleLookupReqId, - peer_id: PeerId, - blob_sidecar: Option>>, - ) { - self.log(&format!( - "parent_lookup_blob_response {:?}", - blob_sidecar.as_ref().map(|b| b.index) - )); - self.send_sync_message(SyncMessage::RpcBlob { - sync_request_id: SyncRequestId::SingleBlob { id }, - peer_id, - blob_sidecar, - seen_timestamp: D, - }); - } - - fn single_lookup_blob_response( - &mut self, - id: SingleLookupReqId, - peer_id: PeerId, - blob_sidecar: Option>>, - ) { - self.send_sync_message(SyncMessage::RpcBlob { - sync_request_id: SyncRequestId::SingleBlob { id }, - peer_id, - blob_sidecar, - seen_timestamp: D, - }); - } - - fn complete_single_lookup_blob_download( - &mut self, - id: SingleLookupReqId, - peer_id: PeerId, - blobs: Vec>, - ) { - for blob in blobs { - self.single_lookup_blob_response(id, peer_id, Some(blob.into())); - } - self.single_lookup_blob_response(id, peer_id, None); - } - - fn complete_single_lookup_blob_lookup_valid( - &mut self, - id: SingleLookupReqId, - peer_id: PeerId, - blobs: Vec>, - import: bool, - ) { - let block_root = blobs.first().unwrap().block_root(); - let block_slot = blobs.first().unwrap().slot(); - self.complete_single_lookup_blob_download(id, peer_id, blobs); - self.expect_block_process(ResponseType::Blob); - self.single_blob_component_processed( - id.lookup_id, - if 
import { - BlockProcessingResult::Ok(AvailabilityProcessingStatus::Imported(block_root)) - } else { - BlockProcessingResult::Ok(AvailabilityProcessingStatus::MissingComponents( - block_slot, block_root, - )) - }, - ); - } - - fn complete_lookup_block_download(&mut self, block: SignedBeaconBlock) { - let block_root = block.canonical_root(); - let id = self.expect_block_lookup_request(block_root); - self.expect_empty_network(); - let peer_id = self.new_connected_peer(); - self.single_lookup_block_response(id, peer_id, Some(block.into())); - self.single_lookup_block_response(id, peer_id, None); - } - - fn complete_lookup_block_import_valid(&mut self, block_root: Hash256, import: bool) { - self.expect_block_process(ResponseType::Block); - let id = self.find_single_lookup_for(block_root); - self.single_block_component_processed( - id, - if import { - BlockProcessingResult::Ok(AvailabilityProcessingStatus::Imported(block_root)) - } else { - BlockProcessingResult::Ok(AvailabilityProcessingStatus::MissingComponents( - Slot::new(0), - block_root, - )) - }, - ) - } - - fn complete_single_lookup_block_valid(&mut self, block: SignedBeaconBlock, import: bool) { - let block_root = block.canonical_root(); - self.complete_lookup_block_download(block); - self.complete_lookup_block_import_valid(block_root, import) - } - - fn parent_lookup_failed(&mut self, id: SingleLookupReqId, peer_id: PeerId, error: RPCError) { - self.send_sync_message(SyncMessage::RpcError { - peer_id, - sync_request_id: SyncRequestId::SingleBlock { id }, - error, - }) - } - - fn parent_lookup_failed_unavailable(&mut self, id: SingleLookupReqId, peer_id: PeerId) { - self.parent_lookup_failed( - id, - peer_id, - RPCError::ErrorResponse( - RpcErrorResponse::ResourceUnavailable, - "older than deneb".into(), - ), - ); - } - - fn single_lookup_failed(&mut self, id: SingleLookupReqId, peer_id: PeerId, error: RPCError) { - self.send_sync_message(SyncMessage::RpcError { - peer_id, - sync_request_id: 
SyncRequestId::SingleBlock { id }, - error, - }) - } - - fn complete_valid_block_request( - &mut self, - id: SingleLookupReqId, - block: Arc>, - missing_components: bool, - ) { - // Complete download - let peer_id = PeerId::random(); - let slot = block.slot(); - let block_root = block.canonical_root(); - self.single_lookup_block_response(id, peer_id, Some(block)); - self.single_lookup_block_response(id, peer_id, None); - // Expect processing and resolve with import - self.expect_block_process(ResponseType::Block); - self.single_block_component_processed( - id.lookup_id, - if missing_components { - BlockProcessingResult::Ok(AvailabilityProcessingStatus::MissingComponents( - slot, block_root, - )) - } else { - BlockProcessingResult::Ok(AvailabilityProcessingStatus::Imported(block_root)) - }, - ) - } - - fn complete_valid_custody_request( - &mut self, - ids: DCByRootIds, - data_columns: Vec>>, - missing_components: bool, - ) { - let lookup_id = if let SyncRequestId::DataColumnsByRoot(DataColumnsByRootRequestId { - requester: DataColumnsByRootRequester::Custody(id), - .. 
- }) = ids.first().unwrap().0 - { - id.requester.0.lookup_id - } else { - panic!("not a custody requester") - }; - - let first_column = data_columns.first().cloned().unwrap(); - - for id in ids { - self.log(&format!("return valid data column for {id:?}")); - let indices = &id.1; - let columns_to_send = indices - .iter() - .map(|&i| data_columns[i as usize].clone()) - .collect::>(); - self.complete_data_columns_by_root_request(id, &columns_to_send); - } - - // Expect work event - self.expect_rpc_custody_column_work_event(); - - // Respond with valid result - self.send_sync_message(SyncMessage::BlockComponentProcessed { - process_type: BlockProcessType::SingleCustodyColumn(lookup_id), - result: if missing_components { - BlockProcessingResult::Ok(AvailabilityProcessingStatus::MissingComponents( - first_column.slot(), - first_column.block_root(), - )) - } else { - BlockProcessingResult::Ok(AvailabilityProcessingStatus::Imported( - first_column.block_root(), - )) - }, - }); - } - - fn complete_data_columns_by_root_request( - &mut self, - (sync_request_id, _): DCByRootId, - data_columns: &[Arc>], - ) { - let peer_id = PeerId::random(); - for data_column in data_columns { - // Send chunks - self.send_sync_message(SyncMessage::RpcDataColumn { - sync_request_id, - peer_id, - data_column: Some(data_column.clone()), - seen_timestamp: timestamp_now(), - }); - } - // Send stream termination - self.send_sync_message(SyncMessage::RpcDataColumn { - sync_request_id, - peer_id, - data_column: None, - seen_timestamp: timestamp_now(), - }); - } - - /// Return RPCErrors for all active requests of peer - fn rpc_error_all_active_requests(&mut self, disconnected_peer_id: PeerId) { - self.drain_network_rx(); - while let Ok(sync_request_id) = self.pop_received_network_event(|ev| match ev { - NetworkMessage::SendRequest { - peer_id, - app_request_id: AppRequestId::Sync(id), - .. 
- } if *peer_id == disconnected_peer_id => Some(*id), - _ => None, - }) { - self.send_sync_message(SyncMessage::RpcError { - peer_id: disconnected_peer_id, - sync_request_id, - error: RPCError::Disconnected, - }); + pub fn new_connected_peers_for_peerdas(&mut self) -> Vec { + match self.fulu_test_type.them_node_custody_type() { + NodeCustodyType::Fullnode => { + // Enough sampling peers with few columns + let mut peers = (0..100) + .map(|_| self.new_connected_peer()) + .collect::>(); + // One supernode peer to ensure all columns have at least one peer + peers.push(self.new_connected_supernode_peer()); + peers + } + NodeCustodyType::Supernode | NodeCustodyType::SemiSupernode => { + let peer = self.new_connected_supernode_peer(); + vec![peer] + } } } @@ -708,6 +1375,22 @@ impl TestRig { self.send_sync_message(SyncMessage::Disconnect(peer_id)); } + fn get_connected_peers(&self) -> Vec { + self.network_globals + .peers + .read() + .peers() + .map(|(peer, _)| *peer) + .collect::>() + } + + fn disconnect_all_peers(&mut self) { + for peer in self.get_connected_peers() { + self.log(&format!("Disconnecting peer {peer}")); + self.send_sync_message(SyncMessage::Disconnect(peer)); + } + } + fn drain_network_rx(&mut self) { while let Ok(event) = self.network_rx.try_recv() { self.network_rx_queue.push(event); @@ -764,7 +1447,7 @@ impl TestRig { } } - pub fn expect_empty_processor(&mut self) { + pub fn assert_empty_processor(&mut self) { self.drain_processor_rx(); if !self.beacon_processor_rx_queue.is_empty() { panic!( @@ -774,215 +1457,8 @@ impl TestRig { } } - fn find_block_lookup_request( - &mut self, - for_block: Hash256, - ) -> Result { - self.pop_received_network_event(|ev| match ev { - NetworkMessage::SendRequest { - peer_id: _, - request: RequestType::BlocksByRoot(request), - app_request_id: AppRequestId::Sync(SyncRequestId::SingleBlock { id }), - } if request.block_roots().to_vec().contains(&for_block) => Some(*id), - _ => None, - }) - } - #[track_caller] - fn 
expect_block_lookup_request(&mut self, for_block: Hash256) -> SingleLookupReqId { - self.find_block_lookup_request(for_block) - .unwrap_or_else(|e| panic!("Expected block request for {for_block:?}: {e}")) - } - - fn find_blob_lookup_request( - &mut self, - for_block: Hash256, - ) -> Result { - self.pop_received_network_event(|ev| match ev { - NetworkMessage::SendRequest { - peer_id: _, - request: RequestType::BlobsByRoot(request), - app_request_id: AppRequestId::Sync(SyncRequestId::SingleBlob { id }), - } if request - .blob_ids - .to_vec() - .iter() - .any(|r| r.block_root == for_block) => - { - Some(*id) - } - _ => None, - }) - } - - #[track_caller] - fn expect_blob_lookup_request(&mut self, for_block: Hash256) -> SingleLookupReqId { - self.find_blob_lookup_request(for_block) - .unwrap_or_else(|e| panic!("Expected blob request for {for_block:?}: {e}")) - } - - #[track_caller] - fn expect_block_parent_request(&mut self, for_block: Hash256) -> SingleLookupReqId { - self.pop_received_network_event(|ev| match ev { - NetworkMessage::SendRequest { - peer_id: _, - request: RequestType::BlocksByRoot(request), - app_request_id: AppRequestId::Sync(SyncRequestId::SingleBlock { id }), - } if request.block_roots().to_vec().contains(&for_block) => Some(*id), - _ => None, - }) - .unwrap_or_else(|e| panic!("Expected block parent request for {for_block:?}: {e}")) - } - - fn expect_no_requests_for(&mut self, block_root: Hash256) { - if let Ok(request) = self.find_block_lookup_request(block_root) { - panic!("Expected no block request for {block_root:?} found {request:?}"); - } - if let Ok(request) = self.find_blob_lookup_request(block_root) { - panic!("Expected no blob request for {block_root:?} found {request:?}"); - } - } - - #[track_caller] - fn expect_blob_parent_request(&mut self, for_block: Hash256) -> SingleLookupReqId { - self.pop_received_network_event(|ev| match ev { - NetworkMessage::SendRequest { - peer_id: _, - request: RequestType::BlobsByRoot(request), - 
app_request_id: AppRequestId::Sync(SyncRequestId::SingleBlob { id }), - } if request - .blob_ids - .to_vec() - .iter() - .all(|r| r.block_root == for_block) => - { - Some(*id) - } - _ => None, - }) - .unwrap_or_else(|e| panic!("Expected blob parent request for {for_block:?}: {e}")) - } - - /// Retrieves an unknown number of requests for data columns of `block_root`. Because peer ENRs - /// are random, and peer selection is random, the total number of batched requests is unknown. - fn expect_data_columns_by_root_requests( - &mut self, - block_root: Hash256, - count: usize, - ) -> DCByRootIds { - let mut requests: DCByRootIds = vec![]; - loop { - let req = self - .pop_received_network_event(|ev| match ev { - NetworkMessage::SendRequest { - peer_id: _, - request: RequestType::DataColumnsByRoot(request), - app_request_id: - AppRequestId::Sync(id @ SyncRequestId::DataColumnsByRoot { .. }), - } => { - let matching = request - .data_column_ids - .iter() - .find(|id| id.block_root == block_root)?; - - let indices = matching.columns.iter().copied().collect(); - Some((*id, indices)) - } - _ => None, - }) - .unwrap_or_else(|e| { - panic!("Expected more DataColumnsByRoot requests for {block_root:?}: {e}") - }); - requests.push(req); - - // Should never infinite loop because sync does not send requests for 0 columns - if requests.iter().map(|r| r.1.len()).sum::() >= count { - return requests; - } - } - } - - fn expect_only_data_columns_by_root_requests( - &mut self, - for_block: Hash256, - count: usize, - ) -> DCByRootIds { - let ids = self.expect_data_columns_by_root_requests(for_block, count); - self.expect_empty_network(); - ids - } - - #[track_caller] - fn expect_block_process(&mut self, response_type: ResponseType) { - match response_type { - ResponseType::Block => self - .pop_received_processor_event(|ev| { - (ev.work_type() == beacon_processor::WorkType::RpcBlock).then_some(()) - }) - .unwrap_or_else(|e| panic!("Expected block work event: {e}")), - ResponseType::Blob => 
self - .pop_received_processor_event(|ev| { - (ev.work_type() == beacon_processor::WorkType::RpcBlobs).then_some(()) - }) - .unwrap_or_else(|e| panic!("Expected blobs work event: {e}")), - ResponseType::CustodyColumn => self - .pop_received_processor_event(|ev| { - (ev.work_type() == beacon_processor::WorkType::RpcCustodyColumn).then_some(()) - }) - .unwrap_or_else(|e| panic!("Expected column work event: {e}")), - } - } - - fn expect_rpc_custody_column_work_event(&mut self) { - self.pop_received_processor_event(|ev| { - if ev.work_type() == beacon_processor::WorkType::RpcCustodyColumn { - Some(()) - } else { - None - } - }) - .unwrap_or_else(|e| panic!("Expected RPC custody column work: {e}")) - } - - #[allow(dead_code)] - fn expect_no_work_event(&mut self) { - self.drain_processor_rx(); - assert!(self.network_rx_queue.is_empty()); - } - - fn expect_no_penalty_for(&mut self, peer_id: PeerId) { - self.drain_network_rx(); - let downscore_events = self - .network_rx_queue - .iter() - .filter_map(|ev| match ev { - NetworkMessage::ReportPeer { - peer_id: p_id, msg, .. 
- } if p_id == &peer_id => Some(msg), - _ => None, - }) - .collect::>(); - if !downscore_events.is_empty() { - panic!("Some downscore events for {peer_id}: {downscore_events:?}"); - } - } - - #[track_caller] - fn expect_parent_chain_process(&mut self) { - match self.beacon_processor_rx.try_recv() { - Ok(work) => { - // Parent chain sends blocks one by one - assert_eq!(work.work_type(), beacon_processor::WorkType::RpcBlock); - } - other => panic!( - "Expected rpc_block from chain segment process, found {:?}", - other - ), - } - } - - #[track_caller] - pub fn expect_empty_network(&mut self) { + pub fn assert_empty_network(&mut self) { self.drain_network_rx(); if !self.network_rx_queue.is_empty() { let n = self.network_rx_queue.len(); @@ -993,115 +1469,52 @@ impl TestRig { } } - #[track_caller] - fn expect_empty_beacon_processor(&mut self) { - match self.beacon_processor_rx.try_recv() { - Err(mpsc::error::TryRecvError::Empty) => {} // ok - Ok(event) => panic!("expected empty beacon processor: {:?}", event), - other => panic!("unexpected err {:?}", other), - } - } - - #[track_caller] - pub fn expect_penalty(&mut self, peer_id: PeerId, expect_penalty_msg: &'static str) { - let penalty_msg = self - .pop_received_network_event(|ev| match ev { - NetworkMessage::ReportPeer { - peer_id: p_id, msg, .. 
- } if p_id == &peer_id => Some(msg.to_owned()), - _ => None, - }) - .unwrap_or_else(|_| { - panic!( - "Expected '{expect_penalty_msg}' penalty for peer {peer_id}: {:#?}", - self.network_rx_queue - ) - }); - assert_eq!( - penalty_msg, expect_penalty_msg, - "Unexpected penalty msg for {peer_id}" - ); - self.log(&format!("Found expected penalty {penalty_msg}")); - } - - pub fn block_with_parent_and_blobs( + async fn import_block_to_da_checker( &mut self, - parent_root: Hash256, - num_blobs: NumBlobs, - ) -> (SignedBeaconBlock, Vec>) { - let (mut block, mut blobs) = self.rand_block_and_blobs(num_blobs); - *block.message_mut().parent_root_mut() = parent_root; - blobs.iter_mut().for_each(|blob| { - blob.signed_block_header = block.signed_block_header(); - }); - (block, blobs) - } - - pub fn rand_blockchain(&mut self, depth: usize) -> Vec>> { - let mut blocks = Vec::>>::with_capacity(depth); - for slot in 0..depth { - let parent = blocks - .last() - .map(|b| b.canonical_root()) - .unwrap_or_else(Hash256::random); - let mut block = self.rand_block(); - *block.message_mut().parent_root_mut() = parent; - *block.message_mut().slot_mut() = slot.into(); - blocks.push(block.into()); - } - self.log(&format!( - "Blockchain dump {:#?}", - blocks - .iter() - .map(|b| format!( - "block {} {} parent {}", - b.slot(), - b.canonical_root(), - b.parent_root() - )) - .collect::>() - )); - blocks - } - - fn insert_block_to_da_checker(&mut self, block: Arc>) { - let state = BeaconState::Base(BeaconStateBase::random_for_test(&mut self.rng)); - let parent_block = self.rand_block(); - let import_data = BlockImportData::::__new_for_test( - block.canonical_root(), - state, - parent_block.into(), - ); - let payload_verification_outcome = PayloadVerificationOutcome { - payload_verification_status: PayloadVerificationStatus::Verified, - is_valid_merge_transition_block: false, - }; - let executed_block = - AvailabilityPendingExecutedBlock::new(block, import_data, payload_verification_outcome); - 
match self - .harness + block: Arc>, + ) -> AvailabilityProcessingStatus { + // Simulate importing block from another source. Don't use GossipVerified as it checks with + // the clock, which does not match the timestamp in the payload. + let block_root = block.canonical_root(); + let rpc_block = RpcBlock::BlockOnly { block_root, block }; + self.harness .chain - .data_availability_checker - .put_executed_block(executed_block) - .unwrap() - { - Availability::Available(_) => panic!("block removed from da_checker, available"), - Availability::MissingComponents(block_root) => { + .process_block( + block_root, + rpc_block, + NotifyExecutionLayer::Yes, + BlockImportSource::Gossip, + || Ok(()), + ) + .await + .expect("Error processing block") + } + + async fn insert_block_to_da_chain_and_assert_missing_componens( + &mut self, + block: Arc>, + ) { + match self.import_block_to_da_checker(block).await { + AvailabilityProcessingStatus::Imported(_) => { + panic!("block removed from da_checker, available") + } + AvailabilityProcessingStatus::MissingComponents(_, block_root) => { self.log(&format!("inserted block to da_checker {block_root:?}")) } - }; + } } - fn insert_blob_to_da_checker(&mut self, blob: BlobSidecar) { + fn insert_blob_to_da_checker(&mut self, blob: Arc>) { match self .harness .chain .data_availability_checker - .put_gossip_verified_blobs( + .put_kzg_verified_blobs( blob.block_root(), - std::iter::once(GossipVerifiedBlob::<_, Observe>::__assumed_valid( - blob.into(), - )), + std::iter::once( + KzgVerifiedBlob::new(blob, &self.harness.chain.kzg, Duration::new(0, 0)) + .expect("Invalid blob"), + ), ) .unwrap() { @@ -1112,7 +1525,11 @@ impl TestRig { }; } - fn insert_block_to_availability_cache(&mut self, block: Arc>) { + fn insert_block_to_da_checker_as_pre_execution(&mut self, block: Arc>) { + self.log(&format!( + "Inserting block to availability_cache as pre_execution_block {:?}", + block.canonical_root() + )); self.harness .chain .data_availability_checker @@ 
-1121,6 +1538,9 @@ impl TestRig { } fn simulate_block_gossip_processing_becomes_invalid(&mut self, block_root: Hash256) { + self.log(&format!( + "Marking block {block_root:?} in da_checker as execution error" + )); self.harness .chain .data_availability_checker @@ -1132,19 +1552,38 @@ impl TestRig { }); } - fn simulate_block_gossip_processing_becomes_valid_missing_components( + async fn simulate_block_gossip_processing_becomes_valid( &mut self, block: Arc>, ) { let block_root = block.canonical_root(); - self.insert_block_to_da_checker(block); + match self.import_block_to_da_checker(block).await { + AvailabilityProcessingStatus::Imported(block_root) => { + self.log(&format!( + "insert block to da_checker and it imported {block_root:?}" + )); + } + AvailabilityProcessingStatus::MissingComponents(_, _) => { + panic!("block not imported after adding to da_checker"); + } + } self.send_sync_message(SyncMessage::GossipBlockProcessResult { block_root, imported: false, }); } + + fn requests_count(&self) -> HashMap<&'static str, usize> { + let mut requests_count = HashMap::new(); + for (request, _) in &self.requests { + *requests_count + .entry(Into::<&'static str>::into(request)) + .or_default() += 1; + } + requests_count + } } #[test] @@ -1161,1558 +1600,803 @@ fn stable_rng() { ); } -#[test] -fn test_single_block_lookup_happy_path() { - let mut rig = TestRig::test_setup(); - let block = rig.rand_block(); - let peer_id = rig.new_connected_peer(); - let block_root = block.canonical_root(); - // Trigger the request - rig.trigger_unknown_block_from_attestation(block_root, peer_id); - let id = rig.expect_block_lookup_request(block_root); +macro_rules! run_lookups_tests_for_depths { + ($($depth:literal),+ $(,)?) => { + paste::paste! { + $( + #[tokio::test] + async fn []() { + happy_path_unknown_attestation($depth).await; + } - // The peer provides the correct block, should not be penalized. Now the block should be sent - // for processing. 
- rig.single_lookup_block_response(id, peer_id, Some(block.into())); - rig.expect_empty_network(); - rig.expect_block_process(ResponseType::Block); + #[tokio::test] + async fn []() { + happy_path_unknown_block_parent($depth).await; + } - // The request should still be active. - assert_eq!(rig.active_single_lookups_count(), 1); + #[tokio::test] + async fn []() { + happy_path_unknown_data_parent($depth).await; + } - // Send the stream termination. Peer should have not been penalized, and the request removed - // after processing. - rig.single_lookup_block_response(id, peer_id, None); - rig.single_block_component_processed_imported(block_root); - rig.expect_empty_network(); - rig.expect_no_active_lookups(); + #[tokio::test] + async fn []() { + happy_path_multiple_triggers($depth).await; + } + + #[tokio::test] + async fn []() { + bad_peer_empty_block_response($depth).await; + } + + #[tokio::test] + async fn []() { + bad_peer_empty_data_response($depth).await; + } + + #[tokio::test] + async fn []() { + bad_peer_too_few_data_response($depth).await; + } + + #[tokio::test] + async fn []() { + bad_peer_wrong_block_response($depth).await; + } + + #[tokio::test] + async fn []() { + bad_peer_wrong_data_response($depth).await; + } + + #[tokio::test] + async fn []() { + bad_peer_rpc_failure($depth).await; + } + + #[tokio::test] + async fn []() { + too_many_download_failures($depth).await; + } + + #[tokio::test] + async fn []() { + too_many_processing_failures($depth).await; + } + + #[tokio::test] + async fn []() { + peer_disconnected_then_rpc_error($depth).await; + } + )+ + } + }; } -// Tests that if a peer does not respond with a block, we downscore and retry the block only -#[test] -fn test_single_block_lookup_empty_response() { - let mut r = TestRig::test_setup(); +run_lookups_tests_for_depths!(1, 2); - let block = r.rand_block(); - let block_root = block.canonical_root(); - let peer_id = r.new_connected_peer(); - - // Trigger the request - 
r.trigger_unknown_block_from_attestation(block_root, peer_id); - let id = r.expect_block_lookup_request(block_root); - - // The peer does not have the block. It should be penalized. - r.single_lookup_block_response(id, peer_id, None); - r.expect_penalty(peer_id, "NotEnoughResponsesReturned"); - // it should be retried - let id = r.expect_block_lookup_request(block_root); - // Send the right block this time. - r.single_lookup_block_response(id, peer_id, Some(block.into())); - r.expect_block_process(ResponseType::Block); - r.single_block_component_processed_imported(block_root); - r.expect_no_active_lookups(); +/// Assert that lookup sync succeeds with the happy case +async fn happy_path_unknown_attestation(depth: usize) { + let mut r = TestRig::default(); + // We get attestation for a block descendant (depth) blocks of current head + r.build_chain_and_trigger_last_block(depth).await; + // Complete the request with good peer behaviour + r.simulate(SimulateConfig::happy_path()).await; + r.assert_successful_lookup_sync(); } -#[test] -fn test_single_block_lookup_wrong_response() { - let mut rig = TestRig::test_setup(); - - let block_hash = Hash256::random(); - let peer_id = rig.new_connected_peer(); - - // Trigger the request - rig.trigger_unknown_block_from_attestation(block_hash, peer_id); - let id = rig.expect_block_lookup_request(block_hash); - - // Peer sends something else. It should be penalized. - let bad_block = rig.rand_block(); - rig.single_lookup_block_response(id, peer_id, Some(bad_block.into())); - rig.expect_penalty(peer_id, "UnrequestedBlockRoot"); - rig.expect_block_lookup_request(block_hash); // should be retried - - // Send the stream termination. This should not produce an additional penalty. 
- rig.single_lookup_block_response(id, peer_id, None); - rig.expect_empty_network(); +async fn happy_path_unknown_block_parent(depth: usize) { + let mut r = TestRig::default(); + r.build_chain(depth).await; + r.trigger_with_last_unknown_block_parent(); + r.simulate(SimulateConfig::happy_path()).await; + // All lookups should NOT complete on this test, however note the following for the tip lookup, + // it's the lookup for the tip block which has 0 peers and a block cached: + // - before deneb the block is cached, so it's sent for processing, and success + // - before fulu the block is cached, but we can't fetch blobs so it's stuck + // - after fulu the block is cached, we start a custody request and since we use the global pool + // of peers we DO have 1 connected synced supernode peer, which gives us the columns and the + // lookup succeeds + if r.is_after_deneb() && !r.is_after_fulu() { + r.assert_successful_lookup_sync_parent_trigger() + } else { + r.assert_successful_lookup_sync(); + } } -#[test] -fn test_single_block_lookup_failure() { - let mut rig = TestRig::test_setup(); - - let block_hash = Hash256::random(); - let peer_id = rig.new_connected_peer(); - - // Trigger the request - rig.trigger_unknown_block_from_attestation(block_hash, peer_id); - let id = rig.expect_block_lookup_request(block_hash); - - // The request fails. RPC failures are handled elsewhere so we should not penalize the peer. 
- rig.single_lookup_failed(id, peer_id, RPCError::UnsupportedProtocol); - rig.expect_block_lookup_request(block_hash); - rig.expect_empty_network(); +/// Assert that sync completes from a GossipUnknownParentBlob / UnknownDataColumnParent +async fn happy_path_unknown_data_parent(depth: usize) { + let Some(mut r) = TestRig::new_after_deneb() else { + return; + }; + r.build_chain(depth).await; + if r.is_after_fulu() { + r.trigger_with_last_unknown_data_column_parent(); + } else if r.is_after_deneb() { + r.trigger_with_last_unknown_blob_parent(); + } + r.simulate(SimulateConfig::happy_path()).await; + r.assert_successful_lookup_sync_parent_trigger(); } -#[test] -fn test_single_block_lookup_peer_disconnected_then_rpc_error() { - let mut rig = TestRig::test_setup(); +/// Assert that multiple trigger types don't create extra lookups +async fn happy_path_multiple_triggers(depth: usize) { + let mut r = TestRig::default(); + // + 1, because the unknown parent trigger needs two new blocks + r.build_chain(depth + 1).await; + r.trigger_with_last_block(); + r.trigger_with_last_block(); + r.trigger_with_last_unknown_block_parent(); + r.trigger_with_last_unknown_block_parent(); + if r.is_after_fulu() { + r.trigger_with_last_unknown_data_column_parent(); + } else if r.is_after_deneb() { + r.trigger_with_last_unknown_blob_parent(); + } + r.simulate(SimulateConfig::happy_path()).await; + assert_eq!(r.created_lookups(), depth + 1, "Don't create extra lookups"); + r.assert_successful_lookup_sync(); +} - let block_hash = Hash256::random(); - let peer_id = rig.new_connected_peer(); +// Test bad behaviour of peers - // Trigger the request. 
- rig.trigger_unknown_block_from_attestation(block_hash, peer_id); - let id = rig.expect_block_lookup_request(block_hash); +/// Assert that if peer responds with no blocks, we downscore, and retry the same lookup +async fn bad_peer_empty_block_response(depth: usize) { + let mut r = TestRig::default(); + r.build_chain_and_trigger_last_block(depth).await; + // Simulate that peer returns empty response once, then good behaviour + r.simulate(SimulateConfig::new().return_no_blocks_once()) + .await; + // We register a penalty, retry and complete sync successfully + r.assert_penalties(&["NotEnoughResponsesReturned"]); + r.assert_successful_lookup_sync(); + // TODO(tree-sync) For post-deneb assert that the blobs are not re-fetched + // TODO(tree-sync) Assert that a single lookup is created (no drops) +} + +/// Assert that if peer responds with no blobs / columns, we downscore, and retry the same lookup +async fn bad_peer_empty_data_response(depth: usize) { + let Some(mut r) = TestRig::new_after_deneb() else { + return; + }; + r.build_chain_and_trigger_last_block(depth).await; + r.simulate(SimulateConfig::new().return_no_data_once()) + .await; + // We register a penalty, retry and complete sync successfully + r.assert_penalties(&["NotEnoughResponsesReturned"]); + r.assert_successful_lookup_sync(); + // TODO(tree-sync) Assert that a single lookup is created (no drops) +} + +/// Assert that if peer responds with not enough blobs / columns, we downscore, and retry the same +/// lookup +async fn bad_peer_too_few_data_response(depth: usize) { + let Some(mut r) = TestRig::new_after_deneb() else { + return; + }; + r.build_chain_and_trigger_last_block(depth).await; + r.simulate(SimulateConfig::new().return_too_few_data_once()) + .await; + // We register a penalty, retry and complete sync successfully + r.assert_penalties(&["NotEnoughResponsesReturned"]); + r.assert_successful_lookup_sync(); + // TODO(tree-sync) Assert that a single lookup is created (no drops) +} + +/// Assert that 
if peer responds with bad blocks, we downscore, and retry the same lookup +async fn bad_peer_wrong_block_response(depth: usize) { + let mut r = TestRig::default(); + r.build_chain_and_trigger_last_block(depth).await; + r.simulate(SimulateConfig::new().return_wrong_blocks_once()) + .await; + r.assert_penalties(&["UnrequestedBlockRoot"]); + r.assert_successful_lookup_sync(); + + // TODO(tree-sync) Assert that a single lookup is created (no drops) +} + +/// Assert that if peer responds with bad blobs / columns, we downscore, and retry the same lookup +async fn bad_peer_wrong_data_response(depth: usize) { + let Some(mut r) = TestRig::new_after_deneb() else { + return; + }; + r.build_chain_and_trigger_last_block(depth).await; + r.simulate(SimulateConfig::new().return_wrong_sidecar_for_block_once()) + .await; + // We register a penalty, retry and complete sync successfully + r.assert_penalties(&["UnrequestedBlockRoot"]); + r.assert_successful_lookup_sync(); + // TODO(tree-sync) Assert that a single lookup is created (no drops) +} + +/// Assert that on network error, we DON'T downscore, and retry the same lookup +async fn bad_peer_rpc_failure(depth: usize) { + let mut r = TestRig::default(); + r.build_chain_and_trigger_last_block(depth).await; + r.simulate(SimulateConfig::new().return_rpc_error(RPCError::UnsupportedProtocol)) + .await; + r.assert_no_penalties(); + r.assert_successful_lookup_sync(); +} + +// Test retry logic + +/// Assert that on too many download failures the lookup fails, but we can still sync +async fn too_many_download_failures(depth: usize) { + let mut r = TestRig::default(); + r.build_chain_and_trigger_last_block(depth).await; + // Simulate that a peer always returns empty + r.simulate(SimulateConfig::new().return_no_blocks_always()) + .await; + // We register multiple penalties, the lookup fails and sync does not progress + r.assert_penalties_of_type("NotEnoughResponsesReturned"); + r.assert_failed_lookup_sync(); + + // Trigger sync again for same 
block, and complete successfully. + // Asserts that the lookup is not on a blacklist + r.capture_metrics_baseline(); + r.trigger_with_last_block(); + r.simulate(SimulateConfig::happy_path()).await; + r.assert_successful_lookup_sync(); +} + +/// Assert that on too many processing failures the lookup fails, but we can still sync +async fn too_many_processing_failures(depth: usize) { + let mut r = TestRig::default(); + r.build_chain_and_trigger_last_block(depth).await; + // Simulate that a peer always returns empty + r.simulate( + SimulateConfig::new() + .with_process_result(|| BlockProcessingResult::Err(BlockError::BlockSlotLimitReached)), + ) + .await; + // We register multiple penalties, the lookup fails and sync does not progress + r.assert_penalties_of_type("lookup_block_processing_failure"); + r.assert_failed_lookup_sync(); + + // Trigger sync again for same block, and complete successfully. + // Asserts that the lookup is not on a blacklist + r.capture_metrics_baseline(); + r.trigger_with_last_block(); + r.simulate(SimulateConfig::happy_path()).await; + r.assert_successful_lookup_sync(); +} + +#[tokio::test] +/// Assert that multiple trigger types don't create extra lookups +async fn unknown_parent_does_not_add_peers_to_itself() { + let Some(mut r) = TestRig::new_after_deneb() else { + return; + }; + // 2, because the unknown parent trigger needs two new blocks + r.build_chain(2).await; + r.trigger_with_last_unknown_block_parent(); + r.trigger_with_last_unknown_block_parent(); + if r.is_after_fulu() { + r.trigger_with_last_unknown_data_column_parent(); + } else if r.is_after_deneb() { + r.trigger_with_last_unknown_blob_parent(); + } + r.simulate(SimulateConfig::happy_path()).await; + r.assert_peers_at_lookup_of_slot(2, 0); + r.assert_peers_at_lookup_of_slot(1, 3); + assert_eq!(r.created_lookups(), 2, "Don't create extra lookups"); + // All lookups should NOT complete on this test, however note the following for the tip lookup, + // it's the lookup for the tip 
block which has 0 peers and a block cached: + // - before fulu the block is cached, but we can't fetch blobs so it's stuck + // - after fulu the block is cached, we start a custody request and since we use the global pool + // of peers we DO have >1 connected synced supernode peer, which gives us the columns and the + // lookup succeeds + if r.is_after_fulu() { + r.assert_successful_lookup_sync() + } else { + r.assert_successful_lookup_sync_parent_trigger(); + } +} + +#[tokio::test] +/// Assert that if the beacon processor returns Ignored, the lookup is dropped +async fn test_single_block_lookup_ignored_response() { + let mut r = TestRig::default(); + r.build_chain_and_trigger_last_block(1).await; + // Send an Ignored response, the request should be dropped + r.simulate(SimulateConfig::new().with_process_result(|| BlockProcessingResult::Ignored)) + .await; + // The block was not actually imported + r.assert_head_slot(0); + assert_eq!(r.created_lookups(), 1, "no created lookups"); + assert_eq!(r.dropped_lookups(), 1, "no dropped lookups"); + assert_eq!(r.completed_lookups(), 0, "some completed lookups"); +} + +#[tokio::test] +/// Assert that if the beacon processor returns DuplicateFullyImported, the lookup completes successfully +async fn test_single_block_lookup_duplicate_response() { + let mut r = TestRig::default(); + r.build_chain_and_trigger_last_block(1).await; + // Send a DuplicateFullyImported response, the lookup should complete successfully + r.simulate(SimulateConfig::new().with_process_result(|| { + BlockProcessingResult::Err(BlockError::DuplicateFullyImported(Hash256::ZERO)) + })) + .await; + // The block was not actually imported + r.assert_head_slot(0); + r.assert_successful_lookup_sync(); +} + +/// Assert that when peers disconnect the lookups are not dropped (kept with zero peers) +async fn peer_disconnected_then_rpc_error(depth: usize) { + let mut r = TestRig::default(); + r.build_chain_and_trigger_last_block(depth).await; + 
r.assert_single_lookups_count(1); // The peer disconnect event reaches sync before the rpc error. - rig.peer_disconnected(peer_id); + r.disconnect_all_peers(); // The lookup is not removed as it can still potentially make progress. - rig.assert_single_lookups_count(1); - // The request fails. - rig.single_lookup_failed(id, peer_id, RPCError::Disconnected); - rig.expect_block_lookup_request(block_hash); - // The request should be removed from the network context on disconnection. - rig.expect_empty_network(); + r.assert_single_lookups_count(1); + r.simulate(SimulateConfig::new().return_rpc_error(RPCError::Disconnected)) + .await; + + // Regardless of depth, only the initial lookup is created, because the peer disconnects before + // being able to download the block + assert_eq!(r.created_lookups(), 1, "no created lookups"); + assert_eq!(r.completed_lookups(), 0, "some completed lookups"); + assert_eq!(r.dropped_lookups(), 0, "some dropped lookups"); + r.assert_empty_network(); + r.assert_single_lookups_count(1); } -#[test] -fn test_single_block_lookup_becomes_parent_request() { - let mut rig = TestRig::test_setup(); - - let block = Arc::new(rig.rand_block()); - let block_root = block.canonical_root(); - let parent_root = block.parent_root(); - let peer_id = rig.new_connected_peer(); - - // Trigger the request - rig.trigger_unknown_block_from_attestation(block.canonical_root(), peer_id); - let id = rig.expect_block_parent_request(block_root); - - // The peer provides the correct block, should not be penalized. Now the block should be sent - // for processing. - rig.single_lookup_block_response(id, peer_id, Some(block.clone())); - rig.expect_empty_network(); - rig.expect_block_process(ResponseType::Block); - - // The request should still be active. - assert_eq!(rig.active_single_lookups_count(), 1); - - // Send the stream termination. Peer should have not been penalized, and the request moved to a - // parent request after processing. 
- rig.single_block_component_processed( - id.lookup_id, - BlockProcessingResult::Err(BlockError::ParentUnknown { - parent_root: block.parent_root(), - }), - ); - assert_eq!(rig.active_single_lookups_count(), 2); // 2 = current + parent - rig.expect_block_parent_request(parent_root); - rig.expect_empty_network(); - assert_eq!(rig.active_parent_lookups_count(), 1); -} - -#[test] -fn test_parent_lookup_happy_path() { - let mut rig = TestRig::test_setup(); - - let (parent, block, parent_root, block_root) = rig.rand_block_and_parent(); - let peer_id = rig.new_connected_peer(); - - // Trigger the request - rig.trigger_unknown_parent_block(peer_id, block.into()); - let id = rig.expect_block_parent_request(parent_root); - - // Peer sends the right block, it should be sent for processing. Peer should not be penalized. - rig.parent_lookup_block_response(id, peer_id, Some(parent.into())); - // No request of blobs because the block has not data - rig.expect_empty_network(); - rig.expect_block_process(ResponseType::Block); - rig.expect_empty_network(); - - // Add peer to child lookup to prevent it being dropped - rig.trigger_unknown_block_from_attestation(block_root, peer_id); - // Processing succeeds, now the rest of the chain should be sent for processing. - rig.parent_block_processed( - block_root, - BlockError::DuplicateFullyImported(block_root).into(), - ); - rig.expect_parent_chain_process(); - rig.parent_chain_processed_success(block_root, &[]); - rig.expect_no_active_lookups_empty_network(); -} - -#[test] -fn test_parent_lookup_wrong_response() { - let mut rig = TestRig::test_setup(); - - let (parent, block, parent_root, block_root) = rig.rand_block_and_parent(); - let peer_id = rig.new_connected_peer(); - - // Trigger the request - rig.trigger_unknown_parent_block(peer_id, block.into()); - let id1 = rig.expect_block_parent_request(parent_root); - - // Peer sends the wrong block, peer should be penalized and the block re-requested. 
- let bad_block = rig.rand_block(); - rig.parent_lookup_block_response(id1, peer_id, Some(bad_block.into())); - rig.expect_penalty(peer_id, "UnrequestedBlockRoot"); - let id2 = rig.expect_block_parent_request(parent_root); - - // Send the stream termination for the first request. This should not produce extra penalties. - rig.parent_lookup_block_response(id1, peer_id, None); - rig.expect_empty_network(); - - // Send the right block this time. - rig.parent_lookup_block_response(id2, peer_id, Some(parent.into())); - rig.expect_block_process(ResponseType::Block); - - // Add peer to child lookup to prevent it being dropped - rig.trigger_unknown_block_from_attestation(block_root, peer_id); - // Processing succeeds, now the rest of the chain should be sent for processing. - rig.parent_block_processed_imported(block_root); - rig.expect_parent_chain_process(); - rig.parent_chain_processed_success(block_root, &[]); - rig.expect_no_active_lookups_empty_network(); -} - -#[test] -fn test_parent_lookup_rpc_failure() { - let mut rig = TestRig::test_setup(); - - let (parent, block, parent_root, block_root) = rig.rand_block_and_parent(); - let peer_id = rig.new_connected_peer(); - - // Trigger the request - rig.trigger_unknown_parent_block(peer_id, block.into()); - let id = rig.expect_block_parent_request(parent_root); - - // The request fails. It should be tried again. - rig.parent_lookup_failed_unavailable(id, peer_id); - let id = rig.expect_block_parent_request(parent_root); - - // Send the right block this time. - rig.parent_lookup_block_response(id, peer_id, Some(parent.into())); - rig.expect_block_process(ResponseType::Block); - - // Add peer to child lookup to prevent it being dropped - rig.trigger_unknown_block_from_attestation(block_root, peer_id); - // Processing succeeds, now the rest of the chain should be sent for processing. 
- rig.parent_block_processed_imported(block_root); - rig.expect_parent_chain_process(); - rig.parent_chain_processed_success(block_root, &[]); - rig.expect_no_active_lookups_empty_network(); -} - -#[test] -fn test_parent_lookup_too_many_attempts() { - let mut rig = TestRig::test_setup(); - - let block = rig.rand_block(); - let parent_root = block.parent_root(); - let peer_id = rig.new_connected_peer(); - - // Trigger the request - rig.trigger_unknown_parent_block(peer_id, block.into()); - for i in 1..=PARENT_FAIL_TOLERANCE { - let id = rig.expect_block_parent_request(parent_root); - // Blobs are only requested in the first iteration as this test only retries blocks - - if i % 2 == 0 { - // make sure every error is accounted for - // The request fails. It should be tried again. - rig.parent_lookup_failed_unavailable(id, peer_id); - } else { - // Send a bad block this time. It should be tried again. - let bad_block = rig.rand_block(); - rig.parent_lookup_block_response(id, peer_id, Some(bad_block.into())); - // Send the stream termination - - // Note, previously we would send the same lookup id with a stream terminator, - // we'd ignore it because we'd intrepret it as an unrequested response, since - // we already got one response for the block. I'm not sure what the intent is - // for having this stream terminator line in this test at all. Receiving an invalid - // block and a stream terminator with the same Id now results in two failed attempts, - // I'm unsure if this is how it should behave? - // - rig.parent_lookup_block_response(id, peer_id, None); - rig.expect_penalty(peer_id, "UnrequestedBlockRoot"); - } +#[tokio::test] +/// Assert that when creating multiple lookups their parent-child relation is discovered and we add +/// peers recursively from child to parent. 
+async fn lookups_form_chain() { + let depth = 5; + let mut r = TestRig::default(); + r.build_chain(depth).await; + for slot in (1..=depth).rev() { + r.trigger_with_block_at_slot(slot as u64); } + // TODO(tree-sync): Assert that there are `depth` disjoint chains + r.simulate(SimulateConfig::happy_path()).await; + r.assert_successful_lookup_sync(); - rig.expect_no_active_lookups_empty_network(); + // Assert that the peers are added to ancestor lookups, + // - The lookup with max slot has 1 peer + // - The lookup with min slot has all the peers + for slot in 1..=(depth as u64) { + let lookup = r.lookup_by_root(r.block_root_at_slot(slot)); + assert_eq!( + lookup.seen_peers.len(), + 1 + depth - slot as usize, + "Unexpected peer count for lookup at slot {slot}" + ); + } } -#[test] -fn test_parent_lookup_too_many_download_attempts_no_blacklist() { - let mut rig = TestRig::test_setup(); +#[tokio::test] +/// Assert that if a lookup chain (by appending ancestors) is too long we drop it +async fn test_parent_lookup_too_deep_grow_ancestor_one() { + let mut r = TestRig::default(); + r.build_chain(PARENT_DEPTH_TOLERANCE + 1).await; + r.trigger_with_last_block(); + r.simulate(SimulateConfig::happy_path()).await; - let (parent, block, parent_root, block_root) = rig.rand_block_and_parent(); - let peer_id = rig.new_connected_peer(); - - // Trigger the request - rig.trigger_unknown_parent_block(peer_id, block.into()); - for i in 1..=PARENT_FAIL_TOLERANCE { - rig.assert_not_ignored_chain(block_root); - let id = rig.expect_block_parent_request(parent_root); - if i % 2 != 0 { - // The request fails. It should be tried again. - rig.parent_lookup_failed_unavailable(id, peer_id); - } else { - // Send a bad block this time. It should be tried again. 
- let bad_block = rig.rand_block(); - rig.parent_lookup_block_response(id, peer_id, Some(bad_block.into())); - rig.expect_penalty(peer_id, "UnrequestedBlockRoot"); - } - } - - rig.assert_not_ignored_chain(block_root); - rig.assert_not_ignored_chain(parent.canonical_root()); - rig.expect_no_active_lookups_empty_network(); -} - -#[test] -fn test_parent_lookup_too_many_processing_attempts_must_blacklist() { - const PROCESSING_FAILURES: u8 = PARENT_FAIL_TOLERANCE / 2 + 1; - let mut rig = TestRig::test_setup(); - let (parent, block, parent_root, block_root) = rig.rand_block_and_parent(); - let peer_id = rig.new_connected_peer(); - - // Trigger the request - rig.trigger_unknown_parent_block(peer_id, block.into()); - - rig.log("Fail downloading the block"); - for _ in 0..(PARENT_FAIL_TOLERANCE - PROCESSING_FAILURES) { - let id = rig.expect_block_parent_request(parent_root); - // The request fails. It should be tried again. - rig.parent_lookup_failed_unavailable(id, peer_id); - } - - rig.log("Now fail processing a block in the parent request"); - for _ in 0..PROCESSING_FAILURES { - let id = rig.expect_block_parent_request(parent_root); - // Blobs are only requested in the previous first iteration as this test only retries blocks - rig.assert_not_ignored_chain(block_root); - // send the right parent but fail processing - rig.parent_lookup_block_response(id, peer_id, Some(parent.clone().into())); - rig.parent_block_processed(block_root, BlockError::BlockSlotLimitReached.into()); - rig.parent_lookup_block_response(id, peer_id, None); - rig.expect_penalty(peer_id, "lookup_block_processing_failure"); - } - - rig.assert_not_ignored_chain(block_root); - rig.expect_no_active_lookups_empty_network(); -} - -#[test] -fn test_parent_lookup_too_deep_grow_ancestor() { - let mut rig = TestRig::test_setup(); - let mut blocks = rig.rand_blockchain(PARENT_DEPTH_TOLERANCE); - - let peer_id = rig.new_connected_peer(); - let trigger_block = blocks.pop().unwrap(); - let chain_hash = 
trigger_block.canonical_root(); - rig.trigger_unknown_parent_block(peer_id, trigger_block); - - for block in blocks.into_iter().rev() { - let id = rig.expect_block_parent_request(block.canonical_root()); - // the block - rig.parent_lookup_block_response(id, peer_id, Some(block.clone())); - // the stream termination - rig.parent_lookup_block_response(id, peer_id, None); - // the processing request - rig.expect_block_process(ResponseType::Block); - // the processing result - rig.parent_block_processed( - chain_hash, - BlockProcessingResult::Err(BlockError::ParentUnknown { - parent_root: block.parent_root(), - }), - ) - } - - // Should create a new syncing chain - rig.drain_sync_rx(); - assert_eq!( - rig.active_range_sync_chain(), - ( - RangeSyncType::Head, - Slot::new(0), - Slot::new(PARENT_DEPTH_TOLERANCE as u64 - 1) - ) - ); + r.assert_head_slot(PARENT_DEPTH_TOLERANCE as u64 + 1); + r.assert_no_penalties(); // Should not penalize peer, but network is not clear because of the blocks_by_range requests - rig.expect_no_penalty_for(peer_id); - rig.assert_ignored_chain(chain_hash); + // r.assert_ignored_chain(chain_hash); + // + // Assert that chain is in failed chains + // Assert that there were 0 lookups completed, 33 dropped + // Assert that there were 1 range sync chains + // Bound resources: + // - Limit amount of requests + // - Limit the types of sync used + assert_eq!(r.completed_lookups(), 0, "no completed lookups"); + assert_eq!( + r.dropped_lookups(), + PARENT_DEPTH_TOLERANCE, + "All lookups dropped" + ); + r.assert_successful_range_sync(); +} + +#[tokio::test] +async fn test_parent_lookup_too_deep_grow_ancestor_zero() { + let mut r = TestRig::default(); + r.build_chain(PARENT_DEPTH_TOLERANCE).await; + r.trigger_with_last_block(); + r.simulate(SimulateConfig::happy_path()).await; + + r.assert_head_slot(PARENT_DEPTH_TOLERANCE as u64); + r.assert_no_penalties(); + assert_eq!( + r.completed_lookups(), + PARENT_DEPTH_TOLERANCE, + "completed all lookups" + ); + 
assert_eq!(r.dropped_lookups(), 0, "no dropped lookups"); } // Regression test for https://github.com/sigp/lighthouse/pull/7118 // 8042 UPDATE: block was previously added to the failed_chains cache, now it's inserted into the -// ignored chains cache. The regression test still applies as the chaild lookup is not created -#[test] -fn test_child_lookup_not_created_for_ignored_chain_parent_after_processing() { - // GIVEN: A parent chain longer than PARENT_DEPTH_TOLERANCE. - let mut rig = TestRig::test_setup(); - let mut blocks = rig.rand_blockchain(PARENT_DEPTH_TOLERANCE + 1); - let peer_id = rig.new_connected_peer(); - - // The child of the trigger block to be used to extend the chain. - let trigger_block_child = blocks.pop().unwrap(); - // The trigger block that starts the lookup. - let trigger_block = blocks.pop().unwrap(); - let tip_root = trigger_block.canonical_root(); - - // Trigger the initial unknown parent block for the tip. - rig.trigger_unknown_parent_block(peer_id, trigger_block.clone()); - - // Simulate the lookup chain building up via `ParentUnknown` errors. - for block in blocks.into_iter().rev() { - let id = rig.expect_block_parent_request(block.canonical_root()); - rig.parent_lookup_block_response(id, peer_id, Some(block.clone())); - rig.parent_lookup_block_response(id, peer_id, None); - rig.expect_block_process(ResponseType::Block); - rig.parent_block_processed( - tip_root, - BlockProcessingResult::Err(BlockError::ParentUnknown { - parent_root: block.parent_root(), - }), - ); - } +// ignored chains cache. 
The regression test still applies as the child lookup is not created +#[tokio::test] +async fn test_child_lookup_not_created_for_ignored_chain_parent_after_processing() { + let mut r = TestRig::default(); + let depth = PARENT_DEPTH_TOLERANCE + 1; + r.build_chain(depth + 1).await; + r.trigger_with_block_at_slot(depth as u64); + r.simulate(SimulateConfig::new().no_range_sync()).await; // At this point, the chain should have been deemed too deep and pruned. // The tip root should have been inserted into ignored chains. - rig.assert_ignored_chain(tip_root); - rig.expect_no_penalty_for(peer_id); + // Ensure no blocks have been synced + r.assert_head_slot(0); + r.assert_no_active_lookups(); + r.assert_no_penalties(); + r.assert_ignored_chain(r.block_at_slot(depth as u64).canonical_root()); // WHEN: Trigger the extending block that points to the tip. - let trigger_block_child_root = trigger_block_child.canonical_root(); - rig.trigger_unknown_block_from_attestation(trigger_block_child_root, peer_id); - let id = rig.expect_block_lookup_request(trigger_block_child_root); - rig.single_lookup_block_response(id, peer_id, Some(trigger_block_child.clone())); - rig.single_lookup_block_response(id, peer_id, None); - rig.expect_block_process(ResponseType::Block); - rig.single_block_component_processed( - id.lookup_id, - BlockProcessingResult::Err(BlockError::ParentUnknown { - parent_root: tip_root, - }), - ); - + let peer = r.new_connected_peer(); + r.trigger_unknown_parent_block(peer, r.block_at_slot(depth as u64 + 1)); // THEN: The extending block should not create a lookup because the tip was inserted into // ignored chains. 
- rig.expect_no_active_lookups(); - rig.expect_no_penalty_for(peer_id); - rig.expect_empty_network(); + r.assert_no_active_lookups(); + r.assert_no_penalties(); + r.assert_empty_network(); } -#[test] -fn test_parent_lookup_too_deep_grow_tip() { - let mut rig = TestRig::test_setup(); - let blocks = rig.rand_blockchain(PARENT_DEPTH_TOLERANCE - 1); - let peer_id = rig.new_connected_peer(); - let tip = blocks.last().unwrap().clone(); - - for block in blocks.into_iter() { - let block_root = block.canonical_root(); - rig.trigger_unknown_block_from_attestation(block_root, peer_id); - let id = rig.expect_block_parent_request(block_root); - rig.single_lookup_block_response(id, peer_id, Some(block.clone())); - rig.single_lookup_block_response(id, peer_id, None); - rig.expect_block_process(ResponseType::Block); - rig.single_block_component_processed( - id.lookup_id, - BlockError::ParentUnknown { - parent_root: block.parent_root(), - } - .into(), - ); +#[tokio::test] +/// Assert that if a lookup chain (by appending tips) is too long we drop it +async fn test_parent_lookup_too_deep_grow_tip() { + let depth = PARENT_DEPTH_TOLERANCE + 1; + let mut r = TestRig::default(); + r.build_chain(depth).await; + for slot in (1..=depth).rev() { + r.trigger_with_block_at_slot(slot as u64); } + r.simulate(SimulateConfig::happy_path()).await; - // Should create a new syncing chain - rig.drain_sync_rx(); + // Even if the chain is longer than `PARENT_DEPTH_TOLERANCE` because the lookups are created all + // at once they chain by sections and it's possible that the oldest ancestors start processing + // before the full chain is connected. 
+ assert!(r.created_lookups() > 0, "no created lookups"); assert_eq!( - rig.active_range_sync_chain(), - ( - RangeSyncType::Head, - Slot::new(0), - Slot::new(PARENT_DEPTH_TOLERANCE as u64 - 2) - ) + r.completed_lookups(), + r.created_lookups(), + "not all completed lookups" ); + assert_eq!(r.dropped_lookups(), 0, "some dropped lookups"); + r.assert_successful_lookup_sync(); // Should not penalize peer, but network is not clear because of the blocks_by_range requests - rig.expect_no_penalty_for(peer_id); - rig.assert_ignored_chain(tip.canonical_root()); + r.assert_no_penalties(); } -#[test] -fn test_lookup_peer_disconnected_no_peers_left_while_request() { - let mut rig = TestRig::test_setup(); - let peer_id = rig.new_connected_peer(); - let trigger_block = rig.rand_block(); - rig.trigger_unknown_parent_block(peer_id, trigger_block.into()); - rig.peer_disconnected(peer_id); - rig.rpc_error_all_active_requests(peer_id); - // Erroring all rpc requests and disconnecting the peer shouldn't remove the requests - // from the lookups map as they can still progress. 
- rig.assert_single_lookups_count(2); -} - -#[test] -fn test_lookup_disconnection_peer_left() { - let mut rig = TestRig::test_setup(); - let peer_ids = (0..2).map(|_| rig.new_connected_peer()).collect::>(); - let disconnecting_peer = *peer_ids.first().unwrap(); - let block_root = Hash256::random(); - // lookup should have two peers associated with the same block - for peer_id in peer_ids.iter() { - rig.trigger_unknown_block_from_attestation(block_root, *peer_id); - } - // Disconnect the first peer only, which is the one handling the request - rig.peer_disconnected(disconnecting_peer); - rig.rpc_error_all_active_requests(disconnecting_peer); - rig.assert_single_lookups_count(1); -} - -#[test] -fn test_lookup_add_peers_to_parent() { - let mut r = TestRig::test_setup(); - let peer_id_1 = r.new_connected_peer(); - let peer_id_2 = r.new_connected_peer(); - let blocks = r.rand_blockchain(5); - let last_block_root = blocks.last().unwrap().canonical_root(); - // Create a chain of lookups - for block in &blocks { - r.trigger_unknown_parent_block(peer_id_1, block.clone()); - } - r.trigger_unknown_block_from_attestation(last_block_root, peer_id_2); - for block in blocks.iter().take(blocks.len() - 1) { - // Parent has the original unknown parent event peer + new peer - r.assert_lookup_peers(block.canonical_root(), vec![peer_id_1, peer_id_2]); - } - // Child lookup only has the unknown attestation peer - r.assert_lookup_peers(last_block_root, vec![peer_id_2]); -} - -#[test] -fn test_skip_creating_ignored_parent_lookup() { - let mut rig = TestRig::test_setup(); - let (_, block, parent_root, _) = rig.rand_block_and_parent(); - let peer_id = rig.new_connected_peer(); - rig.insert_ignored_chain(parent_root); - rig.trigger_unknown_parent_block(peer_id, block.into()); - rig.expect_no_penalty_for(peer_id); +#[tokio::test] +async fn test_skip_creating_ignored_parent_lookup() { + let mut r = TestRig::default(); + r.build_chain(2).await; + r.insert_ignored_chain(r.block_root_at_slot(1)); 
+ r.trigger_with_last_block(); + r.simulate(SimulateConfig::happy_path()).await; + r.assert_no_penalties(); // Both current and parent lookup should not be created - rig.expect_no_active_lookups(); + r.assert_no_active_lookups(); } -#[test] -fn test_single_block_lookup_ignored_response() { - let mut rig = TestRig::test_setup(); +#[tokio::test] +/// Assert that if the oldest block in a chain is already imported (DuplicateFullyImported), +/// the remaining blocks in the chain are still processed successfully. This tests a race +/// condition where a block gets imported elsewhere while the lookup is processing. +/// +/// The processing sequence is: +/// - Block 3: UnknownParent (needs block 2) +/// - Block 2: UnknownParent (needs block 1) +/// - Block 1: About to be processed, but gets imported via gossip (race condition) +/// - Block 1: DuplicateFullyImported (already in chain from race) +/// - Block 2: Import ok (parent block 1 is available) +/// - Block 3: Import ok (parent block 2 is available) +async fn test_same_chain_race_condition() { + let mut r = TestRig::default(); + r.build_chain(3).await; - let block = rig.rand_block(); - let peer_id = rig.new_connected_peer(); + let block_1_root = r.block_root_at_slot(1); - // Trigger the request - rig.trigger_unknown_block_from_attestation(block.canonical_root(), peer_id); - let id = rig.expect_block_lookup_request(block.canonical_root()); + // Trigger a lookup with block 3. This creates a parent lookup chain that will + // request blocks 3 → 2 → 1. + r.trigger_with_block_at_slot(3); - // The peer provides the correct block, should not be penalized. Now the block should be sent - // for processing. - rig.single_lookup_block_response(id, peer_id, Some(block.into())); - rig.expect_empty_network(); - rig.expect_block_process(ResponseType::Block); + // Configure simulate to import block 1 right before it's processed by the lookup. 
+ // This simulates the race condition where block 1 arrives via gossip at the same + // time the lookup is trying to process it. + r.simulate(SimulateConfig::new().with_import_block_before_process(block_1_root)) + .await; - // The request should still be active. - assert_eq!(rig.active_single_lookups_count(), 1); - - // Send the stream termination. Peer should have not been penalized, and the request removed - // after processing. - rig.single_lookup_block_response(id, peer_id, None); - // Send an Ignored response, the request should be dropped - rig.single_block_component_processed(id.lookup_id, BlockProcessingResult::Ignored); - rig.expect_no_active_lookups_empty_network(); + // The chain should complete successfully with head at slot 3, proving that + // the lookup correctly handled the DuplicateFullyImported for block 1 and + // continued processing blocks 2 and 3. + r.assert_head_slot(3); + r.assert_successful_lookup_sync(); } -#[test] -fn test_parent_lookup_ignored_response() { - let mut rig = TestRig::test_setup(); - - let (parent, block, parent_root, block_root) = rig.rand_block_and_parent(); - let peer_id = rig.new_connected_peer(); - - // Trigger the request - rig.trigger_unknown_parent_block(peer_id, block.clone().into()); - let id = rig.expect_block_parent_request(parent_root); - // Note: single block lookup for current `block` does not trigger any request because it does - // not have blobs, and the block is already cached - - // Peer sends the right block, it should be sent for processing. Peer should not be penalized. - rig.parent_lookup_block_response(id, peer_id, Some(parent.into())); - rig.expect_block_process(ResponseType::Block); - rig.expect_empty_network(); - - // Return an Ignored result. The request should be dropped - rig.parent_block_processed(block_root, BlockProcessingResult::Ignored); - rig.expect_empty_network(); - rig.expect_no_active_lookups(); -} - -/// This is a regression test. 
-#[test] -fn test_same_chain_race_condition() { - let mut rig = TestRig::test_setup(); - - // if we use one or two blocks it will match on the hash or the parent hash, so make a longer - // chain. - let depth = 4; - let mut blocks = rig.rand_blockchain(depth); - let peer_id = rig.new_connected_peer(); - let trigger_block = blocks.pop().unwrap(); - let chain_hash = trigger_block.canonical_root(); - rig.trigger_unknown_parent_block(peer_id, trigger_block.clone()); - - for (i, block) in blocks.clone().into_iter().rev().enumerate() { - let id = rig.expect_block_parent_request(block.canonical_root()); - // the block - rig.parent_lookup_block_response(id, peer_id, Some(block.clone())); - // the stream termination - rig.parent_lookup_block_response(id, peer_id, None); - // the processing request - rig.expect_block_process(ResponseType::Block); - // the processing result - if i + 2 == depth { - rig.log(&format!("Block {i} was removed and is already known")); - rig.parent_block_processed( - chain_hash, - BlockError::DuplicateFullyImported(block.canonical_root()).into(), - ) - } else { - rig.log(&format!("Block {i} ParentUnknown")); - rig.parent_block_processed( - chain_hash, - BlockProcessingResult::Err(BlockError::ParentUnknown { - parent_root: block.parent_root(), - }), - ) - } - } - - // Try to get this block again while the chain is being processed. We should not request it again. 
- let peer_id = rig.new_connected_peer(); - rig.trigger_unknown_parent_block(peer_id, trigger_block.clone()); - rig.expect_empty_network(); - - // Add a peer to the tip child lookup which has zero peers - rig.trigger_unknown_block_from_attestation(trigger_block.canonical_root(), peer_id); - - rig.log("Processing succeeds, now the rest of the chain should be sent for processing."); - for block in blocks.iter().skip(1).chain(&[trigger_block]) { - rig.expect_parent_chain_process(); - rig.single_block_component_processed_imported(block.canonical_root()); - } - rig.expect_no_active_lookups_empty_network(); -} - -#[test] -fn block_in_da_checker_skips_download() { - let Some(mut r) = TestRig::test_setup_after_deneb_before_fulu() else { +#[tokio::test] +/// Assert that if the lookup's block is in the da_checker we don't download it again +async fn block_in_da_checker_skips_download() { + // Only in Deneb, as the block needs blobs to remain in the da_checker + let Some(mut r) = TestRig::new_after_deneb_before_fulu() else { return; }; - let (block, blobs) = r.rand_block_and_blobs(NumBlobs::Number(1)); - let block_root = block.canonical_root(); - let peer_id = r.new_connected_peer(); - r.insert_block_to_da_checker(block.into()); - r.trigger_unknown_block_from_attestation(block_root, peer_id); - // Should not trigger block request - let id = r.expect_blob_lookup_request(block_root); - r.expect_empty_network(); - // Resolve blob and expect lookup completed - r.complete_single_lookup_blob_lookup_valid(id, peer_id, blobs, true); - r.expect_no_active_lookups(); + // Add block to da_checker + // Complete test with happy path + // Assert that there were no requests for blocks + r.build_chain(1).await; + r.insert_block_to_da_chain_and_assert_missing_componens(r.block_at_slot(1)) + .await; + r.trigger_with_block_at_slot(1); + r.simulate(SimulateConfig::happy_path()).await; + r.assert_successful_lookup_sync(); + assert_eq!( + r.requests + .iter() + .filter(|(request, _)| 
matches!(request, RequestType::BlocksByRoot(_))) + .collect::>(), + Vec::<&(RequestType, AppRequestId)>::new(), + "There should be no block requests" + ); } -#[test] -fn block_in_processing_cache_becomes_invalid() { - let Some(mut r) = TestRig::test_setup_after_deneb_before_fulu() else { +#[tokio::test] +async fn block_in_processing_cache_becomes_invalid() { + let Some(mut r) = TestRig::new_after_deneb_before_fulu() else { return; }; - let (block, blobs) = r.rand_block_and_blobs(NumBlobs::Number(1)); - let block_root = block.canonical_root(); - let peer_id = r.new_connected_peer(); - r.insert_block_to_availability_cache(block.clone().into()); - r.trigger_unknown_block_from_attestation(block_root, peer_id); - // Should trigger blob request - let id = r.expect_blob_lookup_request(block_root); - // Should not trigger block request - r.expect_empty_network(); + r.build_chain(1).await; + let block = r.block_at_slot(1); + r.insert_block_to_da_checker_as_pre_execution(block.clone()); + r.trigger_with_last_block(); + r.simulate(SimulateConfig::happy_path()).await; + r.assert_pending_lookup_sync(); + // Here the only active lookup is waiting for the block to finish processing + // Simulate invalid block, removing it from processing cache - r.simulate_block_gossip_processing_becomes_invalid(block_root); + r.simulate_block_gossip_processing_becomes_invalid(block.canonical_root()); // Should download block, then issue blobs request - r.complete_lookup_block_download(block); - // Should not trigger block or blob request - r.expect_empty_network(); - r.complete_lookup_block_import_valid(block_root, false); - // Resolve blob and expect lookup completed - r.complete_single_lookup_blob_lookup_valid(id, peer_id, blobs, true); - r.expect_no_active_lookups(); + r.simulate(SimulateConfig::happy_path()).await; + r.assert_successful_lookup_sync(); } -#[test] -fn block_in_processing_cache_becomes_valid_imported() { - let Some(mut r) = TestRig::test_setup_after_deneb_before_fulu() else { 
+#[tokio::test] +async fn block_in_processing_cache_becomes_valid_imported() { + let Some(mut r) = TestRig::new_after_deneb_before_fulu() else { return; }; - let (block, blobs) = r.rand_block_and_blobs(NumBlobs::Number(1)); - let block_root = block.canonical_root(); - let peer_id = r.new_connected_peer(); - r.insert_block_to_availability_cache(block.clone().into()); - r.trigger_unknown_block_from_attestation(block_root, peer_id); - // Should trigger blob request - let id = r.expect_blob_lookup_request(block_root); - // Should not trigger block request - r.expect_empty_network(); + r.build_chain(1).await; + let block = r.block_at_slot(1); + r.insert_block_to_da_checker_as_pre_execution(block.clone()); + r.trigger_with_last_block(); + r.simulate(SimulateConfig::happy_path()).await; + r.assert_pending_lookup_sync(); + // Here the only active lookup is waiting for the block to finish processing + // Resolve the block from processing step - r.simulate_block_gossip_processing_becomes_valid_missing_components(block.into()); + r.simulate_block_gossip_processing_becomes_valid(block) + .await; // Should not trigger block or blob request - r.expect_empty_network(); + r.assert_empty_network(); // Resolve blob and expect lookup completed - r.complete_single_lookup_blob_lookup_valid(id, peer_id, blobs, true); - r.expect_no_active_lookups(); + r.assert_no_active_lookups(); } // IGNORE: wait for change that delays blob fetching to knowing the block -#[ignore] -#[test] -fn blobs_in_da_checker_skip_download() { - let Some(mut r) = TestRig::test_setup_after_deneb_before_fulu() else { +#[tokio::test] +async fn blobs_in_da_checker_skip_download() { + let Some(mut r) = TestRig::new_after_deneb_before_fulu() else { return; }; - let (block, blobs) = r.rand_block_and_blobs(NumBlobs::Number(1)); - let block_root = block.canonical_root(); - let peer_id = r.new_connected_peer(); - for blob in blobs { - r.insert_blob_to_da_checker(blob); + r.build_chain(1).await; + let block = 
r.get_last_block().clone(); + let blobs = block + .block_data() + .and_then(|d| d.blobs()) + .expect("block with no blobs"); + for blob in &blobs { + r.insert_blob_to_da_checker(blob.clone()); } - r.trigger_unknown_block_from_attestation(block_root, peer_id); - // Should download and process the block - r.complete_single_lookup_block_valid(block, true); - // Should not trigger blob request - r.expect_empty_network(); - r.expect_no_active_lookups(); + r.trigger_with_last_block(); + r.simulate(SimulateConfig::happy_path()).await; + + r.assert_successful_lookup_sync(); + assert_eq!( + r.requests + .iter() + .filter(|(request, _)| matches!(request, RequestType::BlobsByRoot(_))) + .collect::>(), + Vec::<&(RequestType, AppRequestId)>::new(), + "There should be no blob requests" + ); } -#[test] -fn custody_lookup_happy_path() { - let Some(mut r) = TestRig::test_setup_after_fulu() else { +macro_rules! fulu_peer_matrix_tests { + ( + [$($name:ident => $variant:expr),+ $(,)?] + ) => { + paste::paste! 
{ + $( + #[tokio::test] + async fn []() { + custody_lookup_happy_path($variant).await; + } + + #[tokio::test] + async fn []() { + custody_lookup_some_custody_failures($variant).await; + } + + #[tokio::test] + async fn []() { + custody_lookup_permanent_custody_failures($variant).await; + } + )+ + } + }; +} + +fulu_peer_matrix_tests!( + [ + we_supernode_them_supernode => FuluTestType::WeSupernodeThemSupernode, + we_supernode_them_fullnodes => FuluTestType::WeSupernodeThemFullnodes, + we_fullnode_them_supernode => FuluTestType::WeFullnodeThemSupernode, + we_fullnode_them_fullnodes => FuluTestType::WeFullnodeThemFullnodes, + ] +); + +async fn custody_lookup_happy_path(test_type: FuluTestType) { + let Some(mut r) = TestRig::new_fulu_peer_test(test_type) else { return; }; - let spec = E::default_spec(); + r.build_chain(1).await; r.new_connected_peers_for_peerdas(); - let (block, data_columns) = r.rand_block_and_data_columns(); - let block_root = block.canonical_root(); - let peer_id = r.new_connected_peer(); - r.trigger_unknown_block_from_attestation(block_root, peer_id); - // Should not request blobs - let id = r.expect_block_lookup_request(block.canonical_root()); - r.complete_valid_block_request(id, block.into(), true); - // for each slot we download `samples_per_slot` columns - let sample_column_count = spec.samples_per_slot * spec.data_columns_per_group::(); - let custody_ids = - r.expect_only_data_columns_by_root_requests(block_root, sample_column_count as usize); - r.complete_valid_custody_request(custody_ids, data_columns, false); - r.expect_no_active_lookups(); + r.trigger_with_last_block(); + r.simulate(SimulateConfig::happy_path()).await; + r.assert_no_penalties(); + r.assert_successful_lookup_sync(); } +async fn custody_lookup_some_custody_failures(test_type: FuluTestType) { + let Some(mut r) = TestRig::new_fulu_peer_test(test_type) else { + return; + }; + let block_root = r.build_chain(1).await; + // Send the same trigger from all peers, so that the lookup 
has all peers + for peer in r.new_connected_peers_for_peerdas() { + r.trigger_unknown_block_from_attestation(block_root, peer); + } + let custody_columns = r.custody_columns(); + r.simulate(SimulateConfig::new().return_no_columns_on_indices(&custody_columns[..4], 3)) + .await; + r.assert_penalties_of_type("NotEnoughResponsesReturned"); + r.assert_successful_lookup_sync(); +} + +async fn custody_lookup_permanent_custody_failures(test_type: FuluTestType) { + let Some(mut r) = TestRig::new_fulu_peer_test(test_type) else { + return; + }; + let block_root = r.build_chain(1).await; + + // Send the same trigger from all peers, so that the lookup has all peers + for peer in r.new_connected_peers_for_peerdas() { + r.trigger_unknown_block_from_attestation(block_root, peer); + } + + let custody_columns = r.custody_columns(); + r.simulate( + SimulateConfig::new().return_no_columns_on_indices(&custody_columns[..2], usize::MAX), + ) + .await; + // Every peer that does not return a column is part of the lookup because it claimed to have + // imported the lookup, so we will penalize. 
+ r.assert_penalties_of_type("NotEnoughResponsesReturned"); + r.assert_failed_lookup_sync(); +} + +// We supernode, diverse peers +// We not supernode, diverse peers + // TODO(das): Test retries of DataColumnByRoot: // - Expect request for column_index // - Respond with bad data // - Respond with stream terminator // ^ The stream terminator should be ignored and not close the next retry -mod deneb_only { - use super::*; - use beacon_chain::{ - block_verification_types::{AsBlock, RpcBlock}, - data_availability_checker::AvailabilityCheckError, - }; - use std::collections::VecDeque; - - struct DenebTester { - rig: TestRig, - block: Arc>, - blobs: Vec>>, - parent_block_roots: Vec, - parent_block: VecDeque>>, - parent_blobs: VecDeque>>>, - unknown_parent_block: Option>>, - unknown_parent_blobs: Option>>>, - peer_id: PeerId, - block_req_id: Option, - parent_block_req_id: Option, - blob_req_id: Option, - parent_blob_req_id: Option, - slot: Slot, - block_root: Hash256, - } - - enum RequestTrigger { - AttestationUnknownBlock, - GossipUnknownParentBlock(usize), - GossipUnknownParentBlob(usize), - } - - impl RequestTrigger { - fn num_parents(&self) -> usize { - match self { - RequestTrigger::AttestationUnknownBlock => 0, - RequestTrigger::GossipUnknownParentBlock(num_parents) => *num_parents, - RequestTrigger::GossipUnknownParentBlob(num_parents) => *num_parents, - } - } - } - - impl DenebTester { - fn new(request_trigger: RequestTrigger) -> Option { - let Some(mut rig) = TestRig::test_setup_after_deneb_before_fulu() else { - return None; - }; - let (block, blobs) = rig.rand_block_and_blobs(NumBlobs::Random); - let mut block = Arc::new(block); - let mut blobs = blobs.into_iter().map(Arc::new).collect::>(); - let slot = block.slot(); - - let num_parents = request_trigger.num_parents(); - let mut parent_block_chain = VecDeque::with_capacity(num_parents); - let mut parent_blobs_chain = VecDeque::with_capacity(num_parents); - let mut parent_block_roots = vec![]; - for _ in 
0..num_parents { - // Set the current block as the parent. - let parent_root = block.canonical_root(); - let parent_block = block.clone(); - let parent_blobs = blobs.clone(); - parent_block_chain.push_front(parent_block); - parent_blobs_chain.push_front(parent_blobs); - parent_block_roots.push(parent_root); - - // Create the next block. - let (child_block, child_blobs) = - rig.block_with_parent_and_blobs(parent_root, NumBlobs::Random); - let mut child_block = Arc::new(child_block); - let mut child_blobs = child_blobs.into_iter().map(Arc::new).collect::>(); - - // Update the new block to the current block. - std::mem::swap(&mut child_block, &mut block); - std::mem::swap(&mut child_blobs, &mut blobs); - } - let block_root = block.canonical_root(); - - let peer_id = rig.new_connected_peer(); - - // Trigger the request - let (block_req_id, blob_req_id, parent_block_req_id, parent_blob_req_id) = - match request_trigger { - RequestTrigger::AttestationUnknownBlock => { - rig.send_sync_message(SyncMessage::UnknownBlockHashFromAttestation( - peer_id, block_root, - )); - let block_req_id = rig.expect_block_lookup_request(block_root); - (Some(block_req_id), None, None, None) - } - RequestTrigger::GossipUnknownParentBlock { .. } => { - rig.send_sync_message(SyncMessage::UnknownParentBlock( - peer_id, - block.clone(), - block_root, - )); - - let parent_root = block.parent_root(); - let parent_block_req_id = rig.expect_block_parent_request(parent_root); - rig.expect_empty_network(); // expect no more requests - (None, None, Some(parent_block_req_id), None) - } - RequestTrigger::GossipUnknownParentBlob { .. 
} => { - let single_blob = blobs.first().cloned().unwrap(); - let parent_root = single_blob.block_parent_root(); - rig.send_sync_message(SyncMessage::UnknownParentBlob(peer_id, single_blob)); - - let parent_block_req_id = rig.expect_block_parent_request(parent_root); - rig.expect_empty_network(); // expect no more requests - (None, None, Some(parent_block_req_id), None) - } - }; - - Some(Self { - rig, - block, - blobs, - parent_block: parent_block_chain, - parent_blobs: parent_blobs_chain, - parent_block_roots, - unknown_parent_block: None, - unknown_parent_blobs: None, - peer_id, - block_req_id, - parent_block_req_id, - blob_req_id, - parent_blob_req_id, - slot, - block_root, - }) - } - - fn trigger_unknown_block_from_attestation(mut self) -> Self { - let block_root = self.block.canonical_root(); - self.rig - .trigger_unknown_block_from_attestation(block_root, self.peer_id); - self - } - - fn parent_block_response(mut self) -> Self { - self.rig.expect_empty_network(); - let block = self.parent_block.pop_front().unwrap().clone(); - let _ = self.unknown_parent_block.insert(block.clone()); - self.rig.parent_lookup_block_response( - self.parent_block_req_id.expect("parent request id"), - self.peer_id, - Some(block), - ); - - self.rig.assert_parent_lookups_count(1); - self - } - - fn parent_block_response_expect_blobs(mut self) -> Self { - self.rig.expect_empty_network(); - let block = self.parent_block.pop_front().unwrap().clone(); - let _ = self.unknown_parent_block.insert(block.clone()); - self.rig.parent_lookup_block_response( - self.parent_block_req_id.expect("parent request id"), - self.peer_id, - Some(block), - ); - - // Expect blobs request after sending block - let s = self.expect_parent_blobs_request(); - - s.rig.assert_parent_lookups_count(1); - s - } - - fn parent_blob_response(mut self) -> Self { - let blobs = self.parent_blobs.pop_front().unwrap(); - let _ = self.unknown_parent_blobs.insert(blobs.clone()); - for blob in &blobs { - 
self.rig.parent_lookup_blob_response( - self.parent_blob_req_id.expect("parent blob request id"), - self.peer_id, - Some(blob.clone()), - ); - assert_eq!(self.rig.active_parent_lookups_count(), 1); - } - self.rig.parent_lookup_blob_response( - self.parent_blob_req_id.expect("parent blob request id"), - self.peer_id, - None, - ); - - self - } - - fn block_response_triggering_process(self) -> Self { - let mut me = self.block_response_and_expect_blob_request(); - me.rig.expect_block_process(ResponseType::Block); - - // The request should still be active. - assert_eq!(me.rig.active_single_lookups_count(), 1); - me - } - - fn block_response_and_expect_blob_request(mut self) -> Self { - // The peer provides the correct block, should not be penalized. Now the block should be sent - // for processing. - self.rig.single_lookup_block_response( - self.block_req_id.expect("block request id"), - self.peer_id, - Some(self.block.clone()), - ); - // After responding with block the node will issue a blob request - let mut s = self.expect_blobs_request(); - - s.rig.expect_empty_network(); - - // The request should still be active. 
- s.rig.assert_lookup_is_active(s.block.canonical_root()); - s - } - - fn blobs_response(mut self) -> Self { - self.rig - .log(&format!("blobs response {}", self.blobs.len())); - for blob in &self.blobs { - self.rig.single_lookup_blob_response( - self.blob_req_id.expect("blob request id"), - self.peer_id, - Some(blob.clone()), - ); - self.rig - .assert_lookup_is_active(self.block.canonical_root()); - } - self.rig.single_lookup_blob_response( - self.blob_req_id.expect("blob request id"), - self.peer_id, - None, - ); - self - } - - fn blobs_response_was_valid(mut self) -> Self { - self.rig.expect_empty_network(); - if !self.blobs.is_empty() { - self.rig.expect_block_process(ResponseType::Blob); - } - self - } - - fn expect_empty_beacon_processor(mut self) -> Self { - self.rig.expect_empty_beacon_processor(); - self - } - - fn empty_block_response(mut self) -> Self { - self.rig.single_lookup_block_response( - self.block_req_id.expect("block request id"), - self.peer_id, - None, - ); - self - } - - fn empty_blobs_response(mut self) -> Self { - self.rig.single_lookup_blob_response( - self.blob_req_id.expect("blob request id"), - self.peer_id, - None, - ); - self - } - - fn empty_parent_blobs_response(mut self) -> Self { - self.rig.parent_lookup_blob_response( - self.parent_blob_req_id.expect("blob request id"), - self.peer_id, - None, - ); - self - } - - fn block_missing_components(mut self) -> Self { - self.rig.single_block_component_processed( - self.block_req_id.expect("block request id").lookup_id, - BlockProcessingResult::Ok(AvailabilityProcessingStatus::MissingComponents( - self.block.slot(), - self.block_root, - )), - ); - self.rig.expect_empty_network(); - self.rig.assert_single_lookups_count(1); - self - } - - fn blob_imported(mut self) -> Self { - self.rig.single_blob_component_processed( - self.blob_req_id.expect("blob request id").lookup_id, - BlockProcessingResult::Ok(AvailabilityProcessingStatus::Imported(self.block_root)), - ); - 
self.rig.expect_empty_network(); - self.rig.assert_single_lookups_count(0); - self - } - - fn block_imported(mut self) -> Self { - // Missing blobs should be the request is not removed, the outstanding blobs request should - // mean we do not send a new request. - self.rig.single_block_component_processed( - self.block_req_id - .or(self.blob_req_id) - .expect("block request id") - .lookup_id, - BlockProcessingResult::Ok(AvailabilityProcessingStatus::Imported(self.block_root)), - ); - self.rig.expect_empty_network(); - self.rig.assert_single_lookups_count(0); - self - } - - fn parent_block_imported(mut self) -> Self { - let parent_root = *self.parent_block_roots.first().unwrap(); - self.rig - .log(&format!("parent_block_imported {parent_root:?}")); - self.rig.parent_block_processed( - self.block_root, - BlockProcessingResult::Ok(AvailabilityProcessingStatus::Imported(parent_root)), - ); - self.rig.expect_no_requests_for(parent_root); - self.rig.assert_parent_lookups_count(0); - self - } - - fn parent_block_missing_components(mut self) -> Self { - let parent_root = *self.parent_block_roots.first().unwrap(); - self.rig - .log(&format!("parent_block_missing_components {parent_root:?}")); - self.rig.parent_block_processed( - self.block_root, - BlockProcessingResult::Ok(AvailabilityProcessingStatus::MissingComponents( - Slot::new(0), - parent_root, - )), - ); - self.rig.expect_no_requests_for(parent_root); - self - } - - fn parent_blob_imported(mut self) -> Self { - let parent_root = *self.parent_block_roots.first().unwrap(); - self.rig - .log(&format!("parent_blob_imported {parent_root:?}")); - self.rig.parent_blob_processed( - self.block_root, - BlockProcessingResult::Ok(AvailabilityProcessingStatus::Imported(parent_root)), - ); - - self.rig.expect_no_requests_for(parent_root); - self.rig.assert_parent_lookups_count(0); - self - } - - fn parent_block_unknown_parent(mut self) -> Self { - self.rig.log("parent_block_unknown_parent"); - let block = 
self.unknown_parent_block.take().unwrap(); - // Now this block is the one we expect requests from - self.block = block.clone(); - let block = RpcBlock::new( - block, - None, - &self.rig.harness.chain.data_availability_checker, - self.rig.harness.chain.spec.clone(), - ) - .unwrap(); - self.rig.parent_block_processed( - self.block_root, - BlockProcessingResult::Err(BlockError::ParentUnknown { - parent_root: block.parent_root(), - }), - ); - assert_eq!(self.rig.active_parent_lookups_count(), 1); - self - } - - fn invalid_parent_processed(mut self) -> Self { - self.rig.parent_block_processed( - self.block_root, - BlockProcessingResult::Err(BlockError::BlockSlotLimitReached), - ); - assert_eq!(self.rig.active_parent_lookups_count(), 1); - self - } - - fn invalid_block_processed(mut self) -> Self { - self.rig.single_block_component_processed( - self.block_req_id.expect("block request id").lookup_id, - BlockProcessingResult::Err(BlockError::BlockSlotLimitReached), - ); - self.rig.assert_single_lookups_count(1); - self - } - - fn invalid_blob_processed(mut self) -> Self { - self.rig.log("invalid_blob_processed"); - self.rig.single_blob_component_processed( - self.blob_req_id.expect("blob request id").lookup_id, - BlockProcessingResult::Err(BlockError::AvailabilityCheck( - AvailabilityCheckError::InvalidBlobs(kzg::Error::KzgVerificationFailed), - )), - ); - self.rig.assert_single_lookups_count(1); - self - } - - fn missing_components_from_block_request(mut self) -> Self { - self.rig.single_block_component_processed( - self.block_req_id.expect("block request id").lookup_id, - BlockProcessingResult::Ok(AvailabilityProcessingStatus::MissingComponents( - self.slot, - self.block_root, - )), - ); - // Add block to da_checker so blobs request can continue - self.rig.insert_block_to_da_checker(self.block.clone()); - - self.rig.assert_single_lookups_count(1); - self - } - - fn complete_current_block_and_blobs_lookup(self) -> Self { - self.expect_block_request() - 
.block_response_and_expect_blob_request() - .blobs_response() - // TODO: Should send blobs for processing - .expect_block_process() - .block_imported() - } - - fn log(self, msg: &str) -> Self { - self.rig.log(msg); - self - } - - fn parent_block_then_empty_parent_blobs(self) -> Self { - self.log( - " Return empty blobs for parent, block errors with missing components, downscore", - ) - .parent_block_response() - .expect_parent_blobs_request() - .empty_parent_blobs_response() - .expect_penalty("NotEnoughResponsesReturned") - .log("Re-request parent blobs, succeed and import parent") - .expect_parent_blobs_request() - .parent_blob_response() - .expect_block_process() - .parent_block_missing_components() - // Insert new peer into child request before completing parent - .trigger_unknown_block_from_attestation() - .parent_blob_imported() - } - - fn expect_penalty(mut self, expect_penalty_msg: &'static str) -> Self { - self.rig.expect_penalty(self.peer_id, expect_penalty_msg); - self - } - fn expect_no_penalty(mut self) -> Self { - self.rig.expect_empty_network(); - self - } - fn expect_no_penalty_and_no_requests(mut self) -> Self { - self.rig.expect_empty_network(); - self - } - fn expect_block_request(mut self) -> Self { - let id = self - .rig - .expect_block_lookup_request(self.block.canonical_root()); - self.block_req_id = Some(id); - self - } - fn expect_blobs_request(mut self) -> Self { - let id = self - .rig - .expect_blob_lookup_request(self.block.canonical_root()); - self.blob_req_id = Some(id); - self - } - fn expect_parent_block_request(mut self) -> Self { - let id = self - .rig - .expect_block_parent_request(self.block.parent_root()); - self.parent_block_req_id = Some(id); - self - } - fn expect_parent_blobs_request(mut self) -> Self { - let id = self - .rig - .expect_blob_parent_request(self.block.parent_root()); - self.parent_blob_req_id = Some(id); - self - } - fn expect_no_blobs_request(mut self) -> Self { - self.rig.expect_empty_network(); - self - } - 
fn expect_no_block_request(mut self) -> Self { - self.rig.expect_empty_network(); - self - } - fn invalidate_blobs_too_few(mut self) -> Self { - self.blobs.pop().expect("blobs"); - self - } - fn expect_block_process(mut self) -> Self { - self.rig.expect_block_process(ResponseType::Block); - self - } - fn expect_no_active_lookups(self) -> Self { - self.rig.expect_no_active_lookups(); - self - } - fn search_parent_dup(mut self) -> Self { - self.rig - .trigger_unknown_parent_block(self.peer_id, self.block.clone()); - self - } - } - - #[test] - fn single_block_and_blob_lookup_block_returned_first_attestation() { - let Some(tester) = DenebTester::new(RequestTrigger::AttestationUnknownBlock) else { - return; - }; - tester - .block_response_and_expect_blob_request() - .blobs_response() - .block_missing_components() // blobs not yet imported - .blobs_response_was_valid() - .blob_imported(); // now blobs resolve as imported - } - - #[test] - fn single_block_response_then_empty_blob_response_attestation() { - let Some(tester) = DenebTester::new(RequestTrigger::AttestationUnknownBlock) else { - return; - }; - tester - .block_response_and_expect_blob_request() - .missing_components_from_block_request() - .empty_blobs_response() - .expect_penalty("NotEnoughResponsesReturned") - .expect_blobs_request() - .expect_no_block_request(); - } - - #[test] - fn single_invalid_block_response_then_blob_response_attestation() { - let Some(tester) = DenebTester::new(RequestTrigger::AttestationUnknownBlock) else { - return; - }; - tester - .block_response_triggering_process() - .invalid_block_processed() - .expect_penalty("lookup_block_processing_failure") - .expect_block_request() - .expect_no_blobs_request() - .blobs_response() - // blobs not sent for processing until the block is processed - .expect_no_penalty_and_no_requests(); - } - - #[test] - fn single_block_response_then_invalid_blob_response_attestation() { - let Some(tester) = 
DenebTester::new(RequestTrigger::AttestationUnknownBlock) else { - return; - }; - tester - .block_response_triggering_process() - .missing_components_from_block_request() - .blobs_response() - .invalid_blob_processed() - .expect_penalty("lookup_blobs_processing_failure") - .expect_blobs_request() - .expect_no_block_request(); - } - - #[test] - fn single_block_response_then_too_few_blobs_response_attestation() { - let Some(tester) = DenebTester::new(RequestTrigger::AttestationUnknownBlock) else { - return; - }; - tester - .block_response_triggering_process() - .missing_components_from_block_request() - .invalidate_blobs_too_few() - .blobs_response() - .expect_penalty("NotEnoughResponsesReturned") - .expect_blobs_request() - .expect_no_block_request(); - } - - // Test peer returning block that has unknown parent, and a new lookup is created - #[test] - fn parent_block_unknown_parent() { - let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownParentBlock(1)) else { - return; - }; - tester - .expect_empty_beacon_processor() - .parent_block_response_expect_blobs() - .parent_blob_response() - .expect_block_process() - .parent_block_unknown_parent() - .expect_parent_block_request() - .expect_empty_beacon_processor(); - } - - // Test peer returning invalid (processing) block, expect retry - #[test] - fn parent_block_invalid_parent() { - let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownParentBlock(1)) else { - return; - }; - tester - .parent_block_response_expect_blobs() - .parent_blob_response() - .expect_block_process() - .invalid_parent_processed() - .expect_penalty("lookup_block_processing_failure") - .expect_parent_block_request() - .expect_empty_beacon_processor(); - } - - // Tests that if a peer does not respond with a block, we downscore and retry the block only - #[test] - fn empty_block_is_retried() { - let Some(tester) = DenebTester::new(RequestTrigger::AttestationUnknownBlock) else { - return; - }; - tester - .empty_block_response() - 
.expect_penalty("NotEnoughResponsesReturned") - .expect_block_request() - .expect_no_blobs_request() - .block_response_and_expect_blob_request() - .blobs_response() - .block_imported() - .expect_no_active_lookups(); - } - - #[test] - fn parent_block_then_empty_parent_blobs() { - let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownParentBlock(1)) else { - return; - }; - tester - .parent_block_then_empty_parent_blobs() - .log("resolve original block trigger blobs request and import") - // Should not have block request, it is cached - .expect_blobs_request() - // TODO: Should send blobs for processing - .block_imported() - .expect_no_active_lookups(); - } - - #[test] - fn parent_blob_unknown_parent() { - let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownParentBlob(1)) else { - return; - }; - tester - .expect_empty_beacon_processor() - .parent_block_response_expect_blobs() - .parent_blob_response() - .expect_block_process() - .parent_block_unknown_parent() - .expect_parent_block_request() - .expect_empty_beacon_processor(); - } - - #[test] - fn parent_blob_invalid_parent() { - let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownParentBlob(1)) else { - return; - }; - tester - .expect_empty_beacon_processor() - .parent_block_response_expect_blobs() - .parent_blob_response() - .expect_block_process() - .invalid_parent_processed() - .expect_penalty("lookup_block_processing_failure") - .expect_parent_block_request() - // blobs are not sent until block is processed - .expect_empty_beacon_processor(); - } - - #[test] - fn parent_block_and_blob_lookup_parent_returned_first_blob_trigger() { - let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownParentBlob(1)) else { - return; - }; - tester - .parent_block_response() - .expect_parent_blobs_request() - .parent_blob_response() - .expect_block_process() - .trigger_unknown_block_from_attestation() - .parent_block_imported() - .complete_current_block_and_blobs_lookup() - 
.expect_no_active_lookups(); - } - - #[test] - fn parent_block_then_empty_parent_blobs_blob_trigger() { - let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownParentBlob(1)) else { - return; - }; - tester - .parent_block_then_empty_parent_blobs() - .log("resolve original block trigger blobs request and import") - .complete_current_block_and_blobs_lookup() - .expect_no_active_lookups(); - } - - #[test] - fn parent_blob_unknown_parent_chain() { - let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownParentBlob(2)) else { - return; - }; - tester - .expect_empty_beacon_processor() - .parent_block_response_expect_blobs() - .parent_blob_response() - .expect_no_penalty() - .expect_block_process() - .parent_block_unknown_parent() - .expect_parent_block_request() - .expect_empty_beacon_processor() - .parent_block_response() - .expect_parent_blobs_request() - .parent_blob_response() - .expect_no_penalty() - .expect_block_process(); - } - - #[test] - fn unknown_parent_block_dup() { - let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownParentBlock(1)) else { - return; - }; - tester - .search_parent_dup() - .expect_no_blobs_request() - .expect_no_block_request(); - } - - #[test] - fn unknown_parent_blob_dup() { - let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownParentBlob(1)) else { - return; - }; - tester - .search_parent_dup() - .expect_no_blobs_request() - .expect_no_block_request(); - } - - // This test no longer applies, we don't issue requests for child lookups - // Keep for after updating rules on fetching blocks only first - #[ignore] - #[test] - fn no_peer_penalty_when_rpc_response_already_known_from_gossip() { - let Some(mut r) = TestRig::test_setup_after_deneb_before_fulu() else { - return; - }; - let (block, blobs) = r.rand_block_and_blobs(NumBlobs::Number(2)); - let block_root = block.canonical_root(); - let blob_0 = blobs[0].clone(); - let blob_1 = blobs[1].clone(); - let peer_a = r.new_connected_peer(); - let 
peer_b = r.new_connected_peer(); - // Send unknown parent block lookup - r.trigger_unknown_parent_block(peer_a, block.into()); - // Expect network request for blobs - let id = r.expect_blob_lookup_request(block_root); - // Peer responses with blob 0 - r.single_lookup_blob_response(id, peer_a, Some(blob_0.into())); - // Blob 1 is received via gossip unknown parent blob from a different peer - r.trigger_unknown_parent_blob(peer_b, blob_1.clone()); - // Original peer sends blob 1 via RPC - r.single_lookup_blob_response(id, peer_a, Some(blob_1.into())); - // Assert no downscore event for original peer - r.expect_no_penalty_for(peer_a); +// These `crypto_on` tests assert that the fake_crypto feature works as expected. We run only the +// `crypto_on` tests without the fake_crypto feature and make sure that processing fails, to +// assert that signatures and kzg proofs are checked +#[tokio::test] +async fn crypto_on_fail_with_invalid_block_signature() { + let mut r = TestRig::default(); + r.build_chain(1).await; + r.corrupt_last_block_signature(); + r.trigger_with_last_block(); + r.simulate(SimulateConfig::happy_path()).await; + if cfg!(feature = "fake_crypto") { + r.assert_successful_lookup_sync(); + r.assert_no_penalties(); + } else { + r.assert_failed_lookup_sync(); + r.assert_penalties_of_type("lookup_block_processing_failure"); + } +} + +#[tokio::test] +async fn crypto_on_fail_with_bad_blob_proposer_signature() { + let Some(mut r) = TestRig::new_after_deneb_before_fulu() else { + return; + }; + r.build_chain(1).await; + r.corrupt_last_blob_proposer_signature(); + r.trigger_with_last_block(); + r.simulate(SimulateConfig::happy_path()).await; + if cfg!(feature = "fake_crypto") { + r.assert_successful_lookup_sync(); + r.assert_no_penalties(); + } else { + r.assert_failed_lookup_sync(); + r.assert_penalties_of_type("lookup_blobs_processing_failure"); + } +} + +#[tokio::test] +async fn crypto_on_fail_with_bad_blob_kzg_proof() { + let Some(mut r) = 
TestRig::new_after_deneb_before_fulu() else { + return; + }; + r.build_chain(1).await; + r.corrupt_last_blob_kzg_proof(); + r.trigger_with_last_block(); + r.simulate(SimulateConfig::happy_path()).await; + if cfg!(feature = "fake_crypto") { + r.assert_successful_lookup_sync(); + r.assert_no_penalties(); + } else { + r.assert_failed_lookup_sync(); + r.assert_penalties_of_type("lookup_blobs_processing_failure"); + } +} + +#[tokio::test] +async fn crypto_on_fail_with_bad_column_proposer_signature() { + let Some(mut r) = TestRig::new_fulu_peer_test(FuluTestType::WeSupernodeThemSupernode) else { + return; + }; + r.build_chain(1).await; + r.corrupt_last_column_proposer_signature(); + r.trigger_with_last_block(); + r.simulate(SimulateConfig::happy_path()).await; + if cfg!(feature = "fake_crypto") { + r.assert_successful_lookup_sync(); + r.assert_no_penalties(); + } else { + r.assert_failed_lookup_sync(); + r.assert_penalties_of_type("lookup_custody_column_processing_failure"); + } +} + +#[tokio::test] +async fn crypto_on_fail_with_bad_column_kzg_proof() { + let Some(mut r) = TestRig::new_fulu_peer_test(FuluTestType::WeSupernodeThemSupernode) else { + return; + }; + r.build_chain(1).await; + r.corrupt_last_column_kzg_proof(); + r.trigger_with_last_block(); + r.simulate(SimulateConfig::happy_path()).await; + if cfg!(feature = "fake_crypto") { + r.assert_successful_lookup_sync(); + r.assert_no_penalties(); + } else { + r.assert_failed_lookup_sync(); + r.assert_penalties_of_type("lookup_custody_column_processing_failure"); } } diff --git a/beacon_node/network/src/sync/tests/mod.rs b/beacon_node/network/src/sync/tests/mod.rs index dcc7e3e49d..f00cf5841d 100644 --- a/beacon_node/network/src/sync/tests/mod.rs +++ b/beacon_node/network/src/sync/tests/mod.rs @@ -1,13 +1,19 @@ use crate::NetworkMessage; use crate::sync::SyncMessage; +use crate::sync::block_lookups::BlockLookupsMetrics; use crate::sync::manager::SyncManager; -use crate::sync::range_sync::RangeSyncType; +use 
crate::sync::tests::lookups::SimulateConfig; +use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::builder::Witness; +use beacon_chain::custody_context::NodeCustodyType; use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; use beacon_processor::WorkEvent; -use lighthouse_network::NetworkGlobals; +use lighthouse_network::rpc::RequestType; +use lighthouse_network::service::api_types::{AppRequestId, Id}; +use lighthouse_network::{NetworkGlobals, PeerId}; use rand_chacha::ChaCha20Rng; use slot_clock::ManualSlotClock; +use std::collections::{HashMap, HashSet}; use std::fs::OpenOptions; use std::io::Write; use std::sync::{Arc, Once}; @@ -16,7 +22,7 @@ use tokio::sync::mpsc; use tracing_subscriber::fmt::MakeWriter; use tracing_subscriber::layer::SubscriberExt; use tracing_subscriber::util::SubscriberInitExt; -use types::{ForkName, MinimalEthSpec as E}; +use types::{ForkName, Hash256, MinimalEthSpec as E, Slot}; mod lookups; mod range; @@ -58,6 +64,8 @@ struct TestRig { network_rx_queue: Vec>, /// Receiver for `SyncMessage` from the network sync_rx: mpsc::UnboundedReceiver>, + /// Stores all `SyncMessage`s received from `sync_rx` + sync_rx_queue: Vec>, /// To send `SyncMessage`. For sending RPC responses or block processing results to sync. sync_manager: SyncManager, /// To manipulate sync state and peer connection status @@ -68,6 +76,65 @@ struct TestRig { rng_08: rand_chacha_03::ChaCha20Rng, rng: ChaCha20Rng, fork_name: ForkName, + /// Blocks that will be used in the test but may not be known to `harness` yet. 
+ network_blocks_by_root: HashMap>, + network_blocks_by_slot: HashMap>, + penalties: Vec, + /// All seen lookups through the test run + seen_lookups: HashMap, + /// Registry of all requests done by the test + requests: Vec<(RequestType, AppRequestId)>, + /// Persistent config on how to complete request + complete_strategy: SimulateConfig, + /// Metrics values to allow a reset + initial_block_lookups_metrics: BlockLookupsMetrics, + /// Fulu test type + fulu_test_type: FuluTestType, +} + +enum FuluTestType { + WeSupernodeThemSupernode, + WeSupernodeThemFullnodes, + WeFullnodeThemSupernode, + WeFullnodeThemFullnodes, +} + +impl FuluTestType { + fn we_node_custody_type(&self) -> NodeCustodyType { + match self { + Self::WeSupernodeThemSupernode | Self::WeSupernodeThemFullnodes => { + NodeCustodyType::Supernode + } + Self::WeFullnodeThemSupernode | Self::WeFullnodeThemFullnodes => { + NodeCustodyType::Fullnode + } + } + } + + fn them_node_custody_type(&self) -> NodeCustodyType { + match self { + Self::WeSupernodeThemSupernode | Self::WeFullnodeThemSupernode => { + NodeCustodyType::Supernode + } + Self::WeSupernodeThemFullnodes | Self::WeFullnodeThemFullnodes => { + NodeCustodyType::Fullnode + } + } + } +} + +#[derive(Debug)] +struct SeenLookup { + /// Lookup's Id + id: Id, + block_root: Hash256, + seen_peers: HashSet, +} + +#[derive(Debug)] +struct ReportedPenalty { + pub peer_id: PeerId, + pub msg: &'static str, } // Environment variable to read if `fork_from_env` feature is enabled. 
diff --git a/beacon_node/network/src/sync/tests/range.rs b/beacon_node/network/src/sync/tests/range.rs index 6f129bc8f0..67395ccd25 100644 --- a/beacon_node/network/src/sync/tests/range.rs +++ b/beacon_node/network/src/sync/tests/range.rs @@ -185,7 +185,7 @@ impl TestRig { } #[track_caller] - fn expect_chain_segments(&mut self, count: usize) { + fn assert_chain_segments(&mut self, count: usize) { for i in 0..count { self.pop_received_processor_event(|ev| { (ev.work_type() == beacon_processor::WorkType::ChainSegment).then_some(()) @@ -235,7 +235,7 @@ impl TestRig { panic!("Should have a BlocksByRange request, filter {request_filter:?}: {e:?}") }); - let by_range_data_requests = if self.after_fulu() { + let by_range_data_requests = if self.is_after_fulu() { let mut data_columns_requests = vec![]; while let Ok(data_columns_request) = self.pop_received_network_event(|ev| match ev { NetworkMessage::SendRequest { @@ -254,7 +254,7 @@ impl TestRig { panic!("Found zero DataColumnsByRange requests, filter {request_filter:?}"); } ByRangeDataRequestIds::PostPeerDAS(data_columns_requests) - } else if self.after_deneb() { + } else if self.is_after_deneb() { let (id, peer) = self .pop_received_network_event(|ev| match ev { NetworkMessage::SendRequest { @@ -489,7 +489,7 @@ fn build_rpc_block( fn head_chain_removed_while_finalized_syncing() { // NOTE: this is a regression test. // Added in PR https://github.com/sigp/lighthouse/pull/2821 - let mut rig = TestRig::test_setup(); + let mut rig = TestRig::default(); // Get a peer with an advanced head let head_peer = rig.add_head_peer(); @@ -514,11 +514,11 @@ fn head_chain_removed_while_finalized_syncing() { async fn state_update_while_purging() { // NOTE: this is a regression test. 
// Added in PR https://github.com/sigp/lighthouse/pull/2827 - let mut rig = TestRig::test_setup_with_custody_type(NodeCustodyType::SemiSupernode); + let mut rig = TestRig::with_custody_type(NodeCustodyType::SemiSupernode); // Create blocks on a separate harness // SemiSupernode ensures enough columns are stored for sampling + custody RPC block validation - let mut rig_2 = TestRig::test_setup_with_custody_type(NodeCustodyType::SemiSupernode); + let mut rig_2 = TestRig::with_custody_type(NodeCustodyType::SemiSupernode); // Need to create blocks that can be inserted into the fork-choice and fit the "known // conditions" below. let head_peer_block = rig_2.create_canonical_block().await; @@ -550,7 +550,7 @@ async fn state_update_while_purging() { #[test] fn pause_and_resume_on_ee_offline() { - let mut rig = TestRig::test_setup(); + let mut rig = TestRig::default(); // add some peers let peer1 = rig.add_head_peer(); @@ -559,7 +559,7 @@ fn pause_and_resume_on_ee_offline() { // send the response to the request rig.find_and_complete_blocks_by_range_request(filter().peer(peer1).epoch(0)); // the beacon processor shouldn't have received any work - rig.expect_empty_processor(); + rig.assert_empty_processor(); // while the ee is offline, more peers might arrive. Add a new finalized peer. let _peer2 = rig.add_finalized_peer(); @@ -570,14 +570,14 @@ fn pause_and_resume_on_ee_offline() { // epoch for the other batch. So we can either filter by epoch of by sync type. rig.find_and_complete_blocks_by_range_request(filter().epoch(0)); // the beacon processor shouldn't have received any work - rig.expect_empty_processor(); + rig.assert_empty_processor(); // make the beacon processor available again. // update_execution_engine_state implicitly calls resume // now resume range, we should have two processing requests in the beacon processor. 
rig.update_execution_engine_state(EngineState::Online); // The head chain and finalized chain (2) should be in the processing queue - rig.expect_chain_segments(2); + rig.assert_chain_segments(2); } /// To attempt to finalize the peer's status finalized checkpoint we synced to its finalized epoch + @@ -587,7 +587,7 @@ const EXTRA_SYNCED_EPOCHS: u64 = 2 + 1; #[test] fn finalized_sync_enough_global_custody_peers_few_chain_peers() { // Run for all forks - let mut r = TestRig::test_setup(); + let mut r = TestRig::default(); let advanced_epochs: u64 = 2; let remote_info = r.finalized_remote_info_advanced_by(advanced_epochs.into()); @@ -604,7 +604,7 @@ fn finalized_sync_enough_global_custody_peers_few_chain_peers() { #[test] fn finalized_sync_not_enough_custody_peers_on_start() { - let mut r = TestRig::test_setup(); + let mut r = TestRig::default(); // Only run post-PeerDAS if !r.fork_name.fulu_enabled() { return; @@ -621,7 +621,7 @@ fn finalized_sync_not_enough_custody_peers_on_start() { // Because we don't have enough peers on all columns we haven't sent any request. // NOTE: There's a small chance that this single peer happens to custody exactly the set we // expect, in that case the test will fail. Find a way to make the test deterministic. - r.expect_empty_network(); + r.assert_empty_network(); // Generate enough peers and supernodes to cover all custody columns let peer_count = 100; diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 5c3e8058d9..61dccc9674 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -1246,9 +1246,12 @@ pub fn cli_app() -> Command { .display_order(0) ) .arg( - Arg::new("reconstruct-historic-states") - .long("reconstruct-historic-states") - .help("After a checkpoint sync, reconstruct historic states in the database. This requires syncing all the way back to genesis.") + Arg::new("archive") + .long("archive") + .alias("reconstruct-historic-states") + .help("Store all beacon states in the database. 
When checkpoint syncing, \ + states are reconstructed after backfill completes. This requires \ + syncing all the way back to genesis.") .action(ArgAction::SetTrue) .help_heading(FLAG_HEADER) .display_order(0) diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index e6091d9213..0a52bcef06 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -554,8 +554,8 @@ pub fn get_config( ClientGenesis::DepositContract }; - if cli_args.get_flag("reconstruct-historic-states") { - client_config.chain.reconstruct_historic_states = true; + if cli_args.get_flag("archive") { + client_config.chain.archive = true; client_config.chain.genesis_backfill = true; } diff --git a/book/src/advanced_checkpoint_sync.md b/book/src/advanced_checkpoint_sync.md index 7c30598928..0682310dcd 100644 --- a/book/src/advanced_checkpoint_sync.md +++ b/book/src/advanced_checkpoint_sync.md @@ -102,7 +102,7 @@ lack of historic states. _You do not need these states to run a staking node_, b for historical API calls (as used by block explorers and researchers). To run an archived node, you can opt-in to reconstructing all of the historic states by providing the -`--reconstruct-historic-states` flag to the beacon node at any point (before, during or after sync). +`--archive` flag to the beacon node at any point (before, during or after sync). The database keeps track of three markers to determine the availability of historic blocks and states: diff --git a/book/src/api_validator_inclusion.md b/book/src/api_validator_inclusion.md index eef563dcdb..d86483e0ea 100644 --- a/book/src/api_validator_inclusion.md +++ b/book/src/api_validator_inclusion.md @@ -8,7 +8,7 @@ These endpoints are not stable or included in the Ethereum consensus standard AP they are subject to change or removal without a change in major release version. -In order to apply these APIs, you need to have historical states information in the database of your node. 
This means adding the flag `--reconstruct-historic-states` in the beacon node. Once the state reconstruction process is completed, you can apply these APIs to any epoch. +In order to apply these APIs, you need to have historical states information in the database of your node. This means adding the flag `--archive` in the beacon node. Once the state reconstruction process is completed, you can apply these APIs to any epoch. ## Endpoints diff --git a/book/src/faq.md b/book/src/faq.md index c9bc53533f..5ba2c3407f 100644 --- a/book/src/faq.md +++ b/book/src/faq.md @@ -167,19 +167,19 @@ This is a known [bug](https://github.com/sigp/lighthouse/issues/3707) that will ### How can I construct only partial state history? -Lighthouse prunes finalized states by default. Nevertheless, it is quite often that users may be interested in the state history of a few epochs before finalization. To have access to these pruned states, Lighthouse typically requires a full reconstruction of states using the flag `--reconstruct-historic-states` (which will usually take a week). Partial state history can be achieved with some "tricks". Here are the general steps: +Lighthouse prunes finalized states by default. Nevertheless, it is quite often that users may be interested in the state history of a few epochs before finalization. To have access to these pruned states, Lighthouse typically requires a full reconstruction of states using the flag `--archive` (which will usually take a week). Partial state history can be achieved with some "tricks". Here are the general steps: 1. Delete the current database. You can do so with `--purge-db-force` or manually deleting the database from the data directory: `$datadir/beacon`. - 1. If you are interested in the states from the current slot and beyond, perform a checkpoint sync with the flag `--reconstruct-historic-states`, then you can skip the following and jump straight to Step 5 to check the database. + 1. 
If you are interested in the states from the current slot and beyond, perform a checkpoint sync with the flag `--archive`, then you can skip the following and jump straight to Step 5 to check the database. - If you are interested in the states before the current slot, identify the slot to perform a manual checkpoint sync. With the default configuration, this slot should be divisible by 221, as this is where a full state snapshot is stored. With the flag `--reconstruct-historic-states`, the state upper limit will be adjusted to the next full snapshot slot, a slot that satisfies: `slot % 2**21 == 0`. In other words, to have the state history available before the current slot, we have to checkpoint sync 221 slots before the next full snapshot slot. + If you are interested in the states before the current slot, identify the slot to perform a manual checkpoint sync. With the default configuration, this slot should be divisible by 221, as this is where a full state snapshot is stored. With the flag `--archive`, the state upper limit will be adjusted to the next full snapshot slot, a slot that satisfies: `slot % 2**21 == 0`. In other words, to have the state history available before the current slot, we have to checkpoint sync 221 slots before the next full snapshot slot. Example: Say the current mainnet is at slot `12000000`. As the next full state snapshot is at slot `12582912`, the slot that we want is slot `10485760`. You can calculate this (in Python) using `12000000 // 2**21 * 2**21`. 1. [Export](./advanced_checkpoint_sync.md#manual-checkpoint-sync) the blobs, block and state data for the slot identified in Step 2. This can be done from another beacon node that you have access to, or you could use any available public beacon API, e.g., [QuickNode](https://www.quicknode.com/docs/ethereum). - 1. 
Perform a [manual checkpoint sync](./advanced_checkpoint_sync.md#manual-checkpoint-sync) using the data from the previous step, and provide the flag `--reconstruct-historic-states`. + 1. Perform a [manual checkpoint sync](./advanced_checkpoint_sync.md#manual-checkpoint-sync) using the data from the previous step, and provide the flag `--archive`. 1. Check the database: @@ -193,9 +193,9 @@ Lighthouse prunes finalized states by default. Nevertheless, it is quite often t "state_upper_limit": "10485760", ``` -Lighthouse will now start to reconstruct historic states from slot `10485760`. At this point, if you do not want a full state reconstruction, you may remove the flag `--reconstruct-historic-states` (and restart). When the process is completed, you will have the state data from slot `10485760`. Going forward, Lighthouse will continue retaining all historical states newer than the snapshot. Eventually this can lead to increased disk usage, which presently can only be reduced by repeating the process starting from a more recent snapshot. +Lighthouse will now start to reconstruct historic states from slot `10485760`. At this point, if you do not want a full state reconstruction, you may remove the flag `--archive` (and restart). When the process is completed, you will have the state data from slot `10485760`. Going forward, Lighthouse will continue retaining all historical states newer than the snapshot. Eventually this can lead to increased disk usage, which presently can only be reduced by repeating the process starting from a more recent snapshot. -> Note: You may only be interested in very recent historic states. To do so, you may configure the full snapshot to be, for example, every 211 slots, see [database configuration](./advanced_database.md#hierarchical-state-diffs) for more details. This can be configured with the flag `--hierarchy-exponents 5,7,11` together with the flag `--reconstruct-historic-states`. 
This will affect the slot number in Step 2, while other steps remain the same. Note that this comes at the expense of a higher storage requirement. +> Note: You may only be interested in very recent historic states. To do so, you may configure the full snapshot to be, for example, every 211 slots, see [database configuration](./advanced_database.md#hierarchical-state-diffs) for more details. This can be configured with the flag `--hierarchy-exponents 5,7,11` together with the flag `--archive`. This will affect the slot number in Step 2, while other steps remain the same. Note that this comes at the expense of a higher storage requirement. > With `--hierarchy-exponents 5,7,11`, using the same example as above, the next full state snapshot is at slot `12001280`. So the slot to checkpoint sync from is: slot `11999232`. diff --git a/book/src/help_bn.md b/book/src/help_bn.md index beb74da376..cad21a3e78 100644 --- a/book/src/help_bn.md +++ b/book/src/help_bn.md @@ -439,6 +439,10 @@ Flags: intended for use by block builders, relays and developers. You should set a fee recipient on this BN and also consider adjusting the --prepare-payload-lookahead flag. + --archive + Store all beacon states in the database. When checkpoint syncing, + states are reconstructed after backfill completes. This requires + syncing all the way back to genesis. --builder-fallback-disable-checks This flag disables all checks related to chain health. This means the builder API will always be used for payload construction, regardless @@ -552,9 +556,6 @@ Flags: --purge-db-force If present, the chain database will be deleted without confirmation. Use with caution. - --reconstruct-historic-states - After a checkpoint sync, reconstruct historic states in the database. - This requires syncing all the way back to genesis. --reset-payload-statuses When present, Lighthouse will forget the payload statuses of any already-imported blocks. 
This can assist in the recovery from a diff --git a/common/eth2/Cargo.toml b/common/eth2/Cargo.toml index da8aba5ded..974508492a 100644 --- a/common/eth2/Cargo.toml +++ b/common/eth2/Cargo.toml @@ -8,19 +8,23 @@ edition = { workspace = true } default = [] lighthouse = ["proto_array", "eth2_keystore", "eip_3076", "zeroize"] events = ["reqwest-eventsource", "futures", "futures-util"] +network = ["libp2p-identity", "enr", "multiaddr"] [dependencies] bls = { workspace = true } context_deserialize = { workspace = true } educe = { workspace = true } eip_3076 = { workspace = true, optional = true } +enr = { version = "0.13.0", features = ["ed25519"], optional = true } eth2_keystore = { workspace = true, optional = true } ethereum_serde_utils = { workspace = true } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } futures = { workspace = true, optional = true } futures-util = { version = "0.3.8", optional = true } +libp2p-identity = { version = "0.2", features = ["peerid"], optional = true } mediatype = "0.19.13" +multiaddr = { version = "0.18.2", optional = true } pretty_reqwest_error = { workspace = true } proto_array = { workspace = true, optional = true } reqwest = { workspace = true } diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 7e4860a4cf..7a340bda6b 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -35,6 +35,8 @@ use educe::Educe; use futures::Stream; #[cfg(feature = "events")] use futures_util::StreamExt; +#[cfg(feature = "network")] +use libp2p_identity::PeerId; use reqwest::{ Body, IntoUrl, RequestBuilder, Response, header::{HeaderMap, HeaderValue}, @@ -1940,6 +1942,7 @@ impl BeaconNodeHttpClient { } /// `GET node/identity` + #[cfg(feature = "network")] pub async fn get_node_identity(&self) -> Result, Error> { let mut path = self.eth_path(V1)?; @@ -1987,9 +1990,10 @@ impl BeaconNodeHttpClient { } /// `GET node/peers/{peer_id}` + #[cfg(feature = "network")] pub async fn get_node_peers_by_id( 
&self, - peer_id: &str, + peer_id: PeerId, ) -> Result, Error> { let mut path = self.eth_path(V1)?; @@ -1997,7 +2001,7 @@ impl BeaconNodeHttpClient { .map_err(|()| Error::InvalidUrl(self.server.clone()))? .push("node") .push("peers") - .push(peer_id); + .push(&peer_id.to_string()); self.get(path).await } diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index af29df42d0..ca16fcd28a 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -9,7 +9,11 @@ use crate::{ }; use bls::{PublicKeyBytes, SecretKey, Signature, SignatureBytes}; use context_deserialize::ContextDeserialize; +#[cfg(feature = "network")] +use enr::{CombinedKey, Enr}; use mediatype::{MediaType, MediaTypeList, names}; +#[cfg(feature = "network")] +use multiaddr::Multiaddr; use reqwest::header::HeaderMap; use serde::{Deserialize, Deserializer, Serialize}; use serde_utils::quoted_u64::Quoted; @@ -559,12 +563,13 @@ pub struct ChainHeadData { pub execution_optimistic: Option, } +#[cfg(feature = "network")] #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct IdentityData { pub peer_id: String, - pub enr: String, - pub p2p_addresses: Vec, - pub discovery_addresses: Vec, + pub enr: Enr, + pub p2p_addresses: Vec, + pub discovery_addresses: Vec, pub metadata: MetaData, } diff --git a/consensus/state_processing/Cargo.toml b/consensus/state_processing/Cargo.toml index a08035d583..a83e443e80 100644 --- a/consensus/state_processing/Cargo.toml +++ b/consensus/state_processing/Cargo.toml @@ -5,9 +5,8 @@ authors = ["Paul Hauner ", "Michael Sproul >( let body = block.body(); if state.fork_name_unchecked().gloas_enabled() { withdrawals::gloas::process_withdrawals::(state, spec)?; - // TODO(EIP-7732): process execution payload bid + process_execution_payload_bid(state, block, verify_signatures, spec)?; } else { if state.fork_name_unchecked().capella_enabled() { withdrawals::capella_electra::process_withdrawals::( @@ -522,3 +527,162 @@ pub fn compute_timestamp_at_slot( 
.safe_mul(spec.get_slot_duration().as_secs()) .and_then(|since_genesis| state.genesis_time().safe_add(since_genesis)) } + +pub fn can_builder_cover_bid( + state: &BeaconState, + builder_index: BuilderIndex, + builder: &Builder, + bid_amount: u64, + spec: &ChainSpec, +) -> Result { + let builder_balance = builder.balance; + let pending_withdrawals_amount = + state.get_pending_balance_to_withdraw_for_builder(builder_index)?; + let min_balance = spec + .min_deposit_amount + .safe_add(pending_withdrawals_amount)?; + if builder_balance < min_balance { + Ok(false) + } else { + Ok(builder_balance.safe_sub(min_balance)? >= bid_amount) + } +} + +pub fn process_execution_payload_bid>( + state: &mut BeaconState, + block: BeaconBlockRef<'_, E, Payload>, + verify_signatures: VerifySignatures, + spec: &ChainSpec, +) -> Result<(), BlockProcessingError> { + // Verify the bid signature + let signed_bid = block.body().signed_execution_payload_bid()?; + + let bid = &signed_bid.message; + let amount = bid.value; + let builder_index = bid.builder_index; + + // For self-builds, amount must be zero regardless of withdrawal credential prefix + if builder_index == BUILDER_INDEX_SELF_BUILD { + block_verify!( + amount == 0, + ExecutionPayloadBidInvalid::SelfBuildNonZeroAmount.into() + ); + block_verify!( + signed_bid.signature.is_infinity(), + ExecutionPayloadBidInvalid::BadSignature.into() + ); + } else { + let builder = state.get_builder(builder_index)?; + + // Verify that the builder is active + block_verify!( + builder.is_active_at_finalized_epoch(state.finalized_checkpoint().epoch, spec), + ExecutionPayloadBidInvalid::BuilderNotActive(builder_index).into() + ); + + // Verify that the builder has funds to cover the bid + block_verify!( + can_builder_cover_bid(state, builder_index, builder, amount, spec)?, + ExecutionPayloadBidInvalid::InsufficientBalance { + builder_index, + builder_balance: builder.balance, + bid_value: amount, + } + .into() + ); + + if verify_signatures.is_true() { + 
block_verify!( + // We know this is NOT a self-build, so there MUST be a signature set (func does not + // return None). + execution_payload_bid_signature_set( + state, + |i| get_builder_pubkey_from_state(state, i), + signed_bid, + spec + )? + .ok_or(ExecutionPayloadBidInvalid::BadSignature)? + .verify(), + ExecutionPayloadBidInvalid::BadSignature.into() + ); + } + } + + // Verify commitments are under limit + let max_blobs_per_block = spec.max_blobs_per_block(state.current_epoch()) as usize; + block_verify!( + bid.blob_kzg_commitments.len() <= max_blobs_per_block, + ExecutionPayloadBidInvalid::ExcessBlobCommitments { + max: max_blobs_per_block, + bid: bid.blob_kzg_commitments.len(), + } + .into() + ); + + // Verify that the bid is for the current slot + block_verify!( + bid.slot == block.slot(), + ExecutionPayloadBidInvalid::SlotMismatch { + bid_slot: bid.slot, + block_slot: block.slot(), + } + .into() + ); + + // Verify that the bid is for the right parent block + let latest_block_hash = state.latest_block_hash()?; + block_verify!( + bid.parent_block_hash == *latest_block_hash, + ExecutionPayloadBidInvalid::ParentBlockHashMismatch { + state_block_hash: *latest_block_hash, + bid_parent_hash: bid.parent_block_hash, + } + .into() + ); + + block_verify!( + bid.parent_block_root == block.parent_root(), + ExecutionPayloadBidInvalid::ParentBlockRootMismatch { + block_parent_root: block.parent_root(), + bid_parent_root: bid.parent_block_root, + } + .into() + ); + + let expected_randao = *state.get_randao_mix(state.current_epoch())?; + block_verify!( + bid.prev_randao == expected_randao, + ExecutionPayloadBidInvalid::PrevRandaoMismatch { + expected: expected_randao, + bid: bid.prev_randao, + } + .into() + ); + + // Record the pending payment if there is some payment + if amount > 0 { + let pending_payment = BuilderPendingPayment { + weight: 0, + withdrawal: BuilderPendingWithdrawal { + fee_recipient: bid.fee_recipient, + amount, + builder_index, + }, + }; + + let 
payment_index = E::SlotsPerEpoch::to_usize() + .safe_add(bid.slot.as_usize().safe_rem(E::SlotsPerEpoch::to_usize())?)?; + + *state + .builder_pending_payments_mut()? + .get_mut(payment_index) + .ok_or(BlockProcessingError::BeaconStateError( + BeaconStateError::InvalidBuilderPendingPaymentsIndex(payment_index), + ))? = pending_payment; + } + + // Cache the execution bid + *state.latest_execution_payload_bid_mut()? = bid.clone(); + + Ok(()) +} diff --git a/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs b/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs index 9aa44137d8..e82ce537fd 100644 --- a/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs +++ b/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs @@ -170,6 +170,7 @@ where self.include_exits(block)?; self.include_sync_aggregate(block)?; self.include_bls_to_execution_changes(block)?; + self.include_execution_payload_bid(block)?; Ok(()) } @@ -357,6 +358,27 @@ where Ok(()) } + /// Include the signature of the block's execution payload bid. + pub fn include_execution_payload_bid>( + &mut self, + block: &'a SignedBeaconBlock, + ) -> Result<()> { + if let Ok(signed_execution_payload_bid) = + block.message().body().signed_execution_payload_bid() + { + // TODO(gloas): if we implement a global builder pubkey cache we need to inject it here + if let Some(signature_set) = execution_payload_bid_signature_set( + self.state, + |builder_index| get_builder_pubkey_from_state(self.state, builder_index), + signed_execution_payload_bid, + self.spec, + )? { + self.sets.push(signature_set); + } + } + Ok(()) + } + /// Verify all the signatures that have been included in `self`, returning `true` if and only if /// all the signatures are valid. 
/// diff --git a/consensus/state_processing/src/per_block_processing/errors.rs b/consensus/state_processing/src/per_block_processing/errors.rs index 5c1db9d732..53178a7a64 100644 --- a/consensus/state_processing/src/per_block_processing/errors.rs +++ b/consensus/state_processing/src/per_block_processing/errors.rs @@ -99,6 +99,9 @@ pub enum BlockProcessingError { IncorrectExpectedWithdrawalsVariant, MissingLastWithdrawal, PendingAttestationInElectra, + ExecutionPayloadBidInvalid { + reason: ExecutionPayloadBidInvalid, + }, /// Builder payment index out of bounds (Gloas) BuilderPaymentIndexOutOfBounds(usize), } @@ -157,6 +160,12 @@ impl From for BlockProcessingError { } } +impl From for BlockProcessingError { + fn from(reason: ExecutionPayloadBidInvalid) -> Self { + Self::ExecutionPayloadBidInvalid { reason } + } +} + impl From> for BlockProcessingError { fn from(e: BlockOperationError) -> BlockProcessingError { match e { @@ -452,6 +461,38 @@ pub enum ExitInvalid { PendingWithdrawalInQueue(u64), } +#[derive(Debug, PartialEq, Clone)] +pub enum ExecutionPayloadBidInvalid { + /// The validator set a non-zero amount for a self-build. + SelfBuildNonZeroAmount, + /// The signature is invalid. + BadSignature, + /// The builder is not active. + BuilderNotActive(u64), + /// The builder has insufficient balance to cover the bid + InsufficientBalance { + builder_index: u64, + builder_balance: u64, + bid_value: u64, + }, + /// Bid slot doesn't match block slot + SlotMismatch { bid_slot: Slot, block_slot: Slot }, + /// The bid's parent block hash doesn't match the state's latest block hash + ParentBlockHashMismatch { + state_block_hash: ExecutionBlockHash, + bid_parent_hash: ExecutionBlockHash, + }, + /// The bid's parent block root doesn't match the block's parent root + ParentBlockRootMismatch { + block_parent_root: Hash256, + bid_parent_root: Hash256, + }, + /// The bid's prev randao doesn't match the state. 
+ PrevRandaoMismatch { expected: Hash256, bid: Hash256 }, + /// The bid contains more than the maximum number of kzg blob commitments. + ExcessBlobCommitments { max: usize, bid: usize }, +} + #[derive(Debug, PartialEq, Clone)] pub enum BlsExecutionChangeInvalid { /// The specified validator is not in the state's validator registry. diff --git a/consensus/state_processing/src/per_block_processing/signature_sets.rs b/consensus/state_processing/src/per_block_processing/signature_sets.rs index 0e936007ee..0cc591ba4c 100644 --- a/consensus/state_processing/src/per_block_processing/signature_sets.rs +++ b/consensus/state_processing/src/per_block_processing/signature_sets.rs @@ -9,11 +9,12 @@ use tree_hash::TreeHash; use typenum::Unsigned; use types::{ AbstractExecPayload, AttesterSlashingRef, BeaconBlockRef, BeaconState, BeaconStateError, - ChainSpec, DepositData, Domain, Epoch, EthSpec, Fork, Hash256, InconsistentFork, + BuilderIndex, ChainSpec, DepositData, Domain, Epoch, EthSpec, Fork, Hash256, InconsistentFork, IndexedAttestation, IndexedAttestationRef, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedBeaconBlockHeader, SignedBlsToExecutionChange, - SignedContributionAndProof, SignedRoot, SignedVoluntaryExit, SigningData, Slot, SyncAggregate, - SyncAggregatorSelectionData, + SignedContributionAndProof, SignedExecutionPayloadBid, SignedRoot, SignedVoluntaryExit, + SigningData, Slot, SyncAggregate, SyncAggregatorSelectionData, + consts::gloas::BUILDER_INDEX_SELF_BUILD, }; pub type Result = std::result::Result; @@ -28,6 +29,9 @@ pub enum Error { /// Attempted to find the public key of a validator that does not exist. You cannot distinguish /// between an error and an invalid block in this case. ValidatorUnknown(u64), + /// Attempted to find the public key of a builder that does not exist. You cannot distinguish + /// between an error and an invalid block in this case. 
+ BuilderUnknown(BuilderIndex), /// Attempted to find the public key of a validator that does not exist. You cannot distinguish /// between an error and an invalid block in this case. ValidatorPubkeyUnknown(PublicKeyBytes), @@ -53,7 +57,7 @@ impl From for Error { } } -/// Helper function to get a public key from a `state`. +/// Helper function to get a validator public key from a `state`. pub fn get_pubkey_from_state( state: &BeaconState, validator_index: usize, @@ -71,6 +75,25 @@ where .map(Cow::Owned) } +/// Helper function to get a builder public key from a `state`. +pub fn get_builder_pubkey_from_state( + state: &BeaconState, + builder_index: BuilderIndex, +) -> Option> +where + E: EthSpec, +{ + state + .builders() + .ok()? + .get(builder_index as usize) + .and_then(|b| { + let pk: Option = b.pubkey.decompress().ok(); + pk + }) + .map(Cow::Owned) +} + /// A signature set that is valid if a block was signed by the expected block producer. pub fn block_proposal_signature_set<'a, E, F, Payload: AbstractExecPayload>( state: &'a BeaconState, @@ -332,6 +355,41 @@ where Ok(SignatureSet::multiple_pubkeys(signature, pubkeys, message)) } +pub fn execution_payload_bid_signature_set<'a, E, F>( + state: &'a BeaconState, + get_builder_pubkey: F, + signed_execution_payload_bid: &'a SignedExecutionPayloadBid, + spec: &'a ChainSpec, +) -> Result>> +where + E: EthSpec, + F: Fn(BuilderIndex) -> Option>, +{ + let execution_payload_bid = &signed_execution_payload_bid.message; + let builder_index = execution_payload_bid.builder_index; + if builder_index == BUILDER_INDEX_SELF_BUILD { + // No signatures to verify in case of a self-build, but consensus code MUST check that + // the signature is the point at infinity. + // See `process_execution_payload_bid`. 
+ return Ok(None); + } + let domain = spec.get_domain( + state.current_epoch(), + Domain::BeaconBuilder, + &state.fork(), + state.genesis_validators_root(), + ); + + let pubkey = get_builder_pubkey(builder_index).ok_or(Error::BuilderUnknown(builder_index))?; + let message = execution_payload_bid.signing_root(domain); + + Ok(Some(SignatureSet::single_pubkey( + &signed_execution_payload_bid.signature, + pubkey, + message, + ))) +} + /// Returns the signature set for the given `attester_slashing` and corresponding `pubkeys`. pub fn attester_slashing_signature_sets<'a, E, F>( state: &'a BeaconState, diff --git a/consensus/state_processing/src/per_epoch_processing/single_pass.rs b/consensus/state_processing/src/per_epoch_processing/single_pass.rs index 3e07803aa6..4eb1e36628 100644 --- a/consensus/state_processing/src/per_epoch_processing/single_pass.rs +++ b/consensus/state_processing/src/per_epoch_processing/single_pass.rs @@ -15,9 +15,9 @@ use std::collections::{BTreeSet, HashMap}; use tracing::instrument; use typenum::Unsigned; use types::{ - ActivationQueue, BeaconState, BeaconStateError, ChainSpec, Checkpoint, DepositData, Epoch, - EthSpec, ExitCache, ForkName, ParticipationFlags, PendingDeposit, ProgressiveBalancesCache, - RelativeEpoch, Validator, + ActivationQueue, BeaconState, BeaconStateError, BuilderPendingPayment, ChainSpec, Checkpoint, + DepositData, Epoch, EthSpec, ExitCache, ForkName, ParticipationFlags, PendingDeposit, + ProgressiveBalancesCache, RelativeEpoch, Validator, consts::altair::{ NUM_FLAG_INDICES, PARTICIPATION_FLAG_WEIGHTS, TIMELY_HEAD_FLAG_INDEX, TIMELY_TARGET_FLAG_INDEX, WEIGHT_DENOMINATOR, @@ -33,6 +33,7 @@ pub struct SinglePassConfig { pub pending_consolidations: bool, pub effective_balance_updates: bool, pub proposer_lookahead: bool, + pub builder_pending_payments: bool, } impl Default for SinglePassConfig { @@ -52,6 +53,7 @@ impl SinglePassConfig { pending_consolidations: true, effective_balance_updates: true, proposer_lookahead: true, 
+ builder_pending_payments: true, } } @@ -65,6 +67,7 @@ impl SinglePassConfig { pending_consolidations: false, effective_balance_updates: false, proposer_lookahead: false, + builder_pending_payments: false, } } } @@ -455,6 +458,12 @@ pub fn process_epoch_single_pass( )?; } + // Process builder pending payments outside the single-pass loop, as they depend on balances for + // multiple validators and cannot be computed accurately inside the loop. + if fork_name.gloas_enabled() && conf.builder_pending_payments { + process_builder_pending_payments(state, state_ctxt, spec)?; + } + // Finally, finish updating effective balance caches. We need this to happen *after* processing // of pending consolidations, which recomputes some effective balances. if conf.effective_balance_updates { @@ -503,6 +512,58 @@ pub fn process_proposer_lookahead( Ok(()) } +/// Calculate the quorum threshold for builder payments based on total active balance. +fn get_builder_payment_quorum_threshold( + state_ctxt: &StateContext, + spec: &ChainSpec, +) -> Result { + let per_slot_balance = state_ctxt + .total_active_balance + .safe_div(E::slots_per_epoch())?; + let quorum = per_slot_balance.safe_mul(spec.builder_payment_threshold_numerator)?; + quorum + .safe_div(spec.builder_payment_threshold_denominator) + .map_err(Error::from) +} + +/// Processes the builder pending payments from the previous epoch. +fn process_builder_pending_payments( + state: &mut BeaconState, + state_ctxt: &StateContext, + spec: &ChainSpec, +) -> Result<(), Error> { + let quorum = get_builder_payment_quorum_threshold::(state_ctxt, spec)?; + + // Collect qualifying payments and append to `builder_pending_withdrawals`. + // We use this pattern rather than a loop to avoid multiple borrows of the state's fields. + let new_pending_builder_withdrawals = state + .builder_pending_payments()? 
+ .iter() + .take(E::SlotsPerEpoch::to_usize()) + .filter(|payment| payment.weight >= quorum) + .map(|payment| payment.withdrawal.clone()) + .collect::>(); + for payment_withdrawal in new_pending_builder_withdrawals { + state + .builder_pending_withdrawals_mut()? + .push(payment_withdrawal)?; + } + + // NOTE: this could be a little more memory-efficient with some juggling to reuse parts + // of the persistent tree (could convert to list, use pop_front, convert back). + let updated_payments = state + .builder_pending_payments()? + .iter() + .skip(E::SlotsPerEpoch::to_usize()) + .cloned() + .chain((0..E::SlotsPerEpoch::to_usize()).map(|_| BuilderPendingPayment::default())) + .collect::>(); + + *state.builder_pending_payments_mut()? = Vector::new(updated_payments)?; + + Ok(()) +} + fn process_single_inactivity_update( inactivity_score: &mut Cow, validator_info: &ValidatorInfo, diff --git a/consensus/state_processing/src/per_slot_processing.rs b/consensus/state_processing/src/per_slot_processing.rs index 0f8e5dc52d..f26ea567a2 100644 --- a/consensus/state_processing/src/per_slot_processing.rs +++ b/consensus/state_processing/src/per_slot_processing.rs @@ -14,6 +14,7 @@ pub enum Error { EpochProcessingError(EpochProcessingError), ArithError(ArithError), InconsistentStateFork(InconsistentFork), + BitfieldError(ssz::BitfieldError), } impl From for Error { @@ -22,6 +23,12 @@ impl From for Error { } } +impl From for Error { + fn from(e: ssz::BitfieldError) -> Self { + Self::BitfieldError(e) + } +} + /// Advances a state forward by one slot, performing per-epoch processing if required. /// /// If the root of the supplied `state` is known, then it can be passed as `state_root`. If @@ -48,6 +55,18 @@ pub fn per_slot_processing( None }; + // Unset the next payload availability + if state.fork_name_unchecked().gloas_enabled() { + let next_slot_index = state + .slot() + .as_usize() + .safe_add(1)? 
+ .safe_rem(E::slots_per_historical_root())?; + state + .execution_payload_availability_mut()? + .set(next_slot_index, false)?; + } + state.slot_mut().safe_add_assign(1)?; // Process fork upgrades here. Note that multiple upgrades can potentially run diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index feea855c84..a4b879ddb2 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -8,9 +8,10 @@ authors = [ edition = { workspace = true } [features] -default = ["legacy-arith"] -# Allow saturating arithmetic on slots and epochs. Enabled by default, but deprecated. -legacy-arith = [] +default = [] +# Enable +, -, *, /, % operators for Slot and Epoch types. +# Operations saturate instead of wrapping. +saturating-arith = [] sqlite = ["dep:rusqlite"] arbitrary = [ "dep:arbitrary", diff --git a/consensus/types/src/builder/builder.rs b/consensus/types/src/builder/builder.rs index 2bd50f42cc..7d494da3ee 100644 --- a/consensus/types/src/builder/builder.rs +++ b/consensus/types/src/builder/builder.rs @@ -1,5 +1,5 @@ use crate::test_utils::TestRandom; -use crate::{Address, Epoch, ForkName}; +use crate::{Address, ChainSpec, Epoch, ForkName}; use bls::PublicKeyBytes; use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; @@ -24,3 +24,12 @@ pub struct Builder { pub deposit_epoch: Epoch, pub withdrawable_epoch: Epoch, } + +impl Builder { + /// Check if a builder is active in a state with `finalized_epoch`. + /// + /// This implements `is_active_builder` from the spec. 
+ pub fn is_active_at_finalized_epoch(&self, finalized_epoch: Epoch, spec: &ChainSpec) -> bool { + self.deposit_epoch < finalized_epoch && self.withdrawable_epoch == spec.far_future_epoch + } +} diff --git a/consensus/types/src/core/slot_epoch.rs b/consensus/types/src/core/slot_epoch.rs index 97457701b1..837391546c 100644 --- a/consensus/types/src/core/slot_epoch.rs +++ b/consensus/types/src/core/slot_epoch.rs @@ -22,7 +22,7 @@ use crate::{ test_utils::TestRandom, }; -#[cfg(feature = "legacy-arith")] +#[cfg(feature = "saturating-arith")] use std::ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Rem, Sub, SubAssign}; #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] diff --git a/consensus/types/src/core/slot_epoch_macros.rs b/consensus/types/src/core/slot_epoch_macros.rs index eee267355a..1b0c3bcfc1 100644 --- a/consensus/types/src/core/slot_epoch_macros.rs +++ b/consensus/types/src/core/slot_epoch_macros.rs @@ -117,7 +117,7 @@ macro_rules! impl_safe_arith { } // Deprecated: prefer `SafeArith` methods for new code. -#[cfg(feature = "legacy-arith")] +#[cfg(feature = "saturating-arith")] macro_rules! impl_math_between { ($main: ident, $other: ident) => { impl Add<$other> for $main { @@ -321,9 +321,9 @@ macro_rules! 
impl_common { impl_u64_eq_ord!($type); impl_safe_arith!($type, $type); impl_safe_arith!($type, u64); - #[cfg(feature = "legacy-arith")] + #[cfg(feature = "saturating-arith")] impl_math_between!($type, $type); - #[cfg(feature = "legacy-arith")] + #[cfg(feature = "saturating-arith")] impl_math_between!($type, u64); impl_math!($type); impl_display!($type); diff --git a/consensus/types/src/state/beacon_state.rs b/consensus/types/src/state/beacon_state.rs index 1745908c40..6228e40ef8 100644 --- a/consensus/types/src/state/beacon_state.rs +++ b/consensus/types/src/state/beacon_state.rs @@ -9,7 +9,7 @@ use fixed_bytes::FixedBytesExtended; use int_to_bytes::{int_to_bytes4, int_to_bytes8}; use metastruct::{NumFields, metastruct}; use milhouse::{List, Vector}; -use safe_arith::{ArithError, SafeArith}; +use safe_arith::{ArithError, SafeArith, SafeArithIter}; use serde::{Deserialize, Deserializer, Serialize}; use ssz::{Decode, DecodeError, Encode, ssz_encode}; use ssz_derive::{Decode, Encode}; @@ -218,6 +218,7 @@ pub enum BeaconStateError { envelope_epoch: Epoch, }, InvalidIndicesCount, + InvalidBuilderPendingPaymentsIndex(usize), InvalidExecutionPayloadAvailabilityIndex(usize), } @@ -875,7 +876,7 @@ impl BeaconState { relative_epoch: RelativeEpoch, ) -> Result { let cache = self.committee_cache(relative_epoch)?; - Ok(cache.epoch_committee_count() as u64) + Ok(cache.epoch_committee_count()? as u64) } /// Return the cached active validator indices at some epoch. @@ -2149,7 +2150,7 @@ impl BeaconState { ) -> Result, BeaconStateError> { let cache = self.committee_cache(relative_epoch)?; - Ok(cache.get_attestation_duties(validator_index)) + Ok(cache.get_attestation_duties(validator_index)?) } /// Check if the attestation is for the block proposed at the attestation slot. 
@@ -2749,6 +2750,30 @@ impl BeaconState { Ok(pending_balance) } + pub fn get_pending_balance_to_withdraw_for_builder( + &self, + builder_index: BuilderIndex, + ) -> Result { + let pending_withdrawals_total = self + .builder_pending_withdrawals()? + .iter() + .filter_map(|withdrawal| { + (withdrawal.builder_index == builder_index).then_some(withdrawal.amount) + }) + .safe_sum()?; + let pending_payments_total = self + .builder_pending_payments()? + .iter() + .filter_map(|payment| { + (payment.withdrawal.builder_index == builder_index) + .then_some(payment.withdrawal.amount) + }) + .safe_sum()?; + pending_withdrawals_total + .safe_add(pending_payments_total) + .map_err(Into::into) + } + // ******* Electra mutators ******* pub fn queue_excess_active_balance( @@ -2884,7 +2909,6 @@ impl BeaconState { } } - #[allow(clippy::arithmetic_side_effects)] pub fn rebase_on(&mut self, base: &Self, spec: &ChainSpec) -> Result<(), BeaconStateError> { // Required for macros (which use type-hints internally). 
@@ -3193,7 +3217,6 @@ impl BeaconState { )) } - #[allow(clippy::arithmetic_side_effects)] pub fn apply_pending_mutations(&mut self) -> Result<(), BeaconStateError> { match self { Self::Base(inner) => { @@ -3296,7 +3319,6 @@ impl BeaconState { pub fn get_beacon_state_leaves(&self) -> Vec { let mut leaves = vec![]; - #[allow(clippy::arithmetic_side_effects)] match self { BeaconState::Base(state) => { map_beacon_state_base_fields!(state, |_, field| { diff --git a/consensus/types/src/state/committee_cache.rs b/consensus/types/src/state/committee_cache.rs index 39e9011ef4..4a28f3c689 100644 --- a/consensus/types/src/state/committee_cache.rs +++ b/consensus/types/src/state/committee_cache.rs @@ -1,9 +1,7 @@ -#![allow(clippy::arithmetic_side_effects)] - use std::{num::NonZeroUsize, ops::Range, sync::Arc}; use educe::Educe; -use safe_arith::SafeArith; +use safe_arith::{ArithError, SafeArith}; use serde::{Deserialize, Serialize}; use ssz::{Decode, DecodeError, Encode, four_byte_option_impl}; use ssz_derive::{Decode, Encode}; @@ -79,7 +77,13 @@ impl CommitteeCache { .saturating_sub(spec.min_seed_lookahead) .saturating_sub(1u64); - if reqd_randao_epoch < state.min_randao_epoch() || epoch > state.current_epoch() + 1 { + if reqd_randao_epoch < state.min_randao_epoch() + || epoch + > state + .current_epoch() + .safe_add(1) + .map_err(BeaconStateError::ArithError)? + { return Err(BeaconStateError::EpochOutOfBounds); } @@ -118,7 +122,7 @@ impl CommitteeCache { *shuffling_positions .get_mut(v) .ok_or(BeaconStateError::ShuffleIndexOutOfBounds(v))? 
= - NonZeroUsize::new(i + 1).into(); + NonZeroUsize::new(i.safe_add(1).map_err(BeaconStateError::ArithError)?).into(); } Ok(Arc::new(CommitteeCache { @@ -177,8 +181,9 @@ impl CommitteeCache { self.slots_per_epoch as usize, self.committees_per_slot as usize, index as usize, - ); - let committee = self.compute_committee(committee_index)?; + ) + .ok()?; + let committee = self.compute_committee(committee_index).ok()??; Some(BeaconCommittee { slot, @@ -212,8 +217,9 @@ impl CommitteeCache { .initialized_epoch .ok_or(BeaconStateError::CommitteeCacheUninitialized(None))?; + let capacity = self.epoch_committee_count()?; initialized_epoch.slot_iter(self.slots_per_epoch).try_fold( - Vec::with_capacity(self.epoch_committee_count()), + Vec::with_capacity(capacity), |mut vec, slot| { vec.append(&mut self.get_beacon_committees_at_slot(slot)?); Ok(vec) @@ -225,43 +231,53 @@ impl CommitteeCache { /// /// Returns `None` if the `validator_index` does not exist, does not have duties or `Self` is /// non-initialized. - pub fn get_attestation_duties(&self, validator_index: usize) -> Option { - let i = self.shuffled_position(validator_index)?; + pub fn get_attestation_duties( + &self, + validator_index: usize, + ) -> Result, ArithError> { + let Some(i) = self.shuffled_position(validator_index) else { + return Ok(None); + }; - (0..self.epoch_committee_count()) - .map(|nth_committee| (nth_committee, self.compute_committee_range(nth_committee))) - .find(|(_, range)| { - if let Some(range) = range { - range.start <= i && range.end > i - } else { - false - } - }) - .and_then(|(nth_committee, range)| { - let (slot, index) = self.convert_to_slot_and_index(nth_committee as u64)?; - let range = range?; - let committee_position = i - range.start; - let committee_len = range.end - range.start; + for nth_committee in 0..self.epoch_committee_count()? { + let Some(range) = self.compute_committee_range(nth_committee)? 
else { + continue; + }; - Some(AttestationDuty { + if range.start <= i && range.end > i { + let Some((slot, index)) = self.convert_to_slot_and_index(nth_committee as u64)? + else { + return Ok(None); + }; + + let committee_position = i.safe_sub(range.start)?; + let committee_len = range.end.safe_sub(range.start)?; + + return Ok(Some(AttestationDuty { slot, index, committee_position, committee_len, committees_at_slot: self.committees_per_slot(), - }) - }) + })); + } + } + + Ok(None) } /// Convert an index addressing the list of all epoch committees into a slot and per-slot index. fn convert_to_slot_and_index( &self, global_committee_index: u64, - ) -> Option<(Slot, CommitteeIndex)> { - let epoch_start_slot = self.initialized_epoch?.start_slot(self.slots_per_epoch); - let slot_offset = global_committee_index / self.committees_per_slot; - let index = global_committee_index % self.committees_per_slot; - Some((epoch_start_slot.safe_add(slot_offset).ok()?, index)) + ) -> Result, ArithError> { + let Some(epoch) = self.initialized_epoch else { + return Ok(None); + }; + let epoch_start_slot = epoch.start_slot(self.slots_per_epoch); + let slot_offset = global_committee_index.safe_div(self.committees_per_slot)?; + let index = global_committee_index.safe_rem(self.committees_per_slot)?; + Ok(Some((epoch_start_slot.safe_add(slot_offset)?, index))) } /// Returns the number of active validators in the initialized epoch. @@ -278,11 +294,8 @@ impl CommitteeCache { /// Always returns `usize::default()` for a non-initialized epoch. /// /// Spec v0.12.1 - pub fn epoch_committee_count(&self) -> usize { - epoch_committee_count( - self.committees_per_slot as usize, - self.slots_per_epoch as usize, - ) + pub fn epoch_committee_count(&self) -> Result { + (self.committees_per_slot as usize).safe_mul(self.slots_per_epoch as usize) } /// Returns the number of committees per slot for this cache's epoch. 
@@ -293,19 +306,23 @@ impl CommitteeCache { /// Returns a slice of `self.shuffling` that represents the `index`'th committee in the epoch. /// /// Spec v0.12.1 - fn compute_committee(&self, index: usize) -> Option<&[usize]> { - self.shuffling.get(self.compute_committee_range(index)?) + fn compute_committee(&self, index: usize) -> Result, ArithError> { + if let Some(range) = self.compute_committee_range(index)? { + Ok(self.shuffling.get(range)) + } else { + Ok(None) + } } /// Returns a range of `self.shuffling` that represents the `index`'th committee in the epoch. /// - /// To avoid a divide-by-zero, returns `None` if `self.committee_count` is zero. + /// To avoid a divide-by-zero, returns `Ok(None)` if `self.committee_count` is zero. /// - /// Will also return `None` if the index is out of bounds. + /// Will also return `Ok(None)` if the index is out of bounds. /// /// Spec v0.12.1 - fn compute_committee_range(&self, index: usize) -> Option> { - compute_committee_range_in_epoch(self.epoch_committee_count(), index, self.shuffling.len()) + fn compute_committee_range(&self, index: usize) -> Result>, ArithError> { + compute_committee_range_in_epoch(self.epoch_committee_count()?, index, self.shuffling.len()) } /// Returns the index of some validator in `self.shuffling`. @@ -329,8 +346,10 @@ pub fn compute_committee_index_in_epoch( slots_per_epoch: usize, committees_per_slot: usize, committee_index: usize, -) -> usize { - (slot.as_usize() % slots_per_epoch) * committees_per_slot + committee_index +) -> Result { + (slot.as_usize().safe_rem(slots_per_epoch)?) + .safe_mul(committees_per_slot)? + .safe_add(committee_index) } /// Computes the range for slicing the shuffled indices to determine the members of a committee. 
@@ -341,20 +360,16 @@ pub fn compute_committee_range_in_epoch( epoch_committee_count: usize, index_in_epoch: usize, shuffling_len: usize, -) -> Option> { +) -> Result>, ArithError> { if epoch_committee_count == 0 || index_in_epoch >= epoch_committee_count { - return None; + return Ok(None); } - let start = (shuffling_len * index_in_epoch) / epoch_committee_count; - let end = (shuffling_len * (index_in_epoch + 1)) / epoch_committee_count; + let start = (shuffling_len.safe_mul(index_in_epoch))?.safe_div(epoch_committee_count)?; + let end = + (shuffling_len.safe_mul(index_in_epoch.safe_add(1)?))?.safe_div(epoch_committee_count)?; - Some(start..end) -} - -/// Returns the total number of committees in an epoch. -pub fn epoch_committee_count(committees_per_slot: usize, slots_per_epoch: usize) -> usize { - committees_per_slot * slots_per_epoch + Ok(Some(start..end)) } /// Returns a list of all `validators` indices where the validator is active at the given diff --git a/consensus/types/src/state/mod.rs b/consensus/types/src/state/mod.rs index ea064fb7ac..096bb67167 100644 --- a/consensus/types/src/state/mod.rs +++ b/consensus/types/src/state/mod.rs @@ -21,7 +21,7 @@ pub use beacon_state::{ }; pub use committee_cache::{ CommitteeCache, compute_committee_index_in_epoch, compute_committee_range_in_epoch, - epoch_committee_count, get_active_validator_indices, + get_active_validator_indices, }; pub use epoch_cache::{EpochCache, EpochCacheError, EpochCacheKey}; pub use exit_cache::ExitCache; diff --git a/consensus/types/tests/committee_cache.rs b/consensus/types/tests/committee_cache.rs index 751ef05d29..0bb8aa1da2 100644 --- a/consensus/types/tests/committee_cache.rs +++ b/consensus/types/tests/committee_cache.rs @@ -33,9 +33,9 @@ fn default_values() { assert!(!cache.is_initialized_at(Epoch::new(0))); assert!(&cache.active_validator_indices().is_empty()); assert_eq!(cache.get_beacon_committee(Slot::new(0), 0), None); - assert_eq!(cache.get_attestation_duties(0), None); + 
assert_eq!(cache.get_attestation_duties(0), Ok(None)); assert_eq!(cache.active_validator_count(), 0); - assert_eq!(cache.epoch_committee_count(), 0); + assert_eq!(cache.epoch_committee_count(), Ok(0)); assert!(cache.get_beacon_committees_at_slot(Slot::new(0)).is_err()); } diff --git a/crypto/bls/src/impls/fake_crypto.rs b/crypto/bls/src/impls/fake_crypto.rs index e7eee05077..5fe0c3baab 100644 --- a/crypto/bls/src/impls/fake_crypto.rs +++ b/crypto/bls/src/impls/fake_crypto.rs @@ -49,7 +49,9 @@ impl TPublicKey for PublicKey { } fn serialize_uncompressed(&self) -> [u8; PUBLIC_KEY_UNCOMPRESSED_BYTES_LEN] { - panic!("fake_crypto does not support uncompressed keys") + let mut bytes = [0; PUBLIC_KEY_UNCOMPRESSED_BYTES_LEN]; + bytes[0..PUBLIC_KEY_BYTES_LEN].copy_from_slice(&self.0); + bytes } fn deserialize(bytes: &[u8]) -> Result { @@ -58,8 +60,17 @@ impl TPublicKey for PublicKey { Ok(pubkey) } - fn deserialize_uncompressed(_: &[u8]) -> Result { - panic!("fake_crypto does not support uncompressed keys") + fn deserialize_uncompressed(bytes: &[u8]) -> Result { + if bytes.len() == PUBLIC_KEY_UNCOMPRESSED_BYTES_LEN { + let mut pubkey = Self([0; PUBLIC_KEY_BYTES_LEN]); + pubkey.0.copy_from_slice(&bytes[0..PUBLIC_KEY_BYTES_LEN]); + Ok(pubkey) + } else { + Err(Error::InvalidByteLength { + got: bytes.len(), + expected: PUBLIC_KEY_UNCOMPRESSED_BYTES_LEN, + }) + } } } @@ -97,7 +108,7 @@ pub struct Signature([u8; SIGNATURE_BYTES_LEN]); impl Signature { fn infinity() -> Self { - Self([0; SIGNATURE_BYTES_LEN]) + Self(INFINITY_SIGNATURE) } } @@ -213,7 +224,11 @@ impl TSecretKey for SecretKey { } fn public_key(&self) -> PublicKey { - PublicKey::infinity() + let mut bytes = [0; PUBLIC_KEY_BYTES_LEN]; + bytes[0] = 0x01; + let to_copy = std::cmp::min(self.0.len(), bytes.len() - 1); + bytes[1..1 + to_copy].copy_from_slice(&self.0[..to_copy]); + PublicKey(bytes) } fn sign(&self, _msg: Hash256) -> Signature { diff --git a/crypto/kzg/Cargo.toml b/crypto/kzg/Cargo.toml index 
5a36eb74f7..d2558663d5 100644 --- a/crypto/kzg/Cargo.toml +++ b/crypto/kzg/Cargo.toml @@ -5,6 +5,10 @@ authors = ["Pawan Dhananjay "] edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html +[features] +default = [] +fake_crypto = [] + [dependencies] arbitrary = { workspace = true } c-kzg = { workspace = true } diff --git a/crypto/kzg/src/lib.rs b/crypto/kzg/src/lib.rs index 0fe95b7723..66499dad8e 100644 --- a/crypto/kzg/src/lib.rs +++ b/crypto/kzg/src/lib.rs @@ -134,6 +134,9 @@ impl Kzg { kzg_commitment: KzgCommitment, kzg_proof: KzgProof, ) -> Result<(), Error> { + if cfg!(feature = "fake_crypto") { + return Ok(()); + } if !self.trusted_setup.verify_blob_kzg_proof( blob, &kzg_commitment.into(), @@ -155,6 +158,9 @@ impl Kzg { kzg_commitments: &[KzgCommitment], kzg_proofs: &[KzgProof], ) -> Result<(), Error> { + if cfg!(feature = "fake_crypto") { + return Ok(()); + } let commitments_bytes = kzg_commitments .iter() .map(|comm| Bytes48::from(*comm)) @@ -204,6 +210,9 @@ impl Kzg { y: &Bytes32, kzg_proof: KzgProof, ) -> Result { + if cfg!(feature = "fake_crypto") { + return Ok(true); + } self.trusted_setup .verify_kzg_proof(&kzg_commitment.into(), z, y, &kzg_proof.into()) .map_err(Into::into) @@ -240,6 +249,9 @@ impl Kzg { indices: Vec, kzg_commitments: &[Bytes48], ) -> Result<(), (Option, Error)> { + if cfg!(feature = "fake_crypto") { + return Ok(()); + } let mut column_groups: HashMap> = HashMap::new(); let expected_len = cells.len(); diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 322787736b..ded1f2b765 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -401,9 +401,9 @@ fn genesis_backfill_flag() { /// The genesis backfill flag should be enabled if historic states flag is set. 
#[test] -fn genesis_backfill_with_historic_flag() { +fn genesis_backfill_with_archive_flag() { CommandLineTest::new() - .flag("reconstruct-historic-states", None) + .flag("archive", None) .run_with_zero_port() .with_config(|config| assert!(config.chain.genesis_backfill)); } @@ -2030,17 +2030,24 @@ fn blob_prune_margin_epochs_on_startup_ten() { .with_config(|config| assert!(config.store.blob_prune_margin_epochs == 10)); } #[test] -fn reconstruct_historic_states_flag() { +fn archive_flag() { + CommandLineTest::new() + .flag("archive", None) + .run_with_zero_port() + .with_config(|config| assert!(config.chain.archive)); +} +#[test] +fn archive_flag_alias() { CommandLineTest::new() .flag("reconstruct-historic-states", None) .run_with_zero_port() - .with_config(|config| assert!(config.chain.reconstruct_historic_states)); + .with_config(|config| assert!(config.chain.archive)); } #[test] -fn no_reconstruct_historic_states_flag() { +fn no_archive_flag() { CommandLineTest::new() .run_with_zero_port() - .with_config(|config| assert!(!config.chain.reconstruct_historic_states)); + .with_config(|config| assert!(!config.chain.archive)); } #[test] fn epochs_per_migration_default() { diff --git a/testing/ef_tests/check_all_files_accessed.py b/testing/ef_tests/check_all_files_accessed.py index 8e5bd24d24..00638f7b1e 100755 --- a/testing/ef_tests/check_all_files_accessed.py +++ b/testing/ef_tests/check_all_files_accessed.py @@ -48,18 +48,10 @@ excluded_paths = [ "tests/.*/eip7732", "tests/.*/eip7805", # TODO(gloas): remove these ignores as more Gloas operations are implemented - "tests/.*/gloas/operations/block_header/.*", - "tests/.*/gloas/operations/execution_payload_bid/.*", "tests/.*/gloas/operations/payload_attestation/.*", - # TODO(EIP-7732): remove these ignores as Gloas consensus is implemented - "tests/.*/gloas/epoch_processing/.*", - "tests/.*/gloas/finality/.*", + # TODO(gloas): remove these ignores as Gloas consensus is implemented "tests/.*/gloas/fork/.*", 
"tests/.*/gloas/fork_choice/.*", - "tests/.*/gloas/networking/.*", - "tests/.*/gloas/rewards/.*", - "tests/.*/gloas/sanity/.*", - "tests/.*/gloas/transition/.*", # Ignore MatrixEntry SSZ tests for now. "tests/.*/.*/ssz_static/MatrixEntry/.*", # TODO(gloas): Ignore Gloas light client stuff for now diff --git a/testing/ef_tests/src/cases/epoch_processing.rs b/testing/ef_tests/src/cases/epoch_processing.rs index f143643ec3..7a90fc70d0 100644 --- a/testing/ef_tests/src/cases/epoch_processing.rs +++ b/testing/ef_tests/src/cases/epoch_processing.rs @@ -79,6 +79,8 @@ pub struct InactivityUpdates; pub struct ParticipationFlagUpdates; #[derive(Debug)] pub struct ProposerLookahead; +#[derive(Debug)] +pub struct BuilderPendingPayments; type_name!( JustificationAndFinalization, @@ -100,6 +102,7 @@ type_name!(SyncCommitteeUpdates, "sync_committee_updates"); type_name!(InactivityUpdates, "inactivity_updates"); type_name!(ParticipationFlagUpdates, "participation_flag_updates"); type_name!(ProposerLookahead, "proposer_lookahead"); +type_name!(BuilderPendingPayments, "builder_pending_payments"); impl EpochTransition for JustificationAndFinalization { fn run(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), EpochProcessingError> { @@ -293,6 +296,20 @@ impl EpochTransition for ProposerLookahead { } } +impl EpochTransition for BuilderPendingPayments { + fn run(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), EpochProcessingError> { + process_epoch_single_pass( + state, + spec, + SinglePassConfig { + builder_pending_payments: true, + ..SinglePassConfig::disable_all() + }, + ) + .map(|_| ()) + } +} + impl> LoadCase for EpochProcessing { fn load_from_dir(path: &Path, fork_name: ForkName) -> Result { let spec = &testing_spec::(fork_name); @@ -356,6 +373,10 @@ impl> Case for EpochProcessing { return false; } + if !fork_name.gloas_enabled() && T::name() == "builder_pending_payments" { + return false; + } + true } diff --git a/testing/ef_tests/src/cases/fork_choice.rs 
b/testing/ef_tests/src/cases/fork_choice.rs index 45bed7c6cd..ca77dc8d79 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -448,7 +448,7 @@ impl Tester { .spec(spec.clone()) .keypairs(vec![]) .chain_config(ChainConfig { - reconstruct_historic_states: true, + archive: true, ..ChainConfig::default() }) .genesis_state_ephemeral_store(case.anchor_state.clone()) diff --git a/testing/ef_tests/src/cases/operations.rs b/testing/ef_tests/src/cases/operations.rs index 8605800b79..59d2bef24e 100644 --- a/testing/ef_tests/src/cases/operations.rs +++ b/testing/ef_tests/src/cases/operations.rs @@ -17,7 +17,7 @@ use state_processing::{ per_block_processing::{ VerifyBlockRoot, VerifySignatures, errors::BlockProcessingError, - process_block_header, process_execution_payload, + process_block_header, process_execution_payload, process_execution_payload_bid, process_operations::{ altair_deneb, base, gloas, process_attester_slashings, process_bls_to_execution_changes, process_deposits, process_exits, @@ -52,6 +52,12 @@ pub struct WithdrawalsPayload { payload: Option>, } +/// Newtype for testing execution payload bids. 
+#[derive(Debug, Clone, Deserialize)] +pub struct ExecutionPayloadBidBlock { + block: BeaconBlock, +} + #[derive(Debug, Clone)] pub struct Operations> { metadata: Metadata, @@ -467,6 +473,37 @@ impl Operation for SignedExecutionPayloadEnvelope { } } +impl Operation for ExecutionPayloadBidBlock { + type Error = BlockProcessingError; + + fn handler_name() -> String { + "execution_payload_bid".into() + } + + fn filename() -> String { + "block.ssz_snappy".into() + } + + fn is_enabled_for_fork(fork_name: ForkName) -> bool { + fork_name.gloas_enabled() + } + + fn decode(path: &Path, _fork_name: ForkName, spec: &ChainSpec) -> Result { + ssz_decode_file_with(path, |bytes| BeaconBlock::from_ssz_bytes(bytes, spec)) + .map(|block| ExecutionPayloadBidBlock { block }) + } + + fn apply_to( + &self, + state: &mut BeaconState, + spec: &ChainSpec, + _: &Operations, + ) -> Result<(), BlockProcessingError> { + process_execution_payload_bid(state, self.block.to_ref(), VerifySignatures::True, spec)?; + Ok(()) + } +} + impl Operation for WithdrawalsPayload { type Error = BlockProcessingError; diff --git a/testing/ef_tests/src/handler.rs b/testing/ef_tests/src/handler.rs index 9d11252edb..45bca21c6f 100644 --- a/testing/ef_tests/src/handler.rs +++ b/testing/ef_tests/src/handler.rs @@ -22,7 +22,7 @@ pub trait Handler { // Add forks here to exclude them from EF spec testing. Helpful for adding future or // unspecified forks. 
fn disabled_forks(&self) -> Vec { - vec![ForkName::Gloas] + vec![] } fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool { @@ -395,11 +395,6 @@ where T::name().into() } - fn disabled_forks(&self) -> Vec { - // TODO(gloas): Can be removed once we enable Gloas on all tests - vec![] - } - fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool { self.supported_forks.contains(&fork_name) } @@ -422,11 +417,6 @@ where fn handler_name(&self) -> String { BeaconState::::name().into() } - - fn disabled_forks(&self) -> Vec { - // TODO(gloas): Can be removed once we enable Gloas on all tests - vec![] - } } impl Handler for SszStaticWithSpecHandler @@ -449,11 +439,6 @@ where T::name().into() } - fn disabled_forks(&self) -> Vec { - // TODO(gloas): Can be removed once we enable Gloas on all tests - vec![] - } - fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool { self.supported_forks.contains(&fork_name) } @@ -552,6 +537,11 @@ impl Handler for RandomHandler { fn handler_name(&self) -> String { "random".into() } + + fn disabled_forks(&self) -> Vec { + // TODO(gloas): remove once we have Gloas random tests + vec![ForkName::Gloas] + } } #[derive(Educe)] @@ -622,6 +612,11 @@ impl Handler for ForkHandler { fn handler_name(&self) -> String { "fork".into() } + + fn disabled_forks(&self) -> Vec { + // TODO(gloas): remove once onboard_builders_from_pending_deposits is implemented + vec![ForkName::Gloas] + } } #[derive(Educe)] @@ -726,6 +721,11 @@ impl Handler for ForkChoiceHandler { // run them with fake crypto. 
cfg!(not(feature = "fake_crypto")) } + + fn disabled_forks(&self) -> Vec { + // TODO(gloas): remove once we have Gloas fork choice tests + vec![ForkName::Gloas] + } } #[derive(Educe)] @@ -755,6 +755,11 @@ impl Handler for OptimisticSyncHandler { fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool { fork_name.bellatrix_enabled() && cfg!(not(feature = "fake_crypto")) } + + fn disabled_forks(&self) -> Vec { + // TODO(gloas): remove once we have Gloas optimistic sync tests + vec![ForkName::Gloas] + } } #[derive(Educe)] @@ -975,6 +980,11 @@ impl Handler for KZGComputeCellsHandler { fn handler_name(&self) -> String { "compute_cells".into() } + + fn disabled_forks(&self) -> Vec { + // TODO(gloas): remove once we have Gloas KZG tests + vec![ForkName::Gloas] + } } #[derive(Educe)] @@ -995,6 +1005,11 @@ impl Handler for KZGComputeCellsAndKZGProofHandler { fn handler_name(&self) -> String { "compute_cells_and_kzg_proofs".into() } + + fn disabled_forks(&self) -> Vec { + // TODO(gloas): remove once we have Gloas KZG tests + vec![ForkName::Gloas] + } } #[derive(Educe)] @@ -1015,6 +1030,11 @@ impl Handler for KZGVerifyCellKZGProofBatchHandler { fn handler_name(&self) -> String { "verify_cell_kzg_proof_batch".into() } + + fn disabled_forks(&self) -> Vec { + // TODO(gloas): remove once we have Gloas KZG tests + vec![ForkName::Gloas] + } } #[derive(Educe)] @@ -1035,6 +1055,11 @@ impl Handler for KZGRecoverCellsAndKZGProofHandler { fn handler_name(&self) -> String { "recover_cells_and_kzg_proofs".into() } + + fn disabled_forks(&self) -> Vec { + // TODO(gloas): remove once we have Gloas KZG tests + vec![ForkName::Gloas] + } } #[derive(Educe)] @@ -1059,6 +1084,11 @@ impl Handler for KzgInclusionMerkleProofValidityHandler bool { fork_name.deneb_enabled() } + + fn disabled_forks(&self) -> Vec { + // TODO(gloas): remove once we have Gloas KZG merkle proof tests + vec![ForkName::Gloas] + } } #[derive(Educe)] @@ -1083,6 +1113,11 @@ impl Handler for MerkleProofValidityHandler { fn 
is_enabled_for_fork(&self, fork_name: ForkName) -> bool { fork_name.altair_enabled() } + + fn disabled_forks(&self) -> Vec { + // TODO(gloas): remove once we have Gloas light client tests + vec![ForkName::Gloas] + } } #[derive(Educe)] @@ -1108,6 +1143,11 @@ impl Handler for LightClientUpdateHandler { // Enabled in Altair fork_name.altair_enabled() } + + fn disabled_forks(&self) -> Vec { + // TODO(gloas): remove once we have Gloas light client tests + vec![ForkName::Gloas] + } } #[derive(Educe)] @@ -1129,21 +1169,18 @@ impl> Handler for OperationsHandler O::handler_name() } - fn disabled_forks(&self) -> Vec { - // TODO(gloas): Can be removed once we enable Gloas on all tests - vec![] - } - fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool { Self::Case::is_enabled_for_fork(fork_name) && (!fork_name.gloas_enabled() || self.handler_name() == "attestation" || self.handler_name() == "attester_slashing" + || self.handler_name() == "block_header" || self.handler_name() == "bls_to_execution_change" || self.handler_name() == "consolidation_request" || self.handler_name() == "deposit_request" || self.handler_name() == "deposit" || self.handler_name() == "execution_payload" + || self.handler_name() == "execution_payload_bid" || self.handler_name() == "proposer_slashing" || self.handler_name() == "sync_aggregate" || self.handler_name() == "withdrawal_request" diff --git a/testing/ef_tests/src/lib.rs b/testing/ef_tests/src/lib.rs index 8ec4860cab..94b19b6644 100644 --- a/testing/ef_tests/src/lib.rs +++ b/testing/ef_tests/src/lib.rs @@ -1,11 +1,11 @@ pub use case_result::CaseResult; -pub use cases::WithdrawalsPayload; pub use cases::{ - Case, EffectiveBalanceUpdates, Eth1DataReset, FeatureName, HistoricalRootsUpdate, - HistoricalSummariesUpdate, InactivityUpdates, JustificationAndFinalization, - ParticipationFlagUpdates, ParticipationRecordUpdates, PendingBalanceDeposits, - PendingConsolidations, ProposerLookahead, RandaoMixesReset, RegistryUpdates, - 
RewardsAndPenalties, Slashings, SlashingsReset, SyncCommitteeUpdates, + BuilderPendingPayments, Case, EffectiveBalanceUpdates, Eth1DataReset, ExecutionPayloadBidBlock, + FeatureName, HistoricalRootsUpdate, HistoricalSummariesUpdate, InactivityUpdates, + JustificationAndFinalization, ParticipationFlagUpdates, ParticipationRecordUpdates, + PendingBalanceDeposits, PendingConsolidations, ProposerLookahead, RandaoMixesReset, + RegistryUpdates, RewardsAndPenalties, Slashings, SlashingsReset, SyncCommitteeUpdates, + WithdrawalsPayload, }; pub use decode::log_file_access; pub use error::Error; diff --git a/testing/ef_tests/tests/tests.rs b/testing/ef_tests/tests/tests.rs index 8a53a61929..c3481a2405 100644 --- a/testing/ef_tests/tests/tests.rs +++ b/testing/ef_tests/tests/tests.rs @@ -93,6 +93,12 @@ fn operations_execution_payload_envelope() { OperationsHandler::>::default().run(); } +#[test] +fn operations_execution_payload_bid() { + OperationsHandler::>::default().run(); + OperationsHandler::>::default().run(); +} + #[test] fn operations_withdrawals() { OperationsHandler::>::default().run(); @@ -948,6 +954,12 @@ fn epoch_processing_proposer_lookahead() { EpochProcessingHandler::::default().run(); } +#[test] +fn epoch_processing_builder_pending_payments() { + EpochProcessingHandler::::default().run(); + EpochProcessingHandler::::default().run(); +} + #[test] fn fork_upgrade() { ForkHandler::::default().run(); diff --git a/testing/node_test_rig/src/lib.rs b/testing/node_test_rig/src/lib.rs index e49d11ee1e..ece6001802 100644 --- a/testing/node_test_rig/src/lib.rs +++ b/testing/node_test_rig/src/lib.rs @@ -115,7 +115,7 @@ pub fn testing_client_config() -> ClientConfig { }; // Simulator tests expect historic states to be available for post-run checks. - client_config.chain.reconstruct_historic_states = true; + client_config.chain.archive = true; // Specify a constant count of beacon processor workers. 
Having this number // too low can cause annoying HTTP timeouts, especially on Github runners