From a1534bbfb34d2388a63dcd9fdd116629b6065d83 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 30 Mar 2026 17:43:57 +1100 Subject: [PATCH 01/57] Check `ChainSpec` consistency with upstream `config.yaml` (#9008) Closes: - https://github.com/sigp/lighthouse/issues/9002 - Commit `config.yaml` for minimal and mainnet to `consensus/types/configs`. For now we omit any auto-downloading logic, to avoid the hassles of dealing with Github rate limits etc on CI. Unfortunately these files are NOT bundled inside the spec tests. - Fix the values of `min_builder_withdrawability_delay` for minimal and mainnet. These discrepancies aren't caught by the current spec tests, because the spec tests are missing data: https://github.com/ethereum/consensus-specs/pull/5005. Will be fixed in the next release/when we update to nightly. - Fix the blob schedule for `minimal`, which should be empty, NOT inherited from mainnet. - Keep `SECONDS_PER_SLOT` for now because the Kurtosis tests fail upon their complete removal. We will be able to completely remove `SECONDS_PER_SLOT` soon. 
Co-Authored-By: Michael Sproul --- beacon_node/beacon_chain/src/test_utils.rs | 2 +- beacon_node/beacon_chain/tests/store_tests.rs | 8 +- .../http_api/tests/interactive_tests.rs | 14 +- .../mainnet/config.yaml | 40 +-- consensus/types/configs/mainnet.yaml | 227 ++++++++++++++++ consensus/types/configs/minimal.yaml | 220 ++++++++++++++++ consensus/types/src/core/chain_spec.rs | 242 +++++++++++++++++- 7 files changed, 703 insertions(+), 50 deletions(-) create mode 100644 consensus/types/configs/mainnet.yaml create mode 100644 consensus/types/configs/minimal.yaml diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index c53c29438e..13dcf22108 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -223,7 +223,7 @@ pub fn test_da_checker( let slot_clock = TestingSlotClock::new( Slot::new(0), Duration::from_secs(0), - Duration::from_secs(spec.seconds_per_slot), + spec.get_slot_duration(), ); let kzg = get_kzg(&spec); let ordered_custody_column_indices = generate_data_column_indices_rand_order::(); diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index bac3cbaf4e..c6e13bd160 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -2910,7 +2910,7 @@ async fn reproduction_unaligned_checkpoint_sync_pruned_payload() { let slot_clock = TestingSlotClock::new( Slot::new(0), Duration::from_secs(harness.chain.genesis_time), - Duration::from_secs(spec.seconds_per_slot), + spec.get_slot_duration(), ); slot_clock.set_slot(harness.get_current_slot().as_u64()); @@ -5334,8 +5334,8 @@ async fn test_safely_backfill_data_column_custody_info() { .await; let epoch_before_increase = Epoch::new(start_epochs); - let effective_delay_slots = - CUSTODY_CHANGE_DA_EFFECTIVE_DELAY_SECONDS / harness.chain.spec.seconds_per_slot; + let effective_delay_slots = 
CUSTODY_CHANGE_DA_EFFECTIVE_DELAY_SECONDS + / harness.chain.spec.get_slot_duration().as_secs(); let cgc_change_slot = epoch_before_increase.end_slot(E::slots_per_epoch()); @@ -6133,7 +6133,7 @@ async fn bellatrix_produce_and_store_payloads() { .genesis_time() .safe_add( slot.as_u64() - .safe_mul(harness.spec.seconds_per_slot) + .safe_mul(harness.spec.get_slot_duration().as_secs()) .unwrap(), ) .unwrap(); diff --git a/beacon_node/http_api/tests/interactive_tests.rs b/beacon_node/http_api/tests/interactive_tests.rs index e0e4029875..15f61537a0 100644 --- a/beacon_node/http_api/tests/interactive_tests.rs +++ b/beacon_node/http_api/tests/interactive_tests.rs @@ -975,9 +975,10 @@ async fn proposer_duties_with_gossip_tolerance() { assert_eq!(harness.chain.slot().unwrap(), num_initial); // Set the clock to just before the next epoch. - harness.chain.slot_clock.advance_time( - Duration::from_secs(spec.seconds_per_slot) - spec.maximum_gossip_clock_disparity(), - ); + harness + .chain + .slot_clock + .advance_time(spec.get_slot_duration() - spec.maximum_gossip_clock_disparity()); assert_eq!( harness .chain @@ -1081,9 +1082,10 @@ async fn proposer_duties_v2_with_gossip_tolerance() { assert_eq!(harness.chain.slot().unwrap(), num_initial); // Set the clock to just before the next epoch. 
- harness.chain.slot_clock.advance_time( - Duration::from_secs(spec.seconds_per_slot) - spec.maximum_gossip_clock_disparity(), - ); + harness + .chain + .slot_clock + .advance_time(spec.get_slot_duration() - spec.maximum_gossip_clock_disparity()); assert_eq!( harness .chain diff --git a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml index 5df6370abe..02bf37cb55 100644 --- a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml @@ -56,21 +56,18 @@ ELECTRA_FORK_EPOCH: 364032 # May 7, 2025, 10:05:11am UTC FULU_FORK_VERSION: 0x06000000 FULU_FORK_EPOCH: 411392 # December 3, 2025, 09:49:11pm UTC # Gloas -GLOAS_FORK_VERSION: 0x07000000 # temporary stub +GLOAS_FORK_VERSION: 0x07000000 GLOAS_FORK_EPOCH: 18446744073709551615 -# EIP7441 -EIP7441_FORK_VERSION: 0x08000000 # temporary stub -EIP7441_FORK_EPOCH: 18446744073709551615 -# EIP7805 -EIP7805_FORK_VERSION: 0x0a000000 # temporary stub -EIP7805_FORK_EPOCH: 18446744073709551615 +# Heze +HEZE_FORK_VERSION: 0x08000000 +HEZE_FORK_EPOCH: 18446744073709551615 # EIP7928 -EIP7928_FORK_VERSION: 0x0b000000 # temporary stub +EIP7928_FORK_VERSION: 0xe7928000 # temporary stub EIP7928_FORK_EPOCH: 18446744073709551615 # Time parameters # --------------------------------------------------------------- -# 12 seconds (*deprecated*) +# 12 seconds SECONDS_PER_SLOT: 12 # 12000 milliseconds SLOT_DURATION_MS: 12000 @@ -96,8 +93,8 @@ SYNC_MESSAGE_DUE_BPS: 3333 CONTRIBUTION_DUE_BPS: 6667 # Gloas -# 2**12 (= 4,096) epochs -MIN_BUILDER_WITHDRAWABILITY_DELAY: 4096 +# 2**6 (= 64) epochs +MIN_BUILDER_WITHDRAWABILITY_DELAY: 64 # 2500 basis points, 25% of SLOT_DURATION_MS ATTESTATION_DUE_BPS_GLOAS: 2500 # 5000 basis points, 50% of SLOT_DURATION_MS @@ -109,7 +106,7 @@ CONTRIBUTION_DUE_BPS_GLOAS: 5000 # 7500 basis points, 75% of SLOT_DURATION_MS 
PAYLOAD_ATTESTATION_DUE_BPS: 7500 -# EIP7805 +# Heze # 7500 basis points, 75% of SLOT_DURATION_MS VIEW_FREEZE_CUTOFF_BPS: 7500 # 6667 basis points, ~67% of SLOT_DURATION_MS @@ -166,8 +163,6 @@ MAX_PAYLOAD_SIZE: 10485760 MAX_REQUEST_BLOCKS: 1024 # 2**8 (= 256) epochs EPOCHS_PER_SUBNET_SUBSCRIPTION: 256 -# MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2 (= 33,024) epochs -MIN_EPOCHS_FOR_BLOCK_REQUESTS: 33024 # 2**5 (= 32) slots ATTESTATION_PROPAGATION_SLOT_RANGE: 32 # 500ms @@ -180,8 +175,6 @@ SUBNETS_PER_NODE: 2 ATTESTATION_SUBNET_COUNT: 64 # 0 bits ATTESTATION_SUBNET_EXTRA_BITS: 0 -# ceillog2(ATTESTATION_SUBNET_COUNT) + ATTESTATION_SUBNET_EXTRA_BITS (= 6 + 0) bits -ATTESTATION_SUBNET_PREFIX_BITS: 6 # Deneb # 2**7 (= 128) blocks @@ -192,24 +185,18 @@ MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: 4096 BLOB_SIDECAR_SUBNET_COUNT: 6 # 6 blobs MAX_BLOBS_PER_BLOCK: 6 -# MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK (= 128 * 6) sidecars -MAX_REQUEST_BLOB_SIDECARS: 768 # Electra # 9 subnets BLOB_SIDECAR_SUBNET_COUNT_ELECTRA: 9 # 9 blobs MAX_BLOBS_PER_BLOCK_ELECTRA: 9 -# MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK_ELECTRA (= 128 * 9) sidecars -MAX_REQUEST_BLOB_SIDECARS_ELECTRA: 1152 # Fulu # 2**7 (= 128) groups NUMBER_OF_CUSTODY_GROUPS: 128 # 2**7 (= 128) subnets DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 -# MAX_REQUEST_BLOCKS_DENEB * NUMBER_OF_COLUMNS (= 128 * 128) sidecars -MAX_REQUEST_DATA_COLUMN_SIDECARS: 16384 # 2**3 (= 8) samples SAMPLES_PER_SLOT: 8 # 2**2 (= 4) sidecars @@ -225,18 +212,13 @@ MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS: 4096 # 2**7 (= 128) payloads MAX_REQUEST_PAYLOADS: 128 -# EIP7441 -# 2**8 (= 256) epochs -EPOCHS_PER_SHUFFLING_PHASE: 256 -# 2**1 (= 2) epochs -PROPOSER_SELECTION_GAP: 2 - -# EIP7805 +# Heze # 2**4 (= 16) inclusion lists MAX_REQUEST_INCLUSION_LIST: 16 # 2**13 (= 8,192) bytes MAX_BYTES_PER_INCLUSION_LIST: 8192 + # Blob Scheduling # --------------------------------------------------------------- diff --git 
a/consensus/types/configs/mainnet.yaml b/consensus/types/configs/mainnet.yaml new file mode 100644 index 0000000000..ab85bd9e71 --- /dev/null +++ b/consensus/types/configs/mainnet.yaml @@ -0,0 +1,227 @@ +# Mainnet config + +# Extends the mainnet preset +PRESET_BASE: 'mainnet' + +# Free-form short name of the network that this configuration applies to - known +# canonical network names include: +# * 'mainnet' - there can be only one +# * 'sepolia' - testnet +# * 'holesky' - testnet +# * 'hoodi' - testnet +# Must match the regex: [a-z0-9\-] +CONFIG_NAME: 'mainnet' + +# Transition +# --------------------------------------------------------------- +# Estimated on Sept 15, 2022 +TERMINAL_TOTAL_DIFFICULTY: 58750000000000000000000 +# By default, don't use these params +TERMINAL_BLOCK_HASH: 0x0000000000000000000000000000000000000000000000000000000000000000 +TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551615 + +# Genesis +# --------------------------------------------------------------- +# 2**14 (= 16,384) validators +MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 16384 +# Dec 1, 2020, 12pm UTC +MIN_GENESIS_TIME: 1606824000 +# Initial fork version for mainnet +GENESIS_FORK_VERSION: 0x00000000 +# 7 * 24 * 3,600 (= 604,800) seconds, 7 days +GENESIS_DELAY: 604800 + +# Forking +# --------------------------------------------------------------- +# Some forks are disabled for now: +# - These may be re-assigned to another fork-version later +# - Temporarily set to max uint64 value: 2**64 - 1 + +# Altair +ALTAIR_FORK_VERSION: 0x01000000 +ALTAIR_FORK_EPOCH: 74240 # Oct 27, 2021, 10:56:23am UTC +# Bellatrix +BELLATRIX_FORK_VERSION: 0x02000000 +BELLATRIX_FORK_EPOCH: 144896 # Sept 6, 2022, 11:34:47am UTC +# Capella +CAPELLA_FORK_VERSION: 0x03000000 +CAPELLA_FORK_EPOCH: 194048 # April 12, 2023, 10:27:35pm UTC +# Deneb +DENEB_FORK_VERSION: 0x04000000 +DENEB_FORK_EPOCH: 269568 # March 13, 2024, 01:55:35pm UTC +# Electra +ELECTRA_FORK_VERSION: 0x05000000 +ELECTRA_FORK_EPOCH: 364032 # May 7, 
2025, 10:05:11am UTC +# Fulu +FULU_FORK_VERSION: 0x06000000 +FULU_FORK_EPOCH: 411392 # December 3, 2025, 09:49:11pm UTC +# Gloas +GLOAS_FORK_VERSION: 0x07000000 +GLOAS_FORK_EPOCH: 18446744073709551615 +# Heze +HEZE_FORK_VERSION: 0x08000000 +HEZE_FORK_EPOCH: 18446744073709551615 +# EIP7928 +EIP7928_FORK_VERSION: 0xe7928000 # temporary stub +EIP7928_FORK_EPOCH: 18446744073709551615 + +# Time parameters +# --------------------------------------------------------------- +# 12000 milliseconds +SLOT_DURATION_MS: 12000 +# 14 (estimate from Eth1 mainnet) +SECONDS_PER_ETH1_BLOCK: 14 +# 2**8 (= 256) epochs +MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256 +# 2**8 (= 256) epochs +SHARD_COMMITTEE_PERIOD: 256 +# 2**11 (= 2,048) Eth1 blocks +ETH1_FOLLOW_DISTANCE: 2048 +# 1667 basis points, ~17% of SLOT_DURATION_MS +PROPOSER_REORG_CUTOFF_BPS: 1667 +# 3333 basis points, ~33% of SLOT_DURATION_MS +ATTESTATION_DUE_BPS: 3333 +# 6667 basis points, ~67% of SLOT_DURATION_MS +AGGREGATE_DUE_BPS: 6667 + +# Altair +# 3333 basis points, ~33% of SLOT_DURATION_MS +SYNC_MESSAGE_DUE_BPS: 3333 +# 6667 basis points, ~67% of SLOT_DURATION_MS +CONTRIBUTION_DUE_BPS: 6667 + +# Gloas +# 2**6 (= 64) epochs +MIN_BUILDER_WITHDRAWABILITY_DELAY: 64 +# 2500 basis points, 25% of SLOT_DURATION_MS +ATTESTATION_DUE_BPS_GLOAS: 2500 +# 5000 basis points, 50% of SLOT_DURATION_MS +AGGREGATE_DUE_BPS_GLOAS: 5000 +# 2500 basis points, 25% of SLOT_DURATION_MS +SYNC_MESSAGE_DUE_BPS_GLOAS: 2500 +# 5000 basis points, 50% of SLOT_DURATION_MS +CONTRIBUTION_DUE_BPS_GLOAS: 5000 +# 7500 basis points, 75% of SLOT_DURATION_MS +PAYLOAD_ATTESTATION_DUE_BPS: 7500 + +# Heze +# 7500 basis points, 75% of SLOT_DURATION_MS +VIEW_FREEZE_CUTOFF_BPS: 7500 +# 6667 basis points, ~67% of SLOT_DURATION_MS +INCLUSION_LIST_SUBMISSION_DUE_BPS: 6667 +# 9167 basis points, ~92% of SLOT_DURATION_MS +PROPOSER_INCLUSION_LIST_CUTOFF_BPS: 9167 + +# Validator cycle +# --------------------------------------------------------------- +# 2**2 (= 4) 
+INACTIVITY_SCORE_BIAS: 4 +# 2**4 (= 16) +INACTIVITY_SCORE_RECOVERY_RATE: 16 +# 2**4 * 10**9 (= 16,000,000,000) Gwei +EJECTION_BALANCE: 16000000000 +# 2**2 (= 4) validators +MIN_PER_EPOCH_CHURN_LIMIT: 4 +# 2**16 (= 65,536) +CHURN_LIMIT_QUOTIENT: 65536 + +# Deneb +# 2**3 (= 8) (*deprecated*) +MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT: 8 + +# Electra +# 2**7 * 10**9 (= 128,000,000,000) Gwei +MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA: 128000000000 +# 2**8 * 10**9 (= 256,000,000,000) Gwei +MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT: 256000000000 + +# Fork choice +# --------------------------------------------------------------- +# 40% +PROPOSER_SCORE_BOOST: 40 +# 20% +REORG_HEAD_WEIGHT_THRESHOLD: 20 +# 160% +REORG_PARENT_WEIGHT_THRESHOLD: 160 +# 2 epochs +REORG_MAX_EPOCHS_SINCE_FINALIZATION: 2 + +# Deposit contract +# --------------------------------------------------------------- +# Ethereum PoW Mainnet +DEPOSIT_CHAIN_ID: 1 +DEPOSIT_NETWORK_ID: 1 +DEPOSIT_CONTRACT_ADDRESS: 0x00000000219ab540356cBB839Cbe05303d7705Fa + +# Networking +# --------------------------------------------------------------- +# 10 * 2**20 (= 10,485,760) bytes, 10 MiB +MAX_PAYLOAD_SIZE: 10485760 +# 2**10 (= 1,024) blocks +MAX_REQUEST_BLOCKS: 1024 +# 2**8 (= 256) epochs +EPOCHS_PER_SUBNET_SUBSCRIPTION: 256 +# 2**5 (= 32) slots +ATTESTATION_PROPAGATION_SLOT_RANGE: 32 +# 500ms +MAXIMUM_GOSSIP_CLOCK_DISPARITY: 500 +MESSAGE_DOMAIN_INVALID_SNAPPY: 0x00000000 +MESSAGE_DOMAIN_VALID_SNAPPY: 0x01000000 +# 2 subnets per node +SUBNETS_PER_NODE: 2 +# 2**6 (= 64) subnets +ATTESTATION_SUBNET_COUNT: 64 +# 0 bits +ATTESTATION_SUBNET_EXTRA_BITS: 0 + +# Deneb +# 2**7 (= 128) blocks +MAX_REQUEST_BLOCKS_DENEB: 128 +# 2**12 (= 4,096) epochs +MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: 4096 +# 6 subnets +BLOB_SIDECAR_SUBNET_COUNT: 6 +# 6 blobs +MAX_BLOBS_PER_BLOCK: 6 + +# Electra +# 9 subnets +BLOB_SIDECAR_SUBNET_COUNT_ELECTRA: 9 +# 9 blobs +MAX_BLOBS_PER_BLOCK_ELECTRA: 9 + +# Fulu +# 2**7 (= 128) groups +NUMBER_OF_CUSTODY_GROUPS: 128 +# 
2**7 (= 128) subnets +DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 +# 2**3 (= 8) samples +SAMPLES_PER_SLOT: 8 +# 2**2 (= 4) sidecars +CUSTODY_REQUIREMENT: 4 +# 2**3 (= 8) sidecars +VALIDATOR_CUSTODY_REQUIREMENT: 8 +# 2**5 * 10**9 (= 32,000,000,000) Gwei +BALANCE_PER_ADDITIONAL_CUSTODY_GROUP: 32000000000 +# 2**12 (= 4,096) epochs +MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS: 4096 + +# Gloas +# 2**7 (= 128) payloads +MAX_REQUEST_PAYLOADS: 128 + +# Heze +# 2**4 (= 16) inclusion lists +MAX_REQUEST_INCLUSION_LIST: 16 +# 2**13 (= 8,192) bytes +MAX_BYTES_PER_INCLUSION_LIST: 8192 + + +# Blob Scheduling +# --------------------------------------------------------------- + +BLOB_SCHEDULE: + - EPOCH: 412672 # December 9, 2025, 02:21:11pm UTC + MAX_BLOBS_PER_BLOCK: 15 + - EPOCH: 419072 # January 7, 2026, 01:01:11am UTC + MAX_BLOBS_PER_BLOCK: 21 diff --git a/consensus/types/configs/minimal.yaml b/consensus/types/configs/minimal.yaml new file mode 100644 index 0000000000..8c0d7254fe --- /dev/null +++ b/consensus/types/configs/minimal.yaml @@ -0,0 +1,220 @@ +# Minimal config + +# Extends the minimal preset +PRESET_BASE: 'minimal' + +# Free-form short name of the network that this configuration applies to - known +# canonical network names include: +# * 'minimal' - spec-testing +# Must match the regex: [a-z0-9\-] +CONFIG_NAME: 'minimal' + +# Transition +# --------------------------------------------------------------- +# 2**256-2**10 for testing minimal network +TERMINAL_TOTAL_DIFFICULTY: 115792089237316195423570985008687907853269984665640564039457584007913129638912 +# By default, don't use these params +TERMINAL_BLOCK_HASH: 0x0000000000000000000000000000000000000000000000000000000000000000 +TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551615 + +# Genesis +# --------------------------------------------------------------- +# [customized] 2**6 (= 64) validators +MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 64 +# [customized] Jan 3, 2020, 12am UTC +MIN_GENESIS_TIME: 1578009600 +# [customized] 
Initial fork version for minimal +GENESIS_FORK_VERSION: 0x00000001 +# [customized] 5 * 60 (= 300) seconds +GENESIS_DELAY: 300 + +# Forking +# --------------------------------------------------------------- +# Values provided for illustrative purposes. +# Individual tests/testnets may set different values. + +# [customized] Altair +ALTAIR_FORK_VERSION: 0x01000001 +ALTAIR_FORK_EPOCH: 18446744073709551615 +# [customized] Bellatrix +BELLATRIX_FORK_VERSION: 0x02000001 +BELLATRIX_FORK_EPOCH: 18446744073709551615 +# [customized] Capella +CAPELLA_FORK_VERSION: 0x03000001 +CAPELLA_FORK_EPOCH: 18446744073709551615 +# [customized] Deneb +DENEB_FORK_VERSION: 0x04000001 +DENEB_FORK_EPOCH: 18446744073709551615 +# [customized] Electra +ELECTRA_FORK_VERSION: 0x05000001 +ELECTRA_FORK_EPOCH: 18446744073709551615 +# [customized] Fulu +FULU_FORK_VERSION: 0x06000001 +FULU_FORK_EPOCH: 18446744073709551615 +# [customized] Gloas +GLOAS_FORK_VERSION: 0x07000001 +GLOAS_FORK_EPOCH: 18446744073709551615 +# [customized] Heze +HEZE_FORK_VERSION: 0x08000001 +HEZE_FORK_EPOCH: 18446744073709551615 +# [customized] EIP7928 +EIP7928_FORK_VERSION: 0xe7928001 +EIP7928_FORK_EPOCH: 18446744073709551615 + +# Time parameters +# --------------------------------------------------------------- +# [customized] 6000 milliseconds +SLOT_DURATION_MS: 6000 +# 14 (estimate from Eth1 mainnet) +SECONDS_PER_ETH1_BLOCK: 14 +# 2**8 (= 256) epochs +MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256 +# [customized] 2**6 (= 64) epochs +SHARD_COMMITTEE_PERIOD: 64 +# [customized] 2**4 (= 16) Eth1 blocks +ETH1_FOLLOW_DISTANCE: 16 +# 1667 basis points, ~17% of SLOT_DURATION_MS +PROPOSER_REORG_CUTOFF_BPS: 1667 +# 3333 basis points, ~33% of SLOT_DURATION_MS +ATTESTATION_DUE_BPS: 3333 +# 6667 basis points, ~67% of SLOT_DURATION_MS +AGGREGATE_DUE_BPS: 6667 + +# Altair +# 3333 basis points, ~33% of SLOT_DURATION_MS +SYNC_MESSAGE_DUE_BPS: 3333 +# 6667 basis points, ~67% of SLOT_DURATION_MS +CONTRIBUTION_DUE_BPS: 6667 + +# Gloas +# [customized] 
2**1 (= 2) epochs +MIN_BUILDER_WITHDRAWABILITY_DELAY: 2 +# 2500 basis points, 25% of SLOT_DURATION_MS +ATTESTATION_DUE_BPS_GLOAS: 2500 +# 5000 basis points, 50% of SLOT_DURATION_MS +AGGREGATE_DUE_BPS_GLOAS: 5000 +# 2500 basis points, 25% of SLOT_DURATION_MS +SYNC_MESSAGE_DUE_BPS_GLOAS: 2500 +# 5000 basis points, 50% of SLOT_DURATION_MS +CONTRIBUTION_DUE_BPS_GLOAS: 5000 +# 7500 basis points, 75% of SLOT_DURATION_MS +PAYLOAD_ATTESTATION_DUE_BPS: 7500 + +# Heze +# 7500 basis points, 75% of SLOT_DURATION_MS +VIEW_FREEZE_CUTOFF_BPS: 7500 +# 6667 basis points, ~67% of SLOT_DURATION_MS +INCLUSION_LIST_SUBMISSION_DUE_BPS: 6667 +# 9167 basis points, ~92% of SLOT_DURATION_MS +PROPOSER_INCLUSION_LIST_CUTOFF_BPS: 9167 + +# Validator cycle +# --------------------------------------------------------------- +# 2**2 (= 4) +INACTIVITY_SCORE_BIAS: 4 +# 2**4 (= 16) +INACTIVITY_SCORE_RECOVERY_RATE: 16 +# 2**4 * 10**9 (= 16,000,000,000) Gwei +EJECTION_BALANCE: 16000000000 +# [customized] 2**1 (= 2) validators +MIN_PER_EPOCH_CHURN_LIMIT: 2 +# [customized] 2**5 (= 32) +CHURN_LIMIT_QUOTIENT: 32 + +# Deneb +# [customized] 2**2 (= 4) (*deprecated*) +MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT: 4 + +# Electra +# [customized] 2**6 * 10**9 (= 64,000,000,000) Gwei +MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA: 64000000000 +# [customized] 2**7 * 10**9 (= 128,000,000,000) Gwei +MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT: 128000000000 + +# Fork choice +# --------------------------------------------------------------- +# 40% +PROPOSER_SCORE_BOOST: 40 +# 20% +REORG_HEAD_WEIGHT_THRESHOLD: 20 +# 160% +REORG_PARENT_WEIGHT_THRESHOLD: 160 +# 2 epochs +REORG_MAX_EPOCHS_SINCE_FINALIZATION: 2 + +# Deposit contract +# --------------------------------------------------------------- +# Ethereum Goerli testnet +DEPOSIT_CHAIN_ID: 5 +DEPOSIT_NETWORK_ID: 5 +# Configured on a per testnet basis +DEPOSIT_CONTRACT_ADDRESS: 0x1234567890123456789012345678901234567890 + +# Networking +# 
--------------------------------------------------------------- +# 10 * 2**20 (= 10,485,760) bytes, 10 MiB +MAX_PAYLOAD_SIZE: 10485760 +# 2**10 (= 1,024) blocks +MAX_REQUEST_BLOCKS: 1024 +# 2**8 (= 256) epochs +EPOCHS_PER_SUBNET_SUBSCRIPTION: 256 +# 2**5 (= 32) slots +ATTESTATION_PROPAGATION_SLOT_RANGE: 32 +# 500ms +MAXIMUM_GOSSIP_CLOCK_DISPARITY: 500 +MESSAGE_DOMAIN_INVALID_SNAPPY: 0x00000000 +MESSAGE_DOMAIN_VALID_SNAPPY: 0x01000000 +# 2 subnets per node +SUBNETS_PER_NODE: 2 +# 2**6 (= 64) subnets +ATTESTATION_SUBNET_COUNT: 64 +# 0 bits +ATTESTATION_SUBNET_EXTRA_BITS: 0 + +# Deneb +# 2**7 (= 128) blocks +MAX_REQUEST_BLOCKS_DENEB: 128 +# 2**12 (= 4,096) epochs +MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: 4096 +# 6 subnets +BLOB_SIDECAR_SUBNET_COUNT: 6 +# 6 blobs +MAX_BLOBS_PER_BLOCK: 6 + +# Electra +# 9 subnets +BLOB_SIDECAR_SUBNET_COUNT_ELECTRA: 9 +# 9 blobs +MAX_BLOBS_PER_BLOCK_ELECTRA: 9 + +# Fulu +# 2**7 (= 128) groups +NUMBER_OF_CUSTODY_GROUPS: 128 +# 2**7 (= 128) subnets +DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 +# 2**3 (= 8) samples +SAMPLES_PER_SLOT: 8 +# 2**2 (= 4) sidecars +CUSTODY_REQUIREMENT: 4 +# 2**3 (= 8) sidecars +VALIDATOR_CUSTODY_REQUIREMENT: 8 +# 2**5 * 10**9 (= 32,000,000,000) Gwei +BALANCE_PER_ADDITIONAL_CUSTODY_GROUP: 32000000000 +# 2**12 (= 4,096) epochs +MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS: 4096 + +# Gloas +# 2**7 (= 128) payloads +MAX_REQUEST_PAYLOADS: 128 + +# Heze +# 2**4 (= 16) inclusion lists +MAX_REQUEST_INCLUSION_LIST: 16 +# 2**13 (= 8,192) bytes +MAX_BYTES_PER_INCLUSION_LIST: 8192 + + +# Blob Scheduling +# --------------------------------------------------------------- + +BLOB_SCHEDULE: [] diff --git a/consensus/types/src/core/chain_spec.rs b/consensus/types/src/core/chain_spec.rs index 8a2b3a23e8..01c4c7bbfd 100644 --- a/consensus/types/src/core/chain_spec.rs +++ b/consensus/types/src/core/chain_spec.rs @@ -96,8 +96,7 @@ pub struct ChainSpec { * Time parameters */ pub genesis_delay: u64, - // TODO deprecate seconds_per_slot - pub 
seconds_per_slot: u64, + seconds_per_slot: u64, // Private so that this value can't get changed except via the `set_slot_duration_ms` function. slot_duration_ms: u64, pub min_attestation_inclusion_delay: u64, @@ -914,6 +913,7 @@ impl ChainSpec { /// Set the duration of a slot (in ms). pub fn set_slot_duration_ms(mut self, slot_duration_ms: u64) -> Self { self.slot_duration_ms = slot_duration_ms; + self.seconds_per_slot = slot_duration_ms.saturating_div(1000); self.compute_derived_values::() } @@ -1235,7 +1235,7 @@ impl ChainSpec { gloas_fork_epoch: None, builder_payment_threshold_numerator: 6, builder_payment_threshold_denominator: 10, - min_builder_withdrawability_delay: Epoch::new(4096), + min_builder_withdrawability_delay: Epoch::new(64), max_request_payloads: 128, /* @@ -1381,6 +1381,7 @@ impl ChainSpec { // Gloas gloas_fork_version: [0x07, 0x00, 0x00, 0x01], gloas_fork_epoch: None, + min_builder_withdrawability_delay: Epoch::new(2), /* * Derived time values (set by `compute_derived_values()`) @@ -1391,6 +1392,9 @@ impl ChainSpec { sync_message_due: Duration::from_millis(1999), contribution_and_proof_due: Duration::from_millis(4000), + // Networking Fulu + blob_schedule: BlobSchedule::default(), + // Other network_id: 2, // lighthouse testnet network id deposit_chain_id: 5, @@ -1631,7 +1635,7 @@ impl ChainSpec { gloas_fork_epoch: None, builder_payment_threshold_numerator: 6, builder_payment_threshold_denominator: 10, - min_builder_withdrawability_delay: Epoch::new(4096), + min_builder_withdrawability_delay: Epoch::new(64), max_request_payloads: 128, /* @@ -1908,8 +1912,9 @@ pub struct Config { #[serde(deserialize_with = "deserialize_fork_epoch")] pub gloas_fork_epoch: Option>, - #[serde(with = "serde_utils::quoted_u64")] - seconds_per_slot: u64, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + seconds_per_slot: Option>, #[serde(default)] #[serde(skip_serializing_if = "Option::is_none")] slot_duration_ms: Option>, @@ -2064,6 +2069,10 @@ 
pub struct Config { #[serde(default = "default_contribution_due_bps")] #[serde(with = "serde_utils::quoted_u64")] contribution_due_bps: u64, + + #[serde(default = "default_min_builder_withdrawability_delay")] + #[serde(with = "serde_utils::quoted_u64")] + min_builder_withdrawability_delay: u64, } fn default_bellatrix_fork_version() -> [u8; 4] { @@ -2289,6 +2298,10 @@ const fn default_contribution_due_bps() -> u64 { 6667 } +const fn default_min_builder_withdrawability_delay() -> u64 { + 64 +} + fn max_blocks_by_root_request_common(max_request_blocks: u64) -> usize { let max_request_blocks = max_request_blocks as usize; RuntimeVariableList::::new( @@ -2459,7 +2472,9 @@ impl Config { .gloas_fork_epoch .map(|epoch| MaybeQuoted { value: epoch }), - seconds_per_slot: spec.seconds_per_slot, + seconds_per_slot: Some(MaybeQuoted { + value: spec.seconds_per_slot, + }), slot_duration_ms: Some(MaybeQuoted { value: spec.slot_duration_ms, }), @@ -2525,6 +2540,8 @@ impl Config { aggregate_due_bps: spec.aggregate_due_bps, sync_message_due_bps: spec.sync_message_due_bps, contribution_due_bps: spec.contribution_due_bps, + + min_builder_withdrawability_delay: spec.min_builder_withdrawability_delay.as_u64(), } } @@ -2616,12 +2633,21 @@ impl Config { aggregate_due_bps, sync_message_due_bps, contribution_due_bps, + min_builder_withdrawability_delay, } = self; if preset_base != E::spec_name().to_string().as_str() { return None; } + // Fail if seconds_per_slot and slot_duration_ms are both set but are inconsistent. 
+ if let (Some(seconds_per_slot), Some(slot_duration_ms)) = + (seconds_per_slot, slot_duration_ms) + && seconds_per_slot.value.saturating_mul(1000) != slot_duration_ms.value + { + return None; + } + let spec = ChainSpec { config_name: config_name.clone(), min_genesis_active_validator_count, @@ -2642,10 +2668,12 @@ impl Config { fulu_fork_version, gloas_fork_version, gloas_fork_epoch: gloas_fork_epoch.map(|q| q.value), - seconds_per_slot, + seconds_per_slot: seconds_per_slot + .map(|q| q.value) + .or_else(|| slot_duration_ms.and_then(|q| q.value.checked_div(1000)))?, slot_duration_ms: slot_duration_ms .map(|q| q.value) - .unwrap_or_else(|| seconds_per_slot.saturating_mul(1000)), + .or_else(|| seconds_per_slot.map(|q| q.value.saturating_mul(1000)))?, seconds_per_eth1_block, min_validator_withdrawability_delay, shard_committee_period, @@ -2705,6 +2733,8 @@ impl Config { sync_message_due_bps, contribution_due_bps, + min_builder_withdrawability_delay: Epoch::new(min_builder_withdrawability_delay), + ..chain_spec.clone() }; Some(spec.compute_derived_values::()) @@ -2853,6 +2883,9 @@ mod yaml_tests { use super::*; use crate::core::MinimalEthSpec; use paste::paste; + use std::collections::BTreeSet; + use std::env; + use std::path::PathBuf; use std::sync::Arc; use tempfile::NamedTempFile; @@ -2902,6 +2935,67 @@ mod yaml_tests { assert_eq!(from, yamlconfig); } + #[test] + fn slot_duration_fallback_both_fields() { + let mainnet = ChainSpec::mainnet(); + let mut config = Config::from_chain_spec::(&mainnet); + config.seconds_per_slot = Some(MaybeQuoted { value: 12 }); + config.slot_duration_ms = Some(MaybeQuoted { value: 12000 }); + let spec = config + .apply_to_chain_spec::(&mainnet) + .unwrap(); + assert_eq!(spec.seconds_per_slot, 12); + assert_eq!(spec.slot_duration_ms, 12000); + } + + #[test] + fn slot_duration_fallback_both_fields_inconsistent() { + let mainnet = ChainSpec::mainnet(); + let mut config = Config::from_chain_spec::(&mainnet); + config.seconds_per_slot = 
Some(MaybeQuoted { value: 10 }); + config.slot_duration_ms = Some(MaybeQuoted { value: 12000 }); + assert_eq!(config.apply_to_chain_spec::(&mainnet), None); + } + + #[test] + fn slot_duration_fallback_seconds_only() { + let mainnet = ChainSpec::mainnet(); + let mut config = Config::from_chain_spec::(&mainnet); + config.seconds_per_slot = Some(MaybeQuoted { value: 12 }); + config.slot_duration_ms = None; + let spec = config + .apply_to_chain_spec::(&mainnet) + .unwrap(); + assert_eq!(spec.seconds_per_slot, 12); + assert_eq!(spec.slot_duration_ms, 12000); + } + + #[test] + fn slot_duration_fallback_ms_only() { + let mainnet = ChainSpec::mainnet(); + let mut config = Config::from_chain_spec::(&mainnet); + config.seconds_per_slot = None; + config.slot_duration_ms = Some(MaybeQuoted { value: 12000 }); + let spec = config + .apply_to_chain_spec::(&mainnet) + .unwrap(); + assert_eq!(spec.seconds_per_slot, 12); + assert_eq!(spec.slot_duration_ms, 12000); + } + + #[test] + fn slot_duration_fallback_neither() { + let mainnet = ChainSpec::mainnet(); + let mut config = Config::from_chain_spec::(&mainnet); + config.seconds_per_slot = None; + config.slot_duration_ms = None; + assert!( + config + .apply_to_chain_spec::(&mainnet) + .is_none() + ); + } + #[test] fn blob_schedule_max_blobs_per_block() { let spec_contents = r#" @@ -3375,7 +3469,6 @@ mod yaml_tests { // Test slot duration let slot_duration = spec.get_slot_duration(); assert_eq!(slot_duration, Duration::from_millis(12000)); - assert_eq!(slot_duration, Duration::from_secs(spec.seconds_per_slot)); // Test edge cases with custom spec let mut custom_spec = spec.clone(); @@ -3485,4 +3578,133 @@ mod yaml_tests { spec.attestation_due_bps = 15000; spec.compute_derived_values::(); } + + fn configs_base_path() -> PathBuf { + env::var("CARGO_MANIFEST_DIR") + .expect("should know manifest dir") + .parse::() + .expect("should parse manifest dir as path") + .join("configs") + } + + /// Upstream config keys that Lighthouse 
intentionally does not include in its + /// `Config` struct. These are forks/features not yet implemented. Update this + /// list as new forks are added. + const UPSTREAM_KEYS_NOT_IN_LIGHTHOUSE: &[&str] = &[ + // Forks not yet implemented + "HEZE_FORK_VERSION", + "HEZE_FORK_EPOCH", + "EIP7928_FORK_VERSION", + "EIP7928_FORK_EPOCH", + // Gloas params not yet in Config + "ATTESTATION_DUE_BPS_GLOAS", + "AGGREGATE_DUE_BPS_GLOAS", + "SYNC_MESSAGE_DUE_BPS_GLOAS", + "CONTRIBUTION_DUE_BPS_GLOAS", + "PAYLOAD_ATTESTATION_DUE_BPS", + "MAX_REQUEST_PAYLOADS", + // Gloas fork choice params not yet in Config + "REORG_HEAD_WEIGHT_THRESHOLD", + "REORG_PARENT_WEIGHT_THRESHOLD", + "REORG_MAX_EPOCHS_SINCE_FINALIZATION", + // Heze networking + "VIEW_FREEZE_CUTOFF_BPS", + "INCLUSION_LIST_SUBMISSION_DUE_BPS", + "PROPOSER_INCLUSION_LIST_CUTOFF_BPS", + "MAX_REQUEST_INCLUSION_LIST", + "MAX_BYTES_PER_INCLUSION_LIST", + ]; + + /// Compare a `ChainSpec` against an upstream consensus-specs config YAML file. + /// + /// 1. Extracts keys from the raw YAML text (to avoid yaml_serde's inability + /// to parse integers > u64 into `Value`/`Mapping` types) and checks that + /// every key is either known to `Config` or explicitly listed in + /// `UPSTREAM_KEYS_NOT_IN_LIGHTHOUSE`. + /// 2. Deserializes the upstream YAML as `Config` (which has custom + /// deserializers for large values like `TERMINAL_TOTAL_DIFFICULTY`) and + /// compares against `Config::from_chain_spec`. + fn config_test(spec: &ChainSpec, config_name: &str) { + let file_path = configs_base_path().join(format!("{config_name}.yaml")); + let upstream_yaml = std::fs::read_to_string(&file_path) + .unwrap_or_else(|e| panic!("failed to read {}: {e}", file_path.display())); + + // Extract top-level keys from the raw YAML text. We can't parse as + // yaml_serde::Mapping because yaml_serde cannot represent integers + // exceeding u64 (e.g. TERMINAL_TOTAL_DIFFICULTY). 
Config YAML uses a + // simple `KEY: value` format with no indentation for top-level keys. + let upstream_keys: BTreeSet = upstream_yaml + .lines() + .filter_map(|line| { + // Skip comments, blank lines, and indented lines (nested YAML). + if line.is_empty() + || line.starts_with('#') + || line.starts_with(' ') + || line.starts_with('\t') + { + return None; + } + line.split(':').next().map(|k| k.to_string()) + }) + .collect(); + + // Get the set of keys that Config knows about by serializing and collecting + // keys. Also include keys for optional fields that may be skipped during + // serialization (e.g. CONFIG_NAME). + let our_config = Config::from_chain_spec::(spec); + let our_yaml = yaml_serde::to_string(&our_config).expect("failed to serialize Config"); + let our_mapping: yaml_serde::Mapping = + yaml_serde::from_str(&our_yaml).expect("failed to re-parse our Config"); + let mut known_keys: BTreeSet = our_mapping + .keys() + .filter_map(|k| k.as_str().map(String::from)) + .collect(); + // Fields that Config knows but may skip during serialization. + known_keys.insert("CONFIG_NAME".to_string()); + + // Check for upstream keys that our Config doesn't know about. + let mut missing_keys: Vec<&String> = upstream_keys + .iter() + .filter(|k| { + !known_keys.contains(k.as_str()) + && !UPSTREAM_KEYS_NOT_IN_LIGHTHOUSE.contains(&k.as_str()) + }) + .collect(); + missing_keys.sort(); + + assert!( + missing_keys.is_empty(), + "Upstream {config_name} config has keys not present in Lighthouse Config \ + (add to Config or to UPSTREAM_KEYS_NOT_IN_LIGHTHOUSE): {missing_keys:?}" + ); + + // Compare values for all fields Config knows about. + let mut upstream_config: Config = yaml_serde::from_str(&upstream_yaml) + .unwrap_or_else(|e| panic!("failed to parse {config_name} as Config: {e}")); + + // CONFIG_NAME is network metadata (not a spec parameter), so align it + // before comparing. 
+ upstream_config.config_name = our_config.config_name.clone(); + // SECONDS_PER_SLOT is deprecated upstream but we still emit it, so + // fill it in if the upstream YAML omitted it. + if upstream_config.seconds_per_slot.is_none() { + upstream_config.seconds_per_slot = our_config.seconds_per_slot; + } + assert_eq!( + upstream_config, our_config, + "Config mismatch for {config_name}" + ); + } + + #[test] + fn mainnet_config_consistent() { + let spec = ChainSpec::mainnet(); + config_test::(&spec, "mainnet"); + } + + #[test] + fn minimal_config_consistent() { + let spec = ChainSpec::minimal(); + config_test::(&spec, "minimal"); + } } From 6044d79ad931779d1cf19e4a5c805cf5b63cde36 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Tue, 31 Mar 2026 11:19:18 +1100 Subject: [PATCH 02/57] Fix local testnet Tempo and Prometheus/Grafana config (#9054) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Pin Tempo image to `grafana/tempo:2.10.3` — `grafana/tempo:latest` now resolves to an unreleased 3.0 build that removed the `compactor` config field, causing startup failure - Replace deprecated `prometheus_grafana` additional service with separate `prometheus` + `grafana` services Co-Authored-By: Jimmy Chen --- scripts/local_testnet/network_params.yaml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/scripts/local_testnet/network_params.yaml b/scripts/local_testnet/network_params.yaml index 0c36e5c49c..083f719c60 100644 --- a/scripts/local_testnet/network_params.yaml +++ b/scripts/local_testnet/network_params.yaml @@ -21,10 +21,13 @@ network_params: slot_duration_ms: 3000 snooper_enabled: false global_log_level: debug +tempo_params: + image: grafana/tempo:2.10.3 additional_services: - dora - spamoor - - prometheus_grafana + - prometheus + - grafana - tempo spamoor_params: image: ethpandaops/spamoor:master From 6f480e499e1b745333cd19dfa6037eb6738f3f43 Mon Sep 17 00:00:00 2001 From: Lion - dapplion 
<35266934+dapplion@users.noreply.github.com> Date: Tue, 31 Mar 2026 00:07:22 -0500 Subject: [PATCH 03/57] Add range sync tests (#8989) Co-Authored-By: dapplion <35266934+dapplion@users.noreply.github.com> --- beacon_node/beacon_processor/src/lib.rs | 8 +- .../src/network_beacon_processor/mod.rs | 7 +- beacon_node/network/src/sync/tests/lookups.rs | 305 +++++- beacon_node/network/src/sync/tests/range.rs | 877 +++++++----------- scripts/range-sync-coverage.sh | 136 +++ 5 files changed, 781 insertions(+), 552 deletions(-) create mode 100755 scripts/range-sync-coverage.sh diff --git a/beacon_node/beacon_processor/src/lib.rs b/beacon_node/beacon_processor/src/lib.rs index 724c41cfc9..a6c76beb31 100644 --- a/beacon_node/beacon_processor/src/lib.rs +++ b/beacon_node/beacon_processor/src/lib.rs @@ -421,7 +421,11 @@ pub enum Work { IgnoredRpcBlock { process_fn: BlockingFn, }, - ChainSegment(AsyncFn), + ChainSegment { + process_fn: AsyncFn, + /// (chain_id, batch_epoch) for test observability + process_id: (u32, u64), + }, ChainSegmentBackfill(BlockingFn), Status(BlockingFn), BlocksByRangeRequest(AsyncFn), @@ -1473,7 +1477,7 @@ impl BeaconProcessor { } => task_spawner.spawn_blocking(move || { process_batch(aggregates); }), - Work::ChainSegment(process_fn) => task_spawner.spawn_async(async move { + Work::ChainSegment { process_fn, .. } => task_spawner.spawn_async(async move { process_fn.await; }), Work::UnknownBlockAttestation { process_fn } diff --git a/beacon_node/network/src/network_beacon_processor/mod.rs b/beacon_node/network/src/network_beacon_processor/mod.rs index f74e7dacfb..b3d6874b8a 100644 --- a/beacon_node/network/src/network_beacon_processor/mod.rs +++ b/beacon_node/network/src/network_beacon_processor/mod.rs @@ -620,11 +620,14 @@ impl NetworkBeaconProcessor { // Back-sync batches are dispatched with a different `Work` variant so // they can be rate-limited. 
let work = match process_id { - ChainSegmentProcessId::RangeBatchId(_, _) => { + ChainSegmentProcessId::RangeBatchId(chain_id, epoch) => { let process_fn = async move { processor.process_chain_segment(process_id, blocks).await; }; - Work::ChainSegment(Box::pin(process_fn)) + Work::ChainSegment { + process_fn: Box::pin(process_fn), + process_id: (chain_id, epoch.as_u64()), + } } ChainSegmentProcessId::BackSyncBatchId(_) => { let process_fn = diff --git a/beacon_node/network/src/sync/tests/lookups.rs b/beacon_node/network/src/sync/tests/lookups.rs index cd872df887..a26996ec5e 100644 --- a/beacon_node/network/src/sync/tests/lookups.rs +++ b/beacon_node/network/src/sync/tests/lookups.rs @@ -1,16 +1,18 @@ use super::*; use crate::NetworkMessage; -use crate::network_beacon_processor::{InvalidBlockStorage, NetworkBeaconProcessor}; +use crate::network_beacon_processor::{ + ChainSegmentProcessId, InvalidBlockStorage, NetworkBeaconProcessor, +}; use crate::sync::block_lookups::{BlockLookupSummary, PARENT_DEPTH_TOLERANCE}; use crate::sync::{ SyncMessage, - manager::{BlockProcessType, BlockProcessingResult, SyncManager}, + manager::{BatchProcessResult, BlockProcessType, BlockProcessingResult, SyncManager}, }; use beacon_chain::blob_verification::KzgVerifiedBlob; use beacon_chain::block_verification_types::LookupBlock; use beacon_chain::custody_context::NodeCustodyType; use beacon_chain::{ - AvailabilityProcessingStatus, BlockError, NotifyExecutionLayer, + AvailabilityProcessingStatus, BlockError, EngineState, NotifyExecutionLayer, block_verification_types::{AsBlock, AvailableBlockData}, data_availability_checker::Availability, test_utils::{ @@ -23,7 +25,7 @@ use educe::Educe; use itertools::Itertools; use lighthouse_network::discovery::CombinedKey; use lighthouse_network::{ - NetworkConfig, NetworkGlobals, PeerId, + NetworkConfig, NetworkGlobals, PeerAction, PeerId, rpc::{RPCError, RequestType}, service::api_types::{AppRequestId, SyncRequestId}, types::SyncState, @@ -64,14 
+66,33 @@ pub struct SimulateConfig { Option Option + Send + Sync>>, // Import a block directly before processing it (for simulating race conditions) import_block_before_process: HashSet, + /// Number of range batch processing attempts that return FaultyFailure + range_faulty_failures: usize, + /// Number of range batch processing attempts that return NonFaultyFailure + range_non_faulty_failures: usize, + /// Number of BlocksByRange requests that return empty (no blocks) + return_no_range_blocks_n_times: usize, + /// Number of DataColumnsByRange requests that return empty (no columns) + return_no_range_columns_n_times: usize, + /// Number of DataColumnsByRange requests that return columns with unrequested indices + return_wrong_range_column_indices_n_times: usize, + /// Number of DataColumnsByRange requests that return columns with unrequested slots + return_wrong_range_column_slots_n_times: usize, + /// Number of DataColumnsByRange requests that return fewer columns than requested + /// (drops half the columns). Triggers CouplingError::DataColumnPeerFailure → retry_partial_batch + return_partial_range_columns_n_times: usize, + /// Set EE offline at start, bring back online after this many BlocksByRange responses + ee_offline_for_n_range_responses: Option, + /// Disconnect all peers after this many successful BlocksByRange responses. 
+ successful_range_responses_before_disconnect: Option, } impl SimulateConfig { - fn new() -> Self { + pub(super) fn new() -> Self { Self::default() } - fn happy_path() -> Self { + pub(super) fn happy_path() -> Self { Self::default() } @@ -111,7 +132,7 @@ impl SimulateConfig { self } - fn return_rpc_error(mut self, error: RPCError) -> Self { + pub(super) fn return_rpc_error(mut self, error: RPCError) -> Self { self.return_rpc_error = Some(error); self } @@ -133,6 +154,51 @@ impl SimulateConfig { self.import_block_before_process.insert(block_root); self } + + pub(super) fn with_range_faulty_failures(mut self, n: usize) -> Self { + self.range_faulty_failures = n; + self + } + + pub(super) fn with_range_non_faulty_failures(mut self, n: usize) -> Self { + self.range_non_faulty_failures = n; + self + } + + pub(super) fn with_no_range_blocks_n_times(mut self, n: usize) -> Self { + self.return_no_range_blocks_n_times = n; + self + } + + pub(super) fn with_no_range_columns_n_times(mut self, n: usize) -> Self { + self.return_no_range_columns_n_times = n; + self + } + + pub(super) fn with_wrong_range_column_indices_n_times(mut self, n: usize) -> Self { + self.return_wrong_range_column_indices_n_times = n; + self + } + + pub(super) fn with_wrong_range_column_slots_n_times(mut self, n: usize) -> Self { + self.return_wrong_range_column_slots_n_times = n; + self + } + + pub(super) fn with_partial_range_columns_n_times(mut self, n: usize) -> Self { + self.return_partial_range_columns_n_times = n; + self + } + + pub(super) fn with_ee_offline_for_n_range_responses(mut self, n: usize) -> Self { + self.ee_offline_for_n_range_responses = Some(n); + self + } + + pub(super) fn with_disconnect_after_range_requests(mut self, n: usize) -> Self { + self.successful_range_responses_before_disconnect = Some(n); + self + } } fn genesis_fork() -> ForkName { @@ -256,6 +322,7 @@ impl TestRig { }) } + #[allow(dead_code)] pub fn with_custody_type(node_custody_type: NodeCustodyType) -> Self { 
Self::new(TestRigConfig { fulu_test_type: FuluTestType::WeFullnodeThemSupernode, @@ -267,13 +334,23 @@ impl TestRig { /// /// Processes events from sync_rx (sink), beacon processor, and network queues in fixed /// priority order each tick. Handles completed work before pulling new requests. - async fn simulate(&mut self, complete_strategy: SimulateConfig) { + pub(super) async fn simulate(&mut self, complete_strategy: SimulateConfig) { self.complete_strategy = complete_strategy; self.log(&format!( "Running simulate with config {:?}", self.complete_strategy )); + // Set EE offline at the start if configured + if self + .complete_strategy + .ee_offline_for_n_range_responses + .is_some() + { + self.sync_manager + .update_execution_engine_state(EngineState::Offline); + } + let mut i = 0; loop { @@ -352,9 +429,34 @@ impl TestRig { process_fn.await } } - Work::RpcBlobs { process_fn } - | Work::RpcCustodyColumn(process_fn) - | Work::ChainSegment(process_fn) => process_fn.await, + Work::RpcBlobs { process_fn } | Work::RpcCustodyColumn(process_fn) => { + process_fn.await + } + Work::ChainSegment { + process_fn, + process_id: (chain_id, batch_epoch), + } => { + let sync_type = + ChainSegmentProcessId::RangeBatchId(chain_id, batch_epoch.into()); + if self.complete_strategy.range_faulty_failures > 0 { + self.complete_strategy.range_faulty_failures -= 1; + self.push_sync_message(SyncMessage::BatchProcessed { + sync_type, + result: BatchProcessResult::FaultyFailure { + imported_blocks: 0, + penalty: PeerAction::LowToleranceError, + }, + }); + } else if self.complete_strategy.range_non_faulty_failures > 0 { + self.complete_strategy.range_non_faulty_failures -= 1; + self.push_sync_message(SyncMessage::BatchProcessed { + sync_type, + result: BatchProcessResult::NonFaultyFailure, + }); + } else { + process_fn.await; + } + } Work::Reprocess(_) => {} // ignore other => panic!("Unsupported Work event {}", other.str_id()), } @@ -573,15 +675,50 @@ impl TestRig { if 
self.complete_strategy.skip_by_range_routes { return; } - let blocks = (*req.start_slot()..req.start_slot() + req.count()) - .filter_map(|slot| { - self.network_blocks_by_slot - .get(&Slot::new(slot)) - .map(|block| block.block_cloned()) - }) - .collect::>(); - self.send_rpc_blocks_response(req_id, peer_id, &blocks); + // Check if we should disconnect all peers instead of continuing + if let Some(ref mut remaining) = self + .complete_strategy + .successful_range_responses_before_disconnect + { + if *remaining == 0 { + // Disconnect all peers — remaining responses become "late" + for peer in self.get_connected_peers() { + self.peer_disconnected(peer); + } + return; + } else { + *remaining -= 1; + } + } + + // Return empty response N times to simulate peer returning no blocks + if self.complete_strategy.return_no_range_blocks_n_times > 0 { + self.complete_strategy.return_no_range_blocks_n_times -= 1; + self.send_rpc_blocks_response(req_id, peer_id, &[]); + } else { + let blocks = (*req.start_slot()..req.start_slot() + req.count()) + .filter_map(|slot| { + self.network_blocks_by_slot + .get(&Slot::new(slot)) + .map(|block| block.block_cloned()) + }) + .collect::>(); + self.send_rpc_blocks_response(req_id, peer_id, &blocks); + } + + // Bring EE back online after N range responses + if let Some(ref mut remaining) = + self.complete_strategy.ee_offline_for_n_range_responses + { + if *remaining == 0 { + self.sync_manager + .update_execution_engine_state(EngineState::Online); + self.complete_strategy.ee_offline_for_n_range_responses = None; + } else { + *remaining -= 1; + } + } } (RequestType::BlobsByRange(req), AppRequestId::Sync(req_id)) => { @@ -605,10 +742,80 @@ impl TestRig { if self.complete_strategy.skip_by_range_routes { return; } - // Note: This function is permissive, blocks may have zero columns and it won't - // error. 
Some caveats: - // - The genesis block never has columns - // - Some blocks may not have columns as the blob count is random + + // Return empty columns N times + if self.complete_strategy.return_no_range_columns_n_times > 0 { + self.complete_strategy.return_no_range_columns_n_times -= 1; + self.send_rpc_columns_response(req_id, peer_id, &[]); + return; + } + + // Return columns with unrequested indices N times. + // Note: for supernodes this returns no columns since they custody all indices. + if self + .complete_strategy + .return_wrong_range_column_indices_n_times + > 0 + { + self.complete_strategy + .return_wrong_range_column_indices_n_times -= 1; + let wrong_columns = (req.start_slot..req.start_slot + req.count) + .filter_map(|slot| self.network_blocks_by_slot.get(&Slot::new(slot))) + .filter_map(|block| block.block_data().data_columns()) + .flat_map(|columns| { + columns + .into_iter() + .filter(|c| !req.columns.contains(c.index())) + }) + .collect::>(); + self.send_rpc_columns_response(req_id, peer_id, &wrong_columns); + return; + } + + // Return columns from an out-of-range slot N times + if self + .complete_strategy + .return_wrong_range_column_slots_n_times + > 0 + { + self.complete_strategy + .return_wrong_range_column_slots_n_times -= 1; + // Get a column from a slot AFTER the requested range + let wrong_slot = req.start_slot + req.count; + let wrong_columns = self + .network_blocks_by_slot + .get(&Slot::new(wrong_slot)) + .and_then(|block| block.block_data().data_columns()) + .into_iter() + .flat_map(|columns| { + columns + .into_iter() + .filter(|c| req.columns.contains(c.index())) + }) + .collect::>(); + self.send_rpc_columns_response(req_id, peer_id, &wrong_columns); + return; + } + + // Return only half the requested columns N times — triggers CouplingError + if self.complete_strategy.return_partial_range_columns_n_times > 0 { + self.complete_strategy.return_partial_range_columns_n_times -= 1; + let columns = (req.start_slot..req.start_slot + 
req.count) + .filter_map(|slot| self.network_blocks_by_slot.get(&Slot::new(slot))) + .filter_map(|block| block.block_data().data_columns()) + .flat_map(|columns| { + columns + .into_iter() + .filter(|c| req.columns.contains(c.index())) + }) + .enumerate() + .filter(|(i, _)| i % 2 == 0) // keep every other column + .map(|(_, c)| c) + .collect::>(); + self.send_rpc_columns_response(req_id, peer_id, &columns); + return; + } + let columns = (req.start_slot..req.start_slot + req.count) .filter_map(|slot| self.network_blocks_by_slot.get(&Slot::new(slot))) .filter_map(|block| block.block_data().data_columns()) @@ -726,7 +933,7 @@ impl TestRig { // Preparation steps /// Returns the block root of the tip of the built chain - async fn build_chain(&mut self, block_count: usize) -> Hash256 { + pub(super) async fn build_chain(&mut self, block_count: usize) -> Hash256 { let mut blocks = vec![]; // Initialise a new beacon chain @@ -947,6 +1154,30 @@ impl TestRig { self.trigger_with_last_block(); } + /// Import blocks for slots 1..=up_to_slot into the local chain (advance local head) + pub(super) async fn import_blocks_up_to_slot(&mut self, up_to_slot: u64) { + for slot in 1..=up_to_slot { + let rpc_block = self + .network_blocks_by_slot + .get(&Slot::new(slot)) + .unwrap_or_else(|| panic!("No block at slot {slot}")) + .clone(); + let block_root = rpc_block.canonical_root(); + self.harness + .chain + .process_block( + block_root, + rpc_block, + NotifyExecutionLayer::Yes, + BlockImportSource::Gossip, + || Ok(()), + ) + .await + .unwrap(); + } + self.harness.chain.recompute_head_at_current_slot().await; + } + /// Import a block directly into the chain without going through lookup sync async fn import_block_by_root(&mut self, block_root: Hash256) { let range_sync_block = self @@ -1000,23 +1231,32 @@ impl TestRig { // Post-test assertions - fn head_slot(&self) -> Slot { + pub(super) fn head_slot(&self) -> Slot { self.harness.chain.head().head_slot() } - fn assert_head_slot(&self, 
slot: u64) { + pub(super) fn assert_head_slot(&self, slot: u64) { assert_eq!(self.head_slot(), Slot::new(slot), "Unexpected head slot"); } - fn max_known_slot(&self) -> Slot { + pub(super) fn max_known_slot(&self) -> Slot { self.network_blocks_by_slot .keys() .max() .copied() - .expect("no blocks") + .unwrap_or_default() } - fn assert_penalties(&self, expected_penalties: &[&'static str]) { + pub(super) fn finalized_epoch(&self) -> types::Epoch { + self.harness + .chain + .canonical_head + .cached_head() + .finalized_checkpoint() + .epoch + } + + pub(super) fn assert_penalties(&self, expected_penalties: &[&'static str]) { let penalties = self .penalties .iter() @@ -1034,7 +1274,7 @@ impl TestRig { } } - fn assert_penalties_of_type(&self, expected_penalty: &'static str) { + pub(super) fn assert_penalties_of_type(&self, expected_penalty: &'static str) { if self.penalties.is_empty() { panic!("No penalties but expected some of type {expected_penalty}"); } @@ -1051,7 +1291,7 @@ impl TestRig { } } - fn assert_no_penalties(&mut self) { + pub(super) fn assert_no_penalties(&mut self) { if !self.penalties.is_empty() { panic!("Some downscore events: {:?}", self.penalties); } @@ -1102,7 +1342,7 @@ impl TestRig { } /// Assert there is at least one range sync chain created and that all sync chains completed - fn assert_successful_range_sync(&self) { + pub(super) fn assert_successful_range_sync(&self) { assert!( self.range_sync_chains_added() > 0, "No created range sync chains" @@ -1425,6 +1665,7 @@ impl TestRig { } } + #[allow(dead_code)] pub fn pop_received_processor_event) -> Option>( &mut self, predicate_transform: F, diff --git a/beacon_node/network/src/sync/tests/range.rs b/beacon_node/network/src/sync/tests/range.rs index c19ee8eb6d..891d9d1e97 100644 --- a/beacon_node/network/src/sync/tests/range.rs +++ b/beacon_node/network/src/sync/tests/range.rs @@ -1,110 +1,47 @@ +//! Range sync tests for `BlocksByRange`, `BlobsByRange`, `DataColumnsByRange`. +//! +//! 
Tests follow the pattern from `lookups.rs`: +//! ```ignore +//! async fn test_name() { +//! let mut r = TestRig::default(); +//! r.setup_xyz().await; +//! r.simulate(SimulateConfig::happy_path()).await; +//! r.assert_range_sync_completed(); +//! } +//! ``` +//! +//! Rules: +//! - Tests must be succinct and readable (3-10 lines per test body) +//! - All complex logic lives in helpers (setup, SimulateConfig, assert) +//! - Test bodies must not manually grab requests, send SyncMessages, or do anything overly specific +//! - All tests use `simulate()` if they need peers to fulfill requests +//! - Extend `SimulateConfig` for new range-specific behaviors +//! - Extend `simulate()` to support by_range methods + +use super::lookups::SimulateConfig; use super::*; -use crate::network_beacon_processor::ChainSegmentProcessId; use crate::status::ToStatusMessage; use crate::sync::SyncMessage; use crate::sync::manager::SLOT_IMPORT_TOLERANCE; -use crate::sync::network_context::RangeRequestId; use crate::sync::range_sync::RangeSyncType; -use beacon_chain::BeaconChain; -use beacon_chain::block_verification_types::AvailableBlockData; -use beacon_chain::custody_context::NodeCustodyType; -use beacon_chain::data_column_verification::CustodyDataColumn; -use beacon_chain::test_utils::{AttestationStrategy, BlockStrategy}; -use beacon_chain::{EngineState, NotifyExecutionLayer, block_verification_types::RangeSyncBlock}; -use beacon_processor::WorkType; -use lighthouse_network::rpc::RequestType; -use lighthouse_network::rpc::methods::{ - BlobsByRangeRequest, DataColumnsByRangeRequest, OldBlocksByRangeRequest, - OldBlocksByRangeRequestV2, StatusMessageV2, -}; -use lighthouse_network::service::api_types::{ - AppRequestId, BlobsByRangeRequestId, BlocksByRangeRequestId, DataColumnsByRangeRequestId, - SyncRequestId, -}; +use lighthouse_network::rpc::RPCError; +use lighthouse_network::rpc::methods::StatusMessageV2; use lighthouse_network::{PeerId, SyncInfo}; -use std::time::Duration; -use types::{ 
- BlobSidecarList, BlockImportSource, Epoch, EthSpec, Hash256, MinimalEthSpec as E, - SignedBeaconBlock, SignedBeaconBlockHash, Slot, -}; +use types::{Epoch, EthSpec, Hash256, MinimalEthSpec as E, Slot}; -const D: Duration = Duration::new(0, 0); - -pub(crate) enum DataSidecars { - Blobs(BlobSidecarList), - DataColumns(Vec>), -} - -enum ByRangeDataRequestIds { - PreDeneb, - PrePeerDAS(BlobsByRangeRequestId, PeerId), - PostPeerDAS(Vec<(DataColumnsByRangeRequestId, PeerId)>), -} - -/// Sync tests are usually written in the form: -/// - Do some action -/// - Expect a request to be sent -/// - Complete the above request -/// -/// To make writting tests succint, the machinery in this testing rig automatically identifies -/// _which_ request to complete. Picking the right request is critical for tests to pass, so this -/// filter allows better expressivity on the criteria to identify the right request. -#[derive(Default, Debug, Clone)] -struct RequestFilter { - peer: Option, - epoch: Option, -} - -impl RequestFilter { - fn peer(mut self, peer: PeerId) -> Self { - self.peer = Some(peer); - self - } - - fn epoch(mut self, epoch: u64) -> Self { - self.epoch = Some(epoch); - self - } -} - -fn filter() -> RequestFilter { - RequestFilter::default() -} +/// MinimalEthSpec has 8 slots per epoch +const SLOTS_PER_EPOCH: usize = 8; impl TestRig { - /// Produce a head peer with an advanced head fn add_head_peer(&mut self) -> PeerId { - self.add_head_peer_with_root(Hash256::random()) - } - - /// Produce a head peer with an advanced head - fn add_head_peer_with_root(&mut self, head_root: Hash256) -> PeerId { let local_info = self.local_info(); self.add_supernode_peer(SyncInfo { - head_root, + head_root: Hash256::random(), head_slot: local_info.head_slot + 1 + Slot::new(SLOT_IMPORT_TOLERANCE as u64), ..local_info }) } - // Produce a finalized peer with an advanced finalized epoch - fn add_finalized_peer(&mut self) -> PeerId { - self.add_finalized_peer_with_root(Hash256::random()) - } - 
- // Produce a finalized peer with an advanced finalized epoch - fn add_finalized_peer_with_root(&mut self, finalized_root: Hash256) -> PeerId { - let local_info = self.local_info(); - let finalized_epoch = local_info.finalized_epoch + 2; - self.add_supernode_peer(SyncInfo { - finalized_epoch, - finalized_root, - head_slot: finalized_epoch.start_slot(E::slots_per_epoch()), - head_root: Hash256::random(), - earliest_available_slot: None, - }) - } - fn finalized_remote_info_advanced_by(&self, advanced_epochs: Epoch) -> SyncInfo { let local_info = self.local_info(); let finalized_epoch = local_info.finalized_epoch + advanced_epochs; @@ -142,11 +79,7 @@ impl TestRig { } fn add_supernode_peer(&mut self, remote_info: SyncInfo) -> PeerId { - // Create valid peer known to network globals - // TODO(fulu): Using supernode peers to ensure we have peer across all column - // subnets for syncing. Should add tests connecting to full node peers. let peer_id = self.new_connected_supernode_peer(); - // Send peer to sync self.send_sync_message(SyncMessage::AddPeer(peer_id, remote_info)); peer_id } @@ -184,450 +117,362 @@ impl TestRig { ) } - #[track_caller] - fn assert_chain_segments(&mut self, count: usize) { - for i in 0..count { - self.pop_received_processor_event(|ev| { - (ev.work_type() == beacon_processor::WorkType::ChainSegment).then_some(()) - }) - .unwrap_or_else(|e| panic!("Expect ChainSegment work event count {i}: {e:?}")); - } + // -- Setup helpers -- + + /// Head sync: peers whose finalized root/epoch match ours (known to fork choice), + /// but whose head is ahead. Only head chain is created. 
+ async fn setup_head_sync(&mut self) { + self.build_chain(SLOTS_PER_EPOCH).await; + self.add_head_peer(); + self.assert_state(RangeSyncType::Head); } - fn update_execution_engine_state(&mut self, state: EngineState) { - self.log(&format!("execution engine state updated: {state:?}")); - self.sync_manager.update_execution_engine_state(state); + /// Finalized sync: peers whose finalized epoch is advanced and head == finalized start slot. + /// Returns the remote SyncInfo (needed for blacklist tests). + async fn setup_finalized_sync(&mut self) -> SyncInfo { + let advanced_epochs = 5; + self.build_chain(advanced_epochs * SLOTS_PER_EPOCH).await; + let remote_info = self.finalized_remote_info_advanced_by((advanced_epochs as u64).into()); + self.add_fullnode_peers(remote_info.clone(), 100); + self.add_supernode_peer(remote_info.clone()); + self.assert_state(RangeSyncType::Finalized); + remote_info } - fn find_blocks_by_range_request( - &mut self, - request_filter: RequestFilter, - ) -> ((BlocksByRangeRequestId, PeerId), ByRangeDataRequestIds) { - let filter_f = |peer: PeerId, start_slot: u64| { - if let Some(expected_epoch) = request_filter.epoch { - let epoch = Slot::new(start_slot).epoch(E::slots_per_epoch()).as_u64(); - if epoch != expected_epoch { - return false; - } - } - if let Some(expected_peer) = request_filter.peer - && peer != expected_peer - { - return false; - } - - true + /// Finalized-to-head: peers whose finalized is advanced AND head is beyond finalized. + /// After finalized sync completes, head chains are created from awaiting_head_peers. 
+ async fn setup_finalized_and_head_sync(&mut self) { + let finalized_epochs = 5; + let head_epochs = 7; + self.build_chain(head_epochs * SLOTS_PER_EPOCH).await; + let local_info = self.local_info(); + let finalized_epoch = local_info.finalized_epoch + Epoch::new(finalized_epochs as u64); + let head_slot = Slot::new((head_epochs * SLOTS_PER_EPOCH) as u64); + let remote_info = SyncInfo { + finalized_epoch, + finalized_root: Hash256::random(), + head_slot, + head_root: Hash256::random(), + earliest_available_slot: None, }; - - let block_req = self - .pop_received_network_event(|ev| match ev { - NetworkMessage::SendRequest { - peer_id, - request: - RequestType::BlocksByRange(OldBlocksByRangeRequest::V2( - OldBlocksByRangeRequestV2 { start_slot, .. }, - )), - app_request_id: AppRequestId::Sync(SyncRequestId::BlocksByRange(id)), - } if filter_f(*peer_id, *start_slot) => Some((*id, *peer_id)), - _ => None, - }) - .unwrap_or_else(|e| { - panic!("Should have a BlocksByRange request, filter {request_filter:?}: {e:?}") - }); - - let by_range_data_requests = if self.is_after_fulu() { - let mut data_columns_requests = vec![]; - while let Ok(data_columns_request) = self.pop_received_network_event(|ev| match ev { - NetworkMessage::SendRequest { - peer_id, - request: - RequestType::DataColumnsByRange(DataColumnsByRangeRequest { - start_slot, .. - }), - app_request_id: AppRequestId::Sync(SyncRequestId::DataColumnsByRange(id)), - } if filter_f(*peer_id, *start_slot) => Some((*id, *peer_id)), - _ => None, - }) { - data_columns_requests.push(data_columns_request); - } - if data_columns_requests.is_empty() { - panic!("Found zero DataColumnsByRange requests, filter {request_filter:?}"); - } - ByRangeDataRequestIds::PostPeerDAS(data_columns_requests) - } else if self.is_after_deneb() { - let (id, peer) = self - .pop_received_network_event(|ev| match ev { - NetworkMessage::SendRequest { - peer_id, - request: RequestType::BlobsByRange(BlobsByRangeRequest { start_slot, .. 
}), - app_request_id: AppRequestId::Sync(SyncRequestId::BlobsByRange(id)), - } if filter_f(*peer_id, *start_slot) => Some((*id, *peer_id)), - _ => None, - }) - .unwrap_or_else(|e| { - panic!("Should have a blobs by range request, filter {request_filter:?}: {e:?}") - }); - ByRangeDataRequestIds::PrePeerDAS(id, peer) - } else { - ByRangeDataRequestIds::PreDeneb - }; - - (block_req, by_range_data_requests) + self.add_fullnode_peers(remote_info.clone(), 100); + self.add_supernode_peer(remote_info); + self.assert_state(RangeSyncType::Finalized); } - fn find_and_complete_blocks_by_range_request( - &mut self, - request_filter: RequestFilter, - ) -> RangeRequestId { - let ((blocks_req_id, block_peer), by_range_data_request_ids) = - self.find_blocks_by_range_request(request_filter); - - // Complete the request with a single stream termination - self.log(&format!( - "Completing BlocksByRange request {blocks_req_id:?} with empty stream" - )); - self.send_sync_message(SyncMessage::RpcBlock { - sync_request_id: SyncRequestId::BlocksByRange(blocks_req_id), - peer_id: block_peer, - beacon_block: None, - seen_timestamp: D, - }); - - match by_range_data_request_ids { - ByRangeDataRequestIds::PreDeneb => {} - ByRangeDataRequestIds::PrePeerDAS(id, peer_id) => { - // Complete the request with a single stream termination - self.log(&format!( - "Completing BlobsByRange request {id:?} with empty stream" - )); - self.send_sync_message(SyncMessage::RpcBlob { - sync_request_id: SyncRequestId::BlobsByRange(id), - peer_id, - blob_sidecar: None, - seen_timestamp: D, - }); - } - ByRangeDataRequestIds::PostPeerDAS(data_column_req_ids) => { - // Complete the request with a single stream termination - for (id, peer_id) in data_column_req_ids { - self.log(&format!( - "Completing DataColumnsByRange request {id:?} with empty stream" - )); - self.send_sync_message(SyncMessage::RpcDataColumn { - sync_request_id: SyncRequestId::DataColumnsByRange(id), - peer_id, - data_column: None, - seen_timestamp: D, 
- }); - } - } - } - - blocks_req_id.parent_request_id.requester + /// Finalized sync with only 1 fullnode peer (insufficient custody coverage). + /// Returns remote_info to pass to `add_remaining_finalized_peers`. + async fn setup_finalized_sync_insufficient_peers(&mut self) -> SyncInfo { + let advanced_epochs = 5; + self.build_chain(advanced_epochs * SLOTS_PER_EPOCH).await; + let remote_info = self.finalized_remote_info_advanced_by((advanced_epochs as u64).into()); + self.add_fullnode_peer(remote_info.clone()); + self.assert_state(RangeSyncType::Finalized); + remote_info } - fn find_and_complete_processing_chain_segment(&mut self, id: ChainSegmentProcessId) { - self.pop_received_processor_event(|ev| { - (ev.work_type() == WorkType::ChainSegment).then_some(()) - }) - .unwrap_or_else(|e| panic!("Expected chain segment work event: {e}")); - - self.log(&format!( - "Completing ChainSegment processing work {id:?} with success" - )); - self.send_sync_message(SyncMessage::BatchProcessed { - sync_type: id, - result: crate::sync::BatchProcessResult::Success { - sent_blocks: 8, - imported_blocks: 8, - }, - }); - } - - fn complete_and_process_range_sync_until( - &mut self, - last_epoch: u64, - request_filter: RequestFilter, - ) { - for epoch in 0..last_epoch { - // Note: In this test we can't predict the block peer - let id = - self.find_and_complete_blocks_by_range_request(request_filter.clone().epoch(epoch)); - if let RangeRequestId::RangeSync { batch_id, .. 
} = id { - assert_eq!(batch_id.as_u64(), epoch, "Unexpected batch_id"); - } else { - panic!("unexpected RangeRequestId {id:?}"); - } - - let id = match id { - RangeRequestId::RangeSync { chain_id, batch_id } => { - ChainSegmentProcessId::RangeBatchId(chain_id, batch_id) - } - RangeRequestId::BackfillSync { batch_id } => { - ChainSegmentProcessId::BackSyncBatchId(batch_id) - } - }; - - self.find_and_complete_processing_chain_segment(id); - if epoch < last_epoch - 1 { - self.assert_state(RangeSyncType::Finalized); - } else { - self.assert_no_chains_exist(); - self.assert_no_failed_chains(); - } - } - } - - async fn create_canonical_block(&mut self) -> (SignedBeaconBlock, Option>) { - self.harness.advance_slot(); - - let block_root = self - .harness - .extend_chain( - 1, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ) + /// Finalized sync where local node already has blocks up to `local_epochs`. + /// Triggers optimistic start: the chain tries to download a batch at the local head + /// epoch concurrently with sequential processing from the start. 
+ async fn setup_finalized_sync_with_local_head(&mut self, local_epochs: usize) { + let target_epochs = local_epochs + 3; // target beyond local head + self.build_chain(target_epochs * SLOTS_PER_EPOCH).await; + self.import_blocks_up_to_slot((local_epochs * SLOTS_PER_EPOCH) as u64) .await; - - let store = &self.harness.chain.store; - let block = store.get_full_block(&block_root).unwrap().unwrap(); - let fork = block.fork_name_unchecked(); - - let data_sidecars = if fork.fulu_enabled() { - store - .get_data_columns(&block_root, fork) - .unwrap() - .map(|columns| { - columns - .into_iter() - .map(CustodyDataColumn::from_asserted_custody) - .collect() - }) - .map(DataSidecars::DataColumns) - } else if fork.deneb_enabled() { - store - .get_blobs(&block_root) - .unwrap() - .blobs() - .map(DataSidecars::Blobs) - } else { - None - }; - - (block, data_sidecars) + let remote_info = self.finalized_remote_info_advanced_by((target_epochs as u64).into()); + self.add_fullnode_peers(remote_info.clone(), 100); + self.add_supernode_peer(remote_info); + self.assert_state(RangeSyncType::Finalized); } - async fn remember_block( - &mut self, - (block, data_sidecars): (SignedBeaconBlock, Option>), - ) { - // This code is kind of duplicated from Harness::process_block, but takes sidecars directly. 
- let block_root = block.canonical_root(); - self.harness.set_current_slot(block.slot()); - let _: SignedBeaconBlockHash = self - .harness - .chain - .process_block( - block_root, - build_range_sync_block(block.into(), &data_sidecars, self.harness.chain.clone()), - NotifyExecutionLayer::Yes, - BlockImportSource::RangeSync, - || Ok(()), - ) - .await - .unwrap() - .try_into() - .unwrap(); - self.harness.chain.recompute_head_at_current_slot().await; + /// Add enough peers to cover all custody columns (same chain as insufficient setup) + fn add_remaining_finalized_peers(&mut self, remote_info: SyncInfo) { + self.add_fullnode_peers(remote_info.clone(), 100); + self.add_supernode_peer(remote_info); + } + + // -- Assert helpers -- + + /// Assert range sync completed: chains created and removed, all blocks ingested, + /// finalized epoch advanced, no penalties, no leftover events. + fn assert_range_sync_completed(&mut self) { + self.assert_successful_range_sync(); + self.assert_no_failed_chains(); + assert_eq!( + self.head_slot(), + self.max_known_slot(), + "Head slot should match the last built block (all blocks ingested)" + ); + assert!( + self.finalized_epoch() > types::Epoch::new(0), + "Finalized epoch should have advanced past genesis, got {}", + self.finalized_epoch() + ); + self.assert_no_penalties(); + self.assert_empty_network(); + self.assert_empty_processor(); + } + + /// Assert head sync completed (no finalization expected for short ranges) + fn assert_head_sync_completed(&mut self) { + self.assert_successful_range_sync(); + self.assert_no_failed_chains(); + assert_eq!( + self.head_slot(), + self.max_known_slot(), + "Head slot should match the last built block (all blocks ingested)" + ); + self.assert_no_penalties(); + } + + /// Assert chain was removed and peers received faulty_chain penalty + fn assert_range_sync_chain_failed(&mut self) { + self.assert_no_chains_exist(); + assert!( + self.penalties.iter().any(|p| p.msg == "faulty_chain"), + "Expected 
faulty_chain penalty, got {:?}", + self.penalties + ); + } + + /// Assert range sync removed chains (e.g., all peers disconnected) + fn assert_range_sync_chain_removed(&mut self) { + self.assert_no_chains_exist(); + } + + /// Assert a new peer with a blacklisted root gets disconnected + fn assert_peer_blacklisted(&mut self, remote_info: SyncInfo) { + let new_peer = self.add_supernode_peer(remote_info); + self.pop_received_network_event(|ev| match ev { + NetworkMessage::GoodbyePeer { peer_id, .. } if *peer_id == new_peer => Some(()), + _ => None, + }) + .expect("Peer with blacklisted root should receive Goodbye"); } } -fn build_range_sync_block( - block: Arc>, - data_sidecars: &Option>, - chain: Arc>, -) -> RangeSyncBlock { - match data_sidecars { - Some(DataSidecars::Blobs(blobs)) => { - let block_data = AvailableBlockData::new_with_blobs(blobs.clone()); - RangeSyncBlock::new( - block, - block_data, - &chain.data_availability_checker, - chain.spec.clone(), - ) - .unwrap() - } - Some(DataSidecars::DataColumns(columns)) => { - let block_data = AvailableBlockData::new_with_data_columns( - columns - .iter() - .map(|c| c.as_data_column().clone()) - .collect::>(), - ); - RangeSyncBlock::new( - block, - block_data, - &chain.data_availability_checker, - chain.spec.clone(), - ) - .unwrap() - } - // Block has no data, expects zero columns - None => RangeSyncBlock::new( - block, - AvailableBlockData::NoData, - &chain.data_availability_checker, - chain.spec.clone(), - ) - .unwrap(), - } -} - -#[test] -fn head_chain_removed_while_finalized_syncing() { - // NOTE: this is a regression test. - // Added in PR https://github.com/sigp/lighthouse/pull/2821 - let mut rig = TestRig::default(); - - // Get a peer with an advanced head - let head_peer = rig.add_head_peer(); - rig.assert_state(RangeSyncType::Head); - - // Sync should have requested a batch, grab the request. 
- let _ = rig.find_blocks_by_range_request(filter().peer(head_peer)); - - // Now get a peer with an advanced finalized epoch. - let finalized_peer = rig.add_finalized_peer(); - rig.assert_state(RangeSyncType::Finalized); - - // Sync should have requested a batch, grab the request - let _ = rig.find_blocks_by_range_request(filter().peer(finalized_peer)); - - // Fail the head chain by disconnecting the peer. - rig.peer_disconnected(head_peer); - rig.assert_state(RangeSyncType::Finalized); -} +// ============================================================================================ +// Tests +// ============================================================================================ +/// Head sync: single peer slightly ahead → download batches → all blocks ingested. #[tokio::test] -async fn state_update_while_purging() { - // NOTE: this is a regression test. - // Added in PR https://github.com/sigp/lighthouse/pull/2827 - let mut rig = TestRig::with_custody_type(NodeCustodyType::SemiSupernode); - - // Create blocks on a separate harness - // SemiSupernode ensures enough columns are stored for sampling + custody RPC block validation - let mut rig_2 = TestRig::with_custody_type(NodeCustodyType::SemiSupernode); - // Need to create blocks that can be inserted into the fork-choice and fit the "known - // conditions" below. - let head_peer_block = rig_2.create_canonical_block().await; - let head_peer_root = head_peer_block.0.canonical_root(); - let finalized_peer_block = rig_2.create_canonical_block().await; - let finalized_peer_root = finalized_peer_block.0.canonical_root(); - - // Get a peer with an advanced head - let head_peer = rig.add_head_peer_with_root(head_peer_root); - rig.assert_state(RangeSyncType::Head); - - // Sync should have requested a batch, grab the request. - let _ = rig.find_blocks_by_range_request(filter().peer(head_peer)); - - // Now get a peer with an advanced finalized epoch. 
- let finalized_peer = rig.add_finalized_peer_with_root(finalized_peer_root); - rig.assert_state(RangeSyncType::Finalized); - - // Sync should have requested a batch, grab the request - let _ = rig.find_blocks_by_range_request(filter().peer(finalized_peer)); - - // Now the chain knows both chains target roots. - rig.remember_block(head_peer_block).await; - rig.remember_block(finalized_peer_block).await; - - // Add an additional peer to the second chain to make range update it's status - rig.add_finalized_peer(); -} - -#[test] -fn pause_and_resume_on_ee_offline() { - let mut rig = TestRig::default(); - - // add some peers - let peer1 = rig.add_head_peer(); - // make the ee offline - rig.update_execution_engine_state(EngineState::Offline); - // send the response to the request - rig.find_and_complete_blocks_by_range_request(filter().peer(peer1).epoch(0)); - // the beacon processor shouldn't have received any work - rig.assert_empty_processor(); - - // while the ee is offline, more peers might arrive. Add a new finalized peer. - let _peer2 = rig.add_finalized_peer(); - - // send the response to the request - // Don't filter requests and the columns requests may be sent to peer1 or peer2 - // We need to filter by epoch, because the previous batch eagerly sent requests for the next - // epoch for the other batch. So we can either filter by epoch of by sync type. - rig.find_and_complete_blocks_by_range_request(filter().epoch(0)); - // the beacon processor shouldn't have received any work - rig.assert_empty_processor(); - // make the beacon processor available again. - // update_execution_engine_state implicitly calls resume - // now resume range, we should have two processing requests in the beacon processor. 
- rig.update_execution_engine_state(EngineState::Online); - - // The head chain and finalized chain (2) should be in the processing queue - rig.assert_chain_segments(2); -} - -/// To attempt to finalize the peer's status finalized checkpoint we synced to its finalized epoch + -/// 2 epochs + 1 slot. -const EXTRA_SYNCED_EPOCHS: u64 = 2 + 1; - -#[test] -fn finalized_sync_enough_global_custody_peers_few_chain_peers() { - // Run for all forks +async fn head_sync_completes() { let mut r = TestRig::default(); - - let advanced_epochs: u64 = 2; - let remote_info = r.finalized_remote_info_advanced_by(advanced_epochs.into()); - - // Generate enough peers and supernodes to cover all custody columns - let peer_count = 100; - r.add_fullnode_peers(remote_info.clone(), peer_count); - r.add_supernode_peer(remote_info); - r.assert_state(RangeSyncType::Finalized); - - let last_epoch = advanced_epochs + EXTRA_SYNCED_EPOCHS; - r.complete_and_process_range_sync_until(last_epoch, filter()); + r.setup_head_sync().await; + r.simulate(SimulateConfig::happy_path()).await; + r.assert_head_sync_completed(); + r.assert_head_slot(SLOTS_PER_EPOCH as u64); } -#[test] -fn finalized_sync_not_enough_custody_peers_on_start() { +/// Peers with advanced finalized AND head beyond finalized. Finalized sync completes first, +/// then head chains are created from awaiting_head_peers to sync the remaining gap. +#[tokio::test] +async fn finalized_to_head_transition() { + let mut r = TestRig::default(); + r.setup_finalized_and_head_sync().await; + r.simulate(SimulateConfig::happy_path()).await; + r.assert_range_sync_completed(); + r.assert_head_slot(7 * SLOTS_PER_EPOCH as u64); +} + +/// Finalized sync happy path: all batches download and process, head advances to target, +/// finalized epoch advances past genesis. 
+#[tokio::test] +async fn finalized_sync_completes() { + let mut r = TestRig::default(); + r.setup_finalized_sync().await; + r.simulate(SimulateConfig::happy_path()).await; + r.assert_range_sync_completed(); + r.assert_head_slot(5 * SLOTS_PER_EPOCH as u64); +} + +/// First BlocksByRange request gets an RPC error. Batch retries from another peer, +/// sync completes with no penalties (RPC errors are not penalized). +#[tokio::test] +async fn batch_rpc_error_retries() { + let mut r = TestRig::default(); + r.setup_finalized_sync().await; + r.simulate(SimulateConfig::happy_path().return_rpc_error(RPCError::UnsupportedProtocol)) + .await; + r.assert_range_sync_completed(); +} + +/// Peer returns zero blocks for a BlocksByRange request. Batch retries, sync completes. +#[tokio::test] +async fn batch_peer_returns_empty_then_succeeds() { + let mut r = TestRig::default(); + r.setup_finalized_sync().await; + r.simulate(SimulateConfig::happy_path().with_no_range_blocks_n_times(1)) + .await; + r.assert_successful_range_sync(); +} + +/// Peer returns zero columns for a DataColumnsByRange request. Batch retries, sync completes. +/// Only exercises column logic on fulu+. +#[tokio::test] +async fn batch_peer_returns_no_columns_then_succeeds() { + let mut r = TestRig::default(); + r.setup_finalized_sync().await; + r.simulate(SimulateConfig::happy_path().with_no_range_columns_n_times(1)) + .await; + r.assert_successful_range_sync(); +} + +/// Peer returns columns with indices it wasn't asked for → UnrequestedIndex verify error. +/// Batch retries from another peer, sync completes. +#[tokio::test] +async fn batch_peer_returns_wrong_column_indices_then_succeeds() { + let mut r = TestRig::default(); + r.setup_finalized_sync().await; + r.simulate(SimulateConfig::happy_path().with_wrong_range_column_indices_n_times(1)) + .await; + r.assert_successful_range_sync(); +} + +/// Peer returns columns from a slot outside the requested range → UnrequestedSlot verify error. 
+/// Batch retries from another peer, sync completes. +#[tokio::test] +async fn batch_peer_returns_wrong_column_slots_then_succeeds() { + let mut r = TestRig::default(); + r.setup_finalized_sync().await; + r.simulate(SimulateConfig::happy_path().with_wrong_range_column_slots_n_times(1)) + .await; + r.assert_successful_range_sync(); +} + +/// PeerDAS: peer returns only half the requested columns. Block-sidecar coupling detects +/// missing columns → CouplingError::DataColumnPeerFailure → retry_partial_batch from other peers. +#[tokio::test] +async fn batch_peer_returns_partial_columns_then_succeeds() { let mut r = TestRig::default(); - // Only run post-PeerDAS if !r.fork_name.fulu_enabled() { return; } - - let advanced_epochs: u64 = 2; - let remote_info = r.finalized_remote_info_advanced_by(advanced_epochs.into()); - - // Unikely that the single peer we added has enough columns for us. Tests are deterministic and - // this error should never be hit - r.add_fullnode_peer(remote_info.clone()); - r.assert_state(RangeSyncType::Finalized); - - // Because we don't have enough peers on all columns we haven't sent any request. - // NOTE: There's a small chance that this single peer happens to custody exactly the set we - // expect, in that case the test will fail. Find a way to make the test deterministic. - r.assert_empty_network(); - - // Generate enough peers and supernodes to cover all custody columns - let peer_count = 100; - r.add_fullnode_peers(remote_info.clone(), peer_count); - r.add_supernode_peer(remote_info); - - let last_epoch = advanced_epochs + EXTRA_SYNCED_EPOCHS; - r.complete_and_process_range_sync_until(last_epoch, filter()); + r.setup_finalized_sync().await; + r.simulate(SimulateConfig::happy_path().with_partial_range_columns_n_times(1)) + .await; + r.assert_successful_range_sync(); +} + +/// Batch processing returns NonFaultyFailure (e.g. transient error). Batch goes back to +/// AwaitingDownload, retries without penalty, sync completes. 
+#[tokio::test] +async fn batch_non_faulty_failure_retries() { + let mut r = TestRig::default(); + r.setup_finalized_sync().await; + r.simulate(SimulateConfig::happy_path().with_range_non_faulty_failures(1)) + .await; + r.assert_range_sync_completed(); +} + +/// Batch processing returns FaultyFailure once. Peer penalized with "faulty_batch", +/// batch redownloaded from a different peer, sync completes. +#[tokio::test] +async fn batch_faulty_failure_redownloads() { + let mut r = TestRig::default(); + r.setup_finalized_sync().await; + r.simulate(SimulateConfig::happy_path().with_range_faulty_failures(1)) + .await; + r.assert_successful_range_sync(); + r.assert_penalties_of_type("faulty_batch"); +} + +/// Batch processing fails MAX_BATCH_PROCESSING_ATTEMPTS (3) times with FaultyFailure. +/// Chain removed, all peers penalized with "faulty_chain". +#[tokio::test] +async fn batch_max_failures_removes_chain() { + let mut r = TestRig::default(); + r.setup_finalized_sync().await; + r.simulate(SimulateConfig::happy_path().with_range_faulty_failures(3)) + .await; + r.assert_range_sync_chain_failed(); +} + +/// Chain fails via max faulty retries → finalized root added to failed_chains LRU. +/// A new peer advertising the same finalized root gets disconnected with GoodbyeReason. +#[tokio::test] +async fn failed_chain_blacklisted() { + let mut r = TestRig::default(); + let remote_info = r.setup_finalized_sync().await; + r.simulate(SimulateConfig::happy_path().with_range_faulty_failures(3)) + .await; + r.assert_range_sync_chain_failed(); + r.assert_peer_blacklisted(remote_info); +} + +/// All peers disconnect before any request is fulfilled → chain removed (EmptyPeerPool). 
+#[tokio::test] +async fn all_peers_disconnect_removes_chain() { + let mut r = TestRig::default(); + r.setup_finalized_sync().await; + r.simulate(SimulateConfig::happy_path().with_disconnect_after_range_requests(0)) + .await; + r.assert_range_sync_chain_removed(); +} + +/// Peers disconnect after 1 request is served. Remaining in-flight responses arrive +/// for a chain that no longer exists — verified as a no-op (no crash). +#[tokio::test] +async fn late_response_for_removed_chain() { + let mut r = TestRig::default(); + r.setup_finalized_sync().await; + r.simulate(SimulateConfig::happy_path().with_disconnect_after_range_requests(1)) + .await; + r.assert_range_sync_chain_removed(); +} + +/// Execution engine goes offline at sync start. Batch responses complete but processing +/// is paused. After 2 responses, EE comes back online, queued batches process, sync completes. +#[tokio::test] +async fn ee_offline_then_online_resumes_sync() { + let mut r = TestRig::default(); + r.setup_finalized_sync().await; + r.simulate(SimulateConfig::happy_path().with_ee_offline_for_n_range_responses(2)) + .await; + r.assert_range_sync_completed(); +} + +/// Local node already has blocks up to epoch 3. Finalized sync starts targeting epoch 6. +/// The chain uses optimistic start: downloads a batch at the local head epoch concurrently +/// with sequential processing from the start. All blocks ingested. +#[tokio::test] +async fn finalized_sync_with_local_head_partial() { + let mut r = TestRig::default(); + r.setup_finalized_sync_with_local_head(3).await; + r.simulate(SimulateConfig::happy_path()).await; + r.assert_range_sync_completed(); +} + +/// Local node has all blocks except the last one. Finalized sync only needs to fill the +/// final gap. Tests optimistic start where local head is near the target. 
+#[tokio::test] +async fn finalized_sync_with_local_head_near_target() { + let mut r = TestRig::default(); + let target_epochs = 5; + let local_slots = (target_epochs * SLOTS_PER_EPOCH) - 1; // all blocks except last + r.build_chain(target_epochs * SLOTS_PER_EPOCH).await; + r.import_blocks_up_to_slot(local_slots as u64).await; + let remote_info = r.finalized_remote_info_advanced_by((target_epochs as u64).into()); + r.add_fullnode_peers(remote_info.clone(), 100); + r.add_supernode_peer(remote_info); + r.assert_state(RangeSyncType::Finalized); + r.simulate(SimulateConfig::happy_path()).await; + r.assert_range_sync_completed(); + r.assert_head_slot((target_epochs * SLOTS_PER_EPOCH) as u64); +} + +/// PeerDAS only: single fullnode peer doesn't cover all custody columns → no requests sent. +/// Once enough fullnodes + a supernode arrive, sync proceeds and completes. +#[tokio::test] +async fn not_enough_custody_peers_then_peers_arrive() { + let mut r = TestRig::default(); + if !r.fork_name.fulu_enabled() { + return; + } + let remote_info = r.setup_finalized_sync_insufficient_peers().await; + r.assert_empty_network(); + r.add_remaining_finalized_peers(remote_info); + r.simulate(SimulateConfig::happy_path()).await; + r.assert_range_sync_completed(); } diff --git a/scripts/range-sync-coverage.sh b/scripts/range-sync-coverage.sh new file mode 100755 index 0000000000..df438c0c7f --- /dev/null +++ b/scripts/range-sync-coverage.sh @@ -0,0 +1,136 @@ +#!/bin/bash +# Aggregate range sync test coverage across all forks +# Usage: ./scripts/range-sync-coverage.sh [--html] +set -e + +REPO_ROOT="$(cd "$(dirname "$0")/.." 
&& pwd)" +cd "$REPO_ROOT" + +TARGET_DIR="${CARGO_TARGET_DIR:-/mnt/ssd/builds/lighthouse-range-sync-tests}" +FORKS=(base altair bellatrix capella deneb electra fulu) +LCOV_DIR="/tmp/range-cov-forks" +MERGED="/tmp/range-cov-merged.lcov" + +rm -rf "$LCOV_DIR" +mkdir -p "$LCOV_DIR" + +echo "=== Running coverage for each fork ===" +for fork in "${FORKS[@]}"; do + echo "--- $fork ---" + CARGO_TARGET_DIR="$TARGET_DIR" FORK_NAME="$fork" \ + cargo llvm-cov --features "network/fake_crypto,network/fork_from_env" \ + -p network --lib --lcov --output-path "$LCOV_DIR/$fork.lcov" \ + -- "sync::tests::range" 2>&1 | grep -E "test result|running" +done + +echo "" +echo "=== Merging lcov files ===" + +# Merge all lcov files: for each source file, take max hit count per line +python3 - "$LCOV_DIR" "$MERGED" << 'PYEOF' +import sys, os, glob +from collections import defaultdict + +lcov_dir = sys.argv[1] +output = sys.argv[2] + +# Parse all lcov files: file -> line -> max hits +coverage = defaultdict(lambda: defaultdict(int)) +fn_coverage = defaultdict(lambda: defaultdict(int)) +current_sf = None + +for lcov_file in sorted(glob.glob(os.path.join(lcov_dir, "*.lcov"))): + with open(lcov_file) as f: + for line in f: + line = line.strip() + if line.startswith("SF:"): + current_sf = line[3:] + elif line.startswith("DA:") and current_sf: + parts = line[3:].split(",") + lineno = int(parts[0]) + hits = int(parts[1]) + coverage[current_sf][lineno] = max(coverage[current_sf][lineno], hits) + elif line.startswith("FNDA:") and current_sf: + parts = line[5:].split(",", 1) + hits = int(parts[0]) + fn_name = parts[1] + fn_coverage[current_sf][fn_name] = max(fn_coverage[current_sf][fn_name], hits) + +# Write merged lcov +with open(output, "w") as f: + for sf in sorted(coverage.keys()): + f.write(f"SF:{sf}\n") + for fn_name, hits in sorted(fn_coverage.get(sf, {}).items()): + f.write(f"FNDA:{hits},{fn_name}\n") + for lineno in sorted(coverage[sf].keys()): + f.write(f"DA:{lineno},{coverage[sf][lineno]}\n") 
+ total = len(coverage[sf]) + covered = sum(1 for h in coverage[sf].values() if h > 0) + f.write(f"LH:{covered}\n") + f.write(f"LF:{total}\n") + f.write("end_of_record\n") + +print(f"Merged {len(glob.glob(os.path.join(lcov_dir, '*.lcov')))} lcov files -> {output}") +PYEOF + +echo "" +echo "=== Range sync coverage (merged across all forks) ===" + +# Extract and display range sync files +python3 - "$MERGED" << 'PYEOF' +import sys +from collections import defaultdict + +current_sf = None +files = {} # short_name -> (total_lines, covered_lines) +lines = defaultdict(dict) + +with open(sys.argv[1]) as f: + for line in f: + line = line.strip() + if line.startswith("SF:"): + current_sf = line[3:] + elif line.startswith("DA:") and current_sf: + parts = line[3:].split(",") + lineno, hits = int(parts[0]), int(parts[1]) + lines[current_sf][lineno] = hits + +# Filter to range sync files +targets = [ + "range_sync/chain.rs", + "range_sync/chain_collection.rs", + "range_sync/range.rs", + "requests/blocks_by_range.rs", + "requests/blobs_by_range.rs", + "requests/data_columns_by_range.rs", +] + +print(f"{'File':<45} {'Lines':>6} {'Covered':>8} {'Missed':>7} {'Coverage':>9}") +print("-" * 80) + +total_all = 0 +covered_all = 0 + +for sf in sorted(lines.keys()): + short = sf.split("sync/")[-1] if "sync/" in sf else sf.split("/")[-1] + if not any(t in sf for t in targets): + continue + total = len(lines[sf]) + covered = sum(1 for h in lines[sf].values() if h > 0) + missed = total - covered + pct = covered / total * 100 if total > 0 else 0 + total_all += total + covered_all += covered + print(f"{short:<45} {total:>6} {covered:>8} {missed:>7} {pct:>8.1f}%") + +print("-" * 80) +pct_all = covered_all / total_all * 100 if total_all > 0 else 0 +print(f"{'TOTAL':<45} {total_all:>6} {covered_all:>8} {total_all - covered_all:>7} {pct_all:>8.1f}%") +PYEOF + +if [ "$1" = "--html" ]; then + echo "" + echo "=== Generating HTML report ===" + genhtml "$MERGED" -o /tmp/range-cov-html --ignore-errors 
source 2>/dev/null + echo "HTML report: /tmp/range-cov-html/index.html" +fi From cd60ea80bb3383843d703443901dbb834dd47193 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 31 Mar 2026 16:59:36 +1100 Subject: [PATCH 04/57] Update to spec v1.7.0-alpha.4 (#9046) Update our consensus code to v1.7.0-alpha.4 Co-Authored-By: Michael Sproul --- .../src/per_epoch_processing/altair.rs | 11 ++- .../src/per_epoch_processing/single_pass.rs | 76 +++++++++++++++++- .../state_processing/src/upgrade/gloas.rs | 43 +++++++++- consensus/types/src/core/eth_spec.rs | 20 ++++- consensus/types/src/state/beacon_state.rs | 79 ++++++++++++++++++- consensus/types/src/state/committee_cache.rs | 37 ++++++++- testing/ef_tests/Makefile | 2 +- testing/ef_tests/check_all_files_accessed.py | 6 +- .../ef_tests/src/cases/epoch_processing.rs | 19 ++++- testing/ef_tests/src/cases/operations.rs | 6 +- testing/ef_tests/src/lib.rs | 2 +- testing/ef_tests/tests/tests.rs | 6 ++ 12 files changed, 279 insertions(+), 28 deletions(-) diff --git a/consensus/state_processing/src/per_epoch_processing/altair.rs b/consensus/state_processing/src/per_epoch_processing/altair.rs index d9e6964730..683d92d836 100644 --- a/consensus/state_processing/src/per_epoch_processing/altair.rs +++ b/consensus/state_processing/src/per_epoch_processing/altair.rs @@ -51,8 +51,8 @@ pub fn process_epoch( // without loss of correctness. let current_epoch_progressive_balances = state.progressive_balances_cache().clone(); let current_epoch_total_active_balance = state.get_total_active_balance()?; - let participation_summary = - process_epoch_single_pass(state, spec, SinglePassConfig::default())?; + let epoch_result = process_epoch_single_pass(state, spec, SinglePassConfig::default())?; + let participation_summary = epoch_result.summary; // Reset eth1 data votes. process_eth1_data_reset(state)?; @@ -79,6 +79,13 @@ pub fn process_epoch( // Rotate the epoch caches to suit the epoch transition. 
state.advance_caches()?; + + // Install the lookahead committee cache (built during PTC window processing) as the Next + // cache. After advance_caches, the lookahead epoch becomes the Next relative epoch. + if let Some(cache) = epoch_result.lookahead_committee_cache { + state.set_committee_cache(RelativeEpoch::Next, cache)?; + } + update_progressive_balances_on_epoch_transition(state, spec)?; Ok(EpochProcessingSummary::Altair { diff --git a/consensus/state_processing/src/per_epoch_processing/single_pass.rs b/consensus/state_processing/src/per_epoch_processing/single_pass.rs index 4eb1e36628..976607aa76 100644 --- a/consensus/state_processing/src/per_epoch_processing/single_pass.rs +++ b/consensus/state_processing/src/per_epoch_processing/single_pass.rs @@ -12,12 +12,13 @@ use milhouse::{Cow, List, Vector}; use safe_arith::{SafeArith, SafeArithIter}; use std::cmp::{max, min}; use std::collections::{BTreeSet, HashMap}; +use std::sync::Arc; use tracing::instrument; use typenum::Unsigned; use types::{ ActivationQueue, BeaconState, BeaconStateError, BuilderPendingPayment, ChainSpec, Checkpoint, - DepositData, Epoch, EthSpec, ExitCache, ForkName, ParticipationFlags, PendingDeposit, - ProgressiveBalancesCache, RelativeEpoch, Validator, + CommitteeCache, DepositData, Epoch, EthSpec, ExitCache, ForkName, ParticipationFlags, + PendingDeposit, ProgressiveBalancesCache, RelativeEpoch, Validator, consts::altair::{ NUM_FLAG_INDICES, PARTICIPATION_FLAG_WEIGHTS, TIMELY_HEAD_FLAG_INDEX, TIMELY_TARGET_FLAG_INDEX, WEIGHT_DENOMINATOR, @@ -34,6 +35,7 @@ pub struct SinglePassConfig { pub effective_balance_updates: bool, pub proposer_lookahead: bool, pub builder_pending_payments: bool, + pub ptc_window: bool, } impl Default for SinglePassConfig { @@ -54,6 +56,7 @@ impl SinglePassConfig { effective_balance_updates: true, proposer_lookahead: true, builder_pending_payments: true, + ptc_window: true, } } @@ -68,6 +71,7 @@ impl SinglePassConfig { effective_balance_updates: false, 
proposer_lookahead: false, builder_pending_payments: false, + ptc_window: false, } } } @@ -139,12 +143,20 @@ impl ValidatorInfo { } } +/// Result of single-pass epoch processing. +pub struct SinglePassEpochResult { + pub summary: ParticipationEpochSummary, + /// Committee cache for the lookahead epoch, built during PTC window processing. + /// Can be installed as the Next committee cache after `advance_caches`. + pub lookahead_committee_cache: Option>, +} + #[instrument(skip_all)] pub fn process_epoch_single_pass( state: &mut BeaconState, spec: &ChainSpec, conf: SinglePassConfig, -) -> Result, Error> { +) -> Result, Error> { initialize_epoch_cache(state, spec)?; initialize_progressive_balances_cache(state, spec)?; state.build_exit_cache(spec)?; @@ -479,7 +491,16 @@ pub fn process_epoch_single_pass( process_proposer_lookahead(state, spec)?; } - Ok(summary) + let lookahead_committee_cache = if conf.ptc_window && fork_name.gloas_enabled() { + Some(process_ptc_window(state, spec)?) + } else { + None + }; + + Ok(SinglePassEpochResult { + summary, + lookahead_committee_cache, + }) } // TOOO(EIP-7917): use balances cache @@ -512,6 +533,53 @@ pub fn process_proposer_lookahead( Ok(()) } +/// Process the PTC window, returning the committee cache built for the lookahead epoch. +/// +/// The returned cache can be injected into the state's Next committee cache slot after +/// `advance_caches` is called during the epoch transition, avoiding redundant recomputation. +pub fn process_ptc_window( + state: &mut BeaconState, + spec: &ChainSpec, +) -> Result, Error> { + let slots_per_epoch = E::slots_per_epoch() as usize; + + // Convert Vector -> List to use tree-efficient pop_front. + let ptc_window = state.ptc_window()?.clone(); + let mut window: List<_, E::PtcWindowLength> = List::from(ptc_window); + + // Drop the oldest epoch from the front (reuses shared tree nodes). 
+ window + .pop_front(slots_per_epoch) + .map_err(|e| Error::BeaconStateError(BeaconStateError::MilhouseError(e)))?; + + // Compute PTC for the new lookahead epoch + let next_epoch = state + .current_epoch() + .safe_add(spec.min_seed_lookahead.as_u64())? + .safe_add(1)?; + let start_slot = next_epoch.start_slot(E::slots_per_epoch()); + + // Build a committee cache for the lookahead epoch (beyond the normal Next bound) + let committee_cache = state.initialize_committee_cache_for_lookahead(next_epoch, spec)?; + + for i in 0..slots_per_epoch { + let slot = start_slot.safe_add(i as u64)?; + let ptc = state.compute_ptc_with_cache(slot, &committee_cache, spec)?; + let ptc_u64: Vec = ptc.into_iter().map(|v| v as u64).collect(); + let entry = ssz_types::FixedVector::new(ptc_u64) + .map_err(|e| Error::BeaconStateError(BeaconStateError::SszTypesError(e)))?; + window + .push(entry) + .map_err(|e| Error::BeaconStateError(BeaconStateError::MilhouseError(e)))?; + } + + // Convert List back to Vector. + *state.ptc_window_mut()? = Vector::try_from(window) + .map_err(|e| Error::BeaconStateError(BeaconStateError::MilhouseError(e)))?; + + Ok(committee_cache) +} + /// Calculate the quorum threshold for builder payments based on total active balance. 
fn get_builder_payment_quorum_threshold( state_ctxt: &StateContext, diff --git a/consensus/state_processing/src/upgrade/gloas.rs b/consensus/state_processing/src/upgrade/gloas.rs index 7a88383ab0..b39ee6048f 100644 --- a/consensus/state_processing/src/upgrade/gloas.rs +++ b/consensus/state_processing/src/upgrade/gloas.rs @@ -2,7 +2,9 @@ use crate::per_block_processing::{ is_valid_deposit_signature, process_operations::apply_deposit_for_builder, }; use milhouse::{List, Vector}; +use safe_arith::SafeArith; use ssz_types::BitVector; +use ssz_types::FixedVector; use std::collections::HashSet; use std::mem; use typenum::Unsigned; @@ -102,13 +104,11 @@ pub fn upgrade_state_to_gloas( vec![0xFFu8; E::SlotsPerHistoricalRoot::to_usize() / 8].into(), ) .map_err(|_| Error::InvalidBitfield)?, - builder_pending_payments: Vector::new(vec![ - BuilderPendingPayment::default(); - E::builder_pending_payments_limit() - ])?, + builder_pending_payments: Vector::from_elem(BuilderPendingPayment::default())?, builder_pending_withdrawals: List::default(), // Empty list initially, latest_block_hash: pre.latest_execution_payload_header.block_hash, payload_expected_withdrawals: List::default(), + ptc_window: Vector::from_elem(FixedVector::from_elem(0))?, // placeholder, will be initialized below // Caches total_active_balance: pre.total_active_balance, progressive_balances_cache: mem::take(&mut pre.progressive_balances_cache), @@ -120,10 +120,45 @@ pub fn upgrade_state_to_gloas( }); // [New in Gloas:EIP7732] onboard_builders_from_pending_deposits(&mut post, spec)?; + initialize_ptc_window(&mut post, spec)?; Ok(post) } +/// Initialize the `ptc_window` field in the beacon state at fork transition. 
+/// +/// The window contains: +/// - One epoch of empty entries (previous epoch) +/// - Computed PTC for the current epoch through `1 + MIN_SEED_LOOKAHEAD` epochs +fn initialize_ptc_window( + state: &mut BeaconState, + spec: &ChainSpec, +) -> Result<(), Error> { + let slots_per_epoch = E::slots_per_epoch() as usize; + + let empty_previous_epoch = vec![FixedVector::::from_elem(0); slots_per_epoch]; + let mut ptcs = empty_previous_epoch; + + // Compute PTC for current epoch + lookahead epochs + let current_epoch = state.current_epoch(); + for e in 0..=spec.min_seed_lookahead.as_u64() { + let epoch = current_epoch.safe_add(e)?; + let committee_cache = state.initialize_committee_cache_for_lookahead(epoch, spec)?; + let start_slot = epoch.start_slot(E::slots_per_epoch()); + for i in 0..slots_per_epoch { + let slot = start_slot.safe_add(i as u64)?; + let ptc = state.compute_ptc_with_cache(slot, &committee_cache, spec)?; + let ptc_u64: Vec = ptc.into_iter().map(|v| v as u64).collect(); + let entry = FixedVector::new(ptc_u64)?; + ptcs.push(entry); + } + } + + *state.ptc_window_mut()? = Vector::new(ptcs)?; + + Ok(()) +} + /// Applies any pending deposit for builders, effectively onboarding builders at the fork. 
fn onboard_builders_from_pending_deposits( state: &mut BeaconState, diff --git a/consensus/types/src/core/eth_spec.rs b/consensus/types/src/core/eth_spec.rs index a4b22da3f8..36d61fbbf9 100644 --- a/consensus/types/src/core/eth_spec.rs +++ b/consensus/types/src/core/eth_spec.rs @@ -6,9 +6,9 @@ use std::{ use safe_arith::{ArithError, SafeArith}; use serde::{Deserialize, Serialize}; use typenum::{ - U0, U1, U2, U4, U8, U16, U17, U32, U64, U128, U256, U512, U625, U1024, U2048, U4096, U8192, - U16384, U65536, U131072, U262144, U1048576, U16777216, U33554432, U134217728, U1073741824, - U1099511627776, UInt, Unsigned, bit::B0, + U0, U1, U2, U4, U8, U16, U17, U24, U32, U48, U64, U96, U128, U256, U512, U625, U1024, U2048, + U4096, U8192, U16384, U65536, U131072, U262144, U1048576, U16777216, U33554432, U134217728, + U1073741824, U1099511627776, UInt, Unsigned, bit::B0, }; use crate::core::{ChainSpec, Epoch}; @@ -176,6 +176,7 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq + * New in Gloas */ type PTCSize: Unsigned + Clone + Sync + Send + Debug + PartialEq; + type PtcWindowLength: Unsigned + Clone + Sync + Send + Debug + PartialEq; type MaxPayloadAttestations: Unsigned + Clone + Sync + Send + Debug + PartialEq; type BuilderPendingPaymentsLimit: Unsigned + Clone + Sync + Send + Debug + PartialEq; type BuilderPendingWithdrawalsLimit: Unsigned + Clone + Sync + Send + Debug + PartialEq; @@ -428,6 +429,11 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq + Self::PTCSize::to_usize() } + /// Returns the `PtcWindowLength` constant for this specification. + fn ptc_window_length() -> usize { + Self::PtcWindowLength::to_usize() + } + /// Returns the `MaxPayloadAttestations` constant for this specification. 
fn max_payload_attestations() -> usize { Self::MaxPayloadAttestations::to_usize() @@ -515,6 +521,7 @@ impl EthSpec for MainnetEthSpec { type MaxWithdrawalRequestsPerPayload = U16; type MaxPendingDepositsPerEpoch = U16; type PTCSize = U512; + type PtcWindowLength = U96; // (2 + MIN_SEED_LOOKAHEAD) * SLOTS_PER_EPOCH type MaxPayloadAttestations = U4; type MaxBuildersPerWithdrawalsSweep = U16384; @@ -561,6 +568,7 @@ impl EthSpec for MinimalEthSpec { type ProposerLookaheadSlots = U16; // Derived from (MIN_SEED_LOOKAHEAD + 1) * SLOTS_PER_EPOCH type BuilderPendingPaymentsLimit = U16; // 2 * SLOTS_PER_EPOCH = 2 * 8 = 16 type PTCSize = U2; + type PtcWindowLength = U24; // (2 + MIN_SEED_LOOKAHEAD) * SLOTS_PER_EPOCH type MaxBuildersPerWithdrawalsSweep = U16; params_from_eth_spec!(MainnetEthSpec { @@ -668,6 +676,7 @@ impl EthSpec for GnosisEthSpec { type ProposerLookaheadSlots = U32; // Derived from (MIN_SEED_LOOKAHEAD + 1) * SLOTS_PER_EPOCH type BuilderRegistryLimit = U1099511627776; type PTCSize = U512; + type PtcWindowLength = U48; // (2 + MIN_SEED_LOOKAHEAD) * SLOTS_PER_EPOCH type MaxPayloadAttestations = U2; type MaxBuildersPerWithdrawalsSweep = U16384; @@ -694,6 +703,11 @@ mod test { E::proposer_lookahead_slots(), (spec.min_seed_lookahead.as_usize() + 1) * E::slots_per_epoch() as usize ); + assert_eq!( + E::ptc_window_length(), + (spec.min_seed_lookahead.as_usize() + 2) * E::slots_per_epoch() as usize, + "PtcWindowLength must equal (2 + MIN_SEED_LOOKAHEAD) * SLOTS_PER_EPOCH" + ); } #[test] diff --git a/consensus/types/src/state/beacon_state.rs b/consensus/types/src/state/beacon_state.rs index f431055c5f..a033272b9d 100644 --- a/consensus/types/src/state/beacon_state.rs +++ b/consensus/types/src/state/beacon_state.rs @@ -667,6 +667,11 @@ where #[superstruct(only(Gloas))] pub payload_expected_withdrawals: List, + #[compare_fields(as_iter)] + #[test_random(default)] + #[superstruct(only(Gloas))] + pub ptc_window: Vector, E::PtcWindowLength>, + // Caching (not in the spec) 
#[serde(skip_serializing, skip_deserializing)] #[ssz(skip_serializing, skip_deserializing)] @@ -2431,6 +2436,18 @@ impl BeaconState { CommitteeCache::initialized(self, epoch, spec) } + /// Like [`initialize_committee_cache`](Self::initialize_committee_cache), but allows epochs + /// beyond `current_epoch + 1`. Only checks that the required randao seed is available. + /// + /// Used by PTC window computation which needs shufflings for lookahead epochs. + pub fn initialize_committee_cache_for_lookahead( + &self, + epoch: Epoch, + spec: &ChainSpec, + ) -> Result, BeaconStateError> { + CommitteeCache::initialized_for_lookahead(self, epoch, spec) + } + /// Advances the cache for this state into the next epoch. /// /// This should be used if the `slot` of this state is advanced beyond an epoch boundary. @@ -2501,6 +2518,17 @@ impl BeaconState { .ok_or(BeaconStateError::CommitteeCachesOutOfBounds(index)) } + /// Set the committee cache for the given `relative_epoch` to `cache`. + pub fn set_committee_cache( + &mut self, + relative_epoch: RelativeEpoch, + cache: Arc, + ) -> Result<(), BeaconStateError> { + let i = Self::committee_cache_index(relative_epoch); + *self.committee_cache_at_index_mut(i)? = cache; + Ok(()) + } + /// Returns the cache for some `RelativeEpoch`. Returns an error if the cache has not been /// initialized. pub fn committee_cache( @@ -3084,12 +3112,55 @@ impl BeaconState { } } - /// Get the payload timeliness committee for the given `slot`. - /// - /// Requires the committee cache to be initialized. - /// TODO(EIP-7732): definitely gonna have to cache this.. + /// Get the payload timeliness committee for the given `slot` from the `ptc_window`. 
pub fn get_ptc(&self, slot: Slot, spec: &ChainSpec) -> Result, BeaconStateError> { + let ptc_window = self.ptc_window()?; + let epoch = slot.epoch(E::slots_per_epoch()); + let state_epoch = self.current_epoch(); + let slots_per_epoch = E::slots_per_epoch() as usize; + let slot_in_epoch = slot.as_usize().safe_rem(slots_per_epoch)?; + + let index = if epoch < state_epoch { + if epoch.safe_add(1)? != state_epoch { + return Err(BeaconStateError::SlotOutOfBounds); + } + slot_in_epoch + } else { + if epoch > state_epoch.safe_add(spec.min_seed_lookahead)? { + return Err(BeaconStateError::SlotOutOfBounds); + } + let offset = epoch + .safe_sub(state_epoch)? + .safe_add(1)? + .as_usize() + .safe_mul(slots_per_epoch)?; + offset.safe_add(slot_in_epoch)? + }; + + let entry = ptc_window + .get(index) + .ok_or(BeaconStateError::SlotOutOfBounds)?; + + // Convert from FixedVector to PTC (FixedVector) + let indices: Vec = entry.iter().map(|&v| v as usize).collect(); + Ok(PTC(FixedVector::new(indices)?)) + } + + /// Compute the payload timeliness committee for the given `slot` from scratch. + /// + /// Requires the committee cache to be initialized for the slot's epoch. + pub fn compute_ptc(&self, slot: Slot, spec: &ChainSpec) -> Result, BeaconStateError> { let committee_cache = self.committee_cache_at_slot(slot)?; + self.compute_ptc_with_cache(slot, committee_cache, spec) + } + + /// Compute the PTC for a slot using a specific committee cache. 
+ pub fn compute_ptc_with_cache( + &self, + slot: Slot, + committee_cache: &CommitteeCache, + spec: &ChainSpec, + ) -> Result, BeaconStateError> { let committees = committee_cache.get_beacon_committees_at_slot(slot)?; let seed = self.get_ptc_attester_seed(slot, spec)?; diff --git a/consensus/types/src/state/committee_cache.rs b/consensus/types/src/state/committee_cache.rs index 4a28f3c689..2e74ab760c 100644 --- a/consensus/types/src/state/committee_cache.rs +++ b/consensus/types/src/state/committee_cache.rs @@ -62,6 +62,9 @@ fn compare_shuffling_positions(xs: &Vec, ys: &Vec( state: &BeaconState, @@ -81,12 +84,44 @@ impl CommitteeCache { || epoch > state .current_epoch() - .safe_add(1) + .safe_add(1u64) .map_err(BeaconStateError::ArithError)? { return Err(BeaconStateError::EpochOutOfBounds); } + Self::initialized_unchecked(state, epoch, spec) + } + + /// Return a new, fully initialized cache for a lookahead epoch. + /// + /// Like [`initialized`](Self::initialized), but allows epochs beyond `current_epoch + 1`. + /// The only bound enforced is that the required randao seed is available in the state. + /// + /// This is used by PTC window computation, which needs committee shufflings for + /// `current_epoch + 1 + MIN_SEED_LOOKAHEAD`. + pub fn initialized_for_lookahead( + state: &BeaconState, + epoch: Epoch, + spec: &ChainSpec, + ) -> Result, BeaconStateError> { + let reqd_randao_epoch = epoch + .saturating_sub(spec.min_seed_lookahead) + .saturating_sub(1u64); + + if reqd_randao_epoch < state.min_randao_epoch() { + return Err(BeaconStateError::EpochOutOfBounds); + } + + Self::initialized_unchecked(state, epoch, spec) + } + + /// Core committee cache construction. Callers are responsible for bounds-checking `epoch`. + fn initialized_unchecked( + state: &BeaconState, + epoch: Epoch, + spec: &ChainSpec, + ) -> Result, BeaconStateError> { // May cause divide-by-zero errors. 
if E::slots_per_epoch() == 0 { return Err(BeaconStateError::ZeroSlotsPerEpoch); diff --git a/testing/ef_tests/Makefile b/testing/ef_tests/Makefile index 48378a4c95..ab24ea35a0 100644 --- a/testing/ef_tests/Makefile +++ b/testing/ef_tests/Makefile @@ -1,6 +1,6 @@ # To download/extract nightly tests, run: # CONSENSUS_SPECS_TEST_VERSION=nightly make -CONSENSUS_SPECS_TEST_VERSION ?= v1.7.0-alpha.3 +CONSENSUS_SPECS_TEST_VERSION ?= v1.7.0-alpha.4 REPO_NAME := consensus-spec-tests OUTPUT_DIR := ./$(REPO_NAME) diff --git a/testing/ef_tests/check_all_files_accessed.py b/testing/ef_tests/check_all_files_accessed.py index dd6be14306..2daafada31 100755 --- a/testing/ef_tests/check_all_files_accessed.py +++ b/testing/ef_tests/check_all_files_accessed.py @@ -53,6 +53,8 @@ excluded_paths = [ "tests/.*/gloas/fork_choice/.*", # Ignore MatrixEntry SSZ tests for now. "tests/.*/.*/ssz_static/MatrixEntry/.*", + # TODO: partial data column not implemented yet + "tests/.*/.*/ssz_static/PartialDataColumn.*/.*", # TODO(gloas): Ignore Gloas light client stuff for now "tests/.*/gloas/ssz_static/LightClient.*/.*", # Execution payload header is irrelevant after Gloas, this type will probably be deleted. @@ -73,7 +75,9 @@ excluded_paths = [ "tests/.*/compute_verify_cell_kzg_proof_batch_challenge/.*", "tests/.*/compute_challenge/.*", # We don't need these manifest files at the moment. 
- "tests/.*/manifest.yaml" + "tests/.*/manifest.yaml", + # TODO: gossip condition tests not implemented yet + "tests/.*/.*/networking/.*" ] diff --git a/testing/ef_tests/src/cases/epoch_processing.rs b/testing/ef_tests/src/cases/epoch_processing.rs index 7a90fc70d0..a032aa917f 100644 --- a/testing/ef_tests/src/cases/epoch_processing.rs +++ b/testing/ef_tests/src/cases/epoch_processing.rs @@ -12,7 +12,7 @@ use state_processing::per_epoch_processing::effective_balance_updates::{ process_effective_balance_updates, process_effective_balance_updates_slow, }; use state_processing::per_epoch_processing::single_pass::{ - SinglePassConfig, process_epoch_single_pass, process_proposer_lookahead, + SinglePassConfig, process_epoch_single_pass, process_proposer_lookahead, process_ptc_window, }; use state_processing::per_epoch_processing::{ altair, base, @@ -80,6 +80,8 @@ pub struct ParticipationFlagUpdates; #[derive(Debug)] pub struct ProposerLookahead; #[derive(Debug)] +pub struct PtcWindow; +#[derive(Debug)] pub struct BuilderPendingPayments; type_name!( @@ -102,6 +104,7 @@ type_name!(SyncCommitteeUpdates, "sync_committee_updates"); type_name!(InactivityUpdates, "inactivity_updates"); type_name!(ParticipationFlagUpdates, "participation_flag_updates"); type_name!(ProposerLookahead, "proposer_lookahead"); +type_name!(PtcWindow, "ptc_window"); type_name!(BuilderPendingPayments, "builder_pending_payments"); impl EpochTransition for JustificationAndFinalization { @@ -296,6 +299,16 @@ impl EpochTransition for ProposerLookahead { } } +impl EpochTransition for PtcWindow { + fn run(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), EpochProcessingError> { + if state.fork_name_unchecked().gloas_enabled() { + process_ptc_window(state, spec).map(|_| ()) + } else { + Ok(()) + } + } +} + impl EpochTransition for BuilderPendingPayments { fn run(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), EpochProcessingError> { process_epoch_single_pass( @@ -373,7 +386,9 @@ impl> Case for 
EpochProcessing { return false; } - if !fork_name.gloas_enabled() && T::name() == "builder_pending_payments" { + if !fork_name.gloas_enabled() + && (T::name() == "builder_pending_payments" || T::name() == "ptc_window") + { return false; } diff --git a/testing/ef_tests/src/cases/operations.rs b/testing/ef_tests/src/cases/operations.rs index 798c66b666..1399815763 100644 --- a/testing/ef_tests/src/cases/operations.rs +++ b/testing/ef_tests/src/cases/operations.rs @@ -717,11 +717,7 @@ impl> LoadCase for Operations { // Check BLS setting here before SSZ deserialization, as most types require signatures // to be valid. let operation_path = path.join(O::filename()); - let (operation, bls_error) = if !operation_path.is_file() { - // Some test cases (e.g. builder_voluntary_exit__success) have no operation file. - // TODO(gloas): remove this once the test vectors are fixed - (None, None) - } else if metadata.bls_setting.unwrap_or_default().check().is_ok() { + let (operation, bls_error) = if metadata.bls_setting.unwrap_or_default().check().is_ok() { match O::decode(&operation_path, fork_name, spec) { Ok(op) => (Some(op), None), Err(Error::InvalidBLSInput(error)) => (None, Some(error)), diff --git a/testing/ef_tests/src/lib.rs b/testing/ef_tests/src/lib.rs index 94b19b6644..5587bbed41 100644 --- a/testing/ef_tests/src/lib.rs +++ b/testing/ef_tests/src/lib.rs @@ -3,7 +3,7 @@ pub use cases::{ BuilderPendingPayments, Case, EffectiveBalanceUpdates, Eth1DataReset, ExecutionPayloadBidBlock, FeatureName, HistoricalRootsUpdate, HistoricalSummariesUpdate, InactivityUpdates, JustificationAndFinalization, ParticipationFlagUpdates, ParticipationRecordUpdates, - PendingBalanceDeposits, PendingConsolidations, ProposerLookahead, RandaoMixesReset, + PendingBalanceDeposits, PendingConsolidations, ProposerLookahead, PtcWindow, RandaoMixesReset, RegistryUpdates, RewardsAndPenalties, Slashings, SlashingsReset, SyncCommitteeUpdates, WithdrawalsPayload, }; diff --git 
a/testing/ef_tests/tests/tests.rs b/testing/ef_tests/tests/tests.rs index cb4abed90a..62eb2dd038 100644 --- a/testing/ef_tests/tests/tests.rs +++ b/testing/ef_tests/tests/tests.rs @@ -960,6 +960,12 @@ fn epoch_processing_proposer_lookahead() { EpochProcessingHandler::::default().run(); } +#[test] +fn epoch_processing_ptc_window() { + EpochProcessingHandler::::default().run(); + EpochProcessingHandler::::default().run(); +} + #[test] fn epoch_processing_builder_pending_payments() { EpochProcessingHandler::::default().run(); From 7fe9da0043c9ad3b0b9392d08776a9a9d001f8b9 Mon Sep 17 00:00:00 2001 From: Daniel Knopik <107140945+dknopik@users.noreply.github.com> Date: Tue, 31 Mar 2026 08:16:34 +0200 Subject: [PATCH 05/57] Add Gloas SSE event boilerplate (#9053) Implement boilerplate for new SSE events as specified in - https://github.com/ethereum/beacon-APIs/pull/588 While that one is not merged yet, I believe the SSE events might be utilized in Dora already. Implement the boilerplate, i.e. subscription tracking and publish queues. A PR to implement to fully implement already implementable events will follow. 
Co-Authored-By: Daniel Knopik --- beacon_node/beacon_chain/src/events.rs | 75 +++++++++++++++++++++ beacon_node/http_api/src/lib.rs | 15 +++++ common/eth2/src/types.rs | 92 ++++++++++++++++++++++++++ 3 files changed, 182 insertions(+) diff --git a/beacon_node/beacon_chain/src/events.rs b/beacon_node/beacon_chain/src/events.rs index 276edc3fe6..80667cd399 100644 --- a/beacon_node/beacon_chain/src/events.rs +++ b/beacon_node/beacon_chain/src/events.rs @@ -25,6 +25,11 @@ pub struct ServerSentEventHandler { attester_slashing_tx: Sender>, bls_to_execution_change_tx: Sender>, block_gossip_tx: Sender>, + execution_payload_tx: Sender>, + execution_payload_gossip_tx: Sender>, + execution_payload_available_tx: Sender>, + execution_payload_bid_tx: Sender>, + payload_attestation_message_tx: Sender>, } impl ServerSentEventHandler { @@ -51,6 +56,11 @@ impl ServerSentEventHandler { let (attester_slashing_tx, _) = broadcast::channel(capacity); let (bls_to_execution_change_tx, _) = broadcast::channel(capacity); let (block_gossip_tx, _) = broadcast::channel(capacity); + let (execution_payload_tx, _) = broadcast::channel(capacity); + let (execution_payload_gossip_tx, _) = broadcast::channel(capacity); + let (execution_payload_available_tx, _) = broadcast::channel(capacity); + let (execution_payload_bid_tx, _) = broadcast::channel(capacity); + let (payload_attestation_message_tx, _) = broadcast::channel(capacity); Self { attestation_tx, @@ -71,6 +81,11 @@ impl ServerSentEventHandler { attester_slashing_tx, bls_to_execution_change_tx, block_gossip_tx, + execution_payload_tx, + execution_payload_gossip_tx, + execution_payload_available_tx, + execution_payload_bid_tx, + payload_attestation_message_tx, } } @@ -155,6 +170,26 @@ impl ServerSentEventHandler { .block_gossip_tx .send(kind) .map(|count| log_count("block gossip", count)), + EventKind::ExecutionPayload(_) => self + .execution_payload_tx + .send(kind) + .map(|count| log_count("execution payload", count)), + 
EventKind::ExecutionPayloadGossip(_) => self + .execution_payload_gossip_tx + .send(kind) + .map(|count| log_count("execution payload gossip", count)), + EventKind::ExecutionPayloadAvailable(_) => self + .execution_payload_available_tx + .send(kind) + .map(|count| log_count("execution payload available", count)), + EventKind::ExecutionPayloadBid(_) => self + .execution_payload_bid_tx + .send(kind) + .map(|count| log_count("execution payload bid", count)), + EventKind::PayloadAttestationMessage(_) => self + .payload_attestation_message_tx + .send(kind) + .map(|count| log_count("payload attestation message", count)), }; if let Err(SendError(event)) = result { trace!(?event, "No receivers registered to listen for event"); @@ -233,6 +268,26 @@ impl ServerSentEventHandler { self.block_gossip_tx.subscribe() } + pub fn subscribe_execution_payload(&self) -> Receiver> { + self.execution_payload_tx.subscribe() + } + + pub fn subscribe_execution_payload_gossip(&self) -> Receiver> { + self.execution_payload_gossip_tx.subscribe() + } + + pub fn subscribe_execution_payload_available(&self) -> Receiver> { + self.execution_payload_available_tx.subscribe() + } + + pub fn subscribe_execution_payload_bid(&self) -> Receiver> { + self.execution_payload_bid_tx.subscribe() + } + + pub fn subscribe_payload_attestation_message(&self) -> Receiver> { + self.payload_attestation_message_tx.subscribe() + } + pub fn has_attestation_subscribers(&self) -> bool { self.attestation_tx.receiver_count() > 0 } @@ -296,4 +351,24 @@ impl ServerSentEventHandler { pub fn has_block_gossip_subscribers(&self) -> bool { self.block_gossip_tx.receiver_count() > 0 } + + pub fn has_execution_payload_subscribers(&self) -> bool { + self.execution_payload_tx.receiver_count() > 0 + } + + pub fn has_execution_payload_gossip_subscribers(&self) -> bool { + self.execution_payload_gossip_tx.receiver_count() > 0 + } + + pub fn has_execution_payload_available_subscribers(&self) -> bool { + 
self.execution_payload_available_tx.receiver_count() > 0 + } + + pub fn has_execution_payload_bid_subscribers(&self) -> bool { + self.execution_payload_bid_tx.receiver_count() > 0 + } + + pub fn has_payload_attestation_message_subscribers(&self) -> bool { + self.payload_attestation_message_tx.receiver_count() > 0 + } } diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 19d73be89a..68ab91dc4c 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -3168,6 +3168,21 @@ pub fn serve( api_types::EventTopic::BlockGossip => { event_handler.subscribe_block_gossip() } + api_types::EventTopic::ExecutionPayload => { + event_handler.subscribe_execution_payload() + } + api_types::EventTopic::ExecutionPayloadGossip => { + event_handler.subscribe_execution_payload_gossip() + } + api_types::EventTopic::ExecutionPayloadAvailable => { + event_handler.subscribe_execution_payload_available() + } + api_types::EventTopic::ExecutionPayloadBid => { + event_handler.subscribe_execution_payload_bid() + } + api_types::EventTopic::PayloadAttestationMessage => { + event_handler.subscribe_payload_attestation_message() + } }; receivers.push( diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index 94dff95bc6..54e9c98b5b 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -1070,6 +1070,33 @@ pub struct BlockGossip { pub slot: Slot, pub block: Hash256, } +#[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] +pub struct SseExecutionPayload { + pub slot: Slot, + #[serde(with = "serde_utils::quoted_u64")] + pub builder_index: u64, + pub block_hash: ExecutionBlockHash, + pub block_root: Hash256, + pub state_root: Hash256, + pub execution_optimistic: bool, +} + +#[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] +pub struct SseExecutionPayloadGossip { + pub slot: Slot, + #[serde(with = "serde_utils::quoted_u64")] + pub builder_index: u64, + pub block_hash: ExecutionBlockHash, + pub block_root: 
Hash256, + pub state_root: Hash256, +} + +#[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] +pub struct SseExecutionPayloadAvailable { + pub slot: Slot, + pub block_root: Hash256, +} + #[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] pub struct SseChainReorg { pub slot: Slot, @@ -1134,6 +1161,8 @@ pub struct SseExtendedPayloadAttributesGeneric { pub type SseExtendedPayloadAttributes = SseExtendedPayloadAttributesGeneric; pub type VersionedSsePayloadAttributes = ForkVersionedResponse; +pub type VersionedSseExecutionPayloadBid = ForkVersionedResponse>; +pub type VersionedSsePayloadAttestationMessage = ForkVersionedResponse; impl<'de> ContextDeserialize<'de, ForkName> for SsePayloadAttributes { fn context_deserialize(deserializer: D, context: ForkName) -> Result @@ -1210,6 +1239,11 @@ pub enum EventKind { AttesterSlashing(Box>), BlsToExecutionChange(Box), BlockGossip(Box), + ExecutionPayload(SseExecutionPayload), + ExecutionPayloadGossip(SseExecutionPayloadGossip), + ExecutionPayloadAvailable(SseExecutionPayloadAvailable), + ExecutionPayloadBid(Box>), + PayloadAttestationMessage(Box), } impl EventKind { @@ -1233,6 +1267,11 @@ impl EventKind { EventKind::AttesterSlashing(_) => "attester_slashing", EventKind::BlsToExecutionChange(_) => "bls_to_execution_change", EventKind::BlockGossip(_) => "block_gossip", + EventKind::ExecutionPayload(_) => "execution_payload", + EventKind::ExecutionPayloadGossip(_) => "execution_payload_gossip", + EventKind::ExecutionPayloadAvailable(_) => "execution_payload_available", + EventKind::ExecutionPayloadBid(_) => "execution_payload_bid", + EventKind::PayloadAttestationMessage(_) => "payload_attestation_message", } } @@ -1322,6 +1361,40 @@ impl EventKind { "block_gossip" => Ok(EventKind::BlockGossip(serde_json::from_str(data).map_err( |e| ServerError::InvalidServerSentEvent(format!("Block Gossip: {:?}", e)), )?)), + "execution_payload" => Ok(EventKind::ExecutionPayload( + serde_json::from_str(data).map_err(|e| { + 
ServerError::InvalidServerSentEvent(format!("Execution Payload: {:?}", e)) + })?, + )), + "execution_payload_gossip" => Ok(EventKind::ExecutionPayloadGossip( + serde_json::from_str(data).map_err(|e| { + ServerError::InvalidServerSentEvent(format!( + "Execution Payload Gossip: {:?}", + e + )) + })?, + )), + "execution_payload_available" => Ok(EventKind::ExecutionPayloadAvailable( + serde_json::from_str(data).map_err(|e| { + ServerError::InvalidServerSentEvent(format!( + "Execution Payload Available: {:?}", + e + )) + })?, + )), + "execution_payload_bid" => Ok(EventKind::ExecutionPayloadBid(Box::new( + serde_json::from_str(data).map_err(|e| { + ServerError::InvalidServerSentEvent(format!("Execution Payload Bid: {:?}", e)) + })?, + ))), + "payload_attestation_message" => Ok(EventKind::PayloadAttestationMessage(Box::new( + serde_json::from_str(data).map_err(|e| { + ServerError::InvalidServerSentEvent(format!( + "Payload Attestation Message: {:?}", + e + )) + })?, + ))), _ => Err(ServerError::InvalidServerSentEvent( "Could not parse event tag".to_string(), )), @@ -1357,6 +1430,11 @@ pub enum EventTopic { ProposerSlashing, BlsToExecutionChange, BlockGossip, + ExecutionPayload, + ExecutionPayloadGossip, + ExecutionPayloadAvailable, + ExecutionPayloadBid, + PayloadAttestationMessage, } impl FromStr for EventTopic { @@ -1382,6 +1460,11 @@ impl FromStr for EventTopic { "proposer_slashing" => Ok(EventTopic::ProposerSlashing), "bls_to_execution_change" => Ok(EventTopic::BlsToExecutionChange), "block_gossip" => Ok(EventTopic::BlockGossip), + "execution_payload" => Ok(EventTopic::ExecutionPayload), + "execution_payload_gossip" => Ok(EventTopic::ExecutionPayloadGossip), + "execution_payload_available" => Ok(EventTopic::ExecutionPayloadAvailable), + "execution_payload_bid" => Ok(EventTopic::ExecutionPayloadBid), + "payload_attestation_message" => Ok(EventTopic::PayloadAttestationMessage), _ => Err("event topic cannot be parsed.".to_string()), } } @@ -1408,6 +1491,15 @@ impl 
fmt::Display for EventTopic { EventTopic::ProposerSlashing => write!(f, "proposer_slashing"), EventTopic::BlsToExecutionChange => write!(f, "bls_to_execution_change"), EventTopic::BlockGossip => write!(f, "block_gossip"), + EventTopic::ExecutionPayload => write!(f, "execution_payload"), + EventTopic::ExecutionPayloadGossip => write!(f, "execution_payload_gossip"), + EventTopic::ExecutionPayloadAvailable => { + write!(f, "execution_payload_available") + } + EventTopic::ExecutionPayloadBid => write!(f, "execution_payload_bid"), + EventTopic::PayloadAttestationMessage => { + write!(f, "payload_attestation_message") + } } } } From 993cecee83cb1b73189638b4446910c018d2b6f9 Mon Sep 17 00:00:00 2001 From: dapplion <35266934+dapplion@users.noreply.github.com> Date: Tue, 31 Mar 2026 15:22:31 -0500 Subject: [PATCH 06/57] Clear best_child/best_descendant during V28->V29 conversion --- consensus/proto_array/src/ssz_container.rs | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/consensus/proto_array/src/ssz_container.rs b/consensus/proto_array/src/ssz_container.rs index 5edc1cd313..f8b1f6634e 100644 --- a/consensus/proto_array/src/ssz_container.rs +++ b/consensus/proto_array/src/ssz_container.rs @@ -77,7 +77,17 @@ impl From for SszContainerV29 { Self { votes: v28.votes, prune_threshold: v28.prune_threshold, - nodes: v28.nodes.into_iter().map(ProtoNode::V17).collect(), + nodes: v28 + .nodes + .into_iter() + .map(|mut node| { + // best_child/best_descendant are no longer used (replaced by + // the virtual tree walk). Clear during conversion. 
+ node.best_child = None; + node.best_descendant = None; + ProtoNode::V17(node) + }) + .collect(), indices: v28.indices, previous_proposer_boost: v28.previous_proposer_boost, } From 1ee2ce4258335e23bef4542adc6dc7a61db2065e Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 1 Apr 2026 10:10:48 +1100 Subject: [PATCH 07/57] Fix schema migrations --- .../beacon_chain/src/persisted_fork_choice.rs | 54 ++++++++++++------ beacon_node/beacon_chain/src/schema_change.rs | 8 +-- .../src/schema_change/migration_schema_v29.rs | 55 ++++++++----------- consensus/fork_choice/src/fork_choice.rs | 9 +++ 4 files changed, 74 insertions(+), 52 deletions(-) diff --git a/beacon_node/beacon_chain/src/persisted_fork_choice.rs b/beacon_node/beacon_chain/src/persisted_fork_choice.rs index 592ea9ecd7..8edccbbe98 100644 --- a/beacon_node/beacon_chain/src/persisted_fork_choice.rs +++ b/beacon_node/beacon_chain/src/persisted_fork_choice.rs @@ -22,27 +22,38 @@ pub struct PersistedForkChoice { pub fork_choice_store: PersistedForkChoiceStoreV28, } -macro_rules! 
impl_store_item { - ($type:ty) => { - impl store::StoreItem for $type { - fn db_column() -> DBColumn { - DBColumn::ForkChoice - } +impl PersistedForkChoiceV28 { + pub fn from_bytes(bytes: &[u8], store_config: &StoreConfig) -> Result { + let decompressed_bytes = store_config + .decompress_bytes(bytes) + .map_err(Error::Compression)?; + Self::from_ssz_bytes(&decompressed_bytes).map_err(Into::into) + } - fn as_store_bytes(&self) -> Vec { - self.as_ssz_bytes() - } + pub fn as_bytes(&self, store_config: &StoreConfig) -> Result, Error> { + let encode_timer = metrics::start_timer(&metrics::FORK_CHOICE_ENCODE_TIMES); + let ssz_bytes = self.as_ssz_bytes(); + drop(encode_timer); - fn from_store_bytes(bytes: &[u8]) -> std::result::Result { - Self::from_ssz_bytes(bytes).map_err(Into::into) - } - } - }; + let _compress_timer = metrics::start_timer(&metrics::FORK_CHOICE_COMPRESS_TIMES); + store_config + .compress_bytes(&ssz_bytes) + .map_err(Error::Compression) + } + + pub fn as_kv_store_op( + &self, + key: Hash256, + store_config: &StoreConfig, + ) -> Result { + Ok(KeyValueStoreOp::PutKeyValue( + DBColumn::ForkChoice, + key.as_slice().to_vec(), + self.as_bytes(store_config)?, + )) + } } -impl_store_item!(PersistedForkChoiceV28); -impl_store_item!(PersistedForkChoiceV29); - impl PersistedForkChoiceV29 { pub fn from_bytes(bytes: &[u8], store_config: &StoreConfig) -> Result { let decompressed_bytes = store_config @@ -83,3 +94,12 @@ impl From for PersistedForkChoiceV29 { } } } + +impl From for PersistedForkChoiceV28 { + fn from(v29: PersistedForkChoiceV29) -> Self { + Self { + fork_choice_v28: v29.fork_choice.into(), + fork_choice_store: v29.fork_choice_store, + } + } +} diff --git a/beacon_node/beacon_chain/src/schema_change.rs b/beacon_node/beacon_chain/src/schema_change.rs index fa2ab70d21..841f28e37d 100644 --- a/beacon_node/beacon_chain/src/schema_change.rs +++ b/beacon_node/beacon_chain/src/schema_change.rs @@ -22,13 +22,13 @@ pub fn migrate_schema( (_, _) if from == to && to 
== CURRENT_SCHEMA_VERSION => Ok(()), // Upgrade from v28 to v29. (SchemaVersion(28), SchemaVersion(29)) => { - upgrade_to_v29::(&db)?; - db.store_schema_version_atomically(to, vec![]) + let ops = upgrade_to_v29::(&db)?; + db.store_schema_version_atomically(to, ops) } // Downgrade from v29 to v28. (SchemaVersion(29), SchemaVersion(28)) => { - downgrade_from_v29::(&db)?; - db.store_schema_version_atomically(to, vec![]) + let ops = downgrade_from_v29::(&db)?; + db.store_schema_version_atomically(to, ops) } // Anything else is an error. (_, _) => Err(HotColdDBError::UnsupportedSchemaVersion { diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v29.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v29.rs index 6c82e8a737..3069200fce 100644 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v29.rs +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v29.rs @@ -1,12 +1,8 @@ -use crate::beacon_chain::BeaconChainTypes; +use crate::beacon_chain::{BeaconChainTypes, FORK_CHOICE_DB_KEY}; use crate::persisted_fork_choice::{PersistedForkChoiceV28, PersistedForkChoiceV29}; -use ssz::Decode; use store::hot_cold_store::HotColdDB; use store::{DBColumn, Error as StoreError, KeyValueStore, KeyValueStoreOp}; -use types::{EthSpec, Hash256}; - -/// The key used to store the fork choice in the database. -const FORK_CHOICE_DB_KEY: Hash256 = Hash256::ZERO; +use types::EthSpec; /// Upgrade from schema v28 to v29. /// @@ -14,24 +10,25 @@ const FORK_CHOICE_DB_KEY: Hash256 = Hash256::ZERO; /// virtual tree walk). /// - Fails if the persisted fork choice contains any V17 (pre-Gloas) proto /// nodes at or after the Gloas fork slot. +/// +/// Returns a list of store ops to be applied atomically with the schema version write. 
pub fn upgrade_to_v29( db: &HotColdDB, -) -> Result<(), StoreError> { +) -> Result, StoreError> { let gloas_fork_slot = db .spec .gloas_fork_epoch .map(|epoch| epoch.start_slot(T::EthSpec::slots_per_epoch())); - // Load the persisted fork choice (v28 format, uncompressed SSZ). + // Load the persisted fork choice (v28 format). let Some(fc_bytes) = db .hot_db .get_bytes(DBColumn::ForkChoice, FORK_CHOICE_DB_KEY.as_slice())? else { - return Ok(()); + return Ok(vec![]); }; - let mut persisted_v28 = - PersistedForkChoiceV28::from_ssz_bytes(&fc_bytes).map_err(StoreError::SszDecodeError)?; + let persisted_v28 = PersistedForkChoiceV28::from_bytes(&fc_bytes, db.get_config())?; // Check for V17 nodes at/after the Gloas fork slot. if let Some(gloas_fork_slot) = gloas_fork_slot { @@ -52,39 +49,30 @@ pub fn upgrade_to_v29( } } - // Clear best_child/best_descendant — replaced by the virtual tree walk. - for node in &mut persisted_v28.fork_choice_v28.proto_array_v28.nodes { - node.best_child = None; - node.best_descendant = None; - } - - // Convert to v29 and write back. + // Convert to v29 and encode. let persisted_v29 = PersistedForkChoiceV29::from(persisted_v28); - let fc_bytes = persisted_v29 - .as_bytes(db.get_config()) - .map_err(|e| StoreError::MigrationError(format!("failed to encode v29: {:?}", e)))?; - db.hot_db.do_atomically(vec![KeyValueStoreOp::PutKeyValue( - DBColumn::ForkChoice, - FORK_CHOICE_DB_KEY.as_slice().to_vec(), - fc_bytes, - )])?; - Ok(()) + Ok(vec![ + persisted_v29.as_kv_store_op(FORK_CHOICE_DB_KEY, db.get_config())?, + ]) } -/// Downgrade from schema v29 to v28 (no-op). +/// Downgrade from schema v29 to v28. /// +/// Converts the persisted fork choice from V29 format back to V28. /// Fails if the persisted fork choice contains any V29 proto nodes, as these contain /// payload-specific fields that cannot be losslessly converted back to V17 format. +/// +/// Returns a list of store ops to be applied atomically with the schema version write. 
pub fn downgrade_from_v29( db: &HotColdDB, -) -> Result<(), StoreError> { +) -> Result, StoreError> { // Load the persisted fork choice (v29 format, compressed). let Some(fc_bytes) = db .hot_db .get_bytes(DBColumn::ForkChoice, FORK_CHOICE_DB_KEY.as_slice())? else { - return Ok(()); + return Ok(vec![]); }; let persisted_v29 = @@ -111,5 +99,10 @@ pub fn downgrade_from_v29( )); } - Ok(()) + // Convert to v28 and encode. + let persisted_v28 = PersistedForkChoiceV28::from(persisted_v29); + + Ok(vec![ + persisted_v28.as_kv_store_op(FORK_CHOICE_DB_KEY, db.get_config())?, + ]) } diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 3b13cd4429..771104a02f 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -1840,6 +1840,15 @@ impl From for PersistedForkChoiceV29 { } } +impl From for PersistedForkChoiceV28 { + fn from(v29: PersistedForkChoiceV29) -> Self { + Self { + proto_array_v28: v29.proto_array.into(), + queued_attestations: v29.queued_attestations, + } + } +} + #[cfg(test)] mod tests { use types::MainnetEthSpec; From bc6cf0f88290cb6c8de35ab8623dc0363e65cb92 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 1 Apr 2026 11:17:33 +1100 Subject: [PATCH 08/57] Remove payload attestation queueing and more cleanups --- beacon_node/http_api/src/lib.rs | 4 + beacon_node/http_api/tests/tests.rs | 4 + consensus/fork_choice/src/fork_choice.rs | 113 +++--------------- consensus/proto_array/src/proto_array.rs | 6 +- .../src/proto_array_fork_choice.rs | 6 +- 5 files changed, 34 insertions(+), 99 deletions(-) diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 5df1078617..0bb04888b7 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -2148,10 +2148,14 @@ pub fn serve( execution_status: execution_status_string, best_child: node .best_child() + .ok() + .flatten() .and_then(|index| proto_array.nodes.get(index)) 
.map(|child| child.root()), best_descendant: node .best_descendant() + .ok() + .flatten() .and_then(|index| proto_array.nodes.get(index)) .map(|descendant| descendant.root()), }, diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 14bfb5ce92..b28816302c 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -3179,10 +3179,14 @@ impl ApiTester { .unwrap_or_else(|| "irrelevant".to_string()), best_child: node .best_child() + .ok() + .flatten() .and_then(|index| expected_proto_array.nodes.get(index)) .map(|child| child.root()), best_descendant: node .best_descendant() + .ok() + .flatten() .and_then(|index| expected_proto_array.nodes.get(index)) .map(|descendant| descendant.root()), }, diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 771104a02f..a80ec99a25 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -138,10 +138,6 @@ pub enum InvalidBlock { finalized_root: Hash256, block_ancestor: Option, }, - MissingExecutionPayloadBid { - block_slot: Slot, - block_root: Hash256, - }, } #[derive(Debug)] @@ -310,22 +306,6 @@ fn dequeue_attestations( std::mem::replace(queued_attestations, remaining) } -/// Returns all values in `queued` that have `slot + 1 < current_slot`. -/// Payload attestations need an extra slot of delay compared to regular attestations. -fn dequeue_payload_attestations( - current_slot: Slot, - queued: &mut Vec, -) -> Vec { - let remaining = queued.split_off( - queued - .iter() - .position(|a| a.slot.saturating_add(1_u64) >= current_slot) - .unwrap_or(queued.len()), - ); - - std::mem::replace(queued, remaining) -} - /// Denotes whether an attestation we are processing was received from a block or from gossip. 
/// Equivalent to the `is_from_block` `bool` in: /// @@ -370,9 +350,6 @@ pub struct ForkChoice { proto_array: ProtoArrayForkChoice, /// Attestations that arrived at the current slot and must be queued for later processing. queued_attestations: Vec, - /// Payload attestations (PTC votes) that must be queued for later processing. - /// These have different dequeue timing than regular attestations. - queued_payload_attestations: Vec, /// Stores a cache of the values required to be sent to the execution layer. forkchoice_update_parameters: ForkchoiceUpdateParameters, _phantom: PhantomData, @@ -387,7 +364,6 @@ where self.fc_store == other.fc_store && self.proto_array == other.proto_array && self.queued_attestations == other.queued_attestations - && self.queued_payload_attestations == other.queued_payload_attestations } } @@ -472,7 +448,6 @@ where fc_store, proto_array, queued_attestations: vec![], - queued_payload_attestations: vec![], // This will be updated during the next call to `Self::get_head`. forkchoice_update_parameters: ForkchoiceUpdateParameters { head_hash: None, @@ -966,14 +941,6 @@ where Some(signed_bid.message.block_hash), ) } else { - if spec.fork_name_at_slot::(block.slot()).gloas_enabled() { - return Err(Error::InvalidBlock( - InvalidBlock::MissingExecutionPayloadBid { - block_slot: block.slot(), - block_root, - }, - )); - } (None, None) }; @@ -1334,11 +1301,21 @@ where ) -> Result<(), Error> { self.update_time(system_time_current_slot)?; - if attestation.data.beacon_block_root == Hash256::zero() { + if attestation.data.beacon_block_root.is_zero() { return Ok(()); } - self.validate_on_payload_attestation(attestation, is_from_block)?; + match self.validate_on_payload_attestation(attestation, is_from_block) { + Ok(()) => (), + Err(InvalidAttestation::PayloadAttestationNotCurrentSlot { .. 
}) => { + // Just ignore wrong-slot payload attestations, they could have been processed at + // the correct slot when received on gossip, but then have the wrong-slot by the + // time they make it to here (TOCTOU). + // TODO(gloas): consider moving this to the call site for gossip processing + return Ok(()); + } + Err(e) => return Err(e.into()), + } // Resolve validator indices to PTC committee positions. let ptc_indices: Vec = attestation @@ -1346,34 +1323,13 @@ where .filter_map(|vi| ptc.iter().position(|&p| p == *vi as usize)) .collect(); - let processing_slot = self.fc_store.get_current_slot(); - // Payload attestations from blocks can be applied in the next slot (S+1 for data.slot=S), - // while gossiped payload attestations are delayed one extra slot. - let should_process_now = match is_from_block { - AttestationFromBlock::True => attestation.data.slot < processing_slot, - AttestationFromBlock::False => { - attestation.data.slot.saturating_add(1_u64) < processing_slot - } - }; - - if should_process_now { - for &ptc_index in &ptc_indices { - self.proto_array.process_payload_attestation( - attestation.data.beacon_block_root, - ptc_index, - attestation.data.payload_present, - attestation.data.blob_data_available, - )?; - } - } else { - self.queued_payload_attestations - .push(QueuedPayloadAttestation { - slot: attestation.data.slot, - ptc_indices, - block_root: attestation.data.beacon_block_root, - payload_present: attestation.data.payload_present, - blob_data_available: attestation.data.blob_data_available, - }); + for &ptc_index in &ptc_indices { + self.proto_array.process_payload_attestation( + attestation.data.beacon_block_root, + ptc_index, + attestation.data.payload_present, + attestation.data.blob_data_available, + )?; } Ok(()) @@ -1408,7 +1364,6 @@ where // Process any attestations that might now be eligible. 
self.process_attestation_queue()?; - self.process_payload_attestation_queue()?; Ok(self.fc_store.get_current_slot()) } @@ -1495,26 +1450,6 @@ where Ok(()) } - /// Processes and removes from the queue any queued payload attestations which may now be - /// eligible for processing. Payload attestations use `slot + 1 < current_slot` timing. - fn process_payload_attestation_queue(&mut self) -> Result<(), Error> { - let current_slot = self.fc_store.get_current_slot(); - for attestation in - dequeue_payload_attestations(current_slot, &mut self.queued_payload_attestations) - { - for &ptc_index in &attestation.ptc_indices { - self.proto_array.process_payload_attestation( - attestation.block_root, - ptc_index, - attestation.payload_present, - attestation.blob_data_available, - )?; - } - } - - Ok(()) - } - /// Returns `true` if the block is known **and** a descendant of the finalized root. pub fn contains_block(&self, block_root: &Hash256) -> bool { self.proto_array.contains_block(block_root) @@ -1670,11 +1605,6 @@ where &self.queued_attestations } - /// Returns a reference to the currently queued payload attestations. - pub fn queued_payload_attestations(&self) -> &[QueuedPayloadAttestation] { - &self.queued_payload_attestations - } - /// Returns the store's `proposer_boost_root`. pub fn proposer_boost_root(&self) -> Hash256 { self.fc_store.proposer_boost_root() @@ -1759,7 +1689,6 @@ where fc_store, proto_array, queued_attestations: persisted.queued_attestations, - queued_payload_attestations: persisted.queued_payload_attestations, // Will be updated in the following call to `Self::get_head`. 
forkchoice_update_parameters: ForkchoiceUpdateParameters { head_hash: None, @@ -1800,7 +1729,6 @@ where PersistedForkChoice { proto_array: self.proto_array().as_ssz_container(), queued_attestations: self.queued_attestations().to_vec(), - queued_payload_attestations: self.queued_payload_attestations.clone(), } } @@ -1824,8 +1752,6 @@ pub struct PersistedForkChoice { #[superstruct(only(V29))] pub proto_array: proto_array::core::SszContainerV29, pub queued_attestations: Vec, - #[superstruct(only(V29))] - pub queued_payload_attestations: Vec, } pub type PersistedForkChoice = PersistedForkChoiceV29; @@ -1835,7 +1761,6 @@ impl From for PersistedForkChoiceV29 { Self { proto_array: v28.proto_array_v28.into(), queued_attestations: v28.queued_attestations, - queued_payload_attestations: vec![], } } } diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index f68d3eb71b..452679d7a3 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -117,10 +117,10 @@ pub struct ProtoNode { pub finalized_checkpoint: Checkpoint, #[superstruct(getter(copy))] pub weight: u64, - #[superstruct(getter(copy))] + #[superstruct(only(V17), partial_getter(copy))] #[ssz(with = "four_byte_option_usize")] pub best_child: Option, - #[superstruct(getter(copy))] + #[superstruct(only(V17), partial_getter(copy))] #[ssz(with = "four_byte_option_usize")] pub best_descendant: Option, /// Indicates if an execution node has marked this block as valid. 
Also contains the execution @@ -614,8 +614,6 @@ impl ProtoArray { justified_checkpoint: block.justified_checkpoint, finalized_checkpoint: block.finalized_checkpoint, weight: 0, - best_child: None, - best_descendant: None, unrealized_justified_checkpoint: block.unrealized_justified_checkpoint, unrealized_finalized_checkpoint: block.unrealized_finalized_checkpoint, parent_payload_status, diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index 6c90af1302..cb467f2531 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -997,7 +997,11 @@ impl ProtoArrayForkChoice { /// Returns the `block.execution_status` field, if the block is present. pub fn get_block_execution_status(&self, block_root: &Hash256) -> Option { let block = self.get_proto_node(block_root)?; - block.execution_status().ok() + Some( + block + .execution_status() + .unwrap_or_else(|_| ExecutionStatus::irrelevant()), + ) } /// Returns whether the execution payload for a block has been received. From 4684d972e0b9fe4c574b885de765ee97bab82246 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 1 Apr 2026 11:31:51 +1100 Subject: [PATCH 09/57] Remove TOCTOU early return --- consensus/fork_choice/src/fork_choice.rs | 15 ++++----------- 1 file changed, 4 insertions(+), 11 deletions(-) diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index a80ec99a25..630de11281 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -1305,17 +1305,10 @@ where return Ok(()); } - match self.validate_on_payload_attestation(attestation, is_from_block) { - Ok(()) => (), - Err(InvalidAttestation::PayloadAttestationNotCurrentSlot { .. 
}) => { - // Just ignore wrong-slot payload attestations, they could have been processed at - // the correct slot when received on gossip, but then have the wrong-slot by the - // time they make it to here (TOCTOU). - // TODO(gloas): consider moving this to the call site for gossip processing - return Ok(()); - } - Err(e) => return Err(e.into()), - } + // TODO(gloas): Should ignore wrong-slot payload attestations at the caller, they could + // have been processed at the correct slot when received on gossip, but then have the + // wrong-slot by the time they make it to here (TOCTOU). + self.validate_on_payload_attestation(attestation, is_from_block)?; // Resolve validator indices to PTC committee positions. let ptc_indices: Vec = attestation From 51e78fd157456d90c8e7c8bb79b929c670843b2f Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 1 Apr 2026 11:51:03 +1100 Subject: [PATCH 10/57] Fix queued attestation decoding from disk --- consensus/fork_choice/src/fork_choice.rs | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 630de11281..b075a23ddd 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -260,6 +260,15 @@ pub struct QueuedAttestation { payload_present: bool, } +/// Legacy queued attestation without payload_present (pre-Gloas, schema V28). +#[derive(Clone, PartialEq, Encode, Decode)] +pub struct QueuedAttestationV28 { + slot: Slot, + attesting_indices: Vec, + block_root: Hash256, + target_epoch: Epoch, +} + impl<'a, E: EthSpec> From> for QueuedAttestation { fn from(a: IndexedAttestationRef<'a, E>) -> Self { Self { @@ -1681,7 +1690,7 @@ where let mut fork_choice = Self { fc_store, proto_array, - queued_attestations: persisted.queued_attestations, + queued_attestations: vec![], // Will be updated in the following call to `Self::get_head`. 
forkchoice_update_parameters: ForkchoiceUpdateParameters { head_hash: None, @@ -1721,7 +1730,6 @@ where pub fn to_persisted(&self) -> PersistedForkChoice { PersistedForkChoice { proto_array: self.proto_array().as_ssz_container(), - queued_attestations: self.queued_attestations().to_vec(), } } @@ -1744,7 +1752,8 @@ pub struct PersistedForkChoice { pub proto_array_v28: proto_array::core::SszContainerV28, #[superstruct(only(V29))] pub proto_array: proto_array::core::SszContainerV29, - pub queued_attestations: Vec, + #[superstruct(only(V28))] + pub queued_attestations_v28: Vec, } pub type PersistedForkChoice = PersistedForkChoiceV29; @@ -1753,7 +1762,6 @@ impl From for PersistedForkChoiceV29 { fn from(v28: PersistedForkChoiceV28) -> Self { Self { proto_array: v28.proto_array_v28.into(), - queued_attestations: v28.queued_attestations, } } } @@ -1762,7 +1770,7 @@ impl From for PersistedForkChoiceV28 { fn from(v29: PersistedForkChoiceV29) -> Self { Self { proto_array_v28: v29.proto_array.into(), - queued_attestations: v29.queued_attestations, + queued_attestations_v28: vec![], } } } From 62c016660fcdb9089064fd14ae48a1a61c5169db Mon Sep 17 00:00:00 2001 From: Daniel Knopik <107140945+dknopik@users.noreply.github.com> Date: Wed, 1 Apr 2026 02:58:49 +0200 Subject: [PATCH 11/57] Emit SSE: `execution_payload` (#9065) Emit `execution_payload` on successful import of an execution payload. 
Co-Authored-By: Daniel Knopik --- .../payload_envelope_verification/import.rs | 25 +++++++++++++++---- 1 file changed, 20 insertions(+), 5 deletions(-) diff --git a/beacon_node/beacon_chain/src/payload_envelope_verification/import.rs b/beacon_node/beacon_chain/src/payload_envelope_verification/import.rs index 2ee315e559..bae848c3c1 100644 --- a/beacon_node/beacon_chain/src/payload_envelope_verification/import.rs +++ b/beacon_node/beacon_chain/src/payload_envelope_verification/import.rs @@ -1,11 +1,12 @@ use std::sync::Arc; use std::time::Duration; +use eth2::types::{EventKind, SseExecutionPayload}; use fork_choice::PayloadVerificationStatus; use slot_clock::SlotClock; use store::StoreOp; use tracing::{debug, error, info, info_span, instrument, warn}; -use types::{BeaconState, BlockImportSource, Hash256, Slot}; +use types::{BeaconState, BlockImportSource, Hash256, SignedExecutionPayloadEnvelope}; use super::{ AvailableEnvelope, AvailableExecutedEnvelope, EnvelopeError, EnvelopeImportData, @@ -225,7 +226,7 @@ impl BeaconChain { signed_envelope: AvailableEnvelope, block_root: Hash256, state: BeaconState, - _payload_verification_status: PayloadVerificationStatus, + payload_verification_status: PayloadVerificationStatus, ) -> Result { // Everything in this initial section is on the hot path for processing the envelope. 
// Take an upgradable read lock on fork choice so we can check if this block has already @@ -317,8 +318,9 @@ impl BeaconChain { metrics::stop_timer(db_write_timer); self.import_envelope_update_metrics_and_events( + signed_envelope, block_root, - signed_envelope.slot(), + payload_verification_status, envelope_time_imported, ); @@ -327,10 +329,12 @@ impl BeaconChain { fn import_envelope_update_metrics_and_events( &self, + signed_envelope: Arc>, block_root: Hash256, - envelope_slot: Slot, + payload_verification_status: PayloadVerificationStatus, envelope_time_imported: Duration, ) { + let envelope_slot = signed_envelope.slot(); let envelope_delay_total = get_slot_delay_ms(envelope_time_imported, envelope_slot, &self.slot_clock); @@ -349,6 +353,17 @@ impl BeaconChain { ); } - // TODO(gloas) emit SSE event for envelope import (similar to SseBlock for blocks). + if let Some(event_handler) = self.event_handler.as_ref() + && event_handler.has_execution_payload_subscribers() + { + event_handler.register(EventKind::ExecutionPayload(SseExecutionPayload { + slot: envelope_slot, + builder_index: signed_envelope.message.builder_index, + block_hash: signed_envelope.block_hash(), + block_root, + state_root: signed_envelope.message.state_root, + execution_optimistic: payload_verification_status.is_optimistic(), + })); + } } } From 03385d698db96b9dc7fe037454fd78c47a19c302 Mon Sep 17 00:00:00 2001 From: chonghe <44791194+chong-he@users.noreply.github.com> Date: Wed, 1 Apr 2026 08:58:52 +0800 Subject: [PATCH 12/57] Update `blob_delay_ms` to track data columns seen (#9024) * #7477 Use the last seen data column as the time for `blob_delay_ms`, the metric name remains unchanged Co-Authored-By: dapplion <35266934+dapplion@users.noreply.github.com> Co-Authored-By: Tan Chee Keong Co-Authored-By: Tan Chee Keong --- beacon_node/beacon_chain/src/beacon_chain.rs | 4 +- .../beacon_chain/src/canonical_head.rs | 4 +- .../overflow_lru_cache.rs | 7 ++- .../src/data_column_verification.rs | 51 
+++++++++++++++---- 4 files changed, 50 insertions(+), 16 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 81735bdd9d..69db0c24fb 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -3801,7 +3801,7 @@ impl BeaconChain { consensus_context, } = import_data; - // Record the time at which this block's blobs became available. + // Record the time at which this block's blobs/data columns became available. if let Some(blobs_available) = block.blobs_available_timestamp() { self.block_times_cache.write().set_time_blob_observed( block_root, @@ -3810,8 +3810,6 @@ impl BeaconChain { ); } - // TODO(das) record custody column available timestamp - let block_root = { // Capture the current span before moving into the blocking task let current_span = tracing::Span::current(); diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs index 3a429bdb8a..9dd7d62a27 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -1379,8 +1379,8 @@ fn observe_head_block_delays( .as_millis() as i64, ); - // The time from the start of the slot when all blobs have been observed. Technically this - // is the time we last saw a blob related to this block/slot. + // The time from the start of the slot when all blobs/data columns have been observed. Technically this + // is the time we last saw a blob/data column related to this block/slot. 
metrics::set_gauge( &metrics::BEACON_BLOB_DELAY_ALL_OBSERVED_SLOT_START, block_delays diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index c0403595ee..8f1d4464e1 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -282,8 +282,11 @@ impl PendingComponents { .flatten() .map(|blob| blob.seen_timestamp()) .max(), - // TODO(das): To be fixed with https://github.com/sigp/lighthouse/pull/6850 - AvailableBlockData::DataColumns(_) => None, + AvailableBlockData::DataColumns(_) => self + .verified_data_columns + .iter() + .map(|data_column| data_column.seen_timestamp()) + .max(), }; let AvailabilityPendingExecutedBlock { diff --git a/beacon_node/beacon_chain/src/data_column_verification.rs b/beacon_node/beacon_chain/src/data_column_verification.rs index dde9fad342..f47de01ddc 100644 --- a/beacon_node/beacon_chain/src/data_column_verification.rs +++ b/beacon_node/beacon_chain/src/data_column_verification.rs @@ -5,6 +5,7 @@ use crate::kzg_utils::{reconstruct_data_columns, validate_data_columns}; use crate::observed_data_sidecars::{ Error as ObservedDataSidecarsError, ObservationKey, ObservationStrategy, Observe, }; +use crate::validator_monitor::timestamp_now; use crate::{BeaconChain, BeaconChainError, BeaconChainTypes, metrics}; use educe::Educe; use fork_choice::ProtoBlock; @@ -16,6 +17,7 @@ use ssz_types::VariableList; use std::iter; use std::marker::PhantomData; use std::sync::Arc; +use std::time::Duration; use tracing::{debug, instrument}; use types::data::ColumnIndex; use types::{ @@ -320,25 +322,34 @@ impl GossipVerifiedDataColumn #[ssz(struct_behaviour = "transparent")] pub struct KzgVerifiedDataColumn { data: Arc>, + #[ssz(skip_serializing, skip_deserializing)] + seen_timestamp: Duration, } impl KzgVerifiedDataColumn { pub fn 
new( data_column: Arc>, kzg: &Kzg, + seen_timestamp: Duration, ) -> Result, KzgError)> { - verify_kzg_for_data_column(data_column, kzg) + verify_kzg_for_data_column(data_column, kzg, seen_timestamp) } /// Mark a data column as KZG verified. Caller must ONLY use this on columns constructed /// from EL blobs. pub fn from_execution_verified(data_column: Arc>) -> Self { - Self { data: data_column } + Self { + data: data_column, + seen_timestamp: timestamp_now(), + } } /// Create a `KzgVerifiedDataColumn` from `DataColumnSidecar` for testing ONLY. pub(crate) fn __new_for_testing(data_column: Arc>) -> Self { - Self { data: data_column } + Self { + data: data_column, + seen_timestamp: timestamp_now(), + } } pub fn from_batch_with_scoring( @@ -348,7 +359,10 @@ impl KzgVerifiedDataColumn { verify_kzg_for_data_column_list(data_columns.iter(), kzg)?; Ok(data_columns .into_iter() - .map(|column| Self { data: column }) + .map(|column| Self { + data: column, + seen_timestamp: timestamp_now(), + }) .collect()) } @@ -407,6 +421,8 @@ impl CustodyDataColumn { #[ssz(struct_behaviour = "transparent")] pub struct KzgVerifiedCustodyDataColumn { data: Arc>, + #[ssz(skip_serializing, skip_deserializing)] + seen_timestamp: Duration, } impl KzgVerifiedCustodyDataColumn { @@ -414,6 +430,7 @@ impl KzgVerifiedCustodyDataColumn { /// include this column pub fn from_asserted_custody(kzg_verified: KzgVerifiedDataColumn) -> Self { Self { + seen_timestamp: kzg_verified.seen_timestamp, data: kzg_verified.to_data_column(), } } @@ -422,10 +439,12 @@ impl KzgVerifiedCustodyDataColumn { pub fn new( data_column: CustodyDataColumn, kzg: &Kzg, + seen_timestamp: Duration, ) -> Result, KzgError)> { - verify_kzg_for_data_column(data_column.clone_arc(), kzg)?; + verify_kzg_for_data_column(data_column.clone_arc(), kzg, seen_timestamp)?; Ok(Self { data: data_column.data, + seen_timestamp, }) } @@ -443,10 +462,15 @@ impl KzgVerifiedCustodyDataColumn { spec, )?; + let seen_timestamp = timestamp_now(); + 
Ok(all_data_columns .into_iter() .map(|data| { - KzgVerifiedCustodyDataColumn::from_asserted_custody(KzgVerifiedDataColumn { data }) + KzgVerifiedCustodyDataColumn::from_asserted_custody(KzgVerifiedDataColumn { + data, + seen_timestamp, + }) }) .collect::>()) } @@ -464,6 +488,10 @@ impl KzgVerifiedCustodyDataColumn { pub fn index(&self) -> ColumnIndex { *self.data.index() } + + pub fn seen_timestamp(&self) -> Duration { + self.seen_timestamp + } } /// Complete kzg verification for a `DataColumnSidecar`. @@ -473,10 +501,14 @@ impl KzgVerifiedCustodyDataColumn { pub fn verify_kzg_for_data_column( data_column: Arc>, kzg: &Kzg, + seen_timestamp: Duration, ) -> Result, (Option, KzgError)> { let _timer = metrics::start_timer(&metrics::KZG_VERIFICATION_DATA_COLUMN_SINGLE_TIMES); validate_data_columns(kzg, iter::once(&data_column))?; - Ok(KzgVerifiedDataColumn { data: data_column }) + Ok(KzgVerifiedDataColumn { + data: data_column, + seen_timestamp, + }) } /// Complete kzg verification for a list of `DataColumnSidecar`s. @@ -538,8 +570,9 @@ pub fn validate_data_column_sidecar_for_gossip_fulu Date: Wed, 1 Apr 2026 12:04:32 +1100 Subject: [PATCH 13/57] More cleanup --- consensus/fork_choice/src/fork_choice.rs | 16 ++-------------- consensus/fork_choice/src/lib.rs | 3 +-- .../src/fork_choice_test_definition.rs | 1 + .../proto_array/src/proto_array_fork_choice.rs | 5 ++++- 4 files changed, 8 insertions(+), 17 deletions(-) diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index b075a23ddd..eac9820de3 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -281,19 +281,6 @@ impl<'a, E: EthSpec> From> for QueuedAttestation { } } -/// Used for queuing payload attestations (PTC votes) from the current slot. -/// Payload attestations have different dequeue timing than regular attestations: -/// gossiped payload attestations need an extra slot of delay (slot + 1 < current_slot). 
-#[derive(Clone, PartialEq, Encode, Decode)] -pub struct QueuedPayloadAttestation { - slot: Slot, - /// Resolved PTC committee positions (not validator indices). - ptc_indices: Vec, - block_root: Hash256, - payload_present: bool, - blob_data_available: bool, -} - /// Returns all values in `self.queued_attestations` that have a slot that is earlier than the /// current slot. Also removes those values from `self.queued_attestations`. fn dequeue_attestations( @@ -450,6 +437,7 @@ where execution_status, execution_payload_parent_hash, execution_payload_block_hash, + anchor_block.message().proposer_index(), spec, )?; @@ -462,7 +450,7 @@ where head_hash: None, justified_hash: None, finalized_hash: None, - // These will be updated during the next call to `Self::get_head`. + // This will be updated during the next call to `Self::get_head`. head_root: Hash256::zero(), }, _phantom: PhantomData, diff --git a/consensus/fork_choice/src/lib.rs b/consensus/fork_choice/src/lib.rs index 70f1dbc215..8f479125b7 100644 --- a/consensus/fork_choice/src/lib.rs +++ b/consensus/fork_choice/src/lib.rs @@ -5,8 +5,7 @@ mod metrics; pub use crate::fork_choice::{ AttestationFromBlock, Error, ForkChoice, ForkChoiceView, ForkchoiceUpdateParameters, InvalidAttestation, InvalidBlock, PayloadVerificationStatus, PersistedForkChoice, - PersistedForkChoiceV28, PersistedForkChoiceV29, QueuedAttestation, QueuedPayloadAttestation, - ResetPayloadStatuses, + PersistedForkChoiceV28, PersistedForkChoiceV29, QueuedAttestation, ResetPayloadStatuses, }; pub use fork_choice_store::ForkChoiceStore; pub use proto_array::{ diff --git a/consensus/proto_array/src/fork_choice_test_definition.rs b/consensus/proto_array/src/fork_choice_test_definition.rs index 34d7f2e48e..ff9d70bad5 100644 --- a/consensus/proto_array/src/fork_choice_test_definition.rs +++ b/consensus/proto_array/src/fork_choice_test_definition.rs @@ -144,6 +144,7 @@ impl ForkChoiceTestDefinition { ExecutionStatus::Optimistic(ExecutionBlockHash::zero()), 
self.execution_payload_parent_hash, self.execution_payload_block_hash, + 0, &spec, ) .expect("should create fork choice struct"); diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index cb467f2531..71a5a46f8c 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -479,6 +479,7 @@ impl ProtoArrayForkChoice { execution_status: ExecutionStatus, execution_payload_parent_hash: Option, execution_payload_block_hash: Option, + proposer_index: u64, spec: &ChainSpec, ) -> Result { let mut proto_array = ProtoArray { @@ -505,7 +506,7 @@ impl ProtoArrayForkChoice { unrealized_finalized_checkpoint: Some(finalized_checkpoint), execution_payload_parent_hash, execution_payload_block_hash, - proposer_index: Some(0), + proposer_index: Some(proposer_index), }; proto_array @@ -1317,6 +1318,7 @@ mod test_compute_deltas { execution_status, None, None, + 0, &spec, ) .unwrap(); @@ -1471,6 +1473,7 @@ mod test_compute_deltas { execution_status, None, None, + 0, &spec, ) .unwrap(); From ad0f3cf89bf3034df4efeb5700795c96d20cebab Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 1 Apr 2026 12:10:11 +1100 Subject: [PATCH 14/57] Use None for post-Gloas payload hashes pre-Gloas --- consensus/fork_choice/src/fork_choice.rs | 30 +++++++++++------------- 1 file changed, 14 insertions(+), 16 deletions(-) diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index eac9820de3..1f5b2cf1b0 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -395,22 +395,7 @@ where .map_err(Error::BeaconStateError)?; let (execution_status, execution_payload_parent_hash, execution_payload_block_hash) = - if let Ok(execution_payload) = anchor_block.message().execution_payload() { - // Pre-Gloas forks: hashes come from the execution payload. 
- if execution_payload.is_default_with_empty_roots() { - (ExecutionStatus::irrelevant(), None, None) - } else { - // Assume that this payload is valid, since the anchor should be a - // trusted block and state. - ( - ExecutionStatus::Valid(execution_payload.block_hash()), - Some(execution_payload.parent_hash()), - Some(execution_payload.block_hash()), - ) - } - } else if let Ok(signed_bid) = - anchor_block.message().body().signed_execution_payload_bid() - { + if let Ok(signed_bid) = anchor_block.message().body().signed_execution_payload_bid() { // Gloas: execution status is irrelevant post-Gloas; payload validation // is decoupled from beacon blocks. ( @@ -418,6 +403,19 @@ where Some(signed_bid.message.parent_block_hash), Some(signed_bid.message.block_hash), ) + } else if let Ok(execution_payload) = anchor_block.message().execution_payload() { + // Pre-Gloas forks: do not set payload hashes, they are only used post-Gloas. + if execution_payload.is_default_with_empty_roots() { + (ExecutionStatus::irrelevant(), None, None) + } else { + // Assume that this payload is valid, since the anchor should be a + // trusted block and state. + ( + ExecutionStatus::Valid(execution_payload.block_hash()), + None, + None, + ) + } } else { // Pre-merge: no execution payload at all. 
(ExecutionStatus::irrelevant(), None, None) From 9ef73d0af01c1eb53239a12976d849866c784098 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 1 Apr 2026 12:25:22 +1100 Subject: [PATCH 15/57] Add new cross-boundary test --- .../gloas_payload.rs | 127 ++++++++++++++++++ 1 file changed, 127 insertions(+) diff --git a/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs b/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs index 0fb120328c..9a7acbde09 100644 --- a/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs +++ b/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs @@ -718,6 +718,133 @@ pub fn get_gloas_payload_received_interleaving_test_definition() -> ForkChoiceTe mod tests { use super::*; + fn gloas_fork_boundary_spec() -> ChainSpec { + let mut spec = MainnetEthSpec::default_spec(); + spec.proposer_score_boost = Some(50); + spec.gloas_fork_epoch = Some(Epoch::new(1)); + spec + } + + /// Gloas fork boundary: a chain starting pre-Gloas (V17 nodes) that crosses into + /// Gloas (V29 nodes). The head should advance through the fork boundary. + /// + /// Parameters: + /// - `skip_first_gloas_slot`: if true, there is no block at the first Gloas slot (slot 32); + /// the first V29 block appears at slot 33. + /// - `first_gloas_block_full`: if true, the first V29 block extends the parent V17 node's + /// EL chain (Full parent payload status). If false, it doesn't (Empty). + fn get_gloas_fork_boundary_test_definition( + skip_first_gloas_slot: bool, + first_gloas_block_full: bool, + ) -> ForkChoiceTestDefinition { + let mut ops = vec![]; + + // Block at slot 31 — last pre-Gloas slot. Created as a V17 node because + // gloas_fork_epoch = 1 → Gloas starts at slot 32. + // + // The test harness sets execution_status = Optimistic(ExecutionBlockHash::from_root(root)), + // so this V17 node's EL block hash = ExecutionBlockHash::from_root(get_root(1)). 
+ ops.push(Operation::ProcessBlock { + slot: Slot::new(31), + root: get_root(1), + parent_root: get_root(0), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: None, + execution_payload_block_hash: None, + }); + + // First Gloas block (V29 node). + let gloas_slot = if skip_first_gloas_slot { 33 } else { 32 }; + + // For Full: execution_payload_parent_hash must match the V17 parent's EL hash. + // The V17 parent's EL hash = ExecutionBlockHash::from_root(get_root(1)) = get_hash(1). + // For Empty: use a non-matching hash. + let parent_hash = if first_gloas_block_full { + get_hash(1) + } else { + get_hash(99) + }; + + ops.push(Operation::ProcessBlock { + slot: Slot::new(gloas_slot), + root: get_root(2), + parent_root: get_root(1), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: Some(parent_hash), + execution_payload_block_hash: Some(get_hash(2)), + }); + + // Verify the parent_payload_status is correctly set. + let expected_parent_status = if first_gloas_block_full { + PayloadStatus::Full + } else { + PayloadStatus::Empty + }; + ops.push(Operation::AssertParentPayloadStatus { + block_root: get_root(2), + expected_status: expected_parent_status, + }); + + // Mark root 2's execution payload as received so the Full virtual child exists. + ops.push(Operation::ProcessExecutionPayload { + block_root: get_root(2), + }); + + // Extend the chain with another V29 block (Full child of root 2). + ops.push(Operation::ProcessBlock { + slot: Slot::new(gloas_slot + 1), + root: get_root(3), + parent_root: get_root(2), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: Some(get_hash(2)), + execution_payload_block_hash: Some(get_hash(3)), + }); + + // Head should advance to the tip of the chain through the fork boundary. 
+ ops.push(Operation::FindHead { + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + justified_state_balances: vec![1], + expected_head: get_root(3), + current_slot: Slot::new(gloas_slot + 1), + expected_payload_status: None, + }); + + ForkChoiceTestDefinition { + finalized_block_slot: Slot::new(0), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + operations: ops, + // Genesis is V17 (slot 0 < Gloas fork slot 32), these are unused for V17. + execution_payload_parent_hash: None, + execution_payload_block_hash: None, + spec: Some(gloas_fork_boundary_spec()), + } + } + + #[test] + fn fork_boundary_no_skip_full() { + get_gloas_fork_boundary_test_definition(false, true).run(); + } + + #[test] + fn fork_boundary_no_skip_empty() { + get_gloas_fork_boundary_test_definition(false, false).run(); + } + + #[test] + fn fork_boundary_skip_first_gloas_slot_full() { + get_gloas_fork_boundary_test_definition(true, true).run(); + } + + #[test] + fn fork_boundary_skip_first_gloas_slot_empty() { + get_gloas_fork_boundary_test_definition(true, false).run(); + } + #[test] fn chain_following() { let test = get_gloas_chain_following_test_definition(); From afb1f0ae2d476cb5c864d6d4ead3a119097b2abc Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 1 Apr 2026 12:48:34 +1100 Subject: [PATCH 16/57] Fix VoteTracker decoding --- .../src/proto_array_fork_choice.rs | 39 +++++++++++++++++++ consensus/proto_array/src/ssz_container.rs | 9 +++-- 2 files changed, 45 insertions(+), 3 deletions(-) diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index 71a5a46f8c..5269957dff 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -33,6 +33,45 @@ pub struct VoteTracker { next_payload_present: bool, } +// Can be deleted once the V28 schema migration is buried. 
+#[derive(Default, PartialEq, Clone, Encode, Decode)] +pub struct VoteTrackerV28 { + current_root: Hash256, + next_root: Hash256, + current_slot: Slot, + next_slot: Slot, +} + +// This impl is only used upon upgrade from pre-Gloas to Gloas with all pre-Gloas nodes. +// The payload status is `false` for pre-Gloas nodes. +impl From for VoteTracker { + fn from(v: VoteTrackerV28) -> Self { + VoteTracker { + current_root: v.current_root, + next_root: v.next_root, + current_slot: v.current_slot, + next_slot: v.next_slot, + // TODO(gloas): check that this is correct + current_payload_present: false, + next_payload_present: false, + } + } +} + +// This impl is only used upon downgrade from V29 to V28, with exclusively pre-Gloas nodes. +impl From for VoteTrackerV28 { + fn from(v: VoteTracker) -> Self { + // Drop the payload_present, but this is safe because this is only called on pre-Gloas + // nodes. + VoteTrackerV28 { + current_root: v.current_root, + next_root: v.next_root, + current_slot: v.current_slot, + next_slot: v.next_slot, + } + } +} + pub struct LatestMessage { pub slot: Slot, pub root: Hash256, diff --git a/consensus/proto_array/src/ssz_container.rs b/consensus/proto_array/src/ssz_container.rs index f8b1f6634e..80a6702210 100644 --- a/consensus/proto_array/src/ssz_container.rs +++ b/consensus/proto_array/src/ssz_container.rs @@ -2,7 +2,7 @@ use crate::proto_array::ProposerBoost; use crate::{ Error, JustifiedBalances, proto_array::{ProtoArray, ProtoNode, ProtoNodeV17}, - proto_array_fork_choice::{ElasticList, ProtoArrayForkChoice, VoteTracker}, + proto_array_fork_choice::{ElasticList, ProtoArrayForkChoice, VoteTracker, VoteTrackerV28}, }; use ssz::{Encode, four_byte_option_impl}; use ssz_derive::{Decode, Encode}; @@ -22,6 +22,9 @@ pub type SszContainer = SszContainerV29; no_enum )] pub struct SszContainer { + #[superstruct(only(V28))] + pub votes_v28: Vec, + #[superstruct(only(V29))] pub votes: Vec, pub prune_threshold: usize, // Deprecated, remove in a future 
schema migration @@ -75,7 +78,7 @@ impl TryFrom<(SszContainerV29, JustifiedBalances)> for ProtoArrayForkChoice { impl From for SszContainerV29 { fn from(v28: SszContainerV28) -> Self { Self { - votes: v28.votes, + votes: v28.votes_v28.into_iter().map(Into::into).collect(), prune_threshold: v28.prune_threshold, nodes: v28 .nodes @@ -98,7 +101,7 @@ impl From for SszContainerV29 { impl From for SszContainerV28 { fn from(v29: SszContainerV29) -> Self { Self { - votes: v29.votes, + votes_v28: v29.votes.into_iter().map(Into::into).collect(), prune_threshold: v29.prune_threshold, // These checkpoints are not consumed in v28 paths since the upgrade from v17, // we can safely default the values. From 99f5a92b98b579b350cdef14a3f2e9007ede8bea Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Wed, 1 Apr 2026 11:13:20 +0900 Subject: [PATCH 17/57] Automatically pass spans into blocking handles (#8158) Co-Authored-By: Eitan Seri- Levi Co-Authored-By: Eitan Seri-Levi Co-Authored-By: Jimmy Chen --- beacon_node/beacon_chain/src/beacon_chain.rs | 46 +++---------------- .../src/block_production/gloas.rs | 17 ++----- .../beacon_chain/src/block_production/mod.rs | 1 + .../beacon_chain/src/canonical_head.rs | 12 +---- .../beacon_chain/src/fetch_blobs/mod.rs | 4 +- .../gossip_verified_envelope.rs | 4 +- .../payload_envelope_verification/import.rs | 4 -- beacon_node/http_api/src/publish_blocks.rs | 11 ++--- common/task_executor/src/lib.rs | 12 ++++- 9 files changed, 29 insertions(+), 82 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 69db0c24fb..310163b4a9 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -130,7 +130,7 @@ use store::{ }; use task_executor::{RayonPoolType, ShutdownReason, TaskExecutor}; use tokio_stream::Stream; -use tracing::{Span, debug, debug_span, error, info, info_span, instrument, trace, warn}; +use tracing::{debug, debug_span, 
error, info, info_span, instrument, trace, warn}; use tree_hash::TreeHash; use types::data::{ColumnIndex, FixedBlobSidecarList}; use types::execution::BlockProductionVersion; @@ -2761,6 +2761,7 @@ impl BeaconChain { /// or already-known). /// /// This method is potentially long-running and should not run on the core executor. + #[instrument(skip_all, level = "debug")] pub fn filter_chain_segment( self: &Arc, chain_segment: Vec>, @@ -2888,12 +2889,8 @@ impl BeaconChain { // Filter uninteresting blocks from the chain segment in a blocking task. let chain = self.clone(); - let filter_chain_segment = debug_span!("filter_chain_segment"); let filtered_chain_segment_future = self.spawn_blocking_handle( - move || { - let _guard = filter_chain_segment.enter(); - chain.filter_chain_segment(chain_segment) - }, + move || chain.filter_chain_segment(chain_segment), "filter_chain_segment", ); let mut filtered_chain_segment = match filtered_chain_segment_future.await { @@ -2924,12 +2921,8 @@ impl BeaconChain { std::mem::swap(&mut blocks, &mut filtered_chain_segment); let chain = self.clone(); - let current_span = Span::current(); let signature_verification_future = self.spawn_blocking_handle( - move || { - let _guard = current_span.enter(); - signature_verify_chain_segment(blocks, &chain) - }, + move || signature_verify_chain_segment(blocks, &chain), "signature_verify_chain_segment", ); @@ -3019,12 +3012,10 @@ impl BeaconChain { block: Arc>, ) -> Result, BlockError> { let chain = self.clone(); - let span = Span::current(); self.task_executor .clone() .spawn_blocking_handle( move || { - let _guard = span.enter(); let slot = block.slot(); let graffiti_string = block.message().body().graffiti().as_utf8_lossy(); @@ -3332,11 +3323,9 @@ impl BeaconChain { let data_availability_checker = self.data_availability_checker.clone(); - let current_span = Span::current(); let result = self .task_executor .spawn_blocking_with_rayon_async(RayonPoolType::HighPriority, move || { - let _guard = 
current_span.enter(); data_availability_checker.reconstruct_data_columns(&block_root) }) .await @@ -3811,13 +3800,9 @@ impl BeaconChain { } let block_root = { - // Capture the current span before moving into the blocking task - let current_span = tracing::Span::current(); let chain = self.clone(); self.spawn_blocking_handle( move || { - // Enter the captured span in the blocking thread - let _guard = current_span.enter(); chain.import_block( block, block_root, @@ -4528,15 +4513,10 @@ impl BeaconChain { // // Load the parent state from disk. let chain = self.clone(); - let span = Span::current(); let (state, state_root_opt) = self .task_executor .spawn_blocking_handle( - move || { - let _guard = - debug_span!(parent: span, "load_state_for_block_production").entered(); - chain.load_state_for_block_production(slot) - }, + move || chain.load_state_for_block_production(slot), "load_state_for_block_production", ) .ok_or(BlockProductionError::ShuttingDown)? @@ -4960,13 +4940,10 @@ impl BeaconChain { .graffiti_calculator .get_graffiti(graffiti_settings) .await; - let span = Span::current(); let mut partial_beacon_block = self .task_executor .spawn_blocking_handle( move || { - let _guard = - debug_span!(parent: span, "produce_partial_beacon_block").entered(); chain.produce_partial_beacon_block( state, state_root_opt, @@ -5002,14 +4979,10 @@ impl BeaconChain { match block_contents_type { BlockProposalContentsType::Full(block_contents) => { let chain = self.clone(); - let span = Span::current(); let beacon_block_response = self .task_executor .spawn_blocking_handle( move || { - let _guard = - debug_span!(parent: span, "complete_partial_beacon_block") - .entered(); chain.complete_partial_beacon_block( partial_beacon_block, Some(block_contents), @@ -5026,14 +4999,10 @@ impl BeaconChain { } BlockProposalContentsType::Blinded(block_contents) => { let chain = self.clone(); - let span = Span::current(); let beacon_block_response = self .task_executor .spawn_blocking_handle( move || 
{ - let _guard = - debug_span!(parent: span, "complete_partial_beacon_block") - .entered(); chain.complete_partial_beacon_block( partial_beacon_block, Some(block_contents), @@ -5051,13 +5020,10 @@ impl BeaconChain { } } else { let chain = self.clone(); - let span = Span::current(); let beacon_block_response = self .task_executor .spawn_blocking_handle( move || { - let _guard = - debug_span!(parent: span, "complete_partial_beacon_block").entered(); chain.complete_partial_beacon_block( partial_beacon_block, None, @@ -5075,6 +5041,7 @@ impl BeaconChain { } #[allow(clippy::too_many_arguments)] + #[instrument(skip_all, level = "debug")] fn produce_partial_beacon_block( self: &Arc, mut state: BeaconState, @@ -5319,6 +5286,7 @@ impl BeaconChain { }) } + #[instrument(skip_all, level = "debug")] fn complete_partial_beacon_block>( &self, partial_beacon_block: PartialBeaconBlock, diff --git a/beacon_node/beacon_chain/src/block_production/gloas.rs b/beacon_node/beacon_chain/src/block_production/gloas.rs index 2fc4fb51f7..51caf63b7a 100644 --- a/beacon_node/beacon_chain/src/block_production/gloas.rs +++ b/beacon_node/beacon_chain/src/block_production/gloas.rs @@ -19,7 +19,7 @@ use state_processing::{ }; use state_processing::{VerifyOperation, state_advance::complete_state_advance}; use task_executor::JoinHandle; -use tracing::{Instrument, Span, debug, debug_span, error, instrument, trace, warn}; +use tracing::{Instrument, debug, debug_span, error, instrument, trace, warn}; use tree_hash::TreeHash; use types::consts::gloas::BUILDER_INDEX_SELF_BUILD; use types::{ @@ -87,15 +87,10 @@ impl BeaconChain { // // Load the parent state from disk. 
let chain = self.clone(); - let span = Span::current(); let (state, state_root_opt) = self .task_executor .spawn_blocking_handle( - move || { - let _guard = - debug_span!(parent: span, "load_state_for_block_production").entered(); - chain.load_state_for_block_production(slot) - }, + move || chain.load_state_for_block_production(slot), "load_state_for_block_production", ) .ok_or(BlockProductionError::ShuttingDown)? @@ -135,13 +130,10 @@ impl BeaconChain { .graffiti_calculator .get_graffiti(graffiti_settings) .await; - let span = Span::current(); let (partial_beacon_block, state) = self .task_executor .spawn_blocking_handle( move || { - let _guard = - debug_span!(parent: span, "produce_partial_beacon_block_gloas").entered(); chain.produce_partial_beacon_block_gloas( state, state_root_opt, @@ -175,12 +167,9 @@ impl BeaconChain { // // Complete the block with the execution payload bid. let chain = self.clone(); - let span = Span::current(); self.task_executor .spawn_blocking_handle( move || { - let _guard = - debug_span!(parent: span, "complete_partial_beacon_block_gloas").entered(); chain.complete_partial_beacon_block_gloas( partial_beacon_block, execution_payload_bid, @@ -198,6 +187,7 @@ impl BeaconChain { #[allow(clippy::too_many_arguments)] #[allow(clippy::type_complexity)] + #[instrument(skip_all, level = "debug")] fn produce_partial_beacon_block_gloas( self: &Arc, mut state: BeaconState, @@ -432,6 +422,7 @@ impl BeaconChain { /// - `pending_state` is the state post block application (prior to payload application) /// - `block_value` is the consensus-layer rewards for `block` #[allow(clippy::type_complexity)] + #[instrument(skip_all, level = "debug")] fn complete_partial_beacon_block_gloas( &self, partial_beacon_block: PartialBeaconBlock, diff --git a/beacon_node/beacon_chain/src/block_production/mod.rs b/beacon_node/beacon_chain/src/block_production/mod.rs index b33323f527..256b67086a 100644 --- a/beacon_node/beacon_chain/src/block_production/mod.rs +++ 
b/beacon_node/beacon_chain/src/block_production/mod.rs @@ -15,6 +15,7 @@ mod gloas; impl BeaconChain { /// Load a beacon state from the database for block production. This is a long-running process /// that should not be performed in an `async` context. + #[instrument(skip_all, level = "debug")] pub(crate) fn load_state_for_block_production( self: &Arc, slot: Slot, diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs index 9dd7d62a27..f6377e6ea5 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -58,7 +58,6 @@ use store::{ Error as StoreError, KeyValueStore, KeyValueStoreOp, StoreConfig, iter::StateRootsIterator, }; use task_executor::{JoinHandle, ShutdownReason}; -use tracing::info_span; use tracing::{debug, error, info, instrument, warn}; use types::*; @@ -528,22 +527,15 @@ impl BeaconChain { /// such a case it's critical that the `BeaconChain` keeps importing blocks so that the /// situation can be rectified. We avoid returning an error here so that calling functions /// can't abort block import because an error is returned here. 
+ #[instrument(name = "lh_recompute_head_at_slot", skip(self), level = "info", fields(slot = %current_slot))] pub async fn recompute_head_at_slot(self: &Arc, current_slot: Slot) { - let span = info_span!( - "lh_recompute_head_at_slot", - slot = %current_slot - ); - metrics::inc_counter(&metrics::FORK_CHOICE_REQUESTS); let _timer = metrics::start_timer(&metrics::FORK_CHOICE_TIMES); let chain = self.clone(); match self .spawn_blocking_handle( - move || { - let _guard = span.enter(); - chain.recompute_head_at_slot_internal(current_slot) - }, + move || chain.recompute_head_at_slot_internal(current_slot), "recompute_head_internal", ) .await diff --git a/beacon_node/beacon_chain/src/fetch_blobs/mod.rs b/beacon_node/beacon_chain/src/fetch_blobs/mod.rs index bae61767cc..db76ff887d 100644 --- a/beacon_node/beacon_chain/src/fetch_blobs/mod.rs +++ b/beacon_node/beacon_chain/src/fetch_blobs/mod.rs @@ -32,7 +32,7 @@ use mockall_double::double; use ssz_types::FixedVector; use state_processing::per_block_processing::deneb::kzg_commitment_to_versioned_hash; use std::sync::Arc; -use tracing::{Span, debug, instrument, warn}; +use tracing::{debug, instrument, warn}; use types::data::{BlobSidecarError, DataColumnSidecarError}; use types::{ BeaconStateError, Blob, BlobSidecar, ColumnIndex, EthSpec, FullPayload, Hash256, KzgProofs, @@ -356,12 +356,10 @@ async fn compute_custody_columns_to_import( let spec = chain_adapter.spec().clone(); let chain_adapter_cloned = chain_adapter.clone(); let custody_columns_indices = custody_columns_indices.to_vec(); - let current_span = Span::current(); chain_adapter .executor() .spawn_blocking_handle( move || { - let _guard = current_span.enter(); let mut timer = metrics::start_timer_vec( &metrics::DATA_COLUMN_SIDECAR_COMPUTATION, &[&blobs.len().to_string()], diff --git a/beacon_node/beacon_chain/src/payload_envelope_verification/gossip_verified_envelope.rs b/beacon_node/beacon_chain/src/payload_envelope_verification/gossip_verified_envelope.rs index 
9a4ed2d044..4d40a29332 100644 --- a/beacon_node/beacon_chain/src/payload_envelope_verification/gossip_verified_envelope.rs +++ b/beacon_node/beacon_chain/src/payload_envelope_verification/gossip_verified_envelope.rs @@ -4,7 +4,7 @@ use educe::Educe; use eth2::types::{EventKind, SseExecutionPayloadGossip}; use parking_lot::{Mutex, RwLock}; use store::DatabaseBlock; -use tracing::{Span, debug}; +use tracing::debug; use types::{ ChainSpec, EthSpec, ExecutionPayloadBid, ExecutionPayloadEnvelope, Hash256, SignedBeaconBlock, SignedExecutionPayloadEnvelope, Slot, consts::gloas::BUILDER_INDEX_SELF_BUILD, @@ -270,12 +270,10 @@ impl BeaconChain { envelope: Arc>, ) -> Result, EnvelopeError> { let chain = self.clone(); - let span = Span::current(); self.task_executor .clone() .spawn_blocking_handle( move || { - let _guard = span.enter(); let slot = envelope.slot(); let beacon_block_root = envelope.message.beacon_block_root; diff --git a/beacon_node/beacon_chain/src/payload_envelope_verification/import.rs b/beacon_node/beacon_chain/src/payload_envelope_verification/import.rs index bae848c3c1..39925d65d2 100644 --- a/beacon_node/beacon_chain/src/payload_envelope_verification/import.rs +++ b/beacon_node/beacon_chain/src/payload_envelope_verification/import.rs @@ -192,13 +192,9 @@ impl BeaconChain { } = import_data; let block_root = { - // Capture the current span before moving into the blocking task - let current_span = tracing::Span::current(); let chain = self.clone(); self.spawn_blocking_handle( move || { - // Enter the captured span in the blocking thread - let _guard = current_span.enter(); chain.import_execution_payload_envelope( envelope, block_root, diff --git a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs index 43dfbeb836..eb7e56e9cc 100644 --- a/beacon_node/http_api/src/publish_blocks.rs +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -146,12 +146,8 @@ pub async fn publish_block>( let slot = block.message().slot(); let 
sender_clone = network_tx.clone(); - let build_sidecar_task_handle = spawn_build_data_sidecar_task( - chain.clone(), - block.clone(), - unverified_blobs, - current_span.clone(), - )?; + let build_sidecar_task_handle = + spawn_build_data_sidecar_task(chain.clone(), block.clone(), unverified_blobs)?; // Gossip verify the block and blobs/data columns separately. let gossip_verified_block_result = unverified_block.into_gossip_verified_block(&chain); @@ -358,7 +354,6 @@ fn spawn_build_data_sidecar_task( chain: Arc>, block: Arc>>, proofs_and_blobs: UnverifiedBlobs, - current_span: Span, ) -> Result>, Rejection> { chain .clone() @@ -368,7 +363,7 @@ fn spawn_build_data_sidecar_task( let Some((kzg_proofs, blobs)) = proofs_and_blobs else { return Ok((vec![], vec![])); }; - let _guard = debug_span!(parent: current_span, "build_data_sidecars").entered(); + let _span = debug_span!("build_data_sidecars").entered(); let peer_das_enabled = chain.spec.is_peer_das_enabled_for_epoch(block.epoch()); if !peer_das_enabled { diff --git a/common/task_executor/src/lib.rs b/common/task_executor/src/lib.rs index d3d862f96c..07716fa2e7 100644 --- a/common/task_executor/src/lib.rs +++ b/common/task_executor/src/lib.rs @@ -6,7 +6,7 @@ use futures::channel::mpsc::Sender; use futures::prelude::*; use std::sync::{Arc, Weak}; use tokio::runtime::{Handle, Runtime}; -use tracing::debug; +use tracing::{Span, debug}; use crate::rayon_pool_provider::RayonPoolProvider; pub use crate::rayon_pool_provider::RayonPoolType; @@ -225,9 +225,11 @@ impl TaskExecutor { F: FnOnce() + Send + 'static, { let thread_pool = self.rayon_pool_provider.get_thread_pool(rayon_pool_type); + let span = Span::current(); self.spawn_blocking( move || { thread_pool.install(|| { + let _guard = span.enter(); task(); }); }, @@ -247,8 +249,10 @@ impl TaskExecutor { { let thread_pool = self.rayon_pool_provider.get_thread_pool(rayon_pool_type); let (tx, rx) = tokio::sync::oneshot::channel(); + let span = Span::current(); 
thread_pool.spawn(move || { + let _guard = span.enter(); let result = task(); let _ = tx.send(result); }); @@ -320,8 +324,12 @@ impl TaskExecutor { let timer = metrics::start_timer_vec(&metrics::BLOCKING_TASKS_HISTOGRAM, &[name]); metrics::inc_gauge_vec(&metrics::BLOCKING_TASKS_COUNT, &[name]); + let span = Span::current(); let join_handle = if let Some(handle) = self.handle() { - handle.spawn_blocking(task) + handle.spawn_blocking(move || { + let _guard = span.enter(); + task() + }) } else { debug!("Couldn't spawn task. Runtime shutting down"); return None; From edae39cc298fc211597f3082c9e2ec81ac19961e Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 1 Apr 2026 13:29:48 +1100 Subject: [PATCH 18/57] Fix fork transition case --- .../gloas_payload.rs | 44 +++++++++-------- consensus/proto_array/src/proto_array.rs | 48 +++++++++---------- 2 files changed, 48 insertions(+), 44 deletions(-) diff --git a/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs b/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs index 9a7acbde09..5b82e1de8c 100644 --- a/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs +++ b/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs @@ -757,40 +757,31 @@ mod tests { // First Gloas block (V29 node). let gloas_slot = if skip_first_gloas_slot { 33 } else { 32 }; - // For Full: execution_payload_parent_hash must match the V17 parent's EL hash. - // The V17 parent's EL hash = ExecutionBlockHash::from_root(get_root(1)) = get_hash(1). - // For Empty: use a non-matching hash. - let parent_hash = if first_gloas_block_full { - get_hash(1) - } else { - get_hash(99) - }; - + // The first Gloas block should always have the pre-Gloas block as its execution parent, + // although this is currently not checked anywhere (the spec doesn't mention this). 
ops.push(Operation::ProcessBlock { slot: Slot::new(gloas_slot), root: get_root(2), parent_root: get_root(1), justified_checkpoint: get_checkpoint(0), finalized_checkpoint: get_checkpoint(0), - execution_payload_parent_hash: Some(parent_hash), + execution_payload_parent_hash: Some(get_hash(1)), execution_payload_block_hash: Some(get_hash(2)), }); - // Verify the parent_payload_status is correctly set. - let expected_parent_status = if first_gloas_block_full { - PayloadStatus::Full - } else { - PayloadStatus::Empty - }; + // Parent payload status of fork boundary block should always be Empty. + let expected_parent_status = PayloadStatus::Empty; ops.push(Operation::AssertParentPayloadStatus { block_root: get_root(2), expected_status: expected_parent_status, }); // Mark root 2's execution payload as received so the Full virtual child exists. - ops.push(Operation::ProcessExecutionPayload { - block_root: get_root(2), - }); + if first_gloas_block_full { + ops.push(Operation::ProcessExecutionPayload { + block_root: get_root(2), + }); + } // Extend the chain with another V29 block (Full child of root 2). 
ops.push(Operation::ProcessBlock { @@ -799,7 +790,11 @@ mod tests { parent_root: get_root(2), justified_checkpoint: get_checkpoint(0), finalized_checkpoint: get_checkpoint(0), - execution_payload_parent_hash: Some(get_hash(2)), + execution_payload_parent_hash: if first_gloas_block_full { + Some(get_hash(2)) + } else { + Some(get_hash(1)) + }, execution_payload_block_hash: Some(get_hash(3)), }); @@ -813,6 +808,15 @@ mod tests { expected_payload_status: None, }); + ops.push(Operation::AssertParentPayloadStatus { + block_root: get_root(3), + expected_status: if first_gloas_block_full { + PayloadStatus::Full + } else { + PayloadStatus::Empty + }, + }); + ForkChoiceTestDefinition { finalized_block_slot: Slot::new(0), justified_checkpoint: get_checkpoint(0), diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 452679d7a3..ffe60d3a50 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -570,31 +570,31 @@ impl ProtoArray { block_root: block.root, })?; - let parent_payload_status: PayloadStatus = if let Some(parent_node) = - parent_index.and_then(|idx| self.nodes.get(idx)) - { - // Get the parent's execution block hash, handling both V17 and V29 nodes. - // V17 parents occur during the Gloas fork transition. - // TODO(gloas): the spec's `get_parent_payload_status` assumes all blocks are - // post-Gloas with bids. Revisit once the spec clarifies fork-transition behavior. - let parent_el_block_hash = match parent_node { - ProtoNode::V29(v29) => Some(v29.execution_payload_block_hash), - ProtoNode::V17(v17) => v17.execution_status.block_hash(), - }; - // Per spec's `is_parent_node_full`: if the child's EL parent hash - // matches the parent's EL block hash, the child extends the parent's - // payload chain, meaning the parent was Full. 
- if parent_el_block_hash.is_some_and(|hash| execution_payload_parent_hash == hash) { - PayloadStatus::Full + let parent_payload_status: PayloadStatus = + if let Some(parent_node) = parent_index.and_then(|idx| self.nodes.get(idx)) { + match parent_node { + ProtoNode::V29(v29) => { + // Both parent and child are Gloas blocks. The parent is full if the + // block hash in the parent node matches the parent block hash in the + // child bid. + if execution_payload_parent_hash == v29.execution_payload_block_hash { + PayloadStatus::Full + } else { + PayloadStatus::Empty + } + } + ProtoNode::V17(_) => { + // Parent is pre-Gloas, pre-Gloas blocks are treated as having Empty + // payload status. This case is reached during the fork transition. + PayloadStatus::Empty + } + } } else { - PayloadStatus::Empty - } - } else { - // Parent is missing (genesis or pruned due to finalization). Default to Full - // since this path should only be hit at Gloas genesis, and extending the payload - // chain is the safe default. - PayloadStatus::Full - }; + // TODO(gloas): re-assess this assumption + // Parent is missing (genesis or pruned due to finalization). Default to Full + // since this path should only be hit at Gloas genesis. + PayloadStatus::Full + }; // Per spec `get_forkchoice_store`: the anchor (genesis) block has // its payload state initialized (`payload_states = {anchor_root: ...}`). 
From 4e44cec249eafadc5b964499b4ad98364eb6411c Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 1 Apr 2026 13:45:19 +1100 Subject: [PATCH 19/57] Fix markdown lint --- .../src/fork_choice_test_definition/gloas_payload.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs b/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs index 5b82e1de8c..e3f81fb3ff 100644 --- a/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs +++ b/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs @@ -730,9 +730,9 @@ mod tests { /// /// Parameters: /// - `skip_first_gloas_slot`: if true, there is no block at the first Gloas slot (slot 32); - /// the first V29 block appears at slot 33. + /// the first V29 block appears at slot 33. /// - `first_gloas_block_full`: if true, the first V29 block extends the parent V17 node's - /// EL chain (Full parent payload status). If false, it doesn't (Empty). + /// EL chain (Full parent payload status). If false, it doesn't (Empty). fn get_gloas_fork_boundary_test_definition( skip_first_gloas_slot: bool, first_gloas_block_full: bool, From ab023a7231eb0aad285a987c000c0e2b21d33d9d Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 1 Apr 2026 14:54:18 +1100 Subject: [PATCH 20/57] Fix the VoteTrackerV28 definition --- .../proto_array/src/proto_array_fork_choice.rs | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index 5269957dff..f6427ba863 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -34,12 +34,12 @@ pub struct VoteTracker { } // Can be deleted once the V28 schema migration is buried. +// Matches the on-disk format from schema v28: current_root, next_root, next_epoch. 
#[derive(Default, PartialEq, Clone, Encode, Decode)] pub struct VoteTrackerV28 { current_root: Hash256, next_root: Hash256, - current_slot: Slot, - next_slot: Slot, + next_epoch: Epoch, } // This impl is only used upon upgrade from pre-Gloas to Gloas with all pre-Gloas nodes. @@ -49,9 +49,10 @@ impl From for VoteTracker { VoteTracker { current_root: v.current_root, next_root: v.next_root, - current_slot: v.current_slot, - next_slot: v.next_slot, - // TODO(gloas): check that this is correct + // The v28 format stored next_epoch rather than slots. Default to 0 since the + // vote tracker will be updated on the next attestation. + current_slot: Slot::new(0), + next_slot: Slot::new(0), current_payload_present: false, next_payload_present: false, } @@ -61,13 +62,14 @@ impl From for VoteTracker { // This impl is only used upon downgrade from V29 to V28, with exclusively pre-Gloas nodes. impl From for VoteTrackerV28 { fn from(v: VoteTracker) -> Self { - // Drop the payload_present, but this is safe because this is only called on pre-Gloas + // Drop the payload_present fields. This is safe because this is only called on pre-Gloas // nodes. VoteTrackerV28 { current_root: v.current_root, next_root: v.next_root, - current_slot: v.current_slot, - next_slot: v.next_slot, + // The v28 format stored next_epoch. Default to 0 since the vote tracker will be + // updated on the next attestation. 
+ next_epoch: Epoch::new(0), } } } From 3cf19e724fd6bd8dcb70324298043407022abd99 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 1 Apr 2026 15:47:10 +1100 Subject: [PATCH 21/57] Fix Gloas check in on_block --- consensus/proto_array/src/proto_array.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index ffe60d3a50..256cb48c77 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -535,7 +535,7 @@ impl ProtoArray { .parent_root .and_then(|parent| self.indices.get(&parent).copied()); - let node = if !spec.fork_name_at_slot::(current_slot).gloas_enabled() { + let node = if !spec.fork_name_at_slot::(block.slot).gloas_enabled() { ProtoNode::V17(ProtoNodeV17 { slot: block.slot, root: block.root, From ddff03d26f269c773e3774e0eaf1b327bd254e8b Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 1 Apr 2026 16:42:49 +1100 Subject: [PATCH 22/57] Store parent_payload_hash in ProtoNode --- consensus/fork_choice/src/fork_choice.rs | 1 + consensus/proto_array/src/proto_array.rs | 4 +++- consensus/proto_array/src/proto_array_fork_choice.rs | 5 +++-- 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 1f5b2cf1b0..7e189b0a1b 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -1138,6 +1138,7 @@ where } // index == 1 (payload_present) requires the block's payload to have been received. 
+ // TODO(gloas): could optimise by adding `payload_received` to `Block` if index == 1 && !self .proto_array diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 256cb48c77..412e217bf8 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -143,6 +143,8 @@ pub struct ProtoNode { pub full_payload_weight: u64, #[superstruct(only(V29), partial_getter(copy))] pub execution_payload_block_hash: ExecutionBlockHash, + #[superstruct(only(V29), partial_getter(copy))] + pub execution_payload_parent_hash: ExecutionBlockHash, /// Equivalent to spec's `block_timeliness[root][ATTESTATION_TIMELINESS_INDEX]`. #[superstruct(only(V29), partial_getter(copy))] pub block_timeliness_attestation_threshold: bool, @@ -181,7 +183,6 @@ pub struct ProtoNode { impl ProtoNode { /// Generic version of spec's `parent_payload_status` that works for pre-Gloas nodes by /// considering their parents Empty. - /// Pre-Gloas nodes have no ePBS, default to Empty. pub fn get_parent_payload_status(&self) -> PayloadStatus { self.parent_payload_status().unwrap_or(PayloadStatus::Empty) } @@ -620,6 +621,7 @@ impl ProtoArray { empty_payload_weight: 0, full_payload_weight: 0, execution_payload_block_hash, + execution_payload_parent_hash, // Per spec `get_forkchoice_store`: the anchor block's PTC votes are // initialized to all-True, ensuring `is_payload_timely` and // `is_payload_data_available` return true for the anchor. 
diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index f6427ba863..72440b83b8 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -1030,7 +1030,7 @@ impl ProtoArrayForkChoice { .unwrap_or_else(|_| ExecutionStatus::irrelevant()), unrealized_justified_checkpoint: block.unrealized_justified_checkpoint(), unrealized_finalized_checkpoint: block.unrealized_finalized_checkpoint(), - execution_payload_parent_hash: None, + execution_payload_parent_hash: block.execution_payload_parent_hash().ok(), execution_payload_block_hash: block.execution_payload_block_hash().ok(), proposer_index: block.proposer_index().ok(), }) @@ -1047,7 +1047,8 @@ impl ProtoArrayForkChoice { } /// Returns whether the execution payload for a block has been received. - /// Returns `false` for pre-GLOAS (V17) nodes or unknown blocks. + /// + /// Returns `false` for pre-Gloas (V17) nodes or unknown blocks. 
pub fn is_payload_received(&self, block_root: &Hash256) -> bool { self.get_proto_node(block_root) .and_then(|node| node.payload_received().ok()) From f5b2445d09d62c5ff8258ecabe4a8dab2655e7b0 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 1 Apr 2026 16:46:02 +1100 Subject: [PATCH 23/57] Remove stupid GLOAS comments --- beacon_node/beacon_chain/src/beacon_chain.rs | 4 ++-- beacon_node/http_api/src/validator/mod.rs | 2 +- consensus/fork_choice/src/fork_choice.rs | 10 +++++----- consensus/fork_choice/tests/tests.rs | 4 ++-- .../src/fork_choice_test_definition/gloas_payload.rs | 8 ++++---- consensus/proto_array/src/proto_array.rs | 2 +- 6 files changed, 15 insertions(+), 15 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 3c8ea30779..f05b972679 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -4855,8 +4855,8 @@ impl BeaconChain { return Err(Box::new(DoNotReOrg::NotProposing.into())); } - // TODO(gloas): reorg weight logic needs updating for GLOAS. For now use - // total weight which is correct for pre-GLOAS and conservative for post-GLOAS. + // TODO(gloas): reorg weight logic needs updating for Gloas. For now use + // total weight which is correct for pre-Gloas and conservative for post-Gloas. let head_weight = info.head_node.weight(); let parent_weight = info.parent_node.weight(); diff --git a/beacon_node/http_api/src/validator/mod.rs b/beacon_node/http_api/src/validator/mod.rs index 3d96b85870..412851233e 100644 --- a/beacon_node/http_api/src/validator/mod.rs +++ b/beacon_node/http_api/src/validator/mod.rs @@ -671,7 +671,7 @@ pub fn post_validator_prepare_beacon_proposer( .await; // TODO(gloas): verify this is correct. 
We skip proposer preparation for - // GLOAS because the execution payload is no longer embedded in the beacon + // Gloas because the execution payload is no longer embedded in the beacon // block (it's in the payload envelope), so the head block's // execution_payload() is unavailable. let next_slot = current_slot + 1; diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 7e189b0a1b..cedd42cf01 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -170,11 +170,11 @@ pub enum InvalidAttestation { /// The attestation is attesting to a state that is later than itself. (Viz., attesting to the /// future). AttestsToFutureBlock { block: Slot, attestation: Slot }, - /// Post-GLOAS: attestation index must be 0 or 1. + /// Post-Gloas: attestation index must be 0 or 1. InvalidAttestationIndex { index: u64 }, - /// A same-slot attestation has a non-zero index, which is invalid post-GLOAS. + /// A same-slot attestation has a non-zero index, which is invalid post-Gloas. InvalidSameSlotAttestationIndex { slot: Slot }, - /// Post-GLOAS: attestation with index == 1 (payload_present) requires the block's + /// Post-Gloas: attestation with index == 1 (payload_present) requires the block's /// payload to have been received (`root in store.payload_states`). PayloadNotReceived { beacon_block_root: Hash256 }, /// A payload attestation votes payload_present for a block in the current slot, which is @@ -256,7 +256,7 @@ pub struct QueuedAttestation { attesting_indices: Vec, block_root: Hash256, target_epoch: Epoch, - /// Per GLOAS spec: `payload_present = attestation.data.index == 1`. + /// Per Gloas spec: `payload_present = attestation.data.index == 1`. payload_present: bool, } @@ -1125,7 +1125,7 @@ where { let index = indexed_attestation.data().index; - // Post-GLOAS: attestation index must be 0 or 1. + // Post-Gloas: attestation index must be 0 or 1. 
if index > 1 { return Err(InvalidAttestation::InvalidAttestationIndex { index }); } diff --git a/consensus/fork_choice/tests/tests.rs b/consensus/fork_choice/tests/tests.rs index 839d0f4c5c..241e25d3e2 100644 --- a/consensus/fork_choice/tests/tests.rs +++ b/consensus/fork_choice/tests/tests.rs @@ -73,9 +73,9 @@ impl ForkChoiceTest { Self { harness } } - /// Creates a new tester with the GLOAS fork active at epoch 1. + /// Creates a new tester with the Gloas fork active at epoch 1. /// Genesis is a standard Fulu block (epoch 0), so block production works normally. - /// Tests that need GLOAS semantics should advance the chain into epoch 1 first. + /// Tests that need Gloas semantics should advance the chain into epoch 1 first. /// Get a value from the `ForkChoice` instantiation. fn get(&self, func: T) -> U where diff --git a/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs b/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs index e3f81fb3ff..18d7a40b82 100644 --- a/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs +++ b/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs @@ -52,7 +52,7 @@ pub fn get_gloas_chain_following_test_definition() -> ForkChoiceTestDefinition { }); // Mark root_1 as having received its execution payload so that - // its FULL virtual node exists in the GLOAS fork choice tree. + // its FULL virtual node exists in the Gloas fork choice tree. ops.push(Operation::ProcessExecutionPayload { block_root: get_root(1), }); @@ -262,7 +262,7 @@ pub fn get_gloas_find_head_vote_transition_test_definition() -> ForkChoiceTestDe }); // Mark root_1 as having received its execution payload so that - // its FULL virtual node exists in the GLOAS fork choice tree. + // its FULL virtual node exists in the Gloas fork choice tree. 
ops.push(Operation::ProcessExecutionPayload { block_root: get_root(1), }); @@ -367,7 +367,7 @@ pub fn get_gloas_weight_priority_over_payload_preference_test_definition() }); // Mark root_1 as having received its execution payload so that - // its FULL virtual node exists in the GLOAS fork choice tree. + // its FULL virtual node exists in the Gloas fork choice tree. ops.push(Operation::ProcessExecutionPayload { block_root: get_root(1), }); @@ -537,7 +537,7 @@ pub fn get_gloas_interleaved_attestations_test_definition() -> ForkChoiceTestDef }); // Mark root_1 as having received its execution payload so that - // its FULL virtual node exists in the GLOAS fork choice tree. + // its FULL virtual node exists in the Gloas fork choice tree. ops.push(Operation::ProcessExecutionPayload { block_root: get_root(1), }); diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 412e217bf8..361e4a86e2 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -642,7 +642,7 @@ impl ProtoArray { block_timeliness_attestation_threshold: is_genesis || (is_current_slot && time_into_slot < spec.get_unaggregated_attestation_due()), - // TODO(gloas): use GLOAS-specific PTC due threshold once + // TODO(gloas): use Gloas-specific PTC due threshold once // `get_payload_attestation_due_ms` is on ChainSpec. 
block_timeliness_ptc_threshold: is_genesis || (is_current_slot && time_into_slot < spec.get_slot_duration() / 2), From 39f07106afcb05be8d780c6fda848060d20454e7 Mon Sep 17 00:00:00 2001 From: Eitan Seri- Levi Date: Tue, 31 Mar 2026 23:53:06 -0700 Subject: [PATCH 24/57] revert --- consensus/proto_array/src/proto_array_fork_choice.rs | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index cb467f2531..6c90af1302 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -997,11 +997,7 @@ impl ProtoArrayForkChoice { /// Returns the `block.execution_status` field, if the block is present. pub fn get_block_execution_status(&self, block_root: &Hash256) -> Option { let block = self.get_proto_node(block_root)?; - Some( - block - .execution_status() - .unwrap_or_else(|_| ExecutionStatus::irrelevant()), - ) + block.execution_status().ok() } /// Returns whether the execution payload for a block has been received. From f6baea4b08e095a869fc9a3e0f70b6650d784909 Mon Sep 17 00:00:00 2001 From: Eitan Seri- Levi Date: Tue, 31 Mar 2026 23:55:42 -0700 Subject: [PATCH 25/57] revert --- consensus/proto_array/src/proto_array.rs | 26 ++++-------------------- 1 file changed, 4 insertions(+), 22 deletions(-) diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index d3abf53b64..53b963b974 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -1432,29 +1432,11 @@ impl ProtoArray { .get(node.proto_node_index) .ok_or(Error::InvalidNodeIndex(node.proto_node_index))?; - // V17 (pre-GLOAS) nodes don't have payload_received or parent_payload_status. - // Skip the virtual Empty/Full split and return real children directly. 
- if proto_node.as_v17().is_ok() { - let child_indices = children_index - .get(node.proto_node_index) - .map(|c| c.as_slice()) - .unwrap_or(&[]); - return Ok(child_indices - .iter() - .filter_map(|&child_index| { - let child_node = self.nodes.get(child_index)?; - Some(( - IndexedForkChoiceNode { - root: child_node.root(), - proto_node_index: child_index, - payload_status: PayloadStatus::Pending, - }, - child_node.clone(), - )) - }) - .collect()); + let mut children = vec![(node.with_status(PayloadStatus::Empty), proto_node.clone())]; + // The FULL virtual child only exists if the payload has been received. + if proto_node.payload_received().is_ok_and(|received| received) { + children.push((node.with_status(PayloadStatus::Full), proto_node.clone())); } - // TODO(gloas) this is the actual change we want to keep once PTC is implemented // let mut children = vec![(node.with_status(PayloadStatus::Empty), proto_node.clone())]; // // The FULL virtual child only exists if the payload has been received. 
From 5aae563d84f3d7ae67bc63de5f230024a78cdaa4 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 1 Apr 2026 17:25:50 +1100 Subject: [PATCH 26/57] Remove proposer boost weight during upgrade --- .../src/schema_change/migration_schema_v29.rs | 47 ++++++++++++++++++- .../tests/payload_invalidation.rs | 2 +- consensus/fork_choice/src/fork_choice.rs | 6 +-- consensus/proto_array/src/proto_array.rs | 5 -- .../src/proto_array_fork_choice.rs | 31 ++---------- consensus/proto_array/src/ssz_container.rs | 7 ++- 6 files changed, 54 insertions(+), 44 deletions(-) diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v29.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v29.rs index 3069200fce..77d4be3443 100644 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v29.rs +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v29.rs @@ -1,7 +1,9 @@ use crate::beacon_chain::{BeaconChainTypes, FORK_CHOICE_DB_KEY}; use crate::persisted_fork_choice::{PersistedForkChoiceV28, PersistedForkChoiceV29}; +use std::collections::HashMap; use store::hot_cold_store::HotColdDB; use store::{DBColumn, Error as StoreError, KeyValueStore, KeyValueStoreOp}; +use tracing::warn; use types::EthSpec; /// Upgrade from schema v28 to v29. @@ -49,8 +51,49 @@ pub fn upgrade_to_v29( } } - // Convert to v29 and encode. - let persisted_v29 = PersistedForkChoiceV29::from(persisted_v28); + // Read the previous proposer boost before converting to V29 (V29 no longer stores it). + let previous_proposer_boost = persisted_v28 + .fork_choice_v28 + .proto_array_v28 + .previous_proposer_boost; + + // Convert to v29. + let mut persisted_v29 = PersistedForkChoiceV29::from(persisted_v28); + + // Subtract the proposer boost from the boosted node and all its ancestors. + // + // In the V28 schema, `apply_score_changes` baked the proposer boost directly into node + // weights and back-propagated it up the parent chain. 
In V29, the boost is computed + // on-the-fly during the virtual tree walk. If we don't subtract the baked-in boost here, + // it will be double-counted after the upgrade. + if !previous_proposer_boost.root.is_zero() && previous_proposer_boost.score > 0 { + let score = previous_proposer_boost.score; + let indices: HashMap<_, _> = persisted_v29 + .fork_choice + .proto_array + .indices + .iter() + .cloned() + .collect(); + + if let Some(node_index) = indices.get(&previous_proposer_boost.root).copied() { + let nodes = &mut persisted_v29.fork_choice.proto_array.nodes; + let mut current = Some(node_index); + while let Some(idx) = current { + if let Some(node) = nodes.get_mut(idx) { + *node.weight_mut() = node.weight().saturating_sub(score); + current = node.parent(); + } else { + break; + } + } + } else { + warn!( + root = ?previous_proposer_boost.root, + "Proposer boost node missing from fork choice" + ); + } + } Ok(vec![ persisted_v29.as_kv_store_op(FORK_CHOICE_DB_KEY, db.get_config())?, diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index 13672bbb63..947024e8c2 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -1438,7 +1438,7 @@ async fn weights_after_resetting_optimistic_status() { .canonical_head .fork_choice_write_lock() .proto_array_mut() - .set_all_blocks_to_optimistic::(&rig.harness.chain.spec) + .set_all_blocks_to_optimistic::() .unwrap(); let new_weights = rig diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index cedd42cf01..c08e76020b 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -1614,7 +1614,6 @@ where persisted_proto_array: proto_array::core::SszContainer, justified_balances: JustifiedBalances, reset_payload_statuses: ResetPayloadStatuses, - spec: &ChainSpec, ) -> Result> { let mut proto_array 
= ProtoArrayForkChoice::from_container( persisted_proto_array.clone(), @@ -1639,7 +1638,7 @@ where // Reset all blocks back to being "optimistic". This helps recover from an EL consensus // fault where an invalid payload becomes valid. - if let Err(e) = proto_array.set_all_blocks_to_optimistic::(spec) { + if let Err(e) = proto_array.set_all_blocks_to_optimistic::() { // If there is an error resetting the optimistic status then log loudly and revert // back to a proto-array which does not have the reset applied. This indicates a // significant error in Lighthouse and warrants detailed investigation. @@ -1669,7 +1668,6 @@ where persisted.proto_array, justified_balances, reset_payload_statuses, - spec, )?; let current_slot = fc_store.get_current_slot(); @@ -1703,7 +1701,7 @@ where // get a different result. fork_choice .proto_array - .set_all_blocks_to_optimistic::(spec)?; + .set_all_blocks_to_optimistic::()?; // If the second attempt at finding a head fails, return an error since we do not // expect this scenario. let _ = fork_choice.get_head(current_slot, spec)?; diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 361e4a86e2..f2a6f6d0dc 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -369,7 +369,6 @@ pub struct ProtoArray { pub prune_threshold: usize, pub nodes: Vec, pub indices: HashMap, - pub previous_proposer_boost: ProposerBoost, } impl ProtoArray { @@ -502,10 +501,6 @@ impl ProtoArray { } } - // Proposer boost is now applied on-the-fly in `get_weight` during the - // walk, so clear any stale boost from a prior call. 
- self.previous_proposer_boost = ProposerBoost::default(); - Ok(()) } diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index 72440b83b8..634a78823d 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -2,8 +2,7 @@ use crate::{ JustifiedBalances, error::Error, proto_array::{ - InvalidationOperation, Iter, NodeDelta, ProposerBoost, ProtoArray, ProtoNode, - calculate_committee_fraction, + InvalidationOperation, Iter, NodeDelta, ProtoArray, ProtoNode, calculate_committee_fraction, }, ssz_container::SszContainer, }; @@ -527,7 +526,6 @@ impl ProtoArrayForkChoice { prune_threshold: DEFAULT_PRUNE_THRESHOLD, nodes: Vec::with_capacity(1), indices: HashMap::with_capacity(1), - previous_proposer_boost: ProposerBoost::default(), }; let block = Block { @@ -880,10 +878,7 @@ impl ProtoArrayForkChoice { /// status to be optimistic. /// /// In practice this means forgetting any `VALID` or `INVALID` statuses. - pub fn set_all_blocks_to_optimistic( - &mut self, - spec: &ChainSpec, - ) -> Result<(), String> { + pub fn set_all_blocks_to_optimistic(&mut self) -> Result<(), String> { // Iterate backwards through all nodes in the `proto_array`. Whilst it's not strictly // required to do this process in reverse, it seems natural when we consider how LMD votes // are counted. @@ -906,7 +901,7 @@ impl ProtoArrayForkChoice { // Restore the weight of the node, it would have been set to `0` in // `apply_score_changes` when it was invalidated. - let mut restored_weight: u64 = self + let restored_weight: u64 = self .votes .0 .iter() @@ -922,26 +917,6 @@ impl ProtoArrayForkChoice { }) .sum(); - // If the invalid root was boosted, apply the weight to it and - // ancestors. 
- if let Some(proposer_score_boost) = spec.proposer_score_boost - && self.proto_array.previous_proposer_boost.root == node.root() - { - // Compute the score based upon the current balances. We can't rely on - // the `previous_proposr_boost.score` since it is set to zero with an - // invalid node. - let proposer_score = - calculate_committee_fraction::(&self.balances, proposer_score_boost) - .ok_or("Failed to compute proposer boost")?; - // Store the score we've applied here so it can be removed in - // a later call to `apply_score_changes`. - self.proto_array.previous_proposer_boost.score = proposer_score; - // Apply this boost to this node. - restored_weight = restored_weight - .checked_add(proposer_score) - .ok_or("Overflow when adding boost to weight")?; - } - // Add the restored weight to the node and all ancestors. if restored_weight > 0 { let mut node_or_ancestor = node; diff --git a/consensus/proto_array/src/ssz_container.rs b/consensus/proto_array/src/ssz_container.rs index 80a6702210..69efb35027 100644 --- a/consensus/proto_array/src/ssz_container.rs +++ b/consensus/proto_array/src/ssz_container.rs @@ -38,6 +38,7 @@ pub struct SszContainer { #[superstruct(only(V29))] pub nodes: Vec, pub indices: Vec<(Hash256, usize)>, + #[superstruct(only(V28))] pub previous_proposer_boost: ProposerBoost, } @@ -50,7 +51,6 @@ impl SszContainerV29 { prune_threshold: proto_array.prune_threshold, nodes: proto_array.nodes.clone(), indices: proto_array.indices.iter().map(|(k, v)| (*k, *v)).collect(), - previous_proposer_boost: proto_array.previous_proposer_boost, } } } @@ -63,7 +63,6 @@ impl TryFrom<(SszContainerV29, JustifiedBalances)> for ProtoArrayForkChoice { prune_threshold: from.prune_threshold, nodes: from.nodes, indices: from.indices.into_iter().collect::>(), - previous_proposer_boost: from.previous_proposer_boost, }; Ok(Self { @@ -92,7 +91,6 @@ impl From for SszContainerV29 { }) .collect(), indices: v28.indices, - previous_proposer_boost: v28.previous_proposer_boost, } 
} } @@ -116,7 +114,8 @@ impl From for SszContainerV28 { }) .collect(), indices: v29.indices, - previous_proposer_boost: v29.previous_proposer_boost, + // Proposer boost is not tracked in V29 (computed on-the-fly), so reset it. + previous_proposer_boost: ProposerBoost::default(), } } } From 12e6595b324fb419d2718fd51371b61b6b41630f Mon Sep 17 00:00:00 2001 From: Eitan Seri- Levi Date: Wed, 1 Apr 2026 00:02:11 -0700 Subject: [PATCH 27/57] revcert --- consensus/proto_array/src/proto_array.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 53b963b974..95e19e2119 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -1431,7 +1431,6 @@ impl ProtoArray { .nodes .get(node.proto_node_index) .ok_or(Error::InvalidNodeIndex(node.proto_node_index))?; - let mut children = vec![(node.with_status(PayloadStatus::Empty), proto_node.clone())]; // The FULL virtual child only exists if the payload has been received. 
if proto_node.payload_received().is_ok_and(|received| received) { @@ -1464,7 +1463,6 @@ impl ProtoArray { .iter() .filter_map(|&child_index| { let child_node = self.nodes.get(child_index)?; - // Skip parent_payload_status filter for V17 children (they don't have it) if child_node.get_parent_payload_status() != node.payload_status { return None; } From d978e3dabe56be6c56ea7765a15f4ce7f844f864 Mon Sep 17 00:00:00 2001 From: Eitan Seri- Levi Date: Wed, 1 Apr 2026 00:35:14 -0700 Subject: [PATCH 28/57] revbert --- consensus/proto_array/src/proto_array.rs | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 95e19e2119..e9d5e02db5 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -1446,11 +1446,7 @@ impl ProtoArray { // TODO(gloas) remove this and uncomment the code above once we implement PTC // Skip Empty/Full split: go straight to Full when payload received, // giving full payload weight 100% without PTC votes. 
- let children = if proto_node.payload_received().is_ok_and(|received| received) { - vec![(node.with_status(PayloadStatus::Full), proto_node.clone())] - } else { - vec![(node.with_status(PayloadStatus::Empty), proto_node.clone())] - }; + // TODO(gloas) delete up to here Ok(children) From a5bdd0c5979f68be615aeb3d610ffeb5d5ce25fc Mon Sep 17 00:00:00 2001 From: Eitan Seri- Levi Date: Wed, 1 Apr 2026 00:59:00 -0700 Subject: [PATCH 29/57] Smol revert --- consensus/proto_array/src/proto_array.rs | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index e9d5e02db5..361e4a86e2 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -1436,19 +1436,6 @@ impl ProtoArray { if proto_node.payload_received().is_ok_and(|received| received) { children.push((node.with_status(PayloadStatus::Full), proto_node.clone())); } - // TODO(gloas) this is the actual change we want to keep once PTC is implemented - // let mut children = vec![(node.with_status(PayloadStatus::Empty), proto_node.clone())]; - // // The FULL virtual child only exists if the payload has been received. - // if proto_node.payload_received().is_ok_and(|received| received) { - // children.push((node.with_status(PayloadStatus::Full), proto_node.clone())); - // } - - // TODO(gloas) remove this and uncomment the code above once we implement PTC - // Skip Empty/Full split: go straight to Full when payload received, - // giving full payload weight 100% without PTC votes. 
- - // TODO(gloas) delete up to here - Ok(children) } else { let child_indices = children_index From 5f8605f67e9341112db44ebe27e39b099e1e8543 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 2 Apr 2026 10:40:23 +1100 Subject: [PATCH 30/57] Disable optimistic sync for Gloas --- .../src/payload_envelope_verification/import.rs | 10 ++++++++++ .../src/payload_envelope_verification/mod.rs | 2 ++ 2 files changed, 12 insertions(+) diff --git a/beacon_node/beacon_chain/src/payload_envelope_verification/import.rs b/beacon_node/beacon_chain/src/payload_envelope_verification/import.rs index ed121ccb94..6efabcdfa8 100644 --- a/beacon_node/beacon_chain/src/payload_envelope_verification/import.rs +++ b/beacon_node/beacon_chain/src/payload_envelope_verification/import.rs @@ -167,6 +167,16 @@ impl BeaconChain { .map_err(BeaconChainError::TokioJoin)? .ok_or(BeaconChainError::RuntimeShutdown)??; + // TODO(gloas): optimistic sync is not supported for Gloas, maybe we could re-add it + if payload_verification_outcome + .payload_verification_status + .is_optimistic() + { + return Err(EnvelopeError::OptimisticSyncNotSupported { + block_root: import_data.block_root, + }); + } + Ok(ExecutedEnvelope::new( signed_envelope, import_data, diff --git a/beacon_node/beacon_chain/src/payload_envelope_verification/mod.rs b/beacon_node/beacon_chain/src/payload_envelope_verification/mod.rs index c707d62dc7..225d5a9892 100644 --- a/beacon_node/beacon_chain/src/payload_envelope_verification/mod.rs +++ b/beacon_node/beacon_chain/src/payload_envelope_verification/mod.rs @@ -182,6 +182,8 @@ pub enum EnvelopeError { payload_slot: Slot, latest_finalized_slot: Slot, }, + /// Optimistic sync is not supported for Gloas payload envelopes. 
+ OptimisticSyncNotSupported { block_root: Hash256 }, /// Some Beacon Chain Error BeaconChainError(Arc), /// Some Beacon State error From 1c5a7bed7402481baddafe39c9a9c928e54ce7f1 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 2 Apr 2026 10:56:34 +1100 Subject: [PATCH 31/57] Clarify name of `on_valid_payload_envelope_received` --- .../payload_envelope_verification/import.rs | 5 ++-- consensus/fork_choice/src/fork_choice.rs | 22 ++++++++++----- .../src/fork_choice_test_definition.rs | 2 +- consensus/proto_array/src/proto_array.rs | 27 +++++++------------ .../src/proto_array_fork_choice.rs | 11 ++++++-- testing/ef_tests/src/cases/fork_choice.rs | 2 +- 6 files changed, 40 insertions(+), 29 deletions(-) diff --git a/beacon_node/beacon_chain/src/payload_envelope_verification/import.rs b/beacon_node/beacon_chain/src/payload_envelope_verification/import.rs index 6efabcdfa8..4a0a188e5e 100644 --- a/beacon_node/beacon_chain/src/payload_envelope_verification/import.rs +++ b/beacon_node/beacon_chain/src/payload_envelope_verification/import.rs @@ -253,9 +253,10 @@ impl BeaconChain { // avoiding taking other locks whilst holding this lock. let mut fork_choice = parking_lot::RwLockUpgradableReadGuard::upgrade(fork_choice_reader); - // Update the node's payload_status from PENDING to FULL in fork choice. + // Update the block's payload to received in fork choice, which creates the `Full` virtual + // node which can be eligible for head. 
fork_choice - .on_execution_payload(block_root) + .on_valid_payload_envelope_received(block_root) .map_err(|e| EnvelopeError::InternalError(format!("{e:?}")))?; // TODO(gloas) emit SSE event if the payload became the new head payload diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index c08e76020b..7f5ef51217 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -654,6 +654,20 @@ where } } + /// Mark a Gloas payload envelope as valid and received. + /// + /// This must only be called for valid Gloas payloads. + pub fn on_valid_payload_envelope_received( + &mut self, + block_root: Hash256, + ) -> Result<(), Error> { + self.proto_array + .on_valid_payload_envelope_received(block_root) + .map_err(Error::FailedToProcessValidExecutionPayload) + } + + /// Pre-Gloas only. + /// /// See `ProtoArrayForkChoice::process_execution_payload_validation` for documentation. pub fn on_valid_execution_payload( &mut self, @@ -664,6 +678,8 @@ where .map_err(Error::FailedToProcessValidExecutionPayload) } + /// Pre-Gloas only. + /// /// See `ProtoArrayForkChoice::process_execution_payload_invalidation` for documentation. 
pub fn on_invalid_execution_payload( &mut self, @@ -977,12 +993,6 @@ where Ok(()) } - pub fn on_execution_payload(&mut self, block_root: Hash256) -> Result<(), Error> { - self.proto_array - .on_execution_payload(block_root) - .map_err(Error::FailedToProcessValidExecutionPayload) - } - /// Update checkpoints in store if necessary fn update_checkpoints( &mut self, diff --git a/consensus/proto_array/src/fork_choice_test_definition.rs b/consensus/proto_array/src/fork_choice_test_definition.rs index ff9d70bad5..1901091dd6 100644 --- a/consensus/proto_array/src/fork_choice_test_definition.rs +++ b/consensus/proto_array/src/fork_choice_test_definition.rs @@ -502,7 +502,7 @@ impl ForkChoiceTestDefinition { } Operation::ProcessExecutionPayload { block_root } => { fork_choice - .on_execution_payload(block_root) + .on_valid_payload_envelope_received(block_root) .unwrap_or_else(|e| { panic!( "on_execution_payload op at index {} returned error: {}", diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index f2a6f6d0dc..6695079cd2 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -768,12 +768,10 @@ impl ProtoArray { Ok(!has_equivocation) } - /// Process an execution payload for a Gloas block. + /// Process a valid execution payload envelope for a Gloas block. /// - /// Sets `payload_received` to true, which makes `is_payload_timely` and - /// `is_payload_data_available` return true regardless of PTC votes. - /// This maps to `store.payload_states[root] = state` in the spec. - pub fn on_valid_execution_payload(&mut self, block_root: Hash256) -> Result<(), Error> { + /// Sets `payload_received` to true. + pub fn on_valid_payload_envelope_received(&mut self, block_root: Hash256) -> Result<(), Error> { let index = *self .indices .get(&block_root) @@ -809,6 +807,8 @@ impl ProtoArray { /// Updates the `verified_node_index` and all ancestors to have validated execution payloads. 
/// + /// This function is a no-op if called for a Gloas block. + /// /// Returns an error if: /// /// - The `verified_node_index` is unknown. @@ -852,18 +852,10 @@ impl ProtoArray { }); } }, - // Gloas nodes don't carry `ExecutionStatus`. Mark the validated - // block as payload-received so that `is_payload_timely` / - // `is_payload_data_available` and `index == 1` attestations work. - ProtoNode::V29(node) => { - if index == verified_node_index { - node.payload_received = true; - } - if let Some(parent_index) = node.parent { - parent_index - } else { - return Ok(()); - } + // Gloas nodes should not be marked valid by this function, which exists only + // for pre-Gloas fork choice. + ProtoNode::V29(_) => { + return Ok(()); } }; @@ -874,6 +866,7 @@ impl ProtoArray { /// Invalidate zero or more blocks, as specified by the `InvalidationOperation`. /// /// See the documentation of `InvalidationOperation` for usage. + // TODO(gloas): this needs some tests for the mixed Gloas/pre-Gloas case. pub fn propagate_execution_payload_invalidation( &mut self, op: &InvalidationOperation, diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index 634a78823d..842cdfaa33 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -567,11 +567,18 @@ impl ProtoArrayForkChoice { }) } - pub fn on_execution_payload(&mut self, block_root: Hash256) -> Result<(), String> { + /// Mark a Gloas payload envelope as valid and received. + /// + /// This must only be called for valid Gloas payloads. + pub fn on_valid_payload_envelope_received( + &mut self, + block_root: Hash256, + ) -> Result<(), String> { self.proto_array - .on_valid_execution_payload(block_root) + .on_valid_payload_envelope_received(block_root) .map_err(|e| format!("Failed to process execution payload: {:?}", e)) } + /// See `ProtoArray::propagate_execution_payload_validation` for documentation. 
pub fn process_execution_payload_validation( &mut self, diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index 22e8453e14..06f204ab01 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -1018,7 +1018,7 @@ impl Tester { .chain .canonical_head .fork_choice_write_lock() - .on_execution_payload(block_root); + .on_valid_payload_envelope_received(block_root); if valid { result.map_err(|e| { From 958c8cad3939f02bda2c6667bdb3f5835ad61db6 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 2 Apr 2026 11:05:33 +1100 Subject: [PATCH 32/57] Rename fork choice test def for clarity --- .../proto_array/src/fork_choice_test_definition.rs | 4 ++-- .../fork_choice_test_definition/gloas_payload.rs | 14 +++++++------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/consensus/proto_array/src/fork_choice_test_definition.rs b/consensus/proto_array/src/fork_choice_test_definition.rs index 1901091dd6..c9764d3e44 100644 --- a/consensus/proto_array/src/fork_choice_test_definition.rs +++ b/consensus/proto_array/src/fork_choice_test_definition.rs @@ -98,7 +98,7 @@ pub enum Operation { }, /// Simulate receiving and validating an execution payload for `block_root`. /// Sets `payload_received = true` on the V29 node via the live validation path. - ProcessExecutionPayload { + ProcessExecutionPayloadEnvelope { block_root: Hash256, }, AssertPayloadReceived { @@ -500,7 +500,7 @@ impl ForkChoiceTestDefinition { // the payload to be in payload_states (payload_received). 
node_v29.payload_received = is_timely || is_data_available; } - Operation::ProcessExecutionPayload { block_root } => { + Operation::ProcessExecutionPayloadEnvelope { block_root } => { fork_choice .on_valid_payload_envelope_received(block_root) .unwrap_or_else(|e| { diff --git a/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs b/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs index 18d7a40b82..ea37780795 100644 --- a/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs +++ b/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs @@ -53,7 +53,7 @@ pub fn get_gloas_chain_following_test_definition() -> ForkChoiceTestDefinition { // Mark root_1 as having received its execution payload so that // its FULL virtual node exists in the Gloas fork choice tree. - ops.push(Operation::ProcessExecutionPayload { + ops.push(Operation::ProcessExecutionPayloadEnvelope { block_root: get_root(1), }); @@ -263,7 +263,7 @@ pub fn get_gloas_find_head_vote_transition_test_definition() -> ForkChoiceTestDe // Mark root_1 as having received its execution payload so that // its FULL virtual node exists in the Gloas fork choice tree. - ops.push(Operation::ProcessExecutionPayload { + ops.push(Operation::ProcessExecutionPayloadEnvelope { block_root: get_root(1), }); @@ -368,7 +368,7 @@ pub fn get_gloas_weight_priority_over_payload_preference_test_definition() // Mark root_1 as having received its execution payload so that // its FULL virtual node exists in the Gloas fork choice tree. - ops.push(Operation::ProcessExecutionPayload { + ops.push(Operation::ProcessExecutionPayloadEnvelope { block_root: get_root(1), }); @@ -538,7 +538,7 @@ pub fn get_gloas_interleaved_attestations_test_definition() -> ForkChoiceTestDef // Mark root_1 as having received its execution payload so that // its FULL virtual node exists in the Gloas fork choice tree. 
- ops.push(Operation::ProcessExecutionPayload { + ops.push(Operation::ProcessExecutionPayloadEnvelope { block_root: get_root(1), }); @@ -674,8 +674,8 @@ pub fn get_gloas_payload_received_interleaving_test_definition() -> ForkChoiceTe expected_payload_status: None, }); - // ProcessExecutionPayload on genesis is a no-op (already received at init). - ops.push(Operation::ProcessExecutionPayload { + // ProcessExecutionPayloadEnvelope on genesis is a no-op (already received at init). + ops.push(Operation::ProcessExecutionPayloadEnvelope { block_root: get_root(0), }); @@ -778,7 +778,7 @@ mod tests { // Mark root 2's execution payload as received so the Full virtual child exists. if first_gloas_block_full { - ops.push(Operation::ProcessExecutionPayload { + ops.push(Operation::ProcessExecutionPayloadEnvelope { block_root: get_root(2), }); } From 69d725097183a740d1a2cc81b42bb1da7d1b1cab Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 2 Apr 2026 11:27:55 +1100 Subject: [PATCH 33/57] Tidy up payload attestation verification --- consensus/fork_choice/src/fork_choice.rs | 27 ++++++++++++++++--- .../indexed_payload_attestation.rs | 7 ----- 2 files changed, 24 insertions(+), 10 deletions(-) diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 7f5ef51217..dd68497e23 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -185,6 +185,11 @@ pub enum InvalidAttestation { attestation_slot: Slot, current_slot: Slot, }, + /// One or more payload attesters are not part of the PTC. 
+ PayloadAttestationAttestersNotInPtc { + attesting_indices_len: usize, + attesting_indices_in_ptc: usize, + }, } impl From for Error { @@ -1169,10 +1174,14 @@ where indexed_payload_attestation: &IndexedPayloadAttestation, is_from_block: AttestationFromBlock, ) -> Result<(), InvalidAttestation> { + // This check is from `is_valid_indexed_payload_attestation`, but we do it immediately to + // avoid wasting time on junk attestations. if indexed_payload_attestation.attesting_indices.is_empty() { return Err(InvalidAttestation::EmptyAggregationBitfield); } + // PTC attestation must be for a known block. If block is unknown, delay consideration until + // the block is found (responsibility of caller). let block = self .proto_array .get_block(&indexed_payload_attestation.data.beacon_block_root) @@ -1180,6 +1189,8 @@ where beacon_block_root: indexed_payload_attestation.data.beacon_block_root, })?; + // Not strictly part of the spec, but payload attestations to future slots are MORE INVALID + // than payload attestations to blocks at previous slots. if block.slot > indexed_payload_attestation.data.slot { return Err(InvalidAttestation::AttestsToFutureBlock { block: block.slot, @@ -1187,13 +1198,13 @@ where }); } - // Spec: `if data.slot != state.slot: return` — PTC votes can only - // change the vote for their assigned beacon block. + // PTC votes can only change the vote for their assigned beacon block, return early otherwise if block.slot != indexed_payload_attestation.data.slot { return Ok(()); } // Gossip payload attestations must be for the current slot. + // NOTE: signature is assumed to have been verified by caller. // https://github.com/ethereum/consensus-specs/blob/master/specs/gloas/fork-choice.md if matches!(is_from_block, AttestationFromBlock::False) && indexed_payload_attestation.data.slot != self.fc_store.get_current_slot() @@ -1318,10 +1329,20 @@ where // Resolve validator indices to PTC committee positions. 
let ptc_indices: Vec = attestation - .attesting_indices_iter() + .attesting_indices + .iter() .filter_map(|vi| ptc.iter().position(|&p| p == *vi as usize)) .collect(); + // Check that all the attesters are in the PTC + if ptc_indices.len() != attestation.attesting_indices.len() { + return Err(InvalidAttestation::PayloadAttestationAttestersNotInPtc { + attesting_indices_len: attestation.attesting_indices.len(), + attesting_indices_in_ptc: ptc_indices.len(), + } + .into()); + } + for &ptc_index in &ptc_indices { self.proto_array.process_payload_attestation( attestation.data.beacon_block_root, diff --git a/consensus/types/src/attestation/indexed_payload_attestation.rs b/consensus/types/src/attestation/indexed_payload_attestation.rs index 4de805570c..bb2087e330 100644 --- a/consensus/types/src/attestation/indexed_payload_attestation.rs +++ b/consensus/types/src/attestation/indexed_payload_attestation.rs @@ -2,7 +2,6 @@ use crate::test_utils::TestRandom; use crate::{EthSpec, ForkName, PayloadAttestationData}; use bls::AggregateSignature; use context_deserialize::context_deserialize; -use core::slice::Iter; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use ssz_types::VariableList; @@ -21,12 +20,6 @@ pub struct IndexedPayloadAttestation { pub signature: AggregateSignature, } -impl IndexedPayloadAttestation { - pub fn attesting_indices_iter(&self) -> Iter<'_, u64> { - self.attesting_indices.iter() - } -} - #[cfg(test)] mod tests { use super::*; From bc7864b076ebe87c52a9ad33b669a188ec889a58 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 2 Apr 2026 11:44:02 +1100 Subject: [PATCH 34/57] Split out InvalidPayloadAttestation error --- .../beacon_chain/src/block_verification.rs | 2 +- consensus/fork_choice/src/fork_choice.rs | 57 +++++++++++++------ consensus/fork_choice/src/lib.rs | 5 +- consensus/fork_choice/tests/tests.rs | 6 +- 4 files changed, 47 insertions(+), 23 deletions(-) diff --git a/beacon_node/beacon_chain/src/block_verification.rs 
b/beacon_node/beacon_chain/src/block_verification.rs index 9bb519373a..1ce1137f1e 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -1698,7 +1698,7 @@ impl ExecutionPendingBlock { indexed_payload_attestation, AttestationFromBlock::True, &ptc.0, - ) && !matches!(e, ForkChoiceError::InvalidAttestation(_)) + ) && !matches!(e, ForkChoiceError::InvalidPayloadAttestation(_)) { return Err(BlockError::BeaconChainError(Box::new(e.into()))); } diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index dd68497e23..2dbefc763f 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -26,6 +26,7 @@ use types::{ #[derive(Debug)] pub enum Error { InvalidAttestation(InvalidAttestation), + InvalidPayloadAttestation(InvalidPayloadAttestation), InvalidAttesterSlashing(AttesterSlashingValidationError), InvalidBlock(InvalidBlock), ProtoArrayStringError(String), @@ -85,6 +86,12 @@ impl From for Error { } } +impl From for Error { + fn from(e: InvalidPayloadAttestation) -> Self { + Error::InvalidPayloadAttestation(e) + } +} + impl From for Error { fn from(e: AttesterSlashingValidationError) -> Self { Error::InvalidAttesterSlashing(e) @@ -177,14 +184,24 @@ pub enum InvalidAttestation { /// Post-Gloas: attestation with index == 1 (payload_present) requires the block's /// payload to have been received (`root in store.payload_states`). PayloadNotReceived { beacon_block_root: Hash256 }, - /// A payload attestation votes payload_present for a block in the current slot, which is - /// invalid because the payload cannot be known yet. - PayloadPresentDuringSameSlot { slot: Slot }, +} + +#[derive(Debug, Clone, PartialEq)] +pub enum InvalidPayloadAttestation { + /// The payload attestation's attesting indices were empty. + EmptyAggregationBitfield, + /// The `payload_attestation.data.beacon_block_root` block is unknown. 
+ UnknownHeadBlock { beacon_block_root: Hash256 }, + /// The payload attestation is attesting to a block that is later than itself. + AttestsToFutureBlock { block: Slot, attestation: Slot }, /// A gossip payload attestation must be for the current slot. PayloadAttestationNotCurrentSlot { attestation_slot: Slot, current_slot: Slot, }, + /// A payload attestation votes payload_present for a block in the current slot, which is + /// invalid because the payload cannot be known yet. + PayloadPresentDuringSameSlot { slot: Slot }, /// One or more payload attesters are not part of the PTC. PayloadAttestationAttestersNotInPtc { attesting_indices_len: usize, @@ -1173,11 +1190,11 @@ where &self, indexed_payload_attestation: &IndexedPayloadAttestation, is_from_block: AttestationFromBlock, - ) -> Result<(), InvalidAttestation> { + ) -> Result<(), InvalidPayloadAttestation> { // This check is from `is_valid_indexed_payload_attestation`, but we do it immediately to // avoid wasting time on junk attestations. if indexed_payload_attestation.attesting_indices.is_empty() { - return Err(InvalidAttestation::EmptyAggregationBitfield); + return Err(InvalidPayloadAttestation::EmptyAggregationBitfield); } // PTC attestation must be for a known block. If block is unknown, delay consideration until @@ -1185,14 +1202,14 @@ where let block = self .proto_array .get_block(&indexed_payload_attestation.data.beacon_block_root) - .ok_or(InvalidAttestation::UnknownHeadBlock { + .ok_or(InvalidPayloadAttestation::UnknownHeadBlock { beacon_block_root: indexed_payload_attestation.data.beacon_block_root, })?; // Not strictly part of the spec, but payload attestations to future slots are MORE INVALID // than payload attestations to blocks at previous slots. 
if block.slot > indexed_payload_attestation.data.slot { - return Err(InvalidAttestation::AttestsToFutureBlock { + return Err(InvalidPayloadAttestation::AttestsToFutureBlock { block: block.slot, attestation: indexed_payload_attestation.data.slot, }); @@ -1209,10 +1226,12 @@ where if matches!(is_from_block, AttestationFromBlock::False) && indexed_payload_attestation.data.slot != self.fc_store.get_current_slot() { - return Err(InvalidAttestation::PayloadAttestationNotCurrentSlot { - attestation_slot: indexed_payload_attestation.data.slot, - current_slot: self.fc_store.get_current_slot(), - }); + return Err( + InvalidPayloadAttestation::PayloadAttestationNotCurrentSlot { + attestation_slot: indexed_payload_attestation.data.slot, + current_slot: self.fc_store.get_current_slot(), + }, + ); } // A payload attestation voting payload_present for a block in the current slot is @@ -1222,7 +1241,9 @@ where && self.fc_store.get_current_slot() == block.slot && indexed_payload_attestation.data.payload_present { - return Err(InvalidAttestation::PayloadPresentDuringSameSlot { slot: block.slot }); + return Err(InvalidPayloadAttestation::PayloadPresentDuringSameSlot { + slot: block.slot, + }); } Ok(()) @@ -1336,11 +1357,13 @@ where // Check that all the attesters are in the PTC if ptc_indices.len() != attestation.attesting_indices.len() { - return Err(InvalidAttestation::PayloadAttestationAttestersNotInPtc { - attesting_indices_len: attestation.attesting_indices.len(), - attesting_indices_in_ptc: ptc_indices.len(), - } - .into()); + return Err( + InvalidPayloadAttestation::PayloadAttestationAttestersNotInPtc { + attesting_indices_len: attestation.attesting_indices.len(), + attesting_indices_in_ptc: ptc_indices.len(), + } + .into(), + ); } for &ptc_index in &ptc_indices { diff --git a/consensus/fork_choice/src/lib.rs b/consensus/fork_choice/src/lib.rs index 8f479125b7..159eab0ec0 100644 --- a/consensus/fork_choice/src/lib.rs +++ b/consensus/fork_choice/src/lib.rs @@ -4,8 +4,9 @@ mod 
metrics; pub use crate::fork_choice::{ AttestationFromBlock, Error, ForkChoice, ForkChoiceView, ForkchoiceUpdateParameters, - InvalidAttestation, InvalidBlock, PayloadVerificationStatus, PersistedForkChoice, - PersistedForkChoiceV28, PersistedForkChoiceV29, QueuedAttestation, ResetPayloadStatuses, + InvalidAttestation, InvalidBlock, InvalidPayloadAttestation, PayloadVerificationStatus, + PersistedForkChoice, PersistedForkChoiceV28, PersistedForkChoiceV29, QueuedAttestation, + ResetPayloadStatuses, }; pub use fork_choice_store::ForkChoiceStore; pub use proto_array::{ diff --git a/consensus/fork_choice/tests/tests.rs b/consensus/fork_choice/tests/tests.rs index 241e25d3e2..d6f937c0ca 100644 --- a/consensus/fork_choice/tests/tests.rs +++ b/consensus/fork_choice/tests/tests.rs @@ -11,7 +11,7 @@ use bls::AggregateSignature; use fixed_bytes::FixedBytesExtended; use fork_choice::{ AttestationFromBlock, ForkChoiceStore, InvalidAttestation, InvalidBlock, - PayloadVerificationStatus, QueuedAttestation, + InvalidPayloadAttestation, PayloadVerificationStatus, QueuedAttestation, }; use state_processing::state_advance::complete_state_advance; use std::fmt; @@ -969,8 +969,8 @@ async fn non_block_payload_attestation_for_previous_slot_is_rejected() { assert!( matches!( result, - Err(ForkChoiceError::InvalidAttestation( - InvalidAttestation::PayloadAttestationNotCurrentSlot { .. } + Err(ForkChoiceError::InvalidPayloadAttestation( + InvalidPayloadAttestation::PayloadAttestationNotCurrentSlot { .. 
} )) ), "gossip payload attestation for previous slot should be rejected, got: {:?}", From cc7e727f90bf28e1ba33413d31c5b34f4147d2ad Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 2 Apr 2026 11:46:34 +1100 Subject: [PATCH 35/57] Tidy latest_message --- consensus/proto_array/src/proto_array_fork_choice.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index 842cdfaa33..0ecaea3971 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -73,6 +73,7 @@ impl From for VoteTrackerV28 { } } +/// Spec's `LatestMessage` type. Only used in tests. pub struct LatestMessage { pub slot: Slot, pub root: Hash256, @@ -1064,10 +1065,9 @@ impl ProtoArrayForkChoice { .is_finalized_checkpoint_or_descendant::(descendant_root, best_finalized_checkpoint) } + /// NOTE: only used in tests. pub fn latest_message(&self, validator_index: usize) -> Option { - if validator_index < self.votes.0.len() { - let vote = &self.votes.0[validator_index]; - + if let Some(vote) = self.votes.0.get(validator_index) { if *vote == VoteTracker::default() { None } else { From 727535bcc9768c99589dcc60e8ee0f5f2807814b Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 2 Apr 2026 11:50:16 +1100 Subject: [PATCH 36/57] Remove spurious payload attestation condition --- consensus/fork_choice/src/fork_choice.rs | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 2dbefc763f..0993ae95a3 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -199,9 +199,6 @@ pub enum InvalidPayloadAttestation { attestation_slot: Slot, current_slot: Slot, }, - /// A payload attestation votes payload_present for a block in the current slot, which is - /// invalid because the 
payload cannot be known yet. - PayloadPresentDuringSameSlot { slot: Slot }, /// One or more payload attesters are not part of the PTC. PayloadAttestationAttestersNotInPtc { attesting_indices_len: usize, @@ -1234,18 +1231,6 @@ where ); } - // A payload attestation voting payload_present for a block in the current slot is - // invalid: the payload cannot be known yet. This only applies to gossip attestations; - // payload attestations from blocks have already been validated by the block producer. - if matches!(is_from_block, AttestationFromBlock::False) - && self.fc_store.get_current_slot() == block.slot - && indexed_payload_attestation.data.payload_present - { - return Err(InvalidPayloadAttestation::PayloadPresentDuringSameSlot { - slot: block.slot, - }); - } - Ok(()) } From d7f67dae21203a3691f96b4d9d2c63995ffaba53 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 2 Apr 2026 13:27:43 +1100 Subject: [PATCH 37/57] Remove incorrect comment --- consensus/proto_array/src/proto_array.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 6695079cd2..f259ed7e06 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -165,8 +165,6 @@ pub struct ProtoNode { pub payload_data_availability_votes: BitVector, /// Whether the execution payload for this block has been received and validated locally. /// Maps to `root in store.payload_states` in the spec. - /// When true, `is_payload_timely` and `is_payload_data_available` return true - /// regardless of PTC vote counts. 
#[superstruct(only(V29), partial_getter(copy))] pub payload_received: bool, /// The proposer index for this block, used by `should_apply_proposer_boost` From 6cc65848da74fe29a13604d8290445e85a0632bb Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 2 Apr 2026 13:35:51 +1100 Subject: [PATCH 38/57] Remove dead code --- consensus/proto_array/src/proto_array.rs | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index f259ed7e06..14290fd784 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -489,12 +489,10 @@ impl ProtoArray { .ok_or(Error::DeltaOverflow(parent_index))?; } } else { - // V17 child of a V29 parent (fork transition): treat as FULL - // since V17 nodes always have execution payloads inline. - parent_delta.full_delta = parent_delta - .full_delta - .checked_add(delta) - .ok_or(Error::DeltaOverflow(parent_index))?; + // This is a v17 node with a v17 parent. + // There is no empty or full weight for v17 nodes, so nothing to propagate. + // In the tree walk, the v17 nodes have an empty child with 0 weight, which + // wins by default (it is the only child). } } } From 85934b4d5fcd55884abd2580089154dd419d2ad0 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 2 Apr 2026 13:52:21 +1100 Subject: [PATCH 39/57] Remove some noisy TODOs --- consensus/proto_array/src/proto_array.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 14290fd784..392f11a198 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -720,7 +720,6 @@ impl ProtoArray { .nodes .get(block_index) .ok_or(Error::InvalidNodeIndex(block_index))?; - // TODO(gloas): handle parent unknown case? 
let parent_index = block .parent() .ok_or(Error::NodeUnknown(proposer_boost_root))?; @@ -744,7 +743,6 @@ impl ProtoArray { // the parent's slot from the same proposer. let parent_slot = parent.slot(); let parent_root = parent.root(); - // TODO(gloas): handle proposer index for pre-Gloas blocks? let parent_proposer = parent.proposer_index(); let has_equivocation = self.nodes.iter().any(|node| { From 5284486af08216cdaca1e88c4e02cbce8c0a85f3 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 2 Apr 2026 13:55:29 +1100 Subject: [PATCH 40/57] Break out of invalidation loop on Gloas block --- consensus/proto_array/src/proto_array.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 392f11a198..faaf675565 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -960,7 +960,7 @@ impl ProtoArray { // This block is pre-merge, therefore it has no execution status. Nor do its // ancestors. Ok(ExecutionStatus::Irrelevant(_)) => break, - Err(_) => (), + Err(_) => break, } } From 7570fd155503a45d27c2be95d23ab3263d81dc48 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 2 Apr 2026 14:10:11 +1100 Subject: [PATCH 41/57] Remove comment --- consensus/proto_array/src/proto_array.rs | 3 --- 1 file changed, 3 deletions(-) diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index faaf675565..20554c963e 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -1069,9 +1069,6 @@ impl ProtoArray { }); } - // In the post-Gloas world, always use a virtual tree walk. - // - // Best child/best descendant is dead. 
let best_fc_node = self.find_head_walk::( justified_index, current_slot, From eceedaf7b69c558a2a8aeb81c25e2547b59b98b3 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 31 Mar 2026 15:47:50 +1100 Subject: [PATCH 42/57] Revert parent->child optimisation AGAIN --- consensus/proto_array/src/proto_array.rs | 70 ++++++++---------------- 1 file changed, 24 insertions(+), 46 deletions(-) diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 20554c963e..3bf8994c67 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -1104,26 +1104,6 @@ impl ProtoArray { Ok((best_fc_node.root, best_fc_node.payload_status)) } - /// Build a parent->children index. Invalid nodes are excluded - /// (they aren't in store.blocks in the spec). - fn build_children_index(&self) -> Vec> { - let mut children = vec![vec![]; self.nodes.len()]; - for (i, node) in self.nodes.iter().enumerate() { - if node - .execution_status() - .is_ok_and(|status| status.is_invalid()) - { - continue; - } - if let Some(parent) = node.parent() - && parent < children.len() - { - children[parent].push(i); - } - } - children - } - /// Spec: `get_filtered_block_tree`. 
/// /// Returns the set of node indices on viable branches — those with at least @@ -1134,7 +1114,6 @@ impl ProtoArray { current_slot: Slot, best_justified_checkpoint: Checkpoint, best_finalized_checkpoint: Checkpoint, - children_index: &[Vec], ) -> HashSet { let mut viable = HashSet::new(); self.filter_block_tree::( @@ -1142,7 +1121,6 @@ impl ProtoArray { current_slot, best_justified_checkpoint, best_finalized_checkpoint, - children_index, &mut viable, ); viable @@ -1155,17 +1133,25 @@ impl ProtoArray { current_slot: Slot, best_justified_checkpoint: Checkpoint, best_finalized_checkpoint: Checkpoint, - children_index: &[Vec], viable: &mut HashSet, ) -> bool { let Some(node) = self.nodes.get(node_index) else { return false; }; - let children = children_index - .get(node_index) - .map(|c| c.as_slice()) - .unwrap_or(&[]); + // Skip invalid children — they aren't in store.blocks in the spec. + let children: Vec = self + .nodes + .iter() + .enumerate() + .filter(|(_, child)| { + child.parent() == Some(node_index) + && !child + .execution_status() + .is_ok_and(|status| status.is_invalid()) + }) + .map(|(i, _)| i) + .collect(); if !children.is_empty() { // Evaluate ALL children (no short-circuit) to mark all viable branches. @@ -1177,7 +1163,6 @@ impl ProtoArray { current_slot, best_justified_checkpoint, best_finalized_checkpoint, - children_index, viable, ) }) @@ -1222,16 +1207,12 @@ impl ProtoArray { payload_status: PayloadStatus::Pending, }; - // Build parent->children index once for O(1) lookups. - let children_index = self.build_children_index(); - // Spec: `get_filtered_block_tree`. let viable_nodes = self.get_filtered_block_tree::( start_index, current_slot, best_justified_checkpoint, best_finalized_checkpoint, - &children_index, ); // Compute once rather than per-child per-level. @@ -1240,7 +1221,7 @@ impl ProtoArray { loop { let children: Vec<_> = self - .get_node_children(&head, &children_index)? + .get_node_children(&head)? 
.into_iter() .filter(|(fc_node, _)| viable_nodes.contains(&fc_node.proto_node_index)) .collect(); @@ -1403,7 +1384,6 @@ impl ProtoArray { fn get_node_children( &self, node: &IndexedForkChoiceNode, - children_index: &[Vec], ) -> Result, Error> { if node.payload_status == PayloadStatus::Pending { let proto_node = self @@ -1417,25 +1397,23 @@ impl ProtoArray { } Ok(children) } else { - let child_indices = children_index - .get(node.proto_node_index) - .map(|c| c.as_slice()) - .unwrap_or(&[]); - Ok(child_indices + Ok(self + .nodes .iter() - .filter_map(|&child_index| { - let child_node = self.nodes.get(child_index)?; - if child_node.get_parent_payload_status() != node.payload_status { - return None; - } - Some(( + .enumerate() + .filter(|(_, child_node)| { + child_node.parent() == Some(node.proto_node_index) + && child_node.get_parent_payload_status() == node.payload_status + }) + .map(|(child_index, child_node)| { + ( IndexedForkChoiceNode { root: child_node.root(), proto_node_index: child_index, payload_status: PayloadStatus::Pending, }, child_node.clone(), - )) + ) }) .collect()) } From 6df55970aab1b138ebe10cf7f87c4a6148f25462 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 2 Apr 2026 14:24:37 +1100 Subject: [PATCH 43/57] Simplify find_head_walk --- consensus/proto_array/src/proto_array.rs | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 3bf8994c67..39c638f4a8 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -1232,11 +1232,7 @@ impl ProtoArray { head = children .into_iter() - .map(|(child, _)| -> Result<_, Error> { - let proto_node = self - .nodes - .get(child.proto_node_index) - .ok_or(Error::InvalidNodeIndex(child.proto_node_index))?; + .map(|(child, ref proto_node)| -> Result<_, Error> { let weight = self.get_weight::( &child, proto_node, From f5413c6e9721e46a9a383e16bd607ec8ce2b6acb Mon 
Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 2 Apr 2026 15:42:57 +1100 Subject: [PATCH 44/57] Remove useless let _ --- consensus/fork_choice/src/fork_choice.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 0993ae95a3..92fd4c1faf 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -474,7 +474,7 @@ where }; // Ensure that `fork_choice.forkchoice_update_parameters.head_root` is updated. - let _ = fork_choice.get_head(current_slot, spec)?; + fork_choice.get_head(current_slot, spec)?; Ok(fork_choice) } @@ -1743,7 +1743,7 @@ where .set_all_blocks_to_optimistic::()?; // If the second attempt at finding a head fails, return an error since we do not // expect this scenario. - let _ = fork_choice.get_head(current_slot, spec)?; + fork_choice.get_head(current_slot, spec)?; } Ok(fork_choice) From a1296fc0c7df2f28eac8c2c52fce1cdb3e9c51b2 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 2 Apr 2026 16:38:40 +1100 Subject: [PATCH 45/57] Fix block_timeliness_ptc_threshold hardcoded constants --- consensus/proto_array/src/proto_array.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 39c638f4a8..f1145598a9 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -636,7 +636,7 @@ impl ProtoArray { // TODO(gloas): use Gloas-specific PTC due threshold once // `get_payload_attestation_due_ms` is on ChainSpec. 
block_timeliness_ptc_threshold: is_genesis - || (is_current_slot && time_into_slot < spec.get_slot_duration() / 2), + || (is_current_slot && time_into_slot < 3 * spec.get_slot_duration() / 4), equivocating_attestation_score: 0, }) }; From 9db37b8bd3d93917413aa35b880a87137e20eb06 Mon Sep 17 00:00:00 2001 From: dapplion <35266934+dapplion@users.noreply.github.com> Date: Thu, 2 Apr 2026 00:50:46 -0500 Subject: [PATCH 46/57] Document is_head_weak spec divergence and impact --- consensus/proto_array/src/proto_array.rs | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index f1145598a9..b0f471dd6d 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -672,12 +672,17 @@ impl ProtoArray { Ok(()) } - /// Spec: `is_head_weak`. - /// - /// The spec adds weight from equivocating validators in the head slot's - /// committees. We approximate this with `equivocating_attestation_score` - /// which tracks equivocating validators that voted for this block (close - /// but not identical to committee membership). + // TODO(gloas): the spec adds weight from equivocating validators in the + // head slot's *committees*, regardless of who they voted for. We approximate + // with `equivocating_attestation_score` which only tracks equivocating + // validators whose vote pointed at this block. This under-counts when an + // equivocating validator is in the committee but voted for a different fork, + // which could allow a re-org the spec wouldn't. In practice the deviation + // is small — it requires equivocating validators voting for competing forks + // AND the head weight to be exactly at the reorg threshold boundary. + // Fixing this properly requires committee computation from BeaconState, + // which is not available in proto_array. 
The fix would be to pass + // pre-computed equivocating committee weight from the beacon_chain caller. fn is_head_weak( &self, head_node: &ProtoNode, From 6763862e0f796cfda42f0781503715c90c14f651 Mon Sep 17 00:00:00 2001 From: dapplion <35266934+dapplion@users.noreply.github.com> Date: Thu, 2 Apr 2026 00:56:42 -0500 Subject: [PATCH 47/57] Add attestation_due_bps_gloas and payload_attestation_due_bps to ChainSpec Spec: `get_attestation_due_ms(epoch)` uses ATTESTATION_DUE_BPS_GLOAS (2500) for Gloas epochs vs ATTESTATION_DUE_BPS (3333) pre-Gloas. `get_payload_attestation_due_ms` uses PAYLOAD_ATTESTATION_DUE_BPS (7500). - Add both BPS fields to ChainSpec with derived Duration values - Add `get_attestation_due::(slot)` that returns epoch-appropriate threshold matching the spec - Add `get_payload_attestation_due()` matching the spec - Use them in proto_array record_block_timeliness instead of hardcoded values --- consensus/proto_array/src/proto_array.rs | 7 ++- consensus/types/src/core/chain_spec.rs | 54 +++++++++++++++++++++++- 2 files changed, 55 insertions(+), 6 deletions(-) diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index b0f471dd6d..dfb43f5f34 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -632,11 +632,9 @@ impl ProtoArray { // Anchor gets [True, True]. Others computed from time_into_slot. block_timeliness_attestation_threshold: is_genesis || (is_current_slot - && time_into_slot < spec.get_unaggregated_attestation_due()), - // TODO(gloas): use Gloas-specific PTC due threshold once - // `get_payload_attestation_due_ms` is on ChainSpec. 
+ && time_into_slot < spec.get_attestation_due::(current_slot)), block_timeliness_ptc_threshold: is_genesis - || (is_current_slot && time_into_slot < 3 * spec.get_slot_duration() / 4), + || (is_current_slot && time_into_slot < spec.get_payload_attestation_due()), equivocating_attestation_score: 0, }) }; @@ -672,6 +670,7 @@ impl ProtoArray { Ok(()) } + /// Spec: `is_head_weak`. // TODO(gloas): the spec adds weight from equivocating validators in the // head slot's *committees*, regardless of who they voted for. We approximate // with `equivocating_attestation_score` which only tracks equivocating diff --git a/consensus/types/src/core/chain_spec.rs b/consensus/types/src/core/chain_spec.rs index cc79d3fc29..9ffa7d6f7e 100644 --- a/consensus/types/src/core/chain_spec.rs +++ b/consensus/types/src/core/chain_spec.rs @@ -107,6 +107,8 @@ pub struct ChainSpec { pub shard_committee_period: u64, pub proposer_reorg_cutoff_bps: u64, pub attestation_due_bps: u64, + pub attestation_due_bps_gloas: u64, + pub payload_attestation_due_bps: u64, pub aggregate_due_bps: u64, pub sync_message_due_bps: u64, pub contribution_due_bps: u64, @@ -115,6 +117,8 @@ pub struct ChainSpec { * Derived time values (computed at startup via `compute_derived_values()`) */ pub unaggregated_attestation_due: Duration, + pub unaggregated_attestation_due_gloas: Duration, + pub payload_attestation_due: Duration, pub aggregate_attestation_due: Duration, pub sync_message_due: Duration, pub contribution_and_proof_due: Duration, @@ -877,6 +881,20 @@ impl ChainSpec { self.unaggregated_attestation_due } + /// Spec: `get_attestation_due_ms`. Returns the epoch-appropriate threshold. + pub fn get_attestation_due(&self, slot: Slot) -> Duration { + if self.fork_name_at_slot::(slot).gloas_enabled() { + self.unaggregated_attestation_due_gloas + } else { + self.unaggregated_attestation_due + } + } + + /// Spec: `get_payload_attestation_due_ms`. 
+ pub fn get_payload_attestation_due(&self) -> Duration { + self.payload_attestation_due + } + /// Get the duration into a slot in which an aggregated attestation is due. /// Returns the pre-computed value from `compute_derived_values()`. pub fn get_aggregate_attestation_due(&self) -> Duration { @@ -949,6 +967,12 @@ impl ChainSpec { self.unaggregated_attestation_due = self .compute_slot_component_duration(self.attestation_due_bps) .expect("invalid chain spec: cannot compute unaggregated_attestation_due"); + self.unaggregated_attestation_due_gloas = self + .compute_slot_component_duration(self.attestation_due_bps_gloas) + .expect("invalid chain spec: cannot compute unaggregated_attestation_due_gloas"); + self.payload_attestation_due = self + .compute_slot_component_duration(self.payload_attestation_due_bps) + .expect("invalid chain spec: cannot compute payload_attestation_due"); self.aggregate_attestation_due = self .compute_slot_component_duration(self.aggregate_due_bps) .expect("invalid chain spec: cannot compute aggregate_attestation_due"); @@ -1079,6 +1103,8 @@ impl ChainSpec { shard_committee_period: 256, proposer_reorg_cutoff_bps: 1667, attestation_due_bps: 3333, + attestation_due_bps_gloas: 2500, + payload_attestation_due_bps: 7500, aggregate_due_bps: 6667, sync_message_due_bps: 3333, contribution_due_bps: 6667, @@ -1087,6 +1113,8 @@ impl ChainSpec { * Derived time values (set by `compute_derived_values()`) */ unaggregated_attestation_due: Duration::from_millis(3999), + unaggregated_attestation_due_gloas: Duration::from_millis(3000), + payload_attestation_due: Duration::from_millis(9000), aggregate_attestation_due: Duration::from_millis(8000), sync_message_due: Duration::from_millis(3999), contribution_and_proof_due: Duration::from_millis(8000), @@ -1479,6 +1507,8 @@ impl ChainSpec { shard_committee_period: 256, proposer_reorg_cutoff_bps: 1667, attestation_due_bps: 3333, + attestation_due_bps_gloas: 2500, + payload_attestation_due_bps: 7500, 
aggregate_due_bps: 6667, /* @@ -1486,6 +1516,8 @@ impl ChainSpec { * Precomputed for 5000ms slot: 3333 bps = 1666ms, 6667 bps = 3333ms */ unaggregated_attestation_due: Duration::from_millis(1666), + unaggregated_attestation_due_gloas: Duration::from_millis(1250), + payload_attestation_due: Duration::from_millis(3750), aggregate_attestation_due: Duration::from_millis(3333), sync_message_due: Duration::from_millis(1666), contribution_and_proof_due: Duration::from_millis(3333), @@ -2062,6 +2094,12 @@ pub struct Config { #[serde(default = "default_attestation_due_bps")] #[serde(with = "serde_utils::quoted_u64")] attestation_due_bps: u64, + #[serde(default = "default_attestation_due_bps_gloas")] + #[serde(with = "serde_utils::quoted_u64")] + attestation_due_bps_gloas: u64, + #[serde(default = "default_payload_attestation_due_bps")] + #[serde(with = "serde_utils::quoted_u64")] + payload_attestation_due_bps: u64, #[serde(default = "default_aggregate_due_bps")] #[serde(with = "serde_utils::quoted_u64")] aggregate_due_bps: u64, @@ -2288,6 +2326,14 @@ const fn default_attestation_due_bps() -> u64 { 3333 } +const fn default_attestation_due_bps_gloas() -> u64 { + 2500 +} + +const fn default_payload_attestation_due_bps() -> u64 { + 7500 +} + const fn default_aggregate_due_bps() -> u64 { 6667 } @@ -2539,6 +2585,8 @@ impl Config { proposer_reorg_cutoff_bps: spec.proposer_reorg_cutoff_bps, attestation_due_bps: spec.attestation_due_bps, + attestation_due_bps_gloas: spec.attestation_due_bps_gloas, + payload_attestation_due_bps: spec.payload_attestation_due_bps, aggregate_due_bps: spec.aggregate_due_bps, sync_message_due_bps: spec.sync_message_due_bps, contribution_due_bps: spec.contribution_due_bps, @@ -2632,6 +2680,8 @@ impl Config { min_epochs_for_data_column_sidecars_requests, proposer_reorg_cutoff_bps, attestation_due_bps, + attestation_due_bps_gloas, + payload_attestation_due_bps, aggregate_due_bps, sync_message_due_bps, contribution_due_bps, @@ -2731,6 +2781,8 @@ impl Config { 
proposer_reorg_cutoff_bps, attestation_due_bps, + attestation_due_bps_gloas, + payload_attestation_due_bps, aggregate_due_bps, sync_message_due_bps, contribution_due_bps, @@ -3634,11 +3686,9 @@ mod yaml_tests { "EIP7928_FORK_VERSION", "EIP7928_FORK_EPOCH", // Gloas params not yet in Config - "ATTESTATION_DUE_BPS_GLOAS", "AGGREGATE_DUE_BPS_GLOAS", "SYNC_MESSAGE_DUE_BPS_GLOAS", "CONTRIBUTION_DUE_BPS_GLOAS", - "PAYLOAD_ATTESTATION_DUE_BPS", "MAX_REQUEST_PAYLOADS", // Gloas fork choice params not yet in Config "REORG_HEAD_WEIGHT_THRESHOLD", From 68f18efbe5df5c4e629a9851f951839a927c9444 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 2 Apr 2026 19:24:29 +1100 Subject: [PATCH 48/57] Fix minimal spec --- consensus/types/src/core/chain_spec.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/consensus/types/src/core/chain_spec.rs b/consensus/types/src/core/chain_spec.rs index 9ffa7d6f7e..e612c8b6db 100644 --- a/consensus/types/src/core/chain_spec.rs +++ b/consensus/types/src/core/chain_spec.rs @@ -1418,6 +1418,8 @@ impl ChainSpec { * Precomputed for 6000ms slot: 3333 bps = 1999ms, 6667 bps = 4000ms */ unaggregated_attestation_due: Duration::from_millis(1999), + unaggregated_attestation_due_gloas: Duration::from_millis(1500), + payload_attestation_due: Duration::from_millis(4500), aggregate_attestation_due: Duration::from_millis(4000), sync_message_due: Duration::from_millis(1999), contribution_and_proof_due: Duration::from_millis(4000), From 86ddd0d88d3f4650f56312830262a070137940ee Mon Sep 17 00:00:00 2001 From: Eitan Seri- Levi Date: Thu, 2 Apr 2026 19:09:56 -0700 Subject: [PATCH 49/57] Add EnvelopeRequestState logic --- .../network/src/sync/block_lookups/common.rs | 58 ++++++++- .../network/src/sync/block_lookups/mod.rs | 111 +++++++++++++----- .../sync/block_lookups/single_block_lookup.rs | 67 +++++++++-- beacon_node/network/src/sync/manager.rs | 30 ++--- .../network/src/sync/network_context.rs | 23 +++- 5 files changed, 221 insertions(+), 68 deletions(-) 
diff --git a/beacon_node/network/src/sync/block_lookups/common.rs b/beacon_node/network/src/sync/block_lookups/common.rs index edd99345b4..bb8d81cc6e 100644 --- a/beacon_node/network/src/sync/block_lookups/common.rs +++ b/beacon_node/network/src/sync/block_lookups/common.rs @@ -2,7 +2,7 @@ use crate::sync::block_lookups::single_block_lookup::{ LookupRequestError, SingleBlockLookup, SingleLookupRequestState, }; use crate::sync::block_lookups::{ - BlobRequestState, BlockRequestState, CustodyRequestState, PeerId, + BlobRequestState, BlockRequestState, CustodyRequestState, EnvelopeRequestState, PeerId, }; use crate::sync::manager::BlockProcessType; use crate::sync::network_context::{LookupRequestResult, SyncNetworkContext}; @@ -12,16 +12,17 @@ use parking_lot::RwLock; use std::collections::HashSet; use std::sync::Arc; use types::data::FixedBlobSidecarList; -use types::{DataColumnSidecarList, SignedBeaconBlock}; +use types::{DataColumnSidecarList, SignedBeaconBlock, SignedExecutionPayloadEnvelope}; use super::SingleLookupId; use super::single_block_lookup::{ComponentRequests, DownloadResult}; -#[derive(Debug, Copy, Clone)] +#[derive(Debug, Copy, Clone, PartialEq, Eq)] pub enum ResponseType { Block, Blob, CustodyColumn, + Envelope, } /// This trait unifies common single block lookup functionality across blocks and blobs. This @@ -151,6 +152,7 @@ impl RequestState for BlobRequestState { ComponentRequests::WaitingForBlock => Err("waiting for block"), ComponentRequests::ActiveBlobRequest(request, _) => Ok(request), ComponentRequests::ActiveCustodyRequest { .. } => Err("expecting custody request"), + ComponentRequests::ActiveEnvelopeRequest { .. } => Err("expecting envelope request"), ComponentRequests::NotNeeded { .. } => Err("not needed"), } } @@ -205,6 +207,7 @@ impl RequestState for CustodyRequestState { ComponentRequests::WaitingForBlock => Err("waiting for block"), ComponentRequests::ActiveBlobRequest { .. 
} => Err("expecting blob request"), ComponentRequests::ActiveCustodyRequest(request) => Ok(request), + ComponentRequests::ActiveEnvelopeRequest { .. } => Err("expecting envelope request"), ComponentRequests::NotNeeded { .. } => Err("not needed"), } } @@ -215,3 +218,52 @@ impl RequestState for CustodyRequestState { &mut self.state } } + +impl RequestState for EnvelopeRequestState { + type VerifiedResponseType = Arc>; + + fn make_request( + &self, + id: Id, + lookup_peers: Arc>>, + _: usize, + cx: &mut SyncNetworkContext, + ) -> Result { + cx.envelope_lookup_request(id, lookup_peers, self.block_root) + .map_err(LookupRequestError::SendFailedNetwork) + } + + fn send_for_processing( + id: Id, + download_result: DownloadResult, + cx: &SyncNetworkContext, + ) -> Result<(), LookupRequestError> { + let DownloadResult { + value, + block_root, + seen_timestamp, + .. + } = download_result; + cx.send_envelope_for_processing(id, value, seen_timestamp, block_root) + .map_err(LookupRequestError::SendFailedProcessor) + } + + fn response_type() -> ResponseType { + ResponseType::Envelope + } + + fn request_state_mut(request: &mut SingleBlockLookup) -> Result<&mut Self, &'static str> { + match &mut request.component_requests { + ComponentRequests::ActiveEnvelopeRequest(request) => Ok(request), + _ => Err("expecting envelope request"), + } + } + + fn get_state(&self) -> &SingleLookupRequestState { + &self.state + } + + fn get_state_mut(&mut self) -> &mut SingleLookupRequestState { + &mut self.state + } +} diff --git a/beacon_node/network/src/sync/block_lookups/mod.rs b/beacon_node/network/src/sync/block_lookups/mod.rs index 7b4e3ce753..b33c38d147 100644 --- a/beacon_node/network/src/sync/block_lookups/mod.rs +++ b/beacon_node/network/src/sync/block_lookups/mod.rs @@ -39,7 +39,9 @@ use fnv::FnvHashMap; use lighthouse_network::service::api_types::SingleLookupReqId; use lighthouse_network::{PeerAction, PeerId}; use lru_cache::LRUTimeCache; -pub use single_block_lookup::{BlobRequestState, 
BlockRequestState, CustodyRequestState}; +pub use single_block_lookup::{ + BlobRequestState, BlockRequestState, CustodyRequestState, EnvelopeRequestState, +}; use std::collections::hash_map::Entry; use std::sync::Arc; use std::time::Duration; @@ -344,6 +346,57 @@ impl BlockLookups { self.new_current_lookup(block_root_to_search, None, None, peers, cx) } + /// A block triggers the search of a parent envelope. + #[must_use = "only reference the new lookup if returns true"] + pub fn search_parent_envelope_of_child( + &mut self, + parent_root: Hash256, + peers: &[PeerId], + cx: &mut SyncNetworkContext, + ) -> bool { + // Check if there's already a lookup for this root (could be a block lookup or envelope + // lookup). If so, add peers and let it handle the envelope. + if let Some((&lookup_id, _lookup)) = self + .single_block_lookups + .iter_mut() + .find(|(_, lookup)| lookup.is_for_block(parent_root)) + { + if let Err(e) = self.add_peers_to_lookup_and_ancestors(lookup_id, peers, cx) { + warn!(error = ?e, "Error adding peers to envelope lookup"); + } + return true; + } + + if self.single_block_lookups.len() >= MAX_LOOKUPS { + warn!(?parent_root, "Dropping envelope lookup reached max"); + return false; + } + + let lookup = SingleBlockLookup::new_envelope_only(parent_root, peers, cx.next_id()); + let _guard = lookup.span.clone().entered(); + + let id = lookup.id; + let lookup = match self.single_block_lookups.entry(id) { + Entry::Vacant(entry) => entry.insert(lookup), + Entry::Occupied(_) => { + warn!(id, "Lookup exists with same id"); + return false; + } + }; + + debug!( + ?peers, + ?parent_root, + id = lookup.id, + "Created envelope-only lookup" + ); + metrics::inc_counter(&metrics::SYNC_LOOKUP_CREATED); + self.metrics.created_lookups += 1; + + let result = lookup.continue_requests(cx); + self.on_lookup_result(id, result, "new_envelope_lookup", cx) + } + /// Searches for a single block hash. If the blocks parent is unknown, a chain of blocks is /// constructed. 
/// Returns true if the lookup is created or already exists @@ -561,17 +614,13 @@ impl BlockLookups { self.on_processing_result_inner::>(id, result, cx) } BlockProcessType::SinglePayloadEnvelope { id, block_root } => { - match result { - BlockProcessingResult::Ok(_) => { - self.continue_envelope_child_lookups(block_root, cx); - } - BlockProcessingResult::Err(e) => { - debug!(%id, error = ?e, "Payload envelope processing failed"); - // TODO(EIP-7732): resolve awaiting_envelope on affected lookups so they can retry - } - _ => {} + let result = self + .on_processing_result_inner::>(id, result, cx); + // On successful envelope import, unblock child lookups waiting for this envelope + if matches!(&result, Ok(LookupResult::Completed)) { + self.continue_envelope_child_lookups(block_root, cx); } - return; + result } }; self.on_lookup_result(process_type.id(), lookup_result, "processing_result", cx); @@ -721,6 +770,7 @@ impl BlockLookups { ResponseType::CustodyColumn => { "lookup_custody_column_processing_failure" } + ResponseType::Envelope => "lookup_envelope_processing_failure", }, ); } @@ -764,22 +814,20 @@ impl BlockLookups { } Action::ParentEnvelopeUnknown { parent_root } => { let peers = lookup.all_peers(); - lookup.set_awaiting_envelope(parent_root); - // Pick a peer to request the envelope from - let peer_id = peers.first().copied().ok_or_else(|| { - LookupRequestError::Failed("No peers available for envelope request".to_owned()) - })?; - match cx.envelope_lookup_request(lookup_id, peer_id, parent_root) { - Ok(_) => { - debug!( - id = lookup_id, - ?block_root, - ?parent_root, - "Requesting missing parent envelope" - ); - Ok(LookupResult::Pending) - } - Err(e) => Err(LookupRequestError::SendFailedNetwork(e)), + lookup.set_awaiting_parent_envelope(parent_root); + let envelope_lookup_exists = self.search_parent_envelope_of_child(parent_root, &peers, cx); + if envelope_lookup_exists { + debug!( + id = lookup_id, + ?block_root, + ?parent_root, + "Marking lookup as 
awaiting parent envelope" + ); + Ok(LookupResult::Pending) + } else { + Err(LookupRequestError::Failed(format!( + "Envelope lookup could not be created for {parent_root:?}" + ))) } } Action::Drop(reason) => { @@ -858,8 +906,8 @@ impl BlockLookups { let mut lookup_results = vec![]; for (id, lookup) in self.single_block_lookups.iter_mut() { - if lookup.awaiting_envelope() == Some(block_root) { - lookup.resolve_awaiting_envelope(); + if lookup.awaiting_parent_envelope() == Some(block_root) { + lookup.resolve_awaiting_parent_envelope(); debug!( envelope_root = ?block_root, id, @@ -894,7 +942,10 @@ impl BlockLookups { let child_lookups = self .single_block_lookups .iter() - .filter(|(_, lookup)| lookup.awaiting_parent() == Some(dropped_lookup.block_root())) + .filter(|(_, lookup)| { + lookup.awaiting_parent() == Some(dropped_lookup.block_root()) + || lookup.awaiting_parent_envelope() == Some(dropped_lookup.block_root()) + }) .map(|(id, _)| *id) .collect::>(); diff --git a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs index 51cc191056..d59753b960 100644 --- a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs +++ b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs @@ -16,7 +16,9 @@ use store::Hash256; use strum::IntoStaticStr; use tracing::{Span, debug_span}; use types::data::FixedBlobSidecarList; -use types::{DataColumnSidecarList, EthSpec, SignedBeaconBlock, Slot}; +use types::{ + DataColumnSidecarList, EthSpec, SignedBeaconBlock, SignedExecutionPayloadEnvelope, Slot, +}; // Dedicated enum for LookupResult to force its usage #[must_use = "LookupResult must be handled with on_lookup_result"] @@ -70,7 +72,7 @@ pub struct SingleBlockLookup { peers: Arc>>, block_root: Hash256, awaiting_parent: Option, - awaiting_envelope: Option, + awaiting_parent_envelope: Option, created: Instant, pub(crate) span: Span, } @@ -80,6 +82,7 @@ pub(crate) enum 
ComponentRequests { WaitingForBlock, ActiveBlobRequest(BlobRequestState, usize), ActiveCustodyRequest(CustodyRequestState), + ActiveEnvelopeRequest(EnvelopeRequestState), // When printing in debug this state display the reason why it's not needed #[allow(dead_code)] NotNeeded(&'static str), @@ -105,12 +108,26 @@ impl SingleBlockLookup { peers: Arc::new(RwLock::new(HashSet::from_iter(peers.iter().copied()))), block_root: requested_block_root, awaiting_parent, - awaiting_envelope: None, + awaiting_parent_envelope: None, created: Instant::now(), span: lookup_span, } } + /// Create an envelope-only lookup. The block is already imported, we just need the envelope. + pub fn new_envelope_only(block_root: Hash256, peers: &[PeerId], id: Id) -> Self { + let mut lookup = Self::new(block_root, peers, id, None); + // Block is already imported, mark as completed + lookup + .block_request_state + .state + .on_completed_request("block already imported") + .expect("block state starts as AwaitingDownload"); + lookup.component_requests = + ComponentRequests::ActiveEnvelopeRequest(EnvelopeRequestState::new(block_root)); + lookup + } + /// Reset the status of all internal requests pub fn reset_requests(&mut self) { self.block_request_state = BlockRequestState::new(self.block_root); @@ -146,18 +163,18 @@ impl SingleBlockLookup { self.awaiting_parent = None; } - pub fn awaiting_envelope(&self) -> Option { - self.awaiting_envelope + pub fn awaiting_parent_envelope(&self) -> Option { + self.awaiting_parent_envelope } /// Mark this lookup as awaiting a parent envelope to be imported before processing. - pub fn set_awaiting_envelope(&mut self, parent_root: Hash256) { - self.awaiting_envelope = Some(parent_root); + pub fn set_awaiting_parent_envelope(&mut self, parent_root: Hash256) { + self.awaiting_parent_envelope = Some(parent_root); } /// Mark this lookup as no longer awaiting a parent envelope. 
- pub fn resolve_awaiting_envelope(&mut self) { - self.awaiting_envelope = None; + pub fn resolve_awaiting_parent_envelope(&mut self) { + self.awaiting_parent_envelope = None; } /// Returns the time elapsed since this lookup was created @@ -194,6 +211,7 @@ impl SingleBlockLookup { ComponentRequests::WaitingForBlock => false, ComponentRequests::ActiveBlobRequest(request, _) => request.state.is_processed(), ComponentRequests::ActiveCustodyRequest(request) => request.state.is_processed(), + ComponentRequests::ActiveEnvelopeRequest(request) => request.state.is_processed(), ComponentRequests::NotNeeded { .. } => true, } } @@ -201,7 +219,7 @@ impl SingleBlockLookup { /// Returns true if this request is expecting some event to make progress pub fn is_awaiting_event(&self) -> bool { self.awaiting_parent.is_some() - || self.awaiting_envelope.is_some() + || self.awaiting_parent_envelope.is_some() || self.block_request_state.state.is_awaiting_event() || match &self.component_requests { // If components are waiting for the block request to complete, here we should @@ -214,6 +232,9 @@ impl SingleBlockLookup { ComponentRequests::ActiveCustodyRequest(request) => { request.state.is_awaiting_event() } + ComponentRequests::ActiveEnvelopeRequest(request) => { + request.state.is_awaiting_event() + } ComponentRequests::NotNeeded { .. } => false, } } @@ -283,6 +304,9 @@ impl SingleBlockLookup { ComponentRequests::ActiveCustodyRequest(_) => { self.continue_request::>(cx, 0)? } + ComponentRequests::ActiveEnvelopeRequest(_) => { + self.continue_request::>(cx, 0)? + } ComponentRequests::NotNeeded { .. 
} => {} // do nothing } @@ -304,7 +328,8 @@ impl SingleBlockLookup { expected_blobs: usize, ) -> Result<(), LookupRequestError> { let id = self.id; - let awaiting_event = self.awaiting_parent.is_some() || self.awaiting_envelope.is_some(); + let awaiting_event = + self.awaiting_parent.is_some() || self.awaiting_parent_envelope.is_some(); let request = R::request_state_mut(self).map_err(|e| LookupRequestError::BadState(e.to_owned()))?; @@ -444,6 +469,26 @@ impl BlockRequestState { } } +/// The state of the envelope request component of a `SingleBlockLookup`. +/// Used for envelope-only lookups where the parent block is already imported +/// but its execution payload envelope is missing. +#[derive(Educe)] +#[educe(Debug)] +pub struct EnvelopeRequestState { + #[educe(Debug(ignore))] + pub block_root: Hash256, + pub state: SingleLookupRequestState>>, +} + +impl EnvelopeRequestState { + pub fn new(block_root: Hash256) -> Self { + Self { + block_root, + state: SingleLookupRequestState::new(), + } + } +} + #[derive(Debug, Clone)] pub struct DownloadResult { pub value: T, diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 256752d5fb..2cc35081b7 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -45,6 +45,7 @@ use crate::service::NetworkMessage; use crate::status::ToStatusMessage; use crate::sync::block_lookups::{ BlobRequestState, BlockComponent, BlockRequestState, CustodyRequestState, DownloadResult, + EnvelopeRequestState, }; use crate::sync::custody_backfill_sync::CustodyBackFillSync; use crate::sync::network_context::{PeerGroup, RpcResponseResult}; @@ -1278,27 +1279,14 @@ impl SyncManager { .network .on_single_envelope_response(id, peer_id, rpc_event) { - match resp { - Ok((envelope, seen_timestamp)) => { - let block_root = envelope.beacon_block_root(); - debug!( - ?block_root, - %id, - "Downloaded payload envelope, sending for processing" - ); - if let Err(e) = 
self.network.send_envelope_for_processing( - id.req_id, - envelope, - seen_timestamp, - block_root, - ) { - error!(error = ?e, "Failed to send envelope for processing"); - } - } - Err(e) => { - debug!(error = ?e, %id, "Payload envelope download failed"); - } - } + self.block_lookups + .on_download_response::>( + id, + resp.map(|(value, seen_timestamp)| { + (value, PeerGroup::from_single(peer_id), seen_timestamp) + }), + &mut self.network, + ) } } diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index e9d289b777..328940d672 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -944,9 +944,26 @@ impl SyncNetworkContext { pub fn envelope_lookup_request( &mut self, lookup_id: SingleLookupId, - peer_id: PeerId, + lookup_peers: Arc>>, block_root: Hash256, - ) -> Result { + ) -> Result { + let active_request_count_by_peer = self.active_request_count_by_peer(); + let Some(peer_id) = lookup_peers + .read() + .iter() + .map(|peer| { + ( + active_request_count_by_peer.get(peer).copied().unwrap_or(0), + rand::random::(), + peer, + ) + }) + .min() + .map(|(_, _, peer)| *peer) + else { + return Ok(LookupRequestResult::Pending("no peers")); + }; + let id = SingleLookupReqId { lookup_id, req_id: self.next_id(), @@ -988,7 +1005,7 @@ impl SyncNetworkContext { request_span, ); - Ok(id.req_id) + Ok(LookupRequestResult::RequestSent(id.req_id)) } /// Request necessary blobs for `block_root`. 
Requests only the necessary blobs by checking: From 3523804515f9e05f8c1782152694d35a1951b0e5 Mon Sep 17 00:00:00 2001 From: Eitan Seri- Levi Date: Thu, 2 Apr 2026 19:30:12 -0700 Subject: [PATCH 50/57] cleanup --- .../src/beacon/execution_payload_envelope.rs | 15 ++++--- .../network/src/sync/block_lookups/mod.rs | 44 ++++++++++++++++++- beacon_node/network/src/sync/manager.rs | 38 +++++++++++++++- 3 files changed, 88 insertions(+), 9 deletions(-) diff --git a/beacon_node/http_api/src/beacon/execution_payload_envelope.rs b/beacon_node/http_api/src/beacon/execution_payload_envelope.rs index ea8c0d4b8a..7f81f7bf25 100644 --- a/beacon_node/http_api/src/beacon/execution_payload_envelope.rs +++ b/beacon_node/http_api/src/beacon/execution_payload_envelope.rs @@ -132,18 +132,21 @@ pub async fn publish_execution_payload_envelope( }; let ctx = chain.gossip_verification_context(); - let Ok(gossip_verifed_envelope) = GossipVerifiedEnvelope::new(signed_envelope, &ctx) else { - warn!(%slot, %beacon_block_root, "Execution payload envelope rejected"); - return Err(warp_utils::reject::custom_bad_request( - "execution payload envelope rejected, gossip verification".to_string(), - )); + let gossip_verified_envelope = match GossipVerifiedEnvelope::new(signed_envelope, &ctx) { + Ok(envelope) => envelope, + Err(e) => { + warn!(%slot, %beacon_block_root, error = ?e, "Execution payload envelope rejected"); + return Err(warp_utils::reject::custom_bad_request(format!( + "execution payload envelope rejected: {e:?}", + ))); + } }; // Import the envelope locally (runs state transition and notifies the EL). 
chain .process_execution_payload_envelope( beacon_block_root, - gossip_verifed_envelope, + gossip_verified_envelope, NotifyExecutionLayer::Yes, BlockImportSource::HttpApi, publish_fn, diff --git a/beacon_node/network/src/sync/block_lookups/mod.rs b/beacon_node/network/src/sync/block_lookups/mod.rs index b33c38d147..4d14479627 100644 --- a/beacon_node/network/src/sync/block_lookups/mod.rs +++ b/beacon_node/network/src/sync/block_lookups/mod.rs @@ -228,6 +228,47 @@ impl BlockLookups { } } + /// A child block's parent envelope is missing. Create a child lookup (with the block component) + /// that waits for the parent envelope, and an envelope-only lookup for the parent. + /// + /// Returns true if both lookups are created or already exist. + #[must_use = "only reference the new lookup if returns true"] + pub fn search_child_and_parent_envelope( + &mut self, + block_root: Hash256, + block_component: BlockComponent, + parent_root: Hash256, + peer_id: PeerId, + cx: &mut SyncNetworkContext, + ) -> bool { + let envelope_lookup_exists = + self.search_parent_envelope_of_child(parent_root, &[peer_id], cx); + if envelope_lookup_exists { + // Create child lookup that waits for the parent envelope (not parent block). + // The child block itself is available, so we pass it as a component. + let child_created = self.new_current_lookup( + block_root, + Some(block_component), + None, // not awaiting parent block + &[], + cx, + ); + // Set awaiting_parent_envelope on the child lookup + if child_created { + if let Some((_, lookup)) = self + .single_block_lookups + .iter_mut() + .find(|(_, l)| l.is_for_block(block_root)) + { + lookup.set_awaiting_parent_envelope(parent_root); + } + } + child_created + } else { + false + } + } + /// Seach a block whose parent root is unknown. 
/// /// Returns true if the lookup is created or already exists @@ -815,7 +856,8 @@ impl BlockLookups { Action::ParentEnvelopeUnknown { parent_root } => { let peers = lookup.all_peers(); lookup.set_awaiting_parent_envelope(parent_root); - let envelope_lookup_exists = self.search_parent_envelope_of_child(parent_root, &peers, cx); + let envelope_lookup_exists = + self.search_parent_envelope_of_child(parent_root, &peers, cx); if envelope_lookup_exists { debug!( id = lookup_id, diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 2cc35081b7..1ca338ccd3 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -935,9 +935,9 @@ impl SyncManager { debug!( %block_root, %parent_root, - "Parent envelope not yet available, creating lookup" + "Parent envelope not yet available, creating envelope lookup" ); - self.handle_unknown_parent( + self.handle_unknown_parent_envelope( peer_id, block_root, parent_root, @@ -1055,6 +1055,40 @@ impl SyncManager { } } + /// Handle a block whose parent block is known but parent envelope is missing. + /// Creates an envelope-only lookup for the parent and a child lookup that waits for it. 
+ fn handle_unknown_parent_envelope( + &mut self, + peer_id: PeerId, + block_root: Hash256, + parent_root: Hash256, + slot: Slot, + block_component: BlockComponent, + ) { + match self.should_search_for_block(Some(slot), &peer_id) { + Ok(_) => { + if self.block_lookups.search_child_and_parent_envelope( + block_root, + block_component, + parent_root, + peer_id, + &mut self.network, + ) { + // Lookups created + } else { + debug!( + ?block_root, + ?parent_root, + "No lookup created for child and parent envelope" + ); + } + } + Err(reason) => { + debug!(%block_root, %parent_root, reason, "Ignoring unknown parent envelope request"); + } + } + } + fn handle_unknown_block_root(&mut self, peer_id: PeerId, block_root: Hash256) { match self.should_search_for_block(None, &peer_id) { Ok(_) => { From 1cd4d57204f9283bbbceba46585476385a1c0c53 Mon Sep 17 00:00:00 2001 From: Eitan Seri- Levi Date: Thu, 2 Apr 2026 19:37:51 -0700 Subject: [PATCH 51/57] Fixes --- beacon_node/network/src/router.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/beacon_node/network/src/router.rs b/beacon_node/network/src/router.rs index 3fb2196975..3d82252a0c 100644 --- a/beacon_node/network/src/router.rs +++ b/beacon_node/network/src/router.rs @@ -336,7 +336,7 @@ impl Router { // TODO(EIP-7732): implement outgoing payload envelopes by range responses once // range sync requests them. 
Response::PayloadEnvelopesByRange(_) => { - unreachable!() + error!(%peer_id, "Unexpected PayloadEnvelopesByRange response"); } // Light client responses should not be received Response::LightClientBootstrap(_) From 214e3ce9f0ec4a4042e085c213b267fec63342f6 Mon Sep 17 00:00:00 2001 From: Eitan Seri- Levi Date: Fri, 3 Apr 2026 00:02:24 -0700 Subject: [PATCH 52/57] Cleanup --- .../beacon_chain/src/block_verification.rs | 37 ++++++++++++++- .../src/beacon/execution_payload_envelope.rs | 1 - .../gossip_methods.rs | 4 +- .../network_beacon_processor/sync_methods.rs | 8 +--- .../network/src/sync/block_lookups/mod.rs | 47 ++++++++++++------- 5 files changed, 72 insertions(+), 25 deletions(-) diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 916a207e62..2b46843901 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -60,6 +60,7 @@ use crate::execution_payload::{ }; use crate::kzg_utils::blobs_to_data_column_sidecars; use crate::observed_block_producers::SeenBlock; +use crate::payload_envelope_verification::EnvelopeError; use crate::validator_monitor::HISTORIC_EPOCHS as VALIDATOR_MONITOR_HISTORIC_EPOCHS; use crate::validator_pubkey_cache::ValidatorPubkeyCache; use crate::{ @@ -321,13 +322,18 @@ pub enum BlockError { bid_parent_root: Hash256, block_parent_root: Hash256, }, - /// The parent block is known but its execution payload envelope has not been received yet. + /// The child block is known but its parent execution payload envelope has not been received yet. /// /// ## Peer scoring /// /// It's unclear if this block is valid, but it cannot be fully verified without the parent's /// execution payload envelope. 
ParentEnvelopeUnknown { parent_root: Hash256 }, + + PayloadEnvelopeError { + e: Box, + penalize_peer: bool, + }, } /// Which specific signature(s) are invalid in a SignedBeaconBlock @@ -494,6 +500,35 @@ impl From for BlockError { } } +impl From for BlockError { + fn from(e: EnvelopeError) -> Self { + let penalize_peer = match &e { + // REJECT per spec: peer sent invalid envelope data + EnvelopeError::BadSignature + | EnvelopeError::BuilderIndexMismatch { .. } + | EnvelopeError::BlockHashMismatch { .. } + | EnvelopeError::SlotMismatch { .. } + | EnvelopeError::IncorrectBlockProposer { .. } => true, + // IGNORE per spec: not the peer's fault + EnvelopeError::BlockRootUnknown { .. } + | EnvelopeError::PriorToFinalization { .. } + | EnvelopeError::UnknownValidator { .. } => false, + // Internal errors: not the peer's fault + EnvelopeError::BeaconChainError(_) + | EnvelopeError::BeaconStateError(_) + | EnvelopeError::BlockProcessingError(_) + | EnvelopeError::EnvelopeProcessingError(_) + | EnvelopeError::ExecutionPayloadError(_) + | EnvelopeError::BlockError(_) + | EnvelopeError::InternalError(_) => false, + }; + BlockError::PayloadEnvelopeError { + e: Box::new(e), + penalize_peer, + } + } +} + /// Stores information about verifying a payload against an execution engine. #[derive(Debug, PartialEq, Clone, Encode, Decode)] pub struct PayloadVerificationOutcome { diff --git a/beacon_node/http_api/src/beacon/execution_payload_envelope.rs b/beacon_node/http_api/src/beacon/execution_payload_envelope.rs index 7f81f7bf25..3479d62f6a 100644 --- a/beacon_node/http_api/src/beacon/execution_payload_envelope.rs +++ b/beacon_node/http_api/src/beacon/execution_payload_envelope.rs @@ -90,7 +90,6 @@ pub(crate) fn post_beacon_execution_payload_envelope( .boxed() } /// Publishes a signed execution payload envelope to the network. -/// TODO(gloas): Add gossip verification (BroadcastValidation::Gossip) before import. 
pub async fn publish_execution_payload_envelope( envelope: SignedExecutionPayloadEnvelope, chain: Arc>, diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index 2e04847630..fe9e1755b6 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -1390,7 +1390,9 @@ impl NetworkBeaconProcessor { return None; } // BlobNotRequired is unreachable. Only constructed in `process_gossip_blob` - Err(e @ BlockError::InternalError(_)) | Err(e @ BlockError::BlobNotRequired(_)) => { + Err(e @ BlockError::InternalError(_)) + | Err(e @ BlockError::BlobNotRequired(_)) + | Err(e @ BlockError::PayloadEnvelopeError { .. }) => { error!(error = %e, "Internal block gossip validation error"); return None; } diff --git a/beacon_node/network/src/network_beacon_processor/sync_methods.rs b/beacon_node/network/src/network_beacon_processor/sync_methods.rs index f6d4940121..57d3d7d220 100644 --- a/beacon_node/network/src/network_beacon_processor/sync_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/sync_methods.rs @@ -109,9 +109,7 @@ impl NetworkBeaconProcessor { ); self.send_sync_message(SyncMessage::BlockComponentProcessed { process_type, - result: BlockProcessingResult::Err(BlockError::InternalError(format!( - "Envelope verification failed: {e:?}" - ))), + result: BlockProcessingResult::Err(e.into()), }); return; } @@ -138,9 +136,7 @@ impl NetworkBeaconProcessor { ?beacon_block_root, "RPC payload envelope processing failed" ); - BlockProcessingResult::Err(BlockError::InternalError(format!( - "Envelope processing failed: {e:?}" - ))) + BlockProcessingResult::Err(e.into()) } }; diff --git a/beacon_node/network/src/sync/block_lookups/mod.rs b/beacon_node/network/src/sync/block_lookups/mod.rs index 4d14479627..8a183a0b1b 100644 --- a/beacon_node/network/src/sync/block_lookups/mod.rs +++ 
b/beacon_node/network/src/sync/block_lookups/mod.rs @@ -217,6 +217,7 @@ impl BlockLookups { block_root, Some(block_component), Some(parent_root), + None, // On a `UnknownParentBlock` or `UnknownParentBlob` event the peer is not required // to have the rest of the block components (refer to decoupled blob gossip). Create // the lookup with zero peers to house the block components. @@ -246,30 +247,20 @@ impl BlockLookups { if envelope_lookup_exists { // Create child lookup that waits for the parent envelope (not parent block). // The child block itself is available, so we pass it as a component. - let child_created = self.new_current_lookup( + self.new_current_lookup( block_root, Some(block_component), None, // not awaiting parent block + Some(parent_root), &[], cx, - ); - // Set awaiting_parent_envelope on the child lookup - if child_created { - if let Some((_, lookup)) = self - .single_block_lookups - .iter_mut() - .find(|(_, l)| l.is_for_block(block_root)) - { - lookup.set_awaiting_parent_envelope(parent_root); - } - } - child_created + ) } else { false } } - /// Seach a block whose parent root is unknown. + /// Search a block whose parent root is unknown. /// /// Returns true if the lookup is created or already exists #[must_use = "only reference the new lookup if returns true"] @@ -279,7 +270,7 @@ impl BlockLookups { peer_source: &[PeerId], cx: &mut SyncNetworkContext, ) -> bool { - self.new_current_lookup(block_root, None, None, peer_source, cx) + self.new_current_lookup(block_root, None, None, None, peer_source, cx) } /// A block or blob triggers the search of a parent. @@ -384,7 +375,7 @@ impl BlockLookups { } // `block_root_to_search` is a failed chain check happens inside new_current_lookup - self.new_current_lookup(block_root_to_search, None, None, peers, cx) + self.new_current_lookup(block_root_to_search, None, None, None, peers, cx) } /// A block triggers the search of a parent envelope. 
@@ -447,6 +438,7 @@ impl BlockLookups { block_root: Hash256, block_component: Option>, awaiting_parent: Option, + awaiting_parent_envelope: Option, peers: &[PeerId], cx: &mut SyncNetworkContext, ) -> bool { @@ -501,6 +493,9 @@ impl BlockLookups { // If we know that this lookup has unknown parent (is awaiting a parent lookup to resolve), // signal here to hold processing downloaded data. let mut lookup = SingleBlockLookup::new(block_root, peers, cx.next_id(), awaiting_parent); + if let Some(parent_root) = awaiting_parent_envelope { + lookup.set_awaiting_parent_envelope(parent_root); + } let _guard = lookup.span.clone().entered(); // Add block components to the new request @@ -777,6 +772,26 @@ impl BlockLookups { // We opt to drop the lookup instead. Action::Drop(format!("{e:?}")) } + BlockError::PayloadEnvelopeError { e, penalize_peer } => { + debug!( + ?block_root, + error = ?e, + "Payload envelope processing error" + ); + if penalize_peer { + let peer_group = request_state.on_processing_failure()?; + for peer in peer_group.all() { + cx.report_peer( + *peer, + PeerAction::MidToleranceError, + "lookup_envelope_processing_failure", + ); + } + Action::Retry + } else { + Action::Drop(format!("{e:?}")) + } + } other => { debug!( ?block_root, From f897215684c8b1a91fc0d95f991f8e1aee17a96f Mon Sep 17 00:00:00 2001 From: Eitan Seri- Levi Date: Fri, 3 Apr 2026 01:02:57 -0700 Subject: [PATCH 53/57] refactor awaiting_parent field and some metrics --- .../network/src/sync/block_lookups/mod.rs | 50 +++++++------- .../src/sync/block_lookups/parent_chain.rs | 2 +- .../sync/block_lookups/single_block_lookup.rs | 66 ++++++++++++------- .../network/src/sync/network_context.rs | 4 ++ 4 files changed, 69 insertions(+), 53 deletions(-) diff --git a/beacon_node/network/src/sync/block_lookups/mod.rs b/beacon_node/network/src/sync/block_lookups/mod.rs index 8a183a0b1b..8dedcba2f4 100644 --- a/beacon_node/network/src/sync/block_lookups/mod.rs +++ 
b/beacon_node/network/src/sync/block_lookups/mod.rs @@ -22,7 +22,9 @@ use self::parent_chain::{NodeChain, compute_parent_chains}; pub use self::single_block_lookup::DownloadResult; -use self::single_block_lookup::{LookupRequestError, LookupResult, SingleBlockLookup}; +use self::single_block_lookup::{ + AwaitingParent, LookupRequestError, LookupResult, SingleBlockLookup, +}; use super::manager::{BlockProcessType, BlockProcessingResult, SLOT_IMPORT_TOLERANCE}; use super::network_context::{PeerGroup, RpcResponseError, SyncNetworkContext}; use crate::metrics; @@ -216,8 +218,7 @@ impl BlockLookups { self.new_current_lookup( block_root, Some(block_component), - Some(parent_root), - None, + Some(AwaitingParent::Block(parent_root)), // On a `UnknownParentBlock` or `UnknownParentBlob` event the peer is not required // to have the rest of the block components (refer to decoupled blob gossip). Create // the lookup with zero peers to house the block components. @@ -250,8 +251,7 @@ impl BlockLookups { self.new_current_lookup( block_root, Some(block_component), - None, // not awaiting parent block - Some(parent_root), + Some(AwaitingParent::Envelope(parent_root)), &[], cx, ) @@ -270,7 +270,7 @@ impl BlockLookups { peer_source: &[PeerId], cx: &mut SyncNetworkContext, ) -> bool { - self.new_current_lookup(block_root, None, None, None, peer_source, cx) + self.new_current_lookup(block_root, None, None, peer_source, cx) } /// A block or blob triggers the search of a parent. @@ -375,7 +375,7 @@ impl BlockLookups { } // `block_root_to_search` is a failed chain check happens inside new_current_lookup - self.new_current_lookup(block_root_to_search, None, None, None, peers, cx) + self.new_current_lookup(block_root_to_search, None, None, peers, cx) } /// A block triggers the search of a parent envelope. 
@@ -437,8 +437,7 @@ impl BlockLookups { &mut self, block_root: Hash256, block_component: Option>, - awaiting_parent: Option, - awaiting_parent_envelope: Option, + awaiting_parent: Option, peers: &[PeerId], cx: &mut SyncNetworkContext, ) -> bool { @@ -473,13 +472,14 @@ impl BlockLookups { } // Ensure that awaiting parent exists, otherwise this lookup won't be able to make progress - if let Some(awaiting_parent) = awaiting_parent + if let Some(AwaitingParent::Block(parent_root) | AwaitingParent::Envelope(parent_root)) = + awaiting_parent && !self .single_block_lookups .iter() - .any(|(_, lookup)| lookup.is_for_block(awaiting_parent)) + .any(|(_, lookup)| lookup.is_for_block(parent_root)) { - warn!(block_root = ?awaiting_parent, "Ignoring child lookup parent lookup not found"); + warn!(block_root = ?parent_root, "Ignoring child lookup parent lookup not found"); return false; } @@ -493,9 +493,6 @@ impl BlockLookups { // If we know that this lookup has unknown parent (is awaiting a parent lookup to resolve), // signal here to hold processing downloaded data. 
let mut lookup = SingleBlockLookup::new(block_root, peers, cx.next_id(), awaiting_parent); - if let Some(parent_root) = awaiting_parent_envelope { - lookup.set_awaiting_parent_envelope(parent_root); - } let _guard = lookup.span.clone().entered(); // Add block components to the new request @@ -516,9 +513,7 @@ impl BlockLookups { debug!( ?peers, ?block_root, - awaiting_parent = awaiting_parent - .map(|root| root.to_string()) - .unwrap_or("none".to_owned()), + ?awaiting_parent, id = lookup.id, "Created block lookup" ); @@ -936,7 +931,7 @@ impl BlockLookups { let mut lookup_results = vec![]; // < need to buffer lookup results to not re-borrow &mut self for (id, lookup) in self.single_block_lookups.iter_mut() { - if lookup.awaiting_parent() == Some(block_root) { + if lookup.awaiting_parent_block() == Some(block_root) { lookup.resolve_awaiting_parent(); debug!( parent_root = ?block_root, @@ -964,7 +959,7 @@ impl BlockLookups { for (id, lookup) in self.single_block_lookups.iter_mut() { if lookup.awaiting_parent_envelope() == Some(block_root) { - lookup.resolve_awaiting_parent_envelope(); + lookup.resolve_awaiting_parent(); debug!( envelope_root = ?block_root, id, @@ -996,12 +991,13 @@ impl BlockLookups { metrics::inc_counter_vec(&metrics::SYNC_LOOKUP_DROPPED, &[reason]); self.metrics.dropped_lookups += 1; + let dropped_root = dropped_lookup.block_root(); let child_lookups = self .single_block_lookups .iter() .filter(|(_, lookup)| { - lookup.awaiting_parent() == Some(dropped_lookup.block_root()) - || lookup.awaiting_parent_envelope() == Some(dropped_lookup.block_root()) + lookup.awaiting_parent_block() == Some(dropped_root) + || lookup.awaiting_parent_envelope() == Some(dropped_root) }) .map(|(id, _)| *id) .collect::>(); @@ -1170,17 +1166,15 @@ impl BlockLookups { &'a self, lookup: &'a SingleBlockLookup, ) -> Result<&'a SingleBlockLookup, String> { - if let Some(awaiting_parent) = lookup.awaiting_parent() { + if let Some(parent_root) = lookup.awaiting_parent_block() { if 
let Some(lookup) = self .single_block_lookups .values() - .find(|l| l.block_root() == awaiting_parent) + .find(|l| l.block_root() == parent_root) { self.find_oldest_ancestor_lookup(lookup) } else { - Err(format!( - "Lookup references unknown parent {awaiting_parent:?}" - )) + Err(format!("Lookup references unknown parent {parent_root:?}")) } } else { Ok(lookup) @@ -1213,7 +1207,7 @@ impl BlockLookups { } } - if let Some(parent_root) = lookup.awaiting_parent() { + if let Some(parent_root) = lookup.awaiting_parent_block() { if let Some((&child_id, _)) = self .single_block_lookups .iter() diff --git a/beacon_node/network/src/sync/block_lookups/parent_chain.rs b/beacon_node/network/src/sync/block_lookups/parent_chain.rs index 5deea1dd94..18363e9b8d 100644 --- a/beacon_node/network/src/sync/block_lookups/parent_chain.rs +++ b/beacon_node/network/src/sync/block_lookups/parent_chain.rs @@ -13,7 +13,7 @@ impl From<&SingleBlockLookup> for Node { fn from(value: &SingleBlockLookup) -> Self { Self { block_root: value.block_root(), - parent_root: value.awaiting_parent(), + parent_root: value.awaiting_parent_block(), } } } diff --git a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs index d59753b960..6687a1ec75 100644 --- a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs +++ b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs @@ -58,6 +58,14 @@ pub enum LookupRequestError { }, } +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum AwaitingParent { + /// Waiting for the parent block to be imported. + Block(Hash256), + /// The parent block is imported but its execution payload envelope is missing. 
+ Envelope(Hash256), +} + #[derive(Educe)] #[educe(Debug(bound(T: BeaconChainTypes)))] pub struct SingleBlockLookup { @@ -71,8 +79,7 @@ pub struct SingleBlockLookup { #[educe(Debug(method(fmt_peer_set_as_len)))] peers: Arc>>, block_root: Hash256, - awaiting_parent: Option, - awaiting_parent_envelope: Option, + awaiting_parent: Option, created: Instant, pub(crate) span: Span, } @@ -93,7 +100,7 @@ impl SingleBlockLookup { requested_block_root: Hash256, peers: &[PeerId], id: Id, - awaiting_parent: Option, + awaiting_parent: Option, ) -> Self { let lookup_span = debug_span!( "lh_single_block_lookup", @@ -108,7 +115,6 @@ impl SingleBlockLookup { peers: Arc::new(RwLock::new(HashSet::from_iter(peers.iter().copied()))), block_root: requested_block_root, awaiting_parent, - awaiting_parent_envelope: None, created: Instant::now(), span: lookup_span, } @@ -131,7 +137,16 @@ impl SingleBlockLookup { /// Reset the status of all internal requests pub fn reset_requests(&mut self) { self.block_request_state = BlockRequestState::new(self.block_root); - self.component_requests = ComponentRequests::WaitingForBlock; + match &self.component_requests { + ComponentRequests::ActiveEnvelopeRequest(_) => { + self.component_requests = ComponentRequests::ActiveEnvelopeRequest( + EnvelopeRequestState::new(self.block_root), + ); + } + _ => { + self.component_requests = ComponentRequests::WaitingForBlock; + } + } } /// Return the slot of this lookup's block if it's currently cached as `AwaitingProcessing` @@ -147,34 +162,39 @@ impl SingleBlockLookup { self.block_root } - pub fn awaiting_parent(&self) -> Option { + pub fn awaiting_parent(&self) -> Option { self.awaiting_parent } - /// Mark this lookup as awaiting a parent lookup from being processed. Meanwhile don't send - /// components for processing. - pub fn set_awaiting_parent(&mut self, parent_root: Hash256) { - self.awaiting_parent = Some(parent_root) - } - - /// Mark this lookup as no longer awaiting a parent lookup. 
Components can be sent for - /// processing. - pub fn resolve_awaiting_parent(&mut self) { - self.awaiting_parent = None; + /// Returns the parent root if awaiting a parent block. + pub fn awaiting_parent_block(&self) -> Option { + match self.awaiting_parent { + Some(AwaitingParent::Block(root)) => Some(root), + _ => None, + } } + /// Returns the parent root if awaiting a parent envelope. pub fn awaiting_parent_envelope(&self) -> Option { - self.awaiting_parent_envelope + match self.awaiting_parent { + Some(AwaitingParent::Envelope(root)) => Some(root), + _ => None, + } + } + + /// Mark this lookup as awaiting a parent block to be imported before processing. + pub fn set_awaiting_parent(&mut self, parent_root: Hash256) { + self.awaiting_parent = Some(AwaitingParent::Block(parent_root)); } /// Mark this lookup as awaiting a parent envelope to be imported before processing. pub fn set_awaiting_parent_envelope(&mut self, parent_root: Hash256) { - self.awaiting_parent_envelope = Some(parent_root); + self.awaiting_parent = Some(AwaitingParent::Envelope(parent_root)); } - /// Mark this lookup as no longer awaiting a parent envelope. - pub fn resolve_awaiting_parent_envelope(&mut self) { - self.awaiting_parent_envelope = None; + /// Mark this lookup as no longer awaiting any parent. 
+ pub fn resolve_awaiting_parent(&mut self) { + self.awaiting_parent = None; } /// Returns the time elapsed since this lookup was created @@ -219,7 +239,6 @@ impl SingleBlockLookup { /// Returns true if this request is expecting some event to make progress pub fn is_awaiting_event(&self) -> bool { self.awaiting_parent.is_some() - || self.awaiting_parent_envelope.is_some() || self.block_request_state.state.is_awaiting_event() || match &self.component_requests { // If components are waiting for the block request to complete, here we should @@ -328,8 +347,7 @@ impl SingleBlockLookup { expected_blobs: usize, ) -> Result<(), LookupRequestError> { let id = self.id; - let awaiting_event = - self.awaiting_parent.is_some() || self.awaiting_parent_envelope.is_some(); + let awaiting_event = self.awaiting_parent.is_some(); let request = R::request_state_mut(self).map_err(|e| LookupRequestError::BadState(e.to_owned()))?; diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index 328940d672..1176442202 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -1917,6 +1917,10 @@ impl SyncNetworkContext { "data_columns_by_range", self.data_columns_by_range_requests.len(), ), + ( + "payload_envelopes_by_root", + self.payload_envelopes_by_root_requests.len(), + ), ("custody_by_root", self.custody_by_root_requests.len()), ( "components_by_range", From b333841229c45349bf5226f7c12af77a577b50b6 Mon Sep 17 00:00:00 2001 From: Eitan Seri- Levi Date: Fri, 3 Apr 2026 01:04:34 -0700 Subject: [PATCH 54/57] update --- beacon_node/network/src/sync/block_lookups/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/beacon_node/network/src/sync/block_lookups/mod.rs b/beacon_node/network/src/sync/block_lookups/mod.rs index 8dedcba2f4..27d96de51d 100644 --- a/beacon_node/network/src/sync/block_lookups/mod.rs +++ 
b/beacon_node/network/src/sync/block_lookups/mod.rs @@ -246,8 +246,8 @@ impl BlockLookups { let envelope_lookup_exists = self.search_parent_envelope_of_child(parent_root, &[peer_id], cx); if envelope_lookup_exists { - // Create child lookup that waits for the parent envelope (not parent block). - // The child block itself is available, so we pass it as a component. + // Create child lookup that waits for the parent envelope. + // The child block itself has already been seen, so we pass it as a component. self.new_current_lookup( block_root, Some(block_component), From 65c2e0161247409580a50e8a01e3e3aa8dbebf32 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Fri, 3 Apr 2026 19:35:02 +1100 Subject: [PATCH 55/57] Gloas fork choice redux (#9025) Co-Authored-By: hopinheimer Co-Authored-By: Michael Sproul Co-Authored-By: hopinheimer <48147533+hopinheimer@users.noreply.github.com> Co-Authored-By: Eitan Seri- Levi Co-Authored-By: dapplion <35266934+dapplion@users.noreply.github.com> Co-Authored-By: Michael Sproul Co-Authored-By: Jimmy Chen Co-Authored-By: Daniel Knopik <107140945+dknopik@users.noreply.github.com> --- Cargo.lock | 3 + beacon_node/beacon_chain/src/beacon_chain.rs | 41 +- .../beacon_chain/src/block_production/mod.rs | 4 +- .../beacon_chain/src/block_verification.rs | 68 +- beacon_node/beacon_chain/src/builder.rs | 12 +- .../beacon_chain/src/canonical_head.rs | 23 +- beacon_node/beacon_chain/src/invariants.rs | 4 +- .../payload_envelope_verification/import.rs | 26 +- .../src/payload_envelope_verification/mod.rs | 2 + .../beacon_chain/src/persisted_fork_choice.rs | 64 +- beacon_node/beacon_chain/src/schema_change.rs | 15 +- .../src/schema_change/migration_schema_v29.rs | 151 ++ .../tests/payload_invalidation.rs | 8 +- beacon_node/beacon_chain/tests/store_tests.rs | 6 +- beacon_node/beacon_chain/tests/tests.rs | 14 +- beacon_node/http_api/src/lib.rs | 60 +- beacon_node/http_api/src/validator/mod.rs | 2 +- beacon_node/http_api/tests/tests.rs | 63 +- 
beacon_node/store/src/metadata.rs | 2 +- consensus/fork_choice/Cargo.toml | 1 + consensus/fork_choice/src/fork_choice.rs | 337 +++- consensus/fork_choice/src/lib.rs | 7 +- consensus/fork_choice/tests/tests.rs | 61 +- consensus/proto_array/Cargo.toml | 2 + consensus/proto_array/src/error.rs | 8 + .../src/fork_choice_test_definition.rs | 270 ++- .../execution_status.rs | 100 +- .../ffg_updates.rs | 76 +- .../gloas_payload.rs | 893 ++++++++++ .../fork_choice_test_definition/no_votes.rs | 33 + .../src/fork_choice_test_definition/votes.rs | 96 +- consensus/proto_array/src/lib.rs | 6 +- consensus/proto_array/src/proto_array.rs | 1524 ++++++++++++----- .../src/proto_array_fork_choice.rs | 620 +++++-- consensus/proto_array/src/ssz_container.rs | 80 +- .../indexed_payload_attestation.rs | 7 - consensus/types/src/core/chain_spec.rs | 56 +- testing/ef_tests/src/cases/fork_choice.rs | 121 +- testing/ef_tests/src/handler.rs | 23 +- testing/ef_tests/tests/tests.rs | 6 + 40 files changed, 4061 insertions(+), 834 deletions(-) create mode 100644 beacon_node/beacon_chain/src/schema_change/migration_schema_v29.rs create mode 100644 consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs diff --git a/Cargo.lock b/Cargo.lock index 3ba431d62e..726929e9ec 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3583,6 +3583,7 @@ name = "fork_choice" version = "0.1.0" dependencies = [ "beacon_chain", + "bls", "ethereum_ssz", "ethereum_ssz_derive", "fixed_bytes", @@ -7023,7 +7024,9 @@ dependencies = [ "fixed_bytes", "safe_arith", "serde", + "smallvec", "superstruct", + "typenum", "types", "yaml_serde", ] diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 310163b4a9..e226c707a4 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -1468,7 +1468,7 @@ impl BeaconChain { .proto_array() .heads_descended_from_finalization::(fork_choice.finalized_checkpoint()) .iter() - 
.map(|node| (node.root, node.slot)) + .map(|node| (node.root(), node.slot())) .collect() } @@ -2298,6 +2298,7 @@ impl BeaconChain { self.slot()?, verified.indexed_attestation().to_ref(), AttestationFromBlock::False, + &self.spec, ) .map_err(Into::into) } @@ -3934,7 +3935,7 @@ impl BeaconChain { let fork_choice_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_FORK_CHOICE); match fork_choice.get_head(current_slot, &self.spec) { // This block became the head, add it to the early attester cache. - Ok(new_head_root) if new_head_root == block_root => { + Ok((new_head_root, _)) if new_head_root == block_root => { if let Some(proto_block) = fork_choice.get_block(&block_root) { let new_head_is_optimistic = proto_block.execution_status.is_optimistic_or_invalid(); @@ -4734,6 +4735,7 @@ impl BeaconChain { }) } + // TODO(gloas): wrong for Gloas, needs an update pub fn overridden_forkchoice_update_params_or_failure_reason( &self, canonical_forkchoice_params: &ForkchoiceUpdateParameters, @@ -4768,7 +4770,7 @@ impl BeaconChain { // The slot of our potential re-org block is always 1 greater than the head block because we // only attempt single-slot re-orgs. 
- let head_slot = info.head_node.slot; + let head_slot = info.head_node.slot(); let re_org_block_slot = head_slot + 1; let fork_choice_slot = info.current_slot; @@ -4803,9 +4805,9 @@ impl BeaconChain { .fork_name_at_slot::(re_org_block_slot) .fulu_enabled() { - info.head_node.current_epoch_shuffling_id + info.head_node.current_epoch_shuffling_id() } else { - info.head_node.next_epoch_shuffling_id + info.head_node.next_epoch_shuffling_id() } .shuffling_decision_block; let proposer_index = self @@ -4831,13 +4833,15 @@ impl BeaconChain { return Err(Box::new(DoNotReOrg::NotProposing.into())); } - // If the current slot is already equal to the proposal slot (or we are in the tail end of - // the prior slot), then check the actual weight of the head against the head re-org threshold - // and the actual weight of the parent against the parent re-org threshold. + // TODO(gloas): reorg weight logic needs updating for Gloas. For now use + // total weight which is correct for pre-Gloas and conservative for post-Gloas. 
+ let head_weight = info.head_node.weight(); + let parent_weight = info.parent_node.weight(); + let (head_weak, parent_strong) = if fork_choice_slot == re_org_block_slot { ( - info.head_node.weight < info.re_org_head_weight_threshold, - info.parent_node.weight > info.re_org_parent_weight_threshold, + head_weight < info.re_org_head_weight_threshold, + parent_weight > info.re_org_parent_weight_threshold, ) } else { (true, true) @@ -4845,7 +4849,7 @@ impl BeaconChain { if !head_weak { return Err(Box::new( DoNotReOrg::HeadNotWeak { - head_weight: info.head_node.weight, + head_weight, re_org_head_weight_threshold: info.re_org_head_weight_threshold, } .into(), @@ -4854,7 +4858,7 @@ impl BeaconChain { if !parent_strong { return Err(Box::new( DoNotReOrg::ParentNotStrong { - parent_weight: info.parent_node.weight, + parent_weight, re_org_parent_weight_threshold: info.re_org_parent_weight_threshold, } .into(), @@ -4872,9 +4876,16 @@ impl BeaconChain { return Err(Box::new(DoNotReOrg::HeadNotLate.into())); } - let parent_head_hash = info.parent_node.execution_status.block_hash(); + // TODO(gloas): V29 nodes don't carry execution_status, so this returns + // None for post-Gloas re-orgs. Need to source the EL block hash from + // the bid's block_hash instead. Re-org is disabled for Gloas for now. 
+ let parent_head_hash = info + .parent_node + .execution_status() + .ok() + .and_then(|execution_status| execution_status.block_hash()); let forkchoice_update_params = ForkchoiceUpdateParameters { - head_root: info.parent_node.root, + head_root: info.parent_node.root(), head_hash: parent_head_hash, justified_hash: canonical_forkchoice_params.justified_hash, finalized_hash: canonical_forkchoice_params.finalized_hash, @@ -4882,7 +4893,7 @@ impl BeaconChain { debug!( canonical_head = ?head_block_root, - ?info.parent_node.root, + parent_root = ?info.parent_node.root(), slot = %fork_choice_slot, "Fork choice update overridden" ); diff --git a/beacon_node/beacon_chain/src/block_production/mod.rs b/beacon_node/beacon_chain/src/block_production/mod.rs index 256b67086a..bf42923cbe 100644 --- a/beacon_node/beacon_chain/src/block_production/mod.rs +++ b/beacon_node/beacon_chain/src/block_production/mod.rs @@ -228,7 +228,7 @@ impl BeaconChain { }) .ok()?; drop(proposer_head_timer); - let re_org_parent_block = proposer_head.parent_node.root; + let re_org_parent_block = proposer_head.parent_node.root(); let (state_root, state) = self .store @@ -245,7 +245,7 @@ impl BeaconChain { info!( weak_head = ?canonical_head, parent = ?re_org_parent_block, - head_weight = proposer_head.head_node.weight, + head_weight = proposer_head.head_node.weight(), threshold_weight = proposer_head.re_org_head_weight_threshold, "Attempting re-org due to weak head" ); diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 802b090f6a..1ce1137f1e 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -1670,6 +1670,7 @@ impl ExecutionPendingBlock { current_slot, indexed_attestation, AttestationFromBlock::True, + &chain.spec, ) { Ok(()) => Ok(()), // Ignore invalid attestations whilst importing attestations from a block. 
The @@ -1678,6 +1679,31 @@ impl ExecutionPendingBlock { Err(e) => Err(BlockError::BeaconChainError(Box::new(e.into()))), }?; } + + // Register each payload attestation in the block with fork choice. + if let Ok(payload_attestations) = block.message().body().payload_attestations() { + for (i, payload_attestation) in payload_attestations.iter().enumerate() { + let indexed_payload_attestation = consensus_context + .get_indexed_payload_attestation(&state, payload_attestation, &chain.spec) + .map_err(|e| BlockError::PerBlockProcessingError(e.into_with_index(i)))?; + + let ptc = state + .get_ptc(indexed_payload_attestation.data.slot, &chain.spec) + .map_err(|e| BlockError::BeaconChainError(Box::new(e.into())))?; + + // Ignore invalid payload attestations from blocks (same as + // regular attestations — the block may be old). + if let Err(e) = fork_choice.on_payload_attestation( + current_slot, + indexed_payload_attestation, + AttestationFromBlock::True, + &ptc.0, + ) && !matches!(e, ForkChoiceError::InvalidPayloadAttestation(_)) + { + return Err(BlockError::BeaconChainError(Box::new(e.into()))); + } + } + } drop(fork_choice); Ok(Self { @@ -1934,25 +1960,31 @@ fn load_parent>( // Post-Gloas we must also fetch a state with the correct payload status. If the current // block builds upon the payload of its parent block, then we know the parent block is FULL // and we need to load the full state. - let (payload_status, parent_state_root) = - if block.as_block().fork_name_unchecked().gloas_enabled() - && let Ok(parent_bid_block_hash) = parent_block.payload_bid_block_hash() - { - if block.as_block().is_parent_block_full(parent_bid_block_hash) { - // TODO(gloas): loading the envelope here is not very efficient - // TODO(gloas): check parent payload existence prior to this point? 
- let envelope = chain.store.get_payload_envelope(&root)?.ok_or_else(|| { - BeaconChainError::DBInconsistent(format!( - "Missing envelope for parent block {root:?}", - )) - })?; - (StatePayloadStatus::Full, envelope.message.state_root) - } else { - (StatePayloadStatus::Pending, parent_block.state_root()) - } - } else { - (StatePayloadStatus::Pending, parent_block.state_root()) + let (payload_status, parent_state_root) = if parent_block.slot() == chain.spec.genesis_slot + { + // Genesis state is always pending, there is no such thing as a "genesis envelope". + // See: https://github.com/ethereum/consensus-specs/issues/5043 + (StatePayloadStatus::Pending, parent_block.state_root()) + } else if !block.as_block().fork_name_unchecked().gloas_enabled() { + // All pre-Gloas parent states are pending. + (StatePayloadStatus::Pending, parent_block.state_root()) + } else if let Ok(parent_bid_block_hash) = parent_block.payload_bid_block_hash() + && block.as_block().is_parent_block_full(parent_bid_block_hash) + { + // Post-Gloas Full block case. + // TODO(gloas): loading the envelope here is not very efficient + let Some(envelope) = chain.store.get_payload_envelope(&root)? else { + return Err(BeaconChainError::DBInconsistent(format!( + "Missing envelope for parent block {root:?}", + )) + .into()); }; + let state_root = envelope.message.state_root; + (StatePayloadStatus::Full, state_root) + } else { + // Post-Gloas empty block case (also covers the Gloas fork transition). + (StatePayloadStatus::Pending, parent_block.state_root()) + }; let (parent_state_root, state) = chain .store .get_advanced_hot_state(root, payload_status, block.slot(), parent_state_root)? 
diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 7eb92060a2..11b87351b1 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -45,7 +45,7 @@ use tree_hash::TreeHash; use types::data::CustodyIndex; use types::{ BeaconBlock, BeaconState, BlobSidecarList, ChainSpec, ColumnIndex, DataColumnSidecarList, - Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot, StatePayloadStatus, + Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot, }; /// An empty struct used to "witness" all the `BeaconChainTypes` traits. It has no user-facing @@ -776,7 +776,7 @@ where slot_clock.now().ok_or("Unable to read slot")? }; - let initial_head_block_root = fork_choice + let (initial_head_block_root, head_payload_status) = fork_choice .get_head(current_slot, &self.spec) .map_err(|e| format!("Unable to get fork choice head: {:?}", e))?; @@ -786,13 +786,12 @@ where .map_err(|e| descriptive_db_error("head block", &e))? .ok_or("Head block not found in store")?; - // TODO(gloas): update head loading to load Full block once fork choice works - let payload_status = StatePayloadStatus::Pending; + let state_payload_status = head_payload_status.as_state_payload_status(); let (_head_state_root, head_state) = store .get_advanced_hot_state( head_block_root, - payload_status, + state_payload_status, current_slot, head_block.state_root(), ) @@ -923,7 +922,8 @@ where let genesis_validators_root = head_snapshot.beacon_state.genesis_validators_root(); let genesis_time = head_snapshot.beacon_state.genesis_time(); - let canonical_head = CanonicalHead::new(fork_choice, Arc::new(head_snapshot)); + let canonical_head = + CanonicalHead::new(fork_choice, Arc::new(head_snapshot), head_payload_status); let shuffling_cache_size = self.chain_config.shuffling_cache_size; let complete_blob_backfill = self.chain_config.complete_blob_backfill; diff --git a/beacon_node/beacon_chain/src/canonical_head.rs 
b/beacon_node/beacon_chain/src/canonical_head.rs index f6377e6ea5..cd53d0ef7c 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -107,6 +107,8 @@ pub struct CachedHead { /// This value may be distinct to the `self.snapshot.beacon_state.finalized_checkpoint`. /// This value should be used over the beacon state value in practically all circumstances. finalized_checkpoint: Checkpoint, + /// The payload status of the head block, as determined by fork choice. + head_payload_status: proto_array::PayloadStatus, /// The `execution_payload.block_hash` of the block at the head of the chain. Set to `None` /// before Bellatrix. head_hash: Option, @@ -231,6 +233,10 @@ impl CachedHead { finalized_hash: self.finalized_hash, } } + + pub fn head_payload_status(&self) -> proto_array::PayloadStatus { + self.head_payload_status + } } /// Represents the "canonical head" of the beacon chain. @@ -261,6 +267,7 @@ impl CanonicalHead { pub fn new( fork_choice: BeaconForkChoice, snapshot: Arc>, + head_payload_status: proto_array::PayloadStatus, ) -> Self { let fork_choice_view = fork_choice.cached_fork_choice_view(); let forkchoice_update_params = fork_choice.get_forkchoice_update_parameters(); @@ -268,6 +275,7 @@ impl CanonicalHead { snapshot, justified_checkpoint: fork_choice_view.justified_checkpoint, finalized_checkpoint: fork_choice_view.finalized_checkpoint, + head_payload_status, head_hash: forkchoice_update_params.head_hash, justified_hash: forkchoice_update_params.justified_hash, finalized_hash: forkchoice_update_params.finalized_hash, @@ -295,9 +303,11 @@ impl CanonicalHead { store: &BeaconStore, spec: &ChainSpec, ) -> Result<(), Error> { - let fork_choice = + let mut fork_choice = >::load_fork_choice(store.clone(), reset_payload_statuses, spec)? 
.ok_or(Error::MissingPersistedForkChoice)?; + let current_slot_for_head = fork_choice.fc_store().get_current_slot(); + let (_, head_payload_status) = fork_choice.get_head(current_slot_for_head, spec)?; let fork_choice_view = fork_choice.cached_fork_choice_view(); let beacon_block_root = fork_choice_view.head_block_root; let beacon_block = store @@ -328,6 +338,7 @@ impl CanonicalHead { snapshot: Arc::new(snapshot), justified_checkpoint: fork_choice_view.justified_checkpoint, finalized_checkpoint: fork_choice_view.finalized_checkpoint, + head_payload_status, head_hash: forkchoice_update_params.head_hash, justified_hash: forkchoice_update_params.justified_hash, finalized_hash: forkchoice_update_params.finalized_hash, @@ -601,11 +612,12 @@ impl BeaconChain { justified_checkpoint: old_cached_head.justified_checkpoint(), finalized_checkpoint: old_cached_head.finalized_checkpoint(), }; + let old_payload_status = old_cached_head.head_payload_status(); let mut fork_choice_write_lock = self.canonical_head.fork_choice_write_lock(); // Recompute the current head via the fork choice algorithm. - fork_choice_write_lock.get_head(current_slot, &self.spec)?; + let (_, new_payload_status) = fork_choice_write_lock.get_head(current_slot, &self.spec)?; // Downgrade the fork choice write-lock to a read lock, without allowing access to any // other writers. @@ -650,9 +662,8 @@ impl BeaconChain { }); } - // Exit early if the head or justified/finalized checkpoints have not changed, there's - // nothing to do. - if new_view == old_view { + // Exit early if the head, checkpoints, and payload status have not changed. 
+ if new_view == old_view && new_payload_status == old_payload_status { debug!( head = ?new_view.head_block_root, "No change in canonical head" @@ -709,6 +720,7 @@ impl BeaconChain { snapshot: Arc::new(new_snapshot), justified_checkpoint: new_view.justified_checkpoint, finalized_checkpoint: new_view.finalized_checkpoint, + head_payload_status: new_payload_status, head_hash: new_forkchoice_update_parameters.head_hash, justified_hash: new_forkchoice_update_parameters.justified_hash, finalized_hash: new_forkchoice_update_parameters.finalized_hash, @@ -736,6 +748,7 @@ impl BeaconChain { snapshot: old_cached_head.snapshot.clone(), justified_checkpoint: new_view.justified_checkpoint, finalized_checkpoint: new_view.finalized_checkpoint, + head_payload_status: new_payload_status, head_hash: new_forkchoice_update_parameters.head_hash, justified_hash: new_forkchoice_update_parameters.justified_hash, finalized_hash: new_forkchoice_update_parameters.finalized_hash, diff --git a/beacon_node/beacon_chain/src/invariants.rs b/beacon_node/beacon_chain/src/invariants.rs index 7bcec7b0b4..b365f37a0a 100644 --- a/beacon_node/beacon_chain/src/invariants.rs +++ b/beacon_node/beacon_chain/src/invariants.rs @@ -23,9 +23,9 @@ impl BeaconChain { // Only check blocks that are descendants of the finalized checkpoint. // Pruned non-canonical fork blocks may linger in the proto-array but // are legitimately absent from the database. 
- fc.is_finalized_checkpoint_or_descendant(node.root) + fc.is_finalized_checkpoint_or_descendant(node.root()) }) - .map(|node| (node.root, node.slot)) + .map(|node| (node.root(), node.slot())) .collect() }; diff --git a/beacon_node/beacon_chain/src/payload_envelope_verification/import.rs b/beacon_node/beacon_chain/src/payload_envelope_verification/import.rs index 39925d65d2..7e79799310 100644 --- a/beacon_node/beacon_chain/src/payload_envelope_verification/import.rs +++ b/beacon_node/beacon_chain/src/payload_envelope_verification/import.rs @@ -168,6 +168,16 @@ impl BeaconChain { .map_err(BeaconChainError::TokioJoin)? .ok_or(BeaconChainError::RuntimeShutdown)??; + // TODO(gloas): optimistic sync is not supported for Gloas, maybe we could re-add it + if payload_verification_outcome + .payload_verification_status + .is_optimistic() + { + return Err(EnvelopeError::OptimisticSyncNotSupported { + block_root: import_data.block_root, + }); + } + Ok(ExecutedEnvelope::new( signed_envelope, import_data, @@ -236,16 +246,15 @@ impl BeaconChain { // Note that a duplicate cache/payload status table should prevent this from happening // but it doesnt hurt to be defensive. - // TODO(gloas) when the code below is implemented we can delete this drop - drop(fork_choice_reader); - - // TODO(gloas) no fork choice logic yet // Take an exclusive write-lock on fork choice. It's very important to prevent deadlocks by // avoiding taking other locks whilst holding this lock. - // let fork_choice = parking_lot::RwLockUpgradableReadGuard::upgrade(fork_choice_reader); + let mut fork_choice = parking_lot::RwLockUpgradableReadGuard::upgrade(fork_choice_reader); - // TODO(gloas) Do we need this check? Do not import a block that doesn't descend from the finalized root. 
- // let signed_block = check_block_is_finalized_checkpoint_or_descendant(self, &fork_choice, signed_block)?; + // Update the block's payload to received in fork choice, which creates the `Full` virtual + // node which can be eligible for head. + fork_choice + .on_valid_payload_envelope_received(block_root) + .map_err(|e| EnvelopeError::InternalError(format!("{e:?}")))?; // TODO(gloas) emit SSE event if the payload became the new head payload @@ -299,10 +308,9 @@ impl BeaconChain { drop(db_span); - // TODO(gloas) drop fork choice lock // The fork choice write-lock is dropped *after* the on-disk database has been updated. // This prevents inconsistency between the two at the expense of concurrency. - // drop(fork_choice); + drop(fork_choice); // We're declaring the envelope "imported" at this point, since fork choice and the DB know // about it. diff --git a/beacon_node/beacon_chain/src/payload_envelope_verification/mod.rs b/beacon_node/beacon_chain/src/payload_envelope_verification/mod.rs index c707d62dc7..225d5a9892 100644 --- a/beacon_node/beacon_chain/src/payload_envelope_verification/mod.rs +++ b/beacon_node/beacon_chain/src/payload_envelope_verification/mod.rs @@ -182,6 +182,8 @@ pub enum EnvelopeError { payload_slot: Slot, latest_finalized_slot: Slot, }, + /// Optimistic sync is not supported for Gloas payload envelopes. + OptimisticSyncNotSupported { block_root: Hash256 }, /// Some Beacon Chain Error BeaconChainError(Arc), /// Some Beacon State error diff --git a/beacon_node/beacon_chain/src/persisted_fork_choice.rs b/beacon_node/beacon_chain/src/persisted_fork_choice.rs index 6229544e81..8edccbbe98 100644 --- a/beacon_node/beacon_chain/src/persisted_fork_choice.rs +++ b/beacon_node/beacon_chain/src/persisted_fork_choice.rs @@ -6,11 +6,19 @@ use superstruct::superstruct; use types::Hash256; // If adding a new version you should update this type alias and fix the breakages. 
-pub type PersistedForkChoice = PersistedForkChoiceV28; +pub type PersistedForkChoice = PersistedForkChoiceV29; -#[superstruct(variants(V28), variant_attributes(derive(Encode, Decode)), no_enum)] +#[superstruct( + variants(V28, V29), + variant_attributes(derive(Encode, Decode)), + no_enum +)] pub struct PersistedForkChoice { - pub fork_choice: fork_choice::PersistedForkChoiceV28, + #[superstruct(only(V28))] + pub fork_choice_v28: fork_choice::PersistedForkChoiceV28, + #[superstruct(only(V29))] + pub fork_choice: fork_choice::PersistedForkChoiceV29, + #[superstruct(only(V28, V29))] pub fork_choice_store: PersistedForkChoiceStoreV28, } @@ -45,3 +53,53 @@ impl PersistedForkChoiceV28 { )) } } + +impl PersistedForkChoiceV29 { + pub fn from_bytes(bytes: &[u8], store_config: &StoreConfig) -> Result { + let decompressed_bytes = store_config + .decompress_bytes(bytes) + .map_err(Error::Compression)?; + Self::from_ssz_bytes(&decompressed_bytes).map_err(Into::into) + } + + pub fn as_bytes(&self, store_config: &StoreConfig) -> Result, Error> { + let encode_timer = metrics::start_timer(&metrics::FORK_CHOICE_ENCODE_TIMES); + let ssz_bytes = self.as_ssz_bytes(); + drop(encode_timer); + + let _compress_timer = metrics::start_timer(&metrics::FORK_CHOICE_COMPRESS_TIMES); + store_config + .compress_bytes(&ssz_bytes) + .map_err(Error::Compression) + } + + pub fn as_kv_store_op( + &self, + key: Hash256, + store_config: &StoreConfig, + ) -> Result { + Ok(KeyValueStoreOp::PutKeyValue( + DBColumn::ForkChoice, + key.as_slice().to_vec(), + self.as_bytes(store_config)?, + )) + } +} + +impl From for PersistedForkChoiceV29 { + fn from(v28: PersistedForkChoiceV28) -> Self { + Self { + fork_choice: v28.fork_choice_v28.into(), + fork_choice_store: v28.fork_choice_store, + } + } +} + +impl From for PersistedForkChoiceV28 { + fn from(v29: PersistedForkChoiceV29) -> Self { + Self { + fork_choice_v28: v29.fork_choice.into(), + fork_choice_store: v29.fork_choice_store, + } + } +} diff --git 
a/beacon_node/beacon_chain/src/schema_change.rs b/beacon_node/beacon_chain/src/schema_change.rs index ed82143c38..841f28e37d 100644 --- a/beacon_node/beacon_chain/src/schema_change.rs +++ b/beacon_node/beacon_chain/src/schema_change.rs @@ -1,5 +1,8 @@ //! Utilities for managing database schema changes. +mod migration_schema_v29; + use crate::beacon_chain::BeaconChainTypes; +use migration_schema_v29::{downgrade_from_v29, upgrade_to_v29}; use std::sync::Arc; use store::Error as StoreError; use store::hot_cold_store::{HotColdDB, HotColdDBError}; @@ -10,13 +13,23 @@ use store::metadata::{CURRENT_SCHEMA_VERSION, SchemaVersion}; /// All migrations for schema versions up to and including v28 have been removed. Nodes on live /// networks are already running v28, so only the current version check remains. pub fn migrate_schema( - _db: Arc>, + db: Arc>, from: SchemaVersion, to: SchemaVersion, ) -> Result<(), StoreError> { match (from, to) { // Migrating from the current schema version to itself is always OK, a no-op. (_, _) if from == to && to == CURRENT_SCHEMA_VERSION => Ok(()), + // Upgrade from v28 to v29. + (SchemaVersion(28), SchemaVersion(29)) => { + let ops = upgrade_to_v29::(&db)?; + db.store_schema_version_atomically(to, ops) + } + // Downgrade from v29 to v28. + (SchemaVersion(29), SchemaVersion(28)) => { + let ops = downgrade_from_v29::(&db)?; + db.store_schema_version_atomically(to, ops) + } // Anything else is an error. 
(_, _) => Err(HotColdDBError::UnsupportedSchemaVersion { target_version: to, diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v29.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v29.rs new file mode 100644 index 0000000000..77d4be3443 --- /dev/null +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v29.rs @@ -0,0 +1,151 @@ +use crate::beacon_chain::{BeaconChainTypes, FORK_CHOICE_DB_KEY}; +use crate::persisted_fork_choice::{PersistedForkChoiceV28, PersistedForkChoiceV29}; +use std::collections::HashMap; +use store::hot_cold_store::HotColdDB; +use store::{DBColumn, Error as StoreError, KeyValueStore, KeyValueStoreOp}; +use tracing::warn; +use types::EthSpec; + +/// Upgrade from schema v28 to v29. +/// +/// - Clears `best_child` and `best_descendant` on all nodes (replaced by +/// virtual tree walk). +/// - Fails if the persisted fork choice contains any V17 (pre-Gloas) proto +/// nodes at or after the Gloas fork slot. +/// +/// Returns a list of store ops to be applied atomically with the schema version write. +pub fn upgrade_to_v29( + db: &HotColdDB, +) -> Result, StoreError> { + let gloas_fork_slot = db + .spec + .gloas_fork_epoch + .map(|epoch| epoch.start_slot(T::EthSpec::slots_per_epoch())); + + // Load the persisted fork choice (v28 format). + let Some(fc_bytes) = db + .hot_db + .get_bytes(DBColumn::ForkChoice, FORK_CHOICE_DB_KEY.as_slice())? + else { + return Ok(vec![]); + }; + + let persisted_v28 = PersistedForkChoiceV28::from_bytes(&fc_bytes, db.get_config())?; + + // Check for V17 nodes at/after the Gloas fork slot. 
+ if let Some(gloas_fork_slot) = gloas_fork_slot { + let bad_node = persisted_v28 + .fork_choice_v28 + .proto_array_v28 + .nodes + .iter() + .find(|node| node.slot >= gloas_fork_slot); + + if let Some(node) = bad_node { + return Err(StoreError::MigrationError(format!( + "cannot upgrade from v28 to v29: found V17 proto node at slot {} (root: {:?}) \ + which is at or after the Gloas fork slot {}. This node has synced a chain with \ + Gloas disabled and cannot be upgraded. Please resync from scratch.", + node.slot, node.root, gloas_fork_slot, + ))); + } + } + + // Read the previous proposer boost before converting to V29 (V29 no longer stores it). + let previous_proposer_boost = persisted_v28 + .fork_choice_v28 + .proto_array_v28 + .previous_proposer_boost; + + // Convert to v29. + let mut persisted_v29 = PersistedForkChoiceV29::from(persisted_v28); + + // Subtract the proposer boost from the boosted node and all its ancestors. + // + // In the V28 schema, `apply_score_changes` baked the proposer boost directly into node + // weights and back-propagated it up the parent chain. In V29, the boost is computed + // on-the-fly during the virtual tree walk. If we don't subtract the baked-in boost here, + // it will be double-counted after the upgrade. 
+ if !previous_proposer_boost.root.is_zero() && previous_proposer_boost.score > 0 { + let score = previous_proposer_boost.score; + let indices: HashMap<_, _> = persisted_v29 + .fork_choice + .proto_array + .indices + .iter() + .cloned() + .collect(); + + if let Some(node_index) = indices.get(&previous_proposer_boost.root).copied() { + let nodes = &mut persisted_v29.fork_choice.proto_array.nodes; + let mut current = Some(node_index); + while let Some(idx) = current { + if let Some(node) = nodes.get_mut(idx) { + *node.weight_mut() = node.weight().saturating_sub(score); + current = node.parent(); + } else { + break; + } + } + } else { + warn!( + root = ?previous_proposer_boost.root, + "Proposer boost node missing from fork choice" + ); + } + } + + Ok(vec![ + persisted_v29.as_kv_store_op(FORK_CHOICE_DB_KEY, db.get_config())?, + ]) +} + +/// Downgrade from schema v29 to v28. +/// +/// Converts the persisted fork choice from V29 format back to V28. +/// Fails if the persisted fork choice contains any V29 proto nodes, as these contain +/// payload-specific fields that cannot be losslessly converted back to V17 format. +/// +/// Returns a list of store ops to be applied atomically with the schema version write. +pub fn downgrade_from_v29( + db: &HotColdDB, +) -> Result, StoreError> { + // Load the persisted fork choice (v29 format, compressed). + let Some(fc_bytes) = db + .hot_db + .get_bytes(DBColumn::ForkChoice, FORK_CHOICE_DB_KEY.as_slice())? 
+ else { + return Ok(vec![]); + }; + + let persisted_v29 = + PersistedForkChoiceV29::from_bytes(&fc_bytes, db.get_config()).map_err(|e| { + StoreError::MigrationError(format!( + "cannot downgrade from v29 to v28: failed to decode fork choice: {:?}", + e + )) + })?; + + let has_v29_node = persisted_v29 + .fork_choice + .proto_array + .nodes + .iter() + .any(|node| matches!(node, proto_array::core::ProtoNode::V29(_))); + + if has_v29_node { + return Err(StoreError::MigrationError( + "cannot downgrade from v29 to v28: the persisted fork choice contains V29 proto \ + nodes which cannot be losslessly converted to V17 format. The Gloas-specific \ + payload data would be lost." + .to_string(), + )); + } + + // Convert to v28 and encode. + let persisted_v28 = PersistedForkChoiceV28::from(persisted_v29); + + Ok(vec![ + persisted_v28.as_kv_store_op(FORK_CHOICE_DB_KEY, db.get_config())?, + ]) +} diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index 3ed8f59838..947024e8c2 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -1350,7 +1350,7 @@ async fn recover_from_invalid_head_by_importing_blocks() { "the fork block should become the head" ); - let manual_get_head = rig + let (manual_get_head, _) = rig .harness .chain .canonical_head @@ -1428,7 +1428,7 @@ async fn weights_after_resetting_optimistic_status() { .fork_choice_read_lock() .proto_array() .iter_nodes(&head.head_block_root()) - .map(|node| (node.root, node.weight)) + .map(|node| (node.root(), node.weight())) .collect::>(); rig.invalidate_manually(roots[1]).await; @@ -1438,7 +1438,7 @@ async fn weights_after_resetting_optimistic_status() { .canonical_head .fork_choice_write_lock() .proto_array_mut() - .set_all_blocks_to_optimistic::(&rig.harness.chain.spec) + .set_all_blocks_to_optimistic::() .unwrap(); let new_weights = rig @@ -1448,7 +1448,7 @@ async fn 
weights_after_resetting_optimistic_status() { .fork_choice_read_lock() .proto_array() .iter_nodes(&head.head_block_root()) - .map(|node| (node.root, node.weight)) + .map(|node| (node.root(), node.weight())) .collect::>(); assert_eq!(original_weights, new_weights); diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index fb5262b893..c6e13bd160 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -3995,7 +3995,7 @@ async fn schema_downgrade_to_min_version(store_config: StoreConfig, archive: boo ) .await; - let min_version = CURRENT_SCHEMA_VERSION; + let min_version = SchemaVersion(28); // Save the slot clock so that the new harness doesn't revert in time. let slot_clock = harness.chain.slot_clock.clone(); @@ -5426,10 +5426,12 @@ fn assert_chains_pretty_much_the_same(a: &BeaconChain, b .fork_choice_write_lock() .get_head(slot, &spec) .unwrap() + .0 == b.canonical_head .fork_choice_write_lock() .get_head(slot, &spec) - .unwrap(), + .unwrap() + .0, "fork_choice heads should be equal" ); } diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index b052ba66f1..10c0b429a9 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -590,7 +590,10 @@ async fn unaggregated_attestations_added_to_fork_choice_some_none() { if slot <= num_blocks_produced && slot != 0 { assert_eq!( - latest_message.unwrap().1, + latest_message + .expect("latest message should be present") + .slot + .epoch(MinimalEthSpec::slots_per_epoch()), slot.epoch(MinimalEthSpec::slots_per_epoch()), "Latest message epoch for {} should be equal to epoch {}.", validator, @@ -700,10 +703,12 @@ async fn unaggregated_attestations_added_to_fork_choice_all_updated() { let validator_slots: Vec<(&usize, Slot)> = validators.iter().zip(slots).collect(); for (validator, slot) in validator_slots { - let latest_message = 
fork_choice.latest_message(*validator); + let latest_message = fork_choice + .latest_message(*validator) + .expect("latest message should be present"); assert_eq!( - latest_message.unwrap().1, + latest_message.slot.epoch(MinimalEthSpec::slots_per_epoch()), slot.epoch(MinimalEthSpec::slots_per_epoch()), "Latest message slot should be equal to attester duty." ); @@ -714,8 +719,7 @@ async fn unaggregated_attestations_added_to_fork_choice_all_updated() { .expect("Should get block root at slot"); assert_eq!( - latest_message.unwrap().0, - *block_root, + latest_message.root, *block_root, "Latest message block root should be equal to block at slot." ); } diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 17d41cfbcd..0bb04888b7 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -2098,52 +2098,66 @@ pub fn serve( .nodes .iter() .map(|node| { - let execution_status = if node.execution_status.is_execution_enabled() { - Some(node.execution_status.to_string()) + let execution_status = if node + .execution_status() + .is_ok_and(|status| status.is_execution_enabled()) + { + node.execution_status() + .ok() + .map(|status| status.to_string()) } else { None }; + let execution_status_string = node + .execution_status() + .map_or_else(|_| "irrelevant".to_string(), |s| s.to_string()); + ForkChoiceNode { - slot: node.slot, - block_root: node.root, + slot: node.slot(), + block_root: node.root(), parent_root: node - .parent + .parent() .and_then(|index| proto_array.nodes.get(index)) - .map(|parent| parent.root), - justified_epoch: node.justified_checkpoint.epoch, - finalized_epoch: node.finalized_checkpoint.epoch, - weight: node.weight, + .map(|parent| parent.root()), + justified_epoch: node.justified_checkpoint().epoch, + finalized_epoch: node.finalized_checkpoint().epoch, + weight: node.weight(), validity: execution_status, execution_block_hash: node - .execution_status - .block_hash() + .execution_status() + .ok() + 
.and_then(|status| status.block_hash()) .map(|block_hash| block_hash.into_root()), extra_data: ForkChoiceExtraData { - target_root: node.target_root, - justified_root: node.justified_checkpoint.root, - finalized_root: node.finalized_checkpoint.root, + target_root: node.target_root(), + justified_root: node.justified_checkpoint().root, + finalized_root: node.finalized_checkpoint().root, unrealized_justified_root: node - .unrealized_justified_checkpoint + .unrealized_justified_checkpoint() .map(|checkpoint| checkpoint.root), unrealized_finalized_root: node - .unrealized_finalized_checkpoint + .unrealized_finalized_checkpoint() .map(|checkpoint| checkpoint.root), unrealized_justified_epoch: node - .unrealized_justified_checkpoint + .unrealized_justified_checkpoint() .map(|checkpoint| checkpoint.epoch), unrealized_finalized_epoch: node - .unrealized_finalized_checkpoint + .unrealized_finalized_checkpoint() .map(|checkpoint| checkpoint.epoch), - execution_status: node.execution_status.to_string(), + execution_status: execution_status_string, best_child: node - .best_child + .best_child() + .ok() + .flatten() .and_then(|index| proto_array.nodes.get(index)) - .map(|child| child.root), + .map(|child| child.root()), best_descendant: node - .best_descendant + .best_descendant() + .ok() + .flatten() .and_then(|index| proto_array.nodes.get(index)) - .map(|descendant| descendant.root), + .map(|descendant| descendant.root()), }, } }) diff --git a/beacon_node/http_api/src/validator/mod.rs b/beacon_node/http_api/src/validator/mod.rs index 3d96b85870..412851233e 100644 --- a/beacon_node/http_api/src/validator/mod.rs +++ b/beacon_node/http_api/src/validator/mod.rs @@ -671,7 +671,7 @@ pub fn post_validator_prepare_beacon_proposer( .await; // TODO(gloas): verify this is correct. 
We skip proposer preparation for - // GLOAS because the execution payload is no longer embedded in the beacon + // Gloas because the execution payload is no longer embedded in the beacon // block (it's in the payload envelope), so the head block's // execution_payload() is unavailable. let next_slot = current_slot + 1; diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index c9086dd876..b28816302c 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -33,7 +33,7 @@ use lighthouse_network::{Enr, PeerId, types::SyncState}; use network::NetworkReceivers; use network_utils::enr_ext::EnrExt; use operation_pool::attestation_storage::CheckpointKey; -use proto_array::ExecutionStatus; +use proto_array::{ExecutionStatus, core::ProtoNode}; use reqwest::{RequestBuilder, Response, StatusCode}; use sensitive_url::SensitiveUrl; use slot_clock::SlotClock; @@ -3130,51 +3130,65 @@ impl ApiTester { .nodes .iter() .map(|node| { - let execution_status = if node.execution_status.is_execution_enabled() { - Some(node.execution_status.to_string()) + let execution_status = if node + .execution_status() + .is_ok_and(|status| status.is_execution_enabled()) + { + node.execution_status() + .ok() + .map(|status| status.to_string()) } else { None }; ForkChoiceNode { - slot: node.slot, - block_root: node.root, + slot: node.slot(), + block_root: node.root(), parent_root: node - .parent + .parent() .and_then(|index| expected_proto_array.nodes.get(index)) - .map(|parent| parent.root), - justified_epoch: node.justified_checkpoint.epoch, - finalized_epoch: node.finalized_checkpoint.epoch, - weight: node.weight, + .map(|parent| parent.root()), + justified_epoch: node.justified_checkpoint().epoch, + finalized_epoch: node.finalized_checkpoint().epoch, + weight: node.weight(), validity: execution_status, execution_block_hash: node - .execution_status - .block_hash() + .execution_status() + .ok() + .and_then(|status| 
status.block_hash()) .map(|block_hash| block_hash.into_root()), extra_data: ForkChoiceExtraData { - target_root: node.target_root, - justified_root: node.justified_checkpoint.root, - finalized_root: node.finalized_checkpoint.root, + target_root: node.target_root(), + justified_root: node.justified_checkpoint().root, + finalized_root: node.finalized_checkpoint().root, unrealized_justified_root: node - .unrealized_justified_checkpoint + .unrealized_justified_checkpoint() .map(|checkpoint| checkpoint.root), unrealized_finalized_root: node - .unrealized_finalized_checkpoint + .unrealized_finalized_checkpoint() .map(|checkpoint| checkpoint.root), unrealized_justified_epoch: node - .unrealized_justified_checkpoint + .unrealized_justified_checkpoint() .map(|checkpoint| checkpoint.epoch), unrealized_finalized_epoch: node - .unrealized_finalized_checkpoint + .unrealized_finalized_checkpoint() .map(|checkpoint| checkpoint.epoch), - execution_status: node.execution_status.to_string(), + execution_status: node + .execution_status() + .ok() + .map(|status| status.to_string()) + .unwrap_or_else(|| "irrelevant".to_string()), best_child: node - .best_child + .best_child() + .ok() + .flatten() .and_then(|index| expected_proto_array.nodes.get(index)) - .map(|child| child.root), + .map(|child| child.root()), best_descendant: node - .best_descendant + .best_descendant() + .ok() + .flatten() .and_then(|index| expected_proto_array.nodes.get(index)) - .map(|descendant| descendant.root), + .map(|descendant| descendant.root()), }, } }) @@ -7180,6 +7194,7 @@ impl ApiTester { .core_proto_array_mut() .nodes .last_mut() + && let ProtoNode::V17(head_node) = head_node { head_node.execution_status = ExecutionStatus::Optimistic(ExecutionBlockHash::zero()) } diff --git a/beacon_node/store/src/metadata.rs b/beacon_node/store/src/metadata.rs index cf49468451..215cdb2b64 100644 --- a/beacon_node/store/src/metadata.rs +++ b/beacon_node/store/src/metadata.rs @@ -4,7 +4,7 @@ use ssz::{Decode, Encode}; 
use ssz_derive::{Decode, Encode}; use types::{Hash256, Slot}; -pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(28); +pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(29); // All the keys that get stored under the `BeaconMeta` column. // diff --git a/consensus/fork_choice/Cargo.toml b/consensus/fork_choice/Cargo.toml index a07aa38aa5..df47a5c9d1 100644 --- a/consensus/fork_choice/Cargo.toml +++ b/consensus/fork_choice/Cargo.toml @@ -19,5 +19,6 @@ types = { workspace = true } [dev-dependencies] beacon_chain = { workspace = true } +bls = { workspace = true } store = { workspace = true } tokio = { workspace = true } diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 74b287975e..92fd4c1faf 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -3,8 +3,8 @@ use crate::{ForkChoiceStore, InvalidationOperation}; use fixed_bytes::FixedBytesExtended; use logging::crit; use proto_array::{ - Block as ProtoBlock, DisallowedReOrgOffsets, ExecutionStatus, JustifiedBalances, - ProposerHeadError, ProposerHeadInfo, ProtoArrayForkChoice, ReOrgThreshold, + Block as ProtoBlock, DisallowedReOrgOffsets, ExecutionStatus, JustifiedBalances, LatestMessage, + PayloadStatus, ProposerHeadError, ProposerHeadInfo, ProtoArrayForkChoice, ReOrgThreshold, }; use ssz_derive::{Decode, Encode}; use state_processing::{ @@ -19,12 +19,14 @@ use tracing::{debug, instrument, warn}; use types::{ AbstractExecPayload, AttestationShufflingId, AttesterSlashingRef, BeaconBlockRef, BeaconState, BeaconStateError, ChainSpec, Checkpoint, Epoch, EthSpec, ExecPayload, ExecutionBlockHash, - Hash256, IndexedAttestationRef, RelativeEpoch, SignedBeaconBlock, Slot, + Hash256, IndexedAttestationRef, IndexedPayloadAttestation, RelativeEpoch, SignedBeaconBlock, + Slot, }; #[derive(Debug)] pub enum Error { InvalidAttestation(InvalidAttestation), + 
InvalidPayloadAttestation(InvalidPayloadAttestation), InvalidAttesterSlashing(AttesterSlashingValidationError), InvalidBlock(InvalidBlock), ProtoArrayStringError(String), @@ -84,6 +86,12 @@ impl From for Error { } } +impl From for Error { + fn from(e: InvalidPayloadAttestation) -> Self { + Error::InvalidPayloadAttestation(e) + } +} + impl From for Error { fn from(e: AttesterSlashingValidationError) -> Self { Error::InvalidAttesterSlashing(e) @@ -169,6 +177,33 @@ pub enum InvalidAttestation { /// The attestation is attesting to a state that is later than itself. (Viz., attesting to the /// future). AttestsToFutureBlock { block: Slot, attestation: Slot }, + /// Post-Gloas: attestation index must be 0 or 1. + InvalidAttestationIndex { index: u64 }, + /// A same-slot attestation has a non-zero index, which is invalid post-Gloas. + InvalidSameSlotAttestationIndex { slot: Slot }, + /// Post-Gloas: attestation with index == 1 (payload_present) requires the block's + /// payload to have been received (`root in store.payload_states`). + PayloadNotReceived { beacon_block_root: Hash256 }, +} + +#[derive(Debug, Clone, PartialEq)] +pub enum InvalidPayloadAttestation { + /// The payload attestation's attesting indices were empty. + EmptyAggregationBitfield, + /// The `payload_attestation.data.beacon_block_root` block is unknown. + UnknownHeadBlock { beacon_block_root: Hash256 }, + /// The payload attestation is attesting to a block that is later than itself. + AttestsToFutureBlock { block: Slot, attestation: Slot }, + /// A gossip payload attestation must be for the current slot. + PayloadAttestationNotCurrentSlot { + attestation_slot: Slot, + current_slot: Slot, + }, + /// One or more payload attesters are not part of the PTC. 
+ PayloadAttestationAttestersNotInPtc { + attesting_indices_len: usize, + attesting_indices_in_ptc: usize, + }, } impl From for Error { @@ -240,6 +275,17 @@ pub struct QueuedAttestation { attesting_indices: Vec, block_root: Hash256, target_epoch: Epoch, + /// Per Gloas spec: `payload_present = attestation.data.index == 1`. + payload_present: bool, +} + +/// Legacy queued attestation without payload_present (pre-Gloas, schema V28). +#[derive(Clone, PartialEq, Encode, Decode)] +pub struct QueuedAttestationV28 { + slot: Slot, + attesting_indices: Vec, + block_root: Hash256, + target_epoch: Epoch, } impl<'a, E: EthSpec> From> for QueuedAttestation { @@ -249,6 +295,7 @@ impl<'a, E: EthSpec> From> for QueuedAttestation { attesting_indices: a.attesting_indices_to_vec(), block_root: a.data().beacon_block_root, target_epoch: a.data().target.epoch, + payload_present: a.data().index == 1, } } } @@ -366,21 +413,32 @@ where AttestationShufflingId::new(anchor_block_root, anchor_state, RelativeEpoch::Next) .map_err(Error::BeaconStateError)?; - let execution_status = anchor_block.message().execution_payload().map_or_else( - // If the block doesn't have an execution payload then it can't have - // execution enabled. - |_| ExecutionStatus::irrelevant(), - |execution_payload| { + let (execution_status, execution_payload_parent_hash, execution_payload_block_hash) = + if let Ok(signed_bid) = anchor_block.message().body().signed_execution_payload_bid() { + // Gloas: execution status is irrelevant post-Gloas; payload validation + // is decoupled from beacon blocks. + ( + ExecutionStatus::irrelevant(), + Some(signed_bid.message.parent_block_hash), + Some(signed_bid.message.block_hash), + ) + } else if let Ok(execution_payload) = anchor_block.message().execution_payload() { + // Pre-Gloas forks: do not set payload hashes, they are only used post-Gloas. if execution_payload.is_default_with_empty_roots() { - // A default payload does not have execution enabled. 
- ExecutionStatus::irrelevant() + (ExecutionStatus::irrelevant(), None, None) } else { - // Assume that this payload is valid, since the anchor should be a trusted block and - // state. - ExecutionStatus::Valid(execution_payload.block_hash()) + // Assume that this payload is valid, since the anchor should be a + // trusted block and state. + ( + ExecutionStatus::Valid(execution_payload.block_hash()), + None, + None, + ) } - }, - ); + } else { + // Pre-merge: no execution payload at all. + (ExecutionStatus::irrelevant(), None, None) + }; // If the current slot is not provided, use the value that was last provided to the store. let current_slot = current_slot.unwrap_or_else(|| fc_store.get_current_slot()); @@ -394,6 +452,10 @@ where current_epoch_shuffling_id, next_epoch_shuffling_id, execution_status, + execution_payload_parent_hash, + execution_payload_block_hash, + anchor_block.message().proposer_index(), + spec, )?; let mut fork_choice = Self { @@ -479,7 +541,7 @@ where &mut self, system_time_current_slot: Slot, spec: &ChainSpec, - ) -> Result> { + ) -> Result<(Hash256, PayloadStatus), Error> { // Provide the slot (as per the system clock) to the `fc_store` and then return its view of // the current slot. The `fc_store` will ensure that the `current_slot` is never // decreasing, a property which we must maintain. @@ -487,7 +549,7 @@ where let store = &mut self.fc_store; - let head_root = self.proto_array.find_head::( + let (head_root, head_payload_status) = self.proto_array.find_head::( *store.justified_checkpoint(), *store.finalized_checkpoint(), store.justified_balances(), @@ -516,7 +578,7 @@ where finalized_hash, }; - Ok(head_root) + Ok((head_root, head_payload_status)) } /// Get the block to build on as proposer, taking into account proposer re-orgs. @@ -611,6 +673,20 @@ where } } + /// Mark a Gloas payload envelope as valid and received. + /// + /// This must only be called for valid Gloas payloads. 
+ pub fn on_valid_payload_envelope_received( + &mut self, + block_root: Hash256, + ) -> Result<(), Error> { + self.proto_array + .on_valid_payload_envelope_received(block_root) + .map_err(Error::FailedToProcessValidExecutionPayload) + } + + /// Pre-Gloas only. + /// /// See `ProtoArrayForkChoice::process_execution_payload_validation` for documentation. pub fn on_valid_execution_payload( &mut self, @@ -621,6 +697,8 @@ where .map_err(Error::FailedToProcessValidExecutionPayload) } + /// Pre-Gloas only. + /// /// See `ProtoArrayForkChoice::process_execution_payload_invalidation` for documentation. pub fn on_invalid_execution_payload( &mut self, @@ -729,6 +807,11 @@ where let attestation_threshold = spec.get_unaggregated_attestation_due(); // Add proposer score boost if the block is timely. + // TODO(gloas): the spec's `update_proposer_boost_root` additionally checks that + // `block.proposer_index == get_beacon_proposer_index(head_state)` — i.e. that + // the block's proposer matches the expected proposer on the canonical chain. + // This requires calling `get_head` and advancing the head state to the current + // slot, which is expensive. Implement once we have a cached proposer index. let is_before_attesting_interval = block_delay < attestation_threshold; let is_first_block = self.fc_store.proposer_boost_root().is_zero(); @@ -881,6 +964,16 @@ where ExecutionStatus::irrelevant() }; + let (execution_payload_parent_hash, execution_payload_block_hash) = + if let Ok(signed_bid) = block.body().signed_execution_payload_bid() { + ( + Some(signed_bid.message.parent_block_hash), + Some(signed_bid.message.block_hash), + ) + } else { + (None, None) + }; + // This does not apply a vote to the block, it just makes fork choice aware of the block so // it can still be identified as the head even if it doesn't have any votes. 
self.proto_array.process_block::( @@ -907,10 +1000,13 @@ where execution_status, unrealized_justified_checkpoint: Some(unrealized_justified_checkpoint), unrealized_finalized_checkpoint: Some(unrealized_finalized_checkpoint), + execution_payload_parent_hash, + execution_payload_block_hash, + proposer_index: Some(block.proposer_index()), }, current_slot, - self.justified_checkpoint(), - self.finalized_checkpoint(), + spec, + block_delay, )?; Ok(()) @@ -979,6 +1075,7 @@ where &self, indexed_attestation: IndexedAttestationRef, is_from_block: AttestationFromBlock, + spec: &ChainSpec, ) -> Result<(), InvalidAttestation> { // There is no point in processing an attestation with an empty bitfield. Reject // it immediately. @@ -1051,6 +1148,89 @@ where }); } + if spec + .fork_name_at_slot::(indexed_attestation.data().slot) + .gloas_enabled() + { + let index = indexed_attestation.data().index; + + // Post-Gloas: attestation index must be 0 or 1. + if index > 1 { + return Err(InvalidAttestation::InvalidAttestationIndex { index }); + } + + // Same-slot attestations must have index == 0. + if indexed_attestation.data().slot == block.slot && index != 0 { + return Err(InvalidAttestation::InvalidSameSlotAttestationIndex { + slot: block.slot, + }); + } + + // index == 1 (payload_present) requires the block's payload to have been received. + // TODO(gloas): could optimise by adding `payload_received` to `Block` + if index == 1 + && !self + .proto_array + .is_payload_received(&indexed_attestation.data().beacon_block_root) + { + return Err(InvalidAttestation::PayloadNotReceived { + beacon_block_root: indexed_attestation.data().beacon_block_root, + }); + } + } + + Ok(()) + } + + /// Validates a payload attestation for application to fork choice. 
+ fn validate_on_payload_attestation( + &self, + indexed_payload_attestation: &IndexedPayloadAttestation, + is_from_block: AttestationFromBlock, + ) -> Result<(), InvalidPayloadAttestation> { + // This check is from `is_valid_indexed_payload_attestation`, but we do it immediately to + // avoid wasting time on junk attestations. + if indexed_payload_attestation.attesting_indices.is_empty() { + return Err(InvalidPayloadAttestation::EmptyAggregationBitfield); + } + + // PTC attestation must be for a known block. If block is unknown, delay consideration until + // the block is found (responsibility of caller). + let block = self + .proto_array + .get_block(&indexed_payload_attestation.data.beacon_block_root) + .ok_or(InvalidPayloadAttestation::UnknownHeadBlock { + beacon_block_root: indexed_payload_attestation.data.beacon_block_root, + })?; + + // Not strictly part of the spec, but payload attestations to future slots are MORE INVALID + // than payload attestations to blocks at previous slots. + if block.slot > indexed_payload_attestation.data.slot { + return Err(InvalidPayloadAttestation::AttestsToFutureBlock { + block: block.slot, + attestation: indexed_payload_attestation.data.slot, + }); + } + + // PTC votes can only change the vote for their assigned beacon block, return early otherwise + if block.slot != indexed_payload_attestation.data.slot { + return Ok(()); + } + + // Gossip payload attestations must be for the current slot. + // NOTE: signature is assumed to have been verified by caller. 
+ // https://github.com/ethereum/consensus-specs/blob/master/specs/gloas/fork-choice.md + if matches!(is_from_block, AttestationFromBlock::False) + && indexed_payload_attestation.data.slot != self.fc_store.get_current_slot() + { + return Err( + InvalidPayloadAttestation::PayloadAttestationNotCurrentSlot { + attestation_slot: indexed_payload_attestation.data.slot, + current_slot: self.fc_store.get_current_slot(), + }, + ); + } + Ok(()) } @@ -1076,6 +1256,7 @@ where system_time_current_slot: Slot, attestation: IndexedAttestationRef, is_from_block: AttestationFromBlock, + spec: &ChainSpec, ) -> Result<(), Error> { let _timer = metrics::start_timer(&metrics::FORK_CHOICE_ON_ATTESTATION_TIMES); @@ -1098,14 +1279,21 @@ where return Ok(()); } - self.validate_on_attestation(attestation, is_from_block)?; + self.validate_on_attestation(attestation, is_from_block, spec)?; + + // Per Gloas spec: `payload_present = attestation.data.index == 1`. + let payload_present = spec + .fork_name_at_slot::(attestation.data().slot) + .gloas_enabled() + && attestation.data().index == 1; if attestation.data().slot < self.fc_store.get_current_slot() { for validator_index in attestation.attesting_indices_iter() { self.proto_array.process_attestation( *validator_index as usize, attestation.data().beacon_block_root, - attestation.data().target.epoch, + attestation.data().slot, + payload_present, )?; } } else { @@ -1122,6 +1310,59 @@ where Ok(()) } + /// Register a payload attestation with the fork choice DAG. + /// + /// `ptc` is the PTC committee for the attestation's slot: a list of validator indices + /// ordered by committee position. Each attesting validator index is resolved to its + /// position within `ptc` (its `ptc_index`) before being applied to the proto-array. 
+ pub fn on_payload_attestation( + &mut self, + system_time_current_slot: Slot, + attestation: &IndexedPayloadAttestation, + is_from_block: AttestationFromBlock, + ptc: &[usize], + ) -> Result<(), Error> { + self.update_time(system_time_current_slot)?; + + if attestation.data.beacon_block_root.is_zero() { + return Ok(()); + } + + // TODO(gloas): Should ignore wrong-slot payload attestations at the caller, they could + // have been processed at the correct slot when received on gossip, but then have the + // wrong-slot by the time they make it to here (TOCTOU). + self.validate_on_payload_attestation(attestation, is_from_block)?; + + // Resolve validator indices to PTC committee positions. + let ptc_indices: Vec = attestation + .attesting_indices + .iter() + .filter_map(|vi| ptc.iter().position(|&p| p == *vi as usize)) + .collect(); + + // Check that all the attesters are in the PTC + if ptc_indices.len() != attestation.attesting_indices.len() { + return Err( + InvalidPayloadAttestation::PayloadAttestationAttestersNotInPtc { + attesting_indices_len: attestation.attesting_indices.len(), + attesting_indices_in_ptc: ptc_indices.len(), + } + .into(), + ); + } + + for &ptc_index in &ptc_indices { + self.proto_array.process_payload_attestation( + attestation.data.beacon_block_root, + ptc_index, + attestation.data.payload_present, + attestation.data.blob_data_available, + )?; + } + + Ok(()) + } + /// Apply an attester slashing to fork choice. /// /// We assume that the attester slashing provided to this function has already been verified. @@ -1228,7 +1469,8 @@ where self.proto_array.process_attestation( *validator_index as usize, attestation.block_root, - attestation.target_epoch, + attestation.slot, + attestation.payload_present, )?; } } @@ -1358,13 +1600,15 @@ where /// Returns the latest message for a given validator, if any. /// - /// Returns `(block_root, block_slot)`. + /// Returns `block_root, block_slot, payload_present`. 
/// /// ## Notes /// /// It may be prudent to call `Self::update_time` before calling this function, /// since some attestations might be queued and awaiting processing. - pub fn latest_message(&self, validator_index: usize) -> Option<(Hash256, Epoch)> { + /// + /// This function is only used in tests. + pub fn latest_message(&self, validator_index: usize) -> Option { self.proto_array.latest_message(validator_index) } @@ -1409,7 +1653,6 @@ where persisted_proto_array: proto_array::core::SszContainer, justified_balances: JustifiedBalances, reset_payload_statuses: ResetPayloadStatuses, - spec: &ChainSpec, ) -> Result> { let mut proto_array = ProtoArrayForkChoice::from_container( persisted_proto_array.clone(), @@ -1434,7 +1677,7 @@ where // Reset all blocks back to being "optimistic". This helps recover from an EL consensus // fault where an invalid payload becomes valid. - if let Err(e) = proto_array.set_all_blocks_to_optimistic::(spec) { + if let Err(e) = proto_array.set_all_blocks_to_optimistic::() { // If there is an error resetting the optimistic status then log loudly and revert // back to a proto-array which does not have the reset applied. This indicates a // significant error in Lighthouse and warrants detailed investigation. @@ -1464,7 +1707,6 @@ where persisted.proto_array, justified_balances, reset_payload_statuses, - spec, )?; let current_slot = fc_store.get_current_slot(); @@ -1472,7 +1714,7 @@ where let mut fork_choice = Self { fc_store, proto_array, - queued_attestations: persisted.queued_attestations, + queued_attestations: vec![], // Will be updated in the following call to `Self::get_head`. forkchoice_update_parameters: ForkchoiceUpdateParameters { head_hash: None, @@ -1498,7 +1740,7 @@ where // get a different result. fork_choice .proto_array - .set_all_blocks_to_optimistic::(spec)?; + .set_all_blocks_to_optimistic::()?; // If the second attempt at finding a head fails, return an error since we do not // expect this scenario. 
fork_choice.get_head(current_slot, spec)?; @@ -1511,10 +1753,7 @@ where /// be instantiated again later. pub fn to_persisted(&self) -> PersistedForkChoice { PersistedForkChoice { - proto_array: self - .proto_array() - .as_ssz_container(self.justified_checkpoint(), self.finalized_checkpoint()), - queued_attestations: self.queued_attestations().to_vec(), + proto_array: self.proto_array().as_ssz_container(), } } @@ -1528,16 +1767,37 @@ where /// /// This is used when persisting the state of the fork choice to disk. #[superstruct( - variants(V28), + variants(V28, V29), variant_attributes(derive(Encode, Decode, Clone)), no_enum )] pub struct PersistedForkChoice { - pub proto_array: proto_array::core::SszContainerV28, - pub queued_attestations: Vec, + #[superstruct(only(V28))] + pub proto_array_v28: proto_array::core::SszContainerV28, + #[superstruct(only(V29))] + pub proto_array: proto_array::core::SszContainerV29, + #[superstruct(only(V28))] + pub queued_attestations_v28: Vec, } -pub type PersistedForkChoice = PersistedForkChoiceV28; +pub type PersistedForkChoice = PersistedForkChoiceV29; + +impl From for PersistedForkChoiceV29 { + fn from(v28: PersistedForkChoiceV28) -> Self { + Self { + proto_array: v28.proto_array_v28.into(), + } + } +} + +impl From for PersistedForkChoiceV28 { + fn from(v29: PersistedForkChoiceV29) -> Self { + Self { + proto_array_v28: v29.proto_array.into(), + queued_attestations_v28: vec![], + } + } +} #[cfg(test)] mod tests { @@ -1574,6 +1834,7 @@ mod tests { attesting_indices: vec![], block_root: Hash256::zero(), target_epoch: Epoch::new(0), + payload_present: false, }) .collect() } diff --git a/consensus/fork_choice/src/lib.rs b/consensus/fork_choice/src/lib.rs index 8cf2936db4..159eab0ec0 100644 --- a/consensus/fork_choice/src/lib.rs +++ b/consensus/fork_choice/src/lib.rs @@ -4,10 +4,11 @@ mod metrics; pub use crate::fork_choice::{ AttestationFromBlock, Error, ForkChoice, ForkChoiceView, ForkchoiceUpdateParameters, - InvalidAttestation, 
InvalidBlock, PayloadVerificationStatus, PersistedForkChoice, - PersistedForkChoiceV28, QueuedAttestation, ResetPayloadStatuses, + InvalidAttestation, InvalidBlock, InvalidPayloadAttestation, PayloadVerificationStatus, + PersistedForkChoice, PersistedForkChoiceV28, PersistedForkChoiceV29, QueuedAttestation, + ResetPayloadStatuses, }; pub use fork_choice_store::ForkChoiceStore; pub use proto_array::{ - Block as ProtoBlock, ExecutionStatus, InvalidationOperation, ProposerHeadError, + Block as ProtoBlock, ExecutionStatus, InvalidationOperation, PayloadStatus, ProposerHeadError, }; diff --git a/consensus/fork_choice/tests/tests.rs b/consensus/fork_choice/tests/tests.rs index d3a84ee85b..d6f937c0ca 100644 --- a/consensus/fork_choice/tests/tests.rs +++ b/consensus/fork_choice/tests/tests.rs @@ -7,9 +7,11 @@ use beacon_chain::{ BeaconChain, BeaconChainError, BeaconForkChoiceStore, ChainConfig, ForkChoiceError, StateSkipConfig, WhenSlotSkipped, }; +use bls::AggregateSignature; use fixed_bytes::FixedBytesExtended; use fork_choice::{ - ForkChoiceStore, InvalidAttestation, InvalidBlock, PayloadVerificationStatus, QueuedAttestation, + AttestationFromBlock, ForkChoiceStore, InvalidAttestation, InvalidBlock, + InvalidPayloadAttestation, PayloadVerificationStatus, QueuedAttestation, }; use state_processing::state_advance::complete_state_advance; use std::fmt; @@ -19,8 +21,8 @@ use store::MemoryStore; use types::SingleAttestation; use types::{ BeaconBlockRef, BeaconState, ChainSpec, Checkpoint, Epoch, EthSpec, ForkName, Hash256, - IndexedAttestation, MainnetEthSpec, RelativeEpoch, SignedBeaconBlock, Slot, SubnetId, - test_utils::generate_deterministic_keypair, + IndexedAttestation, IndexedPayloadAttestation, MainnetEthSpec, PayloadAttestationData, + RelativeEpoch, SignedBeaconBlock, Slot, SubnetId, test_utils::generate_deterministic_keypair, }; pub type E = MainnetEthSpec; @@ -71,6 +73,9 @@ impl ForkChoiceTest { Self { harness } } + /// Creates a new tester with the Gloas fork 
active at epoch 1. + /// Genesis is a standard Fulu block (epoch 0), so block production works normally. + /// Tests that need Gloas semantics should advance the chain into epoch 1 first. /// Get a value from the `ForkChoice` instantiation. fn get(&self, func: T) -> U where @@ -923,6 +928,56 @@ async fn invalid_attestation_future_block() { .await; } +/// Gossip payload attestations must be for the current slot. A payload attestation for slot S +/// received at slot S+1 should be rejected per the spec. +#[tokio::test] +async fn non_block_payload_attestation_for_previous_slot_is_rejected() { + let test = ForkChoiceTest::new() + .apply_blocks_without_new_attestations(1) + .await; + + let chain = &test.harness.chain; + let block_a = chain + .block_at_slot(Slot::new(1), WhenSlotSkipped::Prev) + .expect("lookup should succeed") + .expect("block A should exist"); + let block_a_root = block_a.canonical_root(); + let s_plus_1 = block_a.slot().saturating_add(1_u64); + + let payload_attestation = IndexedPayloadAttestation:: { + attesting_indices: vec![0_u64].try_into().expect("valid attesting indices"), + data: PayloadAttestationData { + beacon_block_root: block_a_root, + slot: Slot::new(1), + payload_present: true, + blob_data_available: true, + }, + signature: AggregateSignature::empty(), + }; + + let ptc = &[0_usize]; + + let result = chain + .canonical_head + .fork_choice_write_lock() + .on_payload_attestation( + s_plus_1, + &payload_attestation, + AttestationFromBlock::False, + ptc, + ); + assert!( + matches!( + result, + Err(ForkChoiceError::InvalidPayloadAttestation( + InvalidPayloadAttestation::PayloadAttestationNotCurrentSlot { .. 
} + )) + ), + "gossip payload attestation for previous slot should be rejected, got: {:?}", + result + ); +} + /// Specification v0.12.1: /// /// assert target.root == get_ancestor(store, attestation.data.beacon_block_root, target_slot) diff --git a/consensus/proto_array/Cargo.toml b/consensus/proto_array/Cargo.toml index 7419ad813b..ee86277f9c 100644 --- a/consensus/proto_array/Cargo.toml +++ b/consensus/proto_array/Cargo.toml @@ -14,6 +14,8 @@ ethereum_ssz_derive = { workspace = true } fixed_bytes = { workspace = true } safe_arith = { workspace = true } serde = { workspace = true } +smallvec = { workspace = true } superstruct = { workspace = true } +typenum = { workspace = true } types = { workspace = true } yaml_serde = { workspace = true } diff --git a/consensus/proto_array/src/error.rs b/consensus/proto_array/src/error.rs index 35cb4007b7..bb47af97d9 100644 --- a/consensus/proto_array/src/error.rs +++ b/consensus/proto_array/src/error.rs @@ -54,6 +54,14 @@ pub enum Error { }, InvalidEpochOffset(u64), Arith(ArithError), + InvalidNodeVariant { + block_root: Hash256, + }, + BrokenBlock { + block_root: Hash256, + }, + NoViableChildren, + OnBlockRequiresProposerIndex, } impl From for Error { diff --git a/consensus/proto_array/src/fork_choice_test_definition.rs b/consensus/proto_array/src/fork_choice_test_definition.rs index e9deb6759f..c9764d3e44 100644 --- a/consensus/proto_array/src/fork_choice_test_definition.rs +++ b/consensus/proto_array/src/fork_choice_test_definition.rs @@ -1,20 +1,24 @@ mod execution_status; mod ffg_updates; +mod gloas_payload; mod no_votes; mod votes; -use crate::proto_array_fork_choice::{Block, ExecutionStatus, ProtoArrayForkChoice}; +use crate::proto_array_fork_choice::{Block, ExecutionStatus, PayloadStatus, ProtoArrayForkChoice}; use crate::{InvalidationOperation, JustifiedBalances}; use fixed_bytes::FixedBytesExtended; use serde::{Deserialize, Serialize}; +use ssz::BitVector; use std::collections::BTreeSet; +use std::time::Duration; 
use types::{ - AttestationShufflingId, Checkpoint, Epoch, EthSpec, ExecutionBlockHash, Hash256, + AttestationShufflingId, ChainSpec, Checkpoint, Epoch, EthSpec, ExecutionBlockHash, Hash256, MainnetEthSpec, Slot, }; pub use execution_status::*; pub use ffg_updates::*; +pub use gloas_payload::*; pub use no_votes::*; pub use votes::*; @@ -25,6 +29,9 @@ pub enum Operation { finalized_checkpoint: Checkpoint, justified_state_balances: Vec, expected_head: Hash256, + current_slot: Slot, + #[serde(default)] + expected_payload_status: Option, }, ProposerBoostFindHead { justified_checkpoint: Checkpoint, @@ -44,11 +51,23 @@ pub enum Operation { parent_root: Hash256, justified_checkpoint: Checkpoint, finalized_checkpoint: Checkpoint, + #[serde(default)] + execution_payload_parent_hash: Option, + #[serde(default)] + execution_payload_block_hash: Option, }, ProcessAttestation { validator_index: usize, block_root: Hash256, - target_epoch: Epoch, + attestation_slot: Slot, + }, + ProcessPayloadAttestation { + validator_index: usize, + block_root: Hash256, + attestation_slot: Slot, + payload_present: bool, + #[serde(default)] + blob_data_available: bool, }, Prune { finalized_root: Hash256, @@ -63,6 +82,29 @@ pub enum Operation { block_root: Hash256, weight: u64, }, + AssertPayloadWeights { + block_root: Hash256, + expected_full_weight: u64, + expected_empty_weight: u64, + }, + AssertParentPayloadStatus { + block_root: Hash256, + expected_status: PayloadStatus, + }, + SetPayloadTiebreak { + block_root: Hash256, + is_timely: bool, + is_data_available: bool, + }, + /// Simulate receiving and validating an execution payload for `block_root`. + /// Sets `payload_received = true` on the V29 node via the live validation path. 
+ ProcessExecutionPayloadEnvelope { + block_root: Hash256, + }, + AssertPayloadReceived { + block_root: Hash256, + expected: bool, + }, } #[derive(Debug, Clone, Serialize, Deserialize)] @@ -71,12 +113,23 @@ pub struct ForkChoiceTestDefinition { pub justified_checkpoint: Checkpoint, pub finalized_checkpoint: Checkpoint, pub operations: Vec, + #[serde(default)] + pub execution_payload_parent_hash: Option, + #[serde(default)] + pub execution_payload_block_hash: Option, + #[serde(skip)] + pub spec: Option, } impl ForkChoiceTestDefinition { pub fn run(self) { - let mut spec = MainnetEthSpec::default_spec(); - spec.proposer_score_boost = Some(50); + let spec = self.spec.unwrap_or_else(|| { + let mut spec = MainnetEthSpec::default_spec(); + spec.proposer_score_boost = Some(50); + // Legacy test definitions target pre-Gloas behaviour unless explicitly overridden. + spec.gloas_fork_epoch = None; + spec + }); let junk_shuffling_id = AttestationShufflingId::from_components(Epoch::new(0), Hash256::zero()); @@ -89,6 +142,10 @@ impl ForkChoiceTestDefinition { junk_shuffling_id.clone(), junk_shuffling_id, ExecutionStatus::Optimistic(ExecutionBlockHash::zero()), + self.execution_payload_parent_hash, + self.execution_payload_block_hash, + 0, + &spec, ) .expect("should create fork choice struct"); let equivocating_indices = BTreeSet::new(); @@ -100,18 +157,20 @@ impl ForkChoiceTestDefinition { finalized_checkpoint, justified_state_balances, expected_head, + current_slot, + expected_payload_status, } => { let justified_balances = JustifiedBalances::from_effective_balances(justified_state_balances) .unwrap(); - let head = fork_choice + let (head, payload_status) = fork_choice .find_head::( justified_checkpoint, finalized_checkpoint, &justified_balances, Hash256::zero(), &equivocating_indices, - Slot::new(0), + current_slot, &spec, ) .unwrap_or_else(|e| { @@ -123,6 +182,13 @@ impl ForkChoiceTestDefinition { "Operation at index {} failed head check. 
Operation: {:?}", op_index, op ); + if let Some(expected_status) = expected_payload_status { + assert_eq!( + payload_status, expected_status, + "Operation at index {} failed payload status check. Operation: {:?}", + op_index, op + ); + } check_bytes_round_trip(&fork_choice); } Operation::ProposerBoostFindHead { @@ -135,7 +201,7 @@ impl ForkChoiceTestDefinition { let justified_balances = JustifiedBalances::from_effective_balances(justified_state_balances) .unwrap(); - let head = fork_choice + let (head, _payload_status) = fork_choice .find_head::( justified_checkpoint, finalized_checkpoint, @@ -188,6 +254,8 @@ impl ForkChoiceTestDefinition { parent_root, justified_checkpoint, finalized_checkpoint, + execution_payload_parent_hash, + execution_payload_block_hash, } => { let block = Block { slot, @@ -211,14 +279,12 @@ impl ForkChoiceTestDefinition { ), unrealized_justified_checkpoint: None, unrealized_finalized_checkpoint: None, + execution_payload_parent_hash, + execution_payload_block_hash, + proposer_index: Some(0), }; fork_choice - .process_block::( - block, - slot, - self.justified_checkpoint, - self.finalized_checkpoint, - ) + .process_block::(block, slot, &spec, Duration::ZERO) .unwrap_or_else(|e| { panic!( "process_block op at index {} returned error: {:?}", @@ -230,10 +296,10 @@ impl ForkChoiceTestDefinition { Operation::ProcessAttestation { validator_index, block_root, - target_epoch, + attestation_slot, } => { fork_choice - .process_attestation(validator_index, block_root, target_epoch) + .process_attestation(validator_index, block_root, attestation_slot, false) .unwrap_or_else(|_| { panic!( "process_attestation op at index {} returned error", @@ -242,6 +308,28 @@ impl ForkChoiceTestDefinition { }); check_bytes_round_trip(&fork_choice); } + Operation::ProcessPayloadAttestation { + validator_index, + block_root, + attestation_slot: _, + payload_present, + blob_data_available, + } => { + fork_choice + .process_payload_attestation( + block_root, + 
validator_index, + payload_present, + blob_data_available, + ) + .unwrap_or_else(|_| { + panic!( + "process_payload_attestation op at index {} returned error", + op_index + ) + }); + check_bytes_round_trip(&fork_choice); + } Operation::Prune { finalized_root, prune_threshold, @@ -287,8 +375,153 @@ impl ForkChoiceTestDefinition { Operation::AssertWeight { block_root, weight } => assert_eq!( fork_choice.get_weight(&block_root).unwrap(), weight, - "block weight" + "block weight at op index {}", + op_index ), + Operation::AssertPayloadWeights { + block_root, + expected_full_weight, + expected_empty_weight, + } => { + let block_index = fork_choice + .proto_array + .indices + .get(&block_root) + .unwrap_or_else(|| { + panic!( + "AssertPayloadWeights: block root not found at op index {}", + op_index + ) + }); + let node = fork_choice + .proto_array + .nodes + .get(*block_index) + .unwrap_or_else(|| { + panic!( + "AssertPayloadWeights: node not found at op index {}", + op_index + ) + }); + let v29 = node.as_v29().unwrap_or_else(|_| { + panic!( + "AssertPayloadWeights: node is not V29 at op index {}", + op_index + ) + }); + assert_eq!( + v29.full_payload_weight, expected_full_weight, + "full_payload_weight mismatch at op index {}", + op_index + ); + assert_eq!( + v29.empty_payload_weight, expected_empty_weight, + "empty_payload_weight mismatch at op index {}", + op_index + ); + } + Operation::AssertParentPayloadStatus { + block_root, + expected_status, + } => { + let block_index = fork_choice + .proto_array + .indices + .get(&block_root) + .unwrap_or_else(|| { + panic!( + "AssertParentPayloadStatus: block root not found at op index {}", + op_index + ) + }); + let node = fork_choice + .proto_array + .nodes + .get(*block_index) + .unwrap_or_else(|| { + panic!( + "AssertParentPayloadStatus: node not found at op index {}", + op_index + ) + }); + let v29 = node.as_v29().unwrap_or_else(|_| { + panic!( + "AssertParentPayloadStatus: node is not V29 at op index {}", + op_index + ) + 
}); + assert_eq!( + v29.parent_payload_status, expected_status, + "parent_payload_status mismatch at op index {}", + op_index + ); + } + Operation::SetPayloadTiebreak { + block_root, + is_timely, + is_data_available, + } => { + let block_index = fork_choice + .proto_array + .indices + .get(&block_root) + .unwrap_or_else(|| { + panic!( + "SetPayloadTiebreak: block root not found at op index {}", + op_index + ) + }); + let node = fork_choice + .proto_array + .nodes + .get_mut(*block_index) + .unwrap_or_else(|| { + panic!( + "SetPayloadTiebreak: node not found at op index {}", + op_index + ) + }); + let node_v29 = node.as_v29_mut().unwrap_or_else(|_| { + panic!( + "SetPayloadTiebreak: node is not V29 at op index {}", + op_index + ) + }); + // Set all bits (exceeds any threshold) or clear all bits. + let fill = if is_timely { 0xFF } else { 0x00 }; + node_v29.payload_timeliness_votes = + BitVector::from_bytes(smallvec::smallvec![fill; 64]) + .expect("valid 512-bit bitvector"); + let fill = if is_data_available { 0xFF } else { 0x00 }; + node_v29.payload_data_availability_votes = + BitVector::from_bytes(smallvec::smallvec![fill; 64]) + .expect("valid 512-bit bitvector"); + // Per spec, is_payload_timely/is_payload_data_available require + // the payload to be in payload_states (payload_received). 
+ node_v29.payload_received = is_timely || is_data_available; + } + Operation::ProcessExecutionPayloadEnvelope { block_root } => { + fork_choice + .on_valid_payload_envelope_received(block_root) + .unwrap_or_else(|e| { + panic!( + "on_execution_payload op at index {} returned error: {}", + op_index, e + ) + }); + check_bytes_round_trip(&fork_choice); + } + Operation::AssertPayloadReceived { + block_root, + expected, + } => { + let actual = fork_choice.is_payload_received(&block_root); + assert_eq!( + actual, expected, + "payload_received mismatch at op index {}", + op_index + ); + } } } } @@ -314,8 +547,7 @@ fn get_checkpoint(i: u64) -> Checkpoint { } fn check_bytes_round_trip(original: &ProtoArrayForkChoice) { - // The checkpoint are ignored `ProtoArrayForkChoice::from_bytes` so any value is ok - let bytes = original.as_bytes(Checkpoint::default(), Checkpoint::default()); + let bytes = original.as_bytes(); let decoded = ProtoArrayForkChoice::from_bytes(&bytes, original.balances.clone()) .expect("fork choice should decode from bytes"); assert!( diff --git a/consensus/proto_array/src/fork_choice_test_definition/execution_status.rs b/consensus/proto_array/src/fork_choice_test_definition/execution_status.rs index aa26a84306..794310ef89 100644 --- a/consensus/proto_array/src/fork_choice_test_definition/execution_status.rs +++ b/consensus/proto_array/src/fork_choice_test_definition/execution_status.rs @@ -16,6 +16,8 @@ pub fn get_execution_status_test_definition_01() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(0), + current_slot: Slot::new(0), + expected_payload_status: None, }); // Add a block with a hash of 2. 
@@ -35,6 +37,8 @@ pub fn get_execution_status_test_definition_01() -> ForkChoiceTestDefinition { epoch: Epoch::new(1), root: get_root(0), }, + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }); // Ensure that the head is 2 @@ -53,6 +57,8 @@ pub fn get_execution_status_test_definition_01() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(2), + current_slot: Slot::new(0), + expected_payload_status: None, }); // Add a block with a hash of 1 that comes off the genesis block (this is a fork compared @@ -73,6 +79,8 @@ pub fn get_execution_status_test_definition_01() -> ForkChoiceTestDefinition { epoch: Epoch::new(1), root: get_root(0), }, + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }); // Ensure that the head is still 2 @@ -91,6 +99,8 @@ pub fn get_execution_status_test_definition_01() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(2), + current_slot: Slot::new(0), + expected_payload_status: None, }); // Add a vote to block 1 @@ -101,7 +111,7 @@ pub fn get_execution_status_test_definition_01() -> ForkChoiceTestDefinition { ops.push(Operation::ProcessAttestation { validator_index: 0, block_root: get_root(1), - target_epoch: Epoch::new(2), + attestation_slot: Slot::new(2), }); // Ensure that the head is now 1, because 1 has a vote. 
@@ -120,6 +130,8 @@ pub fn get_execution_status_test_definition_01() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(1), + current_slot: Slot::new(0), + expected_payload_status: None, }); ops.push(Operation::AssertWeight { @@ -143,7 +155,7 @@ pub fn get_execution_status_test_definition_01() -> ForkChoiceTestDefinition { ops.push(Operation::ProcessAttestation { validator_index: 1, block_root: get_root(2), - target_epoch: Epoch::new(2), + attestation_slot: Slot::new(2), }); // Ensure that the head is 2 since 1 and 2 both have a vote @@ -162,6 +174,8 @@ pub fn get_execution_status_test_definition_01() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(2), + current_slot: Slot::new(0), + expected_payload_status: None, }); ops.push(Operation::AssertWeight { @@ -196,6 +210,8 @@ pub fn get_execution_status_test_definition_01() -> ForkChoiceTestDefinition { epoch: Epoch::new(1), root: get_root(0), }, + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }); // Ensure that the head is still 2 @@ -216,6 +232,8 @@ pub fn get_execution_status_test_definition_01() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(2), + current_slot: Slot::new(0), + expected_payload_status: None, }); ops.push(Operation::AssertWeight { @@ -245,7 +263,7 @@ pub fn get_execution_status_test_definition_01() -> ForkChoiceTestDefinition { ops.push(Operation::ProcessAttestation { validator_index: 0, block_root: get_root(3), - target_epoch: Epoch::new(3), + attestation_slot: Slot::new(3), }); // Ensure that the head is still 2 @@ -266,6 +284,8 @@ pub fn get_execution_status_test_definition_01() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(2), + current_slot: Slot::new(0), + expected_payload_status: None, }); ops.push(Operation::AssertWeight { @@ -315,6 +335,8 @@ pub fn 
get_execution_status_test_definition_01() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(2), + current_slot: Slot::new(0), + expected_payload_status: None, }); // Invalidation of 3 should have removed upstream weight. @@ -347,7 +369,7 @@ pub fn get_execution_status_test_definition_01() -> ForkChoiceTestDefinition { ops.push(Operation::ProcessAttestation { validator_index: 1, block_root: get_root(1), - target_epoch: Epoch::new(3), + attestation_slot: Slot::new(3), }); // Ensure that the head has switched back to 1 @@ -368,6 +390,8 @@ pub fn get_execution_status_test_definition_01() -> ForkChoiceTestDefinition { }, justified_state_balances: balances, expected_head: get_root(1), + current_slot: Slot::new(0), + expected_payload_status: None, }); ops.push(Operation::AssertWeight { @@ -399,6 +423,9 @@ pub fn get_execution_status_test_definition_01() -> ForkChoiceTestDefinition { root: get_root(0), }, operations: ops, + execution_payload_parent_hash: None, + execution_payload_block_hash: None, + spec: None, } } @@ -418,6 +445,8 @@ pub fn get_execution_status_test_definition_02() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(0), + current_slot: Slot::new(0), + expected_payload_status: None, }); // Add a block with a hash of 2. 
@@ -437,6 +466,8 @@ pub fn get_execution_status_test_definition_02() -> ForkChoiceTestDefinition { epoch: Epoch::new(1), root: get_root(0), }, + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }); // Ensure that the head is 2 @@ -455,6 +486,8 @@ pub fn get_execution_status_test_definition_02() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(2), + current_slot: Slot::new(0), + expected_payload_status: None, }); // Add a block with a hash of 1 that comes off the genesis block (this is a fork compared @@ -475,6 +508,8 @@ pub fn get_execution_status_test_definition_02() -> ForkChoiceTestDefinition { epoch: Epoch::new(1), root: get_root(0), }, + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }); // Ensure that the head is still 2 @@ -493,6 +528,8 @@ pub fn get_execution_status_test_definition_02() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(2), + current_slot: Slot::new(0), + expected_payload_status: None, }); // Add a vote to block 1 @@ -503,7 +540,7 @@ pub fn get_execution_status_test_definition_02() -> ForkChoiceTestDefinition { ops.push(Operation::ProcessAttestation { validator_index: 0, block_root: get_root(1), - target_epoch: Epoch::new(2), + attestation_slot: Slot::new(2), }); // Ensure that the head is now 1, because 1 has a vote. 
@@ -522,6 +559,8 @@ pub fn get_execution_status_test_definition_02() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(1), + current_slot: Slot::new(0), + expected_payload_status: None, }); ops.push(Operation::AssertWeight { @@ -545,7 +584,7 @@ pub fn get_execution_status_test_definition_02() -> ForkChoiceTestDefinition { ops.push(Operation::ProcessAttestation { validator_index: 1, block_root: get_root(2), - target_epoch: Epoch::new(2), + attestation_slot: Slot::new(2), }); // Ensure that the head is 2 since 1 and 2 both have a vote @@ -564,6 +603,8 @@ pub fn get_execution_status_test_definition_02() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(2), + current_slot: Slot::new(0), + expected_payload_status: None, }); ops.push(Operation::AssertWeight { @@ -598,6 +639,8 @@ pub fn get_execution_status_test_definition_02() -> ForkChoiceTestDefinition { epoch: Epoch::new(1), root: get_root(0), }, + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }); // Ensure that the head is still 2 @@ -618,6 +661,8 @@ pub fn get_execution_status_test_definition_02() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(2), + current_slot: Slot::new(0), + expected_payload_status: None, }); ops.push(Operation::AssertWeight { @@ -647,7 +692,7 @@ pub fn get_execution_status_test_definition_02() -> ForkChoiceTestDefinition { ops.push(Operation::ProcessAttestation { validator_index: 0, block_root: get_root(3), - target_epoch: Epoch::new(3), + attestation_slot: Slot::new(3), }); // Move validator #1 vote from 2 to 3 @@ -660,7 +705,7 @@ pub fn get_execution_status_test_definition_02() -> ForkChoiceTestDefinition { ops.push(Operation::ProcessAttestation { validator_index: 1, block_root: get_root(3), - target_epoch: Epoch::new(3), + attestation_slot: Slot::new(3), }); // Ensure that the head is now 3. 
@@ -681,6 +726,8 @@ pub fn get_execution_status_test_definition_02() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(3), + current_slot: Slot::new(0), + expected_payload_status: None, }); ops.push(Operation::AssertWeight { @@ -730,6 +777,8 @@ pub fn get_execution_status_test_definition_02() -> ForkChoiceTestDefinition { }, justified_state_balances: balances, expected_head: get_root(2), + current_slot: Slot::new(0), + expected_payload_status: None, }); // Invalidation of 3 should have removed upstream weight. @@ -763,6 +812,9 @@ pub fn get_execution_status_test_definition_02() -> ForkChoiceTestDefinition { root: get_root(0), }, operations: ops, + execution_payload_parent_hash: None, + execution_payload_block_hash: None, + spec: None, } } @@ -782,6 +834,8 @@ pub fn get_execution_status_test_definition_03() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(0), + current_slot: Slot::new(0), + expected_payload_status: None, }); // Add a block with a hash of 2. 
@@ -801,6 +855,8 @@ pub fn get_execution_status_test_definition_03() -> ForkChoiceTestDefinition { epoch: Epoch::new(1), root: get_root(0), }, + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }); // Ensure that the head is 2 @@ -819,6 +875,8 @@ pub fn get_execution_status_test_definition_03() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(2), + current_slot: Slot::new(0), + expected_payload_status: None, }); // Add a block with a hash of 1 that comes off the genesis block (this is a fork compared @@ -839,6 +897,8 @@ pub fn get_execution_status_test_definition_03() -> ForkChoiceTestDefinition { epoch: Epoch::new(1), root: get_root(0), }, + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }); // Ensure that the head is still 2 @@ -857,6 +917,8 @@ pub fn get_execution_status_test_definition_03() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(2), + current_slot: Slot::new(0), + expected_payload_status: None, }); // Add a vote to block 1 @@ -867,7 +929,7 @@ pub fn get_execution_status_test_definition_03() -> ForkChoiceTestDefinition { ops.push(Operation::ProcessAttestation { validator_index: 0, block_root: get_root(1), - target_epoch: Epoch::new(2), + attestation_slot: Slot::new(2), }); // Ensure that the head is now 1, because 1 has a vote. @@ -886,6 +948,8 @@ pub fn get_execution_status_test_definition_03() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(1), + current_slot: Slot::new(0), + expected_payload_status: None, }); ops.push(Operation::AssertWeight { @@ -909,7 +973,7 @@ pub fn get_execution_status_test_definition_03() -> ForkChoiceTestDefinition { ops.push(Operation::ProcessAttestation { validator_index: 1, block_root: get_root(1), - target_epoch: Epoch::new(2), + attestation_slot: Slot::new(2), }); // Ensure that the head is 1. 
@@ -928,6 +992,8 @@ pub fn get_execution_status_test_definition_03() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(1), + current_slot: Slot::new(0), + expected_payload_status: None, }); ops.push(Operation::AssertWeight { @@ -962,6 +1028,8 @@ pub fn get_execution_status_test_definition_03() -> ForkChoiceTestDefinition { epoch: Epoch::new(1), root: get_root(0), }, + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }); // Ensure that the head is now 3, applying a proposer boost to 3 as well. @@ -985,13 +1053,15 @@ pub fn get_execution_status_test_definition_03() -> ForkChoiceTestDefinition { proposer_boost_root: get_root(3), }); + // Stored weights are pure attestation scores (proposer boost is applied + // on-the-fly in the walk's `get_weight`, not baked into `node.weight()`). ops.push(Operation::AssertWeight { block_root: get_root(0), - weight: 33_250, + weight: 2_000, }); ops.push(Operation::AssertWeight { block_root: get_root(1), - weight: 33_250, + weight: 2_000, }); ops.push(Operation::AssertWeight { block_root: get_root(2), @@ -999,8 +1069,7 @@ pub fn get_execution_status_test_definition_03() -> ForkChoiceTestDefinition { }); ops.push(Operation::AssertWeight { block_root: get_root(3), - // This is a "magic number" generated from `calculate_committee_fraction`. - weight: 31_250, + weight: 0, }); // Invalidate the payload of 3. 
@@ -1065,6 +1134,9 @@ pub fn get_execution_status_test_definition_03() -> ForkChoiceTestDefinition { root: get_root(0), }, operations: ops, + execution_payload_parent_hash: None, + execution_payload_block_hash: None, + spec: None, } } diff --git a/consensus/proto_array/src/fork_choice_test_definition/ffg_updates.rs b/consensus/proto_array/src/fork_choice_test_definition/ffg_updates.rs index 3b31616145..76f9a95315 100644 --- a/consensus/proto_array/src/fork_choice_test_definition/ffg_updates.rs +++ b/consensus/proto_array/src/fork_choice_test_definition/ffg_updates.rs @@ -10,6 +10,8 @@ pub fn get_ffg_case_01_test_definition() -> ForkChoiceTestDefinition { finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), expected_head: get_root(0), + current_slot: Slot::new(0), + expected_payload_status: None, }); // Build the following tree (stick? lol). @@ -27,6 +29,8 @@ pub fn get_ffg_case_01_test_definition() -> ForkChoiceTestDefinition { parent_root: get_root(0), justified_checkpoint: get_checkpoint(0), finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }); ops.push(Operation::ProcessBlock { slot: Slot::new(2), @@ -34,6 +38,8 @@ pub fn get_ffg_case_01_test_definition() -> ForkChoiceTestDefinition { parent_root: get_root(1), justified_checkpoint: get_checkpoint(1), finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }); ops.push(Operation::ProcessBlock { slot: Slot::new(3), @@ -41,6 +47,8 @@ pub fn get_ffg_case_01_test_definition() -> ForkChoiceTestDefinition { parent_root: get_root(2), justified_checkpoint: get_checkpoint(2), finalized_checkpoint: get_checkpoint(1), + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }); // Ensure that with justified epoch 0 we find 3 @@ -57,6 +65,8 @@ pub fn get_ffg_case_01_test_definition() -> ForkChoiceTestDefinition { finalized_checkpoint: 
get_checkpoint(0), justified_state_balances: balances.clone(), expected_head: get_root(3), + current_slot: Slot::new(0), + expected_payload_status: None, }); // Ensure that with justified epoch 1 we find 3 @@ -77,6 +87,8 @@ pub fn get_ffg_case_01_test_definition() -> ForkChoiceTestDefinition { finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), expected_head: get_root(3), + current_slot: Slot::new(0), + expected_payload_status: None, }); // Ensure that with justified epoch 2 we find 3 @@ -93,6 +105,8 @@ pub fn get_ffg_case_01_test_definition() -> ForkChoiceTestDefinition { finalized_checkpoint: get_checkpoint(1), justified_state_balances: balances, expected_head: get_root(3), + current_slot: Slot::new(0), + expected_payload_status: None, }); // END OF TESTS @@ -101,6 +115,9 @@ pub fn get_ffg_case_01_test_definition() -> ForkChoiceTestDefinition { justified_checkpoint: get_checkpoint(0), finalized_checkpoint: get_checkpoint(0), operations: ops, + execution_payload_parent_hash: None, + execution_payload_block_hash: None, + spec: None, } } @@ -114,6 +131,8 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), expected_head: get_root(0), + current_slot: Slot::new(0), + expected_payload_status: None, }); // Build the following tree. 
@@ -137,6 +156,8 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { parent_root: get_root(0), justified_checkpoint: get_checkpoint(0), finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }); ops.push(Operation::ProcessBlock { slot: Slot::new(2), @@ -147,6 +168,8 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { root: get_root(1), }, finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }); ops.push(Operation::ProcessBlock { slot: Slot::new(3), @@ -157,6 +180,8 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { root: get_root(1), }, finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }); ops.push(Operation::ProcessBlock { slot: Slot::new(4), @@ -167,6 +192,8 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { root: get_root(1), }, finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }); ops.push(Operation::ProcessBlock { slot: Slot::new(5), @@ -177,6 +204,8 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { root: get_root(3), }, finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }); // Right branch @@ -186,6 +215,8 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { parent_root: get_root(0), justified_checkpoint: get_checkpoint(0), finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }); ops.push(Operation::ProcessBlock { slot: Slot::new(2), @@ -193,6 +224,8 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { parent_root: get_root(2), justified_checkpoint: get_checkpoint(0), finalized_checkpoint: get_checkpoint(0), + 
execution_payload_parent_hash: None, + execution_payload_block_hash: None, }); ops.push(Operation::ProcessBlock { slot: Slot::new(3), @@ -200,6 +233,8 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { parent_root: get_root(4), justified_checkpoint: get_checkpoint(0), finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }); ops.push(Operation::ProcessBlock { slot: Slot::new(4), @@ -210,6 +245,8 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { root: get_root(2), }, finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }); ops.push(Operation::ProcessBlock { slot: Slot::new(5), @@ -220,6 +257,8 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { root: get_root(4), }, finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }); // Ensure that if we start at 0 we find 10 (just: 0, fin: 0). @@ -240,6 +279,8 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), expected_head: get_root(10), + current_slot: Slot::new(0), + expected_payload_status: None, }); // Same as above, but with justified epoch 2. ops.push(Operation::FindHead { @@ -250,6 +291,8 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), expected_head: get_root(10), + current_slot: Slot::new(0), + expected_payload_status: None, }); // Same as above, but with justified epoch 3. 
// @@ -264,6 +307,8 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), expected_head: get_root(10), + current_slot: Slot::new(0), + expected_payload_status: None, }); // Add a vote to 1. @@ -282,7 +327,7 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { ops.push(Operation::ProcessAttestation { validator_index: 0, block_root: get_root(1), - target_epoch: Epoch::new(0), + attestation_slot: Slot::new(0), }); // Ensure that if we start at 0 we find 9 (just: 0, fin: 0). @@ -303,6 +348,8 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), expected_head: get_root(9), + current_slot: Slot::new(0), + expected_payload_status: None, }); // Save as above but justified epoch 2. ops.push(Operation::FindHead { @@ -313,6 +360,8 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), expected_head: get_root(9), + current_slot: Slot::new(0), + expected_payload_status: None, }); // Save as above but justified epoch 3. // @@ -327,6 +376,8 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), expected_head: get_root(9), + current_slot: Slot::new(0), + expected_payload_status: None, }); // Add a vote to 2. @@ -345,7 +396,7 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { ops.push(Operation::ProcessAttestation { validator_index: 1, block_root: get_root(2), - target_epoch: Epoch::new(0), + attestation_slot: Slot::new(0), }); // Ensure that if we start at 0 we find 10 (just: 0, fin: 0). 
@@ -366,6 +417,8 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), expected_head: get_root(10), + current_slot: Slot::new(0), + expected_payload_status: None, }); // Same as above but justified epoch 2. ops.push(Operation::FindHead { @@ -376,6 +429,8 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), expected_head: get_root(10), + current_slot: Slot::new(0), + expected_payload_status: None, }); // Same as above but justified epoch 3. // @@ -390,6 +445,8 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), expected_head: get_root(10), + current_slot: Slot::new(0), + expected_payload_status: None, }); // Ensure that if we start at 1 we find 9 (just: 0, fin: 0). @@ -413,6 +470,8 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), expected_head: get_root(9), + current_slot: Slot::new(0), + expected_payload_status: None, }); // Same as above but justified epoch 2. ops.push(Operation::FindHead { @@ -423,6 +482,8 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), expected_head: get_root(9), + current_slot: Slot::new(0), + expected_payload_status: None, }); // Same as above but justified epoch 3. // @@ -437,6 +498,8 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), expected_head: get_root(9), + current_slot: Slot::new(0), + expected_payload_status: None, }); // Ensure that if we start at 2 we find 10 (just: 0, fin: 0). 
@@ -457,6 +520,8 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), expected_head: get_root(10), + current_slot: Slot::new(0), + expected_payload_status: None, }); // Same as above but justified epoch 2. ops.push(Operation::FindHead { @@ -467,6 +532,8 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), expected_head: get_root(10), + current_slot: Slot::new(0), + expected_payload_status: None, }); // Same as above but justified epoch 3. // @@ -481,6 +548,8 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances, expected_head: get_root(10), + current_slot: Slot::new(0), + expected_payload_status: None, }); // END OF TESTS @@ -489,6 +558,9 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { justified_checkpoint: get_checkpoint(0), finalized_checkpoint: get_checkpoint(0), operations: ops, + execution_payload_parent_hash: None, + execution_payload_block_hash: None, + spec: None, } } diff --git a/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs b/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs new file mode 100644 index 0000000000..ea37780795 --- /dev/null +++ b/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs @@ -0,0 +1,893 @@ +use super::*; + +fn gloas_spec() -> ChainSpec { + let mut spec = MainnetEthSpec::default_spec(); + spec.proposer_score_boost = Some(50); + spec.gloas_fork_epoch = Some(Epoch::new(0)); + spec +} + +pub fn get_gloas_chain_following_test_definition() -> ForkChoiceTestDefinition { + let mut ops = vec![]; + + // Build two branches off genesis where one child extends parent's payload chain (Full) + // and the other does not (Empty). 
+ ops.push(Operation::ProcessBlock { + slot: Slot::new(1), + root: get_root(1), + parent_root: get_root(0), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: Some(get_hash(0)), + execution_payload_block_hash: Some(get_hash(1)), + }); + ops.push(Operation::ProcessBlock { + slot: Slot::new(1), + root: get_root(2), + parent_root: get_root(0), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: Some(get_hash(99)), + execution_payload_block_hash: Some(get_hash(2)), + }); + + // Extend both branches to verify that head selection follows the selected chain. + ops.push(Operation::ProcessBlock { + slot: Slot::new(2), + root: get_root(3), + parent_root: get_root(1), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: Some(get_hash(1)), + execution_payload_block_hash: Some(get_hash(3)), + }); + ops.push(Operation::ProcessBlock { + slot: Slot::new(2), + root: get_root(4), + parent_root: get_root(2), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: Some(get_hash(100)), + execution_payload_block_hash: Some(get_hash(4)), + }); + + // Mark root_1 as having received its execution payload so that + // its FULL virtual node exists in the Gloas fork choice tree. + ops.push(Operation::ProcessExecutionPayloadEnvelope { + block_root: get_root(1), + }); + + ops.push(Operation::AssertParentPayloadStatus { + block_root: get_root(1), + expected_status: PayloadStatus::Full, + }); + ops.push(Operation::AssertParentPayloadStatus { + block_root: get_root(2), + expected_status: PayloadStatus::Empty, + }); + + // With equal full/empty parent weights, tiebreak decides which chain to follow. 
+ ops.push(Operation::SetPayloadTiebreak { + block_root: get_root(0), + is_timely: true, + is_data_available: true, + }); + ops.push(Operation::FindHead { + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + justified_state_balances: vec![1], + expected_head: get_root(3), + current_slot: Slot::new(0), + expected_payload_status: None, + }); + + ops.push(Operation::SetPayloadTiebreak { + block_root: get_root(0), + is_timely: false, + is_data_available: false, + }); + ops.push(Operation::FindHead { + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + justified_state_balances: vec![1], + expected_head: get_root(4), + current_slot: Slot::new(0), + expected_payload_status: None, + }); + + ForkChoiceTestDefinition { + finalized_block_slot: Slot::new(0), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + operations: ops, + execution_payload_parent_hash: Some(get_hash(42)), + execution_payload_block_hash: Some(get_hash(0)), + spec: Some(gloas_spec()), + } +} + +pub fn get_gloas_payload_probe_test_definition() -> ForkChoiceTestDefinition { + let mut ops = vec![]; + + ops.push(Operation::ProcessBlock { + slot: Slot::new(1), + root: get_root(1), + parent_root: get_root(0), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: Some(get_hash(0)), + execution_payload_block_hash: Some(get_hash(1)), + }); + + // One Full and one Empty vote for the same head block: tie probes via runtime tiebreak, + // which defaults to Empty unless timely+data-available evidence is set. 
+ ops.push(Operation::ProcessPayloadAttestation { + validator_index: 0, + block_root: get_root(1), + attestation_slot: Slot::new(2), + payload_present: true, + blob_data_available: false, + }); + ops.push(Operation::ProcessPayloadAttestation { + validator_index: 1, + block_root: get_root(1), + attestation_slot: Slot::new(2), + payload_present: false, + blob_data_available: false, + }); + ops.push(Operation::FindHead { + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + justified_state_balances: vec![1, 1], + expected_head: get_root(1), + current_slot: Slot::new(0), + // With MainnetEthSpec PTC_SIZE=512, 1 bit set out of 256 threshold → not timely → Empty. + expected_payload_status: Some(PayloadStatus::Empty), + }); + // PTC votes write to bitfields only, not to full/empty weight. + // Weight is 0 because no CL attestations target this block. + ops.push(Operation::AssertPayloadWeights { + block_root: get_root(1), + expected_full_weight: 0, + expected_empty_weight: 0, + }); + + // Flip validator 0 to Empty; both bits now clear. + ops.push(Operation::ProcessPayloadAttestation { + validator_index: 0, + block_root: get_root(1), + attestation_slot: Slot::new(3), + payload_present: false, + blob_data_available: false, + }); + ops.push(Operation::FindHead { + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + justified_state_balances: vec![1, 1], + expected_head: get_root(1), + current_slot: Slot::new(0), + expected_payload_status: Some(PayloadStatus::Empty), + }); + ops.push(Operation::AssertPayloadWeights { + block_root: get_root(1), + expected_full_weight: 0, + expected_empty_weight: 0, + }); + + // Same-slot attestation to a new head candidate should be Pending (no payload bucket change). + // Root 5 is an Empty child of root_1 (parent_hash doesn't match root_1's block_hash), + // so it's reachable through root_1's Empty direction (root_1 has no payload_received). 
+ ops.push(Operation::ProcessBlock { + slot: Slot::new(3), + root: get_root(5), + parent_root: get_root(1), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: Some(get_hash(101)), + execution_payload_block_hash: Some(get_hash(5)), + }); + ops.push(Operation::ProcessPayloadAttestation { + validator_index: 2, + block_root: get_root(5), + attestation_slot: Slot::new(3), + payload_present: true, + blob_data_available: false, + }); + ops.push(Operation::FindHead { + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + justified_state_balances: vec![1, 1, 1], + expected_head: get_root(5), + current_slot: Slot::new(0), + expected_payload_status: Some(PayloadStatus::Empty), + }); + ops.push(Operation::AssertPayloadWeights { + block_root: get_root(5), + expected_full_weight: 0, + expected_empty_weight: 0, + }); + + ForkChoiceTestDefinition { + finalized_block_slot: Slot::new(0), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + operations: ops, + execution_payload_parent_hash: Some(get_hash(42)), + execution_payload_block_hash: Some(get_hash(0)), + spec: Some(gloas_spec()), + } +} + +/// Test that CL attestation weight can flip the head between Full/Empty branches, +/// overriding the tiebreaker. +pub fn get_gloas_find_head_vote_transition_test_definition() -> ForkChoiceTestDefinition { + let mut ops = vec![]; + + // Competing branches with distinct payload ancestry (Full vs Empty from genesis). 
+ ops.push(Operation::ProcessBlock { + slot: Slot::new(1), + root: get_root(1), + parent_root: get_root(0), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: Some(get_hash(0)), + execution_payload_block_hash: Some(get_hash(1)), + }); + ops.push(Operation::ProcessBlock { + slot: Slot::new(1), + root: get_root(2), + parent_root: get_root(0), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: Some(get_hash(99)), + execution_payload_block_hash: Some(get_hash(2)), + }); + ops.push(Operation::ProcessBlock { + slot: Slot::new(2), + root: get_root(3), + parent_root: get_root(1), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: Some(get_hash(1)), + execution_payload_block_hash: Some(get_hash(3)), + }); + ops.push(Operation::ProcessBlock { + slot: Slot::new(2), + root: get_root(4), + parent_root: get_root(2), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: Some(get_hash(100)), + execution_payload_block_hash: Some(get_hash(4)), + }); + + // Mark root_1 as having received its execution payload so that + // its FULL virtual node exists in the Gloas fork choice tree. + ops.push(Operation::ProcessExecutionPayloadEnvelope { + block_root: get_root(1), + }); + + // Equal branch weights: tiebreak FULL picks branch rooted at 3. + ops.push(Operation::SetPayloadTiebreak { + block_root: get_root(0), + is_timely: true, + is_data_available: true, + }); + ops.push(Operation::FindHead { + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + justified_state_balances: vec![1], + expected_head: get_root(3), + current_slot: Slot::new(0), + expected_payload_status: None, + }); + + // CL attestation to Empty branch (root 4) from validator 0 → head flips to 4. 
+ ops.push(Operation::ProcessAttestation { + validator_index: 0, + block_root: get_root(4), + attestation_slot: Slot::new(3), + }); + ops.push(Operation::FindHead { + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + justified_state_balances: vec![1], + expected_head: get_root(4), + current_slot: Slot::new(0), + expected_payload_status: None, + }); + + // CL attestation back to Full branch (root 3) → head returns to 3. + ops.push(Operation::ProcessAttestation { + validator_index: 0, + block_root: get_root(3), + attestation_slot: Slot::new(4), + }); + ops.push(Operation::FindHead { + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + justified_state_balances: vec![1], + expected_head: get_root(3), + current_slot: Slot::new(0), + expected_payload_status: None, + }); + + ForkChoiceTestDefinition { + finalized_block_slot: Slot::new(0), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + operations: ops, + execution_payload_parent_hash: Some(get_hash(42)), + execution_payload_block_hash: Some(get_hash(0)), + spec: Some(gloas_spec()), + } +} + +/// CL attestation weight overrides payload preference tiebreaker. +pub fn get_gloas_weight_priority_over_payload_preference_test_definition() +-> ForkChoiceTestDefinition { + let mut ops = vec![]; + + // Build two branches where one child extends payload (Full) and the other doesn't (Empty). 
+ ops.push(Operation::ProcessBlock { + slot: Slot::new(1), + root: get_root(1), + parent_root: get_root(0), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: Some(get_hash(0)), + execution_payload_block_hash: Some(get_hash(1)), + }); + ops.push(Operation::ProcessBlock { + slot: Slot::new(1), + root: get_root(2), + parent_root: get_root(0), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: Some(get_hash(99)), + execution_payload_block_hash: Some(get_hash(2)), + }); + ops.push(Operation::ProcessBlock { + slot: Slot::new(2), + root: get_root(3), + parent_root: get_root(1), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: Some(get_hash(1)), + execution_payload_block_hash: Some(get_hash(3)), + }); + ops.push(Operation::ProcessBlock { + slot: Slot::new(2), + root: get_root(4), + parent_root: get_root(2), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: Some(get_hash(100)), + execution_payload_block_hash: Some(get_hash(4)), + }); + + // Mark root_1 as having received its execution payload so that + // its FULL virtual node exists in the Gloas fork choice tree. + ops.push(Operation::ProcessExecutionPayloadEnvelope { + block_root: get_root(1), + }); + + // Parent prefers Full on equal branch weights (tiebreaker). + ops.push(Operation::SetPayloadTiebreak { + block_root: get_root(0), + is_timely: true, + is_data_available: true, + }); + ops.push(Operation::FindHead { + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + justified_state_balances: vec![1], + expected_head: get_root(3), + current_slot: Slot::new(0), + expected_payload_status: None, + }); + + // Two CL attestations to the Empty branch make it strictly heavier, + // overriding the Full tiebreaker. 
+ ops.push(Operation::ProcessAttestation { + validator_index: 0, + block_root: get_root(4), + attestation_slot: Slot::new(3), + }); + ops.push(Operation::ProcessAttestation { + validator_index: 1, + block_root: get_root(4), + attestation_slot: Slot::new(3), + }); + ops.push(Operation::FindHead { + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + justified_state_balances: vec![1, 1], + expected_head: get_root(4), + current_slot: Slot::new(0), + expected_payload_status: None, + }); + + ForkChoiceTestDefinition { + finalized_block_slot: Slot::new(0), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + operations: ops, + execution_payload_parent_hash: Some(get_hash(42)), + execution_payload_block_hash: Some(get_hash(0)), + spec: Some(gloas_spec()), + } +} + +pub fn get_gloas_parent_empty_when_child_points_to_grandparent_test_definition() +-> ForkChoiceTestDefinition { + let mut ops = vec![]; + + // Build a three-block chain A -> B -> C (CL parent links). + // A: EL parent = genesis hash(0), EL hash = hash(1). + ops.push(Operation::ProcessBlock { + slot: Slot::new(1), + root: get_root(1), + parent_root: get_root(0), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: Some(get_hash(0)), + execution_payload_block_hash: Some(get_hash(1)), + }); + + // B: EL parent = hash(1), EL hash = hash(2). + ops.push(Operation::ProcessBlock { + slot: Slot::new(2), + root: get_root(2), + parent_root: get_root(1), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: Some(get_hash(1)), + execution_payload_block_hash: Some(get_hash(2)), + }); + + // C: CL parent is B, but EL parent points to A (hash 1), not B (hash 2). + // This models B's payload not arriving in time, so C records parent status as Empty. 
+ ops.push(Operation::ProcessBlock { + slot: Slot::new(3), + root: get_root(3), + parent_root: get_root(2), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: Some(get_hash(1)), + execution_payload_block_hash: Some(get_hash(3)), + }); + + ops.push(Operation::AssertParentPayloadStatus { + block_root: get_root(3), + expected_status: PayloadStatus::Empty, + }); + + ForkChoiceTestDefinition { + finalized_block_slot: Slot::new(0), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + operations: ops, + execution_payload_parent_hash: Some(get_hash(42)), + execution_payload_block_hash: Some(get_hash(0)), + spec: Some(gloas_spec()), + } +} + +/// Test interleaving of blocks, regular attestations, and tiebreaker. +/// +/// genesis → block 1 (Full) → block 3 +/// → block 2 (Empty) → block 4 +/// +/// With equal CL weight, tiebreaker determines which branch wins. +/// An extra CL attestation can override the tiebreaker. +pub fn get_gloas_interleaved_attestations_test_definition() -> ForkChoiceTestDefinition { + let mut ops = vec![]; + + // Step 1: Two competing blocks at slot 1. + ops.push(Operation::ProcessBlock { + slot: Slot::new(1), + root: get_root(1), + parent_root: get_root(0), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: Some(get_hash(0)), + execution_payload_block_hash: Some(get_hash(1)), + }); + ops.push(Operation::ProcessBlock { + slot: Slot::new(1), + root: get_root(2), + parent_root: get_root(0), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: Some(get_hash(99)), + execution_payload_block_hash: Some(get_hash(2)), + }); + + // Step 2: Regular attestations arrive, one per branch (equal CL weight). 
+ ops.push(Operation::ProcessAttestation { + validator_index: 0, + block_root: get_root(1), + attestation_slot: Slot::new(1), + }); + ops.push(Operation::ProcessAttestation { + validator_index: 1, + block_root: get_root(2), + attestation_slot: Slot::new(1), + }); + + // Step 3: Child blocks at slot 2. + ops.push(Operation::ProcessBlock { + slot: Slot::new(2), + root: get_root(3), + parent_root: get_root(1), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: Some(get_hash(1)), + execution_payload_block_hash: Some(get_hash(3)), + }); + ops.push(Operation::ProcessBlock { + slot: Slot::new(2), + root: get_root(4), + parent_root: get_root(2), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: Some(get_hash(100)), + execution_payload_block_hash: Some(get_hash(4)), + }); + + // Mark root_1 as having received its execution payload so that + // its FULL virtual node exists in the Gloas fork choice tree. + ops.push(Operation::ProcessExecutionPayloadEnvelope { + block_root: get_root(1), + }); + + // Step 4: Set tiebreaker to Empty on genesis → Empty branch wins. + ops.push(Operation::SetPayloadTiebreak { + block_root: get_root(0), + is_timely: false, + is_data_available: false, + }); + ops.push(Operation::FindHead { + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + justified_state_balances: vec![1, 1], + expected_head: get_root(4), + current_slot: Slot::new(1), + expected_payload_status: None, + }); + + // Step 5: Flip tiebreaker to Full → Full branch wins. 
+ ops.push(Operation::SetPayloadTiebreak { + block_root: get_root(0), + is_timely: true, + is_data_available: true, + }); + ops.push(Operation::FindHead { + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + justified_state_balances: vec![1, 1], + expected_head: get_root(3), + current_slot: Slot::new(100), + expected_payload_status: None, + }); + + // Step 6: Add extra CL weight to Empty branch → overrides Full tiebreaker. + ops.push(Operation::ProcessAttestation { + validator_index: 2, + block_root: get_root(4), + attestation_slot: Slot::new(3), + }); + ops.push(Operation::FindHead { + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + justified_state_balances: vec![1, 1, 1], + expected_head: get_root(4), + current_slot: Slot::new(100), + expected_payload_status: None, + }); + + ForkChoiceTestDefinition { + finalized_block_slot: Slot::new(0), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + operations: ops, + execution_payload_parent_hash: Some(get_hash(42)), + execution_payload_block_hash: Some(get_hash(0)), + spec: Some(gloas_spec()), + } +} + +/// Test interleaving of blocks, payload validation, and attestations. +/// +/// Scenario: +/// - Genesis block (slot 0) +/// - Block 1 (slot 1) extends genesis, Full chain +/// - Block 2 (slot 1) extends genesis, Empty chain +/// - Before payload arrives: payload_received is false for block 1 +/// - Process execution payload for block 1 → payload_received becomes true +/// - Payload attestations arrive voting block 1's payload as timely + available +/// - Head should follow block 1 because the PTC votes now count (payload_received = true) +pub fn get_gloas_payload_received_interleaving_test_definition() -> ForkChoiceTestDefinition { + let mut ops = vec![]; + + // Block 1 at slot 1: extends genesis Full chain. 
+ ops.push(Operation::ProcessBlock { + slot: Slot::new(1), + root: get_root(1), + parent_root: get_root(0), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: Some(get_hash(0)), + execution_payload_block_hash: Some(get_hash(1)), + }); + + // Block 2 at slot 1: extends genesis Empty chain (parent_hash doesn't match genesis EL hash). + ops.push(Operation::ProcessBlock { + slot: Slot::new(1), + root: get_root(2), + parent_root: get_root(0), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: Some(get_hash(99)), + execution_payload_block_hash: Some(get_hash(100)), + }); + + // Both children have parent_payload_status set correctly. + ops.push(Operation::AssertParentPayloadStatus { + block_root: get_root(1), + expected_status: PayloadStatus::Full, + }); + ops.push(Operation::AssertParentPayloadStatus { + block_root: get_root(2), + expected_status: PayloadStatus::Empty, + }); + + // Per spec `get_forkchoice_store`: genesis starts with payload_received=true + // (anchor block is in `payload_states`). + ops.push(Operation::AssertPayloadReceived { + block_root: get_root(0), + expected: true, + }); + + // Give one vote to each child so they have equal weight. + ops.push(Operation::ProcessAttestation { + validator_index: 0, + block_root: get_root(1), + attestation_slot: Slot::new(1), + }); + ops.push(Operation::ProcessAttestation { + validator_index: 1, + block_root: get_root(2), + attestation_slot: Slot::new(1), + }); + + // Equal weight, payload_received=true on genesis → tiebreaker uses + // payload_received (not previous slot, equal payload weights) → prefers Full. + // Block 1 (Full) wins because it matches the Full preference. 
+ ops.push(Operation::FindHead { + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + justified_state_balances: vec![1, 1], + expected_head: get_root(1), + current_slot: Slot::new(100), + expected_payload_status: None, + }); + + // ProcessExecutionPayloadEnvelope on genesis is a no-op (already received at init). + ops.push(Operation::ProcessExecutionPayloadEnvelope { + block_root: get_root(0), + }); + + ops.push(Operation::AssertPayloadReceived { + block_root: get_root(0), + expected: true, + }); + + // Set PTC votes on genesis as timely + data available (simulates PTC voting). + // This doesn't change the preference since genesis is not the previous slot + // (slot 0 + 1 != current_slot 100). + ops.push(Operation::SetPayloadTiebreak { + block_root: get_root(0), + is_timely: true, + is_data_available: true, + }); + + // Still prefers Full via payload_received tiebreaker → Block 1 (Full) wins. + ops.push(Operation::FindHead { + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + justified_state_balances: vec![1, 1], + expected_head: get_root(1), + current_slot: Slot::new(100), + expected_payload_status: None, + }); + + ForkChoiceTestDefinition { + finalized_block_slot: Slot::new(0), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + operations: ops, + execution_payload_parent_hash: Some(get_hash(42)), + execution_payload_block_hash: Some(get_hash(0)), + spec: Some(gloas_spec()), + } +} + +#[cfg(test)] +mod tests { + use super::*; + + fn gloas_fork_boundary_spec() -> ChainSpec { + let mut spec = MainnetEthSpec::default_spec(); + spec.proposer_score_boost = Some(50); + spec.gloas_fork_epoch = Some(Epoch::new(1)); + spec + } + + /// Gloas fork boundary: a chain starting pre-Gloas (V17 nodes) that crosses into + /// Gloas (V29 nodes). The head should advance through the fork boundary. 
+ /// + /// Parameters: + /// - `skip_first_gloas_slot`: if true, there is no block at the first Gloas slot (slot 32); + /// the first V29 block appears at slot 33. + /// - `first_gloas_block_full`: if true, the first V29 block extends the parent V17 node's + /// EL chain (Full parent payload status). If false, it doesn't (Empty). + fn get_gloas_fork_boundary_test_definition( + skip_first_gloas_slot: bool, + first_gloas_block_full: bool, + ) -> ForkChoiceTestDefinition { + let mut ops = vec![]; + + // Block at slot 31 — last pre-Gloas slot. Created as a V17 node because + // gloas_fork_epoch = 1 → Gloas starts at slot 32. + // + // The test harness sets execution_status = Optimistic(ExecutionBlockHash::from_root(root)), + // so this V17 node's EL block hash = ExecutionBlockHash::from_root(get_root(1)). + ops.push(Operation::ProcessBlock { + slot: Slot::new(31), + root: get_root(1), + parent_root: get_root(0), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: None, + execution_payload_block_hash: None, + }); + + // First Gloas block (V29 node). + let gloas_slot = if skip_first_gloas_slot { 33 } else { 32 }; + + // The first Gloas block should always have the pre-Gloas block as its execution parent, + // although this is currently not checked anywhere (the spec doesn't mention this). + ops.push(Operation::ProcessBlock { + slot: Slot::new(gloas_slot), + root: get_root(2), + parent_root: get_root(1), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: Some(get_hash(1)), + execution_payload_block_hash: Some(get_hash(2)), + }); + + // Parent payload status of fork boundary block should always be Empty. 
+ let expected_parent_status = PayloadStatus::Empty; + ops.push(Operation::AssertParentPayloadStatus { + block_root: get_root(2), + expected_status: expected_parent_status, + }); + + // Mark root 2's execution payload as received so the Full virtual child exists. + if first_gloas_block_full { + ops.push(Operation::ProcessExecutionPayloadEnvelope { + block_root: get_root(2), + }); + } + + // Extend the chain with another V29 block (Full child of root 2). + ops.push(Operation::ProcessBlock { + slot: Slot::new(gloas_slot + 1), + root: get_root(3), + parent_root: get_root(2), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: if first_gloas_block_full { + Some(get_hash(2)) + } else { + Some(get_hash(1)) + }, + execution_payload_block_hash: Some(get_hash(3)), + }); + + // Head should advance to the tip of the chain through the fork boundary. + ops.push(Operation::FindHead { + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + justified_state_balances: vec![1], + expected_head: get_root(3), + current_slot: Slot::new(gloas_slot + 1), + expected_payload_status: None, + }); + + ops.push(Operation::AssertParentPayloadStatus { + block_root: get_root(3), + expected_status: if first_gloas_block_full { + PayloadStatus::Full + } else { + PayloadStatus::Empty + }, + }); + + ForkChoiceTestDefinition { + finalized_block_slot: Slot::new(0), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + operations: ops, + // Genesis is V17 (slot 0 < Gloas fork slot 32), these are unused for V17. 
+ execution_payload_parent_hash: None, + execution_payload_block_hash: None, + spec: Some(gloas_fork_boundary_spec()), + } + } + + #[test] + fn fork_boundary_no_skip_full() { + get_gloas_fork_boundary_test_definition(false, true).run(); + } + + #[test] + fn fork_boundary_no_skip_empty() { + get_gloas_fork_boundary_test_definition(false, false).run(); + } + + #[test] + fn fork_boundary_skip_first_gloas_slot_full() { + get_gloas_fork_boundary_test_definition(true, true).run(); + } + + #[test] + fn fork_boundary_skip_first_gloas_slot_empty() { + get_gloas_fork_boundary_test_definition(true, false).run(); + } + + #[test] + fn chain_following() { + let test = get_gloas_chain_following_test_definition(); + test.run(); + } + + #[test] + fn payload_probe() { + let test = get_gloas_payload_probe_test_definition(); + test.run(); + } + + #[test] + fn find_head_vote_transition() { + let test = get_gloas_find_head_vote_transition_test_definition(); + test.run(); + } + + #[test] + fn weight_priority_over_payload_preference() { + let test = get_gloas_weight_priority_over_payload_preference_test_definition(); + test.run(); + } + + #[test] + fn parent_empty_when_child_points_to_grandparent() { + let test = get_gloas_parent_empty_when_child_points_to_grandparent_test_definition(); + test.run(); + } + + #[test] + fn interleaved_attestations() { + let test = get_gloas_interleaved_attestations_test_definition(); + test.run(); + } + + #[test] + fn payload_received_interleaving() { + let test = get_gloas_payload_received_interleaving_test_definition(); + test.run(); + } +} diff --git a/consensus/proto_array/src/fork_choice_test_definition/no_votes.rs b/consensus/proto_array/src/fork_choice_test_definition/no_votes.rs index d20eaacb99..7b5ee31c64 100644 --- a/consensus/proto_array/src/fork_choice_test_definition/no_votes.rs +++ b/consensus/proto_array/src/fork_choice_test_definition/no_votes.rs @@ -18,6 +18,8 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { }, 
justified_state_balances: balances.clone(), expected_head: Hash256::zero(), + current_slot: Slot::new(0), + expected_payload_status: None, }, // Add block 2 // @@ -36,6 +38,8 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { epoch: Epoch::new(1), root: Hash256::zero(), }, + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }, // Ensure the head is 2 // @@ -53,6 +57,8 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(2), + current_slot: Slot::new(0), + expected_payload_status: None, }, // Add block 1 // @@ -71,6 +77,8 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { epoch: Epoch::new(1), root: Hash256::zero(), }, + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }, // Ensure the head is still 2 // @@ -88,6 +96,8 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(2), + current_slot: Slot::new(0), + expected_payload_status: None, }, // Add block 3 // @@ -108,6 +118,8 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { epoch: Epoch::new(1), root: Hash256::zero(), }, + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }, // Ensure 2 is still the head // @@ -127,6 +139,8 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(2), + current_slot: Slot::new(0), + expected_payload_status: None, }, // Add block 4 // @@ -147,6 +161,8 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { epoch: Epoch::new(1), root: Hash256::zero(), }, + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }, // Ensure the head is 4. 
// @@ -166,6 +182,8 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(4), + current_slot: Slot::new(0), + expected_payload_status: None, }, // Add block 5 with a justified epoch of 2 // @@ -185,6 +203,8 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { epoch: Epoch::new(1), root: Hash256::zero(), }, + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }, // Ensure the head is now 5 whilst the justified epoch is 0. // @@ -206,6 +226,8 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(5), + current_slot: Slot::new(0), + expected_payload_status: None, }, // Ensure there is no error when starting from a block that has the // wrong justified epoch. @@ -232,6 +254,8 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(5), + current_slot: Slot::new(0), + expected_payload_status: None, }, // Set the justified epoch to 2 and the start block to 5 and ensure 5 is the head. 
// @@ -250,6 +274,8 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(5), + current_slot: Slot::new(0), + expected_payload_status: None, }, // Add block 6 // @@ -271,6 +297,8 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { epoch: Epoch::new(1), root: Hash256::zero(), }, + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }, // Ensure 6 is the head // @@ -291,6 +319,8 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { }, justified_state_balances: balances, expected_head: get_root(6), + current_slot: Slot::new(0), + expected_payload_status: None, }, ]; @@ -305,6 +335,9 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { root: Hash256::zero(), }, operations, + execution_payload_parent_hash: None, + execution_payload_block_hash: None, + spec: None, } } diff --git a/consensus/proto_array/src/fork_choice_test_definition/votes.rs b/consensus/proto_array/src/fork_choice_test_definition/votes.rs index 01994fff9b..ac97a592b7 100644 --- a/consensus/proto_array/src/fork_choice_test_definition/votes.rs +++ b/consensus/proto_array/src/fork_choice_test_definition/votes.rs @@ -16,6 +16,8 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(0), + current_slot: Slot::new(0), + expected_payload_status: None, }); // Add a block with a hash of 2. 
@@ -35,6 +37,8 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { epoch: Epoch::new(1), root: get_root(0), }, + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }); // Ensure that the head is 2 @@ -53,6 +57,8 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(2), + current_slot: Slot::new(0), + expected_payload_status: None, }); // Add a block with a hash of 1 that comes off the genesis block (this is a fork compared @@ -73,6 +79,8 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { epoch: Epoch::new(1), root: get_root(0), }, + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }); // Ensure that the head is still 2 @@ -91,6 +99,8 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(2), + current_slot: Slot::new(0), + expected_payload_status: None, }); // Add a vote to block 1 @@ -101,7 +111,7 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { ops.push(Operation::ProcessAttestation { validator_index: 0, block_root: get_root(1), - target_epoch: Epoch::new(2), + attestation_slot: Slot::new(2), }); // Ensure that the head is now 1, because 1 has a vote. 
@@ -120,6 +130,8 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(1), + current_slot: Slot::new(0), + expected_payload_status: None, }); // Add a vote to block 2 @@ -130,7 +142,7 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { ops.push(Operation::ProcessAttestation { validator_index: 1, block_root: get_root(2), - target_epoch: Epoch::new(2), + attestation_slot: Slot::new(2), }); // Ensure that the head is 2 since 1 and 2 both have a vote @@ -149,6 +161,8 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(2), + current_slot: Slot::new(0), + expected_payload_status: None, }); // Add block 3. @@ -170,6 +184,8 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { epoch: Epoch::new(1), root: get_root(0), }, + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }); // Ensure that the head is still 2 @@ -190,6 +206,8 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(2), + current_slot: Slot::new(0), + expected_payload_status: None, }); // Move validator #0 vote from 1 to 3 @@ -202,7 +220,7 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { ops.push(Operation::ProcessAttestation { validator_index: 0, block_root: get_root(3), - target_epoch: Epoch::new(3), + attestation_slot: Slot::new(3), }); // Ensure that the head is still 2 @@ -223,6 +241,8 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(2), + current_slot: Slot::new(0), + expected_payload_status: None, }); // Move validator #1 vote from 2 to 1 (this is an equivocation, but fork choice doesn't @@ -236,7 +256,7 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { 
ops.push(Operation::ProcessAttestation { validator_index: 1, block_root: get_root(1), - target_epoch: Epoch::new(3), + attestation_slot: Slot::new(3), }); // Ensure that the head is now 3 @@ -257,6 +277,8 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(3), + current_slot: Slot::new(0), + expected_payload_status: None, }); // Add block 4. @@ -280,6 +302,8 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { epoch: Epoch::new(1), root: get_root(0), }, + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }); // Ensure that the head is now 4 @@ -302,6 +326,8 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(4), + current_slot: Slot::new(0), + expected_payload_status: None, }); // Add block 5, which has a justified epoch of 2. @@ -327,19 +353,22 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { epoch: Epoch::new(2), root: get_root(1), }, + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }); - // Ensure that 5 is filtered out and the head stays at 4. + // Block 5 has incompatible finalized checkpoint, so `get_filtered_block_tree` + // excludes the entire 1->3->4->5 branch (no viable leaf). Head moves to 2. // // 0 // / \ - // 2 1 + // head-> 2 1 // | // 3 // | - // 4 <- head + // 4 // / - // 5 + // 5 <- incompatible finalized checkpoint ops.push(Operation::FindHead { justified_checkpoint: Checkpoint { epoch: Epoch::new(1), @@ -350,7 +379,9 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { root: get_root(0), }, justified_state_balances: balances.clone(), - expected_head: get_root(4), + expected_head: get_root(2), + current_slot: Slot::new(0), + expected_payload_status: None, }); // Add block 6, which has a justified epoch of 0. 
@@ -376,6 +407,8 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { epoch: Epoch::new(1), root: get_root(0), }, + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }); // Move both votes to 5. @@ -392,12 +425,12 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { ops.push(Operation::ProcessAttestation { validator_index: 0, block_root: get_root(5), - target_epoch: Epoch::new(4), + attestation_slot: Slot::new(4), }); ops.push(Operation::ProcessAttestation { validator_index: 1, block_root: get_root(5), - target_epoch: Epoch::new(4), + attestation_slot: Slot::new(4), }); // Add blocks 7, 8 and 9. Adding these blocks helps test the `best_descendant` @@ -430,6 +463,8 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { epoch: Epoch::new(2), root: get_root(5), }, + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }); ops.push(Operation::ProcessBlock { slot: Slot::new(0), @@ -443,6 +478,8 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { epoch: Epoch::new(2), root: get_root(5), }, + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }); ops.push(Operation::ProcessBlock { slot: Slot::new(0), @@ -456,6 +493,8 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { epoch: Epoch::new(2), root: get_root(5), }, + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }); // Ensure that 6 is the head, even though 5 has all the votes. 
This is testing to ensure @@ -487,6 +526,8 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(6), + current_slot: Slot::new(0), + expected_payload_status: None, }); // Change fork-choice justified epoch to 1, and the start block to 5 and ensure that 9 is @@ -520,6 +561,8 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(9), + current_slot: Slot::new(0), + expected_payload_status: None, }); // Change fork-choice justified epoch to 1, and the start block to 5 and ensure that 9 is @@ -545,12 +588,12 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { ops.push(Operation::ProcessAttestation { validator_index: 0, block_root: get_root(9), - target_epoch: Epoch::new(5), + attestation_slot: Slot::new(5), }); ops.push(Operation::ProcessAttestation { validator_index: 1, block_root: get_root(9), - target_epoch: Epoch::new(5), + attestation_slot: Slot::new(5), }); // Add block 10 @@ -582,6 +625,8 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { epoch: Epoch::new(2), root: get_root(5), }, + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }); // Double-check the head is still 9 (no diagram this time) @@ -596,6 +641,8 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(9), + current_slot: Slot::new(0), + expected_payload_status: None, }); // Introduce 2 more validators into the system @@ -621,12 +668,12 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { ops.push(Operation::ProcessAttestation { validator_index: 2, block_root: get_root(10), - target_epoch: Epoch::new(5), + attestation_slot: Slot::new(5), }); ops.push(Operation::ProcessAttestation { validator_index: 3, block_root: get_root(10), - target_epoch: Epoch::new(5), + attestation_slot: 
Slot::new(5), }); // Check the head is now 10. @@ -657,6 +704,8 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(10), + current_slot: Slot::new(0), + expected_payload_status: None, }); // Set the balances of the last two validators to zero @@ -682,6 +731,8 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(9), + current_slot: Slot::new(0), + expected_payload_status: None, }); // Set the balances of the last two validators back to 1 @@ -707,6 +758,8 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(10), + current_slot: Slot::new(0), + expected_payload_status: None, }); // Remove the last two validators @@ -733,6 +786,8 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(9), + current_slot: Slot::new(0), + expected_payload_status: None, }); // Ensure that pruning below the prune threshold does not prune. @@ -754,6 +809,8 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(9), + current_slot: Slot::new(0), + expected_payload_status: None, }); // Ensure that pruning above the prune threshold does prune. 
@@ -792,6 +849,8 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { }, justified_state_balances: balances.clone(), expected_head: get_root(9), + current_slot: Slot::new(0), + expected_payload_status: None, }); // Add block 11 @@ -817,6 +876,8 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { epoch: Epoch::new(2), root: get_root(5), }, + execution_payload_parent_hash: None, + execution_payload_block_hash: None, }); // Ensure the head is now 11 @@ -841,6 +902,8 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { }, justified_state_balances: balances, expected_head: get_root(11), + current_slot: Slot::new(0), + expected_payload_status: None, }); ForkChoiceTestDefinition { @@ -854,6 +917,9 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { root: get_root(0), }, operations: ops, + execution_payload_parent_hash: None, + execution_payload_block_hash: None, + spec: None, } } diff --git a/consensus/proto_array/src/lib.rs b/consensus/proto_array/src/lib.rs index 04e57d791b..702c014f07 100644 --- a/consensus/proto_array/src/lib.rs +++ b/consensus/proto_array/src/lib.rs @@ -8,13 +8,13 @@ mod ssz_container; pub use crate::justified_balances::JustifiedBalances; pub use crate::proto_array::{InvalidationOperation, calculate_committee_fraction}; pub use crate::proto_array_fork_choice::{ - Block, DisallowedReOrgOffsets, DoNotReOrg, ExecutionStatus, ProposerHeadError, - ProposerHeadInfo, ProtoArrayForkChoice, ReOrgThreshold, + Block, DisallowedReOrgOffsets, DoNotReOrg, ExecutionStatus, LatestMessage, PayloadStatus, + ProposerHeadError, ProposerHeadInfo, ProtoArrayForkChoice, ReOrgThreshold, }; pub use error::Error; pub mod core { pub use super::proto_array::{ProposerBoost, ProtoArray, ProtoNode}; pub use super::proto_array_fork_choice::VoteTracker; - pub use super::ssz_container::{SszContainer, SszContainerV28}; + pub use super::ssz_container::{SszContainer, SszContainerV28, SszContainerV29}; } diff --git 
a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 5bfcdae463..dfb43f5f34 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -1,12 +1,18 @@ use crate::error::InvalidBestNodeInfo; -use crate::{Block, ExecutionStatus, JustifiedBalances, error::Error}; +use crate::proto_array_fork_choice::IndexedForkChoiceNode; +use crate::{ + Block, ExecutionStatus, JustifiedBalances, LatestMessage, PayloadStatus, error::Error, +}; use fixed_bytes::FixedBytesExtended; use serde::{Deserialize, Serialize}; +use ssz::BitVector; use ssz::Encode; use ssz::four_byte_option_impl; use ssz_derive::{Decode, Encode}; use std::collections::{HashMap, HashSet}; +use std::time::Duration; use superstruct::superstruct; +use typenum::U512; use types::{ AttestationShufflingId, ChainSpec, Checkpoint, Epoch, EthSpec, ExecutionBlockHash, Hash256, Slot, @@ -17,6 +23,14 @@ use types::{ four_byte_option_impl!(four_byte_option_usize, usize); four_byte_option_impl!(four_byte_option_checkpoint, Checkpoint); +fn all_true_bitvector() -> BitVector { + let mut bv = BitVector::new(); + for i in 0..bv.len() { + let _ = bv.set(i, true); + } + bv +} + /// Defines an operation which may invalidate the `execution_status` of some nodes. #[derive(Clone, Debug)] pub enum InvalidationOperation { @@ -68,47 +82,151 @@ impl InvalidationOperation { } } -pub type ProtoNode = ProtoNodeV17; - #[superstruct( - variants(V17), - variant_attributes(derive(Clone, PartialEq, Debug, Encode, Decode, Serialize, Deserialize)), - no_enum + variants(V17, V29), + variant_attributes(derive(Clone, PartialEq, Debug, Encode, Decode, Serialize, Deserialize)) )] +#[derive(PartialEq, Debug, Encode, Decode, Serialize, Deserialize, Clone)] +#[ssz(enum_behaviour = "union")] pub struct ProtoNode { /// The `slot` is not necessary for `ProtoArray`, it just exists so external components can /// easily query the block slot. 
This is useful for upstream fork choice logic. + #[superstruct(getter(copy))] pub slot: Slot, /// The `state_root` is not necessary for `ProtoArray` either, it also just exists for upstream /// components (namely attestation verification). + #[superstruct(getter(copy))] pub state_root: Hash256, /// The root that would be used for the `attestation.data.target.root` if a LMD vote was cast /// for this block. /// /// The `target_root` is not necessary for `ProtoArray` either, it also just exists for upstream /// components (namely fork choice attestation verification). + #[superstruct(getter(copy))] pub target_root: Hash256, pub current_epoch_shuffling_id: AttestationShufflingId, pub next_epoch_shuffling_id: AttestationShufflingId, + #[superstruct(getter(copy))] pub root: Hash256, + #[superstruct(getter(copy))] #[ssz(with = "four_byte_option_usize")] pub parent: Option, - #[superstruct(only(V17))] + #[superstruct(only(V17, V29), partial_getter(copy))] pub justified_checkpoint: Checkpoint, - #[superstruct(only(V17))] + #[superstruct(only(V17, V29), partial_getter(copy))] pub finalized_checkpoint: Checkpoint, + #[superstruct(getter(copy))] pub weight: u64, + #[superstruct(only(V17), partial_getter(copy))] #[ssz(with = "four_byte_option_usize")] pub best_child: Option, + #[superstruct(only(V17), partial_getter(copy))] #[ssz(with = "four_byte_option_usize")] pub best_descendant: Option, /// Indicates if an execution node has marked this block as valid. Also contains the execution - /// block hash. + /// block hash. This is only used pre-Gloas. + #[superstruct(only(V17), partial_getter(copy))] pub execution_status: ExecutionStatus, + #[superstruct(getter(copy))] #[ssz(with = "four_byte_option_checkpoint")] pub unrealized_justified_checkpoint: Option, + #[superstruct(getter(copy))] #[ssz(with = "four_byte_option_checkpoint")] pub unrealized_finalized_checkpoint: Option, + + /// We track the parent payload status from which the current node was extended. 
+ #[superstruct(only(V29), partial_getter(copy))] + pub parent_payload_status: PayloadStatus, + #[superstruct(only(V29), partial_getter(copy))] + pub empty_payload_weight: u64, + #[superstruct(only(V29), partial_getter(copy))] + pub full_payload_weight: u64, + #[superstruct(only(V29), partial_getter(copy))] + pub execution_payload_block_hash: ExecutionBlockHash, + #[superstruct(only(V29), partial_getter(copy))] + pub execution_payload_parent_hash: ExecutionBlockHash, + /// Equivalent to spec's `block_timeliness[root][ATTESTATION_TIMELINESS_INDEX]`. + #[superstruct(only(V29), partial_getter(copy))] + pub block_timeliness_attestation_threshold: bool, + /// Equivalent to spec's `block_timeliness[root][PTC_TIMELINESS_INDEX]`. + #[superstruct(only(V29), partial_getter(copy))] + pub block_timeliness_ptc_threshold: bool, + /// Equivalent to spec's `store.payload_timeliness_vote[root]`. + /// PTC timeliness vote bitfield, indexed by PTC committee position. + /// Bit i set means PTC member i voted `payload_present = true`. + /// Tiebreak derived as: `num_set_bits() > ptc_size / 2`. + #[superstruct(only(V29))] + pub payload_timeliness_votes: BitVector, + /// Equivalent to spec's `store.payload_data_availability_vote[root]`. + /// PTC data availability vote bitfield, indexed by PTC committee position. + /// Bit i set means PTC member i voted `blob_data_available = true`. + /// Tiebreak derived as: `num_set_bits() > ptc_size / 2`. + #[superstruct(only(V29))] + pub payload_data_availability_votes: BitVector, + /// Whether the execution payload for this block has been received and validated locally. + /// Maps to `root in store.payload_states` in the spec. + #[superstruct(only(V29), partial_getter(copy))] + pub payload_received: bool, + /// The proposer index for this block, used by `should_apply_proposer_boost` + /// to detect equivocations at the parent's slot. 
+ #[superstruct(only(V29), partial_getter(copy))] + pub proposer_index: u64, + /// Weight from equivocating validators that voted for this block. + /// Used by `is_head_weak` to match the spec's monotonicity guarantee: + /// more attestations can only increase head weight, never decrease it. + #[superstruct(only(V29), partial_getter(copy))] + pub equivocating_attestation_score: u64, +} + +impl ProtoNode { + /// Generic version of spec's `parent_payload_status` that works for pre-Gloas nodes by + /// considering their parents Empty. + pub fn get_parent_payload_status(&self) -> PayloadStatus { + self.parent_payload_status().unwrap_or(PayloadStatus::Empty) + } + + pub fn is_parent_node_full(&self) -> bool { + self.get_parent_payload_status() == PayloadStatus::Full + } + + pub fn attestation_score(&self, payload_status: PayloadStatus) -> u64 { + match payload_status { + PayloadStatus::Pending => self.weight(), + // Pre-Gloas (V17) nodes have no payload separation — all weight + // is in `weight()`. Post-Gloas (V29) nodes track per-status weights. + PayloadStatus::Empty => self + .empty_payload_weight() + .unwrap_or_else(|_| self.weight()), + PayloadStatus::Full => self.full_payload_weight().unwrap_or_else(|_| self.weight()), + } + } + + pub fn is_payload_timely(&self) -> bool { + let Ok(node) = self.as_v29() else { + return false; + }; + + // Equivalent to `if root not in store.payload_states` in the spec. + if !node.payload_received { + return false; + } + + node.payload_timeliness_votes.num_set_bits() > E::ptc_size() / 2 + } + + pub fn is_payload_data_available(&self) -> bool { + let Ok(node) = self.as_v29() else { + return false; + }; + + // Equivalent to `if root not in store.payload_states` in the spec. 
+ if !node.payload_received { + return false; + } + + // TODO(gloas): add function on EthSpec for DATA_AVAILABILITY_TIMELY_THRESHOLD + node.payload_data_availability_votes.num_set_bits() > E::ptc_size() / 2 + } } #[derive(PartialEq, Debug, Encode, Decode, Serialize, Deserialize, Copy, Clone)] @@ -126,6 +244,122 @@ impl Default for ProposerBoost { } } +/// Accumulated score changes for a single proto-array node during a `find_head` pass. +/// +/// `delta` tracks the ordinary LMD-GHOST balance change applied to the concrete block node. +/// This is the same notion of weight that pre-gloas fork choice used. +/// +/// +/// Under gloas we also need to track how votes contribute to the parent's virtual payload +/// branches: +/// +/// - `empty_delta` is the balance change attributable to votes that support the `Empty` payload +/// interpretation of the node +/// - `full_delta` is the balance change attributable to votes that support the `Full` payload +/// interpretation of the node +/// +/// Votes in `Pending` state only affect `delta`; they do not contribute to either payload bucket. +/// During score application these payload deltas are propagated independently up the tree so that +/// ancestors can compare children using payload-aware tie breaking. +#[derive(Clone, PartialEq, Debug, Copy)] +pub struct NodeDelta { + /// Total weight change for the node. All votes contribute regardless of payload status. + pub delta: i64, + /// Weight change from `PayloadStatus::Empty` votes. + pub empty_delta: i64, + /// Weight change from `PayloadStatus::Full` votes. + pub full_delta: i64, + /// Weight from equivocating validators that voted for this node. + pub equivocating_attestation_delta: u64, +} + +impl NodeDelta { + /// Classify a vote into the payload bucket it contributes to for `block_slot`. 
+ /// + /// Per the gloas model: + /// + /// - a same-slot vote is `Pending` + /// - a later vote with `payload_present = true` is `Full` + /// - a later vote with `payload_present = false` is `Empty` + /// + /// This classification is used only for payload-aware accounting; all votes still contribute to + /// the aggregate `delta`. + pub fn payload_status( + vote_slot: Slot, + payload_present: bool, + block_slot: Slot, + ) -> PayloadStatus { + if vote_slot == block_slot { + PayloadStatus::Pending + } else if payload_present { + PayloadStatus::Full + } else { + PayloadStatus::Empty + } + } + + /// Add `balance` to the payload bucket selected by `status`. + /// + /// `Pending` votes do not affect payload buckets, so this becomes a no-op for that case. + pub fn add_payload_delta( + &mut self, + status: PayloadStatus, + balance: u64, + index: usize, + ) -> Result<(), Error> { + let field = match status { + PayloadStatus::Full => &mut self.full_delta, + PayloadStatus::Empty => &mut self.empty_delta, + PayloadStatus::Pending => return Ok(()), + }; + *field = field + .checked_add(balance as i64) + .ok_or(Error::DeltaOverflow(index))?; + Ok(()) + } + + /// Create a delta that only affects the aggregate block weight. + /// + /// This is useful for callers or tests that only care about ordinary LMD-GHOST weight changes + /// and do not need payload-aware accounting. + pub fn from_delta(delta: i64) -> Self { + Self { + delta, + empty_delta: 0, + full_delta: 0, + equivocating_attestation_delta: 0, + } + } + + /// Subtract `balance` from the payload bucket selected by `status`. + /// + /// `Pending` votes do not affect payload buckets, so this becomes a no-op for that case. 
+ pub fn sub_payload_delta( + &mut self, + status: PayloadStatus, + balance: u64, + index: usize, + ) -> Result<(), Error> { + let field = match status { + PayloadStatus::Full => &mut self.full_delta, + PayloadStatus::Empty => &mut self.empty_delta, + PayloadStatus::Pending => return Ok(()), + }; + *field = field + .checked_sub(balance as i64) + .ok_or(Error::DeltaOverflow(index))?; + Ok(()) + } +} + +/// Compare NodeDelta with i64 by comparing the aggregate `delta` field. +/// This is used by tests that only care about the total weight delta. +impl PartialEq for NodeDelta { + fn eq(&self, other: &i64) -> bool { + self.delta == *other + } +} + #[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] pub struct ProtoArray { /// Do not attempt to prune the tree unless it has at least this many nodes. Small prunes @@ -133,7 +367,6 @@ pub struct ProtoArray { pub prune_threshold: usize, pub nodes: Vec, pub indices: HashMap, - pub previous_proposer_boost: ProposerBoost, } impl ProtoArray { @@ -153,13 +386,7 @@ impl ProtoArray { #[allow(clippy::too_many_arguments)] pub fn apply_score_changes( &mut self, - mut deltas: Vec, - best_justified_checkpoint: Checkpoint, - best_finalized_checkpoint: Checkpoint, - new_justified_balances: &JustifiedBalances, - proposer_boost_root: Hash256, - current_slot: Slot, - spec: &ChainSpec, + mut deltas: Vec, ) -> Result<(), Error> { if deltas.len() != self.indices.len() { return Err(Error::InvalidDeltaLen { @@ -168,9 +395,6 @@ impl ProtoArray { }); } - // Default the proposer boost score to zero. - let mut proposer_score = 0; - // Iterate backwards through all indices in `self.nodes`. for node_index in (0..self.nodes.len()).rev() { let node = self @@ -181,116 +405,95 @@ impl ProtoArray { // There is no need to adjust the balances or manage parent of the zero hash since it // is an alias to the genesis block. The weight applied to the genesis block is // irrelevant as we _always_ choose it and it's impossible for it to have a parent. 
- if node.root == Hash256::zero() { + if node.root() == Hash256::zero() { continue; } - let execution_status_is_invalid = node.execution_status.is_invalid(); - - let mut node_delta = if execution_status_is_invalid { - // If the node has an invalid execution payload, reduce its weight to zero. - 0_i64 - .checked_sub(node.weight as i64) - .ok_or(Error::InvalidExecutionDeltaOverflow(node_index))? + let execution_status_is_invalid = if let Ok(proto_node) = node.as_v17() + && proto_node.execution_status.is_invalid() + { + true } else { - deltas - .get(node_index) - .copied() - .ok_or(Error::InvalidNodeDelta(node_index))? + false }; - // If we find the node for which the proposer boost was previously applied, decrease - // the delta by the previous score amount. - if self.previous_proposer_boost.root != Hash256::zero() - && self.previous_proposer_boost.root == node.root - // Invalid nodes will always have a weight of zero so there's no need to subtract - // the proposer boost delta. - && !execution_status_is_invalid - { - node_delta = node_delta - .checked_sub(self.previous_proposer_boost.score as i64) - .ok_or(Error::DeltaOverflow(node_index))?; - } - // If we find the node matching the current proposer boost root, increase - // the delta by the new score amount (unless the block has an invalid execution status). - // - // https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/fork-choice.md#get_latest_attesting_balance - if let Some(proposer_score_boost) = spec.proposer_score_boost - && proposer_boost_root != Hash256::zero() - && proposer_boost_root == node.root - // Invalid nodes (or their ancestors) should not receive a proposer boost. 
- && !execution_status_is_invalid - { - proposer_score = - calculate_committee_fraction::(new_justified_balances, proposer_score_boost) - .ok_or(Error::ProposerBoostOverflow(node_index))?; - node_delta = node_delta - .checked_add(proposer_score as i64) - .ok_or(Error::DeltaOverflow(node_index))?; - } + let node_delta = deltas + .get(node_index) + .copied() + .ok_or(Error::InvalidNodeDelta(node_index))?; + + let delta = if execution_status_is_invalid { + // If the node has an invalid execution payload, reduce its weight to zero. + 0_i64 + .checked_sub(node.weight() as i64) + .ok_or(Error::InvalidExecutionDeltaOverflow(node_index))? + } else { + node_delta.delta + }; + + let (node_empty_delta, node_full_delta) = if node.as_v29().is_ok() { + (node_delta.empty_delta, node_delta.full_delta) + } else { + (0, 0) + }; + + // Proposer boost is NOT applied here. It is computed on-the-fly + // during the virtual tree walk in `get_weight`, matching the spec's + // `get_weight` which adds boost separately from `get_attestation_score`. // Apply the delta to the node. if execution_status_is_invalid { // Invalid nodes always have a weight of 0. - node.weight = 0 - } else if node_delta < 0 { - // Note: I am conflicted about whether to use `saturating_sub` or `checked_sub` - // here. - // - // I can't think of any valid reason why `node_delta.abs()` should be greater than - // `node.weight`, so I have chosen `checked_sub` to try and fail-fast if there is - // some error. - // - // However, I am not fully convinced that some valid case for `saturating_sub` does - // not exist. - node.weight = node - .weight - .checked_sub(node_delta.unsigned_abs()) - .ok_or(Error::DeltaOverflow(node_index))?; + *node.weight_mut() = 0; } else { - node.weight = node - .weight - .checked_add(node_delta as u64) - .ok_or(Error::DeltaOverflow(node_index))?; + *node.weight_mut() = apply_delta(node.weight(), delta, node_index)?; + } + + // Apply post-Gloas score deltas. 
+ if let Ok(node) = node.as_v29_mut() { + node.empty_payload_weight = + apply_delta(node.empty_payload_weight, node_empty_delta, node_index)?; + node.full_payload_weight = + apply_delta(node.full_payload_weight, node_full_delta, node_index)?; + node.equivocating_attestation_score = node + .equivocating_attestation_score + .saturating_add(node_delta.equivocating_attestation_delta); } // Update the parent delta (if any). - if let Some(parent_index) = node.parent { + if let Some(parent_index) = node.parent() { let parent_delta = deltas .get_mut(parent_index) .ok_or(Error::InvalidParentDelta(parent_index))?; - // Back-propagate the nodes delta to its parent. - *parent_delta += node_delta; - } - } + // Back-propagate the node's delta to its parent. + parent_delta.delta = parent_delta + .delta + .checked_add(delta) + .ok_or(Error::DeltaOverflow(parent_index))?; - // After applying all deltas, update the `previous_proposer_boost`. - self.previous_proposer_boost = ProposerBoost { - root: proposer_boost_root, - score: proposer_score, - }; - - // A second time, iterate backwards through all indices in `self.nodes`. - // - // We _must_ perform these functions separate from the weight-updating loop above to ensure - // that we have a fully coherent set of weights before updating parent - // best-child/descendant. - for node_index in (0..self.nodes.len()).rev() { - let node = self - .nodes - .get_mut(node_index) - .ok_or(Error::InvalidNodeIndex(node_index))?; - - // If the node has a parent, try to update its best-child and best-descendant. - if let Some(parent_index) = node.parent { - self.maybe_update_best_child_and_descendant::( - parent_index, - node_index, - current_slot, - best_justified_checkpoint, - best_finalized_checkpoint, - )?; + // Route ALL child weight into the parent's FULL or EMPTY bucket + // based on the child's `parent_payload_status` (the ancestor path + // direction). 
If this child is on the FULL path from the parent, + // all weight supports the parent's FULL virtual node, and vice versa. + if let Ok(child_v29) = node.as_v29() { + if child_v29.parent_payload_status == PayloadStatus::Full { + parent_delta.full_delta = parent_delta + .full_delta + .checked_add(delta) + .ok_or(Error::DeltaOverflow(parent_index))?; + } else { + parent_delta.empty_delta = parent_delta + .empty_delta + .checked_add(delta) + .ok_or(Error::DeltaOverflow(parent_index))?; + } + } else { + // This is a v17 node with a v17 parent. + // There is no empty or full weight for v17 nodes, so nothing to propagate. + // In the tree walk, the v17 nodes have an empty child with 0 weight, which + // wins by default (it is the only child). + } } } @@ -304,71 +507,285 @@ impl ProtoArray { &mut self, block: Block, current_slot: Slot, - best_justified_checkpoint: Checkpoint, - best_finalized_checkpoint: Checkpoint, + spec: &ChainSpec, + time_into_slot: Duration, ) -> Result<(), Error> { // If the block is already known, simply ignore it. 
if self.indices.contains_key(&block.root) { return Ok(()); } - let node_index = self.nodes.len(); - - let node = ProtoNode { - slot: block.slot, - root: block.root, - target_root: block.target_root, - current_epoch_shuffling_id: block.current_epoch_shuffling_id, - next_epoch_shuffling_id: block.next_epoch_shuffling_id, - state_root: block.state_root, - parent: block - .parent_root - .and_then(|parent| self.indices.get(&parent).copied()), - justified_checkpoint: block.justified_checkpoint, - finalized_checkpoint: block.finalized_checkpoint, - weight: 0, - best_child: None, - best_descendant: None, - execution_status: block.execution_status, - unrealized_justified_checkpoint: block.unrealized_justified_checkpoint, - unrealized_finalized_checkpoint: block.unrealized_finalized_checkpoint, + // We do not allow `proposer_index=None` for calls to `on_block`, it is only non-optional + // for backwards-compatibility with pre-Gloas V17 proto nodes. + let Some(proposer_index) = block.proposer_index else { + return Err(Error::OnBlockRequiresProposerIndex); }; - // If the parent has an invalid execution status, return an error before adding the block to - // `self`. 
- if let Some(parent_index) = node.parent { + let node_index = self.nodes.len(); + + let parent_index = block + .parent_root + .and_then(|parent| self.indices.get(&parent).copied()); + + let node = if !spec.fork_name_at_slot::(block.slot).gloas_enabled() { + ProtoNode::V17(ProtoNodeV17 { + slot: block.slot, + root: block.root, + target_root: block.target_root, + current_epoch_shuffling_id: block.current_epoch_shuffling_id, + next_epoch_shuffling_id: block.next_epoch_shuffling_id, + state_root: block.state_root, + parent: parent_index, + justified_checkpoint: block.justified_checkpoint, + finalized_checkpoint: block.finalized_checkpoint, + weight: 0, + best_child: None, + best_descendant: None, + execution_status: block.execution_status, + unrealized_justified_checkpoint: block.unrealized_justified_checkpoint, + unrealized_finalized_checkpoint: block.unrealized_finalized_checkpoint, + }) + } else { + let is_current_slot = current_slot == block.slot; + + let execution_payload_block_hash = + block + .execution_payload_block_hash + .ok_or(Error::BrokenBlock { + block_root: block.root, + })?; + + let execution_payload_parent_hash = + block + .execution_payload_parent_hash + .ok_or(Error::BrokenBlock { + block_root: block.root, + })?; + + let parent_payload_status: PayloadStatus = + if let Some(parent_node) = parent_index.and_then(|idx| self.nodes.get(idx)) { + match parent_node { + ProtoNode::V29(v29) => { + // Both parent and child are Gloas blocks. The parent is full if the + // block hash in the parent node matches the parent block hash in the + // child bid. + if execution_payload_parent_hash == v29.execution_payload_block_hash { + PayloadStatus::Full + } else { + PayloadStatus::Empty + } + } + ProtoNode::V17(_) => { + // Parent is pre-Gloas, pre-Gloas blocks are treated as having Empty + // payload status. This case is reached during the fork transition. 
+ PayloadStatus::Empty + } + } + } else { + // TODO(gloas): re-assess this assumption + // Parent is missing (genesis or pruned due to finalization). Default to Full + // since this path should only be hit at Gloas genesis. + PayloadStatus::Full + }; + + // Per spec `get_forkchoice_store`: the anchor (genesis) block has + // its payload state initialized (`payload_states = {anchor_root: ...}`). + // Without `payload_received = true` on genesis, the FULL virtual + // child doesn't exist in the spec's `get_node_children`, making all + // Full concrete children of genesis unreachable in `get_head`. + let is_genesis = parent_index.is_none(); + + ProtoNode::V29(ProtoNodeV29 { + slot: block.slot, + root: block.root, + target_root: block.target_root, + current_epoch_shuffling_id: block.current_epoch_shuffling_id, + next_epoch_shuffling_id: block.next_epoch_shuffling_id, + state_root: block.state_root, + parent: parent_index, + justified_checkpoint: block.justified_checkpoint, + finalized_checkpoint: block.finalized_checkpoint, + weight: 0, + unrealized_justified_checkpoint: block.unrealized_justified_checkpoint, + unrealized_finalized_checkpoint: block.unrealized_finalized_checkpoint, + parent_payload_status, + empty_payload_weight: 0, + full_payload_weight: 0, + execution_payload_block_hash, + execution_payload_parent_hash, + // Per spec `get_forkchoice_store`: the anchor block's PTC votes are + // initialized to all-True, ensuring `is_payload_timely` and + // `is_payload_data_available` return true for the anchor. + payload_timeliness_votes: if is_genesis { + all_true_bitvector() + } else { + BitVector::default() + }, + payload_data_availability_votes: if is_genesis { + all_true_bitvector() + } else { + BitVector::default() + }, + payload_received: is_genesis, + proposer_index, + // Spec: `record_block_timeliness` + `get_forkchoice_store`. + // Anchor gets [True, True]. Others computed from time_into_slot. 
+ block_timeliness_attestation_threshold: is_genesis + || (is_current_slot + && time_into_slot < spec.get_attestation_due::(current_slot)), + block_timeliness_ptc_threshold: is_genesis + || (is_current_slot && time_into_slot < spec.get_payload_attestation_due()), + equivocating_attestation_score: 0, + }) + }; + + // If the parent has an invalid execution status, return an error before adding the + // block to `self`. This applies only when the parent is a V17 node with execution tracking. + if let Some(parent_index) = node.parent() { let parent = self .nodes .get(parent_index) .ok_or(Error::InvalidNodeIndex(parent_index))?; - if parent.execution_status.is_invalid() { + + // Execution status tracking only exists on V17 (pre-Gloas) nodes. + if let Ok(v17) = parent.as_v17() + && v17.execution_status.is_invalid() + { return Err(Error::ParentExecutionStatusIsInvalid { block_root: block.root, - parent_root: parent.root, + parent_root: parent.root(), }); } } - self.indices.insert(node.root, node_index); + self.indices.insert(node.root(), node_index); self.nodes.push(node.clone()); - if let Some(parent_index) = node.parent { - self.maybe_update_best_child_and_descendant::( - parent_index, - node_index, - current_slot, - best_justified_checkpoint, - best_finalized_checkpoint, - )?; - - if matches!(block.execution_status, ExecutionStatus::Valid(_)) { - self.propagate_execution_payload_validation_by_index(parent_index)?; - } + if let Some(parent_index) = node.parent() + && matches!(block.execution_status, ExecutionStatus::Valid(_)) + { + self.propagate_execution_payload_validation_by_index(parent_index)?; } Ok(()) } + /// Spec: `is_head_weak`. + // TODO(gloas): the spec adds weight from equivocating validators in the + // head slot's *committees*, regardless of who they voted for. We approximate + // with `equivocating_attestation_score` which only tracks equivocating + // validators whose vote pointed at this block. 
This under-counts when an + // equivocating validator is in the committee but voted for a different fork, + // which could allow a re-org the spec wouldn't. In practice the deviation + // is small — it requires equivocating validators voting for competing forks + // AND the head weight to be exactly at the reorg threshold boundary. + // Fixing this properly requires committee computation from BeaconState, + // which is not available in proto_array. The fix would be to pass + // pre-computed equivocating committee weight from the beacon_chain caller. + fn is_head_weak( + &self, + head_node: &ProtoNode, + justified_balances: &JustifiedBalances, + spec: &ChainSpec, + ) -> bool { + let reorg_threshold = calculate_committee_fraction::( + justified_balances, + spec.reorg_head_weight_threshold.unwrap_or(20), + ) + .unwrap_or(0); + + let head_weight = head_node + .attestation_score(PayloadStatus::Pending) + .saturating_add(head_node.equivocating_attestation_score().unwrap_or(0)); + + head_weight < reorg_threshold + } + + /// Spec's `should_apply_proposer_boost` for Gloas. + /// + /// Returns `true` if the proposer boost should be kept. Returns `false` if the + /// boost should be subtracted (invalidated) because the parent is weak and there + /// are no equivocating blocks at the parent's slot. 
+ fn should_apply_proposer_boost( + &self, + proposer_boost_root: Hash256, + justified_balances: &JustifiedBalances, + spec: &ChainSpec, + ) -> Result { + if proposer_boost_root.is_zero() { + return Ok(false); + } + + let block_index = *self + .indices + .get(&proposer_boost_root) + .ok_or(Error::NodeUnknown(proposer_boost_root))?; + let block = self + .nodes + .get(block_index) + .ok_or(Error::InvalidNodeIndex(block_index))?; + let parent_index = block + .parent() + .ok_or(Error::NodeUnknown(proposer_boost_root))?; + let parent = self + .nodes + .get(parent_index) + .ok_or(Error::InvalidNodeIndex(parent_index))?; + let slot = block.slot(); + + // Apply proposer boost if `parent` is not from the previous slot + if parent.slot().saturating_add(1_u64) < slot { + return Ok(true); + } + + // Apply proposer boost if `parent` is not weak + if !self.is_head_weak::(parent, justified_balances, spec) { + return Ok(true); + } + + // Parent is weak. Apply boost unless there's an equivocating block at + // the parent's slot from the same proposer. + let parent_slot = parent.slot(); + let parent_root = parent.root(); + let parent_proposer = parent.proposer_index(); + + let has_equivocation = self.nodes.iter().any(|node| { + if let Ok(timeliness) = node.block_timeliness_ptc_threshold() + && let Ok(proposer_index) = node.proposer_index() + { + timeliness + && Ok(proposer_index) == parent_proposer + && node.slot() == parent_slot + && node.root() != parent_root + } else { + // Pre-Gloas. + false + } + }); + + Ok(!has_equivocation) + } + + /// Process a valid execution payload envelope for a Gloas block. + /// + /// Sets `payload_received` to true. 
+ pub fn on_valid_payload_envelope_received(&mut self, block_root: Hash256) -> Result<(), Error> { + let index = *self + .indices + .get(&block_root) + .ok_or(Error::NodeUnknown(block_root))?; + let node = self + .nodes + .get_mut(index) + .ok_or(Error::InvalidNodeIndex(index))?; + let v29 = node + .as_v29_mut() + .map_err(|_| Error::InvalidNodeVariant { block_root })?; + v29.payload_received = true; + + Ok(()) + } + /// Updates the `block_root` and all ancestors to have validated execution payloads. /// /// Returns an error if: @@ -388,6 +805,8 @@ impl ProtoArray { /// Updates the `verified_node_index` and all ancestors to have validated execution payloads. /// + /// This function is a no-op if called for a Gloas block. + /// /// Returns an error if: /// /// - The `verified_node_index` is unknown. @@ -402,32 +821,39 @@ impl ProtoArray { .nodes .get_mut(index) .ok_or(Error::InvalidNodeIndex(index))?; - let parent_index = match node.execution_status { - // We have reached a node that we already know is valid. No need to iterate further - // since we assume an ancestors have already been set to valid. - ExecutionStatus::Valid(_) => return Ok(()), - // We have reached an irrelevant node, this node is prior to a terminal execution - // block. There's no need to iterate further, it's impossible for this block to have - // any relevant ancestors. - ExecutionStatus::Irrelevant(_) => return Ok(()), - // The block has an unknown status, set it to valid since any ancestor of a valid - // payload can be considered valid. - ExecutionStatus::Optimistic(payload_block_hash) => { - node.execution_status = ExecutionStatus::Valid(payload_block_hash); - if let Some(parent_index) = node.parent { - parent_index - } else { - // We have reached the root block, iteration complete. - return Ok(()); + let parent_index = match node { + ProtoNode::V17(node) => match node.execution_status { + // We have reached a node that we already know is valid. 
No need to iterate further
+                // since we assume all ancestors have already been set to valid.
+                ExecutionStatus::Valid(_) => return Ok(()),
+                // We have reached an irrelevant node, this node is prior to a terminal execution
+                // block. There's no need to iterate further, it's impossible for this block to have
+                // any relevant ancestors.
+                ExecutionStatus::Irrelevant(_) => return Ok(()),
+                // The block has an unknown status, set it to valid since any ancestor of a valid
+                // payload can be considered valid.
+                ExecutionStatus::Optimistic(payload_block_hash) => {
+                    node.execution_status = ExecutionStatus::Valid(payload_block_hash);
+                    if let Some(parent_index) = node.parent {
+                        parent_index
+                    } else {
+                        // We have reached the root block, iteration complete.
+                        return Ok(());
+                    }
                }
-            }
-            // An ancestor of the valid payload was invalid. This is a serious error which
-            // indicates a consensus failure in the execution node. This is unrecoverable.
-            ExecutionStatus::Invalid(ancestor_payload_block_hash) => {
-                return Err(Error::InvalidAncestorOfValidPayload {
-                    ancestor_block_root: node.root,
-                    ancestor_payload_block_hash,
-                });
+                // An ancestor of the valid payload was invalid. This is a serious error which
+                // indicates a consensus failure in the execution node. This is unrecoverable.
+                ExecutionStatus::Invalid(ancestor_payload_block_hash) => {
+                    return Err(Error::InvalidAncestorOfValidPayload {
+                        ancestor_block_root: node.root,
+                        ancestor_payload_block_hash,
+                    });
+                }
+            },
+            // Gloas nodes should not be marked valid by this function, which exists only
+            // for pre-Gloas fork choice.
+            ProtoNode::V29(_) => {
+                return Ok(());
             }
         };
 
@@ -438,6 +864,7 @@ impl ProtoArray {
     /// Invalidate zero or more blocks, as specified by the `InvalidationOperation`.
     ///
     /// See the documentation of `InvalidationOperation` for usage.
+    // TODO(gloas): this needs some tests for the mixed Gloas/pre-Gloas case. 
pub fn propagate_execution_payload_invalidation( &mut self, op: &InvalidationOperation, @@ -484,10 +911,11 @@ impl ProtoArray { .get_mut(index) .ok_or(Error::InvalidNodeIndex(index))?; - match node.execution_status { - ExecutionStatus::Valid(hash) - | ExecutionStatus::Invalid(hash) - | ExecutionStatus::Optimistic(hash) => { + let node_execution_status = node.execution_status(); + match node_execution_status { + Ok(ExecutionStatus::Valid(hash)) + | Ok(ExecutionStatus::Invalid(hash)) + | Ok(ExecutionStatus::Optimistic(hash)) => { // If we're no longer processing the `head_block_root` and the last valid // ancestor is unknown, exit this loop and proceed to invalidate and // descendants of `head_block_root`/`latest_valid_ancestor_root`. @@ -496,74 +924,51 @@ impl ProtoArray { // supplied, don't validate any ancestors. The alternative is to invalidate // *all* ancestors, which would likely involve shutting down the client due to // an invalid justified checkpoint. - if !latest_valid_ancestor_is_descendant && node.root != head_block_root { + if !latest_valid_ancestor_is_descendant && node.root() != head_block_root { break; } else if op.latest_valid_ancestor() == Some(hash) { - // If the `best_child` or `best_descendant` of the latest valid hash was - // invalidated, set those fields to `None`. - // - // In theory, an invalid `best_child` necessarily infers an invalid - // `best_descendant`. However, we check each variable independently to - // defend against errors which might result in an invalid block being set as - // head. - if node - .best_child - .is_some_and(|i| invalidated_indices.contains(&i)) - { - node.best_child = None - } - if node - .best_descendant - .is_some_and(|i| invalidated_indices.contains(&i)) - { - node.best_descendant = None - } - + // Reached latest valid block, stop invalidating further. 
break; } } - ExecutionStatus::Irrelevant(_) => break, + Ok(ExecutionStatus::Irrelevant(_)) => break, + Err(_) => break, } // Only invalidate the head block if either: // // - The head block was specifically indicated to be invalidated. // - The latest valid hash is a known ancestor. - if node.root != head_block_root + if node.root() != head_block_root || op.invalidate_block_root() || latest_valid_ancestor_is_descendant { - match &node.execution_status { + match node.execution_status() { // It's illegal for an execution client to declare that some previously-valid block // is now invalid. This is a consensus failure on their behalf. - ExecutionStatus::Valid(hash) => { + Ok(ExecutionStatus::Valid(hash)) => { return Err(Error::ValidExecutionStatusBecameInvalid { - block_root: node.root, - payload_block_hash: *hash, + block_root: node.root(), + payload_block_hash: hash, }); } - ExecutionStatus::Optimistic(hash) => { + Ok(ExecutionStatus::Optimistic(hash)) => { invalidated_indices.insert(index); - node.execution_status = ExecutionStatus::Invalid(*hash); - - // It's impossible for an invalid block to lead to a "best" block, so set these - // fields to `None`. - // - // Failing to set these values will result in `Self::node_leads_to_viable_head` - // returning `false` for *valid* ancestors of invalid blocks. - node.best_child = None; - node.best_descendant = None; + if let ProtoNode::V17(node) = node { + node.execution_status = ExecutionStatus::Invalid(hash); + } } // The block is already invalid, but keep going backwards to ensure all ancestors // are updated. - ExecutionStatus::Invalid(_) => (), + Ok(ExecutionStatus::Invalid(_)) => (), // This block is pre-merge, therefore it has no execution status. Nor do its // ancestors. 
- ExecutionStatus::Irrelevant(_) => break, + Ok(ExecutionStatus::Irrelevant(_)) => break, + Err(_) => break, } } - if let Some(parent_index) = node.parent { + if let Some(parent_index) = node.parent() { index = parent_index } else { // The root of the block tree has been reached (aka the finalized block), without @@ -597,24 +1002,27 @@ impl ProtoArray { .get_mut(index) .ok_or(Error::InvalidNodeIndex(index))?; - if let Some(parent_index) = node.parent + if let Some(parent_index) = node.parent() && invalidated_indices.contains(&parent_index) { - match &node.execution_status { - ExecutionStatus::Valid(hash) => { + match node.execution_status() { + Ok(ExecutionStatus::Valid(hash)) => { return Err(Error::ValidExecutionStatusBecameInvalid { - block_root: node.root, - payload_block_hash: *hash, + block_root: node.root(), + payload_block_hash: hash, }); } - ExecutionStatus::Optimistic(hash) | ExecutionStatus::Invalid(hash) => { - node.execution_status = ExecutionStatus::Invalid(*hash) + Ok(ExecutionStatus::Optimistic(hash)) | Ok(ExecutionStatus::Invalid(hash)) => { + if let ProtoNode::V17(node) = node { + node.execution_status = ExecutionStatus::Invalid(hash) + } } - ExecutionStatus::Irrelevant(_) => { + Ok(ExecutionStatus::Irrelevant(_)) => { return Err(Error::IrrelevantDescendant { - block_root: node.root, + block_root: node.root(), }); } + Err(_) => (), } invalidated_indices.insert(index); @@ -632,13 +1040,17 @@ impl ProtoArray { /// been called without a subsequent `Self::apply_score_changes` call. This is because /// `on_new_block` does not attempt to walk backwards through the tree and update the /// best-child/best-descendant links. 
+ #[allow(clippy::too_many_arguments)] pub fn find_head( &self, justified_root: &Hash256, current_slot: Slot, best_justified_checkpoint: Checkpoint, best_finalized_checkpoint: Checkpoint, - ) -> Result { + proposer_boost_root: Hash256, + justified_balances: &JustifiedBalances, + spec: &ChainSpec, + ) -> Result<(Hash256, PayloadStatus), Error> { let justified_index = self .indices .get(justified_root) @@ -652,25 +1064,30 @@ impl ProtoArray { // Since there are no valid descendants of a justified block with an invalid execution // payload, there would be no head to choose from. - // - // Fork choice is effectively broken until a new justified root is set. It might not be - // practically possible to set a new justified root if we are unable to find a new head. - // - // This scenario is *unsupported*. It represents a serious consensus failure. - if justified_node.execution_status.is_invalid() { + // Execution status tracking only exists on V17 (pre-Gloas) nodes. + if let Ok(v17) = justified_node.as_v17() + && v17.execution_status.is_invalid() + { return Err(Error::InvalidJustifiedCheckpointExecutionStatus { justified_root: *justified_root, }); } - let best_descendant_index = justified_node.best_descendant.unwrap_or(justified_index); - - let best_node = self - .nodes - .get(best_descendant_index) - .ok_or(Error::InvalidBestDescendant(best_descendant_index))?; + let best_fc_node = self.find_head_walk::( + justified_index, + current_slot, + best_justified_checkpoint, + best_finalized_checkpoint, + proposer_boost_root, + justified_balances, + spec, + )?; // Perform a sanity check that the node is indeed valid to be the head. 
+ let best_node = self + .nodes + .get(best_fc_node.proto_node_index) + .ok_or(Error::InvalidNodeIndex(best_fc_node.proto_node_index))?; if !self.node_is_viable_for_head::( best_node, current_slot, @@ -682,13 +1099,381 @@ impl ProtoArray { start_root: *justified_root, justified_checkpoint: best_justified_checkpoint, finalized_checkpoint: best_finalized_checkpoint, - head_root: best_node.root, - head_justified_checkpoint: best_node.justified_checkpoint, - head_finalized_checkpoint: best_node.finalized_checkpoint, + head_root: best_node.root(), + head_justified_checkpoint: *best_node.justified_checkpoint(), + head_finalized_checkpoint: *best_node.finalized_checkpoint(), }))); } - Ok(best_node.root) + Ok((best_fc_node.root, best_fc_node.payload_status)) + } + + /// Spec: `get_filtered_block_tree`. + /// + /// Returns the set of node indices on viable branches — those with at least + /// one leaf descendant with correct justified/finalized checkpoints. + fn get_filtered_block_tree( + &self, + start_index: usize, + current_slot: Slot, + best_justified_checkpoint: Checkpoint, + best_finalized_checkpoint: Checkpoint, + ) -> HashSet { + let mut viable = HashSet::new(); + self.filter_block_tree::( + start_index, + current_slot, + best_justified_checkpoint, + best_finalized_checkpoint, + &mut viable, + ); + viable + } + + /// Spec: `filter_block_tree`. + fn filter_block_tree( + &self, + node_index: usize, + current_slot: Slot, + best_justified_checkpoint: Checkpoint, + best_finalized_checkpoint: Checkpoint, + viable: &mut HashSet, + ) -> bool { + let Some(node) = self.nodes.get(node_index) else { + return false; + }; + + // Skip invalid children — they aren't in store.blocks in the spec. 
+ let children: Vec = self + .nodes + .iter() + .enumerate() + .filter(|(_, child)| { + child.parent() == Some(node_index) + && !child + .execution_status() + .is_ok_and(|status| status.is_invalid()) + }) + .map(|(i, _)| i) + .collect(); + + if !children.is_empty() { + // Evaluate ALL children (no short-circuit) to mark all viable branches. + let any_viable = children + .iter() + .map(|&child_index| { + self.filter_block_tree::( + child_index, + current_slot, + best_justified_checkpoint, + best_finalized_checkpoint, + viable, + ) + }) + .collect::>() + .into_iter() + .any(|v| v); + if any_viable { + viable.insert(node_index); + return true; + } + return false; + } + + // Leaf node: check viability. + if self.node_is_viable_for_head::( + node, + current_slot, + best_justified_checkpoint, + best_finalized_checkpoint, + ) { + viable.insert(node_index); + return true; + } + false + } + + /// Spec: `get_head`. + #[allow(clippy::too_many_arguments)] + fn find_head_walk( + &self, + start_index: usize, + current_slot: Slot, + best_justified_checkpoint: Checkpoint, + best_finalized_checkpoint: Checkpoint, + proposer_boost_root: Hash256, + justified_balances: &JustifiedBalances, + spec: &ChainSpec, + ) -> Result { + let mut head = IndexedForkChoiceNode { + root: best_justified_checkpoint.root, + proto_node_index: start_index, + payload_status: PayloadStatus::Pending, + }; + + // Spec: `get_filtered_block_tree`. + let viable_nodes = self.get_filtered_block_tree::( + start_index, + current_slot, + best_justified_checkpoint, + best_finalized_checkpoint, + ); + + // Compute once rather than per-child per-level. + let apply_proposer_boost = + self.should_apply_proposer_boost::(proposer_boost_root, justified_balances, spec)?; + + loop { + let children: Vec<_> = self + .get_node_children(&head)? 
+ .into_iter() + .filter(|(fc_node, _)| viable_nodes.contains(&fc_node.proto_node_index)) + .collect(); + + if children.is_empty() { + return Ok(head); + } + + head = children + .into_iter() + .map(|(child, ref proto_node)| -> Result<_, Error> { + let weight = self.get_weight::( + &child, + proto_node, + apply_proposer_boost, + proposer_boost_root, + current_slot, + justified_balances, + spec, + )?; + let payload_status_tiebreaker = self.get_payload_status_tiebreaker::( + &child, + proto_node, + current_slot, + proposer_boost_root, + )?; + Ok((child, weight, payload_status_tiebreaker)) + }) + .collect::, Error>>()? + .into_iter() + .max_by_key(|(child, weight, payload_status_tiebreaker)| { + (*weight, child.root, *payload_status_tiebreaker) + }) + .map(|(child, _, _)| child) + .ok_or(Error::NoViableChildren)?; + } + } + + /// Spec: `get_weight`. + #[allow(clippy::too_many_arguments)] + fn get_weight( + &self, + fc_node: &IndexedForkChoiceNode, + proto_node: &ProtoNode, + apply_proposer_boost: bool, + proposer_boost_root: Hash256, + current_slot: Slot, + justified_balances: &JustifiedBalances, + spec: &ChainSpec, + ) -> Result { + if fc_node.payload_status == PayloadStatus::Pending + || proto_node.slot().saturating_add(1_u64) != current_slot + { + let attestation_score = proto_node.attestation_score(fc_node.payload_status); + + if !apply_proposer_boost { + return Ok(attestation_score); + } + + // Spec: proposer boost is treated as a synthetic vote. + let message = LatestMessage { + slot: current_slot, + root: proposer_boost_root, + payload_present: false, + }; + let proposer_score = if self.is_supporting_vote(fc_node, &message)? { + get_proposer_score::(justified_balances, spec)? + } else { + 0 + }; + + Ok(attestation_score.saturating_add(proposer_score)) + } else { + Ok(0) + } + } + + /// Spec: `is_supporting_vote`. 
+ fn is_supporting_vote( + &self, + node: &IndexedForkChoiceNode, + message: &LatestMessage, + ) -> Result { + let block = self + .nodes + .get(node.proto_node_index) + .ok_or(Error::InvalidNodeIndex(node.proto_node_index))?; + + if node.root == message.root { + if node.payload_status == PayloadStatus::Pending { + return Ok(true); + } + // For the proposer boost case: message.slot == current_slot == block.slot, + // so this returns false — boost does not support EMPTY/FULL of the + // boosted block itself, only its ancestors. + if message.slot <= block.slot() { + return Ok(false); + } + if message.payload_present { + Ok(node.payload_status == PayloadStatus::Full) + } else { + Ok(node.payload_status == PayloadStatus::Empty) + } + } else { + let ancestor = self.get_ancestor_node(message.root, block.slot())?; + Ok(node.root == ancestor.root + && (node.payload_status == PayloadStatus::Pending + || node.payload_status == ancestor.payload_status)) + } + } + + /// Spec: `get_ancestor` (modified to return ForkChoiceNode with payload_status). + fn get_ancestor_node(&self, root: Hash256, slot: Slot) -> Result { + let index = *self.indices.get(&root).ok_or(Error::NodeUnknown(root))?; + let block = self + .nodes + .get(index) + .ok_or(Error::InvalidNodeIndex(index))?; + + if block.slot() <= slot { + return Ok(IndexedForkChoiceNode { + root, + proto_node_index: index, + payload_status: PayloadStatus::Pending, + }); + } + + // Walk up until we find the ancestor at `slot`. 
+ let mut child_index = index; + let mut current_index = block.parent().ok_or(Error::NodeUnknown(block.root()))?; + + loop { + let current = self + .nodes + .get(current_index) + .ok_or(Error::InvalidNodeIndex(current_index))?; + + if current.slot() <= slot { + let child = self + .nodes + .get(child_index) + .ok_or(Error::InvalidNodeIndex(child_index))?; + return Ok(IndexedForkChoiceNode { + root: current.root(), + proto_node_index: current_index, + payload_status: child.get_parent_payload_status(), + }); + } + + child_index = current_index; + current_index = current.parent().ok_or(Error::NodeUnknown(current.root()))?; + } + } + + /// Spec: `get_node_children`. + fn get_node_children( + &self, + node: &IndexedForkChoiceNode, + ) -> Result, Error> { + if node.payload_status == PayloadStatus::Pending { + let proto_node = self + .nodes + .get(node.proto_node_index) + .ok_or(Error::InvalidNodeIndex(node.proto_node_index))?; + let mut children = vec![(node.with_status(PayloadStatus::Empty), proto_node.clone())]; + // The FULL virtual child only exists if the payload has been received. 
+ if proto_node.payload_received().is_ok_and(|received| received) { + children.push((node.with_status(PayloadStatus::Full), proto_node.clone())); + } + Ok(children) + } else { + Ok(self + .nodes + .iter() + .enumerate() + .filter(|(_, child_node)| { + child_node.parent() == Some(node.proto_node_index) + && child_node.get_parent_payload_status() == node.payload_status + }) + .map(|(child_index, child_node)| { + ( + IndexedForkChoiceNode { + root: child_node.root(), + proto_node_index: child_index, + payload_status: PayloadStatus::Pending, + }, + child_node.clone(), + ) + }) + .collect()) + } + } + + fn get_payload_status_tiebreaker( + &self, + fc_node: &IndexedForkChoiceNode, + proto_node: &ProtoNode, + current_slot: Slot, + proposer_boost_root: Hash256, + ) -> Result { + if fc_node.payload_status == PayloadStatus::Pending + || proto_node.slot().saturating_add(1_u64) != current_slot + { + Ok(fc_node.payload_status as u8) + } else if fc_node.payload_status == PayloadStatus::Empty { + Ok(1) + } else if self.should_extend_payload::(fc_node, proto_node, proposer_boost_root)? { + Ok(2) + } else { + Ok(0) + } + } + + fn should_extend_payload( + &self, + fc_node: &IndexedForkChoiceNode, + proto_node: &ProtoNode, + proposer_boost_root: Hash256, + ) -> Result { + // Per spec: `proposer_root == Root()` is one of the `or` conditions that + // makes `should_extend_payload` return True. + if proposer_boost_root.is_zero() { + return Ok(true); + } + + let proposer_boost_node_index = *self + .indices + .get(&proposer_boost_root) + .ok_or(Error::NodeUnknown(proposer_boost_root))?; + let proposer_boost_node = self + .nodes + .get(proposer_boost_node_index) + .ok_or(Error::InvalidNodeIndex(proposer_boost_node_index))?; + + let parent_index = proposer_boost_node + .parent() + .ok_or(Error::NodeUnknown(proposer_boost_root))?; + let proposer_boost_parent_root = self + .nodes + .get(parent_index) + .ok_or(Error::InvalidNodeIndex(parent_index))? 
+ .root(); + + Ok( + (proto_node.is_payload_timely::() && proto_node.is_payload_data_available::()) + || proposer_boost_parent_root != fc_node.root + || proposer_boost_node.is_parent_node_full(), + ) } /// Update the tree with new finalization information. The tree is only actually pruned if both @@ -721,7 +1506,7 @@ impl ProtoArray { .nodes .get(node_index) .ok_or(Error::InvalidNodeIndex(node_index))? - .root; + .root(); self.indices.remove(root); } @@ -738,176 +1523,15 @@ impl ProtoArray { // Iterate through all the existing nodes and adjust their indices to match the new layout // of `self.nodes`. for node in self.nodes.iter_mut() { - if let Some(parent) = node.parent { + if let Some(parent) = node.parent() { // If `node.parent` is less than `finalized_index`, set it to `None`. - node.parent = parent.checked_sub(finalized_index); - } - if let Some(best_child) = node.best_child { - node.best_child = Some( - best_child - .checked_sub(finalized_index) - .ok_or(Error::IndexOverflow("best_child"))?, - ); - } - if let Some(best_descendant) = node.best_descendant { - node.best_descendant = Some( - best_descendant - .checked_sub(finalized_index) - .ok_or(Error::IndexOverflow("best_descendant"))?, - ); + *node.parent_mut() = parent.checked_sub(finalized_index); } } Ok(()) } - /// Observe the parent at `parent_index` with respect to the child at `child_index` and - /// potentially modify the `parent.best_child` and `parent.best_descendant` values. - /// - /// ## Detail - /// - /// There are four outcomes: - /// - /// - The child is already the best child but it's now invalid due to a FFG change and should be removed. - /// - The child is already the best child and the parent is updated with the new - /// best-descendant. - /// - The child is not the best child but becomes the best child. - /// - The child is not the best child and does not become the best child. 
- fn maybe_update_best_child_and_descendant( - &mut self, - parent_index: usize, - child_index: usize, - current_slot: Slot, - best_justified_checkpoint: Checkpoint, - best_finalized_checkpoint: Checkpoint, - ) -> Result<(), Error> { - let child = self - .nodes - .get(child_index) - .ok_or(Error::InvalidNodeIndex(child_index))?; - - let parent = self - .nodes - .get(parent_index) - .ok_or(Error::InvalidNodeIndex(parent_index))?; - - let child_leads_to_viable_head = self.node_leads_to_viable_head::( - child, - current_slot, - best_justified_checkpoint, - best_finalized_checkpoint, - )?; - - // These three variables are aliases to the three options that we may set the - // `parent.best_child` and `parent.best_descendant` to. - // - // I use the aliases to assist readability. - let change_to_none = (None, None); - let change_to_child = ( - Some(child_index), - child.best_descendant.or(Some(child_index)), - ); - let no_change = (parent.best_child, parent.best_descendant); - - let (new_best_child, new_best_descendant) = - if let Some(best_child_index) = parent.best_child { - if best_child_index == child_index && !child_leads_to_viable_head { - // If the child is already the best-child of the parent but it's not viable for - // the head, remove it. - change_to_none - } else if best_child_index == child_index { - // If the child is the best-child already, set it again to ensure that the - // best-descendant of the parent is updated. - change_to_child - } else { - let best_child = self - .nodes - .get(best_child_index) - .ok_or(Error::InvalidBestDescendant(best_child_index))?; - - let best_child_leads_to_viable_head = self.node_leads_to_viable_head::( - best_child, - current_slot, - best_justified_checkpoint, - best_finalized_checkpoint, - )?; - - if child_leads_to_viable_head && !best_child_leads_to_viable_head { - // The child leads to a viable head, but the current best-child doesn't. 
- change_to_child - } else if !child_leads_to_viable_head && best_child_leads_to_viable_head { - // The best child leads to a viable head, but the child doesn't. - no_change - } else if child.weight == best_child.weight { - // Tie-breaker of equal weights by root. - if child.root >= best_child.root { - change_to_child - } else { - no_change - } - } else { - // Choose the winner by weight. - if child.weight > best_child.weight { - change_to_child - } else { - no_change - } - } - } - } else if child_leads_to_viable_head { - // There is no current best-child and the child is viable. - change_to_child - } else { - // There is no current best-child but the child is not viable. - no_change - }; - - let parent = self - .nodes - .get_mut(parent_index) - .ok_or(Error::InvalidNodeIndex(parent_index))?; - - parent.best_child = new_best_child; - parent.best_descendant = new_best_descendant; - - Ok(()) - } - - /// Indicates if the node itself is viable for the head, or if its best descendant is viable - /// for the head. 
- fn node_leads_to_viable_head( - &self, - node: &ProtoNode, - current_slot: Slot, - best_justified_checkpoint: Checkpoint, - best_finalized_checkpoint: Checkpoint, - ) -> Result { - let best_descendant_is_viable_for_head = - if let Some(best_descendant_index) = node.best_descendant { - let best_descendant = self - .nodes - .get(best_descendant_index) - .ok_or(Error::InvalidBestDescendant(best_descendant_index))?; - - self.node_is_viable_for_head::( - best_descendant, - current_slot, - best_justified_checkpoint, - best_finalized_checkpoint, - ) - } else { - false - }; - - Ok(best_descendant_is_viable_for_head - || self.node_is_viable_for_head::( - node, - current_slot, - best_justified_checkpoint, - best_finalized_checkpoint, - )) - } - /// This is the equivalent to the `filter_block_tree` function in the eth2 spec: /// /// https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/fork-choice.md#filter_block_tree @@ -921,25 +1545,27 @@ impl ProtoArray { best_justified_checkpoint: Checkpoint, best_finalized_checkpoint: Checkpoint, ) -> bool { - if node.execution_status.is_invalid() { + if let Ok(proto_node) = node.as_v17() + && proto_node.execution_status.is_invalid() + { return false; } let genesis_epoch = Epoch::new(0); let current_epoch = current_slot.epoch(E::slots_per_epoch()); - let node_epoch = node.slot.epoch(E::slots_per_epoch()); - let node_justified_checkpoint = node.justified_checkpoint; + let node_epoch = node.slot().epoch(E::slots_per_epoch()); + let node_justified_checkpoint = node.justified_checkpoint(); let voting_source = if current_epoch > node_epoch { // The block is from a prior epoch, the voting source will be pulled-up. - node.unrealized_justified_checkpoint + node.unrealized_justified_checkpoint() // Sometimes we don't track the unrealized justification. In // that case, just use the fully-realized justified checkpoint. 
- .unwrap_or(node_justified_checkpoint) + .unwrap_or(*node_justified_checkpoint) } else { // The block is not from a prior epoch, therefore the voting source // is not pulled up. - node_justified_checkpoint + *node_justified_checkpoint }; let correct_justified = best_justified_checkpoint.epoch == genesis_epoch @@ -948,7 +1574,7 @@ impl ProtoArray { let correct_finalized = best_finalized_checkpoint.epoch == genesis_epoch || self - .is_finalized_checkpoint_or_descendant::(node.root, best_finalized_checkpoint); + .is_finalized_checkpoint_or_descendant::(node.root(), best_finalized_checkpoint); correct_justified && correct_finalized } @@ -970,7 +1596,7 @@ impl ProtoArray { block_root: &Hash256, ) -> impl Iterator + 'a { self.iter_nodes(block_root) - .map(|node| (node.root, node.slot)) + .map(|node| (node.root(), node.slot())) } /// Returns `true` if the `descendant_root` has an ancestor with `ancestor_root`. Always @@ -991,8 +1617,8 @@ impl ProtoArray { .and_then(|ancestor_index| self.nodes.get(*ancestor_index)) .and_then(|ancestor| { self.iter_block_roots(&descendant_root) - .take_while(|(_root, slot)| *slot >= ancestor.slot) - .find(|(_root, slot)| *slot == ancestor.slot) + .take_while(|(_root, slot)| *slot >= ancestor.slot()) + .find(|(_root, slot)| *slot == ancestor.slot()) .map(|(root, _slot)| root == ancestor_root) }) .unwrap_or(false) @@ -1031,15 +1657,15 @@ impl ProtoArray { // Run this check once, outside of the loop rather than inside the loop. // If the conditions don't match for this node then they're unlikely to // start matching for its ancestors. 
- for checkpoint in &[node.finalized_checkpoint, node.justified_checkpoint] { - if checkpoint == &best_finalized_checkpoint { + for checkpoint in &[node.finalized_checkpoint(), node.justified_checkpoint()] { + if **checkpoint == best_finalized_checkpoint { return true; } } for checkpoint in &[ - node.unrealized_finalized_checkpoint, - node.unrealized_justified_checkpoint, + node.unrealized_finalized_checkpoint(), + node.unrealized_justified_checkpoint(), ] { if checkpoint.is_some_and(|cp| cp == best_finalized_checkpoint) { return true; @@ -1049,13 +1675,13 @@ impl ProtoArray { loop { // If `node` is less than or equal to the finalized slot then `node` // must be the finalized block. - if node.slot <= finalized_slot { - return node.root == finalized_root; + if node.slot() <= finalized_slot { + return node.root() == finalized_root; } // Since `node` is from a higher slot that the finalized checkpoint, // replace `node` with the parent of `node`. - if let Some(parent) = node.parent.and_then(|index| self.nodes.get(index)) { + if let Some(parent) = node.parent().and_then(|index| self.nodes.get(index)) { node = parent } else { // If `node` is not the finalized block and its parent does not @@ -1077,11 +1703,12 @@ impl ProtoArray { .iter() .rev() .find(|node| { - node.execution_status - .block_hash() + node.execution_status() + .ok() + .and_then(|execution_status| execution_status.block_hash()) .is_some_and(|node_block_hash| node_block_hash == *block_hash) }) - .map(|node| node.root) + .map(|node| node.root()) } /// Returns all nodes that have zero children and are descended from the finalized checkpoint. @@ -1095,13 +1722,17 @@ impl ProtoArray { ) -> Vec<&ProtoNode> { self.nodes .iter() - .filter(|node| { - node.best_child.is_none() + .enumerate() + .filter(|(i, node)| { + // TODO(gloas): we unoptimized this for Gloas fork choice, could re-optimize. 
+ let num_children = self.nodes.iter().filter(|n| n.parent() == Some(*i)).count(); + num_children == 0 && self.is_finalized_checkpoint_or_descendant::( - node.root, + node.root(), best_finalized_checkpoint, ) }) + .map(|(_, node)| node) .collect() } } @@ -1121,6 +1752,31 @@ pub fn calculate_committee_fraction( .checked_div(100) } +/// Spec: `get_proposer_score`. +fn get_proposer_score( + justified_balances: &JustifiedBalances, + spec: &ChainSpec, +) -> Result { + let Some(proposer_score_boost) = spec.proposer_score_boost else { + return Ok(0); + }; + calculate_committee_fraction::(justified_balances, proposer_score_boost) + .ok_or(Error::ProposerBoostOverflow(0)) +} + +/// Apply a signed delta to an unsigned weight, returning an error on overflow. +fn apply_delta(weight: u64, delta: i64, index: usize) -> Result { + if delta < 0 { + weight + .checked_sub(delta.unsigned_abs()) + .ok_or(Error::DeltaOverflow(index)) + } else { + weight + .checked_add(delta as u64) + .ok_or(Error::DeltaOverflow(index)) + } +} + /// Reverse iterator over one path through a `ProtoArray`. 
pub struct Iter<'a> { next_node_index: Option, @@ -1133,7 +1789,7 @@ impl<'a> Iterator for Iter<'a> { fn next(&mut self) -> Option { let next_node_index = self.next_node_index?; let node = self.proto_array.nodes.get(next_node_index)?; - self.next_node_index = node.parent; + self.next_node_index = node.parent(); Some(node) } } diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index 3edf1e0644..0ecaea3971 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -2,8 +2,7 @@ use crate::{ JustifiedBalances, error::Error, proto_array::{ - InvalidationOperation, Iter, ProposerBoost, ProtoArray, ProtoNode, - calculate_committee_fraction, + InvalidationOperation, Iter, NodeDelta, ProtoArray, ProtoNode, calculate_committee_fraction, }, ssz_container::SszContainer, }; @@ -14,22 +13,74 @@ use ssz_derive::{Decode, Encode}; use std::{ collections::{BTreeSet, HashMap}, fmt, + time::Duration, }; use types::{ AttestationShufflingId, ChainSpec, Checkpoint, Epoch, EthSpec, ExecutionBlockHash, Hash256, - Slot, + Slot, StatePayloadStatus, }; pub const DEFAULT_PRUNE_THRESHOLD: usize = 256; #[derive(Default, PartialEq, Clone, Encode, Decode)] pub struct VoteTracker { + current_root: Hash256, + next_root: Hash256, + current_slot: Slot, + next_slot: Slot, + current_payload_present: bool, + next_payload_present: bool, +} + +// Can be deleted once the V28 schema migration is buried. +// Matches the on-disk format from schema v28: current_root, next_root, next_epoch. +#[derive(Default, PartialEq, Clone, Encode, Decode)] +pub struct VoteTrackerV28 { current_root: Hash256, next_root: Hash256, next_epoch: Epoch, } -/// Represents the verification status of an execution payload. +// This impl is only used upon upgrade from pre-Gloas to Gloas with all pre-Gloas nodes. +// The payload status is `false` for pre-Gloas nodes. 
+impl From for VoteTracker { + fn from(v: VoteTrackerV28) -> Self { + VoteTracker { + current_root: v.current_root, + next_root: v.next_root, + // The v28 format stored next_epoch rather than slots. Default to 0 since the + // vote tracker will be updated on the next attestation. + current_slot: Slot::new(0), + next_slot: Slot::new(0), + current_payload_present: false, + next_payload_present: false, + } + } +} + +// This impl is only used upon downgrade from V29 to V28, with exclusively pre-Gloas nodes. +impl From for VoteTrackerV28 { + fn from(v: VoteTracker) -> Self { + // Drop the payload_present fields. This is safe because this is only called on pre-Gloas + // nodes. + VoteTrackerV28 { + current_root: v.current_root, + next_root: v.next_root, + // The v28 format stored next_epoch. Default to 0 since the vote tracker will be + // updated on the next attestation. + next_epoch: Epoch::new(0), + } + } +} + +/// Spec's `LatestMessage` type. Only used in tests. +pub struct LatestMessage { + pub slot: Slot, + pub root: Hash256, + pub payload_present: bool, +} + +/// Represents the verification status of an execution payload pre-Gloas. #[derive(Clone, Copy, Debug, PartialEq, Encode, Decode, Serialize, Deserialize)] #[ssz(enum_behaviour = "union")] pub enum ExecutionStatus { @@ -49,6 +100,46 @@ pub enum ExecutionStatus { Irrelevant(bool), } +/// Represents the status of an execution payload post-Gloas. +#[derive(Clone, Copy, Debug, PartialEq, Eq, Encode, Decode, Serialize, Deserialize)] +#[ssz(enum_behaviour = "tag")] +#[repr(u8)] +pub enum PayloadStatus { + Empty = 0, + Full = 1, + Pending = 2, +} + +impl PayloadStatus { + /// Convert a `PayloadStatus` into the equivalent `StatePayloadStatus`. + /// + /// This maps `Empty` onto `StatePayloadStatus::Pending` because empty and pending fork choice + /// nodes correspond to the exact same state. 
+ pub fn as_state_payload_status(self) -> StatePayloadStatus { + match self { + Self::Empty | Self::Pending => StatePayloadStatus::Pending, + Self::Full => StatePayloadStatus::Full, + } + } +} + +/// Spec's `ForkChoiceNode` augmented with ProtoNode index. +pub struct IndexedForkChoiceNode { + pub root: Hash256, + pub proto_node_index: usize, + pub payload_status: PayloadStatus, +} + +impl IndexedForkChoiceNode { + pub fn with_status(&self, payload_status: PayloadStatus) -> Self { + Self { + root: self.root, + proto_node_index: self.proto_node_index, + payload_status, + } + } +} + impl ExecutionStatus { pub fn is_execution_enabled(&self) -> bool { !matches!(self, ExecutionStatus::Irrelevant(_)) @@ -159,6 +250,11 @@ pub struct Block { pub execution_status: ExecutionStatus, pub unrealized_justified_checkpoint: Option, pub unrealized_finalized_checkpoint: Option, + + /// post-Gloas fields + pub execution_payload_parent_hash: Option, + pub execution_payload_block_hash: Option, + pub proposer_index: Option, } impl Block { @@ -422,12 +518,15 @@ impl ProtoArrayForkChoice { current_epoch_shuffling_id: AttestationShufflingId, next_epoch_shuffling_id: AttestationShufflingId, execution_status: ExecutionStatus, + execution_payload_parent_hash: Option, + execution_payload_block_hash: Option, + proposer_index: u64, + spec: &ChainSpec, ) -> Result { let mut proto_array = ProtoArray { prune_threshold: DEFAULT_PRUNE_THRESHOLD, nodes: Vec::with_capacity(1), indices: HashMap::with_capacity(1), - previous_proposer_boost: ProposerBoost::default(), }; let block = Block { @@ -445,14 +544,20 @@ impl ProtoArrayForkChoice { execution_status, unrealized_justified_checkpoint: Some(justified_checkpoint), unrealized_finalized_checkpoint: Some(finalized_checkpoint), + execution_payload_parent_hash, + execution_payload_block_hash, + proposer_index: Some(proposer_index), }; proto_array .on_block::( block, current_slot, - justified_checkpoint, - finalized_checkpoint, + spec, + // Anchor block is 
always timely (delay=0 ensures both timeliness + // checks pass). Combined with `is_genesis` override in on_block, + // this matches spec's `block_timeliness = {anchor: [True, True]}`. + Duration::ZERO, ) .map_err(|e| format!("Failed to add finalized block to proto_array: {:?}", e))?; @@ -463,6 +568,18 @@ impl ProtoArrayForkChoice { }) } + /// Mark a Gloas payload envelope as valid and received. + /// + /// This must only be called for valid Gloas payloads. + pub fn on_valid_payload_envelope_received( + &mut self, + block_root: Hash256, + ) -> Result<(), String> { + self.proto_array + .on_valid_payload_envelope_received(block_root) + .map_err(|e| format!("Failed to process execution payload: {:?}", e)) + } + /// See `ProtoArray::propagate_execution_payload_validation` for documentation. pub fn process_execution_payload_validation( &mut self, @@ -488,36 +605,71 @@ impl ProtoArrayForkChoice { &mut self, validator_index: usize, block_root: Hash256, - target_epoch: Epoch, + attestation_slot: Slot, + payload_present: bool, ) -> Result<(), String> { let vote = self.votes.get_mut(validator_index); - if target_epoch > vote.next_epoch || *vote == VoteTracker::default() { + if attestation_slot > vote.next_slot || *vote == VoteTracker::default() { vote.next_root = block_root; - vote.next_epoch = target_epoch; + vote.next_slot = attestation_slot; + vote.next_payload_present = payload_present; } Ok(()) } + /// Process a PTC vote by setting the appropriate bits on the target block's V29 node. + /// + /// `ptc_index` is the voter's position in the PTC committee (resolved by the caller). + /// This writes directly to the node's bitfields, bypassing the delta pipeline. 
+ pub fn process_payload_attestation( + &mut self, + block_root: Hash256, + ptc_index: usize, + payload_present: bool, + blob_data_available: bool, + ) -> Result<(), String> { + let node_index = self + .proto_array + .indices + .get(&block_root) + .copied() + .ok_or_else(|| { + format!("process_payload_attestation: unknown block root {block_root:?}") + })?; + let node = self.proto_array.nodes.get_mut(node_index).ok_or_else(|| { + format!("process_payload_attestation: invalid node index {node_index}") + })?; + let v29 = node + .as_v29_mut() + .map_err(|_| format!("process_payload_attestation: node {block_root:?} is not V29"))?; + + v29.payload_timeliness_votes + .set(ptc_index, payload_present) + .map_err(|e| format!("process_payload_attestation: timeliness set failed: {e:?}"))?; + v29.payload_data_availability_votes + .set(ptc_index, blob_data_available) + .map_err(|e| { + format!("process_payload_attestation: data availability set failed: {e:?}") + })?; + + Ok(()) + } + pub fn process_block( &mut self, block: Block, current_slot: Slot, - justified_checkpoint: Checkpoint, - finalized_checkpoint: Checkpoint, + spec: &ChainSpec, + time_into_slot: Duration, ) -> Result<(), String> { if block.parent_root.is_none() { return Err("Missing parent root".to_string()); } self.proto_array - .on_block::( - block, - current_slot, - justified_checkpoint, - finalized_checkpoint, - ) + .on_block::(block, current_slot, spec, time_into_slot) .map_err(|e| format!("process_block_error: {:?}", e)) } @@ -531,12 +683,19 @@ impl ProtoArrayForkChoice { equivocating_indices: &BTreeSet, current_slot: Slot, spec: &ChainSpec, - ) -> Result { + ) -> Result<(Hash256, PayloadStatus), String> { let old_balances = &mut self.balances; let new_balances = justified_state_balances; + let node_slots = self + .proto_array + .nodes + .iter() + .map(|node| node.slot()) + .collect::>(); let deltas = compute_deltas( &self.proto_array.indices, + &node_slots, &mut self.votes, &old_balances.effective_balances, 
&new_balances.effective_balances, @@ -545,15 +704,7 @@ impl ProtoArrayForkChoice { .map_err(|e| format!("find_head compute_deltas failed: {:?}", e))?; self.proto_array - .apply_score_changes::( - deltas, - justified_checkpoint, - finalized_checkpoint, - new_balances, - proposer_boost_root, - current_slot, - spec, - ) + .apply_score_changes::(deltas) .map_err(|e| format!("find_head apply_score_changes failed: {:?}", e))?; *old_balances = new_balances.clone(); @@ -564,6 +715,9 @@ impl ProtoArrayForkChoice { current_slot, justified_checkpoint, finalized_checkpoint, + proposer_boost_root, + new_balances, + spec, ) .map_err(|e| format!("find_head failed: {:?}", e)) } @@ -593,13 +747,13 @@ impl ProtoArrayForkChoice { )?; // Only re-org a single slot. This prevents cascading failures during asynchrony. - let head_slot_ok = info.head_node.slot + 1 == current_slot; + let head_slot_ok = info.head_node.slot().saturating_add(1_u64) == current_slot; if !head_slot_ok { return Err(DoNotReOrg::HeadDistance.into()); } // Only re-org if the head's weight is less than the heads configured committee fraction. - let head_weight = info.head_node.weight; + let head_weight = info.head_node.weight(); let re_org_head_weight_threshold = info.re_org_head_weight_threshold; let weak_head = head_weight < re_org_head_weight_threshold; if !weak_head { @@ -610,8 +764,10 @@ impl ProtoArrayForkChoice { .into()); } - // Only re-org if the parent's weight is greater than the parents configured committee fraction. - let parent_weight = info.parent_node.weight; + // Spec: `is_parent_strong`. Use payload-aware weight matching the + // payload path the head node is on from its parent. 
+ let parent_payload_status = info.head_node.get_parent_payload_status(); + let parent_weight = info.parent_node.attestation_score(parent_payload_status); let re_org_parent_weight_threshold = info.re_org_parent_weight_threshold; let parent_strong = parent_weight > re_org_parent_weight_threshold; if !parent_strong { @@ -650,14 +806,14 @@ impl ProtoArrayForkChoice { let parent_node = nodes.pop().ok_or(DoNotReOrg::MissingHeadOrParentNode)?; let head_node = nodes.pop().ok_or(DoNotReOrg::MissingHeadOrParentNode)?; - let parent_slot = parent_node.slot; - let head_slot = head_node.slot; - let re_org_block_slot = head_slot + 1; + let parent_slot = parent_node.slot(); + let head_slot = head_node.slot(); + let re_org_block_slot = head_slot.saturating_add(1_u64); // Check finalization distance. let proposal_epoch = re_org_block_slot.epoch(E::slots_per_epoch()); let finalized_epoch = head_node - .unrealized_finalized_checkpoint + .unrealized_finalized_checkpoint() .ok_or(DoNotReOrg::MissingHeadFinalizedCheckpoint)? .epoch; let epochs_since_finalization = proposal_epoch.saturating_sub(finalized_epoch).as_u64(); @@ -689,10 +845,10 @@ impl ProtoArrayForkChoice { } // Check FFG. - let ffg_competitive = parent_node.unrealized_justified_checkpoint - == head_node.unrealized_justified_checkpoint - && parent_node.unrealized_finalized_checkpoint - == head_node.unrealized_finalized_checkpoint; + let ffg_competitive = parent_node.unrealized_justified_checkpoint() + == head_node.unrealized_justified_checkpoint() + && parent_node.unrealized_finalized_checkpoint() + == head_node.unrealized_finalized_checkpoint(); if !ffg_competitive { return Err(DoNotReOrg::JustificationAndFinalizationNotCompetitive.into()); } @@ -720,20 +876,17 @@ impl ProtoArrayForkChoice { /// This will operate on *all* blocks, even those that do not descend from the finalized /// ancestor. 
pub fn contains_invalid_payloads(&mut self) -> bool { - self.proto_array - .nodes - .iter() - .any(|node| node.execution_status.is_invalid()) + self.proto_array.nodes.iter().any(|node| { + node.execution_status() + .is_ok_and(|status| status.is_invalid()) + }) } /// For all nodes, regardless of their relationship to the finalized block, set their execution /// status to be optimistic. /// /// In practice this means forgetting any `VALID` or `INVALID` statuses. - pub fn set_all_blocks_to_optimistic( - &mut self, - spec: &ChainSpec, - ) -> Result<(), String> { + pub fn set_all_blocks_to_optimistic(&mut self) -> Result<(), String> { // Iterate backwards through all nodes in the `proto_array`. Whilst it's not strictly // required to do this process in reverse, it seems natural when we consider how LMD votes // are counted. @@ -748,19 +901,21 @@ impl ProtoArrayForkChoice { .get_mut(node_index) .ok_or("unreachable index out of bounds in proto_array nodes")?; - match node.execution_status { - ExecutionStatus::Invalid(block_hash) => { - node.execution_status = ExecutionStatus::Optimistic(block_hash); + match node.execution_status() { + Ok(ExecutionStatus::Invalid(block_hash)) => { + if let ProtoNode::V17(node) = node { + node.execution_status = ExecutionStatus::Optimistic(block_hash); + } // Restore the weight of the node, it would have been set to `0` in // `apply_score_changes` when it was invalidated. - let mut restored_weight: u64 = self + let restored_weight: u64 = self .votes .0 .iter() .enumerate() .filter_map(|(validator_index, vote)| { - if vote.current_root == node.root { + if vote.current_root == node.root() { // Any voting validator that does not have a balance should be // ignored. This is consistent with `compute_deltas`. self.balances.effective_balances.get(validator_index) @@ -770,36 +925,16 @@ impl ProtoArrayForkChoice { }) .sum(); - // If the invalid root was boosted, apply the weight to it and - // ancestors. 
- if let Some(proposer_score_boost) = spec.proposer_score_boost - && self.proto_array.previous_proposer_boost.root == node.root - { - // Compute the score based upon the current balances. We can't rely on - // the `previous_proposr_boost.score` since it is set to zero with an - // invalid node. - let proposer_score = - calculate_committee_fraction::(&self.balances, proposer_score_boost) - .ok_or("Failed to compute proposer boost")?; - // Store the score we've applied here so it can be removed in - // a later call to `apply_score_changes`. - self.proto_array.previous_proposer_boost.score = proposer_score; - // Apply this boost to this node. - restored_weight = restored_weight - .checked_add(proposer_score) - .ok_or("Overflow when adding boost to weight")?; - } - // Add the restored weight to the node and all ancestors. if restored_weight > 0 { let mut node_or_ancestor = node; loop { - node_or_ancestor.weight = node_or_ancestor - .weight + *node_or_ancestor.weight_mut() = node_or_ancestor + .weight() .checked_add(restored_weight) .ok_or("Overflow when adding weight to ancestor")?; - if let Some(parent_index) = node_or_ancestor.parent { + if let Some(parent_index) = node_or_ancestor.parent() { node_or_ancestor = self .proto_array .nodes @@ -815,11 +950,14 @@ impl ProtoArrayForkChoice { } // There are no balance changes required if the node was either valid or // optimistic. - ExecutionStatus::Valid(block_hash) | ExecutionStatus::Optimistic(block_hash) => { - node.execution_status = ExecutionStatus::Optimistic(block_hash) + Ok(ExecutionStatus::Valid(block_hash)) + | Ok(ExecutionStatus::Optimistic(block_hash)) => { + if let ProtoNode::V17(node) = node { + node.execution_status = ExecutionStatus::Optimistic(block_hash) + } } // An irrelevant node cannot become optimistic, this is a no-op. 
- ExecutionStatus::Irrelevant(_) => (), + Ok(ExecutionStatus::Irrelevant(_)) | Err(_) => (), } } @@ -856,30 +994,48 @@ impl ProtoArrayForkChoice { pub fn get_block(&self, block_root: &Hash256) -> Option { let block = self.get_proto_node(block_root)?; let parent_root = block - .parent + .parent() .and_then(|i| self.proto_array.nodes.get(i)) - .map(|parent| parent.root); + .map(|parent| parent.root()); Some(Block { - slot: block.slot, - root: block.root, + slot: block.slot(), + root: block.root(), parent_root, - state_root: block.state_root, - target_root: block.target_root, - current_epoch_shuffling_id: block.current_epoch_shuffling_id.clone(), - next_epoch_shuffling_id: block.next_epoch_shuffling_id.clone(), - justified_checkpoint: block.justified_checkpoint, - finalized_checkpoint: block.finalized_checkpoint, - execution_status: block.execution_status, - unrealized_justified_checkpoint: block.unrealized_justified_checkpoint, - unrealized_finalized_checkpoint: block.unrealized_finalized_checkpoint, + state_root: block.state_root(), + target_root: block.target_root(), + current_epoch_shuffling_id: block.current_epoch_shuffling_id().clone(), + next_epoch_shuffling_id: block.next_epoch_shuffling_id().clone(), + justified_checkpoint: *block.justified_checkpoint(), + finalized_checkpoint: *block.finalized_checkpoint(), + execution_status: block + .execution_status() + .unwrap_or_else(|_| ExecutionStatus::irrelevant()), + unrealized_justified_checkpoint: block.unrealized_justified_checkpoint(), + unrealized_finalized_checkpoint: block.unrealized_finalized_checkpoint(), + execution_payload_parent_hash: block.execution_payload_parent_hash().ok(), + execution_payload_block_hash: block.execution_payload_block_hash().ok(), + proposer_index: block.proposer_index().ok(), }) } /// Returns the `block.execution_status` field, if the block is present. 
pub fn get_block_execution_status(&self, block_root: &Hash256) -> Option { let block = self.get_proto_node(block_root)?; - Some(block.execution_status) + Some( + block + .execution_status() + .unwrap_or_else(|_| ExecutionStatus::irrelevant()), + ) + } + + /// Returns whether the execution payload for a block has been received. + /// + /// Returns `false` for pre-Gloas (V17) nodes or unknown blocks. + pub fn is_payload_received(&self, block_root: &Hash256) -> bool { + self.get_proto_node(block_root) + .and_then(|node| node.payload_received().ok()) + .unwrap_or(false) } /// Returns the weight of a given block. @@ -888,9 +1044,11 @@ impl ProtoArrayForkChoice { self.proto_array .nodes .get(*block_index) - .map(|node| node.weight) + .map(|node| node.weight()) } + /// Returns the payload status of the head node based on accumulated weights and tiebreaker. + /// /// See `ProtoArray` documentation. pub fn is_descendant(&self, ancestor_root: Hash256, descendant_root: Hash256) -> bool { self.proto_array @@ -907,14 +1065,17 @@ impl ProtoArrayForkChoice { .is_finalized_checkpoint_or_descendant::(descendant_root, best_finalized_checkpoint) } - pub fn latest_message(&self, validator_index: usize) -> Option<(Hash256, Epoch)> { - if validator_index < self.votes.0.len() { - let vote = &self.votes.0[validator_index]; - + /// NOTE: only used in tests. 
+ pub fn latest_message(&self, validator_index: usize) -> Option { + if let Some(vote) = self.votes.0.get(validator_index) { if *vote == VoteTracker::default() { None } else { - Some((vote.next_root, vote.next_epoch)) + Some(LatestMessage { + root: vote.next_root, + slot: vote.next_slot, + payload_present: vote.next_payload_present, + }) } } else { None @@ -934,21 +1095,12 @@ impl ProtoArrayForkChoice { self.proto_array.iter_block_roots(block_root) } - pub fn as_ssz_container( - &self, - justified_checkpoint: Checkpoint, - finalized_checkpoint: Checkpoint, - ) -> SszContainer { - SszContainer::from_proto_array(self, justified_checkpoint, finalized_checkpoint) + pub fn as_ssz_container(&self) -> SszContainer { + SszContainer::from_proto_array(self) } - pub fn as_bytes( - &self, - justified_checkpoint: Checkpoint, - finalized_checkpoint: Checkpoint, - ) -> Vec { - self.as_ssz_container(justified_checkpoint, finalized_checkpoint) - .as_ssz_bytes() + pub fn as_bytes(&self) -> Vec { + self.as_ssz_container().as_ssz_bytes() } pub fn from_bytes(bytes: &[u8], balances: JustifiedBalances) -> Result { @@ -1002,12 +1154,28 @@ impl ProtoArrayForkChoice { /// always valid). 
fn compute_deltas( indices: &HashMap, + node_slots: &[Slot], votes: &mut ElasticList, old_balances: &[u64], new_balances: &[u64], equivocating_indices: &BTreeSet, -) -> Result, Error> { - let mut deltas = vec![0_i64; indices.len()]; +) -> Result, Error> { + let block_slot = |index: usize| -> Result { + node_slots + .get(index) + .copied() + .ok_or(Error::InvalidNodeDelta(index)) + }; + + let mut deltas = vec![ + NodeDelta { + delta: 0, + empty_delta: 0, + full_delta: 0, + equivocating_attestation_delta: 0, + }; + indices.len() + ]; for (val_index, vote) in votes.iter_mut().enumerate() { // There is no need to create a score change if the validator has never voted or both their @@ -1032,17 +1200,30 @@ fn compute_deltas( let old_balance = old_balances.get(val_index).copied().unwrap_or(0); if let Some(current_delta_index) = indices.get(&vote.current_root).copied() { - let delta = deltas - .get(current_delta_index) - .ok_or(Error::InvalidNodeDelta(current_delta_index))? + let node_delta = deltas + .get_mut(current_delta_index) + .ok_or(Error::InvalidNodeDelta(current_delta_index))?; + node_delta.delta = node_delta + .delta .checked_sub(old_balance as i64) .ok_or(Error::DeltaOverflow(current_delta_index))?; - // Array access safe due to check on previous line. - deltas[current_delta_index] = delta; + let status = NodeDelta::payload_status( + vote.current_slot, + vote.current_payload_present, + block_slot(current_delta_index)?, + ); + node_delta.sub_payload_delta(status, old_balance, current_delta_index)?; + + // Track equivocating weight for `is_head_weak` monotonicity. + node_delta.equivocating_attestation_delta = node_delta + .equivocating_attestation_delta + .saturating_add(old_balance); } vote.current_root = Hash256::zero(); + vote.current_slot = Slot::new(0); + vote.current_payload_present = false; } // We've handled this slashed validator, continue without applying an ordinary delta. 
continue; @@ -1059,34 +1240,52 @@ fn compute_deltas( // on-boarded less validators than the prior fork. let new_balance = new_balances.get(val_index).copied().unwrap_or(0); - if vote.current_root != vote.next_root || old_balance != new_balance { + if vote.current_root != vote.next_root + || old_balance != new_balance + || vote.current_payload_present != vote.next_payload_present + || vote.current_slot != vote.next_slot + { // We ignore the vote if it is not known in `indices`. We assume that it is outside // of our tree (i.e., pre-finalization) and therefore not interesting. if let Some(current_delta_index) = indices.get(&vote.current_root).copied() { - let delta = deltas - .get(current_delta_index) - .ok_or(Error::InvalidNodeDelta(current_delta_index))? + let node_delta = deltas + .get_mut(current_delta_index) + .ok_or(Error::InvalidNodeDelta(current_delta_index))?; + node_delta.delta = node_delta + .delta .checked_sub(old_balance as i64) .ok_or(Error::DeltaOverflow(current_delta_index))?; - // Array access safe due to check on previous line. - deltas[current_delta_index] = delta; + let status = NodeDelta::payload_status( + vote.current_slot, + vote.current_payload_present, + block_slot(current_delta_index)?, + ); + node_delta.sub_payload_delta(status, old_balance, current_delta_index)?; } // We ignore the vote if it is not known in `indices`. We assume that it is outside // of our tree (i.e., pre-finalization) and therefore not interesting. if let Some(next_delta_index) = indices.get(&vote.next_root).copied() { - let delta = deltas - .get(next_delta_index) - .ok_or(Error::InvalidNodeDelta(next_delta_index))? + let node_delta = deltas + .get_mut(next_delta_index) + .ok_or(Error::InvalidNodeDelta(next_delta_index))?; + node_delta.delta = node_delta + .delta .checked_add(new_balance as i64) .ok_or(Error::DeltaOverflow(next_delta_index))?; - // Array access safe due to check on previous line. 
- deltas[next_delta_index] = delta; + let status = NodeDelta::payload_status( + vote.next_slot, + vote.next_payload_present, + block_slot(next_delta_index)?, + ); + node_delta.add_payload_delta(status, new_balance, next_delta_index)?; } vote.current_root = vote.next_root; + vote.current_slot = vote.next_slot; + vote.current_payload_present = vote.next_payload_present; } } @@ -1104,8 +1303,13 @@ mod test_compute_deltas { Hash256::from_low_u64_be(i as u64 + 1) } + fn test_node_slots(count: usize) -> Vec { + vec![Slot::new(0); count] + } + #[test] fn finalized_descendant() { + let spec = MainnetEthSpec::default_spec(); let genesis_slot = Slot::new(0); let genesis_epoch = Epoch::new(0); @@ -1136,6 +1340,10 @@ mod test_compute_deltas { junk_shuffling_id.clone(), junk_shuffling_id.clone(), execution_status, + None, + None, + 0, + &spec, ) .unwrap(); @@ -1152,13 +1360,16 @@ mod test_compute_deltas { next_epoch_shuffling_id: junk_shuffling_id.clone(), justified_checkpoint: genesis_checkpoint, finalized_checkpoint: genesis_checkpoint, - execution_status, unrealized_justified_checkpoint: Some(genesis_checkpoint), + execution_status, unrealized_finalized_checkpoint: Some(genesis_checkpoint), + execution_payload_parent_hash: None, + execution_payload_block_hash: None, + proposer_index: Some(0), }, genesis_slot + 1, - genesis_checkpoint, - genesis_checkpoint, + &spec, + Duration::ZERO, ) .unwrap(); @@ -1180,10 +1391,13 @@ mod test_compute_deltas { execution_status, unrealized_justified_checkpoint: None, unrealized_finalized_checkpoint: None, + execution_payload_parent_hash: None, + execution_payload_block_hash: None, + proposer_index: Some(0), }, genesis_slot + 1, - genesis_checkpoint, - genesis_checkpoint, + &spec, + Duration::ZERO, ) .unwrap(); @@ -1259,6 +1473,7 @@ mod test_compute_deltas { /// *checkpoint*, not just the finalized *block*. 
#[test] fn finalized_descendant_edge_case() { + let spec = MainnetEthSpec::default_spec(); let get_block_root = Hash256::from_low_u64_be; let genesis_slot = Slot::new(0); let junk_state_root = Hash256::zero(); @@ -1280,6 +1495,10 @@ mod test_compute_deltas { junk_shuffling_id.clone(), junk_shuffling_id.clone(), execution_status, + None, + None, + 0, + &spec, ) .unwrap(); @@ -1308,10 +1527,13 @@ mod test_compute_deltas { execution_status, unrealized_justified_checkpoint: Some(genesis_checkpoint), unrealized_finalized_checkpoint: Some(genesis_checkpoint), + execution_payload_parent_hash: None, + execution_payload_block_hash: None, + proposer_index: Some(0), }, Slot::from(block.slot), - genesis_checkpoint, - genesis_checkpoint, + &spec, + Duration::ZERO, ) .unwrap(); }; @@ -1414,7 +1636,10 @@ mod test_compute_deltas { votes.0.push(VoteTracker { current_root: Hash256::zero(), next_root: Hash256::zero(), - next_epoch: Epoch::new(0), + current_slot: Slot::new(0), + next_slot: Slot::new(0), + current_payload_present: false, + next_payload_present: false, }); old_balances.push(0); new_balances.push(0); @@ -1422,6 +1647,7 @@ mod test_compute_deltas { let deltas = compute_deltas( &indices, + &test_node_slots(indices.len()), &mut votes, &old_balances, &new_balances, @@ -1465,7 +1691,10 @@ mod test_compute_deltas { votes.0.push(VoteTracker { current_root: Hash256::zero(), next_root: hash_from_index(0), - next_epoch: Epoch::new(0), + current_slot: Slot::new(0), + next_slot: Slot::new(0), + current_payload_present: false, + next_payload_present: false, }); old_balances.push(BALANCE); new_balances.push(BALANCE); @@ -1473,6 +1702,7 @@ mod test_compute_deltas { let deltas = compute_deltas( &indices, + &test_node_slots(indices.len()), &mut votes, &old_balances, &new_balances, @@ -1523,7 +1753,10 @@ mod test_compute_deltas { votes.0.push(VoteTracker { current_root: Hash256::zero(), next_root: hash_from_index(i), - next_epoch: Epoch::new(0), + current_slot: Slot::new(0), + next_slot: 
Slot::new(0), + current_payload_present: false, + next_payload_present: false, }); old_balances.push(BALANCE); new_balances.push(BALANCE); @@ -1531,6 +1764,7 @@ mod test_compute_deltas { let deltas = compute_deltas( &indices, + &test_node_slots(indices.len()), &mut votes, &old_balances, &new_balances, @@ -1576,7 +1810,10 @@ mod test_compute_deltas { votes.0.push(VoteTracker { current_root: hash_from_index(0), next_root: hash_from_index(1), - next_epoch: Epoch::new(0), + current_slot: Slot::new(0), + next_slot: Slot::new(0), + current_payload_present: false, + next_payload_present: false, }); old_balances.push(BALANCE); new_balances.push(BALANCE); @@ -1584,6 +1821,7 @@ mod test_compute_deltas { let deltas = compute_deltas( &indices, + &test_node_slots(indices.len()), &mut votes, &old_balances, &new_balances, @@ -1640,18 +1878,25 @@ mod test_compute_deltas { votes.0.push(VoteTracker { current_root: hash_from_index(1), next_root: Hash256::zero(), - next_epoch: Epoch::new(0), + current_slot: Slot::new(0), + next_slot: Slot::new(0), + current_payload_present: false, + next_payload_present: false, }); // One validator moves their vote from the block to something outside the tree. 
votes.0.push(VoteTracker { current_root: hash_from_index(1), next_root: Hash256::from_low_u64_be(1337), - next_epoch: Epoch::new(0), + current_slot: Slot::new(0), + next_slot: Slot::new(0), + current_payload_present: false, + next_payload_present: false, }); let deltas = compute_deltas( &indices, + &test_node_slots(indices.len()), &mut votes, &old_balances, &new_balances, @@ -1693,7 +1938,10 @@ mod test_compute_deltas { votes.0.push(VoteTracker { current_root: hash_from_index(0), next_root: hash_from_index(1), - next_epoch: Epoch::new(0), + current_slot: Slot::new(0), + next_slot: Slot::new(0), + current_payload_present: false, + next_payload_present: false, }); old_balances.push(OLD_BALANCE); new_balances.push(NEW_BALANCE); @@ -1701,6 +1949,7 @@ mod test_compute_deltas { let deltas = compute_deltas( &indices, + &test_node_slots(indices.len()), &mut votes, &old_balances, &new_balances, @@ -1762,12 +2011,16 @@ mod test_compute_deltas { votes.0.push(VoteTracker { current_root: hash_from_index(1), next_root: hash_from_index(2), - next_epoch: Epoch::new(0), + current_slot: Slot::new(0), + next_slot: Slot::new(0), + current_payload_present: false, + next_payload_present: false, }); } let deltas = compute_deltas( &indices, + &test_node_slots(indices.len()), &mut votes, &old_balances, &new_balances, @@ -1818,12 +2071,16 @@ mod test_compute_deltas { votes.0.push(VoteTracker { current_root: hash_from_index(1), next_root: hash_from_index(2), - next_epoch: Epoch::new(0), + current_slot: Slot::new(0), + next_slot: Slot::new(0), + current_payload_present: false, + next_payload_present: false, }); } let deltas = compute_deltas( &indices, + &test_node_slots(indices.len()), &mut votes, &old_balances, &new_balances, @@ -1872,7 +2129,10 @@ mod test_compute_deltas { votes.0.push(VoteTracker { current_root: hash_from_index(1), next_root: hash_from_index(2), - next_epoch: Epoch::new(0), + current_slot: Slot::new(0), + next_slot: Slot::new(0), + current_payload_present: false, + 
next_payload_present: false, }); } @@ -1881,6 +2141,7 @@ mod test_compute_deltas { let deltas = compute_deltas( &indices, + &test_node_slots(indices.len()), &mut votes, &old_balances, &new_balances, @@ -1910,6 +2171,7 @@ mod test_compute_deltas { // Re-computing the deltas should be a no-op (no repeat deduction for the slashed validator). let deltas = compute_deltas( &indices, + &test_node_slots(indices.len()), &mut votes, &new_balances, &new_balances, @@ -1918,4 +2180,68 @@ mod test_compute_deltas { .expect("should compute deltas"); assert_eq!(deltas, vec![0, 0]); } + + #[test] + fn payload_bucket_changes_on_non_pending_vote() { + const BALANCE: u64 = 42; + + let mut indices = HashMap::new(); + indices.insert(hash_from_index(1), 0); + + let node_slots = vec![Slot::new(0)]; + let mut votes = ElasticList(vec![VoteTracker { + current_root: hash_from_index(1), + next_root: hash_from_index(1), + current_slot: Slot::new(1), + next_slot: Slot::new(1), + current_payload_present: false, + next_payload_present: true, + }]); + + let deltas = compute_deltas( + &indices, + &node_slots, + &mut votes, + &[BALANCE], + &[BALANCE], + &BTreeSet::new(), + ) + .expect("should compute deltas"); + + assert_eq!(deltas[0].delta, 0); + assert_eq!(deltas[0].empty_delta, -(BALANCE as i64)); + assert_eq!(deltas[0].full_delta, BALANCE as i64); + } + + #[test] + fn pending_vote_only_updates_regular_weight() { + const BALANCE: u64 = 42; + + let mut indices = HashMap::new(); + indices.insert(hash_from_index(1), 0); + + let node_slots = vec![Slot::new(0)]; + let mut votes = ElasticList(vec![VoteTracker { + current_root: hash_from_index(1), + next_root: hash_from_index(1), + current_slot: Slot::new(0), + next_slot: Slot::new(0), + current_payload_present: false, + next_payload_present: true, + }]); + + let deltas = compute_deltas( + &indices, + &node_slots, + &mut votes, + &[BALANCE], + &[BALANCE], + &BTreeSet::new(), + ) + .expect("should compute deltas"); + + assert_eq!(deltas[0].delta, 0); + 
assert_eq!(deltas[0].empty_delta, 0); + assert_eq!(deltas[0].full_delta, 0); + } } diff --git a/consensus/proto_array/src/ssz_container.rs b/consensus/proto_array/src/ssz_container.rs index 42696256f7..69efb35027 100644 --- a/consensus/proto_array/src/ssz_container.rs +++ b/consensus/proto_array/src/ssz_container.rs @@ -1,8 +1,8 @@ use crate::proto_array::ProposerBoost; use crate::{ Error, JustifiedBalances, - proto_array::{ProtoArray, ProtoNodeV17}, - proto_array_fork_choice::{ElasticList, ProtoArrayForkChoice, VoteTracker}, + proto_array::{ProtoArray, ProtoNode, ProtoNodeV17}, + proto_array_fork_choice::{ElasticList, ProtoArrayForkChoice, VoteTracker, VoteTrackerV28}, }; use ssz::{Encode, four_byte_option_impl}; use ssz_derive::{Decode, Encode}; @@ -14,54 +14,55 @@ use types::{Checkpoint, Hash256}; // selector. four_byte_option_impl!(four_byte_option_checkpoint, Checkpoint); -pub type SszContainer = SszContainerV28; +pub type SszContainer = SszContainerV29; #[superstruct( - variants(V28), + variants(V28, V29), variant_attributes(derive(Encode, Decode, Clone)), no_enum )] pub struct SszContainer { + #[superstruct(only(V28))] + pub votes_v28: Vec, + #[superstruct(only(V29))] pub votes: Vec, pub prune_threshold: usize, // Deprecated, remove in a future schema migration + #[superstruct(only(V28))] justified_checkpoint: Checkpoint, // Deprecated, remove in a future schema migration + #[superstruct(only(V28))] finalized_checkpoint: Checkpoint, + #[superstruct(only(V28))] pub nodes: Vec, + #[superstruct(only(V29))] + pub nodes: Vec, pub indices: Vec<(Hash256, usize)>, + #[superstruct(only(V28))] pub previous_proposer_boost: ProposerBoost, } -impl SszContainer { - pub fn from_proto_array( - from: &ProtoArrayForkChoice, - justified_checkpoint: Checkpoint, - finalized_checkpoint: Checkpoint, - ) -> Self { +impl SszContainerV29 { + pub fn from_proto_array(from: &ProtoArrayForkChoice) -> Self { let proto_array = &from.proto_array; Self { votes: from.votes.0.clone(), 
prune_threshold: proto_array.prune_threshold, - justified_checkpoint, - finalized_checkpoint, nodes: proto_array.nodes.clone(), indices: proto_array.indices.iter().map(|(k, v)| (*k, *v)).collect(), - previous_proposer_boost: proto_array.previous_proposer_boost, } } } -impl TryFrom<(SszContainer, JustifiedBalances)> for ProtoArrayForkChoice { +impl TryFrom<(SszContainerV29, JustifiedBalances)> for ProtoArrayForkChoice { type Error = Error; - fn try_from((from, balances): (SszContainer, JustifiedBalances)) -> Result { + fn try_from((from, balances): (SszContainerV29, JustifiedBalances)) -> Result { let proto_array = ProtoArray { prune_threshold: from.prune_threshold, nodes: from.nodes, indices: from.indices.into_iter().collect::>(), - previous_proposer_boost: from.previous_proposer_boost, }; Ok(Self { @@ -71,3 +72,50 @@ impl TryFrom<(SszContainer, JustifiedBalances)> for ProtoArrayForkChoice { }) } } + +// Convert legacy V28 to current V29. +impl From for SszContainerV29 { + fn from(v28: SszContainerV28) -> Self { + Self { + votes: v28.votes_v28.into_iter().map(Into::into).collect(), + prune_threshold: v28.prune_threshold, + nodes: v28 + .nodes + .into_iter() + .map(|mut node| { + // best_child/best_descendant are no longer used (replaced by + // the virtual tree walk). Clear during conversion. + node.best_child = None; + node.best_descendant = None; + ProtoNode::V17(node) + }) + .collect(), + indices: v28.indices, + } + } +} + +// Downgrade current V29 to legacy V28 (lossy: V29 nodes lose payload-specific fields). +impl From for SszContainerV28 { + fn from(v29: SszContainerV29) -> Self { + Self { + votes_v28: v29.votes.into_iter().map(Into::into).collect(), + prune_threshold: v29.prune_threshold, + // These checkpoints are not consumed in v28 paths since the upgrade from v17, + // we can safely default the values. 
+ justified_checkpoint: Checkpoint::default(), + finalized_checkpoint: Checkpoint::default(), + nodes: v29 + .nodes + .into_iter() + .filter_map(|node| match node { + ProtoNode::V17(v17) => Some(v17), + ProtoNode::V29(_) => None, + }) + .collect(), + indices: v29.indices, + // Proposer boost is not tracked in V29 (computed on-the-fly), so reset it. + previous_proposer_boost: ProposerBoost::default(), + } + } +} diff --git a/consensus/types/src/attestation/indexed_payload_attestation.rs b/consensus/types/src/attestation/indexed_payload_attestation.rs index 4de805570c..bb2087e330 100644 --- a/consensus/types/src/attestation/indexed_payload_attestation.rs +++ b/consensus/types/src/attestation/indexed_payload_attestation.rs @@ -2,7 +2,6 @@ use crate::test_utils::TestRandom; use crate::{EthSpec, ForkName, PayloadAttestationData}; use bls::AggregateSignature; use context_deserialize::context_deserialize; -use core::slice::Iter; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use ssz_types::VariableList; @@ -21,12 +20,6 @@ pub struct IndexedPayloadAttestation { pub signature: AggregateSignature, } -impl IndexedPayloadAttestation { - pub fn attesting_indices_iter(&self) -> Iter<'_, u64> { - self.attesting_indices.iter() - } -} - #[cfg(test)] mod tests { use super::*; diff --git a/consensus/types/src/core/chain_spec.rs b/consensus/types/src/core/chain_spec.rs index cc79d3fc29..e612c8b6db 100644 --- a/consensus/types/src/core/chain_spec.rs +++ b/consensus/types/src/core/chain_spec.rs @@ -107,6 +107,8 @@ pub struct ChainSpec { pub shard_committee_period: u64, pub proposer_reorg_cutoff_bps: u64, pub attestation_due_bps: u64, + pub attestation_due_bps_gloas: u64, + pub payload_attestation_due_bps: u64, pub aggregate_due_bps: u64, pub sync_message_due_bps: u64, pub contribution_due_bps: u64, @@ -115,6 +117,8 @@ pub struct ChainSpec { * Derived time values (computed at startup via `compute_derived_values()`) */ pub unaggregated_attestation_due: Duration, + 
pub unaggregated_attestation_due_gloas: Duration, + pub payload_attestation_due: Duration, pub aggregate_attestation_due: Duration, pub sync_message_due: Duration, pub contribution_and_proof_due: Duration, @@ -877,6 +881,20 @@ impl ChainSpec { self.unaggregated_attestation_due } + /// Spec: `get_attestation_due_ms`. Returns the epoch-appropriate threshold. + pub fn get_attestation_due(&self, slot: Slot) -> Duration { + if self.fork_name_at_slot::(slot).gloas_enabled() { + self.unaggregated_attestation_due_gloas + } else { + self.unaggregated_attestation_due + } + } + + /// Spec: `get_payload_attestation_due_ms`. + pub fn get_payload_attestation_due(&self) -> Duration { + self.payload_attestation_due + } + /// Get the duration into a slot in which an aggregated attestation is due. /// Returns the pre-computed value from `compute_derived_values()`. pub fn get_aggregate_attestation_due(&self) -> Duration { @@ -949,6 +967,12 @@ impl ChainSpec { self.unaggregated_attestation_due = self .compute_slot_component_duration(self.attestation_due_bps) .expect("invalid chain spec: cannot compute unaggregated_attestation_due"); + self.unaggregated_attestation_due_gloas = self + .compute_slot_component_duration(self.attestation_due_bps_gloas) + .expect("invalid chain spec: cannot compute unaggregated_attestation_due_gloas"); + self.payload_attestation_due = self + .compute_slot_component_duration(self.payload_attestation_due_bps) + .expect("invalid chain spec: cannot compute payload_attestation_due"); self.aggregate_attestation_due = self .compute_slot_component_duration(self.aggregate_due_bps) .expect("invalid chain spec: cannot compute aggregate_attestation_due"); @@ -1079,6 +1103,8 @@ impl ChainSpec { shard_committee_period: 256, proposer_reorg_cutoff_bps: 1667, attestation_due_bps: 3333, + attestation_due_bps_gloas: 2500, + payload_attestation_due_bps: 7500, aggregate_due_bps: 6667, sync_message_due_bps: 3333, contribution_due_bps: 6667, @@ -1087,6 +1113,8 @@ impl ChainSpec { 
* Derived time values (set by `compute_derived_values()`) */ unaggregated_attestation_due: Duration::from_millis(3999), + unaggregated_attestation_due_gloas: Duration::from_millis(3000), + payload_attestation_due: Duration::from_millis(9000), aggregate_attestation_due: Duration::from_millis(8000), sync_message_due: Duration::from_millis(3999), contribution_and_proof_due: Duration::from_millis(8000), @@ -1390,6 +1418,8 @@ impl ChainSpec { * Precomputed for 6000ms slot: 3333 bps = 1999ms, 6667 bps = 4000ms */ unaggregated_attestation_due: Duration::from_millis(1999), + unaggregated_attestation_due_gloas: Duration::from_millis(1500), + payload_attestation_due: Duration::from_millis(4500), aggregate_attestation_due: Duration::from_millis(4000), sync_message_due: Duration::from_millis(1999), contribution_and_proof_due: Duration::from_millis(4000), @@ -1479,6 +1509,8 @@ impl ChainSpec { shard_committee_period: 256, proposer_reorg_cutoff_bps: 1667, attestation_due_bps: 3333, + attestation_due_bps_gloas: 2500, + payload_attestation_due_bps: 7500, aggregate_due_bps: 6667, /* @@ -1486,6 +1518,8 @@ impl ChainSpec { * Precomputed for 5000ms slot: 3333 bps = 1666ms, 6667 bps = 3333ms */ unaggregated_attestation_due: Duration::from_millis(1666), + unaggregated_attestation_due_gloas: Duration::from_millis(1250), + payload_attestation_due: Duration::from_millis(3750), aggregate_attestation_due: Duration::from_millis(3333), sync_message_due: Duration::from_millis(1666), contribution_and_proof_due: Duration::from_millis(3333), @@ -2062,6 +2096,12 @@ pub struct Config { #[serde(default = "default_attestation_due_bps")] #[serde(with = "serde_utils::quoted_u64")] attestation_due_bps: u64, + #[serde(default = "default_attestation_due_bps_gloas")] + #[serde(with = "serde_utils::quoted_u64")] + attestation_due_bps_gloas: u64, + #[serde(default = "default_payload_attestation_due_bps")] + #[serde(with = "serde_utils::quoted_u64")] + payload_attestation_due_bps: u64, #[serde(default = 
"default_aggregate_due_bps")] #[serde(with = "serde_utils::quoted_u64")] aggregate_due_bps: u64, @@ -2288,6 +2328,14 @@ const fn default_attestation_due_bps() -> u64 { 3333 } +const fn default_attestation_due_bps_gloas() -> u64 { + 2500 +} + +const fn default_payload_attestation_due_bps() -> u64 { + 7500 +} + const fn default_aggregate_due_bps() -> u64 { 6667 } @@ -2539,6 +2587,8 @@ impl Config { proposer_reorg_cutoff_bps: spec.proposer_reorg_cutoff_bps, attestation_due_bps: spec.attestation_due_bps, + attestation_due_bps_gloas: spec.attestation_due_bps_gloas, + payload_attestation_due_bps: spec.payload_attestation_due_bps, aggregate_due_bps: spec.aggregate_due_bps, sync_message_due_bps: spec.sync_message_due_bps, contribution_due_bps: spec.contribution_due_bps, @@ -2632,6 +2682,8 @@ impl Config { min_epochs_for_data_column_sidecars_requests, proposer_reorg_cutoff_bps, attestation_due_bps, + attestation_due_bps_gloas, + payload_attestation_due_bps, aggregate_due_bps, sync_message_due_bps, contribution_due_bps, @@ -2731,6 +2783,8 @@ impl Config { proposer_reorg_cutoff_bps, attestation_due_bps, + attestation_due_bps_gloas, + payload_attestation_due_bps, aggregate_due_bps, sync_message_due_bps, contribution_due_bps, @@ -3634,11 +3688,9 @@ mod yaml_tests { "EIP7928_FORK_VERSION", "EIP7928_FORK_EPOCH", // Gloas params not yet in Config - "ATTESTATION_DUE_BPS_GLOAS", "AGGREGATE_DUE_BPS_GLOAS", "SYNC_MESSAGE_DUE_BPS_GLOAS", "CONTRIBUTION_DUE_BPS_GLOAS", - "PAYLOAD_ATTESTATION_DUE_BPS", "MAX_REQUEST_PAYLOADS", // Gloas fork choice params not yet in Config "REORG_HEAD_WEIGHT_THRESHOLD", diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index 07a7d4c6b6..06f204ab01 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -30,7 +30,8 @@ use types::{ Attestation, AttestationRef, AttesterSlashing, AttesterSlashingRef, BeaconBlock, BeaconState, BlobSidecar, BlobsList, 
BlockImportSource, Checkpoint, DataColumnSidecar, DataColumnSidecarList, DataColumnSubnetId, ExecutionBlockHash, Hash256, IndexedAttestation, - KzgProof, ProposerPreparationData, SignedBeaconBlock, Slot, Uint256, + KzgProof, ProposerPreparationData, SignedBeaconBlock, SignedExecutionPayloadEnvelope, Slot, + Uint256, }; // When set to true, cache any states fetched from the db. @@ -72,6 +73,7 @@ pub struct Checks { proposer_boost_root: Option, get_proposer_head: Option, should_override_forkchoice_update: Option, + head_payload_status: Option, } #[derive(Debug, Clone, Deserialize)] @@ -94,7 +96,15 @@ impl From for PayloadStatusV1 { #[derive(Debug, Clone, Deserialize)] #[serde(untagged, deny_unknown_fields)] -pub enum Step { +pub enum Step< + TBlock, + TBlobs, + TColumns, + TAttestation, + TAttesterSlashing, + TPowBlock, + TExecutionPayload = String, +> { Tick { tick: u64, }, @@ -128,6 +138,10 @@ pub enum Step, valid: bool, }, + OnExecutionPayload { + execution_payload: TExecutionPayload, + valid: bool, + }, } #[derive(Debug, Clone, Deserialize)] @@ -151,6 +165,7 @@ pub struct ForkChoiceTest { Attestation, AttesterSlashing, PowBlock, + SignedExecutionPayloadEnvelope, >, >, } @@ -271,6 +286,17 @@ impl LoadCase for ForkChoiceTest { valid, }) } + Step::OnExecutionPayload { + execution_payload, + valid, + } => { + let envelope = + ssz_decode_file(&path.join(format!("{execution_payload}.ssz_snappy")))?; + Ok(Step::OnExecutionPayload { + execution_payload: envelope, + valid, + }) + } }) .collect::>()?; let anchor_state = ssz_decode_state(&path.join("anchor_state.ssz_snappy"), spec)?; @@ -359,6 +385,7 @@ impl Case for ForkChoiceTest { proposer_boost_root, get_proposer_head, should_override_forkchoice_update: should_override_fcu, + head_payload_status, } = checks.as_ref(); if let Some(expected_head) = head { @@ -405,6 +432,10 @@ impl Case for ForkChoiceTest { if let Some(expected_proposer_head) = get_proposer_head { 
tester.check_expected_proposer_head(*expected_proposer_head)?; } + + if let Some(expected_status) = head_payload_status { + tester.check_head_payload_status(*expected_status)?; + } } Step::MaybeValidBlockAndColumns { @@ -414,6 +445,12 @@ impl Case for ForkChoiceTest { } => { tester.process_block_and_columns(block.clone(), columns.clone(), *valid)?; } + Step::OnExecutionPayload { + execution_payload, + valid, + } => { + tester.process_execution_payload(execution_payload, *valid)?; + } } } @@ -584,6 +621,18 @@ impl Tester { self.apply_invalid_block(&block)?; } + // Per spec test runner: an on_block step implies receiving block's attestations + // and attester slashings. + if success { + for attestation in block.message().body().attestations() { + let att = attestation.clone_as_attestation(); + let _ = self.process_attestation(&att); + } + for attester_slashing in block.message().body().attester_slashings() { + self.process_attester_slashing(attester_slashing); + } + } + Ok(()) } @@ -674,6 +723,18 @@ impl Tester { self.apply_invalid_block(&block)?; } + // Per spec test runner: an on_block step implies receiving block's attestations + // and attester slashings. 
+ if success { + for attestation in block.message().body().attestations() { + let att = attestation.clone_as_attestation(); + let _ = self.process_attestation(&att); + } + for attester_slashing in block.message().body().attester_slashings() { + self.process_attester_slashing(attester_slashing); + } + } + Ok(()) } @@ -913,7 +974,7 @@ impl Tester { ) -> Result<(), Error> { let mut fc = self.harness.chain.canonical_head.fork_choice_write_lock(); let slot = self.harness.chain.slot().unwrap(); - let canonical_head = fc.get_head(slot, &self.harness.spec).unwrap(); + let (canonical_head, _) = fc.get_head(slot, &self.harness.spec).unwrap(); let proposer_head_result = fc.get_proposer_head( slot, canonical_head, @@ -923,7 +984,7 @@ impl Tester { DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION, ); let proposer_head = match proposer_head_result { - Ok(head) => head.parent_node.root, + Ok(head) => head.parent_node.root(), Err(ProposerHeadError::DoNotReOrg(_)) => canonical_head, _ => panic!("Unexpected error in get proposer head"), }; @@ -931,6 +992,58 @@ impl Tester { check_equal("proposer_head", proposer_head, expected_proposer_head) } + pub fn process_execution_payload( + &self, + signed_envelope: &SignedExecutionPayloadEnvelope, + valid: bool, + ) -> Result<(), Error> { + let block_root = signed_envelope.message.beacon_block_root; + + // Store the envelope in the database so that child blocks extending + // the FULL path can load the parent's post-payload state. 
+ if valid { + self.harness + .chain + .store + .put_payload_envelope(&block_root, signed_envelope.clone()) + .map_err(|e| { + Error::InternalError(format!( + "Failed to store payload envelope for {block_root:?}: {e:?}", + )) + })?; + } + + let result = self + .harness + .chain + .canonical_head + .fork_choice_write_lock() + .on_valid_payload_envelope_received(block_root); + + if valid { + result.map_err(|e| { + Error::InternalError(format!( + "on_execution_payload for block root {} failed: {:?}", + block_root, e + )) + })?; + } else if result.is_ok() { + return Err(Error::DidntFail(format!( + "on_execution_payload for block root {} should have failed", + block_root + ))); + } + + Ok(()) + } + + pub fn check_head_payload_status(&self, expected_status: u8) -> Result<(), Error> { + let head = self.find_head()?; + // PayloadStatus repr: Empty=0, Full=1, Pending=2 (matches spec constants). + let actual = head.head_payload_status() as u8; + check_equal("head_payload_status", actual, expected_status) + } + pub fn check_should_override_fcu( &self, expected_should_override_fcu: ShouldOverrideFcu, diff --git a/testing/ef_tests/src/handler.rs b/testing/ef_tests/src/handler.rs index f8c16aec0b..4373d6b7d1 100644 --- a/testing/ef_tests/src/handler.rs +++ b/testing/ef_tests/src/handler.rs @@ -704,15 +704,27 @@ impl Handler for ForkChoiceHandler { return false; } - // No FCU override tests prior to bellatrix. + // No FCU override tests prior to bellatrix, and removed in Gloas. if self.handler_name == "should_override_forkchoice_update" - && !fork_name.bellatrix_enabled() + && (!fork_name.bellatrix_enabled() || fork_name.gloas_enabled()) { return false; } - // Deposit tests exist only after Electra. - if self.handler_name == "deposit_with_reorg" && !fork_name.electra_enabled() { + // Deposit tests exist only for Electra and Fulu (not Gloas). 
+ if self.handler_name == "deposit_with_reorg" + && (!fork_name.electra_enabled() || fork_name.gloas_enabled()) + { + return false; + } + + // Proposer head tests removed in Gloas. + if self.handler_name == "get_proposer_head" && fork_name.gloas_enabled() { + return false; + } + + // on_execution_payload tests exist only for Gloas. + if self.handler_name == "on_execution_payload" && !fork_name.gloas_enabled() { return false; } @@ -722,8 +734,7 @@ impl Handler for ForkChoiceHandler { } fn disabled_forks(&self) -> Vec { - // TODO(gloas): remove once we have Gloas fork choice tests - vec![ForkName::Gloas] + vec![] } } diff --git a/testing/ef_tests/tests/tests.rs b/testing/ef_tests/tests/tests.rs index 3254bb6e90..62eb2dd038 100644 --- a/testing/ef_tests/tests/tests.rs +++ b/testing/ef_tests/tests/tests.rs @@ -1038,6 +1038,12 @@ fn fork_choice_deposit_with_reorg() { // There is no mainnet variant for this test. } +#[test] +fn fork_choice_on_execution_payload() { + ForkChoiceHandler::::new("on_execution_payload").run(); + ForkChoiceHandler::::new("on_execution_payload").run(); +} + #[test] fn optimistic_sync() { OptimisticSyncHandler::::default().run(); From 27af0ed82c06019f10a24a6b117fd1a50b45a22a Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Sat, 4 Apr 2026 03:35:08 +1100 Subject: [PATCH 56/57] Add test for protocol registration completeness (#8920) Co-Authored-By: Jimmy Chen --- .../lighthouse_network/src/rpc/protocol.rs | 106 +++++++++++++++++- 1 file changed, 104 insertions(+), 2 deletions(-) diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index 2c92e17c44..c949dfe17d 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -11,7 +11,7 @@ use std::io; use std::marker::PhantomData; use std::sync::{Arc, LazyLock}; use std::time::Duration; -use strum::{AsRefStr, Display, EnumString, IntoStaticStr}; +use strum::{AsRefStr, Display, 
EnumIter, EnumString, IntoStaticStr}; use tokio_util::{ codec::Framed, compat::{Compat, FuturesAsyncReadCompatExt}, @@ -329,7 +329,7 @@ pub enum Encoding { } /// All valid protocol name and version combinations. -#[derive(Debug, Clone, Copy, PartialEq)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EnumIter)] pub enum SupportedProtocol { StatusV1, StatusV2, @@ -499,6 +499,10 @@ impl UpgradeInfo for RPCProtocol { SupportedProtocol::LightClientFinalityUpdateV1, Encoding::SSZSnappy, )); + supported_protocols.push(ProtocolId::new( + SupportedProtocol::LightClientUpdatesByRangeV1, + Encoding::SSZSnappy, + )); } supported_protocols } @@ -1133,3 +1137,101 @@ impl RPCError { } } } + +#[cfg(test)] +mod tests { + use super::*; + use libp2p::core::UpgradeInfo; + use std::collections::HashSet; + use strum::IntoEnumIterator; + use types::{Hash256, Slot}; + + type E = MainnetEthSpec; + + /// Whether this protocol should appear in `currently_supported()` for the given context. + /// + /// Uses an exhaustive match so that adding a new `SupportedProtocol` variant + /// causes a compile error until this function is updated. 
+ fn expected_in_currently_supported( + protocol: SupportedProtocol, + fork_context: &ForkContext, + ) -> bool { + use SupportedProtocol::*; + match protocol { + StatusV1 | StatusV2 | GoodbyeV1 | PingV1 | BlocksByRangeV1 | BlocksByRangeV2 + | BlocksByRootV1 | BlocksByRootV2 | MetaDataV1 | MetaDataV2 => true, + + BlobsByRangeV1 | BlobsByRootV1 => fork_context.fork_exists(ForkName::Deneb), + + DataColumnsByRootV1 | DataColumnsByRangeV1 | MetaDataV3 => { + fork_context.spec.is_peer_das_scheduled() + } + + PayloadEnvelopesByRangeV1 | PayloadEnvelopesByRootV1 => { + fork_context.fork_exists(ForkName::Gloas) + } + + // Light client protocols are not in currently_supported() + LightClientBootstrapV1 + | LightClientOptimisticUpdateV1 + | LightClientFinalityUpdateV1 + | LightClientUpdatesByRangeV1 => false, + } + } + + /// Whether this protocol should appear in `protocol_info()` when light client server is + /// enabled. + /// + /// Uses an exhaustive match so that adding a new `SupportedProtocol` variant + /// causes a compile error until this function is updated. 
+ fn expected_in_protocol_info(protocol: SupportedProtocol, fork_context: &ForkContext) -> bool { + use SupportedProtocol::*; + match protocol { + LightClientBootstrapV1 + | LightClientOptimisticUpdateV1 + | LightClientFinalityUpdateV1 + | LightClientUpdatesByRangeV1 => true, + + _ => expected_in_currently_supported(protocol, fork_context), + } + } + + #[test] + fn all_protocols_registered() { + for fork in ForkName::list_all() { + let spec = fork.make_genesis_spec(E::default_spec()); + let fork_context = Arc::new(ForkContext::new::(Slot::new(0), Hash256::ZERO, &spec)); + + let currently_supported: HashSet = + SupportedProtocol::currently_supported(&fork_context) + .into_iter() + .map(|pid| pid.versioned_protocol) + .collect(); + + let rpc_protocol = RPCProtocol:: { + fork_context: fork_context.clone(), + max_rpc_size: spec.max_payload_size as usize, + enable_light_client_server: true, + phantom: PhantomData, + }; + let protocol_info: HashSet = rpc_protocol + .protocol_info() + .into_iter() + .map(|pid| pid.versioned_protocol) + .collect(); + + for protocol in SupportedProtocol::iter() { + assert_eq!( + currently_supported.contains(&protocol), + expected_in_currently_supported(protocol, &fork_context), + "{protocol:?} registration mismatch in currently_supported() at {fork:?}" + ); + assert_eq!( + protocol_info.contains(&protocol), + expected_in_protocol_info(protocol, &fork_context), + "{protocol:?} registration mismatch in protocol_info() at {fork:?}" + ); + } + } + } +} From 7559dd28090d47c565dbd1fbbe6e20077d27b6b9 Mon Sep 17 00:00:00 2001 From: Lion - dapplion <35266934+dapplion@users.noreply.github.com> Date: Sat, 4 Apr 2026 17:36:26 -0500 Subject: [PATCH 57/57] Use spec constants for PTC thresholds in fork choice (#9088) Co-Authored-By: dapplion <35266934+dapplion@users.noreply.github.com> --- consensus/proto_array/src/proto_array.rs | 6 +++--- consensus/types/src/core/eth_spec.rs | 5 +++++ 2 files changed, 8 insertions(+), 3 deletions(-) diff --git 
a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index dfb43f5f34..1f7291b260 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -211,7 +211,7 @@ impl ProtoNode { return false; } - node.payload_timeliness_votes.num_set_bits() > E::ptc_size() / 2 + node.payload_timeliness_votes.num_set_bits() > E::payload_timely_threshold() } pub fn is_payload_data_available(&self) -> bool { @@ -224,8 +224,8 @@ impl ProtoNode { return false; } - // TODO(gloas): add function on EthSpec for DATA_AVAILABILITY_TIMELY_THRESHOLD - node.payload_data_availability_votes.num_set_bits() > E::ptc_size() / 2 + node.payload_data_availability_votes.num_set_bits() + > E::data_availability_timely_threshold() } } diff --git a/consensus/types/src/core/eth_spec.rs b/consensus/types/src/core/eth_spec.rs index 36d61fbbf9..4159091f5d 100644 --- a/consensus/types/src/core/eth_spec.rs +++ b/consensus/types/src/core/eth_spec.rs @@ -448,6 +448,11 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq + fn payload_timely_threshold() -> usize { Self::PTCSize::to_usize() / 2 } + + /// Returns the `DATA_AVAILABILITY_TIMELY_THRESHOLD` constant (PTC_SIZE / 2). + fn data_availability_timely_threshold() -> usize { + Self::PTCSize::to_usize() / 2 + } } /// Macro to inherit some type values from another EthSpec.