From f854afa3528a7aae4b8062926b53dbf0c060e072 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 12 Nov 2025 12:46:05 +1100 Subject: [PATCH 1/9] Prevent unnecessary state advances pre-Fulu (#8388) State advances were observed as especially slow on pre-Fulu networks (mainnet). The reason being: we were doing an extra epoch of state advance because of code that should only have been running after Fulu, when proposer shufflings are determined with lookahead. Only attempt to cache the _next epoch_ shuffling if the state's slot determines it (this will only be true post-Fulu). Reusing the logic for `proposer_shuffling_decision_slot` avoids having to repeat the fiddly logic about the Fulu fork epoch itself. Co-Authored-By: Michael Sproul --- .../beacon_chain/src/state_advance_timer.rs | 42 +++++++++++-------- 1 file changed, 24 insertions(+), 18 deletions(-) diff --git a/beacon_node/beacon_chain/src/state_advance_timer.rs b/beacon_node/beacon_chain/src/state_advance_timer.rs index b10edf2336..a070dc350b 100644 --- a/beacon_node/beacon_chain/src/state_advance_timer.rs +++ b/beacon_node/beacon_chain/src/state_advance_timer.rs @@ -363,24 +363,30 @@ fn advance_head(beacon_chain: &Arc>) -> Resu // For epochs *greater than* the Fulu fork epoch, we have also determined the proposer // shuffling for the next epoch. 
let next_epoch = state.next_epoch()?; - let next_epoch_decision_root = state.proposer_shuffling_decision_root_at_epoch( - next_epoch, - head_block_root, - &beacon_chain.spec, - )?; - beacon_chain.with_proposer_cache( - next_epoch_decision_root, - next_epoch, - |_| Ok(()), - || { - debug!( - shuffling_decision_root = ?next_epoch_decision_root, - epoch = %next_epoch, - "Computing next epoch proposer shuffling in state advance" - ); - Ok::<_, Error>((advanced_state_root, state.clone())) - }, - )?; + let next_epoch_decision_slot = beacon_chain + .spec + .proposer_shuffling_decision_slot::(next_epoch); + + if state.slot() > next_epoch_decision_slot { + let next_epoch_decision_root = state.proposer_shuffling_decision_root_at_epoch( + next_epoch, + head_block_root, + &beacon_chain.spec, + )?; + beacon_chain.with_proposer_cache( + next_epoch_decision_root, + next_epoch, + |_| Ok(()), + || { + debug!( + shuffling_decision_root = ?next_epoch_decision_root, + epoch = %next_epoch, + "Computing next epoch proposer shuffling in state advance" + ); + Ok::<_, Error>((advanced_state_root, state.clone())) + }, + )?; + } // Update the attester cache. 
let shuffling_id = From 47b984e799b4d298f91d5c8baadb530ee750d91c Mon Sep 17 00:00:00 2001 From: antondlr Date: Wed, 12 Nov 2025 13:12:12 +0100 Subject: [PATCH 2/9] re-targeting of `remove-windows-ci` against `release-v8.0` (#8406) Co-Authored-By: antondlr --- .github/workflows/release.yml | 39 +------------------------------- .github/workflows/test-suite.yml | 33 --------------------------- 2 files changed, 1 insertion(+), 71 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 7c85cdd05c..f7b65f07c9 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -32,8 +32,7 @@ jobs: matrix: arch: [aarch64-unknown-linux-gnu, x86_64-unknown-linux-gnu, - aarch64-apple-darwin, - x86_64-windows] + aarch64-apple-darwin] include: - arch: aarch64-unknown-linux-gnu runner: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "release", "large"]') || 'ubuntu-latest' }} @@ -44,9 +43,6 @@ jobs: - arch: aarch64-apple-darwin runner: macos-14 profile: maxperf - - arch: x86_64-windows - runner: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "windows", "release"]') || 'windows-2019' }} - profile: maxperf runs-on: ${{ matrix.runner }} needs: extract-version @@ -57,19 +53,6 @@ jobs: if: env.SELF_HOSTED_RUNNERS == 'false' run: rustup update stable - # ============================== - # Windows dependencies - # ============================== - - - uses: KyleMayes/install-llvm-action@v1 - if: env.SELF_HOSTED_RUNNERS == 'false' && startsWith(matrix.arch, 'x86_64-windows') - with: - version: "17.0" - directory: ${{ runner.temp }}/llvm - - name: Set LIBCLANG_PATH - if: startsWith(matrix.arch, 'x86_64-windows') - run: echo "LIBCLANG_PATH=$((gcm clang).source -replace "clang.exe")" >> $env:GITHUB_ENV - # ============================== # Builds # ============================== @@ -94,12 +77,7 @@ jobs: if: matrix.arch == 'aarch64-apple-darwin' run: cargo install --path lighthouse 
--force --locked --features portable,gnosis --profile ${{ matrix.profile }} - - name: Build Lighthouse for Windows - if: matrix.arch == 'x86_64-windows' - run: cargo install --path lighthouse --force --locked --features portable,gnosis --profile ${{ matrix.profile }} - - name: Configure GPG and create artifacts - if: startsWith(matrix.arch, 'x86_64-windows') != true env: GPG_SIGNING_KEY: ${{ secrets.GPG_SIGNING_KEY }} GPG_PASSPHRASE: ${{ secrets.GPG_PASSPHRASE }} @@ -118,20 +96,6 @@ jobs: done mv *tar.gz* .. - - name: Configure GPG and create artifacts Windows - if: startsWith(matrix.arch, 'x86_64-windows') - env: - GPG_SIGNING_KEY: ${{ secrets.GPG_SIGNING_KEY }} - GPG_PASSPHRASE: ${{ secrets.GPG_PASSPHRASE }} - run: | - echo $env:GPG_SIGNING_KEY | gpg --batch --import - mkdir artifacts - move $env:USERPROFILE/.cargo/bin/lighthouse.exe ./artifacts - cd artifacts - tar -czf lighthouse-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.arch }}.tar.gz lighthouse.exe - gpg --passphrase "$env:GPG_PASSPHRASE" --batch --pinentry-mode loopback -ab lighthouse-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.arch }}.tar.gz - move *tar.gz* .. 
- # ======================================================================= # Upload artifacts # This is required to share artifacts between different jobs @@ -239,7 +203,6 @@ jobs: | Apple logo | aarch64 | [lighthouse-${{ env.VERSION }}-aarch64-apple-darwin.tar.gz](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-aarch64-apple-darwin.tar.gz) | [PGP Signature](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-aarch64-apple-darwin.tar.gz.asc) | | Linux logo | x86_64 | [lighthouse-${{ env.VERSION }}-x86_64-unknown-linux-gnu.tar.gz](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-x86_64-unknown-linux-gnu.tar.gz) | [PGP Signature](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-x86_64-unknown-linux-gnu.tar.gz.asc) | | Raspberrypi logo | aarch64 | [lighthouse-${{ env.VERSION }}-aarch64-unknown-linux-gnu.tar.gz](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-aarch64-unknown-linux-gnu.tar.gz) | [PGP Signature](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-aarch64-unknown-linux-gnu.tar.gz.asc) | - | Windows logo | x86_64 | [lighthouse-${{ env.VERSION }}-x86_64-windows.tar.gz](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-x86_64-windows.tar.gz) | [PGP Signature](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-x86_64-windows.tar.gz.asc) | | | | | | | **System** | **Option** | - | **Resource** | | Docker logo | Docker | [${{ env.VERSION }}](https://hub.docker.com/r/${{ env.IMAGE_NAME }}/tags?page=1&ordering=last_updated&name=${{ env.VERSION }}) | [${{ env.IMAGE_NAME }}](https://hub.docker.com/r/${{ 
env.IMAGE_NAME }}) | diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index 0201bf9ae3..0cdd8211da 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -107,38 +107,6 @@ jobs: if: env.SELF_HOSTED_RUNNERS == 'true' continue-on-error: true run: sccache --show-stats - release-tests-windows: - name: release-tests-windows - needs: [check-labels] - if: needs.check-labels.outputs.skip_ci != 'true' - runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "windows", "CI"]') || 'windows-2019' }} - steps: - - uses: actions/checkout@v5 - - name: Get latest version of stable Rust - if: env.SELF_HOSTED_RUNNERS == 'false' - uses: moonrepo/setup-rust@v1 - with: - channel: stable - cache-target: release - bins: cargo-nextest - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - - name: Install Foundry (anvil) - if: env.SELF_HOSTED_RUNNERS == 'false' - uses: foundry-rs/foundry-toolchain@v1 - with: - version: nightly-ca67d15f4abd46394b324c50e21e66f306a1162d - - name: Install make - if: env.SELF_HOSTED_RUNNERS == 'false' - run: choco install -y make - - name: Set LIBCLANG_PATH - run: echo "LIBCLANG_PATH=$((gcm clang).source -replace "clang.exe")" >> $env:GITHUB_ENV - - name: Run tests in release - run: make test-release - - name: Show cache stats - if: env.SELF_HOSTED_RUNNERS == 'true' - continue-on-error: true - run: sccache --show-stats beacon-chain-tests: name: beacon-chain-tests needs: [check-labels] @@ -501,7 +469,6 @@ jobs: 'check-labels', 'target-branch-check', 'release-tests-ubuntu', - 'release-tests-windows', 'beacon-chain-tests', 'op-pool-tests', 'network-tests', From 01a654bfa881d833bbe170167bf3c544f2a84096 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 17 Nov 2025 13:04:46 +1100 Subject: [PATCH 3/9] Fix tracing span for execution payload verif (#8419) Fix the span on execution payload verification (newPayload), by creating a new span rather than using the parent span. 
Using the parent span was incorrectly associating the time spent verifying the payload with `from_signature_verified_components`. Co-Authored-By: Michael Sproul --- beacon_node/beacon_chain/src/block_verification.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 691293b200..5078e24a51 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -1428,11 +1428,11 @@ impl ExecutionPendingBlock { // Spawn the payload verification future as a new task, but don't wait for it to complete. // The `payload_verification_future` will be awaited later to ensure verification completed // successfully. - let current_span = Span::current(); let payload_verification_handle = chain .task_executor .spawn_handle( - payload_verification_future.instrument(current_span), + payload_verification_future + .instrument(debug_span!("execution_payload_verification")), "execution_payload_verification", ) .ok_or(BeaconChainError::RuntimeShutdown)?; From f2b945a5b5a8724f9426bf5a6e6e3404b09fef41 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Mon, 17 Nov 2025 13:07:42 +1100 Subject: [PATCH 4/9] Do not require blobs from checkpoint servers from Fulu epochs. 
(#8413) Addressed this comment here: https://github.com/sigp/lighthouse/issues/6837#issuecomment-3509209465 Lighthouse can only checkpoint sync from a server that can serve blob sidecars, which means they need to be at least custodying 50% of columns (semi-supernodes) This PR lifts this constraint, as blob sidecar endpoint is getting deprecated in Fulu, and we plan to fetch the checkpoint data columns from peers (#6837) Co-Authored-By: Jimmy Chen --- beacon_node/client/src/builder.rs | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index c3c827f0aa..d55afbffe6 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -345,7 +345,13 @@ where .map_err(|e| format!("Unable to parse weak subj state SSZ: {:?}", e))?; let anchor_block = SignedBeaconBlock::from_ssz_bytes(&anchor_block_bytes, &spec) .map_err(|e| format!("Unable to parse weak subj block SSZ: {:?}", e))?; - let anchor_blobs = if anchor_block.message().body().has_blobs() { + + // `BlobSidecar` is no longer used from Fulu onwards (superseded by `DataColumnSidecar`), + // which will be fetched via rpc instead (unimplemented). 
+ let is_before_fulu = !spec + .fork_name_at_slot::(anchor_block.slot()) + .fulu_enabled(); + let anchor_blobs = if is_before_fulu && anchor_block.message().body().has_blobs() { let max_blobs_len = spec.max_blobs_per_block(anchor_block.epoch()) as usize; let anchor_blobs_bytes = anchor_blobs_bytes .ok_or("Blobs for checkpoint must be provided using --checkpoint-blobs")?; @@ -409,7 +415,11 @@ where debug!("Downloaded finalized block"); - let blobs = if block.message().body().has_blobs() { + // `get_blob_sidecars` API is deprecated from Fulu and may not be supported by all servers + let is_before_fulu = !spec + .fork_name_at_slot::(finalized_block_slot) + .fulu_enabled(); + let blobs = if is_before_fulu && block.message().body().has_blobs() { debug!("Downloading finalized blobs"); if let Some(response) = remote .get_blob_sidecars::(BlockId::Root(block_root), None, &spec) From af1d9b99911e2467a56ce9f308f06ec7a9ead3eb Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Mon, 17 Nov 2025 16:23:12 +1100 Subject: [PATCH 5/9] Fix custody context initialization race condition that caused panic (#8391) Take 2 of #8390. Fixes the race condition properly instead of propagating the error. I think this is a better alternative, and doesn't seem to look that bad. * Lift node id loading or generation from `NetworkService ` startup to the `ClientBuilder`, so that it can be used to compute custody columns for the beacon chain without waiting for Network bootstrap. I've considered and implemented a few alternatives: 1. passing `node_id` to beacon chain builder and compute columns when creating `CustodyContext`. This approach isn't good for separation of concerns and isn't great for testability 2. passing `ordered_custody_groups` to beacon chain. `CustodyContext` only uses this to compute ordered custody columns, so we might as well lift this logic out, so we don't have to do error handling in `CustodyContext` construction. Less tests to update;. 
Co-Authored-By: Jimmy Chen --- beacon_node/beacon_chain/src/builder.rs | 34 ++- .../beacon_chain/src/custody_context.rs | 218 ++++++++---------- .../src/data_availability_checker.rs | 39 ++-- .../overflow_lru_cache.rs | 7 +- beacon_node/beacon_chain/src/test_utils.rs | 18 +- beacon_node/beacon_chain/tests/store_tests.rs | 11 +- beacon_node/client/src/builder.rs | 34 ++- beacon_node/lighthouse_network/src/lib.rs | 2 +- .../lighthouse_network/src/service/mod.rs | 4 +- .../lighthouse_network/tests/common.rs | 14 +- beacon_node/network/src/service.rs | 5 + beacon_node/network/src/service/tests.rs | 3 + .../network/src/subnet_service/tests/mod.rs | 4 + beacon_node/src/lib.rs | 10 +- .../types/src/data_column_custody_group.rs | 23 +- 15 files changed, 230 insertions(+), 196 deletions(-) diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 719c24b956..ef438b16e0 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -40,9 +40,10 @@ use std::time::Duration; use store::{Error as StoreError, HotColdDB, ItemStore, KeyValueStoreOp}; use task_executor::{ShutdownReason, TaskExecutor}; use tracing::{debug, error, info}; +use types::data_column_custody_group::CustodyIndex; use types::{ - BeaconBlock, BeaconState, BlobSidecarList, ChainSpec, DataColumnSidecarList, Epoch, EthSpec, - FixedBytesExtended, Hash256, Signature, SignedBeaconBlock, Slot, + BeaconBlock, BeaconState, BlobSidecarList, ChainSpec, ColumnIndex, DataColumnSidecarList, + Epoch, EthSpec, FixedBytesExtended, Hash256, Signature, SignedBeaconBlock, Slot, }; /// An empty struct used to "witness" all the `BeaconChainTypes` traits. 
It has no user-facing @@ -102,6 +103,7 @@ pub struct BeaconChainBuilder { task_executor: Option, validator_monitor_config: Option, node_custody_type: NodeCustodyType, + ordered_custody_column_indices: Option>, rng: Option>, } @@ -141,6 +143,7 @@ where task_executor: None, validator_monitor_config: None, node_custody_type: NodeCustodyType::Fullnode, + ordered_custody_column_indices: None, rng: None, } } @@ -647,6 +650,16 @@ where self } + /// Sets the ordered custody column indices for this node. + /// This is used to determine the data columns the node is required to custody. + pub fn ordered_custody_column_indices( + mut self, + ordered_custody_column_indices: Vec, + ) -> Self { + self.ordered_custody_column_indices = Some(ordered_custody_column_indices); + self + } + /// Sets the `BeaconChain` event handler backend. /// /// For example, provide `ServerSentEventHandler` as a `handler`. @@ -740,6 +753,9 @@ where .genesis_state_root .ok_or("Cannot build without a genesis state root")?; let validator_monitor_config = self.validator_monitor_config.unwrap_or_default(); + let ordered_custody_column_indices = self + .ordered_custody_column_indices + .ok_or("Cannot build without ordered custody column indices")?; let rng = self.rng.ok_or("Cannot build without an RNG")?; let beacon_proposer_cache: Arc> = <_>::default(); @@ -942,11 +958,16 @@ where custody, self.node_custody_type, head_epoch, + ordered_custody_column_indices, &self.spec, ) } else { ( - CustodyContext::new(self.node_custody_type, &self.spec), + CustodyContext::new( + self.node_custody_type, + ordered_custody_column_indices, + &self.spec, + ), None, ) }; @@ -1220,7 +1241,9 @@ fn build_data_columns_from_blobs( #[cfg(test)] mod test { use super::*; - use crate::test_utils::{EphemeralHarnessType, get_kzg}; + use crate::test_utils::{ + EphemeralHarnessType, generate_data_column_indices_rand_order, get_kzg, + }; use ethereum_hashing::hash; use genesis::{ DEFAULT_ETH1_BLOCK_HASH, generate_deterministic_keypairs, 
interop_genesis_state, @@ -1272,6 +1295,9 @@ mod test { .expect("should configure testing slot clock") .shutdown_sender(shutdown_tx) .rng(Box::new(StdRng::seed_from_u64(42))) + .ordered_custody_column_indices( + generate_data_column_indices_rand_order::(), + ) .build() .expect("should build"); diff --git a/beacon_node/beacon_chain/src/custody_context.rs b/beacon_node/beacon_chain/src/custody_context.rs index a5ef3ed2f6..c512ce616a 100644 --- a/beacon_node/beacon_chain/src/custody_context.rs +++ b/beacon_node/beacon_chain/src/custody_context.rs @@ -2,13 +2,11 @@ use parking_lot::RwLock; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use std::marker::PhantomData; -use std::sync::OnceLock; use std::{ collections::{BTreeMap, HashMap}, sync::atomic::{AtomicU64, Ordering}, }; use tracing::{debug, warn}; -use types::data_column_custody_group::{CustodyIndex, compute_columns_for_custody_group}; use types::{ChainSpec, ColumnIndex, Epoch, EthSpec, Slot}; /// A delay before making the CGC change effective to the data availability checker. @@ -206,7 +204,7 @@ fn get_validators_custody_requirement(validator_custody_units: u64, spec: &Chain /// Therefore, the custody count at any point in time is calculated as the max of /// the validator custody at that time and the current cli params. /// -/// Choosing the max ensures that we always have the minimum required columns and +/// Choosing the max ensures that we always have the minimum required columns, and /// we can adjust the `status.earliest_available_slot` value to indicate to our peers /// the columns that we can guarantee to serve. #[derive(Debug, Copy, Clone, PartialEq, Eq, Default, Deserialize, Serialize)] @@ -218,7 +216,7 @@ pub enum NodeCustodyType { /// wants to subscribe to the minimum number of columns to enable /// reconstruction (50%) of the full blob data on demand. 
SemiSupernode, - /// The node isn't running with with any explicit cli parameters + /// The node isn't running with any explicit cli parameters /// or is running with cli parameters to indicate that it wants /// to only subscribe to the minimal custody requirements. #[default] @@ -248,9 +246,9 @@ pub struct CustodyContext { validator_custody_count: AtomicU64, /// Maintains all the validators that this node is connected to currently validator_registrations: RwLock, - /// Stores an immutable, ordered list of all custody columns as determined by the node's NodeID - /// on startup. - all_custody_columns_ordered: OnceLock>, + /// Stores an immutable, ordered list of all data column indices as determined by the node's NodeID + /// on startup. This used to determine the node's custody columns. + ordered_custody_column_indices: Vec, _phantom_data: PhantomData, } @@ -259,7 +257,11 @@ impl CustodyContext { /// exists. /// /// The `node_custody_type` value is based on current cli parameters. - pub fn new(node_custody_type: NodeCustodyType, spec: &ChainSpec) -> Self { + pub fn new( + node_custody_type: NodeCustodyType, + ordered_custody_column_indices: Vec, + spec: &ChainSpec, + ) -> Self { let cgc_override = node_custody_type.get_custody_count_override(spec); // If there's no override, we initialise `validator_custody_count` to 0. 
This has been the // existing behaviour and we maintain this for now to avoid a semantic schema change until @@ -267,7 +269,7 @@ impl CustodyContext { Self { validator_custody_count: AtomicU64::new(cgc_override.unwrap_or(0)), validator_registrations: RwLock::new(ValidatorRegistrations::new(cgc_override)), - all_custody_columns_ordered: OnceLock::new(), + ordered_custody_column_indices, _phantom_data: PhantomData, } } @@ -290,6 +292,7 @@ impl CustodyContext { ssz_context: CustodyContextSsz, node_custody_type: NodeCustodyType, head_epoch: Epoch, + ordered_custody_column_indices: Vec, spec: &ChainSpec, ) -> (Self, Option) { let CustodyContextSsz { @@ -355,39 +358,13 @@ impl CustodyContext { .into_iter() .collect(), }), - all_custody_columns_ordered: OnceLock::new(), + ordered_custody_column_indices, _phantom_data: PhantomData, }; (custody_context, custody_count_changed) } - /// Initializes an ordered list of data columns based on provided custody groups. - /// - /// # Arguments - /// * `all_custody_groups_ordered` - Vector of custody group indices to map to columns - /// * `spec` - Chain specification containing custody parameters - /// - /// # Returns - /// Ok(()) if initialization succeeds, Err with description string if it fails - pub fn init_ordered_data_columns_from_custody_groups( - &self, - all_custody_groups_ordered: Vec, - spec: &ChainSpec, - ) -> Result<(), String> { - let mut ordered_custody_columns = vec![]; - for custody_index in all_custody_groups_ordered { - let columns = compute_columns_for_custody_group::(custody_index, spec) - .map_err(|e| format!("Failed to compute columns for custody group {e:?}"))?; - ordered_custody_columns.extend(columns); - } - self.all_custody_columns_ordered - .set(ordered_custody_columns.into_boxed_slice()) - .map_err(|_| { - "Failed to initialise CustodyContext with computed custody columns".to_string() - }) - } - /// Register a new validator index and updates the list of validators if required. 
/// /// Also modifies the internal structures if the validator custody has changed to @@ -497,11 +474,7 @@ impl CustodyContext { /// A slice of ordered column indices that should be sampled for this epoch based on the node's custody configuration pub fn sampling_columns_for_epoch(&self, epoch: Epoch, spec: &ChainSpec) -> &[ColumnIndex] { let num_of_columns_to_sample = self.num_of_data_columns_to_sample(epoch, spec); - let all_columns_ordered = self - .all_custody_columns_ordered - .get() - .expect("all_custody_columns_ordered should be initialized"); - &all_columns_ordered[..num_of_columns_to_sample] + &self.ordered_custody_column_indices[..num_of_columns_to_sample] } /// Returns the ordered list of column indices that the node is assigned to custody @@ -528,12 +501,11 @@ impl CustodyContext { self.custody_group_count_at_head(spec) as usize }; - let all_columns_ordered = self - .all_custody_columns_ordered - .get() - .expect("all_custody_columns_ordered should be initialized"); + // This is an unnecessary conversion for spec compliance, basically just multiplying by 1. + let columns_per_custody_group = spec.data_columns_per_group::() as usize; + let custody_column_count = columns_per_custody_group * custody_group_count; - &all_columns_ordered[..custody_group_count] + &self.ordered_custody_column_indices[..custody_column_count] } /// The node has completed backfill for this epoch. 
Update the internal records so the function @@ -599,11 +571,9 @@ impl From<&CustodyContext> for CustodyContextSsz { #[cfg(test)] mod tests { - use rand::rng; - use rand::seq::SliceRandom; - use types::MainnetEthSpec; - use super::*; + use crate::test_utils::generate_data_column_indices_rand_order; + use types::MainnetEthSpec; type E = MainnetEthSpec; @@ -623,13 +593,10 @@ mod tests { ssz_context, NodeCustodyType::Fullnode, head_epoch, + generate_data_column_indices_rand_order::(), spec, ); - let all_custody_groups_ordered = (0..spec.number_of_custody_groups).collect::>(); - custody_context - .init_ordered_data_columns_from_custody_groups(all_custody_groups_ordered, spec) - .expect("should initialise ordered data columns"); custody_context } @@ -668,6 +635,7 @@ mod tests { ssz_context, target_node_custody_type, head_epoch, + generate_data_column_indices_rand_order::(), spec, ); @@ -738,6 +706,7 @@ mod tests { ssz_context, target_node_custody_type, head_epoch, + generate_data_column_indices_rand_order::(), spec, ); @@ -759,7 +728,11 @@ mod tests { #[test] fn no_validators_supernode_default() { let spec = E::default_spec(); - let custody_context = CustodyContext::::new(NodeCustodyType::Supernode, &spec); + let custody_context = CustodyContext::::new( + NodeCustodyType::Supernode, + generate_data_column_indices_rand_order::(), + &spec, + ); assert_eq!( custody_context.custody_group_count_at_head(&spec), spec.number_of_custody_groups @@ -773,7 +746,11 @@ mod tests { #[test] fn no_validators_semi_supernode_default() { let spec = E::default_spec(); - let custody_context = CustodyContext::::new(NodeCustodyType::SemiSupernode, &spec); + let custody_context = CustodyContext::::new( + NodeCustodyType::SemiSupernode, + generate_data_column_indices_rand_order::(), + &spec, + ); assert_eq!( custody_context.custody_group_count_at_head(&spec), spec.number_of_custody_groups / 2 @@ -787,7 +764,11 @@ mod tests { #[test] fn no_validators_fullnode_default() { let spec = 
E::default_spec(); - let custody_context = CustodyContext::::new(NodeCustodyType::Fullnode, &spec); + let custody_context = CustodyContext::::new( + NodeCustodyType::Fullnode, + generate_data_column_indices_rand_order::(), + &spec, + ); assert_eq!( custody_context.custody_group_count_at_head(&spec), spec.custody_requirement, @@ -802,7 +783,11 @@ mod tests { #[test] fn register_single_validator_should_update_cgc() { let spec = E::default_spec(); - let custody_context = CustodyContext::::new(NodeCustodyType::Fullnode, &spec); + let custody_context = CustodyContext::::new( + NodeCustodyType::Fullnode, + generate_data_column_indices_rand_order::(), + &spec, + ); let bal_per_additional_group = spec.balance_per_additional_custody_group; let min_val_custody_requirement = spec.validator_custody_requirement; // One single node increases its balance over 3 epochs. @@ -826,7 +811,11 @@ mod tests { #[test] fn register_multiple_validators_should_update_cgc() { let spec = E::default_spec(); - let custody_context = CustodyContext::::new(NodeCustodyType::Fullnode, &spec); + let custody_context = CustodyContext::::new( + NodeCustodyType::Fullnode, + generate_data_column_indices_rand_order::(), + &spec, + ); let bal_per_additional_group = spec.balance_per_additional_custody_group; let min_val_custody_requirement = spec.validator_custody_requirement; // Add 3 validators over 3 epochs. @@ -863,7 +852,11 @@ mod tests { #[test] fn register_validators_should_not_update_cgc_for_supernode() { let spec = E::default_spec(); - let custody_context = CustodyContext::::new(NodeCustodyType::Supernode, &spec); + let custody_context = CustodyContext::::new( + NodeCustodyType::Supernode, + generate_data_column_indices_rand_order::(), + &spec, + ); let bal_per_additional_group = spec.balance_per_additional_custody_group; // Add 3 validators over 3 epochs. 
@@ -901,7 +894,11 @@ mod tests { #[test] fn cgc_change_should_be_effective_to_sampling_after_delay() { let spec = E::default_spec(); - let custody_context = CustodyContext::::new(NodeCustodyType::Fullnode, &spec); + let custody_context = CustodyContext::::new( + NodeCustodyType::Fullnode, + generate_data_column_indices_rand_order::(), + &spec, + ); let current_slot = Slot::new(10); let current_epoch = current_slot.epoch(E::slots_per_epoch()); let default_sampling_size = @@ -932,7 +929,11 @@ mod tests { #[test] fn validator_dropped_after_no_registrations_within_expiry_should_not_reduce_cgc() { let spec = E::default_spec(); - let custody_context = CustodyContext::::new(NodeCustodyType::Fullnode, &spec); + let custody_context = CustodyContext::::new( + NodeCustodyType::Fullnode, + generate_data_column_indices_rand_order::(), + &spec, + ); let current_slot = Slot::new(10); let val_custody_units_1 = 10; let val_custody_units_2 = 5; @@ -974,7 +975,11 @@ mod tests { #[test] fn validator_dropped_after_no_registrations_within_expiry() { let spec = E::default_spec(); - let custody_context = CustodyContext::::new(NodeCustodyType::Fullnode, &spec); + let custody_context = CustodyContext::::new( + NodeCustodyType::Fullnode, + generate_data_column_indices_rand_order::(), + &spec, + ); let current_slot = Slot::new(10); let val_custody_units_1 = 10; let val_custody_units_2 = 5; @@ -1021,37 +1026,6 @@ mod tests { ); } - #[test] - fn should_init_ordered_data_columns_and_return_sampling_columns() { - let spec = E::default_spec(); - let custody_context = CustodyContext::::new(NodeCustodyType::Fullnode, &spec); - let sampling_size = custody_context.num_of_data_columns_to_sample(Epoch::new(0), &spec); - - // initialise ordered columns - let mut all_custody_groups_ordered = (0..spec.number_of_custody_groups).collect::>(); - all_custody_groups_ordered.shuffle(&mut rng()); - - custody_context - .init_ordered_data_columns_from_custody_groups( - all_custody_groups_ordered.clone(), - &spec, - 
) - .expect("should initialise ordered data columns"); - - let actual_sampling_columns = - custody_context.sampling_columns_for_epoch(Epoch::new(0), &spec); - - let expected_sampling_columns = &all_custody_groups_ordered - .iter() - .flat_map(|custody_index| { - compute_columns_for_custody_group::(*custody_index, &spec) - .expect("should compute columns for custody group") - }) - .collect::>()[0..sampling_size]; - - assert_eq!(actual_sampling_columns, expected_sampling_columns) - } - /// Update the validator every epoch and assert cgc against expected values. fn register_validators_and_assert_cgc( custody_context: &CustodyContext, @@ -1077,12 +1051,12 @@ mod tests { #[test] fn custody_columns_for_epoch_no_validators_fullnode() { let spec = E::default_spec(); - let custody_context = CustodyContext::::new(NodeCustodyType::Fullnode, &spec); - let all_custody_groups_ordered = (0..spec.number_of_custody_groups).collect::>(); - - custody_context - .init_ordered_data_columns_from_custody_groups(all_custody_groups_ordered, &spec) - .expect("should initialise ordered data columns"); + let ordered_custody_column_indices = generate_data_column_indices_rand_order::(); + let custody_context = CustodyContext::::new( + NodeCustodyType::Fullnode, + ordered_custody_column_indices, + &spec, + ); assert_eq!( custody_context.custody_columns_for_epoch(None, &spec).len(), @@ -1093,12 +1067,12 @@ mod tests { #[test] fn custody_columns_for_epoch_no_validators_supernode() { let spec = E::default_spec(); - let custody_context = CustodyContext::::new(NodeCustodyType::Supernode, &spec); - let all_custody_groups_ordered = (0..spec.number_of_custody_groups).collect::>(); - - custody_context - .init_ordered_data_columns_from_custody_groups(all_custody_groups_ordered, &spec) - .expect("should initialise ordered data columns"); + let ordered_custody_column_indices = generate_data_column_indices_rand_order::(); + let custody_context = CustodyContext::::new( + NodeCustodyType::Supernode, + 
ordered_custody_column_indices, + &spec, + ); assert_eq!( custody_context.custody_columns_for_epoch(None, &spec).len(), @@ -1109,14 +1083,14 @@ mod tests { #[test] fn custody_columns_for_epoch_with_validators_should_match_cgc() { let spec = E::default_spec(); - let custody_context = CustodyContext::::new(NodeCustodyType::Fullnode, &spec); - let all_custody_groups_ordered = (0..spec.number_of_custody_groups).collect::>(); + let ordered_custody_column_indices = generate_data_column_indices_rand_order::(); + let custody_context = CustodyContext::::new( + NodeCustodyType::Fullnode, + ordered_custody_column_indices, + &spec, + ); let val_custody_units = 10; - custody_context - .init_ordered_data_columns_from_custody_groups(all_custody_groups_ordered, &spec) - .expect("should initialise ordered data columns"); - let _ = custody_context.register_validators( vec![( 0, @@ -1135,14 +1109,14 @@ mod tests { #[test] fn custody_columns_for_epoch_specific_epoch_uses_epoch_cgc() { let spec = E::default_spec(); - let custody_context = CustodyContext::::new(NodeCustodyType::Fullnode, &spec); - let all_custody_groups_ordered = (0..spec.number_of_custody_groups).collect::>(); + let ordered_custody_column_indices = generate_data_column_indices_rand_order::(); + let custody_context = CustodyContext::::new( + NodeCustodyType::Fullnode, + ordered_custody_column_indices, + &spec, + ); let test_epoch = Epoch::new(5); - custody_context - .init_ordered_data_columns_from_custody_groups(all_custody_groups_ordered, &spec) - .expect("should initialise ordered data columns"); - let expected_cgc = custody_context.custody_group_count_at_epoch(test_epoch, &spec); assert_eq!( custody_context @@ -1165,6 +1139,7 @@ mod tests { ssz_context, NodeCustodyType::Fullnode, Epoch::new(0), + generate_data_column_indices_rand_order::(), &spec, ); @@ -1198,7 +1173,11 @@ mod tests { fn restore_semi_supernode_with_validators_can_exceed_64() { let spec = E::default_spec(); let semi_supernode_cgc = 
spec.number_of_custody_groups / 2; // 64 - let custody_context = CustodyContext::::new(NodeCustodyType::SemiSupernode, &spec); + let custody_context = CustodyContext::::new( + NodeCustodyType::SemiSupernode, + generate_data_column_indices_rand_order::(), + &spec, + ); // Verify initial CGC is 64 (semi-supernode) assert_eq!( @@ -1348,6 +1327,7 @@ mod tests { ssz_context, NodeCustodyType::Fullnode, Epoch::new(20), + generate_data_column_indices_rand_order::(), &spec, ); diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs index 644c471698..3e859456b1 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -866,11 +866,11 @@ mod test { use crate::CustodyContext; use crate::custody_context::NodeCustodyType; use crate::test_utils::{ - EphemeralHarnessType, NumBlobs, generate_rand_block_and_data_columns, get_kzg, + EphemeralHarnessType, NumBlobs, generate_data_column_indices_rand_order, + generate_rand_block_and_data_columns, get_kzg, }; use rand::SeedableRng; use rand::prelude::StdRng; - use rand::seq::SliceRandom; use slot_clock::{SlotClock, TestingSlotClock}; use std::collections::HashSet; use std::sync::Arc; @@ -892,8 +892,6 @@ mod test { let da_checker = new_da_checker(spec.clone()); let custody_context = &da_checker.custody_context; - let all_column_indices_ordered = - init_custody_context_with_ordered_columns(custody_context, &mut rng, &spec); // GIVEN a single 32 ETH validator is attached slot 0 let epoch = Epoch::new(0); @@ -926,7 +924,8 @@ mod test { &spec, ); let block_root = Hash256::random(); - let requested_columns = &all_column_indices_ordered[..10]; + let custody_columns = custody_context.custody_columns_for_epoch(None, &spec); + let requested_columns = &custody_columns[..10]; da_checker .put_rpc_custody_columns( block_root, @@ -971,8 +970,6 @@ mod test { let da_checker = 
new_da_checker(spec.clone()); let custody_context = &da_checker.custody_context; - let all_column_indices_ordered = - init_custody_context_with_ordered_columns(custody_context, &mut rng, &spec); // GIVEN a single 32 ETH validator is attached slot 0 let epoch = Epoch::new(0); @@ -1006,7 +1003,8 @@ mod test { &spec, ); let block_root = Hash256::random(); - let requested_columns = &all_column_indices_ordered[..10]; + let custody_columns = custody_context.custody_columns_for_epoch(None, &spec); + let requested_columns = &custody_columns[..10]; let gossip_columns = data_columns .into_iter() .filter(|d| requested_columns.contains(&d.index)) @@ -1096,8 +1094,6 @@ mod test { let da_checker = new_da_checker(spec.clone()); let custody_context = &da_checker.custody_context; - let all_column_indices_ordered = - init_custody_context_with_ordered_columns(custody_context, &mut rng, &spec); // Set custody requirement to 65 columns (enough to trigger reconstruction) let epoch = Epoch::new(1); @@ -1127,7 +1123,8 @@ mod test { // Add 64 columns to the da checker (enough to be able to reconstruct) // Order by all_column_indices_ordered, then take first 64 - let custody_columns = all_column_indices_ordered + let custody_columns = custody_context.custody_columns_for_epoch(None, &spec); + let custody_columns = custody_columns .iter() .filter_map(|&col_idx| data_columns.iter().find(|d| d.index == col_idx).cloned()) .take(64) @@ -1177,19 +1174,6 @@ mod test { ); } - fn init_custody_context_with_ordered_columns( - custody_context: &Arc>, - mut rng: &mut StdRng, - spec: &ChainSpec, - ) -> Vec { - let mut all_data_columns = (0..spec.number_of_custody_groups).collect::>(); - all_data_columns.shuffle(&mut rng); - custody_context - .init_ordered_data_columns_from_custody_groups(all_data_columns.clone(), spec) - .expect("should initialise ordered custody columns"); - all_data_columns - } - fn new_da_checker(spec: Arc) -> DataAvailabilityChecker { let slot_clock = TestingSlotClock::new( 
Slot::new(0), @@ -1198,7 +1182,12 @@ mod test { ); let kzg = get_kzg(&spec); let store = Arc::new(HotColdDB::open_ephemeral(<_>::default(), spec.clone()).unwrap()); - let custody_context = Arc::new(CustodyContext::new(NodeCustodyType::Fullnode, &spec)); + let ordered_custody_column_indices = generate_data_column_indices_rand_order::(); + let custody_context = Arc::new(CustodyContext::new( + NodeCustodyType::Fullnode, + ordered_custody_column_indices, + &spec, + )); let complete_blob_backfill = false; DataAvailabilityChecker::new( complete_blob_backfill, diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index 402dac1fa8..aa23250296 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -823,6 +823,7 @@ impl DataAvailabilityCheckerInner { mod test { use super::*; + use crate::test_utils::generate_data_column_indices_rand_order; use crate::{ blob_verification::GossipVerifiedBlob, block_verification::PayloadVerificationOutcome, @@ -1023,7 +1024,11 @@ mod test { let spec = harness.spec.clone(); let test_store = harness.chain.store.clone(); let capacity_non_zero = new_non_zero_usize(capacity); - let custody_context = Arc::new(CustodyContext::new(NodeCustodyType::Fullnode, &spec)); + let custody_context = Arc::new(CustodyContext::new( + NodeCustodyType::Fullnode, + generate_data_column_indices_rand_order::(), + &spec, + )); let cache = Arc::new( DataAvailabilityCheckerInner::::new( capacity_non_zero, diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 38797d0264..b626fcd862 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -42,6 +42,7 @@ use parking_lot::{Mutex, RwLockWriteGuard}; use rand::Rng; use rand::SeedableRng; use 
rand::rngs::StdRng; +use rand::seq::SliceRandom; use rayon::prelude::*; use sensitive_url::SensitiveUrl; use slot_clock::{SlotClock, TestingSlotClock}; @@ -59,6 +60,7 @@ use store::{HotColdDB, ItemStore, MemoryStore, config::StoreConfig}; use task_executor::TaskExecutor; use task_executor::{ShutdownReason, test_utils::TestRuntime}; use tree_hash::TreeHash; +use types::data_column_custody_group::CustodyIndex; use types::indexed_attestation::IndexedAttestationBase; use types::payload::BlockProductionVersion; use types::test_utils::TestRandom; @@ -567,6 +569,7 @@ where .shutdown_sender(shutdown_tx) .chain_config(chain_config) .node_custody_type(self.node_custody_type) + .ordered_custody_column_indices(generate_data_column_indices_rand_order::()) .event_handler(Some(ServerSentEventHandler::new_with_capacity(5))) .validator_monitor_config(validator_monitor_config) .rng(Box::new(StdRng::seed_from_u64(42))); @@ -596,15 +599,6 @@ where let chain = builder.build().expect("should build"); - chain - .data_availability_checker - .custody_context() - .init_ordered_data_columns_from_custody_groups( - (0..spec.number_of_custody_groups).collect(), - &spec, - ) - .expect("should initialise custody context"); - BeaconChainHarness { spec: chain.spec.clone(), chain: Arc::new(chain), @@ -3431,3 +3425,9 @@ pub fn generate_data_column_sidecars_from_block( ) .unwrap() } + +pub fn generate_data_column_indices_rand_order() -> Vec { + let mut indices = (0..E::number_of_columns() as u64).collect::>(); + indices.shuffle(&mut StdRng::seed_from_u64(42)); + indices +} diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 0c83244f44..806c9dce7c 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -7,11 +7,11 @@ use beacon_chain::custody_context::CUSTODY_CHANGE_DA_EFFECTIVE_DELAY_SECONDS; use beacon_chain::data_availability_checker::AvailableBlock; use 
beacon_chain::historical_data_columns::HistoricalDataColumnError; use beacon_chain::schema_change::migrate_schema; -use beacon_chain::test_utils::SyncCommitteeStrategy; use beacon_chain::test_utils::{ AttestationStrategy, BeaconChainHarness, BlockStrategy, DiskHarnessType, get_kzg, mock_execution_layer_from_parts, test_spec, }; +use beacon_chain::test_utils::{SyncCommitteeStrategy, generate_data_column_indices_rand_order}; use beacon_chain::{ BeaconChain, BeaconChainError, BeaconChainTypes, BeaconSnapshot, BlockError, ChainConfig, NotifyExecutionLayer, ServerSentEventHandler, WhenSlotSkipped, @@ -2881,17 +2881,10 @@ async fn weak_subjectivity_sync_test( .shutdown_sender(shutdown_tx) .event_handler(Some(ServerSentEventHandler::new_with_capacity(1))) .execution_layer(Some(mock.el)) + .ordered_custody_column_indices(generate_data_column_indices_rand_order::()) .rng(Box::new(StdRng::seed_from_u64(42))) .build() .expect("should build"); - beacon_chain - .data_availability_checker - .custody_context() - .init_ordered_data_columns_from_custody_groups( - (0..spec.number_of_custody_groups).collect(), - &spec, - ) - .unwrap(); let beacon_chain = Arc::new(beacon_chain); let wss_block_root = wss_block.canonical_root(); diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index d55afbffe6..380e0c114a 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -28,6 +28,7 @@ use execution_layer::ExecutionLayer; use execution_layer::test_utils::generate_genesis_header; use futures::channel::mpsc::Receiver; use genesis::{DEFAULT_ETH1_BLOCK_HASH, interop_genesis_state}; +use lighthouse_network::identity::Keypair; use lighthouse_network::{NetworkGlobals, prometheus_client::registry::Registry}; use monitoring_api::{MonitoringHttpClient, ProcessType}; use network::{NetworkConfig, NetworkSenders, NetworkService}; @@ -42,7 +43,7 @@ use std::time::{SystemTime, UNIX_EPOCH}; use store::database::interface::BeaconNodeBackend; use 
timer::spawn_timer; use tracing::{debug, info, warn}; -use types::data_column_custody_group::get_custody_groups_ordered; +use types::data_column_custody_group::compute_ordered_custody_column_indices; use types::{ BeaconState, BlobSidecarList, ChainSpec, EthSpec, ExecutionBlockHash, Hash256, SignedBeaconBlock, test_utils::generate_deterministic_keypairs, @@ -154,6 +155,7 @@ where mut self, client_genesis: ClientGenesis, config: ClientConfig, + node_id: [u8; 32], ) -> Result { let store = self.store.clone(); let chain_spec = self.chain_spec.clone(); @@ -191,6 +193,11 @@ where Kzg::new_from_trusted_setup_no_precomp(&config.trusted_setup).map_err(kzg_err_msg)? }; + let ordered_custody_column_indices = + compute_ordered_custody_column_indices::(node_id, &spec).map_err(|e| { + format!("Failed to compute ordered custody column indices: {:?}", e) + })?; + let builder = BeaconChainBuilder::new(eth_spec_instance, Arc::new(kzg)) .store(store) .task_executor(context.executor.clone()) @@ -203,6 +210,7 @@ where .event_handler(event_handler) .execution_layer(execution_layer) .node_custody_type(config.chain.node_custody_type) + .ordered_custody_column_indices(ordered_custody_column_indices) .validator_monitor_config(config.validator_monitor.clone()) .rng(Box::new( StdRng::try_from_rng(&mut OsRng) @@ -463,7 +471,11 @@ where } /// Starts the networking stack. 
- pub async fn network(mut self, config: Arc) -> Result { + pub async fn network( + mut self, + config: Arc, + local_keypair: Keypair, + ) -> Result { let beacon_chain = self .beacon_chain .clone() @@ -491,12 +503,11 @@ where context.executor, libp2p_registry.as_mut(), beacon_processor_channels.beacon_processor_tx.clone(), + local_keypair, ) .await .map_err(|e| format!("Failed to start network: {:?}", e))?; - init_custody_context(beacon_chain, &network_globals)?; - self.network_globals = Some(network_globals); self.network_senders = Some(network_senders); self.libp2p_registry = libp2p_registry; @@ -798,21 +809,6 @@ where } } -fn init_custody_context( - chain: Arc>, - network_globals: &NetworkGlobals, -) -> Result<(), String> { - let node_id = network_globals.local_enr().node_id().raw(); - let spec = &chain.spec; - let custody_groups_ordered = - get_custody_groups_ordered(node_id, spec.number_of_custody_groups, spec) - .map_err(|e| format!("Failed to compute custody groups: {:?}", e))?; - chain - .data_availability_checker - .custody_context() - .init_ordered_data_columns_from_custody_groups(custody_groups_ordered, spec) -} - impl ClientBuilder> where diff --git a/beacon_node/lighthouse_network/src/lib.rs b/beacon_node/lighthouse_network/src/lib.rs index b6be9b5222..3d96a08357 100644 --- a/beacon_node/lighthouse_network/src/lib.rs +++ b/beacon_node/lighthouse_network/src/lib.rs @@ -109,7 +109,7 @@ pub use discovery::Eth2Enr; pub use discv5; pub use gossipsub::{IdentTopic, MessageAcceptance, MessageId, Topic, TopicHash}; pub use libp2p; -pub use libp2p::{Multiaddr, multiaddr}; +pub use libp2p::{Multiaddr, identity, multiaddr}; pub use libp2p::{PeerId, Swarm, core::ConnectedPoint}; pub use peer_manager::{ ConnectionDirection, PeerConnectionStatus, PeerInfo, PeerManager, SyncInfo, SyncStatus, diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index ea2c53a07f..93c69ee097 100644 --- 
a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -26,6 +26,7 @@ use gossipsub::{ TopicScoreParams, }; use gossipsub_scoring_parameters::{PeerScoreSettings, lighthouse_gossip_thresholds}; +use libp2p::identity::Keypair; use libp2p::multiaddr::{self, Multiaddr, Protocol as MProtocol}; use libp2p::swarm::behaviour::toggle::Toggle; use libp2p::swarm::{NetworkBehaviour, Swarm, SwarmEvent}; @@ -171,11 +172,10 @@ impl Network { executor: task_executor::TaskExecutor, mut ctx: ServiceContext<'_>, custody_group_count: u64, + local_keypair: Keypair, ) -> Result<(Self, Arc>), String> { let config = ctx.config.clone(); trace!("Libp2p Service starting"); - // initialise the node's ID - let local_keypair = utils::load_private_key(&config); // Trusted peers will also be marked as explicit in GossipSub. // Cfr. https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.1.md#explicit-peering-agreements diff --git a/beacon_node/lighthouse_network/tests/common.rs b/beacon_node/lighthouse_network/tests/common.rs index 8a3047692f..9e8b243698 100644 --- a/beacon_node/lighthouse_network/tests/common.rs +++ b/beacon_node/lighthouse_network/tests/common.rs @@ -16,6 +16,7 @@ use types::{ type E = MinimalEthSpec; +use lighthouse_network::identity::secp256k1; use lighthouse_network::rpc::config::InboundRateLimiterConfig; use tempfile::Builder as TempBuilder; @@ -138,10 +139,15 @@ pub async fn build_libp2p_instance( libp2p_registry: None, }; Libp2pInstance( - LibP2PService::new(executor, libp2p_context, custody_group_count) - .await - .expect("should build libp2p instance") - .0, + LibP2PService::new( + executor, + libp2p_context, + custody_group_count, + secp256k1::Keypair::generate().into(), + ) + .await + .expect("should build libp2p instance") + .0, signal, ) } diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 4bd649ba82..a416f5cb12 100644 --- 
a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -12,6 +12,7 @@ use futures::future::OptionFuture; use futures::prelude::*; use lighthouse_network::Enr; +use lighthouse_network::identity::Keypair; use lighthouse_network::rpc::InboundRequestId; use lighthouse_network::rpc::RequestType; use lighthouse_network::rpc::methods::RpcResponse; @@ -212,6 +213,7 @@ impl NetworkService { executor: task_executor::TaskExecutor, libp2p_registry: Option<&'_ mut Registry>, beacon_processor_send: BeaconProcessorSend, + local_keypair: Keypair, ) -> Result< ( NetworkService, @@ -284,6 +286,7 @@ impl NetworkService { .data_availability_checker .custody_context() .custody_group_count_at_head(&beacon_chain.spec), + local_keypair, ) .await?; @@ -366,6 +369,7 @@ impl NetworkService { executor: task_executor::TaskExecutor, libp2p_registry: Option<&'_ mut Registry>, beacon_processor_send: BeaconProcessorSend, + local_keypair: Keypair, ) -> Result<(Arc>, NetworkSenders), String> { let (network_service, network_globals, network_senders) = Self::build( beacon_chain, @@ -373,6 +377,7 @@ impl NetworkService { executor.clone(), libp2p_registry, beacon_processor_send, + local_keypair, ) .await?; diff --git a/beacon_node/network/src/service/tests.rs b/beacon_node/network/src/service/tests.rs index 64815ab2bb..8ff1e0488d 100644 --- a/beacon_node/network/src/service/tests.rs +++ b/beacon_node/network/src/service/tests.rs @@ -6,6 +6,7 @@ use beacon_chain::BeaconChainTypes; use beacon_chain::test_utils::BeaconChainHarness; use beacon_processor::{BeaconProcessorChannels, BeaconProcessorConfig}; use futures::StreamExt; +use lighthouse_network::identity::secp256k1; use lighthouse_network::types::{GossipEncoding, GossipKind}; use lighthouse_network::{Enr, GossipTopic}; use std::str::FromStr; @@ -66,6 +67,7 @@ fn test_dht_persistence() { executor, None, beacon_processor_tx, + secp256k1::Keypair::generate().into(), ) .await .unwrap(); @@ -134,6 +136,7 @@ fn 
test_removing_topic_weight_on_old_topics() { executor.clone(), None, beacon_processor_channels.beacon_processor_tx, + secp256k1::Keypair::generate().into(), ) .await .unwrap() diff --git a/beacon_node/network/src/subnet_service/tests/mod.rs b/beacon_node/network/src/subnet_service/tests/mod.rs index 0df28cff6b..bee6569b7b 100644 --- a/beacon_node/network/src/subnet_service/tests/mod.rs +++ b/beacon_node/network/src/subnet_service/tests/mod.rs @@ -1,4 +1,5 @@ use super::*; +use beacon_chain::test_utils::generate_data_column_indices_rand_order; use beacon_chain::{ BeaconChain, builder::{BeaconChainBuilder, Witness}, @@ -73,6 +74,9 @@ impl TestBeaconChain { Duration::from_secs(recent_genesis_time()), Duration::from_millis(SLOT_DURATION_MILLIS), )) + .ordered_custody_column_indices(generate_data_column_indices_rand_order::< + MainnetEthSpec, + >()) .shutdown_sender(shutdown_tx) .rng(Box::new(StdRng::seed_from_u64(42))) .build() diff --git a/beacon_node/src/lib.rs b/beacon_node/src/lib.rs index 2ceb94729d..6db2150e5f 100644 --- a/beacon_node/src/lib.rs +++ b/beacon_node/src/lib.rs @@ -9,6 +9,8 @@ pub use client::{Client, ClientBuilder, ClientConfig, ClientGenesis}; pub use config::{get_config, get_data_dir, set_network_config}; use environment::RuntimeContext; pub use eth2_config::Eth2Config; +use lighthouse_network::load_private_key; +use network_utils::enr_ext::peer_id_to_node_id; use slasher::{DatabaseBackendOverride, Slasher}; use std::ops::{Deref, DerefMut}; use std::sync::Arc; @@ -120,8 +122,12 @@ impl ProductionBeaconNode { builder }; + // Generate or load the node id. 
+ let local_keypair = load_private_key(&client_config.network); + let node_id = peer_id_to_node_id(&local_keypair.public().to_peer_id())?.raw(); + let builder = builder - .beacon_chain_builder(client_genesis, client_config.clone()) + .beacon_chain_builder(client_genesis, client_config.clone(), node_id) .await?; info!("Block production enabled"); @@ -133,7 +139,7 @@ impl ProductionBeaconNode { builder .build_beacon_chain()? - .network(Arc::new(client_config.network)) + .network(Arc::new(client_config.network), local_keypair) .await? .notifier()? .http_metrics_config(client_config.http_metrics.clone()) diff --git a/consensus/types/src/data_column_custody_group.rs b/consensus/types/src/data_column_custody_group.rs index 0c44608e46..7ecabab0ab 100644 --- a/consensus/types/src/data_column_custody_group.rs +++ b/consensus/types/src/data_column_custody_group.rs @@ -42,7 +42,7 @@ pub fn get_custody_groups( /// /// # Returns /// Vector of custody group indices in computation order or error if parameters are invalid -pub fn get_custody_groups_ordered( +fn get_custody_groups_ordered( raw_node_id: [u8; 32], custody_group_count: u64, spec: &ChainSpec, @@ -76,6 +76,27 @@ pub fn get_custody_groups_ordered( Ok(custody_groups) } +/// Returns a deterministically ordered list of custody columns assigned to a node, +/// preserving the order in which they were computed during iteration. 
+/// +/// # Arguments +/// * `raw_node_id` - 32-byte node identifier +/// * `spec` - Chain specification containing custody parameters +pub fn compute_ordered_custody_column_indices( + raw_node_id: [u8; 32], + spec: &ChainSpec, +) -> Result, DataColumnCustodyGroupError> { + let all_custody_groups_ordered = + get_custody_groups_ordered(raw_node_id, spec.number_of_custody_groups, spec)?; + + let mut ordered_custody_columns = vec![]; + for custody_index in all_custody_groups_ordered { + let columns = compute_columns_for_custody_group::(custody_index, spec)?; + ordered_custody_columns.extend(columns); + } + Ok(ordered_custody_columns) +} + /// Returns the columns that are associated with a given custody group. /// /// spec: https://github.com/ethereum/consensus-specs/blob/8e0d0d48e81d6c7c5a8253ab61340f5ea5bac66a/specs/fulu/das-core.md#compute_columns_for_custody_group From 02d0c6a8ce7f4b4f84914b1ebe9a778192df94d1 Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Mon, 17 Nov 2025 23:43:28 -0800 Subject: [PATCH 6/9] Compute missing_columns correctly (#8425) N/A The difference is computed by taking the difference of expected with received. We were doing the inverse. Thanks to Yassine for finding the issue. 
Co-Authored-By: Pawan Dhananjay --- .../network/src/sync/range_data_column_batch_request.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/beacon_node/network/src/sync/range_data_column_batch_request.rs b/beacon_node/network/src/sync/range_data_column_batch_request.rs index 72e2fb2d5b..b912a6badc 100644 --- a/beacon_node/network/src/sync/range_data_column_batch_request.rs +++ b/beacon_node/network/src/sync/range_data_column_batch_request.rs @@ -268,8 +268,8 @@ impl RangeDataColumnBatchRequest { let received_columns = columns.iter().map(|c| c.index).collect::>(); - let missing_columns = received_columns - .difference(expected_custody_columns) + let missing_columns = expected_custody_columns + .difference(&received_columns) .collect::>(); // blobs are expected for this slot but there is at least one missing columns From 8e54f6e1a8ba2adbe52fd48c4f6e79ddd740af68 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Wed, 19 Nov 2025 22:00:36 +1100 Subject: [PATCH 7/9] Fix md format (#8434) The merge queue is failing due to md lint changes: https://github.com/sigp/lighthouse/actions/runs/19491272535/job/55783746002 This PR fixes the lint. I'm targeting the release branch so we can get things merged for release tomorrow, and we'll merge back down to `unstable`. 
Co-Authored-By: Jimmy Chen Co-Authored-By: Michael Sproul --- book/book.toml | 1 - book/src/advanced_blobs.md | 12 ++++++------ book/src/advanced_database.md | 10 +++++----- book/src/api_vc_endpoints.md | 4 ++-- book/src/archived_merge_migration.md | 16 ++++++++-------- 5 files changed, 21 insertions(+), 22 deletions(-) diff --git a/book/book.toml b/book/book.toml index 7b143710a5..c0d38f6147 100644 --- a/book/book.toml +++ b/book/book.toml @@ -1,7 +1,6 @@ [book] authors = ["Paul Hauner", "Age Manning"] language = "en" -multilingual = false src = "src" title = "Lighthouse Book" diff --git a/book/src/advanced_blobs.md b/book/src/advanced_blobs.md index ccc29acf26..3c005e7431 100644 --- a/book/src/advanced_blobs.md +++ b/book/src/advanced_blobs.md @@ -6,12 +6,12 @@ With the [Fusaka](https://ethereum.org/roadmap/fusaka) upgrade, the main feature Table below summarizes the role of relevant flags in Lighthouse beacon node: -| | Post-Deneb, Pre-Fulu || Post-Fulu || -|-------|----------|----------|-----------|----------| -| Flag | Usage | Can serve blobs? | Usage | Can serve blobs? | -| --prune-blobs false | Does not prune blobs since using the flag | Yes, for blobs since using the flag and for the past 18 days | Does not prune data columns since using the flag | No | -| --semi-supernode | - | - | Store half data columns | Yes, for blobs since using the flag for a max of 18 days | -| --supernode | - | - | Store all data columns | Yes, for blobs since using the flag for a max of 18 days | +| | Post-Deneb, Pre-Fulu | | Post-Fulu | | +|---------------------|-------------------------------------------|--------------------------------------------------------------|--------------------------------------------------|----------------------------------------------------------| +| Flag | Usage | Can serve blobs? | Usage | Can serve blobs? 
| +| --prune-blobs false | Does not prune blobs since using the flag | Yes, for blobs since using the flag and for the past 18 days | Does not prune data columns since using the flag | No | +| --semi-supernode | - | - | Store half data columns | Yes, for blobs since using the flag for a max of 18 days | +| --supernode | - | - | Store all data columns | Yes, for blobs since using the flag for a max of 18 days | While both `--supernode` and `--semi-supernode` can serve blobs, a supernode will be faster to respond to blobs queries as it skips the blob reconstruction step. Running a supernode also helps the network by serving the data columns to its peers. diff --git a/book/src/advanced_database.md b/book/src/advanced_database.md index 4e77046c2d..1643736794 100644 --- a/book/src/advanced_database.md +++ b/book/src/advanced_database.md @@ -63,11 +63,11 @@ that we have observed are: The following table lists the data for different configurations. Note that the disk space requirement is for the `chain_db` and `freezer_db`, excluding the `blobs_db`. 
-| Hierarchy Exponents | Storage Requirement | Sequential Slot Query | Uncached Query | Time to Sync | -|---|---|---|---|---| -| 5,9,11,13,16,18,21 (default) | 418 GiB | 250-700 ms | up to 10 s | 1 week | -| 5,7,11 (frequent snapshots) | 589 GiB | 250-700 ms | up to 6 s | 1 week | -| 0,5,7,11 (per-slot diffs) | 2500 GiB | 250-700 ms | up to 4 s | 7 weeks | +| Hierarchy Exponents | Storage Requirement | Sequential Slot Query | Uncached Query | Time to Sync | +|------------------------------|---------------------|-----------------------|----------------|--------------| +| 5,9,11,13,16,18,21 (default) | 418 GiB | 250-700 ms | up to 10 s | 1 week | +| 5,7,11 (frequent snapshots) | 589 GiB | 250-700 ms | up to 6 s | 1 week | +| 0,5,7,11 (per-slot diffs) | 2500 GiB | 250-700 ms | up to 4 s | 7 weeks | [Jim](https://github.com/mcdee) has done some experiments to study the response time of querying random slots (uncached query) for `--hierarchy-exponents 0,5,7,11` (per-slot diffs) and `--hierarchy-exponents 5,9,11,13,17,21` (per-epoch diffs), as show in the figures below. From the figures, two points can be concluded: diff --git a/book/src/api_vc_endpoints.md b/book/src/api_vc_endpoints.md index 14f4933e17..d128b13b2f 100644 --- a/book/src/api_vc_endpoints.md +++ b/book/src/api_vc_endpoints.md @@ -132,7 +132,7 @@ Returns information regarding the health of the host machine. 
| Property | Specification | |-------------------|--------------------------------------------| -| Path | `/lighthouse/ui/health` | +| Path | `/lighthouse/ui/health` | | Method | GET | | Required Headers | [`Authorization`](./api_vc_auth_header.md) | | Typical Responses | 200 | @@ -178,7 +178,7 @@ Returns the graffiti that will be used for the next block proposal of each valid | Property | Specification | |-------------------|--------------------------------------------| -| Path | `/lighthouse/ui/graffiti` | +| Path | `/lighthouse/ui/graffiti` | | Method | GET | | Required Headers | [`Authorization`](./api_vc_auth_header.md) | | Typical Responses | 200 | diff --git a/book/src/archived_merge_migration.md b/book/src/archived_merge_migration.md index ac9c78c5e3..b983db23ae 100644 --- a/book/src/archived_merge_migration.md +++ b/book/src/archived_merge_migration.md @@ -25,14 +25,14 @@ All networks (**Mainnet**, **Goerli (Prater)**, **Ropsten**, **Sepolia**, **Kiln
-| Network | Bellatrix | The Merge | Remark | -|---------|-------------------------------|-------------------------------| -----------| -| Ropsten | 2nd June 2022 | 8th June 2022 | Deprecated | -| Sepolia | 20th June 2022 | 6th July 2022 | | -| Goerli | 4th August 2022 | 10th August 2022 | Previously named `Prater`| -| Mainnet | 6th September 2022| 15th September 2022| | -| Chiado | 10th October 2022 | 4th November 2022 | | -| Gnosis | 30th November 2022| 8th December 2022 | | +| Network | Bellatrix | The Merge | Remark | +|---------|-------------------------------|--------------------------------|---------------------------| +| Ropsten | 2nd June 2022 | 8th June 2022 | Deprecated | +| Sepolia | 20th June 2022 | 6th July 2022 | | +| Goerli | 4th August 2022 | 10th August 2022 | Previously named `Prater` | +| Mainnet | 6th September 2022 | 15th September 2022 | | +| Chiado | 10th October 2022 | 4th November 2022 | | +| Gnosis | 30th November 2022 | 8th December 2022 | |
From 74b8c02630abd2c91700a9f65efdc25b4cb3f629 Mon Sep 17 00:00:00 2001 From: Lion - dapplion <35266934+dapplion@users.noreply.github.com> Date: Wed, 19 Nov 2025 08:00:38 -0300 Subject: [PATCH 8/9] Reimport the checkpoint sync block (#8417) We want to not require checkpoint sync starts to include the required custody data columns, and instead fetch them from p2p. Closes https://github.com/sigp/lighthouse/issues/6837 The checkpoint sync slot can: 1. Be the first slot in the epoch, such that the epoch of the block == the start checkpoint epoch 2. Be in an epoch prior to the start checkpoint epoch In both cases backfill sync already fetches that epoch worth of blocks with current code. This PR modifies the backfill import filter function to allow to re-importing the oldest block slot in the DB. I feel this solution is sufficient unless I'm missing something. ~~I have not tested this yet!~~ Michael has tested this and it works. Co-Authored-By: dapplion <35266934+dapplion@users.noreply.github.com> Co-Authored-By: Michael Sproul --- .../beacon_chain/src/historical_blocks.rs | 39 ++++++++++++++++--- .../src/test_utils/mock_builder.rs | 4 +- .../network_beacon_processor/sync_methods.rs | 10 +++++ beacon_node/store/src/hot_cold_store.rs | 8 +++- 4 files changed, 52 insertions(+), 9 deletions(-) diff --git a/beacon_node/beacon_chain/src/historical_blocks.rs b/beacon_node/beacon_chain/src/historical_blocks.rs index 15e0a55cf5..e4040eea6b 100644 --- a/beacon_node/beacon_chain/src/historical_blocks.rs +++ b/beacon_node/beacon_chain/src/historical_blocks.rs @@ -1,5 +1,5 @@ use crate::data_availability_checker::{AvailableBlock, AvailableBlockData}; -use crate::{BeaconChain, BeaconChainTypes, metrics}; +use crate::{BeaconChain, BeaconChainTypes, WhenSlotSkipped, metrics}; use itertools::Itertools; use state_processing::{ per_block_processing::ParallelSignatureSets, @@ -34,6 +34,8 @@ pub enum HistoricalBlockError { ValidatorPubkeyCacheTimeout, /// Logic error: should never occur. 
IndexOutOfBounds, + /// Logic error: should never occur. + MissingOldestBlockRoot { slot: Slot }, /// Internal store error StoreError(StoreError), } @@ -56,7 +58,8 @@ impl BeaconChain { /// `SignatureSetError` or `InvalidSignature` will be returned. /// /// To align with sync we allow some excess blocks with slots greater than or equal to - /// `oldest_block_slot` to be provided. They will be ignored without being checked. + /// `oldest_block_slot` to be provided. They will be re-imported to fill the columns of the + /// checkpoint sync block. /// /// This function should not be called concurrently with any other function that mutates /// the anchor info (including this function itself). If a concurrent mutation occurs that @@ -72,9 +75,12 @@ impl BeaconChain { let blob_info = self.store.get_blob_info(); let data_column_info = self.store.get_data_column_info(); - // Take all blocks with slots less than the oldest block slot. + // Take all blocks with slots less than or equal to the oldest block slot. + // + // This allows for reimport of the blobs/columns for the finalized block after checkpoint + // sync. 
let num_relevant = blocks.partition_point(|available_block| { - available_block.block().slot() < anchor_info.oldest_block_slot + available_block.block().slot() <= anchor_info.oldest_block_slot }); let total_blocks = blocks.len(); @@ -95,6 +101,7 @@ impl BeaconChain { } let mut expected_block_root = anchor_info.oldest_block_parent; + let mut last_block_root = expected_block_root; let mut prev_block_slot = anchor_info.oldest_block_slot; let mut new_oldest_blob_slot = blob_info.oldest_blob_slot; let mut new_oldest_data_column_slot = data_column_info.oldest_data_column_slot; @@ -107,7 +114,27 @@ impl BeaconChain { for available_block in blocks_to_import.into_iter().rev() { let (block_root, block, block_data) = available_block.deconstruct(); - if block_root != expected_block_root { + if block.slot() == anchor_info.oldest_block_slot { + // When reimporting, verify that this is actually the same block (same block root). + let oldest_block_root = self + .block_root_at_slot(block.slot(), WhenSlotSkipped::None) + .ok() + .flatten() + .ok_or(HistoricalBlockError::MissingOldestBlockRoot { slot: block.slot() })?; + if block_root != oldest_block_root { + return Err(HistoricalBlockError::MismatchedBlockRoot { + block_root, + expected_block_root: oldest_block_root, + }); + } + + debug!( + ?block_root, + slot = %block.slot(), + "Re-importing historic block" + ); + last_block_root = block_root; + } else if block_root != expected_block_root { return Err(HistoricalBlockError::MismatchedBlockRoot { block_root, expected_block_root, @@ -198,7 +225,7 @@ impl BeaconChain { .ok_or(HistoricalBlockError::IndexOutOfBounds)? 
.iter() .map(|block| block.parent_root()) - .chain(iter::once(anchor_info.oldest_block_parent)); + .chain(iter::once(last_block_root)); let signature_set = signed_blocks .iter() .zip_eq(block_roots) diff --git a/beacon_node/execution_layer/src/test_utils/mock_builder.rs b/beacon_node/execution_layer/src/test_utils/mock_builder.rs index df1e371719..9967668a5f 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_builder.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_builder.rs @@ -842,7 +842,7 @@ impl MockBuilder { .beacon_client .get_beacon_blocks::(BlockId::Finalized) .await - .map_err(|_| "couldn't get finalized block".to_string())? + .map_err(|e| format!("couldn't get finalized block: {e:?}"))? .ok_or_else(|| "missing finalized block".to_string())? .data() .message() @@ -855,7 +855,7 @@ impl MockBuilder { .beacon_client .get_beacon_blocks::(BlockId::Justified) .await - .map_err(|_| "couldn't get justified block".to_string())? + .map_err(|e| format!("couldn't get justified block: {e:?}"))? .ok_or_else(|| "missing justified block".to_string())? .data() .message() diff --git a/beacon_node/network/src/network_beacon_processor/sync_methods.rs b/beacon_node/network/src/network_beacon_processor/sync_methods.rs index 41160fcfe4..e49ae134fe 100644 --- a/beacon_node/network/src/network_beacon_processor/sync_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/sync_methods.rs @@ -804,6 +804,16 @@ impl NetworkBeaconProcessor { // The peer is faulty if they bad signatures. Some(PeerAction::LowToleranceError) } + HistoricalBlockError::MissingOldestBlockRoot { slot } => { + warn!( + %slot, + error = "missing_oldest_block_root", + "Backfill batch processing error" + ); + // This is an internal error, do not penalize the peer. 
+ None + } + HistoricalBlockError::ValidatorPubkeyCacheTimeout => { warn!( error = "pubkey_cache_timeout", diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index a0a75dbb0d..e926caa9c7 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -649,9 +649,15 @@ impl, Cold: ItemStore> HotColdDB .inspect(|cache| cache.lock().put_block(*block_root, full_block.clone())); DatabaseBlock::Full(full_block) - } else if !self.config.prune_payloads { + } else if !self.config.prune_payloads || *block_root == split.block_root { // If payload pruning is disabled there's a chance we may have the payload of // this finalized block. Attempt to load it but don't error in case it's missing. + // + // We also allow for the split block's payload to be loaded *if it exists*. This is + // necessary on startup when syncing from an unaligned checkpoint (a checkpoint state + // at a skipped slot), and then loading the canonical head (with payload). If we modify + // payload pruning in future so that it doesn't prune the split block's payload, then + // this case could move to the case above where we error if the payload is missing. let fork_name = blinded_block.fork_name(&self.spec)?; if let Some(payload) = self.get_execution_payload(block_root, fork_name)? 
{ DatabaseBlock::Full( From ced49dd265e01ecbf02b12073bbfde3873058abe Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Thu, 20 Nov 2025 11:37:06 +1100 Subject: [PATCH 9/9] Release v8.0.1 (#8414) This hot fix release includes the following fixes: * #8388 * #8406 * #8391 * #8413 Co-Authored-By: Jimmy Chen --- Cargo.lock | 8 ++++---- beacon_node/Cargo.toml | 2 +- boot_node/Cargo.toml | 2 +- common/lighthouse_version/src/lib.rs | 6 +++--- lcli/Cargo.toml | 2 +- lighthouse/Cargo.toml | 2 +- 6 files changed, 11 insertions(+), 11 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7c58274598..9026b29e46 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -918,7 +918,7 @@ dependencies = [ [[package]] name = "beacon_node" -version = "8.0.0" +version = "8.0.1" dependencies = [ "account_utils", "beacon_chain", @@ -1193,7 +1193,7 @@ dependencies = [ [[package]] name = "boot_node" -version = "8.0.0" +version = "8.0.1" dependencies = [ "beacon_node", "bytes", @@ -5064,7 +5064,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lcli" -version = "8.0.0" +version = "8.0.1" dependencies = [ "account_utils", "beacon_chain", @@ -5574,7 +5574,7 @@ dependencies = [ [[package]] name = "lighthouse" -version = "8.0.0" +version = "8.0.1" dependencies = [ "account_manager", "account_utils", diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 6a54d3342e..ea834357c7 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "beacon_node" -version = "8.0.0" +version = "8.0.1" authors = [ "Paul Hauner ", "Age Manning "] edition = { workspace = true } diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index a0965fa548..c865e5ba69 100644 --- a/common/lighthouse_version/src/lib.rs +++ b/common/lighthouse_version/src/lib.rs @@ -17,8 +17,8 @@ pub const VERSION: &str = git_version!( // NOTE: using --match instead of --exclude for compatibility with old Git 
"--match=thiswillnevermatchlol" ], - prefix = "Lighthouse/v8.0.0-", - fallback = "Lighthouse/v8.0.0" + prefix = "Lighthouse/v8.0.1-", + fallback = "Lighthouse/v8.0.1" ); /// Returns the first eight characters of the latest commit hash for this build. @@ -54,7 +54,7 @@ pub fn version_with_platform() -> String { /// /// `1.5.1` pub fn version() -> &'static str { - "8.0.0" + "8.0.1" } /// Returns the name of the current client running. diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index 6b7aeb886c..928b57f9bb 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lcli" description = "Lighthouse CLI (modeled after zcli)" -version = "8.0.0" +version = "8.0.1" authors = ["Paul Hauner "] edition = { workspace = true } diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 0d4129817a..bde8aae0af 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lighthouse" -version = "8.0.0" +version = "8.0.1" authors = ["Sigma Prime "] edition = { workspace = true } autotests = false