Reduce number of blobs used in tests to speed up CI (#8194)

`beacon-chain-tests` is now regularly taking 1h+ on CI since the Fulu fork was added.

This PR attempts to reduce the test time by bringing down the number of blobs generated in tests - instead of generating 0..max_blobs, the generator now generates 0..1 blobs by default, and this can be modified by setting `harness.execution_block_generator.set_min_blob_count(n)`.

Note: The blobs are pre-generated and don't require too much CPU to generate; however, processing a larger number of them on the beacon chain does take a lot of time.

This PR also includes a few other small improvements:
- Our slowest test (`chain_segment_varying_chunk_size`) runs 3x faster in Fulu just by reusing chain segments
- Avoid re-running fork specific tests on all forks
- Fix a bunch of tests that depend on the harness's existing random blob generation, which is fragile


beacon chain test time on test machine is **~2x** faster:

### `unstable`

```
Summary [ 751.586s] 291 tests run: 291 passed (13 slow), 0 skipped
```

### this branch

```
Summary [ 373.792s] 291 tests run: 291 passed (2 slow), 0 skipped
```

The next set of tests to optimise is the ones that use [`get_chain_segment`](77a9af96de/beacon_node/beacon_chain/tests/block_verification.rs (L45)), as it by default builds 320 blocks with a supernode - an easy optimisation would be to build these blocks with cgc = 8 for tests that only require fullnodes.


  


Co-Authored-By: Jimmy Chen <jchen.tc@gmail.com>

Co-Authored-By: Jimmy Chen <jimmy@sigmaprime.io>
This commit is contained in:
Jimmy Chen
2025-11-04 13:40:44 +11:00
committed by GitHub
parent 2c9b670f5d
commit bc86dc09e5
17 changed files with 171 additions and 172 deletions

View File

@@ -297,19 +297,20 @@ async fn chain_segment_full_segment() {
#[tokio::test]
async fn chain_segment_varying_chunk_size() {
for chunk_size in &[1, 2, 3, 5, 31, 32, 33, 42] {
let (chain_segment, chain_segment_blobs) = get_chain_segment().await;
let blocks: Vec<RpcBlock<E>> = chain_segment_blocks(&chain_segment, &chain_segment_blobs)
.into_iter()
.collect();
for chunk_size in &[1, 2, 31, 32, 33] {
let harness = get_harness(VALIDATOR_COUNT, NodeCustodyType::Fullnode);
let (chain_segment, chain_segment_blobs) = get_chain_segment().await;
let blocks: Vec<RpcBlock<E>> = chain_segment_blocks(&chain_segment, &chain_segment_blobs)
.into_iter()
.collect();
harness
.chain
.slot_clock
.set_slot(blocks.last().unwrap().slot().as_u64());
for chunk in blocks.chunks(*chunk_size) {
for chunk in blocks.clone().chunks(*chunk_size) {
harness
.chain
.process_chain_segment(chunk.to_vec(), NotifyExecutionLayer::Yes)

View File

@@ -1,20 +1,26 @@
use beacon_chain::blob_verification::GossipVerifiedBlob;
use beacon_chain::data_column_verification::GossipVerifiedDataColumn;
use beacon_chain::test_utils::{BeaconChainHarness, generate_data_column_sidecars_from_block};
use beacon_chain::test_utils::{
BeaconChainHarness, fork_name_from_env, generate_data_column_sidecars_from_block, test_spec,
};
use eth2::types::{EventKind, SseBlobSidecar, SseDataColumnSidecar};
use rand::SeedableRng;
use rand::rngs::StdRng;
use std::sync::Arc;
use types::blob_sidecar::FixedBlobSidecarList;
use types::test_utils::TestRandom;
use types::{BlobSidecar, DataColumnSidecar, EthSpec, ForkName, MinimalEthSpec, Slot};
use types::{BlobSidecar, DataColumnSidecar, EthSpec, MinimalEthSpec, Slot};
type E = MinimalEthSpec;
/// Verifies that a blob event is emitted when a gossip verified blob is received via gossip or the publish block API.
#[tokio::test]
async fn blob_sidecar_event_on_process_gossip_blob() {
let spec = Arc::new(ForkName::Deneb.make_genesis_spec(E::default_spec()));
if fork_name_from_env().is_some_and(|f| !f.deneb_enabled() || f.fulu_enabled()) {
return;
};
let spec = Arc::new(test_spec::<E>());
let harness = BeaconChainHarness::builder(E::default())
.spec(spec)
.deterministic_keypairs(8)
@@ -48,7 +54,11 @@ async fn blob_sidecar_event_on_process_gossip_blob() {
/// Verifies that a data column event is emitted when a gossip verified data column is received via gossip or the publish block API.
#[tokio::test]
async fn data_column_sidecar_event_on_process_gossip_data_column() {
let spec = Arc::new(ForkName::Fulu.make_genesis_spec(E::default_spec()));
if fork_name_from_env().is_some_and(|f| !f.fulu_enabled()) {
return;
};
let spec = Arc::new(test_spec::<E>());
let harness = BeaconChainHarness::builder(E::default())
.spec(spec)
.deterministic_keypairs(8)
@@ -93,7 +103,11 @@ async fn data_column_sidecar_event_on_process_gossip_data_column() {
/// Verifies that a blob event is emitted when blobs are received via RPC.
#[tokio::test]
async fn blob_sidecar_event_on_process_rpc_blobs() {
let spec = Arc::new(ForkName::Deneb.make_genesis_spec(E::default_spec()));
if fork_name_from_env().is_some_and(|f| !f.deneb_enabled() || f.fulu_enabled()) {
return;
};
let spec = Arc::new(test_spec::<E>());
let harness = BeaconChainHarness::builder(E::default())
.spec(spec)
.deterministic_keypairs(8)
@@ -112,7 +126,7 @@ async fn blob_sidecar_event_on_process_rpc_blobs() {
let slot = head_state.slot() + 1;
let ((signed_block, opt_blobs), _) = harness.make_block(head_state, slot).await;
let (kzg_proofs, blobs) = opt_blobs.unwrap();
assert!(blobs.len() > 2);
assert_eq!(blobs.len(), 2);
let blob_1 =
Arc::new(BlobSidecar::new(0, blobs[0].clone(), &signed_block, kzg_proofs[0]).unwrap());
@@ -144,7 +158,11 @@ async fn blob_sidecar_event_on_process_rpc_blobs() {
#[tokio::test]
async fn data_column_sidecar_event_on_process_rpc_columns() {
let spec = Arc::new(ForkName::Fulu.make_genesis_spec(E::default_spec()));
if fork_name_from_env().is_some_and(|f| !f.fulu_enabled()) {
return;
};
let spec = Arc::new(test_spec::<E>());
let harness = BeaconChainHarness::builder(E::default())
.spec(spec.clone())
.deterministic_keypairs(8)

View File

@@ -7,11 +7,11 @@ use beacon_chain::custody_context::CUSTODY_CHANGE_DA_EFFECTIVE_DELAY_SECONDS;
use beacon_chain::data_availability_checker::AvailableBlock;
use beacon_chain::historical_data_columns::HistoricalDataColumnError;
use beacon_chain::schema_change::migrate_schema;
use beacon_chain::test_utils::SyncCommitteeStrategy;
use beacon_chain::test_utils::{
AttestationStrategy, BeaconChainHarness, BlockStrategy, DiskHarnessType, get_kzg,
mock_execution_layer_from_parts, test_spec,
};
use beacon_chain::test_utils::{SyncCommitteeStrategy, fork_name_from_env};
use beacon_chain::{
BeaconChain, BeaconChainError, BeaconChainTypes, BeaconSnapshot, BlockError, ChainConfig,
NotifyExecutionLayer, ServerSentEventHandler, WhenSlotSkipped,
@@ -3211,12 +3211,13 @@ async fn test_import_historical_data_columns_batch() {
for block in block_root_iter {
let (block_root, _) = block.unwrap();
let data_columns = harness.chain.store.get_data_columns(&block_root).unwrap();
assert!(data_columns.is_some());
for data_column in data_columns.unwrap() {
for data_column in data_columns.unwrap_or_default() {
data_columns_list.push(data_column);
}
}
assert!(!data_columns_list.is_empty());
harness
.extend_chain(
(E::slots_per_epoch() * 4) as usize,
@@ -3255,8 +3256,18 @@ async fn test_import_historical_data_columns_batch() {
for block in block_root_iter {
let (block_root, _) = block.unwrap();
let data_columns = harness.chain.store.get_data_columns(&block_root).unwrap();
assert!(data_columns.is_some())
if !harness
.get_block(block_root.into())
.unwrap()
.message()
.body()
.blob_kzg_commitments()
.unwrap()
.is_empty()
{
let data_columns = harness.chain.store.get_data_columns(&block_root).unwrap();
assert!(data_columns.is_some())
};
}
}
@@ -3290,9 +3301,8 @@ async fn test_import_historical_data_columns_batch_mismatched_block_root() {
for block in block_root_iter {
let (block_root, _) = block.unwrap();
let data_columns = harness.chain.store.get_data_columns(&block_root).unwrap();
assert!(data_columns.is_some());
for data_column in data_columns.unwrap() {
for data_column in data_columns.unwrap_or_default() {
let mut data_column = (*data_column).clone();
if data_column.index % 2 == 0 {
data_column.signed_block_header.message.body_root = Hash256::ZERO;
@@ -3301,6 +3311,7 @@ async fn test_import_historical_data_columns_batch_mismatched_block_root() {
data_columns_list.push(Arc::new(data_column));
}
}
assert!(!data_columns_list.is_empty());
harness
.extend_chain(
@@ -3347,7 +3358,11 @@ async fn test_import_historical_data_columns_batch_mismatched_block_root() {
// be imported.
#[tokio::test]
async fn test_import_historical_data_columns_batch_no_block_found() {
let spec = ForkName::Fulu.make_genesis_spec(E::default_spec());
if fork_name_from_env().is_some_and(|f| !f.fulu_enabled()) {
return;
};
let spec = test_spec::<E>();
let db_path = tempdir().unwrap();
let store = get_store_generic(&db_path, StoreConfig::default(), spec);
let start_slot = Slot::new(1);
@@ -3374,15 +3389,16 @@ async fn test_import_historical_data_columns_batch_no_block_found() {
for block in block_root_iter {
let (block_root, _) = block.unwrap();
let data_columns = harness.chain.store.get_data_columns(&block_root).unwrap();
assert!(data_columns.is_some());
for data_column in data_columns.unwrap() {
for data_column in data_columns.unwrap_or_default() {
let mut data_column = (*data_column).clone();
data_column.signed_block_header.message.body_root = Hash256::ZERO;
data_columns_list.push(Arc::new(data_column));
}
}
assert!(!data_columns_list.is_empty());
harness
.extend_chain(
(E::slots_per_epoch() * 4) as usize,
@@ -4108,6 +4124,12 @@ async fn deneb_prune_blobs_no_finalization() {
/// Check that blob pruning does not fail trying to prune across the fork boundary.
#[tokio::test]
async fn prune_blobs_across_fork_boundary() {
// This test covers earlier forks and only need to be executed once.
// Note: this test is quite expensive (building a chain to epoch 15) and we should revisit this
if fork_name_from_env() != Some(ForkName::latest_stable()) {
return;
}
let mut spec = ForkName::Capella.make_genesis_spec(E::default_spec());
let deneb_fork_epoch = Epoch::new(4);
@@ -4124,6 +4146,7 @@ async fn prune_blobs_across_fork_boundary() {
let store = get_store_generic(&db_path, StoreConfig::default(), spec);
let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT);
harness.execution_block_generator().set_min_blob_count(1);
let blocks_to_deneb_finalization = E::slots_per_epoch() * 7;
let blocks_to_electra_finalization = E::slots_per_epoch() * 4;
@@ -4279,7 +4302,7 @@ async fn prune_blobs_across_fork_boundary() {
// Fulu fork epochs
// Pruning should have been triggered
assert!(store.get_blob_info().oldest_blob_slot <= Some(oldest_slot));
// Oldest blost slot should never be greater than the first fulu slot
// Oldest blob slot should never be greater than the first fulu slot
let fulu_first_slot = fulu_fork_epoch.start_slot(E::slots_per_epoch());
assert!(store.get_blob_info().oldest_blob_slot <= Some(fulu_first_slot));
// Blobs should not exist post-Fulu
@@ -4764,7 +4787,7 @@ async fn fulu_prune_data_columns_margin_test(margin: u64) {
check_data_column_existence(&harness, oldest_data_column_slot, harness.head_slot(), true);
}
/// Check tat there are data column sidecars (or not) at every slot in the range.
/// Check that there are data column sidecars (or not) at every slot in the range.
fn check_data_column_existence(
harness: &TestHarness,
start_slot: Slot,