Merge branch 'gloas-containers' into gloas_envelope_processing_merge_containers

This commit is contained in:
Mark Mackey
2025-11-07 14:38:49 -06:00
193 changed files with 3040 additions and 2310 deletions

View File

@@ -0,0 +1,53 @@
use beacon_chain::{BeaconChain, BeaconChainTypes};
use eth2::lighthouse::CustodyInfo;
use std::sync::Arc;
use types::EthSpec;
use warp_utils::reject::{custom_bad_request, custom_server_error};
/// Build a `CustodyInfo` response describing the data columns this node custodies.
///
/// Returns a 400 rejection if Fulu is not scheduled, and a 500 rejection if the
/// store read fails or the data-availability boundary is unexpectedly absent.
pub fn info<T: BeaconChainTypes>(
    chain: Arc<BeaconChain<T>>,
) -> Result<CustodyInfo, warp::Rejection> {
    if !chain.spec.is_fulu_scheduled() {
        return Err(custom_bad_request("Fulu is not scheduled".to_string()));
    }

    let stored_custody_info = chain
        .store
        .get_data_column_custody_info()
        .map_err(|e| custom_server_error(format!("error reading DataColumnCustodyInfo: {e:?}")))?;

    let da_boundary = chain
        .column_data_availability_boundary()
        .ok_or_else(|| custom_server_error("unreachable: Fulu should be enabled".to_string()))?;

    let earliest_custodied_data_column_slot =
        match stored_custody_info.and_then(|info| info.earliest_data_column_slot) {
            Some(slot) => slot,
            None => {
                // No custody info / earliest data column slot recorded means *column*
                // backfill is not running. Block backfill could still be running, so the
                // earliest available column is whichever is more recent: the oldest block
                // slot or the DA boundary.
                let oldest_block_slot = chain.store.get_anchor_info().oldest_block_slot;
                std::cmp::max(
                    da_boundary.start_slot(T::EthSpec::slots_per_epoch()),
                    oldest_block_slot,
                )
            }
        };

    let earliest_custodied_epoch =
        earliest_custodied_data_column_slot.epoch(T::EthSpec::slots_per_epoch());

    // Compute the custody columns and the CGC *at the earliest custodied slot*. The node
    // might hold some columns prior to this, but this value is the most up-to-date view of
    // the data the node is custodying.
    let custody_context = chain.data_availability_checker.custody_context();

    Ok(CustodyInfo {
        earliest_custodied_data_column_slot,
        custody_group_count: custody_context
            .custody_group_count_at_epoch(earliest_custodied_epoch, &chain.spec),
        custody_columns: custody_context
            .custody_columns_for_epoch(Some(earliest_custodied_epoch), &chain.spec)
            .to_vec(),
    })
}

View File

@@ -13,6 +13,7 @@ mod block_packing_efficiency;
mod block_rewards;
mod build_block_contents;
mod builder_states;
mod custody;
mod database;
mod light_client;
mod metrics;
@@ -4590,6 +4591,50 @@ pub fn serve<T: BeaconChainTypes>(
},
);
// GET lighthouse/custody/info
let get_lighthouse_custody_info = warp::path("lighthouse")
.and(warp::path("custody"))
.and(warp::path("info"))
.and(warp::path::end())
.and(task_spawner_filter.clone())
.and(chain_filter.clone())
.then(
|task_spawner: TaskSpawner<T::EthSpec>, chain: Arc<BeaconChain<T>>| {
task_spawner.blocking_json_task(Priority::P1, move || custody::info(chain))
},
);
// POST lighthouse/custody/backfill
let post_lighthouse_custody_backfill = warp::path("lighthouse")
.and(warp::path("custody"))
.and(warp::path("backfill"))
.and(warp::path::end())
.and(task_spawner_filter.clone())
.and(chain_filter.clone())
.then(
|task_spawner: TaskSpawner<T::EthSpec>, chain: Arc<BeaconChain<T>>| {
task_spawner.blocking_json_task(Priority::P1, move || {
// Calling this endpoint will trigger custody backfill once `effective_epoch`
// is finalized.
let effective_epoch = chain
.canonical_head
.cached_head()
.head_slot()
.epoch(T::EthSpec::slots_per_epoch())
+ 1;
let custody_context = chain.data_availability_checker.custody_context();
// Reset validator custody requirements to `effective_epoch` with the latest
// CGC requirements.
custody_context.reset_validator_custody_requirements(effective_epoch);
// Update `DataColumnCustodyInfo` to reflect the custody change.
chain.update_data_column_custody_info(Some(
effective_epoch.start_slot(T::EthSpec::slots_per_epoch()),
));
Ok(())
})
},
);
// GET lighthouse/analysis/block_rewards
let get_lighthouse_block_rewards = warp::path("lighthouse")
.and(warp::path("analysis"))
@@ -4891,6 +4936,7 @@ pub fn serve<T: BeaconChainTypes>(
.uor(get_lighthouse_validator_inclusion)
.uor(get_lighthouse_staking)
.uor(get_lighthouse_database_info)
.uor(get_lighthouse_custody_info)
.uor(get_lighthouse_block_rewards)
.uor(get_lighthouse_attestation_performance)
.uor(get_beacon_light_client_optimistic_update)
@@ -4948,6 +4994,7 @@ pub fn serve<T: BeaconChainTypes>(
.uor(post_lighthouse_compaction)
.uor(post_lighthouse_add_peer)
.uor(post_lighthouse_remove_peer)
.uor(post_lighthouse_custody_backfill)
.recover(warp_utils::reject::handle_rejection),
),
)

View File

@@ -60,13 +60,13 @@ pub fn proposer_duties<T: BeaconChainTypes>(
.safe_add(1)
.map_err(warp_utils::reject::arith_error)?
{
let (proposers, dependent_root, execution_status, _fork) =
let (proposers, _dependent_root, legacy_dependent_root, execution_status, _fork) =
compute_proposer_duties_from_head(request_epoch, chain)
.map_err(warp_utils::reject::unhandled_error)?;
convert_to_api_response(
chain,
request_epoch,
dependent_root,
legacy_dependent_root,
execution_status.is_optimistic_or_invalid(),
proposers,
)
@@ -116,6 +116,11 @@ fn try_proposer_duties_from_cache<T: BeaconChainTypes>(
.beacon_state
.proposer_shuffling_decision_root_at_epoch(request_epoch, head_block_root, &chain.spec)
.map_err(warp_utils::reject::beacon_state_error)?;
let legacy_dependent_root = head
.snapshot
.beacon_state
.legacy_proposer_shuffling_decision_root_at_epoch(request_epoch, head_block_root)
.map_err(warp_utils::reject::beacon_state_error)?;
let execution_optimistic = chain
.is_optimistic_or_invalid_head_block(head_block)
.map_err(warp_utils::reject::unhandled_error)?;
@@ -129,7 +134,7 @@ fn try_proposer_duties_from_cache<T: BeaconChainTypes>(
convert_to_api_response(
chain,
request_epoch,
head_decision_root,
legacy_dependent_root,
execution_optimistic,
indices.to_vec(),
)
@@ -151,7 +156,7 @@ fn compute_and_cache_proposer_duties<T: BeaconChainTypes>(
current_epoch: Epoch,
chain: &BeaconChain<T>,
) -> Result<ApiDuties, warp::reject::Rejection> {
let (indices, dependent_root, execution_status, fork) =
let (indices, dependent_root, legacy_dependent_root, execution_status, fork) =
compute_proposer_duties_from_head(current_epoch, chain)
.map_err(warp_utils::reject::unhandled_error)?;
@@ -166,7 +171,7 @@ fn compute_and_cache_proposer_duties<T: BeaconChainTypes>(
convert_to_api_response(
chain,
current_epoch,
dependent_root,
legacy_dependent_root,
execution_status.is_optimistic_or_invalid(),
indices,
)
@@ -229,12 +234,18 @@ fn compute_historic_proposer_duties<T: BeaconChainTypes>(
// We can supply the genesis block root as the block root since we know that the only block that
// decides its own root is the genesis block.
let dependent_root = state
.proposer_shuffling_decision_root(chain.genesis_block_root, &chain.spec)
let legacy_dependent_root = state
.legacy_proposer_shuffling_decision_root_at_epoch(epoch, chain.genesis_block_root)
.map_err(BeaconChainError::from)
.map_err(warp_utils::reject::unhandled_error)?;
convert_to_api_response(chain, epoch, dependent_root, execution_optimistic, indices)
convert_to_api_response(
chain,
epoch,
legacy_dependent_root,
execution_optimistic,
indices,
)
}
/// Converts the internal representation of proposer duties into one that is compatible with the

View File

@@ -1,6 +1,7 @@
use crate::{Config, Context};
use beacon_chain::{
BeaconChain, BeaconChainTypes,
custody_context::NodeCustodyType,
test_utils::{BeaconChainHarness, BoxedMutator, Builder, EphemeralHarnessType},
};
use beacon_processor::{
@@ -67,6 +68,20 @@ impl<E: EthSpec> InteractiveTester<E> {
None,
Config::default(),
true,
NodeCustodyType::Fullnode,
)
.await
}
pub async fn new_supernode(spec: Option<ChainSpec>, validator_count: usize) -> Self {
Self::new_with_initializer_and_mutator(
spec,
validator_count,
None,
None,
Config::default(),
true,
NodeCustodyType::Supernode,
)
.await
}
@@ -78,6 +93,7 @@ impl<E: EthSpec> InteractiveTester<E> {
mutator: Option<Mutator<E>>,
config: Config,
use_mock_builder: bool,
node_custody_type: NodeCustodyType,
) -> Self {
let mut harness_builder = BeaconChainHarness::builder(E::default())
.spec_or_default(spec.map(Arc::new))
@@ -93,6 +109,8 @@ impl<E: EthSpec> InteractiveTester<E> {
.fresh_ephemeral_store()
};
harness_builder = harness_builder.node_custody_type(node_custody_type);
// Add a mutator for the beacon chain builder which will be called in
// `HarnessBuilder::build`.
if let Some(mutator) = mutator {

View File

@@ -1,3 +1,4 @@
use beacon_chain::custody_context::NodeCustodyType;
use beacon_chain::test_utils::test_spec;
use beacon_chain::{
GossipVerifiedBlock, IntoGossipVerifiedBlock, WhenSlotSkipped,
@@ -822,6 +823,14 @@ pub async fn blinded_gossip_invalid() {
tester.harness.advance_slot();
// Ensure there's at least one blob in the block, so we don't run into failures when the
// block generator logic changes, as different errors could be returned:
// * Invalidity of blocks: `NotFinalizedDescendant`
// * Invalidity of blobs: `ParentUnknown`
tester
.harness
.execution_block_generator()
.set_min_blob_count(1);
let (blinded_block, _) = tester
.harness
.make_blinded_block_with_modifier(chain_state_before, slot, |b| {
@@ -837,21 +846,20 @@ pub async fn blinded_gossip_invalid() {
assert!(response.is_err());
let error_response: eth2::Error = response.err().unwrap();
assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST));
let pre_finalized_block_root = Hash256::zero();
/* mandated by Beacon API spec */
if tester.harness.spec.is_fulu_scheduled() {
// XXX: this should be a 400 but is a 500 due to the mock-builder being janky
assert_eq!(
error_response.status(),
Some(StatusCode::INTERNAL_SERVER_ERROR)
);
let expected_error_msg = if tester.harness.spec.is_fulu_scheduled() {
format!(
"BAD_REQUEST: NotFinalizedDescendant {{ block_parent_root: {pre_finalized_block_root:?} }}"
)
} else {
assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST));
assert_server_message_error(
error_response,
format!("BAD_REQUEST: ParentUnknown {{ parent_root: {pre_finalized_block_root:?} }}"),
);
}
// Since Deneb, the invalidity of the blobs will be detected prior to the invalidity of the
// block.
format!("BAD_REQUEST: ParentUnknown {{ parent_root: {pre_finalized_block_root:?} }}")
};
assert_server_message_error(error_response, expected_error_msg);
}
/// Process a blinded block that is invalid, but valid on gossip.
@@ -1647,6 +1655,10 @@ pub async fn block_seen_on_gossip_with_some_blobs_or_columns() {
)
.await;
tester.harness.advance_slot();
tester
.harness
.execution_block_generator()
.set_min_blob_count(2);
let slot_a = Slot::new(num_initial);
let slot_b = slot_a + 1;
@@ -1956,6 +1968,7 @@ pub async fn duplicate_block_status_code() {
..Config::default()
},
true,
NodeCustodyType::Fullnode,
)
.await;

View File

@@ -1,4 +1,5 @@
//! Tests for API behaviour across fork boundaries.
use beacon_chain::custody_context::NodeCustodyType;
use beacon_chain::{
StateSkipConfig,
test_utils::{DEFAULT_ETH1_BLOCK_HASH, HARNESS_GENESIS_TIME, RelativeSyncCommittee},
@@ -426,6 +427,7 @@ async fn bls_to_execution_changes_update_all_around_capella_fork() {
None,
Default::default(),
true,
NodeCustodyType::Fullnode,
)
.await;
let harness = &tester.harness;

View File

@@ -1,8 +1,11 @@
//! Generic tests that make use of the (newer) `InteractiveApiTester`
use beacon_chain::custody_context::NodeCustodyType;
use beacon_chain::{
ChainConfig,
chain_config::{DisallowedReOrgOffsets, ReOrgThreshold},
test_utils::{AttestationStrategy, BlockStrategy, LightClientStrategy, SyncCommitteeStrategy},
test_utils::{
AttestationStrategy, BlockStrategy, LightClientStrategy, SyncCommitteeStrategy, test_spec,
},
};
use beacon_processor::{Work, WorkEvent, work_reprocessing_queue::ReprocessQueueMessage};
use eth2::types::ProduceBlockV3Response;
@@ -77,6 +80,7 @@ async fn state_by_root_pruned_from_fork_choice() {
None,
Default::default(),
false,
NodeCustodyType::Fullnode,
)
.await;
@@ -437,6 +441,7 @@ pub async fn proposer_boost_re_org_test(
})),
Default::default(),
false,
NodeCustodyType::Fullnode,
)
.await;
let harness = &tester.harness;
@@ -1021,10 +1026,9 @@ async fn proposer_duties_with_gossip_tolerance() {
assert_eq!(
proposer_duties_tolerant_current_epoch.dependent_root,
head_state
.proposer_shuffling_decision_root_at_epoch(
.legacy_proposer_shuffling_decision_root_at_epoch(
tolerant_current_epoch,
head_block_root,
spec
)
.unwrap()
);
@@ -1053,3 +1057,139 @@ async fn proposer_duties_with_gossip_tolerance() {
proposer_duties_current_epoch
);
}
// Test that a request to `lighthouse/custody/backfill` succeeds by verifying that `CustodyContext` and `DataColumnCustodyInfo`
// have been updated with the correct values.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn lighthouse_restart_custody_backfill() {
let spec = test_spec::<E>();
// Skip pre-Fulu.
if !spec.is_fulu_scheduled() {
return;
}
let validator_count = 24;
// A supernode custodies the maximum custody group count at head.
let tester = InteractiveTester::<E>::new_supernode(Some(spec), validator_count).await;
let harness = &tester.harness;
let spec = &harness.spec;
let client = &tester.client;
let min_cgc = spec.custody_requirement;
let max_cgc = spec.number_of_custody_groups;
let num_blocks = 2 * E::slots_per_epoch();
let custody_context = harness.chain.data_availability_checker.custody_context();
harness.advance_slot();
harness
.extend_chain_with_sync(
num_blocks as usize,
BlockStrategy::OnCanonicalHead,
AttestationStrategy::AllValidators,
SyncCommitteeStrategy::NoValidators,
LightClientStrategy::Disabled,
)
.await;
let cgc_at_head = custody_context.custody_group_count_at_head(spec);
let earliest_data_column_epoch = harness.chain.earliest_custodied_data_column_epoch();
// Precondition: supernode CGC at head, and no `DataColumnCustodyInfo` recorded yet.
assert_eq!(cgc_at_head, max_cgc);
assert_eq!(earliest_data_column_epoch, None);
custody_context
.update_and_backfill_custody_count_at_epoch(harness.chain.epoch().unwrap(), cgc_at_head);
client.post_lighthouse_custody_backfill().await.unwrap();
let cgc_at_head = custody_context.custody_group_count_at_head(spec);
let cgc_at_previous_epoch =
custody_context.custody_group_count_at_epoch(harness.chain.epoch().unwrap() - 1, spec);
let earliest_data_column_epoch = harness.chain.earliest_custodied_data_column_epoch();
// `DataColumnCustodyInfo` should have been updated to head epoch + 1: the backfill
// endpoint uses `head epoch + 1` as its `effective_epoch`.
assert_eq!(
earliest_data_column_epoch,
Some(harness.chain.epoch().unwrap() + 1)
);
// Cgc requirements should have stayed the same at head
assert_eq!(cgc_at_head, max_cgc);
// Cgc requirements at the previous epoch should be `min_cgc`
// This allows for custody backfill to re-fetch columns for this epoch.
assert_eq!(cgc_at_previous_epoch, min_cgc);
}
// Test that `lighthouse/custody/info` reports the expected earliest custodied data column
// slot, custody group count, and custody columns — both before and after data column expiry.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn lighthouse_custody_info() {
let mut spec = test_spec::<E>();
// Skip pre-Fulu.
if !spec.is_fulu_scheduled() {
return;
}
// Use a short DA expiry period so we can observe non-zero values for the oldest data column
// slot.
spec.min_epochs_for_blob_sidecars_requests = 2;
spec.min_epochs_for_data_column_sidecars_requests = 2;
let validator_count = 24;
let tester = InteractiveTester::<E>::new(Some(spec), validator_count).await;
let harness = &tester.harness;
let spec = &harness.spec;
let client = &tester.client;
let num_initial = 2 * E::slots_per_epoch();
let num_secondary = 2 * E::slots_per_epoch();
harness.advance_slot();
harness
.extend_chain_with_sync(
num_initial as usize,
BlockStrategy::OnCanonicalHead,
AttestationStrategy::AllValidators,
SyncCommitteeStrategy::NoValidators,
LightClientStrategy::Disabled,
)
.await;
assert_eq!(harness.chain.slot().unwrap(), num_initial);
let info = client.get_lighthouse_custody_info().await.unwrap();
// Nothing has expired yet, so custody starts at genesis (slot 0).
assert_eq!(info.earliest_custodied_data_column_slot, 0);
assert_eq!(info.custody_group_count, spec.custody_requirement);
assert_eq!(
info.custody_columns.len(),
info.custody_group_count as usize
);
// Advance the chain some more to expire some blobs.
harness.advance_slot();
harness
.extend_chain_with_sync(
num_secondary as usize,
BlockStrategy::OnCanonicalHead,
AttestationStrategy::AllValidators,
SyncCommitteeStrategy::NoValidators,
LightClientStrategy::Disabled,
)
.await;
assert_eq!(harness.chain.slot().unwrap(), num_initial + num_secondary);
let info = client.get_lighthouse_custody_info().await.unwrap();
// After expiry, the earliest custodied slot should sit exactly at the DA boundary:
// head slot minus the data column retention window.
assert_eq!(
info.earliest_custodied_data_column_slot,
num_initial + num_secondary
- spec.min_epochs_for_data_column_sidecars_requests * E::slots_per_epoch()
);
assert_eq!(info.custody_group_count, spec.custody_requirement);
assert_eq!(
info.custody_columns.len(),
info.custody_group_count as usize
);
}

View File

@@ -178,6 +178,9 @@ impl ApiTester {
"precondition: current slot is one after head"
);
// Set a min blob count for the next block for get_blobs testing
harness.execution_block_generator().set_min_blob_count(2);
let (next_block, _next_state) = harness
.make_block(head.beacon_state.clone(), harness.chain.slot().unwrap())
.await;
@@ -1316,12 +1319,14 @@ impl ApiTester {
.ok()
.map(|(state, _execution_optimistic, _finalized)| state);
let result = self
let result = match self
.client
.get_beacon_states_pending_deposits(state_id.0)
.await
.unwrap()
.map(|res| res.data);
{
Ok(response) => response,
Err(e) => panic!("query failed incorrectly: {e:?}"),
};
if result.is_none() && state_opt.is_none() {
continue;
@@ -1330,7 +1335,12 @@ impl ApiTester {
let state = state_opt.as_mut().expect("result should be none");
let expected = state.pending_deposits().unwrap();
assert_eq!(result.unwrap(), expected.to_vec());
let response = result.unwrap();
assert_eq!(response.data(), &expected.to_vec());
// Check that the version header is returned in the response
let fork_name = state.fork_name(&self.chain.spec).unwrap();
assert_eq!(response.version(), Some(fork_name),);
}
self
@@ -1343,12 +1353,14 @@ impl ApiTester {
.ok()
.map(|(state, _execution_optimistic, _finalized)| state);
let result = self
let result = match self
.client
.get_beacon_states_pending_partial_withdrawals(state_id.0)
.await
.unwrap()
.map(|res| res.data);
{
Ok(response) => response,
Err(e) => panic!("query failed incorrectly: {e:?}"),
};
if result.is_none() && state_opt.is_none() {
continue;
@@ -1357,7 +1369,12 @@ impl ApiTester {
let state = state_opt.as_mut().expect("result should be none");
let expected = state.pending_partial_withdrawals().unwrap();
assert_eq!(result.unwrap(), expected.to_vec());
let response = result.unwrap();
assert_eq!(response.data(), &expected.to_vec());
// Check that the version header is returned in the response
let fork_name = state.fork_name(&self.chain.spec).unwrap();
assert_eq!(response.version(), Some(fork_name),);
}
self
@@ -1855,7 +1872,7 @@ impl ApiTester {
}
pub async fn test_get_blob_sidecars(self, use_indices: bool) -> Self {
let block_id = BlockId(CoreBlockId::Finalized);
let block_id = BlockId(CoreBlockId::Head);
let (block_root, _, _) = block_id.root(&self.chain).unwrap();
let (block, _, _) = block_id.full_block(&self.chain).await.unwrap();
let num_blobs = block.num_expected_blobs();
@@ -1888,7 +1905,7 @@ impl ApiTester {
}
pub async fn test_get_blobs(self, versioned_hashes: bool) -> Self {
let block_id = BlockId(CoreBlockId::Finalized);
let block_id = BlockId(CoreBlockId::Head);
let (block_root, _, _) = block_id.root(&self.chain).unwrap();
let (block, _, _) = block_id.full_block(&self.chain).await.unwrap();
let num_blobs = block.num_expected_blobs();
@@ -1926,7 +1943,7 @@ impl ApiTester {
}
pub async fn test_get_blobs_post_fulu_full_node(self, versioned_hashes: bool) -> Self {
let block_id = BlockId(CoreBlockId::Finalized);
let block_id = BlockId(CoreBlockId::Head);
let (block_root, _, _) = block_id.root(&self.chain).unwrap();
let (block, _, _) = block_id.full_block(&self.chain).await.unwrap();
@@ -7853,6 +7870,8 @@ async fn get_blobs_post_fulu_supernode() {
config.spec.fulu_fork_epoch = Some(Epoch::new(0));
ApiTester::new_from_config(config)
.await
.test_post_beacon_blocks_valid()
.await
// We can call the same get_blobs function in this test
// because the function will call get_blobs_by_versioned_hashes which handles peerDAS post-Fulu
@@ -7873,6 +7892,8 @@ async fn get_blobs_post_fulu_full_node() {
config.spec.fulu_fork_epoch = Some(Epoch::new(0));
ApiTester::new_from_config(config)
.await
.test_post_beacon_blocks_valid()
.await
.test_get_blobs_post_fulu_full_node(false)
.await