mirror of
https://github.com/sigp/lighthouse.git
synced 2026-03-02 16:21:42 +00:00
Implement /lighthouse/custody/info API (#8276)
Closes: - https://github.com/sigp/lighthouse/issues/8249 New `/lighthouse/custody/info` API including: - [x] Earliest custodied data column slot - [x] Node CGC - [x] Custodied columns Co-Authored-By: Michael Sproul <michael@sigmaprime.io>
This commit is contained in:
53
beacon_node/http_api/src/custody.rs
Normal file
53
beacon_node/http_api/src/custody.rs
Normal file
@@ -0,0 +1,53 @@
|
||||
use beacon_chain::{BeaconChain, BeaconChainTypes};
|
||||
use eth2::lighthouse::CustodyInfo;
|
||||
use std::sync::Arc;
|
||||
use types::EthSpec;
|
||||
use warp_utils::reject::{custom_bad_request, custom_server_error};
|
||||
|
||||
pub fn info<T: BeaconChainTypes>(
|
||||
chain: Arc<BeaconChain<T>>,
|
||||
) -> Result<CustodyInfo, warp::Rejection> {
|
||||
if !chain.spec.is_fulu_scheduled() {
|
||||
return Err(custom_bad_request("Fulu is not scheduled".to_string()));
|
||||
}
|
||||
|
||||
let opt_data_column_custody_info = chain
|
||||
.store
|
||||
.get_data_column_custody_info()
|
||||
.map_err(|e| custom_server_error(format!("error reading DataColumnCustodyInfo: {e:?}")))?;
|
||||
|
||||
let column_data_availability_boundary = chain
|
||||
.column_data_availability_boundary()
|
||||
.ok_or_else(|| custom_server_error("unreachable: Fulu should be enabled".to_string()))?;
|
||||
|
||||
let earliest_custodied_data_column_slot = opt_data_column_custody_info
|
||||
.and_then(|info| info.earliest_data_column_slot)
|
||||
.unwrap_or_else(|| {
|
||||
// If there's no data column custody info/earliest data column slot, it means *column*
|
||||
// backfill is not running. Block backfill could still be running, so our earliest
|
||||
// available column is either the oldest block slot or the DA boundary, whichever is
|
||||
// more recent.
|
||||
let oldest_block_slot = chain.store.get_anchor_info().oldest_block_slot;
|
||||
column_data_availability_boundary
|
||||
.start_slot(T::EthSpec::slots_per_epoch())
|
||||
.max(oldest_block_slot)
|
||||
});
|
||||
let earliest_custodied_data_column_epoch =
|
||||
earliest_custodied_data_column_slot.epoch(T::EthSpec::slots_per_epoch());
|
||||
|
||||
// Compute the custody columns and the CGC *at the earliest custodied slot*. The node might
|
||||
// have some columns prior to this, but this value is the most up-to-date view of the data the
|
||||
// node is custodying.
|
||||
let custody_context = chain.data_availability_checker.custody_context();
|
||||
let custody_columns = custody_context
|
||||
.custody_columns_for_epoch(Some(earliest_custodied_data_column_epoch), &chain.spec)
|
||||
.to_vec();
|
||||
let custody_group_count = custody_context
|
||||
.custody_group_count_at_epoch(earliest_custodied_data_column_epoch, &chain.spec);
|
||||
|
||||
Ok(CustodyInfo {
|
||||
earliest_custodied_data_column_slot,
|
||||
custody_group_count,
|
||||
custody_columns,
|
||||
})
|
||||
}
|
||||
@@ -13,6 +13,7 @@ mod block_packing_efficiency;
|
||||
mod block_rewards;
|
||||
mod build_block_contents;
|
||||
mod builder_states;
|
||||
mod custody;
|
||||
mod database;
|
||||
mod light_client;
|
||||
mod metrics;
|
||||
@@ -4590,6 +4591,19 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
},
|
||||
);
|
||||
|
||||
// GET lighthouse/custody/info
|
||||
let get_lighthouse_custody_info = warp::path("lighthouse")
|
||||
.and(warp::path("custody"))
|
||||
.and(warp::path("info"))
|
||||
.and(warp::path::end())
|
||||
.and(task_spawner_filter.clone())
|
||||
.and(chain_filter.clone())
|
||||
.then(
|
||||
|task_spawner: TaskSpawner<T::EthSpec>, chain: Arc<BeaconChain<T>>| {
|
||||
task_spawner.blocking_json_task(Priority::P1, move || custody::info(chain))
|
||||
},
|
||||
);
|
||||
|
||||
// GET lighthouse/analysis/block_rewards
|
||||
let get_lighthouse_block_rewards = warp::path("lighthouse")
|
||||
.and(warp::path("analysis"))
|
||||
@@ -4891,6 +4905,7 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
.uor(get_lighthouse_validator_inclusion)
|
||||
.uor(get_lighthouse_staking)
|
||||
.uor(get_lighthouse_database_info)
|
||||
.uor(get_lighthouse_custody_info)
|
||||
.uor(get_lighthouse_block_rewards)
|
||||
.uor(get_lighthouse_attestation_performance)
|
||||
.uor(get_beacon_light_client_optimistic_update)
|
||||
|
||||
@@ -2,7 +2,9 @@
|
||||
use beacon_chain::{
|
||||
ChainConfig,
|
||||
chain_config::{DisallowedReOrgOffsets, ReOrgThreshold},
|
||||
test_utils::{AttestationStrategy, BlockStrategy, LightClientStrategy, SyncCommitteeStrategy},
|
||||
test_utils::{
|
||||
AttestationStrategy, BlockStrategy, LightClientStrategy, SyncCommitteeStrategy, test_spec,
|
||||
},
|
||||
};
|
||||
use beacon_processor::{Work, WorkEvent, work_reprocessing_queue::ReprocessQueueMessage};
|
||||
use eth2::types::ProduceBlockV3Response;
|
||||
@@ -1047,3 +1049,77 @@ async fn proposer_duties_with_gossip_tolerance() {
|
||||
proposer_duties_current_epoch
|
||||
);
|
||||
}
|
||||
|
||||
// Test the `/lighthouse/custody/info` API: the earliest custodied data column slot, custody
// group count (CGC), and custody column set are checked both while the full chain is inside
// the DA window and after extending the chain far enough that the column DA window has
// started to slide forward.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn lighthouse_custody_info() {
    let mut spec = test_spec::<E>();

    // Skip pre-Fulu.
    if !spec.is_fulu_scheduled() {
        return;
    }

    // Use a short DA expiry period so we can observe non-zero values for the oldest data column
    // slot.
    spec.min_epochs_for_blob_sidecars_requests = 2;
    spec.min_epochs_for_data_column_sidecars_requests = 2;

    let validator_count = 24;

    let tester = InteractiveTester::<E>::new(Some(spec), validator_count).await;
    let harness = &tester.harness;
    let spec = &harness.spec;
    let client = &tester.client;

    // Two epochs of blocks initially (inside the 2-epoch DA window), then two more epochs to
    // push the earliest blocks/columns past the DA boundary.
    let num_initial = 2 * E::slots_per_epoch();
    let num_secondary = 2 * E::slots_per_epoch();

    harness.advance_slot();
    harness
        .extend_chain_with_sync(
            num_initial as usize,
            BlockStrategy::OnCanonicalHead,
            AttestationStrategy::AllValidators,
            SyncCommitteeStrategy::NoValidators,
            LightClientStrategy::Disabled,
        )
        .await;

    assert_eq!(harness.chain.slot().unwrap(), num_initial);

    // Everything is still within the DA window, so custody reaches back to genesis (slot 0)
    // and the CGC is the minimum custody requirement, one column per custody group.
    let info = client.get_lighthouse_custody_info().await.unwrap();
    assert_eq!(info.earliest_custodied_data_column_slot, 0);
    assert_eq!(info.custody_group_count, spec.custody_requirement);
    assert_eq!(
        info.custody_columns.len(),
        info.custody_group_count as usize
    );

    // Advance the chain some more to expire some blobs.
    harness.advance_slot();
    harness
        .extend_chain_with_sync(
            num_secondary as usize,
            BlockStrategy::OnCanonicalHead,
            AttestationStrategy::AllValidators,
            SyncCommitteeStrategy::NoValidators,
            LightClientStrategy::Disabled,
        )
        .await;

    assert_eq!(harness.chain.slot().unwrap(), num_initial + num_secondary);

    // The earliest custodied slot should now track the column DA boundary, i.e. the head slot
    // minus the column retention period (in slots).
    let info = client.get_lighthouse_custody_info().await.unwrap();
    assert_eq!(
        info.earliest_custodied_data_column_slot,
        num_initial + num_secondary
            - spec.min_epochs_for_data_column_sidecars_requests * E::slots_per_epoch()
    );
    assert_eq!(info.custody_group_count, spec.custody_requirement);
    assert_eq!(
        info.custody_columns.len(),
        info.custody_group_count as usize
    );
}
|
||||
|
||||
Reference in New Issue
Block a user