mirror of
https://github.com/sigp/lighthouse.git
synced 2026-03-10 12:11:59 +00:00
Merge remote-tracking branch 'origin/stable' into unstable-merge-v8
This commit is contained in:
@@ -4604,6 +4604,37 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
},
|
||||
);
|
||||
|
||||
// POST lighthouse/custody/backfill
|
||||
let post_lighthouse_custody_backfill = warp::path("lighthouse")
|
||||
.and(warp::path("custody"))
|
||||
.and(warp::path("backfill"))
|
||||
.and(warp::path::end())
|
||||
.and(task_spawner_filter.clone())
|
||||
.and(chain_filter.clone())
|
||||
.then(
|
||||
|task_spawner: TaskSpawner<T::EthSpec>, chain: Arc<BeaconChain<T>>| {
|
||||
task_spawner.blocking_json_task(Priority::P1, move || {
|
||||
// Calling this endpoint will trigger custody backfill once `effective_epoch`
|
||||
// is finalized.
|
||||
let effective_epoch = chain
|
||||
.canonical_head
|
||||
.cached_head()
|
||||
.head_slot()
|
||||
.epoch(T::EthSpec::slots_per_epoch())
|
||||
+ 1;
|
||||
let custody_context = chain.data_availability_checker.custody_context();
|
||||
// Reset validator custody requirements to `effective_epoch` with the latest
|
||||
// cgc requirements.
|
||||
custody_context.reset_validator_custody_requirements(effective_epoch);
|
||||
// Update `DataColumnCustodyInfo` to reflect the custody change.
|
||||
chain.update_data_column_custody_info(Some(
|
||||
effective_epoch.start_slot(T::EthSpec::slots_per_epoch()),
|
||||
));
|
||||
Ok(())
|
||||
})
|
||||
},
|
||||
);
|
||||
|
||||
// GET lighthouse/analysis/block_rewards
|
||||
let get_lighthouse_block_rewards = warp::path("lighthouse")
|
||||
.and(warp::path("analysis"))
|
||||
@@ -4963,6 +4994,7 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
.uor(post_lighthouse_compaction)
|
||||
.uor(post_lighthouse_add_peer)
|
||||
.uor(post_lighthouse_remove_peer)
|
||||
.uor(post_lighthouse_custody_backfill)
|
||||
.recover(warp_utils::reject::handle_rejection),
|
||||
),
|
||||
)
|
||||
|
||||
@@ -60,13 +60,13 @@ pub fn proposer_duties<T: BeaconChainTypes>(
|
||||
.safe_add(1)
|
||||
.map_err(warp_utils::reject::arith_error)?
|
||||
{
|
||||
let (proposers, dependent_root, execution_status, _fork) =
|
||||
let (proposers, _dependent_root, legacy_dependent_root, execution_status, _fork) =
|
||||
compute_proposer_duties_from_head(request_epoch, chain)
|
||||
.map_err(warp_utils::reject::unhandled_error)?;
|
||||
convert_to_api_response(
|
||||
chain,
|
||||
request_epoch,
|
||||
dependent_root,
|
||||
legacy_dependent_root,
|
||||
execution_status.is_optimistic_or_invalid(),
|
||||
proposers,
|
||||
)
|
||||
@@ -116,6 +116,11 @@ fn try_proposer_duties_from_cache<T: BeaconChainTypes>(
|
||||
.beacon_state
|
||||
.proposer_shuffling_decision_root_at_epoch(request_epoch, head_block_root, &chain.spec)
|
||||
.map_err(warp_utils::reject::beacon_state_error)?;
|
||||
let legacy_dependent_root = head
|
||||
.snapshot
|
||||
.beacon_state
|
||||
.legacy_proposer_shuffling_decision_root_at_epoch(request_epoch, head_block_root)
|
||||
.map_err(warp_utils::reject::beacon_state_error)?;
|
||||
let execution_optimistic = chain
|
||||
.is_optimistic_or_invalid_head_block(head_block)
|
||||
.map_err(warp_utils::reject::unhandled_error)?;
|
||||
@@ -129,7 +134,7 @@ fn try_proposer_duties_from_cache<T: BeaconChainTypes>(
|
||||
convert_to_api_response(
|
||||
chain,
|
||||
request_epoch,
|
||||
head_decision_root,
|
||||
legacy_dependent_root,
|
||||
execution_optimistic,
|
||||
indices.to_vec(),
|
||||
)
|
||||
@@ -151,7 +156,7 @@ fn compute_and_cache_proposer_duties<T: BeaconChainTypes>(
|
||||
current_epoch: Epoch,
|
||||
chain: &BeaconChain<T>,
|
||||
) -> Result<ApiDuties, warp::reject::Rejection> {
|
||||
let (indices, dependent_root, execution_status, fork) =
|
||||
let (indices, dependent_root, legacy_dependent_root, execution_status, fork) =
|
||||
compute_proposer_duties_from_head(current_epoch, chain)
|
||||
.map_err(warp_utils::reject::unhandled_error)?;
|
||||
|
||||
@@ -166,7 +171,7 @@ fn compute_and_cache_proposer_duties<T: BeaconChainTypes>(
|
||||
convert_to_api_response(
|
||||
chain,
|
||||
current_epoch,
|
||||
dependent_root,
|
||||
legacy_dependent_root,
|
||||
execution_status.is_optimistic_or_invalid(),
|
||||
indices,
|
||||
)
|
||||
@@ -229,12 +234,18 @@ fn compute_historic_proposer_duties<T: BeaconChainTypes>(
|
||||
|
||||
// We can supply the genesis block root as the block root since we know that the only block that
|
||||
// decides its own root is the genesis block.
|
||||
let dependent_root = state
|
||||
.proposer_shuffling_decision_root(chain.genesis_block_root, &chain.spec)
|
||||
let legacy_dependent_root = state
|
||||
.legacy_proposer_shuffling_decision_root_at_epoch(epoch, chain.genesis_block_root)
|
||||
.map_err(BeaconChainError::from)
|
||||
.map_err(warp_utils::reject::unhandled_error)?;
|
||||
|
||||
convert_to_api_response(chain, epoch, dependent_root, execution_optimistic, indices)
|
||||
convert_to_api_response(
|
||||
chain,
|
||||
epoch,
|
||||
legacy_dependent_root,
|
||||
execution_optimistic,
|
||||
indices,
|
||||
)
|
||||
}
|
||||
|
||||
/// Converts the internal representation of proposer duties into one that is compatible with the
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
use crate::{Config, Context};
|
||||
use beacon_chain::{
|
||||
BeaconChain, BeaconChainTypes,
|
||||
custody_context::NodeCustodyType,
|
||||
test_utils::{BeaconChainHarness, BoxedMutator, Builder, EphemeralHarnessType},
|
||||
};
|
||||
use beacon_processor::{
|
||||
@@ -67,6 +68,20 @@ impl<E: EthSpec> InteractiveTester<E> {
|
||||
None,
|
||||
Config::default(),
|
||||
true,
|
||||
NodeCustodyType::Fullnode,
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
pub async fn new_supernode(spec: Option<ChainSpec>, validator_count: usize) -> Self {
|
||||
Self::new_with_initializer_and_mutator(
|
||||
spec,
|
||||
validator_count,
|
||||
None,
|
||||
None,
|
||||
Config::default(),
|
||||
true,
|
||||
NodeCustodyType::Supernode,
|
||||
)
|
||||
.await
|
||||
}
|
||||
@@ -78,6 +93,7 @@ impl<E: EthSpec> InteractiveTester<E> {
|
||||
mutator: Option<Mutator<E>>,
|
||||
config: Config,
|
||||
use_mock_builder: bool,
|
||||
node_custody_type: NodeCustodyType,
|
||||
) -> Self {
|
||||
let mut harness_builder = BeaconChainHarness::builder(E::default())
|
||||
.spec_or_default(spec.map(Arc::new))
|
||||
@@ -93,6 +109,8 @@ impl<E: EthSpec> InteractiveTester<E> {
|
||||
.fresh_ephemeral_store()
|
||||
};
|
||||
|
||||
harness_builder = harness_builder.node_custody_type(node_custody_type);
|
||||
|
||||
// Add a mutator for the beacon chain builder which will be called in
|
||||
// `HarnessBuilder::build`.
|
||||
if let Some(mutator) = mutator {
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
use beacon_chain::custody_context::NodeCustodyType;
|
||||
use beacon_chain::test_utils::test_spec;
|
||||
use beacon_chain::{
|
||||
GossipVerifiedBlock, IntoGossipVerifiedBlock, WhenSlotSkipped,
|
||||
@@ -1967,6 +1968,7 @@ pub async fn duplicate_block_status_code() {
|
||||
..Config::default()
|
||||
},
|
||||
true,
|
||||
NodeCustodyType::Fullnode,
|
||||
)
|
||||
.await;
|
||||
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
//! Tests for API behaviour across fork boundaries.
|
||||
use beacon_chain::custody_context::NodeCustodyType;
|
||||
use beacon_chain::{
|
||||
StateSkipConfig,
|
||||
test_utils::{DEFAULT_ETH1_BLOCK_HASH, HARNESS_GENESIS_TIME, RelativeSyncCommittee},
|
||||
@@ -426,6 +427,7 @@ async fn bls_to_execution_changes_update_all_around_capella_fork() {
|
||||
None,
|
||||
Default::default(),
|
||||
true,
|
||||
NodeCustodyType::Fullnode,
|
||||
)
|
||||
.await;
|
||||
let harness = &tester.harness;
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
//! Generic tests that make use of the (newer) `InteractiveApiTester`
|
||||
use beacon_chain::custody_context::NodeCustodyType;
|
||||
use beacon_chain::{
|
||||
ChainConfig,
|
||||
chain_config::{DisallowedReOrgOffsets, ReOrgThreshold},
|
||||
@@ -76,6 +77,7 @@ async fn state_by_root_pruned_from_fork_choice() {
|
||||
None,
|
||||
Default::default(),
|
||||
false,
|
||||
NodeCustodyType::Fullnode,
|
||||
)
|
||||
.await;
|
||||
|
||||
@@ -433,6 +435,7 @@ pub async fn proposer_boost_re_org_test(
|
||||
})),
|
||||
Default::default(),
|
||||
false,
|
||||
NodeCustodyType::Fullnode,
|
||||
)
|
||||
.await;
|
||||
let harness = &tester.harness;
|
||||
@@ -1017,10 +1020,9 @@ async fn proposer_duties_with_gossip_tolerance() {
|
||||
assert_eq!(
|
||||
proposer_duties_tolerant_current_epoch.dependent_root,
|
||||
head_state
|
||||
.proposer_shuffling_decision_root_at_epoch(
|
||||
.legacy_proposer_shuffling_decision_root_at_epoch(
|
||||
tolerant_current_epoch,
|
||||
head_block_root,
|
||||
spec
|
||||
)
|
||||
.unwrap()
|
||||
);
|
||||
@@ -1050,6 +1052,68 @@ async fn proposer_duties_with_gossip_tolerance() {
|
||||
);
|
||||
}
|
||||
|
||||
// Test that a request to `lighthouse/custody/backfill` succeeds by verifying that `CustodyContext` and `DataColumnCustodyInfo`
|
||||
// have been updated with the correct values.
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn lighthouse_restart_custody_backfill() {
|
||||
let spec = test_spec::<E>();
|
||||
|
||||
// Skip pre-Fulu.
|
||||
if !spec.is_fulu_scheduled() {
|
||||
return;
|
||||
}
|
||||
|
||||
let validator_count = 24;
|
||||
|
||||
let tester = InteractiveTester::<E>::new_supernode(Some(spec), validator_count).await;
|
||||
let harness = &tester.harness;
|
||||
let spec = &harness.spec;
|
||||
let client = &tester.client;
|
||||
let min_cgc = spec.custody_requirement;
|
||||
let max_cgc = spec.number_of_custody_groups;
|
||||
|
||||
let num_blocks = 2 * E::slots_per_epoch();
|
||||
|
||||
let custody_context = harness.chain.data_availability_checker.custody_context();
|
||||
|
||||
harness.advance_slot();
|
||||
harness
|
||||
.extend_chain_with_sync(
|
||||
num_blocks as usize,
|
||||
BlockStrategy::OnCanonicalHead,
|
||||
AttestationStrategy::AllValidators,
|
||||
SyncCommitteeStrategy::NoValidators,
|
||||
LightClientStrategy::Disabled,
|
||||
)
|
||||
.await;
|
||||
|
||||
let cgc_at_head = custody_context.custody_group_count_at_head(spec);
|
||||
let earliest_data_column_epoch = harness.chain.earliest_custodied_data_column_epoch();
|
||||
|
||||
assert_eq!(cgc_at_head, max_cgc);
|
||||
assert_eq!(earliest_data_column_epoch, None);
|
||||
|
||||
custody_context
|
||||
.update_and_backfill_custody_count_at_epoch(harness.chain.epoch().unwrap(), cgc_at_head);
|
||||
client.post_lighthouse_custody_backfill().await.unwrap();
|
||||
|
||||
let cgc_at_head = custody_context.custody_group_count_at_head(spec);
|
||||
let cgc_at_previous_epoch =
|
||||
custody_context.custody_group_count_at_epoch(harness.chain.epoch().unwrap() - 1, spec);
|
||||
let earliest_data_column_epoch = harness.chain.earliest_custodied_data_column_epoch();
|
||||
|
||||
// `DataColumnCustodyInfo` should have been updated to the head epoch
|
||||
assert_eq!(
|
||||
earliest_data_column_epoch,
|
||||
Some(harness.chain.epoch().unwrap() + 1)
|
||||
);
|
||||
// Cgc requirements should have stayed the same at head
|
||||
assert_eq!(cgc_at_head, max_cgc);
|
||||
// Cgc requirements at the previous epoch should be `min_cgc`
|
||||
// This allows for custody backfill to re-fetch columns for this epoch.
|
||||
assert_eq!(cgc_at_previous_epoch, min_cgc);
|
||||
}
|
||||
|
||||
// Test that a request for next epoch proposer duties suceeds when the current slot clock is within
|
||||
// gossip clock disparity (500ms) of the new epoch.
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
|
||||
Reference in New Issue
Block a user