Merge remote-tracking branch 'upstream/unstable' into gloas-containers

Mark Mackey
2025-10-22 16:05:06 -05:00
78 changed files with 5569 additions and 615 deletions


@@ -946,3 +946,110 @@ async fn queue_attestations_from_http() {
    attestation_future.await.unwrap();
}

// Test that a request for next epoch proposer duties succeeds when the current slot clock is within
// gossip clock disparity (500ms) of the new epoch.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn proposer_duties_with_gossip_tolerance() {
    let validator_count = 24;
    let tester = InteractiveTester::<E>::new(None, validator_count).await;
    let harness = &tester.harness;
    let spec = &harness.spec;
    let client = &tester.client;
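
    // Build four epochs of blocks minus one slot so the head lands on the final slot of an epoch,
    // making `num_initial + 1` the first slot of the following epoch.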
    let num_initial = 4 * E::slots_per_epoch() - 1;
    let next_epoch_start_slot = Slot::new(num_initial + 1);

    harness.advance_slot();
    harness
        .extend_chain_with_sync(
            num_initial as usize,
            BlockStrategy::OnCanonicalHead,
            AttestationStrategy::AllValidators,
            SyncCommitteeStrategy::NoValidators,
            LightClientStrategy::Disabled,
        )
        .await;

    assert_eq!(harness.chain.slot().unwrap(), num_initial);

    // Set the clock to just before the next epoch.
    harness.chain.slot_clock.advance_time(
        Duration::from_secs(spec.seconds_per_slot) - spec.maximum_gossip_clock_disparity(),
    );
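    // The wall clock is now within gossip clock disparity of the epoch boundary, so reading the
    // slot with future tolerance should already yield the first slot of the next epoch.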
    assert_eq!(
        harness
            .chain
            .slot_clock
            .now_with_future_tolerance(spec.maximum_gossip_clock_disparity())
            .unwrap(),
        next_epoch_start_slot
    );

    let head_state = harness.get_current_state();
    let head_block_root = harness.head_block_root();
    let tolerant_current_epoch = next_epoch_start_slot.epoch(E::slots_per_epoch());
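    // `tolerant_current_epoch` is the epoch that becomes "current" once gossip tolerance is
    // applied, i.e. the next epoch relative to the un-tolerant slot clock.
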
    // This is a regression test for the bug described here:
    // https://github.com/sigp/lighthouse/pull/8130/files#r2386594566
    //
    // To trigger it, we need to prime the proposer shuffling cache with an incorrect entry which
    // the previous code would be liable to lookup due to the bugs in its decision root calculation.
    let wrong_decision_root = head_state
        .proposer_shuffling_decision_root(head_block_root, spec)
        .unwrap();
    let wrong_proposer_indices = vec![0; E::slots_per_epoch() as usize];
    harness
        .chain
        .beacon_proposer_cache
        .lock()
        .insert(
            tolerant_current_epoch,
            wrong_decision_root,
            wrong_proposer_indices.clone(),
            head_state.fork(),
        )
        .unwrap();
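    // A lookup keyed by the head's current-epoch decision root would hit this entry and return
    // the all-zero proposer indices planted above.
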
    // Request the proposer duties.
    let proposer_duties_tolerant_current_epoch = client
        .get_validator_duties_proposer(tolerant_current_epoch)
        .await
        .unwrap();
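
    // The dependent root must be the decision root computed for the tolerant (next) epoch, and
    // the returned proposers must not be the planted all-zero indices.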
    assert_eq!(
        proposer_duties_tolerant_current_epoch.dependent_root,
        head_state
            .proposer_shuffling_decision_root_at_epoch(
                tolerant_current_epoch,
                head_block_root,
                spec
            )
            .unwrap()
    );
    assert_ne!(
        proposer_duties_tolerant_current_epoch
            .data
            .iter()
            .map(|data| data.validator_index as usize)
            .collect::<Vec<_>>(),
        wrong_proposer_indices,
    );

    // We should get the exact same result after properly advancing into the epoch.
    harness
        .chain
        .slot_clock
        .advance_time(spec.maximum_gossip_clock_disparity());
    assert_eq!(harness.chain.slot().unwrap(), next_epoch_start_slot);

    let proposer_duties_current_epoch = client
        .get_validator_duties_proposer(tolerant_current_epoch)
        .await
        .unwrap();

    assert_eq!(
        proposer_duties_tolerant_current_epoch,
        proposer_duties_current_epoch
    );
}


@@ -1,3 +1,4 @@
+use beacon_chain::custody_context::NodeCustodyType;
 use beacon_chain::test_utils::RelativeSyncCommittee;
 use beacon_chain::{
     BeaconChain, ChainConfig, StateSkipConfig, WhenSlotSkipped,
@@ -90,7 +91,7 @@ struct ApiTester {
 struct ApiTesterConfig {
     spec: ChainSpec,
     retain_historic_states: bool,
-    import_all_data_columns: bool,
+    node_custody_type: NodeCustodyType,
 }

 impl Default for ApiTesterConfig {
@@ -100,7 +101,7 @@ impl Default for ApiTesterConfig {
         Self {
             spec,
             retain_historic_states: false,
-            import_all_data_columns: false,
+            node_custody_type: NodeCustodyType::Fullnode,
         }
     }
 }
@@ -139,7 +140,7 @@ impl ApiTester {
             .deterministic_withdrawal_keypairs(VALIDATOR_COUNT)
             .fresh_ephemeral_store()
             .mock_execution_layer()
-            .import_all_data_columns(config.import_all_data_columns)
+            .node_custody_type(config.node_custody_type)
             .build();

         harness
@@ -1369,12 +1370,14 @@ impl ApiTester {
                 .ok()
                 .map(|(state, _execution_optimistic, _finalized)| state);

-            let result = self
+            let result = match self
                 .client
                 .get_beacon_states_pending_consolidations(state_id.0)
                 .await
-                .unwrap()
-                .map(|res| res.data);
+            {
+                Ok(response) => response,
+                Err(e) => panic!("query failed incorrectly: {e:?}"),
+            };

             if result.is_none() && state_opt.is_none() {
                 continue;
@@ -1383,7 +1386,12 @@ impl ApiTester {
             let state = state_opt.as_mut().expect("result should be none");
             let expected = state.pending_consolidations().unwrap();

-            assert_eq!(result.unwrap(), expected.to_vec());
+            let response = result.unwrap();
+            assert_eq!(response.data(), &expected.to_vec());
+
+            // Check that the version header is returned in the response
+            let fork_name = state.fork_name(&self.chain.spec).unwrap();
+            assert_eq!(response.version(), Some(fork_name),);
         }

         self
@@ -7835,8 +7843,7 @@ async fn get_blobs_post_fulu_supernode() {
     let mut config = ApiTesterConfig {
         retain_historic_states: false,
         spec: E::default_spec(),
-        // For supernode, we import all data columns
-        import_all_data_columns: true,
+        node_custody_type: NodeCustodyType::Supernode,
     };
     config.spec.altair_fork_epoch = Some(Epoch::new(0));
     config.spec.bellatrix_fork_epoch = Some(Epoch::new(0));