use beacon_chain::test_utils::{
    AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType,
};
use beacon_chain::validator_monitor::{ValidatorMonitorConfig, MISSED_BLOCK_LAG_SLOTS};
use logging::test_logger;
use std::sync::LazyLock;
use types::{Epoch, EthSpec, Keypair, MainnetEthSpec, PublicKeyBytes, Slot};

// Should ideally be divisible by 3.
pub const VALIDATOR_COUNT: usize = 48;

/// A cached set of keys.
static KEYPAIRS: LazyLock<Vec<Keypair>> =
    LazyLock::new(|| types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT));

type E = MainnetEthSpec;

fn get_harness(
    validator_count: usize,
    validator_indexes_to_monitor: Vec<usize>,
) -> BeaconChainHarness<EphemeralHarnessType<E>> {
    let harness = BeaconChainHarness::builder(MainnetEthSpec)
        .default_spec()
        .keypairs(KEYPAIRS[0..validator_count].to_vec())
        .logger(test_logger())
        .fresh_ephemeral_store()
        .mock_execution_layer()
        .validator_monitor_config(ValidatorMonitorConfig {
            validators: validator_indexes_to_monitor
                .iter()
                .map(|i| PublicKeyBytes::from(KEYPAIRS[*i].pk.clone()))
                .collect(),
            ..<_>::default()
        })
        .build();

    harness.advance_slot();

    harness
}
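
// Example (illustrative sketch): build a harness that monitors the first two
// deterministic validators, then read the monitor back off the chain:
//
//     let harness = get_harness(VALIDATOR_COUNT, vec![0, 1]);
//     let monitor = harness.chain.validator_monitor.read();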

// Regression test for off-by-one caching issue in missed block detection.
#[tokio::test]
async fn missed_blocks_across_epochs() {
    let slots_per_epoch = E::slots_per_epoch();
    let all_validators = (0..VALIDATOR_COUNT).collect::<Vec<_>>();

    let harness = get_harness(VALIDATOR_COUNT, vec![]);
    let validator_monitor = &harness.chain.validator_monitor;
    let mut genesis_state = harness.get_current_state();
    let genesis_state_root = genesis_state.update_tree_hash_cache().unwrap();
    let genesis_block_root = harness.head_block_root();

    // Skip a slot in the first epoch (to prime the cache inside the missed block function) and
    // then at a different offset in the 2nd epoch. The missed block in the 2nd epoch MUST NOT
    // reuse the cache from the first epoch.
    let first_skip_offset = 3;
    let second_skip_offset = slots_per_epoch / 2;
    assert_ne!(first_skip_offset, second_skip_offset);
    let first_skip_slot = Slot::new(first_skip_offset);
    let second_skip_slot = Slot::new(slots_per_epoch + second_skip_offset);
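    // With mainnet's 32 slots per epoch this skips slot 3 (offset 3 in epoch 0) and
    // slot 48 (offset 16 in epoch 1), i.e. a different offset in each epoch.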

    let slots = (1..2 * slots_per_epoch)
        .map(Slot::new)
        .filter(|slot| *slot != first_skip_slot && *slot != second_skip_slot)
        .collect::<Vec<_>>();

    let (block_roots_by_slot, state_roots_by_slot, _, head_state) = harness
        .add_attested_blocks_at_slots(genesis_state, genesis_state_root, &slots, &all_validators)
        .await;

    // Prime the proposer shuffling cache.
    let mut proposer_shuffling_cache = harness.chain.beacon_proposer_cache.lock();
    for epoch in [0, 1].into_iter().map(Epoch::new) {
        let start_slot = epoch.start_slot(slots_per_epoch) + 1;
        let state = harness
            .get_hot_state(state_roots_by_slot[&start_slot])
            .unwrap();
        let decision_root = state
            .proposer_shuffling_decision_root(genesis_block_root)
            .unwrap();
        proposer_shuffling_cache
            .insert(
                epoch,
                decision_root,
                state
                    .get_beacon_proposer_indices(&harness.chain.spec)
                    .unwrap(),
                state.fork(),
            )
            .unwrap();
    }
    drop(proposer_shuffling_cache);
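    // `process_valid_state` below relies on these cached duties when checking for missed
    // blocks (see the "prime the cache" comment above), so both epochs are inserted first.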

    // Monitor the validator that proposed the block at the same offset in the 0th epoch as the
    // skip in the 1st epoch.
    let innocent_proposer_slot = Slot::new(second_skip_offset);
    let innocent_proposer = harness
        .get_block(block_roots_by_slot[&innocent_proposer_slot])
        .unwrap()
        .message()
        .proposer_index();

    let mut vm_write = validator_monitor.write();

    // Call `process_valid_state` once to update validator indices.
    vm_write.process_valid_state(head_state.current_epoch(), &head_state, &harness.chain.spec);
    // Start monitoring the innocent validator.
    vm_write.add_validator_pubkey(KEYPAIRS[innocent_proposer as usize].pk.compress());
    // Check for missed blocks.
    vm_write.process_valid_state(head_state.current_epoch(), &head_state, &harness.chain.spec);

    // My client is innocent, your honour!
    assert_eq!(
        vm_write.get_monitored_validator_missed_block_count(innocent_proposer),
        0
    );
}

#[tokio::test]
async fn missed_blocks_basic() {
    let validator_count = 16;

    let slots_per_epoch = E::slots_per_epoch();

    let nb_epoch_to_simulate = Epoch::new(2);

    // Generate 63 slots (2 epochs * 32 slots per epoch - 1).
    let initial_blocks = slots_per_epoch * nb_epoch_to_simulate.as_u64() - 1;

    // 1st scenario //
    //
    // The missed block happens when slot and prev_slot are in the same epoch.
    let harness1 = get_harness(validator_count, vec![]);
    harness1
        .extend_chain(
            initial_blocks as usize,
            BlockStrategy::OnCanonicalHead,
            AttestationStrategy::AllValidators,
        )
        .await;

    let mut _state = &mut harness1.get_current_state();
    let mut epoch = _state.current_epoch();

    // We have a total of 63 slots and we want slot 57 to be a missed block,
    // which is slot_in_epoch=25 in epoch=1.
    let mut idx = initial_blocks - 6;
    let mut slot = Slot::new(idx);
    let mut slot_in_epoch = slot % slots_per_epoch;
    let mut prev_slot = Slot::new(idx - 1);
    let mut duplicate_block_root = *_state.block_roots().get(idx as usize).unwrap();
    let mut validator_indexes = _state.get_beacon_proposer_indices(&harness1.spec).unwrap();
    let mut missed_block_proposer = validator_indexes[slot_in_epoch.as_usize()];
    let mut proposer_shuffling_decision_root = _state
        .proposer_shuffling_decision_root(duplicate_block_root)
        .unwrap();
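    // Concretely: idx = 63 - 6 = 57, so slot_in_epoch = 57 % 32 = 25 and prev_slot = 56,
    // both inside epoch 1.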

    let beacon_proposer_cache = harness1
        .chain
        .validator_monitor
        .read()
        .get_beacon_proposer_cache();

    // Fill the cache with the proposers for the current epoch.
    assert_eq!(
        beacon_proposer_cache.lock().insert(
            epoch,
            proposer_shuffling_decision_root,
            validator_indexes,
            _state.fork()
        ),
        Ok(())
    );

    // Modify the block root of the previous slot to be the same as the block root of the
    // current slot in order to simulate a missed block.
    assert_eq!(
        _state.set_block_root(prev_slot, duplicate_block_root),
        Ok(())
    );

    {
        // Validate the state, which calls the function responsible for adding missed
        // blocks to the validator monitor.
        let mut validator_monitor = harness1.chain.validator_monitor.write();

        validator_monitor.add_validator_pubkey(KEYPAIRS[missed_block_proposer].pk.compress());
        validator_monitor.process_valid_state(nb_epoch_to_simulate, _state, &harness1.chain.spec);

        // We should have one entry in the missed blocks map.
        assert_eq!(
            validator_monitor
                .get_monitored_validator_missed_block_count(missed_block_proposer as u64),
            1,
        );
    }

    // 2nd scenario //
    //
    // The missed block happens when slot and prev_slot are not in the same epoch,
    // making sure that the cache reloads when the epoch changes. In this scenario the
    // slot that missed a block is the first slot of the epoch.
    let harness2 = get_harness(validator_count, vec![]);
    let advance_slot_by = 9;
    harness2
        .extend_chain(
            (initial_blocks + advance_slot_by) as usize,
            BlockStrategy::OnCanonicalHead,
            AttestationStrategy::AllValidators,
        )
        .await;

    let mut _state2 = &mut harness2.get_current_state();
    epoch = _state2.current_epoch();

    // We have a total of 72 slots and we want slot 64 to be the missed block,
    // which is the first slot (slot_in_epoch=0) of epoch=2.
    idx = initial_blocks + advance_slot_by - 8;
    slot = Slot::new(idx);
    prev_slot = Slot::new(idx - 1);
    slot_in_epoch = slot % slots_per_epoch;
    duplicate_block_root = *_state2.block_roots().get(idx as usize).unwrap();
    validator_indexes = _state2.get_beacon_proposer_indices(&harness2.spec).unwrap();
    missed_block_proposer = validator_indexes[slot_in_epoch.as_usize()];
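    // Concretely: idx = 63 + 9 - 8 = 64, so slot_in_epoch = 0 and prev_slot = 63 is the
    // last slot of epoch 1, exercising the cache reload across the epoch boundary.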

    let beacon_proposer_cache = harness2
        .chain
        .validator_monitor
        .read()
        .get_beacon_proposer_cache();

    // Fill the cache with the proposers for the current epoch.
    assert_eq!(
        beacon_proposer_cache.lock().insert(
            epoch,
            duplicate_block_root,
            validator_indexes.clone(),
            _state2.fork()
        ),
        Ok(())
    );

    // Modify the block root of the previous slot to match the current slot's block root,
    // simulating a missed block.
    assert_eq!(
        _state2.set_block_root(prev_slot, duplicate_block_root),
        Ok(())
    );

    {
        // Validate the state, which calls the function responsible for adding missed
        // blocks to the validator monitor.
        let mut validator_monitor2 = harness2.chain.validator_monitor.write();
        validator_monitor2.add_validator_pubkey(KEYPAIRS[missed_block_proposer].pk.compress());
        validator_monitor2.process_valid_state(epoch, _state2, &harness2.chain.spec);
        // We should have one entry in the missed blocks map.
        assert_eq!(
            validator_monitor2
                .get_monitored_validator_missed_block_count(missed_block_proposer as u64),
            1
        );

        // 3rd scenario //
        //
        // A missed block happens, but the validator is not monitored, so it should not
        // be flagged as a missed block.
        while validator_indexes[(idx % slots_per_epoch) as usize] == missed_block_proposer
            && idx / slots_per_epoch == epoch.as_u64()
        {
            idx += 1;
        }
        slot = Slot::new(idx);
        prev_slot = Slot::new(idx - 1);
        slot_in_epoch = slot % slots_per_epoch;
        duplicate_block_root = *_state2.block_roots().get(idx as usize).unwrap();
        let second_missed_block_proposer = validator_indexes[slot_in_epoch.as_usize()];

        // This test may fail if we can't find another distinct proposer in the same epoch.
        // However, this should be vanishingly unlikely: P ~= (1/16)^32 = 2^-128 ~= 2.9e-39.
        assert_ne!(missed_block_proposer, second_missed_block_proposer);

        assert_eq!(
            _state2.set_block_root(prev_slot, duplicate_block_root),
            Ok(())
        );

        // Validate the state, which calls the function responsible for adding missed
        // blocks to the validator monitor.
        validator_monitor2.process_valid_state(epoch, _state2, &harness2.chain.spec);

        // We shouldn't have any entry in the missed blocks map.
        assert_eq!(
            validator_monitor2
                .get_monitored_validator_missed_block_count(second_missed_block_proposer as u64),
            0
        );
    }

    // 4th scenario //
    //
    // A missed block happens within MISSED_BLOCK_LAG_SLOTS of state.slot,
    // so it shouldn't be flagged as a missed block.
    let harness3 = get_harness(validator_count, vec![]);
    harness3
        .extend_chain(
            slots_per_epoch as usize,
            BlockStrategy::OnCanonicalHead,
            AttestationStrategy::AllValidators,
        )
        .await;

    let mut _state3 = &mut harness3.get_current_state();
    epoch = _state3.current_epoch();

    // We have a total of 32 slots and we want slot 30 to be a missed block,
    // which is slot_in_epoch=30 in epoch=0.
    idx = slots_per_epoch - MISSED_BLOCK_LAG_SLOTS as u64 + 2;
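    // Concretely: the comment above implies MISSED_BLOCK_LAG_SLOTS = 4, giving
    // idx = 32 - 4 + 2 = 30, only two slots behind the head and inside the lag window.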
    slot = Slot::new(idx);
    slot_in_epoch = slot % slots_per_epoch;
    prev_slot = Slot::new(idx - 1);
    duplicate_block_root = *_state3.block_roots().get(idx as usize).unwrap();
    validator_indexes = _state3.get_beacon_proposer_indices(&harness3.spec).unwrap();
    missed_block_proposer = validator_indexes[slot_in_epoch.as_usize()];
    proposer_shuffling_decision_root = _state3
        .proposer_shuffling_decision_root_at_epoch(epoch, duplicate_block_root)
        .unwrap();

    let beacon_proposer_cache = harness3
        .chain
        .validator_monitor
        .read()
        .get_beacon_proposer_cache();

    // Fill the cache with the proposers for the current epoch.
    assert_eq!(
        beacon_proposer_cache.lock().insert(
            epoch,
            proposer_shuffling_decision_root,
            validator_indexes,
            _state3.fork()
        ),
        Ok(())
    );

    // Modify the block root of the previous slot to match the current slot's block root,
    // simulating a missed block.
    assert_eq!(
        _state3.set_block_root(prev_slot, duplicate_block_root),
        Ok(())
    );

    {
        // Validate the state, which calls the function responsible for adding missed
        // blocks to the validator monitor.
        let mut validator_monitor3 = harness3.chain.validator_monitor.write();
        validator_monitor3.add_validator_pubkey(KEYPAIRS[missed_block_proposer].pk.compress());
        validator_monitor3.process_valid_state(epoch, _state3, &harness3.chain.spec);

        // We shouldn't have any entry in the missed blocks map.
        assert_eq!(
            validator_monitor3
                .get_monitored_validator_missed_block_count(missed_block_proposer as u64),
            0
        );
    }
}