* First pass
* Add restrictions to RuntimeVariableList api
* Use empty_uninitialized and fix warnings
* Fix some todos
* Merge branch 'unstable' into max-blobs-preset
* Fix take impl on RuntimeFixedList
* cleanup
* Fix test compilations
* Fix some more tests
* Fix test from unstable
* Merge branch 'unstable' into max-blobs-preset
* SingleAttestation
* Add post attestation v2 endpoint logic to attestation service
* Merge branch 'unstable' of https://github.com/sigp/lighthouse into single_attestation
* Implement "Bugfix and more withdrawal tests"
* Implement "Add missed exit checks to consolidation processing"
* Implement "Update initial earliest_exit_epoch calculation"
* Implement "Limit consolidating balance by validator.effective_balance"
* Implement "Use 16-bit random value in validator filter"
* Implement "Do not change creds type on consolidation"
* Some tests and fixed attestation calc
* Merge branch 'unstable' of https://github.com/sigp/lighthouse into single_attestation
* Rename PendingPartialWithdraw index field to validator_index
* Skip slots to get test to pass and add TODO
* Implement "Synchronously check all transactions to have non-zero length"
* Merge remote-tracking branch 'origin/unstable' into max-blobs-preset
* Remove footgun function
* Minor simplifications
* Move from preset to config
* Fix typo
* Revert "Remove footgun function" (reverts commit de01f923c7)
* Try fixing tests
* Implement "bump minimal preset MAX_BLOB_COMMITMENTS_PER_BLOCK and KZG_COMMITMENT_INCLUSION_PROOF_DEPTH"
* Thread through ChainSpec
* Fix release tests
* Move RuntimeFixedVector into module and rename
* Add test
* Merge branch 'unstable' of https://github.com/sigp/lighthouse into single_attestation
* Added more test coverage, simplified Attestation conversion, and other minor refactors
* Removed unused codepaths
* Fix failing test
* Implement "Remove post-altair `initialize_beacon_state_from_eth1` from specs"
* Update preset YAML
* Remove empty RuntimeVarList awfulness
* Make max_blobs_per_block a config parameter (#6329). Squashed commit of the following:
  * commit 04b3743ec1 (Michael Sproul <michael@sigmaprime.io>, Mon Jan 6 17:36:58 2025 +1100): Add test
  * commit 440e854199 (Michael Sproul <michael@sigmaprime.io>, Mon Jan 6 17:24:50 2025 +1100): Move RuntimeFixedVector into module and rename
  * commit f66e179a40 (Michael Sproul <michael@sigmaprime.io>, Mon Jan 6 17:17:17 2025 +1100): Fix release tests
  * commit e4bfe71cd1 (Michael Sproul <michael@sigmaprime.io>, Mon Jan 6 17:05:30 2025 +1100): Thread through ChainSpec
  * commit 063b79c16a (Michael Sproul <michael@sigmaprime.io>, Mon Jan 6 15:32:16 2025 +1100): Try fixing tests
  * commit 88bedf09bc (Michael Sproul <michael@sigmaprime.io>, Mon Jan 6 15:04:37 2025 +1100): Revert "Remove footgun function" (reverts commit de01f923c7)
  * commit 32483d385b (Michael Sproul <michael@sigmaprime.io>, Mon Jan 6 15:04:32 2025 +1100): Fix typo
  * commit 2e86585b47 (Michael Sproul <michael@sigmaprime.io>, Mon Jan 6 15:04:15 2025 +1100): Move from preset to config
  * commit 1095d60a40 (Michael Sproul <michael@sigmaprime.io>, Mon Jan 6 14:38:40 2025 +1100): Minor simplifications
  * commit de01f923c7 (Michael Sproul <michael@sigmaprime.io>, Mon Jan 6 14:06:57 2025 +1100): Remove footgun function
  * commit 0c2c8c4224 (merge of 21ecb58ff and f51a292f7; Michael Sproul <michael@sigmaprime.io>, Mon Jan 6 14:02:50 2025 +1100): Merge remote-tracking branch 'origin/unstable' into max-blobs-preset
  * commit f51a292f77 (Daniel Knopik <107140945+dknopik@users.noreply.github.com>, Fri Jan 3 20:27:21 2025 +0100): fully lint only explicitly to avoid unnecessary rebuilds (#6753)
  * commit 7e0cddef32 (Akihito Nakano <sora.akatsuki@gmail.com>, Tue Dec 24 10:38:56 2024 +0900): Make sure we have fanout peers when publish (#6738); ensure that `fanout_peers` is always non-empty if it's `Some`
  * commit 21ecb58ff8 (merge of 2fcb2935e and 9aefb5539; Pawan Dhananjay <pawandhananjay@gmail.com>, Mon Oct 21 14:46:00 2024 -0700): Merge branch 'unstable' into max-blobs-preset
  * commit 2fcb2935ec (Pawan Dhananjay <pawandhananjay@gmail.com>, Fri Sep 6 18:28:31 2024 -0700): Fix test from unstable
  * commit 12c6ef118a (Pawan Dhananjay <pawandhananjay@gmail.com>, Wed Sep 4 16:16:36 2024 -0700): Fix some more tests
  * commit d37733b846 (Pawan Dhananjay <pawandhananjay@gmail.com>, Wed Sep 4 12:47:36 2024 -0700): Fix test compilations
  * commit 52bb581e07 (Pawan Dhananjay <pawandhananjay@gmail.com>, Tue Sep 3 18:38:19 2024 -0700): cleanup
  * commit e71020e3e6 (Pawan Dhananjay <pawandhananjay@gmail.com>, Tue Sep 3 17:16:10 2024 -0700): Fix take impl on RuntimeFixedList
  * commit 13f9bba647 (merge of 60100fc6b and 4e675cf5d; Pawan Dhananjay <pawandhananjay@gmail.com>, Tue Sep 3 16:08:59 2024 -0700): Merge branch 'unstable' into max-blobs-preset
  * commit 60100fc6be (Pawan Dhananjay <pawandhananjay@gmail.com>, Fri Aug 30 16:04:11 2024 -0700): Fix some todos
  * commit a9cb329a22 (Pawan Dhananjay <pawandhananjay@gmail.com>, Fri Aug 30 15:54:00 2024 -0700): Use empty_uninitialized and fix warnings
  * commit 4dc6e6515e (Pawan Dhananjay <pawandhananjay@gmail.com>, Fri Aug 30 15:53:18 2024 -0700): Add restrictions to RuntimeVariableList api
  * commit 25feedfde3 (Pawan Dhananjay <pawandhananjay@gmail.com>, Thu Aug 29 16:11:19 2024 -0700): First pass
* Fix tests
* Implement max_blobs_per_block_electra
* Fix config issues
* Simplify BlobSidecarListFromRoot
* Disable PeerDAS tests
* Cleanup single attestation imports
* Fix some single attestation network plumbing
* Merge remote-tracking branch 'origin/unstable' into max-blobs-preset
* Bump quota to account for new target (6)
* Remove clone
* Fix issue from review
* Try to remove ugliness
* Merge branch 'unstable' into max-blobs-preset
* Merge remote-tracking branch 'origin/unstable' into electra-alpha10
* Merge commit '04b3743ec1e0b650269dd8e58b540c02430d1c0d' into electra-alpha10
* Merge remote-tracking branch 'pawan/max-blobs-preset' into electra-alpha10
* Update tests to v1.5.0-beta.0
* Merge remote-tracking branch 'origin/electra-alpha10' into single_attestation
* Fix some tests
* Cargo fmt
* lint
* fmt
* Resolve merge conflicts
* Merge branch 'electra-alpha10' of https://github.com/sigp/lighthouse into single_attestation
* lint
* Linting
* fmt
* Merge branch 'electra-alpha10' of https://github.com/sigp/lighthouse into single_attestation
* Fmt
* Fix test and add TODO
* Gracefully handle slashed proposers in fork choice tests
* Merge remote-tracking branch 'origin/unstable' into electra-alpha10
* Keep latest changes from max_blobs_per_block PR in codec.rs
* Revert a few more regressions and add a comment
* Merge branch 'electra-alpha10' of https://github.com/sigp/lighthouse into single_attestation
* Disable more DAS tests
* Improve validator monitor test a little
* Make test more robust
* Fix sync test that didn't understand blobs
* Fill out cropped comment
* Merge remote-tracking branch 'origin/electra-alpha10' into single_attestation
* Merge remote-tracking branch 'origin/unstable' into single_attestation
* Merge remote-tracking branch 'origin/unstable' into single_attestation
* Merge branch 'unstable' of https://github.com/sigp/lighthouse into single_attestation
* publish_attestations should accept Either<Attestation, SingleAttestation>
* Log an error when failing to convert to SingleAttestation
* Use Cow to avoid clone
* Avoid reconverting to SingleAttestation
* Tweak VC error message
* Update comments
* Update comments
* Pass in single attestation as ref to subnet id calculation method
* Improved API, new error variants and other minor tweaks
* Fix single_attestation event topic boilerplate
* Fix SSE event failure
* Add single_attestation event topic test coverage
548 lines
19 KiB
Rust
//! Tests for API behaviour across fork boundaries.
use beacon_chain::{
    test_utils::{RelativeSyncCommittee, DEFAULT_ETH1_BLOCK_HASH, HARNESS_GENESIS_TIME},
    StateSkipConfig,
};
use eth2::types::{IndexedErrorMessage, StateId, SyncSubcommittee};
use execution_layer::test_utils::generate_genesis_header;
use genesis::{bls_withdrawal_credentials, interop_genesis_state_with_withdrawal_credentials};
use http_api::test_utils::*;
use std::collections::HashSet;
use types::{
    test_utils::{generate_deterministic_keypair, generate_deterministic_keypairs},
    Address, ChainSpec, Epoch, EthSpec, FixedBytesExtended, Hash256, MinimalEthSpec, Slot,
};

type E = MinimalEthSpec;

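/// Build a minimal spec with the Altair fork scheduled at the given epoch.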
fn altair_spec(altair_fork_epoch: Epoch) -> ChainSpec {
    let mut spec = E::default_spec();
    spec.altair_fork_epoch = Some(altair_fork_epoch);
    spec
}

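/// Build a minimal spec with Altair and Bellatrix at genesis and Capella at the given epoch.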
fn capella_spec(capella_fork_epoch: Epoch) -> ChainSpec {
    let mut spec = E::default_spec();
    spec.altair_fork_epoch = Some(Epoch::new(0));
    spec.bellatrix_fork_epoch = Some(Epoch::new(0));
    spec.capella_fork_epoch = Some(capella_fork_epoch);
    spec
}

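/// Check that sync committee duties become available at the Altair fork epoch even when the fork
/// slot is skipped, remain unchanged once a block is applied, and are rejected with a 400 for the
/// period after the next sync committee period.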
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn sync_committee_duties_across_fork() {
    let validator_count = E::sync_committee_size();
    let fork_epoch = Epoch::new(8);
    let spec = altair_spec(fork_epoch);
    let tester = InteractiveTester::<E>::new(Some(spec.clone()), validator_count).await;
    let harness = &tester.harness;
    let client = &tester.client;

    let all_validators = harness.get_all_validators();
    let all_validators_u64 = all_validators.iter().map(|x| *x as u64).collect::<Vec<_>>();

    assert_eq!(harness.get_current_slot(), 0);

    // Prior to the fork the endpoint should return an empty vec.
    let early_duties = client
        .post_validator_duties_sync(fork_epoch - 1, &all_validators_u64)
        .await
        .unwrap()
        .data;
    assert!(early_duties.is_empty());

    // If there's a skip slot at the fork slot, the endpoint should return duties, even
    // though the head state hasn't transitioned yet.
    let fork_slot = fork_epoch.start_slot(E::slots_per_epoch());
    let (genesis_state, genesis_state_root) = harness.get_current_state_and_root();
    let (_, mut state) = harness
        .add_attested_block_at_slot(
            fork_slot - 1,
            genesis_state,
            genesis_state_root,
            &all_validators,
        )
        .await
        .unwrap();

    harness.advance_slot();
    assert_eq!(harness.get_current_slot(), fork_slot);

    let sync_duties = client
        .post_validator_duties_sync(fork_epoch, &all_validators_u64)
        .await
        .unwrap()
        .data;
    assert_eq!(sync_duties.len(), E::sync_committee_size());

    // After applying a block at the fork slot the duties should remain unchanged.
    let state_root = state.canonical_root().unwrap();
    harness
        .add_attested_block_at_slot(fork_slot, state, state_root, &all_validators)
        .await
        .unwrap();

    assert_eq!(
        client
            .post_validator_duties_sync(fork_epoch, &all_validators_u64)
            .await
            .unwrap()
            .data,
        sync_duties
    );

    // Sync duties should also be available for the next period.
    let current_period = fork_epoch.sync_committee_period(&spec).unwrap();
    let next_period_epoch = spec.epochs_per_sync_committee_period * (current_period + 1);

    let next_period_duties = client
        .post_validator_duties_sync(next_period_epoch, &all_validators_u64)
        .await
        .unwrap()
        .data;
    assert_eq!(next_period_duties.len(), E::sync_committee_size());

    // Sync duties should *not* be available for the period after the next period.
    // We expect a 400 (bad request) response.
    let next_next_period_epoch = spec.epochs_per_sync_committee_period * (current_period + 2);
    assert_eq!(
        client
            .post_validator_duties_sync(next_next_period_epoch, &all_validators_u64)
            .await
            .unwrap_err()
            .status()
            .unwrap(),
        400
    );
}

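/// Check that attestations and signed aggregates produced at the fork slot (reached via skip
/// slots) are accepted by both the v1 and v2 publish endpoints.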
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn attestations_across_fork_with_skip_slots() {
    let validator_count = E::sync_committee_size();
    let fork_epoch = Epoch::new(8);
    let spec = altair_spec(fork_epoch);
    let tester = InteractiveTester::<E>::new(Some(spec.clone()), validator_count).await;
    let harness = &tester.harness;
    let client = &tester.client;

    let all_validators = harness.get_all_validators();

    let fork_slot = fork_epoch.start_slot(E::slots_per_epoch());
    let mut fork_state = harness
        .chain
        .state_at_slot(fork_slot, StateSkipConfig::WithStateRoots)
        .unwrap();
    let fork_state_root = fork_state.update_tree_hash_cache().unwrap();

    harness.set_current_slot(fork_slot);

    let attestations = harness.make_attestations(
        &all_validators,
        &fork_state,
        fork_state_root,
        (*fork_state.get_block_root(fork_slot - 1).unwrap()).into(),
        fork_slot,
    );

    let unaggregated_attestations = attestations
        .iter()
        .flat_map(|(atts, _)| atts.iter().map(|(att, _)| att.clone()))
        .collect::<Vec<_>>();

    assert!(!unaggregated_attestations.is_empty());
    let fork_name = harness.spec.fork_name_at_slot::<E>(fork_slot);
    client
        .post_beacon_pool_attestations_v1(&unaggregated_attestations)
        .await
        .unwrap();

    let signed_aggregates = attestations
        .into_iter()
        .filter_map(|(_, op_aggregate)| op_aggregate)
        .collect::<Vec<_>>();
    assert!(!signed_aggregates.is_empty());

    client
        .post_validator_aggregate_and_proof_v1(&signed_aggregates)
        .await
        .unwrap();
    client
        .post_validator_aggregate_and_proof_v2(&signed_aggregates, fork_name)
        .await
        .unwrap();
}

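/// Check that sync committee messages and signed contributions produced at the fork slot
/// (reached via skip slots) are accepted by the beacon pool and contribution endpoints.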
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn sync_contributions_across_fork_with_skip_slots() {
    let validator_count = E::sync_committee_size();
    let fork_epoch = Epoch::new(8);
    let spec = altair_spec(fork_epoch);
    let tester = InteractiveTester::<E>::new(Some(spec.clone()), validator_count).await;
    let harness = &tester.harness;
    let client = &tester.client;

    let fork_slot = fork_epoch.start_slot(E::slots_per_epoch());
    let fork_state = harness
        .chain
        .state_at_slot(fork_slot, StateSkipConfig::WithStateRoots)
        .unwrap();

    harness.set_current_slot(fork_slot);

    let sync_messages = harness.make_sync_contributions(
        &fork_state,
        *fork_state.get_block_root(fork_slot - 1).unwrap(),
        fork_slot,
        RelativeSyncCommittee::Current,
    );

    let sync_committee_messages = sync_messages
        .iter()
        .flat_map(|(messages, _)| messages.iter().map(|(message, _subnet)| message.clone()))
        .collect::<Vec<_>>();
    assert!(!sync_committee_messages.is_empty());

    client
        .post_beacon_pool_sync_committee_signatures(&sync_committee_messages)
        .await
        .unwrap();

    let signed_contributions = sync_messages
        .into_iter()
        .filter_map(|(_, op_aggregate)| op_aggregate)
        .collect::<Vec<_>>();
    assert!(!signed_contributions.is_empty());

    client
        .post_validator_contribution_and_proofs(&signed_contributions)
        .await
        .unwrap();
}

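/// Check that the `sync_committees` endpoint returns 400 before the Altair fork and becomes
/// usable once a post-fork block is applied, including for epochs in the next sync committee
/// period.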
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn sync_committee_indices_across_fork() {
    let validator_count = E::sync_committee_size();
    let fork_epoch = Epoch::new(8);
    let spec = altair_spec(fork_epoch);
    let tester = InteractiveTester::<E>::new(Some(spec.clone()), validator_count).await;
    let harness = &tester.harness;
    let client = &tester.client;

    let all_validators = harness.get_all_validators();

    // Flatten subcommittees into a single vec.
    let flatten = |subcommittees: &[SyncSubcommittee]| -> Vec<u64> {
        subcommittees
            .iter()
            .flat_map(|sub| sub.indices.iter().copied())
            .collect()
    };

    // Prior to the fork the `sync_committees` endpoint should return a 400 error.
    assert_eq!(
        client
            .get_beacon_states_sync_committees(StateId::Slot(Slot::new(0)), None)
            .await
            .unwrap_err()
            .status()
            .unwrap(),
        400
    );
    assert_eq!(
        client
            .get_beacon_states_sync_committees(StateId::Head, Some(Epoch::new(0)))
            .await
            .unwrap_err()
            .status()
            .unwrap(),
        400
    );

    // If there's a skip slot at the fork slot, the endpoint will return a 400 until a block is
    // applied.
    let fork_slot = fork_epoch.start_slot(E::slots_per_epoch());
    let (genesis_state, genesis_state_root) = harness.get_current_state_and_root();
    let (_, mut state) = harness
        .add_attested_block_at_slot(
            fork_slot - 1,
            genesis_state,
            genesis_state_root,
            &all_validators,
        )
        .await
        .unwrap();

    harness.advance_slot();
    assert_eq!(harness.get_current_slot(), fork_slot);

    // Using the head state must fail.
    assert_eq!(
        client
            .get_beacon_states_sync_committees(StateId::Head, Some(fork_epoch))
            .await
            .unwrap_err()
            .status()
            .unwrap(),
        400
    );

    // In theory we could do a state advance and make this work, but to keep things simple I've
    // avoided doing that for now.
    assert_eq!(
        client
            .get_beacon_states_sync_committees(StateId::Slot(fork_slot), None)
            .await
            .unwrap_err()
            .status()
            .unwrap(),
        400
    );

    // Once the head is updated it should be useable for requests, including in the next sync
    // committee period.
    let state_root = state.canonical_root().unwrap();
    harness
        .add_attested_block_at_slot(fork_slot + 1, state, state_root, &all_validators)
        .await
        .unwrap();

    let current_period = fork_epoch.sync_committee_period(&spec).unwrap();
    let next_period_epoch = spec.epochs_per_sync_committee_period * (current_period + 1);
    assert!(next_period_epoch > fork_epoch);

    for epoch in [
        None,
        Some(fork_epoch),
        Some(fork_epoch + 1),
        Some(next_period_epoch),
        Some(next_period_epoch + 1),
    ] {
        let committee = client
            .get_beacon_states_sync_committees(StateId::Head, epoch)
            .await
            .unwrap()
            .data;
        assert_eq!(committee.validators.len(), E::sync_committee_size());

        assert_eq!(
            committee.validators,
            flatten(&committee.validator_aggregates)
        );
    }
}

/// Assert that an HTTP API error has the given status code and indexed errors for the given indices.
fn assert_server_indexed_error(error: eth2::Error, status_code: u16, indices: Vec<usize>) {
    let eth2::Error::ServerIndexedMessage(IndexedErrorMessage { code, failures, .. }) = error
    else {
        panic!("wrong error, expected ServerIndexedMessage, got: {error:?}")
    };
    assert_eq!(code, status_code);
    assert_eq!(failures.len(), indices.len());
    for (index, failure) in indices.into_iter().zip(failures) {
        assert_eq!(failure.index, index as u64);
    }
}

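/// Submit BLS-to-execution changes before and after the Capella fork, checking that valid changes
/// are pooled and broadcast, that conflicting or wrongly-signed changes are rejected, and that
/// every validator eventually ends up with an eth1 withdrawal credential.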
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn bls_to_execution_changes_update_all_around_capella_fork() {
    let validator_count = 128;
    let fork_epoch = Epoch::new(2);
    let spec = capella_spec(fork_epoch);
    let max_bls_to_execution_changes = E::max_bls_to_execution_changes();

    // Use a genesis state with entirely BLS withdrawal credentials.
    // Offset keypairs by `validator_count` to create keys distinct from the signing keys.
    let validator_keypairs = generate_deterministic_keypairs(validator_count);
    let withdrawal_keypairs = (0..validator_count)
        .map(|i| Some(generate_deterministic_keypair(i + validator_count)))
        .collect::<Vec<_>>();
    let withdrawal_credentials = withdrawal_keypairs
        .iter()
        .map(|keypair| bls_withdrawal_credentials(&keypair.as_ref().unwrap().pk, &spec))
        .collect::<Vec<_>>();
    let header = generate_genesis_header(&spec, true);
    let genesis_state = interop_genesis_state_with_withdrawal_credentials(
        &validator_keypairs,
        &withdrawal_credentials,
        HARNESS_GENESIS_TIME,
        Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH),
        header,
        &spec,
    )
    .unwrap();

    let tester = InteractiveTester::<E>::new_with_initializer_and_mutator(
        Some(spec.clone()),
        validator_count,
        Some(Box::new(|harness_builder| {
            harness_builder
                .keypairs(validator_keypairs)
                .withdrawal_keypairs(withdrawal_keypairs)
                .genesis_state_ephemeral_store(genesis_state)
        })),
        None,
        Default::default(),
    )
    .await;
    let harness = &tester.harness;
    let client = &tester.client;

    let all_validators = harness.get_all_validators();
    let all_validators_u64 = all_validators.iter().map(|x| *x as u64).collect::<Vec<_>>();

    // Create a bunch of valid address changes.
    let valid_address_changes = all_validators_u64
        .iter()
        .map(|&validator_index| {
            harness.make_bls_to_execution_change(
                validator_index,
                Address::from_low_u64_be(validator_index),
            )
        })
        .collect::<Vec<_>>();

    // Address changes which conflict with `valid_address_changes` on the address chosen.
    let conflicting_address_changes = all_validators_u64
        .iter()
        .map(|&validator_index| {
            harness.make_bls_to_execution_change(
                validator_index,
                Address::from_low_u64_be(validator_index + 1),
            )
        })
        .collect::<Vec<_>>();

    // Address changes signed with the wrong key.
    let wrong_key_address_changes = all_validators_u64
        .iter()
        .map(|&validator_index| {
            // Use the correct pubkey.
            let pubkey = &harness.get_withdrawal_keypair(validator_index).pk;
            // And the wrong secret key.
            let secret_key = &harness
                .get_withdrawal_keypair((validator_index + 1) % validator_count as u64)
                .sk;
            harness.make_bls_to_execution_change_with_keys(
                validator_index,
                Address::from_low_u64_be(validator_index),
                pubkey,
                secret_key,
            )
        })
        .collect::<Vec<_>>();

    // Submit some changes before Capella. Just enough to fill two blocks.
    let num_pre_capella = validator_count / 4;
    let blocks_filled_pre_capella = 2;
    assert_eq!(
        num_pre_capella,
        blocks_filled_pre_capella * max_bls_to_execution_changes
    );

    client
        .post_beacon_pool_bls_to_execution_changes(&valid_address_changes[..num_pre_capella])
        .await
        .unwrap();

    let expected_received_pre_capella_messages = valid_address_changes[..num_pre_capella].to_vec();

    // Conflicting changes for the same validators should all fail.
    let error = client
        .post_beacon_pool_bls_to_execution_changes(&conflicting_address_changes[..num_pre_capella])
        .await
        .unwrap_err();
    assert_server_indexed_error(error, 400, (0..num_pre_capella).collect());

    // Re-submitting the same changes should be accepted.
    client
        .post_beacon_pool_bls_to_execution_changes(&valid_address_changes[..num_pre_capella])
        .await
        .unwrap();

    // Invalid changes signed with the wrong keys should all be rejected without affecting the seen
    // indices filters (apply ALL of them).
    let error = client
        .post_beacon_pool_bls_to_execution_changes(&wrong_key_address_changes)
        .await
        .unwrap_err();
    assert_server_indexed_error(error, 400, all_validators.clone());

    // Advance to right before Capella.
    let capella_slot = fork_epoch.start_slot(E::slots_per_epoch());
    harness.extend_to_slot(capella_slot - 1).await;
    assert_eq!(harness.head_slot(), capella_slot - 1);

    assert_eq!(
        harness
            .chain
            .op_pool
            .get_bls_to_execution_changes_received_pre_capella(
                &harness.chain.head_snapshot().beacon_state,
                &spec,
            )
            .into_iter()
            .collect::<HashSet<_>>(),
        HashSet::from_iter(expected_received_pre_capella_messages.into_iter()),
        "all pre-capella messages should be queued for capella broadcast"
    );

    // Add Capella blocks which should be full of BLS to execution changes.
    for i in 0..validator_count / max_bls_to_execution_changes {
        let head_block_root = harness.extend_slots(1).await;
        let head_block = harness
            .chain
            .get_block(&head_block_root)
            .await
            .unwrap()
            .unwrap();

        let bls_to_execution_changes = head_block
            .message()
            .body()
            .bls_to_execution_changes()
            .unwrap();

        // Block should be full.
        assert_eq!(
            bls_to_execution_changes.len(),
            max_bls_to_execution_changes,
            "block not full on iteration {i}"
        );

        // Included changes should be the ones from `valid_address_changes` in any order.
        for address_change in bls_to_execution_changes.iter() {
            assert!(valid_address_changes.contains(address_change));
        }

        // After the initial 2 blocks, add the rest of the changes using a large
        // request containing all the valid, all the conflicting and all the invalid.
        // Despite the invalid and duplicate messages, the new ones should still get picked up by
        // the pool.
        if i == blocks_filled_pre_capella - 1 {
            let all_address_changes: Vec<_> = [
                valid_address_changes.clone(),
                conflicting_address_changes.clone(),
                wrong_key_address_changes.clone(),
            ]
            .concat();

            let error = client
                .post_beacon_pool_bls_to_execution_changes(&all_address_changes)
                .await
                .unwrap_err();
            assert_server_indexed_error(
                error,
                400,
                (validator_count..3 * validator_count).collect(),
            );
        }
    }

    // Eventually all validators should have eth1 withdrawal credentials.
    let head_state = harness.get_current_state();
    for validator in head_state.validators() {
        assert!(validator.has_eth1_withdrawal_credential(&spec));
    }
}