Persist light client bootstrap (#5915)

* persist light client updates

* update beacon chain to serve light client updates

* resolve todos

* cache best update

* extend cache parts

* is better light client update (see the ranking sketch after this list)

* resolve merge conflict

* initial api changes

* add lc update db column

* fmt

* added tests

* add sim

* Merge branch 'unstable' of https://github.com/sigp/lighthouse into persist-light-client-updates

* fix some weird issues with the simulator

* tests

* Merge branch 'unstable' of https://github.com/sigp/lighthouse into persist-light-client-updates

* test changes

* merge conflict

* testing

* started work on ef tests and some code clean up

* update tests

* linting

* no-op pre-Altair, we're still failing on Electra though

* allow for zeroed light client header

* Merge branch 'unstable' of https://github.com/sigp/lighthouse into persist-light-client-updates

* merge unstable

* remove unwraps

* remove unwraps

* fetch bootstrap without always querying for state

* storing bootstrap parts in db

* more code cleanup

* test

* prune sync committee branches from dropped chains

* Update light_client_update.rs

* merge unstable

* move functionality to helper methods

* refactor is best update fn

* refactor is best update fn

* improve organization of light client server cache logic

* fork digest calc, and only spawn as many blocks as we need for the lc update test

* resolve merge conflict

* add electra bootstrap logic, add logic to cache current sync committee

* add latest sync committee branch cache

* fetch lc update from the cache if it exists

* fmt

* Fix beacon_chain tests

* Add debug code to update ranking_order ef test

* Fix compare code

* merge conflicts

* merge conflict

* add better error messaging

* resolve merge conflicts

* remove lc update from basicsim

* rename sync committee variable and fix persist condition

* refactor get_light_client_update logic

* add better comments, return helpful error messages over http and rpc

* pruning canonical non-checkpoint slots

* fix test

* rerun test

* update pruning logic, add tests

* fix tests

* fix imports

* fmt

* refactor db code

* Refactor db method

* Refactor db method

* add additional comments

* Merge branch 'unstable' of https://github.com/sigp/lighthouse into persist-light-client-bootstrap

* fix merge

* linting

* merge conflict

* prevent overflow

* enable lc server for http api tests

* fix tests

* remove prints

* remove warning

* revert change
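
For context on the "cache best update" and "is better light client update" commits above: the server keeps at most one update per sync committee period, replacing the stored one only when a new update ranks higher. Below is a minimal Rust sketch of that ranking in the style of the Altair light client spec's `is_better_update`; the `UpdateSummary` type and its fields are hypothetical stand-ins, not Lighthouse's actual types, and the spec's sync-committee-relevance and sync-committee-finality checks are omitted for brevity.

/// Hypothetical, simplified stand-in for a light client update: only the
/// fields the ranking below needs.
struct UpdateSummary {
    /// One bit per sync committee member that signed the attested header.
    participation_bits: Vec<bool>,
    /// Whether the update carries a finality proof.
    has_finality: bool,
    /// Slot of the attested header; older wins ties.
    attested_slot: u64,
}

/// Returns true if `new` should replace `old` as the cached best update for
/// its sync committee period (a subset of the spec's `is_better_update`).
fn is_better_update(new: &UpdateSummary, old: &UpdateSummary) -> bool {
    let committee_size = new.participation_bits.len();
    let new_active = new.participation_bits.iter().filter(|b| **b).count();
    let old_active = old.participation_bits.iter().filter(|b| **b).count();

    // 1. Prefer an update signed by a >2/3 supermajority of the committee.
    let new_supermajority = new_active * 3 >= committee_size * 2;
    let old_supermajority = old_active * 3 >= committee_size * 2;
    if new_supermajority != old_supermajority {
        return new_supermajority;
    }
    // 2. Below the supermajority threshold, higher participation wins outright.
    if !new_supermajority && new_active != old_active {
        return new_active > old_active;
    }
    // 3. Prefer an update that proves finality.
    if new.has_finality != old.has_finality {
        return new.has_finality;
    }
    // 4. Tie-breakers: higher participation, then the older attested header.
    if new_active != old_active {
        return new_active > old_active;
    }
    new.attested_slot < old.attested_slot
}

fn main() {
    let supermajority = UpdateSummary {
        participation_bits: vec![true; 512],
        has_finality: true,
        attested_slot: 100,
    };
    let sparse = UpdateSummary {
        participation_bits: [vec![true; 200], vec![false; 312]].concat(),
        has_finality: false,
        attested_slot: 90,
    };
    assert!(is_better_update(&supermajority, &sparse));
}

With a total order like this, caching the best update per period costs a single comparison against the stored entry.
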
Author: Eitan Seri-Levi
Date: 2024-09-09 17:27:49 -07:00
Committed by: GitHub
Parent: 51091a40fa
Commit: a94b12b4d5
19 changed files with 733 additions and 238 deletions


@@ -5,7 +5,7 @@ use beacon_chain::block_verification_types::RpcBlock;
use beacon_chain::builder::BeaconChainBuilder;
use beacon_chain::data_availability_checker::AvailableBlock;
use beacon_chain::schema_change::migrate_schema;
use beacon_chain::test_utils::RelativeSyncCommittee;
use beacon_chain::test_utils::SyncCommitteeStrategy;
use beacon_chain::test_utils::{
mock_execution_layer_from_parts, test_spec, AttestationStrategy, BeaconChainHarness,
BlockStrategy, DiskHarnessType, KZG,
@@ -104,6 +104,142 @@ fn get_harness_generic(
harness
}
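// Builds a chain with light client data, restarts it from a weak subjectivity
// checkpoint, and checks that a `LightClientBootstrap` can be served for the
// finalized checkpoint root.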
#[tokio::test]
async fn light_client_bootstrap_test() {
let spec = test_spec::<E>();
let Some(_) = spec.altair_fork_epoch else {
// No-op prior to Altair.
return;
};
let checkpoint_slot = Slot::new(E::slots_per_epoch() * 6);
let db_path = tempdir().unwrap();
let log = test_logger();
let seconds_per_slot = spec.seconds_per_slot;
let store = get_store_generic(
&db_path,
StoreConfig {
slots_per_restore_point: 2 * E::slots_per_epoch(),
..Default::default()
},
test_spec::<E>(),
);
let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT);
let all_validators = (0..LOW_VALIDATOR_COUNT).collect::<Vec<_>>();
let num_initial_slots = E::slots_per_epoch() * 7;
let slots: Vec<Slot> = (1..num_initial_slots).map(Slot::new).collect();
let (genesis_state, genesis_state_root) = harness.get_current_state_and_root();
harness
.add_attested_blocks_at_slots_with_lc_data(
genesis_state.clone(),
genesis_state_root,
&slots,
&all_validators,
None,
SyncCommitteeStrategy::NoValidators,
)
.await;
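    // Collect the weak subjectivity checkpoint block, blobs and state so that
    // a fresh beacon chain can be initialised from them below.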
let wss_block_root = harness
.chain
.block_root_at_slot(checkpoint_slot, WhenSlotSkipped::Prev)
.unwrap()
.unwrap();
let wss_state_root = harness
.chain
.state_root_at_slot(checkpoint_slot)
.unwrap()
.unwrap();
let wss_block = harness
.chain
.store
.get_full_block(&wss_block_root)
.unwrap()
.unwrap();
let wss_blobs_opt = harness.chain.store.get_blobs(&wss_block_root).unwrap();
let wss_state = store
.get_state(&wss_state_root, Some(checkpoint_slot))
.unwrap()
.unwrap();
let kzg = spec.deneb_fork_epoch.map(|_| KZG.clone());
let mock =
mock_execution_layer_from_parts(&harness.spec, harness.runtime.task_executor.clone());
// Initialise a new beacon chain from the finalized checkpoint.
// The slot clock must be set to a time ahead of the checkpoint state.
let slot_clock = TestingSlotClock::new(
Slot::new(0),
Duration::from_secs(harness.chain.genesis_time),
Duration::from_secs(seconds_per_slot),
);
slot_clock.set_slot(harness.get_current_slot().as_u64());
let (shutdown_tx, _shutdown_rx) = futures::channel::mpsc::channel(1);
let beacon_chain = BeaconChainBuilder::<DiskHarnessType<E>>::new(MinimalEthSpec)
.store(store.clone())
.custom_spec(test_spec::<E>())
.task_executor(harness.chain.task_executor.clone())
.logger(log.clone())
.weak_subjectivity_state(
wss_state,
wss_block.clone(),
wss_blobs_opt.clone(),
genesis_state,
)
.unwrap()
.store_migrator_config(MigratorConfig::default().blocking())
.dummy_eth1_backend()
.expect("should build dummy backend")
.slot_clock(slot_clock)
.shutdown_sender(shutdown_tx)
.chain_config(ChainConfig::default())
.event_handler(Some(ServerSentEventHandler::new_with_capacity(
log.clone(),
1,
)))
.execution_layer(Some(mock.el))
.kzg(kzg)
.build()
.expect("should build");
let current_state = harness.get_current_state();
if ForkName::Electra == current_state.fork_name_unchecked() {
// TODO(electra) fix beacon state `compute_merkle_proof`
return;
}
let finalized_checkpoint = beacon_chain
.canonical_head
.cached_head()
.finalized_checkpoint();
let block_root = finalized_checkpoint.root;
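    // A bootstrap for the finalized block root should be servable from the
    // persisted light client data.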
let (lc_bootstrap, _) = harness
.chain
.get_light_client_bootstrap(&block_root)
.unwrap()
.unwrap();
let bootstrap_slot = match lc_bootstrap {
LightClientBootstrap::Altair(lc_bootstrap) => lc_bootstrap.header.beacon.slot,
LightClientBootstrap::Capella(lc_bootstrap) => lc_bootstrap.header.beacon.slot,
LightClientBootstrap::Deneb(lc_bootstrap) => lc_bootstrap.header.beacon.slot,
LightClientBootstrap::Electra(lc_bootstrap) => lc_bootstrap.header.beacon.slot,
};
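    // The bootstrap's header should sit in the same epoch as the finalized
    // checkpoint.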
assert_eq!(
bootstrap_slot.epoch(E::slots_per_epoch()),
finalized_checkpoint.epoch
);
}
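// Checks that light client updates are cached and persisted per sync
// committee period as the chain advances.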
#[tokio::test]
async fn light_client_updates_test() {
let spec = test_spec::<E>();
@@ -170,7 +306,7 @@ async fn light_client_updates_test() {
harness.advance_slot();
harness
.extend_chain(
.extend_chain_with_light_client_data(
num_final_blocks as usize,
BlockStrategy::OnCanonicalHead,
AttestationStrategy::AllValidators,
@@ -224,53 +360,6 @@ async fn light_client_updates_test() {
return;
}
let block_root = *current_state
.get_block_root(current_state.slot() - Slot::new(1))
.unwrap();
let contributions = harness.make_sync_contributions(
&current_state,
block_root,
current_state.slot() - Slot::new(1),
RelativeSyncCommittee::Current,
);
// generate sync aggregates
for (_, contribution_and_proof) in contributions {
let contribution = contribution_and_proof
.expect("contribution exists for committee")
.message
.contribution;
beacon_chain
.op_pool
.insert_sync_contribution(contribution.clone())
.unwrap();
beacon_chain
.op_pool
.insert_sync_contribution(contribution)
.unwrap();
}
// check that we can fetch the newly generated sync aggregate
let sync_aggregate = beacon_chain
.op_pool
.get_sync_aggregate(&current_state)
.unwrap()
.unwrap();
// cache light client data
beacon_chain
.light_client_server_cache
.recompute_and_cache_updates(
store.clone(),
current_state.slot() - Slot::new(1),
&block_root,
&sync_aggregate,
&log,
&spec,
)
.unwrap();
// calculate the sync period from the previous slot
let sync_period = (current_state.slot() - Slot::new(1))
.epoch(E::slots_per_epoch())
@@ -291,61 +380,13 @@ async fn light_client_updates_test() {
}
harness
.extend_chain(
.extend_chain_with_light_client_data(
num_final_blocks as usize,
BlockStrategy::OnCanonicalHead,
AttestationStrategy::AllValidators,
)
.await;
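    // Extending with light client data also generates the sync contributions
    // and recomputes the cached updates, steps this test previously performed
    // by hand.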
let current_state = harness.get_current_state();
let block_root = *current_state
.get_block_root(current_state.slot() - Slot::new(1))
.unwrap();
let contributions = harness.make_sync_contributions(
&current_state,
block_root,
current_state.slot() - Slot::new(1),
RelativeSyncCommittee::Current,
);
// generate new sync aggregates from this new state
for (_, contribution_and_proof) in contributions {
let contribution = contribution_and_proof
.expect("contribution exists for committee")
.message
.contribution;
beacon_chain
.op_pool
.insert_sync_contribution(contribution.clone())
.unwrap();
beacon_chain
.op_pool
.insert_sync_contribution(contribution)
.unwrap();
}
let sync_aggregate = beacon_chain
.op_pool
.get_sync_aggregate(&current_state)
.unwrap()
.unwrap();
// cache new light client data
beacon_chain
.light_client_server_cache
.recompute_and_cache_updates(
store.clone(),
current_state.slot() - Slot::new(1),
&block_root,
&sync_aggregate,
&log,
&spec,
)
.unwrap();
// we should now have two light client updates in the db
let lc_updates = beacon_chain
.get_light_client_updates(sync_period, 100)