From 858b01f4e355e77a7b2b659843728151f2499290 Mon Sep 17 00:00:00 2001
From: realbigsean
Date: Wed, 26 Jun 2024 06:29:27 -0700
Subject: [PATCH 01/11] Block processing electra (#5741)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* Attestation superstruct changes for EIP 7549 (#5644)
* update
* experiment
* superstruct changes
* revert
* superstruct changes
* fix tests
* indexed attestation
* indexed attestation superstruct
* updated TODOs
* `superstruct` the `AttesterSlashing` (#5636)
* `superstruct` Attester Fork Variants
* Push a little further
* Deal with Encode / Decode of AttesterSlashing
* not so sure about this..
* Stop Encode/Decode Bounds from Propagating Out
* Tons of Changes..
* More Conversions to AttestationRef
* Add AsReference trait (#15)
* Add AsReference trait
* Fix some snafus
* Got it Compiling! :D
* Got Tests Building
* Get beacon chain tests compiling

---------

Co-authored-by: Michael Sproul

* Merge remote-tracking branch 'upstream/unstable' into electra_attestation_changes
* Make EF Tests Fork-Agnostic (#5713)
* Finish EF Test Fork Agnostic (#5714)
* Superstruct `AggregateAndProof` (#5715)
* Upgrade `superstruct` to `0.8.0`
* superstruct `AggregateAndProof`
* Merge remote-tracking branch 'sigp/unstable' into electra_attestation_changes
* cargo fmt
* Merge pull request #5726 from realbigsean/electra_attestation_changes

Merge unstable into Electra attestation changes

* process withdrawals updates
* cleanup withdrawals processing
* update `process_operations` deposit length check
* add apply_deposit changes
* add execution layer withdrawal request processing
* process deposit receipts
* add consolidation processing
* update process operations function
* exit updates
* clean up
* update slash_validator
* EIP7549 `get_attestation_indices` (#5657)
* get attesting indices electra impl
* fmt
* get tests to pass
* fmt
* fix some beacon chain tests
* fmt
* fix slasher test
* fmt got me again
* fix more tests
* fix tests
* Some small changes (#5739)
* Merge branch 'electra_attestation_changes' of https://github.com/sigp/lighthouse into block-processing-electra
* cargo fmt (#5740)
* Merge branch 'electra_attestation_changes' of https://github.com/sigp/lighthouse into block-processing-electra
* fix attestation verification
* Sketch op pool changes
* fix get attesting indices (#5742)
* fix get attesting indices
* better errors
* fix compile
* only get committee index once
* Merge branch 'electra_attestation_changes' of https://github.com/sigp/lighthouse into block-processing-electra
* Ef test fixes (#5753)
* attestation related ef test fixes
* delete commented out stuff
* Merge branch 'electra_attestation_changes' of https://github.com/sigp/lighthouse into block-processing-electra
* Fix Aggregation Pool for Electra (#5754)
* Fix Aggregation Pool for Electra
* Remove Outdated Interface
* fix ssz (#5755)
* Get `electra_op_pool` up to date (#5756)
* fix get attesting indices (#5742)
* fix get attesting indices
* better errors
* fix compile
* only get committee index once
* Ef test fixes (#5753)
* attestation related ef test fixes
* delete commented out stuff
* Fix Aggregation Pool for Electra (#5754)
* Fix Aggregation Pool for Electra
* Remove Outdated Interface
* fix ssz (#5755)

---------

Co-authored-by: realbigsean

* Revert "Get `electra_op_pool` up to date (#5756)" (#5757)

This reverts commit ab9e58aa3d0e6fe2175a4996a5de710e81152896.
* Merge branch 'electra_attestation_changes' of https://github.com/sigp/lighthouse into electra_op_pool
* Merge branch 'electra_attestation_changes' of https://github.com/sigp/lighthouse into block-processing-electra
* Compute on chain aggregate impl (#5752)
* add compute_on_chain_agg impl to op pool changes
* fmt
* get op pool tests to pass
* update the naive agg pool interface (#5760)
* Merge branch 'electra_attestation_changes' of https://github.com/sigp/lighthouse into block-processing-electra
* Fix bugs in cross-committee aggregation
* Add comment to max cover optimisation
* Fix assert
* Merge pull request #5749 from sigp/electra_op_pool

Optimise Electra op pool aggregation

* don't fail on empty consolidations
* Merge branch 'electra_attestation_changes' of https://github.com/sigp/lighthouse into block-processing-electra
* update committee offset
* Merge branch 'electra_attestation_changes' of https://github.com/sigp/lighthouse into block-processing-electra
* only increment the state deposit index on old deposit flow
* Fix Electra Fork Choice Tests (#5764)
* Fix Electra Fork Choice Tests (#5764)
* Merge branch 'electra_attestation_changes' of https://github.com/sigp/lighthouse into block-processing-electra
* Fix Consolidation Sigs & Withdrawals
* Merge pull request #5766 from ethDreamer/two_fixes

Fix Consolidation Sigs & Withdrawals

* Subscribe to the correct subnets for electra attestations (#5782)
* subscribe to the correct att subnets for electra
* subscribe to the correct att subnets for electra
* cargo fmt
* Subscribe to the correct subnets for electra attestations (#5782)
* subscribe to the correct att subnets for electra
* subscribe to the correct att subnets for electra
* cargo fmt
* fix slashing handling
* Fix Bug In Block Processing with 0x02 Credentials
* Merge remote-tracking branch 'upstream/unstable'
* Send unagg attestation based on fork
* Publish all aggregates
* just one more check bro plz..
* Merge pull request #5832 from ethDreamer/electra_attestation_changes_merge_unstable

Merge `unstable` into `electra_attestation_changes`

* Merge pull request #5835 from realbigsean/fix-validator-logic

Fix validator logic

* Merge pull request #5816 from realbigsean/electra-attestation-slashing-handling

Electra slashing handling

* Merge branch 'electra_attestation_changes' of https://github.com/sigp/lighthouse into block-processing-electra
* Electra attestation changes rm decode impl (#5856)
* Remove Crappy Decode impl for Attestation
* Remove Inefficient Attestation Decode impl
* Implement Schema Upgrade / Downgrade
* Update beacon_node/beacon_chain/src/schema_change/migration_schema_v20.rs

Co-authored-by: Michael Sproul

---------

Co-authored-by: Michael Sproul

* Fix failing attestation tests and misc electra attestation cleanup (#5810)
* - get attestation related beacon chain tests to pass
- observed attestations are now keyed off of data + committee index
- rename op pool attestationref to compactattestationref
- remove unwraps in agg pool and use options instead
- cherry pick some changes from ef-tests-electra
* cargo fmt
* fix failing test
* Revert dockerfile changes
* make committee_index return option
* function args shouldnt be a ref to attestation ref
* fmt
* fix dup imports

---------

Co-authored-by: realbigsean

* fix some todos (#5817)
* Merge branch 'unstable' of https://github.com/sigp/lighthouse into electra_attestation_changes
* add consolidations to merkle calc for inclusion proof
* Merge branch 'electra_attestation_changes' of https://github.com/sigp/lighthouse into block-processing-electra
* Remove Duplicate KZG Commitment Merkle Proof Code (#5874)
* Remove Duplicate KZG Commitment Merkle Proof Code
* s/tree_lists/fields/
* Merge branch 'unstable' of https://github.com/sigp/lighthouse into electra_attestation_changes
* Merge branch 'electra_attestation_changes' of https://github.com/sigp/lighthouse into block-processing-electra
* fix compile
* Merge branch 'electra_attestation_changes' of https://github.com/sigp/lighthouse into block-processing-electra
* Fix slasher tests (#5906)
* Fix electra tests
* Add electra attestations to double vote tests
* Update superstruct to 0.8
* Merge remote-tracking branch 'origin/unstable' into electra_attestation_changes
* Small cleanup in slasher tests
* Clean up Electra observed aggregates (#5929)
* Use consistent key in observed_attestations
* Remove unwraps from observed aggregates
* Merge branch 'unstable' of https://github.com/sigp/lighthouse into electra_attestation_changes
* De-dup attestation constructor logic
* Remove unwraps in Attestation construction
* Dedup match_attestation_data
* Remove outdated TODO
* Use ForkName Ord in fork-choice tests
* Use ForkName Ord in BeaconBlockBody
* Make to_electra not fallible
* Remove TestRandom impl for IndexedAttestation
* Remove IndexedAttestation faulty Decode impl
* Drop TestRandom impl
* Add PendingAttestationInElectra
* Indexed att on disk (#35)
* indexed att on disk
* fix lints
* Update slasher/src/migrate.rs

Co-authored-by: ethDreamer <37123614+ethDreamer@users.noreply.github.com>

---------

Co-authored-by: Lion - dapplion <35266934+dapplion@users.noreply.github.com>
Co-authored-by: ethDreamer <37123614+ethDreamer@users.noreply.github.com>

* add electra fork enabled fn to ForkName impl (#36)
* add electra fork enabled fn to ForkName impl
* remove inadvertent file
* Update common/eth2/src/types.rs

Co-authored-by: ethDreamer <37123614+ethDreamer@users.noreply.github.com>

* Dedup attestation constructor logic in attester cache
* Use if let Ok for committee_bits
* Dedup Attestation constructor code
* Diff reduction in tests
* Fix beacon_chain tests
* Diff reduction
* Use Ord for ForkName in pubsub
* Resolve into_attestation_and_indices todo
* Remove stale TODO
* Fix beacon_chain tests
* Test spec invariant
* Use electra_enabled in pubsub
* Remove get_indexed_attestation_from_signed_aggregate
* Use ok_or instead of if let else
* committees are sorted
* remove dup method `get_indexed_attestation_from_committees`
* Merge pull request #5940 from dapplion/electra_attestation_changes_lionreview

Electra attestations #5712 review

* update default persisted op pool deserialization
* ensure aggregate and proof uses serde untagged on ref
* Fork aware ssz static attestation tests
* Electra attestation changes from Lions review (#5971)
* dedup/cleanup and remove unneeded hashset use
* remove irrelevant TODOs
* Merge branch 'unstable' of https://github.com/sigp/lighthouse into electra_attestation_changes
* Merge branch 'electra_attestation_changes' of https://github.com/sigp/lighthouse into block-processing-electra
* Electra attestation changes sean review (#5972)
* instantiate empty bitlist in unreachable code
* clean up error conversion
* fork enabled bool cleanup
* remove a couple todos
* return bools instead of options in `aggregate` and use the result
* delete commented out code
* use map macros in simple transformations
* remove signers_disjoint_from
* get ef tests compiling
* get ef tests compiling
* update intentionally excluded files
* Avoid changing slasher schema for Electra
* Delete slasher schema v4
* Fix clippy
* Fix compilation of beacon_chain tests
* Update database.rs
* Update per_block_processing.rs
* Add electra lightclient types
* Update slasher/src/database.rs
* fix imports
* Merge pull request #5980 from dapplion/electra-lightclient

Add electra lightclient types

* Merge pull request #5975 from michaelsproul/electra-slasher-no-migration

Avoid changing slasher schema for Electra

* Update beacon_node/beacon_chain/src/attestation_verification.rs
* Update beacon_node/beacon_chain/src/attestation_verification.rs
* Merge branch 'unstable' of https://github.com/sigp/lighthouse into electra_attestation_changes
* Merge branch 'electra_attestation_changes' of https://github.com/realbigsean/lighthouse into block-processing-electra
* fork enabled electra
---
 beacon_node/beacon_chain/src/beacon_chain.rs  |   5 +-
 .../beacon_chain/src/execution_payload.rs     |   2 +-
 .../genesis/src/eth1_genesis_service.rs       |   8 +-
 beacon_node/http_api/src/builder_states.rs    |   2 +-
 .../http_api/tests/interactive_tests.rs       |   2 +
 beacon_node/http_api/tests/tests.rs           |   4 +-
 .../src/common/initiate_validator_exit.rs     |  24 +-
 .../src/common/slash_validator.rs             |   4 +-
 consensus/state_processing/src/genesis.rs     |   5 +-
 .../src/per_block_processing.rs               |  74 +++-
 .../block_signature_verifier.rs               |  22 +
 .../src/per_block_processing/errors.rs        |  46 +-
 .../process_operations.rs                     | 402 ++++++++++++++++--
 .../per_block_processing/signature_sets.rs    |  38 +-
 .../verify_attestation.rs                     |  18 +-
 .../per_block_processing/verify_deposit.rs    |   2 +-
 .../src/per_block_processing/verify_exit.rs   |  11 +
 consensus/types/src/beacon_state.rs           |  13 +-
 consensus/types/src/chain_spec.rs             |  13 +
 consensus/types/src/consolidation.rs          |   4 +-
 consensus/types/src/payload.rs                | 151 ++++++-
 consensus/types/src/validator.rs              |  27 +-
 22 files changed, 799 insertions(+), 78 deletions(-)
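For readers unfamiliar with `superstruct` (used throughout the commits above for `Attestation`, `AttesterSlashing`, `IndexedAttestation` and `AggregateAndProof`): the macro generates one struct per fork variant plus a top-level enum dispatching between them. A minimal illustrative sketch — the field types here are simplified stand-ins, not Lighthouse's real definitions:

use superstruct::superstruct;

#[superstruct(variants(Base, Electra), variant_attributes(derive(Debug, Clone)))]
#[derive(Debug, Clone)]
pub struct Attestation {
    // Fields shared by every fork variant live here.
    pub slot: u64,
    // Fork-gated fields: EIP-7549 moves the committee index out of
    // `AttestationData` and into a per-committee bitfield.
    #[superstruct(only(Electra))]
    pub committee_bits: Vec<bool>, // stand-in for `BitVector<MaxCommitteesPerSlot>`
}

// The macro expands to `AttestationBase`, `AttestationElectra` and
// `enum Attestation { Base(AttestationBase), Electra(AttestationElectra) }`,
// so fork-aware code can simply match on the variant:
fn committee_index(att: &Attestation) -> Option<u64> {
    match att {
        Attestation::Base(_) => Some(0), // pre-Electra: read from `data.index` instead
        Attestation::Electra(a) => a.committee_bits.iter().position(|b| *b).map(|i| i as u64),
    }
}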
diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs
index f1d9ce791e..66e2964669 100644
--- a/beacon_node/beacon_chain/src/beacon_chain.rs
+++ b/beacon_node/beacon_chain/src/beacon_chain.rs
@@ -4538,6 +4538,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         let proposal_epoch = proposal_slot.epoch(T::EthSpec::slots_per_epoch());
         if head_state.current_epoch() == proposal_epoch {
             return get_expected_withdrawals(&unadvanced_state, &self.spec)
+                .map(|(withdrawals, _)| withdrawals)
                 .map_err(Error::PrepareProposerFailed);
         }
@@ -4555,7 +4556,9 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
             proposal_epoch.start_slot(T::EthSpec::slots_per_epoch()),
             &self.spec,
         )?;
-        get_expected_withdrawals(&advanced_state, &self.spec).map_err(Error::PrepareProposerFailed)
+        get_expected_withdrawals(&advanced_state, &self.spec)
+            .map(|(withdrawals, _)| withdrawals)
+            .map_err(Error::PrepareProposerFailed)
     }

     /// Determine whether a fork choice update to the execution layer should be overridden.
diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs
index cbffe36342..a6e0d247dc 100644
--- a/beacon_node/beacon_chain/src/execution_payload.rs
+++ b/beacon_node/beacon_chain/src/execution_payload.rs
@@ -413,7 +413,7 @@ pub fn get_execution_payload(
         state.latest_execution_payload_header()?.block_hash();
     let withdrawals = match state {
         &BeaconState::Capella(_) | &BeaconState::Deneb(_) | &BeaconState::Electra(_) => {
-            Some(get_expected_withdrawals(state, spec)?.into())
+            Some(get_expected_withdrawals(state, spec)?.0.into())
         }
         &BeaconState::Bellatrix(_) => None,
         // These shouldn't happen but they're here to make the pattern irrefutable
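Both hunks above adapt call sites to the new shape of `get_expected_withdrawals` (changed in the `per_block_processing.rs` hunk later in this patch): it now returns a pair instead of a bare list. A hypothetical caller, with `state` and `spec` assumed in scope:

let (withdrawals, partial_withdrawals_count) = get_expected_withdrawals(&state, &spec)?;
// From Electra onwards `partial_withdrawals_count` is `Some(n)`, reporting how many
// EIP-7251 pending partial withdrawals were consumed; on earlier forks it is `None`.
// Callers that only need the list drop the second element, as the hunks above do
// with `.map(|(withdrawals, _)| withdrawals)` and `.0`.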
diff --git a/beacon_node/genesis/src/eth1_genesis_service.rs b/beacon_node/genesis/src/eth1_genesis_service.rs
index 0ede74ba75..7015705027 100644
--- a/beacon_node/genesis/src/eth1_genesis_service.rs
+++ b/beacon_node/genesis/src/eth1_genesis_service.rs
@@ -432,8 +432,14 @@ impl Eth1GenesisService {
                 // Such an optimization would only be useful in a scenario where `MIN_GENESIS_TIME`
                 // is reached _prior_ to `MIN_ACTIVE_VALIDATOR_COUNT`. I suspect this won't be the
                 // case for mainnet, so we defer this optimization.
+                let Deposit { proof, data } = deposit;
+                let proof = if PROOF_VERIFICATION {
+                    Some(proof)
+                } else {
+                    None
+                };
-                apply_deposit(&mut state, &deposit, spec, PROOF_VERIFICATION)
+                apply_deposit(&mut state, data, proof, true, spec)
                     .map_err(|e| format!("Error whilst processing deposit: {:?}", e))
             })?;
diff --git a/beacon_node/http_api/src/builder_states.rs b/beacon_node/http_api/src/builder_states.rs
index a540113ab4..54f2c0efa8 100644
--- a/beacon_node/http_api/src/builder_states.rs
+++ b/beacon_node/http_api/src/builder_states.rs
@@ -33,7 +33,7 @@ pub fn get_next_withdrawals(
     }

     match get_expected_withdrawals(&state, &chain.spec) {
-        Ok(withdrawals) => Ok(withdrawals),
+        Ok((withdrawals, _)) => Ok(withdrawals),
         Err(e) => Err(warp_utils::reject::custom_server_error(format!(
             "failed to get expected withdrawal: {:?}",
             e
diff --git a/beacon_node/http_api/tests/interactive_tests.rs b/beacon_node/http_api/tests/interactive_tests.rs
index 529dc852e9..711820ccac 100644
--- a/beacon_node/http_api/tests/interactive_tests.rs
+++ b/beacon_node/http_api/tests/interactive_tests.rs
@@ -610,6 +610,7 @@ pub async fn proposer_boost_re_org_test(
     assert_eq!(state_b.slot(), slot_b);
     let pre_advance_withdrawals = get_expected_withdrawals(&state_b, &harness.chain.spec)
         .unwrap()
+        .0
         .to_vec();
     complete_state_advance(&mut state_b, None, slot_c, &harness.chain.spec).unwrap();
@@ -696,6 +697,7 @@ pub async fn proposer_boost_re_org_test(
         get_expected_withdrawals(&state_b, &harness.chain.spec)
     }
     .unwrap()
+    .0
     .to_vec();
     let payload_attribs_withdrawals = payload_attribs.withdrawals().unwrap();
     assert_eq!(expected_withdrawals, *payload_attribs_withdrawals);
diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs
index 86f2096224..4213fd4ab8 100644
--- a/beacon_node/http_api/tests/tests.rs
+++ b/beacon_node/http_api/tests/tests.rs
@@ -5471,7 +5471,9 @@ impl ApiTester {
                 &self.chain.spec,
             );
         }
-        let expected_withdrawals = get_expected_withdrawals(&state, &self.chain.spec).unwrap();
+        let expected_withdrawals = get_expected_withdrawals(&state, &self.chain.spec)
+            .unwrap()
+            .0;

         // fetch expected withdrawals from the client
         let result = self.client.get_expected_withdrawals(&state_id).await;
diff --git a/consensus/state_processing/src/common/initiate_validator_exit.rs b/consensus/state_processing/src/common/initiate_validator_exit.rs
index a40a9dfd39..8874e9ed4b 100644
--- a/consensus/state_processing/src/common/initiate_validator_exit.rs
+++ b/consensus/state_processing/src/common/initiate_validator_exit.rs
@@ -19,16 +19,22 @@ pub fn initiate_validator_exit(
     state.build_exit_cache(spec)?;

     // Compute exit queue epoch
-    let delayed_epoch = state.compute_activation_exit_epoch(state.current_epoch(), spec)?;
-    let mut exit_queue_epoch = state
-        .exit_cache()
-        .max_epoch()?
-        .map_or(delayed_epoch, |epoch| max(epoch, delayed_epoch));
-    let exit_queue_churn = state.exit_cache().get_churn_at(exit_queue_epoch)?;
+    let exit_queue_epoch = if state.fork_name_unchecked() >= ForkName::Electra {
+        let effective_balance = state.get_validator(index)?.effective_balance;
+        state.compute_exit_epoch_and_update_churn(effective_balance, spec)?
+    } else {
+        let delayed_epoch = state.compute_activation_exit_epoch(state.current_epoch(), spec)?;
+        let mut exit_queue_epoch = state
+            .exit_cache()
+            .max_epoch()?
+            .map_or(delayed_epoch, |epoch| max(epoch, delayed_epoch));
+        let exit_queue_churn = state.exit_cache().get_churn_at(exit_queue_epoch)?;
-    if exit_queue_churn >= state.get_validator_churn_limit(spec)? {
-        exit_queue_epoch.safe_add_assign(1)?;
-    }
+        if exit_queue_churn >= state.get_validator_churn_limit(spec)? {
+            exit_queue_epoch.safe_add_assign(1)?;
+        }
+        exit_queue_epoch
+    };

     let validator = state.get_validator_cow(index)?;
diff --git a/consensus/state_processing/src/common/slash_validator.rs b/consensus/state_processing/src/common/slash_validator.rs
index 520b58a8af..80d857cc00 100644
--- a/consensus/state_processing/src/common/slash_validator.rs
+++ b/consensus/state_processing/src/common/slash_validator.rs
@@ -53,8 +53,8 @@ pub fn slash_validator(
     // Apply proposer and whistleblower rewards
     let proposer_index = ctxt.get_proposer_index(state, spec)? as usize;
     let whistleblower_index = opt_whistleblower_index.unwrap_or(proposer_index);
-    let whistleblower_reward =
-        validator_effective_balance.safe_div(spec.whistleblower_reward_quotient)?;
+    let whistleblower_reward = validator_effective_balance
+        .safe_div(spec.whistleblower_reward_quotient_for_state(state))?;
     let proposer_reward = match state {
         BeaconState::Base(_) => whistleblower_reward.safe_div(spec.proposer_reward_quotient)?,
         BeaconState::Altair(_)
diff --git a/consensus/state_processing/src/genesis.rs b/consensus/state_processing/src/genesis.rs
index a84f359389..c73417077a 100644
--- a/consensus/state_processing/src/genesis.rs
+++ b/consensus/state_processing/src/genesis.rs
@@ -32,12 +32,13 @@ pub fn initialize_beacon_state_from_eth1(
     let mut deposit_tree = DepositDataTree::create(&[], 0, DEPOSIT_TREE_DEPTH);

-    for deposit in deposits.iter() {
+    for deposit in deposits.into_iter() {
         deposit_tree
             .push_leaf(deposit.data.tree_hash_root())
             .map_err(BlockProcessingError::MerkleTreeError)?;
         state.eth1_data_mut().deposit_root = deposit_tree.root();
-        apply_deposit(&mut state, deposit, spec, true)?;
+        let Deposit { proof, data } = deposit;
+        apply_deposit(&mut state, data, Some(proof), true, spec)?;
     }

     process_activations(&mut state, spec)?;
diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs
index 98671f82b9..e7655b453a 100644
--- a/consensus/state_processing/src/per_block_processing.rs
+++ b/consensus/state_processing/src/per_block_processing.rs
@@ -20,7 +20,7 @@ pub use verify_attestation::{
 };
 pub use verify_bls_to_execution_change::verify_bls_to_execution_change;
 pub use verify_deposit::{
-    get_existing_validator_index, verify_deposit_merkle_proof, verify_deposit_signature,
+    get_existing_validator_index, is_valid_deposit_signature, verify_deposit_merkle_proof,
 };
 pub use verify_exit::verify_exit;
@@ -503,13 +503,55 @@ pub fn compute_timestamp_at_slot(
 pub fn get_expected_withdrawals<E: EthSpec>(
     state: &BeaconState<E>,
     spec: &ChainSpec,
-) -> Result<Withdrawals<E>, BlockProcessingError> {
+) -> Result<(Withdrawals<E>, Option<usize>), BlockProcessingError> {
     let epoch = state.current_epoch();
     let mut withdrawal_index = state.next_withdrawal_index()?;
     let mut validator_index = state.next_withdrawal_validator_index()?;
     let mut withdrawals = vec![];
     let fork_name = state.fork_name_unchecked();

+    // [New in Electra:EIP7251]
+    // Consume pending partial withdrawals
+    let partial_withdrawals_count =
+        if let Ok(partial_withdrawals) = state.pending_partial_withdrawals() {
+            for withdrawal in partial_withdrawals {
+                if withdrawal.withdrawable_epoch > epoch
+                    || withdrawals.len() == spec.max_pending_partials_per_withdrawals_sweep as usize
+                {
+                    break;
+                }
+
+                let withdrawal_balance = state.get_balance(withdrawal.index as usize)?;
+                let validator = state.get_validator(withdrawal.index as usize)?;
+
+                let has_sufficient_effective_balance =
+                    validator.effective_balance >= spec.min_activation_balance;
+                let has_excess_balance = withdrawal_balance > spec.min_activation_balance;
+
+                if validator.exit_epoch == spec.far_future_epoch
+                    && has_sufficient_effective_balance
+                    && has_excess_balance
+                {
+                    let withdrawable_balance = std::cmp::min(
+                        withdrawal_balance.safe_sub(spec.min_activation_balance)?,
+                        withdrawal.amount,
+                    );
+                    withdrawals.push(Withdrawal {
+                        index: withdrawal_index,
+                        validator_index: withdrawal.index,
+                        address: validator
+                            .get_execution_withdrawal_address(spec)
+                            .ok_or(BeaconStateError::NonExecutionAddresWithdrawalCredential)?,
+                        amount: withdrawable_balance,
+                    });
+                    withdrawal_index.safe_add_assign(1)?;
+                }
+            }
+            Some(withdrawals.len())
+        } else {
+            None
+        };
+
     let bound = std::cmp::min(
         state.validators().len() as u64,
         spec.max_validators_per_withdrawals_sweep,
@@ -524,7 +566,7 @@ pub fn get_expected_withdrawals<E: EthSpec>(
                 index: withdrawal_index,
                 validator_index,
                 address: validator
-                    .get_eth1_withdrawal_address(spec)
+                    .get_execution_withdrawal_address(spec)
                     .ok_or(BlockProcessingError::WithdrawalCredentialsInvalid)?,
                 amount: balance,
             });
@@ -534,9 +576,12 @@ pub fn get_expected_withdrawals<E: EthSpec>(
                 index: withdrawal_index,
                 validator_index,
                 address: validator
-                    .get_eth1_withdrawal_address(spec)
+                    .get_execution_withdrawal_address(spec)
                     .ok_or(BlockProcessingError::WithdrawalCredentialsInvalid)?,
-                amount: balance.safe_sub(spec.max_effective_balance)?,
+                amount: balance.safe_sub(
+                    validator
+                        .get_validator_max_effective_balance(spec, state.fork_name_unchecked()),
+                )?,
             });
             withdrawal_index.safe_add_assign(1)?;
         }
@@ -548,7 +593,7 @@ pub fn get_expected_withdrawals<E: EthSpec>(
             .safe_rem(state.validators().len() as u64)?;
     }

-    Ok(withdrawals.into())
+    Ok((withdrawals.into(), partial_withdrawals_count))
 }

 /// Apply withdrawals to the state.
@@ -558,9 +603,9 @@ pub fn process_withdrawals<E: EthSpec, Payload: AbstractExecPayload<E>>(
     spec: &ChainSpec,
 ) -> Result<(), BlockProcessingError> {
     match state {
-        BeaconState::Bellatrix(_) => Ok(()),
         BeaconState::Capella(_) | BeaconState::Deneb(_) | BeaconState::Electra(_) => {
-            let expected_withdrawals = get_expected_withdrawals(state, spec)?;
+            let (expected_withdrawals, partial_withdrawals_count) =
+                get_expected_withdrawals(state, spec)?;
             let expected_root = expected_withdrawals.tree_hash_root();
             let withdrawals_root = payload.withdrawals_root()?;
@@ -579,6 +624,17 @@ pub fn process_withdrawals<E: EthSpec, Payload: AbstractExecPayload<E>>(
                 )?;
             }

+            // Update pending partial withdrawals [New in Electra:EIP7251]
+            if let Some(partial_withdrawals_count) = partial_withdrawals_count {
+                // TODO(electra): Use efficient pop_front after milhouse release https://github.com/sigp/milhouse/pull/38
+                let new_partial_withdrawals = state
+                    .pending_partial_withdrawals()?
+                    .iter_from(partial_withdrawals_count)?
+                    .cloned()
+                    .collect::<Vec<_>>();
+                *state.pending_partial_withdrawals_mut()? = List::new(new_partial_withdrawals)?;
+            }
+
             // Update the next withdrawal index if this block contained withdrawals
             if let Some(latest_withdrawal) = expected_withdrawals.last() {
                 *state.next_withdrawal_index_mut()? = latest_withdrawal.index.safe_add(1)?;
@@ -606,6 +662,6 @@ pub fn process_withdrawals<E: EthSpec, Payload: AbstractExecPayload<E>>(
             Ok(())
         }
         // these shouldn't even be encountered but they're here for completeness
-        BeaconState::Base(_) | BeaconState::Altair(_) => Ok(()),
+        BeaconState::Base(_) | BeaconState::Altair(_) | BeaconState::Bellatrix(_) => Ok(()),
     }
 }
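A worked example of the partial-withdrawal clamp introduced above, with illustrative gwei values (assuming `min_activation_balance` is 32 ETH, as in the Electra presets):

let min_activation_balance = 32_000_000_000u64; // 32 ETH in gwei
let withdrawal_balance = 40_000_000_000u64;     // validator's current balance: 40 ETH
let requested = 5_000_000_000u64;               // pending_partial_withdrawal.amount: 5 ETH
// withdrawable_balance = min(balance - 32 ETH, requested) = min(8 ETH, 5 ETH)
let withdrawable = std::cmp::min(withdrawal_balance - min_activation_balance, requested);
assert_eq!(withdrawable, 5_000_000_000);
// With only 35 ETH of balance, the excess (3 ETH) would instead cap the withdrawal
// below the requested amount.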
diff --git a/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs b/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs
index 74477f5e48..a0c044219d 100644
--- a/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs
+++ b/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs
@@ -171,6 +171,7 @@ where
         self.include_exits(block)?;
         self.include_sync_aggregate(block)?;
         self.include_bls_to_execution_changes(block)?;
+        self.include_consolidations(block)?;
         Ok(())
     }
@@ -359,6 +360,27 @@ where
         Ok(())
     }

+    /// Includes all signatures in `self.block.body.consolidations` for verification.
+    pub fn include_consolidations<Payload: AbstractExecPayload<E>>(
+        &mut self,
+        block: &'a SignedBeaconBlock<E, Payload>,
+    ) -> Result<()> {
+        if let Ok(consolidations) = block.message().body().consolidations() {
+            self.sets.sets.reserve(consolidations.len());
+            for consolidation in consolidations {
+                let set = consolidation_signature_set(
+                    self.state,
+                    self.get_pubkey.clone(),
+                    consolidation,
+                    self.spec,
+                )?;
+
+                self.sets.push(set);
+            }
+        }
+        Ok(())
+    }
+
     /// Verify all the signatures that have been included in `self`, returning `true` if and only if
     /// all the signatures are valid.
     ///
diff --git a/consensus/state_processing/src/per_block_processing/errors.rs b/consensus/state_processing/src/per_block_processing/errors.rs
index 71a284dea4..cebb10b607 100644
--- a/consensus/state_processing/src/per_block_processing/errors.rs
+++ b/consensus/state_processing/src/per_block_processing/errors.rs
@@ -89,6 +89,46 @@ pub enum BlockProcessingError {
         found: Hash256,
     },
     WithdrawalCredentialsInvalid,
+    TooManyPendingConsolidations {
+        consolidations: usize,
+        limit: usize,
+    },
+    ConsolidationChurnLimitTooLow {
+        churn_limit: u64,
+        minimum: u64,
+    },
+    MatchingSourceTargetConsolidation {
+        index: u64,
+    },
+    InactiveConsolidationSource {
+        index: u64,
+        current_epoch: Epoch,
+    },
+    InactiveConsolidationTarget {
+        index: u64,
+        current_epoch: Epoch,
+    },
+    SourceValidatorExiting {
+        index: u64,
+    },
+    TargetValidatorExiting {
+        index: u64,
+    },
+    FutureConsolidationEpoch {
+        current_epoch: Epoch,
+        consolidation_epoch: Epoch,
+    },
+    NoSourceExecutionWithdrawalCredential {
+        index: u64,
+    },
+    NoTargetExecutionWithdrawalCredential {
+        index: u64,
+    },
+    MismatchedWithdrawalCredentials {
+        source_address: Address,
+        target_address: Address,
+    },
+    InavlidConsolidationSignature,
     PendingAttestationInElectra,
 }
@@ -412,7 +452,10 @@ pub enum ExitInvalid {
     /// The specified validator has already initiated exit.
     AlreadyInitiatedExit(u64),
     /// The exit is for a future epoch.
-    FutureEpoch { state: Epoch, exit: Epoch },
+    FutureEpoch {
+        state: Epoch,
+        exit: Epoch,
+    },
     /// The validator has not been active for long enough.
     TooYoungToExit {
         current_epoch: Epoch,
@@ -423,6 +466,7 @@ pub enum ExitInvalid {
     /// There was an error whilst attempting to get a set of signatures. The signatures may have
     /// been invalid or an internal error occurred.
     SignatureSetError(SignatureSetError),
+    PendingWithdrawalInQueue(u64),
 }

 #[derive(Debug, PartialEq, Clone)]
diff --git a/consensus/state_processing/src/per_block_processing/process_operations.rs b/consensus/state_processing/src/per_block_processing/process_operations.rs
index bd354901a8..ff126beabe 100644
--- a/consensus/state_processing/src/per_block_processing/process_operations.rs
+++ b/consensus/state_processing/src/per_block_processing/process_operations.rs
@@ -4,8 +4,11 @@ use crate::common::{
     slash_validator,
 };
 use crate::per_block_processing::errors::{BlockProcessingError, IntoWithIndex};
+use crate::signature_sets::consolidation_signature_set;
 use crate::VerifySignatures;
 use types::consts::altair::{PARTICIPATION_FLAG_WEIGHTS, PROPOSER_WEIGHT, WEIGHT_DENOMINATOR};
+use types::typenum::U33;
+use types::validator::is_compounding_withdrawal_credential;

 pub fn process_operations<E: EthSpec, Payload: AbstractExecPayload<E>>(
     state: &mut BeaconState<E>,
@@ -36,6 +39,18 @@ pub fn process_operations<E: EthSpec, Payload: AbstractExecPayload<E>>(
         process_bls_to_execution_changes(state, bls_to_execution_changes, verify_signatures, spec)?;
     }

+    if state.fork_name_unchecked().electra_enabled() {
+        let requests = block_body.execution_payload()?.withdrawal_requests()?;
+        if let Some(requests) = requests {
+            process_execution_layer_withdrawal_requests(state, &requests, spec)?;
+        }
+        let receipts = block_body.execution_payload()?.deposit_receipts()?;
+        if let Some(receipts) = receipts {
+            process_deposit_receipts(state, &receipts, spec)?;
+        }
+        process_consolidations(state, block_body.consolidations()?, verify_signatures, spec)?;
+    }
+
     Ok(())
 }
@@ -354,17 +369,34 @@ pub fn process_deposits<E: EthSpec>(
     deposits: &[Deposit],
     spec: &ChainSpec,
 ) -> Result<(), BlockProcessingError> {
-    let expected_deposit_len = std::cmp::min(
-        E::MaxDeposits::to_u64(),
-        state.get_outstanding_deposit_len()?,
-    );
-    block_verify!(
-        deposits.len() as u64 == expected_deposit_len,
-        BlockProcessingError::DepositCountInvalid {
-            expected: expected_deposit_len as usize,
-            found: deposits.len(),
-        }
-    );
+    // [Modified in Electra:EIP6110]
+    // Disable former deposit mechanism once all prior deposits are processed
+    //
+    // If `deposit_receipts_start_index` does not exist as a field on `state`, electra is disabled
+    // which means we always want to use the old check, so this field defaults to `u64::MAX`.
+    let eth1_deposit_index_limit = state.deposit_receipts_start_index().unwrap_or(u64::MAX);
+
+    if state.eth1_deposit_index() < eth1_deposit_index_limit {
+        let expected_deposit_len = std::cmp::min(
+            E::MaxDeposits::to_u64(),
+            state.get_outstanding_deposit_len()?,
+        );
+        block_verify!(
+            deposits.len() as u64 == expected_deposit_len,
+            BlockProcessingError::DepositCountInvalid {
+                expected: expected_deposit_len as usize,
+                found: deposits.len(),
+            }
+        );
+    } else {
+        block_verify!(
+            deposits.len() as u64 == 0,
+            BlockProcessingError::DepositCountInvalid {
+                expected: 0,
+                found: deposits.len(),
+            }
+        );
+    }

     // Verify merkle proofs in parallel.
     deposits
@@ -382,60 +414,96 @@
     // Update the state in series.
     for deposit in deposits {
-        apply_deposit(state, deposit, spec, false)?;
+        apply_deposit(state, deposit.data.clone(), None, true, spec)?;
     }

     Ok(())
 }

-/// Process a single deposit, optionally verifying its merkle proof.
+/// Process a single deposit, verifying its merkle proof if provided.
 pub fn apply_deposit<E: EthSpec>(
     state: &mut BeaconState<E>,
-    deposit: &Deposit,
+    deposit_data: DepositData,
+    proof: Option<FixedVector<Hash256, U33>>,
+    increment_eth1_deposit_index: bool,
     spec: &ChainSpec,
-    verify_merkle_proof: bool,
 ) -> Result<(), BlockProcessingError> {
     let deposit_index = state.eth1_deposit_index() as usize;
-    if verify_merkle_proof {
-        verify_deposit_merkle_proof(state, deposit, state.eth1_deposit_index(), spec)
+    if let Some(proof) = proof {
+        let deposit = Deposit {
+            proof,
+            data: deposit_data.clone(),
+        };
+        verify_deposit_merkle_proof(state, &deposit, state.eth1_deposit_index(), spec)
             .map_err(|e| e.into_with_index(deposit_index))?;
     }

-    state.eth1_deposit_index_mut().safe_add_assign(1)?;
+    if increment_eth1_deposit_index {
+        state.eth1_deposit_index_mut().safe_add_assign(1)?;
+    }

     // Get an `Option<u64>` where `u64` is the validator index if this deposit public key
     // already exists in the beacon_state.
-    let validator_index = get_existing_validator_index(state, &deposit.data.pubkey)
+    let validator_index = get_existing_validator_index(state, &deposit_data.pubkey)
         .map_err(|e| e.into_with_index(deposit_index))?;

-    let amount = deposit.data.amount;
+    let amount = deposit_data.amount;

     if let Some(index) = validator_index {
-        // Update the existing validator balance.
-        increase_balance(state, index as usize, amount)?;
+        // [Modified in Electra:EIP7251]
+        if let Ok(pending_balance_deposits) = state.pending_balance_deposits_mut() {
+            pending_balance_deposits.push(PendingBalanceDeposit { index, amount })?;
+
+            let validator = state
+                .validators()
+                .get(index as usize)
+                .ok_or(BeaconStateError::UnknownValidator(index as usize))?;
+
+            if is_compounding_withdrawal_credential(deposit_data.withdrawal_credentials, spec)
+                && validator.has_eth1_withdrawal_credential(spec)
+                && is_valid_deposit_signature(&deposit_data, spec).is_ok()
+            {
+                state.switch_to_compounding_validator(index as usize, spec)?;
+            }
+        } else {
+            // Update the existing validator balance.
+            increase_balance(state, index as usize, amount)?;
+        }
     } else {
         // The signature should be checked for new validators. Return early for a bad
         // signature.
-        if verify_deposit_signature(&deposit.data, spec).is_err() {
+        if is_valid_deposit_signature(&deposit_data, spec).is_err() {
             return Ok(());
         }
+        let new_validator_index = state.validators().len();
+
+        // [Modified in Electra:EIP7251]
+        let (effective_balance, state_balance) = if state.fork_name_unchecked() >= ForkName::Electra
+        {
+            (0, 0)
+        } else {
+            (
+                std::cmp::min(
+                    amount.safe_sub(amount.safe_rem(spec.effective_balance_increment)?)?,
+                    spec.max_effective_balance,
+                ),
+                amount,
+            )
+        };

         // Create a new validator.
         let validator = Validator {
-            pubkey: deposit.data.pubkey,
-            withdrawal_credentials: deposit.data.withdrawal_credentials,
+            pubkey: deposit_data.pubkey,
+            withdrawal_credentials: deposit_data.withdrawal_credentials,
             activation_eligibility_epoch: spec.far_future_epoch,
             activation_epoch: spec.far_future_epoch,
             exit_epoch: spec.far_future_epoch,
             withdrawable_epoch: spec.far_future_epoch,
-            effective_balance: std::cmp::min(
-                amount.safe_sub(amount.safe_rem(spec.effective_balance_increment)?)?,
-                spec.max_effective_balance,
-            ),
+            effective_balance,
             slashed: false,
         };
         state.validators_mut().push(validator)?;
-        state.balances_mut().push(deposit.data.amount)?;
+        state.balances_mut().push(state_balance)?;

         // Altair or later initializations.
         if let Ok(previous_epoch_participation) = state.previous_epoch_participation_mut() {
@@ -447,6 +515,280 @@ pub fn apply_deposit<E: EthSpec>(
         if let Ok(inactivity_scores) = state.inactivity_scores_mut() {
             inactivity_scores.push(0)?;
         }
+
+        // [New in Electra:EIP7251]
+        if let Ok(pending_balance_deposits) = state.pending_balance_deposits_mut() {
+            pending_balance_deposits.push(PendingBalanceDeposit {
+                index: new_validator_index as u64,
+                amount,
+            })?;
+        }
+    }
+
+    Ok(())
+}
+
+pub fn process_execution_layer_withdrawal_requests<E: EthSpec>(
+    state: &mut BeaconState<E>,
+    requests: &[ExecutionLayerWithdrawalRequest],
+    spec: &ChainSpec,
+) -> Result<(), BlockProcessingError> {
+    for request in requests {
+        let amount = request.amount;
+        let is_full_exit_request = amount == spec.full_exit_request_amount;
+
+        // If partial withdrawal queue is full, only full exits are processed
+        if state.pending_partial_withdrawals()?.len() == E::pending_partial_withdrawals_limit()
+            && !is_full_exit_request
+        {
+            continue;
+        }
+
+        // Verify pubkey exists
+        let index_opt = state.get_validator_index(&request.validator_pubkey)?;
+        let Some(index) = index_opt else {
+            continue;
+        };
+
+        let validator = state.get_validator(index)?;
+
+        // Verify withdrawal credentials
+        let has_correct_credential = validator.has_execution_withdrawal_credential(spec);
+        let is_correct_source_address = validator
+            .get_execution_withdrawal_address(spec)
+            .map(|addr| addr == request.source_address)
+            .unwrap_or(false);
+
+        if !(has_correct_credential && is_correct_source_address) {
+            continue;
+        }
+
+        // Verify the validator is active
+        if !validator.is_active_at(state.current_epoch()) {
+            continue;
+        }
+
+        // Verify exit has not been initiated
+        if validator.exit_epoch != spec.far_future_epoch {
+            continue;
+        }
+
+        // Verify the validator has been active long enough
+        if state.current_epoch()
+            < validator
+                .activation_epoch
+                .safe_add(spec.shard_committee_period)?
+        {
+            continue;
+        }
+
+        let pending_balance_to_withdraw = state.get_pending_balance_to_withdraw(index)?;
+        if is_full_exit_request {
+            // Only exit validator if it has no pending withdrawals in the queue
+            if pending_balance_to_withdraw == 0 {
+                initiate_validator_exit(state, index, spec)?
+            }
+            continue;
+        }
+
+        let balance = state.get_balance(index)?;
+        let has_sufficient_effective_balance =
+            validator.effective_balance >= spec.min_activation_balance;
+        let has_excess_balance = balance
+            > spec
+                .min_activation_balance
+                .safe_add(pending_balance_to_withdraw)?;
+
+        // Only allow partial withdrawals with compounding withdrawal credentials
+        if validator.has_compounding_withdrawal_credential(spec)
+            && has_sufficient_effective_balance
+            && has_excess_balance
+        {
+            let to_withdraw = std::cmp::min(
+                balance
+                    .safe_sub(spec.min_activation_balance)?
+                    .safe_sub(pending_balance_to_withdraw)?,
+                amount,
+            );
+            let exit_queue_epoch = state.compute_exit_epoch_and_update_churn(to_withdraw, spec)?;
+            let withdrawable_epoch =
+                exit_queue_epoch.safe_add(spec.min_validator_withdrawability_delay)?;
+            state
+                .pending_partial_withdrawals_mut()?
+                .push(PendingPartialWithdrawal {
+                    index: index as u64,
+                    amount: to_withdraw,
+                    withdrawable_epoch,
+                })?;
+        }
+    }
+    Ok(())
+}
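The refactor above replaces the old `verify_merkle_proof: bool` flag with two explicit parameters, giving `apply_deposit` three call shapes in this patch (each call line quoted from a hunk in this diff):

// Genesis and the eth1 genesis service: verify the proof, bump eth1_deposit_index.
apply_deposit(&mut state, data, Some(proof), true, spec)?;
// Block-body deposits: proofs were already batch-verified, so no proof here, but
// eth1_deposit_index still advances.
apply_deposit(state, deposit.data.clone(), None, true, spec)?;
// EIP-6110 deposit receipts (`process_deposit_receipts` below): no proof and no
// index increment, since receipts are not part of the legacy eth1 deposit tree.
apply_deposit(state, deposit_data, None, false, spec)?;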
+
+pub fn process_deposit_receipts<E: EthSpec>(
+    state: &mut BeaconState<E>,
+    receipts: &[DepositReceipt],
+    spec: &ChainSpec,
+) -> Result<(), BlockProcessingError> {
+    for receipt in receipts {
+        // Set deposit receipt start index
+        if state.deposit_receipts_start_index()? == spec.unset_deposit_receipts_start_index {
+            *state.deposit_receipts_start_index_mut()? = receipt.index
+        }
+        let deposit_data = DepositData {
+            pubkey: receipt.pubkey,
+            withdrawal_credentials: receipt.withdrawal_credentials,
+            amount: receipt.amount,
+            signature: receipt.signature.clone().into(),
+        };
+        apply_deposit(state, deposit_data, None, false, spec)?
+    }
+
+    Ok(())
+}
+
+pub fn process_consolidations<E: EthSpec>(
+    state: &mut BeaconState<E>,
+    consolidations: &[SignedConsolidation],
+    verify_signatures: VerifySignatures,
+    spec: &ChainSpec,
+) -> Result<(), BlockProcessingError> {
+    if consolidations.is_empty() {
+        return Ok(());
+    }
+
+    // If the pending consolidations queue is full, no consolidations are allowed in the block
+    let pending_consolidations = state.pending_consolidations()?.len();
+    let pending_consolidations_limit = E::pending_consolidations_limit();
+    block_verify! {
+        pending_consolidations < pending_consolidations_limit,
+        BlockProcessingError::TooManyPendingConsolidations {
+            consolidations: pending_consolidations,
+            limit: pending_consolidations_limit
+        }
+    }
+
+    // If there is too little available consolidation churn limit, no consolidations are allowed in the block
+    let churn_limit = state.get_consolidation_churn_limit(spec)?;
+    block_verify! {
+        churn_limit > spec.min_activation_balance,
+        BlockProcessingError::ConsolidationChurnLimitTooLow {
+            churn_limit,
+            minimum: spec.min_activation_balance
+        }
+    }
+
+    for signed_consolidation in consolidations {
+        let consolidation = signed_consolidation.message.clone();
+
+        // Verify that source != target, so a consolidation cannot be used as an exit.
+        block_verify! {
+            consolidation.source_index != consolidation.target_index,
+            BlockProcessingError::MatchingSourceTargetConsolidation {
+                index: consolidation.source_index
+            }
+        }
+
+        let source_validator = state.get_validator(consolidation.source_index as usize)?;
+        let target_validator = state.get_validator(consolidation.target_index as usize)?;
+
+        // Verify the source and the target are active
+        let current_epoch = state.current_epoch();
+        block_verify! {
+            source_validator.is_active_at(current_epoch),
+            BlockProcessingError::InactiveConsolidationSource {
+                index: consolidation.source_index,
+                current_epoch
+            }
+        }
+        block_verify! {
+            target_validator.is_active_at(current_epoch),
+            BlockProcessingError::InactiveConsolidationTarget {
+                index: consolidation.target_index,
+                current_epoch
+            }
+        }
+
+        // Verify exits for source and target have not been initiated
+        block_verify! {
+            source_validator.exit_epoch == spec.far_future_epoch,
+            BlockProcessingError::SourceValidatorExiting {
+                index: consolidation.source_index,
+            }
+        }
+        block_verify! {
+            target_validator.exit_epoch == spec.far_future_epoch,
+            BlockProcessingError::TargetValidatorExiting {
+                index: consolidation.target_index,
+            }
+        }
+
+        // Consolidations must specify an epoch when they become valid; they are not valid before then
+        block_verify! {
+            current_epoch >= consolidation.epoch,
+            BlockProcessingError::FutureConsolidationEpoch {
+                current_epoch,
+                consolidation_epoch: consolidation.epoch
+            }
+        }
+
+        // Verify the source and the target have Execution layer withdrawal credentials
+        block_verify! {
+            source_validator.has_execution_withdrawal_credential(spec),
+            BlockProcessingError::NoSourceExecutionWithdrawalCredential {
+                index: consolidation.source_index,
+            }
+        }
+        block_verify! {
+            target_validator.has_execution_withdrawal_credential(spec),
+            BlockProcessingError::NoTargetExecutionWithdrawalCredential {
+                index: consolidation.target_index,
+            }
+        }
+
+        // Verify the same withdrawal address
+        let source_address = source_validator
+            .get_execution_withdrawal_address(spec)
+            .ok_or(BeaconStateError::NonExecutionAddresWithdrawalCredential)?;
+        let target_address = target_validator
+            .get_execution_withdrawal_address(spec)
+            .ok_or(BeaconStateError::NonExecutionAddresWithdrawalCredential)?;
+        block_verify! {
+            source_address == target_address,
+            BlockProcessingError::MismatchedWithdrawalCredentials {
+                source_address,
+                target_address
+            }
+        }
+
+        if verify_signatures.is_true() {
+            let signature_set = consolidation_signature_set(
+                state,
+                |i| get_pubkey_from_state(state, i),
+                signed_consolidation,
+                spec,
+            )?;
+            block_verify! {
+                signature_set.verify(),
+                BlockProcessingError::InavlidConsolidationSignature
+            }
+        }
+        let exit_epoch = state.compute_consolidation_epoch_and_update_churn(
+            source_validator.effective_balance,
+            spec,
+        )?;
+        let source_validator = state.get_validator_mut(consolidation.source_index as usize)?;
+        // Initiate source validator exit and append pending consolidation
+        source_validator.exit_epoch = exit_epoch;
+        source_validator.withdrawable_epoch = source_validator
+            .exit_epoch
+            .safe_add(spec.min_validator_withdrawability_delay)?;
+        state
+            .pending_consolidations_mut()?
+            .push(PendingConsolidation {
+                source_index: consolidation.source_index,
+                target_index: consolidation.target_index,
+            })?;
+    }

     Ok(())
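One detail worth noting before the `signature_sets.rs` hunk below: consolidations are signed over the genesis fork version rather than the current fork, so a signature produced once stays verifiable across future forks. The relevant calls, as they appear in `consolidation_signature_set`:

let domain = spec.compute_domain(
    Domain::Consolidation,
    spec.genesis_fork_version, // genesis, not the current fork version
    state.genesis_validators_root(),
);
let signing_root = consolidation.message.signing_root(domain);
// A single signature must verify against both the source and the target
// validators' pubkeys, hence `SignatureSet::multiple_pubkeys` below.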
diff --git a/consensus/state_processing/src/per_block_processing/signature_sets.rs b/consensus/state_processing/src/per_block_processing/signature_sets.rs
index 2e00ee0341..3c683766ad 100644
--- a/consensus/state_processing/src/per_block_processing/signature_sets.rs
+++ b/consensus/state_processing/src/per_block_processing/signature_sets.rs
@@ -11,8 +11,8 @@ use types::{
     BeaconStateError, ChainSpec, DepositData, Domain, Epoch, EthSpec, Fork, Hash256,
     InconsistentFork, IndexedAttestation, IndexedAttestationRef, ProposerSlashing, PublicKey,
     PublicKeyBytes, Signature, SignedAggregateAndProof, SignedBeaconBlock, SignedBeaconBlockHeader,
-    SignedBlsToExecutionChange, SignedContributionAndProof, SignedRoot, SignedVoluntaryExit,
-    SigningData, Slot, SyncAggregate, SyncAggregatorSelectionData, Unsigned,
+    SignedBlsToExecutionChange, SignedConsolidation, SignedContributionAndProof, SignedRoot,
+    SignedVoluntaryExit, SigningData, Slot, SyncAggregate, SyncAggregatorSelectionData, Unsigned,
 };

 pub type Result<T> = std::result::Result<T, Error>;
@@ -664,3 +664,37 @@
         message,
     )))
 }
+
+/// Returns two signature sets, one for the source and one for the target validator
+/// in the `SignedConsolidation`.
+pub fn consolidation_signature_set<'a, E, F>(
+    state: &'a BeaconState<E>,
+    get_pubkey: F,
+    consolidation: &'a SignedConsolidation,
+    spec: &'a ChainSpec,
+) -> Result<SignatureSet<'a>>
+where
+    E: EthSpec,
+    F: Fn(usize) -> Option<Cow<'a, PublicKey>>,
+{
+    let source_index = consolidation.message.source_index as usize;
+    let target_index = consolidation.message.target_index as usize;
+
+    let domain = spec.compute_domain(
+        Domain::Consolidation,
+        spec.genesis_fork_version,
+        state.genesis_validators_root(),
+    );
+
+    let message = consolidation.message.signing_root(domain);
+    let source_pubkey =
+        get_pubkey(source_index).ok_or(Error::ValidatorUnknown(source_index as u64))?;
+    let target_pubkey =
+        get_pubkey(target_index).ok_or(Error::ValidatorUnknown(target_index as u64))?;
+
+    Ok(SignatureSet::multiple_pubkeys(
+        &consolidation.signature,
+        vec![source_pubkey, target_pubkey],
+        message,
+    ))
+}
diff --git a/consensus/state_processing/src/per_block_processing/verify_attestation.rs b/consensus/state_processing/src/per_block_processing/verify_attestation.rs
index 6bfb5d7cfe..6bfb51d475 100644
--- a/consensus/state_processing/src/per_block_processing/verify_attestation.rs
+++ b/consensus/state_processing/src/per_block_processing/verify_attestation.rs
@@ -68,10 +68,20 @@ pub fn verify_attestation_for_state<'ctxt, E: EthSpec>(
 ) -> Result<IndexedAttestationRef<'ctxt, E>> {
     let data = attestation.data();

-    verify!(
-        data.index < state.get_committee_count_at_slot(data.slot)?,
-        Invalid::BadCommitteeIndex
-    );
+    // TODO(electra) choosing a validation based on the attestation's fork
+    // rather than the state's fork makes this simple, but technically the spec
+    // defines this verification based on the state's fork.
+    match attestation {
+        AttestationRef::Base(_) => {
+            verify!(
+                data.index < state.get_committee_count_at_slot(data.slot)?,
+                Invalid::BadCommitteeIndex
+            );
+        }
+        AttestationRef::Electra(_) => {
+            verify!(data.index == 0, Invalid::BadCommitteeIndex);
+        }
+    }

     // Verify the Casper FFG vote.
     verify_casper_ffg_vote(attestation, state)?;
diff --git a/consensus/state_processing/src/per_block_processing/verify_deposit.rs b/consensus/state_processing/src/per_block_processing/verify_deposit.rs
index a964f3b574..c996e580a7 100644
--- a/consensus/state_processing/src/per_block_processing/verify_deposit.rs
+++ b/consensus/state_processing/src/per_block_processing/verify_deposit.rs
@@ -14,7 +14,7 @@ fn error(reason: DepositInvalid) -> BlockOperationError<DepositInvalid> {
 /// Verify `Deposit.pubkey` signed `Deposit.signature`.
 ///
 /// Spec v0.12.1
-pub fn verify_deposit_signature(deposit_data: &DepositData, spec: &ChainSpec) -> Result<()> {
+pub fn is_valid_deposit_signature(deposit_data: &DepositData, spec: &ChainSpec) -> Result<()> {
     let (public_key, signature, msg) = deposit_pubkey_signature_message(deposit_data, spec)
         .ok_or_else(|| error(DepositInvalid::BadBlsBytes))?;
diff --git a/consensus/state_processing/src/per_block_processing/verify_exit.rs b/consensus/state_processing/src/per_block_processing/verify_exit.rs
index fc258d3829..dea17dbc0c 100644
--- a/consensus/state_processing/src/per_block_processing/verify_exit.rs
+++ b/consensus/state_processing/src/per_block_processing/verify_exit.rs
@@ -79,5 +79,16 @@ pub fn verify_exit(
         );
     }

+    // [New in Electra:EIP7251]
+    // Only exit validator if it has no pending withdrawals in the queue
+    if let Ok(pending_balance_to_withdraw) =
+        state.get_pending_balance_to_withdraw(exit.validator_index as usize)
+    {
+        verify!(
+            pending_balance_to_withdraw == 0,
+            ExitInvalid::PendingWithdrawalInQueue(exit.validator_index)
+        );
+    }
+
     Ok(())
 }
diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs
index bc20f3aa7b..0426d43cac 100644
--- a/consensus/types/src/beacon_state.rs
+++ b/consensus/types/src/beacon_state.rs
@@ -159,6 +159,8 @@ pub enum Error {
     IndexNotSupported(usize),
     InvalidFlagIndex(usize),
     MerkleTreeError(merkle_proof::MerkleTreeError),
+    PartialWithdrawalCountInvalid(usize),
+    NonExecutionAddresWithdrawalCredential,
     NoCommitteeFound(CommitteeIndex),
     InvalidCommitteeIndex(CommitteeIndex),
     InvalidSelectionProof {
@@ -1475,6 +1477,14 @@ impl<E: EthSpec> BeaconState<E> {
         }
     }

+    /// Get the balance of a single validator.
+    pub fn get_balance(&self, validator_index: usize) -> Result<u64, Error> {
+        self.balances()
+            .get(validator_index)
+            .ok_or(Error::BalancesOutOfBounds(validator_index))
+            .copied()
+    }
+
     /// Get a mutable reference to the balance of a single validator.
     pub fn get_balance_mut(&mut self, validator_index: usize) -> Result<&mut u64, Error> {
         self.balances_mut()
@@ -2105,11 +2115,12 @@ impl<E: EthSpec> BeaconState<E> {
         &self,
         validator_index: usize,
         spec: &ChainSpec,
+        current_fork: ForkName,
     ) -> Result<u64, Error> {
         let max_effective_balance = self
             .validators()
             .get(validator_index)
-            .map(|validator| validator.get_validator_max_effective_balance(spec))
+            .map(|validator| validator.get_validator_max_effective_balance(spec, current_fork))
             .ok_or(Error::UnknownValidator(validator_index))?;
         Ok(std::cmp::min(
             *self
diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs
index 7609e36035..d2f5909396 100644
--- a/consensus/types/src/chain_spec.rs
+++ b/consensus/types/src/chain_spec.rs
@@ -387,6 +387,19 @@ impl ChainSpec {
         }
     }

+    /// For a given `BeaconState`, return the whistleblower reward quotient associated with its variant.
+    pub fn whistleblower_reward_quotient_for_state<E: EthSpec>(
+        &self,
+        state: &BeaconState<E>,
+    ) -> u64 {
+        let fork_name = state.fork_name_unchecked();
+        if fork_name >= ForkName::Electra {
+            self.whistleblower_reward_quotient_electra
+        } else {
+            self.whistleblower_reward_quotient
+        }
+    }
+
     /// Returns a full `Fork` struct for a given epoch.
     pub fn fork_at_epoch(&self, epoch: Epoch) -> Fork {
         let current_fork_name = self.fork_name_at_epoch(epoch);
diff --git a/consensus/types/src/consolidation.rs b/consensus/types/src/consolidation.rs
index 09a2d4bb0c..6cc4aa90f2 100644
--- a/consensus/types/src/consolidation.rs
+++ b/consensus/types/src/consolidation.rs
@@ -1,5 +1,5 @@
-use crate::test_utils::TestRandom;
 use crate::Epoch;
+use crate::{test_utils::TestRandom, SignedRoot};
 use serde::{Deserialize, Serialize};
 use ssz_derive::{Decode, Encode};
 use test_random_derive::TestRandom;
@@ -27,6 +27,8 @@ pub struct Consolidation {
     pub epoch: Epoch,
 }

+impl SignedRoot for Consolidation {}
+
 #[cfg(test)]
 mod tests {
     use super::*;
diff --git a/consensus/types/src/payload.rs b/consensus/types/src/payload.rs
index 80a70c171f..644d401ec7 100644
--- a/consensus/types/src/payload.rs
+++ b/consensus/types/src/payload.rs
@@ -39,6 +39,15 @@ pub trait ExecPayload<E: EthSpec>: Debug + Clone + PartialEq + Hash + TreeHash +
     /// fork-specific fields
     fn withdrawals_root(&self) -> Result<Hash256, Error>;
     fn blob_gas_used(&self) -> Result<u64, Error>;
+    fn withdrawal_requests(
+        &self,
+    ) -> Result<
+        Option<VariableList<ExecutionLayerWithdrawalRequest, E::MaxWithdrawalRequestsPerPayload>>,
+        Error,
+    >;
+    fn deposit_receipts(
+        &self,
+    ) -> Result<Option<VariableList<DepositReceipt, E::MaxDepositReceiptsPerPayload>>, Error>;

     /// Is this a default payload with 0x0 roots for transactions and withdrawals?
     fn is_default_with_zero_roots(&self) -> bool;
@@ -278,6 +287,35 @@ impl<E: EthSpec> ExecPayload<E> for FullPayload<E> {
         }
     }

+    fn withdrawal_requests(
+        &self,
+    ) -> Result<
+        Option<VariableList<ExecutionLayerWithdrawalRequest, E::MaxWithdrawalRequestsPerPayload>>,
+        Error,
+    > {
+        match self {
+            FullPayload::Bellatrix(_) | FullPayload::Capella(_) | FullPayload::Deneb(_) => {
+                Err(Error::IncorrectStateVariant)
+            }
+            FullPayload::Electra(inner) => {
+                Ok(Some(inner.execution_payload.withdrawal_requests.clone()))
+            }
+        }
+    }
+
+    fn deposit_receipts(
+        &self,
+    ) -> Result<Option<VariableList<DepositReceipt, E::MaxDepositReceiptsPerPayload>>, Error> {
+        match self {
+            FullPayload::Bellatrix(_) | FullPayload::Capella(_) | FullPayload::Deneb(_) => {
+                Err(Error::IncorrectStateVariant)
+            }
+            FullPayload::Electra(inner) => {
+                Ok(Some(inner.execution_payload.deposit_receipts.clone()))
+            }
+        }
+    }
+
     fn is_default_with_zero_roots<'a>(&'a self) -> bool {
         map_full_payload_ref!(&'a _, self.to_ref(), move |payload, cons| {
             cons(payload);
@@ -410,6 +448,35 @@ impl<'b, E: EthSpec> ExecPayload<E> for FullPayloadRef<'b, E> {
         }
     }

+    fn withdrawal_requests(
+        &self,
+    ) -> Result<
+        Option<VariableList<ExecutionLayerWithdrawalRequest, E::MaxWithdrawalRequestsPerPayload>>,
+        Error,
+    > {
+        match self {
+            FullPayloadRef::Bellatrix(_)
+            | FullPayloadRef::Capella(_)
+            | FullPayloadRef::Deneb(_) => Err(Error::IncorrectStateVariant),
+            FullPayloadRef::Electra(inner) => {
+                Ok(Some(inner.execution_payload.withdrawal_requests.clone()))
+            }
+        }
+    }
+
+    fn deposit_receipts(
+        &self,
+    ) -> Result<Option<VariableList<DepositReceipt, E::MaxDepositReceiptsPerPayload>>, Error> {
+        match self {
+            FullPayloadRef::Bellatrix(_)
+            | FullPayloadRef::Capella(_)
+            | FullPayloadRef::Deneb(_) => Err(Error::IncorrectStateVariant),
+            FullPayloadRef::Electra(inner) => {
+                Ok(Some(inner.execution_payload.deposit_receipts.clone()))
+            }
+        }
+    }
+
     fn is_default_with_zero_roots<'a>(&'a self) -> bool {
         map_full_payload_ref!(&'a _, self, move |payload, cons| {
             cons(payload);
@@ -590,6 +657,21 @@ impl<E: EthSpec> ExecPayload<E> for BlindedPayload<E> {
         }
     }

+    fn withdrawal_requests(
+        &self,
+    ) -> Result<
+        Option<VariableList<ExecutionLayerWithdrawalRequest, E::MaxWithdrawalRequestsPerPayload>>,
+        Error,
+    > {
+        Ok(None)
+    }
+
+    fn deposit_receipts(
+        &self,
+    ) -> Result<Option<VariableList<DepositReceipt, E::MaxDepositReceiptsPerPayload>>, Error> {
+        Ok(None)
+    }
+
     fn is_default_with_zero_roots(&self) -> bool {
         self.to_ref().is_default_with_zero_roots()
     }
@@ -691,6 +773,21 @@ impl<'b, E: EthSpec> ExecPayload<E> for BlindedPayloadRef<'b, E> {
     }
     }

+    fn withdrawal_requests(
+        &self,
+    ) -> Result<
+        Option<VariableList<ExecutionLayerWithdrawalRequest, E::MaxWithdrawalRequestsPerPayload>>,
+        Error,
+    > {
+        Ok(None)
+    }
+
+    fn deposit_receipts(
+        &self,
+    ) -> Result<Option<VariableList<DepositReceipt, E::MaxDepositReceiptsPerPayload>>, Error> {
+        Ok(None)
+    }
+
     fn is_default_with_zero_roots<'a>(&'a self) -> bool {
         map_blinded_payload_ref!(&'b _, self, move |payload, cons| {
             cons(payload);
@@ -717,7 +814,9 @@ macro_rules! impl_exec_payload_common {
         $is_default_with_empty_roots:block,
         $f:block,
         $g:block,
-        $h:block) => {
+        $h:block,
+        $i:block,
+        $j:block) => {
         impl<E: EthSpec> ExecPayload<E> for $wrapper_type<E> {
             fn block_type() -> BlockType {
                 BlockType::$block_type_variant
@@ -780,6 +879,23 @@ macro_rules! impl_exec_payload_common {
                 let h = $h;
                 h(self)
             }
+
+            fn withdrawal_requests(
+                &self,
+            ) -> Result<
+                Option<VariableList<ExecutionLayerWithdrawalRequest, E::MaxWithdrawalRequestsPerPayload>>,
+                Error,
+            > {
+                let i = $i;
+                i(self)
+            }
+
+            fn deposit_receipts(
+                &self,
+            ) -> Result<Option<VariableList<DepositReceipt, E::MaxDepositReceiptsPerPayload>>, Error> {
+                let j = $j;
+                j(self)
+            }
         }

         impl<E: EthSpec> From<$wrapped_type<E>> for $wrapper_type<E> {
@@ -825,7 +941,9 @@ macro_rules! impl_exec_payload_for_fork {
                     wrapper_ref_type.blob_gas_used()
                 };
                 c
-            }
+            },
+            { |_| { Ok(None) } },
+            { |_| { Ok(None) } }
         );

         impl TryInto<$wrapper_type_header> for BlindedPayload<E> {
@@ -912,6 +1030,35 @@ macro_rules! impl_exec_payload_for_fork {
                     wrapper_ref_type.blob_gas_used()
                 };
                 c
+            },
+            {
+                let c: for<'a> fn(
+                    &'a $wrapper_type_full<E>,
+                ) -> Result<
+                    Option<
+                        VariableList<
+                            ExecutionLayerWithdrawalRequest,
+                            E::MaxWithdrawalRequestsPerPayload,
+                        >,
+                    >,
+                    Error,
+                > = |payload: &$wrapper_type_full<E>| {
+                    let wrapper_ref_type = FullPayloadRef::$fork_variant(&payload);
+                    wrapper_ref_type.withdrawal_requests()
+                };
+                c
+            },
+            {
+                let c: for<'a> fn(
+                    &'a $wrapper_type_full<E>,
+                ) -> Result<
+                    Option<VariableList<DepositReceipt, E::MaxDepositReceiptsPerPayload>>,
+                    Error,
+                > = |payload: &$wrapper_type_full<E>| {
+                    let wrapper_ref_type = FullPayloadRef::$fork_variant(&payload);
+                    wrapper_ref_type.deposit_receipts()
+                };
+                c
             }
         );
diff --git a/consensus/types/src/validator.rs b/consensus/types/src/validator.rs
index f2b36ee153..0054e95f9d 100644
--- a/consensus/types/src/validator.rs
+++ b/consensus/types/src/validator.rs
@@ -130,9 +130,9 @@ impl Validator {
         is_compounding_withdrawal_credential(self.withdrawal_credentials, spec)
     }

-    /// Get the eth1 withdrawal address if this validator has one initialized.
-    pub fn get_eth1_withdrawal_address(&self, spec: &ChainSpec) -> Option<Address> {
-        self.has_eth1_withdrawal_credential(spec)
+    /// Get the execution withdrawal address if this validator has one initialized.
+    pub fn get_execution_withdrawal_address(&self, spec: &ChainSpec) -> Option<Address> {
+        self.has_execution_withdrawal_credential(spec)
             .then(|| {
                 self.withdrawal_credentials
                     .as_bytes()
@@ -203,7 +203,7 @@ impl Validator {
         current_fork: ForkName,
     ) -> bool {
         if current_fork.electra_enabled() {
-            self.is_partially_withdrawable_validator_electra(balance, spec)
+            self.is_partially_withdrawable_validator_electra(balance, spec, current_fork)
         } else {
             self.is_partially_withdrawable_validator_capella(balance, spec)
         }
@@ -223,8 +223,9 @@ impl Validator {
         &self,
         balance: u64,
         spec: &ChainSpec,
+        current_fork: ForkName,
     ) -> bool {
-        let max_effective_balance = self.get_validator_max_effective_balance(spec);
+        let max_effective_balance = self.get_validator_max_effective_balance(spec, current_fork);
         let has_max_effective_balance = self.effective_balance == max_effective_balance;
         let has_excess_balance = balance > max_effective_balance;
         self.has_execution_withdrawal_credential(spec)
@@ -239,11 +240,19 @@ impl Validator {
     }

     /// Returns the max effective balance for a validator in gwei.
-    pub fn get_validator_max_effective_balance(&self, spec: &ChainSpec) -> u64 {
-        if self.has_compounding_withdrawal_credential(spec) {
-            spec.max_effective_balance_electra
+    pub fn get_validator_max_effective_balance(
+        &self,
+        spec: &ChainSpec,
+        current_fork: ForkName,
+    ) -> u64 {
+        if current_fork >= ForkName::Electra {
+            if self.has_compounding_withdrawal_credential(spec) {
+                spec.max_effective_balance_electra
+            } else {
+                spec.min_activation_balance
+            }
         } else {
-            spec.min_activation_balance
+            spec.max_effective_balance
         }
     }
 }
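The fork-aware `get_validator_max_effective_balance` above reduces to a small truth table; a hedged sketch using mainnet preset values (`max_effective_balance` and `min_activation_balance` are 32 ETH, `max_effective_balance_electra` is 2048 ETH), with `validator` a hypothetical 0x02-credentialed validator:

// Pre-Electra: always spec.max_effective_balance (32 ETH), credentials irrelevant.
assert_eq!(
    validator.get_validator_max_effective_balance(&spec, ForkName::Deneb),
    spec.max_effective_balance,
);
// Electra + compounding (0x02) credentials: 2048 ETH.
assert_eq!(
    validator.get_validator_max_effective_balance(&spec, ForkName::Electra),
    spec.max_effective_balance_electra,
);
// Electra + 0x00/0x01 credentials would instead yield spec.min_activation_balance (32 ETH).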
b/beacon_node/lighthouse_network/src/rpc/handler.rs index b7166efc37..6f338ebc8b 100644 --- a/beacon_node/lighthouse_network/src/rpc/handler.rs +++ b/beacon_node/lighthouse_network/src/rpc/handler.rs @@ -352,37 +352,6 @@ where !matches!(self.state, HandlerState::Deactivated) } - // NOTE: This function gets polled to completion upon a connection close. - fn poll_close(&mut self, _: &mut Context<'_>) -> Poll> { - // Inform the network behaviour of any failed requests - - while let Some(substream_id) = self.outbound_substreams.keys().next().cloned() { - let outbound_info = self - .outbound_substreams - .remove(&substream_id) - .expect("The value must exist for a key"); - // If the state of the connection is closing, we do not need to report this case to - // the behaviour, as the connection has just closed non-gracefully - if matches!(outbound_info.state, OutboundSubstreamState::Closing(_)) { - continue; - } - - // Register this request as an RPC Error - return Poll::Ready(Some(HandlerEvent::Err(HandlerErr::Outbound { - error: RPCError::Disconnected, - proto: outbound_info.proto, - id: outbound_info.req_id, - }))); - } - - // Also handle any events that are awaiting to be sent to the behaviour - if !self.events_out.is_empty() { - return Poll::Ready(Some(self.events_out.remove(0))); - } - - Poll::Ready(None) - } - fn poll( &mut self, cx: &mut Context<'_>, diff --git a/beacon_node/lighthouse_network/tests/rpc_tests.rs b/beacon_node/lighthouse_network/tests/rpc_tests.rs index 8d29f5158b..527b853dc3 100644 --- a/beacon_node/lighthouse_network/tests/rpc_tests.rs +++ b/beacon_node/lighthouse_network/tests/rpc_tests.rs @@ -3,7 +3,7 @@ mod common; use common::Protocol; -use lighthouse_network::rpc::{methods::*, RPCError}; +use lighthouse_network::rpc::methods::*; use lighthouse_network::{rpc::max_rpc_size, NetworkEvent, ReportSource, Request, Response}; use slog::{debug, warn, Level}; use ssz::Encode; @@ -1012,98 +1012,6 @@ fn test_tcp_blocks_by_root_chunked_rpc_terminates_correctly() { }) } -#[test] -fn test_disconnect_triggers_rpc_error() { - // set up the logging. The level and enabled logging or not - let log_level = Level::Debug; - let enable_logging = false; - - let log = common::build_log(log_level, enable_logging); - let spec = E::default_spec(); - - let rt = Arc::new(Runtime::new().unwrap()); - // get sender/receiver - rt.block_on(async { - let (mut sender, mut receiver) = common::build_node_pair( - Arc::downgrade(&rt), - &log, - ForkName::Base, - &spec, - Protocol::Tcp, - ) - .await; - - // BlocksByRoot Request - let rpc_request = Request::BlocksByRoot(BlocksByRootRequest::new( - // Must have at least one root for the request to create a stream - vec![Hash256::from_low_u64_be(0)], - &spec, - )); - - // build the sender future - let sender_future = async { - loop { - match sender.next_event().await { - NetworkEvent::PeerConnectedOutgoing(peer_id) => { - // Send a STATUS message - debug!(log, "Sending RPC"); - sender - .send_request(peer_id, 42, rpc_request.clone()) - .unwrap(); - } - NetworkEvent::RPCFailed { error, id: 42, .. } => match error { - RPCError::Disconnected => return, - other => panic!("received unexpected error {:?}", other), - }, - other => { - warn!(log, "Ignoring other event {:?}", other); - } - } - } - }; - - // determine messages to send (PeerId, RequestId). 
If some, indicates we still need to send - // messages - let mut sending_peer = None; - let receiver_future = async { - loop { - // this future either drives the sending/receiving or times out allowing messages to be - // sent in the timeout - match futures::future::select( - Box::pin(receiver.next_event()), - Box::pin(tokio::time::sleep(Duration::from_secs(1))), - ) - .await - { - futures::future::Either::Left((ev, _)) => match ev { - NetworkEvent::RequestReceived { peer_id, .. } => { - sending_peer = Some(peer_id); - } - other => { - warn!(log, "Ignoring other event {:?}", other); - } - }, - futures::future::Either::Right((_, _)) => {} // The timeout hit, send messages if required - } - - // if we need to send messages send them here. This will happen after a delay - if let Some(peer_id) = sending_peer.take() { - warn!(log, "Receiver got request, disconnecting peer"); - receiver.__hard_disconnect_testing_only(peer_id); - } - } - }; - - tokio::select! { - _ = sender_future => {} - _ = receiver_future => {} - _ = sleep(Duration::from_secs(30)) => { - panic!("Future timed out"); - } - } - }) -} - /// Establishes a pair of nodes and disconnects the pair based on the selected protocol via an RPC /// Goodbye message. fn goodbye_test(log_level: Level, enable_logging: bool, protocol: Protocol) { diff --git a/beacon_node/network/src/sync/backfill_sync/mod.rs b/beacon_node/network/src/sync/backfill_sync/mod.rs index ce7d04ac0a..5431e1bcdc 100644 --- a/beacon_node/network/src/sync/backfill_sync/mod.rs +++ b/beacon_node/network/src/sync/backfill_sync/mod.rs @@ -307,7 +307,11 @@ impl BackFillSync { /// A peer has disconnected. /// If the peer has active batches, those are considered failed and re-requested. #[must_use = "A failure here indicates the backfill sync has failed and the global sync state should be updated"] - pub fn peer_disconnected(&mut self, peer_id: &PeerId) -> Result<(), BackFillError> { + pub fn peer_disconnected( + &mut self, + peer_id: &PeerId, + network: &mut SyncNetworkContext, + ) -> Result<(), BackFillError> { if matches!( self.state(), BackFillState::Failed | BackFillState::NotRequired @@ -315,7 +319,37 @@ impl BackFillSync { return Ok(()); } - self.active_requests.remove(peer_id); + if let Some(batch_ids) = self.active_requests.remove(peer_id) { + // fail the batches. + for id in batch_ids { + if let Some(batch) = self.batches.get_mut(&id) { + match batch.download_failed(false) { + Ok(BatchOperationOutcome::Failed { blacklist: _ }) => { + self.fail_sync(BackFillError::BatchDownloadFailed(id))?; + } + Ok(BatchOperationOutcome::Continue) => {} + Err(e) => { + self.fail_sync(BackFillError::BatchInvalidState(id, e.0))?; + } + } + // If we have run out of peers in which to retry this batch, the backfill state + // transitions to a paused state. + // We still need to reset the state for all the affected batches, so we should not + // short circuit early. 
+ if self.retry_batch_download(network, id).is_err() { + debug!( + self.log, + "Batch could not be retried"; + "batch_id" => id, + "error" => "no synced peers" + ); + } + } else { + debug!(self.log, "Batch not found while removing peer"; + "peer" => %peer_id, "batch" => id) + } + } + } // Remove the peer from the participation list self.participating_peers.remove(peer_id); diff --git a/beacon_node/network/src/sync/block_lookups/mod.rs b/beacon_node/network/src/sync/block_lookups/mod.rs index f685b7e59d..0148a6548d 100644 --- a/beacon_node/network/src/sync/block_lookups/mod.rs +++ b/beacon_node/network/src/sync/block_lookups/mod.rs @@ -1,3 +1,25 @@ +//! Implements block lookup sync. +//! +//! Block lookup sync is triggered when a peer claims to have imported a block we don't know about. +//! For example, a peer attesting to a head block root that is not in our fork-choice. Lookup sync +//! is recursive in nature, as we may discover that this attested head block root has a parent that +//! is also unknown to us. +//! +//! Block lookup is implemented as an event-driven state machine. It sends events to the network and +//! beacon processor, and expects some set of events back. A discrepancy in the expected event API +//! will result in lookups getting "stuck". A lookup becomes stuck when there is no future event +//! that will trigger the lookup to make progress. There's a fallback mechanism that drops lookups +//! that live for too long, logging the line "Notify the devs a sync lookup is stuck". +//! +//! The expected event API is documented in the code paths that are making assumptions with the +//! comment prefix "Lookup sync event safety:" +//! +//! Block lookup sync attempts to not re-download or re-process data that we already have. Block +//! components are cached temporarily in multiple places before they are imported into fork-choice. +//! Therefore, block lookup sync must peek these caches correctly to decide when to skip a download +//! or consider a lookup complete. These caches are read from the `SyncNetworkContext` and its state +//! returned to this module as `LookupRequestResult` variants. + use self::parent_chain::{compute_parent_chains, NodeChain}; pub use self::single_block_lookup::DownloadResult; use self::single_block_lookup::{LookupRequestError, LookupResult, SingleBlockLookup}; @@ -410,21 +432,9 @@ impl BlockLookups { /* Error responses */ pub fn peer_disconnected(&mut self, peer_id: &PeerId) { - self.single_block_lookups.retain(|_, lookup| { + for (_, lookup) in self.single_block_lookups.iter_mut() { lookup.remove_peer(peer_id); - - // Note: this condition should be removed in the future. It's not strictly necessary to drop a - // lookup if there are no peers left. 
Lookup should only be dropped if it can not make progress - if lookup.has_no_peers() { - debug!(self.log, - "Dropping single lookup after peer disconnection"; - "block_root" => ?lookup.block_root() - ); - false - } else { - true - } - }); + } } /* Processing responses */ @@ -787,12 +797,12 @@ impl BlockLookups { }; if stuck_lookup.id == ancestor_stuck_lookup.id { - warn!(self.log, "Notify the devs, a sync lookup is stuck"; + warn!(self.log, "Notify the devs a sync lookup is stuck"; "block_root" => ?stuck_lookup.block_root(), "lookup" => ?stuck_lookup, ); } else { - warn!(self.log, "Notify the devs, a sync lookup is stuck"; + warn!(self.log, "Notify the devs a sync lookup is stuck"; "block_root" => ?stuck_lookup.block_root(), "lookup" => ?stuck_lookup, "ancestor_block_root" => ?ancestor_stuck_lookup.block_root(), diff --git a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs index 13efd36ab7..e17991286a 100644 --- a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs +++ b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs @@ -197,21 +197,36 @@ impl SingleBlockLookup { } let Some(peer_id) = self.use_rand_available_peer() else { - // Allow lookup to not have any peers. In that case do nothing. If the lookup does - // not have peers for some time, it will be dropped. + // Allow lookup to not have any peers and do nothing. This is an optimization to not + // lose progress of lookups created from a block with unknown parent before we receive + // attestations for said block. + // Lookup sync event safety: If a lookup requires peers to make progress, and does + // not receive any new peers for some time it will be dropped. If it receives a new + // peer it must attempt to make progress. + R::request_state_mut(self) + .get_state_mut() + .update_awaiting_download_status("no peers"); return Ok(()); }; let request = R::request_state_mut(self); match request.make_request(id, peer_id, downloaded_block_expected_blobs, cx)? { LookupRequestResult::RequestSent(req_id) => { + // Lookup sync event safety: If make_request returns `RequestSent`, we are + // guaranteed that `BlockLookups::on_download_response` will be called exactly + // once with this `req_id`. request.get_state_mut().on_download_start(req_id)? } LookupRequestResult::NoRequestNeeded => { + // Lookup sync event safety: Advances this request to the terminal `Processed` + // state. If all requests reach this state, the request is marked as completed + // in `Self::continue_requests`. request.get_state_mut().on_completed_request()? } // Sync will receive a future event to make progress on the request, do nothing now LookupRequestResult::Pending(reason) => { + // Lookup sync event safety: Refer to the code paths constructing + // `LookupRequestResult::Pending` request .get_state_mut() .update_awaiting_download_status(reason); @@ -222,16 +237,28 @@ // Otherwise, attempt to progress awaiting processing // If this request is awaiting a parent lookup to be processed, do not send for processing. // The request will be rejected with unknown parent error. + // + // TODO: The condition `block_is_processed || Block` can be dropped after checking for + // unknown parent root when importing RPC blobs } else if !awaiting_parent && (block_is_processed || matches!(R::response_type(), ResponseType::Block)) { // maybe_start_processing returns Some if state == AwaitingProcess. This pattern is
This pattern is // useful to conditionally access the result data. if let Some(result) = request.get_state_mut().maybe_start_processing() { + // Lookup sync event safety: If `send_for_processing` returns Ok() we are guaranteed + // that `BlockLookups::on_processing_result` will be called exactly once with this + // lookup_id return R::send_for_processing(id, result, cx); } + // Lookup sync event safety: If the request is not in `AwaitingDownload` or + // `AwaitingProcessing` state it is guaranteed to receive some event to make progress. } + // Lookup sync event safety: If a lookup is awaiting a parent we are guaranteed to either: + // (1) attempt to make progress with `BlockLookups::continue_child_lookups` if the parent + // lookup completes, or (2) get dropped if the parent fails and is dropped. + Ok(()) } @@ -246,10 +273,9 @@ impl SingleBlockLookup { self.peers.insert(peer_id) } - /// Remove peer from available peers. Return true if there are no more available peers and all - /// requests are not expecting any future event (AwaitingDownload). - pub fn remove_peer(&mut self, peer_id: &PeerId) -> bool { - self.peers.remove(peer_id) + /// Remove peer from available peers. + pub fn remove_peer(&mut self, peer_id: &PeerId) { + self.peers.remove(peer_id); } /// Returns true if this lookup has zero peers diff --git a/beacon_node/network/src/sync/block_lookups/tests.rs b/beacon_node/network/src/sync/block_lookups/tests.rs index a607151bde..02b07fa43e 100644 --- a/beacon_node/network/src/sync/block_lookups/tests.rs +++ b/beacon_node/network/src/sync/block_lookups/tests.rs @@ -290,6 +290,7 @@ impl TestRig { .0 } + #[track_caller] fn expect_no_active_single_lookups(&self) { assert!( self.active_single_lookups().is_empty(), @@ -298,6 +299,7 @@ impl TestRig { ); } + #[track_caller] fn expect_no_active_lookups(&self) { self.expect_no_active_single_lookups(); } @@ -539,10 +541,6 @@ impl TestRig { }) } - fn peer_disconnected(&mut self, disconnected_peer_id: PeerId) { - self.send_sync_message(SyncMessage::Disconnect(disconnected_peer_id)); - } - /// Return RPCErrors for all active requests of peer fn rpc_error_all_active_requests(&mut self, disconnected_peer_id: PeerId) { self.drain_network_rx(); @@ -562,6 +560,10 @@ impl TestRig { } } + fn peer_disconnected(&mut self, peer_id: PeerId) { + self.send_sync_message(SyncMessage::Disconnect(peer_id)); + } + fn drain_network_rx(&mut self) { while let Ok(event) = self.network_rx.try_recv() { self.network_rx_queue.push(event); @@ -1026,6 +1028,28 @@ fn test_single_block_lookup_failure() { rig.expect_empty_network(); } +#[test] +fn test_single_block_lookup_peer_disconnected_then_rpc_error() { + let mut rig = TestRig::test_setup(); + + let block_hash = Hash256::random(); + let peer_id = rig.new_connected_peer(); + + // Trigger the request. + rig.trigger_unknown_block_from_attestation(block_hash, peer_id); + let id = rig.expect_block_lookup_request(block_hash); + + // The peer disconnect event reaches sync before the rpc error. + rig.peer_disconnected(peer_id); + // The lookup is not removed as it can still potentially make progress. + rig.assert_single_lookups_count(1); + // The request fails. + rig.single_lookup_failed(id, peer_id, RPCError::Disconnected); + rig.expect_block_lookup_request(block_hash); + // The request should be removed from the network context on disconnection. 
+ rig.expect_empty_network(); +} + #[test] fn test_single_block_lookup_becomes_parent_request() { let mut rig = TestRig::test_setup(); @@ -1289,19 +1313,9 @@ fn test_lookup_peer_disconnected_no_peers_left_while_request() { rig.trigger_unknown_parent_block(peer_id, trigger_block.into()); rig.peer_disconnected(peer_id); rig.rpc_error_all_active_requests(peer_id); - rig.expect_no_active_lookups(); -} - -#[test] -fn test_lookup_peer_disconnected_no_peers_left_not_while_request() { - let mut rig = TestRig::test_setup(); - let peer_id = rig.new_connected_peer(); - let trigger_block = rig.rand_block(); - rig.trigger_unknown_parent_block(peer_id, trigger_block.into()); - rig.peer_disconnected(peer_id); - // Note: this test case may be removed in the future. It's not strictly necessary to drop a - // lookup if there are no peers left. Lookup should only be dropped if it can not make progress - rig.expect_no_active_lookups(); + // Erroring all rpc requests and disconnecting the peer shouldn't remove the requests + // from the lookups map as they can still progress. + rig.assert_single_lookups_count(2); } #[test] diff --git a/beacon_node/network/src/sync/block_sidecar_coupling.rs b/beacon_node/network/src/sync/block_sidecar_coupling.rs index d159733cbc..f31f2921ea 100644 --- a/beacon_node/network/src/sync/block_sidecar_coupling.rs +++ b/beacon_node/network/src/sync/block_sidecar_coupling.rs @@ -1,4 +1,5 @@ use beacon_chain::block_verification_types::RpcBlock; +use lighthouse_network::PeerId; use ssz_types::VariableList; use std::{collections::VecDeque, sync::Arc}; use types::{BlobSidecar, EthSpec, SignedBeaconBlock}; @@ -17,16 +18,19 @@ pub struct BlocksAndBlobsRequestInfo { is_sidecars_stream_terminated: bool, /// Used to determine if this accumulator should wait for a sidecars stream termination request_type: ByRangeRequestType, + /// The peer the request was made to. 
+ pub(crate) peer_id: PeerId, } impl BlocksAndBlobsRequestInfo { - pub fn new(request_type: ByRangeRequestType) -> Self { + pub fn new(request_type: ByRangeRequestType, peer_id: PeerId) -> Self { Self { accumulated_blocks: <_>::default(), accumulated_sidecars: <_>::default(), is_blocks_stream_terminated: <_>::default(), is_sidecars_stream_terminated: <_>::default(), request_type, + peer_id, } } @@ -109,12 +113,14 @@ mod tests { use super::BlocksAndBlobsRequestInfo; use crate::sync::range_sync::ByRangeRequestType; use beacon_chain::test_utils::{generate_rand_block_and_blobs, NumBlobs}; + use lighthouse_network::PeerId; use rand::SeedableRng; use types::{test_utils::XorShiftRng, ForkName, MinimalEthSpec as E}; #[test] fn no_blobs_into_responses() { - let mut info = BlocksAndBlobsRequestInfo::::new(ByRangeRequestType::Blocks); + let peer_id = PeerId::random(); + let mut info = BlocksAndBlobsRequestInfo::::new(ByRangeRequestType::Blocks, peer_id); let mut rng = XorShiftRng::from_seed([42; 16]); let blocks = (0..4) .map(|_| generate_rand_block_and_blobs::(ForkName::Base, NumBlobs::None, &mut rng).0) .collect::>(); @@ -133,7 +139,9 @@ #[test] fn empty_blobs_into_responses() { - let mut info = BlocksAndBlobsRequestInfo::::new(ByRangeRequestType::BlocksAndBlobs); + let peer_id = PeerId::random(); + let mut info = + BlocksAndBlobsRequestInfo::::new(ByRangeRequestType::BlocksAndBlobs, peer_id); let mut rng = XorShiftRng::from_seed([42; 16]); let blocks = (0..4) .map(|_| { diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 4c1a1e6b67..0f8cab18c9 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -372,16 +372,39 @@ impl SyncManager { Err(_) => self.update_sync_state(), }, } + } else { + debug!( + self.log, + "RPC error for range request has no associated entry in network context, ungraceful disconnect"; + "peer_id" => %peer_id, + "request_id" => %id, + "error" => ?error, + ); } } } } + /// Handles a peer disconnect. + /// + /// It is important that a peer disconnect retries all the batches/lookups as + /// there is no way to guarantee that libp2p always emits an error along with + /// the disconnect. fn peer_disconnect(&mut self, peer_id: &PeerId) { + // Inject a Disconnected error on all requests associated with the disconnected peer + // to retry all batches/lookups + for request_id in self.network.peer_disconnected(peer_id) { + self.inject_error(*peer_id, request_id, RPCError::Disconnected); + } + + // Remove peer from all data structures self.range_sync.peer_disconnect(&mut self.network, peer_id); + let _ = self + .backfill_sync + .peer_disconnected(peer_id, &mut self.network); self.block_lookups.peer_disconnected(peer_id); + // Regardless of the outcome, we update the sync status.
- let _ = self.backfill_sync.peer_disconnected(peer_id); self.update_sync_state(); } @@ -951,7 +974,7 @@ impl SyncManager { self.network.insert_range_blocks_and_blobs_request( id, resp.sender_id, - BlocksAndBlobsRequestInfo::new(resp.request_type), + BlocksAndBlobsRequestInfo::new(resp.request_type, peer_id), ); // inform range that the request needs to be treated as failed // With time we will want to downgrade this log diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index f3f82ee011..6f89b954b3 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -177,6 +177,46 @@ impl SyncNetworkContext { } } + /// Returns the ids of all the requests made to the given peer_id. + pub fn peer_disconnected(&mut self, peer_id: &PeerId) -> Vec { + let failed_range_ids = + self.range_blocks_and_blobs_requests + .iter() + .filter_map(|(id, request)| { + if request.1.peer_id == *peer_id { + Some(SyncRequestId::RangeBlockAndBlobs { id: *id }) + } else { + None + } + }); + + let failed_block_ids = self + .blocks_by_root_requests + .iter() + .filter_map(|(id, request)| { + if request.peer_id == *peer_id { + Some(SyncRequestId::SingleBlock { id: *id }) + } else { + None + } + }); + let failed_blob_ids = self + .blobs_by_root_requests + .iter() + .filter_map(|(id, request)| { + if request.peer_id == *peer_id { + Some(SyncRequestId::SingleBlob { id: *id }) + } else { + None + } + }); + + failed_range_ids + .chain(failed_block_ids) + .chain(failed_blob_ids) + .collect() + } + pub fn network_globals(&self) -> &NetworkGlobals { &self.network_beacon_processor.network_globals } @@ -272,8 +312,13 @@ impl SyncNetworkContext { sender_id: RangeRequestId, ) -> Result { let id = self.blocks_by_range_request(peer_id, batch_type, request)?; - self.range_blocks_and_blobs_requests - .insert(id, (sender_id, BlocksAndBlobsRequestInfo::new(batch_type))); + self.range_blocks_and_blobs_requests.insert( + id, + ( + sender_id, + BlocksAndBlobsRequestInfo::new(batch_type, peer_id), + ), + ); Ok(id) } @@ -343,7 +388,10 @@ impl SyncNetworkContext { // Block is known and currently processing, expect a future event with the result of // processing. BlockProcessStatus::NotValidated { .. } => { - return Ok(LookupRequestResult::Pending("block in processing cache")) + // Lookup sync event safety: If the block is currently in the processing cache, we + // are guaranteed to receive a `SyncMessage::GossipBlockProcessResult` that will + // make progress on this lookup + return Ok(LookupRequestResult::Pending("block in processing cache")); } // Block is fully validated. If it's not yet imported it's waiting for missing block // components. Consider this request completed and do nothing.
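Taken together, the disconnect handling above follows one pattern: collect the id of every in-flight request that was sent to the disconnected peer, then inject an RPCError::Disconnected for each id so the owning batch or lookup retries. A minimal sketch of that pattern, using hypothetical simplified types rather than the real sync types:

use std::collections::HashMap;

// Hypothetical simplified stand-ins for the real sync types.
type PeerId = u32;
type RequestId = u32;

#[derive(Debug)]
enum RpcError {
    Disconnected,
}

struct SyncManager {
    // Each active request records the peer it was sent to.
    active_requests: HashMap<RequestId, PeerId>,
}

impl SyncManager {
    // Collect every request made to the disconnected peer.
    fn requests_for_peer(&self, peer: PeerId) -> Vec<RequestId> {
        self.active_requests
            .iter()
            .filter_map(|(id, p)| (*p == peer).then_some(*id))
            .collect()
    }

    // Inject a Disconnected error for each of those requests, so the owning
    // batch/lookup is retried even if libp2p never emits an explicit error.
    fn peer_disconnect(&mut self, peer: PeerId) {
        for id in self.requests_for_peer(peer) {
            self.inject_error(peer, id, RpcError::Disconnected);
        }
    }

    fn inject_error(&mut self, peer: PeerId, id: RequestId, error: RpcError) {
        // Drop the request entry and hand the error to the retry logic
        // (elided in this sketch).
        self.active_requests.remove(&id);
        println!("retrying work for request {id} to peer {peer}: {error:?}");
    }
}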
@@ -366,6 +414,12 @@ impl SyncNetworkContext { let request = BlocksByRootSingleRequest(block_root); + // Lookup sync event safety: If network_send.send() returns Ok(_) we are guaranteed that + // eventually at least one of these 3 events will be received: + // - StreamTermination(request_id): handled by `Self::on_single_block_response` + // - RPCError(request_id): handled by `Self::on_single_block_response` + // - Disconnect(peer_id): handled by `Self::peer_disconnected`, which converts it to a + // `RPCError(request_id)` event handled by the above method self.network_send .send(NetworkMessage::SendRequest { peer_id, @@ -375,7 +429,7 @@ .map_err(|_| RpcRequestSendError::NetworkSendError)?; self.blocks_by_root_requests - .insert(id, ActiveBlocksByRootRequest::new(request)); + .insert(id, ActiveBlocksByRootRequest::new(request, peer_id)); Ok(LookupRequestResult::RequestSent(req_id)) } @@ -408,6 +462,13 @@ // latter handle the case where if the peer sent no blobs, penalize. // - if `downloaded_block_expected_blobs` is Some = block is downloading or processing. // - if `num_expected_blobs` returns Some = block is processed. + // + // Lookup sync event safety: Reaching this code means that a block is not in any pre-import + // cache nor in the request state of this lookup. Therefore, the block must either: (1) not + // be downloaded yet or (2) the block is already imported into the fork-choice. + // In case (1) the lookup must either successfully download the block or get dropped. + // In case (2) the block will be downloaded, processed, reach `BlockIsAlreadyKnown` and + // get dropped as completed. return Ok(LookupRequestResult::Pending("waiting for block download")); }; @@ -444,6 +505,7 @@ indices, }; + // Lookup sync event safety: Refer to `Self::block_lookup_request` `network_send.send` call self.network_send .send(NetworkMessage::SendRequest { peer_id, @@ -453,7 +515,7 @@ .map_err(|_| RpcRequestSendError::NetworkSendError)?; self.blobs_by_root_requests - .insert(id, ActiveBlobsByRootRequest::new(request)); + .insert(id, ActiveBlobsByRootRequest::new(request, peer_id)); Ok(LookupRequestResult::RequestSent(req_id)) } @@ -660,6 +722,8 @@ .ok_or(SendErrorProcessor::ProcessorNotAvailable)?; debug!(self.log, "Sending block for processing"; "block" => ?block_root, "id" => id); + // Lookup sync event safety: If `beacon_processor.send_rpc_beacon_block` returns Ok() sync + // must receive a single `SyncMessage::BlockComponentProcessed` with this process type beacon_processor .send_rpc_beacon_block( block_root, @@ -689,6 +753,8 @@ .ok_or(SendErrorProcessor::ProcessorNotAvailable)?; debug!(self.log, "Sending blobs for processing"; "block" => ?block_root, "id" => id); + // Lookup sync event safety: If `beacon_processor.send_rpc_blobs` returns Ok() sync + // must receive a single `SyncMessage::BlockComponentProcessed` event with this process type beacon_processor .send_rpc_blobs( block_root, diff --git a/beacon_node/network/src/sync/network_context/requests.rs b/beacon_node/network/src/sync/network_context/requests.rs index 6e4683701b..8387e9b0e1 100644 --- a/beacon_node/network/src/sync/network_context/requests.rs +++ b/beacon_node/network/src/sync/network_context/requests.rs @@ -1,5 +1,8 @@ use beacon_chain::get_block_root; -use lighthouse_network::rpc::{methods::BlobsByRootRequest, BlocksByRootRequest}; +use lighthouse_network::{ +
rpc::{methods::BlobsByRootRequest, BlocksByRootRequest}, + PeerId, +}; use std::sync::Arc; use strum::IntoStaticStr; use types::{ @@ -20,13 +23,15 @@ pub enum LookupVerifyError { pub struct ActiveBlocksByRootRequest { request: BlocksByRootSingleRequest, resolved: bool, + pub(crate) peer_id: PeerId, } impl ActiveBlocksByRootRequest { - pub fn new(request: BlocksByRootSingleRequest) -> Self { + pub fn new(request: BlocksByRootSingleRequest, peer_id: PeerId) -> Self { Self { request, resolved: false, + peer_id, } } @@ -94,14 +99,16 @@ pub struct ActiveBlobsByRootRequest { request: BlobsByRootSingleBlockRequest, blobs: Vec>>, resolved: bool, + pub(crate) peer_id: PeerId, } impl ActiveBlobsByRootRequest { - pub fn new(request: BlobsByRootSingleBlockRequest) -> Self { + pub fn new(request: BlobsByRootSingleBlockRequest, peer_id: PeerId) -> Self { Self { request, blobs: vec![], resolved: false, + peer_id, } } diff --git a/beacon_node/network/src/sync/range_sync/chain.rs b/beacon_node/network/src/sync/range_sync/chain.rs index 63cafa9aca..122e8287e6 100644 --- a/beacon_node/network/src/sync/range_sync/chain.rs +++ b/beacon_node/network/src/sync/range_sync/chain.rs @@ -174,8 +174,30 @@ impl SyncingChain { /// Removes a peer from the chain. /// If the peer has active batches, those are considered failed and re-requested. - pub fn remove_peer(&mut self, peer_id: &PeerId) -> ProcessingResult { - self.peers.remove(peer_id); + pub fn remove_peer( + &mut self, + peer_id: &PeerId, + network: &mut SyncNetworkContext, + ) -> ProcessingResult { + if let Some(batch_ids) = self.peers.remove(peer_id) { + // fail the batches. + for id in batch_ids { + if let Some(batch) = self.batches.get_mut(&id) { + if let BatchOperationOutcome::Failed { blacklist } = + batch.download_failed(true)? + { + return Err(RemoveChain::ChainFailed { + blacklist, + failing_batch: id, + }); + } + self.retry_batch_download(network, id)?; + } else { + debug!(self.log, "Batch not found while removing peer"; + "peer" => %peer_id, "batch" => id) + } + } + } if self.peers.is_empty() { Err(RemoveChain::EmptyPeerPool) diff --git a/beacon_node/network/src/sync/range_sync/range.rs b/beacon_node/network/src/sync/range_sync/range.rs index fe48db35b4..c8e8266684 100644 --- a/beacon_node/network/src/sync/range_sync/range.rs +++ b/beacon_node/network/src/sync/range_sync/range.rs @@ -278,8 +278,9 @@ where /// for this peer. If so we mark the batch as failed. The batch may then hit its maximum /// retries. In this case, we need to remove the chain.
fn remove_peer(&mut self, network: &mut SyncNetworkContext, peer_id: &PeerId) { - for (removed_chain, sync_type, remove_reason) in - self.chains.call_all(|chain| chain.remove_peer(peer_id)) + for (removed_chain, sync_type, remove_reason) in self + .chains + .call_all(|chain| chain.remove_peer(peer_id, network)) { self.on_chain_removed( removed_chain, From b38019cb1071e5b5a3a3c1b4ec95836842b7ff64 Mon Sep 17 00:00:00 2001 From: Lion - dapplion <35266934+dapplion@users.noreply.github.com> Date: Thu, 27 Jun 2024 01:53:55 +0200 Subject: [PATCH 03/11] Attempt to continue lookups after adding peers (#5993) * Attempt to continue lookups after adding peers --- .../network/src/sync/block_lookups/mod.rs | 22 ++++++++++++------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/beacon_node/network/src/sync/block_lookups/mod.rs b/beacon_node/network/src/sync/block_lookups/mod.rs index 0148a6548d..0ae9bfec52 100644 --- a/beacon_node/network/src/sync/block_lookups/mod.rs +++ b/beacon_node/network/src/sync/block_lookups/mod.rs @@ -291,7 +291,7 @@ impl BlockLookups { } } - if let Err(e) = self.add_peers_to_lookup_and_ancestors(lookup_id, peers) { + if let Err(e) = self.add_peers_to_lookup_and_ancestors(lookup_id, peers, cx) { warn!(self.log, "Error adding peers to ancestor lookup"; "error" => ?e); } @@ -844,14 +844,17 @@ impl BlockLookups { &mut self, lookup_id: SingleLookupId, peers: &[PeerId], + cx: &mut SyncNetworkContext, ) -> Result<(), String> { let lookup = self .single_block_lookups .get_mut(&lookup_id) .ok_or(format!("Unknown lookup for id {lookup_id}"))?; + let mut added_some_peer = false; for peer in peers { if lookup.add_peer(*peer) { + added_some_peer = true; debug!(self.log, "Adding peer to existing single block lookup"; "block_root" => ?lookup.block_root(), "peer" => ?peer ); } } - // We may choose to attempt to continue a lookup here. It is possible that a lookup had zero - // peers and after adding this set of peers it can make progress again. Note that this - // recursive function iterates from child to parent, so continuing the child first is weird. - // However, we choose to not attempt to continue the lookup for simplicity. It's not - // strictly required and just and optimization for a rare corner case. - if let Some(parent_root) = lookup.awaiting_parent() { if let Some((&child_id, _)) = self .single_block_lookups .iter() .find(|(_, l)| l.block_root() == parent_root) { - self.add_peers_to_lookup_and_ancestors(child_id, peers) + self.add_peers_to_lookup_and_ancestors(child_id, peers, cx) } else { Err(format!("Lookup references unknown parent {parent_root:?}")) } + } else if added_some_peer { + // If this lookup is not awaiting a parent and we added at least one peer, attempt to + // make progress. It is possible that a lookup is created with zero peers, attempted to + // make progress, and then receives peers. After that time the lookup will never be + // pruned with `drop_lookups_without_peers` because it has peers. This is a rare corner + // case, but it can result in stuck lookups. + let result = lookup.continue_requests(cx); + self.on_lookup_result(lookup_id, result, "add_peers", cx); + Ok(()) } else { Ok(()) } From a910a498a81b76fe4ed171728c52a6425a0ae48a Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Thu, 27 Jun 2024 17:11:22 +1000 Subject: [PATCH 04/11] Enable jemalloc by default on non windows targets (#5995) * Enable jemalloc by default on non windows targets.
* Update `allocator_name` function to check for `target_os` instead as we've deprecated `jemalloc` feature. --- Makefile | 8 -------- lighthouse/Cargo.toml | 10 ++++++++-- lighthouse/src/main.rs | 6 +++--- 3 files changed, 11 insertions(+), 13 deletions(-) diff --git a/Makefile b/Makefile index c3c8d13238..3f8e688df1 100644 --- a/Makefile +++ b/Makefile @@ -14,14 +14,6 @@ BUILD_PATH_AARCH64 = "target/$(AARCH64_TAG)/release" PINNED_NIGHTLY ?= nightly CLIPPY_PINNED_NIGHTLY=nightly-2022-05-19 -# List of features to use when building natively. Can be overridden via the environment. -# No jemalloc on Windows -ifeq ($(OS),Windows_NT) - FEATURES?= -else - FEATURES?=jemalloc -endif - # List of features to use when cross-compiling. Can be overridden via the environment. CROSS_FEATURES ?= gnosis,slasher-lmdb,slasher-mdbx,jemalloc diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 20466b5de7..64b08b113e 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -22,8 +22,14 @@ gnosis = [] slasher-mdbx = ["slasher/mdbx"] # Support slasher LMDB backend. slasher-lmdb = ["slasher/lmdb"] -# Use jemalloc. -jemalloc = ["malloc_utils/jemalloc"] +# Deprecated. This is now enabled by default on non windows targets. +jemalloc = [] + +[target.'cfg(not(target_os = "windows"))'.dependencies] +malloc_utils = { workspace = true, features = ["jemalloc"] } + +[target.'cfg(target_os = "windows")'.dependencies] +malloc_utils = { workspace = true } [dependencies] beacon_node = { workspace = true } diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index abee30737c..47b44d3828 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -51,10 +51,10 @@ fn bls_library_name() -> &'static str { } fn allocator_name() -> &'static str { - if cfg!(feature = "jemalloc") { - "jemalloc" - } else { + if cfg!(target_os = "windows") { "system" + } else { + "jemalloc" } } From f106533ebc28707f71a22fef1c98724f911bfd71 Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Thu, 27 Jun 2024 00:35:11 -0700 Subject: [PATCH 05/11] Avoid rayon in lighthouse block verification (#5992) * Avoid rayon in lighthouse --- .../per_block_processing/block_signature_verifier.rs | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs b/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs index a0c044219d..28ca8935e4 100644 --- a/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs +++ b/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs @@ -4,7 +4,6 @@ use super::signature_sets::{Error as SignatureSetError, *}; use crate::per_block_processing::errors::{AttestationInvalid, BlockOperationError}; use crate::{ConsensusContext, ContextError}; use bls::{verify_signature_sets, PublicKey, PublicKeyBytes, SignatureSet}; -use rayon::prelude::*; use std::borrow::Cow; use types::{ AbstractExecPayload, BeaconState, BeaconStateError, ChainSpec, EthSpec, Hash256, @@ -411,15 +410,10 @@ impl<'a> ParallelSignatureSets<'a> { /// It is not possible to know exactly _which_ signature is invalid here, just that /// _at least one_ was invalid. /// - /// Uses `rayon` to do a map-reduce of Vitalik's method across multiple cores. + /// Blst library spreads the signature verification work across multiple available cores, so + /// this function is already parallelized. 
#[must_use] pub fn verify(self) -> bool { - let num_sets = self.sets.len(); - let num_chunks = std::cmp::max(1, num_sets / rayon::current_num_threads()); - self.sets - .into_par_iter() - .chunks(num_chunks) - .map(|chunk| verify_signature_sets(chunk.iter())) - .reduce(|| true, |current, this| current && this) + verify_signature_sets(self.sets.iter()) } } From 9b093c8459a1d8e8cc12acdb5a048503d8220bd5 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Thu, 27 Jun 2024 17:35:14 +1000 Subject: [PATCH 06/11] Prevent connections from peers with a banned ip history (#6008) * Block peers based on past ips * Remove unused type --- .../lighthouse_network/src/peer_manager/network_behaviour.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs index b776347ad0..3858a2a539 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs @@ -14,7 +14,6 @@ use slog::{debug, error, trace}; use types::EthSpec; use crate::discovery::enr_ext::EnrExt; -use crate::peer_manager::peerdb::BanResult; use crate::rpc::GoodbyeReason; use crate::types::SyncState; use crate::{metrics, ClearDialError}; @@ -201,7 +200,7 @@ impl NetworkBehaviour for PeerManager { ) -> Result, ConnectionDenied> { trace!(self.log, "Inbound connection"; "peer_id" => %peer_id, "multiaddr" => %remote_addr); // We already checked if the peer was banned on `handle_pending_inbound_connection`. - if let Some(BanResult::BadScore) = self.ban_status(&peer_id) { + if self.ban_status(&peer_id).is_some() { return Err(ConnectionDenied::new( "Connection to peer rejected: peer has a bad score", )); From f14f21f37bcdf878c6cb955f1e8bb985c2411d1c Mon Sep 17 00:00:00 2001 From: Lion - dapplion <35266934+dapplion@users.noreply.github.com> Date: Thu, 27 Jun 2024 20:21:37 +0200 Subject: [PATCH 07/11] Bound lookup parent chain length with tip extension (#5705) * Bound lookup parent chain length with tip extension * Add test --- .../network/src/sync/block_lookups/mod.rs | 18 +++++++- .../network/src/sync/block_lookups/tests.rs | 41 +++++++++++++++---- 2 files changed, 51 insertions(+), 8 deletions(-) diff --git a/beacon_node/network/src/sync/block_lookups/mod.rs b/beacon_node/network/src/sync/block_lookups/mod.rs index f685b7e59d..37f5365944 100644 --- a/beacon_node/network/src/sync/block_lookups/mod.rs +++ b/beacon_node/network/src/sync/block_lookups/mod.rs @@ -194,7 +194,15 @@ impl BlockLookups { let parent_chains = self.active_parent_lookups(); for (chain_idx, parent_chain) in parent_chains.iter().enumerate() { - if parent_chain.ancestor() == child_block_root_trigger + // `block_root_to_search` will trigger a new lookup, and it will extend a parent_chain + // beyond its max length + let block_would_extend_chain = parent_chain.ancestor() == child_block_root_trigger; + // `block_root_to_search` already has a lookup, and with the block trigger it extends + // the parent_chain beyond its length. 
This can happen because when creating a lookup + // for a new root we don't do any parent chain length checks + let trigger_is_chain_tip = parent_chain.tip == child_block_root_trigger; + + if (block_would_extend_chain || trigger_is_chain_tip) && parent_chain.len() >= PARENT_DEPTH_TOLERANCE { debug!(self.log, "Parent lookup chain too long"; "block_root" => ?block_root_to_search); @@ -375,6 +383,14 @@ impl BlockLookups { "response_type" => ?response_type, ); + // Here we could check if response extends a parent chain beyond its max length. + // However we defer that check to the handling of a processing error ParentUnknown. + // + // Here we could check if there's already a lookup for parent_root of `response`. In + // that case we know that sending the response for processing will likely result in + // a `ParentUnknown` error. However, for simplicity we choose to not implement this + // optimization. + // Register the download peer here. Once we have received some data over the wire we // attribute it to this peer for scoring latter regardless of how the request was // done. diff --git a/beacon_node/network/src/sync/block_lookups/tests.rs b/beacon_node/network/src/sync/block_lookups/tests.rs index a607151bde..3f681f8ec2 100644 --- a/beacon_node/network/src/sync/block_lookups/tests.rs +++ b/beacon_node/network/src/sync/block_lookups/tests.rs @@ -278,8 +278,11 @@ impl TestRig { } } - fn failed_chains_contains(&mut self, chain_hash: &Hash256) -> bool { - self.sync_manager.get_failed_chains().contains(chain_hash) + fn assert_failed_chain(&mut self, chain_hash: Hash256) { + let failed_chains = self.sync_manager.get_failed_chains(); + if !failed_chains.contains(&chain_hash) { + panic!("expected failed chains to contain {chain_hash:?}: {failed_chains:?}"); + } } fn find_single_lookup_for(&self, block_root: Hash256) -> Id { @@ -1201,7 +1204,7 @@ fn test_parent_lookup_too_many_download_attempts_no_blacklist() { // Trigger the request rig.trigger_unknown_parent_block(peer_id, block.into()); for i in 1..=PARENT_FAIL_TOLERANCE { - assert!(!rig.failed_chains_contains(&block_root)); + rig.assert_not_failed_chain(block_root); let id = rig.expect_block_parent_request(parent_root); if i % 2 != 0 { // The request fails. It should be tried again. 
@@ -1214,8 +1217,8 @@ fn test_parent_lookup_too_many_download_attempts_no_blacklist() { } } - assert!(!rig.failed_chains_contains(&block_root)); - assert!(!rig.failed_chains_contains(&parent.canonical_root())); + rig.assert_not_failed_chain(block_root); + rig.assert_not_failed_chain(parent.canonical_root()); rig.expect_no_active_lookups_empty_network(); } @@ -1253,7 +1256,7 @@ fn test_parent_lookup_too_many_processing_attempts_must_blacklist() { } #[test] -fn test_parent_lookup_too_deep() { +fn test_parent_lookup_too_deep_grow_ancestor() { let mut rig = TestRig::test_setup(); let mut blocks = rig.rand_blockchain(PARENT_DEPTH_TOLERANCE); @@ -1278,7 +1281,31 @@ fn test_parent_lookup_too_deep() { } rig.expect_penalty(peer_id, "chain_too_long"); - assert!(rig.failed_chains_contains(&chain_hash)); + rig.assert_failed_chain(chain_hash); +} + +#[test] +fn test_parent_lookup_too_deep_grow_tip() { + let mut rig = TestRig::test_setup(); + let blocks = rig.rand_blockchain(PARENT_DEPTH_TOLERANCE - 1); + let peer_id = rig.new_connected_peer(); + let tip = blocks.last().unwrap().clone(); + + for block in blocks.into_iter() { + let block_root = block.canonical_root(); + rig.trigger_unknown_block_from_attestation(block_root, peer_id); + let id = rig.expect_block_parent_request(block_root); + rig.single_lookup_block_response(id, peer_id, Some(block.clone())); + rig.single_lookup_block_response(id, peer_id, None); + rig.expect_block_process(ResponseType::Block); + rig.single_block_component_processed( + id.lookup_id, + BlockError::ParentUnknown(RpcBlock::new_without_blobs(None, block)).into(), + ); + } + + rig.expect_penalty(peer_id, "chain_too_long"); + rig.assert_failed_chain(tip.canonical_root()); } #[test] From 784ef5fb43e2630572640c519e2cbf7f6bb50e73 Mon Sep 17 00:00:00 2001 From: Lion - dapplion <35266934+dapplion@users.noreply.github.com> Date: Thu, 27 Jun 2024 20:21:40 +0200 Subject: [PATCH 08/11] Pass vec to range sync batch (#5710) * Pass vec to range sync batch --- .../network/src/sync/backfill_sync/mod.rs | 12 +--- beacon_node/network/src/sync/manager.rs | 59 ++++++++----------- .../network/src/sync/range_sync/batch.rs | 42 ++++--------- .../network/src/sync/range_sync/chain.rs | 10 +--- .../network/src/sync/range_sync/range.rs | 8 +-- 5 files changed, 46 insertions(+), 85 deletions(-) diff --git a/beacon_node/network/src/sync/backfill_sync/mod.rs b/beacon_node/network/src/sync/backfill_sync/mod.rs index ce7d04ac0a..728642cc78 100644 --- a/beacon_node/network/src/sync/backfill_sync/mod.rs +++ b/beacon_node/network/src/sync/backfill_sync/mod.rs @@ -369,7 +369,7 @@ impl BackFillSync { batch_id: BatchId, peer_id: &PeerId, request_id: Id, - beacon_block: Option>, + blocks: Vec>, ) -> Result { // check if we have this batch let batch = match self.batches.get_mut(&batch_id) { @@ -392,20 +392,14 @@ impl BackFillSync { } }; - if let Some(block) = beacon_block { - // This is not a stream termination, simply add the block to the request - if let Err(e) = batch.add_block(block) { - self.fail_sync(BackFillError::BatchInvalidState(batch_id, e.0))?; - } - Ok(ProcessResult::Successful) - } else { + { // A stream termination has been sent. This batch has ended. Process a completed batch. 
// Remove the request from the peer's active batches self.active_requests .get_mut(peer_id) .map(|active_requests| active_requests.remove(&batch_id)); - match batch.download_completed() { + match batch.download_completed(blocks) { Ok(received) => { let awaiting_batches = self.processing_target.saturating_sub(batch_id) / BACKFILL_EPOCHS_PER_BATCH; diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 4c1a1e6b67..fc1a218d82 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -908,39 +908,32 @@ impl SyncManager { { match resp.responses { Ok(blocks) => { - for block in blocks - .into_iter() - .map(Some) - // chain the stream terminator - .chain(vec![None]) - { - match resp.sender_id { - RangeRequestId::RangeSync { chain_id, batch_id } => { - self.range_sync.blocks_by_range_response( - &mut self.network, - peer_id, - chain_id, - batch_id, - id, - block, - ); - self.update_sync_state(); - } - RangeRequestId::BackfillSync { batch_id } => { - match self.backfill_sync.on_block_response( - &mut self.network, - batch_id, - &peer_id, - id, - block, - ) { - Ok(ProcessResult::SyncCompleted) => self.update_sync_state(), - Ok(ProcessResult::Successful) => {} - Err(_error) => { - // The backfill sync has failed, errors are reported - // within. - self.update_sync_state(); - } + match resp.sender_id { + RangeRequestId::RangeSync { chain_id, batch_id } => { + self.range_sync.blocks_by_range_response( + &mut self.network, + peer_id, + chain_id, + batch_id, + id, + blocks, + ); + self.update_sync_state(); + } + RangeRequestId::BackfillSync { batch_id } => { + match self.backfill_sync.on_block_response( + &mut self.network, + batch_id, + &peer_id, + id, + blocks, + ) { + Ok(ProcessResult::SyncCompleted) => self.update_sync_state(), + Ok(ProcessResult::Successful) => {} + Err(_error) => { + // The backfill sync has failed, errors are reported + // within. + self.update_sync_state(); } } } diff --git a/beacon_node/network/src/sync/range_sync/batch.rs b/beacon_node/network/src/sync/range_sync/batch.rs index 75cb49d176..baba8c9a62 100644 --- a/beacon_node/network/src/sync/range_sync/batch.rs +++ b/beacon_node/network/src/sync/range_sync/batch.rs @@ -116,7 +116,7 @@ pub enum BatchState { /// The batch has failed either downloading or processing, but can be requested again. AwaitingDownload, /// The batch is being downloaded. - Downloading(PeerId, Vec>, Id), + Downloading(PeerId, Id), /// The batch has been completely downloaded and is ready for processing. AwaitingProcessing(PeerId, Vec>), /// The batch is being processed. @@ -199,7 +199,7 @@ impl BatchInfo { /// Verifies if an incoming block belongs to this batch. pub fn is_expecting_block(&self, peer_id: &PeerId, request_id: &Id) -> bool { - if let BatchState::Downloading(expected_peer, _, expected_id) = &self.state { + if let BatchState::Downloading(expected_peer, expected_id) = &self.state { return peer_id == expected_peer && expected_id == request_id; } false @@ -209,7 +209,7 @@ impl BatchInfo { pub fn current_peer(&self) -> Option<&PeerId> { match &self.state { BatchState::AwaitingDownload | BatchState::Failed => None, - BatchState::Downloading(peer_id, _, _) + BatchState::Downloading(peer_id, _) | BatchState::AwaitingProcessing(peer_id, _) | BatchState::Processing(Attempt { peer_id, .. }) | BatchState::AwaitingValidation(Attempt { peer_id, .. 
}) => Some(peer_id), @@ -250,36 +250,18 @@ impl BatchInfo { &self.failed_processing_attempts } - /// Adds a block to a downloading batch. - pub fn add_block(&mut self, block: RpcBlock) -> Result<(), WrongState> { - match self.state.poison() { - BatchState::Downloading(peer, mut blocks, req_id) => { - blocks.push(block); - self.state = BatchState::Downloading(peer, blocks, req_id); - Ok(()) - } - BatchState::Poisoned => unreachable!("Poisoned batch"), - other => { - self.state = other; - Err(WrongState(format!( - "Add block for batch in wrong state {:?}", - self.state - ))) - } - } - } - /// Marks the batch as ready to be processed if the blocks are in the range. The number of /// received blocks is returned, or the wrong batch end on failure #[must_use = "Batch may have failed"] pub fn download_completed( &mut self, + blocks: Vec>, ) -> Result< usize, /* Received blocks */ Result<(Slot, Slot, BatchOperationOutcome), WrongState>, > { match self.state.poison() { - BatchState::Downloading(peer, blocks, _request_id) => { + BatchState::Downloading(peer, _request_id) => { // verify that blocks are in range if let Some(last_slot) = blocks.last().map(|b| b.slot()) { // the batch is non-empty @@ -336,7 +318,7 @@ impl BatchInfo { mark_failed: bool, ) -> Result { match self.state.poison() { - BatchState::Downloading(peer, _, _request_id) => { + BatchState::Downloading(peer, _request_id) => { // register the attempt and check if the batch can be tried again if mark_failed { self.failed_download_attempts.push(peer); @@ -369,7 +351,7 @@ impl BatchInfo { ) -> Result<(), WrongState> { match self.state.poison() { BatchState::AwaitingDownload => { - self.state = BatchState::Downloading(peer, Vec::new(), request_id); + self.state = BatchState::Downloading(peer, request_id); Ok(()) } BatchState::Poisoned => unreachable!("Poisoned batch"), @@ -536,13 +518,9 @@ impl std::fmt::Debug for BatchState { BatchState::AwaitingProcessing(ref peer, ref blocks) => { write!(f, "AwaitingProcessing({}, {} blocks)", peer, blocks.len()) } - BatchState::Downloading(peer, blocks, request_id) => write!( - f, - "Downloading({}, {} blocks, {})", - peer, - blocks.len(), - request_id - ), + BatchState::Downloading(peer, request_id) => { + write!(f, "Downloading({}, {})", peer, request_id) + } BatchState::Poisoned => f.write_str("Poisoned"), } } diff --git a/beacon_node/network/src/sync/range_sync/chain.rs b/beacon_node/network/src/sync/range_sync/chain.rs index 63cafa9aca..13c9f4be3b 100644 --- a/beacon_node/network/src/sync/range_sync/chain.rs +++ b/beacon_node/network/src/sync/range_sync/chain.rs @@ -200,7 +200,7 @@ impl SyncingChain { batch_id: BatchId, peer_id: &PeerId, request_id: Id, - beacon_block: Option>, + blocks: Vec>, ) -> ProcessingResult { // check if we have this batch let batch = match self.batches.get_mut(&batch_id) { @@ -221,18 +221,14 @@ impl SyncingChain { } }; - if let Some(block) = beacon_block { - // This is not a stream termination, simply add the block to the request - batch.add_block(block)?; - Ok(KeepChain) - } else { + { // A stream termination has been sent. This batch has ended. Process a completed batch. 
// Remove the request from the peer's active batches self.peers .get_mut(peer_id) .map(|active_requests| active_requests.remove(&batch_id)); - match batch.download_completed() { + match batch.download_completed(blocks) { Ok(received) => { let awaiting_batches = batch_id .saturating_sub(self.optimistic_start.unwrap_or(self.processing_target)) diff --git a/beacon_node/network/src/sync/range_sync/range.rs b/beacon_node/network/src/sync/range_sync/range.rs index fe48db35b4..5393b8792c 100644 --- a/beacon_node/network/src/sync/range_sync/range.rs +++ b/beacon_node/network/src/sync/range_sync/range.rs @@ -210,11 +210,11 @@ where chain_id: ChainId, batch_id: BatchId, request_id: Id, - beacon_block: Option>, + blocks: Vec>, ) { // check if this chunk removes the chain match self.chains.call_by_id(chain_id, |chain| { - chain.on_block_response(network, batch_id, &peer_id, request_id, beacon_block) + chain.on_block_response(network, batch_id, &peer_id, request_id, blocks) }) { Ok((removed_chain, sync_type)) => { if let Some((removed_chain, remove_reason)) = removed_chain { @@ -795,7 +795,7 @@ mod tests { rig.cx.update_execution_engine_state(EngineState::Offline); // send the response to the request - range.blocks_by_range_response(&mut rig.cx, peer1, chain1, batch1, id1, None); + range.blocks_by_range_response(&mut rig.cx, peer1, chain1, batch1, id1, vec![]); // the beacon processor shouldn't have received any work rig.expect_empty_processor(); @@ -809,7 +809,7 @@ mod tests { rig.complete_range_block_and_blobs_response(block_req, blob_req_opt); // send the response to the request - range.blocks_by_range_response(&mut rig.cx, peer2, chain2, batch2, id2, None); + range.blocks_by_range_response(&mut rig.cx, peer2, chain2, batch2, id2, vec![]); // the beacon processor shouldn't have received any work rig.expect_empty_processor(); From 9e12c21f268c80a3f002ae0ca27477f9f512eb6f Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Fri, 28 Jun 2024 12:10:32 +1000 Subject: [PATCH 09/11] Release v5.2.1 (testing branch) (#5989) * Release v5.2.1 --- Cargo.lock | 8 ++++---- beacon_node/Cargo.toml | 2 +- boot_node/Cargo.toml | 2 +- common/lighthouse_version/src/lib.rs | 4 ++-- lcli/Cargo.toml | 2 +- lighthouse/Cargo.toml | 2 +- 6 files changed, 10 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a1865289b0..814a9b45ed 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -855,7 +855,7 @@ dependencies = [ [[package]] name = "beacon_node" -version = "5.2.0" +version = "5.2.1" dependencies = [ "beacon_chain", "clap", @@ -1061,7 +1061,7 @@ dependencies = [ [[package]] name = "boot_node" -version = "5.2.0" +version = "5.2.1" dependencies = [ "beacon_node", "clap", @@ -4322,7 +4322,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lcli" -version = "5.2.0" +version = "5.2.1" dependencies = [ "account_utils", "beacon_chain", @@ -4893,7 +4893,7 @@ dependencies = [ [[package]] name = "lighthouse" -version = "5.2.0" +version = "5.2.1" dependencies = [ "account_manager", "account_utils", diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index b95720e807..a5fd29c971 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "beacon_node" -version = "5.2.0" +version = "5.2.1" authors = [ "Paul Hauner ", "Age Manning "] edition = { workspace = true } diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index 6fb06cc543..d32d799468 100644 --- a/common/lighthouse_version/src/lib.rs 
+++ b/common/lighthouse_version/src/lib.rs @@ -17,8 +17,8 @@ pub const VERSION: &str = git_version!( // NOTE: using --match instead of --exclude for compatibility with old Git "--match=thiswillnevermatchlol" ], - prefix = "Lighthouse/v5.2.0-", - fallback = "Lighthouse/v5.2.0" + prefix = "Lighthouse/v5.2.1-", + fallback = "Lighthouse/v5.2.1" ); /// Returns the first eight characters of the latest commit hash for this build. diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index 73dd93dc3e..3cddd8ee60 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lcli" description = "Lighthouse CLI (modeled after zcli)" -version = "5.2.0" +version = "5.2.1" authors = ["Paul Hauner "] edition = { workspace = true } diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 20466b5de7..a1674d8d2c 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lighthouse" -version = "5.2.0" +version = "5.2.1" authors = ["Sigma Prime "] edition = { workspace = true } autotests = false From 16b81132ca0fa23672cb69c2f81bddaff847e786 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Sat, 29 Jun 2024 09:43:21 +1000 Subject: [PATCH 10/11] Electra epoch processing (#5761) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Attestation superstruct changes for EIP 7549 (#5644) * update * experiment * superstruct changes * revert * superstruct changes * fix tests * indexed attestation * indexed attestation superstruct * updated TODOs * `superstruct` the `AttesterSlashing` (#5636) * `superstruct` Attester Fork Variants * Push a little further * Deal with Encode / Decode of AttesterSlashing * not so sure about this.. * Stop Encode/Decode Bounds from Propagating Out * Tons of Changes.. * More Conversions to AttestationRef * Add AsReference trait (#15) * Add AsReference trait * Fix some snafus * Got it Compiling! 
:D * Got Tests Building * Get beacon chain tests compiling --------- Co-authored-by: Michael Sproul * Merge remote-tracking branch 'upstream/unstable' into electra_attestation_changes * Make EF Tests Fork-Agnostic (#5713) * Finish EF Test Fork Agnostic (#5714) * Superstruct `AggregateAndProof` (#5715) * Upgrade `superstruct` to `0.8.0` * superstruct `AggregateAndProof` * Merge remote-tracking branch 'sigp/unstable' into electra_attestation_changes * cargo fmt * Merge pull request #5726 from realbigsean/electra_attestation_changes Merge unstable into Electra attestation changes * process withdrawals updates * cleanup withdrawals processing * update `process_operations` deposit length check * add apply_deposit changes * add execution layer withdrawal request processing * process deposit receipts * add consolidation processing * update process operations function * exit updates * clean up * update slash_validator * EIP7549 `get_attestation_indices` (#5657) * get attesting indices electra impl * fmt * get tests to pass * fmt * fix some beacon chain tests * fmt * fix slasher test * fmt got me again * fix more tests * fix tests * Some small changes (#5739) * Merge branch 'electra_attestation_changes' of https://github.com/sigp/lighthouse into block-processing-electra * cargo fmt (#5740) * Merge branch 'electra_attestation_changes' of https://github.com/sigp/lighthouse into block-processing-electra * fix attestation verification * Sketch op pool changes * fix get attesting indices (#5742) * fix get attesting indices * better errors * fix compile * only get committee index once * Merge branch 'electra_attestation_changes' of https://github.com/sigp/lighthouse into block-processing-electra * Ef test fixes (#5753) * attestation related ef test fixes * delete commented out stuff * Merge branch 'electra_attestation_changes' of https://github.com/sigp/lighthouse into block-processing-electra * Fix Aggregation Pool for Electra (#5754) * Fix Aggregation Pool for Electra * Remove Outdated Interface * fix ssz (#5755) * Get `electra_op_pool` up to date (#5756) * fix get attesting indices (#5742) * fix get attesting indices * better errors * fix compile * only get committee index once * Ef test fixes (#5753) * attestation related ef test fixes * delete commented out stuff * Fix Aggregation Pool for Electra (#5754) * Fix Aggregation Pool for Electra * Remove Outdated Interface * fix ssz (#5755) --------- Co-authored-by: realbigsean * Revert "Get `electra_op_pool` up to date (#5756)" (#5757) This reverts commit ab9e58aa3d0e6fe2175a4996a5de710e81152896. 
* Merge branch 'electra_attestation_changes' of https://github.com/sigp/lighthouse into electra_op_pool * Merge branch 'electra_attestation_changes' of https://github.com/sigp/lighthouse into block-processing-electra * Compute on chain aggregate impl (#5752) * add compute_on_chain_agg impl to op pool changes * fmt * get op pool tests to pass * update the naive agg pool interface (#5760) * Merge branch 'electra_attestation_changes' of https://github.com/sigp/lighthouse into block-processing-electra * Fix bugs in cross-committee aggregation * Add comment to max cover optimisation * Fix assert * Electra epoch processing * Merge pull request #5749 from sigp/electra_op_pool Optimise Electra op pool aggregation * don't fail on empty consolidations * Merge branch 'electra_attestation_changes' of https://github.com/sigp/lighthouse into block-processing-electra * Merge branch 'block-processing-electra' of https://github.com/sigp/lighthouse into electra-epoch-proc * update committee offset * Merge branch 'electra_attestation_changes' of https://github.com/sigp/lighthouse into block-processing-electra * update committee offset * only increment the state deposit index on old deposit flow * Merge branch 'block-processing-electra' of https://github.com/sigp/lighthouse into electra-epoch-proc * use correct max eb in epoch cache initialization * drop initiate validator ordering optimization * fix initiate exit for single pass * Fix Electra Fork Choice Tests (#5764) * Fix Electra Fork Choice Tests (#5764) * Fix Electra Fork Choice Tests (#5764) * Merge branch 'electra_attestation_changes' of https://github.com/sigp/lighthouse into block-processing-electra * Merge branch 'block-processing-electra' of https://github.com/sigp/lighthouse into electra-epoch-proc * Fix Consolidation Sigs & Withdrawals * Merge pull request #5766 from ethDreamer/two_fixes Fix Consolidation Sigs & Withdrawals * Merge branches 'block-processing-electra' and 'electra-epoch-proc' of https://github.com/sigp/lighthouse into electra-epoch-proc * Subscribe to the correct subnets for electra attestations (#5782) * subscribe to the correct att subnets for electra * subscribe to the correct att subnets for electra * cargo fmt * Subscribe to the correct subnets for electra attestations (#5782) * subscribe to the correct att subnets for electra * subscribe to the correct att subnets for electra * cargo fmt * Subscribe to the correct subnets for electra attestations (#5782) * subscribe to the correct att subnets for electra * subscribe to the correct att subnets for electra * cargo fmt * fix slashing handling * Fix Bug In Block Processing with 0x02 Credentials * Merge remote-tracking branch 'upstream/unstable' * Send unagg attestation based on fork * Publish all aggregates * just one more check bro plz.. 
* Merge pull request #5832 from ethDreamer/electra_attestation_changes_merge_unstable Merge `unstable` into `electra_attestation_changes` * Merge pull request #5835 from realbigsean/fix-validator-logic Fix validator logic * Merge pull request #5816 from realbigsean/electra-attestation-slashing-handling Electra slashing handling * Merge branch 'electra_attestation_changes' of https://github.com/sigp/lighthouse into block-processing-electra * Merge branch 'block-processing-electra' of https://github.com/sigp/lighthouse into electra-epoch-proc * Electra attestation changes rm decode impl (#5856) * Remove Crappy Decode impl for Attestation * Remove Inefficient Attestation Decode impl * Implement Schema Upgrade / Downgrade * Update beacon_node/beacon_chain/src/schema_change/migration_schema_v20.rs Co-authored-by: Michael Sproul --------- Co-authored-by: Michael Sproul * Fix failing attestation tests and misc electra attestation cleanup (#5810) * - get attestation related beacon chain tests to pass - observed attestations are now keyed off of data + committee index - rename op pool attestationref to compactattestationref - remove unwraps in agg pool and use options instead - cherry pick some changes from ef-tests-electra * cargo fmt * fix failing test * Revert dockerfile changes * make committee_index return option * function args shouldnt be a ref to attestation ref * fmt * fix dup imports --------- Co-authored-by: realbigsean * fix some todos (#5817) * Merge branch 'unstable' of https://github.com/sigp/lighthouse into electra_attestation_changes * add consolidations to merkle calc for inclusion proof * Merge branch 'electra_attestation_changes' of https://github.com/sigp/lighthouse into block-processing-electra * Merge branch 'block-processing-electra' of https://github.com/sigp/lighthouse into electra-epoch-proc * Remove Duplicate KZG Commitment Merkle Proof Code (#5874) * Remove Duplicate KZG Commitment Merkle Proof Code * s/tree_lists/fields/ * Merge branch 'unstable' of https://github.com/sigp/lighthouse into electra_attestation_changes * Merge branch 'electra_attestation_changes' of https://github.com/sigp/lighthouse into block-processing-electra * Merge branch 'block-processing-electra' of https://github.com/sigp/lighthouse into electra-epoch-proc * fix compile * Merge branch 'electra_attestation_changes' of https://github.com/sigp/lighthouse into block-processing-electra * Merge branch 'block-processing-electra' of https://github.com/sigp/lighthouse into electra-epoch-proc * Fix slasher tests (#5906) * Fix electra tests * Add electra attestations to double vote tests * Update superstruct to 0.8 * Merge remote-tracking branch 'origin/unstable' into electra_attestation_changes * Small cleanup in slasher tests * Clean up Electra observed aggregates (#5929) * Use consistent key in observed_attestations * Remove unwraps from observed aggregates * Merge branch 'unstable' of https://github.com/sigp/lighthouse into electra_attestation_changes * De-dup attestation constructor logic * Remove unwraps in Attestation construction * Dedup match_attestation_data * Remove outdated TODO * Use ForkName Ord in fork-choice tests * Use ForkName Ord in BeaconBlockBody * Make to_electra not fallible * Remove TestRandom impl for IndexedAttestation * Remove IndexedAttestation faulty Decode impl * Drop TestRandom impl * Add PendingAttestationInElectra * Indexed att on disk (#35) * indexed att on disk * fix lints * Update slasher/src/migrate.rs Co-authored-by: ethDreamer 
<37123614+ethDreamer@users.noreply.github.com> --------- Co-authored-by: Lion - dapplion <35266934+dapplion@users.noreply.github.com> Co-authored-by: ethDreamer <37123614+ethDreamer@users.noreply.github.com> * add electra fork enabled fn to ForkName impl (#36) * add electra fork enabled fn to ForkName impl * remove inadvertent file * Update common/eth2/src/types.rs Co-authored-by: ethDreamer <37123614+ethDreamer@users.noreply.github.com> * Dedup attestation constructor logic in attester cache * Use if let Ok for committee_bits * Dedup Attestation constructor code * Diff reduction in tests * Fix beacon_chain tests * Diff reduction * Use Ord for ForkName in pubsub * Resolve into_attestation_and_indices todo * Remove stale TODO * Fix beacon_chain tests * Test spec invariant * Use electra_enabled in pubsub * Remove get_indexed_attestation_from_signed_aggregate * Use ok_or instead of if let else * committees are sorted * remove dup method `get_indexed_attestation_from_committees` * Merge pull request #5940 from dapplion/electra_attestation_changes_lionreview Electra attestations #5712 review * update default persisted op pool deserialization * ensure aggregate and proof uses serde untagged on ref * Fork aware ssz static attestation tests * Electra attestation changes from Lions review (#5971) * dedup/cleanup and remove unneeded hashset use * remove irrelevant TODOs * Merge branch 'unstable' of https://github.com/sigp/lighthouse into electra_attestation_changes * Merge branch 'electra_attestation_changes' of https://github.com/sigp/lighthouse into block-processing-electra * Merge branch 'block-processing-electra' of https://github.com/sigp/lighthouse into electra-epoch-proc * Electra attestation changes sean review (#5972) * instantiate empty bitlist in unreachable code * clean up error conversion * fork enabled bool cleanup * remove a couple todos * return bools instead of options in `aggregate` and use the result * delete commented out code * use map macros in simple transformations * remove signers_disjoint_from * get ef tests compiling * get ef tests compiling * update intentionally excluded files * Avoid changing slasher schema for Electra * Delete slasher schema v4 * Fix clippy * Fix compilation of beacon_chain tests * Update database.rs * Update per_block_processing.rs * Add electra lightclient types * Update slasher/src/database.rs * fix imports * Merge pull request #5980 from dapplion/electra-lightclient Add electra lightclient types * Merge pull request #5975 from michaelsproul/electra-slasher-no-migration Avoid changing slasher schema for Electra * Update beacon_node/beacon_chain/src/attestation_verification.rs * Update beacon_node/beacon_chain/src/attestation_verification.rs * Merge branch 'unstable' of https://github.com/sigp/lighthouse into electra_attestation_changes * Merge branch 'electra_attestation_changes' of https://github.com/realbigsean/lighthouse into block-processing-electra * Merge branch 'block-processing-electra' of https://github.com/sigp/lighthouse into electra-epoch-proc * Merge branch 'unstable' of https://github.com/sigp/lighthouse into electra-epoch-proc * Update consensus/state_processing/src/per_epoch_processing/single_pass.rs * Update consensus/state_processing/src/per_epoch_processing/single_pass.rs * Update consensus/state_processing/src/per_epoch_processing/single_pass.rs * Update consensus/state_processing/src/per_epoch_processing/single_pass.rs * Update consensus/state_processing/src/per_epoch_processing/single_pass.rs --- 
.../src/common/initiate_validator_exit.rs | 21 +- consensus/state_processing/src/epoch_cache.rs | 49 +- .../src/per_epoch_processing/errors.rs | 3 + .../src/per_epoch_processing/single_pass.rs | 463 ++++++++++++++++-- consensus/types/src/chain_spec.rs | 10 +- consensus/types/src/validator.rs | 30 +- 6 files changed, 506 insertions(+), 70 deletions(-) diff --git a/consensus/state_processing/src/common/initiate_validator_exit.rs b/consensus/state_processing/src/common/initiate_validator_exit.rs index 8874e9ed4b..49e3a7390d 100644 --- a/consensus/state_processing/src/common/initiate_validator_exit.rs +++ b/consensus/state_processing/src/common/initiate_validator_exit.rs @@ -8,12 +8,12 @@ pub fn initiate_validator_exit( index: usize, spec: &ChainSpec, ) -> Result<(), Error> { - // We do things in a slightly different order to the spec here. Instead of immediately checking - // whether the validator has already exited, we instead prepare the exit cache and compute the - // cheap-to-calculate values from that. *Then* we look up the validator a single time in the - // validator tree (expensive), make the check and mutate as appropriate. Compared to the spec - // ordering, this saves us from looking up the validator in the validator registry multiple - // times. + let validator = state.get_validator_cow(index)?; + + // Return if the validator already initiated exit + if validator.exit_epoch != spec.far_future_epoch { + return Ok(()); + } // Ensure the exit cache is built. state.build_exit_cache(spec)?; @@ -36,14 +36,7 @@ pub fn initiate_validator_exit( exit_queue_epoch }; - let validator = state.get_validator_cow(index)?; - - // Return if the validator already initiated exit - if validator.exit_epoch != spec.far_future_epoch { - return Ok(()); - } - - let validator = validator.into_mut()?; + let validator = state.get_validator_mut(index)?; validator.exit_epoch = exit_queue_epoch; validator.withdrawable_epoch = exit_queue_epoch.safe_add(spec.min_validator_withdrawability_delay)?; diff --git a/consensus/state_processing/src/epoch_cache.rs b/consensus/state_processing/src/epoch_cache.rs index b2f2d85407..0e940fabe4 100644 --- a/consensus/state_processing/src/epoch_cache.rs +++ b/consensus/state_processing/src/epoch_cache.rs @@ -9,6 +9,7 @@ use types::{ActivationQueue, BeaconState, ChainSpec, EthSpec, ForkName, Hash256} pub struct PreEpochCache { epoch_key: EpochCacheKey, effective_balances: Vec, + total_active_balance: u64, } impl PreEpochCache { @@ -36,27 +37,59 @@ impl PreEpochCache { Ok(Self { epoch_key, effective_balances: Vec::with_capacity(state.validators().len()), + total_active_balance: 0, }) } - pub fn push_effective_balance(&mut self, effective_balance: u64) { - self.effective_balances.push(effective_balance); + pub fn update_effective_balance( + &mut self, + validator_index: usize, + effective_balance: u64, + is_active_next_epoch: bool, + ) -> Result<(), EpochCacheError> { + if validator_index == self.effective_balances.len() { + self.effective_balances.push(effective_balance); + if is_active_next_epoch { + self.total_active_balance + .safe_add_assign(effective_balance)?; + } + + Ok(()) + } else if let Some(existing_balance) = self.effective_balances.get_mut(validator_index) { + // Update total active balance for a late change in effective balance. This happens when + // processing consolidations. 
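+            // Only the delta matters here: add the updated effective balance and
+            // subtract the previously recorded one, and only for validators that
+            // are active in the next epoch.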
+ if is_active_next_epoch { + self.total_active_balance + .safe_add_assign(effective_balance)?; + self.total_active_balance + .safe_sub_assign(*existing_balance)?; + } + *existing_balance = effective_balance; + Ok(()) + } else { + Err(EpochCacheError::ValidatorIndexOutOfBounds { validator_index }) + } + } + + pub fn get_total_active_balance(&self) -> u64 { + self.total_active_balance } pub fn into_epoch_cache( self, - total_active_balance: u64, activation_queue: ActivationQueue, spec: &ChainSpec, ) -> Result { let epoch = self.epoch_key.epoch; + let total_active_balance = self.total_active_balance; let sqrt_total_active_balance = SqrtTotalActiveBalance::new(total_active_balance); let base_reward_per_increment = BaseRewardPerIncrement::new(total_active_balance, spec)?; let effective_balance_increment = spec.effective_balance_increment; - let max_effective_balance_eth = spec - .max_effective_balance - .safe_div(effective_balance_increment)?; + let max_effective_balance = + spec.max_effective_balance_for_fork(spec.fork_name_at_epoch(epoch)); + let max_effective_balance_eth = + max_effective_balance.safe_div(effective_balance_increment)?; let mut base_rewards = Vec::with_capacity(max_effective_balance_eth.safe_add(1)? as usize); @@ -131,9 +164,9 @@ pub fn initialize_epoch_cache( decision_block_root, }, effective_balances, + total_active_balance, }; - *state.epoch_cache_mut() = - pre_epoch_cache.into_epoch_cache(total_active_balance, activation_queue, spec)?; + *state.epoch_cache_mut() = pre_epoch_cache.into_epoch_cache(activation_queue, spec)?; Ok(()) } diff --git a/consensus/state_processing/src/per_epoch_processing/errors.rs b/consensus/state_processing/src/per_epoch_processing/errors.rs index de481ec676..b6c9dbea52 100644 --- a/consensus/state_processing/src/per_epoch_processing/errors.rs +++ b/consensus/state_processing/src/per_epoch_processing/errors.rs @@ -25,6 +25,9 @@ pub enum EpochProcessingError { InvalidFlagIndex(usize), MilhouseError(milhouse::Error), EpochCache(EpochCacheError), + SinglePassMissingActivationQueue, + MissingEarliestExitEpoch, + MissingExitBalanceToConsume, } impl From for EpochProcessingError { diff --git a/consensus/state_processing/src/per_epoch_processing/single_pass.rs b/consensus/state_processing/src/per_epoch_processing/single_pass.rs index a9629e73e4..e5905b8fa2 100644 --- a/consensus/state_processing/src/per_epoch_processing/single_pass.rs +++ b/consensus/state_processing/src/per_epoch_processing/single_pass.rs @@ -1,20 +1,24 @@ use crate::{ - common::update_progressive_balances_cache::initialize_progressive_balances_cache, + common::{ + decrease_balance, increase_balance, + update_progressive_balances_cache::initialize_progressive_balances_cache, + }, epoch_cache::{initialize_epoch_cache, PreEpochCache}, per_epoch_processing::{Delta, Error, ParticipationEpochSummary}, }; use itertools::izip; use safe_arith::{SafeArith, SafeArithIter}; use std::cmp::{max, min}; -use std::collections::BTreeSet; +use std::collections::{BTreeSet, HashMap}; use types::{ consts::altair::{ NUM_FLAG_INDICES, PARTICIPATION_FLAG_WEIGHTS, TIMELY_HEAD_FLAG_INDEX, TIMELY_TARGET_FLAG_INDEX, WEIGHT_DENOMINATOR, }, milhouse::Cow, - ActivationQueue, BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, ExitCache, ForkName, - ParticipationFlags, ProgressiveBalancesCache, RelativeEpoch, Unsigned, Validator, + ActivationQueue, BeaconState, BeaconStateError, ChainSpec, Checkpoint, Epoch, EthSpec, + ExitCache, ForkName, List, ParticipationFlags, ProgressiveBalancesCache, RelativeEpoch, + 
Unsigned, Validator, }; pub struct SinglePassConfig { @@ -22,6 +26,8 @@ pub struct SinglePassConfig { pub rewards_and_penalties: bool, pub registry_updates: bool, pub slashings: bool, + pub pending_balance_deposits: bool, + pub pending_consolidations: bool, pub effective_balance_updates: bool, } @@ -38,6 +44,8 @@ impl SinglePassConfig { rewards_and_penalties: true, registry_updates: true, slashings: true, + pending_balance_deposits: true, + pending_consolidations: true, effective_balance_updates: true, } } @@ -48,6 +56,8 @@ impl SinglePassConfig { rewards_and_penalties: false, registry_updates: false, slashings: false, + pending_balance_deposits: false, + pending_consolidations: false, effective_balance_updates: false, } } @@ -57,6 +67,7 @@ impl SinglePassConfig { struct StateContext { current_epoch: Epoch, next_epoch: Epoch, + finalized_checkpoint: Checkpoint, is_in_inactivity_leak: bool, total_active_balance: u64, churn_limit: u64, @@ -73,6 +84,15 @@ struct SlashingsContext { target_withdrawable_epoch: Epoch, } +struct PendingBalanceDepositsContext { + /// The value to set `next_deposit_index` to *after* processing completes. + next_deposit_index: usize, + /// The value to set `deposit_balance_to_consume` to *after* processing completes. + deposit_balance_to_consume: u64, + /// Total balance increases for each validator due to pending balance deposits. + validator_deposits_to_process: HashMap, +} + struct EffectiveBalancesContext { downward_threshold: u64, upward_threshold: u64, @@ -129,6 +149,7 @@ pub fn process_epoch_single_pass( let state_ctxt = &StateContext { current_epoch, next_epoch, + finalized_checkpoint, is_in_inactivity_leak, total_active_balance, churn_limit, @@ -139,6 +160,16 @@ pub fn process_epoch_single_pass( let slashings_ctxt = &SlashingsContext::new(state, state_ctxt, spec)?; let mut next_epoch_cache = PreEpochCache::new_for_next_epoch(state)?; + let pending_balance_deposits_ctxt = + if fork_name.electra_enabled() && conf.pending_balance_deposits { + Some(PendingBalanceDepositsContext::new(state, spec)?) + } else { + None + }; + + let mut earliest_exit_epoch = state.earliest_exit_epoch().ok(); + let mut exit_balance_to_consume = state.exit_balance_to_consume().ok(); + // Split the state into several disjoint mutable borrows. let ( validators, @@ -165,12 +196,19 @@ pub fn process_epoch_single_pass( // Compute shared values required for different parts of epoch processing. let rewards_ctxt = &RewardsAndPenaltiesContext::new(progressive_balances, state_ctxt, spec)?; - let activation_queue = &epoch_cache - .activation_queue()? - .get_validators_eligible_for_activation( - finalized_checkpoint.epoch, - activation_churn_limit as usize, - ); + + let mut activation_queues = if !fork_name.electra_enabled() { + let activation_queue = epoch_cache + .activation_queue()? + .get_validators_eligible_for_activation( + finalized_checkpoint.epoch, + activation_churn_limit as usize, + ); + let next_epoch_activation_queue = ActivationQueue::default(); + Some((activation_queue, next_epoch_activation_queue)) + } else { + None + }; let effective_balances_ctxt = &EffectiveBalancesContext::new(spec)?; // Iterate over the validators and related fields in one pass. @@ -178,10 +216,6 @@ pub fn process_epoch_single_pass( let mut balances_iter = balances.iter_cow(); let mut inactivity_scores_iter = inactivity_scores.iter_cow(); - // Values computed for the next epoch transition. 
- let mut next_epoch_total_active_balance = 0; - let mut next_epoch_activation_queue = ActivationQueue::default(); - for (index, &previous_epoch_participation, ¤t_epoch_participation) in izip!( 0..num_validators, previous_epoch_participation.iter(), @@ -246,13 +280,17 @@ pub fn process_epoch_single_pass( // `process_registry_updates` if conf.registry_updates { + let activation_queue_refs = activation_queues + .as_mut() + .map(|(current_queue, next_queue)| (&*current_queue, next_queue)); process_single_registry_update( &mut validator, validator_info, exit_cache, - activation_queue, - &mut next_epoch_activation_queue, + activation_queue_refs, state_ctxt, + earliest_exit_epoch.as_mut(), + exit_balance_to_consume.as_mut(), spec, )?; } @@ -262,13 +300,22 @@ pub fn process_epoch_single_pass( process_single_slashing(&mut balance, &validator, slashings_ctxt, state_ctxt, spec)?; } + // `process_pending_balance_deposits` + if let Some(pending_balance_deposits_ctxt) = &pending_balance_deposits_ctxt { + process_pending_balance_deposits_for_validator( + &mut balance, + validator_info, + pending_balance_deposits_ctxt, + )?; + } + // `process_effective_balance_updates` if conf.effective_balance_updates { process_single_effective_balance_update( + validator_info.index, *balance, &mut validator, - validator_info, - &mut next_epoch_total_active_balance, + validator_info.current_epoch_participation, &mut next_epoch_cache, progressive_balances, effective_balances_ctxt, @@ -278,15 +325,56 @@ pub fn process_epoch_single_pass( } } - if conf.effective_balance_updates { - state.set_total_active_balance(next_epoch, next_epoch_total_active_balance, spec); - *state.epoch_cache_mut() = next_epoch_cache.into_epoch_cache( - next_epoch_total_active_balance, - next_epoch_activation_queue, + if conf.registry_updates && fork_name >= ForkName::Electra { + if let Ok(earliest_exit_epoch_state) = state.earliest_exit_epoch_mut() { + *earliest_exit_epoch_state = + earliest_exit_epoch.ok_or(Error::MissingEarliestExitEpoch)?; + } + if let Ok(exit_balance_to_consume_state) = state.exit_balance_to_consume_mut() { + *exit_balance_to_consume_state = + exit_balance_to_consume.ok_or(Error::MissingExitBalanceToConsume)?; + } + } + + // Finish processing pending balance deposits if relevant. + // + // This *could* be reordered after `process_pending_consolidations` which pushes only to the end + // of the `pending_balance_deposits` list. But we may as well preserve the write ordering used + // by the spec and do this first. + if let Some(ctxt) = pending_balance_deposits_ctxt { + let new_pending_balance_deposits = List::try_from_iter( + state + .pending_balance_deposits()? + .iter_from(ctxt.next_deposit_index)? + .cloned(), + )?; + *state.pending_balance_deposits_mut()? = new_pending_balance_deposits; + *state.deposit_balance_to_consume_mut()? = ctxt.deposit_balance_to_consume; + } + + // Process consolidations outside the single-pass loop, as they depend on balances for multiple + // validators and cannot be computed accurately inside the loop. + if fork_name.electra_enabled() && conf.pending_consolidations { + process_pending_consolidations( + state, + &mut next_epoch_cache, + effective_balances_ctxt, + state_ctxt, spec, )?; } + // Finally, finish updating effective balance caches. We need this to happen *after* processing + // of pending consolidations, which recomputes some effective balances. 
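+    // The pre-epoch cache has been accumulating the total itself (via
+    // `update_effective_balance`), so the next-epoch total active balance can be
+    // read directly from it below.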
+ if conf.effective_balance_updates { + let next_epoch_total_active_balance = next_epoch_cache.get_total_active_balance(); + state.set_total_active_balance(next_epoch, next_epoch_total_active_balance, spec); + let next_epoch_activation_queue = + activation_queues.map_or_else(ActivationQueue::default, |(_, queue)| queue); + *state.epoch_cache_mut() = + next_epoch_cache.into_epoch_cache(next_epoch_activation_queue, spec)?; + } + Ok(summary) } @@ -455,7 +543,42 @@ impl RewardsAndPenaltiesContext { } } +#[allow(clippy::too_many_arguments)] fn process_single_registry_update( + validator: &mut Cow, + validator_info: &ValidatorInfo, + exit_cache: &mut ExitCache, + activation_queues: Option<(&BTreeSet, &mut ActivationQueue)>, + state_ctxt: &StateContext, + earliest_exit_epoch: Option<&mut Epoch>, + exit_balance_to_consume: Option<&mut u64>, + spec: &ChainSpec, +) -> Result<(), Error> { + if !state_ctxt.fork_name.electra_enabled() { + let (activation_queue, next_epoch_activation_queue) = + activation_queues.ok_or(Error::SinglePassMissingActivationQueue)?; + process_single_registry_update_pre_electra( + validator, + validator_info, + exit_cache, + activation_queue, + next_epoch_activation_queue, + state_ctxt, + spec, + ) + } else { + process_single_registry_update_post_electra( + validator, + exit_cache, + state_ctxt, + earliest_exit_epoch.ok_or(Error::MissingEarliestExitEpoch)?, + exit_balance_to_consume.ok_or(Error::MissingExitBalanceToConsume)?, + spec, + ) + } +} + +fn process_single_registry_update_pre_electra( validator: &mut Cow, validator_info: &ValidatorInfo, exit_cache: &mut ExitCache, @@ -472,7 +595,7 @@ fn process_single_registry_update( if validator.is_active_at(current_epoch) && validator.effective_balance <= spec.ejection_balance { - initiate_validator_exit(validator, exit_cache, state_ctxt, spec)?; + initiate_validator_exit(validator, exit_cache, state_ctxt, None, None, spec)?; } if activation_queue.contains(&validator_info.index) { @@ -491,10 +614,49 @@ fn process_single_registry_update( Ok(()) } +fn process_single_registry_update_post_electra( + validator: &mut Cow, + exit_cache: &mut ExitCache, + state_ctxt: &StateContext, + earliest_exit_epoch: &mut Epoch, + exit_balance_to_consume: &mut u64, + spec: &ChainSpec, +) -> Result<(), Error> { + let current_epoch = state_ctxt.current_epoch; + + if validator.is_eligible_for_activation_queue(spec, state_ctxt.fork_name) { + validator.make_mut()?.activation_eligibility_epoch = current_epoch.safe_add(1)?; + } + + if validator.is_active_at(current_epoch) && validator.effective_balance <= spec.ejection_balance + { + initiate_validator_exit( + validator, + exit_cache, + state_ctxt, + Some(earliest_exit_epoch), + Some(exit_balance_to_consume), + spec, + )?; + } + + if validator.is_eligible_for_activation_with_finalized_checkpoint( + &state_ctxt.finalized_checkpoint, + spec, + ) { + validator.make_mut()?.activation_epoch = + spec.compute_activation_exit_epoch(current_epoch)?; + } + + Ok(()) +} + fn initiate_validator_exit( validator: &mut Cow, exit_cache: &mut ExitCache, state_ctxt: &StateContext, + earliest_exit_epoch: Option<&mut Epoch>, + exit_balance_to_consume: Option<&mut u64>, spec: &ChainSpec, ) -> Result<(), Error> { // Return if the validator already initiated exit @@ -502,16 +664,27 @@ fn initiate_validator_exit( return Ok(()); } - // Compute exit queue epoch - let delayed_epoch = spec.compute_activation_exit_epoch(state_ctxt.current_epoch)?; - let mut exit_queue_epoch = exit_cache - .max_epoch()? 
- .map_or(delayed_epoch, |epoch| max(epoch, delayed_epoch)); - let exit_queue_churn = exit_cache.get_churn_at(exit_queue_epoch)?; + let exit_queue_epoch = if state_ctxt.fork_name.electra_enabled() { + compute_exit_epoch_and_update_churn( + validator, + state_ctxt, + earliest_exit_epoch.ok_or(Error::MissingEarliestExitEpoch)?, + exit_balance_to_consume.ok_or(Error::MissingExitBalanceToConsume)?, + spec, + )? + } else { + // Compute exit queue epoch + let delayed_epoch = spec.compute_activation_exit_epoch(state_ctxt.current_epoch)?; + let mut exit_queue_epoch = exit_cache + .max_epoch()? + .map_or(delayed_epoch, |epoch| max(epoch, delayed_epoch)); + let exit_queue_churn = exit_cache.get_churn_at(exit_queue_epoch)?; - if exit_queue_churn >= state_ctxt.churn_limit { - exit_queue_epoch.safe_add_assign(1)?; - } + if exit_queue_churn >= state_ctxt.churn_limit { + exit_queue_epoch.safe_add_assign(1)?; + } + exit_queue_epoch + }; let validator = validator.make_mut()?; validator.exit_epoch = exit_queue_epoch; @@ -522,6 +695,64 @@ fn initiate_validator_exit( Ok(()) } +fn compute_exit_epoch_and_update_churn( + validator: &mut Cow, + state_ctxt: &StateContext, + earliest_exit_epoch_state: &mut Epoch, + exit_balance_to_consume_state: &mut u64, + spec: &ChainSpec, +) -> Result { + let exit_balance = validator.effective_balance; + let mut earliest_exit_epoch = std::cmp::max( + *earliest_exit_epoch_state, + spec.compute_activation_exit_epoch(state_ctxt.current_epoch)?, + ); + + let per_epoch_churn = get_activation_exit_churn_limit(state_ctxt, spec)?; + // New epoch for exits + let mut exit_balance_to_consume = if *earliest_exit_epoch_state < earliest_exit_epoch { + per_epoch_churn + } else { + *exit_balance_to_consume_state + }; + + // Exit doesn't fit in the current earliest epoch + if exit_balance > exit_balance_to_consume { + let balance_to_process = exit_balance.safe_sub(exit_balance_to_consume)?; + let additional_epochs = balance_to_process + .safe_sub(1)? + .safe_div(per_epoch_churn)? + .safe_add(1)?; + earliest_exit_epoch.safe_add_assign(additional_epochs)?; + exit_balance_to_consume.safe_add_assign(additional_epochs.safe_mul(per_epoch_churn)?)?; + } + // Consume the balance and update state variables + *exit_balance_to_consume_state = exit_balance_to_consume.safe_sub(exit_balance)?; + *earliest_exit_epoch_state = earliest_exit_epoch; + + Ok(earliest_exit_epoch) +} + +fn get_activation_exit_churn_limit( + state_ctxt: &StateContext, + spec: &ChainSpec, +) -> Result { + Ok(std::cmp::min( + spec.max_per_epoch_activation_exit_churn_limit, + get_balance_churn_limit(state_ctxt, spec)?, + )) +} + +fn get_balance_churn_limit(state_ctxt: &StateContext, spec: &ChainSpec) -> Result { + let total_active_balance = state_ctxt.total_active_balance; + let churn = std::cmp::max( + spec.min_per_epoch_churn_limit_electra, + total_active_balance.safe_div(spec.churn_limit_quotient)?, + ); + + Ok(churn.safe_sub(churn.safe_rem(spec.effective_balance_increment)?)?) +} + impl SlashingsContext { fn new( state: &BeaconState, @@ -568,6 +799,146 @@ fn process_single_slashing( Ok(()) } +impl PendingBalanceDepositsContext { + fn new(state: &BeaconState, spec: &ChainSpec) -> Result { + let available_for_processing = state + .deposit_balance_to_consume()? 
+            .safe_add(state.get_activation_exit_churn_limit(spec)?)?;
+        let mut processed_amount = 0;
+        let mut next_deposit_index = 0;
+        let mut validator_deposits_to_process = HashMap::new();
+
+        let pending_balance_deposits = state.pending_balance_deposits()?;
+
+        for deposit in pending_balance_deposits.iter() {
+            if processed_amount.safe_add(deposit.amount)? > available_for_processing {
+                break;
+            }
+            validator_deposits_to_process
+                .entry(deposit.index as usize)
+                .or_insert(0)
+                .safe_add_assign(deposit.amount)?;
+
+            processed_amount.safe_add_assign(deposit.amount)?;
+            next_deposit_index.safe_add_assign(1)?;
+        }
+
+        let deposit_balance_to_consume = if next_deposit_index == pending_balance_deposits.len() {
+            0
+        } else {
+            available_for_processing.safe_sub(processed_amount)?
+        };
+
+        Ok(Self {
+            next_deposit_index,
+            deposit_balance_to_consume,
+            validator_deposits_to_process,
+        })
+    }
+}
+
+fn process_pending_balance_deposits_for_validator(
+    balance: &mut Cow<u64>,
+    validator_info: &ValidatorInfo,
+    pending_balance_deposits_ctxt: &PendingBalanceDepositsContext,
+) -> Result<(), Error> {
+    if let Some(deposit_amount) = pending_balance_deposits_ctxt
+        .validator_deposits_to_process
+        .get(&validator_info.index)
+    {
+        balance.make_mut()?.safe_add_assign(*deposit_amount)?;
+    }
+    Ok(())
+}
+
+/// We process pending consolidations after all of single-pass epoch processing, and then patch up
+/// the effective balances for affected validators.
+///
+/// This is safe because processing consolidations does not depend on the `effective_balance`.
+fn process_pending_consolidations<E: EthSpec>(
+    state: &mut BeaconState<E>,
+    next_epoch_cache: &mut PreEpochCache,
+    effective_balances_ctxt: &EffectiveBalancesContext,
+    state_ctxt: &StateContext,
+    spec: &ChainSpec,
+) -> Result<(), Error> {
+    let mut next_pending_consolidation: usize = 0;
+    let current_epoch = state.current_epoch();
+    let pending_consolidations = state.pending_consolidations()?.clone();
+
+    let mut affected_validators = BTreeSet::new();
+
+    for pending_consolidation in &pending_consolidations {
+        let source_index = pending_consolidation.source_index as usize;
+        let target_index = pending_consolidation.target_index as usize;
+        let source_validator = state.get_validator(source_index)?;
+        if source_validator.slashed {
+            next_pending_consolidation.safe_add_assign(1)?;
+            continue;
+        }
+        if source_validator.withdrawable_epoch > current_epoch {
+            break;
+        }
+
+        // Calculate the active balance while we have the source validator loaded. This is a safe
+        // reordering.
+        let source_balance = *state
+            .balances()
+            .get(source_index)
+            .ok_or(BeaconStateError::UnknownValidator(source_index))?;
+        let active_balance =
+            source_validator.get_active_balance(source_balance, spec, state_ctxt.fork_name);
+
+        // Switch the target to compounding credentials. This churns any excess active balance
+        // on the target and raises its max effective balance.
+        state.switch_to_compounding_validator(target_index, spec)?;
+
+        // Move active balance to target. Excess balance is withdrawable.
+        decrease_balance(state, source_index, active_balance)?;
+        increase_balance(state, target_index, active_balance)?;
+
+        affected_validators.insert(source_index);
+        affected_validators.insert(target_index);
+
+        next_pending_consolidation.safe_add_assign(1)?;
+    }
+
+    let new_pending_consolidations = List::try_from_iter(
+        state
+            .pending_consolidations()?
+            .iter_from(next_pending_consolidation)?
+            .cloned(),
+    )?;
+    *state.pending_consolidations_mut()? = new_pending_consolidations;
+
+    // Re-process effective balance updates for validators affected by consolidations.
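+    // Consolidations moved balance between source and target validators, so their
+    // effective balances (and their contributions to the cached next-epoch total)
+    // must be recomputed.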
+ let (validators, balances, _, current_epoch_participation, _, progressive_balances, _, _) = + state.mutable_validator_fields()?; + for validator_index in affected_validators { + let balance = *balances + .get(validator_index) + .ok_or(BeaconStateError::UnknownValidator(validator_index))?; + let mut validator = validators + .get_cow(validator_index) + .ok_or(BeaconStateError::UnknownValidator(validator_index))?; + let validator_current_epoch_participation = *current_epoch_participation + .get(validator_index) + .ok_or(BeaconStateError::UnknownValidator(validator_index))?; + + process_single_effective_balance_update( + validator_index, + balance, + &mut validator, + validator_current_epoch_participation, + next_epoch_cache, + progressive_balances, + effective_balances_ctxt, + state_ctxt, + spec, + )?; + } + Ok(()) +} + impl EffectiveBalancesContext { fn new(spec: &ChainSpec) -> Result { let hysteresis_increment = spec @@ -584,18 +955,24 @@ impl EffectiveBalancesContext { } } +/// This function abstracts over phase0 and Electra effective balance processing. #[allow(clippy::too_many_arguments)] fn process_single_effective_balance_update( + validator_index: usize, balance: u64, validator: &mut Cow, - validator_info: &ValidatorInfo, - next_epoch_total_active_balance: &mut u64, + validator_current_epoch_participation: ParticipationFlags, next_epoch_cache: &mut PreEpochCache, progressive_balances: &mut ProgressiveBalancesCache, eb_ctxt: &EffectiveBalancesContext, state_ctxt: &StateContext, spec: &ChainSpec, ) -> Result<(), Error> { + // Use the higher effective balance limit if post-Electra and compounding withdrawal credentials + // are set. + let effective_balance_limit = + validator.get_validator_max_effective_balance(spec, state_ctxt.fork_name); + let old_effective_balance = validator.effective_balance; let new_effective_balance = if balance.safe_add(eb_ctxt.downward_threshold)? < validator.effective_balance @@ -606,15 +983,13 @@ fn process_single_effective_balance_update( { min( balance.safe_sub(balance.safe_rem(spec.effective_balance_increment)?)?, - spec.max_effective_balance, + effective_balance_limit, ) } else { validator.effective_balance }; - if validator.is_active_at(state_ctxt.next_epoch) { - next_epoch_total_active_balance.safe_add_assign(new_effective_balance)?; - } + let is_active_next_epoch = validator.is_active_at(state_ctxt.next_epoch); if new_effective_balance != old_effective_balance { validator.make_mut()?.effective_balance = new_effective_balance; @@ -623,14 +998,18 @@ fn process_single_effective_balance_update( // previous epoch once the epoch transition completes. progressive_balances.on_effective_balance_change( validator.slashed, - validator_info.current_epoch_participation, + validator_current_epoch_participation, old_effective_balance, new_effective_balance, )?; } - // Caching: update next epoch effective balances. - next_epoch_cache.push_effective_balance(new_effective_balance); + // Caching: update next epoch effective balances and total active balance. 
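+    // This records the new effective balance in the next-epoch cache and, if the
+    // validator is active next epoch, folds it into the running total active balance.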
+ next_epoch_cache.update_effective_balance( + validator_index, + new_effective_balance, + is_active_next_epoch, + )?; Ok(()) } diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index d2f5909396..5c131d77c9 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -393,13 +393,21 @@ impl ChainSpec { state: &BeaconState, ) -> u64 { let fork_name = state.fork_name_unchecked(); - if fork_name >= ForkName::Electra { + if fork_name.electra_enabled() { self.whistleblower_reward_quotient_electra } else { self.whistleblower_reward_quotient } } + pub fn max_effective_balance_for_fork(&self, fork_name: ForkName) -> u64 { + if fork_name.electra_enabled() { + self.max_effective_balance_electra + } else { + self.max_effective_balance + } + } + /// Returns a full `Fork` struct for a given epoch. pub fn fork_at_epoch(&self, epoch: Epoch) -> Fork { let current_fork_name = self.fork_name_at_epoch(epoch); diff --git a/consensus/types/src/validator.rs b/consensus/types/src/validator.rs index 0054e95f9d..b5e92d1f5d 100644 --- a/consensus/types/src/validator.rs +++ b/consensus/types/src/validator.rs @@ -1,6 +1,6 @@ use crate::{ - test_utils::TestRandom, Address, BeaconState, ChainSpec, Epoch, EthSpec, ForkName, Hash256, - PublicKeyBytes, + test_utils::TestRandom, Address, BeaconState, ChainSpec, Checkpoint, Epoch, EthSpec, ForkName, + Hash256, PublicKeyBytes, }; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; @@ -87,15 +87,25 @@ impl Validator { } /// Returns `true` if the validator is eligible to be activated. - /// - /// Spec v0.12.1 pub fn is_eligible_for_activation( &self, state: &BeaconState, spec: &ChainSpec, + ) -> bool { + self.is_eligible_for_activation_with_finalized_checkpoint( + &state.finalized_checkpoint(), + spec, + ) + } + + /// Returns `true` if the validator is eligible to be activated. + pub fn is_eligible_for_activation_with_finalized_checkpoint( + &self, + finalized_checkpoint: &Checkpoint, + spec: &ChainSpec, ) -> bool { // Placement in queue is finalized - self.activation_eligibility_epoch <= state.finalized_checkpoint().epoch + self.activation_eligibility_epoch <= finalized_checkpoint.epoch // Has not yet been activated && self.activation_epoch == spec.far_future_epoch } @@ -255,6 +265,16 @@ impl Validator { spec.max_effective_balance } } + + pub fn get_active_balance( + &self, + validator_balance: u64, + spec: &ChainSpec, + current_fork: ForkName, + ) -> u64 { + let max_effective_balance = self.get_validator_max_effective_balance(spec, current_fork); + std::cmp::min(validator_balance, max_effective_balance) + } } impl Default for Validator { From 70bcba1e6b7a7123b2f48e1c77b9afb9bbb11c26 Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Mon, 1 Jul 2024 03:36:40 +0200 Subject: [PATCH 11/11] Redb slasher backend impl (#4529) * initial redb impl * redb impl * remove phantom data * fixed table definition * fighting the borrow checker * a rough draft that doesnt cause lifetime issues * refactoring * refactor * refactor * passing unit tests * refactor * refactor * refactor * commit * move everything to one database * remove panics, ready for a review * merge * a working redb impl * passing a ref of txn to cursor * this tries to create a second write transaction when initializing cursor. 
breaks everything * Use 2 lifetimes and subtyping Also fixes a bug in last_key caused by rev and next_back cancelling out * Move table into cursor * Merge remote-tracking branch 'origin/unstable' into redb-slasher-backend-impl * changes based on feedback * update lmdb * fix lifetime issues * moving everything from Cursor to Transaction * update * upgrade to redb 2.0 * Merge branch 'unstable' of https://github.com/sigp/lighthouse into redb-slasher-backend-impl * bring back cursor * Merge branch 'unstable' of https://github.com/sigp/lighthouse into redb-slasher-backend-impl * fix delete while * linting * linting * switch to lmdb * update redb to v2.1 * build fixes, remove unwrap or default * another build error * hopefully this is the last build error * fmt * cargo.toml * fix mdbx * Merge branch 'unstable' of https://github.com/sigp/lighthouse into redb-slasher-backend-impl * Remove a collect * Merge remote-tracking branch 'origin/unstable' into redb-slasher-backend-impl * Merge branch 'redb-slasher-backend-impl' of https://github.com/eserilev/lighthouse into redb-slasher-backend-impl * re-enable test * fix failing slasher test * Merge remote-tracking branch 'origin/unstable' into redb-slasher-backend-impl * Rename DB file to `slasher.redb` --- Cargo.lock | 11 ++ Makefile | 2 +- lighthouse/Cargo.toml | 2 + lighthouse/tests/beacon_node.rs | 2 + slasher/Cargo.toml | 4 + slasher/src/config.rs | 9 +- slasher/src/database.rs | 61 +++--- slasher/src/database/interface.rs | 86 ++++++--- slasher/src/database/lmdb_impl.rs | 25 ++- slasher/src/database/mdbx_impl.rs | 25 ++- slasher/src/database/redb_impl.rs | 276 ++++++++++++++++++++++++++++ slasher/src/error.rs | 38 ++++ slasher/src/lib.rs | 2 +- slasher/tests/attester_slashings.rs | 2 +- slasher/tests/backend.rs | 20 +- slasher/tests/proposer_slashings.rs | 2 +- slasher/tests/random.rs | 2 +- slasher/tests/wrap_around.rs | 2 +- 18 files changed, 499 insertions(+), 72 deletions(-) create mode 100644 slasher/src/database/redb_impl.rs diff --git a/Cargo.lock b/Cargo.lock index d0ada21c80..f23777bc4c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6678,6 +6678,15 @@ dependencies = [ "yasna", ] +[[package]] +name = "redb" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed7508e692a49b6b2290b56540384ccae9b1fb4d77065640b165835b56ffe3bb" +dependencies = [ + "libc", +] + [[package]] name = "redox_syscall" version = "0.2.16" @@ -7635,6 +7644,7 @@ version = "0.1.0" dependencies = [ "bincode", "byteorder", + "derivative", "ethereum_ssz", "ethereum_ssz_derive", "filesystem", @@ -7650,6 +7660,7 @@ dependencies = [ "parking_lot 0.12.3", "rand", "rayon", + "redb", "safe_arith", "serde", "slog", diff --git a/Makefile b/Makefile index 3f8e688df1..7d144e55fb 100644 --- a/Makefile +++ b/Makefile @@ -15,7 +15,7 @@ PINNED_NIGHTLY ?= nightly CLIPPY_PINNED_NIGHTLY=nightly-2022-05-19 # List of features to use when cross-compiling. Can be overridden via the environment. -CROSS_FEATURES ?= gnosis,slasher-lmdb,slasher-mdbx,jemalloc +CROSS_FEATURES ?= gnosis,slasher-lmdb,slasher-mdbx,slasher-redb,jemalloc # Cargo profile for Cross builds. Default is for local builds, CI uses an override. CROSS_PROFILE ?= release diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 67c3dc260e..912602776a 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -22,6 +22,8 @@ gnosis = [] slasher-mdbx = ["slasher/mdbx"] # Support slasher LMDB backend. slasher-lmdb = ["slasher/lmdb"] +# Support slasher redb backend. 
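+# redb is a pure-Rust embedded key-value store; the slasher keeps its data in a
+# single `slasher.redb` file.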
+slasher-redb = ["slasher/redb"] # Deprecated. This is now enabled by default on non windows targets. jemalloc = [] diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index caadf208e3..cd499f2ada 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -2242,6 +2242,8 @@ fn slasher_broadcast_flag_false() { assert!(!slasher_config.broadcast); }); } + +#[cfg(all(feature = "lmdb"))] #[test] fn slasher_backend_override_to_default() { // Hard to test this flag because all but one backend is disabled by default and the backend diff --git a/slasher/Cargo.toml b/slasher/Cargo.toml index 563c4599d8..01a8b9fb00 100644 --- a/slasher/Cargo.toml +++ b/slasher/Cargo.toml @@ -8,11 +8,13 @@ edition = { workspace = true } default = ["lmdb"] mdbx = ["dep:mdbx"] lmdb = ["lmdb-rkv", "lmdb-rkv-sys"] +redb = ["dep:redb"] portable = ["types/portable"] [dependencies] bincode = { workspace = true } byteorder = { workspace = true } +derivative = { workspace = true } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } flate2 = { version = "1.0.14", features = ["zlib"], default-features = false } @@ -36,6 +38,8 @@ mdbx = { package = "libmdbx", git = "https://github.com/sigp/libmdbx-rs", tag = lmdb-rkv = { git = "https://github.com/sigp/lmdb-rs", rev = "f33845c6469b94265319aac0ed5085597862c27e", optional = true } lmdb-rkv-sys = { git = "https://github.com/sigp/lmdb-rs", rev = "f33845c6469b94265319aac0ed5085597862c27e", optional = true } +redb = { version = "2.1", optional = true } + [dev-dependencies] maplit = { workspace = true } rayon = { workspace = true } diff --git a/slasher/src/config.rs b/slasher/src/config.rs index 1851e2e441..33d68fa0e5 100644 --- a/slasher/src/config.rs +++ b/slasher/src/config.rs @@ -15,16 +15,19 @@ pub const DEFAULT_MAX_DB_SIZE: usize = 512 * 1024; // 512 GiB pub const DEFAULT_ATTESTATION_ROOT_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(100_000); pub const DEFAULT_BROADCAST: bool = false; -#[cfg(all(feature = "mdbx", not(feature = "lmdb")))] +#[cfg(all(feature = "mdbx", not(any(feature = "lmdb", feature = "redb"))))] pub const DEFAULT_BACKEND: DatabaseBackend = DatabaseBackend::Mdbx; #[cfg(feature = "lmdb")] pub const DEFAULT_BACKEND: DatabaseBackend = DatabaseBackend::Lmdb; -#[cfg(not(any(feature = "mdbx", feature = "lmdb")))] +#[cfg(all(feature = "redb", not(any(feature = "mdbx", feature = "lmdb"))))] +pub const DEFAULT_BACKEND: DatabaseBackend = DatabaseBackend::Redb; +#[cfg(not(any(feature = "mdbx", feature = "lmdb", feature = "redb")))] pub const DEFAULT_BACKEND: DatabaseBackend = DatabaseBackend::Disabled; pub const MAX_HISTORY_LENGTH: usize = 1 << 16; pub const MEGABYTE: usize = 1 << 20; pub const MDBX_DATA_FILENAME: &str = "mdbx.dat"; +pub const REDB_DATA_FILENAME: &str = "slasher.redb"; #[derive(Debug, Clone, Serialize, Deserialize)] pub struct Config { @@ -64,6 +67,8 @@ pub enum DatabaseBackend { Mdbx, #[cfg(feature = "lmdb")] Lmdb, + #[cfg(feature = "redb")] + Redb, Disabled, } diff --git a/slasher/src/database.rs b/slasher/src/database.rs index 801abe9283..4f4729a123 100644 --- a/slasher/src/database.rs +++ b/slasher/src/database.rs @@ -1,6 +1,7 @@ pub mod interface; mod lmdb_impl; mod mdbx_impl; +mod redb_impl; use crate::{ metrics, AttesterRecord, AttesterSlashingStatus, CompactAttesterRecord, Config, Error, @@ -489,8 +490,7 @@ impl SlasherDB { } // Store the new indexed attestation at the end of the current table. 
-        let db = &self.databases.indexed_attestation_db;
-        let mut cursor = txn.cursor(db)?;
+        let mut cursor = txn.cursor(&self.databases.indexed_attestation_db)?;

         let indexed_att_id = match cursor.last_key()? {
             // First ID is 1 so that 0 can be used to represent `null` in `CompactAttesterRecord`.
@@ -504,7 +504,6 @@ impl SlasherDB {
         cursor.put(attestation_key.as_ref(), &data)?;
         drop(cursor);
-
         // Update the (epoch, hash) to ID mapping.
         self.put_indexed_attestation_id(txn, &id_key, attestation_key)?;
@@ -743,21 +742,17 @@ impl SlasherDB {
             return Ok(());
         }

-        loop {
-            let (key_bytes, _) = cursor.get_current()?.ok_or(Error::MissingProposerKey)?;
-
-            let (slot, _) = ProposerKey::parse(key_bytes)?;
-            if slot < min_slot {
-                cursor.delete_current()?;
-
-                // End the loop if there is no next entry.
-                if cursor.next_key()?.is_none() {
-                    break;
-                }
-            } else {
-                break;
-            }
-        }
+        let should_delete = |key: &[u8]| -> Result<bool, Error> {
+            let (slot, _) = ProposerKey::parse(Cow::from(key))?;
+            Ok(slot < min_slot)
+        };
+
+        cursor.delete_while(should_delete)?;

         Ok(())
     }
@@ -771,9 +766,6 @@ impl SlasherDB {
             .saturating_add(1u64)
             .saturating_sub(self.config.history_length as u64);

-        // Collect indexed attestation IDs to delete.
-        let mut indexed_attestation_ids = vec![];
-
         let mut cursor = txn.cursor(&self.databases.indexed_attestation_id_db)?;

         // Position cursor at first key, bailing out if the database is empty.
@@ -781,27 +773,20 @@ impl SlasherDB {
             return Ok(());
         }

-        loop {
-            let (key_bytes, value) = cursor
-                .get_current()?
-                .ok_or(Error::MissingIndexedAttestationIdKey)?;
-
-            let (target_epoch, _) = IndexedAttestationIdKey::parse(key_bytes)?;
-
-            if target_epoch < min_epoch {
-                indexed_attestation_ids.push(IndexedAttestationId::new(
-                    IndexedAttestationId::parse(value)?,
-                ));
-
-                cursor.delete_current()?;
-
-                if cursor.next_key()?.is_none() {
-                    break;
-                }
-            } else {
-                break;
-            }
-        }
+        let should_delete = |key: &[u8]| -> Result<bool, Error> {
+            let (target_epoch, _) = IndexedAttestationIdKey::parse(Cow::from(key))?;
+            Ok(target_epoch < min_epoch)
+        };
+
+        let indexed_attestation_ids = cursor
+            .delete_while(should_delete)?
+            .into_iter()
+            .map(|id| IndexedAttestationId::parse(id).map(IndexedAttestationId::new))
+            .collect::<Result<Vec<_>, Error>>()?;
         drop(cursor);

         // Delete the indexed attestations.
diff --git a/slasher/src/database/interface.rs b/slasher/src/database/interface.rs index 5bb920383c..46cf9a4a0c 100644 --- a/slasher/src/database/interface.rs +++ b/slasher/src/database/interface.rs @@ -7,6 +7,8 @@ use std::path::PathBuf; use crate::database::lmdb_impl; #[cfg(feature = "mdbx")] use crate::database::mdbx_impl; +#[cfg(feature = "redb")] +use crate::database::redb_impl; #[derive(Debug)] pub enum Environment { @@ -14,6 +16,8 @@ pub enum Environment { Mdbx(mdbx_impl::Environment), #[cfg(feature = "lmdb")] Lmdb(lmdb_impl::Environment), + #[cfg(feature = "redb")] + Redb(redb_impl::Environment), Disabled, } @@ -23,6 +27,8 @@ pub enum RwTransaction<'env> { Mdbx(mdbx_impl::RwTransaction<'env>), #[cfg(feature = "lmdb")] Lmdb(lmdb_impl::RwTransaction<'env>), + #[cfg(feature = "redb")] + Redb(redb_impl::RwTransaction<'env>), Disabled(PhantomData<&'env ()>), } @@ -32,6 +38,8 @@ pub enum Database<'env> { Mdbx(mdbx_impl::Database<'env>), #[cfg(feature = "lmdb")] Lmdb(lmdb_impl::Database<'env>), + #[cfg(feature = "redb")] + Redb(redb_impl::Database<'env>), Disabled(PhantomData<&'env ()>), } @@ -54,6 +62,8 @@ pub enum Cursor<'env> { Mdbx(mdbx_impl::Cursor<'env>), #[cfg(feature = "lmdb")] Lmdb(lmdb_impl::Cursor<'env>), + #[cfg(feature = "redb")] + Redb(redb_impl::Cursor<'env>), Disabled(PhantomData<&'env ()>), } @@ -67,6 +77,8 @@ impl Environment { DatabaseBackend::Mdbx => mdbx_impl::Environment::new(config).map(Environment::Mdbx), #[cfg(feature = "lmdb")] DatabaseBackend::Lmdb => lmdb_impl::Environment::new(config).map(Environment::Lmdb), + #[cfg(feature = "redb")] + DatabaseBackend::Redb => redb_impl::Environment::new(config).map(Environment::Redb), DatabaseBackend::Disabled => Err(Error::SlasherDatabaseBackendDisabled), } } @@ -77,6 +89,8 @@ impl Environment { Self::Mdbx(env) => env.create_databases(), #[cfg(feature = "lmdb")] Self::Lmdb(env) => env.create_databases(), + #[cfg(feature = "redb")] + Self::Redb(env) => env.create_databases(), _ => Err(Error::MismatchedDatabaseVariant), } } @@ -87,6 +101,8 @@ impl Environment { Self::Mdbx(env) => env.begin_rw_txn().map(RwTransaction::Mdbx), #[cfg(feature = "lmdb")] Self::Lmdb(env) => env.begin_rw_txn().map(RwTransaction::Lmdb), + #[cfg(feature = "redb")] + Self::Redb(env) => env.begin_rw_txn().map(RwTransaction::Redb), _ => Err(Error::MismatchedDatabaseVariant), } } @@ -98,6 +114,8 @@ impl Environment { Self::Mdbx(env) => env.filenames(config), #[cfg(feature = "lmdb")] Self::Lmdb(env) => env.filenames(config), + #[cfg(feature = "redb")] + Self::Redb(env) => env.filenames(config), _ => vec![], } } @@ -106,7 +124,7 @@ impl Environment { impl<'env> RwTransaction<'env> { pub fn get + ?Sized>( &'env self, - db: &Database<'env>, + db: &'env Database, key: &K, ) -> Result>, Error> { match (self, db) { @@ -114,6 +132,8 @@ impl<'env> RwTransaction<'env> { (Self::Mdbx(txn), Database::Mdbx(db)) => txn.get(db, key), #[cfg(feature = "lmdb")] (Self::Lmdb(txn), Database::Lmdb(db)) => txn.get(db, key), + #[cfg(feature = "redb")] + (Self::Redb(txn), Database::Redb(db)) => txn.get(db, key), _ => Err(Error::MismatchedDatabaseVariant), } } @@ -129,6 +149,8 @@ impl<'env> RwTransaction<'env> { (Self::Mdbx(txn), Database::Mdbx(db)) => txn.put(db, key, value), #[cfg(feature = "lmdb")] (Self::Lmdb(txn), Database::Lmdb(db)) => txn.put(db, key, value), + #[cfg(feature = "redb")] + (Self::Redb(txn), Database::Redb(db)) => txn.put(db, key, value), _ => Err(Error::MismatchedDatabaseVariant), } } @@ -139,16 +161,8 @@ impl<'env> RwTransaction<'env> { (Self::Mdbx(txn), 
Database::Mdbx(db)) => txn.del(db, key), #[cfg(feature = "lmdb")] (Self::Lmdb(txn), Database::Lmdb(db)) => txn.del(db, key), - _ => Err(Error::MismatchedDatabaseVariant), - } - } - - pub fn cursor<'a>(&'a mut self, db: &Database) -> Result, Error> { - match (self, db) { - #[cfg(feature = "mdbx")] - (Self::Mdbx(txn), Database::Mdbx(db)) => txn.cursor(db).map(Cursor::Mdbx), - #[cfg(feature = "lmdb")] - (Self::Lmdb(txn), Database::Lmdb(db)) => txn.cursor(db).map(Cursor::Lmdb), + #[cfg(feature = "redb")] + (Self::Redb(txn), Database::Redb(db)) => txn.del(db, key), _ => Err(Error::MismatchedDatabaseVariant), } } @@ -159,6 +173,20 @@ impl<'env> RwTransaction<'env> { Self::Mdbx(txn) => txn.commit(), #[cfg(feature = "lmdb")] Self::Lmdb(txn) => txn.commit(), + #[cfg(feature = "redb")] + Self::Redb(txn) => txn.commit(), + _ => Err(Error::MismatchedDatabaseVariant), + } + } + + pub fn cursor<'a>(&'a mut self, db: &'a Database) -> Result, Error> { + match (self, db) { + #[cfg(feature = "mdbx")] + (Self::Mdbx(txn), Database::Mdbx(db)) => txn.cursor(db).map(Cursor::Mdbx), + #[cfg(feature = "lmdb")] + (Self::Lmdb(txn), Database::Lmdb(db)) => txn.cursor(db).map(Cursor::Lmdb), + #[cfg(feature = "redb")] + (Self::Redb(txn), Database::Redb(db)) => txn.cursor(db).map(Cursor::Redb), _ => Err(Error::MismatchedDatabaseVariant), } } @@ -172,6 +200,8 @@ impl<'env> Cursor<'env> { Cursor::Mdbx(cursor) => cursor.first_key(), #[cfg(feature = "lmdb")] Cursor::Lmdb(cursor) => cursor.first_key(), + #[cfg(feature = "redb")] + Cursor::Redb(cursor) => cursor.first_key(), _ => Err(Error::MismatchedDatabaseVariant), } } @@ -183,6 +213,8 @@ impl<'env> Cursor<'env> { Cursor::Mdbx(cursor) => cursor.last_key(), #[cfg(feature = "lmdb")] Cursor::Lmdb(cursor) => cursor.last_key(), + #[cfg(feature = "redb")] + Cursor::Redb(cursor) => cursor.last_key(), _ => Err(Error::MismatchedDatabaseVariant), } } @@ -193,17 +225,8 @@ impl<'env> Cursor<'env> { Cursor::Mdbx(cursor) => cursor.next_key(), #[cfg(feature = "lmdb")] Cursor::Lmdb(cursor) => cursor.next_key(), - _ => Err(Error::MismatchedDatabaseVariant), - } - } - - /// Get the key value pair at the current position. 
diff --git a/slasher/src/database/lmdb_impl.rs b/slasher/src/database/lmdb_impl.rs
index 78deaf1767..20d89a36fb 100644
--- a/slasher/src/database/lmdb_impl.rs
+++ b/slasher/src/database/lmdb_impl.rs
@@ -100,7 +100,7 @@ impl Environment {
 impl<'env> RwTransaction<'env> {
     pub fn get<K: AsRef<[u8]> + ?Sized>(
         &'env self,
-        db: &Database<'env>,
+        db: &'env Database,
         key: &K,
     ) -> Result<Option<Cow<'env, [u8]>>, Error> {
         Ok(self.txn.get(db.db, key).optional()?.map(Cow::Borrowed))
@@ -182,6 +182,29 @@ impl<'env> Cursor<'env> {
             .put(&key, &value, RwTransaction::write_flags())?;
         Ok(())
     }
+
+    pub fn delete_while(
+        &mut self,
+        f: impl Fn(&[u8]) -> Result<bool, Error>,
+    ) -> Result<Vec<Cow<'_, [u8]>>, Error> {
+        let mut result = vec![];
+
+        loop {
+            let (key_bytes, value) = self.get_current()?.ok_or(Error::MissingKey)?;
+
+            if f(&key_bytes)? {
+                result.push(value);
+                self.delete_current()?;
+                if self.next_key()?.is_none() {
+                    break;
+                }
+            } else {
+                break;
+            }
+        }
+
+        Ok(result)
+    }
 }
 
 /// Mix-in trait for loading values from LMDB that may or may not exist.
diff --git a/slasher/src/database/mdbx_impl.rs b/slasher/src/database/mdbx_impl.rs
index d25f17e7ac..e249de963f 100644
--- a/slasher/src/database/mdbx_impl.rs
+++ b/slasher/src/database/mdbx_impl.rs
@@ -113,7 +113,7 @@ impl<'env> RwTransaction<'env> {
     pub fn get<K: AsRef<[u8]> + ?Sized>(
         &'env self,
-        db: &Database<'env>,
+        db: &'env Database,
         key: &K,
     ) -> Result<Option<Cow<'env, [u8]>>, Error> {
         Ok(self.txn.get(&db.db, key.as_ref())?)
@@ -183,4 +183,27 @@ impl<'env> Cursor<'env> {
             .put(key.as_ref(), value.as_ref(), RwTransaction::write_flags())?;
         Ok(())
     }
+
+    pub fn delete_while(
+        &mut self,
+        f: impl Fn(&[u8]) -> Result<bool, Error>,
+    ) -> Result<Vec<Cow<'_, [u8]>>, Error> {
+        let mut result = vec![];
+
+        loop {
+            let (key_bytes, value) = self.get_current()?.ok_or(Error::MissingKey)?;
+
+            if f(&key_bytes)? {
+                result.push(value);
+                self.delete_current()?;
+                if self.next_key()?.is_none() {
+                    break;
+                }
+            } else {
+                break;
+            }
+        }
+
+        Ok(result)
+    }
 }
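Illustration (not part of the diff): the lmdb and mdbx delete_while implementations above share one contract. Starting at the cursor's current position, entries are deleted while the predicate accepts the key; deletion stops at the first rejected key, and the deleted values are returned in order. A standalone sketch of that contract, modelled on a BTreeMap instead of a database cursor:

// Sketch only: BTreeMap stands in for an ordered key-value cursor.
use std::collections::BTreeMap;

fn delete_while(
    map: &mut BTreeMap<Vec<u8>, Vec<u8>>,
    f: impl Fn(&[u8]) -> bool,
) -> Vec<Vec<u8>> {
    let mut deleted = vec![];
    // Walk keys in order from the front, deleting while the predicate holds.
    while let Some(key) = map.keys().next().cloned() {
        if !f(&key) {
            break; // first rejected key ends the run
        }
        if let Some(value) = map.remove(&key) {
            deleted.push(value);
        }
    }
    deleted
}

fn main() {
    let mut map: BTreeMap<_, _> = (1u8..=5).map(|i| (vec![i], vec![i * 10])).collect();
    // Delete entries while the first key byte is < 3.
    let deleted = delete_while(&mut map, |k| k[0] < 3);
    assert_eq!(deleted, vec![vec![10], vec![20]]);
    assert_eq!(map.len(), 3);
}

The slasher uses this shape of operation to prune runs of consecutive old entries in one pass while keeping their values for inspection.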
diff --git a/slasher/src/database/redb_impl.rs b/slasher/src/database/redb_impl.rs
new file mode 100644
index 0000000000..da7b4e38ed
--- /dev/null
+++ b/slasher/src/database/redb_impl.rs
@@ -0,0 +1,276 @@
+#![cfg(feature = "redb")]
+use crate::{
+    config::REDB_DATA_FILENAME,
+    database::{
+        interface::{Key, OpenDatabases, Value},
+        *,
+    },
+    Config, Error,
+};
+use derivative::Derivative;
+use redb::{ReadableTable, TableDefinition};
+use std::{borrow::Cow, path::PathBuf};
+
+#[derive(Debug)]
+pub struct Environment {
+    _db_count: usize,
+    db: redb::Database,
+}
+
+#[derive(Debug)]
+pub struct Database<'env> {
+    table_name: String,
+    _phantom: PhantomData<&'env ()>,
+}
+
+#[derive(Derivative)]
+#[derivative(Debug)]
+pub struct RwTransaction<'env> {
+    #[derivative(Debug = "ignore")]
+    txn: redb::WriteTransaction,
+    _phantom: PhantomData<&'env ()>,
+}
+
+#[derive(Derivative)]
+#[derivative(Debug)]
+pub struct Cursor<'env> {
+    #[derivative(Debug = "ignore")]
+    txn: &'env redb::WriteTransaction,
+    db: &'env Database<'env>,
+    current_key: Option<Cow<'env, [u8]>>,
+}
+
+impl Environment {
+    pub fn new(config: &Config) -> Result<Environment, Error> {
+        let db_path = config.database_path.join(REDB_DATA_FILENAME);
+        let database = redb::Database::create(db_path)?;
+
+        Ok(Environment {
+            _db_count: MAX_NUM_DBS,
+            db: database,
+        })
+    }
+
+    pub fn create_databases(&self) -> Result<OpenDatabases, Error> {
+        let indexed_attestation_db = self.create_table(INDEXED_ATTESTATION_DB)?;
+        let indexed_attestation_id_db = self.create_table(INDEXED_ATTESTATION_ID_DB)?;
+        let attesters_db = self.create_table(ATTESTERS_DB)?;
+        let attesters_max_targets_db = self.create_table(ATTESTERS_MAX_TARGETS_DB)?;
+        let min_targets_db = self.create_table(MIN_TARGETS_DB)?;
+        let max_targets_db = self.create_table(MAX_TARGETS_DB)?;
+        let current_epochs_db = self.create_table(CURRENT_EPOCHS_DB)?;
+        let proposers_db = self.create_table(PROPOSERS_DB)?;
+        let metadata_db = self.create_table(METADATA_DB)?;
+
+        Ok(OpenDatabases {
+            indexed_attestation_db,
+            indexed_attestation_id_db,
+            attesters_db,
+            attesters_max_targets_db,
+            min_targets_db,
+            max_targets_db,
+            current_epochs_db,
+            proposers_db,
+            metadata_db,
+        })
+    }
+
+    pub fn create_table<'env>(
+        &'env self,
+        table_name: &'env str,
+    ) -> Result<crate::Database<'env>, Error> {
+        let table_definition: TableDefinition<'_, &[u8], &[u8]> = TableDefinition::new(table_name);
+        let tx = self.db.begin_write()?;
+        tx.open_table(table_definition)?;
+        tx.commit()?;
+
+        Ok(crate::Database::Redb(Database {
+            table_name: table_name.to_string(),
+            _phantom: PhantomData,
+        }))
+    }
+
+    pub fn filenames(&self, config: &Config) -> Vec<PathBuf> {
+        vec![config.database_path.join(BASE_DB)]
+    }
+
+    pub fn begin_rw_txn(&self) -> Result<RwTransaction, Error> {
+        let mut txn = self.db.begin_write()?;
+        txn.set_durability(redb::Durability::Eventual);
+        Ok(RwTransaction {
+            txn,
+            _phantom: PhantomData,
+        })
+    }
+}
+
+impl<'env> RwTransaction<'env> {
+    pub fn get<K: AsRef<[u8]> + ?Sized>(
+        &'env self,
+        db: &'env Database,
+        key: &K,
+    ) -> Result<Option<Cow<'env, [u8]>>, Error> {
+        let table_definition: TableDefinition<'_, &[u8], &[u8]> =
+            TableDefinition::new(&db.table_name);
+        let table = self.txn.open_table(table_definition)?;
+        let result = table.get(key.as_ref())?;
+        if let Some(access_guard) = result {
+            let value = access_guard.value().to_vec();
+            Ok(Some(Cow::from(value)))
+        } else {
+            Ok(None)
+        }
+    }
+
+    pub fn put<K: AsRef<[u8]>, V: AsRef<[u8]>>(
+        &mut self,
+        db: &Database,
+        key: K,
+        value: V,
+    ) -> Result<(), Error> {
+        let table_definition: TableDefinition<'_, &[u8], &[u8]> =
+            TableDefinition::new(&db.table_name);
+        let mut table = self.txn.open_table(table_definition)?;
+        table.insert(key.as_ref(), value.as_ref())?;
+
+        Ok(())
+    }
+
+    pub fn del<K: AsRef<[u8]>>(&mut self, db: &Database, key: K) -> Result<(), Error> {
+        let table_definition: TableDefinition<'_, &[u8], &[u8]> =
+            TableDefinition::new(&db.table_name);
+        let mut table = self.txn.open_table(table_definition)?;
+        table.remove(key.as_ref())?;
+
+        Ok(())
+    }
+
+    pub fn commit(self) -> Result<(), Error> {
+        self.txn.commit()?;
+        Ok(())
+    }
+
+    pub fn cursor<'a>(&'a mut self, db: &'a Database) -> Result<Cursor<'a>, Error> {
+        Ok(Cursor {
+            txn: &self.txn,
+            db,
+            current_key: None,
+        })
+    }
+}
+
+impl<'env> Cursor<'env> {
+    pub fn first_key(&mut self) -> Result<Option<Key>, Error> {
+        let table_definition: TableDefinition<'_, &[u8], &[u8]> =
+            TableDefinition::new(&self.db.table_name);
+        let table = self.txn.open_table(table_definition)?;
+        let first = table
+            .iter()?
+            .next()
+            .map(|x| x.map(|(key, _)| key.value().to_vec()));
+
+        if let Some(owned_key) = first {
+            let owned_key = owned_key?;
+            self.current_key = Some(Cow::from(owned_key));
+            Ok(self.current_key.clone())
+        } else {
+            Ok(None)
+        }
+    }
+
+    pub fn last_key(&mut self) -> Result<Option<Key<'env>>, Error> {
+        let table_definition: TableDefinition<'_, &[u8], &[u8]> =
+            TableDefinition::new(&self.db.table_name);
+        let table = self.txn.open_table(table_definition)?;
+        let last = table
+            .iter()?
+            .next_back()
+            .map(|x| x.map(|(key, _)| key.value().to_vec()));
+
+        if let Some(owned_key) = last {
+            let owned_key = owned_key?;
+            self.current_key = Some(Cow::from(owned_key));
+            return Ok(self.current_key.clone());
+        }
+        Ok(None)
+    }
+
+    pub fn get_current(&self) -> Result<Option<(Key<'env>, Value<'env>)>, Error> {
+        let table_definition: TableDefinition<'_, &[u8], &[u8]> =
+            TableDefinition::new(&self.db.table_name);
+        let table = self.txn.open_table(table_definition)?;
+        if let Some(key) = &self.current_key {
+            let result = table.get(key.as_ref())?;
+
+            if let Some(access_guard) = result {
+                let value = access_guard.value().to_vec();
+                return Ok(Some((key.clone(), Cow::from(value))));
+            }
+        }
+        Ok(None)
+    }
+
+    pub fn next_key(&mut self) -> Result<Option<Key<'env>>, Error> {
+        let table_definition: TableDefinition<'_, &[u8], &[u8]> =
+            TableDefinition::new(&self.db.table_name);
+        let table = self.txn.open_table(table_definition)?;
+        if let Some(current_key) = &self.current_key {
+            let range: std::ops::RangeFrom<&[u8]> = current_key..;
+
+            let next = table
+                .range(range)?
+                .next()
+                .map(|x| x.map(|(key, _)| key.value().to_vec()));
+
+            if let Some(owned_key) = next {
+                let owned_key = owned_key?;
+                self.current_key = Some(Cow::from(owned_key));
+                return Ok(self.current_key.clone());
+            }
+        }
+        Ok(None)
+    }
+
+    pub fn delete_current(&self) -> Result<(), Error> {
+        let table_definition: TableDefinition<'_, &[u8], &[u8]> =
+            TableDefinition::new(&self.db.table_name);
+        let mut table = self.txn.open_table(table_definition)?;
+        if let Some(key) = &self.current_key {
+            table.remove(key.as_ref())?;
+        }
+        Ok(())
+    }
+
+    pub fn delete_while(
+        &self,
+        f: impl Fn(&[u8]) -> Result<bool, Error>,
+    ) -> Result<Vec<Cow<'_, [u8]>>, Error> {
+        let mut deleted_values = vec![];
+        if let Some(current_key) = &self.current_key {
+            let table_definition: TableDefinition<'_, &[u8], &[u8]> =
+                TableDefinition::new(&self.db.table_name);
+
+            let mut table = self.txn.open_table(table_definition)?;
+
+            let deleted =
+                table.extract_from_if(current_key.as_ref().., |key, _| f(key).unwrap_or(false))?;
+
+            deleted.for_each(|result| {
+                if let Ok(item) = result {
+                    let value = item.1.value().to_vec();
+                    deleted_values.push(Cow::from(value));
+                }
+            })
+        };
+        Ok(deleted_values)
+    }
+
+    pub fn put<K: AsRef<[u8]>, V: AsRef<[u8]>>(&mut self, key: K, value: V) -> Result<(), Error> {
+        let table_definition: TableDefinition<'_, &[u8], &[u8]> =
+            TableDefinition::new(&self.db.table_name);
+        let mut table = self.txn.open_table(table_definition)?;
+        table.insert(key.as_ref(), value.as_ref())?;
+
+        Ok(())
+    }
+}
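Illustration (not part of the diff): the new backend is built on a small set of redb calls, Database::create, begin_write, open_table with a TableDefinition, insert, commit, and read transactions, with each RwTransaction/Cursor method re-opening its table by name. A minimal standalone sketch of those calls; the file path and table name here are placeholders:

// Sketch only: placeholder path and table name.
use redb::{Database, ReadableTable, TableDefinition};

const TABLE: TableDefinition<&[u8], &[u8]> = TableDefinition::new("example_table");

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let db = Database::create("/tmp/example.redb")?;

    // Writes go through a write transaction; the table handle must be dropped
    // (here via the inner scope) before the transaction can be committed.
    let txn = db.begin_write()?;
    {
        let mut table = txn.open_table(TABLE)?;
        table.insert(b"key".as_slice(), b"value".as_slice())?;
    }
    txn.commit()?;

    // Reads use a separate read transaction.
    let rtxn = db.begin_read()?;
    let table = rtxn.open_table(TABLE)?;
    assert!(table.get(b"key".as_slice())?.is_some());
    Ok(())
}

Note that begin_rw_txn above additionally sets redb::Durability::Eventual, trading an fsync on every commit for write throughput, which suits the slasher's bulk-update workload.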
diff --git a/slasher/src/error.rs b/slasher/src/error.rs
index 8d3295b22a..b2e32f3dcd 100644
--- a/slasher/src/error.rs
+++ b/slasher/src/error.rs
@@ -8,6 +8,8 @@ pub enum Error {
     DatabaseMdbxError(mdbx::Error),
     #[cfg(feature = "lmdb")]
     DatabaseLmdbError(lmdb::Error),
+    #[cfg(feature = "redb")]
+    DatabaseRedbError(redb::Error),
     SlasherDatabaseBackendDisabled,
     MismatchedDatabaseVariant,
     DatabaseIOError(io::Error),
@@ -67,6 +69,7 @@ pub enum Error {
     MissingIndexedAttestationId,
     MissingIndexedAttestationIdKey,
     InconsistentAttestationDataRoot,
+    MissingKey,
 }
 
 #[cfg(feature = "mdbx")]
@@ -89,6 +92,41 @@ impl From<lmdb::Error> for Error {
     }
 }
 
+#[cfg(feature = "redb")]
+impl From<redb::TableError> for Error {
+    fn from(e: redb::TableError) -> Self {
+        Error::DatabaseRedbError(e.into())
+    }
+}
+
+#[cfg(feature = "redb")]
+impl From<redb::TransactionError> for Error {
+    fn from(e: redb::TransactionError) -> Self {
+        Error::DatabaseRedbError(e.into())
+    }
+}
+
+#[cfg(feature = "redb")]
+impl From<redb::DatabaseError> for Error {
+    fn from(e: redb::DatabaseError) -> Self {
+        Error::DatabaseRedbError(e.into())
+    }
+}
+
+#[cfg(feature = "redb")]
+impl From<redb::StorageError> for Error {
+    fn from(e: redb::StorageError) -> Self {
+        Error::DatabaseRedbError(e.into())
+    }
+}
+
+#[cfg(feature = "redb")]
+impl From<redb::CommitError> for Error {
+    fn from(e: redb::CommitError) -> Self {
+        Error::DatabaseRedbError(e.into())
+    }
+}
+
 impl From<io::Error> for Error {
     fn from(e: io::Error) -> Self {
         Error::DatabaseIOError(e)
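Illustration (not part of the diff): redb reports failures through several concrete error types (TableError, TransactionError, DatabaseError, StorageError, CommitError), each of which converts into the umbrella redb::Error, which is why every From impl above can delegate to e.into(). With those impls in place, ? propagates any redb failure into the slasher Error without manual mapping. A sketch with a stand-in error enum and a hypothetical helper:

// Sketch only: `MyError` stands in for the slasher `Error` above, and
// `open_and_commit` is a hypothetical helper, not part of the patch.
#[derive(Debug)]
enum MyError {
    Redb(redb::Error),
}

impl From<redb::DatabaseError> for MyError {
    fn from(e: redb::DatabaseError) -> Self {
        MyError::Redb(e.into()) // concrete redb errors convert into redb::Error
    }
}

impl From<redb::TransactionError> for MyError {
    fn from(e: redb::TransactionError) -> Self {
        MyError::Redb(e.into())
    }
}

impl From<redb::CommitError> for MyError {
    fn from(e: redb::CommitError) -> Self {
        MyError::Redb(e.into())
    }
}

fn open_and_commit(db: &redb::Database) -> Result<(), MyError> {
    let txn = db.begin_write()?; // TransactionError -> MyError
    txn.commit()?; // CommitError -> MyError
    Ok(())
}

fn main() -> Result<(), MyError> {
    let db = redb::Database::create("/tmp/example_errors.redb")?; // DatabaseError -> MyError
    open_and_commit(&db)
}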
diff --git a/slasher/src/lib.rs b/slasher/src/lib.rs
index 4d58fa7702..d3a26337d6 100644
--- a/slasher/src/lib.rs
+++ b/slasher/src/lib.rs
@@ -1,6 +1,6 @@
 #![deny(missing_debug_implementations)]
 #![cfg_attr(
-    not(any(feature = "mdbx", feature = "lmdb")),
+    not(any(feature = "mdbx", feature = "lmdb", feature = "redb")),
     allow(unused, clippy::drop_non_drop)
 )]
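For reference, the resulting crate attributes at the top of slasher/src/lib.rs after this change: when no backend feature is enabled the crate still compiles, with the otherwise-unavoidable unused-code lints explicitly allowed.

// Post-patch state of the crate attributes in slasher/src/lib.rs.
#![deny(missing_debug_implementations)]
#![cfg_attr(
    not(any(feature = "mdbx", feature = "lmdb", feature = "redb")),
    allow(unused, clippy::drop_non_drop)
)]

The test files below apply the same gating at file level, so they compile only when at least one database backend feature is enabled.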
feature = "lmdb", feature = "redb"))] use logging::test_logger; use maplit::hashset; diff --git a/slasher/tests/backend.rs b/slasher/tests/backend.rs index fd1a6ae14f..c24b861b18 100644 --- a/slasher/tests/backend.rs +++ b/slasher/tests/backend.rs @@ -1,4 +1,4 @@ -#![cfg(feature = "lmdb")] +#![cfg(any(feature = "lmdb", feature = "redb"))] use slasher::{config::MDBX_DATA_FILENAME, Config, DatabaseBackend, DatabaseBackendOverride}; use std::fs::File; @@ -41,7 +41,7 @@ fn no_override_with_existing_mdbx_db() { } #[test] -#[cfg(all(not(feature = "mdbx"), feature = "lmdb"))] +#[cfg(all(not(feature = "mdbx"), feature = "lmdb", not(feature = "redb")))] fn failed_override_with_existing_mdbx_db() { let tempdir = tempdir().unwrap(); let mut config = Config::new(tempdir.path().into()); @@ -55,3 +55,19 @@ fn failed_override_with_existing_mdbx_db() { ); assert_eq!(config.backend, DatabaseBackend::Lmdb); } + +#[test] +#[cfg(feature = "redb")] +fn failed_override_with_existing_mdbx_db() { + let tempdir = tempdir().unwrap(); + let mut config = Config::new(tempdir.path().into()); + + let filename = config.database_path.join(MDBX_DATA_FILENAME); + File::create(&filename).unwrap(); + + assert_eq!( + config.override_backend(), + DatabaseBackendOverride::Failure(filename) + ); + assert_eq!(config.backend, DatabaseBackend::Redb); +} diff --git a/slasher/tests/proposer_slashings.rs b/slasher/tests/proposer_slashings.rs index 2d2738087d..6d2a1f5176 100644 --- a/slasher/tests/proposer_slashings.rs +++ b/slasher/tests/proposer_slashings.rs @@ -1,4 +1,4 @@ -#![cfg(any(feature = "mdbx", feature = "lmdb"))] +#![cfg(any(feature = "mdbx", feature = "lmdb", feature = "redb"))] use logging::test_logger; use slasher::{ diff --git a/slasher/tests/random.rs b/slasher/tests/random.rs index ebfe0ef4e9..0aaaa63f65 100644 --- a/slasher/tests/random.rs +++ b/slasher/tests/random.rs @@ -1,4 +1,4 @@ -#![cfg(any(feature = "mdbx", feature = "lmdb"))] +#![cfg(any(feature = "mdbx", feature = "lmdb", feature = "redb"))] use logging::test_logger; use rand::prelude::*; diff --git a/slasher/tests/wrap_around.rs b/slasher/tests/wrap_around.rs index 9a42aeb60b..2ec56bc7d5 100644 --- a/slasher/tests/wrap_around.rs +++ b/slasher/tests/wrap_around.rs @@ -1,4 +1,4 @@ -#![cfg(any(feature = "mdbx", feature = "lmdb"))] +#![cfg(any(feature = "mdbx", feature = "lmdb", feature = "redb"))] use logging::test_logger; use slasher::{