mirror of
https://github.com/sigp/lighthouse.git
synced 2026-04-18 13:28:33 +00:00
Merge remote-tracking branch 'origin/unstable' into capella
This commit is contained in:
@@ -162,6 +162,7 @@ pub enum BeaconChainError {
|
||||
BlockRewardSlotError,
|
||||
BlockRewardAttestationError,
|
||||
BlockRewardSyncError,
|
||||
SyncCommitteeRewardsSyncError,
|
||||
HeadMissingFromForkChoice(Hash256),
|
||||
FinalizedBlockMissingFromForkChoice(Hash256),
|
||||
HeadBlockMissingFromForkChoice(Hash256),
|
||||
|
||||
@@ -41,6 +41,7 @@ pub mod schema_change;
|
||||
mod shuffling_cache;
|
||||
mod snapshot_cache;
|
||||
pub mod state_advance_timer;
|
||||
pub mod sync_committee_rewards;
|
||||
pub mod sync_committee_verification;
|
||||
pub mod test_utils;
|
||||
mod timeout_rw_lock;
|
||||
|
||||
@@ -2,6 +2,7 @@ use crate::{
|
||||
beacon_chain::MAXIMUM_GOSSIP_CLOCK_DISPARITY, BeaconChain, BeaconChainError, BeaconChainTypes,
|
||||
};
|
||||
use derivative::Derivative;
|
||||
use eth2::types::Hash256;
|
||||
use slot_clock::SlotClock;
|
||||
use std::time::Duration;
|
||||
use strum::AsRefStr;
|
||||
@@ -36,6 +37,8 @@ pub enum Error {
|
||||
SigSlotStartIsNone,
|
||||
/// Failed to construct a LightClientOptimisticUpdate from state.
|
||||
FailedConstructingUpdate,
|
||||
/// Unknown block with parent root.
|
||||
UnknownBlockParentRoot(Hash256),
|
||||
/// Beacon chain error occurred.
|
||||
BeaconChainError(BeaconChainError),
|
||||
LightClientUpdateError(LightClientUpdateError),
|
||||
@@ -58,6 +61,7 @@ impl From<LightClientUpdateError> for Error {
|
||||
#[derivative(Clone(bound = "T: BeaconChainTypes"))]
|
||||
pub struct VerifiedLightClientOptimisticUpdate<T: BeaconChainTypes> {
|
||||
light_client_optimistic_update: LightClientOptimisticUpdate<T::EthSpec>,
|
||||
pub parent_root: Hash256,
|
||||
seen_timestamp: Duration,
|
||||
}
|
||||
|
||||
@@ -107,6 +111,16 @@ impl<T: BeaconChainTypes> VerifiedLightClientOptimisticUpdate<T> {
|
||||
None => return Err(Error::SigSlotStartIsNone),
|
||||
}
|
||||
|
||||
// check if we can process the optimistic update immediately
|
||||
// otherwise queue
|
||||
let canonical_root = light_client_optimistic_update
|
||||
.attested_header
|
||||
.canonical_root();
|
||||
|
||||
if canonical_root != head_block.message().parent_root() {
|
||||
return Err(Error::UnknownBlockParentRoot(canonical_root));
|
||||
}
|
||||
|
||||
let optimistic_update =
|
||||
LightClientOptimisticUpdate::new(&chain.spec, head_block, &attested_state)?;
|
||||
|
||||
@@ -119,6 +133,7 @@ impl<T: BeaconChainTypes> VerifiedLightClientOptimisticUpdate<T> {
|
||||
|
||||
Ok(Self {
|
||||
light_client_optimistic_update,
|
||||
parent_root: canonical_root,
|
||||
seen_timestamp,
|
||||
})
|
||||
}
|
||||
|
||||
87
beacon_node/beacon_chain/src/sync_committee_rewards.rs
Normal file
87
beacon_node/beacon_chain/src/sync_committee_rewards.rs
Normal file
@@ -0,0 +1,87 @@
|
||||
use crate::{BeaconChain, BeaconChainError, BeaconChainTypes};
|
||||
|
||||
use eth2::lighthouse::SyncCommitteeReward;
|
||||
use safe_arith::SafeArith;
|
||||
use slog::error;
|
||||
use state_processing::per_block_processing::altair::sync_committee::compute_sync_aggregate_rewards;
|
||||
use std::collections::HashMap;
|
||||
use store::RelativeEpoch;
|
||||
use types::{BeaconBlockRef, BeaconState, ExecPayload};
|
||||
|
||||
impl<T: BeaconChainTypes> BeaconChain<T> {
    /// Compute per-validator sync-committee rewards for `block`, given `state`.
    ///
    /// `state` must be the block's pre-state (same slot as the block), otherwise
    /// `BlockRewardSlotError` is returned. The result contains one entry per
    /// validator whose balance is affected: committee members get their
    /// participation reward (or penalty), and the proposer's entry — emitted only
    /// when the proposer is itself a committee member — has the accumulated
    /// proposer rewards subtracted out so it reflects participation alone.
    pub fn compute_sync_committee_rewards<Payload: ExecPayload<T::EthSpec>>(
        &self,
        block: BeaconBlockRef<'_, T::EthSpec, Payload>,
        state: &mut BeaconState<T::EthSpec>,
    ) -> Result<Vec<SyncCommitteeReward>, BeaconChainError> {
        // Rewards are only meaningful against the block's pre-state.
        if block.slot() != state.slot() {
            return Err(BeaconChainError::BlockRewardSlotError);
        }

        let spec = &self.spec;

        // Required for the proposer-index lookup below.
        state.build_committee_cache(RelativeEpoch::Current, spec)?;

        let sync_aggregate = block.body().sync_aggregate()?;

        let sync_committee = state.current_sync_committee()?.clone();

        // Validator indices in committee order, aligned with the aggregate's bitfield.
        let sync_committee_indices = state.get_sync_committee_indices(&sync_committee)?;

        let (participant_reward_value, proposer_reward_per_bit) =
            compute_sync_aggregate_rewards(state, spec).map_err(|e| {
                error!(
                    self.log, "Error calculating sync aggregate rewards";
                    "error" => ?e
                );
                BeaconChainError::SyncCommitteeRewardsSyncError
            })?;

        // Scratch map: validator index -> hypothetical post-reward balance,
        // seeded lazily from the current state balance on first touch.
        let mut balances = HashMap::<usize, u64>::new();

        let mut total_proposer_rewards = 0;
        let proposer_index = state.get_beacon_proposer_index(block.slot(), spec)?;

        // Apply rewards to participant balances. Keep track of proposer rewards
        for (validator_index, participant_bit) in sync_committee_indices
            .iter()
            .zip(sync_aggregate.sync_committee_bits.iter())
        {
            let participant_balance = balances
                .entry(*validator_index)
                .or_insert_with(|| state.balances()[*validator_index]);

            if participant_bit {
                // Set bit: participant is rewarded, and the proposer earns a
                // per-bit reward on top (checked arithmetic via SafeArith).
                participant_balance.safe_add_assign(participant_reward_value)?;

                balances
                    .entry(proposer_index)
                    .or_insert_with(|| state.balances()[proposer_index])
                    .safe_add_assign(proposer_reward_per_bit)?;

                total_proposer_rewards.safe_add_assign(proposer_reward_per_bit)?;
            } else {
                // Unset bit: penalise, saturating at zero rather than underflowing.
                *participant_balance = participant_balance.saturating_sub(participant_reward_value);
            }
        }

        // Turn balance deltas into reward entries. For the proposer, subtract the
        // accumulated proposer rewards so only the participation component remains;
        // a proposer that is not a committee member gets no entry at all.
        Ok(balances
            .iter()
            .filter_map(|(i, new_balance)| {
                let reward = if *i != proposer_index {
                    *new_balance as i64 - state.balances()[*i] as i64
                } else if sync_committee_indices.contains(i) {
                    *new_balance as i64
                        - state.balances()[*i] as i64
                        - total_proposer_rewards as i64
                } else {
                    return None;
                };
                Some(SyncCommitteeReward {
                    validator_index: *i as u64,
                    reward,
                })
            })
            .collect())
    }
}
|
||||
@@ -2,6 +2,7 @@ pub use crate::persisted_beacon_chain::PersistedBeaconChain;
|
||||
pub use crate::{
|
||||
beacon_chain::{BEACON_CHAIN_DB_KEY, ETH1_CACHE_DB_KEY, FORK_CHOICE_DB_KEY, OP_POOL_DB_KEY},
|
||||
migrate::MigratorConfig,
|
||||
sync_committee_verification::Error as SyncCommitteeError,
|
||||
validator_monitor::DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD,
|
||||
BeaconChainError, NotifyExecutionLayer, ProduceBlockVerification,
|
||||
};
|
||||
@@ -2073,6 +2074,30 @@ where
|
||||
|
||||
(honest_head, faulty_head)
|
||||
}
|
||||
|
||||
pub fn process_sync_contributions(
|
||||
&self,
|
||||
sync_contributions: HarnessSyncContributions<E>,
|
||||
) -> Result<(), SyncCommitteeError> {
|
||||
let mut verified_contributions = Vec::with_capacity(sync_contributions.len());
|
||||
|
||||
for (_, contribution_and_proof) in sync_contributions {
|
||||
let signed_contribution_and_proof = contribution_and_proof.unwrap();
|
||||
|
||||
let verified_contribution = self
|
||||
.chain
|
||||
.verify_sync_contribution_for_gossip(signed_contribution_and_proof)?;
|
||||
|
||||
verified_contributions.push(verified_contribution);
|
||||
}
|
||||
|
||||
for verified_contribution in verified_contributions {
|
||||
self.chain
|
||||
.add_contribution_to_block_inclusion_pool(verified_contribution)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
// Junk `Debug` impl to satisfy certain trait bounds during testing.
|
||||
|
||||
@@ -5,6 +5,7 @@ mod capella;
|
||||
mod merge;
|
||||
mod op_verification;
|
||||
mod payload_invalidation;
|
||||
mod rewards;
|
||||
mod store_tests;
|
||||
mod sync_committee_verification;
|
||||
mod tests;
|
||||
|
||||
121
beacon_node/beacon_chain/tests/rewards.rs
Normal file
121
beacon_node/beacon_chain/tests/rewards.rs
Normal file
@@ -0,0 +1,121 @@
|
||||
#![cfg(test)]
|
||||
|
||||
use std::collections::HashMap;
|
||||
|
||||
use beacon_chain::test_utils::{
|
||||
generate_deterministic_keypairs, BeaconChainHarness, EphemeralHarnessType,
|
||||
};
|
||||
use beacon_chain::{
|
||||
test_utils::{AttestationStrategy, BlockStrategy, RelativeSyncCommittee},
|
||||
types::{Epoch, EthSpec, Keypair, MinimalEthSpec},
|
||||
};
|
||||
use lazy_static::lazy_static;
|
||||
|
||||
// Number of validators used by the test harness.
pub const VALIDATOR_COUNT: usize = 64;

// Deterministic keypairs are expensive to generate, so build one shared set
// lazily and reuse it across all tests in this file.
lazy_static! {
    static ref KEYPAIRS: Vec<Keypair> = generate_deterministic_keypairs(VALIDATOR_COUNT);
}
|
||||
|
||||
fn get_harness<E: EthSpec>() -> BeaconChainHarness<EphemeralHarnessType<E>> {
|
||||
let mut spec = E::default_spec();
|
||||
|
||||
spec.altair_fork_epoch = Some(Epoch::new(0)); // We use altair for all tests
|
||||
|
||||
let harness = BeaconChainHarness::builder(E::default())
|
||||
.spec(spec)
|
||||
.keypairs(KEYPAIRS.to_vec())
|
||||
.fresh_ephemeral_store()
|
||||
.build();
|
||||
|
||||
harness.advance_slot();
|
||||
|
||||
harness
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_sync_committee_rewards() {
|
||||
let num_block_produced = MinimalEthSpec::slots_per_epoch();
|
||||
let harness = get_harness::<MinimalEthSpec>();
|
||||
|
||||
let latest_block_root = harness
|
||||
.extend_chain(
|
||||
num_block_produced as usize,
|
||||
BlockStrategy::OnCanonicalHead,
|
||||
AttestationStrategy::AllValidators,
|
||||
)
|
||||
.await;
|
||||
|
||||
// Create and add sync committee message to op_pool
|
||||
let sync_contributions = harness.make_sync_contributions(
|
||||
&harness.get_current_state(),
|
||||
latest_block_root,
|
||||
harness.get_current_slot(),
|
||||
RelativeSyncCommittee::Current,
|
||||
);
|
||||
|
||||
harness
|
||||
.process_sync_contributions(sync_contributions)
|
||||
.unwrap();
|
||||
|
||||
// Add block
|
||||
let chain = &harness.chain;
|
||||
let (head_state, head_state_root) = harness.get_current_state_and_root();
|
||||
let target_slot = harness.get_current_slot() + 1;
|
||||
|
||||
let (block_root, mut state) = harness
|
||||
.add_attested_block_at_slot(target_slot, head_state, head_state_root, &[])
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let block = harness.get_block(block_root).unwrap();
|
||||
let parent_block = chain
|
||||
.get_blinded_block(&block.parent_root())
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
let parent_state = chain
|
||||
.get_state(&parent_block.state_root(), Some(parent_block.slot()))
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
|
||||
let reward_payload = chain
|
||||
.compute_sync_committee_rewards(block.message(), &mut state)
|
||||
.unwrap();
|
||||
|
||||
let rewards = reward_payload
|
||||
.iter()
|
||||
.map(|reward| (reward.validator_index, reward.reward))
|
||||
.collect::<HashMap<_, _>>();
|
||||
|
||||
let proposer_index = state
|
||||
.get_beacon_proposer_index(target_slot, &MinimalEthSpec::default_spec())
|
||||
.unwrap();
|
||||
|
||||
let mut mismatches = vec![];
|
||||
|
||||
for validator in state.validators() {
|
||||
let validator_index = state
|
||||
.clone()
|
||||
.get_validator_index(&validator.pubkey)
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
let pre_state_balance = parent_state.balances()[validator_index];
|
||||
let post_state_balance = state.balances()[validator_index];
|
||||
let sync_committee_reward = rewards.get(&(validator_index as u64)).unwrap_or(&0);
|
||||
|
||||
if validator_index == proposer_index {
|
||||
continue; // Ignore proposer
|
||||
}
|
||||
|
||||
if pre_state_balance as i64 + *sync_committee_reward != post_state_balance as i64 {
|
||||
mismatches.push(validator_index.to_string());
|
||||
}
|
||||
}
|
||||
|
||||
assert_eq!(
|
||||
mismatches.len(),
|
||||
0,
|
||||
"Expect 0 mismatches, but these validators have mismatches on balance: {} ",
|
||||
mismatches.join(",")
|
||||
);
|
||||
}
|
||||
Reference in New Issue
Block a user