Compare commits

...

7 Commits

Author SHA1 Message Date
Michael Sproul
1030d2e203 Trying more stuff 2025-03-29 12:52:56 +11:00
Michael Sproul
5c3e7a44cd More progress 2025-03-28 09:06:22 +11:00
Michael Sproul
eb032bc5c6 Progress 2025-03-28 08:30:36 +11:00
Michael Sproul
296e50b0d1 WIP 2025-03-27 23:23:25 +11:00
Michael Sproul
6d5a2be7f9 Release v7.0.0-beta.5 (#7210)
New release for Pectra-enabled networks.
2025-03-27 03:42:34 +00:00
Michael Sproul
7d792e615c Fix xdelta3 output buffer issue (#7174)
* Fix xdelta3 output buffer issue

* Fix buckets

* Update commit hash to `main`

* Tag TODO(hdiff)

* Update cargo lock
2025-03-27 13:25:50 +11:00
Michael Sproul
0875326cb6 Prevent duplicate effective balance processing (#7209) 2025-03-27 12:53:38 +11:00
13 changed files with 443 additions and 50 deletions

Cargo.lock generated
View File

@@ -860,7 +860,7 @@ dependencies = [
[[package]]
name = "beacon_node"
version = "7.0.0-beta.4"
version = "7.0.0-beta.5"
dependencies = [
"account_utils",
"beacon_chain",
@@ -1108,7 +1108,7 @@ dependencies = [
[[package]]
name = "boot_node"
version = "7.0.0-beta.4"
version = "7.0.0-beta.5"
dependencies = [
"beacon_node",
"bytes",
@@ -4811,7 +4811,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55"
[[package]]
name = "lcli"
version = "7.0.0-beta.4"
version = "7.0.0-beta.5"
dependencies = [
"account_utils",
"beacon_chain",
@@ -5366,7 +5366,7 @@ dependencies = [
[[package]]
name = "lighthouse"
version = "7.0.0-beta.4"
version = "7.0.0-beta.5"
dependencies = [
"account_manager",
"account_utils",
@@ -10760,7 +10760,7 @@ dependencies = [
[[package]]
name = "xdelta3"
version = "0.1.5"
source = "git+http://github.com/sigp/xdelta3-rs?rev=50d63cdf1878e5cf3538e9aae5eed34a22c64e4a#50d63cdf1878e5cf3538e9aae5eed34a22c64e4a"
source = "git+http://github.com/sigp/xdelta3-rs?rev=4db64086bb02e9febb584ba93b9d16bb2ae3825a#4db64086bb02e9febb584ba93b9d16bb2ae3825a"
dependencies = [
"bindgen",
"cc",

View File

@@ -289,7 +289,7 @@ validator_metrics = { path = "validator_client/validator_metrics" }
validator_store = { path = "validator_client/validator_store" }
validator_test_rig = { path = "testing/validator_test_rig" }
warp_utils = { path = "common/warp_utils" }
-xdelta3 = { git = "http://github.com/sigp/xdelta3-rs", rev = "50d63cdf1878e5cf3538e9aae5eed34a22c64e4a" }
+xdelta3 = { git = "http://github.com/sigp/xdelta3-rs", rev = "4db64086bb02e9febb584ba93b9d16bb2ae3825a" }
zstd = "0.13"
[profile.maxperf]

View File

@@ -1,6 +1,6 @@
[package]
name = "beacon_node"
version = "7.0.0-beta.4"
version = "7.0.0-beta.5"
authors = [
"Paul Hauner <paul@paulhauner.com>",
"Age Manning <Age@AgeManning.com",

View File

@@ -2228,6 +2228,23 @@ where
((Arc::new(signed_block), blobs), state)
}
pub fn make_deposit_data(
&self,
keypair: &Keypair,
withdrawal_credentials: Hash256,
amount: u64,
) -> DepositData {
let pubkey = PublicKeyBytes::from(keypair.pk.clone());
let mut data = DepositData {
pubkey,
withdrawal_credentials,
amount,
signature: SignatureBytes::empty(),
};
data.signature = data.create_signature(&keypair.sk, &self.spec);
data
}
pub fn make_deposits<'a>(
&self,
state: &'a mut BeaconState<E>,
@@ -2239,19 +2256,14 @@ where
for _ in 0..num_deposits {
let keypair = Keypair::random();
-let pubkeybytes = PublicKeyBytes::from(keypair.pk.clone());
-let mut data = DepositData {
-pubkey: pubkeybytes,
-withdrawal_credentials: Hash256::from_slice(
-&get_withdrawal_credentials(&keypair.pk, self.spec.bls_withdrawal_prefix_byte)
-[..],
-),
-amount: self.spec.min_deposit_amount,
-signature: SignatureBytes::empty(),
-};
-data.signature = data.create_signature(&keypair.sk, &self.spec);
+let withdrawal_credentials = Hash256::from_slice(
+&get_withdrawal_credentials(&keypair.pk, self.spec.bls_withdrawal_prefix_byte)[..],
+);
+let mut data = self.make_deposit_data(
+&keypair,
+withdrawal_credentials,
+self.spec.min_deposit_amount,
+);
if let Some(invalid_pubkey) = invalid_pubkey {
data.pubkey = invalid_pubkey;

View File

@@ -0,0 +1,290 @@
#![cfg(not(debug_assertions))] // Tests run too slow in debug.
use beacon_chain::{
builder::BeaconChainBuilder,
test_utils::{get_kzg, mock_execution_layer_from_parts, BeaconChainHarness, DiskHarnessType},
ChainConfig, MigratorConfig, NotifyExecutionLayer, StateSkipConfig,
};
use logging::test_logger;
use slog::debug;
use slot_clock::{SlotClock, TestingSlotClock};
use state_processing::{
per_block_processing, BlockSignatureStrategy, ConsensusContext, VerifyBlockRoot,
};
use std::sync::Arc;
use std::time::Duration;
use store::{database::interface::BeaconNodeBackend, HotColdDB, StoreConfig};
use tempfile::{tempdir, TempDir};
use types::*;
type E = MainnetEthSpec;
fn get_store(
db_path: &TempDir,
config: StoreConfig,
spec: Arc<ChainSpec>,
) -> Arc<HotColdDB<E, BeaconNodeBackend<E>, BeaconNodeBackend<E>>> {
let hot_path = db_path.path().join("chain_db");
let cold_path = db_path.path().join("freezer_db");
let blobs_path = db_path.path().join("blobs_db");
let log = test_logger();
HotColdDB::open(
&hot_path,
&cold_path,
&blobs_path,
|_, _, _| Ok(()),
config,
spec.into(),
log,
)
.expect("disk store should initialize")
}
#[tokio::test]
async fn signature_verify_chain_segment_pubkey_cache() {
let initial_validator_count = 32;
let deposit_slot = Slot::new(4 * E::slots_per_epoch() - 1);
let pre_deposit_slot = deposit_slot - 1;
let spec = Arc::new(ForkName::Electra.make_genesis_spec(E::default_spec()));
// Keep historic states on main harness.
let chain_config = ChainConfig {
reconstruct_historic_states: true,
..ChainConfig::default()
};
let harness = BeaconChainHarness::builder(E::default())
.chain_config(chain_config)
.spec(spec.clone())
.logger(logging::test_logger())
.deterministic_keypairs(initial_validator_count)
.fresh_ephemeral_store()
.mock_execution_layer()
.build();
harness
.execution_block_generator()
.move_to_terminal_block()
.unwrap();
Box::pin(harness.extend_to_slot(pre_deposit_slot)).await;
// Create a block with a deposit for a new validator.
let pre_deposit_state = harness.get_current_state();
assert_eq!(pre_deposit_state.slot(), pre_deposit_slot);
assert_eq!(pre_deposit_state.fork_name_unchecked(), ForkName::Electra);
// FIXME: Probably need to make this deterministic?
let new_keypair = Keypair::random();
let new_validator_pk_bytes = PublicKeyBytes::from(&new_keypair.pk);
let withdrawal_credentials = Hash256::ZERO;
let amount = spec.min_per_epoch_churn_limit_electra;
let deposit_data = harness.make_deposit_data(&new_keypair, withdrawal_credentials, amount);
let deposit_request = DepositRequest {
pubkey: deposit_data.pubkey,
withdrawal_credentials: deposit_data.withdrawal_credentials,
amount: deposit_data.amount,
signature: deposit_data.signature,
index: 0,
};
let ((jank_block, blobs), mut state) = harness
.make_block_with_modifier(pre_deposit_state, deposit_slot, |block| {
block
.body_mut()
.execution_requests_mut()
.unwrap()
.deposits
.push(deposit_request)
.unwrap();
})
.await;
// Compute correct state root.
// FIXME: this is kinda nasty
let mut ctxt = ConsensusContext::new(jank_block.slot());
per_block_processing(
&mut state,
&jank_block,
BlockSignatureStrategy::VerifyIndividual,
VerifyBlockRoot::True,
&mut ctxt,
&spec,
)
.unwrap();
let (mut block, _) = (*jank_block).clone().deconstruct();
*block.state_root_mut() = state.update_tree_hash_cache().unwrap();
let proposer_index = block.proposer_index() as usize;
let signed_block = Arc::new(block.sign(
&harness.validator_keypairs[proposer_index].sk,
&state.fork(),
state.genesis_validators_root(),
&spec,
));
let block_root = signed_block.canonical_root();
let block_contents = (signed_block, blobs);
harness
.process_block(deposit_slot, block_root, block_contents)
.await
.unwrap();
let post_block_state = harness.get_current_state();
assert_eq!(post_block_state.pending_deposits().unwrap().len(), 1);
assert_eq!(post_block_state.validators().len(), initial_validator_count);
// Advance to one slot before the finalization of the deposit.
Box::pin(harness.extend_to_slot(deposit_slot + 2 * E::slots_per_epoch())).await;
let pre_finalized_deposit_state = harness.get_current_state();
assert_eq!(
pre_finalized_deposit_state.validators().len(),
initial_validator_count
);
let new_epoch_start_slot = pre_finalized_deposit_state.slot() + 3 * E::slots_per_epoch() + 1;
// New validator should not be in the pubkey cache yet.
assert_eq!(
harness
.chain
.validator_index(&new_validator_pk_bytes)
.unwrap(),
None
);
let new_validator_index = initial_validator_count;
// Produce blocks in the next 3 epochs. Statistically one of these should be signed by our new
// validator (99% probability).
harness.extend_to_slot(new_epoch_start_slot).await;
// New validator should be in the pubkey cache now.
assert_eq!(
harness
.chain
.validator_index(&new_validator_pk_bytes)
.unwrap(),
Some(new_validator_index)
);
// Initialise a new harness using checkpoint sync, prior to the new deposit being finalized.
let datadir = tempdir().unwrap();
let store = get_store(&datadir, Default::default(), spec.clone());
let kzg = get_kzg(&spec);
let mock = mock_execution_layer_from_parts(
harness.spec.clone(),
harness.runtime.task_executor.clone(),
);
// Initialise a new beacon chain from the finalized checkpoint.
// The slot clock must be set to a time ahead of the checkpoint state.
let slot_clock = TestingSlotClock::new(
Slot::new(0),
Duration::from_secs(harness.chain.genesis_time),
Duration::from_secs(spec.seconds_per_slot),
);
slot_clock.set_slot(harness.get_current_slot().as_u64());
let checkpoint_slot = deposit_slot
.epoch(E::slots_per_epoch())
.start_slot(E::slots_per_epoch());
let mut checkpoint_state = harness
.chain
.state_at_slot(checkpoint_slot, StateSkipConfig::WithStateRoots)
.unwrap();
let checkpoint_state_root = checkpoint_state.update_tree_hash_cache().unwrap();
let checkpoint_block_root = checkpoint_state.get_latest_block_root(checkpoint_state_root);
let checkpoint_block = harness
.chain
.get_block(&checkpoint_block_root)
.await
.unwrap()
.unwrap();
let checkpoint_blobs_opt = harness
.chain
.get_or_reconstruct_blobs(&checkpoint_block_root)
.unwrap();
let genesis_state = harness
.chain
.state_at_slot(Slot::new(0), StateSkipConfig::WithStateRoots)
.unwrap();
let (shutdown_tx, _shutdown_rx) = futures::channel::mpsc::channel(1);
let log = harness.runtime.log.clone();
let beacon_chain = Arc::new(
BeaconChainBuilder::<DiskHarnessType<E>>::new(MainnetEthSpec, kzg)
.store(store.clone())
.custom_spec(spec.clone())
.task_executor(harness.chain.task_executor.clone())
.logger(harness.runtime.log.clone())
.weak_subjectivity_state(
checkpoint_state,
checkpoint_block.clone(),
checkpoint_blobs_opt.clone(),
genesis_state,
)
.unwrap()
.shutdown_sender(shutdown_tx)
.store_migrator_config(MigratorConfig::default().blocking())
.dummy_eth1_backend()
.expect("should build dummy backend")
.slot_clock(slot_clock)
.chain_config(ChainConfig::default())
.execution_layer(Some(mock.el))
.build()
.expect("should build"),
);
let chain_dump = harness.chain.chain_dump().unwrap();
let new_blocks = chain_dump
.iter()
.filter(|snapshot| snapshot.beacon_block.slot() > checkpoint_slot);
let mut chain_segment = vec![];
let mut new_proposer_present = false;
for snapshot in new_blocks {
let block_root = snapshot.beacon_block_root;
let full_block = harness.chain.get_block(&block_root).await.unwrap().unwrap();
new_proposer_present |= full_block.message().proposer_index() == new_validator_index as u64;
println!(
"Proposal from validator {} at slot {}",
full_block.message().proposer_index(),
full_block.slot()
);
chain_segment
.push(harness.build_rpc_block_from_store_blobs(Some(block_root), Arc::new(full_block)));
}
assert_ne!(chain_segment.len(), 0);
// This should succeed despite the new validator index being unknown to the checkpoint synced
// chain.
/*
assert!(
new_proposer_present,
"new proposer should be part of chain segment"
);
*/
assert_eq!(
beacon_chain
.validator_index(&new_validator_pk_bytes)
.unwrap(),
None,
);
beacon_chain
.process_chain_segment(chain_segment, NotifyExecutionLayer::Yes)
.await
.into_block_error()
.unwrap();
// Processing the chain segment should add the new validator to the cache.
assert_eq!(
beacon_chain
.validator_index(&new_validator_pk_bytes)
.unwrap(),
Some(new_validator_index),
);
}

View File

@@ -3,6 +3,7 @@ mod attestation_verification;
mod bellatrix;
mod block_verification;
mod capella;
mod electra;
mod events;
mod op_verification;
mod payload_invalidation;

View File

@@ -21,8 +21,8 @@ static EMPTY_PUBKEY: LazyLock<PublicKeyBytes> = LazyLock::new(PublicKeyBytes::em
pub enum Error {
InvalidHierarchy,
DiffDeletionsNotSupported,
-UnableToComputeDiff,
-UnableToApplyDiff,
+UnableToComputeDiff(xdelta3::Error),
+UnableToApplyDiff(xdelta3::Error),
BalancesIncompleteChunk,
Compression(std::io::Error),
InvalidSszState(ssz::DecodeError),
@@ -323,9 +323,15 @@ impl BytesDiff {
}
pub fn compute_xdelta(source_bytes: &[u8], target_bytes: &[u8]) -> Result<Self, Error> {
-let bytes = xdelta3::encode(target_bytes, source_bytes)
-.ok_or(Error::UnableToComputeDiff)
-.unwrap();
+// TODO(hdiff): Use a smaller estimate for the output diff buffer size, currently the
+// xdelta3 lib will use 2x the size of the source plus the target length, which is 4x the
+// size of the hdiff buffer. In practice, diffs are almost always smaller than buffers (by a
+// significant factor), so this is 4-16x larger than necessary in a temporary allocation.
+//
+// We should use an estimated size that *should* be enough, and then dynamically increase it
+// if we hit an insufficient space error.
+let bytes =
+xdelta3::encode(target_bytes, source_bytes).map_err(Error::UnableToComputeDiff)?;
Ok(Self { bytes })
}
@@ -334,8 +340,31 @@ impl BytesDiff {
}
pub fn apply_xdelta(&self, source: &[u8], target: &mut Vec<u8>) -> Result<(), Error> {
-*target = xdelta3::decode(&self.bytes, source).ok_or(Error::UnableToApplyDiff)?;
-Ok(())
+// TODO(hdiff): Dynamic buffer allocation. This is a stopgap until we implement a schema
+// change to store the output buffer size inside the `BytesDiff`.
+let mut output_length = ((source.len() + self.bytes.len()) * 3) / 2;
+let mut num_resizes = 0;
+loop {
+match xdelta3::decode_with_output_len(&self.bytes, source, output_length as u32) {
+Ok(result_buffer) => {
+*target = result_buffer;
+metrics::observe(
+&metrics::BEACON_HDIFF_BUFFER_APPLY_RESIZES,
+num_resizes as f64,
+);
+return Ok(());
+}
+Err(xdelta3::Error::InsufficientOutputLength) => {
+// Double the output buffer length and try again.
+output_length *= 2;
+num_resizes += 1;
+}
+Err(err) => {
+return Err(Error::UnableToApplyDiff(err));
+}
+}
+}
}
/// Byte size of this instance

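The encode-side TODO above proposes the same estimate-then-grow strategy that `apply_xdelta` now uses for decoding. A minimal sketch of that idea, assuming a hypothetical `encode_with_output_len` counterpart to the fork's `decode_with_output_len` (no such function appears in this diff):

// Sketch only: `encode_with_output_len` is hypothetical and mirrors the
// error contract of `decode_with_output_len` from the sigp/xdelta3-rs fork.
fn encode_with_retry(target: &[u8], source: &[u8]) -> Result<Vec<u8>, xdelta3::Error> {
    // Start from a modest estimate instead of the current worst case of
    // 2x the source length plus the target length; in practice diffs are
    // much smaller than either input.
    let mut output_length = (source.len() / 4).max(1024);
    loop {
        match xdelta3::encode_with_output_len(target, source, output_length as u32) {
            Ok(diff) => return Ok(diff),
            // Estimate was too small: double the buffer and retry.
            Err(xdelta3::Error::InsufficientOutputLength) => {
                output_length *= 2;
            }
            Err(err) => return Err(err),
        }
    }
}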
View File

@@ -202,6 +202,13 @@ pub static BEACON_HDIFF_BUFFER_CLONE_TIMES: LazyLock<Result<Histogram>> = LazyLo
"Time required to clone hierarchical diff buffer bytes",
)
});
pub static BEACON_HDIFF_BUFFER_APPLY_RESIZES: LazyLock<Result<Histogram>> = LazyLock::new(|| {
try_create_histogram_with_buckets(
"store_hdiff_buffer_apply_resizes",
"Number of times during diff application that the output buffer had to be resized before decoding succeeded",
Ok(vec![0.0, 1.0, 2.0, 3.0, 4.0, 5.0])
)
});
/*
* Beacon Block
*/

View File

@@ -1,6 +1,6 @@
[package]
name = "boot_node"
version = "7.0.0-beta.4"
version = "7.0.0-beta.5"
authors = ["Sigma Prime <contact@sigmaprime.io>"]
edition = { workspace = true }

View File

@@ -17,8 +17,8 @@ pub const VERSION: &str = git_version!(
// NOTE: using --match instead of --exclude for compatibility with old Git
"--match=thiswillnevermatchlol"
],
prefix = "Lighthouse/v7.0.0-beta.4-",
fallback = "Lighthouse/v7.0.0-beta.4"
prefix = "Lighthouse/v7.0.0-beta.5-",
fallback = "Lighthouse/v7.0.0-beta.5"
);
/// Returns the first eight characters of the latest commit hash for this build.
@@ -54,7 +54,7 @@ pub fn version_with_platform() -> String {
///
/// `1.5.1`
pub fn version() -> &'static str {
"7.0.0-beta.4"
"7.0.0-beta.5"
}
/// Returns the name of the current client running.

View File

@@ -175,6 +175,7 @@ pub fn process_epoch_single_pass<E: EthSpec>(
let mut earliest_exit_epoch = state.earliest_exit_epoch().ok();
let mut exit_balance_to_consume = state.exit_balance_to_consume().ok();
let validators_in_consolidations = get_validators_in_consolidations(state);
// Split the state into several disjoint mutable borrows.
let (
@@ -317,17 +318,26 @@ pub fn process_epoch_single_pass<E: EthSpec>(
// `process_effective_balance_updates`
if conf.effective_balance_updates {
-process_single_effective_balance_update(
-validator_info.index,
-*balance,
-&mut validator,
-validator_info.current_epoch_participation,
-&mut next_epoch_cache,
-progressive_balances,
-effective_balances_ctxt,
-state_ctxt,
-spec,
-)?;
+if validators_in_consolidations.contains(&validator_info.index) {
+process_single_dummy_effective_balance_update(
+validator_info.index,
+&validator,
+&mut next_epoch_cache,
+state_ctxt,
+)?;
+} else {
+process_single_effective_balance_update(
+validator_info.index,
+*balance,
+&mut validator,
+validator_info.current_epoch_participation,
+&mut next_epoch_cache,
+progressive_balances,
+effective_balances_ctxt,
+state_ctxt,
+spec,
+)?;
+}
}
}
@@ -430,6 +440,7 @@ pub fn process_epoch_single_pass<E: EthSpec>(
if fork_name.electra_enabled() && conf.pending_consolidations {
process_pending_consolidations(
state,
&validators_in_consolidations,
&mut next_epoch_cache,
effective_balances_ctxt,
conf.effective_balance_updates,
@@ -1026,12 +1037,38 @@ fn process_pending_deposits_for_validator(
Ok(())
}
/// Return the set of validators referenced by consolidations, either as source or target.
///
/// This function is blind to whether the consolidations are valid and capable of being processed;
/// it just returns the set of all indices present in consolidations. This is *sufficient* to
/// make consolidations play nicely with effective balance updates. The algorithm used is:
///
/// - In the single pass: apply effective balance updates for all validators *not* referenced by
/// consolidations.
/// - Apply consolidations.
/// - Apply effective balance updates for all validators previously skipped.
///
/// Prior to Electra, the empty set is returned.
fn get_validators_in_consolidations<E: EthSpec>(state: &BeaconState<E>) -> BTreeSet<usize> {
let mut referenced_validators = BTreeSet::new();
if let Ok(pending_consolidations) = state.pending_consolidations() {
for pending_consolidation in pending_consolidations {
referenced_validators.insert(pending_consolidation.source_index as usize);
referenced_validators.insert(pending_consolidation.target_index as usize);
}
}
referenced_validators
}
/// We process pending consolidations after all of single-pass epoch processing, and then patch up
/// the effective balances for affected validators.
///
/// This is safe because processing consolidations does not depend on the `effective_balance`.
fn process_pending_consolidations<E: EthSpec>(
state: &mut BeaconState<E>,
validators_in_consolidations: &BTreeSet<usize>,
next_epoch_cache: &mut PreEpochCache,
effective_balances_ctxt: &EffectiveBalancesContext,
perform_effective_balance_updates: bool,
@@ -1042,8 +1079,6 @@ fn process_pending_consolidations<E: EthSpec>(
let next_epoch = state.next_epoch()?;
let pending_consolidations = state.pending_consolidations()?.clone();
-let mut affected_validators = BTreeSet::new();
for pending_consolidation in &pending_consolidations {
let source_index = pending_consolidation.source_index as usize;
let target_index = pending_consolidation.target_index as usize;
@@ -1069,9 +1104,6 @@ fn process_pending_consolidations<E: EthSpec>(
decrease_balance(state, source_index, source_effective_balance)?;
increase_balance(state, target_index, source_effective_balance)?;
-affected_validators.insert(source_index);
-affected_validators.insert(target_index);
next_pending_consolidation.safe_add_assign(1)?;
}
@@ -1087,7 +1119,7 @@ fn process_pending_consolidations<E: EthSpec>(
// Re-process effective balance updates for validators affected by consolidations.
let (validators, balances, _, current_epoch_participation, _, progressive_balances, _, _) =
state.mutable_validator_fields()?;
-for validator_index in affected_validators {
+for &validator_index in validators_in_consolidations {
let balance = *balances
.get(validator_index)
.ok_or(BeaconStateError::UnknownValidator(validator_index))?;
@@ -1129,6 +1161,28 @@ impl EffectiveBalancesContext {
}
}
/// This function is called for validators that do not have their effective balance updated as
/// part of the single-pass loop. For these validators we compute their true effective balance
/// update after processing consolidations. However, to maintain the invariants of the
/// `PreEpochCache` we must register _some_ effective balance for them immediately.
fn process_single_dummy_effective_balance_update(
validator_index: usize,
validator: &Cow<Validator>,
next_epoch_cache: &mut PreEpochCache,
state_ctxt: &StateContext,
) -> Result<(), Error> {
// Populate the effective balance cache with the current effective balance. This will be
// overridden when `process_single_effective_balance_update` is called.
let is_active_next_epoch = validator.is_active_at(state_ctxt.next_epoch);
let temporary_effective_balance = validator.effective_balance;
next_epoch_cache.update_effective_balance(
validator_index,
temporary_effective_balance,
is_active_next_epoch,
)?;
Ok(())
}
/// This function abstracts over phase0 and Electra effective balance processing.
#[allow(clippy::too_many_arguments)]
fn process_single_effective_balance_update(

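A simplified, self-contained model of the skip-then-patch ordering described in the doc comments above (all names below are illustrative, not Lighthouse APIs):

use std::collections::BTreeSet;

// Illustrative only: effective balance = balance rounded down to an increment.
const INCREMENT: u64 = 1_000_000_000;

fn effective_balance(balance: u64) -> u64 {
    (balance / INCREMENT) * INCREMENT
}

fn run_epoch(
    balances: &mut [u64],
    effective_balances: &mut [u64],
    consolidations: &[(usize, usize)], // (source_index, target_index)
) {
    // 1. Collect every validator referenced by a consolidation, source or target.
    let skipped: BTreeSet<usize> = consolidations
        .iter()
        .flat_map(|&(source, target)| [source, target])
        .collect();
    // 2. Single pass: update effective balances for all *other* validators.
    for i in 0..balances.len() {
        if !skipped.contains(&i) {
            effective_balances[i] = effective_balance(balances[i]);
        }
    }
    // 3. Apply consolidations. These read the source's stale (pre-epoch)
    //    effective balance, never one refreshed in step 2. Assumes
    //    balance >= effective balance, as a simplification.
    for &(source, target) in consolidations {
        let moved = effective_balances[source];
        balances[source] -= moved;
        balances[target] += moved;
    }
    // 4. Patch up: recompute effective balances for the skipped validators.
    for &i in &skipped {
        effective_balances[i] = effective_balance(balances[i]);
    }
}

The intent, per the doc comment, is that consolidation processing only ever reads effective balances from before the epoch transition, so deferring the update for referenced validators and patching up afterwards yields the same final values as the spec ordering.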
View File

@@ -1,7 +1,7 @@
[package]
name = "lcli"
description = "Lighthouse CLI (modeled after zcli)"
version = "7.0.0-beta.4"
version = "7.0.0-beta.5"
authors = ["Paul Hauner <paul@paulhauner.com>"]
edition = { workspace = true }

View File

@@ -1,6 +1,6 @@
[package]
name = "lighthouse"
version = "7.0.0-beta.4"
version = "7.0.0-beta.5"
authors = ["Sigma Prime <contact@sigmaprime.io>"]
edition = { workspace = true }
autotests = false