Compare commits

...

3 Commits

Author SHA1 Message Date
Michael Sproul
6d5a2be7f9 Release v7.0.0-beta.5 (#7210)
New release for Pectra-enabled networks.
2025-03-27 03:42:34 +00:00
Michael Sproul
7d792e615c Fix xdelta3 output buffer issue (#7174)
* Fix xdelta3 output buffer issue

* Fix buckets

* Update commit hash to `main`

* Tag TODO(hdiff)

* Update cargo lock
2025-03-27 13:25:50 +11:00
Michael Sproul
0875326cb6 Prevent duplicate effective balance processing (#7209) 2025-03-27 12:53:38 +11:00
10 changed files with 127 additions and 37 deletions

10
Cargo.lock generated
View File

@@ -860,7 +860,7 @@ dependencies = [
[[package]]
name = "beacon_node"
version = "7.0.0-beta.4"
version = "7.0.0-beta.5"
dependencies = [
"account_utils",
"beacon_chain",
@@ -1108,7 +1108,7 @@ dependencies = [
[[package]]
name = "boot_node"
version = "7.0.0-beta.4"
version = "7.0.0-beta.5"
dependencies = [
"beacon_node",
"bytes",
@@ -4811,7 +4811,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55"
[[package]]
name = "lcli"
version = "7.0.0-beta.4"
version = "7.0.0-beta.5"
dependencies = [
"account_utils",
"beacon_chain",
@@ -5366,7 +5366,7 @@ dependencies = [
[[package]]
name = "lighthouse"
version = "7.0.0-beta.4"
version = "7.0.0-beta.5"
dependencies = [
"account_manager",
"account_utils",
@@ -10760,7 +10760,7 @@ dependencies = [
[[package]]
name = "xdelta3"
version = "0.1.5"
source = "git+http://github.com/sigp/xdelta3-rs?rev=50d63cdf1878e5cf3538e9aae5eed34a22c64e4a#50d63cdf1878e5cf3538e9aae5eed34a22c64e4a"
source = "git+http://github.com/sigp/xdelta3-rs?rev=4db64086bb02e9febb584ba93b9d16bb2ae3825a#4db64086bb02e9febb584ba93b9d16bb2ae3825a"
dependencies = [
"bindgen",
"cc",

View File

@@ -289,7 +289,7 @@ validator_metrics = { path = "validator_client/validator_metrics" }
validator_store = { path = "validator_client/validator_store" }
validator_test_rig = { path = "testing/validator_test_rig" }
warp_utils = { path = "common/warp_utils" }
xdelta3 = { git = "http://github.com/sigp/xdelta3-rs", rev = "50d63cdf1878e5cf3538e9aae5eed34a22c64e4a" }
xdelta3 = { git = "http://github.com/sigp/xdelta3-rs", rev = "4db64086bb02e9febb584ba93b9d16bb2ae3825a" }
zstd = "0.13"
[profile.maxperf]

View File

@@ -1,6 +1,6 @@
[package]
name = "beacon_node"
version = "7.0.0-beta.4"
version = "7.0.0-beta.5"
authors = [
"Paul Hauner <paul@paulhauner.com>",
"Age Manning <Age@AgeManning.com>",

View File

@@ -21,8 +21,8 @@ static EMPTY_PUBKEY: LazyLock<PublicKeyBytes> = LazyLock::new(PublicKeyBytes::em
pub enum Error {
InvalidHierarchy,
DiffDeletionsNotSupported,
UnableToComputeDiff,
UnableToApplyDiff,
UnableToComputeDiff(xdelta3::Error),
UnableToApplyDiff(xdelta3::Error),
BalancesIncompleteChunk,
Compression(std::io::Error),
InvalidSszState(ssz::DecodeError),
@@ -323,9 +323,15 @@ impl BytesDiff {
}
pub fn compute_xdelta(source_bytes: &[u8], target_bytes: &[u8]) -> Result<Self, Error> {
let bytes = xdelta3::encode(target_bytes, source_bytes)
.ok_or(Error::UnableToComputeDiff)
.unwrap();
// TODO(hdiff): Use a smaller estimate for the output diff buffer size, currently the
// xdelta3 lib will use 2x the size of the source plus the target length, which is 4x the
// size of the hdiff buffer. In practice, diffs are almost always smaller than buffers (by a
// significant factor), so this is 4-16x larger than necessary in a temporary allocation.
//
// We should use an estimated size that *should* be enough, and then dynamically increase it
// if we hit an insufficient space error.
let bytes =
xdelta3::encode(target_bytes, source_bytes).map_err(Error::UnableToComputeDiff)?;
Ok(Self { bytes })
}
@@ -334,8 +340,31 @@ impl BytesDiff {
}
pub fn apply_xdelta(&self, source: &[u8], target: &mut Vec<u8>) -> Result<(), Error> {
*target = xdelta3::decode(&self.bytes, source).ok_or(Error::UnableToApplyDiff)?;
Ok(())
// TODO(hdiff): Dynamic buffer allocation. This is a stopgap until we implement a schema
// change to store the output buffer size inside the `BytesDiff`.
let mut output_length = ((source.len() + self.bytes.len()) * 3) / 2;
let mut num_resizes = 0;
loop {
match xdelta3::decode_with_output_len(&self.bytes, source, output_length as u32) {
Ok(result_buffer) => {
*target = result_buffer;
metrics::observe(
&metrics::BEACON_HDIFF_BUFFER_APPLY_RESIZES,
num_resizes as f64,
);
return Ok(());
}
Err(xdelta3::Error::InsufficientOutputLength) => {
// Double the output buffer length and try again.
output_length *= 2;
num_resizes += 1;
}
Err(err) => {
return Err(Error::UnableToApplyDiff(err));
}
}
}
}
/// Byte size of this instance

View File

@@ -202,6 +202,13 @@ pub static BEACON_HDIFF_BUFFER_CLONE_TIMES: LazyLock<Result<Histogram>> = LazyLo
"Time required to clone hierarchical diff buffer bytes",
)
});
pub static BEACON_HDIFF_BUFFER_APPLY_RESIZES: LazyLock<Result<Histogram>> = LazyLock::new(|| {
try_create_histogram_with_buckets(
"store_hdiff_buffer_apply_resizes",
"Number of times during diff application that the output buffer had to be resized before decoding succeeded",
Ok(vec![0.0, 1.0, 2.0, 3.0, 4.0, 5.0])
)
});
/*
* Beacon Block
*/

View File

@@ -1,6 +1,6 @@
[package]
name = "boot_node"
version = "7.0.0-beta.4"
version = "7.0.0-beta.5"
authors = ["Sigma Prime <contact@sigmaprime.io>"]
edition = { workspace = true }

View File

@@ -17,8 +17,8 @@ pub const VERSION: &str = git_version!(
// NOTE: using --match instead of --exclude for compatibility with old Git
"--match=thiswillnevermatchlol"
],
prefix = "Lighthouse/v7.0.0-beta.4-",
fallback = "Lighthouse/v7.0.0-beta.4"
prefix = "Lighthouse/v7.0.0-beta.5-",
fallback = "Lighthouse/v7.0.0-beta.5"
);
/// Returns the first eight characters of the latest commit hash for this build.
@@ -54,7 +54,7 @@ pub fn version_with_platform() -> String {
///
/// `1.5.1`
pub fn version() -> &'static str {
"7.0.0-beta.4"
"7.0.0-beta.5"
}
/// Returns the name of the current client running.

View File

@@ -175,6 +175,7 @@ pub fn process_epoch_single_pass<E: EthSpec>(
let mut earliest_exit_epoch = state.earliest_exit_epoch().ok();
let mut exit_balance_to_consume = state.exit_balance_to_consume().ok();
let validators_in_consolidations = get_validators_in_consolidations(state);
// Split the state into several disjoint mutable borrows.
let (
@@ -317,17 +318,26 @@ pub fn process_epoch_single_pass<E: EthSpec>(
// `process_effective_balance_updates`
if conf.effective_balance_updates {
process_single_effective_balance_update(
validator_info.index,
*balance,
&mut validator,
validator_info.current_epoch_participation,
&mut next_epoch_cache,
progressive_balances,
effective_balances_ctxt,
state_ctxt,
spec,
)?;
if validators_in_consolidations.contains(&validator_info.index) {
process_single_dummy_effective_balance_update(
validator_info.index,
&validator,
&mut next_epoch_cache,
state_ctxt,
)?;
} else {
process_single_effective_balance_update(
validator_info.index,
*balance,
&mut validator,
validator_info.current_epoch_participation,
&mut next_epoch_cache,
progressive_balances,
effective_balances_ctxt,
state_ctxt,
spec,
)?;
}
}
}
@@ -430,6 +440,7 @@ pub fn process_epoch_single_pass<E: EthSpec>(
if fork_name.electra_enabled() && conf.pending_consolidations {
process_pending_consolidations(
state,
&validators_in_consolidations,
&mut next_epoch_cache,
effective_balances_ctxt,
conf.effective_balance_updates,
@@ -1026,12 +1037,38 @@ fn process_pending_deposits_for_validator(
Ok(())
}
/// Return the set of validators referenced by consolidations, either as source or target.
///
/// This function is blind to whether the consolidations are valid and capable of being processed,
/// it just returns the set of all indices present in consolidations. This is *sufficient* to
/// make consolidations play nicely with effective balance updates. The algorithm used is:
///
/// - In the single pass: apply effective balance updates for all validators *not* referenced by
/// consolidations.
/// - Apply consolidations.
/// - Apply effective balance updates for all validators previously skipped.
///
/// Prior to Electra, the empty set is returned.
fn get_validators_in_consolidations<E: EthSpec>(state: &BeaconState<E>) -> BTreeSet<usize> {
let mut referenced_validators = BTreeSet::new();
if let Ok(pending_consolidations) = state.pending_consolidations() {
for pending_consolidation in pending_consolidations {
referenced_validators.insert(pending_consolidation.source_index as usize);
referenced_validators.insert(pending_consolidation.target_index as usize);
}
}
referenced_validators
}
/// We process pending consolidations after all of single-pass epoch processing, and then patch up
/// the effective balances for affected validators.
///
/// This is safe because processing consolidations does not depend on the `effective_balance`.
fn process_pending_consolidations<E: EthSpec>(
state: &mut BeaconState<E>,
validators_in_consolidations: &BTreeSet<usize>,
next_epoch_cache: &mut PreEpochCache,
effective_balances_ctxt: &EffectiveBalancesContext,
perform_effective_balance_updates: bool,
@@ -1042,8 +1079,6 @@ fn process_pending_consolidations<E: EthSpec>(
let next_epoch = state.next_epoch()?;
let pending_consolidations = state.pending_consolidations()?.clone();
let mut affected_validators = BTreeSet::new();
for pending_consolidation in &pending_consolidations {
let source_index = pending_consolidation.source_index as usize;
let target_index = pending_consolidation.target_index as usize;
@@ -1069,9 +1104,6 @@ fn process_pending_consolidations<E: EthSpec>(
decrease_balance(state, source_index, source_effective_balance)?;
increase_balance(state, target_index, source_effective_balance)?;
affected_validators.insert(source_index);
affected_validators.insert(target_index);
next_pending_consolidation.safe_add_assign(1)?;
}
@@ -1087,7 +1119,7 @@ fn process_pending_consolidations<E: EthSpec>(
// Re-process effective balance updates for validators affected by consolidations.
let (validators, balances, _, current_epoch_participation, _, progressive_balances, _, _) =
state.mutable_validator_fields()?;
for validator_index in affected_validators {
for &validator_index in validators_in_consolidations {
let balance = *balances
.get(validator_index)
.ok_or(BeaconStateError::UnknownValidator(validator_index))?;
@@ -1129,6 +1161,28 @@ impl EffectiveBalancesContext {
}
}
/// This function is called for validators that do not have their effective balance updated as
/// part of the single-pass loop. For these validators we compute their true effective balance
/// update after processing consolidations. However, to maintain the invariants of the
/// `PreEpochCache` we must register _some_ effective balance for them immediately.
fn process_single_dummy_effective_balance_update(
validator_index: usize,
validator: &Cow<Validator>,
next_epoch_cache: &mut PreEpochCache,
state_ctxt: &StateContext,
) -> Result<(), Error> {
// Populate the effective balance cache with the current effective balance. This will be
// overridden when `process_single_effective_balance_update` is called.
let is_active_next_epoch = validator.is_active_at(state_ctxt.next_epoch);
let temporary_effective_balance = validator.effective_balance;
next_epoch_cache.update_effective_balance(
validator_index,
temporary_effective_balance,
is_active_next_epoch,
)?;
Ok(())
}
/// This function abstracts over phase0 and Electra effective balance processing.
#[allow(clippy::too_many_arguments)]
fn process_single_effective_balance_update(

View File

@@ -1,7 +1,7 @@
[package]
name = "lcli"
description = "Lighthouse CLI (modeled after zcli)"
version = "7.0.0-beta.4"
version = "7.0.0-beta.5"
authors = ["Paul Hauner <paul@paulhauner.com>"]
edition = { workspace = true }

View File

@@ -1,6 +1,6 @@
[package]
name = "lighthouse"
version = "7.0.0-beta.4"
version = "7.0.0-beta.5"
authors = ["Sigma Prime <contact@sigmaprime.io>"]
edition = { workspace = true }
autotests = false