From dc73791f35dff0484a35ddedba4b58c6ca34c3c9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Wed, 22 Jan 2025 22:55:55 +0000 Subject: [PATCH 01/13] update script for new mergify syntax (#6597) --- .github/mergify.yml | 42 ++++++++++++++++++++++++++++++++++++++---- 1 file changed, 38 insertions(+), 4 deletions(-) diff --git a/.github/mergify.yml b/.github/mergify.yml index 4c4046cf67..ff08f2d349 100644 --- a/.github/mergify.yml +++ b/.github/mergify.yml @@ -1,3 +1,36 @@ +pull_request_rules: + - name: Ask to resolve conflict + conditions: + - conflict + - -author=dependabot[bot] + - or: + - -draft # Don't report conflicts on regular draft. + - and: # Do report conflicts on draft that are scheduled for the next major release. + - draft + - milestone~=v[0-9]\.[0-9]{2} + actions: + comment: + message: This pull request has merge conflicts. Could you please resolve them + @{{author}}? 🙏 + + - name: Approve trivial maintainer PRs + conditions: + - base=master + - label=trivial + - author=@sigp/lighthouse + actions: + review: + type: APPROVE + + - name: Add ready-to-merge labeled PRs to merge queue + conditions: + # All branch protection rules are implicit: https://docs.mergify.com/conditions/#about-branch-protection + - base=master + - label=send-it + actions: + queue: + + queue_rules: - name: default batch_size: 8 @@ -6,10 +39,11 @@ queue_rules: merge_method: squash commit_message_template: | {{ title }} (#{{ number }}) - - {% for commit in commits %} - * {{ commit.commit_message }} - {% endfor %} + + {{ body | get_section("## Issue Addressed", "") }} + + + {{ body | get_section("## Proposed Changes", "") }} queue_conditions: - "#approved-reviews-by >= 1" - "check-success=license/cla" From 6b6f2beb7d1736a07b68de8ef4ffa1c8e4b5feab Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 23 Jan 2025 11:01:11 +1100 Subject: [PATCH 02/13] Fix branch/tag names in mergify config (#6842) --- .github/mergify.yml | 6 +++--- 1 file changed, 3 insertions(+), 
3 deletions(-) diff --git a/.github/mergify.yml b/.github/mergify.yml index ff08f2d349..9a74414e72 100644 --- a/.github/mergify.yml +++ b/.github/mergify.yml @@ -15,7 +15,7 @@ pull_request_rules: - name: Approve trivial maintainer PRs conditions: - - base=master + - base=unstable - label=trivial - author=@sigp/lighthouse actions: @@ -25,8 +25,8 @@ pull_request_rules: - name: Add ready-to-merge labeled PRs to merge queue conditions: # All branch protection rules are implicit: https://docs.mergify.com/conditions/#about-branch-protection - - base=master - - label=send-it + - base=unstable + - label=ready-to-merge actions: queue: From 029b4f21047c37d1ffde51c554737b1aa0880f97 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Fri, 24 Jan 2025 00:43:51 +0000 Subject: [PATCH 03/13] Improve mergify config (#6852) * improve mergify config * negate conflict --- .github/mergify.yml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/mergify.yml b/.github/mergify.yml index 9a74414e72..1aa24f8302 100644 --- a/.github/mergify.yml +++ b/.github/mergify.yml @@ -18,6 +18,7 @@ pull_request_rules: - base=unstable - label=trivial - author=@sigp/lighthouse + - -conflict actions: review: type: APPROVE @@ -26,11 +27,11 @@ pull_request_rules: conditions: # All branch protection rules are implicit: https://docs.mergify.com/conditions/#about-branch-protection - base=unstable - - label=ready-to-merge + - label=ready-for-merge + - label!=do-not-merge actions: queue: - queue_rules: - name: default batch_size: 8 @@ -48,6 +49,7 @@ queue_rules: - "#approved-reviews-by >= 1" - "check-success=license/cla" - "check-success=target-branch-check" + - "label!=do-not-merge" merge_conditions: - "check-success=test-suite-success" - "check-success=local-testnet-success" From bf955c7543dac8911a6f6c334b5b3ca4ef728d9c Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Fri, 14 Feb 2025 10:23:38 +1100 Subject: [PATCH 04/13] Update mergify conditions for `trivial` and 
`ready-for-merge` labels to satisfy if base is not `stable` (#6997) --- .github/mergify.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/mergify.yml b/.github/mergify.yml index 1aa24f8302..73267904b8 100644 --- a/.github/mergify.yml +++ b/.github/mergify.yml @@ -15,7 +15,7 @@ pull_request_rules: - name: Approve trivial maintainer PRs conditions: - - base=unstable + - base!=stable - label=trivial - author=@sigp/lighthouse - -conflict @@ -26,7 +26,7 @@ pull_request_rules: - name: Add ready-to-merge labeled PRs to merge queue conditions: # All branch protection rules are implicit: https://docs.mergify.com/conditions/#about-branch-protection - - base=unstable + - base!=stable - label=ready-for-merge - label!=do-not-merge actions: From bff6dd300aec2703037c21f6c2626fe2300e2408 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 15 Apr 2025 19:48:21 +1000 Subject: [PATCH 05/13] Update withdrawals processing (spec v1.5.0-beta.6) --- .../state_processing/src/per_block_processing.rs | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index ef4799c245..6339f9003d 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -517,7 +517,7 @@ pub fn get_expected_withdrawals( let epoch = state.current_epoch(); let mut withdrawal_index = state.next_withdrawal_index()?; let mut validator_index = state.next_withdrawal_validator_index()?; - let mut withdrawals = vec![]; + let mut withdrawals = Vec::::with_capacity(E::max_withdrawals_per_payload()); let fork_name = state.fork_name_unchecked(); // [New in Electra:EIP7251] @@ -532,19 +532,27 @@ pub fn get_expected_withdrawals( break; } - let withdrawal_balance = state.get_balance(withdrawal.validator_index as usize)?; let validator = state.get_validator(withdrawal.validator_index as 
usize)?; let has_sufficient_effective_balance = validator.effective_balance >= spec.min_activation_balance; - let has_excess_balance = withdrawal_balance > spec.min_activation_balance; + let total_withdrawn = withdrawals + .iter() + .filter_map(|w| { + (w.validator_index == withdrawal.validator_index).then_some(w.amount) + }) + .safe_sum()?; + let balance = state + .get_balance(withdrawal.validator_index as usize)? + .safe_sub(total_withdrawn)?; + let has_excess_balance = balance > spec.min_activation_balance; if validator.exit_epoch == spec.far_future_epoch && has_sufficient_effective_balance && has_excess_balance { let withdrawable_balance = std::cmp::min( - withdrawal_balance.safe_sub(spec.min_activation_balance)?, + balance.safe_sub(spec.min_activation_balance)?, withdrawal.amount, ); withdrawals.push(Withdrawal { From 6fad6fba6a0400a9f100b0ea98cb4993d3ec441f Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 16 Apr 2025 08:54:53 +1000 Subject: [PATCH 06/13] Release v7.0.0-beta.6 --- Cargo.lock | 8 ++++---- beacon_node/Cargo.toml | 2 +- boot_node/Cargo.toml | 2 +- common/lighthouse_version/src/lib.rs | 6 +++--- lcli/Cargo.toml | 2 +- lighthouse/Cargo.toml | 2 +- 6 files changed, 11 insertions(+), 11 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index eee67a413e..0531161684 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -860,7 +860,7 @@ dependencies = [ [[package]] name = "beacon_node" -version = "7.0.0-beta.5" +version = "7.0.0-beta.6" dependencies = [ "account_utils", "beacon_chain", @@ -1108,7 +1108,7 @@ dependencies = [ [[package]] name = "boot_node" -version = "7.0.0-beta.5" +version = "7.0.0-beta.6" dependencies = [ "beacon_node", "bytes", @@ -4813,7 +4813,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lcli" -version = "7.0.0-beta.5" +version = "7.0.0-beta.6" dependencies = [ "account_utils", "beacon_chain", @@ -5368,7 +5368,7 @@ dependencies = [ [[package]] name = "lighthouse" -version = 
"7.0.0-beta.5" +version = "7.0.0-beta.6" dependencies = [ "account_manager", "account_utils", diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index d6c61341a3..5390615291 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "beacon_node" -version = "7.0.0-beta.5" +version = "7.0.0-beta.6" authors = [ "Paul Hauner ", "Age Manning "] edition = { workspace = true } diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index bd5e31e3ab..64e151301a 100644 --- a/common/lighthouse_version/src/lib.rs +++ b/common/lighthouse_version/src/lib.rs @@ -17,8 +17,8 @@ pub const VERSION: &str = git_version!( // NOTE: using --match instead of --exclude for compatibility with old Git "--match=thiswillnevermatchlol" ], - prefix = "Lighthouse/v7.0.0-beta.5-", - fallback = "Lighthouse/v7.0.0-beta.5" + prefix = "Lighthouse/v7.0.0-beta.6-", + fallback = "Lighthouse/v7.0.0-beta.6" ); /// Returns the first eight characters of the latest commit hash for this build. @@ -54,7 +54,7 @@ pub fn version_with_platform() -> String { /// /// `1.5.1` pub fn version() -> &'static str { - "7.0.0-beta.5" + "7.0.0-beta.6" } /// Returns the name of the current client running. 
diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index 0b5355d249..ba958bdeed 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lcli" description = "Lighthouse CLI (modeled after zcli)" -version = "7.0.0-beta.5" +version = "7.0.0-beta.6" authors = ["Paul Hauner "] edition = { workspace = true } diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index e8c5874a91..9fc788dc28 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lighthouse" -version = "7.0.0-beta.5" +version = "7.0.0-beta.6" authors = ["Sigma Prime "] edition = { workspace = true } autotests = false From 5352d5f78a7df9fd394639d83ff397df22db8e5b Mon Sep 17 00:00:00 2001 From: Jean-Baptiste Pinalie <2850825+jybp@users.noreply.github.com> Date: Thu, 17 Apr 2025 02:58:36 +0200 Subject: [PATCH 07/13] Update proposer_slashings and attester_slashings amounts for electra. (#7316) Did not find a specific issue beside https://github.com/sigp/lighthouse/issues/6821 Leverage `whistleblower_reward_quotient_for_state` to have accurate post-electra `proposer_slashings` and `attester_slashings` fields returned by `/eth/v1/beacon/rewards/blocks/`. --- .../beacon_chain/src/beacon_block_reward.rs | 4 +- beacon_node/beacon_chain/tests/rewards.rs | 98 +++++++++++++++++++ beacon_node/http_api/tests/tests.rs | 43 ++++++++ common/eth2/src/lib.rs | 12 ++- 4 files changed, 150 insertions(+), 7 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_block_reward.rs b/beacon_node/beacon_chain/src/beacon_block_reward.rs index e0bb79bf38..fb49790107 100644 --- a/beacon_node/beacon_chain/src/beacon_block_reward.rs +++ b/beacon_node/beacon_chain/src/beacon_block_reward.rs @@ -139,7 +139,7 @@ impl BeaconChain { state .get_validator(proposer_slashing.proposer_index() as usize)? 
.effective_balance - .safe_div(self.spec.whistleblower_reward_quotient)?, + .safe_div(self.spec.whistleblower_reward_quotient_for_state(state))?, )?; } @@ -161,7 +161,7 @@ impl BeaconChain { state .get_validator(attester_index as usize)? .effective_balance - .safe_div(self.spec.whistleblower_reward_quotient)?, + .safe_div(self.spec.whistleblower_reward_quotient_for_state(state))?, )?; } } diff --git a/beacon_node/beacon_chain/tests/rewards.rs b/beacon_node/beacon_chain/tests/rewards.rs index 710752d9cc..8d34c65e47 100644 --- a/beacon_node/beacon_chain/tests/rewards.rs +++ b/beacon_node/beacon_chain/tests/rewards.rs @@ -256,6 +256,35 @@ async fn test_rewards_base_inactivity_leak_justification_epoch() { ); } +#[tokio::test] +async fn test_rewards_electra_slashings() { + let spec = ForkName::Electra.make_genesis_spec(E::default_spec()); + let harness = get_electra_harness(spec); + let state = harness.get_current_state(); + + harness.extend_slots(E::slots_per_epoch() as usize).await; + + let mut initial_balances = harness.get_current_state().balances().to_vec(); + + // add an attester slashing and calculate slashing penalties + harness.add_attester_slashing(vec![0]).unwrap(); + let slashed_balance_1 = initial_balances.get_mut(0).unwrap(); + let validator_1_effective_balance = state.get_effective_balance(0).unwrap(); + let delta_1 = validator_1_effective_balance + / harness.spec.min_slashing_penalty_quotient_for_state(&state); + *slashed_balance_1 -= delta_1; + + // add a proposer slashing and calculating slashing penalties + harness.add_proposer_slashing(1).unwrap(); + let slashed_balance_2 = initial_balances.get_mut(1).unwrap(); + let validator_2_effective_balance = state.get_effective_balance(1).unwrap(); + let delta_2 = validator_2_effective_balance + / harness.spec.min_slashing_penalty_quotient_for_state(&state); + *slashed_balance_2 -= delta_2; + + check_all_electra_rewards(&harness, initial_balances).await; +} + #[tokio::test] async fn 
test_rewards_base_slashings() { let spec = ForkName::Base.make_genesis_spec(E::default_spec()); @@ -696,6 +725,75 @@ async fn test_rewards_base_subset_only() { check_all_base_rewards_for_subset(&harness, initial_balances, validators_subset).await; } +async fn check_all_electra_rewards( + harness: &BeaconChainHarness>, + mut balances: Vec, +) { + let mut proposal_rewards_map = HashMap::new(); + let mut sync_committee_rewards_map = HashMap::new(); + for _ in 0..E::slots_per_epoch() { + let state = harness.get_current_state(); + let slot = state.slot() + Slot::new(1); + + // calculate beacon block rewards / penalties + let ((signed_block, _maybe_blob_sidecars), mut state) = + harness.make_block_return_pre_state(state, slot).await; + let beacon_block_reward = harness + .chain + .compute_beacon_block_reward(signed_block.message(), &mut state) + .unwrap(); + + let total_proposer_reward = proposal_rewards_map + .entry(beacon_block_reward.proposer_index) + .or_insert(0); + *total_proposer_reward += beacon_block_reward.total as i64; + + // calculate sync committee rewards / penalties + let reward_payload = harness + .chain + .compute_sync_committee_rewards(signed_block.message(), &mut state) + .unwrap(); + + for reward in reward_payload { + let total_sync_reward = sync_committee_rewards_map + .entry(reward.validator_index) + .or_insert(0); + *total_sync_reward += reward.reward; + } + + harness.extend_slots(1).await; + } + + // compute reward deltas for all validators in epoch 0 + let StandardAttestationRewards { + ideal_rewards, + total_rewards, + } = harness + .chain + .compute_attestation_rewards(Epoch::new(0), vec![]) + .unwrap(); + + // assert ideal rewards are greater than 0 + assert_eq!( + ideal_rewards.len() as u64, + harness.spec.max_effective_balance_electra / harness.spec.effective_balance_increment + ); + + assert!(ideal_rewards + .iter() + .all(|reward| reward.head > 0 && reward.target > 0 && reward.source > 0)); + + // apply attestation, proposal, and sync 
committee rewards and penalties to initial balances + apply_attestation_rewards(&mut balances, total_rewards); + apply_other_rewards(&mut balances, &proposal_rewards_map); + apply_other_rewards(&mut balances, &sync_committee_rewards_map); + + // verify expected balances against actual balances + let actual_balances: Vec = harness.get_current_state().balances().to_vec(); + + assert_eq!(balances, actual_balances); +} + async fn check_all_base_rewards( harness: &BeaconChainHarness>, balances: Vec, diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index a5aeb30e1a..7e9d1e49fb 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -4,6 +4,7 @@ use beacon_chain::{ BeaconChain, ChainConfig, StateSkipConfig, WhenSlotSkipped, }; use either::Either; +use eth2::lighthouse::StandardBlockReward; use eth2::{ mixin::{RequestAccept, ResponseForkName, ResponseOptional}, reqwest::RequestBuilder, @@ -6381,6 +6382,34 @@ impl ApiTester { assert_eq!(result.execution_optimistic, Some(true)); } + + async fn test_get_beacon_rewards_blocks_at_head(&self) -> StandardBlockReward { + self.client + .get_beacon_rewards_blocks(CoreBlockId::Head) + .await + .unwrap() + .data + } + + async fn test_beacon_block_rewards_electra(self) -> Self { + for _ in 0..E::slots_per_epoch() { + let state = self.harness.get_current_state(); + let slot = state.slot() + Slot::new(1); + // calculate beacon block rewards / penalties + let ((signed_block, _maybe_blob_sidecars), mut state) = + self.harness.make_block_return_pre_state(state, slot).await; + + let beacon_block_reward = self + .harness + .chain + .compute_beacon_block_reward(signed_block.message(), &mut state) + .unwrap(); + self.harness.extend_slots(1).await; + let api_beacon_block_reward = self.test_get_beacon_rewards_blocks_at_head().await; + assert_eq!(beacon_block_reward, api_beacon_block_reward); + } + self + } } async fn poll_events, eth2::Error>> + Unpin, E: EthSpec>( @@ 
-7522,3 +7551,17 @@ async fn expected_withdrawals_valid_capella() { .test_get_expected_withdrawals_capella() .await; } + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn get_beacon_rewards_blocks_electra() { + let mut config = ApiTesterConfig::default(); + config.spec.altair_fork_epoch = Some(Epoch::new(0)); + config.spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + config.spec.capella_fork_epoch = Some(Epoch::new(0)); + config.spec.deneb_fork_epoch = Some(Epoch::new(0)); + config.spec.electra_fork_epoch = Some(Epoch::new(0)); + ApiTester::new_from_config(config) + .await + .test_beacon_block_rewards_electra() + .await; +} diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index c806ae065b..28ca21e16e 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -20,6 +20,7 @@ use derivative::Derivative; use either::Either; use futures::Stream; use futures_util::StreamExt; +use lighthouse::StandardBlockReward; use lighthouse_network::PeerId; use pretty_reqwest_error::PrettyReqwestError; pub use reqwest; @@ -1677,17 +1678,18 @@ impl BeaconNodeHttpClient { } /// `GET beacon/rewards/blocks` - pub async fn get_beacon_rewards_blocks(&self, epoch: Epoch) -> Result<(), Error> { + pub async fn get_beacon_rewards_blocks( + &self, + block_id: BlockId, + ) -> Result, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
.push("beacon") .push("rewards") - .push("blocks"); - - path.query_pairs_mut() - .append_pair("epoch", &epoch.to_string()); + .push("blocks") + .push(&block_id.to_string()); self.get(path).await } From fd82ee2f8101887623d7b7a2af919c4089685fa3 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 17 Apr 2025 14:46:43 +1000 Subject: [PATCH 08/13] Release v7.0.0-beta.7 (#7333) --- Cargo.lock | 8 ++++---- beacon_node/Cargo.toml | 2 +- boot_node/Cargo.toml | 2 +- common/lighthouse_version/src/lib.rs | 6 +++--- lcli/Cargo.toml | 2 +- lighthouse/Cargo.toml | 2 +- 6 files changed, 11 insertions(+), 11 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0531161684..e4473348cd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -860,7 +860,7 @@ dependencies = [ [[package]] name = "beacon_node" -version = "7.0.0-beta.6" +version = "7.0.0-beta.7" dependencies = [ "account_utils", "beacon_chain", @@ -1108,7 +1108,7 @@ dependencies = [ [[package]] name = "boot_node" -version = "7.0.0-beta.6" +version = "7.0.0-beta.7" dependencies = [ "beacon_node", "bytes", @@ -4813,7 +4813,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lcli" -version = "7.0.0-beta.6" +version = "7.0.0-beta.7" dependencies = [ "account_utils", "beacon_chain", @@ -5368,7 +5368,7 @@ dependencies = [ [[package]] name = "lighthouse" -version = "7.0.0-beta.6" +version = "7.0.0-beta.7" dependencies = [ "account_manager", "account_utils", diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 5390615291..7ab29d37db 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "beacon_node" -version = "7.0.0-beta.6" +version = "7.0.0-beta.7" authors = [ "Paul Hauner ", "Age Manning "] edition = { workspace = true } diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index 64e151301a..15c99080d8 100644 --- a/common/lighthouse_version/src/lib.rs +++ 
b/common/lighthouse_version/src/lib.rs @@ -17,8 +17,8 @@ pub const VERSION: &str = git_version!( // NOTE: using --match instead of --exclude for compatibility with old Git "--match=thiswillnevermatchlol" ], - prefix = "Lighthouse/v7.0.0-beta.6-", - fallback = "Lighthouse/v7.0.0-beta.6" + prefix = "Lighthouse/v7.0.0-beta.7-", + fallback = "Lighthouse/v7.0.0-beta.7" ); /// Returns the first eight characters of the latest commit hash for this build. @@ -54,7 +54,7 @@ pub fn version_with_platform() -> String { /// /// `1.5.1` pub fn version() -> &'static str { - "7.0.0-beta.6" + "7.0.0-beta.7" } /// Returns the name of the current client running. diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index ba958bdeed..d9dd8c1e3e 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lcli" description = "Lighthouse CLI (modeled after zcli)" -version = "7.0.0-beta.6" +version = "7.0.0-beta.7" authors = ["Paul Hauner "] edition = { workspace = true } diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 9fc788dc28..384720cf73 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lighthouse" -version = "7.0.0-beta.6" +version = "7.0.0-beta.7" authors = ["Sigma Prime "] edition = { workspace = true } autotests = false From c32569ab83bb736ce40b7c2df9975d09aac67a6f Mon Sep 17 00:00:00 2001 From: Mac L Date: Thu, 17 Apr 2025 18:18:45 +1000 Subject: [PATCH 09/13] Restore HTTP API logging and add more metrics (#7225) #7124 - Restores previous HTTP logging with tracing compatible syntax - Adds metrics for certain missing endpoints (and alphabetized the existing ones) --- beacon_node/http_api/src/lib.rs | 102 ++++++++++++++++++++++++++------ 1 file changed, 83 insertions(+), 19 deletions(-) diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 412b756684..386d9fe33a 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -215,35 +215,66 @@ pub fn 
prometheus_metrics() -> warp::filters::log::Log warp::filters::log::Log warp::filters::log::Log { + warp::log::custom(move |info| { + let status = info.status(); + // Ensure elapsed time is in milliseconds. + let elapsed = info.elapsed().as_secs_f64() * 1000.0; + let path = info.path(); + let method = info.method().to_string(); + + if status == StatusCode::OK + || status == StatusCode::NOT_FOUND + || status == StatusCode::PARTIAL_CONTENT + { + debug!( + elapsed_ms = %elapsed, + status = %status, + path = %path, + method = %method, + "Processed HTTP API request" + ); + } else { + warn!( + elapsed_ms = %elapsed, + status = %status, + path = %path, + method = %method, + "Error processing HTTP API request" + ); + } + }) +} + /// Creates a server that will serve requests using information from `ctx`. /// /// The server will shut down gracefully when the `shutdown` future resolves. @@ -4884,6 +4947,7 @@ pub fn serve( ), ) .recover(warp_utils::reject::handle_rejection) + .with(tracing_logging()) .with(prometheus_metrics()) // Add a `Server` header. 
.map(|reply| warp::reply::with_header(reply, "Server", &version_with_platform())) From 410af7c5f5dcb58bc602e543478b36edf64a47e9 Mon Sep 17 00:00:00 2001 From: Varun Doshi <61531351+varun-doshi@users.noreply.github.com> Date: Thu, 17 Apr 2025 15:01:23 +0530 Subject: [PATCH 10/13] feat: update mainnet bootnodes (#7279) Fixes #7266 Updates mainnet bootnodes and added 2 new Teku bootnodes --- .../mainnet/boot_enr.yaml | 54 ++++++++++++------- lighthouse/tests/beacon_node.rs | 2 +- 2 files changed, 35 insertions(+), 21 deletions(-) diff --git a/common/eth2_network_config/built_in_network_configs/mainnet/boot_enr.yaml b/common/eth2_network_config/built_in_network_configs/mainnet/boot_enr.yaml index 1ae519387a..70aeaac9c5 100644 --- a/common/eth2_network_config/built_in_network_configs/mainnet/boot_enr.yaml +++ b/common/eth2_network_config/built_in_network_configs/mainnet/boot_enr.yaml @@ -1,20 +1,34 @@ -# Lighthouse Team (Sigma Prime) -- enr:-Le4QPUXJS2BTORXxyx2Ia-9ae4YqA_JWX3ssj4E_J-3z1A-HmFGrU8BpvpqhNabayXeOZ2Nq_sbeDgtzMJpLLnXFgAChGV0aDKQtTA_KgEAAAAAIgEAAAAAAIJpZIJ2NIJpcISsaa0Zg2lwNpAkAIkHAAAAAPA8kv_-awoTiXNlY3AyNTZrMaEDHAD2JKYevx89W0CcFJFiskdcEzkH_Wdv9iW42qLK79ODdWRwgiMohHVkcDaCI4I -- enr:-Le4QLHZDSvkLfqgEo8IWGG96h6mxwe_PsggC20CL3neLBjfXLGAQFOPSltZ7oP6ol54OvaNqO02Rnvb8YmDR274uq8ChGV0aDKQtTA_KgEAAAAAIgEAAAAAAIJpZIJ2NIJpcISLosQxg2lwNpAqAX4AAAAAAPA8kv_-ax65iXNlY3AyNTZrMaEDBJj7_dLFACaxBfaI8KZTh_SSJUjhyAyfshimvSqo22WDdWRwgiMohHVkcDaCI4I -- enr:-Le4QH6LQrusDbAHPjU_HcKOuMeXfdEB5NJyXgHWFadfHgiySqeDyusQMvfphdYWOzuSZO9Uq2AMRJR5O4ip7OvVma8BhGV0aDKQtTA_KgEAAAAAIgEAAAAAAIJpZIJ2NIJpcISLY9ncg2lwNpAkAh8AgQIBAAAAAAAAAAmXiXNlY3AyNTZrMaECDYCZTZEksF-kmgPholqgVt8IXr-8L7Nu7YrZ7HUpgxmDdWRwgiMohHVkcDaCI4I -- enr:-Le4QIqLuWybHNONr933Lk0dcMmAB5WgvGKRyDihy1wHDIVlNuuztX62W51voT4I8qD34GcTEOTmag1bcdZ_8aaT4NUBhGV0aDKQtTA_KgEAAAAAIgEAAAAAAIJpZIJ2NIJpcISLY04ng2lwNpAkAh8AgAIBAAAAAAAAAA-fiXNlY3AyNTZrMaEDscnRV6n1m-D9ID5UsURk0jsoKNXt1TIrj8uKOGW6iluDdWRwgiMohHVkcDaCI4I -# EF Team -- 
enr:-Ku4QHqVeJ8PPICcWk1vSn_XcSkjOkNiTg6Fmii5j6vUQgvzMc9L1goFnLKgXqBJspJjIsB91LTOleFmyWWrFVATGngBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhAMRHkWJc2VjcDI1NmsxoQKLVXFOhp2uX6jeT0DvvDpPcU8FWMjQdR4wMuORMhpX24N1ZHCCIyg -- enr:-Ku4QG-2_Md3sZIAUebGYT6g0SMskIml77l6yR-M_JXc-UdNHCmHQeOiMLbylPejyJsdAPsTHJyjJB2sYGDLe0dn8uYBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhBLY-NyJc2VjcDI1NmsxoQORcM6e19T1T9gi7jxEZjk_sjVLGFscUNqAY9obgZaxbIN1ZHCCIyg -- enr:-Ku4QPn5eVhcoF1opaFEvg1b6JNFD2rqVkHQ8HApOKK61OIcIXD127bKWgAtbwI7pnxx6cDyk_nI88TrZKQaGMZj0q0Bh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhDayLMaJc2VjcDI1NmsxoQK2sBOLGcUb4AwuYzFuAVCaNHA-dy24UuEKkeFNgCVCsIN1ZHCCIyg -- enr:-Ku4QEWzdnVtXc2Q0ZVigfCGggOVB2Vc1ZCPEc6j21NIFLODSJbvNaef1g4PxhPwl_3kax86YPheFUSLXPRs98vvYsoBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhDZBrP2Jc2VjcDI1NmsxoQM6jr8Rb1ktLEsVcKAPa08wCsKUmvoQ8khiOl_SLozf9IN1ZHCCIyg -# Teku team (Consensys) -- enr:-KG4QNTx85fjxABbSq_Rta9wy56nQ1fHK0PewJbGjLm1M4bMGx5-3Qq4ZX2-iFJ0pys_O90sVXNNOxp2E7afBsGsBrgDhGV0aDKQu6TalgMAAAD__________4JpZIJ2NIJpcIQEnfA2iXNlY3AyNTZrMaECGXWQ-rQ2KZKRH1aOW4IlPDBkY4XDphxg9pxKytFCkayDdGNwgiMog3VkcIIjKA -- enr:-KG4QF4B5WrlFcRhUU6dZETwY5ZzAXnA0vGC__L1Kdw602nDZwXSTs5RFXFIFUnbQJmhNGVU6OIX7KVrCSTODsz1tK4DhGV0aDKQu6TalgMAAAD__________4JpZIJ2NIJpcIQExNYEiXNlY3AyNTZrMaECQmM9vp7KhaXhI-nqL_R0ovULLCFSFTa9CPPSdb1zPX6DdGNwgiMog3VkcIIjKA -# Prysm team (Prysmatic Labs) -- enr:-Ku4QImhMc1z8yCiNJ1TyUxdcfNucje3BGwEHzodEZUan8PherEo4sF7pPHPSIB1NNuSg5fZy7qFsjmUKs2ea1Whi0EBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpD1pf1CAAAAAP__________gmlkgnY0gmlwhBLf22SJc2VjcDI1NmsxoQOVphkDqal4QzPMksc5wnpuC3gvSC8AfbFOnZY_On34wIN1ZHCCIyg -- enr:-Ku4QP2xDnEtUXIjzJ_DhlCRN9SN99RYQPJL92TMlSv7U5C1YnYLjwOQHgZIUXw6c-BvRg2Yc2QsZxxoS_pPRVe0yK8Bh2F0dG5ldHOIAAAAAAAAAACEZXRoMpD1pf1CAAAAAP__________gmlkgnY0gmlwhBLf22SJc2VjcDI1NmsxoQMeFF5GrS7UZpAH2Ly84aLK-TyvH-dRo0JM1i8yygH50YN1ZHCCJxA -- 
enr:-Ku4QPp9z1W4tAO8Ber_NQierYaOStqhDqQdOPY3bB3jDgkjcbk6YrEnVYIiCBbTxuar3CzS528d2iE7TdJsrL-dEKoBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpD1pf1CAAAAAP__________gmlkgnY0gmlwhBLf22SJc2VjcDI1NmsxoQMw5fqqkw2hHC4F5HZZDPsNmPdB1Gi8JPQK7pRc9XHh-oN1ZHCCKvg -# Nimbus team -- enr:-LK4QA8FfhaAjlb_BXsXxSfiysR7R52Nhi9JBt4F8SPssu8hdE1BXQQEtVDC3qStCW60LSO7hEsVHv5zm8_6Vnjhcn0Bh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhAN4aBKJc2VjcDI1NmsxoQJerDhsJ-KxZ8sHySMOCmTO6sHM3iCFQ6VMvLTe948MyYN0Y3CCI4yDdWRwgiOM -- enr:-LK4QKWrXTpV9T78hNG6s8AM6IO4XH9kFT91uZtFg1GcsJ6dKovDOr1jtAAFPnS2lvNltkOGA9k29BUN7lFh_sjuc9QBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhANAdd-Jc2VjcDI1NmsxoQLQa6ai7y9PMN5hpLe5HmiJSlYzMuzP7ZhwRiwHvqNXdoN0Y3CCI4yDdWRwgiOM +# Eth mainnet consensus layer bootnodes +# --------------------------------------- +# 1. Tag nodes with maintainer +# 2. Keep nodes updated +# 3. Review PRs: check ENR duplicates, fork-digest, connection. + +# Teku team's bootnodes +- enr:-Iu4QLm7bZGdAt9NSeJG0cEnJohWcQTQaI9wFLu3Q7eHIDfrI4cwtzvEW3F3VbG9XdFXlrHyFGeXPn9snTCQJ9bnMRABgmlkgnY0gmlwhAOTJQCJc2VjcDI1NmsxoQIZdZD6tDYpkpEfVo5bgiU8MGRjhcOmHGD2nErK0UKRrIN0Y3CCIyiDdWRwgiMo # 3.147.37.0 | aws-us-east-2-ohio +- enr:-Iu4QEDJ4Wa_UQNbK8Ay1hFEkXvd8psolVK6OhfTL9irqz3nbXxxWyKwEplPfkju4zduVQj6mMhUCm9R2Lc4YM5jPcIBgmlkgnY0gmlwhANrfESJc2VjcDI1NmsxoQJCYz2-nsqFpeEj6eov9HSi9QssIVIVNr0I89J1vXM9foN0Y3CCIyiDdWRwgiMo # 3.107.124.68 | aws-ap-southeast-2-sydney + +# Prylab team's bootnodes +- enr:-Ku4QImhMc1z8yCiNJ1TyUxdcfNucje3BGwEHzodEZUan8PherEo4sF7pPHPSIB1NNuSg5fZy7qFsjmUKs2ea1Whi0EBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpD1pf1CAAAAAP__________gmlkgnY0gmlwhBLf22SJc2VjcDI1NmsxoQOVphkDqal4QzPMksc5wnpuC3gvSC8AfbFOnZY_On34wIN1ZHCCIyg # 18.223.219.100 | aws-us-east-2-ohio +- enr:-Ku4QP2xDnEtUXIjzJ_DhlCRN9SN99RYQPJL92TMlSv7U5C1YnYLjwOQHgZIUXw6c-BvRg2Yc2QsZxxoS_pPRVe0yK8Bh2F0dG5ldHOIAAAAAAAAAACEZXRoMpD1pf1CAAAAAP__________gmlkgnY0gmlwhBLf22SJc2VjcDI1NmsxoQMeFF5GrS7UZpAH2Ly84aLK-TyvH-dRo0JM1i8yygH50YN1ZHCCJxA # 
18.223.219.100 | aws-us-east-2-ohio +- enr:-Ku4QPp9z1W4tAO8Ber_NQierYaOStqhDqQdOPY3bB3jDgkjcbk6YrEnVYIiCBbTxuar3CzS528d2iE7TdJsrL-dEKoBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpD1pf1CAAAAAP__________gmlkgnY0gmlwhBLf22SJc2VjcDI1NmsxoQMw5fqqkw2hHC4F5HZZDPsNmPdB1Gi8JPQK7pRc9XHh-oN1ZHCCKvg # 18.223.219.100 | aws-us-east-2-ohio + +# Lighthouse team (Sigma Prime) +- enr:-Le4QPUXJS2BTORXxyx2Ia-9ae4YqA_JWX3ssj4E_J-3z1A-HmFGrU8BpvpqhNabayXeOZ2Nq_sbeDgtzMJpLLnXFgAChGV0aDKQtTA_KgEAAAAAIgEAAAAAAIJpZIJ2NIJpcISsaa0Zg2lwNpAkAIkHAAAAAPA8kv_-awoTiXNlY3AyNTZrMaEDHAD2JKYevx89W0CcFJFiskdcEzkH_Wdv9iW42qLK79ODdWRwgiMohHVkcDaCI4I # 172.105.173.25 | linode-au-sydney +- enr:-Le4QLHZDSvkLfqgEo8IWGG96h6mxwe_PsggC20CL3neLBjfXLGAQFOPSltZ7oP6ol54OvaNqO02Rnvb8YmDR274uq8ChGV0aDKQtTA_KgEAAAAAIgEAAAAAAIJpZIJ2NIJpcISLosQxg2lwNpAqAX4AAAAAAPA8kv_-ax65iXNlY3AyNTZrMaEDBJj7_dLFACaxBfaI8KZTh_SSJUjhyAyfshimvSqo22WDdWRwgiMohHVkcDaCI4I # 139.162.196.49 | linode-uk-london +- enr:-Le4QH6LQrusDbAHPjU_HcKOuMeXfdEB5NJyXgHWFadfHgiySqeDyusQMvfphdYWOzuSZO9Uq2AMRJR5O4ip7OvVma8BhGV0aDKQtTA_KgEAAAAAIgEAAAAAAIJpZIJ2NIJpcISLY9ncg2lwNpAkAh8AgQIBAAAAAAAAAAmXiXNlY3AyNTZrMaECDYCZTZEksF-kmgPholqgVt8IXr-8L7Nu7YrZ7HUpgxmDdWRwgiMohHVkcDaCI4I # 139.99.217.220 | ovh-au-sydney +- enr:-Le4QIqLuWybHNONr933Lk0dcMmAB5WgvGKRyDihy1wHDIVlNuuztX62W51voT4I8qD34GcTEOTmag1bcdZ_8aaT4NUBhGV0aDKQtTA_KgEAAAAAIgEAAAAAAIJpZIJ2NIJpcISLY04ng2lwNpAkAh8AgAIBAAAAAAAAAA-fiXNlY3AyNTZrMaEDscnRV6n1m-D9ID5UsURk0jsoKNXt1TIrj8uKOGW6iluDdWRwgiMohHVkcDaCI4I # 139.99.78.39 | ovh-singapore + +# EF bootnodes +- enr:-Ku4QHqVeJ8PPICcWk1vSn_XcSkjOkNiTg6Fmii5j6vUQgvzMc9L1goFnLKgXqBJspJjIsB91LTOleFmyWWrFVATGngBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhAMRHkWJc2VjcDI1NmsxoQKLVXFOhp2uX6jeT0DvvDpPcU8FWMjQdR4wMuORMhpX24N1ZHCCIyg # 3.17.30.69 | aws-us-east-2-ohio +- 
enr:-Ku4QG-2_Md3sZIAUebGYT6g0SMskIml77l6yR-M_JXc-UdNHCmHQeOiMLbylPejyJsdAPsTHJyjJB2sYGDLe0dn8uYBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhBLY-NyJc2VjcDI1NmsxoQORcM6e19T1T9gi7jxEZjk_sjVLGFscUNqAY9obgZaxbIN1ZHCCIyg # 18.216.248.220 | aws-us-east-2-ohio +- enr:-Ku4QPn5eVhcoF1opaFEvg1b6JNFD2rqVkHQ8HApOKK61OIcIXD127bKWgAtbwI7pnxx6cDyk_nI88TrZKQaGMZj0q0Bh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhDayLMaJc2VjcDI1NmsxoQK2sBOLGcUb4AwuYzFuAVCaNHA-dy24UuEKkeFNgCVCsIN1ZHCCIyg # 54.178.44.198 | aws-ap-northeast-1-tokyo +- enr:-Ku4QEWzdnVtXc2Q0ZVigfCGggOVB2Vc1ZCPEc6j21NIFLODSJbvNaef1g4PxhPwl_3kax86YPheFUSLXPRs98vvYsoBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhDZBrP2Jc2VjcDI1NmsxoQM6jr8Rb1ktLEsVcKAPa08wCsKUmvoQ8khiOl_SLozf9IN1ZHCCIyg # 54.65.172.253 | aws-ap-northeast-1-tokyo + +# Nimbus team's bootnodes +- enr:-LK4QA8FfhaAjlb_BXsXxSfiysR7R52Nhi9JBt4F8SPssu8hdE1BXQQEtVDC3qStCW60LSO7hEsVHv5zm8_6Vnjhcn0Bh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhAN4aBKJc2VjcDI1NmsxoQJerDhsJ-KxZ8sHySMOCmTO6sHM3iCFQ6VMvLTe948MyYN0Y3CCI4yDdWRwgiOM # 3.120.104.18 | aws-eu-central-1-frankfurt +- enr:-LK4QKWrXTpV9T78hNG6s8AM6IO4XH9kFT91uZtFg1GcsJ6dKovDOr1jtAAFPnS2lvNltkOGA9k29BUN7lFh_sjuc9QBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhANAdd-Jc2VjcDI1NmsxoQLQa6ai7y9PMN5hpLe5HmiJSlYzMuzP7ZhwRiwHvqNXdoN0Y3CCI4yDdWRwgiOM # 3.64.117.223 | aws-eu-central-1-frankfurt + +# Lodestar team's bootnodes +- enr:-IS4QPi-onjNsT5xAIAenhCGTDl4z-4UOR25Uq-3TmG4V3kwB9ljLTb_Kp1wdjHNj-H8VVLRBSSWVZo3GUe3z6k0E-IBgmlkgnY0gmlwhKB3_qGJc2VjcDI1NmsxoQMvAfgB4cJXvvXeM6WbCG86CstbSxbQBSGx31FAwVtOTYN1ZHCCIyg # 160.119.254.161 | hostafrica-southafrica +- enr:-KG4QCb8NC3gEM3I0okStV5BPX7Bg6ZXTYCzzbYyEXUPGcZtHmvQtiJH4C4F2jG7azTcb9pN3JlgpfxAnRVFzJ3-LykBgmlkgnY0gmlwhFPlR9KDaXA2kP6AAAAAAAAAAlBW__4my5iJc2VjcDI1NmsxoQLdUv9Eo9sxCt0tc_CheLOWnX59yHJtkBSOL7kpxdJ6GYN1ZHCCIyiEdWRwNoIjKA # 83.229.71.210 | kamatera-telaviv-israel \ No newline at end 
of file diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index aad11c50d7..ea4716c010 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -1274,7 +1274,7 @@ fn default_backfill_rate_limiting_flag() { } #[test] fn default_boot_nodes() { - let number_of_boot_nodes = 15; + let number_of_boot_nodes = 17; CommandLineTest::new() .run_with_zero_port() From 80fe133d2c4ce7145a408c0b13582d2982cb2397 Mon Sep 17 00:00:00 2001 From: chonghe <44791194+chong-he@users.noreply.github.com> Date: Thu, 17 Apr 2025 17:31:26 +0800 Subject: [PATCH 11/13] Update Lighthouse Book for Electra features (#7280) * #7227 --- .github/workflows/release.yml | 4 +- account_manager/src/validator/exit.rs | 2 +- beacon_node/client/src/notifier.rs | 4 +- book/src/SUMMARY.md | 2 + book/src/advanced_blobs.md | 6 +- book/src/advanced_database_migrations.md | 12 +- book/src/archived.md | 2 +- ...nagement.md => archived_key_management.md} | 0 ...gration.md => archived_merge_migration.md} | 0 book/src/developers_architecture.md | 5 + book/src/faq.md | 125 +++--------------- book/src/imgs/developers_architecture.svg | 4 + book/src/mainnet_validator.md | 8 +- book/src/validator_consolidation.md | 30 +++++ book/src/validator_management.md | 2 +- book/src/validator_sweep.md | 4 + book/src/validator_voluntary_exit.md | 6 +- scripts/tests/doppelganger_protection.sh | 4 +- wordlist.txt | 2 + 19 files changed, 95 insertions(+), 127 deletions(-) rename book/src/{archived-key-management.md => archived_key_management.md} (100%) rename book/src/{archived-merge-migration.md => archived_merge_migration.md} (100%) create mode 100644 book/src/developers_architecture.md create mode 100644 book/src/imgs/developers_architecture.svg create mode 100644 book/src/validator_consolidation.md diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index cfba601fad..d8a52dd010 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml 
@@ -221,7 +221,7 @@ jobs: |Non-Staking Users| |---| *See [Update - Priorities](https://lighthouse-book.sigmaprime.io/installation-priorities.html) + Priorities](https://lighthouse-book.sigmaprime.io/installation_priorities.html) more information about this table.* ## All Changes @@ -230,7 +230,7 @@ jobs: ## Binaries - [See pre-built binaries documentation.](https://lighthouse-book.sigmaprime.io/installation-binaries.html) + [See pre-built binaries documentation.](https://lighthouse-book.sigmaprime.io/installation_binaries.html) The binaries are signed with Sigma Prime's PGP key: `15E66D941F697E28F49381F426416DC3F30674B0` diff --git a/account_manager/src/validator/exit.rs b/account_manager/src/validator/exit.rs index ea1a24da1f..8a2cdb8400 100644 --- a/account_manager/src/validator/exit.rs +++ b/account_manager/src/validator/exit.rs @@ -27,7 +27,7 @@ pub const PASSWORD_PROMPT: &str = "Enter the keystore password"; pub const DEFAULT_BEACON_NODE: &str = "http://localhost:5052/"; pub const CONFIRMATION_PHRASE: &str = "Exit my validator"; -pub const WEBSITE_URL: &str = "https://lighthouse-book.sigmaprime.io/voluntary-exit.html"; +pub const WEBSITE_URL: &str = "https://lighthouse-book.sigmaprime.io/validator_voluntary_exit.html"; pub fn cli_app() -> Command { Command::new("exit") diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs index d103d48dfb..53c9c85c00 100644 --- a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -353,7 +353,7 @@ async fn bellatrix_readiness_logging( if !beacon_chain.is_time_to_prepare_for_capella(current_slot) { error!( info = "you need an execution engine to validate blocks, see: \ - https://lighthouse-book.sigmaprime.io/merge-migration.html", + https://lighthouse-book.sigmaprime.io/archived_merge_migration.html", "Execution endpoint required" ); } @@ -433,7 +433,7 @@ async fn capella_readiness_logging( if !beacon_chain.is_time_to_prepare_for_deneb(current_slot) { error!( info = "you 
need a Capella enabled execution engine to validate blocks, see: \ - https://lighthouse-book.sigmaprime.io/merge-migration.html", + https://lighthouse-book.sigmaprime.io/archived_merge_migration.html", "Execution endpoint required" ); } diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md index 3d09e3a6a5..44f0861564 100644 --- a/book/src/SUMMARY.md +++ b/book/src/SUMMARY.md @@ -22,6 +22,7 @@ * [Doppelganger Protection](./validator_doppelganger.md) * [Suggested Fee Recipient](./validator_fee_recipient.md) * [Validator Graffiti](./validator_graffiti.md) + * [Consolidation](./validator_consolidation.md) * [APIs](./api.md) * [Beacon Node API](./api_bn.md) * [Lighthouse API](./api_lighthouse.md) @@ -61,6 +62,7 @@ * [Development Environment](./contributing_setup.md) * [FAQs](./faq.md) * [Protocol Developers](./developers.md) + * [Lighthouse Architecture](./developers_architecture.md) * [Security Researchers](./security.md) * [Archived](./archived.md) * [Merge Migration](./archived_merge_migration.md) diff --git a/book/src/advanced_blobs.md b/book/src/advanced_blobs.md index aa995b8e1d..524f70219f 100644 --- a/book/src/advanced_blobs.md +++ b/book/src/advanced_blobs.md @@ -6,7 +6,7 @@ In the Deneb network upgrade, one of the changes is the implementation of EIP-48 1. What is the storage requirement for blobs? - We expect an additional increase of ~50 GB of storage requirement for blobs (on top of what is required by the consensus and execution clients database). The calculation is as below: + After Deneb, we expect an additional increase of ~50 GB of storage requirement for blobs (on top of what is required by the consensus and execution clients database). The calculation is as below: One blob is 128 KB in size. Each block can carry a maximum of 6 blobs. Blobs will be kept for 4096 epochs and pruned afterwards. 
This means that the maximum increase in storage requirement will be: @@ -16,6 +16,8 @@ In the Deneb network upgrade, one of the changes is the implementation of EIP-48 However, the blob base fee targets 3 blobs per block and it works similarly to how EIP-1559 operates in the Ethereum gas fee. Therefore, practically it is very likely to average to 3 blobs per blocks, which translates to a storage requirement of 48 GB. + After Electra, the target blob count is increased to 6 blobs per block. This means blob storage is expected to use ~100 GB of disk space. + 1. Do I have to add any flags for blobs? No, you can use the default values for blob-related flags, which means you do not need add or remove any flags. @@ -25,7 +27,7 @@ In the Deneb network upgrade, one of the changes is the implementation of EIP-48 Use the flag `--prune-blobs false` in the beacon node. The storage requirement will be: ```text - 2**17 bytes * 3 blobs / block * 7200 blocks / day * 30 days = 79GB / month or 948GB / year + 2**17 bytes * 6 blobs / block * 7200 blocks / day * 30 days = 158GB / month or 1896GB / year ``` To keep blobs for a custom period, you may use the flag `--blob-prune-margin-epochs ` which keeps blobs for 4096+EPOCHS specified in the flag. diff --git a/book/src/advanced_database_migrations.md b/book/src/advanced_database_migrations.md index a9bfb00ccd..3c56fcadc1 100644 --- a/book/src/advanced_database_migrations.md +++ b/book/src/advanced_database_migrations.md @@ -7,7 +7,8 @@ been applied automatically and in a _backwards compatible_ way. However, backwards compatibility does not imply the ability to _downgrade_ to a prior version of Lighthouse after upgrading. To facilitate smooth downgrades, Lighthouse v2.3.0 and above includes a -command for applying database downgrades. +command for applying database downgrades. If a downgrade is available _from_ a schema version, +it is listed in the table below under the "Downgrade available?" header.
**Everything on this page applies to the Lighthouse _beacon node_, not to the validator client or the slasher**. @@ -16,12 +17,8 @@ validator client or the slasher**. | Lighthouse version | Release date | Schema version | Downgrade available? | |--------------------|--------------|----------------|----------------------| +| v7.0.0 | Apr 2025 | v22 | no | | v6.0.0 | Nov 2024 | v22 | no | -| v5.3.0 | Aug 2024 | v21 | yes | -| v5.2.0 | Jun 2024 | v19 | no | -| v5.1.0 | Mar 2024 | v19 | no | -| v5.0.0 | Feb 2024 | v19 | no | -| v4.6.0 | Dec 2023 | v19 | no | > **Note**: All point releases (e.g. v4.4.1) are schema-compatible with the prior minor release > (e.g. v4.4.0). @@ -209,8 +206,9 @@ Here are the steps to prune historic states: | Lighthouse version | Release date | Schema version | Downgrade available? | |--------------------|--------------|----------------|-------------------------------------| +| v7.0.0 | Apr 2025 | v22 | no | | v6.0.0 | Nov 2024 | v22 | no | -| v5.3.0 | Aug 2024 | v21 | yes | +| v5.3.0 | Aug 2024 | v21 | yes before Electra using <= v7.0.0 | | v5.2.0 | Jun 2024 | v19 | yes before Deneb using <= v5.2.1 | | v5.1.0 | Mar 2024 | v19 | yes before Deneb using <= v5.2.1 | | v5.0.0 | Feb 2024 | v19 | yes before Deneb using <= v5.2.1 | diff --git a/book/src/archived.md b/book/src/archived.md index 7b6e4b7e8e..d37cd9aa15 100644 --- a/book/src/archived.md +++ b/book/src/archived.md @@ -1,3 +1,3 @@ # Archived -This section keeps the topics that are deprecated or less applicable for archived purposes. +This section keeps the topics that are deprecated. Documentation in this section is for informational purposes only and will not be maintained. 
diff --git a/book/src/archived-key-management.md b/book/src/archived_key_management.md similarity index 100% rename from book/src/archived-key-management.md rename to book/src/archived_key_management.md diff --git a/book/src/archived-merge-migration.md b/book/src/archived_merge_migration.md similarity index 100% rename from book/src/archived-merge-migration.md rename to book/src/archived_merge_migration.md diff --git a/book/src/developers_architecture.md b/book/src/developers_architecture.md new file mode 100644 index 0000000000..1150525512 --- /dev/null +++ b/book/src/developers_architecture.md @@ -0,0 +1,5 @@ +# Lighthouse architecture + +A technical walkthrough of Lighthouse's architecture can be found at: [Lighthouse technical walkthrough](https://www.youtube.com/watch?v=pLHhTh_vGZ0) + +![Lighthouse architecture](imgs/developers_architecture.svg) diff --git a/book/src/faq.md b/book/src/faq.md index a741834501..b0dd696902 100644 --- a/book/src/faq.md +++ b/book/src/faq.md @@ -17,7 +17,6 @@ ## [Validator](#validator-1) -- [Why does it take so long for a validator to be activated?](#vc-activation) - [Can I use redundancy in my staking setup?](#vc-redundancy) - [I am missing attestations. Why?](#vc-missed-attestations) - [Sometimes I miss the attestation head vote, resulting in penalty. Is this normal?](#vc-head-vote) @@ -112,10 +111,7 @@ After checkpoint forwards sync completes, the beacon node will start to download INFO Downloading historical blocks est_time: --, distance: 4524545 slots (89 weeks 5 days), service: slot_notifier ``` -If the same log appears every minute and you do not see progress in downloading historical blocks, you can try one of the followings: - -- Check the number of peers you are connected to. If you have low peers (less than 50), try to do port forwarding on the ports 9000 TCP/UDP and 9001 UDP to increase peer count. -- Restart the beacon node. 
+If the same log appears every minute and you do not see progress in downloading historical blocks, check the number of peers you are connected to. If you have low peers (less than 50), try to do port forwarding on the ports 9000 TCP/UDP and 9001 UDP to increase peer count. ### I proposed a block but the beacon node shows `could not publish message` with error `duplicate` as below, should I be worried? @@ -154,29 +150,13 @@ This is a normal behaviour. Since [v4.1.0](https://github.com/sigp/lighthouse/re ### My beacon node logs `WARN Error processing HTTP API request`, what should I do? -This warning usually comes with an http error code. Some examples are given below: +An example of the log is shown below -1. The log shows: +```text +WARN Error processing HTTP API request method: GET, path: /eth/v1/validator/attestation_data, status: 500 Internal Server Error, elapsed: 305.65µs +``` - ```text - WARN Error processing HTTP API request method: GET, path: /eth/v1/validator/attestation_data, status: 500 Internal Server Error, elapsed: 305.65µs - ``` - - The error is `500 Internal Server Error`. This suggests that the execution client is not synced. Once the execution client is synced, the error will disappear. - -1. The log shows: - - ```text - WARN Error processing HTTP API request method: POST, path: /eth/v1/validator/duties/attester/199565, status: 503 Service Unavailable, elapsed: 96.787µs - ``` - - The error is `503 Service Unavailable`. This means that the beacon node is still syncing. When this happens, the validator client will log: - - ```text - ERRO Failed to download attester duties err: FailedToDownloadAttesters("Some endpoints failed, num_failed: 2 http://localhost:5052/ => Unavailable(NotSynced), http://localhost:5052/ => RequestFailed(ServerMessage(ErrorMessage { code: 503, message: \"SERVICE_UNAVAILABLE: beacon node is syncing - ``` - - This means that the validator client is sending requests to the beacon node. 
However, as the beacon node is still syncing, it is therefore unable to fulfil the request. The error will disappear once the beacon node is synced. +This warning usually happens when the validator client sends a request to the beacon node, but the beacon node is unable to fulfil the request. This can happen because the execution client is not synced or is still syncing, and/or because the beacon node is syncing. The error should go away when the node is synced. ### My beacon node logs `WARN Error signalling fork choice waiter`, what should I do? @@ -190,13 +170,21 @@ This suggests that the computer resources are being overwhelmed. It could be due ### My beacon node logs `ERRO Aggregate attestation queue full`, what should I do? -An example of the full log is shown below: +Some examples of the full log are shown below: ```text ERRO Aggregate attestation queue full, queue_len: 4096, msg: the system has insufficient resources for load, module: network::beacon_processor:1542 +ERRO Attestation delay queue is full msg: system resources may be saturated, queue_size: 16384, service: bproc ``` -This suggests that the computer resources are being overwhelmed. It could be due to high CPU usage or high disk I/O usage. This can happen, e.g., when the beacon node is downloading historical blocks, or when the execution client is syncing. The error will disappear when the resources used return to normal or when the node is synced. +This suggests that the computer resources are being overwhelmed. It could be due to high CPU usage or high disk I/O usage. Some common reasons are: + +- when the beacon node is downloading historical blocks +- the execution client is syncing +- disk IO is being overwhelmed +- parallel API queries to the beacon node + +If the node is syncing or downloading historical blocks, the error should disappear when the resources used return to normal or when the node is synced. ### My beacon node logs `WARN Failed to finalize deposit cache`, what should I do?
@@ -204,77 +192,6 @@ This is a known [bug](https://github.com/sigp/lighthouse/issues/3707) that will ## Validator -### Why does it take so long for a validator to be activated? - -After validators create their execution layer deposit transaction there are two waiting -periods before they can start producing blocks and attestations: - -1. Waiting for the beacon chain to recognise the execution layer block containing the - deposit (generally takes ~13.6 hours). -1. Waiting in the queue for validator activation. - -Detailed answers below: - -#### 1. Waiting for the beacon chain to detect the execution layer deposit - -Since the beacon chain uses the execution layer for validator on-boarding, beacon chain -validators must listen to event logs from the deposit contract. Since the -latest blocks of the execution chain are vulnerable to re-orgs due to minor network -partitions, beacon nodes follow the execution chain at a distance of 2048 blocks -(~6.8 hours) (see -[`ETH1_FOLLOW_DISTANCE`](https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/phase0/validator.md#process-deposit)). -This follow distance protects the beacon chain from on-boarding validators that -are likely to be removed due to an execution chain re-org. - -Now we know there's a 6.8 hours delay before the beacon nodes even _consider_ an -execution layer block. Once they _are_ considering these blocks, there's a voting period -where beacon validators vote on which execution block hash to include in the beacon chain. This -period is defined as 64 epochs (~6.8 hours, see -[`ETH1_VOTING_PERIOD`](https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/phase0/beacon-chain.md#time-parameters)). 
-During this voting period, each beacon block producer includes an -[`Eth1Data`](https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/phase0/beacon-chain.md#eth1data) -in their block which counts as a vote towards what that validator considers to -be the head of the execution chain at the start of the voting period (with respect -to `ETH1_FOLLOW_DISTANCE`, of course). You can see the exact voting logic -[here](https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/phase0/validator.md#eth1-data). - -These two delays combined represent the time between an execution layer deposit being -included in an execution data vote and that validator appearing in the beacon chain. -The `ETH1_FOLLOW_DISTANCE` delay causes a minimum delay of ~6.8 hours and -`ETH1_VOTING_PERIOD` means that if a validator deposit happens just _before_ -the start of a new voting period then they might not notice this delay at all. -However, if the validator deposit happens just _after_ the start of the new -voting period the validator might have to wait ~6.8 hours for next voting -period. In times of very severe network issues, the network may even fail -to vote in new execution layer blocks, thus stopping all new validator deposits and causing the wait to be longer. - -#### 2. Waiting for a validator to be activated - -If a validator has provided an invalid public key or signature, they will -_never_ be activated. -They will simply be forgotten by the beacon chain! But, if those parameters were -correct, once the execution layer delays have elapsed and the validator appears in the -beacon chain, there's _another_ delay before the validator becomes "active" -(canonical definition -[here](https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/phase0/beacon-chain.md#is_active_validator)) and can start producing blocks and attestations. 
- -Firstly, the validator won't become active until their beacon chain balance is -equal to or greater than -[`MAX_EFFECTIVE_BALANCE`](https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/phase0/beacon-chain.md#gwei-values) -(32 ETH on mainnet, usually 3.2 ETH on testnets). Once this balance is reached, -the validator must wait until the start of the next epoch (up to 6.4 minutes) -for the -[`process_registry_updates`](https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/phase0/beacon-chain.md#registry-updates) -routine to run. This routine activates validators with respect to a [churn -limit](https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/phase0/beacon-chain.md#get_validator_churn_limit); -it will only allow the number of validators to increase (churn) by a certain -amount. If a new validator isn't within the churn limit from the front of the queue, -they will need to wait another epoch (6.4 minutes) for their next chance. This -repeats until the queue is cleared. The churn limit for validators joining the beacon chain is capped at 8 per epoch or 1800 per day. If, for example, there are 9000 validators waiting to be activated, this means that the waiting time can take up to 5 days. - -Once a validator has been activated, congratulations! It's time to -produce blocks and attestations! - ### Can I use redundancy in my staking setup? 
You should **never** use duplicate/redundant validator keypairs or validator clients (i.e., don't @@ -299,15 +216,15 @@ Another cause for missing attestations is the block arriving late, or there are An example of the log: (debug logs can be found under `$datadir/beacon/logs`): ```text -Delayed head block, set_as_head_time_ms: 27, imported_time_ms: 168, attestable_delay_ms: 4209, available_delay_ms: 4186, execution_time_ms: 201, blob_delay_ms: 3815, observed_delay_ms: 3984, total_delay_ms: 4381, slot: 1886014, proposer_index: 733, block_root: 0xa7390baac88d50f1cbb5ad81691915f6402385a12521a670bbbd4cd5f8bf3934, service: beacon, module: beacon_chain::canonical_head:1441 +DEBG Delayed head block, set_as_head_time_ms: 37, imported_time_ms: 1824, attestable_delay_ms: 3660, available_delay_ms: 3491, execution_time_ms: 78, consensus_time_ms: 161, blob_delay_ms: 3291, observed_delay_ms: 3250, total_delay_ms: 5352, slot: 11429888, proposer_index: 778696, block_root: 0x34cc0675ad5fd052699af2ff37b858c3eb8186c5b29fdadb1dabd246caf79e43, service: beacon, module: beacon_chain::canonical_head:1440 ``` -The field to look for is `attestable_delay`, which defines the time when a block is ready for the validator to attest. If the `attestable_delay` is greater than 4s which has past the window of attestation, the attestation will fail. In the above example, the delay is mostly caused by late block observed by the node, as shown in `observed_delay`. The `observed_delay` is determined mostly by the proposer and partly by your networking setup (e.g., how long it took for the node to receive the block). Ideally, `observed_delay` should be less than 3 seconds. In this example, the validator failed to attest the block due to the block arriving late. +The field to look for is `attestable_delay`, which defines the time when a block is ready for the validator to attest. If the `attestable_delay` is greater than 4s then it has missed the window for attestation, and the attestation will fail. 
In the above example, the delay is mostly caused by a late block observed by the node, as shown in `observed_delay`. The `observed_delay` is determined mostly by the proposer and partly by your networking setup (e.g., how long it took for the node to receive the block). Ideally, `observed_delay` should be less than 3 seconds. In this example, the validator failed to attest to the block due to the block arriving late. Another example of log: ``` -DEBG Delayed head block, set_as_head_time_ms: 22, imported_time_ms: 312, attestable_delay_ms: 7052, available_delay_ms: 6874, execution_time_ms: 4694, blob_delay_ms: 2159, observed_delay_ms: 2179, total_delay_ms: 7209, slot: 1885922, proposer_index: 606896, block_root: 0x9966df24d24e722d7133068186f0caa098428696e9f441ac416d0aca70cc0a23, service: beacon, module: beacon_chain::canonical_head:1441 +DEBG Delayed head block, set_as_head_time_ms: 22, imported_time_ms: 312, attestable_delay_ms: 7052, available_delay_ms: 6874, execution_time_ms: 4694, consensus_time_ms: 232, blob_delay_ms: 2159, observed_delay_ms: 2179, total_delay_ms: 7209, slot: 1885922, proposer_index: 606896, block_root: 0x9966df24d24e722d7133068186f0caa098428696e9f441ac416d0aca70cc0a23, service: beacon, module: beacon_chain::canonical_head:1441 /159.69.68.247/tcp/9000, service: libp2p, module: lighthouse_network::service:1811 ``` @@ -323,7 +240,7 @@ Another possible reason for missing the head vote is due to a chain "reorg". A r ### Can I submit a voluntary exit message without running a beacon node? -Yes. Beaconcha.in provides the tool to broadcast the message. You can create the voluntary exit message file with [ethdo](https://github.com/wealdtech/ethdo/releases/tag/v1.30.0) and submit the message via the [beaconcha.in](https://beaconcha.in/tools/broadcast) website. A guide on how to use `ethdo` to perform voluntary exit can be found [here](https://github.com/eth-educators/ethstaker-guides/blob/main/docs/voluntary-exit.md). +Yes. 
Beaconcha.in provides the tool to broadcast the message. You can create the voluntary exit message file with [ethdo](https://github.com/wealdtech/ethdo/releases) and submit the message via the [beaconcha.in](https://beaconcha.in/tools/broadcast) website. A guide on how to use `ethdo` to perform voluntary exit can be found [here](https://github.com/eth-educators/ethstaker-guides/blob/main/docs/voluntary-exit.md). It is also noted that you can submit your BLS-to-execution-change message to update your withdrawal credentials from type `0x00` to `0x01` using the same link. @@ -345,7 +262,7 @@ If you do not want to stop `lighthouse vc`, you can use the [key manager API](./ ### How can I delete my validator once it is imported? -Lighthouse supports the [KeyManager API](https://ethereum.github.io/keymanager-APIs/#/Local%20Key%20Manager/deleteKeys) to delete validators and remove them from the `validator_definitions.yml` file. To do so, start the validator client with the flag `--http` and call the API. +You can use the `lighthouse vm delete` command to delete validator keys, see [validator manager delete](./validator_manager_api.md#delete). If you are looking to delete the validators in one node and import them to another, you can use the [validator-manager](./validator_manager_move.md) to move the validators across nodes without the hassle of deleting and importing the keys. diff --git a/book/src/imgs/developers_architecture.svg b/book/src/imgs/developers_architecture.svg new file mode 100644 index 0000000000..66c9c0ec89 --- /dev/null +++ b/book/src/imgs/developers_architecture.svg @@ -0,0 +1,4 @@ + + + +
p2p network
p2p network
rust-libp2p
rust-libp2p
lighthouse_network
lighthouse_network
gossipsub
gossipsub
http_api
http_api
validator client
validator client
crypto
crypto
bls
bls
blst
blst
kzg
kzg
ckzg
ckzg
discv5
discv5
slasher
slasher
store
store
execution_layer
execution_layer
execution client
execution client
operation_pool
operation_pool
mev-boost
mev-boost
builder_client
builder_client
beacon_processor
beacon_processor
tokio
tokio
network
network
gossip_methods
gossip_methods
rpc_methods
rpc_methods
sync
sync
beacon_chain
beacon_chain
block_verification
block_verification
attestation_verification
attestation_verificati...
blob_verification
blob_verification
blob_verification
blob_verification
light_client_*
light_client_*
block_verification
block_verification
import_block
import_block
produce_block
produce_block
Linux/macOS/Windows
Linux/macOS/Windows
Legend
Legend
= internal crate
= internal crate
= external crate
= external crate
= file
= file
= function/method
= function/method
= external service/component
= external service/compone...
consensus
consensus
types
types
state_processing
state_processing
ethereum_ssz
ethereum_ssz
tree_hash
tree_hash
milhouse
milhouse
fork_choice
fork_choice
merkle_proof
merkle_proof
sha2
sha2
leveldb
leveldb
\ No newline at end of file diff --git a/book/src/mainnet_validator.md b/book/src/mainnet_validator.md index d21d49f0c9..ba35ba6f12 100644 --- a/book/src/mainnet_validator.md +++ b/book/src/mainnet_validator.md @@ -33,7 +33,7 @@ There are five primary steps to become a validator: 1. [Start an execution client and Lighthouse beacon node](#step-2-start-an-execution-client-and-lighthouse-beacon-node) 1. [Import validator keys into Lighthouse](#step-3-import-validator-keys-to-lighthouse) 1. [Start Lighthouse validator client](#step-4-start-lighthouse-validator-client) -1. [Submit deposit](#step-5-submit-deposit-32eth-per-validator) +1. [Submit deposit](#step-5-submit-deposit-a-minimum-of-32eth-to-activate-one-validator) > **Important note**: The guide below contains both mainnet and testnet instructions. We highly recommend *all* users to **run a testnet validator** prior to staking mainnet ETH. By far, the best technical learning experience is to run a testnet validator. You can get hands-on experience with all the tools and it's a great way to test your staking hardware. 32 ETH is a significant outlay and joining a testnet is a great way to "try before you buy". @@ -151,13 +151,13 @@ Once this log appears (and there are no errors) the `lighthouse vc` application will ensure that the validator starts performing its duties and being rewarded by the protocol. -### Step 5: Submit deposit (32ETH per validator) +### Step 5: Submit deposit (a minimum of 32ETH to activate one validator) -After you have successfully run and synced the execution client, beacon node and validator client, you can now proceed to submit the deposit. Go to the mainnet [Staking launchpad](https://launchpad.ethereum.org/en/) (or [Holesky staking launchpad](https://holesky.launchpad.ethereum.org/en/) for testnet validator) and carefully go through the steps to becoming a validator. Once you are ready, you can submit the deposit by sending 32ETH per validator to the deposit contract. 
Upload the `deposit_data-*.json` file generated in [Step 1](#step-1-create-validator-keys) to the Staking launchpad. +After you have successfully run and synced the execution client, beacon node and validator client, you can now proceed to submit the deposit. Go to the mainnet [Staking launchpad](https://launchpad.ethereum.org/en/) (or [Holesky staking launchpad](https://holesky.launchpad.ethereum.org/en/) for testnet validator) and carefully go through the steps to becoming a validator. Once you are ready, you can submit the deposit by sending ETH to the deposit contract. Upload the `deposit_data-*.json` file generated in [Step 1](#step-1-create-validator-keys) to the Staking launchpad. > **Important note:** Double check that the deposit contract for mainnet is `0x00000000219ab540356cBB839Cbe05303d7705Fa` before you confirm the transaction. -Once the deposit transaction is confirmed, it will take a minimum of ~16 hours to a few days/weeks for the beacon chain to process and activate your validator, depending on the queue. Refer to our [FAQ - Why does it take so long for a validator to be activated](./faq.md#why-does-it-take-so-long-for-a-validator-to-be-activated) for more info. +Once the deposit transaction is confirmed, it will take a minimum of ~13 minutes to a few days to activate your validator, depending on the queue. Once your validator is activated, the validator client will start to publish attestations each epoch: diff --git a/book/src/validator_consolidation.md b/book/src/validator_consolidation.md new file mode 100644 index 0000000000..10ab5bd97d --- /dev/null +++ b/book/src/validator_consolidation.md @@ -0,0 +1,30 @@ +# Consolidation + +With the [Pectra](https://ethereum.org/en/history/#pectra) upgrade, a validator can hold a stake of up to 2048 ETH. This is done by updating the validator withdrawal credentials to type 0x02. 
With 0x02 withdrawal credentials, it is possible to consolidate two or more validators into a single validator with a higher stake. + +Let's take a look at an example: Initially, validators A and B are both with 0x01 withdrawal credentials with 32 ETH. Let's say we want to consolidate the balance of validator B to validator A, so that the balance of validator A becomes 64 ETH. These are the steps: + +1. Update the withdrawal credentials of validator A to 0x02. You can do this using [Siren](./ui.md) or the [staking launchpad](https://launchpad.ethereum.org/en/). Select: + - source validator: validator A + - target validator: validator A + > Note: After the update, the withdrawal credential type 0x02 cannot be reverted to 0x01, unless the validator exits and makes a fresh deposit. + +2. Perform consolidation by selecting: + - source validator: validator B + - target validator: validator A + + and then execute the transaction. + + Depending on the exit queue and pending consolidations, the process could take from a day to weeks. The outcome is: + - validator A has 64 ETH + - validator B has 0 ETH (i.e., validator B has exited the beacon chain) + +The consolidation process can be repeated to consolidate more validators into validator A. + +It is important to note that there are some conditions required to perform consolidation, a few common ones are: + +- the **withdrawal address** of the source and target validators **must be the same**. +- the _target validator_ **must** have a withdrawal credential **type 0x02**. The source validator could have a 0x01 or 0x02 withdrawal credential. +- the source validator must be active for at least 256 epochs to be able to perform consolidation. + +Note that if a user were to send a consolidation transaction that does not meet the conditions, the transaction can still be accepted by the execution layer. However, the consolidation will fail once it reaches the consensus layer (where the checks are performed). 
Therefore, it is recommended to check that the conditions are fulfilled before sending a consolidation transaction. diff --git a/book/src/validator_management.md b/book/src/validator_management.md index 18abfb1538..3bfac37ac6 100644 --- a/book/src/validator_management.md +++ b/book/src/validator_management.md @@ -151,7 +151,7 @@ ensure their `secrets-dir` is organised as below: ### Manual configuration The automatic validator discovery process works out-of-the-box with validators -that are created using the `lighthouse account validator new` command. The +that are created using the `lighthouse account validator create` command. The details of this process are only interesting to those who are using keystores generated with another tool or have a non-standard requirements. diff --git a/book/src/validator_sweep.md b/book/src/validator_sweep.md index b707988e84..0755c06d51 100644 --- a/book/src/validator_sweep.md +++ b/book/src/validator_sweep.md @@ -5,6 +5,10 @@ After the [Capella](https://ethereum.org/en/history/#capella) upgrade on 12 - if a validator has a withdrawal credential type `0x00`, the rewards will continue to accumulate and will be locked in the beacon chain. - if a validator has a withdrawal credential type `0x01`, any rewards above 32ETH will be periodically withdrawn to the withdrawal address. This is also known as the "validator sweep", i.e., once the "validator sweep" reaches your validator's index, your rewards will be withdrawn to the withdrawal address. The validator sweep is automatic and it does not incur any fees to withdraw. +## Partial withdrawals via the execution layer + +With the [Pectra](https://ethereum.org/en/history/#pectra) upgrade, validators with 0x02 withdrawal credentials can partially withdraw staked funds via the execution layer by sending a transaction using the withdrawal address. You can withdraw down to a validator balance of 32 ETH. For example, if the validator balance is 40 ETH, you can withdraw up to 8 ETH. 
You can use [Siren](./ui.md) or the [staking launchpad](https://launchpad.ethereum.org/en/) to execute partial withdrawals. + ## FAQ 1. How to know if I have the withdrawal credentials type `0x00` or `0x01`? diff --git a/book/src/validator_voluntary_exit.md b/book/src/validator_voluntary_exit.md index 6261f2e267..c17c0f4fc4 100644 --- a/book/src/validator_voluntary_exit.md +++ b/book/src/validator_voluntary_exit.md @@ -45,7 +45,7 @@ WARNING: WARNING: THIS IS AN IRREVERSIBLE OPERATION -PLEASE VISIT https://lighthouse-book.sigmaprime.io/voluntary-exit.html +PLEASE VISIT https://lighthouse-book.sigmaprime.io/validator_voluntary_exit.html TO MAKE SURE YOU UNDERSTAND THE IMPLICATIONS OF A VOLUNTARY EXIT. Enter the exit phrase from the above URL to confirm the voluntary exit: @@ -58,6 +58,10 @@ Please keep your validator running till exit epoch Exit epoch in approximately 1920 secs ``` +## Exit via the execution layer + +The voluntary exit above is via the consensus layer. With the [Pectra](https://ethereum.org/en/history/#pectra) upgrade, validators with 0x01 and 0x02 withdrawal credentials can also exit their validators via the execution layer by sending a transaction using the withdrawal address. You can use [Siren](./ui.md) or the [staking launchpad](https://launchpad.ethereum.org/en/) to send an exit transaction. + ## Full withdrawal of staked fund After the [Capella](https://ethereum.org/en/history/#capella) upgrade on 12th April 2023, if a user initiates a voluntary exit, they will receive the full staked funds to the withdrawal address, provided that the validator has withdrawal credentials of type `0x01`. For more information on how fund withdrawal works, please visit [Ethereum.org](https://ethereum.org/en/staking/withdrawals/#how-do-withdrawals-work) website. 
diff --git a/scripts/tests/doppelganger_protection.sh b/scripts/tests/doppelganger_protection.sh index 80070a0791..8e2eaa623a 100755 --- a/scripts/tests/doppelganger_protection.sh +++ b/scripts/tests/doppelganger_protection.sh @@ -128,7 +128,7 @@ if [[ "$BEHAVIOR" == "success" ]]; then # Sleep three epochs, then make sure all validators were active in epoch 2. Use # `is_previous_epoch_target_attester` from epoch 3 for a complete view of epoch 2 inclusion. # - # See: https://lighthouse-book.sigmaprime.io/validator-inclusion.html + # See: https://lighthouse-book.sigmaprime.io/api_validator_inclusion.html echo "Waiting three epochs..." sleep $(( $SECONDS_PER_SLOT * 32 * 3 )) @@ -156,7 +156,7 @@ if [[ "$BEHAVIOR" == "success" ]]; then # Sleep two epochs, then make sure all validators were active in epoch 4. Use # `is_previous_epoch_target_attester` from epoch 5 for a complete view of epoch 4 inclusion. # - # See: https://lighthouse-book.sigmaprime.io/validator-inclusion.html + # See: https://lighthouse-book.sigmaprime.io/api_validator_inclusion.html echo "Waiting two more epochs..." sleep $(( $SECONDS_PER_SLOT * 32 * 2 )) for val in 0x*; do diff --git a/wordlist.txt b/wordlist.txt index 7adbfe9032..9feb07b67b 100644 --- a/wordlist.txt +++ b/wordlist.txt @@ -66,6 +66,7 @@ Nethermind NodeJS NullLogger PathBuf +Pectra PowerShell PPA Pre @@ -236,6 +237,7 @@ validators validator's vc virt +walkthrough webapp withdrawable yaml From 54f7bc5b2c153963084a4c6edb6c4acfb2317159 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 22 Apr 2025 09:21:03 +1000 Subject: [PATCH 12/13] Release v7.0.0 (#7288) New v7.0.0 release for Electra on mainnet. 
--- Cargo.lock | 8 ++++---- beacon_node/Cargo.toml | 2 +- boot_node/Cargo.toml | 2 +- common/lighthouse_version/src/lib.rs | 6 +++--- lcli/Cargo.toml | 2 +- lighthouse/Cargo.toml | 2 +- 6 files changed, 11 insertions(+), 11 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e4473348cd..9e15ce9a58 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -860,7 +860,7 @@ dependencies = [ [[package]] name = "beacon_node" -version = "7.0.0-beta.7" +version = "7.0.0" dependencies = [ "account_utils", "beacon_chain", @@ -1108,7 +1108,7 @@ dependencies = [ [[package]] name = "boot_node" -version = "7.0.0-beta.7" +version = "7.0.0" dependencies = [ "beacon_node", "bytes", @@ -4813,7 +4813,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lcli" -version = "7.0.0-beta.7" +version = "7.0.0" dependencies = [ "account_utils", "beacon_chain", @@ -5368,7 +5368,7 @@ dependencies = [ [[package]] name = "lighthouse" -version = "7.0.0-beta.7" +version = "7.0.0" dependencies = [ "account_manager", "account_utils", diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 7ab29d37db..79dad05886 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "beacon_node" -version = "7.0.0-beta.7" +version = "7.0.0" authors = [ "Paul Hauner ", "Age Manning "] edition = { workspace = true } diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index 15c99080d8..919e3976e7 100644 --- a/common/lighthouse_version/src/lib.rs +++ b/common/lighthouse_version/src/lib.rs @@ -17,8 +17,8 @@ pub const VERSION: &str = git_version!( // NOTE: using --match instead of --exclude for compatibility with old Git "--match=thiswillnevermatchlol" ], - prefix = "Lighthouse/v7.0.0-beta.7-", - fallback = "Lighthouse/v7.0.0-beta.7" + prefix = "Lighthouse/v7.0.0-", + fallback = "Lighthouse/v7.0.0" ); /// Returns the first eight characters of the latest commit hash for this build. 
@@ -54,7 +54,7 @@ pub fn version_with_platform() -> String { /// /// `1.5.1` pub fn version() -> &'static str { - "7.0.0-beta.7" + "7.0.0" } /// Returns the name of the current client running. diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index d9dd8c1e3e..639f2130a4 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lcli" description = "Lighthouse CLI (modeled after zcli)" -version = "7.0.0-beta.7" +version = "7.0.0" authors = ["Paul Hauner "] edition = { workspace = true } diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 384720cf73..50a80cbbe3 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lighthouse" -version = "7.0.0-beta.7" +version = "7.0.0" authors = ["Sigma Prime "] edition = { workspace = true } autotests = false From 9f4b0cdc28553270268d0098516c888fac563824 Mon Sep 17 00:00:00 2001 From: chonghe <44791194+chong-he@users.noreply.github.com> Date: Tue, 22 Apr 2025 16:46:31 +0800 Subject: [PATCH 13/13] Fix Kurtosis doppelganger CI (#7343) --- scripts/tests/doppelganger_protection.sh | 66 +++++++++++++++--------- 1 file changed, 42 insertions(+), 24 deletions(-) diff --git a/scripts/tests/doppelganger_protection.sh b/scripts/tests/doppelganger_protection.sh index 8e2eaa623a..86c9705ee4 100755 --- a/scripts/tests/doppelganger_protection.sh +++ b/scripts/tests/doppelganger_protection.sh @@ -74,18 +74,27 @@ if [[ "$BEHAVIOR" == "failure" ]]; then vc_1_keys_artifact_id="1-lighthouse-geth-$vc_1_range_start-$vc_1_range_end" service_name=vc-1-doppelganger - kurtosis service add \ - --files /validator_keys:$vc_1_keys_artifact_id,/testnet:el_cl_genesis_data \ - $ENCLAVE_NAME $service_name $LH_IMAGE_NAME -- lighthouse \ - vc \ - --debug-level info \ - --testnet-dir=/testnet \ - --validators-dir=/validator_keys/keys \ - --secrets-dir=/validator_keys/secrets \ - --init-slashing-protection \ - --beacon-nodes=http://$bn_2_url:$bn_2_port \ - --enable-doppelganger-protection 
\ - --suggested-fee-recipient 0x690B9A9E9aa1C9dB991C7721a92d351Db4FaC990 + kurtosis service add $ENCLAVE_NAME $service_name --json-service-config - << EOF + { + "image": "$LH_IMAGE_NAME", + "files": { + "/validator_keys": ["$vc_1_keys_artifact_id"], + "/testnet": ["el_cl_genesis_data"] + }, + "cmd": [ + "lighthouse", + "vc", + "--debug-level", "info", + "--testnet-dir=/testnet", + "--validators-dir=/validator_keys/keys", + "--secrets-dir=/validator_keys/secrets", + "--init-slashing-protection", + "--beacon-nodes=http://$bn_2_url:$bn_2_port", + "--enable-doppelganger-protection", + "--suggested-fee-recipient", "0x690B9A9E9aa1C9dB991C7721a92d351Db4FaC990" + ] + } +EOF # Check if doppelganger VC has stopped and exited. Exit code 1 means the check timed out and VC is still running. check_exit_cmd="until [ \$(get_service_status $service_name) != 'RUNNING' ]; do sleep 1; done" @@ -110,18 +119,27 @@ if [[ "$BEHAVIOR" == "success" ]]; then vc_4_keys_artifact_id="4-lighthouse-geth-$vc_4_range_start-$vc_4_range_end" service_name=vc-4 - kurtosis service add \ - --files /validator_keys:$vc_4_keys_artifact_id,/testnet:el_cl_genesis_data \ - $ENCLAVE_NAME $service_name $LH_IMAGE_NAME -- lighthouse \ - vc \ - --debug-level debug \ - --testnet-dir=/testnet \ - --validators-dir=/validator_keys/keys \ - --secrets-dir=/validator_keys/secrets \ - --init-slashing-protection \ - --beacon-nodes=http://$bn_2_url:$bn_2_port \ - --enable-doppelganger-protection \ - --suggested-fee-recipient 0x690B9A9E9aa1C9dB991C7721a92d351Db4FaC990 + kurtosis service add $ENCLAVE_NAME $service_name --json-service-config - << EOF + { + "image": "$LH_IMAGE_NAME", + "files": { + "/validator_keys": ["$vc_4_keys_artifact_id"], + "/testnet": ["el_cl_genesis_data"] + }, + "cmd": [ + "lighthouse", + "vc", + "--debug-level", "info", + "--testnet-dir=/testnet", + "--validators-dir=/validator_keys/keys", + "--secrets-dir=/validator_keys/secrets", + "--init-slashing-protection", + 
"--beacon-nodes=http://$bn_2_url:$bn_2_port", + "--enable-doppelganger-protection", + "--suggested-fee-recipient", "0x690B9A9E9aa1C9dB991C7721a92d351Db4FaC990" + ] + } +EOF doppelganger_failure=0